issue_numbers
listlengths 1
2
| version
stringclasses 20
values | base_commit
stringlengths 40
40
| patch
stringlengths 214
153k
| hints_text
stringlengths 0
66.9k
| created_at
stringlengths 20
20
| pull_number
int64 20
3.81k
| problem_statement
stringlengths 22
7.42k
| instance_id
stringlengths 18
20
| test_patch
stringlengths 288
262k
| repo
stringclasses 1
value | environment_setup_commit
stringclasses 21
values |
|---|---|---|---|---|---|---|---|---|---|---|---|
[
"3811"
] |
0.14
|
a24f0c0af8e1f4c6b7cc3a47c83eb6e4af88aca6
|
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -248,7 +248,11 @@ where
}
}
if curr_len > 0 {
+ trace!("partial headers; {} bytes so far", curr_len);
self.partial_len = Some(curr_len);
+ } else {
+ // 1xx gobled some bytes
+ self.partial_len = None;
}
}
}
|
Thanks for the sample code, I'll get this fixed today 🫡
|
2024-12-16T15:39:30Z
| 3,813
|
Intermittent panic in is_complete_fast
**Version**
1.5.1
**Platform**
`Darwin ghost.local 23.5.0 Darwin Kernel Version 23.5.0: Wed May 1 20:19:05 PDT 2024; root:xnu-10063.121.3~5/RELEASE_ARM64_T8112 arm64`
**Description**
Hyper client can panic when processing broken up 1xx HTTP1 responses.
When a server responds with `HTTP/1.1 100 Continue\r\nContent-Type: text/plain\r\nServer: BaseHTTP/0.6 Python/3.12.5\r\nDate: Mon, 16 Dec 2024 03:08:27 GMT\r\n\r\nThis is a sample text/plain document.\n\nThis is not an HTML document.\n\n`, it's possible for hyper to first read `HTTP/1.1 100 Continue\r\nContent-Type: text/plain\r\nServer: BaseHTTP/0.6 Python/3.12.5\r\nDate: Mon, 16 Dec 2024 03:08:27 GMT\r\n\r\n`, followed by `This is a sample text/plain document.\n\nThis is not an HTML document.\n\n`.
This triggers a panic in [the code introduced in #3764](https://github.com/hyperium/hyper/pull/3764/files#diff-aa04de01d67bf1f61d03ef30830c744fa331c7918335393ace8c6137fa4d84d6R94), since the prev_length value stored after the first response is longer than the length of the second response.
This has been hit independently by both deno and Servo upon upgrading to hyper 1.5.1, since there are web-platform-tests that exercise 1xx responses: https://github.com/web-platform-tests/wpt/blob/master/fetch/security/1xx-response.any.js
|
hyperium__hyper-3813
|
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2908,6 +2908,63 @@ mod conn {
assert_eq!(vec, b"bar=foo");
}
+ #[tokio::test]
+ async fn client_100_then_http09() {
+ let _ = ::pretty_env_logger::try_init();
+
+ let server = TcpListener::bind("127.0.0.1:0").unwrap();
+ let addr = server.local_addr().unwrap();
+
+ thread::spawn(move || {
+ let mut sock = server.accept().unwrap().0;
+ sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap();
+ sock.set_write_timeout(Some(Duration::from_secs(5)))
+ .unwrap();
+ let mut buf = [0; 4096];
+ sock.read(&mut buf).expect("read 1");
+ sock.write_all(
+ b"\
+ HTTP/1.1 100 Continue\r\n\
+ Content-Type: text/plain\r\n\
+ Server: BaseHTTP/0.6 Python/3.12.5\r\n\
+ Date: Mon, 16 Dec 2024 03:08:27 GMT\r\n\
+ ",
+ )
+ .unwrap();
+ // That it's separate writes is important to this test
+ thread::sleep(Duration::from_millis(50));
+ sock.write_all(
+ b"\
+ \r\n\
+ ",
+ )
+ .expect("write 2");
+ thread::sleep(Duration::from_millis(50));
+ sock.write_all(
+ b"\
+ This is a sample text/plain document, without final headers.\
+ \n\n\
+ ",
+ )
+ .expect("write 3");
+ });
+
+ let tcp = tcp_connect(&addr).await.unwrap();
+
+ let (mut client, conn) = conn::Builder::new()
+ .http09_responses(true)
+ .handshake(tcp)
+ .await
+ .unwrap();
+
+ tokio::spawn(async move {
+ let _ = conn.await;
+ });
+
+ let req = Request::builder().uri("/a").body(Body::empty()).unwrap();
+ let _res = client.send_request(req).await.expect("send_request");
+ }
+
#[tokio::test]
async fn http2_detect_conn_eof() {
use futures_util::future;
|
hyperium/hyper
|
a24f0c0af8e1f4c6b7cc3a47c83eb6e4af88aca6
|
[
"3811"
] |
1.5
|
a3bda62da36060a38638fba983a0c07c0ab6259d
|
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -205,7 +205,11 @@ where
return Poll::Ready(Err(crate::Error::new_too_large()));
}
if curr_len > 0 {
+ trace!("partial headers; {} bytes so far", curr_len);
self.partial_len = Some(curr_len);
+ } else {
+ // 1xx gobled some bytes
+ self.partial_len = None;
}
}
}
|
Thanks for the sample code, I'll get this fixed today 🫡
|
2024-12-16T14:50:50Z
| 3,812
|
Intermittent panic in is_complete_fast
**Version**
1.5.1
**Platform**
`Darwin ghost.local 23.5.0 Darwin Kernel Version 23.5.0: Wed May 1 20:19:05 PDT 2024; root:xnu-10063.121.3~5/RELEASE_ARM64_T8112 arm64`
**Description**
Hyper client can panic when processing broken up 1xx HTTP1 responses.
When a server responds with `HTTP/1.1 100 Continue\r\nContent-Type: text/plain\r\nServer: BaseHTTP/0.6 Python/3.12.5\r\nDate: Mon, 16 Dec 2024 03:08:27 GMT\r\n\r\nThis is a sample text/plain document.\n\nThis is not an HTML document.\n\n`, it's possible for hyper to first read `HTTP/1.1 100 Continue\r\nContent-Type: text/plain\r\nServer: BaseHTTP/0.6 Python/3.12.5\r\nDate: Mon, 16 Dec 2024 03:08:27 GMT\r\n\r\n`, followed by `This is a sample text/plain document.\n\nThis is not an HTML document.\n\n`.
This triggers a panic in [the code introduced in #3764](https://github.com/hyperium/hyper/pull/3764/files#diff-aa04de01d67bf1f61d03ef30830c744fa331c7918335393ace8c6137fa4d84d6R94), since the prev_length value stored after the first response is longer than the length of the second response.
This has been hit independently by both deno and Servo upon upgrading to hyper 1.5.1, since there are web-platform-tests that exercise 1xx responses: https://github.com/web-platform-tests/wpt/blob/master/fetch/security/1xx-response.any.js
|
hyperium__hyper-3812
|
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2041,6 +2041,63 @@ mod conn {
assert_eq!(vec, b"bar=foo");
}
+ #[tokio::test]
+ async fn client_100_then_http09() {
+ let (server, addr) = setup_std_test_server();
+
+ thread::spawn(move || {
+ let mut sock = server.accept().unwrap().0;
+ sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap();
+ sock.set_write_timeout(Some(Duration::from_secs(5)))
+ .unwrap();
+ let mut buf = [0; 4096];
+ sock.read(&mut buf).expect("read 1");
+ sock.write_all(
+ b"\
+ HTTP/1.1 100 Continue\r\n\
+ Content-Type: text/plain\r\n\
+ Server: BaseHTTP/0.6 Python/3.12.5\r\n\
+ Date: Mon, 16 Dec 2024 03:08:27 GMT\r\n\
+ ",
+ )
+ .unwrap();
+ // That it's separate writes is important to this test
+ thread::sleep(Duration::from_millis(50));
+ sock.write_all(
+ b"\
+ \r\n\
+ ",
+ )
+ .expect("write 2");
+ thread::sleep(Duration::from_millis(50));
+ sock.write_all(
+ b"\
+ This is a sample text/plain document, without final headers.\
+ \n\n\
+ ",
+ )
+ .expect("write 3");
+ });
+
+ let tcp = tcp_connect(&addr).await.unwrap();
+
+ let (mut client, conn) = conn::http1::Builder::new()
+ .http09_responses(true)
+ .handshake(tcp)
+ .await
+ .unwrap();
+
+ tokio::spawn(async move {
+ let _ = conn.await;
+ });
+
+ let req = Request::builder()
+ .uri("/a")
+ .body(Empty::<Bytes>::new())
+ .unwrap();
+ let _res = client.send_request(req).await.expect("send_request");
+ }
+
#[tokio::test]
async fn test_try_send_request() {
use std::future::Future;
|
hyperium/hyper
|
a3bda62da36060a38638fba983a0c07c0ab6259d
|
[
"3790"
] |
1.5
|
eaf2267cdc148604469fb09da22646f31710107a
|
diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml
--- a/.github/workflows/CI.yml
+++ b/.github/workflows/CI.yml
@@ -279,7 +279,7 @@ jobs:
- name: Install Rust
uses: dtolnay/rust-toolchain@master
with:
- toolchain: nightly-2024-05-01 # Compatible version for cargo-check-external-types
+ toolchain: nightly-2024-05-01 # Compatible version for cargo-check-external-types
- name: Install cargo-check-external-types
uses: taiki-e/cache-cargo-install-action@v2
diff --git a/examples/hello-http2.rs b/examples/hello-http2.rs
--- a/examples/hello-http2.rs
+++ b/examples/hello-http2.rs
@@ -1,13 +1,14 @@
#![deny(warnings)]
-
-use std::convert::Infallible;
-use std::net::SocketAddr;
+#![allow(unused_imports)]
use http_body_util::Full;
use hyper::body::Bytes;
+#[cfg(feature = "server")]
use hyper::server::conn::http2;
use hyper::service::service_fn;
use hyper::{Request, Response};
+use std::convert::Infallible;
+use std::net::SocketAddr;
use tokio::net::TcpListener;
// This would normally come from the `hyper-util` crate, but we can't depend
diff --git a/examples/hello-http2.rs b/examples/hello-http2.rs
--- a/examples/hello-http2.rs
+++ b/examples/hello-http2.rs
@@ -18,6 +19,7 @@ use support::TokioIo;
// An async function that consumes a request, does nothing with it and returns a
// response.
+#[cfg(feature = "server")]
async fn hello(_: Request<hyper::body::Incoming>) -> Result<Response<Full<Bytes>>, Infallible> {
Ok(Response::new(Full::new(Bytes::from("Hello, World!"))))
}
diff --git a/examples/hello-http2.rs b/examples/hello-http2.rs
--- a/examples/hello-http2.rs
+++ b/examples/hello-http2.rs
@@ -40,6 +42,7 @@ where
}
}
+#[cfg(feature = "server")]
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
pretty_env_logger::init();
diff --git a/examples/hello-http2.rs b/examples/hello-http2.rs
--- a/examples/hello-http2.rs
+++ b/examples/hello-http2.rs
@@ -79,3 +82,8 @@ async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
});
}
}
+
+#[cfg(not(feature = "server"))]
+fn main() {
+ panic!("This example requires the 'server' feature to be enabled");
+}
|
2024-11-28T16:18:04Z
| 3,799
|
Make client tests work without server feature
Running `cargo test --features http1,http2,client` fails to compile because it uses some types only available when the `server` feature is also enabled. Fixing this would just require adding or adjusting `#[cfg(feature = ...)]` attributes, paying attention to the compiler error messages.
For CI, the `ffi` job can be adjusted to no longer enable `server` when running the tests.
|
hyperium__hyper-3799
|
diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml
--- a/.github/workflows/CI.yml
+++ b/.github/workflows/CI.yml
@@ -195,7 +195,7 @@ jobs:
- name: Run FFI unit tests
env:
RUSTFLAGS: --cfg hyper_unstable_ffi
- run: cargo test --features server,client,http1,http2,ffi --lib
+ run: cargo test --features client,http1,http2,ffi --lib
ffi-header:
name: Verify hyper.h is up to date
diff --git a/src/proto/h1/encode.rs b/src/proto/h1/encode.rs
--- a/src/proto/h1/encode.rs
+++ b/src/proto/h1/encode.rs
@@ -506,6 +506,7 @@ mod tests {
assert!(encoder.end::<()>().unwrap().is_none());
}
+ #[cfg(feature = "server")]
#[test]
fn eof() {
let mut encoder = Encoder::close_delimited();
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1646,6 +1646,7 @@ mod tests {
use super::*;
+ #[cfg(feature = "server")]
#[test]
fn test_parse_request() {
let _ = pretty_env_logger::try_init();
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1701,6 +1702,7 @@ mod tests {
assert_eq!(msg.head.headers["Content-Length"], "0");
}
+ #[cfg(feature = "server")]
#[test]
fn test_parse_request_errors() {
let mut raw = BytesMut::from("GET htt:p// HTTP/1.1\r\nHost: hyper.rs\r\n\r\n");
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1814,6 +1816,7 @@ mod tests {
Client::parse(&mut raw, ctx).unwrap_err();
}
+ #[cfg(feature = "server")]
#[test]
fn test_parse_preserve_header_case_in_request() {
let mut raw =
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1852,6 +1855,7 @@ mod tests {
);
}
+ #[cfg(feature = "server")]
#[test]
fn test_decoder_request() {
fn parse(s: &str) -> ParsedMessage<RequestLine> {
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -2462,9 +2466,11 @@ mod tests {
Encode {
head: &mut head,
body: Some(BodyLength::Known(10)),
+ #[cfg(feature = "server")]
keep_alive: true,
req_method: &mut None,
title_case_headers: true,
+ #[cfg(feature = "server")]
date_header: true,
},
&mut vec,
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -2494,9 +2500,11 @@ mod tests {
Encode {
head: &mut head,
body: Some(BodyLength::Known(10)),
+ #[cfg(feature = "server")]
keep_alive: true,
req_method: &mut None,
title_case_headers: false,
+ #[cfg(feature = "server")]
date_header: true,
},
&mut vec,
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -2529,9 +2537,11 @@ mod tests {
Encode {
head: &mut head,
body: Some(BodyLength::Known(10)),
+ #[cfg(feature = "server")]
keep_alive: true,
req_method: &mut None,
title_case_headers: true,
+ #[cfg(feature = "server")]
date_header: true,
},
&mut vec,
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -2545,6 +2555,7 @@ mod tests {
);
}
+ #[cfg(feature = "server")]
#[test]
fn test_server_encode_connect_method() {
let mut head = MessageHead::default();
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -2566,6 +2577,7 @@ mod tests {
assert!(encoder.is_last());
}
+ #[cfg(feature = "server")]
#[test]
fn test_server_response_encode_title_case() {
use crate::proto::BodyLength;
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -2599,6 +2611,7 @@ mod tests {
assert_eq!(&vec[..expected_response.len()], &expected_response[..]);
}
+ #[cfg(feature = "server")]
#[test]
fn test_server_response_encode_orig_case() {
use crate::proto::BodyLength;
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -2634,6 +2647,7 @@ mod tests {
assert_eq!(&vec[..expected_response.len()], &expected_response[..]);
}
+ #[cfg(feature = "server")]
#[test]
fn test_server_response_encode_orig_and_title_case() {
use crate::proto::BodyLength;
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -2670,6 +2684,7 @@ mod tests {
assert_eq!(&vec[..expected_response.len()], &expected_response[..]);
}
+ #[cfg(feature = "server")]
#[test]
fn test_disabled_date_header() {
use crate::proto::BodyLength;
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -2729,6 +2744,7 @@ mod tests {
assert_eq!(parsed.head.headers["server"], "hello\tworld");
}
+ #[cfg(feature = "server")]
#[test]
fn parse_too_large_headers() {
fn gen_req_with_headers(num: usize) -> String {
|
hyperium/hyper
|
a3bda62da36060a38638fba983a0c07c0ab6259d
|
|
[
"3786"
] |
1.5
|
3b7375a16f23fb9a0975304ba6616af9323f3f13
|
diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml
--- a/.github/workflows/CI.yml
+++ b/.github/workflows/CI.yml
@@ -23,6 +23,7 @@ jobs:
- features
- ffi
- ffi-header
+ - ffi-cargo-c
- doc
- check-external-types
- udeps
diff --git a/Cargo.toml b/Cargo.toml
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -85,6 +85,7 @@ server = ["dep:httpdate", "dep:pin-project-lite", "dep:smallvec"]
# C-API support (currently unstable (no semver))
ffi = ["dep:http-body-util", "futures-util?/alloc"]
+capi = []
# Utilize tracing (currently unstable)
tracing = ["dep:tracing"]
diff --git a/Cargo.toml b/Cargo.toml
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -106,6 +107,13 @@ rustdoc-args = ["--cfg", "hyper_unstable_ffi", "--cfg", "hyper_unstable_tracing"
[package.metadata.playground]
features = ["full"]
+[package.metadata.capi.header]
+generation = false
+subdirectory = false
+
+[package.metadata.capi.install.include]
+asset = [{ from="capi/include/hyper.h" }]
+
[profile.release]
codegen-units = 1
incremental = false
diff --git a/capi/README.md b/capi/README.md
--- a/capi/README.md
+++ b/capi/README.md
@@ -15,3 +15,11 @@ The C API is part of the Rust library, but isn't compiled by default. Using `car
```
RUSTFLAGS="--cfg hyper_unstable_ffi" cargo rustc --features client,http1,http2,ffi --crate-type cdylib
```
+
+### (Optional) With `cargo-c`
+
+If using `cargo-c`, you can build and install a shared library with the following command:
+
+```
+RUSTFLAGS="--cfg hyper_unstable_ffi" cargo cbuild --features client,http1,http2,ffi --release
+```
|
For what it's worth I've been happy with cargo-c and the upstream maintainers have been very helpful. In rustls-ffi I'm moving towards replacing the old `Makefile` approach (as linked to from the current curl docs) and instead recommending cargo-c as the blessed path for building the library (static or dynamic).
I've installed `cargo-c` locally and tried it out, seems like the only thing that is missing is adding a `capi` feature, and perhaps some options to use the pre-existing header file, and... that's it?
|
2024-11-19T21:40:38Z
| 3,787
|
cargo-c integration and dynamic linking
**Is your feature request related to a problem? Please describe.**
I'm trying to get the FFI interface of hyper up to speed with rustls-ffi, for integration in an operating system.
Compare the current [curl hyper documentation](https://github.com/curl/curl/blob/cb2ae6e8a8614a34bbe7f77f0540cd27aa890b59/docs/internals/HYPER.md) (which wants you to manually build and link cdylib objects) to the current [curl rustls documentation](https://github.com/curl/curl/blob/cb2ae6e8a8614a34bbe7f77f0540cd27aa890b59/docs/RUSTLS.md) (~~that generates and installs a .so file, headers and and pkg-config integration for you~~ @cpu has pointed out this document is not using cargo-c, the rustls-ffi pull request linked below is a better resource to look into).
Most of the work to get rustls to this point was done in this pull request: https://github.com/rustls/rustls-ffi/pull/274
**Describe the solution you'd like**
I'd like hyper to integrate with the pkg-config ecosystem that Linux distributions build upon, preferably through cargo-c.
**Describe alternatives you've considered**
The curl hyper integration is currently [planned for removal](https://github.com/curl/curl/blob/7b12c36ca972d9e9a14088cdd88232385e619d44/docs/DEPRECATE.md#Hyper) in January 2025.
**Additional context**
There may be some additional context in https://github.com/rustls/rustls-ffi/issues/345.
There was development work done on cargo-c itself specifically for rustls-ffi that you may want to consider, in case you have concerns about dynamic linking and ABI stability: https://github.com/lu-zero/cargo-c/issues/345
|
hyperium__hyper-3787
|
diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml
--- a/.github/workflows/CI.yml
+++ b/.github/workflows/CI.yml
@@ -226,6 +227,31 @@ jobs:
- name: Ensure that hyper.h is up to date
run: ./capi/gen_header.sh --verify
+ ffi-cargo-c:
+ name: Test cargo-c support (FFI)
+ needs: [style]
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+
+ - name: Install Rust
+ uses: dtolnay/rust-toolchain@stable
+
+ - uses: Swatinem/rust-cache@v2
+
+ - name: Install cargo-c
+ env:
+ LINK: https://github.com/lu-zero/cargo-c/releases/latest/download
+ CARGO_C_FILE: cargo-c-x86_64-unknown-linux-musl.tar.gz
+ run: |
+ curl -L $LINK/$CARGO_C_FILE | tar xz -C ~/.cargo/bin
+
+ - name: Build with cargo-c
+ env:
+ RUSTFLAGS: --cfg hyper_unstable_ffi
+ run: cargo cbuild --features client,http1,http2,ffi
+
doc:
name: Build docs
needs: [style, test]
|
hyperium/hyper
|
a3bda62da36060a38638fba983a0c07c0ab6259d
|
[
"3720"
] |
1.4
|
4c4de90a7e2aa0629b9c167a482399e28ccb0975
|
diff --git a/benches/server.rs b/benches/server.rs
--- a/benches/server.rs
+++ b/benches/server.rs
@@ -72,7 +72,7 @@ macro_rules! bench_server {
tcp.write_all(b"GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n")
.unwrap();
let mut buf = Vec::new();
- tcp.read_to_end(&mut buf).unwrap()
+ tcp.read_to_end(&mut buf).unwrap() - "connection: close\r\n".len()
};
let mut tcp = TcpStream::connect(addr).unwrap();
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -21,7 +21,7 @@ use super::{Decoder, Encode, EncodedBuf, Encoder, Http1Transaction, ParseContext
use crate::body::DecodedLength;
#[cfg(feature = "server")]
use crate::common::time::Time;
-use crate::headers::connection_keep_alive;
+use crate::headers;
use crate::proto::{BodyLength, MessageHead};
#[cfg(feature = "server")]
use crate::rt::Sleep;
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -657,7 +657,7 @@ where
let outgoing_is_keep_alive = head
.headers
.get(CONNECTION)
- .map_or(false, connection_keep_alive);
+ .map_or(false, headers::connection_keep_alive);
if !outgoing_is_keep_alive {
match head.version {
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -680,12 +680,21 @@ where
// If we know the remote speaks an older version, we try to fix up any messages
// to work with our older peer.
fn enforce_version(&mut self, head: &mut MessageHead<T::Outgoing>) {
- if let Version::HTTP_10 = self.state.version {
- // Fixes response or connection when keep-alive header is not present
- self.fix_keep_alive(head);
- // If the remote only knows HTTP/1.0, we should force ourselves
- // to do only speak HTTP/1.0 as well.
- head.version = Version::HTTP_10;
+ match self.state.version {
+ Version::HTTP_10 => {
+ // Fixes response or connection when keep-alive header is not present
+ self.fix_keep_alive(head);
+ // If the remote only knows HTTP/1.0, we should force ourselves
+ // to do only speak HTTP/1.0 as well.
+ head.version = Version::HTTP_10;
+ }
+ Version::HTTP_11 => {
+ if let KA::Disabled = self.state.keep_alive.status() {
+ head.headers
+ .insert(CONNECTION, HeaderValue::from_static("close"));
+ }
+ }
+ _ => (),
}
// If the remote speaks HTTP/1.1, then it *should* be fine with
// both HTTP/1.0 and HTTP/1.1 from us. So again, we just let
|
Thanks for the report! I could have sworn we did this, but then tweaked the test and you're right, not done. PR is up at #3725.
Thank you @seanmonstar!
Reopened this until the PR #3725 gets merged.
|
2024-08-02T20:50:20Z
| 3,725
|
No `Connection: close` on HTTP1 Connection Drain
When HTTP1 connection draining is activated, the Connection: close header is not attached to responses sent to active connections. This prevents active clients from realizing that the server is requesting that the connection be closed and prevents graceful draining of HTTP1 connections.
|
hyperium__hyper-3725
|
diff --git a/benches/pipeline.rs b/benches/pipeline.rs
--- a/benches/pipeline.rs
+++ b/benches/pipeline.rs
@@ -76,7 +76,7 @@ fn hello_world_16(b: &mut test::Bencher) {
tcp.write_all(b"GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n")
.unwrap();
let mut buf = Vec::new();
- tcp.read_to_end(&mut buf).unwrap()
+ tcp.read_to_end(&mut buf).unwrap() - "connection: close\r\n".len()
} * PIPELINED_REQUESTS;
let mut tcp = TcpStream::connect(addr).unwrap();
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1140,6 +1140,8 @@ fn pipeline_enabled() {
assert_eq!(s(lines.next().unwrap()), "HTTP/1.1 200 OK\r");
assert_eq!(s(lines.next().unwrap()), "content-length: 12\r");
+ // close because the last request said to close
+ assert_eq!(s(lines.next().unwrap()), "connection: close\r");
lines.next().unwrap(); // Date
assert_eq!(s(lines.next().unwrap()), "\r");
assert_eq!(s(lines.next().unwrap()), "Hello World");
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1181,7 +1183,7 @@ fn http_11_uri_too_long() {
let mut req = connect(server.addr());
req.write_all(request_line.as_bytes()).unwrap();
- let expected = "HTTP/1.1 414 URI Too Long\r\ncontent-length: 0\r\n";
+ let expected = "HTTP/1.1 414 URI Too Long\r\nconnection: close\r\ncontent-length: 0\r\n";
let mut buf = [0; 256];
let n = req.read(&mut buf).unwrap();
assert!(n >= expected.len(), "read: {:?} >= {:?}", n, expected.len());
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1208,6 +1210,12 @@ async fn disable_keep_alive_mid_request() {
"should receive OK response, but buf: {:?}",
buf,
);
+ let sbuf = s(&buf);
+ assert!(
+ sbuf.contains("connection: close\r\n"),
+ "response should have sent close: {:?}",
+ sbuf,
+ );
});
let (socket, _) = listener.accept().await.unwrap();
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2366,7 +2374,7 @@ fn streaming_body() {
buf.starts_with(b"HTTP/1.1 200 OK\r\n"),
"response is 200 OK"
);
- assert_eq!(buf.len(), 100_789, "full streamed body read");
+ assert_eq!(buf.len(), 100_808, "full streamed body read");
}
#[test]
|
hyperium/hyper
|
4c4de90a7e2aa0629b9c167a482399e28ccb0975
|
[
"3673"
] |
1.3
|
aa7ff605da3b706e855f9633b8dddeb9463217d4
|
diff --git a/src/client/conn/http1.rs b/src/client/conn/http1.rs
--- a/src/client/conn/http1.rs
+++ b/src/client/conn/http1.rs
@@ -12,7 +12,7 @@ use futures_util::ready;
use http::{Request, Response};
use httparse::ParserConfig;
-use super::super::dispatch;
+use super::super::dispatch::{self, TrySendError};
use crate::body::{Body, Incoming as IncomingBody};
use crate::proto;
diff --git a/src/client/conn/http1.rs b/src/client/conn/http1.rs
--- a/src/client/conn/http1.rs
+++ b/src/client/conn/http1.rs
@@ -200,33 +200,38 @@ where
}
}
- /*
- pub(super) fn send_request_retryable(
+ /// Sends a `Request` on the associated connection.
+ ///
+ /// Returns a future that if successful, yields the `Response`.
+ ///
+ /// # Error
+ ///
+ /// If there was an error before trying to serialize the request to the
+ /// connection, the message will be returned as part of this error.
+ pub fn try_send_request(
&mut self,
req: Request<B>,
- ) -> impl Future<Output = Result<Response<Body>, (crate::Error, Option<Request<B>>)>> + Unpin
- where
- B: Send,
- {
- match self.dispatch.try_send(req) {
- Ok(rx) => {
- Either::Left(rx.then(move |res| {
- match res {
- Ok(Ok(res)) => future::ok(res),
- Ok(Err(err)) => future::err(err),
- // this is definite bug if it happens, but it shouldn't happen!
- Err(_) => panic!("dispatch dropped without returning error"),
- }
- }))
- }
- Err(req) => {
- debug!("connection was not ready");
- let err = crate::Error::new_canceled().with("connection was not ready");
- Either::Right(future::err((err, Some(req))))
+ ) -> impl Future<Output = Result<Response<IncomingBody>, TrySendError<Request<B>>>> {
+ let sent = self.dispatch.try_send(req);
+ async move {
+ match sent {
+ Ok(rx) => match rx.await {
+ Ok(Ok(res)) => Ok(res),
+ Ok(Err(err)) => Err(err),
+ // this is definite bug if it happens, but it shouldn't happen!
+ Err(_) => panic!("dispatch dropped without returning error"),
+ },
+ Err(req) => {
+ debug!("connection was not ready");
+ let error = crate::Error::new_canceled().with("connection was not ready");
+ Err(TrySendError {
+ error,
+ message: Some(req),
+ })
+ }
}
}
}
- */
}
impl<B> fmt::Debug for SendRequest<B> {
diff --git a/src/client/conn/http2.rs b/src/client/conn/http2.rs
--- a/src/client/conn/http2.rs
+++ b/src/client/conn/http2.rs
@@ -13,7 +13,7 @@ use crate::rt::{Read, Write};
use futures_util::ready;
use http::{Request, Response};
-use super::super::dispatch;
+use super::super::dispatch::{self, TrySendError};
use crate::body::{Body, Incoming as IncomingBody};
use crate::common::time::Time;
use crate::proto;
diff --git a/src/client/conn/http2.rs b/src/client/conn/http2.rs
--- a/src/client/conn/http2.rs
+++ b/src/client/conn/http2.rs
@@ -152,33 +152,38 @@ where
}
}
- /*
- pub(super) fn send_request_retryable(
+ /// Sends a `Request` on the associated connection.
+ ///
+ /// Returns a future that if successful, yields the `Response`.
+ ///
+ /// # Error
+ ///
+ /// If there was an error before trying to serialize the request to the
+ /// connection, the message will be returned as part of this error.
+ pub fn try_send_request(
&mut self,
req: Request<B>,
- ) -> impl Future<Output = Result<Response<Body>, (crate::Error, Option<Request<B>>)>> + Unpin
- where
- B: Send,
- {
- match self.dispatch.try_send(req) {
- Ok(rx) => {
- Either::Left(rx.then(move |res| {
- match res {
- Ok(Ok(res)) => future::ok(res),
- Ok(Err(err)) => future::err(err),
- // this is definite bug if it happens, but it shouldn't happen!
- Err(_) => panic!("dispatch dropped without returning error"),
- }
- }))
- }
- Err(req) => {
- debug!("connection was not ready");
- let err = crate::Error::new_canceled().with("connection was not ready");
- Either::Right(future::err((err, Some(req))))
+ ) -> impl Future<Output = Result<Response<IncomingBody>, TrySendError<Request<B>>>> {
+ let sent = self.dispatch.try_send(req);
+ async move {
+ match sent {
+ Ok(rx) => match rx.await {
+ Ok(Ok(res)) => Ok(res),
+ Ok(Err(err)) => Err(err),
+ // this is definite bug if it happens, but it shouldn't happen!
+ Err(_) => panic!("dispatch dropped without returning error"),
+ },
+ Err(req) => {
+ debug!("connection was not ready");
+ let error = crate::Error::new_canceled().with("connection was not ready");
+ Err(TrySendError {
+ error,
+ message: Some(req),
+ })
+ }
}
}
}
- */
}
impl<B> fmt::Debug for SendRequest<B> {
diff --git a/src/client/conn/mod.rs b/src/client/conn/mod.rs
--- a/src/client/conn/mod.rs
+++ b/src/client/conn/mod.rs
@@ -18,3 +18,5 @@
pub mod http1;
#[cfg(feature = "http2")]
pub mod http2;
+
+pub use super::dispatch::TrySendError;
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -210,17 +220,17 @@ struct Envelope<T, U>(Option<(T, Callback<T, U>)>);
impl<T, U> Drop for Envelope<T, U> {
fn drop(&mut self) {
if let Some((val, cb)) = self.0.take() {
- cb.send(Err((
- crate::Error::new_canceled().with("connection closed"),
- Some(val),
- )));
+ cb.send(Err(TrySendError {
+ error: crate::Error::new_canceled().with("connection closed"),
+ message: Some(val),
+ }));
}
}
}
pub(crate) enum Callback<T, U> {
#[allow(unused)]
- Retry(Option<oneshot::Sender<Result<U, (crate::Error, Option<T>)>>>),
+ Retry(Option<oneshot::Sender<Result<U, TrySendError<T>>>>),
NoRetry(Option<oneshot::Sender<Result<U, crate::Error>>>),
}
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -229,7 +239,10 @@ impl<T, U> Drop for Callback<T, U> {
match self {
Callback::Retry(tx) => {
if let Some(tx) = tx.take() {
- let _ = tx.send(Err((dispatch_gone(), None)));
+ let _ = tx.send(Err(TrySendError {
+ error: dispatch_gone(),
+ message: None,
+ }));
}
}
Callback::NoRetry(tx) => {
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -269,18 +282,34 @@ impl<T, U> Callback<T, U> {
}
}
- pub(crate) fn send(mut self, val: Result<U, (crate::Error, Option<T>)>) {
+ pub(crate) fn send(mut self, val: Result<U, TrySendError<T>>) {
match self {
Callback::Retry(ref mut tx) => {
let _ = tx.take().unwrap().send(val);
}
Callback::NoRetry(ref mut tx) => {
- let _ = tx.take().unwrap().send(val.map_err(|e| e.0));
+ let _ = tx.take().unwrap().send(val.map_err(|e| e.error));
}
}
}
}
+impl<T> TrySendError<T> {
+ /// Take the message from this error.
+ ///
+ /// The message will not always have been recovered. If an error occurs
+ /// after the message has been serialized onto the connection, it will not
+ /// be available here.
+ pub fn take_message(&mut self) -> Option<T> {
+ self.message.take()
+ }
+
+ /// Consumes this to return the inner error.
+ pub fn into_error(self) -> crate::Error {
+ self.error
+ }
+}
+
#[cfg(feature = "http2")]
pin_project! {
pub struct SendWhen<B>
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -325,8 +354,8 @@ where
trace!("send_when canceled");
Poll::Ready(())
}
- Poll::Ready(Err(err)) => {
- call_back.send(Err(err));
+ Poll::Ready(Err((error, message))) => {
+ call_back.send(Err(TrySendError { error, message }));
Poll::Ready(())
}
}
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -13,6 +13,8 @@ use http::Request;
use super::{Http1Transaction, Wants};
use crate::body::{Body, DecodedLength, Incoming as IncomingBody};
+#[cfg(feature = "client")]
+use crate::client::dispatch::TrySendError;
use crate::common::task;
use crate::proto::{BodyLength, Conn, Dispatched, MessageHead, RequestHead};
use crate::upgrade::OnUpgrade;
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -655,7 +657,10 @@ cfg_client! {
}
Err(err) => {
if let Some(cb) = self.callback.take() {
- cb.send(Err((err, None)));
+ cb.send(Err(TrySendError {
+ error: err,
+ message: None,
+ }));
Ok(())
} else if !self.rx_closed {
self.rx.close();
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -663,7 +668,10 @@ cfg_client! {
trace!("canceling queued request with connection error: {}", err);
// in this case, the message was never even started, so it's safe to tell
// the user that the request was completely canceled
- cb.send(Err((crate::Error::new_canceled().with(err), Some(req))));
+ cb.send(Err(TrySendError {
+ error: crate::Error::new_canceled().with(err),
+ message: Some(req),
+ }));
Ok(())
} else {
Err(err)
diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs
--- a/src/proto/h2/client.rs
+++ b/src/proto/h2/client.rs
@@ -22,7 +22,7 @@ use pin_project_lite::pin_project;
use super::ping::{Ponger, Recorder};
use super::{ping, H2Upgraded, PipeToSendStream, SendBuf};
use crate::body::{Body, Incoming as IncomingBody};
-use crate::client::dispatch::{Callback, SendWhen};
+use crate::client::dispatch::{Callback, SendWhen, TrySendError};
use crate::common::io::Compat;
use crate::common::time::Time;
use crate::ext::Protocol;
diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs
--- a/src/proto/h2/client.rs
+++ b/src/proto/h2/client.rs
@@ -662,10 +662,10 @@ where
.map_or(false, |len| len != 0)
{
warn!("h2 connect request with non-zero body not supported");
- cb.send(Err((
- crate::Error::new_h2(h2::Reason::INTERNAL_ERROR.into()),
- None,
- )));
+ cb.send(Err(TrySendError {
+ error: crate::Error::new_h2(h2::Reason::INTERNAL_ERROR.into()),
+ message: None,
+ }));
continue;
}
diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs
--- a/src/proto/h2/client.rs
+++ b/src/proto/h2/client.rs
@@ -677,7 +677,10 @@ where
Ok(ok) => ok,
Err(err) => {
debug!("client send request error: {}", err);
- cb.send(Err((crate::Error::new_h2(err), None)));
+ cb.send(Err(TrySendError {
+ error: crate::Error::new_h2(err),
+ message: None,
+ }));
continue;
}
};
diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs
--- a/src/proto/h2/client.rs
+++ b/src/proto/h2/client.rs
@@ -702,7 +705,10 @@ where
}
Poll::Ready(Ok(())) => (),
Poll::Ready(Err(err)) => {
- f.cb.send(Err((crate::Error::new_h2(err), None)));
+ f.cb.send(Err(TrySendError {
+ error: crate::Error::new_h2(err),
+ message: None,
+ }));
continue;
}
}
|
i found the error msg from the drop func,just as follow
if Envelope still has req, it means req never send ,should it has the chance to retry?
struct Envelope<T, U>(Option<(T, Callback<T, U>)>);
impl<T, U> Drop for Envelope<T, U> {
fn drop(&mut self) {
if let Some((val, cb)) = self.0.take() {
cb.send(Err((
crate::Error::new_canceled().with("connection closed"),
Some(val),
)));
}
}
}
hyper client idle timeout is 20s,and server is tomcat,default keep alive timeout is 60s, it should not cause connection close error
interesting,keep eye on it
```
async fn send_request(
self,
mut req: Request<B>,
pool_key: PoolKey,
) -> Result<Response<hyper::body::Incoming>, Error> {
let mut pooled = self.connection_for(pool_key).await?;
req.extensions_mut()
.get_mut::<CaptureConnectionExtension>()
.map(|conn| conn.set(&pooled.conn_info));
if pooled.is_http1() {
if req.version() == Version::HTTP_2 {
warn!("Connection is HTTP/1, but request requires HTTP/2");
return Err(e!(UserUnsupportedVersion));
}
if self.config.set_host {
let uri = req.uri().clone();
req.headers_mut().entry(HOST).or_insert_with(|| {
let hostname = uri.host().expect("authority implies host");
if let Some(port) = get_non_default_port(&uri) {
let s = format!("{}:{}", hostname, port);
HeaderValue::from_str(&s)
} else {
HeaderValue::from_str(hostname)
}
.expect("uri host is valid header value")
});
}
// CONNECT always sends authority-form, so check it first...
if req.method() == Method::CONNECT {
authority_form(req.uri_mut());
} else if pooled.conn_info.is_proxied {
absolute_form(req.uri_mut());
} else {
origin_form(req.uri_mut());
}
} else if req.method() == Method::CONNECT {
authority_form(req.uri_mut());
}
let fut = pooled.send_request(req);
//.send_request_retryable(req)
//.map_err(ClientError::map_with_reused(pooled.is_reused()));
// If the Connector included 'extra' info, add to Response...
let extra_info = pooled.conn_info.extra.clone();
let fut = fut.map_ok(move |mut res| {
if let Some(extra) = extra_info {
extra.set(res.extensions_mut());
}
res
});
// As of futures@0.1.21, there is a race condition in the mpsc
// channel, such that sending when the receiver is closing can
// result in the message being stuck inside the queue. It won't
// ever notify until the Sender side is dropped.
//
// To counteract this, we must check if our senders 'want' channel
// has been closed after having tried to send. If so, error out...
if pooled.is_closed() {
return fut.await;
}
let res = fut.await?;
// If pooled is HTTP/2, we can toss this reference immediately.
//
// when pooled is dropped, it will try to insert back into the
// pool. To delay that, spawn a future that completes once the
// sender is ready again.
//
// This *should* only be once the related `Connection` has polled
// for a new request to start.
//
// It won't be ready if there is a body to stream.
if pooled.is_http2() || !pooled.is_pool_enabled() || pooled.is_ready() {
drop(pooled);
} else if !res.body().is_end_stream() {
//let (delayed_tx, delayed_rx) = oneshot::channel::<()>();
//res.body_mut().delayed_eof(delayed_rx);
let on_idle = future::poll_fn(move |cx| pooled.poll_ready(cx)).map(move |_| {
// At this point, `pooled` is dropped, and had a chance
// to insert into the pool (if conn was idle)
//drop(delayed_tx);
});
self.exec.execute(on_idle);
} else {
// There's no body to delay, but the connection isn't
// ready yet. Only re-insert when it's ready
let on_idle = future::poll_fn(move |cx| pooled.poll_ready(cx)).map(|_| ());
self.exec.execute(on_idle);
}
Ok(res)
}
```
```
// As of futures@0.1.21, there is a race condition in the mpsc
// channel, such that sending when the receiver is closing can
// result in the message being stuck inside the queue. It won't
// ever notify until the Sender side is dropped.
//
// To counteract this, we must check if our senders 'want' channel
// has been closed after having tried to send. If so, error out...
if pooled.is_closed() {
return fut.await;
}
```
then
Is mpsc::UnboundedSender dropped causing this problem?
Any suggestions for modifications?
|
2024-06-21T20:09:56Z
| 3,691
|
hyper client happens cacel error which should retry
**Version**
List the version(s) of `hyper`, and any relevant hyper dependency (such as `h2` if this is related to HTTP/2).
hyper = { version = "1.1.0", features = ["full"] }
hyper-util = { version = "0.1.3", features = ["full"] }
**Platform**
The output of `uname -a` (UNIX), or version and 32 or 64-bit (Windows)
linux 64
**Description**
hyper client happens cacel error which should retry
[short summary of the bug]
```rust
let mut connector = HttpConnector::new();
connector.set_nodelay(true);
//禁止使用该功能,其会在拿到IPV6和IPV4的时候优先尝试链接IPV6
connector.set_happy_eyeballs_timeout(None);
connector.set_connect_timeout(Some(Duration::from_millis(constant::constant_config::CONNECT_TIME_OUT)));
// connector.set_send_buffer_size(Some(constant::constant_config::BUFFER_SIZE));
// connector.set_recv_buffer_size(Some(constant::constant_config::BUFFER_SIZE));
connector.set_reuse_address(true);
//hyper_util::rt::TokioExecutor::new() 是否会导致上下文切换更多?
let http_client = Client::builder(TokioExecutor::new())
.http1_preserve_header_case(true)
.http1_title_case_headers(false)
.pool_idle_timeout(Duration::from_millis(constant::constant_config::CLIENT_IDLE_TIME_OUT)) //空闲链接
// .pool_max_idle_per_host(constant::constant_config::MAX_IDLE_CONNECTIONS_PER_HOST_HTTP_ALL)
// .executor()
// .http1_max_buf_size(constant::constant_config::BUFFER_SIZE)
.http1_ignore_invalid_headers_in_responses(false)
.http1_allow_spaces_after_header_name_in_responses(false)
.http1_allow_obsolete_multiline_headers_in_responses(false)
.timer(TokioTimer::default())
.pool_timer(TokioTimer::default())
.build(connector);
http_client
```
when use http_client send request,i got some error message as follows:
```
request::error("message", "Error { kind: SendRequest, source: Some(hyper::Error(Canceled, \"connection closed\")) }"
equest::error("message", "Error { kind: SendRequest, source: Some(hyper::Error(Canceled, \"connection was not ready\")) }")
```
retry_canceled_requests default is true, i think it should not happen these errors
|
hyperium__hyper-3691
|
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -13,10 +13,21 @@ use tokio::sync::{mpsc, oneshot};
#[cfg(feature = "http2")]
use crate::{body::Incoming, proto::h2::client::ResponseFutMap};
-#[cfg(test)]
-pub(crate) type RetryPromise<T, U> = oneshot::Receiver<Result<U, (crate::Error, Option<T>)>>;
+pub(crate) type RetryPromise<T, U> = oneshot::Receiver<Result<U, TrySendError<T>>>;
pub(crate) type Promise<T> = oneshot::Receiver<Result<T, crate::Error>>;
+/// An error when calling `try_send_request`.
+///
+/// There is a possibility of an error occuring on a connection in-between the
+/// time that a request is queued and when it is actually written to the IO
+/// transport. If that happens, it is safe to return the request back to the
+/// caller, as it was never fully sent.
+#[derive(Debug)]
+pub struct TrySendError<T> {
+ pub(crate) error: crate::Error,
+ pub(crate) message: Option<T>,
+}
+
pub(crate) fn channel<T, U>() -> (Sender<T, U>, Receiver<T, U>) {
let (tx, rx) = mpsc::unbounded_channel();
let (giver, taker) = want::new();
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -92,7 +103,7 @@ impl<T, U> Sender<T, U> {
}
}
- #[cfg(test)]
+ #[cfg(feature = "http1")]
pub(crate) fn try_send(&mut self, val: T) -> Result<RetryPromise<T, U>, T> {
if !self.can_send() {
return Err(val);
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -135,7 +146,6 @@ impl<T, U> UnboundedSender<T, U> {
self.giver.is_canceled()
}
- #[cfg(test)]
pub(crate) fn try_send(&mut self, val: T) -> Result<RetryPromise<T, U>, T> {
let (tx, rx) = oneshot::channel();
self.inner
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -389,8 +418,8 @@ mod tests {
let err = fulfilled
.expect("fulfilled")
.expect_err("promise should error");
- match (err.0.kind(), err.1) {
- (&crate::error::Kind::Canceled, Some(_)) => (),
+ match (err.error.is_canceled(), err.message) {
+ (true, Some(_)) => (),
e => panic!("expected Error::Cancel(_), found {:?}", e),
}
}
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -729,9 +737,9 @@ mod tests {
let err = tokio_test::assert_ready_ok!(Pin::new(&mut res_rx).poll(cx))
.expect_err("callback should send error");
- match (err.0.kind(), err.1) {
- (&crate::error::Kind::Canceled, Some(_)) => (),
- other => panic!("expected Canceled, got {:?}", other),
+ match (err.error.is_canceled(), err.message.as_ref()) {
+ (true, Some(_)) => (),
+ _ => panic!("expected Canceled, got {:?}", err),
}
});
}
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2041,6 +2041,91 @@ mod conn {
assert_eq!(vec, b"bar=foo");
}
+ #[tokio::test]
+ async fn test_try_send_request() {
+ use std::future::Future;
+ let (listener, addr) = setup_tk_test_server().await;
+ let (done_tx, done_rx) = tokio::sync::oneshot::channel::<()>();
+
+ tokio::spawn(async move {
+ let mut sock = listener.accept().await.unwrap().0;
+ let mut buf = [0u8; 8192];
+ sock.read(&mut buf).await.expect("read 1");
+ sock.write_all(b"HTTP/1.1 200 OK\r\ncontent-length: 0\r\n\r\n")
+ .await
+ .expect("write 1");
+ let _ = done_rx.await;
+ });
+
+ // make polling fair by putting both in spawns
+ tokio::spawn(async move {
+ let io = tcp_connect(&addr).await.expect("tcp connect");
+ let (mut client, mut conn) = conn::http1::Builder::new()
+ .handshake::<_, Empty<Bytes>>(io)
+ .await
+ .expect("http handshake");
+
+ // get the conn ready
+ assert!(
+ future::poll_fn(|cx| Poll::Ready(Pin::new(&mut conn).poll(cx)))
+ .await
+ .is_pending()
+ );
+ assert!(client.is_ready());
+
+ // use the connection once
+ let mut fut1 = std::pin::pin!(client.send_request(http::Request::new(Empty::new())));
+ let _res1 = future::poll_fn(|cx| loop {
+ if let Poll::Ready(res) = fut1.as_mut().poll(cx) {
+ return Poll::Ready(res);
+ }
+ return match Pin::new(&mut conn).poll(cx) {
+ Poll::Ready(_) => panic!("ruh roh"),
+ Poll::Pending => Poll::Pending,
+ };
+ })
+ .await
+ .expect("resp 1");
+
+ assert!(client.is_ready());
+
+ // simulate the server dropping the conn
+ let _ = done_tx.send(());
+ // let the server task die
+ tokio::task::yield_now().await;
+
+ let mut fut2 =
+ std::pin::pin!(client.try_send_request(http::Request::new(Empty::new())));
+ let poll1 = future::poll_fn(|cx| Poll::Ready(fut2.as_mut().poll(cx))).await;
+ assert!(poll1.is_pending(), "not already known to error");
+
+ let mut conn_opt = Some(conn);
+ // wasn't a known error, req is in queue, and now the next poll, the
+ // conn will be noticed as errored
+ let mut err = future::poll_fn(|cx| {
+ loop {
+ if let Poll::Ready(res) = fut2.as_mut().poll(cx) {
+ return Poll::Ready(res);
+ }
+ if let Some(ref mut conn) = conn_opt {
+ match Pin::new(conn).poll(cx) {
+ Poll::Ready(_) => {
+ conn_opt = None;
+ } // ok
+ Poll::Pending => return Poll::Pending,
+ };
+ }
+ }
+ })
+ .await
+ .expect_err("resp 2");
+
+ assert!(err.take_message().is_some(), "request was returned");
+ })
+ .await
+ .unwrap();
+ }
+
#[tokio::test]
async fn http2_detect_conn_eof() {
use futures_util::future;
|
hyperium/hyper
|
aa7ff605da3b706e855f9633b8dddeb9463217d4
|
[
"2703"
] |
1.3
|
721785efad8537513e48d900a85c05ce79483018
|
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -11,6 +11,7 @@ use bytes::{Buf, Bytes};
use futures_util::ready;
use http::header::{HeaderValue, CONNECTION, TE};
use http::{HeaderMap, Method, Version};
+use http_body::Frame;
use httparse::ParserConfig;
use super::io::Buffered;
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -268,10 +269,20 @@ where
self.try_keep_alive(cx);
}
} else if msg.expect_continue && msg.head.version.gt(&Version::HTTP_10) {
- self.state.reading = Reading::Continue(Decoder::new(msg.decode));
+ let h1_max_header_size = None; // TODO: remove this when we land h1_max_header_size support
+ self.state.reading = Reading::Continue(Decoder::new(
+ msg.decode,
+ self.state.h1_max_headers,
+ h1_max_header_size,
+ ));
wants = wants.add(Wants::EXPECT);
} else {
- self.state.reading = Reading::Body(Decoder::new(msg.decode));
+ let h1_max_header_size = None; // TODO: remove this when we land h1_max_header_size support
+ self.state.reading = Reading::Body(Decoder::new(
+ msg.decode,
+ self.state.h1_max_headers,
+ h1_max_header_size,
+ ));
}
self.state.allow_trailer_fields = msg
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -312,33 +323,41 @@ where
pub(crate) fn poll_read_body(
&mut self,
cx: &mut Context<'_>,
- ) -> Poll<Option<io::Result<Bytes>>> {
+ ) -> Poll<Option<io::Result<Frame<Bytes>>>> {
debug_assert!(self.can_read_body());
let (reading, ret) = match self.state.reading {
Reading::Body(ref mut decoder) => {
match ready!(decoder.decode(cx, &mut self.io)) {
- Ok(slice) => {
- let (reading, chunk) = if decoder.is_eof() {
- debug!("incoming body completed");
- (
- Reading::KeepAlive,
- if !slice.is_empty() {
- Some(Ok(slice))
- } else {
- None
- },
- )
- } else if slice.is_empty() {
- error!("incoming body unexpectedly ended");
- // This should be unreachable, since all 3 decoders
- // either set eof=true or return an Err when reading
- // an empty slice...
- (Reading::Closed, None)
+ Ok(frame) => {
+ if frame.is_data() {
+ let slice = frame.data_ref().unwrap_or_else(|| unreachable!());
+ let (reading, maybe_frame) = if decoder.is_eof() {
+ debug!("incoming body completed");
+ (
+ Reading::KeepAlive,
+ if !slice.is_empty() {
+ Some(Ok(frame))
+ } else {
+ None
+ },
+ )
+ } else if slice.is_empty() {
+ error!("incoming body unexpectedly ended");
+ // This should be unreachable, since all 3 decoders
+ // either set eof=true or return an Err when reading
+ // an empty slice...
+ (Reading::Closed, None)
+ } else {
+ return Poll::Ready(Some(Ok(frame)));
+ };
+ (reading, Poll::Ready(maybe_frame))
+ } else if frame.is_trailers() {
+ (Reading::Closed, Poll::Ready(Some(Ok(frame))))
} else {
- return Poll::Ready(Some(Ok(slice)));
- };
- (reading, Poll::Ready(chunk))
+ trace!("discarding unknown frame");
+ (Reading::Closed, Poll::Ready(None))
+ }
}
Err(e) => {
debug!("incoming body decode error: {}", e);
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -4,10 +4,13 @@ use std::io;
use std::task::{Context, Poll};
use std::usize;
-use bytes::Bytes;
+use bytes::{BufMut, Bytes, BytesMut};
use futures_util::ready;
+use http::{HeaderMap, HeaderName, HeaderValue};
+use http_body::Frame;
use super::io::MemRead;
+use super::role::DEFAULT_MAX_HEADERS;
use super::DecodedLength;
use self::Kind::{Chunked, Eof, Length};
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -17,6 +20,11 @@ use self::Kind::{Chunked, Eof, Length};
/// This limit is currentlty applied for the entire body, not per chunk.
const CHUNKED_EXTENSIONS_LIMIT: u64 = 1024 * 16;
+/// Maximum number of bytes allowed for all trailer fields.
+///
+/// TODO: remove this when we land h1_max_header_size support
+const TRAILER_LIMIT: usize = 1024 * 16;
+
/// Decoders to handle different Transfer-Encodings.
///
/// If a message body does not include a Transfer-Encoding, it *should*
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -26,7 +34,7 @@ pub(crate) struct Decoder {
kind: Kind,
}
-#[derive(Debug, Clone, Copy, PartialEq)]
+#[derive(Debug, Clone, PartialEq)]
enum Kind {
/// A Reader used when a Content-Length header is passed with a positive integer.
Length(u64),
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -35,6 +43,10 @@ enum Kind {
state: ChunkedState,
chunk_len: u64,
extensions_cnt: u64,
+ trailers_buf: Option<BytesMut>,
+ trailers_cnt: usize,
+ h1_max_headers: Option<usize>,
+ h1_max_header_size: Option<usize>,
},
/// A Reader used for responses that don't indicate a length or chunked.
///
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -81,12 +93,19 @@ impl Decoder {
}
}
- pub(crate) fn chunked() -> Decoder {
+ pub(crate) fn chunked(
+ h1_max_headers: Option<usize>,
+ h1_max_header_size: Option<usize>,
+ ) -> Decoder {
Decoder {
kind: Kind::Chunked {
state: ChunkedState::new(),
chunk_len: 0,
extensions_cnt: 0,
+ trailers_buf: None,
+ trailers_cnt: 0,
+ h1_max_headers,
+ h1_max_header_size,
},
}
}
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -97,9 +116,13 @@ impl Decoder {
}
}
- pub(super) fn new(len: DecodedLength) -> Self {
+ pub(super) fn new(
+ len: DecodedLength,
+ h1_max_headers: Option<usize>,
+ h1_max_header_size: Option<usize>,
+ ) -> Self {
match len {
- DecodedLength::CHUNKED => Decoder::chunked(),
+ DecodedLength::CHUNKED => Decoder::chunked(h1_max_headers, h1_max_header_size),
DecodedLength::CLOSE_DELIMITED => Decoder::eof(),
length => Decoder::length(length.danger_len()),
}
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -123,12 +146,12 @@ impl Decoder {
&mut self,
cx: &mut Context<'_>,
body: &mut R,
- ) -> Poll<Result<Bytes, io::Error>> {
+ ) -> Poll<Result<Frame<Bytes>, io::Error>> {
trace!("decode; state={:?}", self.kind);
match self.kind {
Length(ref mut remaining) => {
if *remaining == 0 {
- Poll::Ready(Ok(Bytes::new()))
+ Poll::Ready(Ok(Frame::data(Bytes::new())))
} else {
let to_read = *remaining as usize;
let buf = ready!(body.read_mem(cx, to_read))?;
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -143,37 +166,77 @@ impl Decoder {
} else {
*remaining -= num;
}
- Poll::Ready(Ok(buf))
+ Poll::Ready(Ok(Frame::data(buf)))
}
}
Chunked {
ref mut state,
ref mut chunk_len,
ref mut extensions_cnt,
+ ref mut trailers_buf,
+ ref mut trailers_cnt,
+ ref h1_max_headers,
+ ref h1_max_header_size,
} => {
+ let h1_max_headers = h1_max_headers.unwrap_or(DEFAULT_MAX_HEADERS);
+ let h1_max_header_size = h1_max_header_size.unwrap_or(TRAILER_LIMIT);
loop {
let mut buf = None;
// advances the chunked state
- *state = ready!(state.step(cx, body, chunk_len, extensions_cnt, &mut buf))?;
+ *state = ready!(state.step(
+ cx,
+ body,
+ chunk_len,
+ extensions_cnt,
+ &mut buf,
+ trailers_buf,
+ trailers_cnt,
+ h1_max_headers,
+ h1_max_header_size
+ ))?;
if *state == ChunkedState::End {
trace!("end of chunked");
- return Poll::Ready(Ok(Bytes::new()));
+
+ if trailers_buf.is_some() {
+ trace!("found possible trailers");
+
+ // decoder enforces that trailers count will not exceed h1_max_headers
+ if *trailers_cnt >= h1_max_headers {
+ return Poll::Ready(Err(io::Error::new(
+ io::ErrorKind::InvalidData,
+ "chunk trailers count overflow",
+ )));
+ }
+ match decode_trailers(
+ &mut trailers_buf.take().expect("Trailer is None"),
+ *trailers_cnt,
+ ) {
+ Ok(headers) => {
+ return Poll::Ready(Ok(Frame::trailers(headers)));
+ }
+ Err(e) => {
+ return Poll::Ready(Err(e));
+ }
+ }
+ }
+
+ return Poll::Ready(Ok(Frame::data(Bytes::new())));
}
if let Some(buf) = buf {
- return Poll::Ready(Ok(buf));
+ return Poll::Ready(Ok(Frame::data(buf)));
}
}
}
Eof(ref mut is_eof) => {
if *is_eof {
- Poll::Ready(Ok(Bytes::new()))
+ Poll::Ready(Ok(Frame::data(Bytes::new())))
} else {
// 8192 chosen because its about 2 packets, there probably
// won't be that much available, so don't have MemReaders
// allocate buffers to big
body.read_mem(cx, 8192).map_ok(|slice| {
*is_eof = slice.is_empty();
- slice
+ Frame::data(slice)
})
}
}
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -216,6 +279,19 @@ macro_rules! or_overflow {
)
}
+macro_rules! put_u8 {
+ ($trailers_buf:expr, $byte:expr, $limit:expr) => {
+ $trailers_buf.put_u8($byte);
+
+ if $trailers_buf.len() >= $limit {
+ return Poll::Ready(Err(io::Error::new(
+ io::ErrorKind::InvalidData,
+ "chunk trailers bytes over limit",
+ )));
+ }
+ };
+}
+
impl ChunkedState {
fn new() -> ChunkedState {
ChunkedState::Start
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -227,6 +303,10 @@ impl ChunkedState {
size: &mut u64,
extensions_cnt: &mut u64,
buf: &mut Option<Bytes>,
+ trailers_buf: &mut Option<BytesMut>,
+ trailers_cnt: &mut usize,
+ h1_max_headers: usize,
+ h1_max_header_size: usize,
) -> Poll<Result<ChunkedState, io::Error>> {
use self::ChunkedState::*;
match *self {
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -238,10 +318,17 @@ impl ChunkedState {
Body => ChunkedState::read_body(cx, body, size, buf),
BodyCr => ChunkedState::read_body_cr(cx, body),
BodyLf => ChunkedState::read_body_lf(cx, body),
- Trailer => ChunkedState::read_trailer(cx, body),
- TrailerLf => ChunkedState::read_trailer_lf(cx, body),
- EndCr => ChunkedState::read_end_cr(cx, body),
- EndLf => ChunkedState::read_end_lf(cx, body),
+ Trailer => ChunkedState::read_trailer(cx, body, trailers_buf, h1_max_header_size),
+ TrailerLf => ChunkedState::read_trailer_lf(
+ cx,
+ body,
+ trailers_buf,
+ trailers_cnt,
+ h1_max_headers,
+ h1_max_header_size,
+ ),
+ EndCr => ChunkedState::read_end_cr(cx, body, trailers_buf, h1_max_header_size),
+ EndLf => ChunkedState::read_end_lf(cx, body, trailers_buf, h1_max_header_size),
End => Poll::Ready(Ok(ChunkedState::End)),
}
}
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -442,19 +529,51 @@ impl ChunkedState {
fn read_trailer<R: MemRead>(
cx: &mut Context<'_>,
rdr: &mut R,
+ trailers_buf: &mut Option<BytesMut>,
+ h1_max_header_size: usize,
) -> Poll<Result<ChunkedState, io::Error>> {
trace!("read_trailer");
- match byte!(rdr, cx) {
+ let byte = byte!(rdr, cx);
+
+ put_u8!(
+ trailers_buf.as_mut().expect("trailers_buf is None"),
+ byte,
+ h1_max_header_size
+ );
+
+ match byte {
b'\r' => Poll::Ready(Ok(ChunkedState::TrailerLf)),
_ => Poll::Ready(Ok(ChunkedState::Trailer)),
}
}
+
fn read_trailer_lf<R: MemRead>(
cx: &mut Context<'_>,
rdr: &mut R,
+ trailers_buf: &mut Option<BytesMut>,
+ trailers_cnt: &mut usize,
+ h1_max_headers: usize,
+ h1_max_header_size: usize,
) -> Poll<Result<ChunkedState, io::Error>> {
- match byte!(rdr, cx) {
- b'\n' => Poll::Ready(Ok(ChunkedState::EndCr)),
+ let byte = byte!(rdr, cx);
+ match byte {
+ b'\n' => {
+ if *trailers_cnt >= h1_max_headers {
+ return Poll::Ready(Err(io::Error::new(
+ io::ErrorKind::InvalidData,
+ "chunk trailers count overflow",
+ )));
+ }
+ *trailers_cnt += 1;
+
+ put_u8!(
+ trailers_buf.as_mut().expect("trailers_buf is None"),
+ byte,
+ h1_max_header_size
+ );
+
+ Poll::Ready(Ok(ChunkedState::EndCr))
+ }
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid trailer end LF",
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -465,18 +584,48 @@ impl ChunkedState {
fn read_end_cr<R: MemRead>(
cx: &mut Context<'_>,
rdr: &mut R,
+ trailers_buf: &mut Option<BytesMut>,
+ h1_max_header_size: usize,
) -> Poll<Result<ChunkedState, io::Error>> {
- match byte!(rdr, cx) {
- b'\r' => Poll::Ready(Ok(ChunkedState::EndLf)),
- _ => Poll::Ready(Ok(ChunkedState::Trailer)),
+ let byte = byte!(rdr, cx);
+ match byte {
+ b'\r' => {
+ if let Some(trailers_buf) = trailers_buf {
+ put_u8!(trailers_buf, byte, h1_max_header_size);
+ }
+ Poll::Ready(Ok(ChunkedState::EndLf))
+ }
+ byte => {
+ match trailers_buf {
+ None => {
+ // 64 will fit a single Expires header without reallocating
+ let mut buf = BytesMut::with_capacity(64);
+ buf.put_u8(byte);
+ *trailers_buf = Some(buf);
+ }
+ Some(ref mut trailers_buf) => {
+ put_u8!(trailers_buf, byte, h1_max_header_size);
+ }
+ }
+
+ Poll::Ready(Ok(ChunkedState::Trailer))
+ }
}
}
fn read_end_lf<R: MemRead>(
cx: &mut Context<'_>,
rdr: &mut R,
+ trailers_buf: &mut Option<BytesMut>,
+ h1_max_header_size: usize,
) -> Poll<Result<ChunkedState, io::Error>> {
- match byte!(rdr, cx) {
- b'\n' => Poll::Ready(Ok(ChunkedState::End)),
+ let byte = byte!(rdr, cx);
+ match byte {
+ b'\n' => {
+ if let Some(trailers_buf) = trailers_buf {
+ put_u8!(trailers_buf, byte, h1_max_header_size);
+ }
+ Poll::Ready(Ok(ChunkedState::End))
+ }
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk end LF",
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -485,6 +634,48 @@ impl ChunkedState {
}
}
+// TODO: disallow Transfer-Encoding, Content-Length, Trailer, etc in trailers ??
+fn decode_trailers(buf: &mut BytesMut, count: usize) -> Result<HeaderMap, io::Error> {
+ let mut trailers = HeaderMap::new();
+ let mut headers = vec![httparse::EMPTY_HEADER; count];
+ let res = httparse::parse_headers(&buf, &mut headers);
+ match res {
+ Ok(httparse::Status::Complete((_, headers))) => {
+ for header in headers.iter() {
+ use std::convert::TryFrom;
+ let name = match HeaderName::try_from(header.name) {
+ Ok(name) => name,
+ Err(_) => {
+ return Err(io::Error::new(
+ io::ErrorKind::InvalidInput,
+ format!("Invalid header name: {:?}", &header),
+ ));
+ }
+ };
+
+ let value = match HeaderValue::from_bytes(header.value) {
+ Ok(value) => value,
+ Err(_) => {
+ return Err(io::Error::new(
+ io::ErrorKind::InvalidInput,
+ format!("Invalid header value: {:?}", &header),
+ ));
+ }
+ };
+
+ trailers.insert(name, value);
+ }
+
+ Ok(trailers)
+ }
+ Ok(httparse::Status::Partial) => Err(io::Error::new(
+ io::ErrorKind::InvalidInput,
+ "Partial header",
+ )),
+ Err(e) => Err(io::Error::new(io::ErrorKind::InvalidInput, e)),
+ }
+}
+
#[derive(Debug)]
struct IncompleteBody;
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -213,17 +213,39 @@ where
}
}
match self.conn.poll_read_body(cx) {
- Poll::Ready(Some(Ok(chunk))) => match body.try_send_data(chunk) {
- Ok(()) => {
- self.body_tx = Some(body);
- }
- Err(_canceled) => {
- if self.conn.can_read_body() {
- trace!("body receiver dropped before eof, closing");
- self.conn.close_read();
+ Poll::Ready(Some(Ok(frame))) => {
+ if frame.is_data() {
+ let chunk = frame.into_data().unwrap_or_else(|_| unreachable!());
+ match body.try_send_data(chunk) {
+ Ok(()) => {
+ self.body_tx = Some(body);
+ }
+ Err(_canceled) => {
+ if self.conn.can_read_body() {
+ trace!("body receiver dropped before eof, closing");
+ self.conn.close_read();
+ }
+ }
+ }
+ } else if frame.is_trailers() {
+ let trailers =
+ frame.into_trailers().unwrap_or_else(|_| unreachable!());
+ match body.try_send_trailers(trailers) {
+ Ok(()) => {
+ self.body_tx = Some(body);
+ }
+ Err(_canceled) => {
+ if self.conn.can_read_body() {
+ trace!("body receiver dropped before eof, closing");
+ self.conn.close_read();
+ }
+ }
}
+ } else {
+ // we should have dropped all unknown frames in poll_read_body
+ error!("unexpected frame");
}
- },
+ }
Poll::Ready(None) => {
// just drop, the body will close automatically
}
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -30,7 +30,7 @@ use crate::proto::h1::{
use crate::proto::RequestHead;
use crate::proto::{BodyLength, MessageHead, RequestLine};
-const DEFAULT_MAX_HEADERS: usize = 100;
+pub(crate) const DEFAULT_MAX_HEADERS: usize = 100;
const AVERAGE_HEADER_SIZE: usize = 30; // totally scientific
#[cfg(feature = "server")]
const MAX_URI_LEN: usize = (u16::MAX - 1) as usize;
|
2024-04-17T00:18:10Z
| 3,637
|
Receiving HTTP/1.1 trailers
|
hyperium__hyper-3637
|
diff --git a/src/body/incoming.rs b/src/body/incoming.rs
--- a/src/body/incoming.rs
+++ b/src/body/incoming.rs
@@ -403,6 +403,19 @@ impl Sender {
.map_err(|err| err.into_inner().expect("just sent Ok"))
}
+ #[cfg(feature = "http1")]
+ pub(crate) fn try_send_trailers(
+ &mut self,
+ trailers: HeaderMap,
+ ) -> Result<(), Option<HeaderMap>> {
+ let tx = match self.trailers_tx.take() {
+ Some(tx) => tx,
+ None => return Err(None),
+ };
+
+ tx.send(trailers).map_err(|err| Some(err))
+ }
+
#[cfg(test)]
pub(crate) fn abort(mut self) {
self.send_error(crate::Error::new_body_write_aborted());
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -181,7 +244,7 @@ impl Decoder {
}
#[cfg(test)]
- async fn decode_fut<R: MemRead>(&mut self, body: &mut R) -> Result<Bytes, io::Error> {
+ async fn decode_fut<R: MemRead>(&mut self, body: &mut R) -> Result<Frame<Bytes>, io::Error> {
futures_util::future::poll_fn(move |cx| self.decode(cx, body)).await
}
}
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -554,9 +745,20 @@ mod tests {
let rdr = &mut s.as_bytes();
let mut size = 0;
let mut ext_cnt = 0;
+ let mut trailers_cnt = 0;
loop {
let result = futures_util::future::poll_fn(|cx| {
- state.step(cx, rdr, &mut size, &mut ext_cnt, &mut None)
+ state.step(
+ cx,
+ rdr,
+ &mut size,
+ &mut ext_cnt,
+ &mut None,
+ &mut None,
+ &mut trailers_cnt,
+ DEFAULT_MAX_HEADERS,
+ TRAILER_LIMIT,
+ )
})
.await;
let desc = format!("read_size failed for {:?}", s);
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -573,9 +775,20 @@ mod tests {
let rdr = &mut s.as_bytes();
let mut size = 0;
let mut ext_cnt = 0;
+ let mut trailers_cnt = 0;
loop {
let result = futures_util::future::poll_fn(|cx| {
- state.step(cx, rdr, &mut size, &mut ext_cnt, &mut None)
+ state.step(
+ cx,
+ rdr,
+ &mut size,
+ &mut ext_cnt,
+ &mut None,
+ &mut None,
+ &mut trailers_cnt,
+ DEFAULT_MAX_HEADERS,
+ TRAILER_LIMIT,
+ )
})
.await;
state = match result {
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -639,7 +852,16 @@ mod tests {
async fn test_read_sized_early_eof() {
let mut bytes = &b"foo bar"[..];
let mut decoder = Decoder::length(10);
- assert_eq!(decoder.decode_fut(&mut bytes).await.unwrap().len(), 7);
+ assert_eq!(
+ decoder
+ .decode_fut(&mut bytes)
+ .await
+ .unwrap()
+ .data_ref()
+ .unwrap()
+ .len(),
+ 7
+ );
let e = decoder.decode_fut(&mut bytes).await.unwrap_err();
assert_eq!(e.kind(), io::ErrorKind::UnexpectedEof);
}
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -651,8 +873,17 @@ mod tests {
9\r\n\
foo bar\
"[..];
- let mut decoder = Decoder::chunked();
- assert_eq!(decoder.decode_fut(&mut bytes).await.unwrap().len(), 7);
+ let mut decoder = Decoder::chunked(None, None);
+ assert_eq!(
+ decoder
+ .decode_fut(&mut bytes)
+ .await
+ .unwrap()
+ .data_ref()
+ .unwrap()
+ .len(),
+ 7
+ );
let e = decoder.decode_fut(&mut bytes).await.unwrap_err();
assert_eq!(e.kind(), io::ErrorKind::UnexpectedEof);
}
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -661,10 +892,12 @@ mod tests {
#[tokio::test]
async fn test_read_chunked_single_read() {
let mut mock_buf = &b"10\r\n1234567890abcdef\r\n0\r\n"[..];
- let buf = Decoder::chunked()
+ let buf = Decoder::chunked(None, None)
.decode_fut(&mut mock_buf)
.await
- .expect("decode");
+ .expect("decode")
+ .into_data()
+ .expect("unknown frame type");
assert_eq!(16, buf.len());
let result = String::from_utf8(buf.as_ref().to_vec()).expect("decode String");
assert_eq!("1234567890abcdef", &result);
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -684,8 +917,13 @@ mod tests {
scratch.extend(b"0\r\n\r\n");
let mut mock_buf = Bytes::from(scratch);
- let mut decoder = Decoder::chunked();
- let buf1 = decoder.decode_fut(&mut mock_buf).await.expect("decode1");
+ let mut decoder = Decoder::chunked(None, None);
+ let buf1 = decoder
+ .decode_fut(&mut mock_buf)
+ .await
+ .expect("decode1")
+ .into_data()
+ .expect("unknown frame type");
assert_eq!(&buf1[..], b"A");
let err = decoder
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -700,7 +938,7 @@ mod tests {
#[tokio::test]
async fn test_read_chunked_trailer_with_missing_lf() {
let mut mock_buf = &b"10\r\n1234567890abcdef\r\n0\r\nbad\r\r\n"[..];
- let mut decoder = Decoder::chunked();
+ let mut decoder = Decoder::chunked(None, None);
decoder.decode_fut(&mut mock_buf).await.expect("decode");
let e = decoder.decode_fut(&mut mock_buf).await.unwrap_err();
assert_eq!(e.kind(), io::ErrorKind::InvalidInput);
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -710,20 +948,35 @@ mod tests {
#[tokio::test]
async fn test_read_chunked_after_eof() {
let mut mock_buf = &b"10\r\n1234567890abcdef\r\n0\r\n\r\n"[..];
- let mut decoder = Decoder::chunked();
+ let mut decoder = Decoder::chunked(None, None);
// normal read
- let buf = decoder.decode_fut(&mut mock_buf).await.unwrap();
+ let buf = decoder
+ .decode_fut(&mut mock_buf)
+ .await
+ .unwrap()
+ .into_data()
+ .expect("unknown frame type");
assert_eq!(16, buf.len());
let result = String::from_utf8(buf.as_ref().to_vec()).expect("decode String");
assert_eq!("1234567890abcdef", &result);
// eof read
- let buf = decoder.decode_fut(&mut mock_buf).await.expect("decode");
+ let buf = decoder
+ .decode_fut(&mut mock_buf)
+ .await
+ .expect("decode")
+ .into_data()
+ .expect("unknown frame type");
assert_eq!(0, buf.len());
// ensure read after eof also returns eof
- let buf = decoder.decode_fut(&mut mock_buf).await.expect("decode");
+ let buf = decoder
+ .decode_fut(&mut mock_buf)
+ .await
+ .expect("decode")
+ .into_data()
+ .expect("unknown frame type");
assert_eq!(0, buf.len());
}
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -751,7 +1004,9 @@ mod tests {
let buf = decoder
.decode_fut(&mut ins)
.await
- .expect("unexpected decode error");
+ .expect("unexpected decode error")
+ .into_data()
+ .expect("unexpected frame type");
if buf.is_empty() {
break; // eof
}
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -783,7 +1038,7 @@ mod tests {
async fn test_read_chunked_async() {
let content = "3\r\nfoo\r\n3\r\nbar\r\n0\r\n\r\n";
let expected = "foobar";
- all_async_cases(content, expected, Decoder::chunked()).await;
+ all_async_cases(content, expected, Decoder::chunked(None, None)).await;
}
#[cfg(not(miri))]
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -808,10 +1063,15 @@ mod tests {
b.bytes = LEN as u64;
b.iter(|| {
- let mut decoder = Decoder::chunked();
+ let mut decoder = Decoder::chunked(None, None);
rt.block_on(async {
let mut raw = content.clone();
- let chunk = decoder.decode_fut(&mut raw).await.unwrap();
+ let chunk = decoder
+ .decode_fut(&mut raw)
+ .await
+ .unwrap()
+ .into_data()
+ .unwrap();
assert_eq!(chunk.len(), LEN);
});
});
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -830,7 +1090,12 @@ mod tests {
let mut decoder = Decoder::length(LEN as u64);
rt.block_on(async {
let mut raw = content.clone();
- let chunk = decoder.decode_fut(&mut raw).await.unwrap();
+ let chunk = decoder
+ .decode_fut(&mut raw)
+ .await
+ .unwrap()
+ .into_data()
+ .unwrap();
assert_eq!(chunk.len(), LEN);
});
});
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -843,4 +1108,110 @@ mod tests {
.build()
.expect("rt build")
}
+
+ #[test]
+ fn test_decode_trailers() {
+ let mut buf = BytesMut::new();
+ buf.extend_from_slice(
+ b"Expires: Wed, 21 Oct 2015 07:28:00 GMT\r\nX-Stream-Error: failed to decode\r\n\r\n",
+ );
+ let headers = decode_trailers(&mut buf, 2).expect("decode_trailers");
+ assert_eq!(headers.len(), 2);
+ assert_eq!(
+ headers.get("Expires").unwrap(),
+ "Wed, 21 Oct 2015 07:28:00 GMT"
+ );
+ assert_eq!(headers.get("X-Stream-Error").unwrap(), "failed to decode");
+ }
+
+ #[tokio::test]
+ async fn test_trailer_max_headers_enforced() {
+ let h1_max_headers = 10;
+ let mut scratch = vec![];
+ scratch.extend(b"10\r\n1234567890abcdef\r\n0\r\n");
+ for i in 0..h1_max_headers {
+ scratch.extend(format!("trailer{}: {}\r\n", i, i).as_bytes());
+ }
+ scratch.extend(b"\r\n");
+ let mut mock_buf = Bytes::from(scratch);
+
+ let mut decoder = Decoder::chunked(Some(h1_max_headers), None);
+
+ // ready chunked body
+ let buf = decoder
+ .decode_fut(&mut mock_buf)
+ .await
+ .unwrap()
+ .into_data()
+ .expect("unknown frame type");
+ assert_eq!(16, buf.len());
+
+ // eof read
+ let err = decoder
+ .decode_fut(&mut mock_buf)
+ .await
+ .expect_err("trailer fields over limit");
+ assert_eq!(err.kind(), io::ErrorKind::InvalidData);
+ }
+
+ #[tokio::test]
+ async fn test_trailer_max_header_size_huge_trailer() {
+ let max_header_size = 1024;
+ let mut scratch = vec![];
+ scratch.extend(b"10\r\n1234567890abcdef\r\n0\r\n");
+ scratch.extend(format!("huge_trailer: {}\r\n", "x".repeat(max_header_size)).as_bytes());
+ scratch.extend(b"\r\n");
+ let mut mock_buf = Bytes::from(scratch);
+
+ let mut decoder = Decoder::chunked(None, Some(max_header_size));
+
+ // ready chunked body
+ let buf = decoder
+ .decode_fut(&mut mock_buf)
+ .await
+ .unwrap()
+ .into_data()
+ .expect("unknown frame type");
+ assert_eq!(16, buf.len());
+
+ // eof read
+ let err = decoder
+ .decode_fut(&mut mock_buf)
+ .await
+ .expect_err("trailers over limit");
+ assert_eq!(err.kind(), io::ErrorKind::InvalidData);
+ }
+
+ #[tokio::test]
+ async fn test_trailer_max_header_size_many_small_trailers() {
+ let max_headers = 10;
+ let header_size = 64;
+ let mut scratch = vec![];
+ scratch.extend(b"10\r\n1234567890abcdef\r\n0\r\n");
+
+ for i in 0..max_headers {
+ scratch.extend(format!("trailer{}: {}\r\n", i, "x".repeat(header_size)).as_bytes());
+ }
+
+ scratch.extend(b"\r\n");
+ let mut mock_buf = Bytes::from(scratch);
+
+ let mut decoder = Decoder::chunked(None, Some(max_headers * header_size));
+
+ // ready chunked body
+ let buf = decoder
+ .decode_fut(&mut mock_buf)
+ .await
+ .unwrap()
+ .into_data()
+ .expect("unknown frame type");
+ assert_eq!(16, buf.len());
+
+ // eof read
+ let err = decoder
+ .decode_fut(&mut mock_buf)
+ .await
+ .expect_err("trailers over limit");
+ assert_eq!(err.kind(), io::ErrorKind::InvalidData);
+ }
}
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -34,6 +34,17 @@ where
b.collect().await.map(|c| c.to_bytes())
}
+async fn concat_with_trailers<B>(b: B) -> Result<(Bytes, Option<HeaderMap>), B::Error>
+where
+ B: hyper::body::Body,
+{
+ let collect = b.collect().await?;
+ let trailers = collect.trailers().cloned();
+ let bytes = collect.to_bytes();
+
+ Ok((bytes, trailers))
+}
+
async fn tcp_connect(addr: &SocketAddr) -> std::io::Result<TokioIo<TcpStream>> {
TcpStream::connect(*addr).await.map(TokioIo::new)
}
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -122,6 +133,9 @@ macro_rules! test {
status: $client_status:ident,
headers: { $($response_header_name:expr => $response_header_val:expr,)* },
body: $response_body:expr,
+ $(trailers: {$(
+ $response_trailer_name:expr => $response_trailer_val:expr,
+ )*},)?
) => (
#[test]
fn $name() {
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -158,12 +172,23 @@ macro_rules! test {
);
)*
- let body = rt.block_on(concat(res))
+ let (body, _trailers) = rt.block_on(concat_with_trailers(res))
.expect("body concat wait");
let expected_res_body = Option::<&[u8]>::from($response_body)
.unwrap_or_default();
assert_eq!(body.as_ref(), expected_res_body);
+
+ $($(
+ assert_eq!(
+ _trailers.as_ref().expect("trailers is None")
+ .get($response_trailer_name)
+ .expect(concat!("trailer header '", stringify!($response_trailer_name), "'")),
+ $response_trailer_val,
+ "trailer '{}'",
+ stringify!($response_trailer_name),
+ );
+ )*)?
}
);
(
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -679,6 +704,94 @@ test! {
body: None,
}
+test! {
+ name: client_res_body_chunked_with_trailer,
+
+ server:
+ expected: "GET / HTTP/1.1\r\nte: trailers\r\nhost: {addr}\r\n\r\n",
+ reply: "\
+ HTTP/1.1 200 OK\r\n\
+ transfer-encoding: chunked\r\n\
+ trailer: chunky-trailer\r\n\
+ \r\n\
+ 5\r\n\
+ hello\r\n\
+ 0\r\n\
+ chunky-trailer: header data\r\n\
+ \r\n\
+ ",
+
+ client:
+ request: {
+ method: GET,
+ url: "http://{addr}/",
+ headers: {
+ "te" => "trailers",
+ },
+ },
+ response:
+ status: OK,
+ headers: {
+ "Transfer-Encoding" => "chunked",
+ },
+ body: &b"hello"[..],
+ trailers: {
+ "chunky-trailer" => "header data",
+ },
+}
+
+test! {
+ name: client_res_body_chunked_with_pathological_trailers,
+
+ server:
+ expected: "GET / HTTP/1.1\r\nte: trailers\r\nhost: {addr}\r\n\r\n",
+ reply: "\
+ HTTP/1.1 200 OK\r\n\
+ transfer-encoding: chunked\r\n\
+ trailer: chunky-trailer1, chunky-trailer2, chunky-trailer3, chunky-trailer4, chunky-trailer5\r\n\
+ \r\n\
+ 5\r\n\
+ hello\r\n\
+ 0\r\n\
+ chunky-trailer1: header data1\r\n\
+ chunky-trailer2: header data2\r\n\
+ chunky-trailer3: header data3\r\n\
+ chunky-trailer4: header data4\r\n\
+ chunky-trailer5: header data5\r\n\
+ sneaky-trailer: not in trailer header\r\n\
+ transfer-encoding: chunked\r\n\
+ content-length: 5\r\n\
+ trailer: foo\r\n\
+ \r\n\
+ ",
+
+ client:
+ request: {
+ method: GET,
+ url: "http://{addr}/",
+ headers: {
+ "te" => "trailers",
+ },
+ },
+ response:
+ status: OK,
+ headers: {
+ "Transfer-Encoding" => "chunked",
+ },
+ body: &b"hello"[..],
+ trailers: {
+ "chunky-trailer1" => "header data1",
+ "chunky-trailer2" => "header data2",
+ "chunky-trailer3" => "header data3",
+ "chunky-trailer4" => "header data4",
+ "chunky-trailer5" => "header data5",
+ "sneaky-trailer" => "not in trailer header",
+ "transfer-encoding" => "chunked",
+ "content-length" => "5",
+ "trailer" => "foo",
+ },
+}
+
test! {
name: client_get_req_body_sized,
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2693,7 +2693,7 @@ async fn http2_keep_alive_count_server_pings() {
}
#[test]
-fn http1_trailer_fields() {
+fn http1_trailer_send_fields() {
let body = futures_util::stream::once(async move { Ok("hello".into()) });
let mut headers = HeaderMap::new();
headers.insert("chunky-trailer", "header data".parse().unwrap());
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2780,6 +2780,35 @@ fn http1_trailer_fields_not_allowed() {
assert_eq!(body, expected_body);
}
+#[test]
+fn http1_trailer_recv_fields() {
+ let server = serve();
+ let mut req = connect(server.addr());
+ req.write_all(
+ b"\
+ POST / HTTP/1.1\r\n\
+ trailer: chunky-trailer\r\n\
+ host: example.domain\r\n\
+ transfer-encoding: chunked\r\n\
+ \r\n\
+ 5\r\n\
+ hello\r\n\
+ 0\r\n\
+ chunky-trailer: header data\r\n\
+ \r\n\
+ ",
+ )
+ .expect("writing");
+
+ assert_eq!(server.body(), b"hello");
+
+ let trailers = server.trailers();
+ assert_eq!(
+ trailers.get("chunky-trailer"),
+ Some(&"header data".parse().unwrap())
+ );
+}
+
// -------------------------------------------------
// the Server that is used to run all the tests with
// -------------------------------------------------
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2787,6 +2816,7 @@ fn http1_trailer_fields_not_allowed() {
struct Serve {
addr: SocketAddr,
msg_rx: mpsc::Receiver<Msg>,
+ trailers_rx: mpsc::Receiver<HeaderMap>,
reply_tx: Mutex<spmc::Sender<Reply>>,
shutdown_signal: Option<oneshot::Sender<()>>,
thread: Option<thread::JoinHandle<()>>,
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2820,6 +2850,10 @@ impl Serve {
Ok(buf)
}
+ fn trailers(&self) -> HeaderMap {
+ self.trailers_rx.recv().expect("trailers")
+ }
+
fn reply(&self) -> ReplyBuilder<'_> {
ReplyBuilder { tx: &self.reply_tx }
}
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2933,6 +2967,7 @@ impl Drop for Serve {
#[derive(Clone)]
struct TestService {
tx: mpsc::Sender<Msg>,
+ trailers_tx: mpsc::Sender<HeaderMap>,
reply: spmc::Receiver<Reply>,
}
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2963,6 +2998,7 @@ impl Service<Request<IncomingBody>> for TestService {
fn call(&self, mut req: Request<IncomingBody>) -> Self::Future {
let tx = self.tx.clone();
+ let trailers_tx = self.trailers_tx.clone();
let replies = self.reply.clone();
Box::pin(async move {
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2972,6 +3008,9 @@ impl Service<Request<IncomingBody>> for TestService {
if frame.is_data() {
tx.send(Msg::Chunk(frame.into_data().unwrap().to_vec()))
.unwrap();
+ } else if frame.is_trailers() {
+ let trailers = frame.into_trailers().unwrap();
+ trailers_tx.send(trailers).unwrap();
}
}
Err(err) => {
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -3100,6 +3139,7 @@ impl ServeOptions {
let (addr_tx, addr_rx) = mpsc::channel();
let (msg_tx, msg_rx) = mpsc::channel();
+ let (trailers_tx, trailers_rx) = mpsc::channel();
let (reply_tx, reply_rx) = spmc::channel();
let (shutdown_tx, mut shutdown_rx) = oneshot::channel();
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -3123,6 +3163,7 @@ impl ServeOptions {
loop {
let msg_tx = msg_tx.clone();
+ let trailers_tx = trailers_tx.clone();
let reply_rx = reply_rx.clone();
tokio::select! {
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -3135,6 +3176,7 @@ impl ServeOptions {
let reply_rx = reply_rx.clone();
let service = TestService {
tx: msg_tx,
+ trailers_tx,
reply: reply_rx,
};
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -3162,6 +3204,7 @@ impl ServeOptions {
Serve {
msg_rx,
+ trailers_rx,
reply_tx: Mutex::new(reply_tx),
addr,
shutdown_signal: Some(shutdown_tx),
|
hyperium/hyper
|
aa7ff605da3b706e855f9633b8dddeb9463217d4
|
|
[
"3615",
"3615"
] |
1.2
|
bc9a86f58f8bd5c35b2bfd7e632ec132280d79ba
|
diff --git a/src/server/conn/http1.rs b/src/server/conn/http1.rs
--- a/src/server/conn/http1.rs
+++ b/src/server/conn/http1.rs
@@ -482,7 +482,11 @@ where
/// This `Connection` should continue to be polled until shutdown
/// can finish.
pub fn graceful_shutdown(mut self: Pin<&mut Self>) {
- Pin::new(self.inner.as_mut().unwrap()).graceful_shutdown()
+ // Connection (`inner`) is `None` if it was upgraded (and `poll` is `Ready`).
+ // In that case, we don't need to call `graceful_shutdown`.
+ if let Some(conn) = self.inner.as_mut() {
+ Pin::new(conn).graceful_shutdown()
+ }
}
}
|
2024-03-30T11:42:44Z
| 3,616
|
Panic on graceful shutdown for http/1
**Version**
Encountered with `1.x`
**Platform**
Doesn't matter
**Description**
Attempt to call `graceful_shutdown` for H1 connection which is upgraded (=> `Poll::Ready`) leads to panic:
```
panic was raised: panicked at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/hyper-1.2.0/src/server/conn/http1.rs:483:38:
called `Option::unwrap()` on a `None` value
```
The reason is this line:
```rs
Pin::new(self.inner.as_mut().unwrap()).graceful_shutdown()
```
When connection is Upgraded (& future is `Ready`), it's `None` https://github.com/hyperium/hyper/blob/bc9a86f58f8bd5c35b2bfd7e632ec132280d79ba/src/server/conn/http1.rs#L502-L506
So we should avoid unwrapping
Panic on graceful shutdown for http/1
**Version**
Encountered with `1.x`
**Platform**
Doesn't matter
**Description**
Attempt to call `graceful_shutdown` for H1 connection which is upgraded (=> `Poll::Ready`) leads to panic:
```
panic was raised: panicked at /usr/local/cargo/registry/src/index.crates.io-6f17d22bba15001f/hyper-1.2.0/src/server/conn/http1.rs:483:38:
called `Option::unwrap()` on a `None` value
```
The reason is this line:
```rs
Pin::new(self.inner.as_mut().unwrap()).graceful_shutdown()
```
When connection is Upgraded (& future is `Ready`), it's `None` https://github.com/hyperium/hyper/blob/bc9a86f58f8bd5c35b2bfd7e632ec132280d79ba/src/server/conn/http1.rs#L502-L506
So we should avoid unwrapping
|
hyperium__hyper-3616
|
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1256,6 +1256,67 @@ async fn disable_keep_alive_post_request() {
child.join().unwrap();
}
+#[tokio::test]
+async fn http1_graceful_shutdown_after_upgrade() {
+ let (listener, addr) = setup_tcp_listener();
+ let (read_101_tx, read_101_rx) = oneshot::channel();
+
+ thread::spawn(move || {
+ let mut tcp = connect(&addr);
+ tcp.write_all(
+ b"\
+ GET / HTTP/1.1\r\n\
+ Upgrade: foobar\r\n\
+ Connection: upgrade\r\n\
+ \r\n\
+ eagerly optimistic\
+ ",
+ )
+ .expect("write 1");
+ let mut buf = [0; 256];
+ tcp.read(&mut buf).expect("read 1");
+
+ let response = s(&buf);
+ assert!(response.starts_with("HTTP/1.1 101 Switching Protocols\r\n"));
+ assert!(!has_header(&response, "content-length"));
+ let _ = read_101_tx.send(());
+ });
+
+ let (upgrades_tx, upgrades_rx) = mpsc::channel();
+ let svc = service_fn(move |req: Request<IncomingBody>| {
+ let on_upgrade = hyper::upgrade::on(req);
+ let _ = upgrades_tx.send(on_upgrade);
+ future::ok::<_, hyper::Error>(
+ Response::builder()
+ .status(101)
+ .header("upgrade", "foobar")
+ .body(Empty::<Bytes>::new())
+ .unwrap(),
+ )
+ });
+
+ let (socket, _) = listener.accept().await.unwrap();
+ let socket = TokioIo::new(socket);
+
+ let mut conn = http1::Builder::new()
+ .serve_connection(socket, svc)
+ .with_upgrades();
+ (&mut conn).await.unwrap();
+
+ let on_upgrade = upgrades_rx.recv().unwrap();
+
+ // wait so that we don't write until other side saw 101 response
+ read_101_rx.await.unwrap();
+
+ let upgraded = on_upgrade.await.expect("on_upgrade");
+ let parts = upgraded.downcast::<TokioIo<TkTcpStream>>().unwrap();
+ assert_eq!(parts.read_buf, "eagerly optimistic");
+
+ pin!(conn);
+ // graceful shutdown doesn't cause issues or panic. It should be ignored after upgrade
+ conn.as_mut().graceful_shutdown();
+}
+
#[tokio::test]
async fn empty_parse_eof_does_not_return_error() {
let (listener, addr) = setup_tcp_listener();
|
hyperium/hyper
|
bc9a86f58f8bd5c35b2bfd7e632ec132280d79ba
|
|
[
"3477"
] |
1.1
|
a9fa893f18c6409abae2e1dcbba0f4487df54d4f
|
diff --git a/Cargo.toml b/Cargo.toml
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -41,6 +41,8 @@ want = { version = "0.3", optional = true }
[dev-dependencies]
form_urlencoded = "1"
+futures-channel = { version = "0.3", features = ["sink"] }
+futures-util = { version = "0.3", default-features = false, features = ["sink"] }
http-body-util = "0.1"
pretty_env_logger = "0.5"
spmc = "0.3"
diff --git a/benches/end_to_end.rs b/benches/end_to_end.rs
--- a/benches/end_to_end.rs
+++ b/benches/end_to_end.rs
@@ -294,7 +291,7 @@ impl Opts {
.build()
.expect("rt build"),
);
- //let exec = rt.clone();
+ let exec = rt.clone();
let req_len = self.request_body.map(|b| b.len()).unwrap_or(0) as u64;
let req_len = if self.request_chunks > 0 {
diff --git a/benches/end_to_end.rs b/benches/end_to_end.rs
--- a/benches/end_to_end.rs
+++ b/benches/end_to_end.rs
@@ -344,19 +341,21 @@ impl Opts {
let make_request = || {
let chunk_cnt = self.request_chunks;
let body = if chunk_cnt > 0 {
- /*
- let (mut tx, body) = Body::channel();
+ let (mut tx, rx) = futures_channel::mpsc::channel(0);
+
let chunk = self
.request_body
.expect("request_chunks means request_body");
exec.spawn(async move {
+ use futures_util::SinkExt;
+ use hyper::body::Frame;
for _ in 0..chunk_cnt {
- tx.send_data(chunk.into()).await.expect("send_data");
+ tx.send(Ok(Frame::data(bytes::Bytes::from(chunk))))
+ .await
+ .expect("send_data");
}
});
- body
- */
- todo!("request_chunks");
+ http_body_util::StreamBody::new(rx).boxed()
} else if let Some(chunk) = self.request_body {
http_body_util::Full::new(bytes::Bytes::from(chunk)).boxed()
} else {
|
Can I take this one?
|
2024-01-08T16:38:08Z
| 3,517
|
Re-enable end-to-end request chunks benchmarks
The request chunks benchmarks were disabled as part of the upgrade to v1.0. It should now be easier to re-enable them.
- Update the [code](https://github.com/hyperium/hyper/blob/d3cfb9e0b1928701cfdd96c9551c4bd81c24e83a/benches/end_to_end.rs#L347-L358) to use an async `mpsc` channel, and wrap it in a `http_body_util::StreamBody`.
- Remove `#[ignore]` from the benchmarks that were using chunks.
|
hyperium__hyper-3517
|
diff --git a/benches/end_to_end.rs b/benches/end_to_end.rs
--- a/benches/end_to_end.rs
+++ b/benches/end_to_end.rs
@@ -140,7 +140,6 @@ fn http2_parallel_x10_req_10mb(b: &mut test::Bencher) {
}
#[bench]
-#[ignore]
fn http2_parallel_x10_req_10kb_100_chunks(b: &mut test::Bencher) {
let body = &[b'x'; 1024 * 10];
opts()
diff --git a/benches/end_to_end.rs b/benches/end_to_end.rs
--- a/benches/end_to_end.rs
+++ b/benches/end_to_end.rs
@@ -152,7 +151,6 @@ fn http2_parallel_x10_req_10kb_100_chunks(b: &mut test::Bencher) {
}
#[bench]
-#[ignore]
fn http2_parallel_x10_req_10kb_100_chunks_adaptive_window(b: &mut test::Bencher) {
let body = &[b'x'; 1024 * 10];
opts()
diff --git a/benches/end_to_end.rs b/benches/end_to_end.rs
--- a/benches/end_to_end.rs
+++ b/benches/end_to_end.rs
@@ -165,7 +163,6 @@ fn http2_parallel_x10_req_10kb_100_chunks_adaptive_window(b: &mut test::Bencher)
}
#[bench]
-#[ignore]
fn http2_parallel_x10_req_10kb_100_chunks_max_window(b: &mut test::Bencher) {
let body = &[b'x'; 1024 * 10];
opts()
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -342,7 +342,7 @@ mod tests {
use super::{channel, Callback, Receiver};
#[derive(Debug)]
- struct Custom(i32);
+ struct Custom(#[allow(dead_code)] i32);
impl<T, U> Future for Receiver<T, U> {
type Output = Option<(T, Callback<T, U>)>;
|
hyperium/hyper
|
a9fa893f18c6409abae2e1dcbba0f4487df54d4f
|
[
"2719"
] |
0.3
|
6fd696e10974f10b2c6b9d16393bbbfa21c2333f
|
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -8,7 +8,7 @@ use std::time::Duration;
use crate::rt::{Read, Write};
use bytes::{Buf, Bytes};
-use http::header::{HeaderValue, CONNECTION};
+use http::header::{HeaderValue, CONNECTION, TE};
use http::{HeaderMap, Method, Version};
use httparse::ParserConfig;
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -75,6 +75,7 @@ where
// We assume a modern world where the remote speaks HTTP/1.1.
// If they tell us otherwise, we'll downgrade in `read_head`.
version: Version::HTTP_11,
+ allow_trailer_fields: false,
},
_marker: PhantomData,
}
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -264,6 +265,13 @@ where
self.state.reading = Reading::Body(Decoder::new(msg.decode));
}
+ self.state.allow_trailer_fields = msg
+ .head
+ .headers
+ .get(TE)
+ .map(|te_header| te_header == "trailers")
+ .unwrap_or(false);
+
Poll::Ready(Some(Ok((msg.head, msg.decode, wants))))
}
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -640,6 +648,31 @@ where
self.state.writing = state;
}
+ pub(crate) fn write_trailers(&mut self, trailers: HeaderMap) {
+ if T::is_server() && self.state.allow_trailer_fields == false {
+ debug!("trailers not allowed to be sent");
+ return;
+ }
+ debug_assert!(self.can_write_body() && self.can_buffer_body());
+
+ match self.state.writing {
+ Writing::Body(ref encoder) => {
+ if let Some(enc_buf) =
+ encoder.encode_trailers(trailers, self.state.title_case_headers)
+ {
+ self.io.buffer(enc_buf);
+
+ self.state.writing = if encoder.is_last() || encoder.is_close_delimited() {
+ Writing::Closed
+ } else {
+ Writing::KeepAlive
+ };
+ }
+ }
+ _ => unreachable!("write_trailers invalid state: {:?}", self.state.writing),
+ }
+ }
+
pub(crate) fn write_body_and_end(&mut self, chunk: B) {
debug_assert!(self.can_write_body() && self.can_buffer_body());
// empty chunks should be discarded at Dispatcher level
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -842,6 +875,8 @@ struct State {
upgrade: Option<crate::upgrade::Pending>,
/// Either HTTP/1.0 or 1.1 connection
version: Version,
+ /// Flag to track if trailer fields are allowed to be sent
+ allow_trailer_fields: bool,
}
#[derive(Debug)]
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -351,27 +351,33 @@ where
*clear_body = true;
crate::Error::new_user_body(e)
})?;
- let chunk = if let Ok(data) = frame.into_data() {
- data
- } else {
- trace!("discarding non-data frame");
- continue;
- };
- let eos = body.is_end_stream();
- if eos {
- *clear_body = true;
- if chunk.remaining() == 0 {
- trace!("discarding empty chunk");
- self.conn.end_body()?;
+
+ if frame.is_data() {
+ let chunk = frame.into_data().unwrap_or_else(|_| unreachable!());
+ let eos = body.is_end_stream();
+ if eos {
+ *clear_body = true;
+ if chunk.remaining() == 0 {
+ trace!("discarding empty chunk");
+ self.conn.end_body()?;
+ } else {
+ self.conn.write_body_and_end(chunk);
+ }
} else {
- self.conn.write_body_and_end(chunk);
+ if chunk.remaining() == 0 {
+ trace!("discarding empty chunk");
+ continue;
+ }
+ self.conn.write_body(chunk);
}
+ } else if frame.is_trailers() {
+ *clear_body = true;
+ self.conn.write_trailers(
+ frame.into_trailers().unwrap_or_else(|_| unreachable!()),
+ );
} else {
- if chunk.remaining() == 0 {
- trace!("discarding empty chunk");
- continue;
- }
- self.conn.write_body(chunk);
+ trace!("discarding unknown frame");
+ continue;
}
} else {
*clear_body = true;
diff --git a/src/proto/h1/encode.rs b/src/proto/h1/encode.rs
--- a/src/proto/h1/encode.rs
+++ b/src/proto/h1/encode.rs
@@ -1,10 +1,19 @@
+use std::collections::HashMap;
use std::fmt;
use std::io::IoSlice;
use bytes::buf::{Chain, Take};
-use bytes::Buf;
+use bytes::{Buf, Bytes};
+use http::{
+ header::{
+ AUTHORIZATION, CACHE_CONTROL, CONTENT_ENCODING, CONTENT_LENGTH, CONTENT_RANGE,
+ CONTENT_TYPE, HOST, MAX_FORWARDS, SET_COOKIE, TE, TRAILER, TRANSFER_ENCODING,
+ },
+ HeaderMap, HeaderName, HeaderValue,
+};
use super::io::WriteBuf;
+use super::role::{write_headers, write_headers_title_case};
type StaticBuf = &'static [u8];
diff --git a/src/proto/h1/encode.rs b/src/proto/h1/encode.rs
--- a/src/proto/h1/encode.rs
+++ b/src/proto/h1/encode.rs
@@ -26,7 +35,7 @@ pub(crate) struct NotEof(u64);
#[derive(Debug, PartialEq, Clone)]
enum Kind {
/// An Encoder for when Transfer-Encoding includes `chunked`.
- Chunked,
+ Chunked(Option<Vec<HeaderValue>>),
/// An Encoder for when Content-Length is set.
///
/// Enforces that the body is not longer than the Content-Length header.
diff --git a/src/proto/h1/encode.rs b/src/proto/h1/encode.rs
--- a/src/proto/h1/encode.rs
+++ b/src/proto/h1/encode.rs
@@ -45,6 +54,7 @@ enum BufKind<B> {
Limited(Take<B>),
Chunked(Chain<Chain<ChunkSize, B>, StaticBuf>),
ChunkedEnd(StaticBuf),
+ Trailers(Chain<Chain<StaticBuf, Bytes>, StaticBuf>),
}
impl Encoder {
diff --git a/src/proto/h1/encode.rs b/src/proto/h1/encode.rs
--- a/src/proto/h1/encode.rs
+++ b/src/proto/h1/encode.rs
@@ -55,7 +65,7 @@ impl Encoder {
}
}
pub(crate) fn chunked() -> Encoder {
- Encoder::new(Kind::Chunked)
+ Encoder::new(Kind::Chunked(None))
}
pub(crate) fn length(len: u64) -> Encoder {
diff --git a/src/proto/h1/encode.rs b/src/proto/h1/encode.rs
--- a/src/proto/h1/encode.rs
+++ b/src/proto/h1/encode.rs
@@ -67,6 +77,16 @@ impl Encoder {
Encoder::new(Kind::CloseDelimited)
}
+ pub(crate) fn into_chunked_with_trailing_fields(self, trailers: Vec<HeaderValue>) -> Encoder {
+ match self.kind {
+ Kind::Chunked(_) => Encoder {
+ kind: Kind::Chunked(Some(trailers)),
+ is_last: self.is_last,
+ },
+ _ => self,
+ }
+ }
+
pub(crate) fn is_eof(&self) -> bool {
matches!(self.kind, Kind::Length(0))
}
diff --git a/src/proto/h1/encode.rs b/src/proto/h1/encode.rs
--- a/src/proto/h1/encode.rs
+++ b/src/proto/h1/encode.rs
@@ -89,10 +109,17 @@ impl Encoder {
}
}
+ pub(crate) fn is_chunked(&self) -> bool {
+ match self.kind {
+ Kind::Chunked(_) => true,
+ _ => false,
+ }
+ }
+
pub(crate) fn end<B>(&self) -> Result<Option<EncodedBuf<B>>, NotEof> {
match self.kind {
Kind::Length(0) => Ok(None),
- Kind::Chunked => Ok(Some(EncodedBuf {
+ Kind::Chunked(_) => Ok(Some(EncodedBuf {
kind: BufKind::ChunkedEnd(b"0\r\n\r\n"),
})),
#[cfg(feature = "server")]
diff --git a/src/proto/h1/encode.rs b/src/proto/h1/encode.rs
--- a/src/proto/h1/encode.rs
+++ b/src/proto/h1/encode.rs
@@ -109,7 +136,7 @@ impl Encoder {
debug_assert!(len > 0, "encode() called with empty buf");
let kind = match self.kind {
- Kind::Chunked => {
+ Kind::Chunked(_) => {
trace!("encoding chunked {}B", len);
let buf = ChunkSize::new(len)
.chain(msg)
diff --git a/src/proto/h1/encode.rs b/src/proto/h1/encode.rs
--- a/src/proto/h1/encode.rs
+++ b/src/proto/h1/encode.rs
@@ -136,6 +163,53 @@ impl Encoder {
EncodedBuf { kind }
}
+ pub(crate) fn encode_trailers<B>(
+ &self,
+ trailers: HeaderMap,
+ title_case_headers: bool,
+ ) -> Option<EncodedBuf<B>> {
+ match &self.kind {
+ Kind::Chunked(Some(ref allowed_trailer_fields)) => {
+ let allowed_trailer_field_map = allowed_trailer_field_map(&allowed_trailer_fields);
+
+ let mut cur_name = None;
+ let mut allowed_trailers = HeaderMap::new();
+
+ for (opt_name, value) in trailers {
+ if let Some(n) = opt_name {
+ cur_name = Some(n);
+ }
+ let name = cur_name.as_ref().expect("current header name");
+
+ if allowed_trailer_field_map.contains_key(name.as_str())
+ && valid_trailer_field(name)
+ {
+ allowed_trailers.insert(name, value);
+ }
+ }
+
+ let mut buf = Vec::new();
+ if title_case_headers {
+ write_headers_title_case(&allowed_trailers, &mut buf);
+ } else {
+ write_headers(&allowed_trailers, &mut buf);
+ }
+
+ if buf.is_empty() {
+ return None;
+ }
+
+ Some(EncodedBuf {
+ kind: BufKind::Trailers(b"0\r\n".chain(Bytes::from(buf)).chain(b"\r\n")),
+ })
+ }
+ _ => {
+ debug!("attempted to encode trailers for non-chunked response");
+ None
+ }
+ }
+ }
+
pub(super) fn encode_and_end<B>(&self, msg: B, dst: &mut WriteBuf<EncodedBuf<B>>) -> bool
where
B: Buf,
diff --git a/src/proto/h1/encode.rs b/src/proto/h1/encode.rs
--- a/src/proto/h1/encode.rs
+++ b/src/proto/h1/encode.rs
@@ -144,7 +218,7 @@ impl Encoder {
debug_assert!(len > 0, "encode() called with empty buf");
match self.kind {
- Kind::Chunked => {
+ Kind::Chunked(_) => {
trace!("encoding chunked {}B", len);
let buf = ChunkSize::new(len)
.chain(msg)
diff --git a/src/proto/h1/encode.rs b/src/proto/h1/encode.rs
--- a/src/proto/h1/encode.rs
+++ b/src/proto/h1/encode.rs
@@ -181,6 +255,40 @@ impl Encoder {
}
}
+fn valid_trailer_field(name: &HeaderName) -> bool {
+ match name {
+ &AUTHORIZATION => false,
+ &CACHE_CONTROL => false,
+ &CONTENT_ENCODING => false,
+ &CONTENT_LENGTH => false,
+ &CONTENT_RANGE => false,
+ &CONTENT_TYPE => false,
+ &HOST => false,
+ &MAX_FORWARDS => false,
+ &SET_COOKIE => false,
+ &TRAILER => false,
+ &TRANSFER_ENCODING => false,
+ &TE => false,
+ _ => true,
+ }
+}
+
+fn allowed_trailer_field_map(allowed_trailer_fields: &Vec<HeaderValue>) -> HashMap<String, ()> {
+ let mut trailer_map = HashMap::new();
+
+ for header_value in allowed_trailer_fields {
+ if let Ok(header_str) = header_value.to_str() {
+ let items: Vec<&str> = header_str.split(',').map(|item| item.trim()).collect();
+
+ for item in items {
+ trailer_map.entry(item.to_string()).or_insert(());
+ }
+ }
+ }
+
+ trailer_map
+}
+
impl<B> Buf for EncodedBuf<B>
where
B: Buf,
diff --git a/src/proto/h1/encode.rs b/src/proto/h1/encode.rs
--- a/src/proto/h1/encode.rs
+++ b/src/proto/h1/encode.rs
@@ -192,6 +300,7 @@ where
BufKind::Limited(ref b) => b.remaining(),
BufKind::Chunked(ref b) => b.remaining(),
BufKind::ChunkedEnd(ref b) => b.remaining(),
+ BufKind::Trailers(ref b) => b.remaining(),
}
}
diff --git a/src/proto/h1/encode.rs b/src/proto/h1/encode.rs
--- a/src/proto/h1/encode.rs
+++ b/src/proto/h1/encode.rs
@@ -202,6 +311,7 @@ where
BufKind::Limited(ref b) => b.chunk(),
BufKind::Chunked(ref b) => b.chunk(),
BufKind::ChunkedEnd(ref b) => b.chunk(),
+ BufKind::Trailers(ref b) => b.chunk(),
}
}
diff --git a/src/proto/h1/encode.rs b/src/proto/h1/encode.rs
--- a/src/proto/h1/encode.rs
+++ b/src/proto/h1/encode.rs
@@ -212,6 +322,7 @@ where
BufKind::Limited(ref mut b) => b.advance(cnt),
BufKind::Chunked(ref mut b) => b.advance(cnt),
BufKind::ChunkedEnd(ref mut b) => b.advance(cnt),
+ BufKind::Trailers(ref mut b) => b.advance(cnt),
}
}
diff --git a/src/proto/h1/encode.rs b/src/proto/h1/encode.rs
--- a/src/proto/h1/encode.rs
+++ b/src/proto/h1/encode.rs
@@ -222,6 +333,7 @@ where
BufKind::Limited(ref b) => b.chunks_vectored(dst),
BufKind::Chunked(ref b) => b.chunks_vectored(dst),
BufKind::ChunkedEnd(ref b) => b.chunks_vectored(dst),
+ BufKind::Trailers(ref b) => b.chunks_vectored(dst),
}
}
}
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -629,6 +629,7 @@ impl Server {
};
let mut encoder = Encoder::length(0);
+ let mut allowed_trailer_fields: Option<Vec<HeaderValue>> = None;
let mut wrote_date = false;
let mut cur_name = None;
let mut is_name_written = false;
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -815,6 +816,38 @@ impl Server {
header::DATE => {
wrote_date = true;
}
+ header::TRAILER => {
+ // check that we actually can send a chunked body...
+ if msg.head.version == Version::HTTP_10
+ || !Server::can_chunked(msg.req_method, msg.head.subject)
+ {
+ continue;
+ }
+
+ if !is_name_written {
+ is_name_written = true;
+ header_name_writer.write_header_name_with_colon(
+ dst,
+ "trailer: ",
+ header::TRAILER,
+ );
+ extend(dst, value.as_bytes());
+ } else {
+ extend(dst, b", ");
+ extend(dst, value.as_bytes());
+ }
+
+ match allowed_trailer_fields {
+ Some(ref mut allowed_trailer_fields) => {
+ allowed_trailer_fields.push(value);
+ }
+ None => {
+ allowed_trailer_fields = Some(vec![value]);
+ }
+ }
+
+ continue 'headers;
+ }
_ => (),
}
//TODO: this should perhaps instead combine them into
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -899,6 +932,12 @@ impl Server {
extend(dst, b"\r\n");
}
+ if encoder.is_chunked() {
+ if let Some(allowed_trailer_fields) = allowed_trailer_fields {
+ encoder = encoder.into_chunked_with_trailing_fields(allowed_trailer_fields);
+ }
+ }
+
Ok(encoder.set_last(is_last))
}
}
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1306,6 +1345,19 @@ impl Client {
}
};
+ let encoder = encoder.map(|enc| {
+ if enc.is_chunked() {
+ let allowed_trailer_fields: Vec<HeaderValue> =
+ headers.get_all(header::TRAILER).iter().cloned().collect();
+
+ if !allowed_trailer_fields.is_empty() {
+ return enc.into_chunked_with_trailing_fields(allowed_trailer_fields);
+ }
+ }
+
+ enc
+ });
+
// This is because we need a second mutable borrow to remove
// content-length header.
if let Some(encoder) = encoder {
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1468,8 +1520,7 @@ fn title_case(dst: &mut Vec<u8>, name: &[u8]) {
}
}
-#[cfg(feature = "client")]
-fn write_headers_title_case(headers: &HeaderMap, dst: &mut Vec<u8>) {
+pub(crate) fn write_headers_title_case(headers: &HeaderMap, dst: &mut Vec<u8>) {
for (name, value) in headers {
title_case(dst, name.as_str().as_bytes());
extend(dst, b": ");
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1478,8 +1529,7 @@ fn write_headers_title_case(headers: &HeaderMap, dst: &mut Vec<u8>) {
}
}
-#[cfg(feature = "client")]
-fn write_headers(headers: &HeaderMap, dst: &mut Vec<u8>) {
+pub(crate) fn write_headers(headers: &HeaderMap, dst: &mut Vec<u8>) {
for (name, value) in headers {
extend(dst, name.as_str().as_bytes());
extend(dst, b": ");
|
@Xuanwo I've updated the issue above to include a list of things I think that needs to be done. If anything is missing, we can add more instructions here!
|
2023-10-26T10:31:05Z
| 3,375
|
Sending HTTP/1.1 trailers
Here's a list of pieces needed to make this work:
- Update [`proto::h1::Dispatcher::poll_write()`](https://github.com/hyperium/hyper/blob/f1b89c117cffebed4b2b8eb2d221fd9b25c1d3d1/src/proto/h1/dispatch.rs#L365) so that when all data items are done (is `None`), check the body for trails `poll_trailers`.
- It might be we want to add some state to the dispatcher like `Wants::TRAILERS`, if we received a request with `TE: trailers`. This could be useful to skip checking for trailers if the request never said it supports them.
- It's likely that new state will need to be added, in case the data is done, but polling the trailers is `Pending`.
- Add `proto::h1::Conn::write_trailers()` [after `write_body()`](https://github.com/hyperium/hyper/blob/f1b89c117cffebed4b2b8eb2d221fd9b25c1d3d1/src/proto/h1/conn.rs#L626). The dispatcher would call this.
- Add [`proto::h1::Encoder::encode_trailers()`](https://github.com/hyperium/hyper/blob/f1b89c117cffebed4b2b8eb2d221fd9b25c1d3d1/src/proto/h1/encode.rs#L51) that flattens the `HeaderMap` into a `Buf`.
- A few unit tests for encoding trailers in the `encode` file.
- A couple tests, at least one in each of `tests/client.rs` and `tests/server.rs` that both sides can send trailers.
|
hyperium__hyper-3375
|
diff --git a/src/proto/h1/encode.rs b/src/proto/h1/encode.rs
--- a/src/proto/h1/encode.rs
+++ b/src/proto/h1/encode.rs
@@ -327,7 +439,16 @@ impl std::error::Error for NotEof {}
#[cfg(test)]
mod tests {
+ use std::iter::FromIterator;
+
use bytes::BufMut;
+ use http::{
+ header::{
+ AUTHORIZATION, CACHE_CONTROL, CONTENT_ENCODING, CONTENT_LENGTH, CONTENT_RANGE,
+ CONTENT_TYPE, HOST, MAX_FORWARDS, SET_COOKIE, TE, TRAILER, TRANSFER_ENCODING,
+ },
+ HeaderMap, HeaderName, HeaderValue,
+ };
use super::super::io::Cursor;
use super::Encoder;
diff --git a/src/proto/h1/encode.rs b/src/proto/h1/encode.rs
--- a/src/proto/h1/encode.rs
+++ b/src/proto/h1/encode.rs
@@ -402,4 +523,145 @@ mod tests {
assert!(!encoder.is_eof());
encoder.end::<()>().unwrap();
}
+
+ #[test]
+ fn chunked_with_valid_trailers() {
+ let encoder = Encoder::chunked();
+ let trailers = vec![HeaderValue::from_static("chunky-trailer")];
+ let encoder = encoder.into_chunked_with_trailing_fields(trailers);
+
+ let headers = HeaderMap::from_iter(
+ vec![
+ (
+ HeaderName::from_static("chunky-trailer"),
+ HeaderValue::from_static("header data"),
+ ),
+ (
+ HeaderName::from_static("should-not-be-included"),
+ HeaderValue::from_static("oops"),
+ ),
+ ]
+ .into_iter(),
+ );
+
+ let buf1 = encoder.encode_trailers::<&[u8]>(headers, false).unwrap();
+
+ let mut dst = Vec::new();
+ dst.put(buf1);
+ assert_eq!(dst, b"0\r\nchunky-trailer: header data\r\n\r\n");
+ }
+
+ #[test]
+ fn chunked_with_multiple_trailer_headers() {
+ let encoder = Encoder::chunked();
+ let trailers = vec![
+ HeaderValue::from_static("chunky-trailer"),
+ HeaderValue::from_static("chunky-trailer-2"),
+ ];
+ let encoder = encoder.into_chunked_with_trailing_fields(trailers);
+
+ let headers = HeaderMap::from_iter(
+ vec![
+ (
+ HeaderName::from_static("chunky-trailer"),
+ HeaderValue::from_static("header data"),
+ ),
+ (
+ HeaderName::from_static("chunky-trailer-2"),
+ HeaderValue::from_static("more header data"),
+ ),
+ ]
+ .into_iter(),
+ );
+
+ let buf1 = encoder.encode_trailers::<&[u8]>(headers, false).unwrap();
+
+ let mut dst = Vec::new();
+ dst.put(buf1);
+ assert_eq!(
+ dst,
+ b"0\r\nchunky-trailer: header data\r\nchunky-trailer-2: more header data\r\n\r\n"
+ );
+ }
+
+ #[test]
+ fn chunked_with_no_trailer_header() {
+ let encoder = Encoder::chunked();
+
+ let headers = HeaderMap::from_iter(
+ vec![(
+ HeaderName::from_static("chunky-trailer"),
+ HeaderValue::from_static("header data"),
+ )]
+ .into_iter(),
+ );
+
+ assert!(encoder
+ .encode_trailers::<&[u8]>(headers.clone(), false)
+ .is_none());
+
+ let trailers = vec![];
+ let encoder = encoder.into_chunked_with_trailing_fields(trailers);
+
+ assert!(encoder.encode_trailers::<&[u8]>(headers, false).is_none());
+ }
+
+ #[test]
+ fn chunked_with_invalid_trailers() {
+ let encoder = Encoder::chunked();
+
+ let trailers = format!(
+ "{},{},{},{},{},{},{},{},{},{},{},{}",
+ AUTHORIZATION,
+ CACHE_CONTROL,
+ CONTENT_ENCODING,
+ CONTENT_LENGTH,
+ CONTENT_RANGE,
+ CONTENT_TYPE,
+ HOST,
+ MAX_FORWARDS,
+ SET_COOKIE,
+ TRAILER,
+ TRANSFER_ENCODING,
+ TE,
+ );
+ let trailers = vec![HeaderValue::from_str(&trailers).unwrap()];
+ let encoder = encoder.into_chunked_with_trailing_fields(trailers);
+
+ let mut headers = HeaderMap::new();
+ headers.insert(AUTHORIZATION, HeaderValue::from_static("header data"));
+ headers.insert(CACHE_CONTROL, HeaderValue::from_static("header data"));
+ headers.insert(CONTENT_ENCODING, HeaderValue::from_static("header data"));
+ headers.insert(CONTENT_LENGTH, HeaderValue::from_static("header data"));
+ headers.insert(CONTENT_RANGE, HeaderValue::from_static("header data"));
+ headers.insert(CONTENT_TYPE, HeaderValue::from_static("header data"));
+ headers.insert(HOST, HeaderValue::from_static("header data"));
+ headers.insert(MAX_FORWARDS, HeaderValue::from_static("header data"));
+ headers.insert(SET_COOKIE, HeaderValue::from_static("header data"));
+ headers.insert(TRAILER, HeaderValue::from_static("header data"));
+ headers.insert(TRANSFER_ENCODING, HeaderValue::from_static("header data"));
+ headers.insert(TE, HeaderValue::from_static("header data"));
+
+ assert!(encoder.encode_trailers::<&[u8]>(headers, true).is_none());
+ }
+
+ #[test]
+ fn chunked_with_title_case_headers() {
+ let encoder = Encoder::chunked();
+ let trailers = vec![HeaderValue::from_static("chunky-trailer")];
+ let encoder = encoder.into_chunked_with_trailing_fields(trailers);
+
+ let headers = HeaderMap::from_iter(
+ vec![(
+ HeaderName::from_static("chunky-trailer"),
+ HeaderValue::from_static("header data"),
+ )]
+ .into_iter(),
+ );
+ let buf1 = encoder.encode_trailers::<&[u8]>(headers, true).unwrap();
+
+ let mut dst = Vec::new();
+ dst.put(buf1);
+ assert_eq!(dst, b"0\r\nChunky-Trailer: header data\r\n\r\n");
+ }
}
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -5,6 +5,7 @@ use std::convert::Infallible;
use std::fmt;
use std::future::Future;
use std::io::{Read, Write};
+use std::iter::FromIterator;
use std::net::{SocketAddr, TcpListener};
use std::pin::Pin;
use std::thread;
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -13,7 +14,7 @@ use std::time::Duration;
use http::uri::PathAndQuery;
use http_body_util::{BodyExt, StreamBody};
use hyper::body::Frame;
-use hyper::header::HeaderValue;
+use hyper::header::{HeaderMap, HeaderName, HeaderValue};
use hyper::{Method, Request, StatusCode, Uri, Version};
use bytes::Bytes;
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -408,6 +409,15 @@ macro_rules! __client_req_prop {
Frame::data,
)));
}};
+
+ ($req_builder:ident, $body:ident, $addr:ident, body_stream_with_trailers: $body_e:expr) => {{
+ use support::trailers::StreamBodyWithTrailers;
+ let (body, trailers) = $body_e;
+ $body = BodyExt::boxed(StreamBodyWithTrailers::with_trailers(
+ futures_util::TryStreamExt::map_ok(body, Frame::data),
+ trailers,
+ ));
+ }};
}
macro_rules! __client_req_header {
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -631,6 +641,44 @@ test! {
body: &b"hello"[..],
}
+test! {
+ name: client_post_req_body_chunked_with_trailer,
+
+ server:
+ expected: "\
+ POST / HTTP/1.1\r\n\
+ trailer: chunky-trailer\r\n\
+ host: {addr}\r\n\
+ transfer-encoding: chunked\r\n\
+ \r\n\
+ 5\r\n\
+ hello\r\n\
+ 0\r\n\
+ chunky-trailer: header data\r\n\
+ \r\n\
+ ",
+ reply: REPLY_OK,
+
+ client:
+ request: {
+ method: POST,
+ url: "http://{addr}/",
+ headers: {
+ "trailer" => "chunky-trailer",
+ },
+ body_stream_with_trailers: (
+ (futures_util::stream::once(async { Ok::<_, Infallible>(Bytes::from("hello"))})),
+ HeaderMap::from_iter(vec![(
+ HeaderName::from_static("chunky-trailer"),
+ HeaderValue::from_static("header data")
+ )].into_iter())),
+ },
+ response:
+ status: OK,
+ headers: {},
+ body: None,
+}
+
test! {
name: client_get_req_body_sized,
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -19,7 +19,7 @@ use futures_channel::oneshot;
use futures_util::future::{self, Either, FutureExt};
use h2::client::SendRequest;
use h2::{RecvStream, SendStream};
-use http::header::{HeaderName, HeaderValue};
+use http::header::{HeaderMap, HeaderName, HeaderValue};
use http_body_util::{combinators::BoxBody, BodyExt, Empty, Full, StreamBody};
use hyper::rt::Timer;
use hyper::rt::{Read as AsyncRead, Write as AsyncWrite};
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2595,6 +2595,94 @@ async fn http2_keep_alive_count_server_pings() {
.expect("timed out waiting for pings");
}
+#[test]
+fn http1_trailer_fields() {
+ let body = futures_util::stream::once(async move { Ok("hello".into()) });
+ let mut headers = HeaderMap::new();
+ headers.insert("chunky-trailer", "header data".parse().unwrap());
+ // Invalid trailer field that should not be sent
+ headers.insert("Host", "www.example.com".parse().unwrap());
+ // Not specified in Trailer header, so should not be sent
+ headers.insert("foo", "bar".parse().unwrap());
+
+ let server = serve();
+ server
+ .reply()
+ .header("transfer-encoding", "chunked")
+ .header("trailer", "chunky-trailer")
+ .body_stream_with_trailers(body, headers);
+ let mut req = connect(server.addr());
+ req.write_all(
+ b"\
+ GET / HTTP/1.1\r\n\
+ Host: example.domain\r\n\
+ Connection: keep-alive\r\n\
+ TE: trailers\r\n\
+ \r\n\
+ ",
+ )
+ .expect("writing");
+
+ let chunky_trailer_chunk = b"\r\nchunky-trailer: header data\r\n\r\n";
+ let res = read_until(&mut req, |buf| buf.ends_with(chunky_trailer_chunk)).expect("reading");
+ let sres = s(&res);
+
+ let expected_head =
+ "HTTP/1.1 200 OK\r\ntransfer-encoding: chunked\r\ntrailer: chunky-trailer\r\n";
+ assert_eq!(&sres[..expected_head.len()], expected_head);
+
+ // skip the date header
+ let date_fragment = "GMT\r\n\r\n";
+ let pos = sres.find(date_fragment).expect("find GMT");
+ let body = &sres[pos + date_fragment.len()..];
+
+ let expected_body = "5\r\nhello\r\n0\r\nchunky-trailer: header data\r\n\r\n";
+ assert_eq!(body, expected_body);
+}
+
+#[test]
+fn http1_trailer_fields_not_allowed() {
+ let body = futures_util::stream::once(async move { Ok("hello".into()) });
+ let mut headers = HeaderMap::new();
+ headers.insert("chunky-trailer", "header data".parse().unwrap());
+
+ let server = serve();
+ server
+ .reply()
+ .header("transfer-encoding", "chunked")
+ .header("trailer", "chunky-trailer")
+ .body_stream_with_trailers(body, headers);
+ let mut req = connect(server.addr());
+
+ // TE: trailers is not specified in request headers
+ req.write_all(
+ b"\
+ GET / HTTP/1.1\r\n\
+ Host: example.domain\r\n\
+ Connection: keep-alive\r\n\
+ \r\n\
+ ",
+ )
+ .expect("writing");
+
+ let last_chunk = b"\r\n0\r\n\r\n";
+ let res = read_until(&mut req, |buf| buf.ends_with(last_chunk)).expect("reading");
+ let sres = s(&res);
+
+ let expected_head =
+ "HTTP/1.1 200 OK\r\ntransfer-encoding: chunked\r\ntrailer: chunky-trailer\r\n";
+ assert_eq!(&sres[..expected_head.len()], expected_head);
+
+ // skip the date header
+ let date_fragment = "GMT\r\n\r\n";
+ let pos = sres.find(date_fragment).expect("find GMT");
+ let body = &sres[pos + date_fragment.len()..];
+
+ // no trailer fields should be sent because TE: trailers was not in request headers
+ let expected_body = "5\r\nhello\r\n0\r\n\r\n";
+ assert_eq!(body, expected_body);
+}
+
// -------------------------------------------------
// the Server that is used to run all the tests with
// -------------------------------------------------
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2700,6 +2788,19 @@ impl<'a> ReplyBuilder<'a> {
self.tx.lock().unwrap().send(Reply::Body(body)).unwrap();
}
+ fn body_stream_with_trailers<S>(self, stream: S, trailers: HeaderMap)
+ where
+ S: futures_util::Stream<Item = Result<Bytes, BoxError>> + Send + Sync + 'static,
+ {
+ use futures_util::TryStreamExt;
+ use hyper::body::Frame;
+ use support::trailers::StreamBodyWithTrailers;
+ let mut stream_body = StreamBodyWithTrailers::new(stream.map_ok(Frame::data));
+ stream_body.set_trailers(trailers);
+ let body = BodyExt::boxed(stream_body);
+ self.tx.lock().unwrap().send(Reply::Body(body)).unwrap();
+ }
+
#[allow(dead_code)]
fn error<E: Into<BoxError>>(self, err: E) {
self.tx
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -24,6 +24,8 @@ mod tokiort;
#[allow(unused)]
pub use tokiort::{TokioExecutor, TokioIo, TokioTimer};
+pub mod trailers;
+
#[allow(unused_macros)]
macro_rules! t {
(
diff --git /dev/null b/tests/support/trailers.rs
new file mode 100644
--- /dev/null
+++ b/tests/support/trailers.rs
@@ -0,0 +1,76 @@
+use bytes::Buf;
+use futures_util::stream::Stream;
+use http::header::HeaderMap;
+use http_body::{Body, Frame};
+use pin_project_lite::pin_project;
+use std::{
+ pin::Pin,
+ task::{Context, Poll},
+};
+
+pin_project! {
+ /// A body created from a [`Stream`].
+ #[derive(Clone, Debug)]
+ pub struct StreamBodyWithTrailers<S> {
+ #[pin]
+ stream: S,
+ trailers: Option<HeaderMap>,
+ }
+}
+
+impl<S> StreamBodyWithTrailers<S> {
+ /// Create a new `StreamBodyWithTrailers`.
+ pub fn new(stream: S) -> Self {
+ Self {
+ stream,
+ trailers: None,
+ }
+ }
+
+ pub fn with_trailers(stream: S, trailers: HeaderMap) -> Self {
+ Self {
+ stream,
+ trailers: Some(trailers),
+ }
+ }
+
+ pub fn set_trailers(&mut self, trailers: HeaderMap) {
+ self.trailers = Some(trailers);
+ }
+}
+
+impl<S, D, E> Body for StreamBodyWithTrailers<S>
+where
+ S: Stream<Item = Result<Frame<D>, E>>,
+ D: Buf,
+{
+ type Data = D;
+ type Error = E;
+
+ fn poll_frame(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ ) -> Poll<Option<Result<Frame<Self::Data>, Self::Error>>> {
+ let project = self.project();
+ match project.stream.poll_next(cx) {
+ Poll::Ready(Some(result)) => Poll::Ready(Some(result)),
+ Poll::Ready(None) => match project.trailers.take() {
+ Some(trailers) => Poll::Ready(Some(Ok(Frame::trailers(trailers)))),
+ None => Poll::Ready(None),
+ },
+ Poll::Pending => Poll::Pending,
+ }
+ }
+}
+
+impl<S: Stream> Stream for StreamBodyWithTrailers<S> {
+ type Item = S::Item;
+
+ fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ self.project().stream.poll_next(cx)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.stream.size_hint()
+ }
+}
|
hyperium/hyper
|
6fd696e10974f10b2c6b9d16393bbbfa21c2333f
|
[
"2844"
] |
0.3
|
0891c9e3084453f4ff239c5059f3dddcbe25f1fb
|
diff --git a/src/error.rs b/src/error.rs
--- a/src/error.rs
+++ b/src/error.rs
@@ -8,6 +8,18 @@ pub type Result<T> = std::result::Result<T, Error>;
type Cause = Box<dyn StdError + Send + Sync>;
/// Represents errors that can occur handling HTTP streams.
+///
+/// # Formatting
+///
+/// The `Display` implementation of this type will only print the details of
+/// this level of error, even though it may have been caused by another error
+/// and contain that error in its source. To print all the relevant
+/// information, including the source chain, using something like
+/// `std::error::Report`, or equivalent 3rd party types.
+///
+/// The contents of the formatted error message of this specific `Error` type
+/// is unspecified. **You must not depend on it.** The wording and details may
+/// change in any version, with the goal of improving error messages.
pub struct Error {
inner: Box<ErrorImpl>,
}
diff --git a/src/error.rs b/src/error.rs
--- a/src/error.rs
+++ b/src/error.rs
@@ -170,11 +182,6 @@ impl Error {
self.find_source::<TimedOut>().is_some()
}
- /// Consumes the error, returning its cause.
- pub fn into_cause(self) -> Option<Box<dyn StdError + Send + Sync>> {
- self.inner.cause
- }
-
pub(super) fn new(kind: Kind) -> Error {
Error {
inner: Box::new(ErrorImpl { kind, cause: None }),
diff --git a/src/error.rs b/src/error.rs
--- a/src/error.rs
+++ b/src/error.rs
@@ -324,11 +331,6 @@ impl Error {
}
}
- /// The error's standalone message, without the message from the source.
- pub fn message(&self) -> impl fmt::Display + '_ {
- self.description()
- }
-
fn description(&self) -> &str {
match self.inner.kind {
Kind::Parse(Parse::Method) => "invalid HTTP method parsed",
diff --git a/src/error.rs b/src/error.rs
--- a/src/error.rs
+++ b/src/error.rs
@@ -410,11 +412,7 @@ impl fmt::Debug for Error {
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- if let Some(ref cause) = self.inner.cause {
- write!(f, "{}: {}", self.description(), cause)
- } else {
- f.write_str(self.description())
- }
+ f.write_str(self.description())
}
}
|
I filed a proposal for the working group: https://github.com/rust-lang/project-error-handling/issues/53
|
2023-09-11T20:09:02Z
| 3,312
|
Figure out hyper::Error Display including source chain
The current `Display` output of `Error` [doesn't match what many people think it should](https://github.com/hyperium/hyper/blob/master/docs/ROADMAP.md#errors), namely that it prints the error chain. We need to either:
- Change `hyper::Error` to not print the source as well.
- Influence the errors-wg to allow printing the chain in `Display`, like proposed in https://github.com/seanmonstar/errors/issues/1.
|
hyperium__hyper-3312
|
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2318,10 +2318,6 @@ mod conn {
let error = client.send_request(req).await.unwrap_err();
assert!(error.is_user());
- assert_eq!(
- error.to_string(),
- "dispatch task is gone: user code panicked"
- );
}
async fn drain_til_eof<T: tokio::io::AsyncRead + Unpin>(mut sock: T) -> io::Result<()> {
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -523,6 +523,7 @@ fn post_with_chunked_body() {
#[test]
fn post_with_chunked_overflow() {
+ use std::error::Error as _;
let server = serve();
let mut req = connect(server.addr());
req.write_all(
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -542,7 +543,7 @@ fn post_with_chunked_overflow() {
.unwrap();
req.read(&mut [0; 256]).unwrap();
- let err = server.body_err().to_string();
+ let err = server.body_err().source().unwrap().to_string();
assert!(
err.contains("overflow"),
"error should be overflow: {:?}",
|
hyperium/hyper
|
6fd696e10974f10b2c6b9d16393bbbfa21c2333f
|
[
"2872"
] |
0.3
|
a45d5d5a04369f93334fc893875d8d1a49054e04
|
diff --git a/src/body/incoming.rs b/src/body/incoming.rs
--- a/src/body/incoming.rs
+++ b/src/body/incoming.rs
@@ -201,7 +201,16 @@ impl Body for Incoming {
ping.record_data(bytes.len());
return Poll::Ready(Some(Ok(Frame::data(bytes))));
}
- Some(Err(e)) => return Poll::Ready(Some(Err(crate::Error::new_body(e)))),
+ Some(Err(e)) => {
+ return match e.reason() {
+ // These reasons should cause the body reading to stop, but not fail it.
+ // The same logic as for `Read for H2Upgraded` is applied here.
+ Some(h2::Reason::NO_ERROR) | Some(h2::Reason::CANCEL) => {
+ Poll::Ready(None)
+ }
+ _ => Poll::Ready(Some(Err(crate::Error::new_body(e)))),
+ };
+ }
None => {
*data_done = true;
// fall through to trailers
diff --git a/src/proto/mod.rs b/src/proto/mod.rs
--- a/src/proto/mod.rs
+++ b/src/proto/mod.rs
@@ -50,7 +50,7 @@ pub(crate) enum BodyLength {
Unknown,
}
-/// Status of when a Disaptcher future completes.
+/// Status of when a Dispatcher future completes.
pub(crate) enum Dispatched {
/// Dispatcher completely shutdown connection.
Shutdown,
|
Yea, I think we've talked about this in a previous issue, but don't remember where. `h2` is making the "error" (the reset) trump any other frames that have been received. It should likely be changed to return all other received frames, and *then* return the error.
But _somewhere_ in the stack it should probably just suppress the `RST_STREAM(NO_ERROR)` and return the response, because the response is what's going to be meaningful to the user. The `RST_STREAM` here is just being used as a "shut up and listen" signal.
Yes, it should return the response, that's why I mean. And then the body can return that there was a `NO_ERROR` error. It should still be given to the user, so they know something happened.
Stumbled on this. How can I suppress the specific failure `RST_STREAM(NO_ERROR)` somehow? How can I workaround this? I'm also in for contributing this fix :)
|
2023-07-23T01:01:36Z
| 3,275
|
Client: handle `RST_STREAM` with `NO_ERROR` set for the reason
**Version**
```
hyper = "0.14.18"
h2 = "0.3.13"
```
**Platform**
```
> uname -a
Linux <REDACTED> 5.17.5-76051705-generic #202204271406~1651504840~22.04~63e51bd SMP PREEMPT Mon May 2 15: x86_64 x86_64 x86_64 GNU/Linux
```
**Description**
I've found that Google Cloud Storage's API can respond with HTTP/2 `RST_STREAM` frame with `NO_ERROR` set for the reason, which appears to mean "stop sending the request body and read my response" according to https://datatracker.ietf.org/doc/html/rfc7540#section-8.1
> A server can send a complete response prior to the client sending an entire
request if the response does not depend on any portion of the request
that has not been sent and received. When this is true, a server MAY
request that the client abort transmission of a request without error
by sending a RST_STREAM with an error code of NO_ERROR after sending
a complete response (i.e., a frame with the END_STREAM flag).
Clients MUST NOT discard responses as a result of receiving such a
RST_STREAM, though clients can always discard responses at their
discretion for other reasons.
I believe this is happening in response to a `PutObject` request when the bucket is being rate limited for writes. The server is trying to tell the client to stop sending the request body because it won't be processed, and instead it should immediately read the response to discover the `429 Too Many Requests` error code.
However, Hyper's client implementation appears to just return the `RST_STREAM` message as an error and discards the response instead of handling it, which gives a hilariously confusing error message of:
```
error reading a body from connection: stream error received: not a result of an error
```
To be compliant with the spec, the implementation should stop sending the body and immediately read the response and return it.
For context, I'm using the Gcloud Storage API via https://crates.io/crates/aws-sdk-s3 (because the Gcloud Rust SDK doesn't support streaming bodies, but thankfully Gcloud Storage exposes an S3-compatible API), which uses Hyper internally. `aws-sdk-s3` appears to be returning the error from Hyper verbatim, however.
|
hyperium__hyper-3275
|
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1338,7 +1338,7 @@ mod conn {
use bytes::{Buf, Bytes};
use futures_channel::{mpsc, oneshot};
use futures_util::future::{self, poll_fn, FutureExt, TryFutureExt};
- use http_body_util::{BodyExt, Empty, StreamBody};
+ use http_body_util::{BodyExt, Empty, Full, StreamBody};
use hyper::rt::Timer;
use tokio::io::{AsyncReadExt as _, AsyncWriteExt as _};
use tokio::net::{TcpListener as TkTcpListener, TcpStream};
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2126,6 +2126,62 @@ mod conn {
.expect("client should be open");
}
+ #[tokio::test]
+ async fn http2_responds_before_consuming_request_body() {
+ // Test that a early-response from server works correctly (request body wasn't fully consumed).
+ // https://github.com/hyperium/hyper/issues/2872
+ use hyper::service::service_fn;
+
+ let _ = pretty_env_logger::try_init();
+
+ let (listener, addr) = setup_tk_test_server().await;
+
+ // Spawn an HTTP2 server that responds before reading the whole request body.
+ // It's normal case to decline the request due to headers or size of the body.
+ tokio::spawn(async move {
+ let sock = TokioIo::new(listener.accept().await.unwrap().0);
+ hyper::server::conn::http2::Builder::new(TokioExecutor)
+ .timer(TokioTimer)
+ .serve_connection(
+ sock,
+ service_fn(|_req| async move {
+ Ok::<_, hyper::Error>(Response::new(Full::new(Bytes::from(
+ "No bread for you!",
+ ))))
+ }),
+ )
+ .await
+ .expect("serve_connection");
+ });
+
+ let io = tcp_connect(&addr).await.expect("tcp connect");
+ let (mut client, conn) = conn::http2::Builder::new(TokioExecutor)
+ .timer(TokioTimer)
+ .handshake(io)
+ .await
+ .expect("http handshake");
+
+ tokio::spawn(async move {
+ conn.await.expect("client conn shouldn't error");
+ });
+
+ // Use a channel to keep request stream open
+ let (_tx, recv) = mpsc::channel::<Result<Frame<Bytes>, Box<dyn Error + Send + Sync>>>(0);
+ let req = Request::post("/a").body(StreamBody::new(recv)).unwrap();
+ let resp = client.send_request(req).await.expect("send_request");
+ assert!(resp.status().is_success());
+
+ let mut body = String::new();
+ concat(resp.into_body())
+ .await
+ .unwrap()
+ .reader()
+ .read_to_string(&mut body)
+ .unwrap();
+
+ assert_eq!(&body, "No bread for you!");
+ }
+
#[tokio::test]
async fn h2_connect() {
let (listener, addr) = setup_tk_test_server().await;
|
hyperium/hyper
|
6fd696e10974f10b2c6b9d16393bbbfa21c2333f
|
[
"2872"
] |
0.14
|
d77c2599bc023b258b90a17f5b633c8b7b0cbd4b
|
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -323,7 +323,12 @@ impl Body {
ping.record_data(bytes.len());
Poll::Ready(Some(Ok(bytes)))
}
- Some(Err(e)) => Poll::Ready(Some(Err(crate::Error::new_body(e)))),
+ Some(Err(e)) => match e.reason() {
+ // These reasons should cause stop of body reading, but nor fail it.
+ // The same logic as for `AsyncRead for H2Upgraded` is applied here.
+ Some(h2::Reason::NO_ERROR) | Some(h2::Reason::CANCEL) => Poll::Ready(None),
+ _ => Poll::Ready(Some(Err(crate::Error::new_body(e)))),
+ },
None => Poll::Ready(None),
},
diff --git a/src/proto/mod.rs b/src/proto/mod.rs
--- a/src/proto/mod.rs
+++ b/src/proto/mod.rs
@@ -50,7 +50,7 @@ pub(crate) enum BodyLength {
Unknown,
}
-/// Status of when a Disaptcher future completes.
+/// Status of when a Dispatcher future completes.
pub(crate) enum Dispatched {
/// Dispatcher completely shutdown connection.
Shutdown,
|
Yea, I think we've talked about this in a previous issue, but don't remember where. `h2` is making the "error" (the reset) trump any other frames that have been received. It should likely be changed to return all other received frames, and *then* return the error.
But _somewhere_ in the stack it should probably just suppress the `RST_STREAM(NO_ERROR)` and return the response, because the response is what's going to be meaningful to the user. The `RST_STREAM` here is just being used as a "shut up and listen" signal.
Yes, it should return the response, that's what I mean. And then the body can return that there was a `NO_ERROR` error. It should still be given to the user, so they know something happened.
Stumbled on this. How can I suppress the specific failure `RST_STREAM(NO_ERROR)` somehow? How can I work around this? I'm also in for contributing this fix :)
|
2023-07-23T01:01:19Z
| 3,274
|
Client: handle `RST_STREAM` with `NO_ERROR` set for the reason
**Version**
```
hyper = "0.14.18"
h2 = "0.3.13"
```
**Platform**
```
> uname -a
Linux <REDACTED> 5.17.5-76051705-generic #202204271406~1651504840~22.04~63e51bd SMP PREEMPT Mon May 2 15: x86_64 x86_64 x86_64 GNU/Linux
```
**Description**
I've found that Google Cloud Storage's API can respond with HTTP/2 `RST_STREAM` frame with `NO_ERROR` set for the reason, which appears to mean "stop sending the request body and read my response" according to https://datatracker.ietf.org/doc/html/rfc7540#section-8.1
> A server can send a complete response prior to the client sending an entire
request if the response does not depend on any portion of the request
that has not been sent and received. When this is true, a server MAY
request that the client abort transmission of a request without error
by sending a RST_STREAM with an error code of NO_ERROR after sending
a complete response (i.e., a frame with the END_STREAM flag).
Clients MUST NOT discard responses as a result of receiving such a
RST_STREAM, though clients can always discard responses at their
discretion for other reasons.
I believe this is happening in response to a `PutObject` request when the bucket is being rate limited for writes. The server is trying to tell the client to stop sending the request body because it won't be processed, and instead it should immediately read the response to discover the `429 Too Many Requests` error code.
However, Hyper's client implementation appears to just return the `RST_STREAM` message as an error and discards the response instead of handling it, which gives a hilariously confusing error message of:
```
error reading a body from connection: stream error received: not a result of an error
```
To be compliant with the spec, the implementation should stop sending the body and immediately read the response and return it.
For context, I'm using the Gcloud Storage API via https://crates.io/crates/aws-sdk-s3 (because the Gcloud Rust SDK doesn't support streaming bodies, but thankfully Gcloud Storage exposes an S3-compatible API), which uses Hyper internally. `aws-sdk-s3` appears to be returning the error from Hyper verbatim, however.
|
hyperium__hyper-3274
|
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -3154,6 +3154,61 @@ mod conn {
.expect("client should be open");
}
+ #[tokio::test]
+ async fn http2_responds_before_consuming_request_body() {
+ // Test that a early-response from server works correctly (request body wasn't fully consumed).
+ // https://github.com/hyperium/hyper/issues/2872
+ use hyper::service::service_fn;
+
+ let _ = pretty_env_logger::try_init();
+
+ let listener = TkTcpListener::bind(SocketAddr::from(([127, 0, 0, 1], 0)))
+ .await
+ .unwrap();
+ let addr = listener.local_addr().unwrap();
+
+ // Spawn an HTTP2 server that responds before reading the whole request body.
+ // It's normal case to decline the request due to headers or size of the body.
+ tokio::spawn(async move {
+ let sock = listener.accept().await.unwrap().0;
+ hyper::server::conn::Http::new()
+ .http2_only(true)
+ .serve_connection(
+ sock,
+ service_fn(|_req| async move {
+ Ok::<_, hyper::Error>(http::Response::new(hyper::Body::from(
+ "No bread for you!",
+ )))
+ }),
+ )
+ .await
+ .expect("serve_connection");
+ });
+
+ let io = tcp_connect(&addr).await.expect("tcp connect");
+ let (mut client, conn) = conn::Builder::new()
+ .http2_only(true)
+ .handshake::<_, Body>(io)
+ .await
+ .expect("http handshake");
+
+ tokio::spawn(async move {
+ conn.await.expect("client conn shouldn't error");
+ });
+
+ // Use a channel to keep request stream open
+ let (_tx, body) = hyper::Body::channel();
+ let req = Request::post("/a").body(body).unwrap();
+ let resp = client.send_request(req).await.expect("send_request");
+ assert!(resp.status().is_success());
+
+ let body = hyper::body::to_bytes(resp.into_body())
+ .await
+ .expect("get response body with no error");
+
+ assert_eq!(body.as_ref(), b"No bread for you!");
+ }
+
#[tokio::test]
async fn h2_connect() {
let _ = pretty_env_logger::try_init();
|
hyperium/hyper
|
a24f0c0af8e1f4c6b7cc3a47c83eb6e4af88aca6
|
[
"2730"
] |
0.3
|
d92d3917d950e4c61c37c2170f3ce273d2a0f7d1
|
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -175,6 +175,13 @@ where
}
}
+ #[cfg(feature = "server")]
+ pub(crate) fn has_initial_read_write_state(&self) -> bool {
+ matches!(self.state.reading, Reading::Init)
+ && matches!(self.state.writing, Writing::Init)
+ && self.io.read_buf().is_empty()
+ }
+
fn should_error_on_eof(&self) -> bool {
// If we're idle, it's probably just the connection closing gracefully.
T::should_error_on_parse_eof() && !self.state.is_idle()
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -82,7 +82,11 @@ where
#[cfg(feature = "server")]
pub(crate) fn disable_keep_alive(&mut self) {
self.conn.disable_keep_alive();
- if self.conn.is_write_closed() {
+
+ // If keep alive has been disabled and no read or write has been seen on
+ // the connection yet, we must be in a state where the server is being asked to
+ // shut down before any data has been seen on the connection
+ if self.conn.is_write_closed() || self.conn.has_initial_read_write_state() {
self.close();
}
}
|
Yea, I think this behavior is currently on purpose. Whether it _should_ be is a fair question. I think at the time, I assumed that a _new_ connection would usually have a request incoming ASAP, whereas an idle connection might not, and so it was better to allow that request to come in.
We could alter that behavior, if we document a good reason for _why_ we're making that change. Also, I imagine using the `http1_header_read_timeout` option could help here.
I agree that it's *more likely* for the first request to show up than a follow up request, but it seems weird to bake that assumption into graceful_shutdown IMO. For reference, I'm using graceful_shutdown in some logic to shut down connections after a certain period of idleness (I can't just terminate the connection future since I want the TLS session to shut down cleanly for session reuse). Right now, the current behavior will miss connections that are opened and never make a request.
Does `http1_header_read_timeout` start counting immediately or just once the first byte of the header is received? In any case, I don't think that'd help for an h2 connection, right?
Any progress on this? I was trying to figure out why my program wasn't exiting when I expected it to and found this issue. If I send the graceful shutdown signal the server will process one more request before actually exiting. So a user sending the shutdown command through a web app won't actually be shutting anything down.
> Does http1_header_read_timeout start counting immediately or just once the first byte of the header is received? In any case, I don't think that'd help for an h2 connection, right?
From some testing just now, the answer to the first part is "no". If I start-up an `http1::Connection` with a header timeout configured and connect to it simply with `nc` then the connection stays up indefinitely. Once I send any data (even a blank line) the timer starts counting and the connection is closed as expected after the timeout.
This seems like a thing to slide into RC2 or RC3.
> I can't just terminate the connection future since I want the TLS session to shut down cleanly for session reuse
@sfackler Does this mean you'd have the connection write out a 408 response and then finish? Or simply calling `poll_shutdown` and then return the future like if it were in keep-alive state? I guess calling shutdown will make the TLS shutdown start, right?
Yeah it just needs to call shutdown on the underlying IO object, which will handle the SSL shutdown protocol.
I think I can take this one.
|
2023-07-06T18:47:14Z
| 3,261
|
Connection::graceful_shutdown always waits for the first request.
**Version**
hyper 0.14.16
**Platform**
Linux DESKTOP-DHO88R7 4.19.104-microsoft-standard #1 SMP Wed Feb 19 06:37:35 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux
**Description**
If you gracefully shut down a server connection future before the first request, Hyper will not actually shut the connection down until a request is processed:
```rust
use hyper::server::conn::Http;
use hyper::Response;
use tokio::io::AsyncReadExt;
use tokio::net::{TcpListener, TcpStream};
use tokio::pin;
#[tokio::main]
async fn main() {
let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
let addr = listener.local_addr().unwrap();
tokio::spawn(server(listener));
let mut stream = TcpStream::connect(addr).await.unwrap();
println!("connected");
let mut buf = vec![];
stream.read_to_end(&mut buf).await.unwrap();
}
async fn server(listener: TcpListener) {
let socket = listener.accept().await.unwrap().0;
let service = hyper::service::service_fn(|_: hyper::Request<hyper::Body>| async {
Err::<Response<hyper::Body>, _>("no")
});
let future = Http::new()
.http1_only(true)
.serve_connection(socket, service);
pin!(future);
future.as_mut().graceful_shutdown();
future.await.unwrap();
}
```
I would expect this program to exit almost instantly since there is no request being processed when the graceful_shutdown is invoked. However, it instead blocks forever waiting on the client to send headers.
The behavior actually appears to be that the shutdown is processed immediately after the first request is fully handled.
|
hyperium__hyper-3261
|
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -31,6 +31,7 @@ use hyper::body::{Body, Incoming as IncomingBody};
use hyper::server::conn::{http1, http2};
use hyper::service::{service_fn, Service};
use hyper::{Method, Request, Response, StatusCode, Uri, Version};
+use tokio::pin;
mod support;
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1139,11 +1140,17 @@ async fn disable_keep_alive_mid_request() {
let child = thread::spawn(move || {
let mut req = connect(&addr);
req.write_all(b"GET / HTTP/1.1\r\n").unwrap();
+ thread::sleep(Duration::from_millis(10));
tx1.send(()).unwrap();
rx2.recv().unwrap();
req.write_all(b"Host: localhost\r\n\r\n").unwrap();
let mut buf = vec![];
req.read_to_end(&mut buf).unwrap();
+ assert!(
+ buf.starts_with(b"HTTP/1.1 200 OK\r\n"),
+ "should receive OK response, but buf: {:?}",
+ buf,
+ );
});
let (socket, _) = listener.accept().await.unwrap();
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2152,6 +2159,31 @@ async fn max_buf_size() {
.expect_err("should TooLarge error");
}
+#[cfg(feature = "http1")]
+#[tokio::test]
+async fn graceful_shutdown_before_first_request_no_block() {
+ let (listener, addr) = setup_tcp_listener();
+
+ tokio::spawn(async move {
+ let socket = listener.accept().await.unwrap().0;
+
+ let future = http1::Builder::new().serve_connection(socket, HelloWorld);
+ pin!(future);
+ future.as_mut().graceful_shutdown();
+
+ future.await.unwrap();
+ });
+
+ let mut stream = TkTcpStream::connect(addr).await.unwrap();
+
+ let mut buf = vec![];
+
+ tokio::time::timeout(Duration::from_secs(5), stream.read_to_end(&mut buf))
+ .await
+ .expect("timed out waiting for graceful shutdown")
+ .expect("error receiving response");
+}
+
#[test]
fn streaming_body() {
use futures_util::StreamExt;
|
hyperium/hyper
|
6fd696e10974f10b2c6b9d16393bbbfa21c2333f
|
[
"3253"
] |
0.14
|
297dc4c8ea2dbc844d030418f79a6869fd67f496
|
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -602,17 +602,16 @@ impl Sender {
}
/// Aborts the body in an abnormal fashion.
- pub fn abort(self) {
+ pub fn abort(mut self) {
+ self.send_error(crate::Error::new_body_write_aborted());
+ }
+
+ pub(crate) fn send_error(&mut self, err: crate::Error) {
let _ = self
.data_tx
// clone so the send works even if buffer is full
.clone()
- .try_send(Err(crate::Error::new_body_write_aborted()));
- }
-
- #[cfg(feature = "http1")]
- pub(crate) fn send_error(&mut self, err: crate::Error) {
- let _ = self.data_tx.try_send(Err(err));
+ .try_send(Err(err));
}
}
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -118,6 +118,10 @@ where
should_shutdown: bool,
) -> Poll<crate::Result<Dispatched>> {
Poll::Ready(ready!(self.poll_inner(cx, should_shutdown)).or_else(|e| {
+ // Be sure to alert a streaming body of the failure.
+ if let Some(mut body) = self.body_tx.take() {
+ body.send_error(crate::Error::new_body("connection error"));
+ }
// An error means we're shutting down either way.
// We just try to give the error to the user,
// and close the connection with an Ok. If we
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -367,7 +371,12 @@ where
self.conn.end_body()?;
}
} else {
- return Poll::Pending;
+ // If there's no body_rx, end the body
+ if self.conn.can_write_body() {
+ self.conn.end_body()?;
+ } else {
+ return Poll::Pending;
+ }
}
}
}
|
I've been playing around with the demo on my own machine, and even without the timeout, I see what you're describing. When I turn on tracing, I see that the connection task encounters a _write_ error, because the socket is closed, and then that closes up the task.
I would guess that it depends on what exact option the connection task is doing when you kill the curl process: if it was trying to read, it will see a read error, if it was trying to write, it will log it and close up.
So, I'd say that at least the problem is that when the connection task is closing, the request body sender (the internal side giving you the request body bytes) doesn't notice it should send a final error item.
My problem domain is storage systems, and when a user uploads a file of unknown length (via chunked encoding) it is really important to get reliable information on how the stream has ended. With the current state of things, a drop of the client might result in a new object being successfully uploaded to the storage system with only partial content.
|
2023-06-19T18:36:20Z
| 3,257
|
Missing error when reading stream of data out of an interrupted chunked-encoded request
**Version**
hyper 0.14.26
tokio 1.28.1
**Platform**
```Linux my-hostname 5.10.0-18-amd64 #1 SMP Debian 5.10.140-1 (2022-09-02) x86_64 GNU/Linux```
**Description**
Error is not propagated via stream in some cases when client drops the connection during chunked-encoding stream.
I tried this code:
```rust
use futures_util::stream::StreamExt;
use hyper::service::{make_service_fn, service_fn};
use hyper::{Body, Request, Response, Server};
use tokio::sync::mpsc;
use tokio::time::{sleep, Duration};
async fn echo(mut req: Request<Body>) -> Result<Response<Body>, hyper::Error> {
println!("reading body!");
// Create a channel to communicate between the reading and responding tasks
let (tx, rx) = mpsc::channel::<Vec<u8>>(1);
// Spawn a task to read the request body and discard it
tokio::spawn(async move {
while let Some(maybe_chunk) = req.body_mut().next().await {
match maybe_chunk {
Ok(chunk) => {
// Uncomment me!
// sleep(Duration::from_millis(100)).await;
let _ = tx.send(chunk.to_vec()).await;
}
Err(err) => {
println!("Got error: {}", err);
}
}
}
println!("finished reading body!");
});
// Create a response with a data stream that is filled by the reading task
let stream = tokio_stream::wrappers::ReceiverStream::new(rx);
let response = Response::builder()
.body(Body::wrap_stream(stream.map(Result::<_, hyper::Error>::Ok)))
.expect("Failed to build response");
Ok(response)
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let addr = ([0, 0, 0, 0], 3000).into();
let service = make_service_fn(|_| async { Ok::<_, hyper::Error>(service_fn(echo)) });
let server = Server::bind(&addr).serve(service);
println!("Listening on http://{}", addr);
server.await?;
Ok(())
}
```
Cargo.toml dependencies:
```toml
[dependencies]
tokio = { version = "1", features = ["full"] }
hyper = { version="0.14", features=["full"] }
http = "*"
futures = "*"
futures-util = "0.3.28"
tokio-stream = "0.1.14"
```
Service above is an echo-server, which sends back the stream of data that it receives. To get the repro, please launch the service, and on another terminal start the "endless" curl command like this:
```
$ cat /dev/zero | curl -XPOST http://127.0.0.1:3000/ -H "Transfer-Encoding: chunked" -T - -o /dev/null
```
It will read `/dev/zero`, pipe it to curl, which will send it as data stream with chunked transfer encoding, sending the output stream to `/dev/null`.
After some time, terminate curl with `Ctrl+C`. Service reports the error on the stream as expected, terminal output will look like this:
```
Listening on http://0.0.0.0:3000
reading body!
Got error: error reading a body from connection: Connection reset by peer (os error 104)
finished reading body!
```
But if we uncomment the sleep line, and repeat the experiment, there will be no error, as indicated by service output:
```
Listening on http://0.0.0.0:3000
reading body!
finished reading body!
```
I'm observing this weird behavior on a larger codebase that does approximately the same as sample server presented above.
One more interesting observation, even when sleep is in place, it does not repro (i.e. error is reported properly) with this netcat:
```
$ echo -ne "GET /echo-buffer HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\nTransfer-Encoding: chunked\r\n\r\n6\r\naabbcc" | nc localhos
t 3000; echo
HTTP/1.1 200 OK
transfer-encoding: chunked
date: Fri, 16 Jun 2023 16:04:58 GMT
6
aabbcc
^C
```
My larger codebase also works as expected with netcat. Is there a race somewhere?
|
hyperium__hyper-3257
|
diff --git a/Cargo.toml b/Cargo.toml
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -62,7 +62,7 @@ tokio = { version = "1", features = [
] }
tokio-test = "0.4"
tokio-util = { version = "0.7", features = ["codec"] }
-tower = { version = "0.4", features = ["make", "util"] }
+tower = { version = "0.4", default-features = false, features = ["make", "util"] }
url = "2.2"
[target.'cfg(any(target_os = "linux", target_os = "macos"))'.dev-dependencies]
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -93,6 +93,7 @@ mod response_body_lengths {
}
fn run_test(case: TestCase) {
+ let _ = pretty_env_logger::try_init();
assert!(
case.version == 0 || case.version == 1,
"TestCase.version must 0 or 1"
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -157,18 +158,22 @@ mod response_body_lengths {
let n = body.find("\r\n\r\n").unwrap() + 4;
if case.expects_chunked {
- let len = body.len();
- assert_eq!(
- &body[n + 1..n + 3],
- "\r\n",
- "expected body chunk size header"
- );
- assert_eq!(&body[n + 3..len - 7], body_str, "expected body");
- assert_eq!(
- &body[len - 7..],
- "\r\n0\r\n\r\n",
- "expected body final chunk size header"
- );
+ if body_str.len() > 0 {
+ let len = body.len();
+ assert_eq!(
+ &body[n + 1..n + 3],
+ "\r\n",
+ "expected body chunk size header"
+ );
+ assert_eq!(&body[n + 3..len - 7], body_str, "expected body");
+ assert_eq!(
+ &body[len - 7..],
+ "\r\n0\r\n\r\n",
+ "expected body final chunk size header"
+ );
+ } else {
+ assert_eq!(&body[n..], "0\r\n\r\n");
+ }
} else {
assert_eq!(&body[n..], body_str, "expected body");
}
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -219,6 +224,17 @@ mod response_body_lengths {
});
}
+ #[test]
+ fn chunked_response_known_empty() {
+ run_test(TestCase {
+ version: 1,
+ headers: &[("transfer-encoding", "chunked")],
+ body: Bd::Known(""),
+ expects_chunked: true, // should still send chunked, and 0\r\n\r\n
+ expects_con_len: false,
+ });
+ }
+
#[test]
fn chunked_response_unknown() {
run_test(TestCase {
|
hyperium/hyper
|
a24f0c0af8e1f4c6b7cc3a47c83eb6e4af88aca6
|
[
"3252"
] |
0.3
|
aa330bcba0ac0cb30868181ac870947604b9ecbf
|
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -366,7 +366,12 @@ where
self.conn.end_body()?;
}
} else {
- return Poll::Pending;
+ // If there's no body_rx, end the body
+ if self.conn.can_write_body() {
+ self.conn.end_body()?;
+ } else {
+ return Poll::Pending;
+ }
}
}
}
|
Oh, interesting. This is _supposed_ to work, and we have a matrix of `response_body_lengths` tests for a bunch of combinations (headers existing, body type knows it's own length, etc), and I noticed this specific combination wasn't included (chunked header set, known empty). Adding it, and the test does indeed hang. I'll look into why.
|
2023-06-16T17:01:17Z
| 3,254
|
Chunked HTTP/1.1 doesn't send the last zero-chunk when body is empty
**Version**
hyper 0.14.26, h2 0.3.19, http 0.2.9
**Platform**
Linux 5.15, Ubuntu 22.04
**Description**
In HTTP/1.1, when `transfer-encoding: chunked` is set, all the data should be sent in the form of chunks. However, in hyper, if the body part is empty, e.g. `Body::empty()` or `Body::from_static(b"")`, the hyper server doesn't send `b"0\r\n\r\n"` to the client, causing an infinite wait on the client side. When the body contains some bytes, e.g. `Body::from_static(b"0")`, hyper will send a non-zero size chunk as well as a zero-size last chunk. According to the pseudo code from [RFC9112 7.1.3](https://datatracker.ietf.org/doc/html/rfc9112#name-decoding-chunked), a zero-size body should be allowed.
I expect hyper to send a valid last chunk `b"0\r\n\r\n"` to the client when the body is empty. Manually sending it is impossible with the public API, so I have no workaround now.
Source code to reproduce the behavior:
```rust
async fn get_302() -> anyhow::Result<Response<Body>> {
let resp_builder = http::response::Builder::new();
let (mut parts, body) = resp_builder
.status(302)
.version(Version::HTTP_11)
.header(header::LOCATION, "127.0.0.1:1234/ok")
.header(header::TRANSFER_ENCODING, "chunked")
.body(Body::empty())
.unwrap().into_parts();
Ok(Response::from_parts(parts, body))
}
async fn run_server() {
let listener = TcpListener::bind("127.0.0.1:1234").await.unwrap();
let service = service_fn(|req| {
get_302()
});
while let (conn, _) = listener.accept().await.unwrap() {
let _ = Http::new().http1_only(true).serve_connection(conn, service).await;
}
}
#[tokio::main]
async fn main() {
run_server().await
}
```
|
hyperium__hyper-3254
|
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -92,6 +92,7 @@ mod response_body_lengths {
}
fn run_test(case: TestCase) {
+ let _ = pretty_env_logger::try_init();
assert!(
case.version == 0 || case.version == 1,
"TestCase.version must 0 or 1"
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -155,18 +156,22 @@ mod response_body_lengths {
let n = body.find("\r\n\r\n").unwrap() + 4;
if case.expects_chunked {
- let len = body.len();
- assert_eq!(
- &body[n + 1..n + 3],
- "\r\n",
- "expected body chunk size header"
- );
- assert_eq!(&body[n + 3..len - 7], body_str, "expected body");
- assert_eq!(
- &body[len - 7..],
- "\r\n0\r\n\r\n",
- "expected body final chunk size header"
- );
+ if body_str.len() > 0 {
+ let len = body.len();
+ assert_eq!(
+ &body[n + 1..n + 3],
+ "\r\n",
+ "expected body chunk size header"
+ );
+ assert_eq!(&body[n + 3..len - 7], body_str, "expected body");
+ assert_eq!(
+ &body[len - 7..],
+ "\r\n0\r\n\r\n",
+ "expected body final chunk size header"
+ );
+ } else {
+ assert_eq!(&body[n..], "0\r\n\r\n");
+ }
} else {
assert_eq!(&body[n..], body_str, "expected body");
}
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -217,6 +222,17 @@ mod response_body_lengths {
});
}
+ #[test]
+ fn chunked_response_known_empty() {
+ run_test(TestCase {
+ version: 1,
+ headers: &[("transfer-encoding", "chunked")],
+ body: Bd::Known(""),
+ expects_chunked: true, // should still send chunked, and 0\r\n\r\n
+ expects_con_len: false,
+ });
+ }
+
#[test]
fn chunked_response_unknown() {
run_test(TestCase {
|
hyperium/hyper
|
6fd696e10974f10b2c6b9d16393bbbfa21c2333f
|
[
"3110"
] |
0.3
|
f4b513009d81083081d1c60c1981847bbb17dd5d
|
diff --git a/benches/end_to_end.rs b/benches/end_to_end.rs
--- a/benches/end_to_end.rs
+++ b/benches/end_to_end.rs
@@ -315,7 +314,8 @@ impl Opts {
let mut client = rt.block_on(async {
if self.http2 {
- let io = tokio::net::TcpStream::connect(&addr).await.unwrap();
+ let tcp = tokio::net::TcpStream::connect(&addr).await.unwrap();
+ let io = support::TokioIo::new(tcp);
let (tx, conn) = hyper::client::conn::http2::Builder::new(support::TokioExecutor)
.initial_stream_window_size(self.http2_stream_window)
.initial_connection_window_size(self.http2_conn_window)
diff --git a/benches/end_to_end.rs b/benches/end_to_end.rs
--- a/benches/end_to_end.rs
+++ b/benches/end_to_end.rs
@@ -328,7 +328,8 @@ impl Opts {
} else if self.parallel_cnt > 1 {
todo!("http/1 parallel >1");
} else {
- let io = tokio::net::TcpStream::connect(&addr).await.unwrap();
+ let tcp = tokio::net::TcpStream::connect(&addr).await.unwrap();
+ let io = support::TokioIo::new(tcp);
let (tx, conn) = hyper::client::conn::http1::Builder::new()
.handshake(io)
.await
diff --git a/benches/end_to_end.rs b/benches/end_to_end.rs
--- a/benches/end_to_end.rs
+++ b/benches/end_to_end.rs
@@ -414,6 +415,7 @@ fn spawn_server(rt: &tokio::runtime::Runtime, opts: &Opts) -> SocketAddr {
let opts = opts.clone();
rt.spawn(async move {
while let Ok((sock, _)) = listener.accept().await {
+ let io = support::TokioIo::new(sock);
if opts.http2 {
tokio::spawn(
hyper::server::conn::http2::Builder::new(support::TokioExecutor)
diff --git a/benches/end_to_end.rs b/benches/end_to_end.rs
--- a/benches/end_to_end.rs
+++ b/benches/end_to_end.rs
@@ -421,7 +423,7 @@ fn spawn_server(rt: &tokio::runtime::Runtime, opts: &Opts) -> SocketAddr {
.initial_connection_window_size(opts.http2_conn_window)
.adaptive_window(opts.http2_adaptive_window)
.serve_connection(
- sock,
+ io,
service_fn(move |req: Request<hyper::body::Incoming>| async move {
let mut req_body = req.into_body();
while let Some(_chunk) = req_body.frame().await {}
diff --git a/benches/end_to_end.rs b/benches/end_to_end.rs
--- a/benches/end_to_end.rs
+++ b/benches/end_to_end.rs
@@ -433,7 +435,7 @@ fn spawn_server(rt: &tokio::runtime::Runtime, opts: &Opts) -> SocketAddr {
);
} else {
tokio::spawn(hyper::server::conn::http1::Builder::new().serve_connection(
- sock,
+ io,
service_fn(move |req: Request<hyper::body::Incoming>| async move {
let mut req_body = req.into_body();
while let Some(_chunk) = req_body.frame().await {}
diff --git a/benches/server.rs b/benches/server.rs
--- a/benches/server.rs
+++ b/benches/server.rs
@@ -38,10 +40,11 @@ macro_rules! bench_server {
rt.spawn(async move {
loop {
let (stream, _) = listener.accept().await.expect("accept");
+ let io = support::TokioIo::new(stream);
http1::Builder::new()
.serve_connection(
- stream,
+ io,
service_fn(|_| async {
Ok::<_, hyper::Error>(
Response::builder()
diff --git a/benches/support/mod.rs b/benches/support/mod.rs
--- a/benches/support/mod.rs
+++ b/benches/support/mod.rs
@@ -1,2 +1,2 @@
mod tokiort;
-pub use tokiort::{TokioExecutor, TokioTimer};
+pub use tokiort::{TokioExecutor, TokioIo, TokioTimer};
diff --git a/benches/support/tokiort.rs b/benches/support/tokiort.rs
--- a/benches/support/tokiort.rs
+++ b/benches/support/tokiort.rs
@@ -88,3 +88,149 @@ impl TokioSleep {
self.project().inner.as_mut().reset(deadline.into());
}
}
+
+pin_project! {
+ #[derive(Debug)]
+ pub struct TokioIo<T> {
+ #[pin]
+ inner: T,
+ }
+}
+
+impl<T> TokioIo<T> {
+ pub fn new(inner: T) -> Self {
+ Self { inner }
+ }
+
+ pub fn inner(self) -> T {
+ self.inner
+ }
+}
+
+impl<T> hyper::rt::Read for TokioIo<T>
+where
+ T: tokio::io::AsyncRead,
+{
+ fn poll_read(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ mut buf: hyper::rt::ReadBufCursor<'_>,
+ ) -> Poll<Result<(), std::io::Error>> {
+ let n = unsafe {
+ let mut tbuf = tokio::io::ReadBuf::uninit(buf.as_mut());
+ match tokio::io::AsyncRead::poll_read(self.project().inner, cx, &mut tbuf) {
+ Poll::Ready(Ok(())) => tbuf.filled().len(),
+ other => return other,
+ }
+ };
+
+ unsafe {
+ buf.advance(n);
+ }
+ Poll::Ready(Ok(()))
+ }
+}
+
+impl<T> hyper::rt::Write for TokioIo<T>
+where
+ T: tokio::io::AsyncWrite,
+{
+ fn poll_write(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<Result<usize, std::io::Error>> {
+ tokio::io::AsyncWrite::poll_write(self.project().inner, cx, buf)
+ }
+
+ fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), std::io::Error>> {
+ tokio::io::AsyncWrite::poll_flush(self.project().inner, cx)
+ }
+
+ fn poll_shutdown(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ ) -> Poll<Result<(), std::io::Error>> {
+ tokio::io::AsyncWrite::poll_shutdown(self.project().inner, cx)
+ }
+
+ fn is_write_vectored(&self) -> bool {
+ tokio::io::AsyncWrite::is_write_vectored(&self.inner)
+ }
+
+ fn poll_write_vectored(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ bufs: &[std::io::IoSlice<'_>],
+ ) -> Poll<Result<usize, std::io::Error>> {
+ tokio::io::AsyncWrite::poll_write_vectored(self.project().inner, cx, bufs)
+ }
+}
+
+impl<T> tokio::io::AsyncRead for TokioIo<T>
+where
+ T: hyper::rt::Read,
+{
+ fn poll_read(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ tbuf: &mut tokio::io::ReadBuf<'_>,
+ ) -> Poll<Result<(), std::io::Error>> {
+ //let init = tbuf.initialized().len();
+ let filled = tbuf.filled().len();
+ let sub_filled = unsafe {
+ let mut buf = hyper::rt::ReadBuf::uninit(tbuf.unfilled_mut());
+
+ match hyper::rt::Read::poll_read(self.project().inner, cx, buf.unfilled()) {
+ Poll::Ready(Ok(())) => buf.filled().len(),
+ other => return other,
+ }
+ };
+
+ let n_filled = filled + sub_filled;
+ // At least sub_filled bytes had to have been initialized.
+ let n_init = sub_filled;
+ unsafe {
+ tbuf.assume_init(n_init);
+ tbuf.set_filled(n_filled);
+ }
+
+ Poll::Ready(Ok(()))
+ }
+}
+
+impl<T> tokio::io::AsyncWrite for TokioIo<T>
+where
+ T: hyper::rt::Write,
+{
+ fn poll_write(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<Result<usize, std::io::Error>> {
+ hyper::rt::Write::poll_write(self.project().inner, cx, buf)
+ }
+
+ fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), std::io::Error>> {
+ hyper::rt::Write::poll_flush(self.project().inner, cx)
+ }
+
+ fn poll_shutdown(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ ) -> Poll<Result<(), std::io::Error>> {
+ hyper::rt::Write::poll_shutdown(self.project().inner, cx)
+ }
+
+ fn is_write_vectored(&self) -> bool {
+ hyper::rt::Write::is_write_vectored(&self.inner)
+ }
+
+ fn poll_write_vectored(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ bufs: &[std::io::IoSlice<'_>],
+ ) -> Poll<Result<usize, std::io::Error>> {
+ hyper::rt::Write::poll_write_vectored(self.project().inner, cx, bufs)
+ }
+}
diff --git a/examples/client.rs b/examples/client.rs
--- a/examples/client.rs
+++ b/examples/client.rs
@@ -8,6 +8,10 @@ use hyper::Request;
use tokio::io::{self, AsyncWriteExt as _};
use tokio::net::TcpStream;
+#[path = "../benches/support/mod.rs"]
+mod support;
+use support::TokioIo;
+
// A simple type alias so as to DRY.
type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>;
diff --git a/examples/client.rs b/examples/client.rs
--- a/examples/client.rs
+++ b/examples/client.rs
@@ -40,8 +44,9 @@ async fn fetch_url(url: hyper::Uri) -> Result<()> {
let port = url.port_u16().unwrap_or(80);
let addr = format!("{}:{}", host, port);
let stream = TcpStream::connect(addr).await?;
+ let io = TokioIo::new(stream);
- let (mut sender, conn) = hyper::client::conn::http1::handshake(stream).await?;
+ let (mut sender, conn) = hyper::client::conn::http1::handshake(io).await?;
tokio::task::spawn(async move {
if let Err(err) = conn.await {
println!("Connection failed: {:?}", err);
diff --git a/examples/client_json.rs b/examples/client_json.rs
--- a/examples/client_json.rs
+++ b/examples/client_json.rs
@@ -7,6 +7,10 @@ use hyper::{body::Buf, Request};
use serde::Deserialize;
use tokio::net::TcpStream;
+#[path = "../benches/support/mod.rs"]
+mod support;
+use support::TokioIo;
+
// A simple type alias so as to DRY.
type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>;
diff --git a/examples/client_json.rs b/examples/client_json.rs
--- a/examples/client_json.rs
+++ b/examples/client_json.rs
@@ -29,8 +33,9 @@ async fn fetch_json(url: hyper::Uri) -> Result<Vec<User>> {
let addr = format!("{}:{}", host, port);
let stream = TcpStream::connect(addr).await?;
+ let io = TokioIo::new(stream);
- let (mut sender, conn) = hyper::client::conn::http1::handshake(stream).await?;
+ let (mut sender, conn) = hyper::client::conn::http1::handshake(io).await?;
tokio::task::spawn(async move {
if let Err(err) = conn.await {
println!("Connection failed: {:?}", err);
diff --git a/examples/echo.rs b/examples/echo.rs
--- a/examples/echo.rs
+++ b/examples/echo.rs
@@ -10,6 +10,10 @@ use hyper::service::service_fn;
use hyper::{body::Body, Method, Request, Response, StatusCode};
use tokio::net::TcpListener;
+#[path = "../benches/support/mod.rs"]
+mod support;
+use support::TokioIo;
+
/// This is our service handler. It receives a Request, routes on its
/// path, and returns a Future of a Response.
async fn echo(
diff --git a/examples/echo.rs b/examples/echo.rs
--- a/examples/echo.rs
+++ b/examples/echo.rs
@@ -92,10 +96,11 @@ async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
println!("Listening on http://{}", addr);
loop {
let (stream, _) = listener.accept().await?;
+ let io = TokioIo::new(stream);
tokio::task::spawn(async move {
if let Err(err) = http1::Builder::new()
- .serve_connection(stream, service_fn(echo))
+ .serve_connection(io, service_fn(echo))
.await
{
println!("Error serving connection: {:?}", err);
diff --git a/examples/gateway.rs b/examples/gateway.rs
--- a/examples/gateway.rs
+++ b/examples/gateway.rs
@@ -4,6 +4,10 @@ use hyper::{server::conn::http1, service::service_fn};
use std::net::SocketAddr;
use tokio::net::{TcpListener, TcpStream};
+#[path = "../benches/support/mod.rs"]
+mod support;
+use support::TokioIo;
+
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
pretty_env_logger::init();
diff --git a/examples/gateway.rs b/examples/gateway.rs
--- a/examples/gateway.rs
+++ b/examples/gateway.rs
@@ -20,6 +24,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
loop {
let (stream, _) = listener.accept().await?;
+ let io = TokioIo::new(stream);
// This is the `Service` that will handle the connection.
// `service_fn` is a helper to convert a function that
diff --git a/examples/gateway.rs b/examples/gateway.rs
--- a/examples/gateway.rs
+++ b/examples/gateway.rs
@@ -42,9 +47,9 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
async move {
let client_stream = TcpStream::connect(addr).await.unwrap();
+ let io = TokioIo::new(client_stream);
- let (mut sender, conn) =
- hyper::client::conn::http1::handshake(client_stream).await?;
+ let (mut sender, conn) = hyper::client::conn::http1::handshake(io).await?;
tokio::task::spawn(async move {
if let Err(err) = conn.await {
println!("Connection failed: {:?}", err);
diff --git a/examples/gateway.rs b/examples/gateway.rs
--- a/examples/gateway.rs
+++ b/examples/gateway.rs
@@ -56,10 +61,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
});
tokio::task::spawn(async move {
- if let Err(err) = http1::Builder::new()
- .serve_connection(stream, service)
- .await
- {
+ if let Err(err) = http1::Builder::new().serve_connection(io, service).await {
println!("Failed to serve the connection: {:?}", err);
}
});
diff --git a/examples/hello.rs b/examples/hello.rs
--- a/examples/hello.rs
+++ b/examples/hello.rs
@@ -10,6 +10,10 @@ use hyper::service::service_fn;
use hyper::{Request, Response};
use tokio::net::TcpListener;
+#[path = "../benches/support/mod.rs"]
+mod support;
+use support::TokioIo;
+
// An async function that consumes a request, does nothing with it and returns a
// response.
async fn hello(_: Request<hyper::body::Incoming>) -> Result<Response<Full<Bytes>>, Infallible> {
diff --git a/examples/hello.rs b/examples/hello.rs
--- a/examples/hello.rs
+++ b/examples/hello.rs
@@ -35,7 +39,10 @@ pub async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
// has work to do. In this case, a connection arrives on the port we are listening on and
// the task is woken up, at which point the task is then put back on a thread, and is
// driven forward by the runtime, eventually yielding a TCP stream.
- let (stream, _) = listener.accept().await?;
+ let (tcp, _) = listener.accept().await?;
+ // Use an adapter to access something implementing `tokio::io` traits as if they implement
+ // `hyper::rt` IO traits.
+ let io = TokioIo::new(tcp);
// Spin up a new task in Tokio so we can continue to listen for new TCP connection on the
// current task without waiting for the processing of the HTTP1 connection we just received
diff --git a/examples/hello.rs b/examples/hello.rs
--- a/examples/hello.rs
+++ b/examples/hello.rs
@@ -44,7 +51,7 @@ pub async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
// Handle the connection from the client using HTTP1 and pass any
// HTTP requests received on that connection to the `hello` function
if let Err(err) = http1::Builder::new()
- .serve_connection(stream, service_fn(hello))
+ .serve_connection(io, service_fn(hello))
.await
{
println!("Error serving connection: {:?}", err);
diff --git a/examples/http_proxy.rs b/examples/http_proxy.rs
--- a/examples/http_proxy.rs
+++ b/examples/http_proxy.rs
@@ -12,6 +12,10 @@ use hyper::{Method, Request, Response};
use tokio::net::{TcpListener, TcpStream};
+#[path = "../benches/support/mod.rs"]
+mod support;
+use support::TokioIo;
+
// To try this example:
// 1. cargo run --example http_proxy
// 2. config http_proxy in command line
diff --git a/examples/http_proxy.rs b/examples/http_proxy.rs
--- a/examples/http_proxy.rs
+++ b/examples/http_proxy.rs
@@ -28,12 +32,13 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
loop {
let (stream, _) = listener.accept().await?;
+ let io = TokioIo::new(stream);
tokio::task::spawn(async move {
if let Err(err) = http1::Builder::new()
.preserve_header_case(true)
.title_case_headers(true)
- .serve_connection(stream, service_fn(proxy))
+ .serve_connection(io, service_fn(proxy))
.with_upgrades()
.await
{
diff --git a/examples/http_proxy.rs b/examples/http_proxy.rs
--- a/examples/http_proxy.rs
+++ b/examples/http_proxy.rs
@@ -88,11 +93,12 @@ async fn proxy(
let addr = format!("{}:{}", host, port);
let stream = TcpStream::connect(addr).await.unwrap();
+ let io = TokioIo::new(stream);
let (mut sender, conn) = Builder::new()
.preserve_header_case(true)
.title_case_headers(true)
- .handshake(stream)
+ .handshake(io)
.await?;
tokio::task::spawn(async move {
if let Err(err) = conn.await {
diff --git a/examples/http_proxy.rs b/examples/http_proxy.rs
--- a/examples/http_proxy.rs
+++ b/examples/http_proxy.rs
@@ -123,9 +129,10 @@ fn full<T: Into<Bytes>>(chunk: T) -> BoxBody<Bytes, hyper::Error> {
// Create a TCP connection to host:port, build a tunnel between the connection and
// the upgraded connection
-async fn tunnel(mut upgraded: Upgraded, addr: String) -> std::io::Result<()> {
+async fn tunnel(upgraded: Upgraded, addr: String) -> std::io::Result<()> {
// Connect to remote server
let mut server = TcpStream::connect(addr).await?;
+ let mut upgraded = TokioIo::new(upgraded);
// Proxying data
let (from_client, from_server) =
diff --git a/examples/multi_server.rs b/examples/multi_server.rs
--- a/examples/multi_server.rs
+++ b/examples/multi_server.rs
@@ -11,6 +11,10 @@ use hyper::service::service_fn;
use hyper::{Request, Response};
use tokio::net::TcpListener;
+#[path = "../benches/support/mod.rs"]
+mod support;
+use support::TokioIo;
+
static INDEX1: &[u8] = b"The 1st service!";
static INDEX2: &[u8] = b"The 2nd service!";
diff --git a/examples/multi_server.rs b/examples/multi_server.rs
--- a/examples/multi_server.rs
+++ b/examples/multi_server.rs
@@ -33,10 +37,11 @@ async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let listener = TcpListener::bind(addr1).await.unwrap();
loop {
let (stream, _) = listener.accept().await.unwrap();
+ let io = TokioIo::new(stream);
tokio::task::spawn(async move {
if let Err(err) = http1::Builder::new()
- .serve_connection(stream, service_fn(index1))
+ .serve_connection(io, service_fn(index1))
.await
{
println!("Error serving connection: {:?}", err);
diff --git a/examples/multi_server.rs b/examples/multi_server.rs
--- a/examples/multi_server.rs
+++ b/examples/multi_server.rs
@@ -49,10 +54,11 @@ async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let listener = TcpListener::bind(addr2).await.unwrap();
loop {
let (stream, _) = listener.accept().await.unwrap();
+ let io = TokioIo::new(stream);
tokio::task::spawn(async move {
if let Err(err) = http1::Builder::new()
- .serve_connection(stream, service_fn(index2))
+ .serve_connection(io, service_fn(index2))
.await
{
println!("Error serving connection: {:?}", err);
diff --git a/examples/params.rs b/examples/params.rs
--- a/examples/params.rs
+++ b/examples/params.rs
@@ -13,6 +13,10 @@ use std::convert::Infallible;
use std::net::SocketAddr;
use url::form_urlencoded;
+#[path = "../benches/support/mod.rs"]
+mod support;
+use support::TokioIo;
+
static INDEX: &[u8] = b"<html><body><form action=\"post\" method=\"post\">Name: <input type=\"text\" name=\"name\"><br>Number: <input type=\"text\" name=\"number\"><br><input type=\"submit\"></body></html>";
static MISSING: &[u8] = b"Missing field";
static NOTNUMERIC: &[u8] = b"Number field is not numeric";
diff --git a/examples/params.rs b/examples/params.rs
--- a/examples/params.rs
+++ b/examples/params.rs
@@ -124,10 +128,11 @@ async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
println!("Listening on http://{}", addr);
loop {
let (stream, _) = listener.accept().await?;
+ let io = TokioIo::new(stream);
tokio::task::spawn(async move {
if let Err(err) = http1::Builder::new()
- .serve_connection(stream, service_fn(param_example))
+ .serve_connection(io, service_fn(param_example))
.await
{
println!("Error serving connection: {:?}", err);
diff --git a/examples/send_file.rs b/examples/send_file.rs
--- a/examples/send_file.rs
+++ b/examples/send_file.rs
@@ -10,6 +10,10 @@ use http_body_util::Full;
use hyper::service::service_fn;
use hyper::{Method, Request, Response, Result, StatusCode};
+#[path = "../benches/support/mod.rs"]
+mod support;
+use support::TokioIo;
+
static INDEX: &str = "examples/send_file_index.html";
static NOTFOUND: &[u8] = b"Not Found";
diff --git a/examples/send_file.rs b/examples/send_file.rs
--- a/examples/send_file.rs
+++ b/examples/send_file.rs
@@ -24,10 +28,11 @@ async fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
loop {
let (stream, _) = listener.accept().await?;
+ let io = TokioIo::new(stream);
tokio::task::spawn(async move {
if let Err(err) = http1::Builder::new()
- .serve_connection(stream, service_fn(response_examples))
+ .serve_connection(io, service_fn(response_examples))
.await
{
println!("Failed to serve connection: {:?}", err);
diff --git a/examples/service_struct_impl.rs b/examples/service_struct_impl.rs
--- a/examples/service_struct_impl.rs
+++ b/examples/service_struct_impl.rs
@@ -10,6 +10,10 @@ use std::net::SocketAddr;
use std::pin::Pin;
use std::sync::Mutex;
+#[path = "../benches/support/mod.rs"]
+mod support;
+use support::TokioIo;
+
type Counter = i32;
#[tokio::main]
diff --git a/examples/service_struct_impl.rs b/examples/service_struct_impl.rs
--- a/examples/service_struct_impl.rs
+++ b/examples/service_struct_impl.rs
@@ -21,11 +25,12 @@ async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
loop {
let (stream, _) = listener.accept().await?;
+ let io = TokioIo::new(stream);
tokio::task::spawn(async move {
if let Err(err) = http1::Builder::new()
.serve_connection(
- stream,
+ io,
Svc {
counter: Mutex::new(81818),
},
diff --git a/examples/single_threaded.rs b/examples/single_threaded.rs
--- a/examples/single_threaded.rs
+++ b/examples/single_threaded.rs
@@ -5,7 +5,7 @@ use hyper::server::conn::http2;
use std::cell::Cell;
use std::net::SocketAddr;
use std::rc::Rc;
-use tokio::io::{self, AsyncRead, AsyncWrite, AsyncWriteExt};
+use tokio::io::{self, AsyncWriteExt};
use tokio::net::TcpListener;
use hyper::body::{Body as HttpBody, Bytes, Frame};
diff --git a/examples/single_threaded.rs b/examples/single_threaded.rs
--- a/examples/single_threaded.rs
+++ b/examples/single_threaded.rs
@@ -18,6 +18,10 @@ use std::task::{Context, Poll};
use std::thread;
use tokio::net::TcpStream;
+#[path = "../benches/support/mod.rs"]
+mod support;
+use support::TokioIo;
+
struct Body {
// Our Body type is !Send and !Sync:
_marker: PhantomData<*const ()>,
diff --git a/examples/single_threaded.rs b/examples/single_threaded.rs
--- a/examples/single_threaded.rs
+++ b/examples/single_threaded.rs
@@ -98,6 +102,7 @@ async fn server() -> Result<(), Box<dyn std::error::Error>> {
loop {
let (stream, _) = listener.accept().await?;
+ let io = TokioIo::new(stream);
// For each connection, clone the counter to use in our service...
let cnt = counter.clone();
diff --git a/examples/single_threaded.rs b/examples/single_threaded.rs
--- a/examples/single_threaded.rs
+++ b/examples/single_threaded.rs
@@ -111,7 +116,7 @@ async fn server() -> Result<(), Box<dyn std::error::Error>> {
tokio::task::spawn_local(async move {
if let Err(err) = http2::Builder::new(LocalExec)
- .serve_connection(stream, service)
+ .serve_connection(io, service)
.await
{
let mut stdout = io::stdout();
diff --git a/examples/single_threaded.rs b/examples/single_threaded.rs
--- a/examples/single_threaded.rs
+++ b/examples/single_threaded.rs
@@ -127,11 +132,11 @@ async fn server() -> Result<(), Box<dyn std::error::Error>> {
struct IOTypeNotSend {
_marker: PhantomData<*const ()>,
- stream: TcpStream,
+ stream: TokioIo<TcpStream>,
}
impl IOTypeNotSend {
- fn new(stream: TcpStream) -> Self {
+ fn new(stream: TokioIo<TcpStream>) -> Self {
Self {
_marker: PhantomData,
stream,
diff --git a/examples/single_threaded.rs b/examples/single_threaded.rs
--- a/examples/single_threaded.rs
+++ b/examples/single_threaded.rs
@@ -139,7 +144,7 @@ impl IOTypeNotSend {
}
}
-impl AsyncWrite for IOTypeNotSend {
+impl hyper::rt::Write for IOTypeNotSend {
fn poll_write(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
diff --git a/examples/single_threaded.rs b/examples/single_threaded.rs
--- a/examples/single_threaded.rs
+++ b/examples/single_threaded.rs
@@ -163,11 +168,11 @@ impl AsyncWrite for IOTypeNotSend {
}
}
-impl AsyncRead for IOTypeNotSend {
+impl hyper::rt::Read for IOTypeNotSend {
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
- buf: &mut tokio::io::ReadBuf<'_>,
+ buf: hyper::rt::ReadBufCursor<'_>,
) -> Poll<std::io::Result<()>> {
Pin::new(&mut self.stream).poll_read(cx, buf)
}
diff --git a/examples/single_threaded.rs b/examples/single_threaded.rs
--- a/examples/single_threaded.rs
+++ b/examples/single_threaded.rs
@@ -179,7 +184,7 @@ async fn client(url: hyper::Uri) -> Result<(), Box<dyn std::error::Error>> {
let addr = format!("{}:{}", host, port);
let stream = TcpStream::connect(addr).await?;
- let stream = IOTypeNotSend::new(stream);
+ let stream = IOTypeNotSend::new(TokioIo::new(stream));
let (mut sender, conn) = hyper::client::conn::http2::handshake(LocalExec, stream).await?;
diff --git a/examples/state.rs b/examples/state.rs
--- a/examples/state.rs
+++ b/examples/state.rs
@@ -12,6 +12,10 @@ use hyper::{server::conn::http1, service::service_fn};
use hyper::{Error, Response};
use tokio::net::TcpListener;
+#[path = "../benches/support/mod.rs"]
+mod support;
+use support::TokioIo;
+
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
pretty_env_logger::init();
diff --git a/examples/state.rs b/examples/state.rs
--- a/examples/state.rs
+++ b/examples/state.rs
@@ -26,6 +30,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
println!("Listening on http://{}", addr);
loop {
let (stream, _) = listener.accept().await?;
+ let io = TokioIo::new(stream);
// Each connection could send multiple requests, so
// the `Service` needs a clone to handle later requests.
diff --git a/examples/state.rs b/examples/state.rs
--- a/examples/state.rs
+++ b/examples/state.rs
@@ -46,10 +51,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
}
});
- if let Err(err) = http1::Builder::new()
- .serve_connection(stream, service)
- .await
- {
+ if let Err(err) = http1::Builder::new().serve_connection(io, service).await {
println!("Error serving connection: {:?}", err);
}
}
diff --git a/examples/upgrades.rs b/examples/upgrades.rs
--- a/examples/upgrades.rs
+++ b/examples/upgrades.rs
@@ -16,11 +16,16 @@ use hyper::service::service_fn;
use hyper::upgrade::Upgraded;
use hyper::{Request, Response, StatusCode};
+#[path = "../benches/support/mod.rs"]
+mod support;
+use support::TokioIo;
+
// A simple type alias so as to DRY.
type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>;
/// Handle server-side I/O after HTTP upgraded.
-async fn server_upgraded_io(mut upgraded: Upgraded) -> Result<()> {
+async fn server_upgraded_io(upgraded: Upgraded) -> Result<()> {
+ let mut upgraded = TokioIo::new(upgraded);
// we have an upgraded connection that we can read and
// write on directly.
//
diff --git a/examples/upgrades.rs b/examples/upgrades.rs
--- a/examples/upgrades.rs
+++ b/examples/upgrades.rs
@@ -75,7 +80,8 @@ async fn server_upgrade(mut req: Request<hyper::body::Incoming>) -> Result<Respo
}
/// Handle client-side I/O after HTTP upgraded.
-async fn client_upgraded_io(mut upgraded: Upgraded) -> Result<()> {
+async fn client_upgraded_io(upgraded: Upgraded) -> Result<()> {
+ let mut upgraded = TokioIo::new(upgraded);
// We've gotten an upgraded connection that we can read
// and write directly on. Let's start out 'foobar' protocol.
upgraded.write_all(b"foo=bar").await?;
diff --git a/examples/upgrades.rs b/examples/upgrades.rs
--- a/examples/upgrades.rs
+++ b/examples/upgrades.rs
@@ -97,7 +103,8 @@ async fn client_upgrade_request(addr: SocketAddr) -> Result<()> {
.unwrap();
let stream = TcpStream::connect(addr).await?;
- let (mut sender, conn) = hyper::client::conn::http1::handshake(stream).await?;
+ let io = TokioIo::new(stream);
+ let (mut sender, conn) = hyper::client::conn::http1::handshake(io).await?;
tokio::task::spawn(async move {
if let Err(err) = conn.await {
diff --git a/examples/upgrades.rs b/examples/upgrades.rs
--- a/examples/upgrades.rs
+++ b/examples/upgrades.rs
@@ -146,10 +153,11 @@ async fn main() {
tokio::select! {
res = listener.accept() => {
let (stream, _) = res.expect("Failed to accept");
+ let io = TokioIo::new(stream);
let mut rx = rx.clone();
tokio::task::spawn(async move {
- let conn = http1::Builder::new().serve_connection(stream, service_fn(server_upgrade));
+ let conn = http1::Builder::new().serve_connection(io, service_fn(server_upgrade));
// Don't forget to enable upgrades on the connection.
let mut conn = conn.with_upgrades();
diff --git a/examples/web_api.rs b/examples/web_api.rs
--- a/examples/web_api.rs
+++ b/examples/web_api.rs
@@ -9,6 +9,10 @@ use hyper::service::service_fn;
use hyper::{body::Incoming as IncomingBody, header, Method, Request, Response, StatusCode};
use tokio::net::{TcpListener, TcpStream};
+#[path = "../benches/support/mod.rs"]
+mod support;
+use support::TokioIo;
+
type GenericError = Box<dyn std::error::Error + Send + Sync>;
type Result<T> = std::result::Result<T, GenericError>;
type BoxBody = http_body_util::combinators::BoxBody<Bytes, hyper::Error>;
diff --git a/examples/web_api.rs b/examples/web_api.rs
--- a/examples/web_api.rs
+++ b/examples/web_api.rs
@@ -30,8 +34,9 @@ async fn client_request_response() -> Result<Response<BoxBody>> {
let host = req.uri().host().expect("uri has no host");
let port = req.uri().port_u16().expect("uri has no port");
let stream = TcpStream::connect(format!("{}:{}", host, port)).await?;
+ let io = TokioIo::new(stream);
- let (mut sender, conn) = hyper::client::conn::http1::handshake(stream).await?;
+ let (mut sender, conn) = hyper::client::conn::http1::handshake(io).await?;
tokio::task::spawn(async move {
if let Err(err) = conn.await {
diff --git a/examples/web_api.rs b/examples/web_api.rs
--- a/examples/web_api.rs
+++ b/examples/web_api.rs
@@ -109,14 +114,12 @@ async fn main() -> Result<()> {
println!("Listening on http://{}", addr);
loop {
let (stream, _) = listener.accept().await?;
+ let io = TokioIo::new(stream);
tokio::task::spawn(async move {
let service = service_fn(move |req| response_examples(req));
- if let Err(err) = http1::Builder::new()
- .serve_connection(stream, service)
- .await
- {
+ if let Err(err) = http1::Builder::new().serve_connection(io, service).await {
println!("Failed to serve connection: {:?}", err);
}
});
diff --git a/src/client/conn/http1.rs b/src/client/conn/http1.rs
--- a/src/client/conn/http1.rs
+++ b/src/client/conn/http1.rs
@@ -3,10 +3,10 @@
use std::error::Error as StdError;
use std::fmt;
+use crate::rt::{Read, Write};
use bytes::Bytes;
use http::{Request, Response};
use httparse::ParserConfig;
-use tokio::io::{AsyncRead, AsyncWrite};
use super::super::dispatch;
use crate::body::{Body, Incoming as IncomingBody};
diff --git a/src/client/conn/http1.rs b/src/client/conn/http1.rs
--- a/src/client/conn/http1.rs
+++ b/src/client/conn/http1.rs
@@ -49,7 +49,7 @@ pub struct Parts<T> {
#[must_use = "futures do nothing unless polled"]
pub struct Connection<T, B>
where
- T: AsyncRead + AsyncWrite + Send + 'static,
+ T: Read + Write + Send + 'static,
B: Body + 'static,
{
inner: Option<Dispatcher<T, B>>,
diff --git a/src/client/conn/http1.rs b/src/client/conn/http1.rs
--- a/src/client/conn/http1.rs
+++ b/src/client/conn/http1.rs
@@ -57,7 +57,7 @@ where
impl<T, B> Connection<T, B>
where
- T: AsyncRead + AsyncWrite + Send + Unpin + 'static,
+ T: Read + Write + Send + Unpin + 'static,
B: Body + 'static,
B::Error: Into<Box<dyn StdError + Send + Sync>>,
{
diff --git a/src/client/conn/http1.rs b/src/client/conn/http1.rs
--- a/src/client/conn/http1.rs
+++ b/src/client/conn/http1.rs
@@ -114,7 +114,7 @@ pub struct Builder {
/// See [`client::conn`](crate::client::conn) for more.
pub async fn handshake<T, B>(io: T) -> crate::Result<(SendRequest<B>, Connection<T, B>)>
where
- T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
+ T: Read + Write + Unpin + Send + 'static,
B: Body + 'static,
B::Data: Send,
B::Error: Into<Box<dyn StdError + Send + Sync>>,
diff --git a/src/client/conn/http1.rs b/src/client/conn/http1.rs
--- a/src/client/conn/http1.rs
+++ b/src/client/conn/http1.rs
@@ -238,7 +238,7 @@ impl<B> fmt::Debug for SendRequest<B> {
impl<T, B> fmt::Debug for Connection<T, B>
where
- T: AsyncRead + AsyncWrite + fmt::Debug + Send + 'static,
+ T: Read + Write + fmt::Debug + Send + 'static,
B: Body + 'static,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
diff --git a/src/client/conn/http1.rs b/src/client/conn/http1.rs
--- a/src/client/conn/http1.rs
+++ b/src/client/conn/http1.rs
@@ -248,7 +248,7 @@ where
impl<T, B> Future for Connection<T, B>
where
- T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
+ T: Read + Write + Unpin + Send + 'static,
B: Body + Send + 'static,
B::Data: Send,
B::Error: Into<Box<dyn StdError + Send + Sync>>,
diff --git a/src/client/conn/http1.rs b/src/client/conn/http1.rs
--- a/src/client/conn/http1.rs
+++ b/src/client/conn/http1.rs
@@ -470,7 +470,7 @@ impl Builder {
io: T,
) -> impl Future<Output = crate::Result<(SendRequest<B>, Connection<T, B>)>>
where
- T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
+ T: Read + Write + Unpin + Send + 'static,
B: Body + 'static,
B::Data: Send,
B::Error: Into<Box<dyn StdError + Send + Sync>>,
diff --git a/src/client/conn/http2.rs b/src/client/conn/http2.rs
--- a/src/client/conn/http2.rs
+++ b/src/client/conn/http2.rs
@@ -6,8 +6,8 @@ use std::marker::PhantomData;
use std::sync::Arc;
use std::time::Duration;
+use crate::rt::{Read, Write};
use http::{Request, Response};
-use tokio::io::{AsyncRead, AsyncWrite};
use super::super::dispatch;
use crate::body::{Body, Incoming as IncomingBody};
diff --git a/src/client/conn/http2.rs b/src/client/conn/http2.rs
--- a/src/client/conn/http2.rs
+++ b/src/client/conn/http2.rs
@@ -37,7 +37,7 @@ impl<B> Clone for SendRequest<B> {
#[must_use = "futures do nothing unless polled"]
pub struct Connection<T, B, E>
where
- T: AsyncRead + AsyncWrite + 'static + Unpin,
+ T: Read + Write + 'static + Unpin,
B: Body + 'static,
E: ExecutorClient<B, T> + Unpin,
B::Error: Into<Box<dyn Error + Send + Sync>>,
diff --git a/src/client/conn/http2.rs b/src/client/conn/http2.rs
--- a/src/client/conn/http2.rs
+++ b/src/client/conn/http2.rs
@@ -64,7 +64,7 @@ pub async fn handshake<E, T, B>(
io: T,
) -> crate::Result<(SendRequest<B>, Connection<T, B, E>)>
where
- T: AsyncRead + AsyncWrite + Unpin + 'static,
+ T: Read + Write + Unpin + 'static,
B: Body + 'static,
B::Data: Send,
B::Error: Into<Box<dyn Error + Send + Sync>>,
diff --git a/src/client/conn/http2.rs b/src/client/conn/http2.rs
--- a/src/client/conn/http2.rs
+++ b/src/client/conn/http2.rs
@@ -193,7 +193,7 @@ impl<B> fmt::Debug for SendRequest<B> {
impl<T, B, E> Connection<T, B, E>
where
- T: AsyncRead + AsyncWrite + Unpin + 'static,
+ T: Read + Write + Unpin + 'static,
B: Body + Unpin + Send + 'static,
B::Data: Send,
B::Error: Into<Box<dyn Error + Send + Sync>>,
diff --git a/src/client/conn/http2.rs b/src/client/conn/http2.rs
--- a/src/client/conn/http2.rs
+++ b/src/client/conn/http2.rs
@@ -215,7 +215,7 @@ where
impl<T, B, E> fmt::Debug for Connection<T, B, E>
where
- T: AsyncRead + AsyncWrite + fmt::Debug + 'static + Unpin,
+ T: Read + Write + fmt::Debug + 'static + Unpin,
B: Body + 'static,
E: ExecutorClient<B, T> + Unpin,
B::Error: Into<Box<dyn Error + Send + Sync>>,
diff --git a/src/client/conn/http2.rs b/src/client/conn/http2.rs
--- a/src/client/conn/http2.rs
+++ b/src/client/conn/http2.rs
@@ -227,7 +227,7 @@ where
impl<T, B, E> Future for Connection<T, B, E>
where
- T: AsyncRead + AsyncWrite + Unpin + 'static,
+ T: Read + Write + Unpin + 'static,
B: Body + 'static + Unpin,
B::Data: Send,
E: Unpin,
diff --git a/src/client/conn/http2.rs b/src/client/conn/http2.rs
--- a/src/client/conn/http2.rs
+++ b/src/client/conn/http2.rs
@@ -398,7 +398,7 @@ where
io: T,
) -> impl Future<Output = crate::Result<(SendRequest<B>, Connection<T, B, Ex>)>>
where
- T: AsyncRead + AsyncWrite + Unpin + 'static,
+ T: Read + Write + Unpin + 'static,
B: Body + 'static,
B::Data: Send,
B::Error: Into<Box<dyn Error + Send + Sync>>,
diff --git a/src/client/conn/mod.rs b/src/client/conn/mod.rs
--- a/src/client/conn/mod.rs
+++ b/src/client/conn/mod.rs
@@ -9,7 +9,9 @@
//! higher-level [Client](super) API.
//!
//! ## Example
-//! A simple example that uses the `SendRequest` struct to talk HTTP over a Tokio TCP stream
+//!
+//! A simple example that uses the `SendRequest` struct to talk HTTP over some TCP stream.
+//!
//! ```no_run
//! # #[cfg(all(feature = "client", feature = "http1"))]
//! # mod rt {
diff --git a/src/client/conn/mod.rs b/src/client/conn/mod.rs
--- a/src/client/conn/mod.rs
+++ b/src/client/conn/mod.rs
@@ -17,38 +19,38 @@
//! use http::{Request, StatusCode};
//! use http_body_util::Empty;
//! use hyper::client::conn;
-//! use tokio::net::TcpStream;
-//!
-//! #[tokio::main]
-//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
-//! let target_stream = TcpStream::connect("example.com:80").await?;
-//!
-//! let (mut request_sender, connection) = conn::http1::handshake(target_stream).await?;
-//!
-//! // spawn a task to poll the connection and drive the HTTP state
-//! tokio::spawn(async move {
-//! if let Err(e) = connection.await {
-//! eprintln!("Error in connection: {}", e);
-//! }
-//! });
-//!
-//! let request = Request::builder()
-//! // We need to manually add the host header because SendRequest does not
-//! .header("Host", "example.com")
-//! .method("GET")
-//! .body(Empty::<Bytes>::new())?;
-//! let response = request_sender.send_request(request).await?;
-//! assert!(response.status() == StatusCode::OK);
-//!
-//! let request = Request::builder()
-//! .header("Host", "example.com")
-//! .method("GET")
-//! .body(Empty::<Bytes>::new())?;
-//! let response = request_sender.send_request(request).await?;
-//! assert!(response.status() == StatusCode::OK);
-//! Ok(())
-//! }
-//!
+//! # use hyper::rt::{Read, Write};
+//! # async fn run<I>(tcp: I) -> Result<(), Box<dyn std::error::Error>>
+//! # where
+//! # I: Read + Write + Unpin + Send + 'static,
+//! # {
+//! let (mut request_sender, connection) = conn::http1::handshake(tcp).await?;
+//!
+//! // spawn a task to poll the connection and drive the HTTP state
+//! tokio::spawn(async move {
+//! if let Err(e) = connection.await {
+//! eprintln!("Error in connection: {}", e);
+//! }
+//! });
+//!
+//! let request = Request::builder()
+//! // We need to manually add the host header because SendRequest does not
+//! .header("Host", "example.com")
+//! .method("GET")
+//! .body(Empty::<Bytes>::new())?;
+//!
+//! let response = request_sender.send_request(request).await?;
+//! assert!(response.status() == StatusCode::OK);
+//!
+//! let request = Request::builder()
+//! .header("Host", "example.com")
+//! .method("GET")
+//! .body(Empty::<Bytes>::new())?;
+//!
+//! let response = request_sender.send_request(request).await?;
+//! assert!(response.status() == StatusCode::OK);
+//! # Ok(())
+//! # }
//! # }
//! ```
diff --git a/src/common/io/rewind.rs b/src/common/io/rewind.rs
--- a/src/common/io/rewind.rs
+++ b/src/common/io/rewind.rs
@@ -2,9 +2,9 @@ use std::marker::Unpin;
use std::{cmp, io};
use bytes::{Buf, Bytes};
-use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
use crate::common::{task, Pin, Poll};
+use crate::rt::{Read, ReadBufCursor, Write};
/// Combine a buffer with an IO, rewinding reads to use the buffer.
#[derive(Debug)]
diff --git a/src/common/io/rewind.rs b/src/common/io/rewind.rs
--- a/src/common/io/rewind.rs
+++ b/src/common/io/rewind.rs
@@ -44,14 +44,14 @@ impl<T> Rewind<T> {
// }
}
-impl<T> AsyncRead for Rewind<T>
+impl<T> Read for Rewind<T>
where
- T: AsyncRead + Unpin,
+ T: Read + Unpin,
{
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
- buf: &mut ReadBuf<'_>,
+ mut buf: ReadBufCursor<'_>,
) -> Poll<io::Result<()>> {
if let Some(mut prefix) = self.pre.take() {
// If there are no remaining bytes, let the bytes get dropped.
diff --git a/src/common/io/rewind.rs b/src/common/io/rewind.rs
--- a/src/common/io/rewind.rs
+++ b/src/common/io/rewind.rs
@@ -72,9 +72,9 @@ where
}
}
-impl<T> AsyncWrite for Rewind<T>
+impl<T> Write for Rewind<T>
where
- T: AsyncWrite + Unpin,
+ T: Write + Unpin,
{
fn poll_write(
mut self: Pin<&mut Self>,
diff --git a/src/ffi/io.rs b/src/ffi/io.rs
--- a/src/ffi/io.rs
+++ b/src/ffi/io.rs
@@ -2,8 +2,8 @@ use std::ffi::c_void;
use std::pin::Pin;
use std::task::{Context, Poll};
+use crate::rt::{Read, Write};
use libc::size_t;
-use tokio::io::{AsyncRead, AsyncWrite};
use super::task::hyper_context;
diff --git a/src/ffi/io.rs b/src/ffi/io.rs
--- a/src/ffi/io.rs
+++ b/src/ffi/io.rs
@@ -120,13 +120,13 @@ extern "C" fn write_noop(
0
}
-impl AsyncRead for hyper_io {
+impl Read for hyper_io {
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
- buf: &mut tokio::io::ReadBuf<'_>,
+ mut buf: crate::rt::ReadBufCursor<'_>,
) -> Poll<std::io::Result<()>> {
- let buf_ptr = unsafe { buf.unfilled_mut() }.as_mut_ptr() as *mut u8;
+ let buf_ptr = unsafe { buf.as_mut() }.as_mut_ptr() as *mut u8;
let buf_len = buf.remaining();
match (self.read)(self.userdata, hyper_context::wrap(cx), buf_ptr, buf_len) {
diff --git a/src/ffi/io.rs b/src/ffi/io.rs
--- a/src/ffi/io.rs
+++ b/src/ffi/io.rs
@@ -138,15 +138,14 @@ impl AsyncRead for hyper_io {
ok => {
// We have to trust that the user's read callback actually
// filled in that many bytes... :(
- unsafe { buf.assume_init(ok) };
- buf.advance(ok);
+ unsafe { buf.advance(ok) };
Poll::Ready(Ok(()))
}
}
}
}
-impl AsyncWrite for hyper_io {
+impl Write for hyper_io {
fn poll_write(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -4,11 +4,11 @@ use std::marker::PhantomData;
#[cfg(feature = "server")]
use std::time::Duration;
+use crate::rt::{Read, Write};
use bytes::{Buf, Bytes};
use http::header::{HeaderValue, CONNECTION};
use http::{HeaderMap, Method, Version};
use httparse::ParserConfig;
-use tokio::io::{AsyncRead, AsyncWrite};
use tracing::{debug, error, trace};
use super::io::Buffered;
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -25,7 +25,7 @@ use crate::rt::Sleep;
const H2_PREFACE: &[u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n";
/// This handles a connection, which will have been established over an
-/// `AsyncRead + AsyncWrite` (like a socket), and will likely include multiple
+/// `Read + Write` (like a socket), and will likely include multiple
/// `Transaction`s over HTTP.
///
/// The connection will determine when a message begins and ends as well as
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -39,7 +39,7 @@ pub(crate) struct Conn<I, B, T> {
impl<I, B, T> Conn<I, B, T>
where
- I: AsyncRead + AsyncWrite + Unpin,
+ I: Read + Write + Unpin,
B: Buf,
T: Http1Transaction,
{
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -1,8 +1,8 @@
use std::error::Error as StdError;
+use crate::rt::{Read, Write};
use bytes::{Buf, Bytes};
use http::Request;
-use tokio::io::{AsyncRead, AsyncWrite};
use tracing::{debug, trace};
use super::{Http1Transaction, Wants};
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -64,7 +64,7 @@ where
RecvItem = MessageHead<T::Incoming>,
> + Unpin,
D::PollError: Into<Box<dyn StdError + Send + Sync>>,
- I: AsyncRead + AsyncWrite + Unpin,
+ I: Read + Write + Unpin,
T: Http1Transaction + Unpin,
Bs: Body + 'static,
Bs::Error: Into<Box<dyn StdError + Send + Sync>>,
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -97,7 +97,7 @@ where
}
/// Run this dispatcher until HTTP says this connection is done,
- /// but don't call `AsyncWrite::shutdown` on the underlying IO.
+ /// but don't call `Write::shutdown` on the underlying IO.
///
/// This is useful for old-style HTTP upgrades, but ignores
/// newer-style upgrade API.
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -426,7 +426,7 @@ where
RecvItem = MessageHead<T::Incoming>,
> + Unpin,
D::PollError: Into<Box<dyn StdError + Send + Sync>>,
- I: AsyncRead + AsyncWrite + Unpin,
+ I: Read + Write + Unpin,
T: Http1Transaction + Unpin,
Bs: Body + 'static,
Bs::Error: Into<Box<dyn StdError + Send + Sync>>,
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -6,8 +6,8 @@ use std::io::{self, IoSlice};
use std::marker::Unpin;
use std::mem::MaybeUninit;
+use crate::rt::{Read, ReadBuf, Write};
use bytes::{Buf, BufMut, Bytes, BytesMut};
-use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
use tracing::{debug, trace};
use super::{Http1Transaction, ParseContext, ParsedMessage};
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -55,7 +55,7 @@ where
impl<T, B> Buffered<T, B>
where
- T: AsyncRead + AsyncWrite + Unpin,
+ T: Read + Write + Unpin,
B: Buf,
{
pub(crate) fn new(io: T) -> Buffered<T, B> {
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -251,7 +251,7 @@ where
let dst = self.read_buf.chunk_mut();
let dst = unsafe { &mut *(dst as *mut _ as *mut [MaybeUninit<u8>]) };
let mut buf = ReadBuf::uninit(dst);
- match Pin::new(&mut self.io).poll_read(cx, &mut buf) {
+ match Pin::new(&mut self.io).poll_read(cx, buf.unfilled()) {
Poll::Ready(Ok(_)) => {
let n = buf.filled().len();
trace!("received {} bytes", n);
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -359,7 +359,7 @@ pub(crate) trait MemRead {
impl<T, B> MemRead for Buffered<T, B>
where
- T: AsyncRead + AsyncWrite + Unpin,
+ T: Read + Write + Unpin,
B: Buf,
{
fn read_mem(&mut self, cx: &mut task::Context<'_>, len: usize) -> Poll<io::Result<Bytes>> {
diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs
--- a/src/proto/h2/client.rs
+++ b/src/proto/h2/client.rs
@@ -2,6 +2,7 @@ use std::marker::PhantomData;
use std::time::Duration;
+use crate::rt::{Read, Write};
use bytes::Bytes;
use futures_channel::mpsc::{Receiver, Sender};
use futures_channel::{mpsc, oneshot};
diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs
--- a/src/proto/h2/client.rs
+++ b/src/proto/h2/client.rs
@@ -11,13 +12,13 @@ use h2::client::{Builder, Connection, SendRequest};
use h2::SendStream;
use http::{Method, StatusCode};
use pin_project_lite::pin_project;
-use tokio::io::{AsyncRead, AsyncWrite};
use tracing::{debug, trace, warn};
use super::ping::{Ponger, Recorder};
use super::{ping, H2Upgraded, PipeToSendStream, SendBuf};
use crate::body::{Body, Incoming as IncomingBody};
use crate::client::dispatch::{Callback, SendWhen};
+use crate::common::io::Compat;
use crate::common::time::Time;
use crate::common::{task, Future, Never, Pin, Poll};
use crate::ext::Protocol;
diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs
--- a/src/proto/h2/client.rs
+++ b/src/proto/h2/client.rs
@@ -111,14 +112,14 @@ pub(crate) async fn handshake<T, B, E>(
timer: Time,
) -> crate::Result<ClientTask<B, E, T>>
where
- T: AsyncRead + AsyncWrite + Unpin + 'static,
+ T: Read + Write + Unpin + 'static,
B: Body + 'static,
B::Data: Send + 'static,
E: ExecutorClient<B, T> + Unpin,
B::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
{
let (h2_tx, mut conn) = new_builder(config)
- .handshake::<_, SendBuf<B::Data>>(io)
+ .handshake::<_, SendBuf<B::Data>>(crate::common::io::compat(io))
.await
.map_err(crate::Error::new_h2)?;
diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs
--- a/src/proto/h2/client.rs
+++ b/src/proto/h2/client.rs
@@ -168,16 +169,16 @@ pin_project! {
#[pin]
ponger: Ponger,
#[pin]
- conn: Connection<T, SendBuf<<B as Body>::Data>>,
+ conn: Connection<Compat<T>, SendBuf<<B as Body>::Data>>,
}
}
impl<T, B> Conn<T, B>
where
B: Body,
- T: AsyncRead + AsyncWrite + Unpin,
+ T: Read + Write + Unpin,
{
- fn new(ponger: Ponger, conn: Connection<T, SendBuf<<B as Body>::Data>>) -> Self {
+ fn new(ponger: Ponger, conn: Connection<Compat<T>, SendBuf<<B as Body>::Data>>) -> Self {
Conn { ponger, conn }
}
}
diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs
--- a/src/proto/h2/client.rs
+++ b/src/proto/h2/client.rs
@@ -185,7 +186,7 @@ where
impl<T, B> Future for Conn<T, B>
where
B: Body,
- T: AsyncRead + AsyncWrite + Unpin,
+ T: Read + Write + Unpin,
{
type Output = Result<(), h2::Error>;
diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs
--- a/src/proto/h2/client.rs
+++ b/src/proto/h2/client.rs
@@ -211,19 +212,19 @@ pin_project! {
struct ConnMapErr<T, B>
where
B: Body,
- T: AsyncRead,
- T: AsyncWrite,
+ T: Read,
+ T: Write,
T: Unpin,
{
#[pin]
- conn: Either<Conn<T, B>, Connection<T, SendBuf<<B as Body>::Data>>>,
+ conn: Either<Conn<T, B>, Connection<Compat<T>, SendBuf<<B as Body>::Data>>>,
}
}
impl<T, B> Future for ConnMapErr<T, B>
where
B: Body,
- T: AsyncRead + AsyncWrite + Unpin,
+ T: Read + Write + Unpin,
{
type Output = Result<(), ()>;
diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs
--- a/src/proto/h2/client.rs
+++ b/src/proto/h2/client.rs
@@ -239,8 +240,8 @@ pin_project! {
pub struct ConnTask<T, B>
where
B: Body,
- T: AsyncRead,
- T: AsyncWrite,
+ T: Read,
+ T: Write,
T: Unpin,
{
#[pin]
diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs
--- a/src/proto/h2/client.rs
+++ b/src/proto/h2/client.rs
@@ -254,7 +255,7 @@ pin_project! {
impl<T, B> ConnTask<T, B>
where
B: Body,
- T: AsyncRead + AsyncWrite + Unpin,
+ T: Read + Write + Unpin,
{
fn new(
conn: ConnMapErr<T, B>,
diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs
--- a/src/proto/h2/client.rs
+++ b/src/proto/h2/client.rs
@@ -272,7 +273,7 @@ where
impl<T, B> Future for ConnTask<T, B>
where
B: Body,
- T: AsyncRead + AsyncWrite + Unpin,
+ T: Read + Write + Unpin,
{
type Output = ();
diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs
--- a/src/proto/h2/client.rs
+++ b/src/proto/h2/client.rs
@@ -308,8 +309,8 @@ pin_project! {
B: http_body::Body,
B: 'static,
B::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
- T: AsyncRead,
- T: AsyncWrite,
+ T: Read,
+ T: Write,
T: Unpin,
{
Pipe {
diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs
--- a/src/proto/h2/client.rs
+++ b/src/proto/h2/client.rs
@@ -331,7 +332,7 @@ impl<B, T> Future for H2ClientFuture<B, T>
where
B: http_body::Body + 'static,
B::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
- T: AsyncRead + AsyncWrite + Unpin,
+ T: Read + Write + Unpin,
{
type Output = ();
diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs
--- a/src/proto/h2/client.rs
+++ b/src/proto/h2/client.rs
@@ -383,7 +384,7 @@ where
B: Body + 'static,
E: ExecutorClient<B, T> + Unpin,
B::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
- T: AsyncRead + AsyncWrite + Unpin,
+ T: Read + Write + Unpin,
{
pub(crate) fn is_extended_connect_protocol_enabled(&self) -> bool {
self.h2_tx.is_extended_connect_protocol_enabled()
diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs
--- a/src/proto/h2/client.rs
+++ b/src/proto/h2/client.rs
@@ -438,7 +439,7 @@ where
B::Data: Send,
E: ExecutorClient<B, T> + Unpin,
B::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
- T: AsyncRead + AsyncWrite + Unpin,
+ T: Read + Write + Unpin,
{
fn poll_pipe(&mut self, f: FutCtx<B>, cx: &mut task::Context<'_>) {
let ping = self.ping.clone();
diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs
--- a/src/proto/h2/client.rs
+++ b/src/proto/h2/client.rs
@@ -573,7 +574,7 @@ where
B::Data: Send,
B::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
E: ExecutorClient<B, T> + 'static + Send + Sync + Unpin,
- T: AsyncRead + AsyncWrite + Unpin,
+ T: Read + Write + Unpin,
{
type Output = crate::Result<Dispatched>;
diff --git a/src/proto/h2/mod.rs b/src/proto/h2/mod.rs
--- a/src/proto/h2/mod.rs
+++ b/src/proto/h2/mod.rs
@@ -1,13 +1,13 @@
+use crate::rt::{Read, ReadBufCursor, Write};
use bytes::{Buf, Bytes};
use h2::{Reason, RecvStream, SendStream};
use http::header::{HeaderName, CONNECTION, TE, TRAILER, TRANSFER_ENCODING, UPGRADE};
use http::HeaderMap;
use pin_project_lite::pin_project;
use std::error::Error as StdError;
-use std::io::{self, Cursor, IoSlice};
+use std::io::{Cursor, IoSlice};
use std::mem;
use std::task::Context;
-use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
use tracing::{debug, trace, warn};
use crate::body::Body;
diff --git a/src/proto/h2/mod.rs b/src/proto/h2/mod.rs
--- a/src/proto/h2/mod.rs
+++ b/src/proto/h2/mod.rs
@@ -271,15 +271,15 @@ where
buf: Bytes,
}
-impl<B> AsyncRead for H2Upgraded<B>
+impl<B> Read for H2Upgraded<B>
where
B: Buf,
{
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
- read_buf: &mut ReadBuf<'_>,
- ) -> Poll<Result<(), io::Error>> {
+ mut read_buf: ReadBufCursor<'_>,
+ ) -> Poll<Result<(), std::io::Error>> {
if self.buf.is_empty() {
self.buf = loop {
match ready!(self.recv_stream.poll_data(cx)) {
diff --git a/src/proto/h2/mod.rs b/src/proto/h2/mod.rs
--- a/src/proto/h2/mod.rs
+++ b/src/proto/h2/mod.rs
@@ -295,7 +295,7 @@ where
return Poll::Ready(match e.reason() {
Some(Reason::NO_ERROR) | Some(Reason::CANCEL) => Ok(()),
Some(Reason::STREAM_CLOSED) => {
- Err(io::Error::new(io::ErrorKind::BrokenPipe, e))
+ Err(std::io::Error::new(std::io::ErrorKind::BrokenPipe, e))
}
_ => Err(h2_to_io_error(e)),
})
diff --git a/src/proto/h2/mod.rs b/src/proto/h2/mod.rs
--- a/src/proto/h2/mod.rs
+++ b/src/proto/h2/mod.rs
@@ -311,7 +311,7 @@ where
}
}
-impl<B> AsyncWrite for H2Upgraded<B>
+impl<B> Write for H2Upgraded<B>
where
B: Buf,
{
diff --git a/src/proto/h2/mod.rs b/src/proto/h2/mod.rs
--- a/src/proto/h2/mod.rs
+++ b/src/proto/h2/mod.rs
@@ -319,7 +319,7 @@ where
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
- ) -> Poll<Result<usize, io::Error>> {
+ ) -> Poll<Result<usize, std::io::Error>> {
if buf.is_empty() {
return Poll::Ready(Ok(0));
}
diff --git a/src/proto/h2/mod.rs b/src/proto/h2/mod.rs
--- a/src/proto/h2/mod.rs
+++ b/src/proto/h2/mod.rs
@@ -344,7 +344,7 @@ where
Poll::Ready(Err(h2_to_io_error(
match ready!(self.send_stream.poll_reset(cx)) {
Ok(Reason::NO_ERROR) | Ok(Reason::CANCEL) | Ok(Reason::STREAM_CLOSED) => {
- return Poll::Ready(Err(io::ErrorKind::BrokenPipe.into()))
+ return Poll::Ready(Err(std::io::ErrorKind::BrokenPipe.into()))
}
Ok(reason) => reason.into(),
Err(e) => e,
diff --git a/src/proto/h2/mod.rs b/src/proto/h2/mod.rs
--- a/src/proto/h2/mod.rs
+++ b/src/proto/h2/mod.rs
@@ -352,14 +352,14 @@ where
)))
}
- fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
+ fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), std::io::Error>> {
Poll::Ready(Ok(()))
}
fn poll_shutdown(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
- ) -> Poll<Result<(), io::Error>> {
+ ) -> Poll<Result<(), std::io::Error>> {
if self.send_stream.write(&[], true).is_ok() {
return Poll::Ready(Ok(()));
}
diff --git a/src/proto/h2/mod.rs b/src/proto/h2/mod.rs
--- a/src/proto/h2/mod.rs
+++ b/src/proto/h2/mod.rs
@@ -368,7 +368,7 @@ where
match ready!(self.send_stream.poll_reset(cx)) {
Ok(Reason::NO_ERROR) => return Poll::Ready(Ok(())),
Ok(Reason::CANCEL) | Ok(Reason::STREAM_CLOSED) => {
- return Poll::Ready(Err(io::ErrorKind::BrokenPipe.into()))
+ return Poll::Ready(Err(std::io::ErrorKind::BrokenPipe.into()))
}
Ok(reason) => reason.into(),
Err(e) => e,
diff --git a/src/proto/h2/mod.rs b/src/proto/h2/mod.rs
--- a/src/proto/h2/mod.rs
+++ b/src/proto/h2/mod.rs
@@ -377,11 +377,11 @@ where
}
}
-fn h2_to_io_error(e: h2::Error) -> io::Error {
+fn h2_to_io_error(e: h2::Error) -> std::io::Error {
if e.is_io() {
e.into_io().unwrap()
} else {
- io::Error::new(io::ErrorKind::Other, e)
+ std::io::Error::new(std::io::ErrorKind::Other, e)
}
}
diff --git a/src/proto/h2/mod.rs b/src/proto/h2/mod.rs
--- a/src/proto/h2/mod.rs
+++ b/src/proto/h2/mod.rs
@@ -408,7 +408,7 @@ where
unsafe { self.as_inner_unchecked().poll_reset(cx) }
}
- fn write(&mut self, buf: &[u8], end_of_stream: bool) -> Result<(), io::Error> {
+ fn write(&mut self, buf: &[u8], end_of_stream: bool) -> Result<(), std::io::Error> {
let send_buf = SendBuf::Cursor(Cursor::new(buf.into()));
unsafe {
self.as_inner_unchecked()
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -3,12 +3,12 @@ use std::marker::Unpin;
use std::time::Duration;
+use crate::rt::{Read, Write};
use bytes::Bytes;
use h2::server::{Connection, Handshake, SendResponse};
use h2::{Reason, RecvStream};
use http::{Method, Request};
use pin_project_lite::pin_project;
-use tokio::io::{AsyncRead, AsyncWrite};
use tracing::{debug, trace, warn};
use super::{ping, PipeToSendStream, SendBuf};
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -89,7 +89,7 @@ where
{
Handshaking {
ping_config: ping::Config,
- hs: Handshake<T, SendBuf<B::Data>>,
+ hs: Handshake<crate::common::io::Compat<T>, SendBuf<B::Data>>,
},
Serving(Serving<T, B>),
Closed,
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -100,13 +100,13 @@ where
B: Body,
{
ping: Option<(ping::Recorder, ping::Ponger)>,
- conn: Connection<T, SendBuf<B::Data>>,
+ conn: Connection<crate::common::io::Compat<T>, SendBuf<B::Data>>,
closing: Option<crate::Error>,
}
impl<T, S, B, E> Server<T, S, B, E>
where
- T: AsyncRead + AsyncWrite + Unpin,
+ T: Read + Write + Unpin,
S: HttpService<IncomingBody, ResBody = B>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
B: Body + 'static,
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -132,7 +132,7 @@ where
if config.enable_connect_protocol {
builder.enable_connect_protocol();
}
- let handshake = builder.handshake(io);
+ let handshake = builder.handshake(crate::common::io::compat(io));
let bdp = if config.adaptive_window {
Some(config.initial_stream_window_size)
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -182,7 +182,7 @@ where
impl<T, S, B, E> Future for Server<T, S, B, E>
where
- T: AsyncRead + AsyncWrite + Unpin,
+ T: Read + Write + Unpin,
S: HttpService<IncomingBody, ResBody = B>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
B: Body + 'static,
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -228,7 +228,7 @@ where
impl<T, B> Serving<T, B>
where
- T: AsyncRead + AsyncWrite + Unpin,
+ T: Read + Write + Unpin,
B: Body + 'static,
{
fn poll_server<S, E>(
diff --git a/src/rt/bounds.rs b/src/rt/bounds.rs
--- a/src/rt/bounds.rs
+++ b/src/rt/bounds.rs
@@ -13,8 +13,8 @@ pub use self::h2_client::ExecutorClient;
#[cfg_attr(docsrs, doc(cfg(all(feature = "server", feature = "http2"))))]
mod h2_client {
use std::{error::Error, future::Future};
- use tokio::io::{AsyncRead, AsyncWrite};
+ use crate::rt::{Read, Write};
use crate::{proto::h2::client::H2ClientFuture, rt::Executor};
/// An executor to spawn http2 futures for the client.
diff --git a/src/rt/bounds.rs b/src/rt/bounds.rs
--- a/src/rt/bounds.rs
+++ b/src/rt/bounds.rs
@@ -29,7 +29,7 @@ mod h2_client {
where
B: http_body::Body,
B::Error: Into<Box<dyn Error + Send + Sync>>,
- T: AsyncRead + AsyncWrite + Unpin,
+ T: Read + Write + Unpin,
{
#[doc(hidden)]
fn execute_h2_future(&mut self, future: H2ClientFuture<B, T>);
diff --git a/src/rt/bounds.rs b/src/rt/bounds.rs
--- a/src/rt/bounds.rs
+++ b/src/rt/bounds.rs
@@ -41,7 +41,7 @@ mod h2_client {
B: http_body::Body + 'static,
B::Error: Into<Box<dyn Error + Send + Sync>>,
H2ClientFuture<B, T>: Future<Output = ()>,
- T: AsyncRead + AsyncWrite + Unpin,
+ T: Read + Write + Unpin,
{
fn execute_h2_future(&mut self, future: H2ClientFuture<B, T>) {
self.execute(future)
diff --git a/src/rt/bounds.rs b/src/rt/bounds.rs
--- a/src/rt/bounds.rs
+++ b/src/rt/bounds.rs
@@ -54,7 +54,7 @@ mod h2_client {
B: http_body::Body + 'static,
B::Error: Into<Box<dyn Error + Send + Sync>>,
H2ClientFuture<B, T>: Future<Output = ()>,
- T: AsyncRead + AsyncWrite + Unpin,
+ T: Read + Write + Unpin,
{
}
diff --git a/src/rt/mod.rs b/src/rt/mod.rs
--- a/src/rt/mod.rs
+++ b/src/rt/mod.rs
@@ -1,14 +1,18 @@
//! Runtime components
//!
-//! By default, hyper includes the [tokio](https://tokio.rs) runtime.
+//! The traits and types within this module are used to allow plugging in
+//! runtime types. These include:
//!
-//! If the `runtime` feature is disabled, the types in this module can be used
-//! to plug in other runtimes.
+//! - Executors
+//! - Timers
+//! - IO transports
pub mod bounds;
+mod io;
mod timer;
-pub use timer::{Sleep, Timer};
+pub use self::io::{Read, ReadBuf, ReadBufCursor, Write};
+pub use self::timer::{Sleep, Timer};
/// An executor of futures.
///
diff --git a/src/server/conn/http1.rs b/src/server/conn/http1.rs
--- a/src/server/conn/http1.rs
+++ b/src/server/conn/http1.rs
@@ -5,8 +5,8 @@ use std::fmt;
use std::sync::Arc;
use std::time::Duration;
+use crate::rt::{Read, Write};
use bytes::Bytes;
-use tokio::io::{AsyncRead, AsyncWrite};
use crate::body::{Body, Incoming as IncomingBody};
use crate::common::{task, Future, Pin, Poll, Unpin};
diff --git a/src/server/conn/http1.rs b/src/server/conn/http1.rs
--- a/src/server/conn/http1.rs
+++ b/src/server/conn/http1.rs
@@ -85,7 +85,7 @@ impl<I, B, S> Connection<I, S>
where
S: HttpService<IncomingBody, ResBody = B>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
- I: AsyncRead + AsyncWrite + Unpin,
+ I: Read + Write + Unpin,
B: Body + 'static,
B::Error: Into<Box<dyn StdError + Send + Sync>>,
{
diff --git a/src/server/conn/http1.rs b/src/server/conn/http1.rs
--- a/src/server/conn/http1.rs
+++ b/src/server/conn/http1.rs
@@ -172,7 +172,7 @@ impl<I, B, S> Future for Connection<I, S>
where
S: HttpService<IncomingBody, ResBody = B>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
- I: AsyncRead + AsyncWrite + Unpin + 'static,
+ I: Read + Write + Unpin + 'static,
B: Body + 'static,
B::Error: Into<Box<dyn StdError + Send + Sync>>,
{
diff --git a/src/server/conn/http1.rs b/src/server/conn/http1.rs
--- a/src/server/conn/http1.rs
+++ b/src/server/conn/http1.rs
@@ -333,10 +333,10 @@ impl Builder {
/// # use hyper::{body::Incoming, Request, Response};
/// # use hyper::service::Service;
/// # use hyper::server::conn::http1::Builder;
- /// # use tokio::io::{AsyncRead, AsyncWrite};
+ /// # use hyper::rt::{Read, Write};
/// # async fn run<I, S>(some_io: I, some_service: S)
/// # where
- /// # I: AsyncRead + AsyncWrite + Unpin + Send + 'static,
+ /// # I: Read + Write + Unpin + Send + 'static,
/// # S: Service<hyper::Request<Incoming>, Response=hyper::Response<Incoming>> + Send + 'static,
/// # S::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
/// # S::Future: Send,
diff --git a/src/server/conn/http1.rs b/src/server/conn/http1.rs
--- a/src/server/conn/http1.rs
+++ b/src/server/conn/http1.rs
@@ -356,7 +356,7 @@ impl Builder {
S::Error: Into<Box<dyn StdError + Send + Sync>>,
S::ResBody: 'static,
<S::ResBody as Body>::Error: Into<Box<dyn StdError + Send + Sync>>,
- I: AsyncRead + AsyncWrite + Unpin,
+ I: Read + Write + Unpin,
{
let mut conn = proto::Conn::new(io);
conn.set_timer(self.timer.clone());
diff --git a/src/server/conn/http1.rs b/src/server/conn/http1.rs
--- a/src/server/conn/http1.rs
+++ b/src/server/conn/http1.rs
@@ -413,7 +413,7 @@ mod upgrades {
where
S: HttpService<IncomingBody, ResBody = B>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
- I: AsyncRead + AsyncWrite + Unpin,
+ I: Read + Write + Unpin,
B: Body + 'static,
B::Error: Into<Box<dyn StdError + Send + Sync>>,
{
diff --git a/src/server/conn/http1.rs b/src/server/conn/http1.rs
--- a/src/server/conn/http1.rs
+++ b/src/server/conn/http1.rs
@@ -430,7 +430,7 @@ mod upgrades {
where
S: HttpService<IncomingBody, ResBody = B>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
- I: AsyncRead + AsyncWrite + Unpin + Send + 'static,
+ I: Read + Write + Unpin + Send + 'static,
B: Body + 'static,
B::Error: Into<Box<dyn StdError + Send + Sync>>,
{
diff --git a/src/server/conn/http2.rs b/src/server/conn/http2.rs
--- a/src/server/conn/http2.rs
+++ b/src/server/conn/http2.rs
@@ -5,8 +5,8 @@ use std::fmt;
use std::sync::Arc;
use std::time::Duration;
+use crate::rt::{Read, Write};
use pin_project_lite::pin_project;
-use tokio::io::{AsyncRead, AsyncWrite};
use crate::body::{Body, Incoming as IncomingBody};
use crate::common::{task, Future, Pin, Poll, Unpin};
diff --git a/src/server/conn/http2.rs b/src/server/conn/http2.rs
--- a/src/server/conn/http2.rs
+++ b/src/server/conn/http2.rs
@@ -51,7 +51,7 @@ impl<I, B, S, E> Connection<I, S, E>
where
S: HttpService<IncomingBody, ResBody = B>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
- I: AsyncRead + AsyncWrite + Unpin,
+ I: Read + Write + Unpin,
B: Body + 'static,
B::Error: Into<Box<dyn StdError + Send + Sync>>,
E: Http2ConnExec<S::Future, B>,
diff --git a/src/server/conn/http2.rs b/src/server/conn/http2.rs
--- a/src/server/conn/http2.rs
+++ b/src/server/conn/http2.rs
@@ -75,7 +75,7 @@ impl<I, B, S, E> Future for Connection<I, S, E>
where
S: HttpService<IncomingBody, ResBody = B>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
- I: AsyncRead + AsyncWrite + Unpin + 'static,
+ I: Read + Write + Unpin + 'static,
B: Body + 'static,
B::Error: Into<Box<dyn StdError + Send + Sync>>,
E: Http2ConnExec<S::Future, B>,
diff --git a/src/server/conn/http2.rs b/src/server/conn/http2.rs
--- a/src/server/conn/http2.rs
+++ b/src/server/conn/http2.rs
@@ -255,7 +255,7 @@ impl<E> Builder<E> {
S::Error: Into<Box<dyn StdError + Send + Sync>>,
Bd: Body + 'static,
Bd::Error: Into<Box<dyn StdError + Send + Sync>>,
- I: AsyncRead + AsyncWrite + Unpin,
+ I: Read + Write + Unpin,
E: Http2ConnExec<S::Future, Bd>,
{
let proto = proto::h2::Server::new(
diff --git a/src/server/conn/mod.rs b/src/server/conn/mod.rs
--- a/src/server/conn/mod.rs
+++ b/src/server/conn/mod.rs
@@ -7,43 +7,6 @@
//!
//! This module is split by HTTP version. Both work similarly, but do have
//! specific options on each builder.
-//!
-//! ## Example
-//!
-//! A simple example that prepares an HTTP/1 connection over a Tokio TCP stream.
-//!
-//! ```no_run
-//! # #[cfg(feature = "http1")]
-//! # mod rt {
-//! use http::{Request, Response, StatusCode};
-//! use http_body_util::Full;
-//! use hyper::{server::conn::http1, service::service_fn, body, body::Bytes};
-//! use std::{net::SocketAddr, convert::Infallible};
-//! use tokio::net::TcpListener;
-//!
-//! #[tokio::main]
-//! async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
-//! let addr: SocketAddr = ([127, 0, 0, 1], 8080).into();
-//!
-//! let mut tcp_listener = TcpListener::bind(addr).await?;
-//! loop {
-//! let (tcp_stream, _) = tcp_listener.accept().await?;
-//! tokio::task::spawn(async move {
-//! if let Err(http_err) = http1::Builder::new()
-//! .keep_alive(true)
-//! .serve_connection(tcp_stream, service_fn(hello))
-//! .await {
-//! eprintln!("Error while serving HTTP connection: {}", http_err);
-//! }
-//! });
-//! }
-//! }
-//!
-//! async fn hello(_req: Request<body::Incoming>) -> Result<Response<Full<Bytes>>, Infallible> {
-//! Ok(Response::new(Full::new(Bytes::from("Hello World!"))))
-//! }
-//! # }
-//! ```
#[cfg(feature = "http1")]
pub mod http1;
diff --git a/src/upgrade.rs b/src/upgrade.rs
--- a/src/upgrade.rs
+++ b/src/upgrade.rs
@@ -45,8 +45,8 @@ use std::fmt;
use std::io;
use std::marker::Unpin;
+use crate::rt::{Read, ReadBufCursor, Write};
use bytes::Bytes;
-use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
use tokio::sync::oneshot;
#[cfg(any(feature = "http1", feature = "http2"))]
use tracing::trace;
diff --git a/src/upgrade.rs b/src/upgrade.rs
--- a/src/upgrade.rs
+++ b/src/upgrade.rs
@@ -133,7 +133,7 @@ impl Upgraded {
///
/// On success, returns the downcasted parts. On error, returns the
/// `Upgraded` back.
- pub fn downcast<T: AsyncRead + AsyncWrite + Unpin + 'static>(self) -> Result<Parts<T>, Self> {
+ pub fn downcast<T: Read + Write + Unpin + 'static>(self) -> Result<Parts<T>, Self> {
let (io, buf) = self.io.into_inner();
match io.__hyper_downcast() {
Ok(t) => Ok(Parts {
diff --git a/src/upgrade.rs b/src/upgrade.rs
--- a/src/upgrade.rs
+++ b/src/upgrade.rs
@@ -148,17 +148,17 @@ impl Upgraded {
}
}
-impl AsyncRead for Upgraded {
+impl Read for Upgraded {
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
- buf: &mut ReadBuf<'_>,
+ buf: ReadBufCursor<'_>,
) -> Poll<io::Result<()>> {
Pin::new(&mut self.io).poll_read(cx, buf)
}
}
-impl AsyncWrite for Upgraded {
+impl Write for Upgraded {
fn poll_write(
mut self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
diff --git a/src/upgrade.rs b/src/upgrade.rs
--- a/src/upgrade.rs
+++ b/src/upgrade.rs
@@ -265,13 +265,13 @@ impl StdError for UpgradeExpected {}
// ===== impl Io =====
-pub(super) trait Io: AsyncRead + AsyncWrite + Unpin + 'static {
+pub(super) trait Io: Read + Write + Unpin + 'static {
fn __hyper_type_id(&self) -> TypeId {
TypeId::of::<Self>()
}
}
-impl<T: AsyncRead + AsyncWrite + Unpin + 'static> Io for T {}
+impl<T: Read + Write + Unpin + 'static> Io for T {}
impl dyn Io + Send {
fn __hyper_is<T: Io>(&self) -> bool {
|
cc @Noah-Kennedy @erickt @dhobsd
I am a fan of this if a proper trait could be found and the tokio experience has short-cuts such that the difference in experience with this change is minimal. But since that will go in util I don't see why hyper core it self shouldn't be based on its own io types.
I think you want to go 1.0 with Hyper long before we add such traits to std, but I think it would be great if using the std ones was in the longer-term plan. From my PoV, I'd love to know if there is anything in the current design proposal which wouldn't work for Hyper in the long run.
Text: https://github.com/nrc/portable-interoperable/tree/master/io-traits#readme
WIP code: https://github.com/nrc/async-io-traits
also really like this idea, at least until the traits are available in std, and the possibility to take them in directions that make sense for hyper while not necessarily desirable for tokio and to experiment with completion io. it also feels more in line with the design of 1.0 (from an onlooker perspective) when the main reason to keep using the tokio traits is easier integration given the other integration helpers have all (?) been moved to util
The main thing I'm _personally_ interested in here would be a mechanism to allow zero copy IO for APIs which require ownership of buffers. For IO uring, ideally implementations of the IO traits would be able to provide their own buffer types, which is essential for things like provided buffers (a facility for kernel-managed buffer pools).
If this was going to be implemented how would it work in relation to the crates Hyper depends on? For example, [h2](https://github.com/hyperium/h2) also uses `tokio::io::{AsyncRead, AsyncWrite}` so it would also need to be updated to use these custom IO traits.
Given these facts, I assume it would be necessary for a `hyper-io` crate to be introduced, as `h2` couldn't import the traits from `hyper` without causing a recursive crate dependency, which I am fairly sure is not supported.
I came across this problem while trying to implement this PR, I would be curious about the planned solution and if so I might be able to continue my implementation.
> Given these facts, I assume it would be necessary for a hyper-io crate to the introduced as h2 couldn't import the traits from hyper without causing a recursive crate dependency which I am fairly sure is not supported.
Oh, that's an interesting catch! I suppose it's a possibility we could define another crate, though I'm not so sure it's worth the hassle, yet. Another option is that `h2` could define some IO traits too, based on the operations it wants to do, and then inside hyper we just have an adapter.
I think that we perhaps don't have to decide that just yet, and could use an internal adapter anyways inside hyper, wrapping the IO type inside something implements Tokio's traits when we pass it to `h2`. Changing to either option later on should be backwards compatible.
I'm taking this one now. One not-too-important detail I wondered at immediately, figured I'd ask for other opinions: should there be a separate `hyper::io` module just for this? Everything else related to "runtime stuff" is in `hyper::rt`. So, these could be `hyper::rt::{Read, Write}`, or in a separate `io` module.
`hyper::io` feels better (and I was never a fan of `rt` name)
There are two considerations I have here to add.
The first one is that it's useful if hyper can avoid having to allocate memory for each in-progress TCP read that occurs. In io_uring (which we will get to in a minute) this is achieved via provided buffers. In epoll, this is achieved via buffer pools, where you wait for readiness, grab a buffer, attempt a read, and put it back if the read fails due to lack of readiness, and then you put the buffer back when you are done with it.
The second is that I'd like an io_uring compatible hyper at some point.
I think both are solvable if we get a bit more high-level and think about what hyper is _trying_ to do. Do we need something that looks like tokio's IO traits, or can we make do with something more specialized to what we need?
What I was thinking about was something resembling the below code for reads:
```rust
pub trait HyperAsyncRead {
type Buffer: AsMut<[u8]>;
fn poll_recv(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<Self::Buffer>>;
}
pub trait HyperAsyncReadExt {
fn read(&mut self) -> Read<Self> {
Read {
io: self,
_pin: PhantomPinned::default(),
}
}
}
#[pin_project]
pub struct Read<'a, I: ?Sized> {
io: &'a mut I,
#[pin]
_pin: PhantomPinned,
}
impl<'a, I> Future for Read<'a, I>
where
I: HyperAsyncRead + Unpin + ?Sized,
{
type Output = io::Result<I::Buffer>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let projected = self.project();
Pin::new(&mut **projected.io).poll_recv(cx)
}
}
impl<I: HyperAsyncRead> HyperAsyncReadExt for I {}
```
I'm still trying to think about writes.
I have far less experience with low-level concepts like these than some folks here but I'd like to point out some implications of the above `HyperAsyncRead`
In order to use your own buffer pool, you need to implement an IO type yourself (this is probably good)
```rust
struct MyOwnBufferPoolTcpStream {
inner: tokio::net::TcpStream
}
impl HyperAsyncRead for MyOwnbufferPoolTcpStream {
type Buffer = MyOwnBufferType;
...
}
```
As far as alternate suggestions, I don't have any. I thought briefly about a `HyperAsyncRead<T: AsMut<[u8]>>` but that's no different than just accepting a `&mut [u8]` as a concrete argument. I also thought about `HyperAsyncRead<T: SomeBufferTrait>` but that seems needlessly complicated. Few people will be implementing these buffering newtypes and those who are probably want to fully own the workings of their buffers.
All this said - owned buffer writes are more complicated. Hyper will likely need to acquire buffers, write into them, and then pass them to the `HyperAsyncWrite` trait as an input. For this, it _might_ be important to define a `Buffer` trait. Abstracting over buffers that can and cannot be resized will also be difficult. Treating all buffers as fixed-size would be easy, but might make some people upset.
Here's a brief sketch of an idea..
```rust
pub trait HyperBuffer: Sized {
fn poll_acquire(cx: &mut Context<'_>) -> Poll<io::Result<Self>>;
fn put_bytes(&mut self, bytes: &[u8]) -> std::io::Result<()>;
}
pub trait HyperAsyncWriter {
type Buffer;
fn poll_write(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Result<(), (io::Error, Option<Self::Buffer>)>>;
}
pub trait HyperAsyncWrite {
type Buffer: HyperBuffer;
type Writer<'a>: HyperAsyncWriter
where
Self: 'a;
fn start_write<'a>(self: Pin<&'a mut Self>, buffer: Self::Buffer) -> Self::Writer<'a>;
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<std::io::Result<()>>;
}
```
It's important that the `Buffer` type is once again tied to the IO type. This gives IO implementers access to the internals of the buffer type, and makes the `HyperBuffer` API describe how _hyper_ uses the buffer
This API forces the `HyperAsyncWrite` type to take ownership of the Buffer for the duration of the write. For `tokio::net::TcpStream` with `type Buffer = Vec<u8>;` this means building the IO type like this:
```rust
struct VecBuffer {
vec: Vec<u8>,
}
struct TcpStream {
io: tokio::net::TcpStream,
}
struct TcpStreamVecWriter<'a> {
written: usize,
buffer: Option<Vec<u8>>,
io: Pin<&'a mut TcpStream>,
}
impl HyperBuffer for VecBuffer {
fn poll_acquire(_: &mut Context<'_>) -> Poll<io::Result<Self>> {
Poll::Ready(Ok(VecBuffer {
vec: Vec::with_capacity(4096),
}))
}
fn put_bytes(&mut self, bytes: &[u8]) -> std::io::Result<()> {
self.vec.extend_from_slice(bytes);
Ok(())
}
}
impl<'a> HyperAsyncWriter for TcpStreamVecWriter<'a> {
type Buffer = VecBuffer;
fn poll_write(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Result<(), (io::Error, Option<Self::Buffer>)>> {
let this = self.get_mut();
let to_write = this.buffer.as_ref().expect("Polled after completion");
match ready!(Pin::new(&mut this.io.io).poll_write(cx, &to_write[this.written..])) {
Ok(count) => {
this.written += count;
if this.written == to_write.len() {
this.buffer.take();
Poll::Ready(Ok(()))
} else {
Pin::new(this).poll_write(cx)
}
}
Err(e) => Poll::Ready(Err((e, this.buffer.take().map(|vec| VecBuffer { vec })))),
}
}
}
impl HyperAsyncWrite for TcpStream {
type Buffer = VecBuffer;
type Writer<'a> = TcpStreamVecWriter<'a>;
fn start_write<'a>(self: Pin<&'a mut Self>, buffer: Self::Buffer) -> Self::Writer<'a> {
TcpStreamVecWriter {
written: 0,
buffer: Some(buffer.vec),
io: self,
}
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
Pin::new(&mut self.get_mut().io).poll_flush(cx)
}
}
```
While I think this "would work". There's a few things I'm not happy with or unsure about.
Don't like:
- Introduces 2 additional traits to make writes work (HyperBuffer, HyperAsyncWriter)
- Cannot concurrently read and write (self is borrowed into the Writer)
Unsure about:
- API for HyperBuffer (poll_acquire doesn't have a `self` param)
- requiring `self: Pin<&mut Self>` for every method here
- start_write being infallible (this pushes submitting the buffer to the uring queue to the first `poll_write` call)
Perhaps an alternative to `HyperBuffer` could be a deliberate `BufferPool` trait that produces buffers from `self`
Thanks for considering my rambling
Edit: Here's an alternative write trait that does away with the Writer trait:
```rust
pub trait HyperAsyncWrite {
type Buffer: HyperBuffer;
type Writable;
fn start_write(self: Pin<&mut Self>, buffer: Self::Buffer) -> Self::Writable;
fn poll_write(
self: Pin<&mut Self>,
writable: Pin<&mut Self::Writable>,
cx: &mut Context<'_>,
) -> Poll<Result<(), (std::io::Error, Option<Self::Buffer>)>>;
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<std::io::Result<()>>;
}
```
This enables concurrent reads and writes, since each call to `poll_write` only borrows the IO type for the call. Again, not sure what types should and should not be Pinned here. I have a hunch that `Writable` doesn't need Pin
I've opened #3230 which implements this. There are some remaining decisions to be made as part of the implementation, listed in the PR description.
Hey guys! I cannot understand why not just switch to the completion io style. I believe expressing readiness is much easier based on top of it, rather than vice versa.
Something like this:
```rust
trait Read {
async fn read<Buf: IoBuf>(&mut self, buf: Buf) -> (io::Result<usize>, Buf);
}
impl Read for tokio::net::TcpStream {
async fn read<Buf: IoBuf>(&mut self, mut buf: Buf) -> (io::Result<usize>, Buf) {
let result = tokio::io::AsyncReadExt::read(self, buf.borrow_it_somehow_as_mut_slice()).await;
(result, buf)
}
}
```
We will lose the ability to refer to stack-allocated buffers, but is it really a big deal? I believe that if you want to add support for io_uring in the future, you will need to limit the usage of these traits internally to the completion style anyway
You mentioned that you want to support io-uring eventually, so anyway you will need to provide the guarantees it requires: the pointer to the data must outlive the operation in the kernel.
So that, even if allocating stack buffers is performance critical, you can unsafely implement `IoBuf` for `*mut u8` pointer to stack memory, which you can then pass to the io traits api. You guarantee that you do not deallocate memory until operation is completed, and async function stack is pinned somewhere, so the pointer stays valid.
It is unsound, because io trait impl may make `IoBuf` outlive the call, but you can make the trait `unsafe` and ask implementors not to do so
> Hey guys! I cannot understand why not just switch to the completion io style. I believe expressing readiness is much easier based on top of it, rather than vice versa.
>
> Something like this:
>
> ```rust
> trait Read {
> async fn read<Buf: IoBuf>(&mut self, buf: Buf) -> (io::Result<usize>, Buf);
> }
>
> impl Read for tokio::net::TcpStream {
> async fn read<Buf: IoBuf>(&mut self, mut buf: Buf) -> (io::Result<usize>, Buf) {
> let result = tokio::io::AsyncReadExt::read(self, buf.borrow_it_somehow_as_mut_slice()).await;
> (result, buf)
> }
> }
> ```
>
> We will lose the ability to refer to stack-allocated buffers, but is it really a big deal? I believe that if you want to add support for io_uring in the future, you will need to limit the usage of these traits internally to the completion style anyway
The issue here is predominantly memory usage. This approach requires you to waste a lot of memory, enough so that it can cause issues for some folks deploying hyper at scale. I've been trying to craft a solution that does not have these issues. We need a completion-based API, but not a naive one. This applies to uring as well. We need provided buffers to be supported and used.
> This approach requires you to waste a lot of memory
How exactly? I see that it doesn't allow to pass references to stack buffers in a safe way, but I don't see "predominantly memory usage". Does hyper depend on stack buffers so much?
> How exactly? I see that it doesn't allow to pass references to stack buffers in a safe way, but I don't see "predominantly memory usage". Does hyper depend on stack buffers so much?
In the trait you provided, users of hyper have to allocate a buffer for every connection they poll, whereas in read-ready mode they only need one buffer, or a number of buffers equal to the number of threads.
Io-uring's solution to this is provided buffer, by giving uring a list of buffers and let it allocate and deallocate.
It's also worth noting a similarity in how hyper can pick between `poll_write` and `poll_write_vectored` depending on if the IO says it can use it. With hyper owning these traits, when we determine how to expose `completion_write`, hyper can choose a different branch and handle the new strategy.
> In the trait you provided, users of hyper have to allocate a buffer for every connection to poll, whether in read-ready mode they only need one or equal to number of threads.
Then probably user managed buffer pools can address this issue?
> Then probably user managed buffer pools can address this issue?
Sorry I was confused about "user managed buffer pools", IMHO having hyper managed buffer pools would definitely solve this.
Or you can expose a interface similar to curl, where polling, reading and writing are all done by users.
I am not familiar with the curl interface. Is it in SANS I/O style?
@seanmonstar do you think it is theoretically possible to make hyper SANS I/O state machine?
like https://crates.io/crates/quinn-proto or https://crates.io/crates/str0m
it also will solve timers, and non-Send executor issues
> I am not familiar with the curl interface. Is it in SANS I/O style?
Sorry, this is actually what this issue proposes.
And yeah, I think having a user managed buffer would address the mem usage issue.
FWIW, I suppose it might be a sizable change at this point, but I do think sans-I/O designs are generally a good thing, and I've used them in several crates (other than quinn-proto) to good effect. That said, even if hyper came with a sans-I/O design it would likely also want to provide some I/O mechanisms (similar to the quinn-proto/quinn interface).
@NobodyXu
> this is actually what this issue proposes.
I meant more like a state machine approach
https://gist.github.com/VictorBulba/aad7632544e1fbd9c4c8f2cc9b558b6c
> I meant more like a state machine approach
Having a state machine approach is great, though I still think it would have to use `async` and have its own IO traits at some point.
Otherwise, you would basically have to implement what `async fn` does for you by hand.
Well with this, hyper can stay the same. Keep the interface as it is now. But abstract the internal implementation of it into the separate state machine crate, so that guys from monoio for example, could create more tight integration with their runtime and benefit from it.
> you would basically have to implement what async fn does for you by hand
I was inspired by <https://github.com/algesten/str0m> sans-io webrtc library, which I use in my project with my custom compute runtime and a single thread of tokio_uring. There is no need to write you own futures, use async functions from your runtime
Yeah it's certainly possible to do, though I'm not sure how difficult it is to do it with http.
Perhaps @seanmonstar can answer your question since they are the maintainer of this crate and the related crates?
I am just wondering how powerful it would be if combined with arena allocators (like bumpalo) for state machine methods
Given that provided buffers were mentioned so much, I think it is relevant to say that they have been superseded by ring mapped buffers in 5.19. There is little to no documentation about it ([this commit](https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=c7fb19428d67dd0a2a78a4f237af01d39c78dc5a) and [this file](https://github.com/axboe/liburing/wiki/io_uring-and-inetworking-in-2023#provided-buffers) are the only ones as far as I know). Now, the naming seems to be a little ambiguous, it seems that provided buffers is sometimes used to refer to both ring mapped buffers and the legacy provided buffers, but I thought it was relevant.
> Given that provided buffers where mentioned so much, I think it is relevant to say that it has been superseded by ring mapped buffers in 5.19. There is little to no documentation about it ([this commit](https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=c7fb19428d67dd0a2a78a4f237af01d39c78dc5a) and [this file](https://github.com/axboe/liburing/wiki/io_uring-and-inetworking-in-2023#provided-buffers) are the only ones as far as I know). Now, the naming seems to be a little ambiguous, it seems that provided buffers is sometimes used to refer to both ring mapped buffers and the legacy provided buffers, but I thought it was relevant.
My apologies, I used the term provided buffers to refer to both collectively when I spoke.
|
2023-05-17T21:38:21Z
| 3,230
|
hyper should use its own IO traits
This was actually brought up as [an unresolved question in the ROADMAP](https://github.com/hyperium/hyper/blob/master/docs/ROADMAP.md#should-there-be-hyperio-traits). I think I mentally dismissed it at some point as just being annoying, but in the past week as I've thought about how we could make use of io-uring in hyper, I've been thinking about this again. If nothing else, this _should_ be a public record of the decision, whether for or against. This will help others, including Future Us.
All other integration with a runtime (Tokio) has been removed, and helpers exist in `hyper-util`. hyper 1.0 (as of rc.2) still depends on `tokio` with all features turned off, to ask that IO transports implement `tokio::io::{AsyncRead, AsyncWrite}`. By doing so, it makes it _easier_ for users to simply supply a `tokio::net::TcpStream`. But, let me at least bring up some downsides.
## Reasons for `hyper::io::{AsyncRead, AsyncWrite}`
- For people integrating with _other_ runtimes (curl, fuchsia, etc), it can feel odd to need to pull in `tokio` just to implement the IO traits hyper wants. People have expressed concern that they don't want to compile multiple runtimes. While we can explain it's just the traits (when the features are turned off), owning the traits could make people less confused.
- If we own the traits, we can decide to try different things that perhaps Tokio itself wouldn't want to do. This includes differences in opinion of how vectored reads and writes should be supported, but also means we could try to provide specialization paths for completion-based IO (such as io-uring).
We can provide `hyper_util::io::Tokio(T)` that implements the traits for Tokio types, to reduce friction.
|
hyperium__hyper-3230
|
diff --git a/benches/end_to_end.rs b/benches/end_to_end.rs
--- a/benches/end_to_end.rs
+++ b/benches/end_to_end.rs
@@ -4,8 +4,7 @@
extern crate test;
mod support;
-// TODO: Reimplement Opts::bench using hyper::server::conn and hyper::client::conn
-// (instead of Server and HttpClient).
+// TODO: Reimplement parallel for HTTP/1
use std::convert::Infallible;
use std::net::SocketAddr;
diff --git a/benches/pipeline.rs b/benches/pipeline.rs
--- a/benches/pipeline.rs
+++ b/benches/pipeline.rs
@@ -3,6 +3,8 @@
extern crate test;
+mod support;
+
use std::convert::Infallible;
use std::io::{Read, Write};
use std::net::{SocketAddr, TcpStream};
diff --git a/benches/pipeline.rs b/benches/pipeline.rs
--- a/benches/pipeline.rs
+++ b/benches/pipeline.rs
@@ -40,11 +42,12 @@ fn hello_world_16(b: &mut test::Bencher) {
rt.spawn(async move {
loop {
let (stream, _addr) = listener.accept().await.expect("accept");
+ let io = support::TokioIo::new(stream);
http1::Builder::new()
.pipeline_flush(true)
.serve_connection(
- stream,
+ io,
service_fn(|_| async {
Ok::<_, Infallible>(Response::new(Full::new(Bytes::from(
"Hello, World!",
diff --git a/benches/server.rs b/benches/server.rs
--- a/benches/server.rs
+++ b/benches/server.rs
@@ -3,6 +3,8 @@
extern crate test;
+mod support;
+
use std::io::{Read, Write};
use std::net::{SocketAddr, TcpListener, TcpStream};
use std::sync::mpsc;
diff --git /dev/null b/src/common/io/compat.rs
new file mode 100644
--- /dev/null
+++ b/src/common/io/compat.rs
@@ -0,0 +1,150 @@
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+/// This adapts from `hyper` IO traits to the ones in Tokio.
+///
+/// This is currently used by `h2`, and by hyper internal unit tests.
+#[derive(Debug)]
+pub(crate) struct Compat<T>(pub(crate) T);
+
+pub(crate) fn compat<T>(io: T) -> Compat<T> {
+ Compat(io)
+}
+
+impl<T> Compat<T> {
+ fn p(self: Pin<&mut Self>) -> Pin<&mut T> {
+ // SAFETY: The simplest of projections. This is just
+ // a wrapper, we don't do anything that would undo the projection.
+ unsafe { self.map_unchecked_mut(|me| &mut me.0) }
+ }
+}
+
+impl<T> tokio::io::AsyncRead for Compat<T>
+where
+ T: crate::rt::Read,
+{
+ fn poll_read(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ tbuf: &mut tokio::io::ReadBuf<'_>,
+ ) -> Poll<Result<(), std::io::Error>> {
+ let init = tbuf.initialized().len();
+ let filled = tbuf.filled().len();
+ let (new_init, new_filled) = unsafe {
+ let mut buf = crate::rt::ReadBuf::uninit(tbuf.inner_mut());
+ buf.set_init(init);
+ buf.set_filled(filled);
+
+ match crate::rt::Read::poll_read(self.p(), cx, buf.unfilled()) {
+ Poll::Ready(Ok(())) => (buf.init_len(), buf.len()),
+ other => return other,
+ }
+ };
+
+ let n_init = new_init - init;
+ unsafe {
+ tbuf.assume_init(n_init);
+ tbuf.set_filled(new_filled);
+ }
+
+ Poll::Ready(Ok(()))
+ }
+}
+
+impl<T> tokio::io::AsyncWrite for Compat<T>
+where
+ T: crate::rt::Write,
+{
+ fn poll_write(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<Result<usize, std::io::Error>> {
+ crate::rt::Write::poll_write(self.p(), cx, buf)
+ }
+
+ fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), std::io::Error>> {
+ crate::rt::Write::poll_flush(self.p(), cx)
+ }
+
+ fn poll_shutdown(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ ) -> Poll<Result<(), std::io::Error>> {
+ crate::rt::Write::poll_shutdown(self.p(), cx)
+ }
+
+ fn is_write_vectored(&self) -> bool {
+ crate::rt::Write::is_write_vectored(&self.0)
+ }
+
+ fn poll_write_vectored(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ bufs: &[std::io::IoSlice<'_>],
+ ) -> Poll<Result<usize, std::io::Error>> {
+ crate::rt::Write::poll_write_vectored(self.p(), cx, bufs)
+ }
+}
+
+#[cfg(test)]
+impl<T> crate::rt::Read for Compat<T>
+where
+ T: tokio::io::AsyncRead,
+{
+ fn poll_read(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ mut buf: crate::rt::ReadBufCursor<'_>,
+ ) -> Poll<Result<(), std::io::Error>> {
+ let n = unsafe {
+ let mut tbuf = tokio::io::ReadBuf::uninit(buf.as_mut());
+ match tokio::io::AsyncRead::poll_read(self.p(), cx, &mut tbuf) {
+ Poll::Ready(Ok(())) => tbuf.filled().len(),
+ other => return other,
+ }
+ };
+
+ unsafe {
+ buf.advance(n);
+ }
+ Poll::Ready(Ok(()))
+ }
+}
+
+#[cfg(test)]
+impl<T> crate::rt::Write for Compat<T>
+where
+ T: tokio::io::AsyncWrite,
+{
+ fn poll_write(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<Result<usize, std::io::Error>> {
+ tokio::io::AsyncWrite::poll_write(self.p(), cx, buf)
+ }
+
+ fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), std::io::Error>> {
+ tokio::io::AsyncWrite::poll_flush(self.p(), cx)
+ }
+
+ fn poll_shutdown(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ ) -> Poll<Result<(), std::io::Error>> {
+ tokio::io::AsyncWrite::poll_shutdown(self.p(), cx)
+ }
+
+ fn is_write_vectored(&self) -> bool {
+ tokio::io::AsyncWrite::is_write_vectored(&self.0)
+ }
+
+ fn poll_write_vectored(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ bufs: &[std::io::IoSlice<'_>],
+ ) -> Poll<Result<usize, std::io::Error>> {
+ tokio::io::AsyncWrite::poll_write_vectored(self.p(), cx, bufs)
+ }
+}
diff --git a/src/common/io/mod.rs b/src/common/io/mod.rs
--- a/src/common/io/mod.rs
+++ b/src/common/io/mod.rs
@@ -1,3 +1,7 @@
+#[cfg(any(feature = "http2", test))]
+mod compat;
mod rewind;
+#[cfg(any(feature = "http2", test))]
+pub(crate) use self::compat::{compat, Compat};
pub(crate) use self::rewind::Rewind;
diff --git a/src/common/io/rewind.rs b/src/common/io/rewind.rs
--- a/src/common/io/rewind.rs
+++ b/src/common/io/rewind.rs
@@ -109,6 +109,7 @@ where
mod tests {
// FIXME: re-implement tests with `async/await`, this import should
// trigger a warning to remind us
+ use super::super::compat;
use super::Rewind;
use bytes::Bytes;
use tokio::io::AsyncReadExt;
diff --git a/src/common/io/rewind.rs b/src/common/io/rewind.rs
--- a/src/common/io/rewind.rs
+++ b/src/common/io/rewind.rs
@@ -120,14 +121,14 @@ mod tests {
let mock = tokio_test::io::Builder::new().read(&underlying).build();
- let mut stream = Rewind::new(mock);
+ let mut stream = compat(Rewind::new(compat(mock)));
// Read off some bytes, ensure we filled o1
let mut buf = [0; 2];
stream.read_exact(&mut buf).await.expect("read1");
// Rewind the stream so that it is as if we never read in the first place.
- stream.rewind(Bytes::copy_from_slice(&buf[..]));
+ stream.0.rewind(Bytes::copy_from_slice(&buf[..]));
let mut buf = [0; 5];
stream.read_exact(&mut buf).await.expect("read1");
diff --git a/src/common/io/rewind.rs b/src/common/io/rewind.rs
--- a/src/common/io/rewind.rs
+++ b/src/common/io/rewind.rs
@@ -143,13 +144,13 @@ mod tests {
let mock = tokio_test::io::Builder::new().read(&underlying).build();
- let mut stream = Rewind::new(mock);
+ let mut stream = compat(Rewind::new(compat(mock)));
let mut buf = [0; 5];
stream.read_exact(&mut buf).await.expect("read1");
// Rewind the stream so that it is as if we never read in the first place.
- stream.rewind(Bytes::copy_from_slice(&buf[..]));
+ stream.0.rewind(Bytes::copy_from_slice(&buf[..]));
let mut buf = [0; 5];
stream.read_exact(&mut buf).await.expect("read1");
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -1044,12 +1044,13 @@ mod tests {
#[bench]
fn bench_read_head_short(b: &mut ::test::Bencher) {
use super::*;
+ use crate::common::io::Compat;
let s = b"GET / HTTP/1.1\r\nHost: localhost:8080\r\n\r\n";
let len = s.len();
b.bytes = len as u64;
// an empty IO, we'll be skipping and using the read buffer anyways
- let io = tokio_test::io::Builder::new().build();
+ let io = Compat(tokio_test::io::Builder::new().build());
let mut conn = Conn::<_, bytes::Bytes, crate::proto::h1::ServerTransaction>::new(io);
*conn.io.read_buf_mut() = ::bytes::BytesMut::from(&s[..]);
conn.state.cached_headers = Some(HeaderMap::with_capacity(2));
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -428,9 +428,9 @@ impl StdError for IncompleteBody {}
#[cfg(test)]
mod tests {
use super::*;
+ use crate::rt::{Read, ReadBuf};
use std::pin::Pin;
use std::time::Duration;
- use tokio::io::{AsyncRead, ReadBuf};
impl<'a> MemRead for &'a [u8] {
fn read_mem(&mut self, _: &mut task::Context<'_>, len: usize) -> Poll<io::Result<Bytes>> {
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -446,11 +446,11 @@ mod tests {
}
}
- impl<'a> MemRead for &'a mut (dyn AsyncRead + Unpin) {
+ impl<'a> MemRead for &'a mut (dyn Read + Unpin) {
fn read_mem(&mut self, cx: &mut task::Context<'_>, len: usize) -> Poll<io::Result<Bytes>> {
let mut v = vec![0; len];
let mut buf = ReadBuf::new(&mut v);
- ready!(Pin::new(self).poll_read(cx, &mut buf)?);
+ ready!(Pin::new(self).poll_read(cx, buf.unfilled())?);
Poll::Ready(Ok(Bytes::copy_from_slice(&buf.filled())))
}
}
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -629,7 +629,7 @@ mod tests {
async fn read_async(mut decoder: Decoder, content: &[u8], block_at: usize) -> String {
let mut outs = Vec::new();
- let mut ins = if block_at == 0 {
+ let mut ins = crate::common::io::compat(if block_at == 0 {
tokio_test::io::Builder::new()
.wait(Duration::from_millis(10))
.read(content)
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -640,9 +640,9 @@ mod tests {
.wait(Duration::from_millis(10))
.read(&content[block_at..])
.build()
- };
+ });
- let mut ins = &mut ins as &mut (dyn AsyncRead + Unpin);
+ let mut ins = &mut ins as &mut (dyn Read + Unpin);
loop {
let buf = decoder
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -664,6 +664,7 @@ cfg_client! {
#[cfg(test)]
mod tests {
use super::*;
+ use crate::common::io::compat;
use crate::proto::h1::ClientTransaction;
use std::time::Duration;
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -677,7 +678,7 @@ mod tests {
// Block at 0 for now, but we will release this response before
// the request is ready to write later...
let (mut tx, rx) = crate::client::dispatch::channel();
- let conn = Conn::<_, bytes::Bytes, ClientTransaction>::new(io);
+ let conn = Conn::<_, bytes::Bytes, ClientTransaction>::new(compat(io));
let mut dispatcher = Dispatcher::new(Client::new(rx), conn);
// First poll is needed to allow tx to send...
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -714,7 +715,7 @@ mod tests {
.build_with_handle();
let (mut tx, rx) = crate::client::dispatch::channel();
- let mut conn = Conn::<_, bytes::Bytes, ClientTransaction>::new(io);
+ let mut conn = Conn::<_, bytes::Bytes, ClientTransaction>::new(compat(io));
conn.set_write_strategy_queue();
let dispatcher = Dispatcher::new(Client::new(rx), conn);
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -745,7 +746,7 @@ mod tests {
.build();
let (mut tx, rx) = crate::client::dispatch::channel();
- let conn = Conn::<_, bytes::Bytes, ClientTransaction>::new(io);
+ let conn = Conn::<_, bytes::Bytes, ClientTransaction>::new(compat(io));
let mut dispatcher = tokio_test::task::spawn(Dispatcher::new(Client::new(rx), conn));
// First poll is needed to allow tx to send...
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -662,6 +662,7 @@ enum WriteStrategy {
#[cfg(test)]
mod tests {
+ use crate::common::io::compat;
use crate::common::time::Time;
use super::*;
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -717,7 +718,7 @@ mod tests {
.wait(Duration::from_secs(1))
.build();
- let mut buffered = Buffered::<_, Cursor<Vec<u8>>>::new(mock);
+ let mut buffered = Buffered::<_, Cursor<Vec<u8>>>::new(compat(mock));
// We expect a `parse` to be not ready, and so can't await it directly.
// Rather, this `poll_fn` will wrap the `Poll` result.
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -862,7 +863,7 @@ mod tests {
#[cfg(debug_assertions)] // needs to trigger a debug_assert
fn write_buf_requires_non_empty_bufs() {
let mock = Mock::new().build();
- let mut buffered = Buffered::<_, Cursor<Vec<u8>>>::new(mock);
+ let mut buffered = Buffered::<_, Cursor<Vec<u8>>>::new(compat(mock));
buffered.buffer(Cursor::new(Vec::new()));
}
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -897,7 +898,7 @@ mod tests {
let mock = Mock::new().write(b"hello world, it's hyper!").build();
- let mut buffered = Buffered::<_, Cursor<Vec<u8>>>::new(mock);
+ let mut buffered = Buffered::<_, Cursor<Vec<u8>>>::new(compat(mock));
buffered.write_buf.set_strategy(WriteStrategy::Flatten);
buffered.headers_buf().extend(b"hello ");
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -956,7 +957,7 @@ mod tests {
.write(b"hyper!")
.build();
- let mut buffered = Buffered::<_, Cursor<Vec<u8>>>::new(mock);
+ let mut buffered = Buffered::<_, Cursor<Vec<u8>>>::new(compat(mock));
buffered.write_buf.set_strategy(WriteStrategy::Queue);
// we have 4 buffers, and vec IO disabled, but explicitly said
diff --git /dev/null b/src/rt/io.rs
new file mode 100644
--- /dev/null
+++ b/src/rt/io.rs
@@ -0,0 +1,334 @@
+use std::fmt;
+use std::mem::MaybeUninit;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+// New IO traits? What?! Why, are you bonkers?
+//
+// I mean, yes, probably. But, here's the goals:
+//
+// 1. Supports poll-based IO operations.
+// 2. Opt-in vectored IO.
+// 3. Can use an optional buffer pool.
+// 4. Able to add completion-based (uring) IO eventually.
+//
+// Frankly, the last point is the entire reason we're doing this. We want to
+// have forwards-compatibility with an eventually stable io-uring runtime. We
+// don't need that to work right away. But it must be possible to add in here
+// without breaking hyper 1.0.
+//
+// While in here, we can also make small tweaks to poll_read or poll_write that
+// allow even the "slow" path to be faster, such as when someone didn't remember
+// to forward along an `is_completion` call.
+
+/// Reads bytes from a source.
+///
+/// This trait is similar to `std::io::Read`, but supports asynchronous reads.
+pub trait Read {
+ /// Attempts to read bytes into the `buf`.
+ ///
+ /// On success, returns `Poll::Ready(Ok(()))` and places data in the
+ /// unfilled portion of `buf`. If no data was read (`buf.remaining()` is
+ /// unchanged), it implies that EOF has been reached.
+ ///
+ /// If no data is available for reading, the method returns `Poll::Pending`
+ /// and arranges for the current task (via `cx.waker()`) to receive a
+ /// notification when the object becomes readable or is closed.
+ fn poll_read(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: ReadBufCursor<'_>,
+ ) -> Poll<Result<(), std::io::Error>>;
+}
+
+/// Write bytes asynchronously.
+///
+/// This trait is similar to `std::io::Write`, but for asynchronous writes.
+pub trait Write {
+ /// Attempt to write bytes from `buf` into the destination.
+ ///
+    /// On success, returns `Poll::Ready(Ok(num_bytes_written))`. If
+ /// successful, it must be guaranteed that `n <= buf.len()`. A return value
+ /// of `0` means that the underlying object is no longer able to accept
+ /// bytes, or that the provided buffer is empty.
+ ///
+ /// If the object is not ready for writing, the method returns
+ /// `Poll::Pending` and arranges for the current task (via `cx.waker()`) to
+ /// receive a notification when the object becomes writable or is closed.
+ fn poll_write(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<Result<usize, std::io::Error>>;
+
+ /// Attempts to flush the object.
+ ///
+ /// On success, returns `Poll::Ready(Ok(()))`.
+ ///
+ /// If flushing cannot immediately complete, this method returns
+ /// `Poll::Pending` and arranges for the current task (via `cx.waker()`) to
+ /// receive a notification when the object can make progress.
+ fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), std::io::Error>>;
+
+ /// Attempts to shut down this writer.
+ fn poll_shutdown(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ ) -> Poll<Result<(), std::io::Error>>;
+
+ /// Returns whether this writer has an efficient `poll_write_vectored`
+ /// implementation.
+ ///
+ /// The default implementation returns `false`.
+ fn is_write_vectored(&self) -> bool {
+ false
+ }
+
+ /// Like `poll_write`, except that it writes from a slice of buffers.
+ fn poll_write_vectored(
+ self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ bufs: &[std::io::IoSlice<'_>],
+ ) -> Poll<Result<usize, std::io::Error>> {
+ let buf = bufs
+ .iter()
+ .find(|b| !b.is_empty())
+ .map_or(&[][..], |b| &**b);
+ self.poll_write(cx, buf)
+ }
+}
+
+/// A wrapper around a byte buffer that is incrementally filled and initialized.
+///
+/// This type is a sort of "double cursor". It tracks three regions in the
+/// buffer: a region at the beginning of the buffer that has been logically
+/// filled with data, a region that has been initialized at some point but not
+/// yet logically filled, and a region at the end that may be uninitialized.
+/// The filled region is guaranteed to be a subset of the initialized region.
+///
+/// In summary, the contents of the buffer can be visualized as:
+///
+/// ```not_rust
+/// [ capacity ]
+/// [ filled | unfilled ]
+/// [ initialized | uninitialized ]
+/// ```
+///
+/// It is undefined behavior to de-initialize any bytes from the uninitialized
+/// region, since it is merely unknown whether this region is uninitialized or
+/// not, and if part of it turns out to be initialized, it must stay initialized.
+pub struct ReadBuf<'a> {
+ raw: &'a mut [MaybeUninit<u8>],
+ filled: usize,
+ init: usize,
+}
+
+/// The cursor part of a [`ReadBuf`].
+///
+/// This is created by calling `ReadBuf::unfilled()`.
+#[derive(Debug)]
+pub struct ReadBufCursor<'a> {
+ buf: &'a mut ReadBuf<'a>,
+}
+
+impl<'data> ReadBuf<'data> {
+ #[inline]
+ #[cfg(test)]
+ pub(crate) fn new(raw: &'data mut [u8]) -> Self {
+ let len = raw.len();
+ Self {
+ // SAFETY: We never de-init the bytes ourselves.
+ raw: unsafe { &mut *(raw as *mut [u8] as *mut [MaybeUninit<u8>]) },
+ filled: 0,
+ init: len,
+ }
+ }
+
+ /// Create a new `ReadBuf` with a slice of uninitialized bytes.
+ #[inline]
+ pub fn uninit(raw: &'data mut [MaybeUninit<u8>]) -> Self {
+ Self {
+ raw,
+ filled: 0,
+ init: 0,
+ }
+ }
+
+ /// Get a slice of the buffer that has been filled in with bytes.
+ #[inline]
+ pub fn filled(&self) -> &[u8] {
+ // SAFETY: We only slice the filled part of the buffer, which is always valid
+ unsafe { &*(&self.raw[0..self.filled] as *const [MaybeUninit<u8>] as *const [u8]) }
+ }
+
+ /// Get a cursor to the unfilled portion of the buffer.
+ #[inline]
+ pub fn unfilled<'cursor>(&'cursor mut self) -> ReadBufCursor<'cursor> {
+ ReadBufCursor {
+            // SAFETY: self.buf is never re-assigned, so it's safe to narrow
+ // the lifetime.
+ buf: unsafe {
+ std::mem::transmute::<&'cursor mut ReadBuf<'data>, &'cursor mut ReadBuf<'cursor>>(
+ self,
+ )
+ },
+ }
+ }
+
+ #[inline]
+ pub(crate) unsafe fn set_init(&mut self, n: usize) {
+ self.init = self.init.max(n);
+ }
+
+ #[inline]
+ pub(crate) unsafe fn set_filled(&mut self, n: usize) {
+ self.filled = self.filled.max(n);
+ }
+
+ #[inline]
+ pub(crate) fn len(&self) -> usize {
+ self.filled
+ }
+
+ #[inline]
+ pub(crate) fn init_len(&self) -> usize {
+ self.init
+ }
+
+ #[inline]
+ fn remaining(&self) -> usize {
+ self.capacity() - self.filled
+ }
+
+ #[inline]
+ fn capacity(&self) -> usize {
+ self.raw.len()
+ }
+}
+
+impl<'data> fmt::Debug for ReadBuf<'data> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("ReadBuf")
+ .field("filled", &self.filled)
+ .field("init", &self.init)
+ .field("capacity", &self.capacity())
+ .finish()
+ }
+}
+
+impl<'data> ReadBufCursor<'data> {
+ /// Access the unfilled part of the buffer.
+ ///
+ /// # Safety
+ ///
+ /// The caller must not uninitialize any bytes that may have been
+ /// initialized before.
+ #[inline]
+ pub unsafe fn as_mut(&mut self) -> &mut [MaybeUninit<u8>] {
+ &mut self.buf.raw[self.buf.filled..]
+ }
+
+ /// Advance the `filled` cursor by `n` bytes.
+ ///
+ /// # Safety
+ ///
+ /// The caller must take care that `n` more bytes have been initialized.
+ #[inline]
+ pub unsafe fn advance(&mut self, n: usize) {
+ self.buf.filled = self.buf.filled.checked_add(n).expect("overflow");
+ self.buf.init = self.buf.filled.max(self.buf.init);
+ }
+
+ #[inline]
+ pub(crate) fn remaining(&self) -> usize {
+ self.buf.remaining()
+ }
+
+ #[inline]
+ pub(crate) fn put_slice(&mut self, buf: &[u8]) {
+ assert!(
+ self.buf.remaining() >= buf.len(),
+ "buf.len() must fit in remaining()"
+ );
+
+ let amt = buf.len();
+ // Cannot overflow, asserted above
+ let end = self.buf.filled + amt;
+
+ // Safety: the length is asserted above
+ unsafe {
+ self.buf.raw[self.buf.filled..end]
+ .as_mut_ptr()
+ .cast::<u8>()
+ .copy_from_nonoverlapping(buf.as_ptr(), amt);
+ }
+
+ if self.buf.init < end {
+ self.buf.init = end;
+ }
+ self.buf.filled = end;
+ }
+}
+
+macro_rules! deref_async_read {
+ () => {
+ fn poll_read(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: ReadBufCursor<'_>,
+ ) -> Poll<std::io::Result<()>> {
+ Pin::new(&mut **self).poll_read(cx, buf)
+ }
+ };
+}
+
+impl<T: ?Sized + Read + Unpin> Read for Box<T> {
+ deref_async_read!();
+}
+
+impl<T: ?Sized + Read + Unpin> Read for &mut T {
+ deref_async_read!();
+}
+
+macro_rules! deref_async_write {
+ () => {
+ fn poll_write(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<std::io::Result<usize>> {
+ Pin::new(&mut **self).poll_write(cx, buf)
+ }
+
+ fn poll_write_vectored(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ bufs: &[std::io::IoSlice<'_>],
+ ) -> Poll<std::io::Result<usize>> {
+ Pin::new(&mut **self).poll_write_vectored(cx, bufs)
+ }
+
+ fn is_write_vectored(&self) -> bool {
+ (**self).is_write_vectored()
+ }
+
+ fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
+ Pin::new(&mut **self).poll_flush(cx)
+ }
+
+ fn poll_shutdown(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ ) -> Poll<std::io::Result<()>> {
+ Pin::new(&mut **self).poll_shutdown(cx)
+ }
+ };
+}
+
+impl<T: ?Sized + Write + Unpin> Write for Box<T> {
+ deref_async_write!();
+}
+
+impl<T: ?Sized + Write + Unpin> Write for &mut T {
+ deref_async_write!();
+}
diff --git a/src/upgrade.rs b/src/upgrade.rs
--- a/src/upgrade.rs
+++ b/src/upgrade.rs
@@ -122,7 +122,7 @@ impl Upgraded {
#[cfg(any(feature = "http1", feature = "http2", test))]
pub(super) fn new<T>(io: T, read_buf: Bytes) -> Self
where
- T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
+ T: Read + Write + Unpin + Send + 'static,
{
Upgraded {
io: Rewind::new_buffered(Box::new(io), read_buf),
diff --git a/src/upgrade.rs b/src/upgrade.rs
--- a/src/upgrade.rs
+++ b/src/upgrade.rs
@@ -340,7 +340,9 @@ mod tests {
fn upgraded_downcast() {
let upgraded = Upgraded::new(Mock, Bytes::new());
- let upgraded = upgraded.downcast::<std::io::Cursor<Vec<u8>>>().unwrap_err();
+ let upgraded = upgraded
+ .downcast::<crate::common::io::Compat<std::io::Cursor<Vec<u8>>>>()
+ .unwrap_err();
upgraded.downcast::<Mock>().unwrap();
}
diff --git a/src/upgrade.rs b/src/upgrade.rs
--- a/src/upgrade.rs
+++ b/src/upgrade.rs
@@ -348,17 +350,17 @@ mod tests {
// TODO: replace with tokio_test::io when it can test write_buf
struct Mock;
- impl AsyncRead for Mock {
+ impl Read for Mock {
fn poll_read(
self: Pin<&mut Self>,
_cx: &mut task::Context<'_>,
- _buf: &mut ReadBuf<'_>,
+ _buf: ReadBufCursor<'_>,
) -> Poll<io::Result<()>> {
unreachable!("Mock::poll_read")
}
}
- impl AsyncWrite for Mock {
+ impl Write for Mock {
fn poll_write(
self: Pin<&mut Self>,
_: &mut task::Context<'_>,
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -22,6 +22,7 @@ use hyper::{Method, Request, StatusCode, Uri, Version};
use bytes::Bytes;
use futures_channel::oneshot;
use futures_util::future::{self, FutureExt, TryFuture, TryFutureExt};
+use support::TokioIo;
use tokio::net::TcpStream;
mod support;
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -36,8 +37,8 @@ where
b.collect().await.map(|c| c.to_bytes())
}
-fn tcp_connect(addr: &SocketAddr) -> impl Future<Output = std::io::Result<TcpStream>> {
- TcpStream::connect(*addr)
+async fn tcp_connect(addr: &SocketAddr) -> std::io::Result<TokioIo<TcpStream>> {
+ TcpStream::connect(*addr).await.map(TokioIo::new)
}
struct HttpInfo {
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -312,7 +313,7 @@ macro_rules! test {
req.headers_mut().append("Host", HeaderValue::from_str(&host).unwrap());
}
- let (mut sender, conn) = builder.handshake(stream).await?;
+ let (mut sender, conn) = builder.handshake(TokioIo::new(stream)).await?;
tokio::task::spawn(async move {
if let Err(err) = conn.await {
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1339,7 +1340,7 @@ mod conn {
use futures_util::future::{self, poll_fn, FutureExt, TryFutureExt};
use http_body_util::{BodyExt, Empty, StreamBody};
use hyper::rt::Timer;
- use tokio::io::{AsyncRead, AsyncReadExt as _, AsyncWrite, AsyncWriteExt as _, ReadBuf};
+ use tokio::io::{AsyncReadExt as _, AsyncWriteExt as _};
use tokio::net::{TcpListener as TkTcpListener, TcpStream};
use hyper::body::{Body, Frame};
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1349,7 +1350,7 @@ mod conn {
use super::{concat, s, support, tcp_connect, FutureHyperExt};
- use support::{TokioExecutor, TokioTimer};
+ use support::{TokioExecutor, TokioIo, TokioTimer};
fn setup_logger() {
let _ = pretty_env_logger::try_init();
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1773,7 +1774,7 @@ mod conn {
}
let parts = conn.into_parts();
- let mut io = parts.io;
+ let io = parts.io;
let buf = parts.read_buf;
assert_eq!(buf, b"foobar=ready"[..]);
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1785,6 +1786,7 @@ mod conn {
}))
.unwrap_err();
+ let mut io = io.tcp.inner();
let mut vec = vec![];
rt.block_on(io.write_all(b"foo=bar")).unwrap();
rt.block_on(io.read_to_end(&mut vec)).unwrap();
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1861,7 +1863,7 @@ mod conn {
}
let parts = conn.into_parts();
- let mut io = parts.io;
+ let io = parts.io;
let buf = parts.read_buf;
assert_eq!(buf, b"foobar=ready"[..]);
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1874,6 +1876,7 @@ mod conn {
}))
.unwrap_err();
+ let mut io = io.tcp.inner();
let mut vec = vec![];
rt.block_on(io.write_all(b"foo=bar")).unwrap();
rt.block_on(io.read_to_end(&mut vec)).unwrap();
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1895,6 +1898,7 @@ mod conn {
tokio::select! {
res = listener.accept() => {
let (stream, _) = res.unwrap();
+ let stream = TokioIo::new(stream);
let service = service_fn(|_:Request<hyper::body::Incoming>| future::ok::<_, hyper::Error>(Response::new(Empty::<Bytes>::new())));
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2077,7 +2081,7 @@ mod conn {
// Spawn an HTTP2 server that reads the whole body and responds
tokio::spawn(async move {
- let sock = listener.accept().await.unwrap().0;
+ let sock = TokioIo::new(listener.accept().await.unwrap().0);
hyper::server::conn::http2::Builder::new(TokioExecutor)
.timer(TokioTimer)
.serve_connection(
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2166,7 +2170,7 @@ mod conn {
let res = client.send_request(req).await.expect("send_request");
assert_eq!(res.status(), StatusCode::OK);
- let mut upgraded = hyper::upgrade::on(res).await.unwrap();
+ let mut upgraded = TokioIo::new(hyper::upgrade::on(res).await.unwrap());
let mut vec = vec![];
upgraded.read_to_end(&mut vec).await.unwrap();
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2264,7 +2268,7 @@ mod conn {
);
}
- async fn drain_til_eof<T: AsyncRead + Unpin>(mut sock: T) -> io::Result<()> {
+ async fn drain_til_eof<T: tokio::io::AsyncRead + Unpin>(mut sock: T) -> io::Result<()> {
let mut buf = [0u8; 1024];
loop {
let n = sock.read(&mut buf).await?;
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2276,11 +2280,11 @@ mod conn {
}
struct DebugStream {
- tcp: TcpStream,
+ tcp: TokioIo<TcpStream>,
shutdown_called: bool,
}
- impl AsyncWrite for DebugStream {
+ impl hyper::rt::Write for DebugStream {
fn poll_shutdown(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2305,11 +2309,11 @@ mod conn {
}
}
- impl AsyncRead for DebugStream {
+ impl hyper::rt::Read for DebugStream {
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
- buf: &mut ReadBuf<'_>,
+ buf: hyper::rt::ReadBufCursor<'_>,
) -> Poll<io::Result<()>> {
Pin::new(&mut self.tcp).poll_read(cx, buf)
}
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -22,8 +22,8 @@ use h2::{RecvStream, SendStream};
use http::header::{HeaderName, HeaderValue};
use http_body_util::{combinators::BoxBody, BodyExt, Empty, Full, StreamBody};
use hyper::rt::Timer;
-use support::{TokioExecutor, TokioTimer};
-use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
+use hyper::rt::{Read as AsyncRead, Write as AsyncWrite};
+use support::{TokioExecutor, TokioIo, TokioTimer};
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::{TcpListener as TkTcpListener, TcpListener, TcpStream as TkTcpStream};
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -975,6 +975,7 @@ async fn expect_continue_waits_for_body_poll() {
});
let (socket, _) = listener.accept().await.expect("accept");
+ let socket = TokioIo::new(socket);
http1::Builder::new()
.serve_connection(
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1154,6 +1155,7 @@ async fn disable_keep_alive_mid_request() {
});
let (socket, _) = listener.accept().await.unwrap();
+ let socket = TokioIo::new(socket);
let srv = http1::Builder::new().serve_connection(socket, HelloWorld);
future::try_select(srv, rx1)
.then(|r| match r {
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1201,7 +1203,7 @@ async fn disable_keep_alive_post_request() {
let dropped2 = dropped.clone();
let (socket, _) = listener.accept().await.unwrap();
let transport = DebugStream {
- stream: socket,
+ stream: TokioIo::new(socket),
_debug: dropped2,
};
let server = http1::Builder::new().serve_connection(transport, HelloWorld);
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1229,6 +1231,7 @@ async fn empty_parse_eof_does_not_return_error() {
});
let (socket, _) = listener.accept().await.unwrap();
+ let socket = TokioIo::new(socket);
http1::Builder::new()
.serve_connection(socket, HelloWorld)
.await
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1245,6 +1248,7 @@ async fn nonempty_parse_eof_returns_error() {
});
let (socket, _) = listener.accept().await.unwrap();
+ let socket = TokioIo::new(socket);
http1::Builder::new()
.serve_connection(socket, HelloWorld)
.await
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1268,6 +1272,7 @@ async fn http1_allow_half_close() {
});
let (socket, _) = listener.accept().await.unwrap();
+ let socket = TokioIo::new(socket);
http1::Builder::new()
.half_close(true)
.serve_connection(
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1295,6 +1300,7 @@ async fn disconnect_after_reading_request_before_responding() {
});
let (socket, _) = listener.accept().await.unwrap();
+ let socket = TokioIo::new(socket);
http1::Builder::new()
.half_close(false)
.serve_connection(
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1326,6 +1332,7 @@ async fn returning_1xx_response_is_error() {
});
let (socket, _) = listener.accept().await.unwrap();
+ let socket = TokioIo::new(socket);
http1::Builder::new()
.serve_connection(
socket,
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1390,6 +1397,7 @@ async fn header_read_timeout_slow_writes() {
});
let (socket, _) = listener.accept().await.unwrap();
+ let socket = TokioIo::new(socket);
let conn = http1::Builder::new()
.timer(TokioTimer)
.header_read_timeout(Duration::from_secs(5))
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1465,6 +1473,7 @@ async fn header_read_timeout_slow_writes_multiple_requests() {
});
let (socket, _) = listener.accept().await.unwrap();
+ let socket = TokioIo::new(socket);
let conn = http1::Builder::new()
.timer(TokioTimer)
.header_read_timeout(Duration::from_secs(5))
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1511,6 +1520,7 @@ async fn upgrades() {
});
let (socket, _) = listener.accept().await.unwrap();
+ let socket = TokioIo::new(socket);
let conn = http1::Builder::new().serve_connection(
socket,
service_fn(|_| {
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1529,7 +1539,7 @@ async fn upgrades() {
// wait so that we don't write until other side saw 101 response
rx.await.unwrap();
- let mut io = parts.io;
+ let mut io = parts.io.inner();
io.write_all(b"foo=bar").await.unwrap();
let mut vec = vec![];
io.read_to_end(&mut vec).await.unwrap();
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1564,6 +1574,7 @@ async fn http_connect() {
});
let (socket, _) = listener.accept().await.unwrap();
+ let socket = TokioIo::new(socket);
let conn = http1::Builder::new().serve_connection(
socket,
service_fn(|_| {
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1581,7 +1592,7 @@ async fn http_connect() {
// wait so that we don't write until other side saw 101 response
rx.await.unwrap();
- let mut io = parts.io;
+ let mut io = parts.io.inner();
io.write_all(b"foo=bar").await.unwrap();
let mut vec = vec![];
io.read_to_end(&mut vec).await.unwrap();
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1634,6 +1645,7 @@ async fn upgrades_new() {
});
let (socket, _) = listener.accept().await.unwrap();
+ let socket = TokioIo::new(socket);
http1::Builder::new()
.serve_connection(socket, svc)
.with_upgrades()
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1646,10 +1658,10 @@ async fn upgrades_new() {
read_101_rx.await.unwrap();
let upgraded = on_upgrade.await.expect("on_upgrade");
- let parts = upgraded.downcast::<TkTcpStream>().unwrap();
+ let parts = upgraded.downcast::<TokioIo<TkTcpStream>>().unwrap();
assert_eq!(parts.read_buf, "eagerly optimistic");
- let mut io = parts.io;
+ let mut io = parts.io.inner();
io.write_all(b"foo=bar").await.unwrap();
let mut vec = vec![];
io.read_to_end(&mut vec).await.unwrap();
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1668,6 +1680,7 @@ async fn upgrades_ignored() {
loop {
let (socket, _) = listener.accept().await.unwrap();
+ let socket = TokioIo::new(socket);
tokio::task::spawn(async move {
http1::Builder::new()
.serve_connection(socket, svc)
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1738,6 +1751,7 @@ async fn http_connect_new() {
});
let (socket, _) = listener.accept().await.unwrap();
+ let socket = TokioIo::new(socket);
http1::Builder::new()
.serve_connection(socket, svc)
.with_upgrades()
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1750,10 +1764,10 @@ async fn http_connect_new() {
read_200_rx.await.unwrap();
let upgraded = on_upgrade.await.expect("on_upgrade");
- let parts = upgraded.downcast::<TkTcpStream>().unwrap();
+ let parts = upgraded.downcast::<TokioIo<TkTcpStream>>().unwrap();
assert_eq!(parts.read_buf, "eagerly optimistic");
- let mut io = parts.io;
+ let mut io = parts.io.inner();
io.write_all(b"foo=bar").await.unwrap();
let mut vec = vec![];
io.read_to_end(&mut vec).await.unwrap();
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1799,7 +1813,7 @@ async fn h2_connect() {
let on_upgrade = hyper::upgrade::on(req);
tokio::spawn(async move {
- let mut upgraded = on_upgrade.await.expect("on_upgrade");
+ let mut upgraded = TokioIo::new(on_upgrade.await.expect("on_upgrade"));
upgraded.write_all(b"Bread?").await.unwrap();
let mut vec = vec![];
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1818,6 +1832,7 @@ async fn h2_connect() {
});
let (socket, _) = listener.accept().await.unwrap();
+ let socket = TokioIo::new(socket);
http2::Builder::new(TokioExecutor)
.serve_connection(socket, svc)
//.with_upgrades()
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1891,7 +1906,7 @@ async fn h2_connect_multiplex() {
assert!(upgrade_res.expect_err("upgrade cancelled").is_canceled());
return;
}
- let mut upgraded = upgrade_res.expect("upgrade successful");
+ let mut upgraded = TokioIo::new(upgrade_res.expect("upgrade successful"));
upgraded.write_all(b"Bread?").await.unwrap();
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1927,6 +1942,7 @@ async fn h2_connect_multiplex() {
});
let (socket, _) = listener.accept().await.unwrap();
+ let socket = TokioIo::new(socket);
http2::Builder::new(TokioExecutor)
.serve_connection(socket, svc)
//.with_upgrades()
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1978,7 +1994,7 @@ async fn h2_connect_large_body() {
let on_upgrade = hyper::upgrade::on(req);
tokio::spawn(async move {
- let mut upgraded = on_upgrade.await.expect("on_upgrade");
+ let mut upgraded = TokioIo::new(on_upgrade.await.expect("on_upgrade"));
upgraded.write_all(b"Bread?").await.unwrap();
let mut vec = vec![];
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1999,6 +2015,7 @@ async fn h2_connect_large_body() {
});
let (socket, _) = listener.accept().await.unwrap();
+ let socket = TokioIo::new(socket);
http2::Builder::new(TokioExecutor)
.serve_connection(socket, svc)
//.with_upgrades()
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2049,7 +2066,7 @@ async fn h2_connect_empty_frames() {
let on_upgrade = hyper::upgrade::on(req);
tokio::spawn(async move {
- let mut upgraded = on_upgrade.await.expect("on_upgrade");
+ let mut upgraded = TokioIo::new(on_upgrade.await.expect("on_upgrade"));
upgraded.write_all(b"Bread?").await.unwrap();
let mut vec = vec![];
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2068,6 +2085,7 @@ async fn h2_connect_empty_frames() {
});
let (socket, _) = listener.accept().await.unwrap();
+ let socket = TokioIo::new(socket);
http2::Builder::new(TokioExecutor)
.serve_connection(socket, svc)
//.with_upgrades()
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2090,6 +2108,7 @@ async fn parse_errors_send_4xx_response() {
});
let (socket, _) = listener.accept().await.unwrap();
+ let socket = TokioIo::new(socket);
http1::Builder::new()
.serve_connection(socket, HelloWorld)
.await
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2112,6 +2131,7 @@ async fn illegal_request_length_returns_400_response() {
});
let (socket, _) = listener.accept().await.unwrap();
+ let socket = TokioIo::new(socket);
http1::Builder::new()
.serve_connection(socket, HelloWorld)
.await
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2152,6 +2172,7 @@ async fn max_buf_size() {
});
let (socket, _) = listener.accept().await.unwrap();
+ let socket = TokioIo::new(socket);
http1::Builder::new()
.max_buf_size(MAX)
.serve_connection(socket, HelloWorld)
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2166,6 +2187,7 @@ async fn graceful_shutdown_before_first_request_no_block() {
tokio::spawn(async move {
let socket = listener.accept().await.unwrap().0;
+ let socket = TokioIo::new(socket);
let future = http1::Builder::new().serve_connection(socket, HelloWorld);
pin!(future);
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2407,6 +2429,7 @@ async fn http2_keep_alive_detects_unresponsive_client() {
});
let (socket, _) = listener.accept().await.expect("accept");
+ let socket = TokioIo::new(socket);
let err = http2::Builder::new(TokioExecutor)
.timer(TokioTimer)
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2425,6 +2448,7 @@ async fn http2_keep_alive_with_responsive_client() {
tokio::spawn(async move {
let (socket, _) = listener.accept().await.expect("accept");
+ let socket = TokioIo::new(socket);
http2::Builder::new(TokioExecutor)
.timer(TokioTimer)
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2435,7 +2459,7 @@ async fn http2_keep_alive_with_responsive_client() {
.expect("serve_connection");
});
- let tcp = connect_async(addr).await;
+ let tcp = TokioIo::new(connect_async(addr).await);
let (mut client, conn) = hyper::client::conn::http2::Builder::new(TokioExecutor)
.handshake(tcp)
.await
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2488,6 +2512,7 @@ async fn http2_keep_alive_count_server_pings() {
tokio::spawn(async move {
let (socket, _) = listener.accept().await.expect("accept");
+ let socket = TokioIo::new(socket);
http2::Builder::new(TokioExecutor)
.timer(TokioTimer)
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2871,6 +2896,7 @@ impl ServeOptions {
tokio::select! {
res = listener.accept() => {
let (stream, _) = res.unwrap();
+ let stream = TokioIo::new(stream);
tokio::task::spawn(async move {
let msg_tx = msg_tx.clone();
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2922,7 +2948,7 @@ fn has_header(msg: &str, name: &str) -> bool {
msg[..n].contains(name)
}
-fn tcp_bind(addr: &SocketAddr) -> ::tokio::io::Result<TcpListener> {
+fn tcp_bind(addr: &SocketAddr) -> std::io::Result<TcpListener> {
let std_listener = StdTcpListener::bind(addr).unwrap();
std_listener.set_nonblocking(true).unwrap();
TcpListener::from_std(std_listener)
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -3001,7 +3027,7 @@ impl<T: AsyncRead + Unpin, D: Unpin> AsyncRead for DebugStream<T, D> {
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
- buf: &mut ReadBuf<'_>,
+ buf: hyper::rt::ReadBufCursor<'_>,
) -> Poll<io::Result<()>> {
Pin::new(&mut self.stream).poll_read(cx, buf)
}
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -3058,9 +3084,11 @@ impl TestClient {
let host = req.uri().host().expect("uri has no host");
let port = req.uri().port_u16().expect("uri has no port");
- let stream = TkTcpStream::connect(format!("{}:{}", host, port))
- .await
- .unwrap();
+ let stream = TokioIo::new(
+ TkTcpStream::connect(format!("{}:{}", host, port))
+ .await
+ .unwrap(),
+ );
if self.http2_only {
let (mut sender, conn) = hyper::client::conn::http2::Builder::new(TokioExecutor)
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -21,7 +21,7 @@ pub use hyper::{HeaderMap, StatusCode};
pub use std::net::SocketAddr;
mod tokiort;
-pub use tokiort::{TokioExecutor, TokioTimer};
+pub use tokiort::{TokioExecutor, TokioIo, TokioTimer};
#[allow(unused_macros)]
macro_rules! t {
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -357,6 +357,7 @@ async fn async_test(cfg: __TestConfig) {
loop {
let (stream, _) = listener.accept().await.expect("server error");
+ let io = TokioIo::new(stream);
// Move a clone into the service_fn
let serve_handles = serve_handles.clone();
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -386,12 +387,12 @@ async fn async_test(cfg: __TestConfig) {
tokio::task::spawn(async move {
if http2_only {
server::conn::http2::Builder::new(TokioExecutor)
- .serve_connection(stream, service)
+ .serve_connection(io, service)
.await
.expect("server error");
} else {
server::conn::http1::Builder::new()
- .serve_connection(stream, service)
+ .serve_connection(io, service)
.await
.expect("server error");
}
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -425,10 +426,11 @@ async fn async_test(cfg: __TestConfig) {
async move {
let stream = TcpStream::connect(addr).await.unwrap();
+ let io = TokioIo::new(stream);
let res = if http2_only {
let (mut sender, conn) = hyper::client::conn::http2::Builder::new(TokioExecutor)
- .handshake(stream)
+ .handshake(io)
.await
.unwrap();
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -440,7 +442,7 @@ async fn async_test(cfg: __TestConfig) {
sender.send_request(req).await.unwrap()
} else {
let (mut sender, conn) = hyper::client::conn::http1::Builder::new()
- .handshake(stream)
+ .handshake(io)
.await
.unwrap();
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -508,6 +510,7 @@ async fn naive_proxy(cfg: ProxyConfig) -> (SocketAddr, impl Future<Output = ()>)
loop {
let (stream, _) = listener.accept().await.unwrap();
+ let io = TokioIo::new(stream);
let service = service_fn(move |mut req| {
async move {
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -523,11 +526,12 @@ async fn naive_proxy(cfg: ProxyConfig) -> (SocketAddr, impl Future<Output = ()>)
let stream = TcpStream::connect(format!("{}:{}", uri, port))
.await
.unwrap();
+ let io = TokioIo::new(stream);
let resp = if http2_only {
let (mut sender, conn) =
hyper::client::conn::http2::Builder::new(TokioExecutor)
- .handshake(stream)
+ .handshake(io)
.await
.unwrap();
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -540,7 +544,7 @@ async fn naive_proxy(cfg: ProxyConfig) -> (SocketAddr, impl Future<Output = ()>)
sender.send_request(req).await?
} else {
let builder = hyper::client::conn::http1::Builder::new();
- let (mut sender, conn) = builder.handshake(stream).await.unwrap();
+ let (mut sender, conn) = builder.handshake(io).await.unwrap();
tokio::task::spawn(async move {
if let Err(err) = conn.await {
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -569,12 +573,12 @@ async fn naive_proxy(cfg: ProxyConfig) -> (SocketAddr, impl Future<Output = ()>)
if http2_only {
server::conn::http2::Builder::new(TokioExecutor)
- .serve_connection(stream, service)
+ .serve_connection(io, service)
.await
.unwrap();
} else {
server::conn::http1::Builder::new()
- .serve_connection(stream, service)
+ .serve_connection(io, service)
.await
.unwrap();
}
|
hyperium/hyper
|
6fd696e10974f10b2c6b9d16393bbbfa21c2333f
|
[
"3178"
] |
1.3
|
f9aa697ec371d36c6d553c988d84b6f709e63362
|
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -1,10 +1,12 @@
use std::fmt;
+#[cfg(feature = "server")]
+use std::future::Future;
use std::io;
use std::marker::{PhantomData, Unpin};
use std::pin::Pin;
use std::task::{Context, Poll};
#[cfg(feature = "server")]
-use std::time::Duration;
+use std::time::{Duration, Instant};
use crate::rt::{Read, Write};
use bytes::{Buf, Bytes};
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -209,33 +211,67 @@ where
debug_assert!(self.can_read_head());
trace!("Conn::read_head");
- let msg = match ready!(self.io.parse::<T>(
+ #[cfg(feature = "server")]
+ if !self.state.h1_header_read_timeout_running {
+ if let Some(h1_header_read_timeout) = self.state.h1_header_read_timeout {
+ let deadline = Instant::now() + h1_header_read_timeout;
+ self.state.h1_header_read_timeout_running = true;
+ match self.state.h1_header_read_timeout_fut {
+ Some(ref mut h1_header_read_timeout_fut) => {
+ trace!("resetting h1 header read timeout timer");
+ self.state.timer.reset(h1_header_read_timeout_fut, deadline);
+ }
+ None => {
+ trace!("setting h1 header read timeout timer");
+ self.state.h1_header_read_timeout_fut =
+ Some(self.state.timer.sleep_until(deadline));
+ }
+ }
+ }
+ }
+
+ let msg = match self.io.parse::<T>(
cx,
ParseContext {
cached_headers: &mut self.state.cached_headers,
req_method: &mut self.state.method,
h1_parser_config: self.state.h1_parser_config.clone(),
h1_max_headers: self.state.h1_max_headers,
- #[cfg(feature = "server")]
- h1_header_read_timeout: self.state.h1_header_read_timeout,
- #[cfg(feature = "server")]
- h1_header_read_timeout_fut: &mut self.state.h1_header_read_timeout_fut,
- #[cfg(feature = "server")]
- h1_header_read_timeout_running: &mut self.state.h1_header_read_timeout_running,
- #[cfg(feature = "server")]
- timer: self.state.timer.clone(),
preserve_header_case: self.state.preserve_header_case,
#[cfg(feature = "ffi")]
preserve_header_order: self.state.preserve_header_order,
h09_responses: self.state.h09_responses,
#[cfg(feature = "ffi")]
on_informational: &mut self.state.on_informational,
+ },
+ ) {
+ Poll::Ready(Ok(msg)) => msg,
+ Poll::Ready(Err(e)) => return self.on_read_head_error(e),
+ Poll::Pending => {
+ #[cfg(feature = "server")]
+ if self.state.h1_header_read_timeout_running {
+ if let Some(ref mut h1_header_read_timeout_fut) =
+ self.state.h1_header_read_timeout_fut
+ {
+ if Pin::new(h1_header_read_timeout_fut).poll(cx).is_ready() {
+ self.state.h1_header_read_timeout_running = false;
+
+ warn!("read header from client timeout");
+ return Poll::Ready(Some(Err(crate::Error::new_header_timeout())));
+ }
+ }
+ }
+
+ return Poll::Pending;
}
- )) {
- Ok(msg) => msg,
- Err(e) => return self.on_read_head_error(e),
};
+ #[cfg(feature = "server")]
+ {
+ self.state.h1_header_read_timeout_running = false;
+ self.state.h1_header_read_timeout_fut = None;
+ }
+
// Note: don't deconstruct `msg` into local variables, it appears
// the optimizer doesn't remove the extra copies.
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -1,7 +1,5 @@
use std::cmp;
use std::fmt;
-#[cfg(feature = "server")]
-use std::future::Future;
use std::io::{self, IoSlice};
use std::pin::Pin;
use std::task::{Context, Poll};
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -183,14 +181,6 @@ where
req_method: parse_ctx.req_method,
h1_parser_config: parse_ctx.h1_parser_config.clone(),
h1_max_headers: parse_ctx.h1_max_headers,
- #[cfg(feature = "server")]
- h1_header_read_timeout: parse_ctx.h1_header_read_timeout,
- #[cfg(feature = "server")]
- h1_header_read_timeout_fut: parse_ctx.h1_header_read_timeout_fut,
- #[cfg(feature = "server")]
- h1_header_read_timeout_running: parse_ctx.h1_header_read_timeout_running,
- #[cfg(feature = "server")]
- timer: parse_ctx.timer.clone(),
preserve_header_case: parse_ctx.preserve_header_case,
#[cfg(feature = "ffi")]
preserve_header_order: parse_ctx.preserve_header_order,
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -201,12 +191,6 @@ where
)? {
Some(msg) => {
debug!("parsed {} headers", msg.head.headers.len());
-
- #[cfg(feature = "server")]
- {
- *parse_ctx.h1_header_read_timeout_running = false;
- parse_ctx.h1_header_read_timeout_fut.take();
- }
return Poll::Ready(Ok(msg));
}
None => {
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -215,20 +199,6 @@ where
debug!("max_buf_size ({}) reached, closing", max);
return Poll::Ready(Err(crate::Error::new_too_large()));
}
-
- #[cfg(feature = "server")]
- if *parse_ctx.h1_header_read_timeout_running {
- if let Some(h1_header_read_timeout_fut) =
- parse_ctx.h1_header_read_timeout_fut
- {
- if Pin::new(h1_header_read_timeout_fut).poll(cx).is_ready() {
- *parse_ctx.h1_header_read_timeout_running = false;
-
- warn!("read header from client timeout");
- return Poll::Ready(Err(crate::Error::new_header_timeout()));
- }
- }
- }
}
}
if ready!(self.poll_read_from_io(cx)).map_err(crate::Error::new_io)? == 0 {
diff --git a/src/proto/h1/mod.rs b/src/proto/h1/mod.rs
--- a/src/proto/h1/mod.rs
+++ b/src/proto/h1/mod.rs
@@ -1,16 +1,9 @@
-#[cfg(feature = "server")]
-use std::{pin::Pin, time::Duration};
-
use bytes::BytesMut;
use http::{HeaderMap, Method};
use httparse::ParserConfig;
use crate::body::DecodedLength;
-#[cfg(feature = "server")]
-use crate::common::time::Time;
use crate::proto::{BodyLength, MessageHead};
-#[cfg(feature = "server")]
-use crate::rt::Sleep;
pub(crate) use self::conn::Conn;
pub(crate) use self::decode::Decoder;
diff --git a/src/proto/h1/mod.rs b/src/proto/h1/mod.rs
--- a/src/proto/h1/mod.rs
+++ b/src/proto/h1/mod.rs
@@ -79,14 +72,6 @@ pub(crate) struct ParseContext<'a> {
req_method: &'a mut Option<Method>,
h1_parser_config: ParserConfig,
h1_max_headers: Option<usize>,
- #[cfg(feature = "server")]
- h1_header_read_timeout: Option<Duration>,
- #[cfg(feature = "server")]
- h1_header_read_timeout_fut: &'a mut Option<Pin<Box<dyn Sleep>>>,
- #[cfg(feature = "server")]
- h1_header_read_timeout_running: &'a mut bool,
- #[cfg(feature = "server")]
- timer: Time,
preserve_header_case: bool,
#[cfg(feature = "ffi")]
preserve_header_order: bool,
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -2,8 +2,6 @@ use std::mem::MaybeUninit;
#[cfg(feature = "client")]
use std::fmt::{self, Write as _};
-#[cfg(feature = "server")]
-use std::time::Instant;
use bytes::Bytes;
use bytes::BytesMut;
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -80,24 +78,6 @@ where
let _entered = trace_span!("parse_headers");
- #[cfg(feature = "server")]
- if !*ctx.h1_header_read_timeout_running {
- if let Some(h1_header_read_timeout) = ctx.h1_header_read_timeout {
- let deadline = Instant::now() + h1_header_read_timeout;
- *ctx.h1_header_read_timeout_running = true;
- match ctx.h1_header_read_timeout_fut {
- Some(h1_header_read_timeout_fut) => {
- debug!("resetting h1 header read timeout timer");
- ctx.timer.reset(h1_header_read_timeout_fut, deadline);
- }
- None => {
- debug!("setting h1 header read timeout timer");
- *ctx.h1_header_read_timeout_fut = Some(ctx.timer.sleep_until(deadline));
- }
- }
- }
- }
-
T::parse(bytes, ctx)
}
|
Hm, I see that in #2675, it originally started the timer immediately, but it was suggested it should wait because some clients make connections eagerly. While there may be clients that do that, it does seem like the timer should probably start right away anyways. The servers resources are more constrained than a client.
@paolobarbolini or @silence-coding, any thoughts?
If the timer doesn't start immediately (for h1 & h2 at least for now), I am reluctant to expose my rust's hyper server to the internet (without a reverse-proxy), since I can't see a way to set up an initial connection timeout to prevent malicious people from opening multiple (millions of) sockets to my server (and therefore consuming multiple file descriptors) without any way for me to close them.
Is this a feature or some sort of bug / security issue?
Sorry I missed the original notification. At the time this was originally implemented there was a feeling of: let's do the bare minimum in hyper and then let's have the user do everything else.
What this meant is that hyper itself has to enforce an header read timeout, because it can't be efficiently done outside of it, while a connection idle timeout wouldn't need to be handled by hyper because it could be implemented by a wrapper around the connection. This isn't perfect, because it means that your crate is vulnerable by default to DoS attacks, but that's probably something that could be handled a different way, like a second crate? But then having good defaults would be nice?
I remember @silence-coding didn't like that in my initial PR it was handled like #3185 does because then browsers couldn't preconnect because the connect would be shutdown a few seconds later because of the header timeout.
What do we think about this?
Thanks a lot for taking this into consideration. Will https://github.com/hyperium/hyper/pull/3185 also allow to have a proper read timeout for h2 / h3 only servers or is it just for http1 servers?
> I remember @silence-coding didn't like that in my initial PR it was handled like #3185 does because then browsers couldn't preconnect because the connect would be shutdown a few seconds later because of the header timeout.
> What do we think about this?
Couldn’t we consider it the hyper server owner’s responsibility to figure out a suitable header read timeout value, long enough to allow the legitimate browser pre-connect use case, but short enough to prevent DoS attacks?
For the sake of this issue, as long as I can set up a value, I am happy. I am more worried now that the fix only allows mitigating attacks for http1 hyper servers, not h2 nor h3 ones (and did not find the proper variable to set that yet).
> Thanks a lot for taking this into consideration. Will #3185 also allow to have a proper read timeout for h2 / h3 only servers or is it just for http1 servers?
Just h1
> For the sake of this issue, as long as I can set up a value, I am happy.
It's not right forcing everyone to set the same value for both timeouts though. If hyper really has to handle this it should be a separate timeout.
> > Thanks a lot for taking this into consideration. Will #3185 also allow to have a proper read timeout for h2 / h3 only servers or is it just for http1 servers?
>
> Just h1
How can I prevent DoS attacks (multiple idle opened connections with no headers sent) to my rust hyper h2 server, if I expose it directly to the internet?
Disable h2 or have someone implement timeouts for it too
So, @programatik29 recently shared this with me: https://gist.github.com/programatik29/36d371c657392fd7f322e7342957b6d1
The idea there is to share a timer between the `AsyncRead` and the `Service`. Then it can coordinate that "headers" are received to the `Service` some certain amount of time after a `poll_read` has been called.
This really should just have a default timeout in hyper. Many crates use hyper as their server base. At this point it's starting to become the base implementation for the majority of web traffic in Rust. It shouldn't be the case that you can by-default DDOS web traffic. If we want to provide an override so that pre connect can be customized, sure, but we definitely should fix this.
> So, @programatik29 recently shared this with me: https://gist.github.com/programatik29/36d371c657392fd7f322e7342957b6d1
>
> The idea there is to share a timer between the `AsyncRead` and the `Service`. Then it can coordinate that "headers" are received to the `Service` some certain amount of time after a `poll_read` has been called.
Hi, @seanmonstar. Is that included in the [hyper 1.0 roadmap](https://github.com/hyperium/hyper/blob/master/docs/ROADMAP.md)?
It is not a requirement for 1.0, but it could be added at any time.
> Then it can coordinate that "headers" are received to the `Service` some certain amount of time after a `poll_read` has been called.
@seanmonstar, is it just me, or does that implementation potentially fail to work on http2 if the first stream issues prompt headers but the others don't? I'm not super familiar with axum, but it looks like Accept is at the _connection_ level, so the state transitions it's detecting are across all streams?
(I'm attempting to mitigate http2 slow header attacks in my server and found this bug when looking for suggestions. It looks like in 1.1 there are still no mitigations built into hyper, so an approach like this one would be useful... if it works.)
**Edit:** Basically, I'm having a hard time convincing myself that this shouldn't have e.g. a counting semaphore for tracking whether something is currently in header/trailer transfer state, rather than two booleans per connection.
@cbiffle I'm curious about your case. HTTP2 is different, so feel free to open another issue, with more details of what you're trying to stop.
Hi, @seanmonstar. This feature was not included in version 1.0. Right?
I'm trying to implement it on the Axum server level.
I've created a sample repo:
https://github.com/josecelano/axum-server-timeout
I've only set the timeout for `http1_header_read_timeout`
In case other people are looking for the same, this is supposed to be the way to do it (but not tested):
https://gist.github.com/programatik29/36d371c657392fd7f322e7342957b6d1
I think including a default timeout for "waiting for headers" would be a good security patch. Does anybody agree? Or are there some cases where you might not want it?
|
2023-03-24T19:15:51Z
| 3,185
|
timeout while waiting for headers
There doesn't seem to be a way to specify, on a server, after a client connection has been accepted, a read timeout so as to close the client connection after a while when no client headers have been sent at all. See for instance the [ReadTimeout](https://blog.cloudflare.com/exposing-go-on-the-internet/#timeouts) from GoLang’s HTTP Server class where you can configure this.
The http1_header_read_timeout method only starts the timer when the first line of the http1 header is sent. So if I just netcat into my hyper http server and don't send anything, the connection is kept open forever.
|
hyperium__hyper-3185
|
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -660,10 +630,8 @@ enum WriteStrategy {
#[cfg(test)]
mod tests {
- use crate::common::io::Compat;
- use crate::common::time::Time;
-
use super::*;
+ use crate::common::io::Compat;
use std::time::Duration;
use tokio_test::io::Builder as Mock;
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -726,10 +694,6 @@ mod tests {
req_method: &mut None,
h1_parser_config: Default::default(),
h1_max_headers: None,
- h1_header_read_timeout: None,
- h1_header_read_timeout_fut: &mut None,
- h1_header_read_timeout_running: &mut false,
- timer: Time::Empty,
preserve_header_case: false,
#[cfg(feature = "ffi")]
preserve_header_order: false,
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1631,8 +1611,6 @@ fn extend(dst: &mut Vec<u8>, data: &[u8]) {
mod tests {
use bytes::BytesMut;
- use crate::common::time::Time;
-
use super::*;
#[test]
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1647,10 +1625,6 @@ mod tests {
req_method: &mut method,
h1_parser_config: Default::default(),
h1_max_headers: None,
- h1_header_read_timeout: None,
- h1_header_read_timeout_fut: &mut None,
- h1_header_read_timeout_running: &mut false,
- timer: Time::Empty,
preserve_header_case: false,
#[cfg(feature = "ffi")]
preserve_header_order: false,
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1679,10 +1653,6 @@ mod tests {
req_method: &mut Some(crate::Method::GET),
h1_parser_config: Default::default(),
h1_max_headers: None,
- h1_header_read_timeout: None,
- h1_header_read_timeout_fut: &mut None,
- h1_header_read_timeout_running: &mut false,
- timer: Time::Empty,
preserve_header_case: false,
#[cfg(feature = "ffi")]
preserve_header_order: false,
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1706,10 +1676,6 @@ mod tests {
req_method: &mut None,
h1_parser_config: Default::default(),
h1_max_headers: None,
- h1_header_read_timeout: None,
- h1_header_read_timeout_fut: &mut None,
- h1_header_read_timeout_running: &mut false,
- timer: Time::Empty,
preserve_header_case: false,
#[cfg(feature = "ffi")]
preserve_header_order: false,
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1731,10 +1697,6 @@ mod tests {
req_method: &mut Some(crate::Method::GET),
h1_parser_config: Default::default(),
h1_max_headers: None,
- h1_header_read_timeout: None,
- h1_header_read_timeout_fut: &mut None,
- h1_header_read_timeout_running: &mut false,
- timer: Time::Empty,
preserve_header_case: false,
#[cfg(feature = "ffi")]
preserve_header_order: false,
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1758,10 +1720,6 @@ mod tests {
req_method: &mut Some(crate::Method::GET),
h1_parser_config: Default::default(),
h1_max_headers: None,
- h1_header_read_timeout: None,
- h1_header_read_timeout_fut: &mut None,
- h1_header_read_timeout_running: &mut false,
- timer: Time::Empty,
preserve_header_case: false,
#[cfg(feature = "ffi")]
preserve_header_order: false,
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1789,10 +1747,6 @@ mod tests {
req_method: &mut Some(crate::Method::GET),
h1_parser_config,
h1_max_headers: None,
- h1_header_read_timeout: None,
- h1_header_read_timeout_fut: &mut None,
- h1_header_read_timeout_running: &mut false,
- timer: Time::Empty,
preserve_header_case: false,
#[cfg(feature = "ffi")]
preserve_header_order: false,
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1817,10 +1771,6 @@ mod tests {
req_method: &mut Some(crate::Method::GET),
h1_parser_config: Default::default(),
h1_max_headers: None,
- h1_header_read_timeout: None,
- h1_header_read_timeout_fut: &mut None,
- h1_header_read_timeout_running: &mut false,
- timer: Time::Empty,
preserve_header_case: false,
#[cfg(feature = "ffi")]
preserve_header_order: false,
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1840,10 +1790,6 @@ mod tests {
req_method: &mut None,
h1_parser_config: Default::default(),
h1_max_headers: None,
- h1_header_read_timeout: None,
- h1_header_read_timeout_fut: &mut None,
- h1_header_read_timeout_running: &mut false,
- timer: Time::Empty,
preserve_header_case: true,
#[cfg(feature = "ffi")]
preserve_header_order: false,
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1884,10 +1830,6 @@ mod tests {
req_method: &mut None,
h1_parser_config: Default::default(),
h1_max_headers: None,
- h1_header_read_timeout: None,
- h1_header_read_timeout_fut: &mut None,
- h1_header_read_timeout_running: &mut false,
- timer: Time::Empty,
preserve_header_case: false,
#[cfg(feature = "ffi")]
preserve_header_order: false,
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1909,10 +1851,6 @@ mod tests {
req_method: &mut None,
h1_parser_config: Default::default(),
h1_max_headers: None,
- h1_header_read_timeout: None,
- h1_header_read_timeout_fut: &mut None,
- h1_header_read_timeout_running: &mut false,
- timer: Time::Empty,
preserve_header_case: false,
#[cfg(feature = "ffi")]
preserve_header_order: false,
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -2143,10 +2081,6 @@ mod tests {
req_method: &mut Some(Method::GET),
h1_parser_config: Default::default(),
h1_max_headers: None,
- h1_header_read_timeout: None,
- h1_header_read_timeout_fut: &mut None,
- h1_header_read_timeout_running: &mut false,
- timer: Time::Empty,
preserve_header_case: false,
#[cfg(feature = "ffi")]
preserve_header_order: false,
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -2168,10 +2102,6 @@ mod tests {
req_method: &mut Some(m),
h1_parser_config: Default::default(),
h1_max_headers: None,
- h1_header_read_timeout: None,
- h1_header_read_timeout_fut: &mut None,
- h1_header_read_timeout_running: &mut false,
- timer: Time::Empty,
preserve_header_case: false,
#[cfg(feature = "ffi")]
preserve_header_order: false,
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -2193,10 +2123,6 @@ mod tests {
req_method: &mut Some(Method::GET),
h1_parser_config: Default::default(),
h1_max_headers: None,
- h1_header_read_timeout: None,
- h1_header_read_timeout_fut: &mut None,
- h1_header_read_timeout_running: &mut false,
- timer: Time::Empty,
preserve_header_case: false,
#[cfg(feature = "ffi")]
preserve_header_order: false,
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -2756,10 +2682,6 @@ mod tests {
req_method: &mut Some(Method::GET),
h1_parser_config: Default::default(),
h1_max_headers: None,
- h1_header_read_timeout: None,
- h1_header_read_timeout_fut: &mut None,
- h1_header_read_timeout_running: &mut false,
- timer: Time::Empty,
preserve_header_case: false,
#[cfg(feature = "ffi")]
preserve_header_order: false,
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -2803,10 +2725,6 @@ mod tests {
req_method: &mut None,
h1_parser_config: Default::default(),
h1_max_headers: max_headers,
- h1_header_read_timeout: None,
- h1_header_read_timeout_fut: &mut None,
- h1_header_read_timeout_running: &mut false,
- timer: Time::Empty,
preserve_header_case: false,
#[cfg(feature = "ffi")]
preserve_header_order: false,
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -2831,10 +2749,6 @@ mod tests {
req_method: &mut None,
h1_parser_config: Default::default(),
h1_max_headers: max_headers,
- h1_header_read_timeout: None,
- h1_header_read_timeout_fut: &mut None,
- h1_header_read_timeout_running: &mut false,
- timer: Time::Empty,
preserve_header_case: false,
#[cfg(feature = "ffi")]
preserve_header_order: false,
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -2982,10 +2896,6 @@ mod tests {
req_method: &mut None,
h1_parser_config: Default::default(),
h1_max_headers: None,
- h1_header_read_timeout: None,
- h1_header_read_timeout_fut: &mut None,
- h1_header_read_timeout_running: &mut false,
- timer: Time::Empty,
preserve_header_case: false,
#[cfg(feature = "ffi")]
preserve_header_order: false,
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -3031,10 +2941,6 @@ mod tests {
req_method: &mut None,
h1_parser_config: Default::default(),
h1_max_headers: None,
- h1_header_read_timeout: None,
- h1_header_read_timeout_fut: &mut None,
- h1_header_read_timeout_running: &mut false,
- timer: Time::Empty,
preserve_header_case: false,
#[cfg(feature = "ffi")]
preserve_header_order: false,
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1508,6 +1508,27 @@ async fn header_read_timeout_slow_writes() {
conn.without_shutdown().await.expect_err("header timeout");
}
+#[tokio::test]
+async fn header_read_timeout_starts_immediately() {
+ let (listener, addr) = setup_tcp_listener();
+
+ thread::spawn(move || {
+ let mut tcp = connect(&addr);
+ thread::sleep(Duration::from_secs(3));
+ let mut buf = [0u8; 256];
+ let n = tcp.read(&mut buf).expect("read 1");
+ assert_eq!(n, 0); //eof
+ });
+
+ let (socket, _) = listener.accept().await.unwrap();
+ let socket = TokioIo::new(socket);
+ let conn = http1::Builder::new()
+ .timer(TokioTimer)
+ .header_read_timeout(Duration::from_secs(2))
+ .serve_connection(socket, unreachable_service());
+ conn.await.expect_err("header timeout");
+}
+
#[tokio::test]
async fn header_read_timeout_slow_writes_multiple_requests() {
let (listener, addr) = setup_tcp_listener();
|
hyperium/hyper
|
aa7ff605da3b706e855f9633b8dddeb9463217d4
|
[
"2925"
] |
0.3
|
9feb70e9249d9fb99634ec96f83566e6bb3b3128
|
diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml
--- a/.github/workflows/CI.yml
+++ b/.github/workflows/CI.yml
@@ -24,6 +24,7 @@ jobs:
- ffi
- ffi-header
- doc
+ - check-external-types
steps:
- run: exit 0
diff --git /dev/null b/.github/workflows/external-types.toml
new file mode 100644
--- /dev/null
+++ b/.github/workflows/external-types.toml
@@ -0,0 +1,15 @@
+allowed_external_types = [
+ "bytes::buf::buf_impl::Buf",
+ "bytes::bytes::Bytes",
+ "http::header",
+ "http::header::map::HeaderMap",
+ "http::method::Method",
+ "http::request::Request",
+ "http::response::Response",
+ "http::status::StatusCode",
+ "http::uri::Uri",
+ "http::version::Version",
+ "http_body::Body",
+ "http_body::frame::Frame",
+ "http_body::size_hint::SizeHint",
+]
|
Looks like that has been refactored into [cargo-check-external-types](https://github.com/awslabs/smithy-rs/tree/main/tools/cargo-check-external-types). I'll pick up this up.
I ran this tool against the master branch (982e6a51385). Any objections to these types being exposed and added to the allowlist, @seanmonstar? The `http` crate seems to be the only one listed that isn't stable/on a path towards stabilization.
- `bytes::buf::buf_impl::Buf`
- `bytes::bytes::Bytes`
- `http::header`
- `http::header::map::HeaderMap`
- `http::method::Method`
- `http::request::Request`
- `http::response::Response`
- `http::status::StatusCode`
- `http::uri::Uri`
- `http::version::Version`
- `http_body::Body`
- `http_body::frame::Frame`
- `http_body::size_hint::SizeHint`
- `tokio::io::async_read::AsyncRead`
- `tokio::io::async_write::AsyncWrite`
<details>
<summary> Full `cargo-check-external-types` output </summary>
<br>
| Crate | Type | Used In |
| --- | --- | --- |
| bytes | bytes::buf::buf_impl::Buf | src/body/mod.rs:17:16 |
| bytes | bytes::bytes::Bytes | src/body/incoming.rs:155:4 |
| bytes | bytes::bytes::Bytes | src/body/mod.rs:17:21 |
| bytes | bytes::bytes::Bytes | src/client/conn/http1.rs:46:4 |
| bytes | bytes::bytes::Bytes | src/ext/h1_reason_phrase.rs:110:0 |
| bytes | bytes::bytes::Bytes | src/ext/h1_reason_phrase.rs:57:4 |
| bytes | bytes::bytes::Bytes | src/ext/h1_reason_phrase.rs:98:0 |
| bytes | bytes::bytes::Bytes | src/server/conn/http1.rs:65:4 |
| bytes | bytes::bytes::Bytes | src/upgrade.rs:92:4 |
| http | http::header | src/lib.rs:61:22 |
| http | http::header::map::HeaderMap | src/lib.rs:64:0 |
| http | http::method::Method | src/lib.rs:61:30 |
| http | http::request::Request | src/client/conn/http1.rs:188:4 |
| http | http::request::Request | src/client/conn/http2.rs:133:4 |
| http | http::request::Request | src/lib.rs:61:38 |
| http | http::request::Request | src/service/util.rs:30:0 |
| http | http::response::Response | src/client/conn/http1.rs:188:4 |
| http | http::response::Response | src/client/conn/http2.rs:133:4 |
| http | http::response::Response | src/lib.rs:61:47 |
| http | http::status::StatusCode | src/lib.rs:61:57 |
| http | http::uri::Uri | src/lib.rs:61:69 |
| http | http::version::Version | src/lib.rs:61:74 |
| http_body | http_body::Body | src/body/incoming.rs:154:0 |
| http_body | http_body::Body | src/body/mod.rs:18:0 |
| http_body | http_body::Body | src/client/conn/http1.rs:121:0 |
| http_body | http_body::Body | src/client/conn/http1.rs:121:0 |
| http_body | http_body::Body | src/client/conn/http1.rs:168:0 |
| http_body | http_body::Body | src/client/conn/http1.rs:248:0 |
| http_body | http_body::Body | src/client/conn/http1.rs:258:0 |
| http_body | http_body::Body | src/client/conn/http1.rs:258:0 |
| http_body | http_body::Body | src/client/conn/http1.rs:505:4 |
| http_body | http_body::Body | src/client/conn/http1.rs:505:4 |
| http_body | http_body::Body | src/client/conn/http1.rs:55:0 |
| http_body | http_body::Body | src/client/conn/http1.rs:55:0 |
| http_body | http_body::Body | src/client/conn/http1.rs:63:0 |
| http_body | http_body::Body | src/client/conn/http1.rs:63:0 |
| http_body | http_body::Body | src/client/conn/http2.rs:113:0 |
| http_body | http_body::Body | src/client/conn/http2.rs:193:0 |
| http_body | http_body::Body | src/client/conn/http2.rs:193:0 |
| http_body | http_body::Body | src/client/conn/http2.rs:214:0 |
| http_body | http_body::Body | src/client/conn/http2.rs:224:0 |
| http_body | http_body::Body | src/client/conn/http2.rs:224:0 |
| http_body | http_body::Body | src/client/conn/http2.rs:38:0 |
| http_body | http_body::Body | src/client/conn/http2.rs:38:0 |
| http_body | http_body::Body | src/client/conn/http2.rs:402:4 |
| http_body | http_body::Body | src/client/conn/http2.rs:402:4 |
| http_body | http_body::Body | src/client/conn/http2.rs:60:0 |
| http_body | http_body::Body | src/client/conn/http2.rs:60:0 |
| http_body | http_body::Body | src/server/conn/http1.rs:172:0 |
| http_body | http_body::Body | src/server/conn/http1.rs:172:0 |
| http_body | http_body::Body | src/server/conn/http1.rs:21:0 |
| http_body | http_body::Body | src/server/conn/http1.rs:358:4 |
| http_body | http_body::Body | src/server/conn/http1.rs:82:0 |
| http_body | http_body::Body | src/server/conn/http1.rs:82:0 |
| http_body | http_body::Body | src/server/conn/http2.rs:18:0 |
| http_body | http_body::Body | src/server/conn/http2.rs:255:4 |
| http_body | http_body::Body | src/server/conn/http2.rs:255:4 |
| http_body | http_body::Body | src/server/conn/http2.rs:50:0 |
| http_body | http_body::Body | src/server/conn/http2.rs:50:0 |
| http_body | http_body::Body | src/server/conn/http2.rs:74:0 |
| http_body | http_body::Body | src/server/conn/http2.rs:74:0 |
| http_body | http_body::frame::Frame | src/body/mod.rs:19:0 |
| http_body | http_body::size_hint::SizeHint | src/body/mod.rs:20:0 |
| tokio | tokio::io::async_read::AsyncRead | src/client/conn/http1.rs:121:0 |
| tokio | tokio::io::async_read::AsyncRead | src/client/conn/http1.rs:248:0 |
| tokio | tokio::io::async_read::AsyncRead | src/client/conn/http1.rs:258:0 |
| tokio | tokio::io::async_read::AsyncRead | src/client/conn/http1.rs:505:4 |
| tokio | tokio::io::async_read::AsyncRead | src/client/conn/http1.rs:55:0 |
| tokio | tokio::io::async_read::AsyncRead | src/client/conn/http1.rs:63:0 |
| tokio | tokio::io::async_read::AsyncRead | src/client/conn/http2.rs:193:0 |
| tokio | tokio::io::async_read::AsyncRead | src/client/conn/http2.rs:214:0 |
| tokio | tokio::io::async_read::AsyncRead | src/client/conn/http2.rs:224:0 |
| tokio | tokio::io::async_read::AsyncRead | src/client/conn/http2.rs:38:0 |
| tokio | tokio::io::async_read::AsyncRead | src/client/conn/http2.rs:402:4 |
| tokio | tokio::io::async_read::AsyncRead | src/client/conn/http2.rs:60:0 |
| tokio | tokio::io::async_read::AsyncRead | src/server/conn/http1.rs:172:0 |
| tokio | tokio::io::async_read::AsyncRead | src/server/conn/http1.rs:358:4 |
| tokio | tokio::io::async_read::AsyncRead | src/server/conn/http1.rs:82:0 |
| tokio | tokio::io::async_read::AsyncRead | src/server/conn/http2.rs:255:4 |
| tokio | tokio::io::async_read::AsyncRead | src/server/conn/http2.rs:50:0 |
| tokio | tokio::io::async_read::AsyncRead | src/server/conn/http2.rs:74:0 |
| tokio | tokio::io::async_read::AsyncRead | src/upgrade.rs:136:4 |
| tokio | tokio::io::async_read::AsyncRead | src/upgrade.rs:151:0 |
| tokio | tokio::io::async_write::AsyncWrite | src/client/conn/http1.rs:121:0 |
| tokio | tokio::io::async_write::AsyncWrite | src/client/conn/http1.rs:248:0 |
| tokio | tokio::io::async_write::AsyncWrite | src/client/conn/http1.rs:258:0 |
| tokio | tokio::io::async_write::AsyncWrite | src/client/conn/http1.rs:505:4 |
| tokio | tokio::io::async_write::AsyncWrite | src/client/conn/http1.rs:55:0 |
| tokio | tokio::io::async_write::AsyncWrite | src/client/conn/http1.rs:63:0 |
| tokio | tokio::io::async_write::AsyncWrite | src/client/conn/http2.rs:193:0 |
| tokio | tokio::io::async_write::AsyncWrite | src/client/conn/http2.rs:214:0 |
| tokio | tokio::io::async_write::AsyncWrite | src/client/conn/http2.rs:224:0 |
| tokio | tokio::io::async_write::AsyncWrite | src/client/conn/http2.rs:38:0 |
| tokio | tokio::io::async_write::AsyncWrite | src/client/conn/http2.rs:402:4 |
| tokio | tokio::io::async_write::AsyncWrite | src/client/conn/http2.rs:60:0 |
| tokio | tokio::io::async_write::AsyncWrite | src/server/conn/http1.rs:172:0 |
| tokio | tokio::io::async_write::AsyncWrite | src/server/conn/http1.rs:358:4 |
| tokio | tokio::io::async_write::AsyncWrite | src/server/conn/http1.rs:82:0 |
| tokio | tokio::io::async_write::AsyncWrite | src/server/conn/http2.rs:255:4 |
| tokio | tokio::io::async_write::AsyncWrite | src/server/conn/http2.rs:50:0 |
| tokio | tokio::io::async_write::AsyncWrite | src/server/conn/http2.rs:74:0 |
| tokio | tokio::io::async_write::AsyncWrite | src/upgrade.rs:136:4 |
| tokio | tokio::io::async_write::AsyncWrite | src/upgrade.rs:161:0 |
</details>
Those are purposefully exposed, so yea, let's add em to the allowlist :)
|
2023-02-28T22:51:48Z
| 3,152
|
Set up CI to audit for public dependency types
We want strict control over what types from dependencies we expose, and there is a tool that can help us check that in CI: [cargo api-linter](https://github.com/awslabs/smithy-rs/tree/main/tools/api-linter).
We should start by adding a new CI job to `.github/workflows/CI.yml` to use it. Any questions about what goes in the config file can be clarified here or in a PR.
|
hyperium__hyper-3152
|
diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml
--- a/.github/workflows/CI.yml
+++ b/.github/workflows/CI.yml
@@ -210,3 +211,26 @@ jobs:
- name: cargo doc
run: cargo rustdoc --features full,ffi -- --cfg docsrs --cfg hyper_unstable_ffi -D broken-intra-doc-links
+
+ check-external-types:
+ name: Check exposed types
+ needs: [style, test]
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v3
+
+ - name: Install Rust
+ uses: dtolnay/rust-toolchain@master
+ with:
+ toolchain: nightly-2023-05-31 # Compatible version for cargo-check-external-types
+
+ - name: Install cargo-check-external-types
+ uses: actions-rs/install@v0.1
+ with:
+ crate: cargo-check-external-types
+ version: 0.1.7
+ use-tool-cache: true
+
+ - name: check-external-types
+ run: cargo check-external-types --config .github/workflows/external-types.toml
|
hyperium/hyper
|
6fd696e10974f10b2c6b9d16393bbbfa21c2333f
|
[
"3128"
] |
0.3
|
5bf16402f7a146417a9f997387401f9dc5a7d63d
|
diff --git a/benches/end_to_end.rs b/benches/end_to_end.rs
--- a/benches/end_to_end.rs
+++ b/benches/end_to_end.rs
@@ -316,11 +316,10 @@ impl Opts {
let mut client = rt.block_on(async {
if self.http2 {
let io = tokio::net::TcpStream::connect(&addr).await.unwrap();
- let (tx, conn) = hyper::client::conn::http2::Builder::new()
+ let (tx, conn) = hyper::client::conn::http2::Builder::new(support::TokioExecutor)
.initial_stream_window_size(self.http2_stream_window)
.initial_connection_window_size(self.http2_conn_window)
.adaptive_window(self.http2_adaptive_window)
- .executor(support::TokioExecutor)
.handshake(io)
.await
.unwrap();
diff --git a/src/client/conn/http2.rs b/src/client/conn/http2.rs
--- a/src/client/conn/http2.rs
+++ b/src/client/conn/http2.rs
@@ -57,16 +57,18 @@ pub struct Builder {
///
/// This is a shortcut for `Builder::new().handshake(io)`.
/// See [`client::conn`](crate::client::conn) for more.
-pub async fn handshake<T, B>(
+pub async fn handshake<E, T, B>(
+ exec: E,
io: T,
) -> crate::Result<(SendRequest<B>, Connection<T, B>)>
where
+ E: Executor<BoxSendFuture> + Send + Sync + 'static,
T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
B: Body + 'static,
B::Data: Send,
B::Error: Into<Box<dyn StdError + Send + Sync>>,
{
- Builder::new().handshake(io).await
+ Builder::new(exec).handshake(io).await
}
// ===== impl SendRequest
diff --git a/src/client/conn/http2.rs b/src/client/conn/http2.rs
--- a/src/client/conn/http2.rs
+++ b/src/client/conn/http2.rs
@@ -244,23 +246,17 @@ where
impl Builder {
/// Creates a new connection builder.
#[inline]
- pub fn new() -> Builder {
+ pub fn new<E>(exec: E) -> Builder
+ where
+ E: Executor<BoxSendFuture> + Send + Sync + 'static,
+ {
Builder {
- exec: Exec::Default,
+ exec: Exec::new(exec),
timer: Time::Empty,
h2_builder: Default::default(),
}
}
- /// Provide an executor to execute background HTTP2 tasks.
- pub fn executor<E>(&mut self, exec: E) -> &mut Builder
- where
- E: Executor<BoxSendFuture> + Send + Sync + 'static,
- {
- self.exec = Exec::Executor(Arc::new(exec));
- self
- }
-
/// Provide a timer to execute background HTTP2 tasks.
pub fn timer<M>(&mut self, timer: M) -> &mut Builder
where
diff --git a/src/common/exec.rs b/src/common/exec.rs
--- a/src/common/exec.rs
+++ b/src/common/exec.rs
@@ -16,30 +16,25 @@ pub trait ConnStreamExec<F, B: Body>: Clone {
pub(crate) type BoxSendFuture = Pin<Box<dyn Future<Output = ()> + Send>>;
-// Either the user provides an executor for background tasks, or we panic.
-// TODO: with the `runtime`feature, `Exec::Default` used `tokio::spawn`. With the
-// removal of the opt-in default runtime, this should be refactored.
+// Executor must be provided by the user
#[derive(Clone)]
-pub(crate) enum Exec {
- Default,
- Executor(Arc<dyn Executor<BoxSendFuture> + Send + Sync>),
-}
+pub(crate) struct Exec(Arc<dyn Executor<BoxSendFuture> + Send + Sync>);
// ===== impl Exec =====
impl Exec {
+ pub(crate) fn new<E>(exec: E) -> Self
+ where
+ E: Executor<BoxSendFuture> + Send + Sync + 'static,
+ {
+ Self(Arc::new(exec))
+ }
+
pub(crate) fn execute<F>(&self, fut: F)
where
F: Future<Output = ()> + Send + 'static,
{
- match *self {
- Exec::Default => {
- panic!("executor must be set");
- }
- Exec::Executor(ref e) => {
- e.execute(Box::pin(fut));
- }
- }
+ self.0.execute(Box::pin(fut))
}
}
diff --git a/src/ffi/client.rs b/src/ffi/client.rs
--- a/src/ffi/client.rs
+++ b/src/ffi/client.rs
@@ -55,8 +55,7 @@ ffi_fn! {
#[cfg(feature = "http2")]
{
if options.http2 {
- return conn::http2::Builder::new()
- .executor(options.exec.clone())
+ return conn::http2::Builder::new(options.exec.clone())
.handshake::<_, crate::body::Incoming>(io)
.await
.map(|(tx, conn)| {
|
Good catch, thank you!
|
2023-01-30T19:09:16Z
| 3,135
|
`http2::handshake()` is useless in 1.0 given no default executor
**Version**
hyper 1.0.0-rc2
**Description**
`hyper::client::conn::http2::handshake(io).await` causes an instant panic. Internally it performs `Builder::new().handshake(io).await` which uses the `Exec::Default` executor. There is no longer a default `tokio` runtime implied since the removal of the `runtime` feature.
https://github.com/hyperium/hyper/blob/master/src/common/exec.rs#L36-L38
There is a TODO in this file to refactor executors. But I couldn't find a relevant issue, so I wanted to open one to ensure this doesn't get included in the official release
|
hyperium__hyper-3135
|
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1922,8 +1922,7 @@ mod conn {
});
let io = tcp_connect(&addr).await.expect("tcp connect");
- let (mut client, conn) = conn::http2::Builder::new()
- .executor(TokioExecutor)
+ let (mut client, conn) = conn::http2::Builder::new(TokioExecutor)
.handshake(io)
.await
.expect("http handshake");
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1979,8 +1978,7 @@ mod conn {
});
let io = tcp_connect(&addr).await.expect("tcp connect");
- let (_client, conn) = conn::http2::Builder::new()
- .executor(TokioExecutor)
+ let (_client, conn) = conn::http2::Builder::new(TokioExecutor)
.timer(TokioTimer)
.keep_alive_interval(Duration::from_secs(1))
.keep_alive_timeout(Duration::from_secs(1))
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2008,8 +2006,7 @@ mod conn {
});
let io = tcp_connect(&addr).await.expect("tcp connect");
- let (mut client, conn) = conn::http2::Builder::new()
- .executor(TokioExecutor)
+ let (mut client, conn) = conn::http2::Builder::new(TokioExecutor)
.timer(TokioTimer)
.keep_alive_interval(Duration::from_secs(1))
.keep_alive_timeout(Duration::from_secs(1))
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2040,8 +2037,7 @@ mod conn {
});
let io = tcp_connect(&addr).await.expect("tcp connect");
- let (mut client, conn) = conn::http2::Builder::new()
- .executor(TokioExecutor)
+ let (mut client, conn) = conn::http2::Builder::new(TokioExecutor)
.timer(TokioTimer)
.keep_alive_interval(Duration::from_secs(1))
.keep_alive_timeout(Duration::from_secs(1))
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2100,8 +2096,7 @@ mod conn {
});
let io = tcp_connect(&addr).await.expect("tcp connect");
- let (mut client, conn) = conn::http2::Builder::new()
- .executor(TokioExecutor)
+ let (mut client, conn) = conn::http2::Builder::new(TokioExecutor)
.timer(TokioTimer)
.keep_alive_interval(Duration::from_secs(1))
.keep_alive_timeout(Duration::from_secs(1))
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2156,8 +2151,7 @@ mod conn {
});
let io = tcp_connect(&addr).await.expect("tcp connect");
- let (mut client, conn) = conn::http2::Builder::new()
- .executor(TokioExecutor)
+ let (mut client, conn) = conn::http2::Builder::new(TokioExecutor)
.handshake(io)
.await
.expect("http handshake");
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2207,8 +2201,7 @@ mod conn {
});
let io = tcp_connect(&addr).await.expect("tcp connect");
- let (mut client, conn) = conn::http2::Builder::new()
- .executor(TokioExecutor)
+ let (mut client, conn) = conn::http2::Builder::new(TokioExecutor)
.handshake::<_, Empty<Bytes>>(io)
.await
.expect("http handshake");
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2389,8 +2389,7 @@ async fn http2_keep_alive_with_responsive_client() {
});
let tcp = connect_async(addr).await;
- let (mut client, conn) = hyper::client::conn::http2::Builder::new()
- .executor(TokioExecutor)
+ let (mut client, conn) = hyper::client::conn::http2::Builder::new(TokioExecutor)
.handshake(tcp)
.await
.expect("http handshake");
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -3017,8 +3016,7 @@ impl TestClient {
.unwrap();
if self.http2_only {
- let (mut sender, conn) = hyper::client::conn::http2::Builder::new()
- .executor(TokioExecutor)
+ let (mut sender, conn) = hyper::client::conn::http2::Builder::new(TokioExecutor)
.handshake(stream)
.await
.unwrap();
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -427,8 +427,7 @@ async fn async_test(cfg: __TestConfig) {
let stream = TcpStream::connect(addr).await.unwrap();
let res = if http2_only {
- let (mut sender, conn) = hyper::client::conn::http2::Builder::new()
- .executor(TokioExecutor)
+ let (mut sender, conn) = hyper::client::conn::http2::Builder::new(TokioExecutor)
.handshake(stream)
.await
.unwrap();
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -526,11 +525,11 @@ async fn naive_proxy(cfg: ProxyConfig) -> (SocketAddr, impl Future<Output = ()>)
.unwrap();
let resp = if http2_only {
- let (mut sender, conn) = hyper::client::conn::http2::Builder::new()
- .executor(TokioExecutor)
- .handshake(stream)
- .await
- .unwrap();
+ let (mut sender, conn) =
+ hyper::client::conn::http2::Builder::new(TokioExecutor)
+ .handshake(stream)
+ .await
+ .unwrap();
tokio::task::spawn(async move {
if let Err(err) = conn.await {
|
hyperium/hyper
|
6fd696e10974f10b2c6b9d16393bbbfa21c2333f
|
[
"3027"
] |
0.3
|
d977f209bc6068d8f878b22803fc42d90c887fcc
|
diff --git a/benches/support/tokiort.rs b/benches/support/tokiort.rs
--- a/benches/support/tokiort.rs
+++ b/benches/support/tokiort.rs
@@ -41,6 +41,12 @@ impl Timer for TokioTimer {
inner: tokio::time::sleep_until(deadline.into()),
})
}
+
+ fn reset(&self, sleep: &mut Pin<Box<dyn Sleep>>, new_deadline: Instant) {
+ if let Some(sleep) = sleep.as_mut().downcast_mut_pin::<TokioSleep>() {
+ sleep.reset(new_deadline.into())
+ }
+ }
}
struct TokioTimeout<T> {
diff --git a/src/rt/mod.rs b/src/rt/mod.rs
--- a/src/rt/mod.rs
+++ b/src/rt/mod.rs
@@ -6,12 +6,9 @@
//! to plug in other runtimes.
pub mod bounds;
+mod timer;
-use std::{
- future::Future,
- pin::Pin,
- time::{Duration, Instant},
-};
+pub use timer::{Sleep, Timer};
/// An executor of futures.
///
diff --git a/src/rt/mod.rs b/src/rt/mod.rs
--- a/src/rt/mod.rs
+++ b/src/rt/mod.rs
@@ -39,20 +36,3 @@ pub trait Executor<Fut> {
/// Place the future into the executor to be run.
fn execute(&self, fut: Fut);
}
-
-/// A timer which provides timer-like functions.
-pub trait Timer {
- /// Return a future that resolves in `duration` time.
- fn sleep(&self, duration: Duration) -> Pin<Box<dyn Sleep>>;
-
- /// Return a future that resolves at `deadline`.
- fn sleep_until(&self, deadline: Instant) -> Pin<Box<dyn Sleep>>;
-
- /// Reset a future to resolve at `new_deadline` instead.
- fn reset(&self, sleep: &mut Pin<Box<dyn Sleep>>, new_deadline: Instant) {
- *sleep = self.sleep_until(new_deadline);
- }
-}
-
-/// A future returned by a `Timer`.
-pub trait Sleep: Send + Sync + Future<Output = ()> {}
diff --git /dev/null b/src/rt/timer.rs
new file mode 100644
--- /dev/null
+++ b/src/rt/timer.rs
@@ -0,0 +1,127 @@
+//! Provides a timer trait with timer-like functions
+//!
+//! Example using tokio timer:
+//! ```rust
+//! use std::{
+//! pin::Pin,
+//! task::{Context, Poll},
+//! time::{Duration, Instant},
+//! };
+//!
+//! use futures_util::Future;
+//! use pin_project_lite::pin_project;
+//! use hyper::rt::{Timer, Sleep};
+//!
+//! #[derive(Clone, Debug)]
+//! pub struct TokioTimer;
+//!
+//! impl Timer for TokioTimer {
+//! fn sleep(&self, duration: Duration) -> Pin<Box<dyn Sleep>> {
+//! Box::pin(TokioSleep {
+//! inner: tokio::time::sleep(duration),
+//! })
+//! }
+//!
+//! fn sleep_until(&self, deadline: Instant) -> Pin<Box<dyn Sleep>> {
+//! Box::pin(TokioSleep {
+//! inner: tokio::time::sleep_until(deadline.into()),
+//! })
+//! }
+//!
+//! fn reset(&self, sleep: &mut Pin<Box<dyn Sleep>>, new_deadline: Instant) {
+//! if let Some(sleep) = sleep.as_mut().downcast_mut_pin::<TokioSleep>() {
+//! sleep.reset(new_deadline.into())
+//! }
+//! }
+//! }
+//!
+//! pin_project! {
+//! pub(crate) struct TokioSleep {
+//! #[pin]
+//! pub(crate) inner: tokio::time::Sleep,
+//! }
+//! }
+//!
+//! impl Future for TokioSleep {
+//! type Output = ();
+//!
+//! fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+//! self.project().inner.poll(cx)
+//! }
+//! }
+//!
+//! impl Sleep for TokioSleep {}
+//!
+//! impl TokioSleep {
+//! pub fn reset(self: Pin<&mut Self>, deadline: Instant) {
+//! self.project().inner.as_mut().reset(deadline.into());
+//! }
+//! }
+//! ````
+
+use std::{
+ any::TypeId,
+ future::Future,
+ pin::Pin,
+ time::{Duration, Instant},
+};
+
+/// A timer which provides timer-like functions.
+pub trait Timer {
+ /// Return a future that resolves in `duration` time.
+ fn sleep(&self, duration: Duration) -> Pin<Box<dyn Sleep>>;
+
+ /// Return a future that resolves at `deadline`.
+ fn sleep_until(&self, deadline: Instant) -> Pin<Box<dyn Sleep>>;
+
+ /// Reset a future to resolve at `new_deadline` instead.
+ fn reset(&self, sleep: &mut Pin<Box<dyn Sleep>>, new_deadline: Instant) {
+ *sleep = self.sleep_until(new_deadline);
+ }
+}
+
+/// A future returned by a `Timer`.
+pub trait Sleep: Send + Sync + Future<Output = ()> {
+ #[doc(hidden)]
+ /// This method is private and can not be implemented by downstream crate
+ fn __type_id(&self, _: private::Sealed) -> TypeId
+ where
+ Self: 'static,
+ {
+ TypeId::of::<Self>()
+ }
+}
+
+impl dyn Sleep {
+ //! This is a re-implementation of downcast methods from std::any::Any
+
+ /// Check whether the type is the same as `T`
+ pub fn is<T>(&self) -> bool
+ where
+ T: Sleep + 'static,
+ {
+ self.__type_id(private::Sealed {}) == TypeId::of::<T>()
+ }
+
+ /// Downcast a pinned &mut Sleep object to its original type
+ pub fn downcast_mut_pin<T>(self: Pin<&mut Self>) -> Option<Pin<&'static mut T>>
+ where
+ T: Sleep + 'static,
+ {
+ if self.is::<T>() {
+ unsafe {
+ let inner = Pin::into_inner_unchecked(self);
+ Some(Pin::new_unchecked(
+ &mut *(&mut *inner as *mut dyn Sleep as *mut T),
+ ))
+ }
+ } else {
+ None
+ }
+ }
+}
+
+mod private {
+ #![allow(missing_debug_implementations)]
+ pub struct Sealed {}
+}
|
Sigh, yea, I forgot about this. And Rust still hasn't made this any easier; we can't just say `trait Sleep: Any`. So, let's see, I think these are the options:
- Copy the methods from `Any` into `Sleep`, which allows checking `type_id` and then calling `downcast_mut`. To make it safe, it probably needs to implement those methods in a blanket impl. Do we just `impl<F: Future> Sleep for F {}`?
- Maybe we can move the method to `trait Sleep { fn reset(self: Pin<&mut Self>, timer: &dyn Timer) {}`? That might just require having downcast methods on `Timer` instead...
- Ugh.
`Sleep: Any` won't handle it, but you can manually add downcasting to the Sleep trait.
|
2023-01-23T17:54:48Z
| 3,125
|
Timer::reset can't soundly have an optimized implementation
In 1.0.0-rc.1, the new `Timer` trait has a method to reset an existing `Sleep` to a new deadline. The default implementation simply creates a new `Sleep` and writes it over the old one, but presumably this method exists so it can be overridden with a better implementation when working with a timer like Tokio's that can have its deadline dynamically adjusted.
However, since the `Sleep` is provided as a `dyn Sleep` and `Sleep` has no downcasting APIs, there is no safe way to confirm that the `Sleep` instance is one created by this `Timer` implementation. That effectively means that the current default implementation is the only sound way of implementing the method.
|
hyperium__hyper-3125
|
diff --git a/benches/support/tokiort.rs b/benches/support/tokiort.rs
--- a/benches/support/tokiort.rs
+++ b/benches/support/tokiort.rs
@@ -75,7 +81,10 @@ impl Future for TokioSleep {
}
}
-// Use HasSleep to get tokio::time::Sleep to implement Unpin.
-// see https://docs.rs/tokio/latest/tokio/time/struct.Sleep.html
-
impl Sleep for TokioSleep {}
+
+impl TokioSleep {
+ pub fn reset(self: Pin<&mut Self>, deadline: Instant) {
+ self.project().inner.as_mut().reset(deadline.into());
+ }
+}
|
hyperium/hyper
|
6fd696e10974f10b2c6b9d16393bbbfa21c2333f
|
[
"3079"
] |
0.14
|
0368a41a6cc1a5c6f1eada0d88e38b7dce261587
|
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -58,6 +58,11 @@ use crate::error::{Kind, Parse};
#[cfg(feature = "http1")]
use crate::upgrade::Upgraded;
+#[cfg(all(feature = "backports", feature = "http1"))]
+pub mod http1;
+#[cfg(all(feature = "backports", feature = "http2"))]
+pub mod http2;
+
cfg_feature! {
#![any(feature = "http1", feature = "http2")]
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -327,7 +332,7 @@ impl<E> Http<E> {
self
}
- /// Set a timeout for reading client request headers. If a client does not
+ /// Set a timeout for reading client request headers. If a client does not
/// transmit the entire header within this time, the connection is closed.
///
/// Default is None.
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -809,7 +814,12 @@ where
let mut conn = Some(self);
futures_util::future::poll_fn(move |cx| {
ready!(conn.as_mut().unwrap().poll_without_shutdown(cx))?;
- Poll::Ready(conn.take().unwrap().try_into_parts().ok_or_else(crate::Error::new_without_shutdown_not_h1))
+ Poll::Ready(
+ conn.take()
+ .unwrap()
+ .try_into_parts()
+ .ok_or_else(crate::Error::new_without_shutdown_not_h1),
+ )
})
}
diff --git /dev/null b/src/server/conn/http1.rs
new file mode 100644
--- /dev/null
+++ b/src/server/conn/http1.rs
@@ -0,0 +1,446 @@
+//! HTTP/1 Server Connections
+
+use std::error::Error as StdError;
+use std::fmt;
+use std::time::Duration;
+
+use bytes::Bytes;
+use tokio::io::{AsyncRead, AsyncWrite};
+
+use crate::body::{Body as IncomingBody, HttpBody as Body};
+use crate::common::{task, Future, Pin, Poll, Unpin};
+use crate::proto;
+use crate::service::HttpService;
+
+type Http1Dispatcher<T, B, S> = proto::h1::Dispatcher<
+ proto::h1::dispatch::Server<S, IncomingBody>,
+ B,
+ T,
+ proto::ServerTransaction,
+>;
+
+pin_project_lite::pin_project! {
+ /// A future binding an http1 connection with a Service.
+ ///
+ /// Polling this future will drive HTTP forward.
+ #[must_use = "futures do nothing unless polled"]
+ pub struct Connection<T, S>
+ where
+ S: HttpService<IncomingBody>,
+ {
+ conn: Http1Dispatcher<T, S::ResBody, S>,
+ }
+}
+
+/// A configuration builder for HTTP/1 server connections.
+#[derive(Clone, Debug)]
+pub struct Builder {
+ h1_half_close: bool,
+ h1_keep_alive: bool,
+ h1_title_case_headers: bool,
+ h1_preserve_header_case: bool,
+ h1_header_read_timeout: Option<Duration>,
+ h1_writev: Option<bool>,
+ max_buf_size: Option<usize>,
+ pipeline_flush: bool,
+}
+
+/// Deconstructed parts of a `Connection`.
+///
+/// This allows taking apart a `Connection` at a later time, in order to
+/// reclaim the IO object, and additional related pieces.
+#[derive(Debug)]
+pub struct Parts<T, S> {
+ /// The original IO object used in the handshake.
+ pub io: T,
+ /// A buffer of bytes that have been read but not processed as HTTP.
+ ///
+ /// If the client sent additional bytes after its last request, and
+ /// this connection "ended" with an upgrade, the read buffer will contain
+ /// those bytes.
+ ///
+ /// You will want to check for any existing bytes if you plan to continue
+ /// communicating on the IO object.
+ pub read_buf: Bytes,
+ /// The `Service` used to serve this connection.
+ pub service: S,
+ _inner: (),
+}
+
+// ===== impl Connection =====
+
+impl<I, S> fmt::Debug for Connection<I, S>
+where
+ S: HttpService<IncomingBody>,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Connection").finish()
+ }
+}
+
+impl<I, B, S> Connection<I, S>
+where
+ S: HttpService<IncomingBody, ResBody = B>,
+ S::Error: Into<Box<dyn StdError + Send + Sync>>,
+ I: AsyncRead + AsyncWrite + Unpin,
+ B: Body + 'static,
+ B::Error: Into<Box<dyn StdError + Send + Sync>>,
+{
+ /// Start a graceful shutdown process for this connection.
+ ///
+ /// This `Connection` should continue to be polled until shutdown
+ /// can finish.
+ ///
+ /// # Note
+ ///
+ /// This should only be called while the `Connection` future is still
+ /// pending. If called after `Connection::poll` has resolved, this does
+ /// nothing.
+ pub fn graceful_shutdown(mut self: Pin<&mut Self>) {
+ self.conn.disable_keep_alive();
+ }
+
+ /// Return the inner IO object, and additional information.
+ ///
+ /// If the IO object has been "rewound" the io will not contain those bytes rewound.
+ /// This should only be called after `poll_without_shutdown` signals
+ /// that the connection is "done". Otherwise, it may not have finished
+ /// flushing all necessary HTTP bytes.
+ ///
+ /// # Panics
+ /// This method will panic if this connection is using an h2 protocol.
+ pub fn into_parts(self) -> Parts<I, S> {
+ let (io, read_buf, dispatch) = self.conn.into_inner();
+ Parts {
+ io,
+ read_buf,
+ service: dispatch.into_service(),
+ _inner: (),
+ }
+ }
+
+ /// Poll the connection for completion, but without calling `shutdown`
+ /// on the underlying IO.
+ ///
+ /// This is useful to allow running a connection while doing an HTTP
+ /// upgrade. Once the upgrade is completed, the connection would be "done",
+ /// but it is not desired to actually shutdown the IO object. Instead you
+ /// would take it back using `into_parts`.
+ pub fn poll_without_shutdown(&mut self, cx: &mut task::Context<'_>) -> Poll<crate::Result<()>>
+ where
+ S: Unpin,
+ S::Future: Unpin,
+ B: Unpin,
+ {
+ self.conn.poll_without_shutdown(cx)
+ }
+
+ /// Prevent shutdown of the underlying IO object at the end of service the request,
+ /// instead run `into_parts`. This is a convenience wrapper over `poll_without_shutdown`.
+ ///
+ /// # Error
+ ///
+ /// This errors if the underlying connection protocol is not HTTP/1.
+ pub fn without_shutdown(self) -> impl Future<Output = crate::Result<Parts<I, S>>>
+ where
+ S: Unpin,
+ S::Future: Unpin,
+ B: Unpin,
+ {
+ let mut zelf = Some(self);
+ futures_util::future::poll_fn(move |cx| {
+ ready!(zelf.as_mut().unwrap().conn.poll_without_shutdown(cx))?;
+ Poll::Ready(Ok(zelf.take().unwrap().into_parts()))
+ })
+ }
+
+ /// Enable this connection to support higher-level HTTP upgrades.
+ ///
+ /// See [the `upgrade` module](crate::upgrade) for more.
+ pub fn with_upgrades(self) -> upgrades::UpgradeableConnection<I, S>
+ where
+ I: Send,
+ {
+ upgrades::UpgradeableConnection { inner: Some(self) }
+ }
+}
+
+impl<I, B, S> Future for Connection<I, S>
+where
+ S: HttpService<IncomingBody, ResBody = B>,
+ S::Error: Into<Box<dyn StdError + Send + Sync>>,
+ I: AsyncRead + AsyncWrite + Unpin + 'static,
+ B: Body + 'static,
+ B::Error: Into<Box<dyn StdError + Send + Sync>>,
+{
+ type Output = crate::Result<()>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
+ match ready!(Pin::new(&mut self.conn).poll(cx)) {
+ Ok(done) => {
+ match done {
+ proto::Dispatched::Shutdown => {}
+ proto::Dispatched::Upgrade(pending) => {
+ // With no `Send` bound on `I`, we can't try to do
+ // upgrades here. In case a user was trying to use
+ // `Body::on_upgrade` with this API, send a special
+ // error letting them know about that.
+ pending.manual();
+ }
+ };
+ return Poll::Ready(Ok(()));
+ }
+ Err(e) => Poll::Ready(Err(e)),
+ }
+ }
+}
+
+// ===== impl Builder =====
+
+impl Builder {
+ /// Create a new connection builder.
+ pub fn new() -> Self {
+ Self {
+ h1_half_close: false,
+ h1_keep_alive: true,
+ h1_title_case_headers: false,
+ h1_preserve_header_case: false,
+ h1_header_read_timeout: None,
+ h1_writev: None,
+ max_buf_size: None,
+ pipeline_flush: false,
+ }
+ }
+ /// Set whether HTTP/1 connections should support half-closures.
+ ///
+ /// Clients can chose to shutdown their write-side while waiting
+ /// for the server to respond. Setting this to `true` will
+ /// prevent closing the connection immediately if `read`
+ /// detects an EOF in the middle of a request.
+ ///
+ /// Default is `false`.
+ pub fn half_close(&mut self, val: bool) -> &mut Self {
+ self.h1_half_close = val;
+ self
+ }
+
+ /// Enables or disables HTTP/1 keep-alive.
+ ///
+ /// Default is true.
+ pub fn keep_alive(&mut self, val: bool) -> &mut Self {
+ self.h1_keep_alive = val;
+ self
+ }
+
+ /// Set whether HTTP/1 connections will write header names as title case at
+ /// the socket level.
+ ///
+ /// Default is false.
+ pub fn title_case_headers(&mut self, enabled: bool) -> &mut Self {
+ self.h1_title_case_headers = enabled;
+ self
+ }
+
+ /// Set whether to support preserving original header cases.
+ ///
+ /// Currently, this will record the original cases received, and store them
+ /// in a private extension on the `Request`. It will also look for and use
+ /// such an extension in any provided `Response`.
+ ///
+ /// Since the relevant extension is still private, there is no way to
+ /// interact with the original cases. The only effect this can have now is
+ /// to forward the cases in a proxy-like fashion.
+ ///
+ /// Default is false.
+ pub fn preserve_header_case(&mut self, enabled: bool) -> &mut Self {
+ self.h1_preserve_header_case = enabled;
+ self
+ }
+
+ /// Set a timeout for reading client request headers. If a client does not
+ /// transmit the entire header within this time, the connection is closed.
+ ///
+ /// Default is None.
+ pub fn header_read_timeout(&mut self, read_timeout: Duration) -> &mut Self {
+ self.h1_header_read_timeout = Some(read_timeout);
+ self
+ }
+
+ /// Set whether HTTP/1 connections should try to use vectored writes,
+ /// or always flatten into a single buffer.
+ ///
+ /// Note that setting this to false may mean more copies of body data,
+ /// but may also improve performance when an IO transport doesn't
+ /// support vectored writes well, such as most TLS implementations.
+ ///
+ /// Setting this to true will force hyper to use queued strategy
+ /// which may eliminate unnecessary cloning on some TLS backends
+ ///
+ /// Default is `auto`. In this mode hyper will try to guess which
+ /// mode to use
+ pub fn writev(&mut self, val: bool) -> &mut Self {
+ self.h1_writev = Some(val);
+ self
+ }
+
+ /// Set the maximum buffer size for the connection.
+ ///
+ /// Default is ~400kb.
+ ///
+ /// # Panics
+ ///
+ /// The minimum value allowed is 8192. This method panics if the passed `max` is less than the minimum.
+ pub fn max_buf_size(&mut self, max: usize) -> &mut Self {
+ assert!(
+ max >= proto::h1::MINIMUM_MAX_BUFFER_SIZE,
+ "the max_buf_size cannot be smaller than the minimum that h1 specifies."
+ );
+ self.max_buf_size = Some(max);
+ self
+ }
+
+ /// Aggregates flushes to better support pipelined responses.
+ ///
+ /// Experimental, may have bugs.
+ ///
+ /// Default is false.
+ pub fn pipeline_flush(&mut self, enabled: bool) -> &mut Self {
+ self.pipeline_flush = enabled;
+ self
+ }
+
+ // /// Set the timer used in background tasks.
+ // pub fn timer<M>(&mut self, timer: M) -> &mut Self
+ // where
+ // M: Timer + Send + Sync + 'static,
+ // {
+ // self.timer = Time::Timer(Arc::new(timer));
+ // self
+ // }
+
+ /// Bind a connection together with a [`Service`](crate::service::Service).
+ ///
+ /// This returns a Future that must be polled in order for HTTP to be
+ /// driven on the connection.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # use hyper::{Body as Incoming, Request, Response};
+ /// # use hyper::service::Service;
+ /// # use hyper::server::conn::http1::Builder;
+ /// # use tokio::io::{AsyncRead, AsyncWrite};
+ /// # async fn run<I, S>(some_io: I, some_service: S)
+ /// # where
+ /// # I: AsyncRead + AsyncWrite + Unpin + Send + 'static,
+ /// # S: Service<hyper::Request<Incoming>, Response=hyper::Response<Incoming>> + Send + 'static,
+ /// # S::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
+ /// # S::Future: Send,
+ /// # {
+ /// let http = Builder::new();
+ /// let conn = http.serve_connection(some_io, some_service);
+ ///
+ /// if let Err(e) = conn.await {
+ /// eprintln!("server connection error: {}", e);
+ /// }
+ /// # }
+ /// # fn main() {}
+ /// ```
+ pub fn serve_connection<I, S>(&self, io: I, service: S) -> Connection<I, S>
+ where
+ S: HttpService<IncomingBody>,
+ S::Error: Into<Box<dyn StdError + Send + Sync>>,
+ S::ResBody: 'static,
+ <S::ResBody as Body>::Error: Into<Box<dyn StdError + Send + Sync>>,
+ I: AsyncRead + AsyncWrite + Unpin,
+ {
+ let mut conn = proto::Conn::new(io);
+ if !self.h1_keep_alive {
+ conn.disable_keep_alive();
+ }
+ if self.h1_half_close {
+ conn.set_allow_half_close();
+ }
+ if self.h1_title_case_headers {
+ conn.set_title_case_headers();
+ }
+ if self.h1_preserve_header_case {
+ conn.set_preserve_header_case();
+ }
+ if let Some(header_read_timeout) = self.h1_header_read_timeout {
+ conn.set_http1_header_read_timeout(header_read_timeout);
+ }
+ if let Some(writev) = self.h1_writev {
+ if writev {
+ conn.set_write_strategy_queue();
+ } else {
+ conn.set_write_strategy_flatten();
+ }
+ }
+ conn.set_flush_pipeline(self.pipeline_flush);
+ if let Some(max) = self.max_buf_size {
+ conn.set_max_buf_size(max);
+ }
+ let sd = proto::h1::dispatch::Server::new(service);
+ let proto = proto::h1::Dispatcher::new(sd, conn);
+ Connection { conn: proto }
+ }
+}
+
+mod upgrades {
+ use crate::upgrade::Upgraded;
+
+ use super::*;
+
+ // A future binding a connection with a Service with Upgrade support.
+ //
+ // This type is unnameable outside the crate.
+ #[must_use = "futures do nothing unless polled"]
+ #[allow(missing_debug_implementations)]
+ pub struct UpgradeableConnection<T, S>
+ where
+ S: HttpService<IncomingBody>,
+ {
+ pub(super) inner: Option<Connection<T, S>>,
+ }
+
+ impl<I, B, S> UpgradeableConnection<I, S>
+ where
+ S: HttpService<IncomingBody, ResBody = B>,
+ S::Error: Into<Box<dyn StdError + Send + Sync>>,
+ I: AsyncRead + AsyncWrite + Unpin,
+ B: Body + 'static,
+ B::Error: Into<Box<dyn StdError + Send + Sync>>,
+ {
+ /// Start a graceful shutdown process for this connection.
+ ///
+ /// This `Connection` should continue to be polled until shutdown
+ /// can finish.
+ pub fn graceful_shutdown(mut self: Pin<&mut Self>) {
+ Pin::new(self.inner.as_mut().unwrap()).graceful_shutdown()
+ }
+ }
+
+ impl<I, B, S> Future for UpgradeableConnection<I, S>
+ where
+ S: HttpService<IncomingBody, ResBody = B>,
+ S::Error: Into<Box<dyn StdError + Send + Sync>>,
+ I: AsyncRead + AsyncWrite + Unpin + Send + 'static,
+ B: Body + 'static,
+ B::Error: Into<Box<dyn StdError + Send + Sync>>,
+ {
+ type Output = crate::Result<()>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
+ match ready!(Pin::new(&mut self.inner.as_mut().unwrap().conn).poll(cx)) {
+ Ok(proto::Dispatched::Shutdown) => Poll::Ready(Ok(())),
+ Ok(proto::Dispatched::Upgrade(pending)) => {
+ let (io, buf, _) = self.inner.take().unwrap().conn.into_inner();
+ pending.fulfill(Upgraded::new(io, buf));
+ Poll::Ready(Ok(()))
+ }
+ Err(e) => Poll::Ready(Err(e)),
+ }
+ }
+ }
+}
diff --git /dev/null b/src/server/conn/http2.rs
new file mode 100644
--- /dev/null
+++ b/src/server/conn/http2.rs
@@ -0,0 +1,257 @@
+//! HTTP/2 Server Connections
+
+use std::error::Error as StdError;
+use std::fmt;
+use std::time::Duration;
+
+use pin_project_lite::pin_project;
+use tokio::io::{AsyncRead, AsyncWrite};
+
+use crate::body::{Body as IncomingBody, HttpBody as Body};
+use crate::common::exec::ConnStreamExec;
+use crate::common::{task, Future, Pin, Poll, Unpin};
+use crate::proto;
+use crate::service::HttpService;
+
+pin_project! {
+ /// A future binding an HTTP/2 connection with a Service.
+ ///
+ /// Polling this future will drive HTTP forward.
+ #[must_use = "futures do nothing unless polled"]
+ pub struct Connection<T, S, E>
+ where
+ S: HttpService<IncomingBody>,
+ {
+ conn: proto::h2::Server<T, S, S::ResBody, E>,
+ }
+}
+
+/// A configuration builder for HTTP/2 server connections.
+#[derive(Clone, Debug)]
+pub struct Builder<E> {
+ exec: E,
+ h2_builder: proto::h2::server::Config,
+}
+
+// ===== impl Connection =====
+
+impl<I, S, E> fmt::Debug for Connection<I, S, E>
+where
+ S: HttpService<IncomingBody>,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Connection").finish()
+ }
+}
+
+impl<I, B, S, E> Connection<I, S, E>
+where
+ S: HttpService<IncomingBody, ResBody = B>,
+ S::Error: Into<Box<dyn StdError + Send + Sync>>,
+ I: AsyncRead + AsyncWrite + Unpin,
+ B: Body + 'static,
+ B::Error: Into<Box<dyn StdError + Send + Sync>>,
+ E: ConnStreamExec<S::Future, B>,
+{
+ /// Start a graceful shutdown process for this connection.
+ ///
+ /// This `Connection` should continue to be polled until shutdown
+ /// can finish.
+ ///
+ /// # Note
+ ///
+ /// This should only be called while the `Connection` future is still
+ /// pending. If called after `Connection::poll` has resolved, this does
+ /// nothing.
+ pub fn graceful_shutdown(mut self: Pin<&mut Self>) {
+ self.conn.graceful_shutdown();
+ }
+}
+
+impl<I, B, S, E> Future for Connection<I, S, E>
+where
+ S: HttpService<IncomingBody, ResBody = B>,
+ S::Error: Into<Box<dyn StdError + Send + Sync>>,
+ I: AsyncRead + AsyncWrite + Unpin + 'static,
+ B: Body + 'static,
+ B::Error: Into<Box<dyn StdError + Send + Sync>>,
+ E: ConnStreamExec<S::Future, B>,
+{
+ type Output = crate::Result<()>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
+ match ready!(Pin::new(&mut self.conn).poll(cx)) {
+ Ok(_done) => {
+ //TODO: the proto::h2::Server no longer needs to return
+ //the Dispatched enum
+ Poll::Ready(Ok(()))
+ }
+ Err(e) => Poll::Ready(Err(e)),
+ }
+ }
+}
+
+// ===== impl Builder =====
+
+impl<E> Builder<E> {
+ /// Create a new connection builder.
+ ///
+ /// This starts with the default options, and an executor.
+ pub fn new(exec: E) -> Self {
+ Self {
+ exec: exec,
+ h2_builder: Default::default(),
+ }
+ }
+
+ /// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2
+ /// stream-level flow control.
+ ///
+ /// Passing `None` will do nothing.
+ ///
+ /// If not set, hyper will use a default.
+ ///
+ /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_INITIAL_WINDOW_SIZE
+ pub fn initial_stream_window_size(&mut self, sz: impl Into<Option<u32>>) -> &mut Self {
+ if let Some(sz) = sz.into() {
+ self.h2_builder.adaptive_window = false;
+ self.h2_builder.initial_stream_window_size = sz;
+ }
+ self
+ }
+
+ /// Sets the max connection-level flow control for HTTP2.
+ ///
+ /// Passing `None` will do nothing.
+ ///
+ /// If not set, hyper will use a default.
+ pub fn initial_connection_window_size(&mut self, sz: impl Into<Option<u32>>) -> &mut Self {
+ if let Some(sz) = sz.into() {
+ self.h2_builder.adaptive_window = false;
+ self.h2_builder.initial_conn_window_size = sz;
+ }
+ self
+ }
+
+ /// Sets whether to use an adaptive flow control.
+ ///
+ /// Enabling this will override the limits set in
+ /// `initial_stream_window_size` and
+ /// `initial_connection_window_size`.
+ pub fn adaptive_window(&mut self, enabled: bool) -> &mut Self {
+ use proto::h2::SPEC_WINDOW_SIZE;
+
+ self.h2_builder.adaptive_window = enabled;
+ if enabled {
+ self.h2_builder.initial_conn_window_size = SPEC_WINDOW_SIZE;
+ self.h2_builder.initial_stream_window_size = SPEC_WINDOW_SIZE;
+ }
+ self
+ }
+
+ /// Sets the maximum frame size to use for HTTP2.
+ ///
+ /// Passing `None` will do nothing.
+ ///
+ /// If not set, hyper will use a default.
+ pub fn max_frame_size(&mut self, sz: impl Into<Option<u32>>) -> &mut Self {
+ if let Some(sz) = sz.into() {
+ self.h2_builder.max_frame_size = sz;
+ }
+ self
+ }
+
+ /// Sets the [`SETTINGS_MAX_CONCURRENT_STREAMS`][spec] option for HTTP2
+ /// connections.
+ ///
+ /// Default is no limit (`std::u32::MAX`). Passing `None` will do nothing.
+ ///
+ /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_MAX_CONCURRENT_STREAMS
+ pub fn max_concurrent_streams(&mut self, max: impl Into<Option<u32>>) -> &mut Self {
+ self.h2_builder.max_concurrent_streams = max.into();
+ self
+ }
+
+ /// Sets an interval for HTTP2 Ping frames should be sent to keep a
+ /// connection alive.
+ ///
+ /// Pass `None` to disable HTTP2 keep-alive.
+ ///
+ /// Default is currently disabled.
+ ///
+ /// # Cargo Feature
+ ///
+ pub fn keep_alive_interval(&mut self, interval: impl Into<Option<Duration>>) -> &mut Self {
+ self.h2_builder.keep_alive_interval = interval.into();
+ self
+ }
+
+ /// Sets a timeout for receiving an acknowledgement of the keep-alive ping.
+ ///
+ /// If the ping is not acknowledged within the timeout, the connection will
+ /// be closed. Does nothing if `keep_alive_interval` is disabled.
+ ///
+ /// Default is 20 seconds.
+ ///
+ /// # Cargo Feature
+ ///
+ pub fn keep_alive_timeout(&mut self, timeout: Duration) -> &mut Self {
+ self.h2_builder.keep_alive_timeout = timeout;
+ self
+ }
+
+ /// Set the maximum write buffer size for each HTTP/2 stream.
+ ///
+ /// Default is currently ~400KB, but may change.
+ ///
+ /// # Panics
+ ///
+ /// The value must be no larger than `u32::MAX`.
+ pub fn max_send_buf_size(&mut self, max: usize) -> &mut Self {
+ assert!(max <= std::u32::MAX as usize);
+ self.h2_builder.max_send_buffer_size = max;
+ self
+ }
+
+ /// Enables the [extended CONNECT protocol].
+ ///
+ /// [extended CONNECT protocol]: https://datatracker.ietf.org/doc/html/rfc8441#section-4
+ pub fn enable_connect_protocol(&mut self) -> &mut Self {
+ self.h2_builder.enable_connect_protocol = true;
+ self
+ }
+
+ /// Sets the max size of received header frames.
+ ///
+ /// Default is currently ~16MB, but may change.
+ pub fn max_header_list_size(&mut self, max: u32) -> &mut Self {
+ self.h2_builder.max_header_list_size = max;
+ self
+ }
+
+ // /// Set the timer used in background tasks.
+ // pub fn timer<M>(&mut self, timer: M) -> &mut Self
+ // where
+ // M: Timer + Send + Sync + 'static,
+ // {
+ // self.timer = Time::Timer(Arc::new(timer));
+ // self
+ // }
+
+ /// Bind a connection together with a [`Service`](crate::service::Service).
+ ///
+ /// This returns a Future that must be polled in order for HTTP to be
+ /// driven on the connection.
+ pub fn serve_connection<S, I, Bd>(&self, io: I, service: S) -> Connection<I, S, E>
+ where
+ S: HttpService<IncomingBody, ResBody = Bd>,
+ S::Error: Into<Box<dyn StdError + Send + Sync>>,
+ Bd: Body + 'static,
+ Bd::Error: Into<Box<dyn StdError + Send + Sync>>,
+ I: AsyncRead + AsyncWrite + Unpin,
+ E: ConnStreamExec<S::Future, Bd>,
+ {
+ let proto = proto::h2::Server::new(io, service, &self.h2_builder, self.exec.clone());
+ Connection { conn: proto }
+ }
+}
|
2022-12-27T22:14:51Z
| 3,102
|
Backport the split server conn modules
In 1.0, we've split hyper::server::conn::Connection into per-version types. To ease upgrading (see https://github.com/hyperium/hyper/issues/3052), we can backport the addition of the two modules, hyper::server::conn::{http1, http2}. With them in place, we could then add a deprecation to hyper::server::conn::Connection.
This is very similar in scope to #3053
|
hyperium__hyper-3102
|
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2641,6 +2641,144 @@ async fn http2_keep_alive_count_server_pings() {
.expect("timed out waiting for pings");
}
+// Tests for backported 1.0 APIs
+mod backports {
+ use super::*;
+ use hyper::server::conn::{http1, http2};
+
+ #[tokio::test]
+ async fn http_connect() {
+ let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
+ let addr = listener.local_addr().unwrap();
+
+ let (tx, rx) = oneshot::channel();
+
+ thread::spawn(move || {
+ let mut tcp = connect(&addr);
+ tcp.write_all(
+ b"\
+ CONNECT localhost:80 HTTP/1.1\r\n\
+ \r\n\
+ eagerly optimistic\
+ ",
+ )
+ .expect("write 1");
+ let mut buf = [0; 256];
+ tcp.read(&mut buf).expect("read 1");
+
+ let expected = "HTTP/1.1 200 OK\r\n";
+ assert_eq!(s(&buf[..expected.len()]), expected);
+ let _ = tx.send(());
+
+ let n = tcp.read(&mut buf).expect("read 2");
+ assert_eq!(s(&buf[..n]), "foo=bar");
+ tcp.write_all(b"bar=foo").expect("write 2");
+ });
+
+ let (socket, _) = listener.accept().await.unwrap();
+ let conn = http1::Builder::new().serve_connection(
+ socket,
+ service_fn(|_| {
+ // In 1.0 we would use `http_body_util::Empty::<Bytes>::new()` to construct
+ // an empty body
+ let res = Response::builder().status(200).body(Body::empty()).unwrap();
+ future::ready(Ok::<_, hyper::Error>(res))
+ }),
+ );
+
+ let parts = conn.without_shutdown().await.unwrap();
+ assert_eq!(parts.read_buf, "eagerly optimistic");
+
+ // wait so that we don't write until other side saw 101 response
+ rx.await.unwrap();
+
+ let mut io = parts.io;
+ io.write_all(b"foo=bar").await.unwrap();
+ let mut vec = vec![];
+ io.read_to_end(&mut vec).await.unwrap();
+ assert_eq!(vec, b"bar=foo");
+ }
+
+ #[tokio::test]
+ async fn h2_connect() {
+ let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
+ let addr = listener.local_addr().unwrap();
+
+ let conn = connect_async(addr).await;
+
+ let (h2, connection) = h2::client::handshake(conn).await.unwrap();
+ tokio::spawn(async move {
+ connection.await.unwrap();
+ });
+ let mut h2 = h2.ready().await.unwrap();
+
+ async fn connect_and_recv_bread(
+ h2: &mut SendRequest<Bytes>,
+ ) -> (RecvStream, SendStream<Bytes>) {
+ let request = Request::connect("localhost").body(()).unwrap();
+ let (response, send_stream) = h2.send_request(request, false).unwrap();
+ let response = response.await.unwrap();
+ assert_eq!(response.status(), StatusCode::OK);
+
+ let mut body = response.into_body();
+ let bytes = body.data().await.unwrap().unwrap();
+ assert_eq!(&bytes[..], b"Bread?");
+ let _ = body.flow_control().release_capacity(bytes.len());
+
+ (body, send_stream)
+ }
+
+ tokio::spawn(async move {
+ let (mut recv_stream, mut send_stream) = connect_and_recv_bread(&mut h2).await;
+
+ send_stream.send_data("Baguette!".into(), true).unwrap();
+
+ assert!(recv_stream.data().await.unwrap().unwrap().is_empty());
+ });
+
+ // In 1.0 the `Body` struct is renamed to `IncomingBody`
+ let svc = service_fn(move |req: Request<Body>| {
+ let on_upgrade = hyper::upgrade::on(req);
+
+ tokio::spawn(async move {
+ let mut upgraded = on_upgrade.await.expect("on_upgrade");
+ upgraded.write_all(b"Bread?").await.unwrap();
+
+ let mut vec = vec![];
+ upgraded.read_to_end(&mut vec).await.unwrap();
+ assert_eq!(s(&vec), "Baguette!");
+
+ upgraded.shutdown().await.unwrap();
+ });
+
+ future::ok::<_, hyper::Error>(
+ // In 1.0 we would use `http_body_util::Empty::<Bytes>::new()` to construct
+ // an empty body
+ Response::builder().status(200).body(Body::empty()).unwrap(),
+ )
+ });
+
+ let (socket, _) = listener.accept().await.unwrap();
+ http2::Builder::new(TokioExecutor)
+ .serve_connection(socket, svc)
+ .await
+ .unwrap();
+ }
+
+ #[derive(Clone)]
+ /// An Executor that uses the tokio runtime.
+ pub struct TokioExecutor;
+
+ impl<F> hyper::rt::Executor<F> for TokioExecutor
+ where
+ F: std::future::Future + Send + 'static,
+ F::Output: Send + 'static,
+ {
+ fn execute(&self, fut: F) {
+ tokio::task::spawn(fut);
+ }
+ }
+}
// -------------------------------------------------
// the Server that is used to run all the tests with
// -------------------------------------------------
|
hyperium/hyper
|
a24f0c0af8e1f4c6b7cc3a47c83eb6e4af88aca6
|
|
[
"3085"
] |
0.3
|
984760f76aaf5f0e20dddd119cad8188ca81e0e4
|
diff --git a/examples/http_proxy.rs b/examples/http_proxy.rs
--- a/examples/http_proxy.rs
+++ b/examples/http_proxy.rs
@@ -31,8 +31,8 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
tokio::task::spawn(async move {
if let Err(err) = http1::Builder::new()
- .http1_preserve_header_case(true)
- .http1_title_case_headers(true)
+ .preserve_header_case(true)
+ .title_case_headers(true)
.serve_connection(stream, service_fn(proxy))
.with_upgrades()
.await
diff --git a/examples/http_proxy.rs b/examples/http_proxy.rs
--- a/examples/http_proxy.rs
+++ b/examples/http_proxy.rs
@@ -90,8 +90,8 @@ async fn proxy(
let stream = TcpStream::connect(addr).await.unwrap();
let (mut sender, conn) = Builder::new()
- .http1_preserve_header_case(true)
- .http1_title_case_headers(true)
+ .preserve_header_case(true)
+ .title_case_headers(true)
.handshake(stream)
.await?;
tokio::task::spawn(async move {
diff --git a/src/client/conn/http1.rs b/src/client/conn/http1.rs
--- a/src/client/conn/http1.rs
+++ b/src/client/conn/http1.rs
@@ -333,12 +333,10 @@ impl Builder {
/// > of 400 (Bad Request). A proxy MUST remove any such whitespace from a
/// > response message before forwarding the message downstream.
///
- /// Note that this setting does not affect HTTP/2.
- ///
/// Default is false.
///
/// [RFC 7230 Section 3.2.4.]: https://tools.ietf.org/html/rfc7230#section-3.2.4
- pub fn http1_allow_spaces_after_header_name_in_responses(
+ pub fn allow_spaces_after_header_name_in_responses(
&mut self,
enabled: bool,
) -> &mut Builder {
diff --git a/src/client/conn/http1.rs b/src/client/conn/http1.rs
--- a/src/client/conn/http1.rs
+++ b/src/client/conn/http1.rs
@@ -376,12 +374,10 @@ impl Builder {
/// > obs-fold with one or more SP octets prior to interpreting the field
/// > value.
///
- /// Note that this setting does not affect HTTP/2.
- ///
/// Default is false.
///
/// [RFC 7230 Section 3.2.4.]: https://tools.ietf.org/html/rfc7230#section-3.2.4
- pub fn http1_allow_obsolete_multiline_headers_in_responses(
+ pub fn allow_obsolete_multiline_headers_in_responses(
&mut self,
enabled: bool,
) -> &mut Builder {
diff --git a/src/client/conn/http1.rs b/src/client/conn/http1.rs
--- a/src/client/conn/http1.rs
+++ b/src/client/conn/http1.rs
@@ -396,10 +392,8 @@ impl Builder {
/// name, or does not include a colon at all, the line will be silently ignored
/// and no error will be reported.
///
- /// Note that this setting does not affect HTTP/2.
- ///
/// Default is false.
- pub fn http1_ignore_invalid_headers_in_responses(&mut self, enabled: bool) -> &mut Builder {
+ pub fn ignore_invalid_headers_in_responses(&mut self, enabled: bool) -> &mut Builder {
self.h1_parser_config
.ignore_invalid_headers_in_responses(enabled);
self
diff --git a/src/client/conn/http1.rs b/src/client/conn/http1.rs
--- a/src/client/conn/http1.rs
+++ b/src/client/conn/http1.rs
@@ -417,7 +411,7 @@ impl Builder {
///
/// Default is `auto`. In this mode hyper will try to guess which
/// mode to use
- pub fn http1_writev(&mut self, enabled: bool) -> &mut Builder {
+ pub fn writev(&mut self, enabled: bool) -> &mut Builder {
self.h1_writev = Some(enabled);
self
}
diff --git a/src/client/conn/http1.rs b/src/client/conn/http1.rs
--- a/src/client/conn/http1.rs
+++ b/src/client/conn/http1.rs
@@ -425,10 +419,8 @@ impl Builder {
/// Set whether HTTP/1 connections will write header names as title case at
/// the socket level.
///
- /// Note that this setting does not affect HTTP/2.
- ///
/// Default is false.
- pub fn http1_title_case_headers(&mut self, enabled: bool) -> &mut Builder {
+ pub fn title_case_headers(&mut self, enabled: bool) -> &mut Builder {
self.h1_title_case_headers = enabled;
self
}
diff --git a/src/client/conn/http1.rs b/src/client/conn/http1.rs
--- a/src/client/conn/http1.rs
+++ b/src/client/conn/http1.rs
@@ -443,10 +435,8 @@ impl Builder {
/// interact with the original cases. The only effect this can have now is
/// to forward the cases in a proxy-like fashion.
///
- /// Note that this setting does not affect HTTP/2.
- ///
/// Default is false.
- pub fn http1_preserve_header_case(&mut self, enabled: bool) -> &mut Builder {
+ pub fn preserve_header_case(&mut self, enabled: bool) -> &mut Builder {
self.h1_preserve_header_case = enabled;
self
}
diff --git a/src/client/conn/http1.rs b/src/client/conn/http1.rs
--- a/src/client/conn/http1.rs
+++ b/src/client/conn/http1.rs
@@ -457,21 +447,19 @@ impl Builder {
/// ordering in a private extension on the `Response`. It will also look for and use
/// such an extension in any provided `Request`.
///
- /// Note that this setting does not affect HTTP/2.
- ///
/// Default is false.
#[cfg(feature = "ffi")]
- pub fn http1_preserve_header_order(&mut self, enabled: bool) -> &mut Builder {
+ pub fn preserve_header_order(&mut self, enabled: bool) -> &mut Builder {
self.h1_preserve_header_order = enabled;
self
}
/// Sets the exact size of the read buffer to *always* use.
///
- /// Note that setting this option unsets the `http1_max_buf_size` option.
+ /// Note that setting this option unsets the `max_buf_size` option.
///
/// Default is an adaptive read buffer.
- pub fn http1_read_buf_exact_size(&mut self, sz: Option<usize>) -> &mut Builder {
+ pub fn read_buf_exact_size(&mut self, sz: Option<usize>) -> &mut Builder {
self.h1_read_buf_exact_size = sz;
self.h1_max_buf_size = None;
self
diff --git a/src/client/conn/http1.rs b/src/client/conn/http1.rs
--- a/src/client/conn/http1.rs
+++ b/src/client/conn/http1.rs
@@ -481,12 +469,12 @@ impl Builder {
///
/// Default is ~400kb.
///
- /// Note that setting this option unsets the `http1_read_exact_buf_size` option.
+ /// Note that setting this option unsets the `read_exact_buf_size` option.
///
/// # Panics
///
/// The minimum value allowed is 8192. This method panics if the passed `max` is less than the minimum.
- pub fn http1_max_buf_size(&mut self, max: usize) -> &mut Self {
+ pub fn max_buf_size(&mut self, max: usize) -> &mut Self {
assert!(
max >= proto::h1::MINIMUM_MAX_BUFFER_SIZE,
"the max_buf_size cannot be smaller than the minimum that h1 specifies."
diff --git a/src/client/conn/http2.rs b/src/client/conn/http2.rs
--- a/src/client/conn/http2.rs
+++ b/src/client/conn/http2.rs
@@ -278,7 +278,7 @@ impl Builder {
/// If not set, hyper will use a default.
///
/// [spec]: https://http2.github.io/http2-spec/#SETTINGS_INITIAL_WINDOW_SIZE
- pub fn http2_initial_stream_window_size(&mut self, sz: impl Into<Option<u32>>) -> &mut Self {
+ pub fn initial_stream_window_size(&mut self, sz: impl Into<Option<u32>>) -> &mut Self {
if let Some(sz) = sz.into() {
self.h2_builder.adaptive_window = false;
self.h2_builder.initial_stream_window_size = sz;
diff --git a/src/client/conn/http2.rs b/src/client/conn/http2.rs
--- a/src/client/conn/http2.rs
+++ b/src/client/conn/http2.rs
@@ -291,7 +291,7 @@ impl Builder {
/// Passing `None` will do nothing.
///
/// If not set, hyper will use a default.
- pub fn http2_initial_connection_window_size(
+ pub fn initial_connection_window_size(
&mut self,
sz: impl Into<Option<u32>>,
) -> &mut Self {
diff --git a/src/client/conn/http2.rs b/src/client/conn/http2.rs
--- a/src/client/conn/http2.rs
+++ b/src/client/conn/http2.rs
@@ -305,9 +305,9 @@ impl Builder {
/// Sets whether to use an adaptive flow control.
///
/// Enabling this will override the limits set in
- /// `http2_initial_stream_window_size` and
- /// `http2_initial_connection_window_size`.
- pub fn http2_adaptive_window(&mut self, enabled: bool) -> &mut Self {
+ /// `initial_stream_window_size` and
+ /// `initial_connection_window_size`.
+ pub fn adaptive_window(&mut self, enabled: bool) -> &mut Self {
use proto::h2::SPEC_WINDOW_SIZE;
self.h2_builder.adaptive_window = enabled;
diff --git a/src/client/conn/http2.rs b/src/client/conn/http2.rs
--- a/src/client/conn/http2.rs
+++ b/src/client/conn/http2.rs
@@ -323,7 +323,7 @@ impl Builder {
/// Passing `None` will do nothing.
///
/// If not set, hyper will use a default.
- pub fn http2_max_frame_size(&mut self, sz: impl Into<Option<u32>>) -> &mut Self {
+ pub fn max_frame_size(&mut self, sz: impl Into<Option<u32>>) -> &mut Self {
if let Some(sz) = sz.into() {
self.h2_builder.max_frame_size = sz;
}
diff --git a/src/client/conn/http2.rs b/src/client/conn/http2.rs
--- a/src/client/conn/http2.rs
+++ b/src/client/conn/http2.rs
@@ -336,7 +336,7 @@ impl Builder {
/// Pass `None` to disable HTTP2 keep-alive.
///
/// Default is currently disabled.
- pub fn http2_keep_alive_interval(
+ pub fn keep_alive_interval(
&mut self,
interval: impl Into<Option<Duration>>,
) -> &mut Self {
diff --git a/src/client/conn/http2.rs b/src/client/conn/http2.rs
--- a/src/client/conn/http2.rs
+++ b/src/client/conn/http2.rs
@@ -347,10 +347,10 @@ impl Builder {
/// Sets a timeout for receiving an acknowledgement of the keep-alive ping.
///
/// If the ping is not acknowledged within the timeout, the connection will
- /// be closed. Does nothing if `http2_keep_alive_interval` is disabled.
+ /// be closed. Does nothing if `keep_alive_interval` is disabled.
///
/// Default is 20 seconds.
- pub fn http2_keep_alive_timeout(&mut self, timeout: Duration) -> &mut Self {
+ pub fn keep_alive_timeout(&mut self, timeout: Duration) -> &mut Self {
self.h2_builder.keep_alive_timeout = timeout;
self
}
diff --git a/src/client/conn/http2.rs b/src/client/conn/http2.rs
--- a/src/client/conn/http2.rs
+++ b/src/client/conn/http2.rs
@@ -359,11 +359,11 @@ impl Builder {
///
/// If disabled, keep-alive pings are only sent while there are open
/// request/responses streams. If enabled, pings are also sent when no
- /// streams are active. Does nothing if `http2_keep_alive_interval` is
+ /// streams are active. Does nothing if `keep_alive_interval` is
/// disabled.
///
/// Default is `false`.
- pub fn http2_keep_alive_while_idle(&mut self, enabled: bool) -> &mut Self {
+ pub fn keep_alive_while_idle(&mut self, enabled: bool) -> &mut Self {
self.h2_builder.keep_alive_while_idle = enabled;
self
}
diff --git a/src/client/conn/http2.rs b/src/client/conn/http2.rs
--- a/src/client/conn/http2.rs
+++ b/src/client/conn/http2.rs
@@ -376,7 +376,7 @@ impl Builder {
/// The default value is determined by the `h2` crate.
///
/// [`h2::client::Builder::max_concurrent_reset_streams`]: https://docs.rs/h2/client/struct.Builder.html#method.max_concurrent_reset_streams
- pub fn http2_max_concurrent_reset_streams(&mut self, max: usize) -> &mut Self {
+ pub fn max_concurrent_reset_streams(&mut self, max: usize) -> &mut Self {
self.h2_builder.max_concurrent_reset_streams = Some(max);
self
}
diff --git a/src/client/conn/http2.rs b/src/client/conn/http2.rs
--- a/src/client/conn/http2.rs
+++ b/src/client/conn/http2.rs
@@ -388,7 +388,7 @@ impl Builder {
/// # Panics
///
/// The value must be no larger than `u32::MAX`.
- pub fn http2_max_send_buf_size(&mut self, max: usize) -> &mut Self {
+ pub fn max_send_buf_size(&mut self, max: usize) -> &mut Self {
assert!(max <= std::u32::MAX as usize);
self.h2_builder.max_send_buffer_size = max;
self
diff --git a/src/ext.rs b/src/ext.rs
--- a/src/ext.rs
+++ b/src/ext.rs
@@ -76,7 +76,7 @@ impl fmt::Debug for Protocol {
/// A map from header names to their original casing as received in an HTTP message.
///
/// If an HTTP/1 response `res` is parsed on a connection whose option
-/// [`http1_preserve_header_case`] was set to true and the response included
+/// [`preserve_header_case`] was set to true and the response included
/// the following headers:
///
/// ```ignore
diff --git a/src/ext.rs b/src/ext.rs
--- a/src/ext.rs
+++ b/src/ext.rs
@@ -93,7 +93,7 @@ impl fmt::Debug for Protocol {
/// })
/// ```
///
-/// [`http1_preserve_header_case`]: /client/struct.Client.html#method.http1_preserve_header_case
+/// [`preserve_header_case`]: /client/struct.Client.html#method.preserve_header_case
#[derive(Clone, Debug)]
pub(crate) struct HeaderCaseMap(HeaderMap<Bytes>);
diff --git a/src/ffi/client.rs b/src/ffi/client.rs
--- a/src/ffi/client.rs
+++ b/src/ffi/client.rs
@@ -70,9 +70,9 @@ ffi_fn! {
conn::http1::Builder::new()
.executor(options.exec.clone())
- .http1_allow_obsolete_multiline_headers_in_responses(options.http1_allow_obsolete_multiline_headers_in_responses)
- .http1_preserve_header_case(options.http1_preserve_header_case)
- .http1_preserve_header_order(options.http1_preserve_header_order)
+ .allow_obsolete_multiline_headers_in_responses(options.http1_allow_obsolete_multiline_headers_in_responses)
+ .preserve_header_case(options.http1_preserve_header_case)
+ .preserve_header_order(options.http1_preserve_header_order)
.handshake::<_, crate::body::Incoming>(io)
.await
.map(|(tx, conn)| {
diff --git a/src/server/conn/http1.rs b/src/server/conn/http1.rs
--- a/src/server/conn/http1.rs
+++ b/src/server/conn/http1.rs
@@ -224,7 +224,7 @@ impl Builder {
/// detects an EOF in the middle of a request.
///
/// Default is `false`.
- pub fn http1_half_close(&mut self, val: bool) -> &mut Self {
+ pub fn half_close(&mut self, val: bool) -> &mut Self {
self.h1_half_close = val;
self
}
diff --git a/src/server/conn/http1.rs b/src/server/conn/http1.rs
--- a/src/server/conn/http1.rs
+++ b/src/server/conn/http1.rs
@@ -232,7 +232,7 @@ impl Builder {
/// Enables or disables HTTP/1 keep-alive.
///
/// Default is true.
- pub fn http1_keep_alive(&mut self, val: bool) -> &mut Self {
+ pub fn keep_alive(&mut self, val: bool) -> &mut Self {
self.h1_keep_alive = val;
self
}
diff --git a/src/server/conn/http1.rs b/src/server/conn/http1.rs
--- a/src/server/conn/http1.rs
+++ b/src/server/conn/http1.rs
@@ -240,10 +240,8 @@ impl Builder {
/// Set whether HTTP/1 connections will write header names as title case at
/// the socket level.
///
- /// Note that this setting does not affect HTTP/2.
- ///
/// Default is false.
- pub fn http1_title_case_headers(&mut self, enabled: bool) -> &mut Self {
+ pub fn title_case_headers(&mut self, enabled: bool) -> &mut Self {
self.h1_title_case_headers = enabled;
self
}
diff --git a/src/server/conn/http1.rs b/src/server/conn/http1.rs
--- a/src/server/conn/http1.rs
+++ b/src/server/conn/http1.rs
@@ -258,10 +256,8 @@ impl Builder {
/// interact with the original cases. The only effect this can have now is
/// to forward the cases in a proxy-like fashion.
///
- /// Note that this setting does not affect HTTP/2.
- ///
/// Default is false.
- pub fn http1_preserve_header_case(&mut self, enabled: bool) -> &mut Self {
+ pub fn preserve_header_case(&mut self, enabled: bool) -> &mut Self {
self.h1_preserve_header_case = enabled;
self
}
diff --git a/src/server/conn/http1.rs b/src/server/conn/http1.rs
--- a/src/server/conn/http1.rs
+++ b/src/server/conn/http1.rs
@@ -270,7 +266,7 @@ impl Builder {
/// transmit the entire header within this time, the connection is closed.
///
/// Default is None.
- pub fn http1_header_read_timeout(&mut self, read_timeout: Duration) -> &mut Self {
+ pub fn header_read_timeout(&mut self, read_timeout: Duration) -> &mut Self {
self.h1_header_read_timeout = Some(read_timeout);
self
}
diff --git a/src/server/conn/http1.rs b/src/server/conn/http1.rs
--- a/src/server/conn/http1.rs
+++ b/src/server/conn/http1.rs
@@ -287,7 +283,7 @@ impl Builder {
///
/// Default is `auto`. In this mode hyper will try to guess which
/// mode to use
- pub fn http1_writev(&mut self, val: bool) -> &mut Self {
+ pub fn writev(&mut self, val: bool) -> &mut Self {
self.h1_writev = Some(val);
self
}
diff --git a/src/server/conn/http2.rs b/src/server/conn/http2.rs
--- a/src/server/conn/http2.rs
+++ b/src/server/conn/http2.rs
@@ -116,7 +116,7 @@ impl<E> Builder<E> {
/// If not set, hyper will use a default.
///
/// [spec]: https://http2.github.io/http2-spec/#SETTINGS_INITIAL_WINDOW_SIZE
- pub fn http2_initial_stream_window_size(&mut self, sz: impl Into<Option<u32>>) -> &mut Self {
+ pub fn initial_stream_window_size(&mut self, sz: impl Into<Option<u32>>) -> &mut Self {
if let Some(sz) = sz.into() {
self.h2_builder.adaptive_window = false;
self.h2_builder.initial_stream_window_size = sz;
diff --git a/src/server/conn/http2.rs b/src/server/conn/http2.rs
--- a/src/server/conn/http2.rs
+++ b/src/server/conn/http2.rs
@@ -129,7 +129,7 @@ impl<E> Builder<E> {
/// Passing `None` will do nothing.
///
/// If not set, hyper will use a default.
- pub fn http2_initial_connection_window_size(
+ pub fn initial_connection_window_size(
&mut self,
sz: impl Into<Option<u32>>,
) -> &mut Self {
diff --git a/src/server/conn/http2.rs b/src/server/conn/http2.rs
--- a/src/server/conn/http2.rs
+++ b/src/server/conn/http2.rs
@@ -143,9 +143,9 @@ impl<E> Builder<E> {
/// Sets whether to use an adaptive flow control.
///
/// Enabling this will override the limits set in
- /// `http2_initial_stream_window_size` and
- /// `http2_initial_connection_window_size`.
- pub fn http2_adaptive_window(&mut self, enabled: bool) -> &mut Self {
+ /// `initial_stream_window_size` and
+ /// `initial_connection_window_size`.
+ pub fn adaptive_window(&mut self, enabled: bool) -> &mut Self {
use proto::h2::SPEC_WINDOW_SIZE;
self.h2_builder.adaptive_window = enabled;
diff --git a/src/server/conn/http2.rs b/src/server/conn/http2.rs
--- a/src/server/conn/http2.rs
+++ b/src/server/conn/http2.rs
@@ -161,7 +161,7 @@ impl<E> Builder<E> {
/// Passing `None` will do nothing.
///
/// If not set, hyper will use a default.
- pub fn http2_max_frame_size(&mut self, sz: impl Into<Option<u32>>) -> &mut Self {
+ pub fn max_frame_size(&mut self, sz: impl Into<Option<u32>>) -> &mut Self {
if let Some(sz) = sz.into() {
self.h2_builder.max_frame_size = sz;
}
diff --git a/src/server/conn/http2.rs b/src/server/conn/http2.rs
--- a/src/server/conn/http2.rs
+++ b/src/server/conn/http2.rs
@@ -174,7 +174,7 @@ impl<E> Builder<E> {
/// Default is no limit (`std::u32::MAX`). Passing `None` will do nothing.
///
/// [spec]: https://http2.github.io/http2-spec/#SETTINGS_MAX_CONCURRENT_STREAMS
- pub fn http2_max_concurrent_streams(&mut self, max: impl Into<Option<u32>>) -> &mut Self {
+ pub fn max_concurrent_streams(&mut self, max: impl Into<Option<u32>>) -> &mut Self {
self.h2_builder.max_concurrent_streams = max.into();
self
}
diff --git a/src/server/conn/http2.rs b/src/server/conn/http2.rs
--- a/src/server/conn/http2.rs
+++ b/src/server/conn/http2.rs
@@ -188,7 +188,7 @@ impl<E> Builder<E> {
///
/// # Cargo Feature
///
- pub fn http2_keep_alive_interval(
+ pub fn keep_alive_interval(
&mut self,
interval: impl Into<Option<Duration>>,
) -> &mut Self {
diff --git a/src/server/conn/http2.rs b/src/server/conn/http2.rs
--- a/src/server/conn/http2.rs
+++ b/src/server/conn/http2.rs
@@ -199,13 +199,13 @@ impl<E> Builder<E> {
/// Sets a timeout for receiving an acknowledgement of the keep-alive ping.
///
/// If the ping is not acknowledged within the timeout, the connection will
- /// be closed. Does nothing if `http2_keep_alive_interval` is disabled.
+ /// be closed. Does nothing if `keep_alive_interval` is disabled.
///
/// Default is 20 seconds.
///
/// # Cargo Feature
///
- pub fn http2_keep_alive_timeout(&mut self, timeout: Duration) -> &mut Self {
+ pub fn keep_alive_timeout(&mut self, timeout: Duration) -> &mut Self {
self.h2_builder.keep_alive_timeout = timeout;
self
}
diff --git a/src/server/conn/http2.rs b/src/server/conn/http2.rs
--- a/src/server/conn/http2.rs
+++ b/src/server/conn/http2.rs
@@ -217,7 +217,7 @@ impl<E> Builder<E> {
/// # Panics
///
/// The value must be no larger than `u32::MAX`.
- pub fn http2_max_send_buf_size(&mut self, max: usize) -> &mut Self {
+ pub fn max_send_buf_size(&mut self, max: usize) -> &mut Self {
assert!(max <= std::u32::MAX as usize);
self.h2_builder.max_send_buffer_size = max;
self
diff --git a/src/server/conn/http2.rs b/src/server/conn/http2.rs
--- a/src/server/conn/http2.rs
+++ b/src/server/conn/http2.rs
@@ -226,7 +226,7 @@ impl<E> Builder<E> {
/// Enables the [extended CONNECT protocol].
///
/// [extended CONNECT protocol]: https://datatracker.ietf.org/doc/html/rfc8441#section-4
- pub fn http2_enable_connect_protocol(&mut self) -> &mut Self {
+ pub fn enable_connect_protocol(&mut self) -> &mut Self {
self.h2_builder.enable_connect_protocol = true;
self
}
diff --git a/src/server/conn/http2.rs b/src/server/conn/http2.rs
--- a/src/server/conn/http2.rs
+++ b/src/server/conn/http2.rs
@@ -234,7 +234,7 @@ impl<E> Builder<E> {
/// Sets the max size of received header frames.
///
/// Default is currently ~16MB, but may change.
- pub fn http2_max_header_list_size(&mut self, max: u32) -> &mut Self {
+ pub fn max_header_list_size(&mut self, max: u32) -> &mut Self {
self.h2_builder.max_header_list_size = max;
self
}
diff --git a/src/server/conn/mod.rs b/src/server/conn/mod.rs
--- a/src/server/conn/mod.rs
+++ b/src/server/conn/mod.rs
@@ -30,7 +30,7 @@
//! let (tcp_stream, _) = tcp_listener.accept().await?;
//! tokio::task::spawn(async move {
//! if let Err(http_err) = http1::Builder::new()
-//! .http1_keep_alive(true)
+//! .keep_alive(true)
//! .serve_connection(tcp_stream, service_fn(hello))
//! .await {
//! eprintln!("Error while serving HTTP connection: {}", http_err);
|
Leaving for comments for a few days.
👍 Would it make sense to rename the builders `Http1Builder`, etc?
So, `http1::Http1Builder`? Seems contrary [RFC 356](http://rust-lang.github.io/rfcs/0356-no-module-prefixes.html). Users who want it can rename it on import: `use hyper::client::conn::http1::Builder as Http1Builder`.
Oh didn't even know that existed! Yeah seems fine to me.
The APIs are already inconsistent today (e.g. there's `http1_title_case_headers` but also `max_buffer_size` on `http1::Builder`).
Agree with this proposal, the prefixing is now an indirect stutter-naming (`conn::http1::Builder::http1_...`).
|
2022-12-24T18:42:06Z
| 3,101
|
Remove version-specific prefixes from builder methods
With the split of the builders into per-version kinds (see #2842 and #2851), I feel there's a new question around whether we should remove the prefixes from the method names. They used to exist because the builders were combined over multiple HTTP versions, so the options were scoped to a version. But now:
```rust
mod http1 {
impl Builder {
pub fn http1_title_case_headers() {
// ...
}
}
}
```
It seems all those can have the `http1_` prefix removed, and same for the `http2::Builder`s.
Noticed in https://github.com/hyperium/hyper-util/pull/11#discussion_r1035315089.
|
hyperium__hyper-3101
|
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1197,7 +1197,7 @@ test! {
client:
options: {
- http1_title_case_headers: true,
+ title_case_headers: true,
},
request: {
method: GET,
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1311,7 +1311,7 @@ test! {
client:
options: {
- http1_allow_obsolete_multiline_headers_in_responses: true,
+ allow_obsolete_multiline_headers_in_responses: true,
},
request: {
method: GET,
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1982,10 +1982,10 @@ mod conn {
let (_client, conn) = conn::http2::Builder::new()
.executor(TokioExecutor)
.timer(TokioTimer)
- .http2_keep_alive_interval(Duration::from_secs(1))
- .http2_keep_alive_timeout(Duration::from_secs(1))
+ .keep_alive_interval(Duration::from_secs(1))
+ .keep_alive_timeout(Duration::from_secs(1))
// enable while idle since we aren't sending requests
- .http2_keep_alive_while_idle(true)
+ .keep_alive_while_idle(true)
.handshake::<_, hyper::body::Incoming>(io)
.await
.expect("http handshake");
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2011,8 +2011,8 @@ mod conn {
let (mut client, conn) = conn::http2::Builder::new()
.executor(TokioExecutor)
.timer(TokioTimer)
- .http2_keep_alive_interval(Duration::from_secs(1))
- .http2_keep_alive_timeout(Duration::from_secs(1))
+ .keep_alive_interval(Duration::from_secs(1))
+ .keep_alive_timeout(Duration::from_secs(1))
.handshake::<_, hyper::body::Incoming>(io)
.await
.expect("http handshake");
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2043,8 +2043,8 @@ mod conn {
let (mut client, conn) = conn::http2::Builder::new()
.executor(TokioExecutor)
.timer(TokioTimer)
- .http2_keep_alive_interval(Duration::from_secs(1))
- .http2_keep_alive_timeout(Duration::from_secs(1))
+ .keep_alive_interval(Duration::from_secs(1))
+ .keep_alive_timeout(Duration::from_secs(1))
.handshake(io)
.await
.expect("http handshake");
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2103,8 +2103,8 @@ mod conn {
let (mut client, conn) = conn::http2::Builder::new()
.executor(TokioExecutor)
.timer(TokioTimer)
- .http2_keep_alive_interval(Duration::from_secs(1))
- .http2_keep_alive_timeout(Duration::from_secs(1))
+ .keep_alive_interval(Duration::from_secs(1))
+ .keep_alive_timeout(Duration::from_secs(1))
.handshake(io)
.await
.expect("http handshake");
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1247,7 +1247,7 @@ async fn http1_allow_half_close() {
let (socket, _) = listener.accept().await.unwrap();
http1::Builder::new()
- .http1_half_close(true)
+ .half_close(true)
.serve_connection(
socket,
service_fn(|_| {
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1274,7 +1274,7 @@ async fn disconnect_after_reading_request_before_responding() {
let (socket, _) = listener.accept().await.unwrap();
http1::Builder::new()
- .http1_half_close(false)
+ .half_close(false)
.serve_connection(
socket,
service_fn(|_| {
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1370,7 +1370,7 @@ async fn header_read_timeout_slow_writes() {
let (socket, _) = listener.accept().await.unwrap();
let conn = http1::Builder::new()
.timer(TokioTimer)
- .http1_header_read_timeout(Duration::from_secs(5))
+ .header_read_timeout(Duration::from_secs(5))
.serve_connection(
socket,
service_fn(|_| {
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1445,7 +1445,7 @@ async fn header_read_timeout_slow_writes_multiple_requests() {
let (socket, _) = listener.accept().await.unwrap();
let conn = http1::Builder::new()
.timer(TokioTimer)
- .http1_header_read_timeout(Duration::from_secs(5))
+ .header_read_timeout(Duration::from_secs(5))
.serve_connection(
socket,
service_fn(|_| {
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2363,8 +2363,8 @@ async fn http2_keep_alive_detects_unresponsive_client() {
let err = http2::Builder::new(TokioExecutor)
.timer(TokioTimer)
- .http2_keep_alive_interval(Duration::from_secs(1))
- .http2_keep_alive_timeout(Duration::from_secs(1))
+ .keep_alive_interval(Duration::from_secs(1))
+ .keep_alive_timeout(Duration::from_secs(1))
.serve_connection(socket, unreachable_service())
.await
.expect_err("serve_connection should error");
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2381,8 +2381,8 @@ async fn http2_keep_alive_with_responsive_client() {
http2::Builder::new(TokioExecutor)
.timer(TokioTimer)
- .http2_keep_alive_interval(Duration::from_secs(1))
- .http2_keep_alive_timeout(Duration::from_secs(1))
+ .keep_alive_interval(Duration::from_secs(1))
+ .keep_alive_timeout(Duration::from_secs(1))
.serve_connection(socket, HelloWorld)
.await
.expect("serve_connection");
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2445,8 +2445,8 @@ async fn http2_keep_alive_count_server_pings() {
http2::Builder::new(TokioExecutor)
.timer(TokioTimer)
- .http2_keep_alive_interval(Duration::from_secs(1))
- .http2_keep_alive_timeout(Duration::from_secs(1))
+ .keep_alive_interval(Duration::from_secs(1))
+ .keep_alive_timeout(Duration::from_secs(1))
.serve_connection(socket, unreachable_service())
.await
.expect("serve_connection");
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2839,7 +2839,7 @@ impl ServeOptions {
.serve_connection(stream, service).await.unwrap();
} else {
http1::Builder::new()
- .http1_keep_alive(_options.keep_alive)
+ .keep_alive(_options.keep_alive)
.pipeline_flush(_options.pipeline)
.serve_connection(stream, service).await.unwrap();
}
|
hyperium/hyper
|
6fd696e10974f10b2c6b9d16393bbbfa21c2333f
|
[
"3038"
] |
0.14
|
9ad4055fbfc899391f54ecbfc63de385cae711ef
|
diff --git a/src/server/tcp.rs b/src/server/tcp.rs
--- a/src/server/tcp.rs
+++ b/src/server/tcp.rs
@@ -43,24 +43,62 @@ impl TcpKeepaliveConfig {
}
}
- #[cfg(not(any(target_os = "openbsd", target_os = "redox", target_os = "solaris")))]
+ #[cfg(any(
+ target_os = "android",
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "fuchsia",
+ target_os = "illumos",
+ target_os = "linux",
+ target_os = "netbsd",
+ target_vendor = "apple",
+ windows,
+ ))]
fn ka_with_interval(ka: TcpKeepalive, interval: Duration, dirty: &mut bool) -> TcpKeepalive {
*dirty = true;
ka.with_interval(interval)
}
- #[cfg(any(target_os = "openbsd", target_os = "redox", target_os = "solaris"))]
+ #[cfg(not(any(
+ target_os = "android",
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "fuchsia",
+ target_os = "illumos",
+ target_os = "linux",
+ target_os = "netbsd",
+ target_vendor = "apple",
+ windows,
+ )))]
fn ka_with_interval(ka: TcpKeepalive, _: Duration, _: &mut bool) -> TcpKeepalive {
ka // no-op as keepalive interval is not supported on this platform
}
- #[cfg(not(any(target_os = "openbsd", target_os = "redox", target_os = "solaris", target_os = "windows")))]
+ #[cfg(any(
+ target_os = "android",
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "fuchsia",
+ target_os = "illumos",
+ target_os = "linux",
+ target_os = "netbsd",
+ target_vendor = "apple",
+ ))]
fn ka_with_retries(ka: TcpKeepalive, retries: u32, dirty: &mut bool) -> TcpKeepalive {
*dirty = true;
ka.with_retries(retries)
}
- #[cfg(any(target_os = "openbsd", target_os = "redox", target_os = "solaris", target_os = "windows"))]
+ #[cfg(not(any(
+ target_os = "android",
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "fuchsia",
+ target_os = "illumos",
+ target_os = "linux",
+ target_os = "netbsd",
+ target_vendor = "apple",
+ )))]
fn ka_with_retries(ka: TcpKeepalive, _: u32, _: &mut bool) -> TcpKeepalive {
ka // no-op as keepalive retries is not supported on this platform
}
|
I am also getting this error, had to downgrade it to v0.14.20 since my project would not build with v0.14.21
```
hyper = { version = "=0.14.20", features = ["full"] }
```
Looks like it needs a more complex blob of cfgs:
```rust
#[cfg(all(
feature = "all",
any(
target_os = "android",
target_os = "dragonfly",
target_os = "freebsd",
target_os = "fuchsia",
target_os = "illumos",
target_os = "linux",
target_os = "netbsd",
target_vendor = "apple",
windows,
)
))]
```
|
2022-10-31T16:47:16Z
| 3,039
|
v0.14.21 broken build
**Version**
v0.14.21
**Platform**
MacOS and Linux
**Description**
Cannot build the newest hyper:
```
[2022-10-31T14:18:27.529Z] error[E0599]: no method named `with_interval` found for struct `TcpKeepalive` in the current scope
[2022-10-31T14:18:27.529Z] --> /root/.cargo/registry/src/github.com-1ecc6299db9ec823/hyper-0.14.21/src/server/tcp.rs:49:12
[2022-10-31T14:18:27.529Z] |
[2022-10-31T14:18:27.529Z] 49 | ka.with_interval(interval)
[2022-10-31T14:18:27.529Z] | ^^^^^^^^^^^^^ method not found in `TcpKeepalive`
[2022-10-31T14:18:27.529Z]
[2022-10-31T14:18:27.529Z] error[E0599]: no method named `with_retries` found for struct `TcpKeepalive` in the current scope
[2022-10-31T14:18:27.529Z] --> /root/.cargo/registry/src/github.com-1ecc6299db9ec823/hyper-0.14.21/src/server/tcp.rs:60:12
[2022-10-31T14:18:27.529Z] |
[2022-10-31T14:18:27.529Z] 60 | ka.with_retries(retries)
[2022-10-31T14:18:27.529Z] | ^^^^^^^^^^^^ help: there is an associated function with a similar name: `with_time`
[2022-10-31T14:18:27.529Z]
```
|
hyperium__hyper-3039
|
diff --git a/src/server/tcp.rs b/src/server/tcp.rs
--- a/src/server/tcp.rs
+++ b/src/server/tcp.rs
@@ -401,7 +439,17 @@ mod tests {
}
}
- #[cfg(not(any(target_os = "openbsd", target_os = "redox", target_os = "solaris")))]
+ #[cfg(any(
+ target_os = "android",
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "fuchsia",
+ target_os = "illumos",
+ target_os = "linux",
+ target_os = "netbsd",
+ target_vendor = "apple",
+ windows,
+ ))]
#[test]
fn tcp_keepalive_interval_config() {
let mut kac = TcpKeepaliveConfig::default();
diff --git a/src/server/tcp.rs b/src/server/tcp.rs
--- a/src/server/tcp.rs
+++ b/src/server/tcp.rs
@@ -413,7 +461,16 @@ mod tests {
}
}
- #[cfg(not(any(target_os = "openbsd", target_os = "redox", target_os = "solaris", target_os = "windows")))]
+ #[cfg(any(
+ target_os = "android",
+ target_os = "dragonfly",
+ target_os = "freebsd",
+ target_os = "fuchsia",
+ target_os = "illumos",
+ target_os = "linux",
+ target_os = "netbsd",
+ target_vendor = "apple",
+ ))]
#[test]
fn tcp_keepalive_retries_config() {
let mut kac = TcpKeepaliveConfig::default();
|
hyperium/hyper
|
a24f0c0af8e1f4c6b7cc3a47c83eb6e4af88aca6
|
[
"3028"
] |
0.3
|
54eaf7fb1377dbb60c1b7a1f1e93388a58acd466
|
diff --git a/benches/support/tokiort.rs b/benches/support/tokiort.rs
--- a/benches/support/tokiort.rs
+++ b/benches/support/tokiort.rs
@@ -8,6 +8,7 @@ use std::{
use futures_util::Future;
use hyper::rt::{Sleep, Timer};
+use pin_project_lite::pin_project;
#[derive(Clone)]
/// An Executor that uses the tokio runtime.
diff --git a/benches/support/tokiort.rs b/benches/support/tokiort.rs
--- a/benches/support/tokiort.rs
+++ b/benches/support/tokiort.rs
@@ -29,16 +30,16 @@ where
pub struct TokioTimer;
impl Timer for TokioTimer {
- fn sleep(&self, duration: Duration) -> Box<dyn Sleep + Unpin> {
- let s = tokio::time::sleep(duration);
- let hs = TokioSleep { inner: Box::pin(s) };
- return Box::new(hs);
+ fn sleep(&self, duration: Duration) -> Pin<Box<dyn Sleep>> {
+ Box::pin(TokioSleep {
+ inner: tokio::time::sleep(duration),
+ })
}
- fn sleep_until(&self, deadline: Instant) -> Box<dyn Sleep + Unpin> {
- return Box::new(TokioSleep {
- inner: Box::pin(tokio::time::sleep_until(deadline.into())),
- });
+ fn sleep_until(&self, deadline: Instant) -> Pin<Box<dyn Sleep>> {
+ Box::pin(TokioSleep {
+ inner: tokio::time::sleep_until(deadline.into()),
+ })
}
}
diff --git a/src/common/mod.rs b/src/common/mod.rs
--- a/src/common/mod.rs
+++ b/src/common/mod.rs
@@ -28,10 +28,3 @@ cfg_proto! {
pub(crate) use std::marker::Unpin;
}
pub(crate) use std::{future::Future, pin::Pin};
-
-pub(crate) fn into_pin<T: ?Sized>(boxed: Box<T>) -> Pin<Box<T>> {
- // It's not possible to move or replace the insides of a `Pin<Box<T>>`
- // when `T: !Unpin`, so it's safe to pin it directly without any
- // additional requirements.
- unsafe { Pin::new_unchecked(boxed) }
-}
diff --git a/src/common/time.rs b/src/common/time.rs
--- a/src/common/time.rs
+++ b/src/common/time.rs
@@ -55,7 +55,7 @@ impl<F> Future for HyperTimeout<F> where F: Future {
*/
impl Time {
- pub(crate) fn sleep(&self, duration: Duration) -> Box<dyn Sleep + Unpin> {
+ pub(crate) fn sleep(&self, duration: Duration) -> Pin<Box<dyn Sleep>> {
match *self {
Time::Empty => {
panic!("You must supply a timer.")
diff --git a/src/common/time.rs b/src/common/time.rs
--- a/src/common/time.rs
+++ b/src/common/time.rs
@@ -64,7 +64,7 @@ impl Time {
}
}
- pub(crate) fn sleep_until(&self, deadline: Instant) -> Box<dyn Sleep + Unpin> {
+ pub(crate) fn sleep_until(&self, deadline: Instant) -> Pin<Box<dyn Sleep>> {
match *self {
Time::Empty => {
panic!("You must supply a timer.")
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -87,8 +87,7 @@ where
}
None => {
debug!("setting h1 header read timeout timer");
- *ctx.h1_header_read_timeout_fut =
- Some(crate::common::into_pin(ctx.timer.sleep_until(deadline)));
+ *ctx.h1_header_read_timeout_fut = Some(ctx.timer.sleep_until(deadline));
}
}
}
diff --git a/src/proto/h2/ping.rs b/src/proto/h2/ping.rs
--- a/src/proto/h2/ping.rs
+++ b/src/proto/h2/ping.rs
@@ -61,7 +61,7 @@ pub(super) fn channel(ping_pong: PingPong, config: Config, __timer: Time) -> (Re
interval,
timeout: config.keep_alive_timeout,
while_idle: config.keep_alive_while_idle,
- sleep: crate::common::into_pin(__timer.sleep(interval)),
+ sleep: __timer.sleep(interval),
state: KeepAliveState::Init,
timer: __timer,
});
diff --git a/src/rt.rs b/src/rt.rs
--- a/src/rt.rs
+++ b/src/rt.rs
@@ -20,16 +20,16 @@ pub trait Executor<Fut> {
/// A timer which provides timer-like functions.
pub trait Timer {
/// Return a future that resolves in `duration` time.
- fn sleep(&self, duration: Duration) -> Box<dyn Sleep + Unpin>;
+ fn sleep(&self, duration: Duration) -> Pin<Box<dyn Sleep>>;
/// Return a future that resolves at `deadline`.
- fn sleep_until(&self, deadline: Instant) -> Box<dyn Sleep + Unpin>;
+ fn sleep_until(&self, deadline: Instant) -> Pin<Box<dyn Sleep>>;
/// Reset a future to resolve at `new_deadline` instead.
fn reset(&self, sleep: &mut Pin<Box<dyn Sleep>>, new_deadline: Instant) {
- *sleep = crate::common::into_pin(self.sleep_until(new_deadline));
+ *sleep = self.sleep_until(new_deadline);
}
}
/// A future returned by a `Timer`.
-pub trait Sleep: Send + Sync + Unpin + Future<Output = ()> {}
+pub trait Sleep: Send + Sync + Future<Output = ()> {}
|
2022-10-29T21:08:44Z
| 3,037
|
`Timer` methods should return `Pin<Box<dyn Sleep>>` and `Sleep` should drop the `Unpin` requirement
Otherwise, you need to double-box `!Unpin` timer futures like Tokio's.
|
hyperium__hyper-3037
|
diff --git a/benches/support/tokiort.rs b/benches/support/tokiort.rs
--- a/benches/support/tokiort.rs
+++ b/benches/support/tokiort.rs
@@ -59,15 +60,18 @@ where
// Use TokioSleep to get tokio::time::Sleep to implement Unpin.
// see https://docs.rs/tokio/latest/tokio/time/struct.Sleep.html
-pub(crate) struct TokioSleep {
- pub(crate) inner: Pin<Box<tokio::time::Sleep>>,
+pin_project! {
+ pub(crate) struct TokioSleep {
+ #[pin]
+ pub(crate) inner: tokio::time::Sleep,
+ }
}
impl Future for TokioSleep {
type Output = ();
- fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
- self.inner.as_mut().poll(cx)
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ self.project().inner.poll(cx)
}
}
|
hyperium/hyper
|
6fd696e10974f10b2c6b9d16393bbbfa21c2333f
|
|
[
"2649"
] |
0.3
|
81e25fa868c86e4ea81d5a96fdca497a4b1ab3c1
|
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -90,7 +90,7 @@ impl<T, U> Sender<T, U> {
}
let (tx, rx) = oneshot::channel();
self.inner
- .send(Envelope(Some((val, Callback::Retry(tx)))))
+ .send(Envelope(Some((val, Callback::Retry(Some(tx))))))
.map(move |_| rx)
.map_err(|mut e| (e.0).0.take().expect("envelope not dropped").0)
}
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -101,7 +101,7 @@ impl<T, U> Sender<T, U> {
}
let (tx, rx) = oneshot::channel();
self.inner
- .send(Envelope(Some((val, Callback::NoRetry(tx)))))
+ .send(Envelope(Some((val, Callback::NoRetry(Some(tx))))))
.map(move |_| rx)
.map_err(|mut e| (e.0).0.take().expect("envelope not dropped").0)
}
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -131,7 +131,7 @@ impl<T, U> UnboundedSender<T, U> {
pub(crate) fn try_send(&mut self, val: T) -> Result<RetryPromise<T, U>, T> {
let (tx, rx) = oneshot::channel();
self.inner
- .send(Envelope(Some((val, Callback::Retry(tx)))))
+ .send(Envelope(Some((val, Callback::Retry(Some(tx))))))
.map(move |_| rx)
.map_err(|mut e| (e.0).0.take().expect("envelope not dropped").0)
}
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -139,7 +139,7 @@ impl<T, U> UnboundedSender<T, U> {
pub(crate) fn send(&mut self, val: T) -> Result<Promise<U>, T> {
let (tx, rx) = oneshot::channel();
self.inner
- .send(Envelope(Some((val, Callback::NoRetry(tx)))))
+ .send(Envelope(Some((val, Callback::NoRetry(Some(tx))))))
.map(move |_| rx)
.map_err(|mut e| (e.0).0.take().expect("envelope not dropped").0)
}
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -215,33 +215,59 @@ impl<T, U> Drop for Envelope<T, U> {
pub(crate) enum Callback<T, U> {
#[allow(unused)]
- Retry(oneshot::Sender<Result<U, (crate::Error, Option<T>)>>),
- NoRetry(oneshot::Sender<Result<U, crate::Error>>),
+ Retry(Option<oneshot::Sender<Result<U, (crate::Error, Option<T>)>>>),
+ NoRetry(Option<oneshot::Sender<Result<U, crate::Error>>>),
+}
+
+impl<T, U> Drop for Callback<T, U> {
+ fn drop(&mut self) {
+ // FIXME(nox): What errors do we want here?
+ let error = crate::Error::new_user_dispatch_gone().with(if std::thread::panicking() {
+ "user code panicked"
+ } else {
+ "runtime dropped the dispatch task"
+ });
+
+ match self {
+ Callback::Retry(tx) => {
+ if let Some(tx) = tx.take() {
+ let _ = tx.send(Err((error, None)));
+ }
+ }
+ Callback::NoRetry(tx) => {
+ if let Some(tx) = tx.take() {
+ let _ = tx.send(Err(error));
+ }
+ }
+ }
+ }
}
impl<T, U> Callback<T, U> {
#[cfg(feature = "http2")]
pub(crate) fn is_canceled(&self) -> bool {
match *self {
- Callback::Retry(ref tx) => tx.is_closed(),
- Callback::NoRetry(ref tx) => tx.is_closed(),
+ Callback::Retry(Some(ref tx)) => tx.is_closed(),
+ Callback::NoRetry(Some(ref tx)) => tx.is_closed(),
+ _ => unreachable!(),
}
}
pub(crate) fn poll_canceled(&mut self, cx: &mut task::Context<'_>) -> Poll<()> {
match *self {
- Callback::Retry(ref mut tx) => tx.poll_closed(cx),
- Callback::NoRetry(ref mut tx) => tx.poll_closed(cx),
+ Callback::Retry(Some(ref mut tx)) => tx.poll_closed(cx),
+ Callback::NoRetry(Some(ref mut tx)) => tx.poll_closed(cx),
+ _ => unreachable!(),
}
}
- pub(crate) fn send(self, val: Result<U, (crate::Error, Option<T>)>) {
+ pub(crate) fn send(mut self, val: Result<U, (crate::Error, Option<T>)>) {
match self {
- Callback::Retry(tx) => {
- let _ = tx.send(val);
+ Callback::Retry(ref mut tx) => {
+ let _ = tx.take().unwrap().send(val);
}
- Callback::NoRetry(tx) => {
- let _ = tx.send(val.map_err(|e| e.0));
+ Callback::NoRetry(ref mut tx) => {
+ let _ = tx.take().unwrap().send(val.map_err(|e| e.0));
}
}
}
diff --git a/src/error.rs b/src/error.rs
--- a/src/error.rs
+++ b/src/error.rs
@@ -110,6 +110,10 @@ pub(super) enum User {
#[cfg(feature = "http1")]
ManualUpgrade,
+ /// The dispatch task is gone.
+ #[cfg(feature = "client")]
+ DispatchGone,
+
/// User aborted in an FFI callback.
#[cfg(feature = "ffi")]
AbortedByCallback,
diff --git a/src/error.rs b/src/error.rs
--- a/src/error.rs
+++ b/src/error.rs
@@ -314,6 +318,11 @@ impl Error {
Error::new_user(User::AbortedByCallback)
}
+ #[cfg(feature = "client")]
+ pub(super) fn new_user_dispatch_gone() -> Error {
+ Error::new(Kind::User(User::DispatchGone))
+ }
+
#[cfg(feature = "http2")]
pub(super) fn new_h2(cause: ::h2::Error) -> Error {
if cause.is_io() {
diff --git a/src/error.rs b/src/error.rs
--- a/src/error.rs
+++ b/src/error.rs
@@ -390,6 +399,8 @@ impl Error {
Kind::User(User::NoUpgrade) => "no upgrade available",
#[cfg(feature = "http1")]
Kind::User(User::ManualUpgrade) => "upgrade expected but low level API in use",
+ #[cfg(feature = "client")]
+ Kind::User(User::DispatchGone) => "dispatch task is gone",
#[cfg(feature = "ffi")]
Kind::User(User::AbortedByCallback) => "operation aborted by an application callback",
}
diff --git a/src/ext.rs b/src/ext.rs
--- a/src/ext.rs
+++ b/src/ext.rs
@@ -40,6 +40,7 @@ impl Protocol {
self.inner.as_str()
}
+ #[cfg(feature = "server")]
pub(crate) fn from_inner(inner: h2::ext::Protocol) -> Self {
Self { inner }
}
|
Yea, we can do better. We probably can't completely remove the panic, since the `Canceled` error is part of the oneshot contract. But we can improve two the existing cases that _can_ happen, to try to eliminate it so that panic never _can_ happen.
- The runtime is dropped, killing the dispatch task.
- The dispatch task panics, due to user code.
We can add a `Guard` to the `oneshot::Sender` (`Callback`) to make it send an appropriate `hyper::Error` in drop, if not consumed. If `std::thread::panicking()`, then we can send an error about the dispatch task having panicked. If not, we can send an error about an unexpected runtime dropping the task.
I'm seeing this panic in real code, it would be nice to avoid it somehow. Not sure how it happens.

|
2022-10-28T19:07:59Z
| 3,032
|
Consider removing "dispatch dropped without returning error"
**Is your feature request related to a problem? Please describe.**
Hyper panics with "dispatch dropped without returning error" if the background dispatch task goes away, either because the runtime was dropped or some user-provided code such as a HttpBody implementation panicked
**Describe the solution you'd like**
Hyper should catch the panic and return a proper error instead of panicking with "dispatch dropped without returning error".
Cc @seanmonstar
|
hyperium__hyper-3032
|
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2267,6 +2267,48 @@ mod conn {
done_tx.send(()).unwrap();
}
+ #[tokio::test]
+ async fn test_body_panics() {
+ let _ = pretty_env_logger::try_init();
+
+ let listener = TkTcpListener::bind(SocketAddr::from(([127, 0, 0, 1], 0)))
+ .await
+ .unwrap();
+ let addr = listener.local_addr().unwrap();
+
+ // spawn a server that reads but doesn't write
+ tokio::spawn(async move {
+ let sock = listener.accept().await.unwrap().0;
+ drain_til_eof(sock).await.expect("server read");
+ });
+
+ let io = tcp_connect(&addr).await.expect("tcp connect");
+
+ let (mut client, conn) = conn::http1::Builder::new()
+ .handshake(io)
+ .await
+ .expect("handshake");
+
+ tokio::spawn(async move {
+ conn.await.expect("client conn shouldn't error");
+ });
+
+ let req = Request::post("/a")
+ .body(http_body_util::BodyExt::map_frame::<_, bytes::Bytes>(
+ http_body_util::Full::<bytes::Bytes>::from("baguette"),
+ |_| panic!("oopsie"),
+ ))
+ .unwrap();
+
+ let error = client.send_request(req).await.unwrap_err();
+
+ assert!(error.is_user());
+ assert_eq!(
+ error.to_string(),
+ "dispatch task is gone: user code panicked"
+ );
+ }
+
async fn drain_til_eof<T: AsyncRead + Unpin>(mut sock: T) -> io::Result<()> {
let mut buf = [0u8; 1024];
loop {
|
hyperium/hyper
|
6fd696e10974f10b2c6b9d16393bbbfa21c2333f
|
[
"2971"
] |
0.3
|
0888623d3764e887706d4e38f82f0fb57c50bd1a
|
diff --git a/examples/echo.rs b/examples/echo.rs
--- a/examples/echo.rs
+++ b/examples/echo.rs
@@ -4,15 +4,16 @@ use std::net::SocketAddr;
use bytes::Bytes;
use http_body_util::{combinators::BoxBody, BodyExt, Empty, Full};
-use hyper::body::Body as _;
use hyper::server::conn::http1;
use hyper::service::service_fn;
-use hyper::{Method, Recv, Request, Response, StatusCode};
+use hyper::{body::Body, Method, Request, Response, StatusCode};
use tokio::net::TcpListener;
/// This is our service handler. It receives a Request, routes on its
/// path, and returns a Future of a Response.
-async fn echo(req: Request<Recv>) -> Result<Response<BoxBody<Bytes, hyper::Error>>, hyper::Error> {
+async fn echo(
+ req: Request<hyper::body::Incoming>,
+) -> Result<Response<BoxBody<Bytes, hyper::Error>>, hyper::Error> {
match (req.method(), req.uri().path()) {
// Serve some instructions at /
(&Method::GET, "/") => Ok(Response::new(full(
diff --git a/examples/hello.rs b/examples/hello.rs
--- a/examples/hello.rs
+++ b/examples/hello.rs
@@ -7,10 +7,10 @@ use bytes::Bytes;
use http_body_util::Full;
use hyper::server::conn::http1;
use hyper::service::service_fn;
-use hyper::{Recv, Request, Response};
+use hyper::{Request, Response};
use tokio::net::TcpListener;
-async fn hello(_: Request<Recv>) -> Result<Response<Full<Bytes>>, Infallible> {
+async fn hello(_: Request<hyper::body::Incoming>) -> Result<Response<Full<Bytes>>, Infallible> {
Ok(Response::new(Full::new(Bytes::from("Hello World!"))))
}
diff --git a/examples/http_proxy.rs b/examples/http_proxy.rs
--- a/examples/http_proxy.rs
+++ b/examples/http_proxy.rs
@@ -8,7 +8,7 @@ use hyper::client::conn::http1::Builder;
use hyper::server::conn::http1;
use hyper::service::service_fn;
use hyper::upgrade::Upgraded;
-use hyper::{Method, Recv, Request, Response};
+use hyper::{Method, Request, Response};
use tokio::net::{TcpListener, TcpStream};
diff --git a/examples/http_proxy.rs b/examples/http_proxy.rs
--- a/examples/http_proxy.rs
+++ b/examples/http_proxy.rs
@@ -43,7 +43,9 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
}
}
-async fn proxy(req: Request<Recv>) -> Result<Response<BoxBody<Bytes, hyper::Error>>, hyper::Error> {
+async fn proxy(
+ req: Request<hyper::body::Incoming>,
+) -> Result<Response<BoxBody<Bytes, hyper::Error>>, hyper::Error> {
println!("req: {:?}", req);
if Method::CONNECT == req.method() {
diff --git a/examples/multi_server.rs b/examples/multi_server.rs
--- a/examples/multi_server.rs
+++ b/examples/multi_server.rs
@@ -8,17 +8,17 @@ use futures_util::future::join;
use http_body_util::Full;
use hyper::server::conn::http1;
use hyper::service::service_fn;
-use hyper::{Recv, Request, Response};
+use hyper::{Request, Response};
use tokio::net::TcpListener;
static INDEX1: &[u8] = b"The 1st service!";
static INDEX2: &[u8] = b"The 2nd service!";
-async fn index1(_: Request<Recv>) -> Result<Response<Full<Bytes>>, hyper::Error> {
+async fn index1(_: Request<hyper::body::Incoming>) -> Result<Response<Full<Bytes>>, hyper::Error> {
Ok(Response::new(Full::new(Bytes::from(INDEX1))))
}
-async fn index2(_: Request<Recv>) -> Result<Response<Full<Bytes>>, hyper::Error> {
+async fn index2(_: Request<hyper::body::Incoming>) -> Result<Response<Full<Bytes>>, hyper::Error> {
Ok(Response::new(Full::new(Bytes::from(INDEX2))))
}
diff --git a/examples/params.rs b/examples/params.rs
--- a/examples/params.rs
+++ b/examples/params.rs
@@ -5,7 +5,7 @@ use bytes::Bytes;
use http_body_util::{combinators::BoxBody, BodyExt, Empty, Full};
use hyper::server::conn::http1;
use hyper::service::service_fn;
-use hyper::{Method, Recv, Request, Response, StatusCode};
+use hyper::{Method, Request, Response, StatusCode};
use tokio::net::TcpListener;
use std::collections::HashMap;
diff --git a/examples/params.rs b/examples/params.rs
--- a/examples/params.rs
+++ b/examples/params.rs
@@ -19,7 +19,7 @@ static NOTNUMERIC: &[u8] = b"Number field is not numeric";
// Using service_fn, we can turn this function into a `Service`.
async fn param_example(
- req: Request<Recv>,
+ req: Request<hyper::body::Incoming>,
) -> Result<Response<BoxBody<Bytes, Infallible>>, hyper::Error> {
match (req.method(), req.uri().path()) {
(&Method::GET, "/") | (&Method::GET, "/post") => Ok(Response::new(full(INDEX))),
diff --git a/examples/send_file.rs b/examples/send_file.rs
--- a/examples/send_file.rs
+++ b/examples/send_file.rs
@@ -8,7 +8,7 @@ use tokio::net::TcpListener;
use bytes::Bytes;
use http_body_util::Full;
use hyper::service::service_fn;
-use hyper::{Method, Recv, Request, Response, Result, StatusCode};
+use hyper::{Method, Request, Response, Result, StatusCode};
static INDEX: &str = "examples/send_file_index.html";
static NOTFOUND: &[u8] = b"Not Found";
diff --git a/examples/send_file.rs b/examples/send_file.rs
--- a/examples/send_file.rs
+++ b/examples/send_file.rs
@@ -36,7 +36,7 @@ async fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
}
}
-async fn response_examples(req: Request<Recv>) -> Result<Response<Full<Bytes>>> {
+async fn response_examples(req: Request<hyper::body::Incoming>) -> Result<Response<Full<Bytes>>> {
match (req.method(), req.uri().path()) {
(&Method::GET, "/") | (&Method::GET, "/index.html") => simple_file_send(INDEX).await,
(&Method::GET, "/no_file.html") => {
diff --git a/examples/service_struct_impl.rs b/examples/service_struct_impl.rs
--- a/examples/service_struct_impl.rs
+++ b/examples/service_struct_impl.rs
@@ -2,7 +2,7 @@ use bytes::Bytes;
use http_body_util::Full;
use hyper::server::conn::http1;
use hyper::service::Service;
-use hyper::{Recv, Request, Response};
+use hyper::{body::Incoming as IncomingBody, Request, Response};
use tokio::net::TcpListener;
use std::future::Future;
diff --git a/examples/service_struct_impl.rs b/examples/service_struct_impl.rs
--- a/examples/service_struct_impl.rs
+++ b/examples/service_struct_impl.rs
@@ -36,12 +36,12 @@ struct Svc {
counter: Counter,
}
-impl Service<Request<Recv>> for Svc {
+impl Service<Request<IncomingBody>> for Svc {
type Response = Response<Full<Bytes>>;
type Error = hyper::Error;
type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send>>;
- fn call(&mut self, req: Request<Recv>) -> Self::Future {
+ fn call(&mut self, req: Request<IncomingBody>) -> Self::Future {
fn mk_response(s: String) -> Result<Response<Full<Bytes>>, hyper::Error> {
Ok(Response::builder().body(Full::new(Bytes::from(s))).unwrap())
}
diff --git a/examples/upgrades.rs b/examples/upgrades.rs
--- a/examples/upgrades.rs
+++ b/examples/upgrades.rs
@@ -14,7 +14,7 @@ use hyper::header::{HeaderValue, UPGRADE};
use hyper::server::conn::http1;
use hyper::service::service_fn;
use hyper::upgrade::Upgraded;
-use hyper::{Recv, Request, Response, StatusCode};
+use hyper::{Request, Response, StatusCode};
// A simple type alias so as to DRY.
type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>;
diff --git a/examples/upgrades.rs b/examples/upgrades.rs
--- a/examples/upgrades.rs
+++ b/examples/upgrades.rs
@@ -38,7 +38,7 @@ async fn server_upgraded_io(mut upgraded: Upgraded) -> Result<()> {
}
/// Our server HTTP handler to initiate HTTP upgrades.
-async fn server_upgrade(mut req: Request<Recv>) -> Result<Response<Empty<Bytes>>> {
+async fn server_upgrade(mut req: Request<hyper::body::Incoming>) -> Result<Response<Empty<Bytes>>> {
let mut res = Response::new(Empty::new());
// Send a 400 to any request that doesn't have
diff --git a/examples/web_api.rs b/examples/web_api.rs
--- a/examples/web_api.rs
+++ b/examples/web_api.rs
@@ -6,7 +6,7 @@ use bytes::{Buf, Bytes};
use http_body_util::{BodyExt, Full};
use hyper::server::conn::http1;
use hyper::service::service_fn;
-use hyper::{header, Method, Recv, Request, Response, StatusCode};
+use hyper::{body::Incoming as IncomingBody, header, Method, Request, Response, StatusCode};
use tokio::net::{TcpListener, TcpStream};
type GenericError = Box<dyn std::error::Error + Send + Sync>;
diff --git a/examples/web_api.rs b/examples/web_api.rs
--- a/examples/web_api.rs
+++ b/examples/web_api.rs
@@ -46,7 +46,7 @@ async fn client_request_response() -> Result<Response<BoxBody>> {
Ok(Response::new(res_body))
}
-async fn api_post_response(req: Request<Recv>) -> Result<Response<BoxBody>> {
+async fn api_post_response(req: Request<IncomingBody>) -> Result<Response<BoxBody>> {
// Aggregate the body...
let whole_body = req.collect().await?.aggregate();
// Decode as JSON...
diff --git a/src/body/body.rs b/src/body/incoming.rs
--- a/src/body/body.rs
+++ b/src/body/incoming.rs
@@ -18,7 +18,7 @@ type TrailersSender = oneshot::Sender<HeaderMap>;
/// A stream of `Bytes`, used when receiving bodies from the network.
#[must_use = "streams do nothing unless polled"]
-pub struct Recv {
+pub struct Incoming {
kind: Kind,
}
diff --git a/src/body/body.rs b/src/body/incoming.rs
--- a/src/body/body.rs
+++ b/src/body/incoming.rs
@@ -65,17 +65,17 @@ pub(crate) struct Sender {
const WANT_PENDING: usize = 1;
const WANT_READY: usize = 2;
-impl Recv {
+impl Incoming {
/// Create a `Body` stream with an associated sender half.
///
/// Useful when wanting to stream chunks from another thread.
#[inline]
#[allow(unused)]
- pub(crate) fn channel() -> (Sender, Recv) {
+ pub(crate) fn channel() -> (Sender, Incoming) {
Self::new_channel(DecodedLength::CHUNKED, /*wanter =*/ false)
}
- pub(crate) fn new_channel(content_length: DecodedLength, wanter: bool) -> (Sender, Recv) {
+ pub(crate) fn new_channel(content_length: DecodedLength, wanter: bool) -> (Sender, Incoming) {
let (data_tx, data_rx) = mpsc::channel(0);
let (trailers_tx, trailers_rx) = oneshot::channel();
diff --git a/src/body/body.rs b/src/body/incoming.rs
--- a/src/body/body.rs
+++ b/src/body/incoming.rs
@@ -90,7 +90,7 @@ impl Recv {
data_tx,
trailers_tx: Some(trailers_tx),
};
- let rx = Recv::new(Kind::Chan {
+ let rx = Incoming::new(Kind::Chan {
content_length,
want_tx,
data_rx,
diff --git a/src/body/body.rs b/src/body/incoming.rs
--- a/src/body/body.rs
+++ b/src/body/incoming.rs
@@ -100,18 +100,18 @@ impl Recv {
(tx, rx)
}
- fn new(kind: Kind) -> Recv {
- Recv { kind }
+ fn new(kind: Kind) -> Incoming {
+ Incoming { kind }
}
#[allow(dead_code)]
- pub(crate) fn empty() -> Recv {
- Recv::new(Kind::Empty)
+ pub(crate) fn empty() -> Incoming {
+ Incoming::new(Kind::Empty)
}
#[cfg(feature = "ffi")]
- pub(crate) fn ffi() -> Recv {
- Recv::new(Kind::Ffi(crate::ffi::UserBody::new()))
+ pub(crate) fn ffi() -> Incoming {
+ Incoming::new(Kind::Ffi(crate::ffi::UserBody::new()))
}
#[cfg(all(feature = "http2", any(feature = "client", feature = "server")))]
diff --git a/src/body/body.rs b/src/body/incoming.rs
--- a/src/body/body.rs
+++ b/src/body/incoming.rs
@@ -125,7 +125,7 @@ impl Recv {
if !content_length.is_exact() && recv.is_end_stream() {
content_length = DecodedLength::ZERO;
}
- let body = Recv::new(Kind::H2 {
+ let body = Incoming::new(Kind::H2 {
data_done: false,
ping,
content_length,
diff --git a/src/body/body.rs b/src/body/incoming.rs
--- a/src/body/body.rs
+++ b/src/body/incoming.rs
@@ -151,7 +151,7 @@ impl Recv {
}
}
-impl Body for Recv {
+impl Body for Incoming {
type Data = Bytes;
type Error = crate::Error;
diff --git a/src/body/body.rs b/src/body/incoming.rs
--- a/src/body/body.rs
+++ b/src/body/incoming.rs
@@ -259,7 +259,7 @@ impl Body for Recv {
}
}
-impl fmt::Debug for Recv {
+impl fmt::Debug for Incoming {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
#[derive(Debug)]
struct Streaming;
diff --git a/src/body/mod.rs b/src/body/mod.rs
--- a/src/body/mod.rs
+++ b/src/body/mod.rs
@@ -10,28 +10,28 @@
//! - **The [`Body`](Body) trait** describes all possible bodies.
//! hyper allows any body type that implements `Body`, allowing
//! applications to have fine-grained control over their streaming.
-//! - **The [`Recv`](Recv) concrete type**, which is an implementation of
+//! - **The [`Incoming`](Incoming) concrete type**, which is an implementation of
//! `Body`, and returned by hyper as a "receive stream" (so, for server
-//! requests and client responses). It is also a decent default implementation
-//! if you don't have very custom needs of your send streams.
+//! requests and client responses).
pub use bytes::{Buf, Bytes};
pub use http_body::Body;
pub use http_body::Frame;
pub use http_body::SizeHint;
-pub use self::body::Recv;
+pub use self::incoming::Incoming;
+
#[cfg(feature = "http1")]
-pub(crate) use self::body::Sender;
+pub(crate) use self::incoming::Sender;
pub(crate) use self::length::DecodedLength;
-mod body;
+mod incoming;
mod length;
fn _assert_send_sync() {
fn _assert_send<T: Send>() {}
fn _assert_sync<T: Sync>() {}
- _assert_send::<Recv>();
- _assert_sync::<Recv>();
+ _assert_send::<Incoming>();
+ _assert_sync::<Incoming>();
}
diff --git a/src/client/conn/http1.rs b/src/client/conn/http1.rs
--- a/src/client/conn/http1.rs
+++ b/src/client/conn/http1.rs
@@ -9,8 +9,7 @@ use http::{Request, Response};
use httparse::ParserConfig;
use tokio::io::{AsyncRead, AsyncWrite};
-use crate::Recv;
-use crate::body::Body;
+use crate::body::{Body, Incoming as IncomingBody};
use super::super::dispatch;
use crate::common::{
exec::{BoxSendFuture, Exec},
diff --git a/src/client/conn/http1.rs b/src/client/conn/http1.rs
--- a/src/client/conn/http1.rs
+++ b/src/client/conn/http1.rs
@@ -25,7 +24,7 @@ type Dispatcher<T, B> =
/// The sender side of an established connection.
pub struct SendRequest<B> {
- dispatch: dispatch::Sender<Request<B>, Response<Recv>>,
+ dispatch: dispatch::Sender<Request<B>, Response<IncomingBody>>,
}
/// Deconstructed parts of a `Connection`.
diff --git a/src/client/conn/http1.rs b/src/client/conn/http1.rs
--- a/src/client/conn/http1.rs
+++ b/src/client/conn/http1.rs
@@ -189,7 +188,7 @@ where
pub fn send_request(
&mut self,
req: Request<B>,
- ) -> impl Future<Output = crate::Result<Response<Recv>>> {
+ ) -> impl Future<Output = crate::Result<Response<IncomingBody>>> {
let sent = self.dispatch.send(req);
async move {
diff --git a/src/client/conn/http2.rs b/src/client/conn/http2.rs
--- a/src/client/conn/http2.rs
+++ b/src/client/conn/http2.rs
@@ -10,7 +10,7 @@ use http::{Request, Response};
use tokio::io::{AsyncRead, AsyncWrite};
use super::super::dispatch;
-use crate::body::Body;
+use crate::body::{Body, Incoming as IncomingBody};
use crate::common::time::Time;
use crate::common::{
exec::{BoxSendFuture, Exec},
diff --git a/src/client/conn/http2.rs b/src/client/conn/http2.rs
--- a/src/client/conn/http2.rs
+++ b/src/client/conn/http2.rs
@@ -18,11 +18,10 @@ use crate::common::{
};
use crate::proto;
use crate::rt::{Executor, Timer};
-use crate::Recv;
/// The sender side of an established connection.
pub struct SendRequest<B> {
- dispatch: dispatch::UnboundedSender<Request<B>, Response<Recv>>,
+ dispatch: dispatch::UnboundedSender<Request<B>, Response<IncomingBody>>,
}
/// A future that processes all HTTP state for the IO object.
diff --git a/src/client/conn/http2.rs b/src/client/conn/http2.rs
--- a/src/client/conn/http2.rs
+++ b/src/client/conn/http2.rs
@@ -128,7 +127,7 @@ where
pub fn send_request(
&mut self,
req: Request<B>,
- ) -> impl Future<Output = crate::Result<Response<Recv>>> {
+ ) -> impl Future<Output = crate::Result<Response<IncomingBody>>> {
let sent = self.dispatch.send(req);
async move {
diff --git a/src/ffi/body.rs b/src/ffi/body.rs
--- a/src/ffi/body.rs
+++ b/src/ffi/body.rs
@@ -8,10 +8,10 @@ use libc::{c_int, size_t};
use super::task::{hyper_context, hyper_task, hyper_task_return_type, AsTaskType};
use super::{UserDataPointer, HYPER_ITER_CONTINUE};
-use crate::body::{Bytes, Frame, Recv};
+use crate::body::{Bytes, Frame, Incoming as IncomingBody};
/// A streaming HTTP body.
-pub struct hyper_body(pub(super) Recv);
+pub struct hyper_body(pub(super) IncomingBody);
/// A buffer of bytes that is sent or received on a `hyper_body`.
pub struct hyper_buf(pub(crate) Bytes);
diff --git a/src/ffi/body.rs b/src/ffi/body.rs
--- a/src/ffi/body.rs
+++ b/src/ffi/body.rs
@@ -33,7 +33,7 @@ ffi_fn! {
///
/// If not configured, this body acts as an empty payload.
fn hyper_body_new() -> *mut hyper_body {
- Box::into_raw(Box::new(hyper_body(Recv::ffi())))
+ Box::into_raw(Box::new(hyper_body(IncomingBody::ffi())))
} ?= ptr::null_mut()
}
diff --git a/src/ffi/client.rs b/src/ffi/client.rs
--- a/src/ffi/client.rs
+++ b/src/ffi/client.rs
@@ -32,9 +32,9 @@ pub struct hyper_clientconn {
enum Tx {
#[cfg(feature = "http1")]
- Http1(conn::http1::SendRequest<crate::Recv>),
+ Http1(conn::http1::SendRequest<crate::body::Incoming>),
#[cfg(feature = "http2")]
- Http2(conn::http2::SendRequest<crate::Recv>),
+ Http2(conn::http2::SendRequest<crate::body::Incoming>),
}
// ===== impl hyper_clientconn =====
diff --git a/src/ffi/client.rs b/src/ffi/client.rs
--- a/src/ffi/client.rs
+++ b/src/ffi/client.rs
@@ -57,7 +57,7 @@ ffi_fn! {
if options.http2 {
return conn::http2::Builder::new()
.executor(options.exec.clone())
- .handshake::<_, crate::Recv>(io)
+ .handshake::<_, crate::body::Incoming>(io)
.await
.map(|(tx, conn)| {
options.exec.execute(Box::pin(async move {
diff --git a/src/ffi/client.rs b/src/ffi/client.rs
--- a/src/ffi/client.rs
+++ b/src/ffi/client.rs
@@ -73,7 +73,7 @@ ffi_fn! {
.http1_allow_obsolete_multiline_headers_in_responses(options.http1_allow_obsolete_multiline_headers_in_responses)
.http1_preserve_header_case(options.http1_preserve_header_case)
.http1_preserve_header_order(options.http1_preserve_header_order)
- .handshake::<_, crate::Recv>(io)
+ .handshake::<_, crate::body::Incoming>(io)
.await
.map(|(tx, conn)| {
options.exec.execute(Box::pin(async move {
diff --git a/src/ffi/http_types.rs b/src/ffi/http_types.rs
--- a/src/ffi/http_types.rs
+++ b/src/ffi/http_types.rs
@@ -6,15 +6,16 @@ use super::body::hyper_body;
use super::error::hyper_code;
use super::task::{hyper_task_return_type, AsTaskType};
use super::{UserDataPointer, HYPER_ITER_CONTINUE};
+use crate::body::Incoming as IncomingBody;
use crate::ext::{HeaderCaseMap, OriginalHeaderOrder, ReasonPhrase};
use crate::header::{HeaderName, HeaderValue};
-use crate::{HeaderMap, Method, Recv, Request, Response, Uri};
+use crate::{HeaderMap, Method, Request, Response, Uri};
/// An HTTP request.
-pub struct hyper_request(pub(super) Request<Recv>);
+pub struct hyper_request(pub(super) Request<IncomingBody>);
/// An HTTP response.
-pub struct hyper_response(pub(super) Response<Recv>);
+pub struct hyper_response(pub(super) Response<IncomingBody>);
/// An HTTP header map.
///
diff --git a/src/ffi/http_types.rs b/src/ffi/http_types.rs
--- a/src/ffi/http_types.rs
+++ b/src/ffi/http_types.rs
@@ -37,7 +38,7 @@ type hyper_request_on_informational_callback = extern "C" fn(*mut c_void, *mut h
ffi_fn! {
/// Construct a new HTTP request.
fn hyper_request_new() -> *mut hyper_request {
- Box::into_raw(Box::new(hyper_request(Request::new(Recv::empty()))))
+ Box::into_raw(Box::new(hyper_request(Request::new(IncomingBody::empty()))))
} ?= std::ptr::null_mut()
}
diff --git a/src/ffi/http_types.rs b/src/ffi/http_types.rs
--- a/src/ffi/http_types.rs
+++ b/src/ffi/http_types.rs
@@ -312,13 +313,13 @@ ffi_fn! {
///
/// It is safe to free the response even after taking ownership of its body.
fn hyper_response_body(resp: *mut hyper_response) -> *mut hyper_body {
- let body = std::mem::replace(non_null!(&mut *resp ?= std::ptr::null_mut()).0.body_mut(), crate::Recv::empty());
+ let body = std::mem::replace(non_null!(&mut *resp ?= std::ptr::null_mut()).0.body_mut(), IncomingBody::empty());
Box::into_raw(Box::new(hyper_body(body)))
} ?= std::ptr::null_mut()
}
impl hyper_response {
- pub(super) fn wrap(mut resp: Response<Recv>) -> hyper_response {
+ pub(super) fn wrap(mut resp: Response<IncomingBody>) -> hyper_response {
let headers = std::mem::take(resp.headers_mut());
let orig_casing = resp
.extensions_mut()
diff --git a/src/ffi/http_types.rs b/src/ffi/http_types.rs
--- a/src/ffi/http_types.rs
+++ b/src/ffi/http_types.rs
@@ -509,7 +510,7 @@ unsafe fn raw_name_value(
// ===== impl OnInformational =====
impl OnInformational {
- pub(crate) fn call(&mut self, resp: Response<Recv>) {
+ pub(crate) fn call(&mut self, resp: Response<IncomingBody>) {
let mut resp = hyper_response::wrap(resp);
(self.func)(self.data.0, &mut resp);
}
diff --git a/src/lib.rs b/src/lib.rs
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -63,7 +63,6 @@ pub use crate::http::{header, Method, Request, Response, StatusCode, Uri, Versio
#[doc(no_inline)]
pub use crate::http::HeaderMap;
-pub use crate::body::Recv;
pub use crate::error::{Error, Result};
#[macro_use]
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -6,7 +6,7 @@ use tokio::io::{AsyncRead, AsyncWrite};
use tracing::{debug, trace};
use super::{Http1Transaction, Wants};
-use crate::body::{Body, DecodedLength, Recv};
+use crate::body::{Body, DecodedLength, Incoming as IncomingBody};
use crate::common::{task, Future, Pin, Poll, Unpin};
use crate::proto::{BodyLength, Conn, Dispatched, MessageHead, RequestHead};
use crate::upgrade::OnUpgrade;
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -28,7 +28,7 @@ pub(crate) trait Dispatch {
self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
) -> Poll<Option<Result<(Self::PollItem, Self::PollBody), Self::PollError>>>;
- fn recv_msg(&mut self, msg: crate::Result<(Self::RecvItem, Recv)>) -> crate::Result<()>;
+ fn recv_msg(&mut self, msg: crate::Result<(Self::RecvItem, IncomingBody)>) -> crate::Result<()>;
fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<Result<(), ()>>;
fn should_poll(&self) -> bool;
}
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -45,14 +45,14 @@ cfg_server! {
cfg_client! {
pin_project_lite::pin_project! {
pub(crate) struct Client<B> {
- callback: Option<crate::client::dispatch::Callback<Request<B>, http::Response<Recv>>>,
+ callback: Option<crate::client::dispatch::Callback<Request<B>, http::Response<IncomingBody>>>,
#[pin]
rx: ClientRx<B>,
rx_closed: bool,
}
}
- type ClientRx<B> = crate::client::dispatch::Receiver<Request<B>, http::Response<Recv>>;
+ type ClientRx<B> = crate::client::dispatch::Receiver<Request<B>, http::Response<IncomingBody>>;
}
impl<D, Bs, I, T> Dispatcher<D, Bs, I, T>
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -247,9 +247,9 @@ where
match ready!(self.conn.poll_read_head(cx)) {
Some(Ok((mut head, body_len, wants))) => {
let body = match body_len {
- DecodedLength::ZERO => Recv::empty(),
+ DecodedLength::ZERO => IncomingBody::empty(),
other => {
- let (tx, rx) = Recv::new_channel(other, wants.contains(Wants::EXPECT));
+ let (tx, rx) = IncomingBody::new_channel(other, wants.contains(Wants::EXPECT));
self.body_tx = Some(tx);
rx
}
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -470,9 +470,9 @@ cfg_server! {
// Service is never pinned
impl<S: HttpService<B>, B> Unpin for Server<S, B> {}
- impl<S, Bs> Dispatch for Server<S, Recv>
+ impl<S, Bs> Dispatch for Server<S, IncomingBody>
where
- S: HttpService<Recv, ResBody = Bs>,
+ S: HttpService<IncomingBody, ResBody = Bs>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
Bs: Body,
{
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -505,7 +505,7 @@ cfg_server! {
ret
}
- fn recv_msg(&mut self, msg: crate::Result<(Self::RecvItem, Recv)>) -> crate::Result<()> {
+ fn recv_msg(&mut self, msg: crate::Result<(Self::RecvItem, IncomingBody)>) -> crate::Result<()> {
let (msg, body) = msg?;
let mut req = Request::new(body);
*req.method_mut() = msg.subject.0;
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -591,7 +591,7 @@ cfg_client! {
}
}
- fn recv_msg(&mut self, msg: crate::Result<(Self::RecvItem, Recv)>) -> crate::Result<()> {
+ fn recv_msg(&mut self, msg: crate::Result<(Self::RecvItem, IncomingBody)>) -> crate::Result<()> {
match msg {
Ok((msg, body)) => {
if let Some(cb) = self.callback.take() {
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1085,7 +1085,7 @@ impl Http1Transaction for Client {
#[cfg(feature = "ffi")]
if head.subject.is_informational() {
if let Some(callback) = ctx.on_informational {
- callback.call(head.into_response(crate::Recv::empty()));
+ callback.call(head.into_response(crate::body::Incoming::empty()));
}
}
diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs
--- a/src/proto/h2/client.rs
+++ b/src/proto/h2/client.rs
@@ -11,7 +11,7 @@ use tokio::io::{AsyncRead, AsyncWrite};
use tracing::{debug, trace, warn};
use super::{ping, H2Upgraded, PipeToSendStream, SendBuf};
-use crate::body::Body;
+use crate::body::{Body, Incoming as IncomingBody};
use crate::common::time::Time;
use crate::common::{exec::Exec, task, Future, Never, Pin, Poll};
use crate::ext::Protocol;
diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs
--- a/src/proto/h2/client.rs
+++ b/src/proto/h2/client.rs
@@ -19,9 +19,9 @@ use crate::headers;
use crate::proto::h2::UpgradedSendStream;
use crate::proto::Dispatched;
use crate::upgrade::Upgraded;
-use crate::{Recv, Request, Response};
+use crate::{Request, Response};
-type ClientRx<B> = crate::client::dispatch::Receiver<Request<B>, Response<Recv>>;
+type ClientRx<B> = crate::client::dispatch::Receiver<Request<B>, Response<IncomingBody>>;
///// An mpsc channel is used to help notify the `Connection` task when *all*
///// other handles to it have been dropped, so that it can shutdown.
diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs
--- a/src/proto/h2/client.rs
+++ b/src/proto/h2/client.rs
@@ -327,7 +327,7 @@ where
));
}
let (parts, recv_stream) = res.into_parts();
- let mut res = Response::from_parts(parts, Recv::empty());
+ let mut res = Response::from_parts(parts, IncomingBody::empty());
let (pending, on_upgrade) = crate::upgrade::pending();
let io = H2Upgraded {
diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs
--- a/src/proto/h2/client.rs
+++ b/src/proto/h2/client.rs
@@ -345,7 +345,7 @@ where
} else {
let res = res.map(|stream| {
let ping = ping.for_stream(&stream);
- crate::Recv::h2(stream, content_length.into(), ping)
+ IncomingBody::h2(stream, content_length.into(), ping)
});
Ok(res)
}
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -12,7 +12,7 @@ use tokio::io::{AsyncRead, AsyncWrite};
use tracing::{debug, trace, warn};
use super::{ping, PipeToSendStream, SendBuf};
-use crate::body::Body;
+use crate::body::{Body, Incoming as IncomingBody};
use crate::common::exec::ConnStreamExec;
use crate::common::time::Time;
use crate::common::{date, task, Future, Pin, Poll};
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -24,7 +24,7 @@ use crate::proto::Dispatched;
use crate::service::HttpService;
use crate::upgrade::{OnUpgrade, Pending, Upgraded};
-use crate::{Recv, Response};
+use crate::{Response};
// Our defaults are chosen for the "majority" case, which usually are not
// resource constrained, and so the spec default of 64kb can be too limiting
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -73,7 +73,7 @@ impl Default for Config {
pin_project! {
pub(crate) struct Server<T, S, B, E>
where
- S: HttpService<Recv>,
+ S: HttpService<IncomingBody>,
B: Body,
{
exec: E,
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -107,7 +107,7 @@ where
impl<T, S, B, E> Server<T, S, B, E>
where
T: AsyncRead + AsyncWrite + Unpin,
- S: HttpService<Recv, ResBody = B>,
+ S: HttpService<IncomingBody, ResBody = B>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
B: Body + 'static,
E: ConnStreamExec<S::Future, B>,
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -183,7 +183,7 @@ where
impl<T, S, B, E> Future for Server<T, S, B, E>
where
T: AsyncRead + AsyncWrite + Unpin,
- S: HttpService<Recv, ResBody = B>,
+ S: HttpService<IncomingBody, ResBody = B>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
B: Body + 'static,
E: ConnStreamExec<S::Future, B>,
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -238,7 +238,7 @@ where
exec: &mut E,
) -> Poll<crate::Result<()>>
where
- S: HttpService<Recv, ResBody = B>,
+ S: HttpService<IncomingBody, ResBody = B>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
E: ConnStreamExec<S::Future, B>,
{
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -265,7 +265,7 @@ where
(
Request::from_parts(
parts,
- crate::Recv::h2(stream, content_length.into(), ping),
+ IncomingBody::h2(stream, content_length.into(), ping),
),
None,
)
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -279,7 +279,7 @@ where
debug_assert!(parts.extensions.get::<OnUpgrade>().is_none());
parts.extensions.insert(upgrade);
(
- Request::from_parts(parts, crate::Recv::empty()),
+ Request::from_parts(parts, IncomingBody::empty()),
Some(ConnectParts {
pending,
ping,
diff --git a/src/server/conn/http1.rs b/src/server/conn/http1.rs
--- a/src/server/conn/http1.rs
+++ b/src/server/conn/http1.rs
@@ -8,14 +8,14 @@ use std::time::Duration;
use bytes::Bytes;
use tokio::io::{AsyncRead, AsyncWrite};
-use crate::body::{Body, Recv};
+use crate::body::{Body, Incoming as IncomingBody};
use crate::common::{task, Future, Pin, Poll, Unpin};
use crate::{common::time::Time, rt::Timer};
use crate::proto;
use crate::service::HttpService;
type Http1Dispatcher<T, B, S> =
- proto::h1::Dispatcher<proto::h1::dispatch::Server<S, Recv>, B, T, proto::ServerTransaction>;
+ proto::h1::Dispatcher<proto::h1::dispatch::Server<S, IncomingBody>, B, T, proto::ServerTransaction>;
pin_project_lite::pin_project! {
diff --git a/src/server/conn/http1.rs b/src/server/conn/http1.rs
--- a/src/server/conn/http1.rs
+++ b/src/server/conn/http1.rs
@@ -25,7 +25,7 @@ pin_project_lite::pin_project! {
#[must_use = "futures do nothing unless polled"]
pub struct Connection<T, S>
where
- S: HttpService<Recv>,
+ S: HttpService<IncomingBody>,
{
conn: Http1Dispatcher<T, S::ResBody, S>,
}
diff --git a/src/server/conn/http1.rs b/src/server/conn/http1.rs
--- a/src/server/conn/http1.rs
+++ b/src/server/conn/http1.rs
@@ -72,7 +72,7 @@ pub struct Parts<T, S> {
impl<I, S> fmt::Debug for Connection<I, S>
where
- S: HttpService<Recv>,
+ S: HttpService<IncomingBody>,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Connection").finish()
diff --git a/src/server/conn/http1.rs b/src/server/conn/http1.rs
--- a/src/server/conn/http1.rs
+++ b/src/server/conn/http1.rs
@@ -81,7 +81,7 @@ where
impl<I, B, S> Connection<I, S>
where
- S: HttpService<Recv, ResBody = B>,
+ S: HttpService<IncomingBody, ResBody = B>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
I: AsyncRead + AsyncWrite + Unpin,
B: Body + 'static,
diff --git a/src/server/conn/http1.rs b/src/server/conn/http1.rs
--- a/src/server/conn/http1.rs
+++ b/src/server/conn/http1.rs
@@ -171,7 +171,7 @@ where
impl<I, B, S> Future for Connection<I, S>
where
- S: HttpService<Recv, ResBody = B>,
+ S: HttpService<IncomingBody, ResBody = B>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
I: AsyncRead + AsyncWrite + Unpin + 'static,
B: Body + 'static,
diff --git a/src/server/conn/http1.rs b/src/server/conn/http1.rs
--- a/src/server/conn/http1.rs
+++ b/src/server/conn/http1.rs
@@ -337,14 +337,14 @@ impl Builder {
/// # Example
///
/// ```
- /// # use hyper::{Recv, Request, Response};
+ /// # use hyper::{body::Incoming, Request, Response};
/// # use hyper::service::Service;
/// # use hyper::server::conn::http1::Builder;
/// # use tokio::io::{AsyncRead, AsyncWrite};
/// # async fn run<I, S>(some_io: I, some_service: S)
/// # where
/// # I: AsyncRead + AsyncWrite + Unpin + Send + 'static,
- /// # S: Service<hyper::Request<Recv>, Response=hyper::Response<Recv>> + Send + 'static,
+ /// # S: Service<hyper::Request<Incoming>, Response=hyper::Response<Incoming>> + Send + 'static,
/// # S::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
/// # S::Future: Send,
/// # {
diff --git a/src/server/conn/http1.rs b/src/server/conn/http1.rs
--- a/src/server/conn/http1.rs
+++ b/src/server/conn/http1.rs
@@ -359,7 +359,7 @@ impl Builder {
/// ```
pub fn serve_connection<I, S>(&self, io: I, service: S) -> Connection<I, S>
where
- S: HttpService<Recv>,
+ S: HttpService<IncomingBody>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
S::ResBody: 'static,
<S::ResBody as Body>::Error: Into<Box<dyn StdError + Send + Sync>>,
diff --git a/src/server/conn/http1.rs b/src/server/conn/http1.rs
--- a/src/server/conn/http1.rs
+++ b/src/server/conn/http1.rs
@@ -413,14 +413,14 @@ mod upgrades {
#[allow(missing_debug_implementations)]
pub struct UpgradeableConnection<T, S>
where
- S: HttpService<Recv>,
+ S: HttpService<IncomingBody>,
{
pub(super) inner: Option<Connection<T, S>>,
}
impl<I, B, S> UpgradeableConnection<I, S>
where
- S: HttpService<Recv, ResBody = B>,
+ S: HttpService<IncomingBody, ResBody = B>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
I: AsyncRead + AsyncWrite + Unpin,
B: Body + 'static,
diff --git a/src/server/conn/http1.rs b/src/server/conn/http1.rs
--- a/src/server/conn/http1.rs
+++ b/src/server/conn/http1.rs
@@ -437,7 +437,7 @@ mod upgrades {
impl<I, B, S> Future for UpgradeableConnection<I, S>
where
- S: HttpService<Recv, ResBody = B>,
+ S: HttpService<IncomingBody, ResBody = B>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
I: AsyncRead + AsyncWrite + Unpin + Send + 'static,
B: Body + 'static,
diff --git a/src/server/conn/http2.rs b/src/server/conn/http2.rs
--- a/src/server/conn/http2.rs
+++ b/src/server/conn/http2.rs
@@ -8,7 +8,7 @@ use std::time::Duration;
use pin_project_lite::pin_project;
use tokio::io::{AsyncRead, AsyncWrite};
-use crate::body::{Body, Recv};
+use crate::body::{Body, Incoming as IncomingBody};
use crate::common::exec::{ConnStreamExec};
use crate::common::{task, Future, Pin, Poll, Unpin};
use crate::{common::time::Time, rt::Timer};
diff --git a/src/server/conn/http2.rs b/src/server/conn/http2.rs
--- a/src/server/conn/http2.rs
+++ b/src/server/conn/http2.rs
@@ -22,7 +22,7 @@ pin_project! {
#[must_use = "futures do nothing unless polled"]
pub struct Connection<T, S, E>
where
- S: HttpService<Recv>,
+ S: HttpService<IncomingBody>,
{
conn: proto::h2::Server<T, S, S::ResBody, E>,
}
diff --git a/src/server/conn/http2.rs b/src/server/conn/http2.rs
--- a/src/server/conn/http2.rs
+++ b/src/server/conn/http2.rs
@@ -40,7 +40,7 @@ pub struct Builder<E> {
impl<I, S, E> fmt::Debug for Connection<I, S, E>
where
- S: HttpService<Recv>,
+ S: HttpService<IncomingBody>,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Connection").finish()
diff --git a/src/server/conn/http2.rs b/src/server/conn/http2.rs
--- a/src/server/conn/http2.rs
+++ b/src/server/conn/http2.rs
@@ -49,7 +49,7 @@ where
impl<I, B, S, E> Connection<I, S, E>
where
- S: HttpService<Recv, ResBody = B>,
+ S: HttpService<IncomingBody, ResBody = B>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
I: AsyncRead + AsyncWrite + Unpin,
B: Body + 'static,
diff --git a/src/server/conn/http2.rs b/src/server/conn/http2.rs
--- a/src/server/conn/http2.rs
+++ b/src/server/conn/http2.rs
@@ -73,7 +73,7 @@ where
impl<I, B, S, E> Future for Connection<I, S, E>
where
- S: HttpService<Recv, ResBody = B>,
+ S: HttpService<IncomingBody, ResBody = B>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
I: AsyncRead + AsyncWrite + Unpin + 'static,
B: Body + 'static,
diff --git a/src/server/conn/http2.rs b/src/server/conn/http2.rs
--- a/src/server/conn/http2.rs
+++ b/src/server/conn/http2.rs
@@ -286,7 +286,7 @@ impl<E> Builder<E> {
/// driven on the connection.
pub fn serve_connection<S, I, Bd>(&self, io: I, service: S) -> Connection<I, S, E>
where
- S: HttpService<Recv, ResBody = Bd>,
+ S: HttpService<IncomingBody, ResBody = Bd>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
Bd: Body + 'static,
Bd::Error: Into<Box<dyn StdError + Send + Sync>>,
diff --git a/src/server/conn/mod.rs b/src/server/conn/mod.rs
--- a/src/server/conn/mod.rs
+++ b/src/server/conn/mod.rs
@@ -17,7 +17,7 @@
//! # mod rt {
//! use http::{Request, Response, StatusCode};
//! use http_body_util::Full;
-//! use hyper::{server::conn::http1, service::service_fn, body::Bytes};
+//! use hyper::{server::conn::http1, service::service_fn, body, body::Bytes};
//! use std::{net::SocketAddr, convert::Infallible};
//! use tokio::net::TcpListener;
//!
diff --git a/src/server/conn/mod.rs b/src/server/conn/mod.rs
--- a/src/server/conn/mod.rs
+++ b/src/server/conn/mod.rs
@@ -39,7 +39,7 @@
//! }
//! }
//!
-//! async fn hello(_req: Request<hyper::Recv>) -> Result<Response<Full<Bytes>>, Infallible> {
+//! async fn hello(_req: Request<body::Incoming>) -> Result<Response<Full<Bytes>>, Infallible> {
//! Ok(Response::new(Full::new(Bytes::from("Hello World!"))))
//! }
//! # }
diff --git a/src/service/util.rs b/src/service/util.rs
--- a/src/service/util.rs
+++ b/src/service/util.rs
@@ -13,11 +13,11 @@ use crate::{Request, Response};
///
/// ```
/// use bytes::Bytes;
-/// use hyper::{Recv, Request, Response, Version};
+/// use hyper::{body, Request, Response, Version};
/// use http_body_util::Full;
/// use hyper::service::service_fn;
///
-/// let service = service_fn(|req: Request<Recv>| async move {
+/// let service = service_fn(|req: Request<body::Incoming>| async move {
/// if req.version() == Version::HTTP_11 {
/// Ok(Response::new(Full::<Bytes>::from("Hello World")))
/// } else {
|
Pinging because you may have opinions about the name: @hawkw @LucioFranco @davidpdrsn.
Node.js uses IncomingMessage. It would be good, though, to make the connection to Body clearer. Perhaps IncomingBody?
Assuming that the user would never construct these themselves then I like `IncomingBody`.
ReqBody?
It's the body of a Request for servers, but it's the body of Responses for the client.
`ReqBody` doesn't work well because on the client it would be the `RespBody`.
I like `RecvBody`. I think the `-ing` forms are odd for a `'static` type name, and I think it should be a name that includes `Body` because that's what it's called in the HTTP standard.
`IncomingBody` sounds good then.
Another similar name/idea would be `IngressBody`. Is this always an HTTP-related body? If so, the name `HttpBody` may also be considered.
I think being more explicit is better, and because it is both a request and a response body, I like HttpBody.
I like the idea of hinting that it's a stream; I know that for some people it's confusing that you don't get the whole headers+body on the same await. So ~`RecvStream`,~ `IncomingBody` or something like `BodyStream`/`StreamingBody` would be my choice.
EDIT: On second thought, `RecvStream` name is too detached from the concept of HTTP Body.
|
2022-10-25T19:20:26Z
| 3,022
|
Determine name for `Recv` body type
We temporarily renamed the old `hyper::Body` struct to `hyper::body::Recv` in #2966. That was to unblock #2839, since we wanted to use the name for the trait instead. But, the name should be properly considered.
The purpose of this type is: an implementation of `Body` to represent bodies received from a remote. So, the client side would see this in the `Response`, and the server side would see this in the `Request`. It is **not** meant to be constructed by users as a "default" implementation.
Some names I've seen or considered before:
- `Recv`
- `RecvStream`/`RecvBody`
- `Streaming`
- `Incoming`
- `Remote`
- `Wire`/`FromWire`
It might help to envision the type signature users will frequently see it as, such as `Request<Streaming>` (or insert any other name in there).
|
hyperium__hyper-3022
|
diff --git a/examples/web_api.rs b/examples/web_api.rs
--- a/examples/web_api.rs
+++ b/examples/web_api.rs
@@ -77,7 +77,7 @@ async fn api_get_response() -> Result<Response<BoxBody>> {
Ok(res)
}
-async fn response_examples(req: Request<Recv>) -> Result<Response<BoxBody>> {
+async fn response_examples(req: Request<IncomingBody>) -> Result<Response<BoxBody>> {
match (req.method(), req.uri().path()) {
(&Method::GET, "/") | (&Method::GET, "/index.html") => Ok(Response::new(full(INDEX))),
(&Method::GET, "/test.html") => client_request_response().await,
diff --git a/src/body/body.rs b/src/body/incoming.rs
--- a/src/body/body.rs
+++ b/src/body/incoming.rs
@@ -375,7 +375,7 @@ mod tests {
use std::mem;
use std::task::Poll;
- use super::{Body, DecodedLength, Recv, Sender, SizeHint};
+ use super::{Body, DecodedLength, Incoming, Sender, SizeHint};
use http_body_util::BodyExt;
#[test]
diff --git a/src/body/body.rs b/src/body/incoming.rs
--- a/src/body/body.rs
+++ b/src/body/incoming.rs
@@ -383,7 +383,7 @@ mod tests {
// These are mostly to help catch *accidentally* increasing
// the size by too much.
- let body_size = mem::size_of::<Recv>();
+ let body_size = mem::size_of::<Incoming>();
let body_expected_size = mem::size_of::<u64>() * 5;
assert!(
body_size <= body_expected_size,
diff --git a/src/body/body.rs b/src/body/incoming.rs
--- a/src/body/body.rs
+++ b/src/body/incoming.rs
@@ -392,7 +392,7 @@ mod tests {
body_expected_size,
);
- //assert_eq!(body_size, mem::size_of::<Option<Recv>>(), "Option<Recv>");
+ //assert_eq!(body_size, mem::size_of::<Option<Incoming>>(), "Option<Incoming>");
assert_eq!(
mem::size_of::<Sender>(),
diff --git a/src/body/body.rs b/src/body/incoming.rs
--- a/src/body/body.rs
+++ b/src/body/incoming.rs
@@ -409,18 +409,18 @@ mod tests {
#[test]
fn size_hint() {
- fn eq(body: Recv, b: SizeHint, note: &str) {
+ fn eq(body: Incoming, b: SizeHint, note: &str) {
let a = body.size_hint();
assert_eq!(a.lower(), b.lower(), "lower for {:?}", note);
assert_eq!(a.upper(), b.upper(), "upper for {:?}", note);
}
- eq(Recv::empty(), SizeHint::with_exact(0), "empty");
+ eq(Incoming::empty(), SizeHint::with_exact(0), "empty");
- eq(Recv::channel().1, SizeHint::new(), "channel");
+ eq(Incoming::channel().1, SizeHint::new(), "channel");
eq(
- Recv::new_channel(DecodedLength::new(4), /*wanter =*/ false).1,
+ Incoming::new_channel(DecodedLength::new(4), /*wanter =*/ false).1,
SizeHint::with_exact(4),
"channel with length",
);
diff --git a/src/body/body.rs b/src/body/incoming.rs
--- a/src/body/body.rs
+++ b/src/body/incoming.rs
@@ -429,7 +429,7 @@ mod tests {
#[cfg(not(miri))]
#[tokio::test]
async fn channel_abort() {
- let (tx, mut rx) = Recv::channel();
+ let (tx, mut rx) = Incoming::channel();
tx.abort();
diff --git a/src/body/body.rs b/src/body/incoming.rs
--- a/src/body/body.rs
+++ b/src/body/incoming.rs
@@ -440,7 +440,7 @@ mod tests {
#[cfg(all(not(miri), feature = "http1"))]
#[tokio::test]
async fn channel_abort_when_buffer_is_full() {
- let (mut tx, mut rx) = Recv::channel();
+ let (mut tx, mut rx) = Incoming::channel();
tx.try_send_data("chunk 1".into()).expect("send 1");
// buffer is full, but can still send abort
diff --git a/src/body/body.rs b/src/body/incoming.rs
--- a/src/body/body.rs
+++ b/src/body/incoming.rs
@@ -462,7 +462,7 @@ mod tests {
#[cfg(feature = "http1")]
#[test]
fn channel_buffers_one() {
- let (mut tx, _rx) = Recv::channel();
+ let (mut tx, _rx) = Incoming::channel();
tx.try_send_data("chunk 1".into()).expect("send 1");
diff --git a/src/body/body.rs b/src/body/incoming.rs
--- a/src/body/body.rs
+++ b/src/body/incoming.rs
@@ -474,14 +474,14 @@ mod tests {
#[cfg(not(miri))]
#[tokio::test]
async fn channel_empty() {
- let (_, mut rx) = Recv::channel();
+ let (_, mut rx) = Incoming::channel();
assert!(rx.frame().await.is_none());
}
#[test]
fn channel_ready() {
- let (mut tx, _rx) = Recv::new_channel(DecodedLength::CHUNKED, /*wanter = */ false);
+ let (mut tx, _rx) = Incoming::new_channel(DecodedLength::CHUNKED, /*wanter = */ false);
let mut tx_ready = tokio_test::task::spawn(tx.ready());
diff --git a/src/body/body.rs b/src/body/incoming.rs
--- a/src/body/body.rs
+++ b/src/body/incoming.rs
@@ -490,7 +490,8 @@ mod tests {
#[test]
fn channel_wanter() {
- let (mut tx, mut rx) = Recv::new_channel(DecodedLength::CHUNKED, /*wanter = */ true);
+ let (mut tx, mut rx) =
+ Incoming::new_channel(DecodedLength::CHUNKED, /*wanter = */ true);
let mut tx_ready = tokio_test::task::spawn(tx.ready());
let mut rx_data = tokio_test::task::spawn(rx.frame());
diff --git a/src/body/body.rs b/src/body/incoming.rs
--- a/src/body/body.rs
+++ b/src/body/incoming.rs
@@ -511,7 +512,7 @@ mod tests {
#[test]
fn channel_notices_closure() {
- let (mut tx, rx) = Recv::new_channel(DecodedLength::CHUNKED, /*wanter = */ true);
+ let (mut tx, rx) = Incoming::new_channel(DecodedLength::CHUNKED, /*wanter = */ true);
let mut tx_ready = tokio_test::task::spawn(tx.ready());
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -379,15 +379,15 @@ mod tests {
#[cfg(feature = "nightly")]
#[bench]
fn giver_queue_throughput(b: &mut test::Bencher) {
- use crate::{Recv, Request, Response};
+ use crate::{body::Incoming, Request, Response};
let rt = tokio::runtime::Builder::new_current_thread()
.build()
.unwrap();
- let (mut tx, mut rx) = channel::<Request<Recv>, Response<Recv>>();
+ let (mut tx, mut rx) = channel::<Request<Incoming>, Response<Incoming>>();
b.iter(move || {
- let _ = tx.send(Request::new(Recv::empty())).unwrap();
+ let _ = tx.send(Request::new(Incoming::empty())).unwrap();
rt.block_on(async {
loop {
let poll_once = PollOnce(&mut rx);
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -673,7 +673,7 @@ mod tests {
handle.read(b"HTTP/1.1 200 OK\r\n\r\n");
let mut res_rx = tx
- .try_send(crate::Request::new(crate::Recv::empty()))
+ .try_send(crate::Request::new(IncomingBody::empty()))
.unwrap();
tokio_test::assert_ready_ok!(Pin::new(&mut dispatcher).poll(cx));
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -706,7 +706,7 @@ mod tests {
let _dispatcher = tokio::spawn(async move { dispatcher.await });
let body = {
- let (mut tx, body) = crate::Recv::new_channel(DecodedLength::new(4), false);
+ let (mut tx, body) = IncomingBody::new_channel(DecodedLength::new(4), false);
tx.try_send_data("reee".into()).unwrap();
body
};
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -737,7 +737,7 @@ mod tests {
assert!(dispatcher.poll().is_pending());
let body = {
- let (mut tx, body) = crate::Recv::channel();
+ let (mut tx, body) = IncomingBody::channel();
tx.try_send_data("".into()).unwrap();
body
};
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1345,7 +1345,7 @@ mod conn {
use hyper::body::{Body, Frame};
use hyper::client::conn;
use hyper::upgrade::OnUpgrade;
- use hyper::{self, Method, Recv, Request, Response, StatusCode};
+ use hyper::{Method, Request, Response, StatusCode};
use super::{concat, s, support, tcp_connect, FutureHyperExt};
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1899,7 +1899,7 @@ mod conn {
res = listener.accept() => {
let (stream, _) = res.unwrap();
- let service = service_fn(|_:Request<Recv>| future::ok::<_, hyper::Error>(Response::new(Empty::<Bytes>::new())));
+ let service = service_fn(|_:Request<hyper::body::Incoming>| future::ok::<_, hyper::Error>(Response::new(Empty::<Bytes>::new())));
let mut shdn_rx = shdn_rx.clone();
tokio::task::spawn(async move {
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1994,7 +1994,7 @@ mod conn {
.http2_keep_alive_timeout(Duration::from_secs(1))
// enable while idle since we aren't sending requests
.http2_keep_alive_while_idle(true)
- .handshake::<_, Recv>(io)
+ .handshake::<_, hyper::body::Incoming>(io)
.await
.expect("http handshake");
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2026,7 +2026,7 @@ mod conn {
.timer(TokioTimer)
.http2_keep_alive_interval(Duration::from_secs(1))
.http2_keep_alive_timeout(Duration::from_secs(1))
- .handshake::<_, Recv>(io)
+ .handshake::<_, hyper::body::Incoming>(io)
.await
.expect("http handshake");
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -27,10 +27,10 @@ use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::{TcpListener as TkTcpListener, TcpListener, TcpStream as TkTcpStream};
-use hyper::body::Body;
+use hyper::body::{Body, Incoming as IncomingBody};
use hyper::server::conn::{http1, http2};
use hyper::service::{service_fn, Service};
-use hyper::{Method, Recv, Request, Response, StatusCode, Uri, Version};
+use hyper::{Method, Request, Response, StatusCode, Uri, Version};
mod support;
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1287,7 +1287,7 @@ async fn disconnect_after_reading_request_before_responding() {
socket,
service_fn(|_| {
TokioTimer.sleep(Duration::from_secs(2)).map(
- |_| -> Result<Response<Recv>, hyper::Error> {
+ |_| -> Result<Response<IncomingBody>, hyper::Error> {
panic!("response future should have been dropped");
},
)
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1616,7 +1616,7 @@ async fn upgrades_new() {
});
let (upgrades_tx, upgrades_rx) = mpsc::channel();
- let svc = service_fn(move |req: Request<Recv>| {
+ let svc = service_fn(move |req: Request<IncomingBody>| {
let on_upgrade = hyper::upgrade::on(req);
let _ = upgrades_tx.send(on_upgrade);
future::ok::<_, hyper::Error>(
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1658,7 +1658,7 @@ async fn upgrades_ignored() {
let addr = listener.local_addr().unwrap();
tokio::spawn(async move {
- let svc = service_fn(move |req: Request<Recv>| {
+ let svc = service_fn(move |req: Request<IncomingBody>| {
assert_eq!(req.headers()["upgrade"], "yolo");
future::ok::<_, hyper::Error>(Response::new(Empty::<Bytes>::new()))
});
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1725,7 +1725,7 @@ async fn http_connect_new() {
});
let (upgrades_tx, upgrades_rx) = mpsc::channel();
- let svc = service_fn(move |req: Request<Recv>| {
+ let svc = service_fn(move |req: Request<IncomingBody>| {
let on_upgrade = hyper::upgrade::on(req);
let _ = upgrades_tx.send(on_upgrade);
future::ok::<_, hyper::Error>(
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1796,7 +1796,7 @@ async fn h2_connect() {
assert!(recv_stream.data().await.unwrap().unwrap().is_empty());
});
- let svc = service_fn(move |req: Request<Recv>| {
+ let svc = service_fn(move |req: Request<IncomingBody>| {
let on_upgrade = hyper::upgrade::on(req);
tokio::spawn(async move {
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1884,7 +1884,7 @@ async fn h2_connect_multiplex() {
futures.for_each(future::ready).await;
});
- let svc = service_fn(move |req: Request<Recv>| {
+ let svc = service_fn(move |req: Request<IncomingBody>| {
let authority = req.uri().authority().unwrap().to_string();
let on_upgrade = hyper::upgrade::on(req);
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1979,7 +1979,7 @@ async fn h2_connect_large_body() {
assert!(recv_stream.data().await.unwrap().unwrap().is_empty());
});
- let svc = service_fn(move |req: Request<Recv>| {
+ let svc = service_fn(move |req: Request<IncomingBody>| {
let on_upgrade = hyper::upgrade::on(req);
tokio::spawn(async move {
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2052,7 +2052,7 @@ async fn h2_connect_empty_frames() {
assert!(recv_stream.data().await.unwrap().unwrap().is_empty());
});
- let svc = service_fn(move |req: Request<Recv>| {
+ let svc = service_fn(move |req: Request<IncomingBody>| {
let on_upgrade = hyper::upgrade::on(req);
tokio::spawn(async move {
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2695,12 +2695,12 @@ enum Msg {
End,
}
-impl Service<Request<Recv>> for TestService {
+impl Service<Request<IncomingBody>> for TestService {
type Response = Response<ReplyBody>;
type Error = BoxError;
type Future = BoxFuture;
- fn call(&mut self, mut req: Request<Recv>) -> Self::Future {
+ fn call(&mut self, mut req: Request<IncomingBody>) -> Self::Future {
let tx = self.tx.clone();
let replies = self.reply.clone();
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2761,19 +2761,19 @@ const HELLO: &str = "hello";
struct HelloWorld;
-impl Service<Request<Recv>> for HelloWorld {
+impl Service<Request<IncomingBody>> for HelloWorld {
type Response = Response<Full<Bytes>>;
type Error = hyper::Error;
type Future = future::Ready<Result<Self::Response, Self::Error>>;
- fn call(&mut self, _req: Request<Recv>) -> Self::Future {
+ fn call(&mut self, _req: Request<IncomingBody>) -> Self::Future {
let response = Response::new(Full::new(HELLO.into()));
future::ok(response)
}
}
fn unreachable_service() -> impl Service<
- http::Request<hyper::Recv>,
+ http::Request<IncomingBody>,
Response = http::Response<ReplyBody>,
Error = BoxError,
Future = BoxFuture,
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -3036,7 +3036,7 @@ impl TestClient {
self
}
- async fn get(&self, uri: Uri) -> Result<Response<Recv>, hyper::Error> {
+ async fn get(&self, uri: Uri) -> Result<Response<IncomingBody>, hyper::Error> {
self.request(
Request::builder()
.uri(uri)
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -3047,7 +3047,10 @@ impl TestClient {
.await
}
- async fn request(&self, req: Request<Empty<Bytes>>) -> Result<Response<Recv>, hyper::Error> {
+ async fn request(
+ &self,
+ req: Request<Empty<Bytes>>,
+ ) -> Result<Response<IncomingBody>, hyper::Error> {
let host = req.uri().host().expect("uri has no host");
let port = req.uri().port_u16().expect("uri has no port");
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -12,7 +12,7 @@ use hyper::server;
use tokio::net::{TcpListener, TcpStream};
use hyper::service::service_fn;
-use hyper::{Recv, Request, Response, Version};
+use hyper::{body::Incoming as IncomingBody, Request, Response, Version};
pub use futures_util::{
future, FutureExt as _, StreamExt as _, TryFutureExt as _, TryStreamExt as _,
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -360,7 +360,7 @@ async fn async_test(cfg: __TestConfig) {
// Move a clone into the service_fn
let serve_handles = serve_handles.clone();
- let service = service_fn(move |req: Request<Recv>| {
+ let service = service_fn(move |req: Request<IncomingBody>| {
let (sreq, sres) = serve_handles.lock().unwrap().remove(0);
assert_eq!(req.uri().path(), sreq.uri, "client path");
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -562,7 +562,9 @@ async fn naive_proxy(cfg: ProxyConfig) -> (SocketAddr, impl Future<Output = ()>)
let mut builder = Response::builder().status(parts.status);
*builder.headers_mut().unwrap() = parts.headers;
- Result::<Response<Recv>, hyper::Error>::Ok(builder.body(body).unwrap())
+ Result::<Response<hyper::body::Incoming>, hyper::Error>::Ok(
+ builder.body(body).unwrap(),
+ )
}
});
|
hyperium/hyper
|
6fd696e10974f10b2c6b9d16393bbbfa21c2333f
|
[
"3010"
] |
0.3
|
91e83b7e486da956439df4439b32ad9e9977cbb2
|
diff --git a/Cargo.toml b/Cargo.toml
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -27,8 +27,8 @@ futures-core = { version = "0.3", default-features = false }
futures-channel = "0.3"
futures-util = { version = "0.3", default-features = false }
http = "0.2"
-http-body = { git = "https://github.com/hyperium/http-body", rev = "6d7dd17" }
-http-body-util = { git = "https://github.com/hyperium/http-body", rev = "6d7dd17" }
+http-body = { git = "https://github.com/hyperium/http-body", rev = "0e20ca9" }
+http-body-util = { git = "https://github.com/hyperium/http-body", rev = "0e20ca9" }
httpdate = "1.0"
httparse = "1.6"
h2 = { version = "0.3.9", optional = true }
diff --git a/benches/body.rs b/benches/body.rs
--- a/benches/body.rs
+++ b/benches/body.rs
@@ -21,7 +22,8 @@ macro_rules! bench_stream {
$bencher.iter(|| {
rt.block_on(async {
let $body_pat = StreamBody::new(
- stream::iter(__s.iter()).map(|&s| Ok::<_, std::convert::Infallible>(s)),
+ stream::iter(__s.iter())
+ .map(|&s| Ok::<_, std::convert::Infallible>(Frame::data(s))),
);
$block;
diff --git a/benches/body.rs b/benches/body.rs
--- a/benches/body.rs
+++ b/benches/body.rs
@@ -55,7 +57,7 @@ macro_rules! benches {
bench_stream!(b, bytes: $bytes, count: $count, total, mut body, {
let mut vec = Vec::new();
while let Some(chunk) = body.next().await {
- vec.extend_from_slice(&chunk.unwrap());
+ vec.extend_from_slice(&chunk.unwrap().into_data().unwrap());
}
assert_eq!(vec.len(), total);
});
diff --git a/benches/server.rs b/benches/server.rs
--- a/benches/server.rs
+++ b/benches/server.rs
@@ -13,6 +13,7 @@ use futures_util::{stream, StreamExt};
use http_body_util::{BodyExt, Full, StreamBody};
use tokio::sync::oneshot;
+use hyper::body::Frame;
use hyper::server::conn::http1;
use hyper::service::service_fn;
use hyper::Response;
diff --git a/examples/client.rs b/examples/client.rs
--- a/examples/client.rs
+++ b/examples/client.rs
@@ -3,8 +3,8 @@
use std::env;
use bytes::Bytes;
-use http_body_util::Empty;
-use hyper::{body::Body as _, Request};
+use http_body_util::{BodyExt, Empty};
+use hyper::Request;
use tokio::io::{self, AsyncWriteExt as _};
use tokio::net::TcpStream;
diff --git a/examples/client.rs b/examples/client.rs
--- a/examples/client.rs
+++ b/examples/client.rs
@@ -62,9 +62,11 @@ async fn fetch_url(url: hyper::Uri) -> Result<()> {
// Stream the body, writing each chunk to stdout as we get it
// (instead of buffering and printing at the end).
- while let Some(next) = res.data().await {
- let chunk = next?;
- io::stdout().write_all(&chunk).await?;
+ while let Some(next) = res.frame().await {
+ let frame = next?;
+ if let Some(chunk) = frame.data_ref() {
+ io::stdout().write_all(&chunk).await?;
+ }
}
println!("\n\nDone!");
diff --git a/examples/client_json.rs b/examples/client_json.rs
--- a/examples/client_json.rs
+++ b/examples/client_json.rs
@@ -2,7 +2,7 @@
#![warn(rust_2018_idioms)]
use bytes::Bytes;
-use http_body_util::Empty;
+use http_body_util::{BodyExt, Empty};
use hyper::{body::Buf, Request};
use serde::Deserialize;
use tokio::net::TcpStream;
diff --git a/examples/client_json.rs b/examples/client_json.rs
--- a/examples/client_json.rs
+++ b/examples/client_json.rs
@@ -48,7 +48,7 @@ async fn fetch_json(url: hyper::Uri) -> Result<Vec<User>> {
let res = sender.send_request(req).await?;
// asynchronously aggregate the chunks of the body
- let body = hyper::body::aggregate(res).await?;
+ let body = res.collect().await?.aggregate();
// try to parse as json with serde_json
let users = serde_json::from_reader(body.reader())?;
diff --git a/examples/echo.rs b/examples/echo.rs
--- a/examples/echo.rs
+++ b/examples/echo.rs
@@ -50,7 +50,7 @@ async fn echo(req: Request<Recv>) -> Result<Response<BoxBody<Bytes, hyper::Error
return Ok(resp);
}
- let whole_body = hyper::body::to_bytes(req.into_body()).await?;
+ let whole_body = req.collect().await?.to_bytes();
let reversed_body = whole_body.iter().rev().cloned().collect::<Vec<u8>>();
Ok(Response::new(full(reversed_body)))
diff --git a/examples/params.rs b/examples/params.rs
--- a/examples/params.rs
+++ b/examples/params.rs
@@ -25,7 +25,7 @@ async fn param_example(
(&Method::GET, "/") | (&Method::GET, "/post") => Ok(Response::new(full(INDEX))),
(&Method::POST, "/post") => {
// Concatenate the body...
- let b = hyper::body::to_bytes(req).await?;
+ let b = req.collect().await?.to_bytes();
// Parse the request body. form_urlencoded::parse
// always succeeds, but in general parsing may
// fail (for example, an invalid post of json), so
diff --git a/examples/single_threaded.rs b/examples/single_threaded.rs
--- a/examples/single_threaded.rs
+++ b/examples/single_threaded.rs
@@ -6,8 +6,7 @@ use std::net::SocketAddr;
use std::rc::Rc;
use tokio::net::TcpListener;
-use hyper::body::{Body as HttpBody, Bytes};
-use hyper::header::{HeaderMap, HeaderValue};
+use hyper::body::{Body as HttpBody, Bytes, Frame};
use hyper::service::service_fn;
use hyper::{Error, Response};
use std::marker::PhantomData;
diff --git a/examples/single_threaded.rs b/examples/single_threaded.rs
--- a/examples/single_threaded.rs
+++ b/examples/single_threaded.rs
@@ -33,18 +32,11 @@ impl HttpBody for Body {
type Data = Bytes;
type Error = Error;
- fn poll_data(
+ fn poll_frame(
self: Pin<&mut Self>,
_: &mut Context<'_>,
- ) -> Poll<Option<Result<Self::Data, Self::Error>>> {
- Poll::Ready(self.get_mut().data.take().map(Ok))
- }
-
- fn poll_trailers(
- self: Pin<&mut Self>,
- _: &mut Context<'_>,
- ) -> Poll<Result<Option<HeaderMap<HeaderValue>>, Self::Error>> {
- Poll::Ready(Ok(None))
+ ) -> Poll<Option<Result<Frame<Self::Data>, Self::Error>>> {
+ Poll::Ready(self.get_mut().data.take().map(|d| Ok(Frame::data(d))))
}
}
diff --git a/examples/web_api.rs b/examples/web_api.rs
--- a/examples/web_api.rs
+++ b/examples/web_api.rs
@@ -48,7 +48,7 @@ async fn client_request_response() -> Result<Response<BoxBody>> {
async fn api_post_response(req: Request<Recv>) -> Result<Response<BoxBody>> {
// Aggregate the body...
- let whole_body = hyper::body::aggregate(req).await?;
+ let whole_body = req.collect().await?.aggregate();
// Decode as JSON...
let mut data: serde_json::Value = serde_json::from_reader(whole_body.reader())?;
// Change the JSON...
diff --git a/src/body/aggregate.rs /dev/null
--- a/src/body/aggregate.rs
+++ /dev/null
@@ -1,31 +0,0 @@
-use bytes::Buf;
-
-use super::Body;
-use crate::common::buf::BufList;
-
-/// Aggregate the data buffers from a body asynchronously.
-///
-/// The returned `impl Buf` groups the `Buf`s from the `Body` without
-/// copying them. This is ideal if you don't require a contiguous buffer.
-///
-/// # Note
-///
-/// Care needs to be taken if the remote is untrusted. The function doesn't implement any length
-/// checks and an malicious peer might make it consume arbitrary amounts of memory. Checking the
-/// `Content-Length` is a possibility, but it is not strictly mandated to be present.
-pub async fn aggregate<T>(body: T) -> Result<impl Buf, T::Error>
-where
- T: Body,
-{
- let mut bufs = BufList::new();
-
- futures_util::pin_mut!(body);
- while let Some(buf) = body.data().await {
- let buf = buf?;
- if buf.has_remaining() {
- bufs.push(buf);
- }
- }
-
- Ok(bufs)
-}
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -3,9 +3,9 @@ use std::fmt;
use bytes::Bytes;
use futures_channel::mpsc;
use futures_channel::oneshot;
-use futures_core::Stream; // for mpsc::Receiver
+use futures_core::{FusedStream, Stream}; // for mpsc::Receiver
use http::HeaderMap;
-use http_body::{Body, SizeHint};
+use http_body::{Body, Frame, SizeHint};
use super::DecodedLength;
use crate::common::Future;
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -16,13 +16,7 @@ use crate::proto::h2::ping;
type BodySender = mpsc::Sender<Result<Bytes, crate::Error>>;
type TrailersSender = oneshot::Sender<HeaderMap>;
-/// A stream of `Bytes`, used when receiving bodies.
-///
-/// A good default [`Body`](crate::body::Body) to use in many
-/// applications.
-///
-/// Note: To read the full body, use [`body::to_bytes`](crate::body::to_bytes)
-/// or [`body::aggregate`](crate::body::aggregate).
+/// A stream of `Bytes`, used when receiving bodies from the network.
#[must_use = "streams do nothing unless polled"]
pub struct Recv {
kind: Kind,
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -39,8 +33,9 @@ enum Kind {
},
#[cfg(all(feature = "http2", any(feature = "client", feature = "server")))]
H2 {
- ping: ping::Recorder,
content_length: DecodedLength,
+ data_done: bool,
+ ping: ping::Recorder,
recv: h2::RecvStream,
},
#[cfg(feature = "ffi")]
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -131,6 +126,7 @@ impl Recv {
content_length = DecodedLength::ZERO;
}
let body = Recv::new(Kind::H2 {
+ data_done: false,
ping,
content_length,
recv,
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -153,86 +149,78 @@ impl Recv {
_ => unreachable!(),
}
}
+}
- fn poll_inner(&mut self, cx: &mut task::Context<'_>) -> Poll<Option<crate::Result<Bytes>>> {
+impl Body for Recv {
+ type Data = Bytes;
+ type Error = crate::Error;
+
+ fn poll_frame(
+ mut self: Pin<&mut Self>,
+ cx: &mut task::Context<'_>,
+ ) -> Poll<Option<Result<Frame<Self::Data>, Self::Error>>> {
match self.kind {
Kind::Empty => Poll::Ready(None),
Kind::Chan {
content_length: ref mut len,
ref mut data_rx,
ref mut want_tx,
- ..
+ ref mut trailers_rx,
} => {
want_tx.send(WANT_READY);
- match ready!(Pin::new(data_rx).poll_next(cx)?) {
- Some(chunk) => {
- len.sub_if(chunk.len() as u64);
- Poll::Ready(Some(Ok(chunk)))
+ if !data_rx.is_terminated() {
+ match ready!(Pin::new(data_rx).poll_next(cx)?) {
+ Some(chunk) => {
+ len.sub_if(chunk.len() as u64);
+ return Poll::Ready(Some(Ok(Frame::data(chunk))));
+ }
+ // fall through to trailers
+ None => (),
}
- None => Poll::Ready(None),
+ }
+
+ // check trailers after data is terminated
+ match ready!(Pin::new(trailers_rx).poll(cx)) {
+ Ok(t) => Poll::Ready(Some(Ok(Frame::trailers(t)))),
+ Err(_) => Poll::Ready(None),
}
}
#[cfg(all(feature = "http2", any(feature = "client", feature = "server")))]
Kind::H2 {
+ ref mut data_done,
ref ping,
recv: ref mut h2,
content_length: ref mut len,
- } => match ready!(h2.poll_data(cx)) {
- Some(Ok(bytes)) => {
- let _ = h2.flow_control().release_capacity(bytes.len());
- len.sub_if(bytes.len() as u64);
- ping.record_data(bytes.len());
- Poll::Ready(Some(Ok(bytes)))
+ } => {
+ if !*data_done {
+ match ready!(h2.poll_data(cx)) {
+ Some(Ok(bytes)) => {
+ let _ = h2.flow_control().release_capacity(bytes.len());
+ len.sub_if(bytes.len() as u64);
+ ping.record_data(bytes.len());
+ return Poll::Ready(Some(Ok(Frame::data(bytes))));
+ }
+ Some(Err(e)) => return Poll::Ready(Some(Err(crate::Error::new_body(e)))),
+ None => {
+ *data_done = true;
+ // fall through to trailers
+ }
+ }
}
- Some(Err(e)) => Poll::Ready(Some(Err(crate::Error::new_body(e)))),
- None => Poll::Ready(None),
- },
- #[cfg(feature = "ffi")]
- Kind::Ffi(ref mut body) => body.poll_data(cx),
- }
- }
-}
-
-impl Body for Recv {
- type Data = Bytes;
- type Error = crate::Error;
-
- fn poll_data(
- mut self: Pin<&mut Self>,
- cx: &mut task::Context<'_>,
- ) -> Poll<Option<Result<Self::Data, Self::Error>>> {
- self.poll_inner(cx)
- }
-
- fn poll_trailers(
- #[cfg_attr(not(feature = "http2"), allow(unused_mut))] mut self: Pin<&mut Self>,
- #[cfg_attr(not(feature = "http2"), allow(unused))] cx: &mut task::Context<'_>,
- ) -> Poll<Result<Option<HeaderMap>, Self::Error>> {
- match self.kind {
- Kind::Empty => Poll::Ready(Ok(None)),
- #[cfg(all(feature = "http2", any(feature = "client", feature = "server")))]
- Kind::H2 {
- recv: ref mut h2,
- ref ping,
- ..
- } => match ready!(h2.poll_trailers(cx)) {
- Ok(t) => {
- ping.record_non_data();
- Poll::Ready(Ok(t))
+ // after data, check trailers
+ match ready!(h2.poll_trailers(cx)) {
+ Ok(t) => {
+ ping.record_non_data();
+ Poll::Ready(Ok(t.map(Frame::trailers)).transpose())
+ }
+ Err(e) => Poll::Ready(Some(Err(crate::Error::new_h2(e)))),
}
- Err(e) => Poll::Ready(Err(crate::Error::new_h2(e))),
- },
- Kind::Chan {
- ref mut trailers_rx,
- ..
- } => match ready!(Pin::new(trailers_rx).poll(cx)) {
- Ok(t) => Poll::Ready(Ok(Some(t))),
- Err(_) => Poll::Ready(Ok(None)),
- },
+ }
+
#[cfg(feature = "ffi")]
- Kind::Ffi(ref mut body) => body.poll_trailers(cx),
+ Kind::Ffi(ref mut body) => body.poll_data(cx),
}
}
diff --git a/src/body/mod.rs b/src/body/mod.rs
--- a/src/body/mod.rs
+++ b/src/body/mod.rs
@@ -17,19 +17,16 @@
pub use bytes::{Buf, Bytes};
pub use http_body::Body;
+pub use http_body::Frame;
pub use http_body::SizeHint;
-pub use self::aggregate::aggregate;
pub use self::body::Recv;
#[cfg(feature = "http1")]
pub(crate) use self::body::Sender;
pub(crate) use self::length::DecodedLength;
-pub use self::to_bytes::to_bytes;
-mod aggregate;
mod body;
mod length;
-mod to_bytes;
fn _assert_send_sync() {
fn _assert_send<T: Send>() {}
diff --git a/src/body/to_bytes.rs /dev/null
--- a/src/body/to_bytes.rs
+++ /dev/null
@@ -1,70 +0,0 @@
-use bytes::{Buf, BufMut, Bytes};
-
-use super::Body;
-
-/// Concatenate the buffers from a body into a single `Bytes` asynchronously.
-///
-/// This may require copying the data into a single buffer. If you don't need
-/// a contiguous buffer, prefer the [`aggregate`](crate::body::aggregate())
-/// function.
-///
-/// # Note
-///
-/// Care needs to be taken if the remote is untrusted. The function doesn't implement any length
-/// checks and an malicious peer might make it consume arbitrary amounts of memory. Checking the
-/// `Content-Length` is a possibility, but it is not strictly mandated to be present.
-///
-/// # Example
-///
-/// ```
-/// # use hyper::{Recv, Response};
-/// # async fn doc(response: Response<Recv>) -> hyper::Result<()> {
-/// # use hyper::body::Body;
-/// // let response: Response<Body> ...
-///
-/// const MAX_ALLOWED_RESPONSE_SIZE: u64 = 1024;
-///
-/// let response_content_length = match response.body().size_hint().upper() {
-/// Some(v) => v,
-/// None => MAX_ALLOWED_RESPONSE_SIZE + 1 // Just to protect ourselves from a malicious response
-/// };
-///
-/// if response_content_length < MAX_ALLOWED_RESPONSE_SIZE {
-/// let body_bytes = hyper::body::to_bytes(response.into_body()).await?;
-/// println!("body: {:?}", body_bytes);
-/// }
-///
-/// # Ok(())
-/// # }
-/// ```
-pub async fn to_bytes<T>(body: T) -> Result<Bytes, T::Error>
-where
- T: Body,
-{
- futures_util::pin_mut!(body);
-
- // If there's only 1 chunk, we can just return Buf::to_bytes()
- let mut first = if let Some(buf) = body.data().await {
- buf?
- } else {
- return Ok(Bytes::new());
- };
-
- let second = if let Some(buf) = body.data().await {
- buf?
- } else {
- return Ok(first.copy_to_bytes(first.remaining()));
- };
-
- // With more than 1 buf, we gotta flatten into a Vec first.
- let cap = first.remaining() + second.remaining() + body.size_hint().lower() as usize;
- let mut vec = Vec::with_capacity(cap);
- vec.put(first);
- vec.put(second);
-
- while let Some(buf) = body.data().await {
- vec.put(buf?);
- }
-
- Ok(vec.into())
-}
diff --git a/src/ffi/body.rs b/src/ffi/body.rs
--- a/src/ffi/body.rs
+++ b/src/ffi/body.rs
@@ -3,12 +3,12 @@ use std::mem::ManuallyDrop;
use std::ptr;
use std::task::{Context, Poll};
-use http::HeaderMap;
+use http_body_util::BodyExt as _;
use libc::{c_int, size_t};
use super::task::{hyper_context, hyper_task, hyper_task_return_type, AsTaskType};
use super::{UserDataPointer, HYPER_ITER_CONTINUE};
-use crate::body::{Body as _, Bytes, Recv};
+use crate::body::{Bytes, Frame, Recv};
/// A streaming HTTP body.
pub struct hyper_body(pub(super) Recv);
diff --git a/src/ffi/body.rs b/src/ffi/body.rs
--- a/src/ffi/body.rs
+++ b/src/ffi/body.rs
@@ -60,7 +60,19 @@ ffi_fn! {
let mut body = ManuallyDrop::new(non_null!(Box::from_raw(body) ?= ptr::null_mut()));
Box::into_raw(hyper_task::boxed(async move {
- body.0.data().await.map(|res| res.map(hyper_buf))
+ loop {
+ match body.0.frame().await {
+ Some(Ok(frame)) => {
+ if frame.is_data() {
+ return Ok(Some(hyper_buf(frame.into_data().unwrap())));
+ } else {
+ continue;
+ }
+ },
+ Some(Err(e)) => return Err(e),
+ None => return Ok(None),
+ }
+ }
}))
} ?= ptr::null_mut()
}
diff --git a/src/ffi/body.rs b/src/ffi/body.rs
--- a/src/ffi/body.rs
+++ b/src/ffi/body.rs
@@ -81,10 +93,12 @@ ffi_fn! {
let userdata = UserDataPointer(userdata);
Box::into_raw(hyper_task::boxed(async move {
- while let Some(item) = body.0.data().await {
- let chunk = item?;
- if HYPER_ITER_CONTINUE != func(userdata.0, &hyper_buf(chunk)) {
- return Err(crate::Error::new_user_aborted_by_callback());
+ while let Some(item) = body.0.frame().await {
+ let frame = item?;
+ if let Some(chunk) = frame.into_data() {
+ if HYPER_ITER_CONTINUE != func(userdata.0, &hyper_buf(chunk)) {
+ return Err(crate::Error::new_user_aborted_by_callback());
+ }
}
}
Ok(())
diff --git a/src/ffi/body.rs b/src/ffi/body.rs
--- a/src/ffi/body.rs
+++ b/src/ffi/body.rs
@@ -136,7 +150,10 @@ impl UserBody {
}
}
- pub(crate) fn poll_data(&mut self, cx: &mut Context<'_>) -> Poll<Option<crate::Result<Bytes>>> {
+ pub(crate) fn poll_data(
+ &mut self,
+ cx: &mut Context<'_>,
+ ) -> Poll<Option<crate::Result<Frame<Bytes>>>> {
let mut out = std::ptr::null_mut();
match (self.data_func)(self.userdata, hyper_context::wrap(cx), &mut out) {
super::task::HYPER_POLL_READY => {
diff --git a/src/ffi/body.rs b/src/ffi/body.rs
--- a/src/ffi/body.rs
+++ b/src/ffi/body.rs
@@ -144,7 +161,7 @@ impl UserBody {
Poll::Ready(None)
} else {
let buf = unsafe { Box::from_raw(out) };
- Poll::Ready(Some(Ok(buf.0)))
+ Poll::Ready(Some(Ok(Frame::data(buf.0))))
}
}
super::task::HYPER_POLL_PENDING => Poll::Pending,
diff --git a/src/ffi/body.rs b/src/ffi/body.rs
--- a/src/ffi/body.rs
+++ b/src/ffi/body.rs
@@ -157,13 +174,6 @@ impl UserBody {
))))),
}
}
-
- pub(crate) fn poll_trailers(
- &mut self,
- _cx: &mut Context<'_>,
- ) -> Poll<crate::Result<Option<HeaderMap>>> {
- Poll::Ready(Ok(None))
- }
}
/// cbindgen:ignore
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -333,12 +333,18 @@ where
continue;
}
- let item = ready!(body.as_mut().poll_data(cx));
+ let item = ready!(body.as_mut().poll_frame(cx));
if let Some(item) = item {
- let chunk = item.map_err(|e| {
+ let frame = item.map_err(|e| {
*clear_body = true;
crate::Error::new_user_body(e)
})?;
+ let chunk = if frame.is_data() {
+ frame.into_data().unwrap()
+ } else {
+ trace!("discarding non-data frame");
+ continue;
+ };
let eos = body.is_end_stream();
if eos {
*clear_body = true;
diff --git a/src/proto/h2/mod.rs b/src/proto/h2/mod.rs
--- a/src/proto/h2/mod.rs
+++ b/src/proto/h2/mod.rs
@@ -119,43 +119,44 @@ where
fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
let mut me = self.project();
loop {
- if !*me.data_done {
- // we don't have the next chunk of data yet, so just reserve 1 byte to make
- // sure there's some capacity available. h2 will handle the capacity management
- // for the actual body chunk.
- me.body_tx.reserve_capacity(1);
-
- if me.body_tx.capacity() == 0 {
- loop {
- match ready!(me.body_tx.poll_capacity(cx)) {
- Some(Ok(0)) => {}
- Some(Ok(_)) => break,
- Some(Err(e)) => {
- return Poll::Ready(Err(crate::Error::new_body_write(e)))
- }
- None => {
- // None means the stream is no longer in a
- // streaming state, we either finished it
- // somehow, or the remote reset us.
- return Poll::Ready(Err(crate::Error::new_body_write(
- "send stream capacity unexpectedly closed",
- )));
- }
+ // we don't have the next chunk of data yet, so just reserve 1 byte to make
+ // sure there's some capacity available. h2 will handle the capacity management
+ // for the actual body chunk.
+ me.body_tx.reserve_capacity(1);
+
+ if me.body_tx.capacity() == 0 {
+ loop {
+ match ready!(me.body_tx.poll_capacity(cx)) {
+ Some(Ok(0)) => {}
+ Some(Ok(_)) => break,
+ Some(Err(e)) => {
+ return Poll::Ready(Err(crate::Error::new_body_write(e)))
+ }
+ None => {
+ // None means the stream is no longer in a
+ // streaming state, we either finished it
+ // somehow, or the remote reset us.
+ return Poll::Ready(Err(crate::Error::new_body_write(
+ "send stream capacity unexpectedly closed",
+ )));
}
}
- } else if let Poll::Ready(reason) = me
- .body_tx
- .poll_reset(cx)
- .map_err(crate::Error::new_body_write)?
- {
- debug!("stream received RST_STREAM: {:?}", reason);
- return Poll::Ready(Err(crate::Error::new_body_write(::h2::Error::from(
- reason,
- ))));
}
+ } else if let Poll::Ready(reason) = me
+ .body_tx
+ .poll_reset(cx)
+ .map_err(crate::Error::new_body_write)?
+ {
+ debug!("stream received RST_STREAM: {:?}", reason);
+ return Poll::Ready(Err(crate::Error::new_body_write(::h2::Error::from(
+ reason,
+ ))));
+ }
- match ready!(me.stream.as_mut().poll_data(cx)) {
- Some(Ok(chunk)) => {
+ match ready!(me.stream.as_mut().poll_frame(cx)) {
+ Some(Ok(frame)) => {
+ if frame.is_data() {
+ let chunk = frame.into_data().unwrap();
let is_eos = me.stream.is_end_stream();
trace!(
"send body chunk: {} bytes, eos={}",
diff --git a/src/proto/h2/mod.rs b/src/proto/h2/mod.rs
--- a/src/proto/h2/mod.rs
+++ b/src/proto/h2/mod.rs
@@ -171,43 +172,24 @@ where
if is_eos {
return Poll::Ready(Ok(()));
}
- }
- Some(Err(e)) => return Poll::Ready(Err(me.body_tx.on_user_err(e))),
- None => {
+ } else if frame.is_trailers() {
+ // no more DATA, so give any capacity back
me.body_tx.reserve_capacity(0);
- let is_eos = me.stream.is_end_stream();
- if is_eos {
- return Poll::Ready(me.body_tx.send_eos_frame());
- } else {
- *me.data_done = true;
- // loop again to poll_trailers
- }
- }
- }
- } else {
- if let Poll::Ready(reason) = me
- .body_tx
- .poll_reset(cx)
- .map_err(crate::Error::new_body_write)?
- {
- debug!("stream received RST_STREAM: {:?}", reason);
- return Poll::Ready(Err(crate::Error::new_body_write(::h2::Error::from(
- reason,
- ))));
- }
-
- match ready!(me.stream.poll_trailers(cx)) {
- Ok(Some(trailers)) => {
me.body_tx
- .send_trailers(trailers)
+ .send_trailers(frame.into_trailers().unwrap())
.map_err(crate::Error::new_body_write)?;
return Poll::Ready(Ok(()));
+ } else {
+ trace!("discarding unknown frame");
+ // loop again
}
- Ok(None) => {
- // There were no trailers, so send an empty DATA frame...
- return Poll::Ready(me.body_tx.send_eos_frame());
- }
- Err(e) => return Poll::Ready(Err(me.body_tx.on_user_err(e))),
+ }
+ Some(Err(e)) => return Poll::Ready(Err(me.body_tx.on_user_err(e))),
+ None => {
+ // no more frames means we're done here
+ // but at this point, we haven't sent an EOS DATA, or
+ // any trailers, so send an empty EOS DATA.
+ return Poll::Ready(me.body_tx.send_eos_frame());
}
}
}
|
2022-10-24T18:15:45Z
| 3,020
|
Update to use new http-body trait
With #2840 completed, we need to update hyper to use the new trait.
|
hyperium__hyper-3020
|
diff --git a/benches/body.rs b/benches/body.rs
--- a/benches/body.rs
+++ b/benches/body.rs
@@ -6,7 +6,8 @@ extern crate test;
use bytes::Buf;
use futures_util::stream;
use futures_util::StreamExt;
-use http_body_util::StreamBody;
+use http_body::Frame;
+use http_body_util::{BodyExt, StreamBody};
macro_rules! bench_stream {
($bencher:ident, bytes: $bytes:expr, count: $count:expr, $total_ident:ident, $body_pat:pat, $block:expr) => {{
diff --git a/benches/body.rs b/benches/body.rs
--- a/benches/body.rs
+++ b/benches/body.rs
@@ -39,7 +41,7 @@ macro_rules! benches {
#[bench]
fn $name(b: &mut test::Bencher) {
bench_stream!(b, bytes: $bytes, count: $count, total, body, {
- let buf = hyper::body::aggregate(body).await.unwrap();
+ let buf = BodyExt::collect(body).await.unwrap().aggregate();
assert_eq!(buf.remaining(), total);
});
}
diff --git a/benches/body.rs b/benches/body.rs
--- a/benches/body.rs
+++ b/benches/body.rs
@@ -70,7 +72,7 @@ macro_rules! benches {
#[bench]
fn $name(b: &mut test::Bencher) {
bench_stream!(b, bytes: $bytes, count: $count, total, body, {
- let bytes = hyper::body::to_bytes(body).await.unwrap();
+ let bytes = BodyExt::collect(body).await.unwrap().to_bytes();
assert_eq!(bytes.len(), total);
});
}
diff --git a/benches/server.rs b/benches/server.rs
--- a/benches/server.rs
+++ b/benches/server.rs
@@ -109,7 +110,7 @@ fn throughput_fixedsize_many_chunks(b: &mut test::Bencher) {
bench_server!(b, ("content-length", "1000000"), move || {
static S: &[&[u8]] = &[&[b'x'; 1_000] as &[u8]; 1_000] as _;
BodyExt::boxed(StreamBody::new(
- stream::iter(S.iter()).map(|&s| Ok::<_, String>(s)),
+ stream::iter(S.iter()).map(|&s| Ok::<_, String>(Frame::data(s))),
))
})
}
diff --git a/benches/server.rs b/benches/server.rs
--- a/benches/server.rs
+++ b/benches/server.rs
@@ -133,7 +134,7 @@ fn throughput_chunked_many_chunks(b: &mut test::Bencher) {
bench_server!(b, ("transfer-encoding", "chunked"), || {
static S: &[&[u8]] = &[&[b'x'; 1_000] as &[u8]; 1_000] as _;
BodyExt::boxed(StreamBody::new(
- stream::iter(S.iter()).map(|&s| Ok::<_, String>(s)),
+ stream::iter(S.iter()).map(|&s| Ok::<_, String>(Frame::data(s))),
))
})
}
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -388,6 +376,7 @@ mod tests {
use std::task::Poll;
use super::{Body, DecodedLength, Recv, Sender, SizeHint};
+ use http_body_util::BodyExt;
#[test]
fn test_size_of() {
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -395,7 +384,7 @@ mod tests {
// the size by too much.
let body_size = mem::size_of::<Recv>();
- let body_expected_size = mem::size_of::<u64>() * 6;
+ let body_expected_size = mem::size_of::<u64>() * 5;
assert!(
body_size <= body_expected_size,
"Body size = {} <= {}",
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -403,7 +392,7 @@ mod tests {
body_expected_size,
);
- assert_eq!(body_size, mem::size_of::<Option<Recv>>(), "Option<Recv>");
+ //assert_eq!(body_size, mem::size_of::<Option<Recv>>(), "Option<Recv>");
assert_eq!(
mem::size_of::<Sender>(),
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -444,7 +433,7 @@ mod tests {
tx.abort();
- let err = rx.data().await.unwrap().unwrap_err();
+ let err = rx.frame().await.unwrap().unwrap_err();
assert!(err.is_body_write_aborted(), "{:?}", err);
}
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -457,10 +446,16 @@ mod tests {
// buffer is full, but can still send abort
tx.abort();
- let chunk1 = rx.data().await.expect("item 1").expect("chunk 1");
+ let chunk1 = rx
+ .frame()
+ .await
+ .expect("item 1")
+ .expect("chunk 1")
+ .into_data()
+ .unwrap();
assert_eq!(chunk1, "chunk 1");
- let err = rx.data().await.unwrap().unwrap_err();
+ let err = rx.frame().await.unwrap().unwrap_err();
assert!(err.is_body_write_aborted(), "{:?}", err);
}
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -481,7 +476,7 @@ mod tests {
async fn channel_empty() {
let (_, mut rx) = Recv::channel();
- assert!(rx.data().await.is_none());
+ assert!(rx.frame().await.is_none());
}
#[test]
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -498,7 +493,7 @@ mod tests {
let (mut tx, mut rx) = Recv::new_channel(DecodedLength::CHUNKED, /*wanter = */ true);
let mut tx_ready = tokio_test::task::spawn(tx.ready());
- let mut rx_data = tokio_test::task::spawn(rx.data());
+ let mut rx_data = tokio_test::task::spawn(rx.frame());
assert!(
tx_ready.poll().is_pending(),
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -14,7 +14,7 @@ use std::time::Duration;
use http::uri::PathAndQuery;
use http_body_util::{BodyExt, StreamBody};
-use hyper::body::to_bytes as concat;
+use hyper::body::Frame;
use hyper::header::HeaderValue;
use hyper::{Method, Request, StatusCode, Uri, Version};
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -29,6 +29,13 @@ fn s(buf: &[u8]) -> &str {
std::str::from_utf8(buf).expect("from_utf8")
}
+async fn concat<B>(b: B) -> Result<Bytes, B::Error>
+where
+ B: hyper::body::Body,
+{
+ b.collect().await.map(|c| c.to_bytes())
+}
+
fn tcp_connect(addr: &SocketAddr) -> impl Future<Output = std::io::Result<TcpStream>> {
TcpStream::connect(*addr)
}
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -398,7 +405,10 @@ macro_rules! __client_req_prop {
}};
($req_builder:ident, $body:ident, $addr:ident, body_stream: $body_e:expr) => {{
- $body = BodyExt::boxed(StreamBody::new($body_e));
+ $body = BodyExt::boxed(StreamBody::new(futures_util::TryStreamExt::map_ok(
+ $body_e,
+ Frame::data,
+ )));
}};
}
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1327,12 +1337,12 @@ mod conn {
use bytes::{Buf, Bytes};
use futures_channel::{mpsc, oneshot};
use futures_util::future::{self, poll_fn, FutureExt, TryFutureExt};
- use http_body_util::{Empty, StreamBody};
+ use http_body_util::{BodyExt, Empty, StreamBody};
use hyper::rt::Timer;
use tokio::io::{AsyncRead, AsyncReadExt as _, AsyncWrite, AsyncWriteExt as _, ReadBuf};
use tokio::net::{TcpListener as TkTcpListener, TcpStream};
- use hyper::body::Body;
+ use hyper::body::{Body, Frame};
use hyper::client::conn;
use hyper::upgrade::OnUpgrade;
use hyper::{self, Method, Recv, Request, Response, StatusCode};
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1379,7 +1389,7 @@ mod conn {
.unwrap();
let mut res = client.send_request(req).await.expect("send_request");
assert_eq!(res.status(), hyper::StatusCode::OK);
- assert!(res.body_mut().data().await.is_none());
+ assert!(res.body_mut().frame().await.is_none());
};
future::join(server, client).await;
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1435,7 +1445,7 @@ mod conn {
res.headers().get(http::header::CONTENT_LENGTH).unwrap(),
"0"
);
- assert!(res.body_mut().data().await.is_none());
+ assert!(res.body_mut().frame().await.is_none());
};
future::join(server, client).await;
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1443,8 +1453,6 @@ mod conn {
#[test]
fn incoming_content_length() {
- use hyper::body::Body;
-
let server = TcpListener::bind("127.0.0.1:0").unwrap();
let addr = server.local_addr().unwrap();
let rt = support::runtime();
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1481,13 +1489,13 @@ mod conn {
assert_eq!(res.status(), hyper::StatusCode::OK);
assert_eq!(res.body().size_hint().exact(), Some(5));
assert!(!res.body().is_end_stream());
- poll_fn(move |ctx| Pin::new(res.body_mut()).poll_data(ctx)).map(Option::unwrap)
+ poll_fn(move |ctx| Pin::new(res.body_mut()).poll_frame(ctx)).map(Option::unwrap)
});
let rx = rx1.expect("thread panicked");
let rx = rx.then(|_| TokioTimer.sleep(Duration::from_millis(200)));
let chunk = rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap();
- assert_eq!(chunk.len(), 5);
+ assert_eq!(chunk.data_ref().unwrap().len(), 5);
}
#[test]
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1519,10 +1527,13 @@ mod conn {
rt.spawn(conn.map_err(|e| panic!("conn error: {}", e)).map(|_| ()));
- let (mut sender, recv) = mpsc::channel::<Result<Bytes, Box<dyn Error + Send + Sync>>>(0);
+ let (mut sender, recv) =
+ mpsc::channel::<Result<Frame<Bytes>, Box<dyn Error + Send + Sync>>>(0);
let sender = thread::spawn(move || {
- sender.try_send(Ok("hello".into())).expect("try_send_data");
+ sender
+ .try_send(Ok(Frame::data("hello".into())))
+ .expect("try_send_data");
support::runtime().block_on(rx).unwrap();
// Aborts the body in an abnormal fashion.
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2100,7 +2111,7 @@ mod conn {
sock,
service_fn(|req| async move {
tokio::spawn(async move {
- let _ = hyper::body::aggregate(req.into_body())
+ let _ = concat(req.into_body())
.await
.expect("server req body aggregate");
});
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2126,7 +2137,7 @@ mod conn {
});
// Use a channel to keep request stream open
- let (_tx, recv) = mpsc::channel::<Result<Bytes, Box<dyn Error + Send + Sync>>>(0);
+ let (_tx, recv) = mpsc::channel::<Result<Frame<Bytes>, Box<dyn Error + Send + Sync>>>(0);
let req = http::Request::new(StreamBody::new(recv));
let _resp = client.send_request(req).await.expect("send_request");
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2245,7 +2256,7 @@ mod conn {
assert!(res.extensions().get::<OnUpgrade>().is_none());
let mut body = String::new();
- hyper::body::aggregate(res.into_body())
+ concat(res.into_body())
.await
.unwrap()
.reader()
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2276,8 +2276,8 @@ fn http2_body_user_error_sends_reset_reason() {
let mut res = client.get(uri).await?;
- while let Some(chunk) = res.body_mut().data().await {
- chunk?;
+ while let Some(item) = res.body_mut().frame().await {
+ item?;
}
Ok(())
})
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2631,7 +2631,9 @@ impl<'a> ReplyBuilder<'a> {
where
S: futures_util::Stream<Item = Result<Bytes, BoxError>> + Send + Sync + 'static,
{
- let body = BodyExt::boxed(StreamBody::new(stream));
+ use futures_util::TryStreamExt;
+ use hyper::body::Frame;
+ let body = BodyExt::boxed(StreamBody::new(stream.map_ok(Frame::data)));
self.tx.lock().unwrap().send(Reply::Body(body)).unwrap();
}
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2703,10 +2705,13 @@ impl Service<Request<Recv>> for TestService {
let replies = self.reply.clone();
Box::pin(async move {
- while let Some(chunk) = req.data().await {
- match chunk {
- Ok(chunk) => {
- tx.send(Msg::Chunk(chunk.to_vec())).unwrap();
+ while let Some(item) = req.frame().await {
+ match item {
+ Ok(frame) => {
+ if frame.is_data() {
+ tx.send(Msg::Chunk(frame.into_data().unwrap().to_vec()))
+ .unwrap();
+ }
}
Err(err) => {
tx.send(Msg::Error(err)).unwrap();
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -7,7 +7,7 @@ use std::sync::{
};
use bytes::Bytes;
-use http_body_util::Full;
+use http_body_util::{BodyExt, Full};
use hyper::server;
use tokio::net::{TcpListener, TcpStream};
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -370,7 +370,8 @@ async fn async_test(cfg: __TestConfig) {
func(&req.headers());
}
let sbody = sreq.body;
- hyper::body::to_bytes(req).map_ok(move |body| {
+ req.collect().map_ok(move |collected| {
+ let body = collected.to_bytes();
assert_eq!(body.as_ref(), sbody.as_slice(), "client body");
let mut res = Response::builder()
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -458,7 +459,7 @@ async fn async_test(cfg: __TestConfig) {
func(&res.headers());
}
- let body = hyper::body::to_bytes(res).await.unwrap();
+ let body = res.collect().await.unwrap().to_bytes();
assert_eq!(body.as_ref(), cbody.as_slice(), "server body");
}
|
hyperium/hyper
|
6fd696e10974f10b2c6b9d16393bbbfa21c2333f
|
|
[
"3012"
] |
0.3
|
fc4d3356cb7f2fffff5af9c474fa34c5adc5d6f1
|
diff --git a/benches/pipeline.rs b/benches/pipeline.rs
--- a/benches/pipeline.rs
+++ b/benches/pipeline.rs
@@ -14,7 +14,7 @@ use http_body_util::Full;
use tokio::net::TcpListener;
use tokio::sync::oneshot;
-use hyper::server::conn::Http;
+use hyper::server::conn::http1;
use hyper::service::service_fn;
use hyper::Response;
diff --git a/benches/server.rs b/benches/server.rs
--- a/benches/server.rs
+++ b/benches/server.rs
@@ -13,7 +13,7 @@ use futures_util::{stream, StreamExt};
use http_body_util::{BodyExt, Full, StreamBody};
use tokio::sync::oneshot;
-use hyper::server::conn::Http;
+use hyper::server::conn::http1;
use hyper::service::service_fn;
use hyper::Response;
diff --git a/benches/server.rs b/benches/server.rs
--- a/benches/server.rs
+++ b/benches/server.rs
@@ -38,7 +38,7 @@ macro_rules! bench_server {
loop {
let (stream, _) = listener.accept().await.expect("accept");
- Http::new()
+ http1::Builder::new()
.serve_connection(
stream,
service_fn(|_| async {
diff --git a/examples/echo.rs b/examples/echo.rs
--- a/examples/echo.rs
+++ b/examples/echo.rs
@@ -5,7 +5,7 @@ use std::net::SocketAddr;
use bytes::Bytes;
use http_body_util::{combinators::BoxBody, BodyExt, Empty, Full};
use hyper::body::Body as _;
-use hyper::server::conn::Http;
+use hyper::server::conn::http1;
use hyper::service::service_fn;
use hyper::{Method, Recv, Request, Response, StatusCode};
use tokio::net::TcpListener;
diff --git a/examples/echo.rs b/examples/echo.rs
--- a/examples/echo.rs
+++ b/examples/echo.rs
@@ -87,7 +87,10 @@ async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let (stream, _) = listener.accept().await?;
tokio::task::spawn(async move {
- if let Err(err) = Http::new().serve_connection(stream, service_fn(echo)).await {
+ if let Err(err) = http1::Builder::new()
+ .serve_connection(stream, service_fn(echo))
+ .await
+ {
println!("Error serving connection: {:?}", err);
}
});
diff --git a/examples/gateway.rs b/examples/gateway.rs
--- a/examples/gateway.rs
+++ b/examples/gateway.rs
@@ -1,6 +1,6 @@
#![deny(warnings)]
-use hyper::{server::conn::Http, service::service_fn};
+use hyper::{server::conn::http1, service::service_fn};
use std::net::SocketAddr;
use tokio::net::{TcpListener, TcpStream};
diff --git a/examples/gateway.rs b/examples/gateway.rs
--- a/examples/gateway.rs
+++ b/examples/gateway.rs
@@ -56,7 +56,10 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
});
tokio::task::spawn(async move {
- if let Err(err) = Http::new().serve_connection(stream, service).await {
+ if let Err(err) = http1::Builder::new()
+ .serve_connection(stream, service)
+ .await
+ {
println!("Failed to servce connection: {:?}", err);
}
});
diff --git a/examples/hello.rs b/examples/hello.rs
--- a/examples/hello.rs
+++ b/examples/hello.rs
@@ -5,7 +5,7 @@ use std::net::SocketAddr;
use bytes::Bytes;
use http_body_util::Full;
-use hyper::server::conn::Http;
+use hyper::server::conn::http1;
use hyper::service::service_fn;
use hyper::{Recv, Request, Response};
use tokio::net::TcpListener;
diff --git a/examples/hello.rs b/examples/hello.rs
--- a/examples/hello.rs
+++ b/examples/hello.rs
@@ -26,7 +26,7 @@ pub async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let (stream, _) = listener.accept().await?;
tokio::task::spawn(async move {
- if let Err(err) = Http::new()
+ if let Err(err) = http1::Builder::new()
.serve_connection(stream, service_fn(hello))
.await
{
diff --git a/examples/http_proxy.rs b/examples/http_proxy.rs
--- a/examples/http_proxy.rs
+++ b/examples/http_proxy.rs
@@ -5,7 +5,7 @@ use std::net::SocketAddr;
use bytes::Bytes;
use http_body_util::{combinators::BoxBody, BodyExt, Empty, Full};
use hyper::client::conn::http1::Builder;
-use hyper::server::conn::Http;
+use hyper::server::conn::http1;
use hyper::service::service_fn;
use hyper::upgrade::Upgraded;
use hyper::{Method, Recv, Request, Response};
diff --git a/examples/http_proxy.rs b/examples/http_proxy.rs
--- a/examples/http_proxy.rs
+++ b/examples/http_proxy.rs
@@ -30,7 +30,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
let (stream, _) = listener.accept().await?;
tokio::task::spawn(async move {
- if let Err(err) = Http::new()
+ if let Err(err) = http1::Builder::new()
.http1_preserve_header_case(true)
.http1_title_case_headers(true)
.serve_connection(stream, service_fn(proxy))
diff --git a/examples/multi_server.rs b/examples/multi_server.rs
--- a/examples/multi_server.rs
+++ b/examples/multi_server.rs
@@ -6,7 +6,7 @@ use std::net::SocketAddr;
use bytes::Bytes;
use futures_util::future::join;
use http_body_util::Full;
-use hyper::server::conn::Http;
+use hyper::server::conn::http1;
use hyper::service::service_fn;
use hyper::{Recv, Request, Response};
use tokio::net::TcpListener;
diff --git a/examples/multi_server.rs b/examples/multi_server.rs
--- a/examples/multi_server.rs
+++ b/examples/multi_server.rs
@@ -35,7 +35,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let (stream, _) = listener.accept().await.unwrap();
tokio::task::spawn(async move {
- if let Err(err) = Http::new()
+ if let Err(err) = http1::Builder::new()
.serve_connection(stream, service_fn(index1))
.await
{
diff --git a/examples/multi_server.rs b/examples/multi_server.rs
--- a/examples/multi_server.rs
+++ b/examples/multi_server.rs
@@ -51,7 +51,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let (stream, _) = listener.accept().await.unwrap();
tokio::task::spawn(async move {
- if let Err(err) = Http::new()
+ if let Err(err) = http1::Builder::new()
.serve_connection(stream, service_fn(index2))
.await
{
diff --git a/examples/params.rs b/examples/params.rs
--- a/examples/params.rs
+++ b/examples/params.rs
@@ -3,7 +3,7 @@
use bytes::Bytes;
use http_body_util::{combinators::BoxBody, BodyExt, Empty, Full};
-use hyper::server::conn::Http;
+use hyper::server::conn::http1;
use hyper::service::service_fn;
use hyper::{Method, Recv, Request, Response, StatusCode};
use tokio::net::TcpListener;
diff --git a/examples/params.rs b/examples/params.rs
--- a/examples/params.rs
+++ b/examples/params.rs
@@ -126,7 +126,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let (stream, _) = listener.accept().await?;
tokio::task::spawn(async move {
- if let Err(err) = Http::new()
+ if let Err(err) = http1::Builder::new()
.serve_connection(stream, service_fn(param_example))
.await
{
diff --git a/examples/send_file.rs b/examples/send_file.rs
--- a/examples/send_file.rs
+++ b/examples/send_file.rs
@@ -2,7 +2,7 @@
use std::net::SocketAddr;
-use hyper::server::conn::Http;
+use hyper::server::conn::http1;
use tokio::net::TcpListener;
use bytes::Bytes;
diff --git a/examples/send_file.rs b/examples/send_file.rs
--- a/examples/send_file.rs
+++ b/examples/send_file.rs
@@ -26,7 +26,7 @@ async fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
let (stream, _) = listener.accept().await?;
tokio::task::spawn(async move {
- if let Err(err) = Http::new()
+ if let Err(err) = http1::Builder::new()
.serve_connection(stream, service_fn(response_examples))
.await
{
diff --git a/examples/service_struct_impl.rs b/examples/service_struct_impl.rs
--- a/examples/service_struct_impl.rs
+++ b/examples/service_struct_impl.rs
@@ -1,6 +1,6 @@
use bytes::Bytes;
use http_body_util::Full;
-use hyper::server::conn::Http;
+use hyper::server::conn::http1;
use hyper::service::Service;
use hyper::{Recv, Request, Response};
use tokio::net::TcpListener;
diff --git a/examples/service_struct_impl.rs b/examples/service_struct_impl.rs
--- a/examples/service_struct_impl.rs
+++ b/examples/service_struct_impl.rs
@@ -22,7 +22,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let (stream, _) = listener.accept().await?;
tokio::task::spawn(async move {
- if let Err(err) = Http::new()
+ if let Err(err) = http1::Builder::new()
.serve_connection(stream, Svc { counter: 81818 })
.await
{
diff --git a/examples/single_threaded.rs b/examples/single_threaded.rs
--- a/examples/single_threaded.rs
+++ b/examples/single_threaded.rs
@@ -1,6 +1,6 @@
#![deny(warnings)]
-use hyper::server::conn::Http;
+use hyper::server::conn::http2;
use std::cell::Cell;
use std::net::SocketAddr;
use std::rc::Rc;
diff --git a/examples/single_threaded.rs b/examples/single_threaded.rs
--- a/examples/single_threaded.rs
+++ b/examples/single_threaded.rs
@@ -84,8 +84,7 @@ async fn run() -> Result<(), Box<dyn std::error::Error>> {
});
tokio::task::spawn_local(async move {
- if let Err(err) = Http::new()
- .with_executor(LocalExec)
+ if let Err(err) = http2::Builder::new(LocalExec)
.serve_connection(stream, service)
.await
{
diff --git a/examples/single_threaded.rs b/examples/single_threaded.rs
--- a/examples/single_threaded.rs
+++ b/examples/single_threaded.rs
@@ -95,6 +94,8 @@ async fn run() -> Result<(), Box<dyn std::error::Error>> {
}
}
+// NOTE: This part is only needed for HTTP/2. HTTP/1 doesn't need an executor.
+//
// Since the Server needs to spawn some background tasks, we needed
// to configure an Executor that can spawn !Send futures...
#[derive(Clone, Copy, Debug)]
diff --git a/examples/state.rs b/examples/state.rs
--- a/examples/state.rs
+++ b/examples/state.rs
@@ -8,7 +8,7 @@ use std::sync::{
use bytes::Bytes;
use http_body_util::Full;
-use hyper::{server::conn::Http, service::service_fn};
+use hyper::{server::conn::http1, service::service_fn};
use hyper::{Error, Response};
use tokio::net::TcpListener;
diff --git a/examples/state.rs b/examples/state.rs
--- a/examples/state.rs
+++ b/examples/state.rs
@@ -46,7 +46,10 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
}
});
- if let Err(err) = Http::new().serve_connection(stream, service).await {
+ if let Err(err) = http1::Builder::new()
+ .serve_connection(stream, service)
+ .await
+ {
println!("Error serving connection: {:?}", err);
}
}
diff --git a/examples/upgrades.rs b/examples/upgrades.rs
--- a/examples/upgrades.rs
+++ b/examples/upgrades.rs
@@ -11,7 +11,7 @@ use tokio::sync::watch;
use bytes::Bytes;
use http_body_util::Empty;
use hyper::header::{HeaderValue, UPGRADE};
-use hyper::server::conn::Http;
+use hyper::server::conn::http1;
use hyper::service::service_fn;
use hyper::upgrade::Upgraded;
use hyper::{Recv, Request, Response, StatusCode};
diff --git a/examples/upgrades.rs b/examples/upgrades.rs
--- a/examples/upgrades.rs
+++ b/examples/upgrades.rs
@@ -149,7 +149,7 @@ async fn main() {
let mut rx = rx.clone();
tokio::task::spawn(async move {
- let conn = Http::new().serve_connection(stream, service_fn(server_upgrade));
+ let conn = http1::Builder::new().serve_connection(stream, service_fn(server_upgrade));
// Don't forget to enable upgrades on the connection.
let mut conn = conn.with_upgrades();
diff --git a/examples/web_api.rs b/examples/web_api.rs
--- a/examples/web_api.rs
+++ b/examples/web_api.rs
@@ -4,7 +4,7 @@ use std::net::SocketAddr;
use bytes::{Buf, Bytes};
use http_body_util::{BodyExt, Full};
-use hyper::server::conn::Http;
+use hyper::server::conn::http1;
use hyper::service::service_fn;
use hyper::{header, Method, Recv, Request, Response, StatusCode};
use tokio::net::{TcpListener, TcpStream};
diff --git a/examples/web_api.rs b/examples/web_api.rs
--- a/examples/web_api.rs
+++ b/examples/web_api.rs
@@ -113,7 +113,10 @@ async fn main() -> Result<()> {
tokio::task::spawn(async move {
let service = service_fn(move |req| response_examples(req));
- if let Err(err) = Http::new().serve_connection(stream, service).await {
+ if let Err(err) = http1::Builder::new()
+ .serve_connection(stream, service)
+ .await
+ {
println!("Failed to serve connection: {:?}", err);
}
});
diff --git a/src/common/exec.rs b/src/common/exec.rs
--- a/src/common/exec.rs
+++ b/src/common/exec.rs
@@ -20,7 +20,7 @@ pub(crate) type BoxSendFuture = Pin<Box<dyn Future<Output = ()> + Send>>;
// TODO: with the `runtime`feature, `Exec::Default` used `tokio::spawn`. With the
// removal of the opt-in default runtime, this should be refactored.
#[derive(Clone)]
-pub enum Exec {
+pub(crate) enum Exec {
Default,
Executor(Arc<dyn Executor<BoxSendFuture> + Send + Sync>),
}
diff --git a/src/server/conn/http1.rs b/src/server/conn/http1.rs
--- a/src/server/conn/http1.rs
+++ b/src/server/conn/http1.rs
@@ -2,7 +2,6 @@
use std::error::Error as StdError;
use std::fmt;
-use std::marker::PhantomData;
use std::sync::Arc;
use std::time::Duration;
diff --git a/src/server/conn/http1.rs b/src/server/conn/http1.rs
--- a/src/server/conn/http1.rs
+++ b/src/server/conn/http1.rs
@@ -10,7 +9,6 @@ use bytes::Bytes;
use tokio::io::{AsyncRead, AsyncWrite};
use crate::body::{Body, Recv};
-use crate::common::exec::{ConnStreamExec, Exec};
use crate::common::{task, Future, Pin, Poll, Unpin};
use crate::{common::time::Time, rt::Timer};
use crate::proto;
diff --git a/src/server/conn/http1.rs b/src/server/conn/http1.rs
--- a/src/server/conn/http1.rs
+++ b/src/server/conn/http1.rs
@@ -25,21 +23,18 @@ pin_project_lite::pin_project! {
///
/// Polling this future will drive HTTP forward.
#[must_use = "futures do nothing unless polled"]
- pub struct Connection<T, S, E>
+ pub struct Connection<T, S>
where
S: HttpService<Recv>,
{
conn: Option<Http1Dispatcher<T, S::ResBody, S>>,
- // can we remove this?
- _exec: PhantomData<E>,
}
}
/// A configuration builder for HTTP/1 server connections.
#[derive(Clone, Debug)]
-pub struct Builder<E = Exec> {
- pub(crate) _exec: E,
+pub struct Builder {
pub(crate) timer: Time,
h1_half_close: bool,
h1_keep_alive: bool,
diff --git a/src/server/conn/http1.rs b/src/server/conn/http1.rs
--- a/src/server/conn/http1.rs
+++ b/src/server/conn/http1.rs
@@ -75,7 +70,7 @@ pub struct Parts<T, S> {
// ===== impl Connection =====
-impl<I, S, E> fmt::Debug for Connection<I, S, E>
+impl<I, S> fmt::Debug for Connection<I, S>
where
S: HttpService<Recv>,
{
diff --git a/src/server/conn/http1.rs b/src/server/conn/http1.rs
--- a/src/server/conn/http1.rs
+++ b/src/server/conn/http1.rs
@@ -84,14 +79,13 @@ where
}
}
-impl<I, B, S, E> Connection<I, S, E>
+impl<I, B, S> Connection<I, S>
where
S: HttpService<Recv, ResBody = B>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
I: AsyncRead + AsyncWrite + Unpin,
B: Body + 'static,
B::Error: Into<Box<dyn StdError + Send + Sync>>,
- E: ConnStreamExec<S::Future, B>,
{
/// Start a graceful shutdown process for this connection.
///
diff --git a/src/server/conn/http1.rs b/src/server/conn/http1.rs
--- a/src/server/conn/http1.rs
+++ b/src/server/conn/http1.rs
@@ -187,7 +181,7 @@ where
/// Enable this connection to support higher-level HTTP upgrades.
///
/// See [the `upgrade` module](crate::upgrade) for more.
- pub fn with_upgrades(self) -> upgrades::UpgradeableConnection<I, S, E>
+ pub fn with_upgrades(self) -> upgrades::UpgradeableConnection<I, S>
where
I: Send,
{
diff --git a/src/server/conn/http1.rs b/src/server/conn/http1.rs
--- a/src/server/conn/http1.rs
+++ b/src/server/conn/http1.rs
@@ -196,14 +190,13 @@ where
}
-impl<I, B, S, E> Future for Connection<I, S, E>
+impl<I, B, S> Future for Connection<I, S>
where
S: HttpService<Recv, ResBody = B>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
I: AsyncRead + AsyncWrite + Unpin + 'static,
B: Body + 'static,
B::Error: Into<Box<dyn StdError + Send + Sync>>,
- E: ConnStreamExec<S::Future, B>,
{
type Output = crate::Result<()>;
diff --git a/src/server/conn/http1.rs b/src/server/conn/http1.rs
--- a/src/server/conn/http1.rs
+++ b/src/server/conn/http1.rs
@@ -229,13 +222,10 @@ where
// ===== impl Builder =====
-impl<E> Builder<E> {
+impl Builder {
/// Create a new connection builder.
- ///
- /// This starts with the default options, and an executor.
- pub fn new(exec: E) -> Self {
+ pub fn new() -> Self {
Self {
- _exec: exec,
timer: Time::Empty,
h1_half_close: false,
h1_keep_alive: true,
diff --git a/src/server/conn/http1.rs b/src/server/conn/http1.rs
--- a/src/server/conn/http1.rs
+++ b/src/server/conn/http1.rs
@@ -351,24 +341,6 @@ impl<E> Builder<E> {
self
}
- /// Set the executor used to spawn background tasks.
- ///
- /// Default uses implicit default (like `tokio::spawn`).
- pub fn with_executor<E2>(self, exec: E2) -> Builder<E2> {
- Builder {
- _exec: exec,
- timer: self.timer,
- h1_half_close: self.h1_half_close,
- h1_keep_alive: self.h1_keep_alive,
- h1_title_case_headers: self.h1_title_case_headers,
- h1_preserve_header_case: self.h1_preserve_header_case,
- h1_header_read_timeout: self.h1_header_read_timeout,
- h1_writev: self.h1_writev,
- max_buf_size: self.max_buf_size,
- pipeline_flush: self.pipeline_flush,
- }
- }
-
/// Set the timer used in background tasks.
pub fn timer<M>(&mut self, timer: M) -> &mut Self
where
diff --git a/src/server/conn/http1.rs b/src/server/conn/http1.rs
--- a/src/server/conn/http1.rs
+++ b/src/server/conn/http1.rs
@@ -388,7 +360,7 @@ impl<E> Builder<E> {
/// ```
/// # use hyper::{Recv, Request, Response};
/// # use hyper::service::Service;
- /// # use hyper::server::conn::Http;
+ /// # use hyper::server::conn::http1::Builder;
/// # use tokio::io::{AsyncRead, AsyncWrite};
/// # async fn run<I, S>(some_io: I, some_service: S)
/// # where
diff --git a/src/server/conn/http1.rs b/src/server/conn/http1.rs
--- a/src/server/conn/http1.rs
+++ b/src/server/conn/http1.rs
@@ -397,7 +369,7 @@ impl<E> Builder<E> {
/// # S::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
/// # S::Future: Send,
/// # {
- /// let http = Http::new();
+ /// let http = Builder::new();
/// let conn = http.serve_connection(some_io, some_service);
///
/// if let Err(e) = conn.await {
diff --git a/src/server/conn/http1.rs b/src/server/conn/http1.rs
--- a/src/server/conn/http1.rs
+++ b/src/server/conn/http1.rs
@@ -406,14 +378,13 @@ impl<E> Builder<E> {
/// # }
/// # fn main() {}
/// ```
- pub fn serve_connection<S, I, Bd>(&self, io: I, service: S) -> Connection<I, S, E>
+ pub fn serve_connection<I, S>(&self, io: I, service: S) -> Connection<I, S>
where
- S: HttpService<Recv, ResBody = Bd>,
+ S: HttpService<Recv>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
- Bd: Body + 'static,
- Bd::Error: Into<Box<dyn StdError + Send + Sync>>,
+ S::ResBody: 'static,
+ <S::ResBody as Body>::Error: Into<Box<dyn StdError + Send + Sync>>,
I: AsyncRead + AsyncWrite + Unpin,
- E: ConnStreamExec<S::Future, Bd>,
{
let mut conn = proto::Conn::new(io);
conn.set_timer(self.timer.clone());
diff --git a/src/server/conn/http1.rs b/src/server/conn/http1.rs
--- a/src/server/conn/http1.rs
+++ b/src/server/conn/http1.rs
@@ -447,7 +418,6 @@ impl<E> Builder<E> {
let proto = proto::h1::Dispatcher::new(sd, conn);
Connection {
conn: Some(proto),
- _exec: PhantomData,
}
}
}
diff --git a/src/server/conn/http1.rs b/src/server/conn/http1.rs
--- a/src/server/conn/http1.rs
+++ b/src/server/conn/http1.rs
@@ -459,25 +429,23 @@ mod upgrades {
// A future binding a connection with a Service with Upgrade support.
//
- // This type is unnameable outside the crate, and so basically just an
- // `impl Future`, without requiring Rust 1.26.
+ // This type is unnameable outside the crate.
#[must_use = "futures do nothing unless polled"]
#[allow(missing_debug_implementations)]
- pub struct UpgradeableConnection<T, S, E>
+ pub struct UpgradeableConnection<T, S>
where
S: HttpService<Recv>,
{
- pub(super) inner: Connection<T, S, E>,
+ pub(super) inner: Connection<T, S>,
}
- impl<I, B, S, E> UpgradeableConnection<I, S, E>
+ impl<I, B, S> UpgradeableConnection<I, S>
where
S: HttpService<Recv, ResBody = B>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
I: AsyncRead + AsyncWrite + Unpin,
B: Body + 'static,
B::Error: Into<Box<dyn StdError + Send + Sync>>,
- E: ConnStreamExec<S::Future, B>,
{
/// Start a graceful shutdown process for this connection.
///
diff --git a/src/server/conn/http1.rs b/src/server/conn/http1.rs
--- a/src/server/conn/http1.rs
+++ b/src/server/conn/http1.rs
@@ -488,14 +456,13 @@ mod upgrades {
}
}
- impl<I, B, S, E> Future for UpgradeableConnection<I, S, E>
+ impl<I, B, S> Future for UpgradeableConnection<I, S>
where
S: HttpService<Recv, ResBody = B>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
I: AsyncRead + AsyncWrite + Unpin + Send + 'static,
B: Body + 'static,
B::Error: Into<Box<dyn StdError + Send + Sync>>,
- E: ConnStreamExec<S::Future, B>,
{
type Output = crate::Result<()>;
diff --git a/src/server/conn/http2.rs b/src/server/conn/http2.rs
--- a/src/server/conn/http2.rs
+++ b/src/server/conn/http2.rs
@@ -284,30 +284,6 @@ impl<E> Builder<E> {
///
/// This returns a Future that must be polled in order for HTTP to be
/// driven on the connection.
- ///
- /// # Example
- ///
- /// ```
- /// # use hyper::{Recv, Request, Response};
- /// # use hyper::service::Service;
- /// # use hyper::server::conn::Http;
- /// # use tokio::io::{AsyncRead, AsyncWrite};
- /// # async fn run<I, S>(some_io: I, some_service: S)
- /// # where
- /// # I: AsyncRead + AsyncWrite + Unpin + Send + 'static,
- /// # S: Service<hyper::Request<Recv>, Response=hyper::Response<Recv>> + Send + 'static,
- /// # S::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
- /// # S::Future: Send,
- /// # {
- /// let http = Http::new();
- /// let conn = http.serve_connection(some_io, some_service);
- ///
- /// if let Err(e) = conn.await {
- /// eprintln!("server connection error: {}", e);
- /// }
- /// # }
- /// # fn main() {}
- /// ```
pub fn serve_connection<S, I, Bd>(&self, io: I, service: S) -> Connection<I, S, E>
where
S: HttpService<Recv, ResBody = Bd>,
diff --git a/src/server/conn/mod.rs b/src/server/conn/mod.rs
--- a/src/server/conn/mod.rs
+++ b/src/server/conn/mod.rs
@@ -1,18 +1,23 @@
-//! Lower-level Server connection API.
+//! Server connection API.
//!
//! The types in this module are to provide a lower-level API based around a
//! single connection. Accepting a connection and binding it with a service
//! are not handled at this level. This module provides the building blocks to
//! customize those things externally.
//!
+//! This module is split by HTTP version. Both work similarly, but do have
+//! specific options on each builder.
+//!
//! ## Example
-//! A simple example that uses the `Http` struct to talk HTTP over a Tokio TCP stream
+//!
+//! A simple example that prepares an HTTP/1 connection over a Tokio TCP stream.
+//!
//! ```no_run
//! # #[cfg(feature = "http1")]
//! # mod rt {
//! use http::{Request, Response, StatusCode};
//! use http_body_util::Full;
-//! use hyper::{server::conn::Http, service::service_fn, body::Bytes};
+//! use hyper::{server::conn::http1, service::service_fn, body::Bytes};
//! use std::{net::SocketAddr, convert::Infallible};
//! use tokio::net::TcpListener;
//!
diff --git a/src/server/conn/mod.rs b/src/server/conn/mod.rs
--- a/src/server/conn/mod.rs
+++ b/src/server/conn/mod.rs
@@ -24,8 +29,7 @@
//! loop {
//! let (tcp_stream, _) = tcp_listener.accept().await?;
//! tokio::task::spawn(async move {
-//! if let Err(http_err) = Http::new()
-//! .http1_only(true)
+//! if let Err(http_err) = http1::Builder::new()
//! .http1_keep_alive(true)
//! .serve_connection(tcp_stream, service_fn(hello))
//! .await {
diff --git a/src/server/conn/mod.rs b/src/server/conn/mod.rs
--- a/src/server/conn/mod.rs
+++ b/src/server/conn/mod.rs
@@ -41,1048 +45,8 @@
//! # }
//! ```
-#[cfg(all(
- any(feature = "http1", feature = "http2"),
- not(all(feature = "http1", feature = "http2"))
-))]
-use std::marker::PhantomData;
-use std::sync::Arc;
-#[cfg(any(feature = "http1", feature = "http2"))]
-use std::time::Duration;
-
-#[cfg(feature = "http2")]
-use crate::common::io::Rewind;
-#[cfg(all(feature = "http1", feature = "http2"))]
-use crate::error::{Kind, Parse};
-#[cfg(feature = "http1")]
-use crate::upgrade::Upgraded;
-use crate::{common::time::Time, rt::Timer};
-
#[cfg(feature = "http1")]
pub mod http1;
#[cfg(feature = "http2")]
pub mod http2;
-cfg_feature! {
- #![any(feature = "http1", feature = "http2")]
-
- use std::error::Error as StdError;
- use std::fmt;
-
- use bytes::Bytes;
- use pin_project_lite::pin_project;
- use tokio::io::{AsyncRead, AsyncWrite};
- use tracing::trace;
-
- use crate::body::{Recv, Body};
- use crate::common::{task, Future, Pin, Poll, Unpin};
- #[cfg(not(all(feature = "http1", feature = "http2")))]
- use crate::common::Never;
- use crate::common::exec::{ConnStreamExec, Exec};
- use crate::proto;
- use crate::service::HttpService;
-
- pub(super) use self::upgrades::UpgradeableConnection;
-}
-
-/// A lower-level configuration of the HTTP protocol.
-///
-/// This structure is used to configure options for an HTTP server connection.
-#[derive(Clone, Debug)]
-#[cfg(any(feature = "http1", feature = "http2"))]
-#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))]
-pub struct Http<E = Exec> {
- pub(crate) exec: E,
- pub(crate) timer: Time,
- h1_half_close: bool,
- h1_keep_alive: bool,
- h1_title_case_headers: bool,
- h1_preserve_header_case: bool,
- #[cfg(feature = "http1")]
- h1_header_read_timeout: Option<Duration>,
- h1_writev: Option<bool>,
- #[cfg(feature = "http2")]
- h2_builder: proto::h2::server::Config,
- mode: ConnectionMode,
- max_buf_size: Option<usize>,
- pipeline_flush: bool,
-}
-
-/// The internal mode of HTTP protocol which indicates the behavior when a parse error occurs.
-#[cfg(any(feature = "http1", feature = "http2"))]
-#[derive(Clone, Debug, PartialEq)]
-enum ConnectionMode {
- /// Always use HTTP/1 and do not upgrade when a parse error occurs.
- #[cfg(feature = "http1")]
- H1Only,
- /// Always use HTTP/2.
- #[cfg(feature = "http2")]
- H2Only,
- /// Use HTTP/1 and try to upgrade to h2 when a parse error occurs.
- #[cfg(all(feature = "http1", feature = "http2"))]
- Fallback,
-}
-
-#[cfg(any(feature = "http1", feature = "http2"))]
-pin_project! {
- /// A future binding a connection with a Service.
- ///
- /// Polling this future will drive HTTP forward.
- #[must_use = "futures do nothing unless polled"]
- #[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))]
- pub struct Connection<T, S, E = Exec>
- where
- S: HttpService<Recv>,
- {
- pub(super) conn: Option<ProtoServer<T, S::ResBody, S, E>>,
- fallback: Fallback<E>,
- }
-}
-
-#[cfg(feature = "http1")]
-type Http1Dispatcher<T, B, S> =
- proto::h1::Dispatcher<proto::h1::dispatch::Server<S, Recv>, B, T, proto::ServerTransaction>;
-
-#[cfg(all(not(feature = "http1"), feature = "http2"))]
-type Http1Dispatcher<T, B, S> = (Never, PhantomData<(T, Box<Pin<B>>, Box<Pin<S>>)>);
-
-#[cfg(feature = "http2")]
-type Http2Server<T, B, S, E> = proto::h2::Server<Rewind<T>, S, B, E>;
-
-#[cfg(all(not(feature = "http2"), feature = "http1"))]
-type Http2Server<T, B, S, E> = (
- Never,
- PhantomData<(T, Box<Pin<S>>, Box<Pin<B>>, Box<Pin<E>>)>,
-);
-
-#[cfg(any(feature = "http1", feature = "http2"))]
-pin_project! {
- #[project = ProtoServerProj]
- pub(super) enum ProtoServer<T, B, S, E = Exec>
- where
- S: HttpService<Recv>,
- B: Body,
- {
- H1 {
- #[pin]
- h1: Http1Dispatcher<T, B, S>,
- },
- H2 {
- #[pin]
- h2: Http2Server<T, B, S, E>,
- },
- }
-}
-
-#[cfg(all(feature = "http1", feature = "http2"))]
-#[derive(Clone, Debug)]
-enum Fallback<E> {
- ToHttp2(proto::h2::server::Config, E, Time),
- Http1Only,
-}
-
-#[cfg(all(
- any(feature = "http1", feature = "http2"),
- not(all(feature = "http1", feature = "http2"))
-))]
-type Fallback<E> = PhantomData<E>;
-
-#[cfg(all(feature = "http1", feature = "http2"))]
-impl<E> Fallback<E> {
- fn to_h2(&self) -> bool {
- match *self {
- Fallback::ToHttp2(..) => true,
- Fallback::Http1Only => false,
- }
- }
-}
-
-#[cfg(all(feature = "http1", feature = "http2"))]
-impl<E> Unpin for Fallback<E> {}
-
-/// Deconstructed parts of a `Connection`.
-///
-/// This allows taking apart a `Connection` at a later time, in order to
-/// reclaim the IO object, and additional related pieces.
-#[derive(Debug)]
-#[cfg(any(feature = "http1", feature = "http2"))]
-#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))]
-pub struct Parts<T, S> {
- /// The original IO object used in the handshake.
- pub io: T,
- /// A buffer of bytes that have been read but not processed as HTTP.
- ///
- /// If the client sent additional bytes after its last request, and
- /// this connection "ended" with an upgrade, the read buffer will contain
- /// those bytes.
- ///
- /// You will want to check for any existing bytes if you plan to continue
- /// communicating on the IO object.
- pub read_buf: Bytes,
- /// The `Service` used to serve this connection.
- pub service: S,
- _inner: (),
-}
-
-// ===== impl Http =====
-
-#[cfg(any(feature = "http1", feature = "http2"))]
-impl Http {
- /// Creates a new instance of the HTTP protocol, ready to spawn a server or
- /// start accepting connections.
- pub fn new() -> Http {
- Http {
- exec: Exec::Default,
- timer: Time::Empty,
- h1_half_close: false,
- h1_keep_alive: true,
- h1_title_case_headers: false,
- h1_preserve_header_case: false,
- #[cfg(feature = "http1")]
- h1_header_read_timeout: None,
- h1_writev: None,
- #[cfg(feature = "http2")]
- h2_builder: Default::default(),
- mode: ConnectionMode::default(),
- max_buf_size: None,
- pipeline_flush: false,
- }
- }
-}
-
-#[cfg(any(feature = "http1", feature = "http2"))]
-impl<E> Http<E> {
- /// Sets whether HTTP1 is required.
- ///
- /// Default is false
- #[cfg(feature = "http1")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
- pub fn http1_only(&mut self, val: bool) -> &mut Self {
- if val {
- self.mode = ConnectionMode::H1Only;
- } else {
- #[cfg(feature = "http2")]
- {
- self.mode = ConnectionMode::Fallback;
- }
- }
- self
- }
-
- /// Set whether HTTP/1 connections should support half-closures.
- ///
- /// Clients can chose to shutdown their write-side while waiting
- /// for the server to respond. Setting this to `true` will
- /// prevent closing the connection immediately if `read`
- /// detects an EOF in the middle of a request.
- ///
- /// Default is `false`.
- #[cfg(feature = "http1")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
- pub fn http1_half_close(&mut self, val: bool) -> &mut Self {
- self.h1_half_close = val;
- self
- }
-
- /// Enables or disables HTTP/1 keep-alive.
- ///
- /// Default is true.
- #[cfg(feature = "http1")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
- pub fn http1_keep_alive(&mut self, val: bool) -> &mut Self {
- self.h1_keep_alive = val;
- self
- }
-
- /// Set whether HTTP/1 connections will write header names as title case at
- /// the socket level.
- ///
- /// Note that this setting does not affect HTTP/2.
- ///
- /// Default is false.
- #[cfg(feature = "http1")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
- pub fn http1_title_case_headers(&mut self, enabled: bool) -> &mut Self {
- self.h1_title_case_headers = enabled;
- self
- }
-
- /// Set whether to support preserving original header cases.
- ///
- /// Currently, this will record the original cases received, and store them
- /// in a private extension on the `Request`. It will also look for and use
- /// such an extension in any provided `Response`.
- ///
- /// Since the relevant extension is still private, there is no way to
- /// interact with the original cases. The only effect this can have now is
- /// to forward the cases in a proxy-like fashion.
- ///
- /// Note that this setting does not affect HTTP/2.
- ///
- /// Default is false.
- #[cfg(feature = "http1")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
- pub fn http1_preserve_header_case(&mut self, enabled: bool) -> &mut Self {
- self.h1_preserve_header_case = enabled;
- self
- }
-
- /// Set a timeout for reading client request headers. If a client does not
- /// transmit the entire header within this time, the connection is closed.
- ///
- /// Default is None.
- #[cfg(feature = "http1")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
- pub fn http1_header_read_timeout(&mut self, read_timeout: Duration) -> &mut Self {
- self.h1_header_read_timeout = Some(read_timeout);
- self
- }
-
- /// Set whether HTTP/1 connections should try to use vectored writes,
- /// or always flatten into a single buffer.
- ///
- /// Note that setting this to false may mean more copies of body data,
- /// but may also improve performance when an IO transport doesn't
- /// support vectored writes well, such as most TLS implementations.
- ///
- /// Setting this to true will force hyper to use queued strategy
- /// which may eliminate unnecessary cloning on some TLS backends
- ///
- /// Default is `auto`. In this mode hyper will try to guess which
- /// mode to use
- #[inline]
- #[cfg(feature = "http1")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
- pub fn http1_writev(&mut self, val: bool) -> &mut Self {
- self.h1_writev = Some(val);
- self
- }
-
- /// Sets whether HTTP2 is required.
- ///
- /// Default is false
- #[cfg(feature = "http2")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_only(&mut self, val: bool) -> &mut Self {
- if val {
- self.mode = ConnectionMode::H2Only;
- } else {
- #[cfg(feature = "http1")]
- {
- self.mode = ConnectionMode::Fallback;
- }
- }
- self
- }
-
- /// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2
- /// stream-level flow control.
- ///
- /// Passing `None` will do nothing.
- ///
- /// If not set, hyper will use a default.
- ///
- /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_INITIAL_WINDOW_SIZE
- #[cfg(feature = "http2")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_initial_stream_window_size(&mut self, sz: impl Into<Option<u32>>) -> &mut Self {
- if let Some(sz) = sz.into() {
- self.h2_builder.adaptive_window = false;
- self.h2_builder.initial_stream_window_size = sz;
- }
- self
- }
-
- /// Sets the max connection-level flow control for HTTP2.
- ///
- /// Passing `None` will do nothing.
- ///
- /// If not set, hyper will use a default.
- #[cfg(feature = "http2")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_initial_connection_window_size(
- &mut self,
- sz: impl Into<Option<u32>>,
- ) -> &mut Self {
- if let Some(sz) = sz.into() {
- self.h2_builder.adaptive_window = false;
- self.h2_builder.initial_conn_window_size = sz;
- }
- self
- }
-
- /// Sets whether to use an adaptive flow control.
- ///
- /// Enabling this will override the limits set in
- /// `http2_initial_stream_window_size` and
- /// `http2_initial_connection_window_size`.
- #[cfg(feature = "http2")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_adaptive_window(&mut self, enabled: bool) -> &mut Self {
- use proto::h2::SPEC_WINDOW_SIZE;
-
- self.h2_builder.adaptive_window = enabled;
- if enabled {
- self.h2_builder.initial_conn_window_size = SPEC_WINDOW_SIZE;
- self.h2_builder.initial_stream_window_size = SPEC_WINDOW_SIZE;
- }
- self
- }
-
- /// Sets the maximum frame size to use for HTTP2.
- ///
- /// Passing `None` will do nothing.
- ///
- /// If not set, hyper will use a default.
- #[cfg(feature = "http2")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_max_frame_size(&mut self, sz: impl Into<Option<u32>>) -> &mut Self {
- if let Some(sz) = sz.into() {
- self.h2_builder.max_frame_size = sz;
- }
- self
- }
-
- /// Sets the [`SETTINGS_MAX_CONCURRENT_STREAMS`][spec] option for HTTP2
- /// connections.
- ///
- /// Default is no limit (`std::u32::MAX`). Passing `None` will do nothing.
- ///
- /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_MAX_CONCURRENT_STREAMS
- #[cfg(feature = "http2")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_max_concurrent_streams(&mut self, max: impl Into<Option<u32>>) -> &mut Self {
- self.h2_builder.max_concurrent_streams = max.into();
- self
- }
-
- /// Sets an interval for HTTP2 Ping frames should be sent to keep a
- /// connection alive.
- ///
- /// Pass `None` to disable HTTP2 keep-alive.
- ///
- /// Default is currently disabled.
- ///
- /// # Cargo Feature
- ///
- #[cfg(feature = "http2")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_keep_alive_interval(
- &mut self,
- interval: impl Into<Option<Duration>>,
- ) -> &mut Self {
- self.h2_builder.keep_alive_interval = interval.into();
- self
- }
-
- /// Sets a timeout for receiving an acknowledgement of the keep-alive ping.
- ///
- /// If the ping is not acknowledged within the timeout, the connection will
- /// be closed. Does nothing if `http2_keep_alive_interval` is disabled.
- ///
- /// Default is 20 seconds.
- ///
- /// # Cargo Feature
- ///
- #[cfg(feature = "http2")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_keep_alive_timeout(&mut self, timeout: Duration) -> &mut Self {
- self.h2_builder.keep_alive_timeout = timeout;
- self
- }
-
- /// Set the maximum write buffer size for each HTTP/2 stream.
- ///
- /// Default is currently ~400KB, but may change.
- ///
- /// # Panics
- ///
- /// The value must be no larger than `u32::MAX`.
- #[cfg(feature = "http2")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_max_send_buf_size(&mut self, max: usize) -> &mut Self {
- assert!(max <= std::u32::MAX as usize);
- self.h2_builder.max_send_buffer_size = max;
- self
- }
-
- /// Enables the [extended CONNECT protocol].
- ///
- /// [extended CONNECT protocol]: https://datatracker.ietf.org/doc/html/rfc8441#section-4
- #[cfg(feature = "http2")]
- pub fn http2_enable_connect_protocol(&mut self) -> &mut Self {
- self.h2_builder.enable_connect_protocol = true;
- self
- }
-
- /// Sets the max size of received header frames.
- ///
- /// Default is currently ~16MB, but may change.
- #[cfg(feature = "http2")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_max_header_list_size(&mut self, max: u32) -> &mut Self {
- self.h2_builder.max_header_list_size = max;
- self
- }
-
- /// Set the maximum buffer size for the connection.
- ///
- /// Default is ~400kb.
- ///
- /// # Panics
- ///
- /// The minimum value allowed is 8192. This method panics if the passed `max` is less than the minimum.
- #[cfg(feature = "http1")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
- pub fn max_buf_size(&mut self, max: usize) -> &mut Self {
- assert!(
- max >= proto::h1::MINIMUM_MAX_BUFFER_SIZE,
- "the max_buf_size cannot be smaller than the minimum that h1 specifies."
- );
- self.max_buf_size = Some(max);
- self
- }
-
- /// Aggregates flushes to better support pipelined responses.
- ///
- /// Experimental, may have bugs.
- ///
- /// Default is false.
- pub fn pipeline_flush(&mut self, enabled: bool) -> &mut Self {
- self.pipeline_flush = enabled;
- self
- }
-
- /// Set the executor used to spawn background tasks.
- ///
- /// Default uses implicit default (like `tokio::spawn`).
- pub fn with_executor<E2>(self, exec: E2) -> Http<E2> {
- Http {
- exec,
- timer: self.timer,
- h1_half_close: self.h1_half_close,
- h1_keep_alive: self.h1_keep_alive,
- h1_title_case_headers: self.h1_title_case_headers,
- h1_preserve_header_case: self.h1_preserve_header_case,
- #[cfg(feature = "http1")]
- h1_header_read_timeout: self.h1_header_read_timeout,
- h1_writev: self.h1_writev,
- #[cfg(feature = "http2")]
- h2_builder: self.h2_builder,
- mode: self.mode,
- max_buf_size: self.max_buf_size,
- pipeline_flush: self.pipeline_flush,
- }
- }
-
- /// Set the timer used in background tasks.
- pub fn with_timer<M>(self, timer: M) -> Http<E>
- where
- M: Timer + Send + Sync + 'static,
- {
- Http {
- exec: self.exec,
- timer: Time::Timer(Arc::new(timer)),
- h1_half_close: self.h1_half_close,
- h1_keep_alive: self.h1_keep_alive,
- h1_title_case_headers: self.h1_title_case_headers,
- h1_preserve_header_case: self.h1_preserve_header_case,
- #[cfg(feature = "http1")]
- h1_header_read_timeout: self.h1_header_read_timeout,
- h1_writev: self.h1_writev,
- #[cfg(feature = "http2")]
- h2_builder: self.h2_builder,
- mode: self.mode,
- max_buf_size: self.max_buf_size,
- pipeline_flush: self.pipeline_flush,
- }
- }
-
- /// Bind a connection together with a [`Service`](crate::service::Service).
- ///
- /// This returns a Future that must be polled in order for HTTP to be
- /// driven on the connection.
- ///
- /// # Example
- ///
- /// ```
- /// # use hyper::{Recv, Request, Response};
- /// # use hyper::service::Service;
- /// # use hyper::server::conn::Http;
- /// # use tokio::io::{AsyncRead, AsyncWrite};
- /// # async fn run<I, S>(some_io: I, some_service: S)
- /// # where
- /// # I: AsyncRead + AsyncWrite + Unpin + Send + 'static,
- /// # S: Service<hyper::Request<Recv>, Response=hyper::Response<Recv>> + Send + 'static,
- /// # S::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
- /// # S::Future: Send,
- /// # {
- /// let http = Http::new();
- /// let conn = http.serve_connection(some_io, some_service);
- ///
- /// if let Err(e) = conn.await {
- /// eprintln!("server connection error: {}", e);
- /// }
- /// # }
- /// # fn main() {}
- /// ```
- pub fn serve_connection<S, I, Bd>(&self, io: I, service: S) -> Connection<I, S, E>
- where
- S: HttpService<Recv, ResBody = Bd>,
- S::Error: Into<Box<dyn StdError + Send + Sync>>,
- Bd: Body + 'static,
- Bd::Error: Into<Box<dyn StdError + Send + Sync>>,
- I: AsyncRead + AsyncWrite + Unpin,
- E: ConnStreamExec<S::Future, Bd>,
- {
- #[cfg(feature = "http1")]
- macro_rules! h1 {
- () => {{
- let mut conn = proto::Conn::new(io);
- {
- conn.set_timer(self.timer.clone());
- }
- if !self.h1_keep_alive {
- conn.disable_keep_alive();
- }
- if self.h1_half_close {
- conn.set_allow_half_close();
- }
- if self.h1_title_case_headers {
- conn.set_title_case_headers();
- }
- if self.h1_preserve_header_case {
- conn.set_preserve_header_case();
- }
- #[cfg(feature = "http1")]
- if let Some(header_read_timeout) = self.h1_header_read_timeout {
- conn.set_http1_header_read_timeout(header_read_timeout);
- }
- if let Some(writev) = self.h1_writev {
- if writev {
- conn.set_write_strategy_queue();
- } else {
- conn.set_write_strategy_flatten();
- }
- }
- conn.set_flush_pipeline(self.pipeline_flush);
- if let Some(max) = self.max_buf_size {
- conn.set_max_buf_size(max);
- }
- let sd = proto::h1::dispatch::Server::new(service);
- ProtoServer::H1 {
- h1: proto::h1::Dispatcher::new(sd, conn),
- }
- }};
- }
-
- let proto = match self.mode {
- #[cfg(feature = "http1")]
- #[cfg(not(feature = "http2"))]
- ConnectionMode::H1Only => h1!(),
- #[cfg(feature = "http2")]
- #[cfg(feature = "http1")]
- ConnectionMode::H1Only | ConnectionMode::Fallback => h1!(),
- #[cfg(feature = "http2")]
- ConnectionMode::H2Only => {
- let rewind_io = Rewind::new(io);
- let h2 = proto::h2::Server::new(
- rewind_io,
- service,
- &self.h2_builder,
- self.exec.clone(),
- self.timer.clone(),
- );
- ProtoServer::H2 { h2 }
- }
- };
-
- Connection {
- conn: Some(proto),
- #[cfg(all(feature = "http1", feature = "http2"))]
- fallback: if self.mode == ConnectionMode::Fallback {
- Fallback::ToHttp2(
- self.h2_builder.clone(),
- self.exec.clone(),
- self.timer.clone(),
- )
- } else {
- Fallback::Http1Only
- },
- #[cfg(not(all(feature = "http1", feature = "http2")))]
- fallback: PhantomData,
- }
- }
-}
-
-// ===== impl Connection =====
-
-#[cfg(any(feature = "http1", feature = "http2"))]
-impl<I, B, S, E> Connection<I, S, E>
-where
- S: HttpService<Recv, ResBody = B>,
- S::Error: Into<Box<dyn StdError + Send + Sync>>,
- I: AsyncRead + AsyncWrite + Unpin,
- B: Body + 'static,
- B::Error: Into<Box<dyn StdError + Send + Sync>>,
- E: ConnStreamExec<S::Future, B>,
-{
- /// Start a graceful shutdown process for this connection.
- ///
- /// This `Connection` should continue to be polled until shutdown
- /// can finish.
- ///
- /// # Note
- ///
- /// This should only be called while the `Connection` future is still
- /// pending. If called after `Connection::poll` has resolved, this does
- /// nothing.
- pub fn graceful_shutdown(mut self: Pin<&mut Self>) {
- match self.conn {
- #[cfg(feature = "http1")]
- Some(ProtoServer::H1 { ref mut h1, .. }) => {
- h1.disable_keep_alive();
- }
- #[cfg(feature = "http2")]
- Some(ProtoServer::H2 { ref mut h2 }) => {
- h2.graceful_shutdown();
- }
- None => (),
-
- #[cfg(not(feature = "http1"))]
- Some(ProtoServer::H1 { ref mut h1, .. }) => match h1.0 {},
- #[cfg(not(feature = "http2"))]
- Some(ProtoServer::H2 { ref mut h2 }) => match h2.0 {},
- }
- }
-
- /// Return the inner IO object, and additional information.
- ///
- /// If the IO object has been "rewound" the io will not contain those bytes rewound.
- /// This should only be called after `poll_without_shutdown` signals
- /// that the connection is "done". Otherwise, it may not have finished
- /// flushing all necessary HTTP bytes.
- ///
- /// # Panics
- /// This method will panic if this connection is using an h2 protocol.
- pub fn into_parts(self) -> Parts<I, S> {
- self.try_into_parts()
- .unwrap_or_else(|| panic!("h2 cannot into_inner"))
- }
-
- /// Return the inner IO object, and additional information, if available.
- ///
- /// This method will return a `None` if this connection is using an h2 protocol.
- pub fn try_into_parts(self) -> Option<Parts<I, S>> {
- match self.conn.unwrap() {
- #[cfg(feature = "http1")]
- ProtoServer::H1 { h1, .. } => {
- let (io, read_buf, dispatch) = h1.into_inner();
- Some(Parts {
- io,
- read_buf,
- service: dispatch.into_service(),
- _inner: (),
- })
- }
- ProtoServer::H2 { .. } => None,
-
- #[cfg(not(feature = "http1"))]
- ProtoServer::H1 { h1, .. } => match h1.0 {},
- }
- }
-
- /// Poll the connection for completion, but without calling `shutdown`
- /// on the underlying IO.
- ///
- /// This is useful to allow running a connection while doing an HTTP
- /// upgrade. Once the upgrade is completed, the connection would be "done",
- /// but it is not desired to actually shutdown the IO object. Instead you
- /// would take it back using `into_parts`.
- pub fn poll_without_shutdown(&mut self, cx: &mut task::Context<'_>) -> Poll<crate::Result<()>>
- where
- S: Unpin,
- S::Future: Unpin,
- B: Unpin,
- {
- loop {
- match *self.conn.as_mut().unwrap() {
- #[cfg(feature = "http1")]
- ProtoServer::H1 { ref mut h1, .. } => match ready!(h1.poll_without_shutdown(cx)) {
- Ok(()) => return Poll::Ready(Ok(())),
- Err(e) => {
- #[cfg(feature = "http2")]
- match *e.kind() {
- Kind::Parse(Parse::VersionH2) if self.fallback.to_h2() => {
- self.upgrade_h2();
- continue;
- }
- _ => (),
- }
-
- return Poll::Ready(Err(e));
- }
- },
- #[cfg(feature = "http2")]
- ProtoServer::H2 { ref mut h2 } => return Pin::new(h2).poll(cx).map_ok(|_| ()),
-
- #[cfg(not(feature = "http1"))]
- ProtoServer::H1 { ref mut h1, .. } => match h1.0 {},
- #[cfg(not(feature = "http2"))]
- ProtoServer::H2 { ref mut h2 } => match h2.0 {},
- };
- }
- }
-
- /// Prevent shutdown of the underlying IO object at the end of service the request,
- /// instead run `into_parts`. This is a convenience wrapper over `poll_without_shutdown`.
- ///
- /// # Error
- ///
- /// This errors if the underlying connection protocol is not HTTP/1.
- pub fn without_shutdown(self) -> impl Future<Output = crate::Result<Parts<I, S>>>
- where
- S: Unpin,
- S::Future: Unpin,
- B: Unpin,
- {
- let mut conn = Some(self);
- futures_util::future::poll_fn(move |cx| {
- ready!(conn.as_mut().unwrap().poll_without_shutdown(cx))?;
- Poll::Ready(
- conn.take()
- .unwrap()
- .try_into_parts()
- .ok_or_else(crate::Error::new_without_shutdown_not_h1),
- )
- })
- }
-
- #[cfg(all(feature = "http1", feature = "http2"))]
- fn upgrade_h2(&mut self) {
- trace!("Trying to upgrade connection to h2");
- let conn = self.conn.take();
-
- let (io, read_buf, dispatch) = match conn.unwrap() {
- ProtoServer::H1 { h1, .. } => h1.into_inner(),
- ProtoServer::H2 { .. } => {
- panic!("h2 cannot into_inner");
- }
- };
- let mut rewind_io = Rewind::new(io);
- rewind_io.rewind(read_buf);
- let (builder, exec, timer) = match self.fallback {
- Fallback::ToHttp2(ref builder, ref exec, ref timer) => (builder, exec, timer),
- Fallback::Http1Only => unreachable!("upgrade_h2 with Fallback::Http1Only"),
- };
- let h2 = proto::h2::Server::new(
- rewind_io,
- dispatch.into_service(),
- builder,
- exec.clone(),
- timer.clone(),
- );
-
- debug_assert!(self.conn.is_none());
- self.conn = Some(ProtoServer::H2 { h2 });
- }
-
- /// Enable this connection to support higher-level HTTP upgrades.
- ///
- /// See [the `upgrade` module](crate::upgrade) for more.
- pub fn with_upgrades(self) -> UpgradeableConnection<I, S, E>
- where
- I: Send,
- {
- UpgradeableConnection { inner: self }
- }
-}
-
-#[cfg(any(feature = "http1", feature = "http2"))]
-impl<I, B, S, E> Future for Connection<I, S, E>
-where
- S: HttpService<Recv, ResBody = B>,
- S::Error: Into<Box<dyn StdError + Send + Sync>>,
- I: AsyncRead + AsyncWrite + Unpin + 'static,
- B: Body + 'static,
- B::Error: Into<Box<dyn StdError + Send + Sync>>,
- E: ConnStreamExec<S::Future, B>,
-{
- type Output = crate::Result<()>;
-
- fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
- loop {
- match ready!(Pin::new(self.conn.as_mut().unwrap()).poll(cx)) {
- Ok(done) => {
- match done {
- proto::Dispatched::Shutdown => {}
- #[cfg(feature = "http1")]
- proto::Dispatched::Upgrade(pending) => {
- // With no `Send` bound on `I`, we can't try to do
- // upgrades here. In case a user was trying to use
- // `Body::on_upgrade` with this API, send a special
- // error letting them know about that.
- pending.manual();
- }
- };
- return Poll::Ready(Ok(()));
- }
- Err(e) => {
- #[cfg(feature = "http1")]
- #[cfg(feature = "http2")]
- match *e.kind() {
- Kind::Parse(Parse::VersionH2) if self.fallback.to_h2() => {
- self.upgrade_h2();
- continue;
- }
- _ => (),
- }
-
- return Poll::Ready(Err(e));
- }
- }
- }
- }
-}
-
-#[cfg(any(feature = "http1", feature = "http2"))]
-impl<I, S> fmt::Debug for Connection<I, S>
-where
- S: HttpService<Recv>,
-{
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("Connection").finish()
- }
-}
-
-// ===== impl ConnectionMode =====
-
-#[cfg(any(feature = "http1", feature = "http2"))]
-impl Default for ConnectionMode {
- #[cfg(all(feature = "http1", feature = "http2"))]
- fn default() -> ConnectionMode {
- ConnectionMode::Fallback
- }
-
- #[cfg(all(feature = "http1", not(feature = "http2")))]
- fn default() -> ConnectionMode {
- ConnectionMode::H1Only
- }
-
- #[cfg(all(not(feature = "http1"), feature = "http2"))]
- fn default() -> ConnectionMode {
- ConnectionMode::H2Only
- }
-}
-
-// ===== impl ProtoServer =====
-
-#[cfg(any(feature = "http1", feature = "http2"))]
-impl<T, B, S, E> Future for ProtoServer<T, B, S, E>
-where
- T: AsyncRead + AsyncWrite + Unpin,
- S: HttpService<Recv, ResBody = B>,
- S::Error: Into<Box<dyn StdError + Send + Sync>>,
- B: Body + 'static,
- B::Error: Into<Box<dyn StdError + Send + Sync>>,
- E: ConnStreamExec<S::Future, B>,
-{
- type Output = crate::Result<proto::Dispatched>;
-
- fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
- match self.project() {
- #[cfg(feature = "http1")]
- ProtoServerProj::H1 { h1, .. } => h1.poll(cx),
- #[cfg(feature = "http2")]
- ProtoServerProj::H2 { h2 } => h2.poll(cx),
-
- #[cfg(not(feature = "http1"))]
- ProtoServerProj::H1 { h1, .. } => match h1.0 {},
- #[cfg(not(feature = "http2"))]
- ProtoServerProj::H2 { h2 } => match h2.0 {},
- }
- }
-}
-
-#[cfg(any(feature = "http1", feature = "http2"))]
-mod upgrades {
- use super::*;
-
- // A future binding a connection with a Service with Upgrade support.
- //
- // This type is unnameable outside the crate, and so basically just an
- // `impl Future`, without requiring Rust 1.26.
- #[must_use = "futures do nothing unless polled"]
- #[allow(missing_debug_implementations)]
- pub struct UpgradeableConnection<T, S, E>
- where
- S: HttpService<Recv>,
- {
- pub(super) inner: Connection<T, S, E>,
- }
-
- impl<I, B, S, E> UpgradeableConnection<I, S, E>
- where
- S: HttpService<Recv, ResBody = B>,
- S::Error: Into<Box<dyn StdError + Send + Sync>>,
- I: AsyncRead + AsyncWrite + Unpin,
- B: Body + 'static,
- B::Error: Into<Box<dyn StdError + Send + Sync>>,
- E: ConnStreamExec<S::Future, B>,
- {
- /// Start a graceful shutdown process for this connection.
- ///
- /// This `Connection` should continue to be polled until shutdown
- /// can finish.
- pub fn graceful_shutdown(mut self: Pin<&mut Self>) {
- Pin::new(&mut self.inner).graceful_shutdown()
- }
- }
-
- impl<I, B, S, E> Future for UpgradeableConnection<I, S, E>
- where
- S: HttpService<Recv, ResBody = B>,
- S::Error: Into<Box<dyn StdError + Send + Sync>>,
- I: AsyncRead + AsyncWrite + Unpin + Send + 'static,
- B: Body + 'static,
- B::Error: Into<Box<dyn StdError + Send + Sync>>,
- E: ConnStreamExec<S::Future, B>,
- {
- type Output = crate::Result<()>;
-
- fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
- loop {
- match ready!(Pin::new(self.inner.conn.as_mut().unwrap()).poll(cx)) {
- Ok(proto::Dispatched::Shutdown) => return Poll::Ready(Ok(())),
- #[cfg(feature = "http1")]
- Ok(proto::Dispatched::Upgrade(pending)) => {
- match self.inner.conn.take() {
- Some(ProtoServer::H1 { h1, .. }) => {
- let (io, buf, _) = h1.into_inner();
- pending.fulfill(Upgraded::new(io, buf));
- return Poll::Ready(Ok(()));
- }
- _ => {
- drop(pending);
- unreachable!("Upgrade expects h1")
- }
- };
- }
- Err(e) => {
- #[cfg(feature = "http1")]
- #[cfg(feature = "http2")]
- match *e.kind() {
- Kind::Parse(Parse::VersionH2) if self.inner.fallback.to_h2() => {
- self.inner.upgrade_h2();
- continue;
- }
- _ => (),
- }
-
- return Poll::Ready(Err(e));
- }
- }
- }
- }
- }
-}
|
2022-10-14T20:45:05Z
| 3,013
|
Remove the combined-version server Connection type
With #2851 complete, we now have separate types for HTTP/1 and HTTP/2 server connections. We'll remove the combo `server::conn::{Http, Connection}` stuff for 1.0. A combined/auto type can live in `hyper-util`.
|
hyperium__hyper-3013
|
diff --git a/benches/pipeline.rs b/benches/pipeline.rs
--- a/benches/pipeline.rs
+++ b/benches/pipeline.rs
@@ -41,7 +41,7 @@ fn hello_world_16(b: &mut test::Bencher) {
loop {
let (stream, _addr) = listener.accept().await.expect("accept");
- Http::new()
+ http1::Builder::new()
.pipeline_flush(true)
.serve_connection(
stream,
diff --git a/src/common/io/rewind.rs b/src/common/io/rewind.rs
--- a/src/common/io/rewind.rs
+++ b/src/common/io/rewind.rs
@@ -14,7 +14,7 @@ pub(crate) struct Rewind<T> {
}
impl<T> Rewind<T> {
- #[cfg(any(all(feature = "http2", feature = "server"), test))]
+ #[cfg(test)]
pub(crate) fn new(io: T) -> Self {
Rewind {
pre: None,
diff --git a/src/common/io/rewind.rs b/src/common/io/rewind.rs
--- a/src/common/io/rewind.rs
+++ b/src/common/io/rewind.rs
@@ -29,7 +29,7 @@ impl<T> Rewind<T> {
}
}
- #[cfg(any(all(feature = "http1", feature = "http2", feature = "server"), test))]
+ #[cfg(test)]
pub(crate) fn rewind(&mut self, bs: Bytes) {
debug_assert!(self.pre.is_none());
self.pre = Some(bs);
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1880,7 +1880,7 @@ mod conn {
let addr = listener.local_addr().unwrap();
let (shdn_tx, mut shdn_rx) = tokio::sync::watch::channel(false);
tokio::task::spawn(async move {
- use hyper::server::conn::Http;
+ use hyper::server::conn::http2;
use hyper::service::service_fn;
loop {
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1892,7 +1892,8 @@ mod conn {
let mut shdn_rx = shdn_rx.clone();
tokio::task::spawn(async move {
- let mut conn = Http::new().with_executor(TokioExecutor).http2_only(true).serve_connection(stream, service);
+ let mut conn = http2::Builder::new(TokioExecutor)
+ .serve_connection(stream, service);
tokio::select! {
res = &mut conn => {
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2093,10 +2094,8 @@ mod conn {
// Spawn an HTTP2 server that reads the whole body and responds
tokio::spawn(async move {
let sock = listener.accept().await.unwrap().0;
- hyper::server::conn::Http::new()
- .with_executor(TokioExecutor)
- .with_timer(TokioTimer)
- .http2_only(true)
+ hyper::server::conn::http2::Builder::new(TokioExecutor)
+ .timer(TokioTimer)
.serve_connection(
sock,
service_fn(|req| async move {
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -16,7 +16,7 @@ use std::time::Duration;
use bytes::Bytes;
use futures_channel::oneshot;
-use futures_util::future::{self, Either, FutureExt, TryFutureExt};
+use futures_util::future::{self, Either, FutureExt};
use h2::client::SendRequest;
use h2::{RecvStream, SendStream};
use http::header::{HeaderName, HeaderValue};
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -28,7 +28,7 @@ use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::{TcpListener as TkTcpListener, TcpListener, TcpStream as TkTcpStream};
use hyper::body::Body;
-use hyper::server::conn::Http;
+use hyper::server::conn::{http1, http2};
use hyper::service::{service_fn, Service};
use hyper::{Method, Recv, Request, Response, StatusCode, Uri, Version};
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -321,7 +321,7 @@ mod response_body_lengths {
#[tokio::test]
async fn http2_auto_response_with_known_length() {
- let server = serve();
+ let server = serve_opts().http2().serve();
let addr_str = format!("http://{}", server.addr());
server.reply().body("Hello, World!");
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -337,7 +337,7 @@ mod response_body_lengths {
#[tokio::test]
async fn http2_auto_response_with_conflicting_lengths() {
- let server = serve();
+ let server = serve_opts().http2().serve();
let addr_str = format!("http://{}", server.addr());
server
.reply()
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -356,7 +356,7 @@ mod response_body_lengths {
#[tokio::test]
async fn http2_implicit_empty_size_hint() {
- let server = serve();
+ let server = serve_opts().http2().serve();
let addr_str = format!("http://{}", server.addr());
server.reply();
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -954,7 +954,7 @@ async fn expect_continue_waits_for_body_poll() {
let (socket, _) = listener.accept().await.expect("accept");
- Http::new()
+ http1::Builder::new()
.serve_connection(
socket,
service_fn(|req| {
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1129,7 +1129,7 @@ async fn disable_keep_alive_mid_request() {
});
let (socket, _) = listener.accept().await.unwrap();
- let srv = Http::new().serve_connection(socket, HelloWorld);
+ let srv = http1::Builder::new().serve_connection(socket, HelloWorld);
future::try_select(srv, rx1)
.then(|r| match r {
Ok(Either::Left(_)) => panic!("expected rx first"),
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1182,7 +1182,7 @@ async fn disable_keep_alive_post_request() {
stream: socket,
_debug: dropped2,
};
- let server = Http::new().serve_connection(transport, HelloWorld);
+ let server = http1::Builder::new().serve_connection(transport, HelloWorld);
let fut = future::try_select(server, rx1).then(|r| match r {
Ok(Either::Left(_)) => panic!("expected rx first"),
Ok(Either::Right(((), mut conn))) => {
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1210,7 +1210,7 @@ async fn empty_parse_eof_does_not_return_error() {
});
let (socket, _) = listener.accept().await.unwrap();
- Http::new()
+ http1::Builder::new()
.serve_connection(socket, HelloWorld)
.await
.expect("empty parse eof is ok");
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1227,7 +1227,7 @@ async fn nonempty_parse_eof_returns_error() {
});
let (socket, _) = listener.accept().await.unwrap();
- Http::new()
+ http1::Builder::new()
.serve_connection(socket, HelloWorld)
.await
.expect_err("partial parse eof is error");
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1252,7 +1252,7 @@ async fn http1_allow_half_close() {
});
let (socket, _) = listener.accept().await.unwrap();
- Http::new()
+ http1::Builder::new()
.http1_half_close(true)
.serve_connection(
socket,
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1281,7 +1281,7 @@ async fn disconnect_after_reading_request_before_responding() {
});
let (socket, _) = listener.accept().await.unwrap();
- Http::new()
+ http1::Builder::new()
.http1_half_close(false)
.serve_connection(
socket,
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1313,7 +1313,7 @@ async fn returning_1xx_response_is_error() {
});
let (socket, _) = listener.accept().await.unwrap();
- Http::new()
+ http1::Builder::new()
.serve_connection(
socket,
service_fn(|_| async move {
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1378,8 +1378,8 @@ async fn header_read_timeout_slow_writes() {
});
let (socket, _) = listener.accept().await.unwrap();
- let conn = Http::new()
- .with_timer(TokioTimer)
+ let conn = http1::Builder::new()
+ .timer(TokioTimer)
.http1_header_read_timeout(Duration::from_secs(5))
.serve_connection(
socket,
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1454,8 +1454,8 @@ async fn header_read_timeout_slow_writes_multiple_requests() {
});
let (socket, _) = listener.accept().await.unwrap();
- let conn = Http::new()
- .with_timer(TokioTimer)
+ let conn = http1::Builder::new()
+ .timer(TokioTimer)
.http1_header_read_timeout(Duration::from_secs(5))
.serve_connection(
socket,
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1502,7 +1502,7 @@ async fn upgrades() {
});
let (socket, _) = listener.accept().await.unwrap();
- let conn = Http::new().serve_connection(
+ let conn = http1::Builder::new().serve_connection(
socket,
service_fn(|_| {
let res = Response::builder()
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1557,7 +1557,7 @@ async fn http_connect() {
});
let (socket, _) = listener.accept().await.unwrap();
- let conn = Http::new().serve_connection(
+ let conn = http1::Builder::new().serve_connection(
socket,
service_fn(|_| {
let res = Response::builder()
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1629,7 +1629,7 @@ async fn upgrades_new() {
});
let (socket, _) = listener.accept().await.unwrap();
- Http::new()
+ http1::Builder::new()
.serve_connection(socket, svc)
.with_upgrades()
.await
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1666,7 +1666,7 @@ async fn upgrades_ignored() {
loop {
let (socket, _) = listener.accept().await.unwrap();
tokio::task::spawn(async move {
- Http::new()
+ http1::Builder::new()
.serve_connection(socket, svc)
.with_upgrades()
.await
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1737,7 +1737,7 @@ async fn http_connect_new() {
});
let (socket, _) = listener.accept().await.unwrap();
- Http::new()
+ http1::Builder::new()
.serve_connection(socket, svc)
.with_upgrades()
.await
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1819,11 +1819,9 @@ async fn h2_connect() {
});
let (socket, _) = listener.accept().await.unwrap();
- Http::new()
- .with_executor(TokioExecutor)
- .http2_only(true)
+ http2::Builder::new(TokioExecutor)
.serve_connection(socket, svc)
- .with_upgrades()
+ //.with_upgrades()
.await
.unwrap();
}
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1932,11 +1930,9 @@ async fn h2_connect_multiplex() {
});
let (socket, _) = listener.accept().await.unwrap();
- Http::new()
- .with_executor(TokioExecutor)
- .http2_only(true)
+ http2::Builder::new(TokioExecutor)
.serve_connection(socket, svc)
- .with_upgrades()
+ //.with_upgrades()
.await
.unwrap();
}
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2008,11 +2004,9 @@ async fn h2_connect_large_body() {
});
let (socket, _) = listener.accept().await.unwrap();
- Http::new()
- .with_executor(TokioExecutor)
- .http2_only(true)
+ http2::Builder::new(TokioExecutor)
.serve_connection(socket, svc)
- .with_upgrades()
+ //.with_upgrades()
.await
.unwrap();
}
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2081,11 +2075,9 @@ async fn h2_connect_empty_frames() {
});
let (socket, _) = listener.accept().await.unwrap();
- Http::new()
- .with_executor(TokioExecutor)
- .http2_only(true)
+ http2::Builder::new(TokioExecutor)
.serve_connection(socket, svc)
- .with_upgrades()
+ //.with_upgrades()
.await
.unwrap();
}
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2106,7 +2098,7 @@ async fn parse_errors_send_4xx_response() {
});
let (socket, _) = listener.accept().await.unwrap();
- Http::new()
+ http1::Builder::new()
.serve_connection(socket, HelloWorld)
.await
.expect_err("HTTP parse error");
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2129,7 +2121,7 @@ async fn illegal_request_length_returns_400_response() {
});
let (socket, _) = listener.accept().await.unwrap();
- Http::new()
+ http1::Builder::new()
.serve_connection(socket, HelloWorld)
.await
.expect_err("illegal Content-Length should error");
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2140,14 +2132,14 @@ async fn illegal_request_length_returns_400_response() {
#[should_panic]
fn max_buf_size_panic_too_small() {
const MAX: usize = 8191;
- Http::new().max_buf_size(MAX);
+ http1::Builder::new().max_buf_size(MAX);
}
#[cfg(feature = "http1")]
#[test]
fn max_buf_size_no_panic() {
const MAX: usize = 8193;
- Http::new().max_buf_size(MAX);
+ http1::Builder::new().max_buf_size(MAX);
}
#[cfg(feature = "http1")]
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2171,7 +2163,7 @@ async fn max_buf_size() {
});
let (socket, _) = listener.accept().await.unwrap();
- Http::new()
+ http1::Builder::new()
.max_buf_size(MAX)
.serve_connection(socket, HelloWorld)
.await
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2220,27 +2212,9 @@ fn http1_response_with_http2_version() {
.unwrap();
}
-#[test]
-fn try_h2() {
- let server = serve();
- let addr_str = format!("http://{}", server.addr());
-
- let rt = support::runtime();
-
- let client = TestClient::new().http2_only();
- rt.block_on({
- let uri = addr_str.parse().expect("server addr should parse");
-
- client.get(uri).map_ok(|_| ()).map_err(|_e| ())
- })
- .unwrap();
-
- assert_eq!(server.body(), b"");
-}
-
#[test]
fn http1_only() {
- let server = serve_opts().http1_only().serve();
+ let server = serve_opts().serve();
let addr_str = format!("http://{}", server.addr());
let rt = support::runtime();
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2257,7 +2231,7 @@ fn http1_only() {
async fn http2_service_error_sends_reset_reason() {
use std::error::Error;
- let server = serve();
+ let server = serve_opts().http2().serve();
let addr_str = format!("http://{}", server.addr());
server
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2284,7 +2258,7 @@ async fn http2_service_error_sends_reset_reason() {
#[test]
fn http2_body_user_error_sends_reset_reason() {
use std::error::Error;
- let server = serve();
+ let server = serve_opts().http2().serve();
let addr_str = format!("http://{}", server.addr());
let b = futures_util::stream::once(future::err::<Bytes, BoxError>(Box::new(h2::Error::from(
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2423,9 +2397,8 @@ async fn http2_keep_alive_detects_unresponsive_client() {
let (socket, _) = listener.accept().await.expect("accept");
- let err = Http::new()
- .with_timer(TokioTimer)
- .http2_only(true)
+ let err = http2::Builder::new(TokioExecutor)
+ .timer(TokioTimer)
.http2_keep_alive_interval(Duration::from_secs(1))
.http2_keep_alive_timeout(Duration::from_secs(1))
.serve_connection(socket, unreachable_service())
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2445,10 +2418,8 @@ async fn http2_keep_alive_with_responsive_client() {
tokio::spawn(async move {
let (socket, _) = listener.accept().await.expect("accept");
- Http::new()
- .with_executor(TokioExecutor)
- .with_timer(TokioTimer)
- .http2_only(true)
+ http2::Builder::new(TokioExecutor)
+ .timer(TokioTimer)
.http2_keep_alive_interval(Duration::from_secs(1))
.http2_keep_alive_timeout(Duration::from_secs(1))
.serve_connection(socket, HelloWorld)
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2514,9 +2485,8 @@ async fn http2_keep_alive_count_server_pings() {
tokio::spawn(async move {
let (socket, _) = listener.accept().await.expect("accept");
- Http::new()
- .with_timer(TokioTimer)
- .http2_only(true)
+ http2::Builder::new(TokioExecutor)
+ .timer(TokioTimer)
.http2_keep_alive_interval(Duration::from_secs(1))
.http2_keep_alive_timeout(Duration::from_secs(1))
.serve_connection(socket, unreachable_service())
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2827,24 +2797,24 @@ fn serve_opts() -> ServeOptions {
#[derive(Clone, Copy)]
struct ServeOptions {
+ http2: bool,
keep_alive: bool,
- http1_only: bool,
pipeline: bool,
}
impl Default for ServeOptions {
fn default() -> Self {
ServeOptions {
+ http2: false,
keep_alive: true,
- http1_only: false,
pipeline: false,
}
}
}
impl ServeOptions {
- fn http1_only(mut self) -> Self {
- self.http1_only = true;
+ fn http2(mut self) -> Self {
+ self.http2 = true;
self
}
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2894,14 +2864,6 @@ impl ServeOptions {
let (stream, _) = res.unwrap();
tokio::task::spawn(async move {
- let mut http = Http::new().with_executor(TokioExecutor);
-
- #[cfg(feature = "http1")]
- let http = http
- .http1_only(_options.http1_only)
- .http1_keep_alive(_options.keep_alive)
- .pipeline_flush(_options.pipeline);
-
let msg_tx = msg_tx.clone();
let reply_rx = reply_rx.clone();
let service = TestService {
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2909,7 +2871,15 @@ impl ServeOptions {
reply: reply_rx,
};
- http.serve_connection(stream, service).await.unwrap();
+ if _options.http2 {
+ http2::Builder::new(TokioExecutor)
+ .serve_connection(stream, service).await.unwrap();
+ } else {
+ http1::Builder::new()
+ .http1_keep_alive(_options.keep_alive)
+ .pipeline_flush(_options.pipeline)
+ .serve_connection(stream, service).await.unwrap();
+ }
});
}
_ = &mut shutdown_rx => {
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -8,7 +8,7 @@ use std::sync::{
use bytes::Bytes;
use http_body_util::Full;
-use hyper::server::conn::Http;
+use hyper::server;
use tokio::net::{TcpListener, TcpStream};
use hyper::service::service_fn;
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -383,12 +383,17 @@ async fn async_test(cfg: __TestConfig) {
});
tokio::task::spawn(async move {
- Http::new()
- .with_executor(TokioExecutor)
- .http2_only(http2_only)
- .serve_connection(stream, service)
- .await
- .expect("server error");
+ if http2_only {
+ server::conn::http2::Builder::new(TokioExecutor)
+ .serve_connection(stream, service)
+ .await
+ .expect("server error");
+ } else {
+ server::conn::http1::Builder::new()
+ .serve_connection(stream, service)
+ .await
+ .expect("server error");
+ }
});
}
});
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -560,12 +565,17 @@ async fn naive_proxy(cfg: ProxyConfig) -> (SocketAddr, impl Future<Output = ()>)
}
});
- Http::new()
- .with_executor(TokioExecutor)
- .http2_only(http2_only)
- .serve_connection(stream, service)
- .await
- .unwrap();
+ if http2_only {
+ server::conn::http2::Builder::new(TokioExecutor)
+ .serve_connection(stream, service)
+ .await
+ .unwrap();
+ } else {
+ server::conn::http1::Builder::new()
+ .serve_connection(stream, service)
+ .await
+ .unwrap();
+ }
}
});
};
|
hyperium/hyper
|
6fd696e10974f10b2c6b9d16393bbbfa21c2333f
|
|
[
"2960"
] |
0.3
|
9cc5e62f706b39c988167c6d04b8cf4389ab1603
|
diff --git a/examples/client.rs b/examples/client.rs
--- a/examples/client.rs
+++ b/examples/client.rs
@@ -41,7 +41,7 @@ async fn fetch_url(url: hyper::Uri) -> Result<()> {
let addr = format!("{}:{}", host, port);
let stream = TcpStream::connect(addr).await?;
- let (mut sender, conn) = hyper::client::conn::handshake(stream).await?;
+ let (mut sender, conn) = hyper::client::conn::http1::handshake(stream).await?;
tokio::task::spawn(async move {
if let Err(err) = conn.await {
println!("Connection failed: {:?}", err);
diff --git a/examples/client_json.rs b/examples/client_json.rs
--- a/examples/client_json.rs
+++ b/examples/client_json.rs
@@ -30,7 +30,7 @@ async fn fetch_json(url: hyper::Uri) -> Result<Vec<User>> {
let stream = TcpStream::connect(addr).await?;
- let (mut sender, conn) = hyper::client::conn::handshake(stream).await?;
+ let (mut sender, conn) = hyper::client::conn::http1::handshake(stream).await?;
tokio::task::spawn(async move {
if let Err(err) = conn.await {
println!("Connection failed: {:?}", err);
diff --git a/examples/gateway.rs b/examples/gateway.rs
--- a/examples/gateway.rs
+++ b/examples/gateway.rs
@@ -43,7 +43,8 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
async move {
let client_stream = TcpStream::connect(addr).await.unwrap();
- let (mut sender, conn) = hyper::client::conn::handshake(client_stream).await?;
+ let (mut sender, conn) =
+ hyper::client::conn::http1::handshake(client_stream).await?;
tokio::task::spawn(async move {
if let Err(err) = conn.await {
println!("Connection failed: {:?}", err);
diff --git a/examples/http_proxy.rs b/examples/http_proxy.rs
--- a/examples/http_proxy.rs
+++ b/examples/http_proxy.rs
@@ -4,7 +4,7 @@ use std::net::SocketAddr;
use bytes::Bytes;
use http_body_util::{combinators::BoxBody, BodyExt, Empty, Full};
-use hyper::client::conn::Builder;
+use hyper::client::conn::http1::Builder;
use hyper::server::conn::Http;
use hyper::service::service_fn;
use hyper::upgrade::Upgraded;
diff --git a/examples/upgrades.rs b/examples/upgrades.rs
--- a/examples/upgrades.rs
+++ b/examples/upgrades.rs
@@ -97,7 +97,7 @@ async fn client_upgrade_request(addr: SocketAddr) -> Result<()> {
.unwrap();
let stream = TcpStream::connect(addr).await?;
- let (mut sender, conn) = hyper::client::conn::handshake(stream).await?;
+ let (mut sender, conn) = hyper::client::conn::http1::handshake(stream).await?;
tokio::task::spawn(async move {
if let Err(err) = conn.await {
diff --git a/examples/web_api.rs b/examples/web_api.rs
--- a/examples/web_api.rs
+++ b/examples/web_api.rs
@@ -31,7 +31,7 @@ async fn client_request_response() -> Result<Response<BoxBody>> {
let port = req.uri().port_u16().expect("uri has no port");
let stream = TcpStream::connect(format!("{}:{}", host, port)).await?;
- let (mut sender, conn) = hyper::client::conn::handshake(stream).await?;
+ let (mut sender, conn) = hyper::client::conn::http1::handshake(stream).await?;
tokio::task::spawn(async move {
if let Err(err) = conn.await {
diff --git a/src/client/conn/http1.rs b/src/client/conn/http1.rs
--- a/src/client/conn/http1.rs
+++ b/src/client/conn/http1.rs
@@ -4,6 +4,7 @@ use std::error::Error as StdError;
use std::fmt;
use std::sync::Arc;
+use bytes::Bytes;
use http::{Request, Response};
use httparse::ParserConfig;
use tokio::io::{AsyncRead, AsyncWrite};
diff --git a/src/client/conn/http1.rs b/src/client/conn/http1.rs
--- a/src/client/conn/http1.rs
+++ b/src/client/conn/http1.rs
@@ -27,6 +28,27 @@ pub struct SendRequest<B> {
dispatch: dispatch::Sender<Request<B>, Response<Recv>>,
}
+/// Deconstructed parts of a `Connection`.
+///
+/// This allows taking apart a `Connection` at a later time, in order to
+/// reclaim the IO object, and additional related pieces.
+#[derive(Debug)]
+pub struct Parts<T> {
+ /// The original IO object used in the handshake.
+ pub io: T,
+ /// A buffer of bytes that have been read but not processed as HTTP.
+ ///
+ /// For instance, if the `Connection` is used for an HTTP upgrade request,
+ /// it is possible the server sent back the first bytes of the new protocol
+ /// along with the response upgrade.
+ ///
+ /// You will want to check for any existing bytes if you plan to continue
+ /// communicating on the IO object.
+ pub read_buf: Bytes,
+ _inner: (),
+}
+
+
/// A future that processes all HTTP state for the IO object.
///
/// In most cases, this should just be spawned into an executor, so that it
diff --git a/src/client/conn/http1.rs b/src/client/conn/http1.rs
--- a/src/client/conn/http1.rs
+++ b/src/client/conn/http1.rs
@@ -40,6 +62,40 @@ where
inner: Option<Dispatcher<T, B>>,
}
+impl<T, B> Connection<T, B>
+where
+ T: AsyncRead + AsyncWrite + Send + Unpin + 'static,
+ B: Body + 'static,
+ B::Error: Into<Box<dyn StdError + Send + Sync>>,
+{
+ /// Return the inner IO object, and additional information.
+ ///
+ /// Only works for HTTP/1 connections. HTTP/2 connections will panic.
+ pub fn into_parts(self) -> Parts<T> {
+ let (io, read_buf, _) = self.inner.expect("already upgraded").into_inner();
+ Parts {
+ io,
+ read_buf,
+ _inner: (),
+ }
+ }
+
+ /// Poll the connection for completion, but without calling `shutdown`
+ /// on the underlying IO.
+ ///
+ /// This is useful to allow running a connection while doing an HTTP
+ /// upgrade. Once the upgrade is completed, the connection would be "done",
+ /// but it is not desired to actually shutdown the IO object. Instead you
+ /// would take it back using `into_parts`.
+ ///
+ /// Use [`poll_fn`](https://docs.rs/futures/0.1.25/futures/future/fn.poll_fn.html)
+ /// and [`try_ready!`](https://docs.rs/futures/0.1.25/futures/macro.try_ready.html)
+ /// to work with this function; or use the `without_shutdown` wrapper.
+ pub fn poll_without_shutdown(&mut self, cx: &mut task::Context<'_>) -> Poll<crate::Result<()>> {
+ self.inner.as_mut().expect("algready upgraded").poll_without_shutdown(cx)
+ }
+}
+
/// A builder to configure an HTTP connection.
///
/// After setting options, the builder is used to create a handshake future.
diff --git a/src/client/conn/http1.rs b/src/client/conn/http1.rs
--- a/src/client/conn/http1.rs
+++ b/src/client/conn/http1.rs
@@ -52,6 +108,8 @@ pub struct Builder {
h1_title_case_headers: bool,
h1_preserve_header_case: bool,
#[cfg(feature = "ffi")]
+ h1_headers_raw: bool,
+ #[cfg(feature = "ffi")]
h1_preserve_header_order: bool,
h1_read_buf_exact_size: Option<usize>,
h1_max_buf_size: Option<usize>,
diff --git a/src/client/conn/http1.rs b/src/client/conn/http1.rs
--- a/src/client/conn/http1.rs
+++ b/src/client/conn/http1.rs
@@ -61,11 +119,14 @@ pub struct Builder {
///
/// This is a shortcut for `Builder::new().handshake(io)`.
/// See [`client::conn`](crate::client::conn) for more.
-pub async fn handshake<T>(
+pub async fn handshake<T, B>(
io: T,
-) -> crate::Result<(SendRequest<crate::Recv>, Connection<T, crate::Recv>)>
+) -> crate::Result<(SendRequest<B>, Connection<T, B>)>
where
T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
+ B: Body + 'static,
+ B::Data: Send,
+ B::Error: Into<Box<dyn StdError + Send + Sync>>,
{
Builder::new().handshake(io).await
}
diff --git a/src/client/conn/http1.rs b/src/client/conn/http1.rs
--- a/src/client/conn/http1.rs
+++ b/src/client/conn/http1.rs
@@ -80,6 +141,13 @@ impl<B> SendRequest<B> {
self.dispatch.poll_ready(cx)
}
+ /// Waits until the dispatcher is ready
+ ///
+ /// If the associated connection is closed, this returns an Error.
+ pub async fn ready(&mut self) -> crate::Result<()> {
+ futures_util::future::poll_fn(|cx| self.poll_ready(cx)).await
+ }
+
/*
pub(super) async fn when_ready(self) -> crate::Result<Self> {
let mut me = Some(self);
diff --git a/src/client/conn/http1.rs b/src/client/conn/http1.rs
--- a/src/client/conn/http1.rs
+++ b/src/client/conn/http1.rs
@@ -232,6 +300,8 @@ impl Builder {
h1_title_case_headers: false,
h1_preserve_header_case: false,
#[cfg(feature = "ffi")]
+ h1_headers_raw: false,
+ #[cfg(feature = "ffi")]
h1_preserve_header_order: false,
h1_max_buf_size: None,
}
diff --git a/src/client/conn/http1.rs b/src/client/conn/http1.rs
--- a/src/client/conn/http1.rs
+++ b/src/client/conn/http1.rs
@@ -386,6 +456,12 @@ impl Builder {
self
}
+ #[cfg(feature = "ffi")]
+ pub(crate) fn http1_headers_raw(&mut self, enabled: bool) -> &mut Builder {
+ self.h1_headers_raw = enabled;
+ self
+ }
+
/// Sets the exact size of the read buffer to *always* use.
///
/// Note that setting this option unsets the `http1_max_buf_size` option.
diff --git a/src/client/conn/http1.rs b/src/client/conn/http1.rs
--- a/src/client/conn/http1.rs
+++ b/src/client/conn/http1.rs
@@ -459,6 +535,10 @@ impl Builder {
if opts.h1_preserve_header_order {
conn.set_preserve_header_order();
}
+ #[cfg(feature = "ffi")]
+ if opts.h1_headers_raw {
+ conn.set_raw_headers(true);
+ }
if opts.h09_responses {
conn.set_h09_responses();
}
diff --git a/src/client/conn/http2.rs b/src/client/conn/http2.rs
--- a/src/client/conn/http2.rs
+++ b/src/client/conn/http2.rs
@@ -52,11 +52,14 @@ pub struct Builder {
///
/// This is a shortcut for `Builder::new().handshake(io)`.
/// See [`client::conn`](crate::client::conn) for more.
-pub async fn handshake<T>(
+pub async fn handshake<T, B>(
io: T,
-) -> crate::Result<(SendRequest<crate::Recv>, Connection<T, crate::Recv>)>
+) -> crate::Result<(SendRequest<B>, Connection<T, B>)>
where
T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
+ B: Body + 'static,
+ B::Data: Send,
+ B::Error: Into<Box<dyn StdError + Send + Sync>>,
{
Builder::new().handshake(io).await
}
diff --git a/src/client/conn/http2.rs b/src/client/conn/http2.rs
--- a/src/client/conn/http2.rs
+++ b/src/client/conn/http2.rs
@@ -75,6 +78,13 @@ impl<B> SendRequest<B> {
}
}
+ /// Waits until the dispatcher is ready
+ ///
+ /// If the associated connection is closed, this returns an Error.
+ pub async fn ready(&mut self) -> crate::Result<()> {
+ futures_util::future::poll_fn(|cx| self.poll_ready(cx)).await
+ }
+
/*
pub(super) async fn when_ready(self) -> crate::Result<Self> {
let mut me = Some(self);
diff --git a/src/client/conn/http2.rs b/src/client/conn/http2.rs
--- a/src/client/conn/http2.rs
+++ b/src/client/conn/http2.rs
@@ -175,6 +185,27 @@ impl<B> fmt::Debug for SendRequest<B> {
// ===== impl Connection
+impl<T, B> Connection<T, B>
+where
+ T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
+ B: Body + Unpin + Send + 'static,
+ B::Data: Send,
+ B::Error: Into<Box<dyn StdError + Send + Sync>>,
+{
+ /// Returns whether the [extended CONNECT protocol][1] is enabled or not.
+ ///
+ /// This setting is configured by the server peer by sending the
+ /// [`SETTINGS_ENABLE_CONNECT_PROTOCOL` parameter][2] in a `SETTINGS` frame.
+ /// This method returns the currently acknowledged value received from the
+ /// remote.
+ ///
+ /// [1]: https://datatracker.ietf.org/doc/html/rfc8441#section-4
+ /// [2]: https://datatracker.ietf.org/doc/html/rfc8441#section-3
+ pub fn is_extended_connect_protocol_enabled(&self) -> bool {
+ self.inner.1.is_extended_connect_protocol_enabled()
+ }
+}
+
impl<T, B> fmt::Debug for Connection<T, B>
where
T: AsyncRead + AsyncWrite + fmt::Debug + Send + 'static,
diff --git a/src/client/conn/mod.rs b/src/client/conn/mod.rs
--- a/src/client/conn/mod.rs
+++ b/src/client/conn/mod.rs
@@ -23,7 +23,7 @@
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//! let target_stream = TcpStream::connect("example.com:80").await?;
//!
-//! let (mut request_sender, connection) = conn::handshake(target_stream).await?;
+//! let (mut request_sender, connection) = conn::http1::handshake(target_stream).await?;
//!
//! // spawn a task to poll the connection and drive the HTTP state
//! tokio::spawn(async move {
diff --git a/src/client/conn/mod.rs b/src/client/conn/mod.rs
--- a/src/client/conn/mod.rs
+++ b/src/client/conn/mod.rs
@@ -52,886 +52,8 @@
//! # }
//! ```
-use std::error::Error as StdError;
-use std::fmt;
-#[cfg(not(all(feature = "http1", feature = "http2")))]
-use std::marker::PhantomData;
-use std::sync::Arc;
-#[cfg(feature = "http2")]
-use std::time::Duration;
-
-use bytes::Bytes;
-use futures_util::future;
-use httparse::ParserConfig;
-use pin_project_lite::pin_project;
-use tokio::io::{AsyncRead, AsyncWrite};
-use tracing::{debug, trace};
-
-use super::dispatch;
-use crate::body::Body;
-#[cfg(not(all(feature = "http1", feature = "http2")))]
-use crate::common::Never;
-use crate::common::{
- exec::{BoxSendFuture, Exec},
- task, Future, Pin, Poll,
-};
-use crate::proto;
-use crate::rt::Executor;
-#[cfg(feature = "http1")]
-use crate::upgrade::Upgraded;
-use crate::{common::time::Time, rt::Timer};
-use crate::{Recv, Request, Response};
-
#[cfg(feature = "http1")]
pub mod http1;
#[cfg(feature = "http2")]
pub mod http2;
-#[cfg(feature = "http1")]
-type Http1Dispatcher<T, B> =
- proto::dispatch::Dispatcher<proto::dispatch::Client<B>, B, T, proto::h1::ClientTransaction>;
-
-#[cfg(not(feature = "http1"))]
-type Http1Dispatcher<T, B> = (Never, PhantomData<(T, Pin<Box<B>>)>);
-
-#[cfg(feature = "http2")]
-type Http2ClientTask<B> = proto::h2::ClientTask<B>;
-
-#[cfg(not(feature = "http2"))]
-type Http2ClientTask<B> = (Never, PhantomData<Pin<Box<B>>>);
-
-pin_project! {
- #[project = ProtoClientProj]
- enum ProtoClient<T, B>
- where
- B: Body,
- {
- H1 {
- #[pin]
- h1: Http1Dispatcher<T, B>,
- },
- H2 {
- #[pin]
- h2: Http2ClientTask<B>,
- },
- }
-}
-
-/// Returns a handshake future over some IO.
-///
-/// This is a shortcut for `Builder::new().handshake(io)`.
-/// See [`client::conn`](crate::client::conn) for more.
-pub async fn handshake<T, B>(io: T) -> crate::Result<(SendRequest<B>, Connection<T, B>)>
-where
- T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
- B: Body + 'static,
- B::Data: Send,
- B::Error: Into<Box<dyn StdError + Send + Sync>>,
-{
- Builder::new().handshake(io).await
-}
-
-/// The sender side of an established connection.
-pub struct SendRequest<B> {
- dispatch: dispatch::Sender<Request<B>, Response<Recv>>,
-}
-
-/// A future that processes all HTTP state for the IO object.
-///
-/// In most cases, this should just be spawned into an executor, so that it
-/// can process incoming and outgoing messages, notice hangups, and the like.
-#[must_use = "futures do nothing unless polled"]
-pub struct Connection<T, B>
-where
- T: AsyncRead + AsyncWrite + Send + 'static,
- B: Body + 'static,
-{
- inner: Option<ProtoClient<T, B>>,
-}
-
-/// A builder to configure an HTTP connection.
-///
-/// After setting options, the builder is used to create a handshake future.
-#[derive(Clone, Debug)]
-pub struct Builder {
- pub(super) exec: Exec,
- pub(super) timer: Time,
- h09_responses: bool,
- h1_parser_config: ParserConfig,
- h1_writev: Option<bool>,
- h1_title_case_headers: bool,
- h1_preserve_header_case: bool,
- #[cfg(feature = "ffi")]
- h1_preserve_header_order: bool,
- h1_read_buf_exact_size: Option<usize>,
- h1_max_buf_size: Option<usize>,
- #[cfg(feature = "ffi")]
- h1_headers_raw: bool,
- #[cfg(feature = "http2")]
- h2_builder: proto::h2::client::Config,
- version: Proto,
-}
-
-#[derive(Clone, Debug)]
-enum Proto {
- #[cfg(feature = "http1")]
- Http1,
- #[cfg(feature = "http2")]
- Http2,
-}
-
-/// A future returned by `SendRequest::send_request`.
-///
-/// Yields a `Response` if successful.
-#[must_use = "futures do nothing unless polled"]
-pub struct ResponseFuture {
- inner: ResponseFutureState,
-}
-
-enum ResponseFutureState {
- Waiting(dispatch::Promise<Response<Recv>>),
- // Option is to be able to `take()` it in `poll`
- Error(Option<crate::Error>),
-}
-
-/// Deconstructed parts of a `Connection`.
-///
-/// This allows taking apart a `Connection` at a later time, in order to
-/// reclaim the IO object, and additional related pieces.
-#[derive(Debug)]
-pub struct Parts<T> {
- /// The original IO object used in the handshake.
- pub io: T,
- /// A buffer of bytes that have been read but not processed as HTTP.
- ///
- /// For instance, if the `Connection` is used for an HTTP upgrade request,
- /// it is possible the server sent back the first bytes of the new protocol
- /// along with the response upgrade.
- ///
- /// You will want to check for any existing bytes if you plan to continue
- /// communicating on the IO object.
- pub read_buf: Bytes,
- _inner: (),
-}
-
-// ===== impl SendRequest
-
-impl<B> SendRequest<B> {
- /// Polls to determine whether this sender can be used yet for a request.
- ///
- /// If the associated connection is closed, this returns an Error.
- pub fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<crate::Result<()>> {
- self.dispatch.poll_ready(cx)
- }
-}
-
-impl<B> SendRequest<B>
-where
- B: Body + 'static,
-{
- /// Sends a `Request` on the associated connection.
- ///
- /// Returns a future that if successful, yields the `Response`.
- ///
- /// # Note
- ///
- /// There are some key differences in what automatic things the `Client`
- /// does for you that will not be done here:
- ///
- /// - `Client` requires absolute-form `Uri`s, since the scheme and
- /// authority are needed to connect. They aren't required here.
- /// - Since the `Client` requires absolute-form `Uri`s, it can add
- /// the `Host` header based on it. You must add a `Host` header yourself
- /// before calling this method.
- /// - Since absolute-form `Uri`s are not required, if received, they will
- /// be serialized as-is.
- pub fn send_request(&mut self, req: Request<B>) -> ResponseFuture {
- let inner = match self.dispatch.send(req) {
- Ok(rx) => ResponseFutureState::Waiting(rx),
- Err(_req) => {
- debug!("connection was not ready");
- let err = crate::Error::new_canceled().with("connection was not ready");
- ResponseFutureState::Error(Some(err))
- }
- };
-
- ResponseFuture { inner }
- }
-}
-
-impl<B> fmt::Debug for SendRequest<B> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("SendRequest").finish()
- }
-}
-
-// ===== impl Connection
-
-impl<T, B> Connection<T, B>
-where
- T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
- B: Body + Unpin + Send + 'static,
- B::Data: Send,
- B::Error: Into<Box<dyn StdError + Send + Sync>>,
-{
- /// Return the inner IO object, and additional information.
- ///
- /// Only works for HTTP/1 connections. HTTP/2 connections will panic.
- pub fn into_parts(self) -> Parts<T> {
- match self.inner.expect("already upgraded") {
- #[cfg(feature = "http1")]
- ProtoClient::H1 { h1 } => {
- let (io, read_buf, _) = h1.into_inner();
- Parts {
- io,
- read_buf,
- _inner: (),
- }
- }
- ProtoClient::H2 { .. } => {
- panic!("http2 cannot into_inner");
- }
-
- #[cfg(not(feature = "http1"))]
- ProtoClient::H1 { h1 } => match h1.0 {},
- }
- }
-
- /// Poll the connection for completion, but without calling `shutdown`
- /// on the underlying IO.
- ///
- /// This is useful to allow running a connection while doing an HTTP
- /// upgrade. Once the upgrade is completed, the connection would be "done",
- /// but it is not desired to actually shutdown the IO object. Instead you
- /// would take it back using `into_parts`.
- ///
- /// Use [`poll_fn`](https://docs.rs/futures/0.1.25/futures/future/fn.poll_fn.html)
- /// and [`try_ready!`](https://docs.rs/futures/0.1.25/futures/macro.try_ready.html)
- /// to work with this function; or use the `without_shutdown` wrapper.
- pub fn poll_without_shutdown(&mut self, cx: &mut task::Context<'_>) -> Poll<crate::Result<()>> {
- match *self.inner.as_mut().expect("already upgraded") {
- #[cfg(feature = "http1")]
- ProtoClient::H1 { ref mut h1 } => h1.poll_without_shutdown(cx),
- #[cfg(feature = "http2")]
- ProtoClient::H2 { ref mut h2, .. } => Pin::new(h2).poll(cx).map_ok(|_| ()),
-
- #[cfg(not(feature = "http1"))]
- ProtoClient::H1 { ref mut h1 } => match h1.0 {},
- #[cfg(not(feature = "http2"))]
- ProtoClient::H2 { ref mut h2, .. } => match h2.0 {},
- }
- }
-
- /// Prevent shutdown of the underlying IO object at the end of service the request,
- /// instead run `into_parts`. This is a convenience wrapper over `poll_without_shutdown`.
- pub fn without_shutdown(self) -> impl Future<Output = crate::Result<Parts<T>>> {
- let mut conn = Some(self);
- future::poll_fn(move |cx| -> Poll<crate::Result<Parts<T>>> {
- ready!(conn.as_mut().unwrap().poll_without_shutdown(cx))?;
- Poll::Ready(Ok(conn.take().unwrap().into_parts()))
- })
- }
-
- /// Returns whether the [extended CONNECT protocol][1] is enabled or not.
- ///
- /// This setting is configured by the server peer by sending the
- /// [`SETTINGS_ENABLE_CONNECT_PROTOCOL` parameter][2] in a `SETTINGS` frame.
- /// This method returns the currently acknowledged value received from the
- /// remote.
- ///
- /// [1]: https://datatracker.ietf.org/doc/html/rfc8441#section-4
- /// [2]: https://datatracker.ietf.org/doc/html/rfc8441#section-3
- #[cfg(feature = "http2")]
- pub fn http2_is_extended_connect_protocol_enabled(&self) -> bool {
- match self.inner.as_ref().unwrap() {
- ProtoClient::H1 { .. } => false,
- ProtoClient::H2 { h2 } => h2.is_extended_connect_protocol_enabled(),
- }
- }
-}
-
-impl<T, B> Future for Connection<T, B>
-where
- T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
- B: Body + Send + 'static,
- B::Data: Send,
- B::Error: Into<Box<dyn StdError + Send + Sync>>,
-{
- type Output = crate::Result<()>;
-
- fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
- match ready!(Pin::new(self.inner.as_mut().unwrap()).poll(cx))? {
- proto::Dispatched::Shutdown => Poll::Ready(Ok(())),
- #[cfg(feature = "http1")]
- proto::Dispatched::Upgrade(pending) => match self.inner.take() {
- Some(ProtoClient::H1 { h1 }) => {
- let (io, buf, _) = h1.into_inner();
- pending.fulfill(Upgraded::new(io, buf));
- Poll::Ready(Ok(()))
- }
- _ => {
- drop(pending);
- unreachable!("Upgrade expects h1");
- }
- },
- }
- }
-}
-
-impl<T, B> fmt::Debug for Connection<T, B>
-where
- T: AsyncRead + AsyncWrite + fmt::Debug + Send + 'static,
- B: Body + 'static,
-{
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("Connection").finish()
- }
-}
-
-// ===== impl Builder
-
-impl Builder {
- /// Creates a new connection builder.
- #[inline]
- pub fn new() -> Builder {
- Builder {
- exec: Exec::Default,
- timer: Time::Empty,
- h09_responses: false,
- h1_writev: None,
- h1_read_buf_exact_size: None,
- h1_parser_config: Default::default(),
- h1_title_case_headers: false,
- h1_preserve_header_case: false,
- #[cfg(feature = "ffi")]
- h1_preserve_header_order: false,
- h1_max_buf_size: None,
- #[cfg(feature = "ffi")]
- h1_headers_raw: false,
- #[cfg(feature = "http2")]
- h2_builder: Default::default(),
- #[cfg(feature = "http1")]
- version: Proto::Http1,
- #[cfg(not(feature = "http1"))]
- version: Proto::Http2,
- }
- }
-
- /// Provide an executor to execute background HTTP2 tasks.
- pub fn executor<E>(&mut self, exec: E) -> &mut Builder
- where
- E: Executor<BoxSendFuture> + Send + Sync + 'static,
- {
- self.exec = Exec::Executor(Arc::new(exec));
- self
- }
-
- /// Provide a timer to execute background HTTP2 tasks.
- pub fn timer<M>(&mut self, timer: M) -> &mut Builder
- where
- M: Timer + Send + Sync + 'static,
- {
- self.timer = Time::Timer(Arc::new(timer));
- self
- }
-
- /// Set whether HTTP/0.9 responses should be tolerated.
- ///
- /// Default is false.
- pub fn http09_responses(&mut self, enabled: bool) -> &mut Builder {
- self.h09_responses = enabled;
- self
- }
-
- /// Set whether HTTP/1 connections will accept spaces between header names
- /// and the colon that follow them in responses.
- ///
- /// You probably don't need this, here is what [RFC 7230 Section 3.2.4.] has
- /// to say about it:
- ///
- /// > No whitespace is allowed between the header field-name and colon. In
- /// > the past, differences in the handling of such whitespace have led to
- /// > security vulnerabilities in request routing and response handling. A
- /// > server MUST reject any received request message that contains
- /// > whitespace between a header field-name and colon with a response code
- /// > of 400 (Bad Request). A proxy MUST remove any such whitespace from a
- /// > response message before forwarding the message downstream.
- ///
- /// Note that this setting does not affect HTTP/2.
- ///
- /// Default is false.
- ///
- /// [RFC 7230 Section 3.2.4.]: https://tools.ietf.org/html/rfc7230#section-3.2.4
- pub fn http1_allow_spaces_after_header_name_in_responses(
- &mut self,
- enabled: bool,
- ) -> &mut Builder {
- self.h1_parser_config
- .allow_spaces_after_header_name_in_responses(enabled);
- self
- }
-
- /// Set whether HTTP/1 connections will accept obsolete line folding for
- /// header values.
- ///
- /// Newline codepoints (`\r` and `\n`) will be transformed to spaces when
- /// parsing.
- ///
- /// You probably don't need this, here is what [RFC 7230 Section 3.2.4.] has
- /// to say about it:
- ///
- /// > A server that receives an obs-fold in a request message that is not
- /// > within a message/http container MUST either reject the message by
- /// > sending a 400 (Bad Request), preferably with a representation
- /// > explaining that obsolete line folding is unacceptable, or replace
- /// > each received obs-fold with one or more SP octets prior to
- /// > interpreting the field value or forwarding the message downstream.
- ///
- /// > A proxy or gateway that receives an obs-fold in a response message
- /// > that is not within a message/http container MUST either discard the
- /// > message and replace it with a 502 (Bad Gateway) response, preferably
- /// > with a representation explaining that unacceptable line folding was
- /// > received, or replace each received obs-fold with one or more SP
- /// > octets prior to interpreting the field value or forwarding the
- /// > message downstream.
- ///
- /// > A user agent that receives an obs-fold in a response message that is
- /// > not within a message/http container MUST replace each received
- /// > obs-fold with one or more SP octets prior to interpreting the field
- /// > value.
- ///
- /// Note that this setting does not affect HTTP/2.
- ///
- /// Default is false.
- ///
- /// [RFC 7230 Section 3.2.4.]: https://tools.ietf.org/html/rfc7230#section-3.2.4
- pub fn http1_allow_obsolete_multiline_headers_in_responses(
- &mut self,
- enabled: bool,
- ) -> &mut Builder {
- self.h1_parser_config
- .allow_obsolete_multiline_headers_in_responses(enabled);
- self
- }
-
- /// Set whether HTTP/1 connections should try to use vectored writes,
- /// or always flatten into a single buffer.
- ///
- /// Note that setting this to false may mean more copies of body data,
- /// but may also improve performance when an IO transport doesn't
- /// support vectored writes well, such as most TLS implementations.
- ///
- /// Setting this to true will force hyper to use queued strategy
- /// which may eliminate unnecessary cloning on some TLS backends
- ///
- /// Default is `auto`. In this mode hyper will try to guess which
- /// mode to use
- pub fn http1_writev(&mut self, enabled: bool) -> &mut Builder {
- self.h1_writev = Some(enabled);
- self
- }
-
- /// Set whether HTTP/1 connections will write header names as title case at
- /// the socket level.
- ///
- /// Note that this setting does not affect HTTP/2.
- ///
- /// Default is false.
- pub fn http1_title_case_headers(&mut self, enabled: bool) -> &mut Builder {
- self.h1_title_case_headers = enabled;
- self
- }
-
- /// Set whether to support preserving original header cases.
- ///
- /// Currently, this will record the original cases received, and store them
- /// in a private extension on the `Response`. It will also look for and use
- /// such an extension in any provided `Request`.
- ///
- /// Since the relevant extension is still private, there is no way to
- /// interact with the original cases. The only effect this can have now is
- /// to forward the cases in a proxy-like fashion.
- ///
- /// Note that this setting does not affect HTTP/2.
- ///
- /// Default is false.
- pub fn http1_preserve_header_case(&mut self, enabled: bool) -> &mut Builder {
- self.h1_preserve_header_case = enabled;
- self
- }
-
- /// Set whether to support preserving original header order.
- ///
- /// Currently, this will record the order in which headers are received, and store this
- /// ordering in a private extension on the `Response`. It will also look for and use
- /// such an extension in any provided `Request`.
- ///
- /// Note that this setting does not affect HTTP/2.
- ///
- /// Default is false.
- #[cfg(feature = "ffi")]
- pub fn http1_preserve_header_order(&mut self, enabled: bool) -> &mut Builder {
- self.h1_preserve_header_order = enabled;
- self
- }
-
- /// Sets the exact size of the read buffer to *always* use.
- ///
- /// Note that setting this option unsets the `http1_max_buf_size` option.
- ///
- /// Default is an adaptive read buffer.
- pub fn http1_read_buf_exact_size(&mut self, sz: Option<usize>) -> &mut Builder {
- self.h1_read_buf_exact_size = sz;
- self.h1_max_buf_size = None;
- self
- }
-
- /// Set the maximum buffer size for the connection.
- ///
- /// Default is ~400kb.
- ///
- /// Note that setting this option unsets the `http1_read_exact_buf_size` option.
- ///
- /// # Panics
- ///
- /// The minimum value allowed is 8192. This method panics if the passed `max` is less than the minimum.
- #[cfg(feature = "http1")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
- pub fn http1_max_buf_size(&mut self, max: usize) -> &mut Self {
- assert!(
- max >= proto::h1::MINIMUM_MAX_BUFFER_SIZE,
- "the max_buf_size cannot be smaller than the minimum that h1 specifies."
- );
-
- self.h1_max_buf_size = Some(max);
- self.h1_read_buf_exact_size = None;
- self
- }
-
- #[cfg(feature = "ffi")]
- pub(crate) fn http1_headers_raw(&mut self, enabled: bool) -> &mut Self {
- self.h1_headers_raw = enabled;
- self
- }
-
- /// Sets whether HTTP2 is required.
- ///
- /// Default is false.
- #[cfg(feature = "http2")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_only(&mut self, enabled: bool) -> &mut Builder {
- if enabled {
- self.version = Proto::Http2
- }
- self
- }
-
- /// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2
- /// stream-level flow control.
- ///
- /// Passing `None` will do nothing.
- ///
- /// If not set, hyper will use a default.
- ///
- /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_INITIAL_WINDOW_SIZE
- #[cfg(feature = "http2")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_initial_stream_window_size(&mut self, sz: impl Into<Option<u32>>) -> &mut Self {
- if let Some(sz) = sz.into() {
- self.h2_builder.adaptive_window = false;
- self.h2_builder.initial_stream_window_size = sz;
- }
- self
- }
-
- /// Sets the max connection-level flow control for HTTP2
- ///
- /// Passing `None` will do nothing.
- ///
- /// If not set, hyper will use a default.
- #[cfg(feature = "http2")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_initial_connection_window_size(
- &mut self,
- sz: impl Into<Option<u32>>,
- ) -> &mut Self {
- if let Some(sz) = sz.into() {
- self.h2_builder.adaptive_window = false;
- self.h2_builder.initial_conn_window_size = sz;
- }
- self
- }
-
- /// Sets whether to use an adaptive flow control.
- ///
- /// Enabling this will override the limits set in
- /// `http2_initial_stream_window_size` and
- /// `http2_initial_connection_window_size`.
- #[cfg(feature = "http2")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_adaptive_window(&mut self, enabled: bool) -> &mut Self {
- use proto::h2::SPEC_WINDOW_SIZE;
-
- self.h2_builder.adaptive_window = enabled;
- if enabled {
- self.h2_builder.initial_conn_window_size = SPEC_WINDOW_SIZE;
- self.h2_builder.initial_stream_window_size = SPEC_WINDOW_SIZE;
- }
- self
- }
-
- /// Sets the maximum frame size to use for HTTP2.
- ///
- /// Passing `None` will do nothing.
- ///
- /// If not set, hyper will use a default.
- #[cfg(feature = "http2")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_max_frame_size(&mut self, sz: impl Into<Option<u32>>) -> &mut Self {
- if let Some(sz) = sz.into() {
- self.h2_builder.max_frame_size = sz;
- }
- self
- }
-
- /// Sets an interval for HTTP2 Ping frames should be sent to keep a
- /// connection alive.
- ///
- /// Pass `None` to disable HTTP2 keep-alive.
- ///
- /// Default is currently disabled.
- #[cfg(feature = "http2")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_keep_alive_interval(
- &mut self,
- interval: impl Into<Option<Duration>>,
- ) -> &mut Self {
- self.h2_builder.keep_alive_interval = interval.into();
- self
- }
-
- /// Sets a timeout for receiving an acknowledgement of the keep-alive ping.
- ///
- /// If the ping is not acknowledged within the timeout, the connection will
- /// be closed. Does nothing if `http2_keep_alive_interval` is disabled.
- ///
- /// Default is 20 seconds.
- #[cfg(feature = "http2")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_keep_alive_timeout(&mut self, timeout: Duration) -> &mut Self {
- self.h2_builder.keep_alive_timeout = timeout;
- self
- }
-
- /// Sets whether HTTP2 keep-alive should apply while the connection is idle.
- ///
- /// If disabled, keep-alive pings are only sent while there are open
- /// request/responses streams. If enabled, pings are also sent when no
- /// streams are active. Does nothing if `http2_keep_alive_interval` is
- /// disabled.
- ///
- /// Default is `false`.
- #[cfg(feature = "http2")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_keep_alive_while_idle(&mut self, enabled: bool) -> &mut Self {
- self.h2_builder.keep_alive_while_idle = enabled;
- self
- }
-
- /// Sets the maximum number of HTTP2 concurrent locally reset streams.
- ///
- /// See the documentation of [`h2::client::Builder::max_concurrent_reset_streams`] for more
- /// details.
- ///
- /// The default value is determined by the `h2` crate.
- ///
- /// [`h2::client::Builder::max_concurrent_reset_streams`]: https://docs.rs/h2/client/struct.Builder.html#method.max_concurrent_reset_streams
- #[cfg(feature = "http2")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_max_concurrent_reset_streams(&mut self, max: usize) -> &mut Self {
- self.h2_builder.max_concurrent_reset_streams = Some(max);
- self
- }
-
- /// Set the maximum write buffer size for each HTTP/2 stream.
- ///
- /// Default is currently 1MB, but may change.
- ///
- /// # Panics
- ///
- /// The value must be no larger than `u32::MAX`.
- #[cfg(feature = "http2")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_max_send_buf_size(&mut self, max: usize) -> &mut Self {
- assert!(max <= std::u32::MAX as usize);
- self.h2_builder.max_send_buffer_size = max;
- self
- }
-
- /// Constructs a connection with the configured options and IO.
- /// See [`client::conn`](crate::client::conn) for more.
- ///
- /// Note, if [`Connection`] is not `await`-ed, [`SendRequest`] will
- /// do nothing.
- pub fn handshake<T, B>(
- &self,
- io: T,
- ) -> impl Future<Output = crate::Result<(SendRequest<B>, Connection<T, B>)>>
- where
- T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
- B: Body + 'static,
- B::Data: Send,
- B::Error: Into<Box<dyn StdError + Send + Sync>>,
- {
- let opts = self.clone();
-
- async move {
- trace!("client handshake {:?}", opts.version);
-
- let (tx, rx) = dispatch::channel();
- let proto = match opts.version {
- #[cfg(feature = "http1")]
- Proto::Http1 => {
- let mut conn = proto::Conn::new(io);
- conn.set_h1_parser_config(opts.h1_parser_config);
- if let Some(writev) = opts.h1_writev {
- if writev {
- conn.set_write_strategy_queue();
- } else {
- conn.set_write_strategy_flatten();
- }
- }
- if opts.h1_title_case_headers {
- conn.set_title_case_headers();
- }
- if opts.h1_preserve_header_case {
- conn.set_preserve_header_case();
- }
- #[cfg(feature = "ffi")]
- if opts.h1_preserve_header_order {
- conn.set_preserve_header_order();
- }
- if opts.h09_responses {
- conn.set_h09_responses();
- }
-
- #[cfg(feature = "ffi")]
- conn.set_raw_headers(opts.h1_headers_raw);
-
- if let Some(sz) = opts.h1_read_buf_exact_size {
- conn.set_read_buf_exact_size(sz);
- }
- if let Some(max) = opts.h1_max_buf_size {
- conn.set_max_buf_size(max);
- }
- let cd = proto::h1::dispatch::Client::new(rx);
- let dispatch = proto::h1::Dispatcher::new(cd, conn);
- ProtoClient::H1 { h1: dispatch }
- }
- #[cfg(feature = "http2")]
- Proto::Http2 => {
- let h2 = proto::h2::client::handshake(
- io,
- rx,
- &opts.h2_builder,
- opts.exec.clone(),
- opts.timer.clone(),
- )
- .await?;
- ProtoClient::H2 { h2 }
- }
- };
-
- Ok((
- SendRequest { dispatch: tx },
- Connection { inner: Some(proto) },
- ))
- }
- }
-}
-
-// ===== impl ResponseFuture
-
-impl Future for ResponseFuture {
- type Output = crate::Result<Response<Recv>>;
-
- fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
- match self.inner {
- ResponseFutureState::Waiting(ref mut rx) => {
- Pin::new(rx).poll(cx).map(|res| match res {
- Ok(Ok(resp)) => Ok(resp),
- Ok(Err(err)) => Err(err),
- // this is definite bug if it happens, but it shouldn't happen!
- Err(_canceled) => panic!("dispatch dropped without returning error"),
- })
- }
- ResponseFutureState::Error(ref mut err) => {
- Poll::Ready(Err(err.take().expect("polled after ready")))
- }
- }
- }
-}
-
-impl fmt::Debug for ResponseFuture {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("ResponseFuture").finish()
- }
-}
-
-// ===== impl ProtoClient
-
-impl<T, B> Future for ProtoClient<T, B>
-where
- T: AsyncRead + AsyncWrite + Send + Unpin + 'static,
- B: Body + Send + 'static,
- B::Data: Send,
- B::Error: Into<Box<dyn StdError + Send + Sync>>,
-{
- type Output = crate::Result<proto::Dispatched>;
-
- fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
- match self.project() {
- #[cfg(feature = "http1")]
- ProtoClientProj::H1 { h1 } => h1.poll(cx),
- #[cfg(feature = "http2")]
- ProtoClientProj::H2 { h2, .. } => h2.poll(cx),
-
- #[cfg(not(feature = "http1"))]
- ProtoClientProj::H1 { h1 } => match h1.0 {},
- #[cfg(not(feature = "http2"))]
- ProtoClientProj::H2 { h2, .. } => match h2.0 {},
- }
- }
-}
-
-// assert trait markers
-
-trait AssertSend: Send {}
-trait AssertSendSync: Send + Sync {}
-
-#[doc(hidden)]
-impl<B: Send> AssertSendSync for SendRequest<B> {}
-
-#[doc(hidden)]
-impl<T: Send, B: Send> AssertSend for Connection<T, B>
-where
- T: AsyncRead + AsyncWrite + Send + 'static,
- B: Body + 'static,
- B::Data: Send,
-{
-}
-
-#[doc(hidden)]
-impl<T: Send + Sync, B: Send + Sync> AssertSendSync for Connection<T, B>
-where
- T: AsyncRead + AsyncWrite + Send + 'static,
- B: Body + 'static,
- B::Data: Send + Sync + 'static,
-{
-}
-
-#[doc(hidden)]
-impl AssertSendSync for Builder {}
-
-#[doc(hidden)]
-impl AssertSend for ResponseFuture {}
diff --git a/src/ffi/client.rs b/src/ffi/client.rs
--- a/src/ffi/client.rs
+++ b/src/ffi/client.rs
@@ -13,7 +13,11 @@ use super::task::{hyper_executor, hyper_task, hyper_task_return_type, AsTaskType
/// An options builder to configure an HTTP client connection.
pub struct hyper_clientconn_options {
- builder: conn::Builder,
+ http1_allow_obsolete_multiline_headers_in_responses: bool,
+ http1_headers_raw: bool,
+ http1_preserve_header_case: bool,
+ http1_preserve_header_order: bool,
+ http2: bool,
/// Use a `Weak` to prevent cycles.
exec: WeakExec,
}
diff --git a/src/ffi/client.rs b/src/ffi/client.rs
--- a/src/ffi/client.rs
+++ b/src/ffi/client.rs
@@ -24,7 +28,14 @@ pub struct hyper_clientconn_options {
/// send multiple requests on a single connection, such as when HTTP/1
/// keep-alive or HTTP/2 is used.
pub struct hyper_clientconn {
- tx: conn::SendRequest<crate::Recv>,
+ tx: Tx,
+}
+
+enum Tx {
+ #[cfg(feature = "http1")]
+ Http1(conn::http1::SendRequest<crate::Recv>),
+ #[cfg(feature = "http2")]
+ Http2(conn::http2::SendRequest<crate::Recv>),
}
// ===== impl hyper_clientconn =====
diff --git a/src/ffi/client.rs b/src/ffi/client.rs
--- a/src/ffi/client.rs
+++ b/src/ffi/client.rs
@@ -42,13 +53,35 @@ ffi_fn! {
let io = non_null! { Box::from_raw(io) ?= ptr::null_mut() };
Box::into_raw(hyper_task::boxed(async move {
- options.builder.handshake::<_, crate::Recv>(io)
+ #[cfg(feature = "http2")]
+ {
+ if options.http2 {
+ return conn::http2::Builder::new()
+ .executor(options.exec.clone())
+ .handshake::<_, crate::Recv>(io)
+ .await
+ .map(|(tx, conn)| {
+ options.exec.execute(Box::pin(async move {
+ let _ = conn.await;
+ }));
+ hyper_clientconn { tx: Tx::Http2(tx) }
+ });
+ }
+ }
+
+ conn::http1::Builder::new()
+ .executor(options.exec.clone())
+ .http1_allow_obsolete_multiline_headers_in_responses(options.http1_allow_obsolete_multiline_headers_in_responses)
+ .http1_headers_raw(options.http1_headers_raw)
+ .http1_preserve_header_case(options.http1_preserve_header_case)
+ .http1_preserve_header_order(options.http1_preserve_header_order)
+ .handshake::<_, crate::Recv>(io)
.await
.map(|(tx, conn)| {
options.exec.execute(Box::pin(async move {
let _ = conn.await;
}));
- hyper_clientconn { tx }
+ hyper_clientconn { tx: Tx::Http1(tx) }
})
}))
} ?= std::ptr::null_mut()
diff --git a/src/ffi/client.rs b/src/ffi/client.rs
--- a/src/ffi/client.rs
+++ b/src/ffi/client.rs
@@ -65,7 +98,10 @@ ffi_fn! {
// Update request with original-case map of headers
req.finalize_request();
- let fut = non_null! { &mut *conn ?= ptr::null_mut() }.tx.send_request(req.0);
+ let fut = match non_null! { &mut *conn ?= ptr::null_mut() }.tx {
+ Tx::Http1(ref mut tx) => futures_util::future::Either::Left(tx.send_request(req.0)),
+ Tx::Http2(ref mut tx) => futures_util::future::Either::Right(tx.send_request(req.0)),
+ };
let fut = async move {
fut.await.map(hyper_response::wrap)
diff --git a/src/ffi/client.rs b/src/ffi/client.rs
--- a/src/ffi/client.rs
+++ b/src/ffi/client.rs
@@ -93,10 +129,12 @@ unsafe impl AsTaskType for hyper_clientconn {
ffi_fn! {
/// Creates a new set of HTTP clientconn options to be used in a handshake.
fn hyper_clientconn_options_new() -> *mut hyper_clientconn_options {
- let builder = conn::Builder::new();
-
Box::into_raw(Box::new(hyper_clientconn_options {
- builder,
+ http1_allow_obsolete_multiline_headers_in_responses: false,
+ http1_headers_raw: false,
+ http1_preserve_header_case: false,
+ http1_preserve_header_order: false,
+ http2: false,
exec: WeakExec::new(),
}))
} ?= std::ptr::null_mut()
diff --git a/src/ffi/client.rs b/src/ffi/client.rs
--- a/src/ffi/client.rs
+++ b/src/ffi/client.rs
@@ -108,7 +146,7 @@ ffi_fn! {
/// Pass `0` to allow lowercase normalization (default), `1` to retain original case.
fn hyper_clientconn_options_set_preserve_header_case(opts: *mut hyper_clientconn_options, enabled: c_int) {
let opts = non_null! { &mut *opts ?= () };
- opts.builder.http1_preserve_header_case(enabled != 0);
+ opts.http1_preserve_header_case = enabled != 0;
}
}
diff --git a/src/ffi/client.rs b/src/ffi/client.rs
--- a/src/ffi/client.rs
+++ b/src/ffi/client.rs
@@ -118,7 +156,7 @@ ffi_fn! {
/// Pass `0` to allow reordering (default), `1` to retain original ordering.
fn hyper_clientconn_options_set_preserve_header_order(opts: *mut hyper_clientconn_options, enabled: c_int) {
let opts = non_null! { &mut *opts ?= () };
- opts.builder.http1_preserve_header_order(enabled != 0);
+ opts.http1_preserve_header_order = enabled != 0;
}
}
diff --git a/src/ffi/client.rs b/src/ffi/client.rs
--- a/src/ffi/client.rs
+++ b/src/ffi/client.rs
@@ -140,7 +178,6 @@ ffi_fn! {
let weak_exec = hyper_executor::downgrade(&exec);
std::mem::forget(exec);
- opts.builder.executor(weak_exec.clone());
opts.exec = weak_exec;
}
}
diff --git a/src/ffi/client.rs b/src/ffi/client.rs
--- a/src/ffi/client.rs
+++ b/src/ffi/client.rs
@@ -153,7 +190,7 @@ ffi_fn! {
#[cfg(feature = "http2")]
{
let opts = non_null! { &mut *opts ?= hyper_code::HYPERE_INVALID_ARG };
- opts.builder.http2_only(enabled != 0);
+ opts.http2 = enabled != 0;
hyper_code::HYPERE_OK
}
diff --git a/src/ffi/client.rs b/src/ffi/client.rs
--- a/src/ffi/client.rs
+++ b/src/ffi/client.rs
@@ -175,7 +212,7 @@ ffi_fn! {
/// If enabled, see `hyper_response_headers_raw()` for usage.
fn hyper_clientconn_options_headers_raw(opts: *mut hyper_clientconn_options, enabled: c_int) -> hyper_code {
let opts = non_null! { &mut *opts ?= hyper_code::HYPERE_INVALID_ARG };
- opts.builder.http1_headers_raw(enabled != 0);
+ opts.http1_headers_raw = enabled != 0;
hyper_code::HYPERE_OK
}
}
diff --git a/src/ffi/client.rs b/src/ffi/client.rs
--- a/src/ffi/client.rs
+++ b/src/ffi/client.rs
@@ -188,7 +225,7 @@ ffi_fn! {
///
fn hyper_clientconn_options_http1_allow_multiline_headers(opts: *mut hyper_clientconn_options, enabled: c_int) -> hyper_code {
let opts = non_null! { &mut *opts ?= hyper_code::HYPERE_INVALID_ARG };
- opts.builder.http1_allow_obsolete_multiline_headers_in_responses(enabled != 0);
+ opts.http1_allow_obsolete_multiline_headers_in_responses = enabled != 0;
hyper_code::HYPERE_OK
}
}
|
`ffi::client` is also effected.
How would you like me to handle that?
Ohhh, I had forgotten about that. I guess a pared-down enum could be used in `ffi::client`. I can do that if it seems hairy, let me know.
|
2022-09-20T00:06:01Z
| 2,987
|
Remove the client::conn combined-version types
The public client API for 1.0 will be the version-specific types at `client::conn::{http1, http2}`. The combined version may exist in `hyper-util`, but at least it should be removed from hyper proper. Doing so isn't particularly complex, but it will mean fixing up all the tests.
- Delete `hyper::client::conn::{SendRequest, Connection}` types.
- Update `examples/*` to use the `hyper::client::conn::http1` types (or `http2` if appropriate).
- Update `tests/*` to use `conn::http1` in most places, and `conn::http2` when specifically testing for HTTP/2.
|
hyperium__hyper-2987
|
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -232,19 +232,17 @@ macro_rules! test {
// Wrapper around hyper::client::conn::Builder with set_host field to mimic
// hyper::client::Builder.
struct Builder {
- inner: hyper::client::conn::Builder,
+ inner: hyper::client::conn::http1::Builder,
set_host: bool,
http09_responses: bool,
- http2_only: bool,
}
impl Builder {
fn new() -> Self {
Self {
- inner: hyper::client::conn::Builder::new(),
+ inner: hyper::client::conn::http1::Builder::new(),
set_host: true,
http09_responses: false,
- http2_only: false,
}
}
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -260,17 +258,10 @@ macro_rules! test {
self.inner.http09_responses(val);
self
}
-
- #[allow(unused)]
- fn http2_only(&mut self, val: bool) -> &mut Self {
- self.http2_only = val;
- self.inner.http2_only(val);
- self
- }
}
impl std::ops::Deref for Builder {
- type Target = hyper::client::conn::Builder;
+ type Target = hyper::client::conn::http1::Builder;
fn deref(&self) -> &Self::Target {
&self.inner
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -292,7 +283,7 @@ macro_rules! test {
return Err(Error::UnsupportedVersion);
}
- if req.version() == Version::HTTP_2 && !builder.http2_only {
+ if req.version() == Version::HTTP_2 {
return Err(Error::UnsupportedVersion);
}
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1376,7 +1367,7 @@ mod conn {
let client = async move {
let tcp = tcp_connect(&addr).await.expect("connect");
- let (mut client, conn) = conn::handshake(tcp).await.expect("handshake");
+ let (mut client, conn) = conn::http1::handshake(tcp).await.expect("handshake");
tokio::task::spawn(async move {
conn.await.expect("http conn");
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1420,7 +1411,7 @@ mod conn {
let client = async move {
let tcp = tcp_connect(&addr).await.expect("connect");
- let (mut client, conn) = conn::handshake(tcp).await.expect("handshake");
+ let (mut client, conn) = conn::http1::handshake(tcp).await.expect("handshake");
tokio::task::spawn(async move {
conn.await.expect("http conn");
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1478,7 +1469,7 @@ mod conn {
let tcp = rt.block_on(tcp_connect(&addr)).unwrap();
- let (mut client, conn) = rt.block_on(conn::handshake(tcp)).unwrap();
+ let (mut client, conn) = rt.block_on(conn::http1::handshake(tcp)).unwrap();
rt.spawn(conn.map_err(|e| panic!("conn error: {}", e)).map(|_| ()));
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1524,7 +1515,7 @@ mod conn {
let tcp = rt.block_on(tcp_connect(&addr)).unwrap();
- let (mut client, conn) = rt.block_on(conn::handshake(tcp)).unwrap();
+ let (mut client, conn) = rt.block_on(conn::http1::handshake(tcp)).unwrap();
rt.spawn(conn.map_err(|e| panic!("conn error: {}", e)).map(|_| ()));
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1581,7 +1572,7 @@ mod conn {
let tcp = rt.block_on(tcp_connect(&addr)).unwrap();
- let (mut client, conn) = rt.block_on(conn::handshake(tcp)).unwrap();
+ let (mut client, conn) = rt.block_on(conn::http1::handshake(tcp)).unwrap();
rt.spawn(conn.map_err(|e| panic!("conn error: {}", e)).map(|_| ()));
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1626,7 +1617,7 @@ mod conn {
let tcp = rt.block_on(tcp_connect(&addr)).unwrap();
- let (mut client, conn) = rt.block_on(conn::handshake(tcp)).unwrap();
+ let (mut client, conn) = rt.block_on(conn::http1::handshake(tcp)).unwrap();
rt.spawn(conn.map_err(|e| panic!("conn error: {}", e)).map(|_| ()));
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1668,7 +1659,7 @@ mod conn {
let tcp = rt.block_on(tcp_connect(&addr)).unwrap();
- let (mut client, conn) = rt.block_on(conn::handshake(tcp)).unwrap();
+ let (mut client, conn) = rt.block_on(conn::http1::handshake(tcp)).unwrap();
rt.spawn(conn.map_err(|e| panic!("conn error: {}", e)).map(|_| ()));
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1738,7 +1729,7 @@ mod conn {
shutdown_called: false,
};
- let (mut client, mut conn) = rt.block_on(conn::handshake(io)).unwrap();
+ let (mut client, mut conn) = rt.block_on(conn::http1::handshake(io)).unwrap();
{
let until_upgrade = poll_fn(|ctx| conn.poll_without_shutdown(ctx));
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1824,7 +1815,7 @@ mod conn {
shutdown_called: false,
};
- let (mut client, mut conn) = rt.block_on(conn::handshake(io)).unwrap();
+ let (mut client, mut conn) = rt.block_on(conn::http1::handshake(io)).unwrap();
{
let until_tunneled = poll_fn(|ctx| conn.poll_without_shutdown(ctx));
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1922,9 +1913,8 @@ mod conn {
});
let io = tcp_connect(&addr).await.expect("tcp connect");
- let (mut client, conn) = conn::Builder::new()
+ let (mut client, conn) = conn::http2::Builder::new()
.executor(TokioExecutor)
- .http2_only(true)
.handshake(io)
.await
.expect("http handshake");
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1985,10 +1975,9 @@ mod conn {
});
let io = tcp_connect(&addr).await.expect("tcp connect");
- let (_client, conn) = conn::Builder::new()
+ let (_client, conn) = conn::http2::Builder::new()
.executor(TokioExecutor)
.timer(TokioTimer)
- .http2_only(true)
.http2_keep_alive_interval(Duration::from_secs(1))
.http2_keep_alive_timeout(Duration::from_secs(1))
// enable while idle since we aren't sending requests
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2020,10 +2009,9 @@ mod conn {
});
let io = tcp_connect(&addr).await.expect("tcp connect");
- let (mut client, conn) = conn::Builder::new()
+ let (mut client, conn) = conn::http2::Builder::new()
.executor(TokioExecutor)
.timer(TokioTimer)
- .http2_only(true)
.http2_keep_alive_interval(Duration::from_secs(1))
.http2_keep_alive_timeout(Duration::from_secs(1))
.handshake::<_, Recv>(io)
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2058,10 +2046,9 @@ mod conn {
});
let io = tcp_connect(&addr).await.expect("tcp connect");
- let (mut client, conn) = conn::Builder::new()
+ let (mut client, conn) = conn::http2::Builder::new()
.executor(TokioExecutor)
.timer(TokioTimer)
- .http2_only(true)
.http2_keep_alive_interval(Duration::from_secs(1))
.http2_keep_alive_timeout(Duration::from_secs(1))
.handshake(io)
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2126,10 +2113,9 @@ mod conn {
});
let io = tcp_connect(&addr).await.expect("tcp connect");
- let (mut client, conn) = conn::Builder::new()
+ let (mut client, conn) = conn::http2::Builder::new()
.executor(TokioExecutor)
.timer(TokioTimer)
- .http2_only(true)
.http2_keep_alive_interval(Duration::from_secs(1))
.http2_keep_alive_timeout(Duration::from_secs(1))
.handshake(io)
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2188,9 +2174,8 @@ mod conn {
});
let io = tcp_connect(&addr).await.expect("tcp connect");
- let (mut client, conn) = conn::Builder::new()
+ let (mut client, conn) = conn::http2::Builder::new()
.executor(TokioExecutor)
- .http2_only(true)
.handshake(io)
.await
.expect("http handshake");
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2245,9 +2230,8 @@ mod conn {
});
let io = tcp_connect(&addr).await.expect("tcp connect");
- let (mut client, conn) = conn::Builder::new()
+ let (mut client, conn) = conn::http2::Builder::new()
.executor(TokioExecutor)
- .http2_only(true)
.handshake::<_, Empty<Bytes>>(io)
.await
.expect("http handshake");
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2457,9 +2457,8 @@ async fn http2_keep_alive_with_responsive_client() {
});
let tcp = connect_async(addr).await;
- let (mut client, conn) = hyper::client::conn::Builder::new()
+ let (mut client, conn) = hyper::client::conn::http2::Builder::new()
.executor(TokioExecutor)
- .http2_only(true)
.handshake(tcp)
.await
.expect("http handshake");
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -3077,20 +3076,32 @@ impl TestClient {
let host = req.uri().host().expect("uri has no host");
let port = req.uri().port_u16().expect("uri has no port");
- let mut builder = hyper::client::conn::Builder::new();
- builder.http2_only(self.http2_only);
- builder.executor(TokioExecutor);
-
let stream = TkTcpStream::connect(format!("{}:{}", host, port))
.await
.unwrap();
- let (mut sender, conn) = builder.handshake(stream).await.unwrap();
+ if self.http2_only {
+ let (mut sender, conn) = hyper::client::conn::http2::Builder::new()
+ .executor(TokioExecutor)
+ .handshake(stream)
+ .await
+ .unwrap();
+ tokio::task::spawn(async move {
+ conn.await.unwrap();
+ });
- tokio::task::spawn(async move {
- conn.await.unwrap();
- });
+ sender.send_request(req).await
+ } else {
+ let (mut sender, conn) = hyper::client::conn::http1::Builder::new()
+ .executor(TokioExecutor)
+ .handshake(stream)
+ .await
+ .unwrap();
+ tokio::task::spawn(async move {
+ conn.await.unwrap();
+ });
- sender.send_request(req).await
+ sender.send_request(req).await
+ }
}
}
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -8,7 +8,6 @@ use std::sync::{
use bytes::Bytes;
use http_body_util::Full;
-use hyper::client::conn::Builder;
use hyper::server::conn::Http;
use tokio::net::{TcpListener, TcpStream};
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -421,20 +420,32 @@ async fn async_test(cfg: __TestConfig) {
async move {
let stream = TcpStream::connect(addr).await.unwrap();
- let (mut sender, conn) = hyper::client::conn::Builder::new()
- .executor(TokioExecutor)
- .http2_only(http2_only)
- .handshake(stream)
- .await
- .unwrap();
+ let res = if http2_only {
+ let (mut sender, conn) = hyper::client::conn::http2::Builder::new()
+ .executor(TokioExecutor)
+ .handshake(stream)
+ .await
+ .unwrap();
- tokio::task::spawn(async move {
- if let Err(err) = conn.await {
- panic!("{:?}", err);
- }
- });
+ tokio::task::spawn(async move {
+ if let Err(err) = conn.await {
+ panic!("{:?}", err);
+ }
+ });
+ sender.send_request(req).await.unwrap()
+ } else {
+ let (mut sender, conn) = hyper::client::conn::http1::Builder::new()
+ .handshake(stream)
+ .await
+ .unwrap();
- let res = sender.send_request(req).await.unwrap();
+ tokio::task::spawn(async move {
+ if let Err(err) = conn.await {
+ panic!("{:?}", err);
+ }
+ });
+ sender.send_request(req).await.unwrap()
+ };
assert_eq!(res.status(), cstatus, "server status");
assert_eq!(res.version(), version, "server version");
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -508,19 +519,32 @@ async fn naive_proxy(cfg: ProxyConfig) -> (SocketAddr, impl Future<Output = ()>)
.await
.unwrap();
- let mut builder = Builder::new();
- builder.http2_only(http2_only);
- builder.executor(TokioExecutor);
-
- let (mut sender, conn) = builder.handshake(stream).await.unwrap();
-
- tokio::task::spawn(async move {
- if let Err(err) = conn.await {
- panic!("{:?}", err);
- }
- });
-
- let resp = sender.send_request(req).await?;
+ let resp = if http2_only {
+ let (mut sender, conn) = hyper::client::conn::http2::Builder::new()
+ .executor(TokioExecutor)
+ .handshake(stream)
+ .await
+ .unwrap();
+
+ tokio::task::spawn(async move {
+ if let Err(err) = conn.await {
+ panic!("{:?}", err);
+ }
+ });
+
+ sender.send_request(req).await?
+ } else {
+ let builder = hyper::client::conn::http1::Builder::new();
+ let (mut sender, conn) = builder.handshake(stream).await.unwrap();
+
+ tokio::task::spawn(async move {
+ if let Err(err) = conn.await {
+ panic!("{:?}", err);
+ }
+ });
+
+ sender.send_request(req).await?
+ };
let (mut parts, body) = resp.into_parts();
|
hyperium/hyper
|
6fd696e10974f10b2c6b9d16393bbbfa21c2333f
|
[
"2857"
] |
0.3
|
fee7d361c28c7eb42ef6bbfae0db14028d24bfee
|
diff --git a/Cargo.toml b/Cargo.toml
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -54,6 +54,7 @@ serde_json = "1.0"
tokio = { version = "1", features = [
"fs",
"macros",
+ "net",
"io-std",
"io-util",
"rt",
diff --git a/Cargo.toml b/Cargo.toml
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -79,7 +80,6 @@ full = [
"http1",
"http2",
"server",
- "runtime",
]
# HTTP versions
diff --git a/Cargo.toml b/Cargo.toml
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -90,12 +90,6 @@ http2 = ["h2"]
client = []
server = []
-# Tokio support
-runtime = [
- "tokio/net",
- "tokio/rt",
-]
-
# C-API support (currently unstable (no semver))
ffi = ["libc"]
diff --git a/benches/support/tokiort.rs b/benches/support/tokiort.rs
--- a/benches/support/tokiort.rs
+++ b/benches/support/tokiort.rs
@@ -9,9 +9,20 @@ use std::{
use futures_util::Future;
use hyper::rt::{Sleep, Timer};
+#[derive(Clone)]
/// An Executor that uses the tokio runtime.
pub struct TokioExecutor;
+impl<F> hyper::rt::Executor<F> for TokioExecutor
+where
+ F: std::future::Future + Send + 'static,
+ F::Output: Send + 'static,
+{
+ fn execute(&self, fut: F) {
+ tokio::task::spawn(fut);
+ }
+}
+
/// A Timer that uses the tokio runtime.
#[derive(Clone, Debug)]
diff --git a/src/client/conn/http2.rs b/src/client/conn/http2.rs
--- a/src/client/conn/http2.rs
+++ b/src/client/conn/http2.rs
@@ -4,15 +4,13 @@ use std::error::Error as StdError;
use std::fmt;
use std::marker::PhantomData;
use std::sync::Arc;
-#[cfg(feature = "runtime")]
use std::time::Duration;
use http::{Request, Response};
use tokio::io::{AsyncRead, AsyncWrite};
-use crate::Recv;
-use crate::body::Body;
use super::super::dispatch;
+use crate::body::Body;
use crate::common::time::Time;
use crate::common::{
exec::{BoxSendFuture, Exec},
diff --git a/src/client/conn/http2.rs b/src/client/conn/http2.rs
--- a/src/client/conn/http2.rs
+++ b/src/client/conn/http2.rs
@@ -20,6 +18,7 @@ use crate::common::{
};
use crate::proto;
use crate::rt::{Executor, Timer};
+use crate::Recv;
/// The sender side of an established connection.
pub struct SendRequest<B> {
diff --git a/src/client/conn/http2.rs b/src/client/conn/http2.rs
--- a/src/client/conn/http2.rs
+++ b/src/client/conn/http2.rs
@@ -309,11 +308,6 @@ impl Builder {
/// Pass `None` to disable HTTP2 keep-alive.
///
/// Default is currently disabled.
- ///
- /// # Cargo Feature
- ///
- /// Requires the `runtime` cargo feature to be enabled.
- #[cfg(feature = "runtime")]
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub fn http2_keep_alive_interval(
diff --git a/src/client/conn/http2.rs b/src/client/conn/http2.rs
--- a/src/client/conn/http2.rs
+++ b/src/client/conn/http2.rs
@@ -330,11 +324,6 @@ impl Builder {
/// be closed. Does nothing if `http2_keep_alive_interval` is disabled.
///
/// Default is 20 seconds.
- ///
- /// # Cargo Feature
- ///
- /// Requires the `runtime` cargo feature to be enabled.
- #[cfg(feature = "runtime")]
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub fn http2_keep_alive_timeout(&mut self, timeout: Duration) -> &mut Self {
diff --git a/src/client/conn/http2.rs b/src/client/conn/http2.rs
--- a/src/client/conn/http2.rs
+++ b/src/client/conn/http2.rs
@@ -350,11 +339,6 @@ impl Builder {
/// disabled.
///
/// Default is `false`.
- ///
- /// # Cargo Feature
- ///
- /// Requires the `runtime` cargo feature to be enabled.
- #[cfg(feature = "runtime")]
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub fn http2_keep_alive_while_idle(&mut self, enabled: bool) -> &mut Self {
diff --git a/src/client/conn/http2.rs b/src/client/conn/http2.rs
--- a/src/client/conn/http2.rs
+++ b/src/client/conn/http2.rs
@@ -416,9 +400,12 @@ impl Builder {
let h2 = proto::h2::client::handshake(io, rx, &opts.h2_builder, opts.exec, opts.timer)
.await?;
Ok((
- SendRequest { dispatch: tx.unbound() },
- //SendRequest { dispatch: tx },
- Connection { inner: (PhantomData, h2) },
+ SendRequest {
+ dispatch: tx.unbound(),
+ },
+ Connection {
+ inner: (PhantomData, h2),
+ },
))
}
}
diff --git a/src/client/conn/mod.rs b/src/client/conn/mod.rs
--- a/src/client/conn/mod.rs
+++ b/src/client/conn/mod.rs
@@ -11,7 +11,7 @@
//! ## Example
//! A simple example that uses the `SendRequest` struct to talk HTTP over a Tokio TCP stream
//! ```no_run
-//! # #[cfg(all(feature = "client", feature = "http1", feature = "runtime"))]
+//! # #[cfg(all(feature = "client", feature = "http1"))]
//! # mod rt {
//! use bytes::Bytes;
//! use http::{Request, StatusCode};
diff --git a/src/client/conn/mod.rs b/src/client/conn/mod.rs
--- a/src/client/conn/mod.rs
+++ b/src/client/conn/mod.rs
@@ -57,7 +57,7 @@ use std::fmt;
#[cfg(not(all(feature = "http1", feature = "http2")))]
use std::marker::PhantomData;
use std::sync::Arc;
-#[cfg(all(feature = "runtime", feature = "http2"))]
+#[cfg(feature = "http2")]
use std::time::Duration;
use bytes::Bytes;
diff --git a/src/client/conn/mod.rs b/src/client/conn/mod.rs
--- a/src/client/conn/mod.rs
+++ b/src/client/conn/mod.rs
@@ -79,8 +79,8 @@ use crate::proto;
use crate::rt::Executor;
#[cfg(feature = "http1")]
use crate::upgrade::Upgraded;
-use crate::{Recv, Request, Response};
use crate::{common::time::Time, rt::Timer};
+use crate::{Recv, Request, Response};
#[cfg(feature = "http1")]
pub mod http1;
diff --git a/src/client/conn/mod.rs b/src/client/conn/mod.rs
--- a/src/client/conn/mod.rs
+++ b/src/client/conn/mod.rs
@@ -121,9 +121,7 @@ pin_project! {
///
/// This is a shortcut for `Builder::new().handshake(io)`.
/// See [`client::conn`](crate::client::conn) for more.
-pub async fn handshake<T, B>(
- io: T,
-) -> crate::Result<(SendRequest<B>, Connection<T, B>)>
+pub async fn handshake<T, B>(io: T) -> crate::Result<(SendRequest<B>, Connection<T, B>)>
where
T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
B: Body + 'static,
diff --git a/src/client/conn/mod.rs b/src/client/conn/mod.rs
--- a/src/client/conn/mod.rs
+++ b/src/client/conn/mod.rs
@@ -702,11 +700,6 @@ impl Builder {
/// Pass `None` to disable HTTP2 keep-alive.
///
/// Default is currently disabled.
- ///
- /// # Cargo Feature
- ///
- /// Requires the `runtime` cargo feature to be enabled.
- #[cfg(feature = "runtime")]
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub fn http2_keep_alive_interval(
diff --git a/src/client/conn/mod.rs b/src/client/conn/mod.rs
--- a/src/client/conn/mod.rs
+++ b/src/client/conn/mod.rs
@@ -723,11 +716,6 @@ impl Builder {
/// be closed. Does nothing if `http2_keep_alive_interval` is disabled.
///
/// Default is 20 seconds.
- ///
- /// # Cargo Feature
- ///
- /// Requires the `runtime` cargo feature to be enabled.
- #[cfg(feature = "runtime")]
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub fn http2_keep_alive_timeout(&mut self, timeout: Duration) -> &mut Self {
diff --git a/src/client/conn/mod.rs b/src/client/conn/mod.rs
--- a/src/client/conn/mod.rs
+++ b/src/client/conn/mod.rs
@@ -743,11 +731,6 @@ impl Builder {
/// disabled.
///
/// Default is `false`.
- ///
- /// # Cargo Feature
- ///
- /// Requires the `runtime` cargo feature to be enabled.
- #[cfg(feature = "runtime")]
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub fn http2_keep_alive_while_idle(&mut self, enabled: bool) -> &mut Self {
diff --git a/src/common/exec.rs b/src/common/exec.rs
--- a/src/common/exec.rs
+++ b/src/common/exec.rs
@@ -16,8 +16,9 @@ pub trait ConnStreamExec<F, B: Body>: Clone {
pub(crate) type BoxSendFuture = Pin<Box<dyn Future<Output = ()> + Send>>;
-// Either the user provides an executor for background tasks, or we use
-// `tokio::spawn`.
+// Either the user provides an executor for background tasks, or we panic.
+// TODO: with the `runtime`feature, `Exec::Default` used `tokio::spawn`. With the
+// removal of the opt-in default runtime, this should be refactored.
#[derive(Clone)]
pub enum Exec {
Default,
diff --git a/src/common/exec.rs b/src/common/exec.rs
--- a/src/common/exec.rs
+++ b/src/common/exec.rs
@@ -33,15 +34,7 @@ impl Exec {
{
match *self {
Exec::Default => {
- #[cfg(feature = "runtime")]
- {
- tokio::task::spawn(fut);
- }
-
- #[cfg(not(feature = "runtime"))]
- {
- panic!("executor must be set")
- }
+ panic!("executor must be set");
}
Exec::Executor(ref e) => {
e.execute(Box::pin(fut));
diff --git a/src/common/mod.rs b/src/common/mod.rs
--- a/src/common/mod.rs
+++ b/src/common/mod.rs
@@ -19,7 +19,7 @@ pub(crate) mod task;
pub(crate) mod time;
pub(crate) mod watch;
-#[cfg(any(feature = "http1", feature = "http2", feature = "runtime"))]
+#[cfg(any(feature = "http1", feature = "http2"))]
pub(crate) use self::never::Never;
pub(crate) use self::task::Poll;
diff --git a/src/common/time.rs b/src/common/time.rs
--- a/src/common/time.rs
+++ b/src/common/time.rs
@@ -1,11 +1,9 @@
use std::{fmt, sync::Arc};
-#[cfg(all(feature = "server", feature = "runtime"))]
use std::{
pin::Pin,
time::{Duration, Instant},
};
-#[cfg(all(feature = "server", feature = "runtime"))]
use crate::rt::Sleep;
use crate::rt::Timer;
diff --git a/src/common/time.rs b/src/common/time.rs
--- a/src/common/time.rs
+++ b/src/common/time.rs
@@ -56,7 +54,6 @@ impl<F> Future for HyperTimeout<F> where F: Future {
}
*/
-#[cfg(all(feature = "server", feature = "runtime"))]
impl Time {
pub(crate) fn sleep(&self, duration: Duration) -> Box<dyn Sleep + Unpin> {
match *self {
diff --git a/src/error.rs b/src/error.rs
--- a/src/error.rs
+++ b/src/error.rs
@@ -38,7 +38,7 @@ pub(super) enum Kind {
#[cfg(all(feature = "tcp", feature = "server"))]
Listen,
/// User took too long to send headers
- #[cfg(all(feature = "http1", feature = "server", feature = "runtime"))]
+ #[cfg(all(feature = "http1", feature = "server"))]
HeaderTimeout,
/// Error while reading a body from connection.
#[cfg(any(feature = "http1", feature = "http2"))]
diff --git a/src/error.rs b/src/error.rs
--- a/src/error.rs
+++ b/src/error.rs
@@ -278,7 +278,7 @@ impl Error {
Error::new_user(User::UnexpectedHeader)
}
- #[cfg(all(feature = "http1", feature = "server", feature = "runtime"))]
+ #[cfg(all(feature = "http1", feature = "server"))]
pub(super) fn new_header_timeout() -> Error {
Error::new(Kind::HeaderTimeout)
}
diff --git a/src/error.rs b/src/error.rs
--- a/src/error.rs
+++ b/src/error.rs
@@ -370,7 +370,7 @@ impl Error {
Kind::Canceled => "operation was canceled",
#[cfg(all(feature = "server", feature = "tcp"))]
Kind::Listen => "error creating server listener",
- #[cfg(all(feature = "http1", feature = "server", feature = "runtime"))]
+ #[cfg(all(feature = "http1", feature = "server"))]
Kind::HeaderTimeout => "read header from client timeout",
#[cfg(any(feature = "http1", feature = "http2"))]
Kind::Body => "error reading a body from connection",
diff --git a/src/lib.rs b/src/lib.rs
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -49,8 +49,6 @@
//! - `http2`: Enables HTTP/2 support.
//! - `client`: Enables the HTTP `client`.
//! - `server`: Enables the HTTP `server`.
-//! - `runtime`: Enables convenient integration with `tokio`, providing
-//! connectors and acceptors for TCP, and a default executor.
//!
//! [feature flags]: https://doc.rust-lang.org/cargo/reference/manifest.html#the-features-section
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -1,7 +1,7 @@
use std::fmt;
use std::io;
use std::marker::PhantomData;
-#[cfg(all(feature = "server", feature = "runtime"))]
+#[cfg(feature = "server")]
use std::time::Duration;
use bytes::{Buf, Bytes};
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -14,12 +14,12 @@ use tracing::{debug, error, trace};
use super::io::Buffered;
use super::{Decoder, Encode, EncodedBuf, Encoder, Http1Transaction, ParseContext, Wants};
use crate::body::DecodedLength;
-#[cfg(all(feature = "server", feature = "runtime"))]
+#[cfg(feature = "server")]
use crate::common::time::Time;
use crate::common::{task, Pin, Poll, Unpin};
use crate::headers::connection_keep_alive;
use crate::proto::{BodyLength, MessageHead};
-#[cfg(all(feature = "server", feature = "runtime"))]
+#[cfg(feature = "server")]
use crate::rt::Sleep;
const H2_PREFACE: &[u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n";
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -53,13 +53,13 @@ where
keep_alive: KA::Busy,
method: None,
h1_parser_config: ParserConfig::default(),
- #[cfg(all(feature = "server", feature = "runtime"))]
+ #[cfg(feature = "server")]
h1_header_read_timeout: None,
- #[cfg(all(feature = "server", feature = "runtime"))]
+ #[cfg(feature = "server")]
h1_header_read_timeout_fut: None,
- #[cfg(all(feature = "server", feature = "runtime"))]
+ #[cfg(feature = "server")]
h1_header_read_timeout_running: false,
- #[cfg(all(feature = "server", feature = "runtime"))]
+ #[cfg(feature = "server")]
timer: Time::Empty,
preserve_header_case: false,
#[cfg(feature = "ffi")]
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -82,7 +82,7 @@ where
}
}
- #[cfg(all(feature = "server", feature = "runtime"))]
+ #[cfg(feature = "server")]
pub(crate) fn set_timer(&mut self, timer: Time) {
self.state.timer = timer;
}
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -132,7 +132,7 @@ where
self.state.h09_responses = true;
}
- #[cfg(all(feature = "server", feature = "runtime"))]
+ #[cfg(feature = "server")]
pub(crate) fn set_http1_header_read_timeout(&mut self, val: Duration) {
self.state.h1_header_read_timeout = Some(val);
}
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -205,13 +205,13 @@ where
cached_headers: &mut self.state.cached_headers,
req_method: &mut self.state.method,
h1_parser_config: self.state.h1_parser_config.clone(),
- #[cfg(all(feature = "server", feature = "runtime"))]
+ #[cfg(feature = "server")]
h1_header_read_timeout: self.state.h1_header_read_timeout,
- #[cfg(all(feature = "server", feature = "runtime"))]
+ #[cfg(feature = "server")]
h1_header_read_timeout_fut: &mut self.state.h1_header_read_timeout_fut,
- #[cfg(all(feature = "server", feature = "runtime"))]
+ #[cfg(feature = "server")]
h1_header_read_timeout_running: &mut self.state.h1_header_read_timeout_running,
- #[cfg(all(feature = "server", feature = "runtime"))]
+ #[cfg(feature = "server")]
timer: self.state.timer.clone(),
preserve_header_case: self.state.preserve_header_case,
#[cfg(feature = "ffi")]
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -810,13 +810,13 @@ struct State {
/// a body or not.
method: Option<Method>,
h1_parser_config: ParserConfig,
- #[cfg(all(feature = "server", feature = "runtime"))]
+ #[cfg(feature = "server")]
h1_header_read_timeout: Option<Duration>,
- #[cfg(all(feature = "server", feature = "runtime"))]
+ #[cfg(feature = "server")]
h1_header_read_timeout_fut: Option<Pin<Box<dyn Sleep>>>,
- #[cfg(all(feature = "server", feature = "runtime"))]
+ #[cfg(feature = "server")]
h1_header_read_timeout_running: bool,
- #[cfg(all(feature = "server", feature = "runtime"))]
+ #[cfg(feature = "server")]
timer: Time,
preserve_header_case: bool,
#[cfg(feature = "ffi")]
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -95,7 +95,10 @@ impl Decoder {
// methods
pub(crate) fn is_eof(&self) -> bool {
- matches!(self.kind, Length(0) | Chunked(ChunkedState::End, _) | Eof(true))
+ matches!(
+ self.kind,
+ Length(0) | Chunked(ChunkedState::End, _) | Eof(true)
+ )
}
pub(crate) fn decode<R: MemRead>(
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -6,7 +6,7 @@ use tokio::io::{AsyncRead, AsyncWrite};
use tracing::{debug, trace};
use super::{Http1Transaction, Wants};
-use crate::body::{Recv, DecodedLength, Body};
+use crate::body::{Body, DecodedLength, Recv};
use crate::common::{task, Future, Pin, Poll, Unpin};
use crate::proto::{BodyLength, Conn, Dispatched, MessageHead, RequestHead};
use crate::upgrade::OnUpgrade;
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -1,6 +1,6 @@
use std::cmp;
use std::fmt;
-#[cfg(all(feature = "server", feature = "runtime"))]
+#[cfg(feature = "server")]
use std::future::Future;
use std::io::{self, IoSlice};
use std::marker::Unpin;
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -183,13 +183,13 @@ where
cached_headers: parse_ctx.cached_headers,
req_method: parse_ctx.req_method,
h1_parser_config: parse_ctx.h1_parser_config.clone(),
- #[cfg(all(feature = "server", feature = "runtime"))]
+ #[cfg(feature = "server")]
h1_header_read_timeout: parse_ctx.h1_header_read_timeout,
- #[cfg(all(feature = "server", feature = "runtime"))]
+ #[cfg(feature = "server")]
h1_header_read_timeout_fut: parse_ctx.h1_header_read_timeout_fut,
- #[cfg(all(feature = "server", feature = "runtime"))]
+ #[cfg(feature = "server")]
h1_header_read_timeout_running: parse_ctx.h1_header_read_timeout_running,
- #[cfg(all(feature = "server", feature = "runtime"))]
+ #[cfg(feature = "server")]
timer: parse_ctx.timer.clone(),
preserve_header_case: parse_ctx.preserve_header_case,
#[cfg(feature = "ffi")]
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -204,7 +204,7 @@ where
Some(msg) => {
debug!("parsed {} headers", msg.head.headers.len());
- #[cfg(all(feature = "server", feature = "runtime"))]
+ #[cfg(feature = "server")]
{
*parse_ctx.h1_header_read_timeout_running = false;
parse_ctx.h1_header_read_timeout_fut.take();
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -218,7 +218,7 @@ where
return Poll::Ready(Err(crate::Error::new_too_large()));
}
- #[cfg(all(feature = "server", feature = "runtime"))]
+ #[cfg(feature = "server")]
if *parse_ctx.h1_header_read_timeout_running {
if let Some(h1_header_read_timeout_fut) =
parse_ctx.h1_header_read_timeout_fut
diff --git a/src/proto/h1/mod.rs b/src/proto/h1/mod.rs
--- a/src/proto/h1/mod.rs
+++ b/src/proto/h1/mod.rs
@@ -1,4 +1,4 @@
-#[cfg(all(feature = "server", feature = "runtime"))]
+#[cfg(feature = "server")]
use std::{pin::Pin, time::Duration};
use bytes::BytesMut;
diff --git a/src/proto/h1/mod.rs b/src/proto/h1/mod.rs
--- a/src/proto/h1/mod.rs
+++ b/src/proto/h1/mod.rs
@@ -6,10 +6,10 @@ use http::{HeaderMap, Method};
use httparse::ParserConfig;
use crate::body::DecodedLength;
-#[cfg(all(feature = "server", feature = "runtime"))]
+#[cfg(feature = "server")]
use crate::common::time::Time;
use crate::proto::{BodyLength, MessageHead};
-#[cfg(all(feature = "server", feature = "runtime"))]
+#[cfg(feature = "server")]
use crate::rt::Sleep;
pub(crate) use self::conn::Conn;
diff --git a/src/proto/h1/mod.rs b/src/proto/h1/mod.rs
--- a/src/proto/h1/mod.rs
+++ b/src/proto/h1/mod.rs
@@ -78,13 +78,13 @@ pub(crate) struct ParseContext<'a> {
cached_headers: &'a mut Option<HeaderMap>,
req_method: &'a mut Option<Method>,
h1_parser_config: ParserConfig,
- #[cfg(all(feature = "server", feature = "runtime"))]
+ #[cfg(feature = "server")]
h1_header_read_timeout: Option<Duration>,
- #[cfg(all(feature = "server", feature = "runtime"))]
+ #[cfg(feature = "server")]
h1_header_read_timeout_fut: &'a mut Option<Pin<Box<dyn Sleep>>>,
- #[cfg(all(feature = "server", feature = "runtime"))]
+ #[cfg(feature = "server")]
h1_header_read_timeout_running: &'a mut bool,
- #[cfg(all(feature = "server", feature = "runtime"))]
+ #[cfg(feature = "server")]
timer: Time,
preserve_header_case: bool,
#[cfg(feature = "ffi")]
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1,6 +1,6 @@
use std::fmt::{self, Write};
use std::mem::MaybeUninit;
-#[cfg(all(feature = "server", feature = "runtime"))]
+#[cfg(feature = "server")]
use std::time::Instant;
use bytes::Bytes;
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -75,7 +75,7 @@ where
let span = trace_span!("parse_headers");
let _s = span.enter();
- #[cfg(all(feature = "server", feature = "runtime"))]
+ #[cfg(feature = "server")]
if !*ctx.h1_header_read_timeout_running {
if let Some(h1_header_read_timeout) = ctx.h1_header_read_timeout {
let deadline = Instant::now() + h1_header_read_timeout;
diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs
--- a/src/proto/h2/client.rs
+++ b/src/proto/h2/client.rs
@@ -1,5 +1,4 @@
use std::error::Error as StdError;
-#[cfg(feature = "runtime")]
use std::time::Duration;
use bytes::Bytes;
diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs
--- a/src/proto/h2/client.rs
+++ b/src/proto/h2/client.rs
@@ -46,11 +45,8 @@ pub(crate) struct Config {
pub(crate) initial_conn_window_size: u32,
pub(crate) initial_stream_window_size: u32,
pub(crate) max_frame_size: u32,
- #[cfg(feature = "runtime")]
pub(crate) keep_alive_interval: Option<Duration>,
- #[cfg(feature = "runtime")]
pub(crate) keep_alive_timeout: Duration,
- #[cfg(feature = "runtime")]
pub(crate) keep_alive_while_idle: bool,
pub(crate) max_concurrent_reset_streams: Option<usize>,
pub(crate) max_send_buffer_size: usize,
diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs
--- a/src/proto/h2/client.rs
+++ b/src/proto/h2/client.rs
@@ -63,11 +59,8 @@ impl Default for Config {
initial_conn_window_size: DEFAULT_CONN_WINDOW,
initial_stream_window_size: DEFAULT_STREAM_WINDOW,
max_frame_size: DEFAULT_MAX_FRAME_SIZE,
- #[cfg(feature = "runtime")]
keep_alive_interval: None,
- #[cfg(feature = "runtime")]
keep_alive_timeout: Duration::from_secs(20),
- #[cfg(feature = "runtime")]
keep_alive_while_idle: false,
max_concurrent_reset_streams: None,
max_send_buffer_size: DEFAULT_MAX_SEND_BUF_SIZE,
diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs
--- a/src/proto/h2/client.rs
+++ b/src/proto/h2/client.rs
@@ -96,11 +89,8 @@ fn new_ping_config(config: &Config) -> ping::Config {
} else {
None
},
- #[cfg(feature = "runtime")]
keep_alive_interval: config.keep_alive_interval,
- #[cfg(feature = "runtime")]
keep_alive_timeout: config.keep_alive_timeout,
- #[cfg(feature = "runtime")]
keep_alive_while_idle: config.keep_alive_while_idle,
}
}
diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs
--- a/src/proto/h2/client.rs
+++ b/src/proto/h2/client.rs
@@ -147,7 +137,6 @@ where
conn.set_target_window_size(wnd);
conn.set_initial_window_size(wnd)?;
}
- #[cfg(feature = "runtime")]
Poll::Ready(ping::Ponged::KeepAliveTimedOut) => {
debug!("connection keep-alive timed out");
return Poll::Ready(Ok(()));
diff --git a/src/proto/h2/ping.rs b/src/proto/h2/ping.rs
--- a/src/proto/h2/ping.rs
+++ b/src/proto/h2/ping.rs
@@ -18,24 +18,17 @@
/// 3b. Merge RTT with a running average.
/// 3c. Calculate bdp as bytes/rtt.
/// 3d. If bdp is over 2/3 max, set new max to bdp and update windows.
-
-#[cfg(feature = "runtime")]
use std::fmt;
-#[cfg(feature = "runtime")]
use std::future::Future;
-#[cfg(feature = "runtime")]
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use std::task::{self, Poll};
use std::time::{Duration, Instant};
-
use h2::{Ping, PingPong};
use tracing::{debug, trace};
-#[cfg_attr(not(feature = "runtime"), allow(unused))]
use crate::common::time::Time;
-#[cfg_attr(not(feature = "runtime"), allow(unused))]
use crate::rt::Sleep;
type WindowSize = u32;
diff --git a/src/proto/h2/ping.rs b/src/proto/h2/ping.rs
--- a/src/proto/h2/ping.rs
+++ b/src/proto/h2/ping.rs
@@ -64,7 +57,6 @@ pub(super) fn channel(ping_pong: PingPong, config: Config, __timer: Time) -> (Re
(None, None)
};
- #[cfg(feature = "runtime")]
let keep_alive = config.keep_alive_interval.map(|interval| KeepAlive {
interval,
timeout: config.keep_alive_timeout,
diff --git a/src/proto/h2/ping.rs b/src/proto/h2/ping.rs
--- a/src/proto/h2/ping.rs
+++ b/src/proto/h2/ping.rs
@@ -74,14 +66,11 @@ pub(super) fn channel(ping_pong: PingPong, config: Config, __timer: Time) -> (Re
timer: __timer,
});
- #[cfg(feature = "runtime")]
let last_read_at = keep_alive.as_ref().map(|_| Instant::now());
let shared = Arc::new(Mutex::new(Shared {
bytes,
- #[cfg(feature = "runtime")]
last_read_at,
- #[cfg(feature = "runtime")]
is_keep_alive_timed_out: false,
ping_pong,
ping_sent_at: None,
diff --git a/src/proto/h2/ping.rs b/src/proto/h2/ping.rs
--- a/src/proto/h2/ping.rs
+++ b/src/proto/h2/ping.rs
@@ -94,7 +83,6 @@ pub(super) fn channel(ping_pong: PingPong, config: Config, __timer: Time) -> (Re
},
Ponger {
bdp,
- #[cfg(feature = "runtime")]
keep_alive,
shared,
},
diff --git a/src/proto/h2/ping.rs b/src/proto/h2/ping.rs
--- a/src/proto/h2/ping.rs
+++ b/src/proto/h2/ping.rs
@@ -105,14 +93,11 @@ pub(super) fn channel(ping_pong: PingPong, config: Config, __timer: Time) -> (Re
pub(super) struct Config {
pub(super) bdp_initial_window: Option<WindowSize>,
/// If no frames are received in this amount of time, a PING frame is sent.
- #[cfg(feature = "runtime")]
pub(super) keep_alive_interval: Option<Duration>,
/// After sending a keepalive PING, the connection will be closed if
/// a pong is not received in this amount of time.
- #[cfg(feature = "runtime")]
pub(super) keep_alive_timeout: Duration,
/// If true, sends pings even when there are no active streams.
- #[cfg(feature = "runtime")]
pub(super) keep_alive_while_idle: bool,
}
diff --git a/src/proto/h2/ping.rs b/src/proto/h2/ping.rs
--- a/src/proto/h2/ping.rs
+++ b/src/proto/h2/ping.rs
@@ -123,7 +108,6 @@ pub(crate) struct Recorder {
pub(super) struct Ponger {
bdp: Option<Bdp>,
- #[cfg(feature = "runtime")]
keep_alive: Option<KeepAlive>,
shared: Arc<Mutex<Shared>>,
}
diff --git a/src/proto/h2/ping.rs b/src/proto/h2/ping.rs
--- a/src/proto/h2/ping.rs
+++ b/src/proto/h2/ping.rs
@@ -143,10 +127,8 @@ struct Shared {
// keep-alive
/// If `Some`, keep-alive is enabled, and the Instant is how long ago
/// the connection read the last frame.
- #[cfg(feature = "runtime")]
last_read_at: Option<Instant>,
- #[cfg(feature = "runtime")]
is_keep_alive_timed_out: bool,
}
diff --git a/src/proto/h2/ping.rs b/src/proto/h2/ping.rs
--- a/src/proto/h2/ping.rs
+++ b/src/proto/h2/ping.rs
@@ -165,7 +147,6 @@ struct Bdp {
stable_count: u32,
}
-#[cfg(feature = "runtime")]
struct KeepAlive {
/// If no frames are received in this amount of time, a PING frame is sent.
interval: Duration,
diff --git a/src/proto/h2/ping.rs b/src/proto/h2/ping.rs
--- a/src/proto/h2/ping.rs
+++ b/src/proto/h2/ping.rs
@@ -174,13 +155,11 @@ struct KeepAlive {
timeout: Duration,
/// If true, sends pings even when there are no active streams.
while_idle: bool,
-
state: KeepAliveState,
sleep: Pin<Box<dyn Sleep>>,
timer: Time,
}
-#[cfg(feature = "runtime")]
enum KeepAliveState {
Init,
Scheduled(Instant),
diff --git a/src/proto/h2/ping.rs b/src/proto/h2/ping.rs
--- a/src/proto/h2/ping.rs
+++ b/src/proto/h2/ping.rs
@@ -189,11 +168,9 @@ enum KeepAliveState {
pub(super) enum Ponged {
SizeUpdate(WindowSize),
- #[cfg(feature = "runtime")]
KeepAliveTimedOut,
}
-#[cfg(feature = "runtime")]
#[derive(Debug)]
pub(super) struct KeepAliveTimedOut;
diff --git a/src/proto/h2/ping.rs b/src/proto/h2/ping.rs
--- a/src/proto/h2/ping.rs
+++ b/src/proto/h2/ping.rs
@@ -201,15 +178,7 @@ pub(super) struct KeepAliveTimedOut;
impl Config {
pub(super) fn is_enabled(&self) -> bool {
- #[cfg(feature = "runtime")]
- {
- self.bdp_initial_window.is_some() || self.keep_alive_interval.is_some()
- }
-
- #[cfg(not(feature = "runtime"))]
- {
- self.bdp_initial_window.is_some()
- }
+ self.bdp_initial_window.is_some() || self.keep_alive_interval.is_some()
}
}
diff --git a/src/proto/h2/ping.rs b/src/proto/h2/ping.rs
--- a/src/proto/h2/ping.rs
+++ b/src/proto/h2/ping.rs
@@ -225,7 +194,6 @@ impl Recorder {
let mut locked = shared.lock().unwrap();
- #[cfg(feature = "runtime")]
locked.update_last_read_at();
// are we ready to send another bdp ping?
diff --git a/src/proto/h2/ping.rs b/src/proto/h2/ping.rs
--- a/src/proto/h2/ping.rs
+++ b/src/proto/h2/ping.rs
@@ -252,18 +220,15 @@ impl Recorder {
}
pub(crate) fn record_non_data(&self) {
- #[cfg(feature = "runtime")]
- {
- let shared = if let Some(ref shared) = self.shared {
- shared
- } else {
- return;
- };
+ let shared = if let Some(ref shared) = self.shared {
+ shared
+ } else {
+ return;
+ };
- let mut locked = shared.lock().unwrap();
+ let mut locked = shared.lock().unwrap();
- locked.update_last_read_at();
- }
+ locked.update_last_read_at();
}
/// If the incoming stream is already closed, convert self into
diff --git a/src/proto/h2/ping.rs b/src/proto/h2/ping.rs
--- a/src/proto/h2/ping.rs
+++ b/src/proto/h2/ping.rs
@@ -278,13 +243,10 @@ impl Recorder {
}
pub(super) fn ensure_not_timed_out(&self) -> crate::Result<()> {
- #[cfg(feature = "runtime")]
- {
- if let Some(ref shared) = self.shared {
- let locked = shared.lock().unwrap();
- if locked.is_keep_alive_timed_out {
- return Err(KeepAliveTimedOut.crate_error());
- }
+ if let Some(ref shared) = self.shared {
+ let locked = shared.lock().unwrap();
+ if locked.is_keep_alive_timed_out {
+ return Err(KeepAliveTimedOut.crate_error());
}
}
diff --git a/src/proto/h2/ping.rs b/src/proto/h2/ping.rs
--- a/src/proto/h2/ping.rs
+++ b/src/proto/h2/ping.rs
@@ -299,15 +261,11 @@ impl Ponger {
pub(super) fn poll(&mut self, cx: &mut task::Context<'_>) -> Poll<Ponged> {
let now = Instant::now();
let mut locked = self.shared.lock().unwrap();
- #[cfg(feature = "runtime")]
let is_idle = self.is_idle();
- #[cfg(feature = "runtime")]
- {
- if let Some(ref mut ka) = self.keep_alive {
- ka.maybe_schedule(is_idle, &locked);
- ka.maybe_ping(cx, &mut locked);
- }
+ if let Some(ref mut ka) = self.keep_alive {
+ ka.maybe_schedule(is_idle, &locked);
+ ka.maybe_ping(cx, &mut locked);
}
if !locked.is_ping_sent() {
diff --git a/src/proto/h2/ping.rs b/src/proto/h2/ping.rs
--- a/src/proto/h2/ping.rs
+++ b/src/proto/h2/ping.rs
@@ -324,13 +282,10 @@ impl Ponger {
let rtt = now - start;
trace!("recv pong");
- #[cfg(feature = "runtime")]
- {
- if let Some(ref mut ka) = self.keep_alive {
- locked.update_last_read_at();
- ka.maybe_schedule(is_idle, &locked);
- ka.maybe_ping(cx, &mut locked);
- }
+ if let Some(ref mut ka) = self.keep_alive {
+ locked.update_last_read_at();
+ ka.maybe_schedule(is_idle, &locked);
+ ka.maybe_ping(cx, &mut locked);
}
if let Some(ref mut bdp) = self.bdp {
diff --git a/src/proto/h2/ping.rs b/src/proto/h2/ping.rs
--- a/src/proto/h2/ping.rs
+++ b/src/proto/h2/ping.rs
@@ -349,14 +304,11 @@ impl Ponger {
debug!("pong error: {}", e);
}
Poll::Pending => {
- #[cfg(feature = "runtime")]
- {
- if let Some(ref mut ka) = self.keep_alive {
- if let Err(KeepAliveTimedOut) = ka.maybe_timeout(cx) {
- self.keep_alive = None;
- locked.is_keep_alive_timed_out = true;
- return Poll::Ready(Ponged::KeepAliveTimedOut);
- }
+ if let Some(ref mut ka) = self.keep_alive {
+ if let Err(KeepAliveTimedOut) = ka.maybe_timeout(cx) {
+ self.keep_alive = None;
+ locked.is_keep_alive_timed_out = true;
+ return Poll::Ready(Ponged::KeepAliveTimedOut);
}
}
}
diff --git a/src/proto/h2/ping.rs b/src/proto/h2/ping.rs
--- a/src/proto/h2/ping.rs
+++ b/src/proto/h2/ping.rs
@@ -366,7 +318,6 @@ impl Ponger {
Poll::Pending
}
- #[cfg(feature = "runtime")]
fn is_idle(&self) -> bool {
Arc::strong_count(&self.shared) <= 2
}
diff --git a/src/proto/h2/ping.rs b/src/proto/h2/ping.rs
--- a/src/proto/h2/ping.rs
+++ b/src/proto/h2/ping.rs
@@ -391,14 +342,12 @@ impl Shared {
self.ping_sent_at.is_some()
}
- #[cfg(feature = "runtime")]
fn update_last_read_at(&mut self) {
if self.last_read_at.is_some() {
self.last_read_at = Some(Instant::now());
}
}
- #[cfg(feature = "runtime")]
fn last_read_at(&self) -> Instant {
self.last_read_at.expect("keep_alive expects last_read_at")
}
diff --git a/src/proto/h2/ping.rs b/src/proto/h2/ping.rs
--- a/src/proto/h2/ping.rs
+++ b/src/proto/h2/ping.rs
@@ -474,7 +423,6 @@ fn seconds(dur: Duration) -> f64 {
// ===== impl KeepAlive =====
-#[cfg(feature = "runtime")]
impl KeepAlive {
fn maybe_schedule(&mut self, is_idle: bool, shared: &Shared) {
match self.state {
diff --git a/src/proto/h2/ping.rs b/src/proto/h2/ping.rs
--- a/src/proto/h2/ping.rs
+++ b/src/proto/h2/ping.rs
@@ -539,21 +487,18 @@ impl KeepAlive {
// ===== impl KeepAliveTimedOut =====
-#[cfg(feature = "runtime")]
impl KeepAliveTimedOut {
pub(super) fn crate_error(self) -> crate::Error {
crate::Error::new(crate::error::Kind::Http2).with(self)
}
}
-#[cfg(feature = "runtime")]
impl fmt::Display for KeepAliveTimedOut {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("keep-alive timed out")
}
}
-#[cfg(feature = "runtime")]
impl std::error::Error for KeepAliveTimedOut {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
Some(&crate::error::TimedOut)
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -1,6 +1,6 @@
use std::error::Error as StdError;
use std::marker::Unpin;
-#[cfg(feature = "runtime")]
+
use std::time::Duration;
use bytes::Bytes;
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -47,9 +47,7 @@ pub(crate) struct Config {
pub(crate) max_frame_size: u32,
pub(crate) enable_connect_protocol: bool,
pub(crate) max_concurrent_streams: Option<u32>,
- #[cfg(feature = "runtime")]
pub(crate) keep_alive_interval: Option<Duration>,
- #[cfg(feature = "runtime")]
pub(crate) keep_alive_timeout: Duration,
pub(crate) max_send_buffer_size: usize,
pub(crate) max_header_list_size: u32,
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -64,9 +62,7 @@ impl Default for Config {
max_frame_size: DEFAULT_MAX_FRAME_SIZE,
enable_connect_protocol: false,
max_concurrent_streams: None,
- #[cfg(feature = "runtime")]
keep_alive_interval: None,
- #[cfg(feature = "runtime")]
keep_alive_timeout: Duration::from_secs(20),
max_send_buffer_size: DEFAULT_MAX_SEND_BUF_SIZE,
max_header_list_size: DEFAULT_SETTINGS_MAX_HEADER_LIST_SIZE,
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -146,13 +142,10 @@ where
let ping_config = ping::Config {
bdp_initial_window: bdp,
- #[cfg(feature = "runtime")]
keep_alive_interval: config.keep_alive_interval,
- #[cfg(feature = "runtime")]
keep_alive_timeout: config.keep_alive_timeout,
// If keep-alive is enabled for servers, always enabled while
// idle, so it can more aggressively close dead connections.
- #[cfg(feature = "runtime")]
keep_alive_while_idle: true,
};
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -208,11 +201,7 @@ where
let mut conn = ready!(Pin::new(hs).poll(cx).map_err(crate::Error::new_h2))?;
let ping = if ping_config.is_enabled() {
let pp = conn.ping_pong().expect("conn.ping_pong");
- Some(ping::channel(
- pp,
- ping_config.clone(),
- me.timer.clone(),
- ))
+ Some(ping::channel(pp, ping_config.clone(), me.timer.clone()))
} else {
None
};
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -339,7 +328,6 @@ where
self.conn.set_target_window_size(wnd);
let _ = self.conn.set_initial_window_size(wnd);
}
- #[cfg(feature = "runtime")]
Poll::Ready(ping::Ponged::KeepAliveTimedOut) => {
debug!("keep-alive timed out, closing connection");
self.conn.abrupt_shutdown(h2::Reason::NO_ERROR);
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -8,7 +8,7 @@
//! ## Example
//! A simple example that uses the `Http` struct to talk HTTP over a Tokio TCP stream
//! ```no_run
-//! # #[cfg(all(feature = "http1", feature = "runtime"))]
+//! # #[cfg(feature = "http1")]
//! # mod rt {
//! use http::{Request, Response, StatusCode};
//! use http_body_util::Full;
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -47,7 +47,7 @@
))]
use std::marker::PhantomData;
use std::sync::Arc;
-#[cfg(all(any(feature = "http1", feature = "http2"), feature = "runtime"))]
+#[cfg(any(feature = "http1", feature = "http2"))]
use std::time::Duration;
#[cfg(feature = "http2")]
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -93,7 +93,7 @@ pub struct Http<E = Exec> {
h1_keep_alive: bool,
h1_title_case_headers: bool,
h1_preserve_header_case: bool,
- #[cfg(all(feature = "http1", feature = "runtime"))]
+ #[cfg(feature = "http1")]
h1_header_read_timeout: Option<Duration>,
h1_writev: Option<bool>,
#[cfg(feature = "http2")]
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -233,7 +233,7 @@ impl Http {
h1_keep_alive: true,
h1_title_case_headers: false,
h1_preserve_header_case: false,
- #[cfg(all(feature = "http1", feature = "runtime"))]
+ #[cfg(feature = "http1")]
h1_header_read_timeout: None,
h1_writev: None,
#[cfg(feature = "http2")]
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -326,8 +326,8 @@ impl<E> Http<E> {
/// transmit the entire header within this time, the connection is closed.
///
/// Default is None.
- #[cfg(all(feature = "http1", feature = "runtime"))]
- #[cfg_attr(docsrs, doc(cfg(all(feature = "http1", feature = "runtime"))))]
+ #[cfg(feature = "http1")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
pub fn http1_header_read_timeout(&mut self, read_timeout: Duration) -> &mut Self {
self.h1_header_read_timeout = Some(read_timeout);
self
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -460,8 +460,6 @@ impl<E> Http<E> {
///
/// # Cargo Feature
///
- /// Requires the `runtime` cargo feature to be enabled.
- #[cfg(feature = "runtime")]
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub fn http2_keep_alive_interval(
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -481,8 +479,6 @@ impl<E> Http<E> {
///
/// # Cargo Feature
///
- /// Requires the `runtime` cargo feature to be enabled.
- #[cfg(feature = "runtime")]
#[cfg(feature = "http2")]
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
pub fn http2_keep_alive_timeout(&mut self, timeout: Duration) -> &mut Self {
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -563,7 +559,7 @@ impl<E> Http<E> {
h1_keep_alive: self.h1_keep_alive,
h1_title_case_headers: self.h1_title_case_headers,
h1_preserve_header_case: self.h1_preserve_header_case,
- #[cfg(all(feature = "http1", feature = "runtime"))]
+ #[cfg(feature = "http1")]
h1_header_read_timeout: self.h1_header_read_timeout,
h1_writev: self.h1_writev,
#[cfg(feature = "http2")]
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -586,7 +582,7 @@ impl<E> Http<E> {
h1_keep_alive: self.h1_keep_alive,
h1_title_case_headers: self.h1_title_case_headers,
h1_preserve_header_case: self.h1_preserve_header_case,
- #[cfg(all(feature = "http1", feature = "runtime"))]
+ #[cfg(feature = "http1")]
h1_header_read_timeout: self.h1_header_read_timeout,
h1_writev: self.h1_writev,
#[cfg(feature = "http2")]
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -638,7 +634,6 @@ impl<E> Http<E> {
macro_rules! h1 {
() => {{
let mut conn = proto::Conn::new(io);
- #[cfg(feature = "runtime")]
{
conn.set_timer(self.timer.clone());
}
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -654,7 +649,7 @@ impl<E> Http<E> {
if self.h1_preserve_header_case {
conn.set_preserve_header_case();
}
- #[cfg(all(feature = "http1", feature = "runtime"))]
+ #[cfg(feature = "http1")]
if let Some(header_read_timeout) = self.h1_header_read_timeout {
conn.set_http1_header_read_timeout(header_read_timeout);
}
|
This is blocked on the timer related issues (like #2848) being done first.
With a `Timer` added, we can now delete all `cfg(feature = "runtime")` stuff from hyper.
|
2022-09-03T12:38:20Z
| 2,975
|
Remove the `runtime` cargo feature
With the removal of the [`tcp` feature](#2856) and the addition of the [`Timer`](https://github.com/hyperium/hyper/issues/2846) trait, we no longer have need of the `runtime` feature.
|
hyperium__hyper-2975
|
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -7,7 +7,7 @@
//! For a small example program simply fetching a URL, take a look at the
//! [full client example](https://github.com/hyperium/hyper/blob/master/examples/client.rs).
-#[cfg(all(test, feature = "runtime"))]
+#[cfg(test)]
mod tests;
cfg_feature! {
diff --git a/src/common/io/rewind.rs b/src/common/io/rewind.rs
--- a/src/common/io/rewind.rs
+++ b/src/common/io/rewind.rs
@@ -113,6 +113,7 @@ mod tests {
use bytes::Bytes;
use tokio::io::AsyncReadExt;
+ #[cfg(not(miri))]
#[tokio::test]
async fn partial_rewind() {
let underlying = [104, 101, 108, 108, 111];
diff --git a/src/common/io/rewind.rs b/src/common/io/rewind.rs
--- a/src/common/io/rewind.rs
+++ b/src/common/io/rewind.rs
@@ -135,6 +136,7 @@ mod tests {
assert_eq!(&buf, &underlying);
}
+ #[cfg(not(miri))]
#[tokio::test]
async fn full_rewind() {
let underlying = [104, 101, 108, 108, 111];
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -1038,7 +1038,7 @@ impl State {
#[cfg(test)]
mod tests {
- #[cfg(feature = "nightly")]
+ #[cfg(all(feature = "nightly", not(miri)))]
#[bench]
fn bench_read_head_short(b: &mut ::test::Bencher) {
use super::*;
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -1048,8 +1048,7 @@ mod tests {
// an empty IO, we'll be skipping and using the read buffer anyways
let io = tokio_test::io::Builder::new().build();
- let mut conn =
- Conn::<_, bytes::Bytes, crate::proto::h1::ServerTransaction>::new(io);
+ let mut conn = Conn::<_, bytes::Bytes, crate::proto::h1::ServerTransaction>::new(io);
*conn.io.read_buf_mut() = ::bytes::BytesMut::from(&s[..]);
conn.state.cached_headers = Some(HeaderMap::with_capacity(2));
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -471,6 +474,7 @@ mod tests {
use crate::mock::AsyncIo;
*/
+ #[cfg(not(miri))]
#[tokio::test]
async fn test_read_chunk_size() {
use std::io::ErrorKind::{InvalidData, InvalidInput, UnexpectedEof};
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -553,6 +557,7 @@ mod tests {
read_err("f0000000000000003\r\n", InvalidData).await;
}
+ #[cfg(not(miri))]
#[tokio::test]
async fn test_read_sized_early_eof() {
let mut bytes = &b"foo bar"[..];
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -562,6 +567,7 @@ mod tests {
assert_eq!(e.kind(), io::ErrorKind::UnexpectedEof);
}
+ #[cfg(not(miri))]
#[tokio::test]
async fn test_read_chunked_early_eof() {
let mut bytes = &b"\
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -574,6 +580,7 @@ mod tests {
assert_eq!(e.kind(), io::ErrorKind::UnexpectedEof);
}
+ #[cfg(not(miri))]
#[tokio::test]
async fn test_read_chunked_single_read() {
let mut mock_buf = &b"10\r\n1234567890abcdef\r\n0\r\n"[..];
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -586,6 +593,7 @@ mod tests {
assert_eq!("1234567890abcdef", &result);
}
+ #[cfg(not(miri))]
#[tokio::test]
async fn test_read_chunked_trailer_with_missing_lf() {
let mut mock_buf = &b"10\r\n1234567890abcdef\r\n0\r\nbad\r\r\n"[..];
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -595,6 +603,7 @@ mod tests {
assert_eq!(e.kind(), io::ErrorKind::InvalidInput);
}
+ #[cfg(not(miri))]
#[tokio::test]
async fn test_read_chunked_after_eof() {
let mut mock_buf = &b"10\r\n1234567890abcdef\r\n0\r\n\r\n"[..];
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -659,12 +668,14 @@ mod tests {
}
}
+ #[cfg(not(miri))]
#[tokio::test]
async fn test_read_length_async() {
let content = "foobar";
all_async_cases(content, content, Decoder::length(content.len() as u64)).await;
}
+ #[cfg(not(miri))]
#[tokio::test]
async fn test_read_chunked_async() {
let content = "3\r\nfoo\r\n3\r\nbar\r\n0\r\n\r\n";
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -672,13 +683,14 @@ mod tests {
all_async_cases(content, expected, Decoder::chunked()).await;
}
+ #[cfg(not(miri))]
#[tokio::test]
async fn test_read_eof_async() {
let content = "foobar";
all_async_cases(content, content, Decoder::eof()).await;
}
- #[cfg(feature = "nightly")]
+ #[cfg(all(feature = "nightly", not(miri)))]
#[bench]
fn bench_decode_chunked_1kb(b: &mut test::Bencher) {
let rt = new_runtime();
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -702,7 +714,7 @@ mod tests {
});
}
- #[cfg(feature = "nightly")]
+ #[cfg(all(feature = "nightly", not(miri)))]
#[bench]
fn bench_decode_length_1kb(b: &mut test::Bencher) {
let rt = new_runtime();
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -681,6 +681,7 @@ mod tests {
});
}
+ #[cfg(not(miri))]
#[tokio::test]
async fn client_flushing_is_not_ready_for_next_request() {
let _ = pretty_env_logger::try_init();
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -704,10 +705,7 @@ mod tests {
body
};
- let req = crate::Request::builder()
- .method("POST")
- .body(body)
- .unwrap();
+ let req = crate::Request::builder().method("POST").body(body).unwrap();
let res = tx.try_send(req).unwrap().await.expect("response");
drop(res);
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -715,6 +713,7 @@ mod tests {
assert!(!tx.is_ready());
}
+ #[cfg(not(miri))]
#[tokio::test]
async fn body_empty_chunks_ignored() {
let _ = pretty_env_logger::try_init();
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -705,6 +705,7 @@ mod tests {
// io_buf.flush().await.expect("should short-circuit flush");
}
+ #[cfg(not(miri))]
#[tokio::test]
async fn parse_reads_until_blocked() {
use crate::proto::h1::ClientTransaction;
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -727,13 +728,9 @@ mod tests {
cached_headers: &mut None,
req_method: &mut None,
h1_parser_config: Default::default(),
- #[cfg(feature = "runtime")]
h1_header_read_timeout: None,
- #[cfg(feature = "runtime")]
h1_header_read_timeout_fut: &mut None,
- #[cfg(feature = "runtime")]
h1_header_read_timeout_running: &mut false,
- #[cfg(feature = "runtime")]
timer: Time::Empty,
preserve_header_case: false,
#[cfg(feature = "ffi")]
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -897,6 +894,7 @@ mod tests {
}
*/
+ #[cfg(not(miri))]
#[tokio::test]
async fn write_buf_flatten() {
let _ = pretty_env_logger::try_init();
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -950,6 +948,7 @@ mod tests {
assert_eq!(write_buf.headers.pos, 0);
}
+ #[cfg(not(miri))]
#[tokio::test]
async fn write_buf_queue_disable_auto() {
let _ = pretty_env_logger::try_init();
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1565,7 +1565,6 @@ fn extend(dst: &mut Vec<u8>, data: &[u8]) {
mod tests {
use bytes::BytesMut;
- #[cfg(feature = "runtime")]
use crate::common::time::Time;
use super::*;
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1581,13 +1580,9 @@ mod tests {
cached_headers: &mut None,
req_method: &mut method,
h1_parser_config: Default::default(),
- #[cfg(feature = "runtime")]
h1_header_read_timeout: None,
- #[cfg(feature = "runtime")]
h1_header_read_timeout_fut: &mut None,
- #[cfg(feature = "runtime")]
h1_header_read_timeout_running: &mut false,
- #[cfg(feature = "runtime")]
timer: Time::Empty,
preserve_header_case: false,
#[cfg(feature = "ffi")]
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1618,13 +1613,9 @@ mod tests {
cached_headers: &mut None,
req_method: &mut Some(crate::Method::GET),
h1_parser_config: Default::default(),
- #[cfg(feature = "runtime")]
h1_header_read_timeout: None,
- #[cfg(feature = "runtime")]
h1_header_read_timeout_fut: &mut None,
- #[cfg(feature = "runtime")]
h1_header_read_timeout_running: &mut false,
- #[cfg(feature = "runtime")]
timer: Time::Empty,
preserve_header_case: false,
#[cfg(feature = "ffi")]
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1650,13 +1641,9 @@ mod tests {
cached_headers: &mut None,
req_method: &mut None,
h1_parser_config: Default::default(),
- #[cfg(feature = "runtime")]
h1_header_read_timeout: None,
- #[cfg(feature = "runtime")]
h1_header_read_timeout_fut: &mut None,
- #[cfg(feature = "runtime")]
h1_header_read_timeout_running: &mut false,
- #[cfg(feature = "runtime")]
timer: Time::Empty,
preserve_header_case: false,
#[cfg(feature = "ffi")]
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1680,13 +1667,9 @@ mod tests {
cached_headers: &mut None,
req_method: &mut Some(crate::Method::GET),
h1_parser_config: Default::default(),
- #[cfg(feature = "runtime")]
h1_header_read_timeout: None,
- #[cfg(feature = "runtime")]
h1_header_read_timeout_fut: &mut None,
- #[cfg(feature = "runtime")]
h1_header_read_timeout_running: &mut false,
- #[cfg(feature = "runtime")]
timer: Time::Empty,
preserve_header_case: false,
#[cfg(feature = "ffi")]
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1712,13 +1695,9 @@ mod tests {
cached_headers: &mut None,
req_method: &mut Some(crate::Method::GET),
h1_parser_config: Default::default(),
- #[cfg(feature = "runtime")]
h1_header_read_timeout: None,
- #[cfg(feature = "runtime")]
h1_header_read_timeout_fut: &mut None,
- #[cfg(feature = "runtime")]
h1_header_read_timeout_running: &mut false,
- #[cfg(feature = "runtime")]
timer: Time::Empty,
preserve_header_case: false,
#[cfg(feature = "ffi")]
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1748,13 +1727,9 @@ mod tests {
cached_headers: &mut None,
req_method: &mut Some(crate::Method::GET),
h1_parser_config,
- #[cfg(feature = "runtime")]
h1_header_read_timeout: None,
- #[cfg(feature = "runtime")]
h1_header_read_timeout_fut: &mut None,
- #[cfg(feature = "runtime")]
h1_header_read_timeout_running: &mut false,
- #[cfg(feature = "runtime")]
timer: Time::Empty,
preserve_header_case: false,
#[cfg(feature = "ffi")]
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1781,13 +1756,9 @@ mod tests {
cached_headers: &mut None,
req_method: &mut Some(crate::Method::GET),
h1_parser_config: Default::default(),
- #[cfg(feature = "runtime")]
h1_header_read_timeout: None,
- #[cfg(feature = "runtime")]
h1_header_read_timeout_fut: &mut None,
- #[cfg(feature = "runtime")]
h1_header_read_timeout_running: &mut false,
- #[cfg(feature = "runtime")]
timer: Time::Empty,
preserve_header_case: false,
#[cfg(feature = "ffi")]
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1809,13 +1780,9 @@ mod tests {
cached_headers: &mut None,
req_method: &mut None,
h1_parser_config: Default::default(),
- #[cfg(feature = "runtime")]
h1_header_read_timeout: None,
- #[cfg(feature = "runtime")]
h1_header_read_timeout_fut: &mut None,
- #[cfg(feature = "runtime")]
h1_header_read_timeout_running: &mut false,
- #[cfg(feature = "runtime")]
timer: Time::Empty,
preserve_header_case: true,
#[cfg(feature = "ffi")]
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1858,13 +1825,9 @@ mod tests {
cached_headers: &mut None,
req_method: &mut None,
h1_parser_config: Default::default(),
- #[cfg(feature = "runtime")]
h1_header_read_timeout: None,
- #[cfg(feature = "runtime")]
h1_header_read_timeout_fut: &mut None,
- #[cfg(feature = "runtime")]
h1_header_read_timeout_running: &mut false,
- #[cfg(feature = "runtime")]
timer: Time::Empty,
preserve_header_case: false,
#[cfg(feature = "ffi")]
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1888,13 +1851,9 @@ mod tests {
cached_headers: &mut None,
req_method: &mut None,
h1_parser_config: Default::default(),
- #[cfg(feature = "runtime")]
h1_header_read_timeout: None,
- #[cfg(feature = "runtime")]
h1_header_read_timeout_fut: &mut None,
- #[cfg(feature = "runtime")]
h1_header_read_timeout_running: &mut false,
- #[cfg(feature = "runtime")]
timer: Time::Empty,
preserve_header_case: false,
#[cfg(feature = "ffi")]
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -2127,13 +2086,9 @@ mod tests {
cached_headers: &mut None,
req_method: &mut Some(Method::GET),
h1_parser_config: Default::default(),
- #[cfg(feature = "runtime")]
h1_header_read_timeout: None,
- #[cfg(feature = "runtime")]
h1_header_read_timeout_fut: &mut None,
- #[cfg(feature = "runtime")]
h1_header_read_timeout_running: &mut false,
- #[cfg(feature = "runtime")]
timer: Time::Empty,
preserve_header_case: false,
#[cfg(feature = "ffi")]
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -2157,13 +2112,9 @@ mod tests {
cached_headers: &mut None,
req_method: &mut Some(m),
h1_parser_config: Default::default(),
- #[cfg(feature = "runtime")]
h1_header_read_timeout: None,
- #[cfg(feature = "runtime")]
h1_header_read_timeout_fut: &mut None,
- #[cfg(feature = "runtime")]
h1_header_read_timeout_running: &mut false,
- #[cfg(feature = "runtime")]
timer: Time::Empty,
preserve_header_case: false,
#[cfg(feature = "ffi")]
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -2187,13 +2138,9 @@ mod tests {
cached_headers: &mut None,
req_method: &mut Some(Method::GET),
h1_parser_config: Default::default(),
- #[cfg(feature = "runtime")]
h1_header_read_timeout: None,
- #[cfg(feature = "runtime")]
h1_header_read_timeout_fut: &mut None,
- #[cfg(feature = "runtime")]
h1_header_read_timeout_running: &mut false,
- #[cfg(feature = "runtime")]
timer: Time::Empty,
preserve_header_case: false,
#[cfg(feature = "ffi")]
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -2712,13 +2659,9 @@ mod tests {
cached_headers: &mut None,
req_method: &mut Some(Method::GET),
h1_parser_config: Default::default(),
- #[cfg(feature = "runtime")]
h1_header_read_timeout: None,
- #[cfg(feature = "runtime")]
h1_header_read_timeout_fut: &mut None,
- #[cfg(feature = "runtime")]
h1_header_read_timeout_running: &mut false,
- #[cfg(feature = "runtime")]
timer: Time::Empty,
preserve_header_case: false,
#[cfg(feature = "ffi")]
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -2806,13 +2749,9 @@ mod tests {
cached_headers: &mut headers,
req_method: &mut None,
h1_parser_config: Default::default(),
- #[cfg(feature = "runtime")]
h1_header_read_timeout: None,
- #[cfg(feature = "runtime")]
h1_header_read_timeout_fut: &mut None,
- #[cfg(feature = "runtime")]
h1_header_read_timeout_running: &mut false,
- #[cfg(feature = "runtime")]
timer: Time::Empty,
preserve_header_case: false,
#[cfg(feature = "ffi")]
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -2856,13 +2795,9 @@ mod tests {
cached_headers: &mut headers,
req_method: &mut None,
h1_parser_config: Default::default(),
- #[cfg(feature = "runtime")]
h1_header_read_timeout: None,
- #[cfg(feature = "runtime")]
h1_header_read_timeout_fut: &mut None,
- #[cfg(feature = "runtime")]
h1_header_read_timeout_running: &mut false,
- #[cfg(feature = "runtime")]
timer: Time::Empty,
preserve_header_case: false,
#[cfg(feature = "ffi")]
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1348,7 +1348,7 @@ mod conn {
use super::{concat, s, support, tcp_connect, FutureHyperExt};
- use support::TokioTimer;
+ use support::{TokioExecutor, TokioTimer};
#[tokio::test]
async fn get() {
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1901,7 +1901,7 @@ mod conn {
let mut shdn_rx = shdn_rx.clone();
tokio::task::spawn(async move {
- let mut conn = Http::new().http2_only(true).serve_connection(stream, service);
+ let mut conn = Http::new().with_executor(TokioExecutor).http2_only(true).serve_connection(stream, service);
tokio::select! {
res = &mut conn => {
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1923,6 +1923,7 @@ mod conn {
let io = tcp_connect(&addr).await.expect("tcp connect");
let (mut client, conn) = conn::Builder::new()
+ .executor(TokioExecutor)
.http2_only(true)
.handshake(io)
.await
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1985,6 +1986,7 @@ mod conn {
let io = tcp_connect(&addr).await.expect("tcp connect");
let (_client, conn) = conn::Builder::new()
+ .executor(TokioExecutor)
.timer(TokioTimer)
.http2_only(true)
.http2_keep_alive_interval(Duration::from_secs(1))
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2019,6 +2021,7 @@ mod conn {
let io = tcp_connect(&addr).await.expect("tcp connect");
let (mut client, conn) = conn::Builder::new()
+ .executor(TokioExecutor)
.timer(TokioTimer)
.http2_only(true)
.http2_keep_alive_interval(Duration::from_secs(1))
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2056,6 +2059,7 @@ mod conn {
let io = tcp_connect(&addr).await.expect("tcp connect");
let (mut client, conn) = conn::Builder::new()
+ .executor(TokioExecutor)
.timer(TokioTimer)
.http2_only(true)
.http2_keep_alive_interval(Duration::from_secs(1))
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2103,6 +2107,7 @@ mod conn {
tokio::spawn(async move {
let sock = listener.accept().await.unwrap().0;
hyper::server::conn::Http::new()
+ .with_executor(TokioExecutor)
.with_timer(TokioTimer)
.http2_only(true)
.serve_connection(
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2122,6 +2127,7 @@ mod conn {
let io = tcp_connect(&addr).await.expect("tcp connect");
let (mut client, conn) = conn::Builder::new()
+ .executor(TokioExecutor)
.timer(TokioTimer)
.http2_only(true)
.http2_keep_alive_interval(Duration::from_secs(1))
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2183,6 +2189,7 @@ mod conn {
let io = tcp_connect(&addr).await.expect("tcp connect");
let (mut client, conn) = conn::Builder::new()
+ .executor(TokioExecutor)
.http2_only(true)
.handshake(io)
.await
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2239,6 +2246,7 @@ mod conn {
let io = tcp_connect(&addr).await.expect("tcp connect");
let (mut client, conn) = conn::Builder::new()
+ .executor(TokioExecutor)
.http2_only(true)
.handshake::<_, Empty<Bytes>>(io)
.await
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -22,7 +22,7 @@ use h2::{RecvStream, SendStream};
use http::header::{HeaderName, HeaderValue};
use http_body_util::{combinators::BoxBody, BodyExt, Empty, Full, StreamBody};
use hyper::rt::Timer;
-use support::TokioTimer;
+use support::{TokioExecutor, TokioTimer};
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::{TcpListener as TkTcpListener, TcpListener, TcpStream as TkTcpStream};
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1820,6 +1820,7 @@ async fn h2_connect() {
let (socket, _) = listener.accept().await.unwrap();
Http::new()
+ .with_executor(TokioExecutor)
.http2_only(true)
.serve_connection(socket, svc)
.with_upgrades()
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1932,6 +1933,7 @@ async fn h2_connect_multiplex() {
let (socket, _) = listener.accept().await.unwrap();
Http::new()
+ .with_executor(TokioExecutor)
.http2_only(true)
.serve_connection(socket, svc)
.with_upgrades()
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2007,6 +2009,7 @@ async fn h2_connect_large_body() {
let (socket, _) = listener.accept().await.unwrap();
Http::new()
+ .with_executor(TokioExecutor)
.http2_only(true)
.serve_connection(socket, svc)
.with_upgrades()
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2079,6 +2082,7 @@ async fn h2_connect_empty_frames() {
let (socket, _) = listener.accept().await.unwrap();
Http::new()
+ .with_executor(TokioExecutor)
.http2_only(true)
.serve_connection(socket, svc)
.with_upgrades()
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2442,6 +2446,7 @@ async fn http2_keep_alive_with_responsive_client() {
let (socket, _) = listener.accept().await.expect("accept");
Http::new()
+ .with_executor(TokioExecutor)
.with_timer(TokioTimer)
.http2_only(true)
.http2_keep_alive_interval(Duration::from_secs(1))
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2453,6 +2458,7 @@ async fn http2_keep_alive_with_responsive_client() {
let tcp = connect_async(addr).await;
let (mut client, conn) = hyper::client::conn::Builder::new()
+ .executor(TokioExecutor)
.http2_only(true)
.handshake(tcp)
.await
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2889,7 +2895,7 @@ impl ServeOptions {
let (stream, _) = res.unwrap();
tokio::task::spawn(async move {
- let mut http = Http::new();
+ let mut http = Http::new().with_executor(TokioExecutor);
#[cfg(feature = "http1")]
let http = http
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -3073,6 +3079,7 @@ impl TestClient {
let mut builder = hyper::client::conn::Builder::new();
builder.http2_only(self.http2_only);
+ builder.executor(TokioExecutor);
let stream = TkTcpStream::connect(format!("{}:{}", host, port))
.await
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -22,7 +22,7 @@ pub use hyper::{HeaderMap, StatusCode};
pub use std::net::SocketAddr;
mod tokiort;
-pub use tokiort::TokioTimer;
+pub use tokiort::{TokioExecutor, TokioTimer};
#[allow(unused_macros)]
macro_rules! t {
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -385,6 +385,7 @@ async fn async_test(cfg: __TestConfig) {
tokio::task::spawn(async move {
Http::new()
+ .with_executor(TokioExecutor)
.http2_only(http2_only)
.serve_connection(stream, service)
.await
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -421,6 +422,7 @@ async fn async_test(cfg: __TestConfig) {
let stream = TcpStream::connect(addr).await.unwrap();
let (mut sender, conn) = hyper::client::conn::Builder::new()
+ .executor(TokioExecutor)
.http2_only(http2_only)
.handshake(stream)
.await
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -508,6 +510,8 @@ async fn naive_proxy(cfg: ProxyConfig) -> (SocketAddr, impl Future<Output = ()>)
let mut builder = Builder::new();
builder.http2_only(http2_only);
+ builder.executor(TokioExecutor);
+
let (mut sender, conn) = builder.handshake(stream).await.unwrap();
tokio::task::spawn(async move {
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -533,6 +537,7 @@ async fn naive_proxy(cfg: ProxyConfig) -> (SocketAddr, impl Future<Output = ()>)
});
Http::new()
+ .with_executor(TokioExecutor)
.http2_only(http2_only)
.serve_connection(stream, service)
.await
|
hyperium/hyper
|
6fd696e10974f10b2c6b9d16393bbbfa21c2333f
|
[
"2848"
] |
0.3
|
2988baa309f726a588d818e6708f8bd38f9d9ce9
|
diff --git a/Cargo.toml b/Cargo.toml
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -96,7 +96,6 @@ server = []
runtime = [
"tokio/net",
"tokio/rt",
- "tokio/time",
]
# C-API support (currently unstable (no semver))
diff --git /dev/null b/benches/support/mod.rs
new file mode 100644
--- /dev/null
+++ b/benches/support/mod.rs
@@ -0,0 +1,3 @@
+
+mod tokiort;
+pub use tokiort::TokioTimer;
\ No newline at end of file
diff --git a/src/client/conn/http1.rs b/src/client/conn/http1.rs
--- a/src/client/conn/http1.rs
+++ b/src/client/conn/http1.rs
@@ -10,14 +10,14 @@ use tokio::io::{AsyncRead, AsyncWrite};
use crate::Recv;
use crate::body::Body;
+use super::super::dispatch;
use crate::common::{
exec::{BoxSendFuture, Exec},
task, Future, Pin, Poll,
};
-use crate::upgrade::Upgraded;
use crate::proto;
-use crate::rt::Executor;
-use super::super::dispatch;
+use crate::rt::{Executor};
+use crate::upgrade::Upgraded;
type Dispatcher<T, B> =
proto::dispatch::Dispatcher<proto::dispatch::Client<B>, B, T, proto::h1::ClientTransaction>;
diff --git a/src/client/conn/http1.rs b/src/client/conn/http1.rs
--- a/src/client/conn/http1.rs
+++ b/src/client/conn/http1.rs
@@ -120,7 +120,10 @@ where
/// before calling this method.
/// - Since absolute-form `Uri`s are not required, if received, they will
/// be serialized as-is.
- pub fn send_request(&mut self, req: Request<B>) -> impl Future<Output = crate::Result<Response<Recv>>> {
+ pub fn send_request(
+ &mut self,
+ req: Request<B>,
+ ) -> impl Future<Output = crate::Result<Response<Recv>>> {
let sent = self.dispatch.send(req);
async move {
diff --git a/src/client/conn/http1.rs b/src/client/conn/http1.rs
--- a/src/client/conn/http1.rs
+++ b/src/client/conn/http1.rs
@@ -130,7 +133,7 @@ where
Ok(Err(err)) => Err(err),
// this is definite bug if it happens, but it shouldn't happen!
Err(_canceled) => panic!("dispatch dropped without returning error"),
- }
+ },
Err(_req) => {
tracing::debug!("connection was not ready");
diff --git a/src/client/conn/http1.rs b/src/client/conn/http1.rs
--- a/src/client/conn/http1.rs
+++ b/src/client/conn/http1.rs
@@ -476,4 +479,3 @@ impl Builder {
}
}
}
-
diff --git a/src/client/conn/http2.rs b/src/client/conn/http2.rs
--- a/src/client/conn/http2.rs
+++ b/src/client/conn/http2.rs
@@ -12,13 +12,14 @@ use tokio::io::{AsyncRead, AsyncWrite};
use crate::Recv;
use crate::body::Body;
+use super::super::dispatch;
+use crate::common::time::Time;
use crate::common::{
exec::{BoxSendFuture, Exec},
task, Future, Pin, Poll,
};
use crate::proto;
-use crate::rt::Executor;
-use super::super::dispatch;
+use crate::rt::{Executor, Timer};
/// The sender side of an established connection.
pub struct SendRequest<B> {
diff --git a/src/client/conn/http2.rs b/src/client/conn/http2.rs
--- a/src/client/conn/http2.rs
+++ b/src/client/conn/http2.rs
@@ -44,6 +45,7 @@ where
#[derive(Clone, Debug)]
pub struct Builder {
pub(super) exec: Exec,
+ pub(super) timer: Time,
h2_builder: proto::h2::client::Config,
}
diff --git a/src/client/conn/http2.rs b/src/client/conn/http2.rs
--- a/src/client/conn/http2.rs
+++ b/src/client/conn/http2.rs
@@ -114,7 +116,10 @@ where
/// before calling this method.
/// - Since absolute-form `Uri`s are not required, if received, they will
/// be serialized as-is.
- pub fn send_request(&mut self, req: Request<B>) -> impl Future<Output = crate::Result<Response<Recv>>> {
+ pub fn send_request(
+ &mut self,
+ req: Request<B>,
+ ) -> impl Future<Output = crate::Result<Response<Recv>>> {
let sent = self.dispatch.send(req);
async move {
diff --git a/src/client/conn/http2.rs b/src/client/conn/http2.rs
--- a/src/client/conn/http2.rs
+++ b/src/client/conn/http2.rs
@@ -124,7 +129,7 @@ where
Ok(Err(err)) => Err(err),
// this is definite bug if it happens, but it shouldn't happen!
Err(_canceled) => panic!("dispatch dropped without returning error"),
- }
+ },
Err(_req) => {
tracing::debug!("connection was not ready");
diff --git a/src/client/conn/http2.rs b/src/client/conn/http2.rs
--- a/src/client/conn/http2.rs
+++ b/src/client/conn/http2.rs
@@ -207,6 +212,7 @@ impl Builder {
pub fn new() -> Builder {
Builder {
exec: Exec::Default,
+ timer: Time::Empty,
h2_builder: Default::default(),
}
}
diff --git a/src/client/conn/http2.rs b/src/client/conn/http2.rs
--- a/src/client/conn/http2.rs
+++ b/src/client/conn/http2.rs
@@ -220,6 +226,15 @@ impl Builder {
self
}
+ /// Provide a timer to execute background HTTP2 tasks.
+ pub fn timer<M>(&mut self, timer: M) -> &mut Builder
+ where
+ M: Timer + Send + Sync + 'static,
+ {
+ self.timer = Time::Timer(Arc::new(timer));
+ self
+ }
+
/// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2
/// stream-level flow control.
///
diff --git a/src/client/conn/http2.rs b/src/client/conn/http2.rs
--- a/src/client/conn/http2.rs
+++ b/src/client/conn/http2.rs
@@ -398,14 +413,13 @@ impl Builder {
tracing::trace!("client handshake HTTP/1");
let (tx, rx) = dispatch::channel();
- let h2 =
- proto::h2::client::handshake(io, rx, &opts.h2_builder, opts.exec)
- .await?;
+ let h2 = proto::h2::client::handshake(io, rx, &opts.h2_builder, opts.exec, opts.timer)
+ .await?;
Ok((
SendRequest { dispatch: tx.unbound() },
+ //SendRequest { dispatch: tx },
Connection { inner: (PhantomData, h2) },
))
}
}
}
-
diff --git a/src/client/conn/mod.rs b/src/client/conn/mod.rs
--- a/src/client/conn/mod.rs
+++ b/src/client/conn/mod.rs
@@ -85,6 +85,7 @@ use crate::rt::Executor;
#[cfg(feature = "http1")]
use crate::upgrade::Upgraded;
use crate::{Recv, Request, Response};
+use crate::{common::time::Time, rt::Timer};
#[cfg(feature = "http1")]
pub mod http1;
diff --git a/src/client/conn/mod.rs b/src/client/conn/mod.rs
--- a/src/client/conn/mod.rs
+++ b/src/client/conn/mod.rs
@@ -161,6 +162,7 @@ where
#[derive(Clone, Debug)]
pub struct Builder {
pub(super) exec: Exec,
+ pub(super) timer: Time,
h09_responses: bool,
h1_parser_config: ParserConfig,
h1_writev: Option<bool>,
diff --git a/src/client/conn/mod.rs b/src/client/conn/mod.rs
--- a/src/client/conn/mod.rs
+++ b/src/client/conn/mod.rs
@@ -418,6 +420,7 @@ impl Builder {
pub fn new() -> Builder {
Builder {
exec: Exec::Default,
+ timer: Time::Empty,
h09_responses: false,
h1_writev: None,
h1_read_buf_exact_size: None,
diff --git a/src/client/conn/mod.rs b/src/client/conn/mod.rs
--- a/src/client/conn/mod.rs
+++ b/src/client/conn/mod.rs
@@ -447,6 +450,15 @@ impl Builder {
self
}
+ /// Provide a timer to execute background HTTP2 tasks.
+ pub fn timer<M>(&mut self, timer: M) -> &mut Builder
+ where
+ M: Timer + Send + Sync + 'static,
+ {
+ self.timer = Time::Timer(Arc::new(timer));
+ self
+ }
+
/// Set whether HTTP/0.9 responses should be tolerated.
///
/// Default is false.
diff --git a/src/client/conn/mod.rs b/src/client/conn/mod.rs
--- a/src/client/conn/mod.rs
+++ b/src/client/conn/mod.rs
@@ -857,9 +869,14 @@ impl Builder {
}
#[cfg(feature = "http2")]
Proto::Http2 => {
- let h2 =
- proto::h2::client::handshake(io, rx, &opts.h2_builder, opts.exec.clone())
- .await?;
+ let h2 = proto::h2::client::handshake(
+ io,
+ rx,
+ &opts.h2_builder,
+ opts.exec.clone(),
+ opts.timer.clone(),
+ )
+ .await?;
ProtoClient::H2 { h2 }
}
};
diff --git a/src/common/mod.rs b/src/common/mod.rs
--- a/src/common/mod.rs
+++ b/src/common/mod.rs
@@ -15,6 +15,8 @@ pub(crate) mod exec;
pub(crate) mod io;
mod never;
pub(crate) mod task;
+#[cfg(any(feature = "http1", feature = "http2", feature = "server"))]
+pub(crate) mod time;
pub(crate) mod watch;
#[cfg(any(feature = "http1", feature = "http2", feature = "runtime"))]
diff --git a/src/common/mod.rs b/src/common/mod.rs
--- a/src/common/mod.rs
+++ b/src/common/mod.rs
@@ -26,3 +28,10 @@ cfg_proto! {
pub(crate) use std::marker::Unpin;
}
pub(crate) use std::{future::Future, pin::Pin};
+
+pub(crate) fn into_pin<T: ?Sized>(boxed: Box<T>) -> Pin<Box<T>> {
+ // It's not possible to move or replace the insides of a `Pin<Box<T>>`
+ // when `T: !Unpin`, so it's safe to pin it directly without any
+ // additional requirements.
+ unsafe { Pin::new_unchecked(boxed) }
+}
diff --git /dev/null b/src/common/time.rs
new file mode 100644
--- /dev/null
+++ b/src/common/time.rs
@@ -0,0 +1,87 @@
+use std::{fmt, sync::Arc};
+#[cfg(all(feature = "server", feature = "runtime"))]
+use std::{
+ pin::Pin,
+ time::{Duration, Instant},
+};
+
+#[cfg(all(feature = "server", feature = "runtime"))]
+use crate::rt::Sleep;
+use crate::rt::Timer;
+
+/// A user-provided timer to time background tasks.
+#[derive(Clone)]
+pub(crate) enum Time {
+ Timer(Arc<dyn Timer + Send + Sync>),
+ Empty,
+}
+
+impl fmt::Debug for Time {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Time").finish()
+ }
+}
+
+/*
+pub(crate) fn timeout<F>(tim: Tim, future: F, duration: Duration) -> HyperTimeout<F> {
+ HyperTimeout { sleep: tim.sleep(duration), future: future }
+}
+
+pin_project_lite::pin_project! {
+ pub(crate) struct HyperTimeout<F> {
+ sleep: Box<dyn Sleep>,
+ #[pin]
+ future: F
+ }
+}
+
+pub(crate) struct Timeout;
+
+impl<F> Future for HyperTimeout<F> where F: Future {
+
+ type Output = Result<F::Output, Timeout>;
+
+ fn poll(self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll<Self::Output>{
+ let mut this = self.project();
+ if let Poll::Ready(v) = this.future.poll(ctx) {
+ return Poll::Ready(Ok(v));
+ }
+
+ if let Poll::Ready(_) = Pin::new(&mut this.sleep).poll(ctx) {
+ return Poll::Ready(Err(Timeout));
+ }
+
+ return Poll::Pending;
+ }
+}
+*/
+
+#[cfg(all(feature = "server", feature = "runtime"))]
+impl Time {
+ pub(crate) fn sleep(&self, duration: Duration) -> Box<dyn Sleep + Unpin> {
+ match *self {
+ Time::Empty => {
+ panic!("You must supply a timer.")
+ }
+ Time::Timer(ref t) => t.sleep(duration),
+ }
+ }
+
+ pub(crate) fn sleep_until(&self, deadline: Instant) -> Box<dyn Sleep + Unpin> {
+ match *self {
+ Time::Empty => {
+ panic!("You must supply a timer.")
+ }
+ Time::Timer(ref t) => t.sleep_until(deadline),
+ }
+ }
+
+ pub(crate) fn reset(&self, sleep: &mut Pin<Box<dyn Sleep>>, new_deadline: Instant) {
+ match *self {
+ Time::Empty => {
+ panic!("You must supply a timer.")
+ }
+ Time::Timer(ref t) => t.reset(sleep, new_deadline),
+ }
+ }
+}
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -9,16 +9,18 @@ use http::header::{HeaderValue, CONNECTION};
use http::{HeaderMap, Method, Version};
use httparse::ParserConfig;
use tokio::io::{AsyncRead, AsyncWrite};
-#[cfg(all(feature = "server", feature = "runtime"))]
-use tokio::time::Sleep;
use tracing::{debug, error, trace};
use super::io::Buffered;
use super::{Decoder, Encode, EncodedBuf, Encoder, Http1Transaction, ParseContext, Wants};
use crate::body::DecodedLength;
+#[cfg(all(feature = "server", feature = "runtime"))]
+use crate::common::time::Time;
use crate::common::{task, Pin, Poll, Unpin};
use crate::headers::connection_keep_alive;
use crate::proto::{BodyLength, MessageHead};
+#[cfg(all(feature = "server", feature = "runtime"))]
+use crate::rt::Sleep;
const H2_PREFACE: &[u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n";
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -57,6 +59,8 @@ where
h1_header_read_timeout_fut: None,
#[cfg(all(feature = "server", feature = "runtime"))]
h1_header_read_timeout_running: false,
+ #[cfg(all(feature = "server", feature = "runtime"))]
+ timer: Time::Empty,
preserve_header_case: false,
#[cfg(feature = "ffi")]
preserve_header_order: false,
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -78,6 +82,11 @@ where
}
}
+ #[cfg(all(feature = "server", feature = "runtime"))]
+ pub(crate) fn set_timer(&mut self, timer: Time) {
+ self.state.timer = timer;
+ }
+
#[cfg(feature = "server")]
pub(crate) fn set_flush_pipeline(&mut self, enabled: bool) {
self.io.set_flush_pipeline(enabled);
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -202,6 +211,8 @@ where
h1_header_read_timeout_fut: &mut self.state.h1_header_read_timeout_fut,
#[cfg(all(feature = "server", feature = "runtime"))]
h1_header_read_timeout_running: &mut self.state.h1_header_read_timeout_running,
+ #[cfg(all(feature = "server", feature = "runtime"))]
+ timer: self.state.timer.clone(),
preserve_header_case: self.state.preserve_header_case,
#[cfg(feature = "ffi")]
preserve_header_order: self.state.preserve_header_order,
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -802,9 +813,11 @@ struct State {
#[cfg(all(feature = "server", feature = "runtime"))]
h1_header_read_timeout: Option<Duration>,
#[cfg(all(feature = "server", feature = "runtime"))]
- h1_header_read_timeout_fut: Option<Pin<Box<Sleep>>>,
+ h1_header_read_timeout_fut: Option<Pin<Box<dyn Sleep>>>,
#[cfg(all(feature = "server", feature = "runtime"))]
h1_header_read_timeout_running: bool,
+ #[cfg(all(feature = "server", feature = "runtime"))]
+ timer: Time,
preserve_header_case: bool,
#[cfg(feature = "ffi")]
preserve_header_order: bool,
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -58,10 +58,10 @@ cfg_client! {
impl<D, Bs, I, T> Dispatcher<D, Bs, I, T>
where
D: Dispatch<
- PollItem = MessageHead<T::Outgoing>,
- PollBody = Bs,
- RecvItem = MessageHead<T::Incoming>,
- > + Unpin,
+ PollItem = MessageHead<T::Outgoing>,
+ PollBody = Bs,
+ RecvItem = MessageHead<T::Incoming>,
+ > + Unpin,
D::PollError: Into<Box<dyn StdError + Send + Sync>>,
I: AsyncRead + AsyncWrite + Unpin,
T: Http1Transaction + Unpin,
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -256,7 +256,10 @@ where
if wants.contains(Wants::UPGRADE) {
let upgrade = self.conn.on_upgrade();
debug_assert!(!upgrade.is_none(), "empty upgrade");
- debug_assert!(head.extensions.get::<OnUpgrade>().is_none(), "OnUpgrade already set");
+ debug_assert!(
+ head.extensions.get::<OnUpgrade>().is_none(),
+ "OnUpgrade already set"
+ );
head.extensions.insert(upgrade);
}
self.dispatch.recv_msg(Ok((head, body)))?;
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -396,10 +399,10 @@ where
impl<D, Bs, I, T> Future for Dispatcher<D, Bs, I, T>
where
D: Dispatch<
- PollItem = MessageHead<T::Outgoing>,
- PollBody = Bs,
- RecvItem = MessageHead<T::Incoming>,
- > + Unpin,
+ PollItem = MessageHead<T::Outgoing>,
+ PollBody = Bs,
+ RecvItem = MessageHead<T::Incoming>,
+ > + Unpin,
D::PollError: Into<Box<dyn StdError + Send + Sync>>,
I: AsyncRead + AsyncWrite + Unpin,
T: Http1Transaction + Unpin,
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -5,13 +5,9 @@ use std::future::Future;
use std::io::{self, IoSlice};
use std::marker::Unpin;
use std::mem::MaybeUninit;
-#[cfg(all(feature = "server", feature = "runtime"))]
-use std::time::Duration;
use bytes::{Buf, BufMut, Bytes, BytesMut};
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
-#[cfg(all(feature = "server", feature = "runtime"))]
-use tokio::time::Instant;
use tracing::{debug, trace};
use super::{Http1Transaction, ParseContext, ParsedMessage};
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -193,6 +189,8 @@ where
h1_header_read_timeout_fut: parse_ctx.h1_header_read_timeout_fut,
#[cfg(all(feature = "server", feature = "runtime"))]
h1_header_read_timeout_running: parse_ctx.h1_header_read_timeout_running,
+ #[cfg(all(feature = "server", feature = "runtime"))]
+ timer: parse_ctx.timer.clone(),
preserve_header_case: parse_ctx.preserve_header_case,
#[cfg(feature = "ffi")]
preserve_header_order: parse_ctx.preserve_header_order,
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -209,15 +207,7 @@ where
#[cfg(all(feature = "server", feature = "runtime"))]
{
*parse_ctx.h1_header_read_timeout_running = false;
-
- if let Some(h1_header_read_timeout_fut) =
- parse_ctx.h1_header_read_timeout_fut
- {
- // Reset the timer in order to avoid woken up when the timeout finishes
- h1_header_read_timeout_fut
- .as_mut()
- .reset(Instant::now() + Duration::from_secs(30 * 24 * 60 * 60));
- }
+ parse_ctx.h1_header_read_timeout_fut.take();
}
return Poll::Ready(Ok(msg));
}
diff --git a/src/proto/h1/mod.rs b/src/proto/h1/mod.rs
--- a/src/proto/h1/mod.rs
+++ b/src/proto/h1/mod.rs
@@ -4,11 +4,13 @@ use std::{pin::Pin, time::Duration};
use bytes::BytesMut;
use http::{HeaderMap, Method};
use httparse::ParserConfig;
-#[cfg(all(feature = "server", feature = "runtime"))]
-use tokio::time::Sleep;
use crate::body::DecodedLength;
+#[cfg(all(feature = "server", feature = "runtime"))]
+use crate::common::time::Time;
use crate::proto::{BodyLength, MessageHead};
+#[cfg(all(feature = "server", feature = "runtime"))]
+use crate::rt::Sleep;
pub(crate) use self::conn::Conn;
pub(crate) use self::decode::Decoder;
diff --git a/src/proto/h1/mod.rs b/src/proto/h1/mod.rs
--- a/src/proto/h1/mod.rs
+++ b/src/proto/h1/mod.rs
@@ -79,9 +81,11 @@ pub(crate) struct ParseContext<'a> {
#[cfg(all(feature = "server", feature = "runtime"))]
h1_header_read_timeout: Option<Duration>,
#[cfg(all(feature = "server", feature = "runtime"))]
- h1_header_read_timeout_fut: &'a mut Option<Pin<Box<Sleep>>>,
+ h1_header_read_timeout_fut: &'a mut Option<Pin<Box<dyn Sleep>>>,
#[cfg(all(feature = "server", feature = "runtime"))]
h1_header_read_timeout_running: &'a mut bool,
+ #[cfg(all(feature = "server", feature = "runtime"))]
+ timer: Time,
preserve_header_case: bool,
#[cfg(feature = "ffi")]
preserve_header_order: bool,
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1,5 +1,7 @@
use std::fmt::{self, Write};
use std::mem::MaybeUninit;
+#[cfg(all(feature = "server", feature = "runtime"))]
+use std::time::Instant;
use bytes::Bytes;
use bytes::BytesMut;
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -7,8 +9,6 @@ use bytes::BytesMut;
use http::header::ValueIter;
use http::header::{self, Entry, HeaderName, HeaderValue};
use http::{HeaderMap, Method, StatusCode, Version};
-#[cfg(all(feature = "server", feature = "runtime"))]
-use tokio::time::Instant;
use tracing::{debug, error, trace, trace_span, warn};
use crate::body::DecodedLength;
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -83,12 +83,12 @@ where
match ctx.h1_header_read_timeout_fut {
Some(h1_header_read_timeout_fut) => {
debug!("resetting h1 header read timeout timer");
- h1_header_read_timeout_fut.as_mut().reset(deadline);
+ ctx.timer.reset(h1_header_read_timeout_fut, deadline);
}
None => {
debug!("setting h1 header read timeout timer");
*ctx.h1_header_read_timeout_fut =
- Some(Box::pin(tokio::time::sleep_until(deadline)));
+ Some(crate::common::into_pin(ctx.timer.sleep_until(deadline)));
}
}
}
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -994,7 +994,6 @@ impl Http1Transaction for Client {
// SAFETY: array is valid up to `headers_len`
let header = unsafe { &mut *header.as_mut_ptr() };
Client::obs_fold_line(&mut slice, header);
-
}
}
diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs
--- a/src/proto/h2/client.rs
+++ b/src/proto/h2/client.rs
@@ -13,6 +13,7 @@ use tracing::{debug, trace, warn};
use super::{ping, H2Upgraded, PipeToSendStream, SendBuf};
use crate::body::Body;
+use crate::common::time::Time;
use crate::common::{exec::Exec, task, Future, Never, Pin, Poll};
use crate::ext::Protocol;
use crate::headers;
diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs
--- a/src/proto/h2/client.rs
+++ b/src/proto/h2/client.rs
@@ -109,6 +110,7 @@ pub(crate) async fn handshake<T, B>(
req_rx: ClientRx<B>,
config: &Config,
exec: Exec,
+ timer: Time,
) -> crate::Result<ClientTask<B>>
where
T: AsyncRead + AsyncWrite + Send + Unpin + 'static,
diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs
--- a/src/proto/h2/client.rs
+++ b/src/proto/h2/client.rs
@@ -137,7 +139,7 @@ where
let (conn, ping) = if ping_config.is_enabled() {
let pp = conn.ping_pong().expect("conn.ping_pong");
- let (recorder, mut ponger) = ping::channel(pp, ping_config);
+ let (recorder, mut ponger) = ping::channel(pp, ping_config, timer);
let conn = future::poll_fn(move |cx| {
match ponger.poll(cx) {
diff --git a/src/proto/h2/ping.rs b/src/proto/h2/ping.rs
--- a/src/proto/h2/ping.rs
+++ b/src/proto/h2/ping.rs
@@ -27,22 +27,24 @@ use std::future::Future;
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use std::task::{self, Poll};
-use std::time::Duration;
-#[cfg(not(feature = "runtime"))]
-use std::time::Instant;
+use std::time::{Duration, Instant};
+
use h2::{Ping, PingPong};
-#[cfg(feature = "runtime")]
-use tokio::time::{Instant, Sleep};
use tracing::{debug, trace};
+#[cfg_attr(not(feature = "runtime"), allow(unused))]
+use crate::common::time::Time;
+#[cfg_attr(not(feature = "runtime"), allow(unused))]
+use crate::rt::Sleep;
+
type WindowSize = u32;
pub(super) fn disabled() -> Recorder {
Recorder { shared: None }
}
-pub(super) fn channel(ping_pong: PingPong, config: Config) -> (Recorder, Ponger) {
+pub(super) fn channel(ping_pong: PingPong, config: Config, __timer: Time) -> (Recorder, Ponger) {
debug_assert!(
config.is_enabled(),
"ping channel requires bdp or keep-alive config",
diff --git a/src/proto/h2/ping.rs b/src/proto/h2/ping.rs
--- a/src/proto/h2/ping.rs
+++ b/src/proto/h2/ping.rs
@@ -67,8 +69,9 @@ pub(super) fn channel(ping_pong: PingPong, config: Config) -> (Recorder, Ponger)
interval,
timeout: config.keep_alive_timeout,
while_idle: config.keep_alive_while_idle,
- timer: Box::pin(tokio::time::sleep(interval)),
+ sleep: crate::common::into_pin(__timer.sleep(interval)),
state: KeepAliveState::Init,
+ timer: __timer,
});
#[cfg(feature = "runtime")]
diff --git a/src/proto/h2/ping.rs b/src/proto/h2/ping.rs
--- a/src/proto/h2/ping.rs
+++ b/src/proto/h2/ping.rs
@@ -173,13 +176,14 @@ struct KeepAlive {
while_idle: bool,
state: KeepAliveState,
- timer: Pin<Box<Sleep>>,
+ sleep: Pin<Box<dyn Sleep>>,
+ timer: Time,
}
#[cfg(feature = "runtime")]
enum KeepAliveState {
Init,
- Scheduled,
+ Scheduled(Instant),
PingSent,
}
diff --git a/src/proto/h2/ping.rs b/src/proto/h2/ping.rs
--- a/src/proto/h2/ping.rs
+++ b/src/proto/h2/ping.rs
@@ -301,7 +305,7 @@ impl Ponger {
#[cfg(feature = "runtime")]
{
if let Some(ref mut ka) = self.keep_alive {
- ka.schedule(is_idle, &locked);
+ ka.maybe_schedule(is_idle, &locked);
ka.maybe_ping(cx, &mut locked);
}
}
diff --git a/src/proto/h2/ping.rs b/src/proto/h2/ping.rs
--- a/src/proto/h2/ping.rs
+++ b/src/proto/h2/ping.rs
@@ -324,11 +328,12 @@ impl Ponger {
{
if let Some(ref mut ka) = self.keep_alive {
locked.update_last_read_at();
- ka.schedule(is_idle, &locked);
+ ka.maybe_schedule(is_idle, &locked);
+ ka.maybe_ping(cx, &mut locked);
}
}
- if let Some(ref mut bdp) = self.bdp {
+ if let Some(ref mut bdp) = self.bdp {
let bytes = locked.bytes.expect("bdp enabled implies bytes");
locked.bytes = Some(0); // reset
trace!("received BDP ack; bytes = {}, rtt = {:?}", bytes, rtt);
diff --git a/src/proto/h2/ping.rs b/src/proto/h2/ping.rs
--- a/src/proto/h2/ping.rs
+++ b/src/proto/h2/ping.rs
@@ -336,7 +341,7 @@ impl Ponger {
let update = bdp.calculate(bytes, rtt);
locked.next_bdp_at = Some(now + bdp.ping_delay);
if let Some(update) = update {
- return Poll::Ready(Ponged::SizeUpdate(update))
+ return Poll::Ready(Ponged::SizeUpdate(update));
}
}
}
diff --git a/src/proto/h2/ping.rs b/src/proto/h2/ping.rs
--- a/src/proto/h2/ping.rs
+++ b/src/proto/h2/ping.rs
@@ -471,38 +476,39 @@ fn seconds(dur: Duration) -> f64 {
#[cfg(feature = "runtime")]
impl KeepAlive {
- fn schedule(&mut self, is_idle: bool, shared: &Shared) {
+ fn maybe_schedule(&mut self, is_idle: bool, shared: &Shared) {
match self.state {
KeepAliveState::Init => {
if !self.while_idle && is_idle {
return;
}
- self.state = KeepAliveState::Scheduled;
- let interval = shared.last_read_at() + self.interval;
- self.timer.as_mut().reset(interval);
+ self.schedule(shared);
}
KeepAliveState::PingSent => {
if shared.is_ping_sent() {
return;
}
-
- self.state = KeepAliveState::Scheduled;
- let interval = shared.last_read_at() + self.interval;
- self.timer.as_mut().reset(interval);
+ self.schedule(shared);
}
- KeepAliveState::Scheduled => (),
+ KeepAliveState::Scheduled(..) => (),
}
}
+ fn schedule(&mut self, shared: &Shared) {
+ let interval = shared.last_read_at() + self.interval;
+ self.state = KeepAliveState::Scheduled(interval);
+ self.timer.reset(&mut self.sleep, interval);
+ }
+
fn maybe_ping(&mut self, cx: &mut task::Context<'_>, shared: &mut Shared) {
match self.state {
- KeepAliveState::Scheduled => {
- if Pin::new(&mut self.timer).poll(cx).is_pending() {
+ KeepAliveState::Scheduled(at) => {
+ if Pin::new(&mut self.sleep).poll(cx).is_pending() {
return;
}
// check if we've received a frame while we were scheduled
- if shared.last_read_at() + self.interval > self.timer.deadline() {
+ if shared.last_read_at() + self.interval > at {
self.state = KeepAliveState::Init;
cx.waker().wake_by_ref(); // schedule us again
return;
diff --git a/src/proto/h2/ping.rs b/src/proto/h2/ping.rs
--- a/src/proto/h2/ping.rs
+++ b/src/proto/h2/ping.rs
@@ -511,7 +517,7 @@ impl KeepAlive {
shared.send_ping();
self.state = KeepAliveState::PingSent;
let timeout = Instant::now() + self.timeout;
- self.timer.as_mut().reset(timeout);
+ self.timer.reset(&mut self.sleep, timeout);
}
KeepAliveState::Init | KeepAliveState::PingSent => (),
}
diff --git a/src/proto/h2/ping.rs b/src/proto/h2/ping.rs
--- a/src/proto/h2/ping.rs
+++ b/src/proto/h2/ping.rs
@@ -520,13 +526,13 @@ impl KeepAlive {
fn maybe_timeout(&mut self, cx: &mut task::Context<'_>) -> Result<(), KeepAliveTimedOut> {
match self.state {
KeepAliveState::PingSent => {
- if Pin::new(&mut self.timer).poll(cx).is_pending() {
+ if Pin::new(&mut self.sleep).poll(cx).is_pending() {
return Ok(());
}
trace!("keep-alive timeout ({:?}) reached", self.timeout);
Err(KeepAliveTimedOut)
}
- KeepAliveState::Init | KeepAliveState::Scheduled => Ok(()),
+ KeepAliveState::Init | KeepAliveState::Scheduled(..) => Ok(()),
}
}
}
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -14,6 +14,7 @@ use tracing::{debug, trace, warn};
use super::{ping, PipeToSendStream, SendBuf};
use crate::body::Body;
use crate::common::exec::ConnStreamExec;
+use crate::common::time::Time;
use crate::common::{date, task, Future, Pin, Poll};
use crate::ext::Protocol;
use crate::headers;
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -35,7 +36,7 @@ const DEFAULT_CONN_WINDOW: u32 = 1024 * 1024; // 1mb
const DEFAULT_STREAM_WINDOW: u32 = 1024 * 1024; // 1mb
const DEFAULT_MAX_FRAME_SIZE: u32 = 1024 * 16; // 16kb
const DEFAULT_MAX_SEND_BUF_SIZE: usize = 1024 * 400; // 400kb
-// 16 MB "sane default" taken from golang http2
+ // 16 MB "sane default" taken from golang http2
const DEFAULT_SETTINGS_MAX_HEADER_LIST_SIZE: u32 = 16 << 20;
#[derive(Clone, Debug)]
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -80,6 +81,7 @@ pin_project! {
B: Body,
{
exec: E,
+ timer: Time,
service: S,
state: State<T, B>,
}
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -114,7 +116,13 @@ where
B: Body + 'static,
E: ConnStreamExec<S::Future, B>,
{
- pub(crate) fn new(io: T, service: S, config: &Config, exec: E) -> Server<T, S, B, E> {
+ pub(crate) fn new(
+ io: T,
+ service: S,
+ config: &Config,
+ exec: E,
+ timer: Time,
+ ) -> Server<T, S, B, E> {
let mut builder = h2::server::Builder::default();
builder
.initial_window_size(config.initial_stream_window_size)
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -150,6 +158,7 @@ where
Server {
exec,
+ timer,
state: State::Handshaking {
ping_config,
hs: handshake,
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -199,7 +208,11 @@ where
let mut conn = ready!(Pin::new(hs).poll(cx).map_err(crate::Error::new_h2))?;
let ping = if ping_config.is_enabled() {
let pp = conn.ping_pong().expect("conn.ping_pong");
- Some(ping::channel(pp, ping_config.clone()))
+ Some(ping::channel(
+ pp,
+ ping_config.clone(),
+ me.timer.clone(),
+ ))
} else {
None
};
diff --git a/src/rt.rs b/src/rt.rs
--- a/src/rt.rs
+++ b/src/rt.rs
@@ -5,8 +5,31 @@
//! If the `runtime` feature is disabled, the types in this module can be used
//! to plug in other runtimes.
+use std::{
+ future::Future,
+ pin::Pin,
+ time::{Duration, Instant},
+};
+
/// An executor of futures.
pub trait Executor<Fut> {
/// Place the future into the executor to be run.
fn execute(&self, fut: Fut);
}
+
+/// A timer which provides timer-like functions.
+pub trait Timer {
+ /// Return a future that resolves in `duration` time.
+ fn sleep(&self, duration: Duration) -> Box<dyn Sleep + Unpin>;
+
+ /// Return a future that resolves at `deadline`.
+ fn sleep_until(&self, deadline: Instant) -> Box<dyn Sleep + Unpin>;
+
+ /// Reset a future to resolve at `new_deadline` instead.
+ fn reset(&self, sleep: &mut Pin<Box<dyn Sleep>>, new_deadline: Instant) {
+ *sleep = crate::common::into_pin(self.sleep_until(new_deadline));
+ }
+}
+
+/// A future returned by a `Timer`.
+pub trait Sleep: Send + Sync + Unpin + Future<Output = ()> {}
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -46,6 +46,7 @@
not(all(feature = "http1", feature = "http2"))
))]
use std::marker::PhantomData;
+use std::sync::Arc;
#[cfg(all(any(feature = "http1", feature = "http2"), feature = "runtime"))]
use std::time::Duration;
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -55,6 +56,7 @@ use crate::common::io::Rewind;
use crate::error::{Kind, Parse};
#[cfg(feature = "http1")]
use crate::upgrade::Upgraded;
+use crate::{common::time::Time, rt::Timer};
cfg_feature! {
#![any(feature = "http1", feature = "http2")]
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -86,6 +88,7 @@ cfg_feature! {
#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))]
pub struct Http<E = Exec> {
pub(crate) exec: E,
+ pub(crate) timer: Time,
h1_half_close: bool,
h1_keep_alive: bool,
h1_title_case_headers: bool,
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -169,7 +172,7 @@ pin_project! {
#[cfg(all(feature = "http1", feature = "http2"))]
#[derive(Clone, Debug)]
enum Fallback<E> {
- ToHttp2(proto::h2::server::Config, E),
+ ToHttp2(proto::h2::server::Config, E, Time),
Http1Only,
}
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -225,6 +228,7 @@ impl Http {
pub fn new() -> Http {
Http {
exec: Exec::Default,
+ timer: Time::Empty,
h1_half_close: false,
h1_keep_alive: true,
h1_title_case_headers: false,
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -554,6 +558,30 @@ impl<E> Http<E> {
pub fn with_executor<E2>(self, exec: E2) -> Http<E2> {
Http {
exec,
+ timer: self.timer,
+ h1_half_close: self.h1_half_close,
+ h1_keep_alive: self.h1_keep_alive,
+ h1_title_case_headers: self.h1_title_case_headers,
+ h1_preserve_header_case: self.h1_preserve_header_case,
+ #[cfg(all(feature = "http1", feature = "runtime"))]
+ h1_header_read_timeout: self.h1_header_read_timeout,
+ h1_writev: self.h1_writev,
+ #[cfg(feature = "http2")]
+ h2_builder: self.h2_builder,
+ mode: self.mode,
+ max_buf_size: self.max_buf_size,
+ pipeline_flush: self.pipeline_flush,
+ }
+ }
+
+ /// Set the timer used in background tasks.
+ pub fn with_timer<M>(self, timer: M) -> Http<E>
+ where
+ M: Timer + Send + Sync + 'static,
+ {
+ Http {
+ exec: self.exec,
+ timer: Time::Timer(Arc::new(timer)),
h1_half_close: self.h1_half_close,
h1_keep_alive: self.h1_keep_alive,
h1_title_case_headers: self.h1_title_case_headers,
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -610,6 +638,10 @@ impl<E> Http<E> {
macro_rules! h1 {
() => {{
let mut conn = proto::Conn::new(io);
+ #[cfg(feature = "runtime")]
+ {
+ conn.set_timer(self.timer.clone());
+ }
if !self.h1_keep_alive {
conn.disable_keep_alive();
}
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -654,8 +686,13 @@ impl<E> Http<E> {
#[cfg(feature = "http2")]
ConnectionMode::H2Only => {
let rewind_io = Rewind::new(io);
- let h2 =
- proto::h2::Server::new(rewind_io, service, &self.h2_builder, self.exec.clone());
+ let h2 = proto::h2::Server::new(
+ rewind_io,
+ service,
+ &self.h2_builder,
+ self.exec.clone(),
+ self.timer.clone(),
+ );
ProtoServer::H2 { h2 }
}
};
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -664,7 +701,11 @@ impl<E> Http<E> {
conn: Some(proto),
#[cfg(all(feature = "http1", feature = "http2"))]
fallback: if self.mode == ConnectionMode::Fallback {
- Fallback::ToHttp2(self.h2_builder.clone(), self.exec.clone())
+ Fallback::ToHttp2(
+ self.h2_builder.clone(),
+ self.exec.clone(),
+ self.timer.clone(),
+ )
} else {
Fallback::Http1Only
},
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -808,7 +849,12 @@ where
let mut conn = Some(self);
futures_util::future::poll_fn(move |cx| {
ready!(conn.as_mut().unwrap().poll_without_shutdown(cx))?;
- Poll::Ready(conn.take().unwrap().try_into_parts().ok_or_else(crate::Error::new_without_shutdown_not_h1))
+ Poll::Ready(
+ conn.take()
+ .unwrap()
+ .try_into_parts()
+ .ok_or_else(crate::Error::new_without_shutdown_not_h1),
+ )
})
}
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -825,11 +871,17 @@ where
};
let mut rewind_io = Rewind::new(io);
rewind_io.rewind(read_buf);
- let (builder, exec) = match self.fallback {
- Fallback::ToHttp2(ref builder, ref exec) => (builder, exec),
+ let (builder, exec, timer) = match self.fallback {
+ Fallback::ToHttp2(ref builder, ref exec, ref timer) => (builder, exec, timer),
Fallback::Http1Only => unreachable!("upgrade_h2 with Fallback::Http1Only"),
};
- let h2 = proto::h2::Server::new(rewind_io, dispatch.into_service(), builder, exec.clone());
+ let h2 = proto::h2::Server::new(
+ rewind_io,
+ dispatch.into_service(),
+ builder,
+ exec.clone(),
+ timer.clone(),
+ );
debug_assert!(self.conn.is_none());
self.conn = Some(ProtoServer::H2 { h2 });
|
I'll look to take on the timer series of issues (#2847, #2846, #2848, #2857).
|
2022-08-31T22:49:43Z
| 2,974
|
Use a generic Timer to implement the timeouts and intervals
After adding a [generic `T: Timer`](https://github.com/hyperium/hyper/issues/2847), update the uses of `tokio::time` to use the generic timer instead.
- [ ] HTTP/2 keep-alive interval
- [ ] HTTP/2 keep-alive timeout
- [ ] HTTP/1 read headers timeout
|
hyperium__hyper-2974
|
diff --git /dev/null b/benches/support/tokiort.rs
new file mode 100644
--- /dev/null
+++ b/benches/support/tokiort.rs
@@ -0,0 +1,66 @@
+#![allow(dead_code)]
+//! Various runtimes for hyper
+use std::{
+ pin::Pin,
+ task::{Context, Poll},
+ time::{Duration, Instant},
+};
+
+use futures_util::Future;
+use hyper::rt::{Sleep, Timer};
+
+/// An Executor that uses the tokio runtime.
+pub struct TokioExecutor;
+
+/// A Timer that uses the tokio runtime.
+
+#[derive(Clone, Debug)]
+pub struct TokioTimer;
+
+impl Timer for TokioTimer {
+ fn sleep(&self, duration: Duration) -> Box<dyn Sleep + Unpin> {
+ let s = tokio::time::sleep(duration);
+ let hs = TokioSleep { inner: Box::pin(s) };
+ return Box::new(hs);
+ }
+
+ fn sleep_until(&self, deadline: Instant) -> Box<dyn Sleep + Unpin> {
+ return Box::new(TokioSleep {
+ inner: Box::pin(tokio::time::sleep_until(deadline.into())),
+ });
+ }
+}
+
+struct TokioTimeout<T> {
+ inner: Pin<Box<tokio::time::Timeout<T>>>,
+}
+
+impl<T> Future for TokioTimeout<T>
+where
+ T: Future,
+{
+ type Output = Result<T::Output, tokio::time::error::Elapsed>;
+
+ fn poll(mut self: Pin<&mut Self>, context: &mut Context<'_>) -> Poll<Self::Output> {
+ self.inner.as_mut().poll(context)
+ }
+}
+
+// Use TokioSleep to get tokio::time::Sleep to implement Unpin.
+// see https://docs.rs/tokio/latest/tokio/time/struct.Sleep.html
+pub(crate) struct TokioSleep {
+ pub(crate) inner: Pin<Box<tokio::time::Sleep>>,
+}
+
+impl Future for TokioSleep {
+ type Output = ();
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ self.inner.as_mut().poll(cx)
+ }
+}
+
+// Use HasSleep to get tokio::time::Sleep to implement Unpin.
+// see https://docs.rs/tokio/latest/tokio/time/struct.Sleep.html
+
+impl Sleep for TokioSleep {}
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -1035,7 +1048,8 @@ mod tests {
// an empty IO, we'll be skipping and using the read buffer anyways
let io = tokio_test::io::Builder::new().build();
- let mut conn = Conn::<_, bytes::Bytes, crate::proto::h1::ServerTransaction>::new(io);
+ let mut conn =
+ Conn::<_, bytes::Bytes, crate::proto::h1::ServerTransaction>::new(io);
*conn.io.read_buf_mut() = ::bytes::BytesMut::from(&s[..]);
conn.state.cached_headers = Some(HeaderMap::with_capacity(2));
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -674,6 +664,8 @@ enum WriteStrategy {
#[cfg(test)]
mod tests {
+ use crate::common::time::Time;
+
use super::*;
use std::time::Duration;
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -741,6 +733,8 @@ mod tests {
h1_header_read_timeout_fut: &mut None,
#[cfg(feature = "runtime")]
h1_header_read_timeout_running: &mut false,
+ #[cfg(feature = "runtime")]
+ timer: Time::Empty,
preserve_header_case: false,
#[cfg(feature = "ffi")]
preserve_header_order: false,
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1566,6 +1565,9 @@ fn extend(dst: &mut Vec<u8>, data: &[u8]) {
mod tests {
use bytes::BytesMut;
+ #[cfg(feature = "runtime")]
+ use crate::common::time::Time;
+
use super::*;
#[test]
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1585,6 +1587,8 @@ mod tests {
h1_header_read_timeout_fut: &mut None,
#[cfg(feature = "runtime")]
h1_header_read_timeout_running: &mut false,
+ #[cfg(feature = "runtime")]
+ timer: Time::Empty,
preserve_header_case: false,
#[cfg(feature = "ffi")]
preserve_header_order: false,
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1620,6 +1624,8 @@ mod tests {
h1_header_read_timeout_fut: &mut None,
#[cfg(feature = "runtime")]
h1_header_read_timeout_running: &mut false,
+ #[cfg(feature = "runtime")]
+ timer: Time::Empty,
preserve_header_case: false,
#[cfg(feature = "ffi")]
preserve_header_order: false,
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1650,6 +1656,8 @@ mod tests {
h1_header_read_timeout_fut: &mut None,
#[cfg(feature = "runtime")]
h1_header_read_timeout_running: &mut false,
+ #[cfg(feature = "runtime")]
+ timer: Time::Empty,
preserve_header_case: false,
#[cfg(feature = "ffi")]
preserve_header_order: false,
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1678,6 +1686,8 @@ mod tests {
h1_header_read_timeout_fut: &mut None,
#[cfg(feature = "runtime")]
h1_header_read_timeout_running: &mut false,
+ #[cfg(feature = "runtime")]
+ timer: Time::Empty,
preserve_header_case: false,
#[cfg(feature = "ffi")]
preserve_header_order: false,
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1708,6 +1718,8 @@ mod tests {
h1_header_read_timeout_fut: &mut None,
#[cfg(feature = "runtime")]
h1_header_read_timeout_running: &mut false,
+ #[cfg(feature = "runtime")]
+ timer: Time::Empty,
preserve_header_case: false,
#[cfg(feature = "ffi")]
preserve_header_order: false,
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1742,6 +1754,8 @@ mod tests {
h1_header_read_timeout_fut: &mut None,
#[cfg(feature = "runtime")]
h1_header_read_timeout_running: &mut false,
+ #[cfg(feature = "runtime")]
+ timer: Time::Empty,
preserve_header_case: false,
#[cfg(feature = "ffi")]
preserve_header_order: false,
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1773,6 +1787,8 @@ mod tests {
h1_header_read_timeout_fut: &mut None,
#[cfg(feature = "runtime")]
h1_header_read_timeout_running: &mut false,
+ #[cfg(feature = "runtime")]
+ timer: Time::Empty,
preserve_header_case: false,
#[cfg(feature = "ffi")]
preserve_header_order: false,
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1799,6 +1815,8 @@ mod tests {
h1_header_read_timeout_fut: &mut None,
#[cfg(feature = "runtime")]
h1_header_read_timeout_running: &mut false,
+ #[cfg(feature = "runtime")]
+ timer: Time::Empty,
preserve_header_case: true,
#[cfg(feature = "ffi")]
preserve_header_order: false,
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1846,6 +1864,8 @@ mod tests {
h1_header_read_timeout_fut: &mut None,
#[cfg(feature = "runtime")]
h1_header_read_timeout_running: &mut false,
+ #[cfg(feature = "runtime")]
+ timer: Time::Empty,
preserve_header_case: false,
#[cfg(feature = "ffi")]
preserve_header_order: false,
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1874,6 +1894,8 @@ mod tests {
h1_header_read_timeout_fut: &mut None,
#[cfg(feature = "runtime")]
h1_header_read_timeout_running: &mut false,
+ #[cfg(feature = "runtime")]
+ timer: Time::Empty,
preserve_header_case: false,
#[cfg(feature = "ffi")]
preserve_header_order: false,
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -2111,6 +2133,8 @@ mod tests {
h1_header_read_timeout_fut: &mut None,
#[cfg(feature = "runtime")]
h1_header_read_timeout_running: &mut false,
+ #[cfg(feature = "runtime")]
+ timer: Time::Empty,
preserve_header_case: false,
#[cfg(feature = "ffi")]
preserve_header_order: false,
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -2139,6 +2163,8 @@ mod tests {
h1_header_read_timeout_fut: &mut None,
#[cfg(feature = "runtime")]
h1_header_read_timeout_running: &mut false,
+ #[cfg(feature = "runtime")]
+ timer: Time::Empty,
preserve_header_case: false,
#[cfg(feature = "ffi")]
preserve_header_order: false,
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -2167,6 +2193,8 @@ mod tests {
h1_header_read_timeout_fut: &mut None,
#[cfg(feature = "runtime")]
h1_header_read_timeout_running: &mut false,
+ #[cfg(feature = "runtime")]
+ timer: Time::Empty,
preserve_header_case: false,
#[cfg(feature = "ffi")]
preserve_header_order: false,
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -2450,18 +2478,12 @@ mod tests {
value: (0, buf.len()),
};
Client::obs_fold_line(&mut buf, &mut idx);
- String::from_utf8(buf[idx.value.0 .. idx.value.1].to_vec()).unwrap()
+ String::from_utf8(buf[idx.value.0..idx.value.1].to_vec()).unwrap()
}
- assert_eq!(
- unfold("a normal line"),
- "a normal line",
- );
+ assert_eq!(unfold("a normal line"), "a normal line",);
- assert_eq!(
- unfold("obs\r\n fold\r\n\t line"),
- "obs fold line",
- );
+ assert_eq!(unfold("obs\r\n fold\r\n\t line"), "obs fold line",);
}
#[test]
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -2696,6 +2718,8 @@ mod tests {
h1_header_read_timeout_fut: &mut None,
#[cfg(feature = "runtime")]
h1_header_read_timeout_running: &mut false,
+ #[cfg(feature = "runtime")]
+ timer: Time::Empty,
preserve_header_case: false,
#[cfg(feature = "ffi")]
preserve_header_order: false,
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -2788,6 +2812,8 @@ mod tests {
h1_header_read_timeout_fut: &mut None,
#[cfg(feature = "runtime")]
h1_header_read_timeout_running: &mut false,
+ #[cfg(feature = "runtime")]
+ timer: Time::Empty,
preserve_header_case: false,
#[cfg(feature = "ffi")]
preserve_header_order: false,
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -2836,6 +2862,8 @@ mod tests {
h1_header_read_timeout_fut: &mut None,
#[cfg(feature = "runtime")]
h1_header_read_timeout_running: &mut false,
+ #[cfg(feature = "runtime")]
+ timer: Time::Empty,
preserve_header_case: false,
#[cfg(feature = "ffi")]
preserve_header_order: false,
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1337,6 +1337,7 @@ mod conn {
use futures_channel::{mpsc, oneshot};
use futures_util::future::{self, poll_fn, FutureExt, TryFutureExt};
use http_body_util::{Empty, StreamBody};
+ use hyper::rt::Timer;
use tokio::io::{AsyncRead, AsyncReadExt as _, AsyncWrite, AsyncWriteExt as _, ReadBuf};
use tokio::net::{TcpListener as TkTcpListener, TcpStream};
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1347,6 +1348,8 @@ mod conn {
use super::{concat, s, support, tcp_connect, FutureHyperExt};
+ use support::TokioTimer;
+
#[tokio::test]
async fn get() {
let _ = ::pretty_env_logger::try_init();
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1491,7 +1494,7 @@ mod conn {
});
let rx = rx1.expect("thread panicked");
- let rx = rx.then(|_| tokio::time::sleep(Duration::from_millis(200)));
+ let rx = rx.then(|_| TokioTimer.sleep(Duration::from_millis(200)));
let chunk = rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap();
assert_eq!(chunk.len(), 5);
}
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1592,7 +1595,7 @@ mod conn {
concat(res)
});
let rx = rx1.expect("thread panicked");
- let rx = rx.then(|_| tokio::time::sleep(Duration::from_millis(200)));
+ let rx = rx.then(|_| TokioTimer.sleep(Duration::from_millis(200)));
rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap();
}
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1638,7 +1641,7 @@ mod conn {
concat(res)
});
let rx = rx1.expect("thread panicked");
- let rx = rx.then(|_| tokio::time::sleep(Duration::from_millis(200)));
+ let rx = rx.then(|_| TokioTimer.sleep(Duration::from_millis(200)));
rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap();
}
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1690,7 +1693,7 @@ mod conn {
});
let rx = rx1.expect("thread panicked");
- let rx = rx.then(|_| tokio::time::sleep(Duration::from_millis(200)));
+ let rx = rx.then(|_| TokioTimer.sleep(Duration::from_millis(200)));
rt.block_on(future::join3(res1, res2, rx).map(|r| r.0))
.unwrap();
}
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1751,7 +1754,7 @@ mod conn {
});
let rx = rx1.expect("thread panicked");
- let rx = rx.then(|_| tokio::time::sleep(Duration::from_millis(200)));
+ let rx = rx.then(|_| TokioTimer.sleep(Duration::from_millis(200)));
rt.block_on(future::join3(until_upgrade, res, rx).map(|r| r.0))
.unwrap();
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1842,7 +1845,7 @@ mod conn {
});
let rx = rx1.expect("thread panicked");
- let rx = rx.then(|_| tokio::time::sleep(Duration::from_millis(200)));
+ let rx = rx.then(|_| TokioTimer.sleep(Duration::from_millis(200)));
rt.block_on(future::join3(until_tunneled, res, rx).map(|r| r.0))
.unwrap();
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1950,7 +1953,7 @@ mod conn {
let _ = shdn_tx.send(true);
// Allow time for graceful shutdown roundtrips...
- tokio::time::sleep(Duration::from_millis(100)).await;
+ TokioTimer.sleep(Duration::from_millis(100)).await;
// After graceful shutdown roundtrips, the client should be closed...
future::poll_fn(|ctx| client.poll_ready(ctx))
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1982,6 +1985,7 @@ mod conn {
let io = tcp_connect(&addr).await.expect("tcp connect");
let (_client, conn) = conn::Builder::new()
+ .timer(TokioTimer)
.http2_only(true)
.http2_keep_alive_interval(Duration::from_secs(1))
.http2_keep_alive_timeout(Duration::from_secs(1))
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2015,6 +2019,7 @@ mod conn {
let io = tcp_connect(&addr).await.expect("tcp connect");
let (mut client, conn) = conn::Builder::new()
+ .timer(TokioTimer)
.http2_only(true)
.http2_keep_alive_interval(Duration::from_secs(1))
.http2_keep_alive_timeout(Duration::from_secs(1))
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2027,7 +2032,7 @@ mod conn {
});
// sleep longer than keepalive would trigger
- tokio::time::sleep(Duration::from_secs(4)).await;
+ TokioTimer.sleep(Duration::from_secs(4)).await;
future::poll_fn(|ctx| client.poll_ready(ctx))
.await
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2051,6 +2056,7 @@ mod conn {
let io = tcp_connect(&addr).await.expect("tcp connect");
let (mut client, conn) = conn::Builder::new()
+ .timer(TokioTimer)
.http2_only(true)
.http2_keep_alive_interval(Duration::from_secs(1))
.http2_keep_alive_timeout(Duration::from_secs(1))
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2097,6 +2103,7 @@ mod conn {
tokio::spawn(async move {
let sock = listener.accept().await.unwrap().0;
hyper::server::conn::Http::new()
+ .with_timer(TokioTimer)
.http2_only(true)
.serve_connection(
sock,
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2115,6 +2122,7 @@ mod conn {
let io = tcp_connect(&addr).await.expect("tcp connect");
let (mut client, conn) = conn::Builder::new()
+ .timer(TokioTimer)
.http2_only(true)
.http2_keep_alive_interval(Duration::from_secs(1))
.http2_keep_alive_timeout(Duration::from_secs(1))
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2133,7 +2141,7 @@ mod conn {
let _resp = client.send_request(req).await.expect("send_request");
// sleep longer than keepalive would trigger
- tokio::time::sleep(Duration::from_secs(4)).await;
+ TokioTimer.sleep(Duration::from_secs(4)).await;
future::poll_fn(|ctx| client.poll_ready(ctx))
.await
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -21,6 +21,8 @@ use h2::client::SendRequest;
use h2::{RecvStream, SendStream};
use http::header::{HeaderName, HeaderValue};
use http_body_util::{combinators::BoxBody, BodyExt, Empty, Full, StreamBody};
+use hyper::rt::Timer;
+use support::TokioTimer;
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::{TcpListener as TkTcpListener, TcpListener, TcpStream as TkTcpStream};
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -958,7 +960,7 @@ async fn expect_continue_waits_for_body_poll() {
service_fn(|req| {
assert_eq!(req.headers()["expect"], "100-continue");
// But! We're never going to poll the body!
- tokio::time::sleep(Duration::from_millis(50)).map(move |_| {
+ TokioTimer.sleep(Duration::from_millis(50)).map(move |_| {
// Move and drop the req, so we don't auto-close
drop(req);
Response::builder()
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1255,7 +1257,8 @@ async fn http1_allow_half_close() {
.serve_connection(
socket,
service_fn(|_| {
- tokio::time::sleep(Duration::from_millis(500))
+ TokioTimer
+ .sleep(Duration::from_millis(500))
.map(|_| Ok::<_, hyper::Error>(Response::new(Empty::<Bytes>::new())))
}),
)
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1283,7 +1286,7 @@ async fn disconnect_after_reading_request_before_responding() {
.serve_connection(
socket,
service_fn(|_| {
- tokio::time::sleep(Duration::from_secs(2)).map(
+ TokioTimer.sleep(Duration::from_secs(2)).map(
|_| -> Result<Response<Recv>, hyper::Error> {
panic!("response future should have been dropped");
},
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1376,6 +1379,7 @@ async fn header_read_timeout_slow_writes() {
let (socket, _) = listener.accept().await.unwrap();
let conn = Http::new()
+ .with_timer(TokioTimer)
.http1_header_read_timeout(Duration::from_secs(5))
.serve_connection(
socket,
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1451,6 +1455,7 @@ async fn header_read_timeout_slow_writes_multiple_requests() {
let (socket, _) = listener.accept().await.unwrap();
let conn = Http::new()
+ .with_timer(TokioTimer)
.http1_header_read_timeout(Duration::from_secs(5))
.serve_connection(
socket,
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2486,6 +2491,7 @@ async fn http2_keep_alive_detects_unresponsive_client() {
let (socket, _) = listener.accept().await.expect("accept");
let err = Http::new()
+ .with_timer(TokioTimer)
.http2_only(true)
.http2_keep_alive_interval(Duration::from_secs(1))
.http2_keep_alive_timeout(Duration::from_secs(1))
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2507,6 +2513,7 @@ async fn http2_keep_alive_with_responsive_client() {
let (socket, _) = listener.accept().await.expect("accept");
Http::new()
+ .with_timer(TokioTimer)
.http2_only(true)
.http2_keep_alive_interval(Duration::from_secs(1))
.http2_keep_alive_timeout(Duration::from_secs(1))
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2526,7 +2533,7 @@ async fn http2_keep_alive_with_responsive_client() {
conn.await.expect("client conn");
});
- tokio::time::sleep(Duration::from_secs(4)).await;
+ TokioTimer.sleep(Duration::from_secs(4)).await;
let req = http::Request::new(Empty::<Bytes>::new());
client.send_request(req).await.expect("client.send_request");
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2574,6 +2581,7 @@ async fn http2_keep_alive_count_server_pings() {
let (socket, _) = listener.accept().await.expect("accept");
Http::new()
+ .with_timer(TokioTimer)
.http2_only(true)
.http2_keep_alive_interval(Duration::from_secs(1))
.http2_keep_alive_timeout(Duration::from_secs(1))
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -21,6 +21,9 @@ pub use futures_util::{
pub use hyper::{HeaderMap, StatusCode};
pub use std::net::SocketAddr;
+mod tokiort;
+pub use tokiort::TokioTimer;
+
#[allow(unused_macros)]
macro_rules! t {
(
diff --git /dev/null b/tests/support/tokiort.rs
new file mode 100644
--- /dev/null
+++ b/tests/support/tokiort.rs
@@ -0,0 +1,1 @@
+../../benches/support/tokiort.rs
\ No newline at end of file
|
hyperium/hyper
|
6fd696e10974f10b2c6b9d16393bbbfa21c2333f
|
[
"2962"
] |
0.3
|
7a41da5f601f59632d8c1514a75af720ed43413e
|
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -61,7 +61,7 @@ enum Kind {
/// [`Body::channel()`]: struct.Body.html#method.channel
/// [`Sender::abort()`]: struct.Sender.html#method.abort
#[must_use = "Sender does nothing unless sent on"]
-pub struct Sender {
+pub(crate) struct Sender {
want_rx: watch::Receiver,
data_tx: BodySender,
trailers_tx: Option<TrailersSender>,
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -75,7 +75,8 @@ impl Recv {
///
/// Useful when wanting to stream chunks from another thread.
#[inline]
- pub fn channel() -> (Sender, Recv) {
+ #[allow(unused)]
+ pub(crate) fn channel() -> (Sender, Recv) {
Self::new_channel(DecodedLength::CHUNKED, /*wanter =*/ false)
}
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -289,7 +290,7 @@ impl fmt::Debug for Recv {
impl Sender {
/// Check to see if this `Sender` can send more data.
- pub fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<crate::Result<()>> {
+ pub(crate) fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<crate::Result<()>> {
// Check if the receiver end has tried polling for the body yet
ready!(self.poll_want(cx)?);
self.data_tx
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -311,7 +312,8 @@ impl Sender {
}
/// Send data on data channel when it is ready.
- pub async fn send_data(&mut self, chunk: Bytes) -> crate::Result<()> {
+ #[allow(unused)]
+ pub(crate) async fn send_data(&mut self, chunk: Bytes) -> crate::Result<()> {
self.ready().await?;
self.data_tx
.try_send(Ok(chunk))
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -319,7 +321,8 @@ impl Sender {
}
/// Send trailers on trailers channel.
- pub async fn send_trailers(&mut self, trailers: HeaderMap) -> crate::Result<()> {
+ #[allow(unused)]
+ pub(crate) async fn send_trailers(&mut self, trailers: HeaderMap) -> crate::Result<()> {
let tx = match self.trailers_tx.take() {
Some(tx) => tx,
None => return Err(crate::Error::new_closed()),
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -339,14 +342,15 @@ impl Sender {
/// This is mostly useful for when trying to send from some other thread
/// that doesn't have an async context. If in an async context, prefer
/// `send_data()` instead.
- pub fn try_send_data(&mut self, chunk: Bytes) -> Result<(), Bytes> {
+ pub(crate) fn try_send_data(&mut self, chunk: Bytes) -> Result<(), Bytes> {
self.data_tx
.try_send(Ok(chunk))
.map_err(|err| err.into_inner().expect("just sent Ok"))
}
/// Aborts the body in an abnormal fashion.
- pub fn abort(self) {
+ #[allow(unused)]
+ pub(crate) fn abort(self) {
let _ = self
.data_tx
// clone so the send works even if buffer is full
diff --git a/src/body/mod.rs b/src/body/mod.rs
--- a/src/body/mod.rs
+++ b/src/body/mod.rs
@@ -20,7 +20,8 @@ pub use http_body::Body as HttpBody;
pub use http_body::SizeHint;
pub use self::aggregate::aggregate;
-pub use self::body::{Recv, Sender};
+pub use self::body::Recv;
+pub(crate) use self::body::Sender;
pub(crate) use self::length::DecodedLength;
pub use self::to_bytes::to_bytes;
|
I'll get started on this.
|
2022-08-29T17:44:54Z
| 2,970
|
Make `body::Sender` type and `Body::channel()` constructor private.
This is an "easy" task, in that the changes aren't *complicated*:
- Change the `hyper::body::Sender` type to `pub(crate)`.
- Change the `Body::channel()` method to `pub(crate)`.
- Replace any usage in the examples, tests, or docs with an appropriate alternative.
- This might be as simple as a `Full`, but if specifically showing a channel usage, or needing a channel behavior for a test, a `tokio::sync::mpsc` wrapped in `StreamBody` should work.
|
hyperium__hyper-2970
|
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1325,6 +1325,7 @@ test! {
}
mod conn {
+ use std::error::Error;
use std::io::{self, Read, Write};
use std::net::{SocketAddr, TcpListener};
use std::pin::Pin;
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1333,15 +1334,15 @@ mod conn {
use std::time::Duration;
use bytes::{Buf, Bytes};
- use futures_channel::oneshot;
+ use futures_channel::{mpsc, oneshot};
use futures_util::future::{self, poll_fn, FutureExt, TryFutureExt};
- use http_body_util::Empty;
- use hyper::upgrade::OnUpgrade;
+ use http_body_util::{Empty, StreamBody};
use tokio::io::{AsyncRead, AsyncReadExt as _, AsyncWrite, AsyncWriteExt as _, ReadBuf};
use tokio::net::{TcpListener as TkTcpListener, TcpStream};
use hyper::body::HttpBody;
use hyper::client::conn;
+ use hyper::upgrade::OnUpgrade;
use hyper::{self, Method, Recv, Request, Response, StatusCode};
use super::{concat, s, support, tcp_connect, FutureHyperExt};
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1524,17 +1525,23 @@ mod conn {
rt.spawn(conn.map_err(|e| panic!("conn error: {}", e)).map(|_| ()));
- let (mut sender, body) = Recv::channel();
+ let (mut sender, recv) = mpsc::channel::<Result<Bytes, Box<dyn Error + Send + Sync>>>(0);
+
let sender = thread::spawn(move || {
- sender.try_send_data("hello".into()).expect("try_send_data");
+ sender.try_send(Ok("hello".into())).expect("try_send_data");
support::runtime().block_on(rx).unwrap();
- sender.abort();
+
+ // Aborts the body in an abnormal fashion.
+ let _ = sender.try_send(Err(Box::new(std::io::Error::new(
+ io::ErrorKind::Other,
+ "body write aborted",
+ ))));
});
let req = Request::builder()
.method(Method::POST)
.uri("/")
- .body(body)
+ .body(StreamBody::new(recv))
.unwrap();
let res = client.send_request(req);
rt.block_on(res).unwrap_err();
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2111,7 +2118,7 @@ mod conn {
.http2_only(true)
.http2_keep_alive_interval(Duration::from_secs(1))
.http2_keep_alive_timeout(Duration::from_secs(1))
- .handshake::<_, Recv>(io)
+ .handshake(io)
.await
.expect("http handshake");
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2120,9 +2127,10 @@ mod conn {
});
// Use a channel to keep request stream open
- let (_tx, body) = hyper::Recv::channel();
- let req1 = http::Request::new(body);
- let _resp = client.send_request(req1).await.expect("send_request");
+ let (_tx, recv) = mpsc::channel::<Result<Bytes, Box<dyn Error + Send + Sync>>>(0);
+ let req = http::Request::new(StreamBody::new(recv));
+
+ let _resp = client.send_request(req).await.expect("send_request");
// sleep longer than keepalive would trigger
tokio::time::sleep(Duration::from_secs(4)).await;
|
hyperium/hyper
|
6fd696e10974f10b2c6b9d16393bbbfa21c2333f
|
[
"2839"
] |
0.3
|
d963e6a9504575116f63df2485d8480fdb9b6f0b
|
diff --git a/examples/client.rs b/examples/client.rs
--- a/examples/client.rs
+++ b/examples/client.rs
@@ -4,7 +4,7 @@ use std::env;
use bytes::Bytes;
use http_body_util::Empty;
-use hyper::{body::HttpBody as _, Request};
+use hyper::{body::Body as _, Request};
use tokio::io::{self, AsyncWriteExt as _};
use tokio::net::TcpStream;
diff --git a/examples/echo.rs b/examples/echo.rs
--- a/examples/echo.rs
+++ b/examples/echo.rs
@@ -4,7 +4,7 @@ use std::net::SocketAddr;
use bytes::Bytes;
use http_body_util::{combinators::BoxBody, BodyExt, Empty, Full};
-use hyper::body::HttpBody as _;
+use hyper::body::Body as _;
use hyper::server::conn::Http;
use hyper::service::service_fn;
use hyper::{Method, Recv, Request, Response, StatusCode};
diff --git a/examples/single_threaded.rs b/examples/single_threaded.rs
--- a/examples/single_threaded.rs
+++ b/examples/single_threaded.rs
@@ -6,7 +6,7 @@ use std::net::SocketAddr;
use std::rc::Rc;
use tokio::net::TcpListener;
-use hyper::body::{Bytes, HttpBody};
+use hyper::body::{Body as HttpBody, Bytes};
use hyper::header::{HeaderMap, HeaderValue};
use hyper::service::service_fn;
use hyper::{Error, Response};
diff --git a/src/body/aggregate.rs b/src/body/aggregate.rs
--- a/src/body/aggregate.rs
+++ b/src/body/aggregate.rs
@@ -1,11 +1,11 @@
use bytes::Buf;
-use super::HttpBody;
+use super::Body;
use crate::common::buf::BufList;
/// Aggregate the data buffers from a body asynchronously.
///
-/// The returned `impl Buf` groups the `Buf`s from the `HttpBody` without
+/// The returned `impl Buf` groups the `Buf`s from the `Body` without
/// copying them. This is ideal if you don't require a contiguous buffer.
///
/// # Note
diff --git a/src/body/aggregate.rs b/src/body/aggregate.rs
--- a/src/body/aggregate.rs
+++ b/src/body/aggregate.rs
@@ -15,7 +15,7 @@ use crate::common::buf::BufList;
/// `Content-Length` is a possibility, but it is not strictly mandated to be present.
pub async fn aggregate<T>(body: T) -> Result<impl Buf, T::Error>
where
- T: HttpBody,
+ T: Body,
{
let mut bufs = BufList::new();
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -5,7 +5,7 @@ use futures_channel::mpsc;
use futures_channel::oneshot;
use futures_core::Stream; // for mpsc::Receiver
use http::HeaderMap;
-use http_body::{Body as HttpBody, SizeHint};
+use http_body::{Body, SizeHint};
use super::DecodedLength;
use crate::common::Future;
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -18,7 +18,7 @@ type TrailersSender = oneshot::Sender<HeaderMap>;
/// A stream of `Bytes`, used when receiving bodies.
///
-/// A good default [`HttpBody`](crate::body::HttpBody) to use in many
+/// A good default [`Body`](crate::body::Body) to use in many
/// applications.
///
/// Note: To read the full body, use [`body::to_bytes`](crate::body::to_bytes)
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -195,7 +195,7 @@ impl Recv {
}
}
-impl HttpBody for Recv {
+impl Body for Recv {
type Data = Bytes;
type Error = crate::Error;
diff --git a/src/body/mod.rs b/src/body/mod.rs
--- a/src/body/mod.rs
+++ b/src/body/mod.rs
@@ -7,16 +7,16 @@
//!
//! There are two pieces to this in hyper:
//!
-//! - **The [`HttpBody`](HttpBody) trait** describes all possible bodies.
-//! hyper allows any body type that implements `HttpBody`, allowing
+//! - **The [`Body`](Body) trait** describes all possible bodies.
+//! hyper allows any body type that implements `Body`, allowing
//! applications to have fine-grained control over their streaming.
//! - **The [`Recv`](Recv) concrete type**, which is an implementation of
-//! `HttpBody`, and returned by hyper as a "receive stream" (so, for server
+//! `Body`, and returned by hyper as a "receive stream" (so, for server
//! requests and client responses). It is also a decent default implementation
//! if you don't have very custom needs of your send streams.
pub use bytes::{Buf, Bytes};
-pub use http_body::Body as HttpBody;
+pub use http_body::Body;
pub use http_body::SizeHint;
pub use self::aggregate::aggregate;
diff --git a/src/body/to_bytes.rs b/src/body/to_bytes.rs
--- a/src/body/to_bytes.rs
+++ b/src/body/to_bytes.rs
@@ -1,6 +1,6 @@
use bytes::{Buf, BufMut, Bytes};
-use super::HttpBody;
+use super::Body;
/// Concatenate the buffers from a body into a single `Bytes` asynchronously.
///
diff --git a/src/body/to_bytes.rs b/src/body/to_bytes.rs
--- a/src/body/to_bytes.rs
+++ b/src/body/to_bytes.rs
@@ -19,7 +19,7 @@ use super::HttpBody;
/// ```
/// # use hyper::{Recv, Response};
/// # async fn doc(response: Response<Recv>) -> hyper::Result<()> {
-/// # use hyper::body::HttpBody;
+/// # use hyper::body::Body;
/// // let response: Response<Body> ...
///
/// const MAX_ALLOWED_RESPONSE_SIZE: u64 = 1024;
diff --git a/src/body/to_bytes.rs b/src/body/to_bytes.rs
--- a/src/body/to_bytes.rs
+++ b/src/body/to_bytes.rs
@@ -39,7 +39,7 @@ use super::HttpBody;
/// ```
pub async fn to_bytes<T>(body: T) -> Result<Bytes, T::Error>
where
- T: HttpBody,
+ T: Body,
{
futures_util::pin_mut!(body);
diff --git a/src/client/conn/http1.rs b/src/client/conn/http1.rs
--- a/src/client/conn/http1.rs
+++ b/src/client/conn/http1.rs
@@ -9,7 +9,7 @@ use httparse::ParserConfig;
use tokio::io::{AsyncRead, AsyncWrite};
use crate::Recv;
-use crate::body::HttpBody;
+use crate::body::Body;
use crate::common::{
exec::{BoxSendFuture, Exec},
task, Future, Pin, Poll,
diff --git a/src/client/conn/http1.rs b/src/client/conn/http1.rs
--- a/src/client/conn/http1.rs
+++ b/src/client/conn/http1.rs
@@ -35,7 +35,7 @@ pub struct SendRequest<B> {
pub struct Connection<T, B>
where
T: AsyncRead + AsyncWrite + Send + 'static,
- B: HttpBody + 'static,
+ B: Body + 'static,
{
inner: Option<Dispatcher<T, B>>,
}
diff --git a/src/client/conn/http1.rs b/src/client/conn/http1.rs
--- a/src/client/conn/http1.rs
+++ b/src/client/conn/http1.rs
@@ -102,7 +102,7 @@ impl<B> SendRequest<B> {
impl<B> SendRequest<B>
where
- B: HttpBody + 'static,
+ B: Body + 'static,
{
/// Sends a `Request` on the associated connection.
///
diff --git a/src/client/conn/http1.rs b/src/client/conn/http1.rs
--- a/src/client/conn/http1.rs
+++ b/src/client/conn/http1.rs
@@ -180,7 +180,7 @@ impl<B> fmt::Debug for SendRequest<B> {
impl<T, B> fmt::Debug for Connection<T, B>
where
T: AsyncRead + AsyncWrite + fmt::Debug + Send + 'static,
- B: HttpBody + 'static,
+ B: Body + 'static,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Connection").finish()
diff --git a/src/client/conn/http1.rs b/src/client/conn/http1.rs
--- a/src/client/conn/http1.rs
+++ b/src/client/conn/http1.rs
@@ -190,7 +190,7 @@ where
impl<T, B> Future for Connection<T, B>
where
T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
- B: HttpBody + Send + 'static,
+ B: Body + Send + 'static,
B::Data: Send,
B::Error: Into<Box<dyn StdError + Send + Sync>>,
{
diff --git a/src/client/conn/http1.rs b/src/client/conn/http1.rs
--- a/src/client/conn/http1.rs
+++ b/src/client/conn/http1.rs
@@ -427,7 +427,7 @@ impl Builder {
) -> impl Future<Output = crate::Result<(SendRequest<B>, Connection<T, B>)>>
where
T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
- B: HttpBody + 'static,
+ B: Body + 'static,
B::Data: Send,
B::Error: Into<Box<dyn StdError + Send + Sync>>,
{
diff --git a/src/client/conn/http2.rs b/src/client/conn/http2.rs
--- a/src/client/conn/http2.rs
+++ b/src/client/conn/http2.rs
@@ -11,7 +11,7 @@ use http::{Request, Response};
use tokio::io::{AsyncRead, AsyncWrite};
use crate::Recv;
-use crate::body::HttpBody;
+use crate::body::Body;
use crate::common::{
exec::{BoxSendFuture, Exec},
task, Future, Pin, Poll,
diff --git a/src/client/conn/http2.rs b/src/client/conn/http2.rs
--- a/src/client/conn/http2.rs
+++ b/src/client/conn/http2.rs
@@ -33,7 +33,7 @@ pub struct SendRequest<B> {
pub struct Connection<T, B>
where
T: AsyncRead + AsyncWrite + Send + 'static,
- B: HttpBody + 'static,
+ B: Body + 'static,
{
inner: (PhantomData<T>, proto::h2::ClientTask<B>),
}
diff --git a/src/client/conn/http2.rs b/src/client/conn/http2.rs
--- a/src/client/conn/http2.rs
+++ b/src/client/conn/http2.rs
@@ -96,7 +96,7 @@ impl<B> SendRequest<B> {
impl<B> SendRequest<B>
where
- B: HttpBody + 'static,
+ B: Body + 'static,
{
/// Sends a `Request` on the associated connection.
///
diff --git a/src/client/conn/http2.rs b/src/client/conn/http2.rs
--- a/src/client/conn/http2.rs
+++ b/src/client/conn/http2.rs
@@ -174,7 +174,7 @@ impl<B> fmt::Debug for SendRequest<B> {
impl<T, B> fmt::Debug for Connection<T, B>
where
T: AsyncRead + AsyncWrite + fmt::Debug + Send + 'static,
- B: HttpBody + 'static,
+ B: Body + 'static,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Connection").finish()
diff --git a/src/client/conn/http2.rs b/src/client/conn/http2.rs
--- a/src/client/conn/http2.rs
+++ b/src/client/conn/http2.rs
@@ -184,7 +184,7 @@ where
impl<T, B> Future for Connection<T, B>
where
T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
- B: HttpBody + Send + 'static,
+ B: Body + Send + 'static,
B::Data: Send,
B::Error: Into<Box<dyn StdError + Send + Sync>>,
{
diff --git a/src/client/conn/http2.rs b/src/client/conn/http2.rs
--- a/src/client/conn/http2.rs
+++ b/src/client/conn/http2.rs
@@ -388,7 +388,7 @@ impl Builder {
) -> impl Future<Output = crate::Result<(SendRequest<B>, Connection<T, B>)>>
where
T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
- B: HttpBody + 'static,
+ B: Body + 'static,
B::Data: Send,
B::Error: Into<Box<dyn StdError + Send + Sync>>,
{
diff --git a/src/client/conn/mod.rs b/src/client/conn/mod.rs
--- a/src/client/conn/mod.rs
+++ b/src/client/conn/mod.rs
@@ -73,7 +73,7 @@ use tower_service::Service;
use tracing::{debug, trace};
use super::dispatch;
-use crate::body::HttpBody;
+use crate::body::Body;
#[cfg(not(all(feature = "http1", feature = "http2")))]
use crate::common::Never;
use crate::common::{
diff --git a/src/client/conn/mod.rs b/src/client/conn/mod.rs
--- a/src/client/conn/mod.rs
+++ b/src/client/conn/mod.rs
@@ -108,7 +108,7 @@ pin_project! {
#[project = ProtoClientProj]
enum ProtoClient<T, B>
where
- B: HttpBody,
+ B: Body,
{
H1 {
#[pin]
diff --git a/src/client/conn/mod.rs b/src/client/conn/mod.rs
--- a/src/client/conn/mod.rs
+++ b/src/client/conn/mod.rs
@@ -130,7 +130,7 @@ pub async fn handshake<T, B>(
) -> crate::Result<(SendRequest<B>, Connection<T, B>)>
where
T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
- B: HttpBody + 'static,
+ B: Body + 'static,
B::Data: Send,
B::Error: Into<Box<dyn StdError + Send + Sync>>,
{
diff --git a/src/client/conn/mod.rs b/src/client/conn/mod.rs
--- a/src/client/conn/mod.rs
+++ b/src/client/conn/mod.rs
@@ -150,7 +150,7 @@ pub struct SendRequest<B> {
pub struct Connection<T, B>
where
T: AsyncRead + AsyncWrite + Send + 'static,
- B: HttpBody + 'static,
+ B: Body + 'static,
{
inner: Option<ProtoClient<T, B>>,
}
diff --git a/src/client/conn/mod.rs b/src/client/conn/mod.rs
--- a/src/client/conn/mod.rs
+++ b/src/client/conn/mod.rs
@@ -232,7 +232,7 @@ impl<B> SendRequest<B> {
impl<B> SendRequest<B>
where
- B: HttpBody + 'static,
+ B: Body + 'static,
{
/// Sends a `Request` on the associated connection.
///
diff --git a/src/client/conn/mod.rs b/src/client/conn/mod.rs
--- a/src/client/conn/mod.rs
+++ b/src/client/conn/mod.rs
@@ -266,7 +266,7 @@ where
impl<B> Service<Request<B>> for SendRequest<B>
where
- B: HttpBody + 'static,
+ B: Body + 'static,
{
type Response = Response<Recv>;
type Error = crate::Error;
diff --git a/src/client/conn/mod.rs b/src/client/conn/mod.rs
--- a/src/client/conn/mod.rs
+++ b/src/client/conn/mod.rs
@@ -292,7 +292,7 @@ impl<B> fmt::Debug for SendRequest<B> {
impl<T, B> Connection<T, B>
where
T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
- B: HttpBody + Unpin + Send + 'static,
+ B: Body + Unpin + Send + 'static,
B::Data: Send,
B::Error: Into<Box<dyn StdError + Send + Sync>>,
{
diff --git a/src/client/conn/mod.rs b/src/client/conn/mod.rs
--- a/src/client/conn/mod.rs
+++ b/src/client/conn/mod.rs
@@ -375,7 +375,7 @@ where
impl<T, B> Future for Connection<T, B>
where
T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
- B: HttpBody + Send + 'static,
+ B: Body + Send + 'static,
B::Data: Send,
B::Error: Into<Box<dyn StdError + Send + Sync>>,
{
diff --git a/src/client/conn/mod.rs b/src/client/conn/mod.rs
--- a/src/client/conn/mod.rs
+++ b/src/client/conn/mod.rs
@@ -403,7 +403,7 @@ where
impl<T, B> fmt::Debug for Connection<T, B>
where
T: AsyncRead + AsyncWrite + fmt::Debug + Send + 'static,
- B: HttpBody + 'static,
+ B: Body + 'static,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Connection").finish()
diff --git a/src/client/conn/mod.rs b/src/client/conn/mod.rs
--- a/src/client/conn/mod.rs
+++ b/src/client/conn/mod.rs
@@ -806,7 +806,7 @@ impl Builder {
) -> impl Future<Output = crate::Result<(SendRequest<B>, Connection<T, B>)>>
where
T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
- B: HttpBody + 'static,
+ B: Body + 'static,
B::Data: Send,
B::Error: Into<Box<dyn StdError + Send + Sync>>,
{
diff --git a/src/client/conn/mod.rs b/src/client/conn/mod.rs
--- a/src/client/conn/mod.rs
+++ b/src/client/conn/mod.rs
@@ -905,7 +905,7 @@ impl fmt::Debug for ResponseFuture {
impl<T, B> Future for ProtoClient<T, B>
where
T: AsyncRead + AsyncWrite + Send + Unpin + 'static,
- B: HttpBody + Send + 'static,
+ B: Body + Send + 'static,
B::Data: Send,
B::Error: Into<Box<dyn StdError + Send + Sync>>,
{
diff --git a/src/client/conn/mod.rs b/src/client/conn/mod.rs
--- a/src/client/conn/mod.rs
+++ b/src/client/conn/mod.rs
@@ -938,7 +938,7 @@ impl<B: Send> AssertSendSync for SendRequest<B> {}
impl<T: Send, B: Send> AssertSend for Connection<T, B>
where
T: AsyncRead + AsyncWrite + Send + 'static,
- B: HttpBody + 'static,
+ B: Body + 'static,
B::Data: Send,
{
}
diff --git a/src/client/conn/mod.rs b/src/client/conn/mod.rs
--- a/src/client/conn/mod.rs
+++ b/src/client/conn/mod.rs
@@ -947,7 +947,7 @@ where
impl<T: Send + Sync, B: Send + Sync> AssertSendSync for Connection<T, B>
where
T: AsyncRead + AsyncWrite + Send + 'static,
- B: HttpBody + 'static,
+ B: Body + 'static,
B::Data: Send + Sync + 'static,
{
}
diff --git a/src/common/exec.rs b/src/common/exec.rs
--- a/src/common/exec.rs
+++ b/src/common/exec.rs
@@ -4,13 +4,13 @@ use std::pin::Pin;
use std::sync::Arc;
#[cfg(feature = "server")]
-use crate::body::HttpBody;
+use crate::body::Body;
#[cfg(all(feature = "http2", feature = "server"))]
use crate::proto::h2::server::H2Stream;
use crate::rt::Executor;
#[cfg(feature = "server")]
-pub trait ConnStreamExec<F, B: HttpBody>: Clone {
+pub trait ConnStreamExec<F, B: Body>: Clone {
fn execute_h2stream(&mut self, fut: H2Stream<F, B>);
}
diff --git a/src/common/exec.rs b/src/common/exec.rs
--- a/src/common/exec.rs
+++ b/src/common/exec.rs
@@ -60,7 +60,7 @@ impl fmt::Debug for Exec {
impl<F, B> ConnStreamExec<F, B> for Exec
where
H2Stream<F, B>: Future<Output = ()> + Send + 'static,
- B: HttpBody,
+ B: Body,
{
fn execute_h2stream(&mut self, fut: H2Stream<F, B>) {
self.execute(fut)
diff --git a/src/common/exec.rs b/src/common/exec.rs
--- a/src/common/exec.rs
+++ b/src/common/exec.rs
@@ -74,7 +74,7 @@ impl<E, F, B> ConnStreamExec<F, B> for E
where
E: Executor<H2Stream<F, B>> + Clone,
H2Stream<F, B>: Future<Output = ()>,
- B: HttpBody,
+ B: Body,
{
fn execute_h2stream(&mut self, fut: H2Stream<F, B>) {
self.execute(fut)
diff --git a/src/common/exec.rs b/src/common/exec.rs
--- a/src/common/exec.rs
+++ b/src/common/exec.rs
@@ -94,7 +94,7 @@ pub struct H2Stream<F, B>(std::marker::PhantomData<(F, B)>);
impl<F, B, E> Future for H2Stream<F, B>
where
F: Future<Output = Result<http::Response<B>, E>>,
- B: crate::body::HttpBody,
+ B: crate::body::Body,
B::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
E: Into<Box<dyn std::error::Error + Send + Sync>>,
{
diff --git a/src/error.rs b/src/error.rs
--- a/src/error.rs
+++ b/src/error.rs
@@ -84,7 +84,7 @@ pub(super) enum Header {
#[derive(Debug)]
pub(super) enum User {
- /// Error calling user's HttpBody::poll_data().
+ /// Error calling user's Body::poll_data().
#[cfg(any(feature = "http1", feature = "http2"))]
Body,
/// The user aborted writing of the outgoing body.
diff --git a/src/error.rs b/src/error.rs
--- a/src/error.rs
+++ b/src/error.rs
@@ -384,7 +384,7 @@ impl Error {
Kind::Io => "connection error",
#[cfg(any(feature = "http1", feature = "http2"))]
- Kind::User(User::Body) => "error from user's HttpBody stream",
+ Kind::User(User::Body) => "error from user's Body stream",
Kind::User(User::BodyWriteAborted) => "user body write aborted",
#[cfg(any(feature = "http1", feature = "http2"))]
Kind::User(User::Service) => "error from user's Service",
diff --git a/src/ffi/body.rs b/src/ffi/body.rs
--- a/src/ffi/body.rs
+++ b/src/ffi/body.rs
@@ -8,7 +8,7 @@ use libc::{c_int, size_t};
use super::task::{hyper_context, hyper_task, hyper_task_return_type, AsTaskType};
use super::{UserDataPointer, HYPER_ITER_CONTINUE};
-use crate::body::{Bytes, HttpBody as _, Recv};
+use crate::body::{Body as _, Bytes, Recv};
/// A streaming HTTP body.
pub struct hyper_body(pub(super) Recv);
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -6,12 +6,12 @@ use tokio::io::{AsyncRead, AsyncWrite};
use tracing::{debug, trace};
use super::{Http1Transaction, Wants};
-use crate::body::{Recv, DecodedLength, HttpBody};
+use crate::body::{Recv, DecodedLength, Body};
use crate::common::{task, Future, Pin, Poll, Unpin};
use crate::proto::{BodyLength, Conn, Dispatched, MessageHead, RequestHead};
use crate::upgrade::OnUpgrade;
-pub(crate) struct Dispatcher<D, Bs: HttpBody, I, T> {
+pub(crate) struct Dispatcher<D, Bs: Body, I, T> {
conn: Conn<I, Bs::Data, T>,
dispatch: D,
body_tx: Option<crate::body::Sender>,
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -65,7 +65,7 @@ where
D::PollError: Into<Box<dyn StdError + Send + Sync>>,
I: AsyncRead + AsyncWrite + Unpin,
T: Http1Transaction + Unpin,
- Bs: HttpBody + 'static,
+ Bs: Body + 'static,
Bs::Error: Into<Box<dyn StdError + Send + Sync>>,
{
pub(crate) fn new(dispatch: D, conn: Conn<I, Bs::Data, T>) -> Self {
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -403,7 +403,7 @@ where
D::PollError: Into<Box<dyn StdError + Send + Sync>>,
I: AsyncRead + AsyncWrite + Unpin,
T: Http1Transaction + Unpin,
- Bs: HttpBody + 'static,
+ Bs: Body + 'static,
Bs::Error: Into<Box<dyn StdError + Send + Sync>>,
{
type Output = crate::Result<Dispatched>;
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -464,7 +464,7 @@ cfg_server! {
where
S: HttpService<Recv, ResBody = Bs>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
- Bs: HttpBody,
+ Bs: Body,
{
type PollItem = MessageHead<http::StatusCode>;
type PollBody = Bs;
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -540,7 +540,7 @@ cfg_client! {
impl<B> Dispatch for Client<B>
where
- B: HttpBody,
+ B: Body,
{
type PollItem = RequestHead;
type PollBody = B;
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -673,7 +673,7 @@ impl Server {
}
match msg.body {
Some(BodyLength::Known(known_len)) => {
- // The HttpBody claims to know a length, and
+ // The Body claims to know a length, and
// the headers are already set. For performance
// reasons, we are just going to trust that
// the values match.
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -706,7 +706,7 @@ impl Server {
continue 'headers;
}
Some(BodyLength::Unknown) => {
- // The HttpBody impl didn't know how long the
+ // The Body impl didn't know how long the
// body is, but a length header was included.
// We have to parse the value to return our
// Encoder...
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1243,7 +1243,7 @@ impl Client {
let headers = &mut head.headers;
// If the user already set specific headers, we should respect them, regardless
- // of what the HttpBody knows about itself. They set them for a reason.
+ // of what the Body knows about itself. They set them for a reason.
// Because of the borrow checker, we can't check the for an existing
// Content-Length header while holding an `Entry` for the Transfer-Encoding
diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs
--- a/src/proto/h2/client.rs
+++ b/src/proto/h2/client.rs
@@ -12,7 +12,7 @@ use tokio::io::{AsyncRead, AsyncWrite};
use tracing::{debug, trace, warn};
use super::{ping, H2Upgraded, PipeToSendStream, SendBuf};
-use crate::body::HttpBody;
+use crate::body::Body;
use crate::common::{exec::Exec, task, Future, Never, Pin, Poll};
use crate::ext::Protocol;
use crate::headers;
diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs
--- a/src/proto/h2/client.rs
+++ b/src/proto/h2/client.rs
@@ -112,7 +112,7 @@ pub(crate) async fn handshake<T, B>(
) -> crate::Result<ClientTask<B>>
where
T: AsyncRead + AsyncWrite + Send + Unpin + 'static,
- B: HttpBody,
+ B: Body,
B::Data: Send + 'static,
{
let (h2_tx, mut conn) = new_builder(config)
diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs
--- a/src/proto/h2/client.rs
+++ b/src/proto/h2/client.rs
@@ -195,7 +195,7 @@ where
pub(crate) struct ClientTask<B>
where
- B: HttpBody,
+ B: Body,
{
ping: ping::Recorder,
conn_drop_ref: ConnDropRef,
diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs
--- a/src/proto/h2/client.rs
+++ b/src/proto/h2/client.rs
@@ -207,7 +207,7 @@ where
impl<B> ClientTask<B>
where
- B: HttpBody + 'static,
+ B: Body + 'static,
{
pub(crate) fn is_extended_connect_protocol_enabled(&self) -> bool {
self.h2_tx.is_extended_connect_protocol_enabled()
diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs
--- a/src/proto/h2/client.rs
+++ b/src/proto/h2/client.rs
@@ -216,7 +216,7 @@ where
impl<B> Future for ClientTask<B>
where
- B: HttpBody + Send + 'static,
+ B: Body + Send + 'static,
B::Data: Send,
B::Error: Into<Box<dyn StdError + Send + Sync>>,
{
diff --git a/src/proto/h2/mod.rs b/src/proto/h2/mod.rs
--- a/src/proto/h2/mod.rs
+++ b/src/proto/h2/mod.rs
@@ -10,7 +10,7 @@ use std::task::Context;
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
use tracing::{debug, trace, warn};
-use crate::body::HttpBody;
+use crate::body::Body;
use crate::common::{task, Future, Pin, Poll};
use crate::proto::h2::ping::Recorder;
diff --git a/src/proto/h2/mod.rs b/src/proto/h2/mod.rs
--- a/src/proto/h2/mod.rs
+++ b/src/proto/h2/mod.rs
@@ -87,7 +87,7 @@ fn strip_connection_headers(headers: &mut HeaderMap, is_request: bool) {
pin_project! {
struct PipeToSendStream<S>
where
- S: HttpBody,
+ S: Body,
{
body_tx: SendStream<SendBuf<S::Data>>,
data_done: bool,
diff --git a/src/proto/h2/mod.rs b/src/proto/h2/mod.rs
--- a/src/proto/h2/mod.rs
+++ b/src/proto/h2/mod.rs
@@ -98,7 +98,7 @@ pin_project! {
impl<S> PipeToSendStream<S>
where
- S: HttpBody,
+ S: Body,
{
fn new(stream: S, tx: SendStream<SendBuf<S::Data>>) -> PipeToSendStream<S> {
PipeToSendStream {
diff --git a/src/proto/h2/mod.rs b/src/proto/h2/mod.rs
--- a/src/proto/h2/mod.rs
+++ b/src/proto/h2/mod.rs
@@ -111,7 +111,7 @@ where
impl<S> Future for PipeToSendStream<S>
where
- S: HttpBody,
+ S: Body,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
{
type Output = crate::Result<()>;
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -12,7 +12,7 @@ use tokio::io::{AsyncRead, AsyncWrite};
use tracing::{debug, trace, warn};
use super::{ping, PipeToSendStream, SendBuf};
-use crate::body::HttpBody;
+use crate::body::Body;
use crate::common::exec::ConnStreamExec;
use crate::common::{date, task, Future, Pin, Poll};
use crate::ext::Protocol;
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -77,7 +77,7 @@ pin_project! {
pub(crate) struct Server<T, S, B, E>
where
S: HttpService<Recv>,
- B: HttpBody,
+ B: Body,
{
exec: E,
service: S,
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -87,7 +87,7 @@ pin_project! {
enum State<T, B>
where
- B: HttpBody,
+ B: Body,
{
Handshaking {
ping_config: ping::Config,
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -99,7 +99,7 @@ where
struct Serving<T, B>
where
- B: HttpBody,
+ B: Body,
{
ping: Option<(ping::Recorder, ping::Ponger)>,
conn: Connection<T, SendBuf<B::Data>>,
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -111,7 +111,7 @@ where
T: AsyncRead + AsyncWrite + Unpin,
S: HttpService<Recv, ResBody = B>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
- B: HttpBody + 'static,
+ B: Body + 'static,
E: ConnStreamExec<S::Future, B>,
{
pub(crate) fn new(io: T, service: S, config: &Config, exec: E) -> Server<T, S, B, E> {
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -183,7 +183,7 @@ where
T: AsyncRead + AsyncWrite + Unpin,
S: HttpService<Recv, ResBody = B>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
- B: HttpBody + 'static,
+ B: Body + 'static,
E: ConnStreamExec<S::Future, B>,
{
type Output = crate::Result<Dispatched>;
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -227,7 +227,7 @@ where
impl<T, B> Serving<T, B>
where
T: AsyncRead + AsyncWrite + Unpin,
- B: HttpBody + 'static,
+ B: Body + 'static,
{
fn poll_server<S, E>(
&mut self,
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -373,7 +373,7 @@ pin_project! {
#[allow(missing_debug_implementations)]
pub struct H2Stream<F, B>
where
- B: HttpBody,
+ B: Body,
{
reply: SendResponse<SendBuf<B::Data>>,
#[pin]
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -385,7 +385,7 @@ pin_project! {
#[project = H2StreamStateProj]
enum H2StreamState<F, B>
where
- B: HttpBody,
+ B: Body,
{
Service {
#[pin]
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -407,7 +407,7 @@ struct ConnectParts {
impl<F, B> H2Stream<F, B>
where
- B: HttpBody,
+ B: Body,
{
fn new(
fut: F,
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -437,7 +437,7 @@ macro_rules! reply {
impl<F, B, E> H2Stream<F, B>
where
F: Future<Output = Result<Response<B>, E>>,
- B: HttpBody,
+ B: Body,
B::Data: 'static,
B::Error: Into<Box<dyn StdError + Send + Sync>>,
E: Into<Box<dyn StdError + Send + Sync>>,
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -530,7 +530,7 @@ where
impl<F, B, E> Future for H2Stream<F, B>
where
F: Future<Output = Result<Response<B>, E>>,
- B: HttpBody,
+ B: Body,
B::Data: 'static,
B::Error: Into<Box<dyn StdError + Send + Sync>>,
E: Into<Box<dyn StdError + Send + Sync>>,
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -67,7 +67,7 @@ cfg_feature! {
use tokio::io::{AsyncRead, AsyncWrite};
use tracing::trace;
- use crate::body::{Recv, HttpBody};
+ use crate::body::{Recv, Body};
use crate::common::{task, Future, Pin, Poll, Unpin};
#[cfg(not(all(feature = "http1", feature = "http2")))]
use crate::common::Never;
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -153,7 +153,7 @@ pin_project! {
pub(super) enum ProtoServer<T, B, S, E = Exec>
where
S: HttpService<Recv>,
- B: HttpBody,
+ B: Body,
{
H1 {
#[pin]
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -601,7 +601,7 @@ impl<E> Http<E> {
where
S: HttpService<Recv, ResBody = Bd>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
- Bd: HttpBody + 'static,
+ Bd: Body + 'static,
Bd::Error: Into<Box<dyn StdError + Send + Sync>>,
I: AsyncRead + AsyncWrite + Unpin,
E: ConnStreamExec<S::Future, Bd>,
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -682,7 +682,7 @@ where
S: HttpService<Recv, ResBody = B>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
I: AsyncRead + AsyncWrite + Unpin,
- B: HttpBody + 'static,
+ B: Body + 'static,
B::Error: Into<Box<dyn StdError + Send + Sync>>,
E: ConnStreamExec<S::Future, B>,
{
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -852,7 +852,7 @@ where
S: HttpService<Recv, ResBody = B>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
I: AsyncRead + AsyncWrite + Unpin + 'static,
- B: HttpBody + 'static,
+ B: Body + 'static,
B::Error: Into<Box<dyn StdError + Send + Sync>>,
E: ConnStreamExec<S::Future, B>,
{
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -931,7 +931,7 @@ where
T: AsyncRead + AsyncWrite + Unpin,
S: HttpService<Recv, ResBody = B>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
- B: HttpBody + 'static,
+ B: Body + 'static,
B::Error: Into<Box<dyn StdError + Send + Sync>>,
E: ConnStreamExec<S::Future, B>,
{
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -974,7 +974,7 @@ mod upgrades {
S: HttpService<Recv, ResBody = B>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
I: AsyncRead + AsyncWrite + Unpin,
- B: HttpBody + 'static,
+ B: Body + 'static,
B::Error: Into<Box<dyn StdError + Send + Sync>>,
E: ConnStreamExec<S::Future, B>,
{
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -992,7 +992,7 @@ mod upgrades {
S: HttpService<Recv, ResBody = B>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
I: AsyncRead + AsyncWrite + Unpin + Send + 'static,
- B: HttpBody + 'static,
+ B: Body + 'static,
B::Error: Into<Box<dyn StdError + Send + Sync>>,
E: ConnStreamExec<S::Future, B>,
{
diff --git a/src/service/http.rs b/src/service/http.rs
--- a/src/service/http.rs
+++ b/src/service/http.rs
@@ -1,13 +1,13 @@
use std::error::Error as StdError;
-use crate::body::HttpBody;
+use crate::body::Body;
use crate::common::{task, Future, Poll};
use crate::{Request, Response};
/// An asynchronous function from `Request` to `Response`.
pub trait HttpService<ReqBody>: sealed::Sealed<ReqBody> {
- /// The `HttpBody` body of the `http::Response`.
- type ResBody: HttpBody;
+ /// The `Body` body of the `http::Response`.
+ type ResBody: Body;
/// The error type that can occur within this `Service`.
///
diff --git a/src/service/http.rs b/src/service/http.rs
--- a/src/service/http.rs
+++ b/src/service/http.rs
@@ -29,7 +29,7 @@ pub trait HttpService<ReqBody>: sealed::Sealed<ReqBody> {
impl<T, B1, B2> HttpService<B1> for T
where
T: tower_service::Service<Request<B1>, Response = Response<B2>>,
- B2: HttpBody,
+ B2: Body,
T::Error: Into<Box<dyn StdError + Send + Sync>>,
{
type ResBody = B2;
diff --git a/src/service/http.rs b/src/service/http.rs
--- a/src/service/http.rs
+++ b/src/service/http.rs
@@ -49,7 +49,7 @@ where
impl<T, B1, B2> sealed::Sealed<B1> for T
where
T: tower_service::Service<Request<B1>, Response = Response<B2>>,
- B2: HttpBody,
+ B2: Body,
{
}
diff --git a/src/service/util.rs b/src/service/util.rs
--- a/src/service/util.rs
+++ b/src/service/util.rs
@@ -2,7 +2,7 @@ use std::error::Error as StdError;
use std::fmt;
use std::marker::PhantomData;
-use crate::body::HttpBody;
+use crate::body::Body;
use crate::common::{task, Future, Poll};
use crate::{Request, Response};
diff --git a/src/service/util.rs b/src/service/util.rs
--- a/src/service/util.rs
+++ b/src/service/util.rs
@@ -47,10 +47,10 @@ impl<F, ReqBody, Ret, ResBody, E> tower_service::Service<crate::Request<ReqBody>
for ServiceFn<F, ReqBody>
where
F: FnMut(Request<ReqBody>) -> Ret,
- ReqBody: HttpBody,
+ ReqBody: Body,
Ret: Future<Output = Result<Response<ResBody>, E>>,
E: Into<Box<dyn StdError + Send + Sync>>,
- ResBody: HttpBody,
+ ResBody: Body,
{
type Response = crate::Response<ResBody>;
type Error = E;
|
What would be the advantage in case of import both trait and struct?
The struct will be removed, as detailed in #2345.
I am just starting up with Hyper so would like to work on this task (if that's ok). By renaming and exporting the `http_body::Body` trait as just `Body`, I assume you are talking about updating the following:
https://github.com/hyperium/hyper/blob/84f6ae78d62f0ca3b85f86ea92887dc46ed9717e/src/body/mod.rs#L19-L23
But since the concrete `Body` type is already exported as `Body`, wouldn't that cause a conflict? Should I get rid of the concrete type export?
Yes, we'd need to finish removing parts of `Body`, and renaming _that_. Then this one would be straightforward.
Is there a separate task that I can work on to remove parts of `Body`?
@RajivTS The top comment in #2345 includes a list of tasks, and I tried to put them in order of unblocking the next one. Some are linked to specific issues, we can make more of them issues too.
This is no longer blocked, since `Body` was renamed to `Recv` as a stop-gap (#2963).
|
2022-08-28T14:30:00Z
| 2,969
|
Rename the HttpBody export to Body
When the concrete `Body` struct is removed, we can export the `http_body::Body` trait as just `Body`.
|
hyperium__hyper-2969
|
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -386,7 +386,7 @@ mod tests {
use std::mem;
use std::task::Poll;
- use super::{DecodedLength, HttpBody, Recv, Sender, SizeHint};
+ use super::{Body, DecodedLength, Recv, Sender, SizeHint};
#[test]
fn test_size_of() {
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -402,7 +402,7 @@ mod tests {
body_expected_size,
);
- assert_eq!(body_size, mem::size_of::<Option<Recv>>(), "Option<Body>");
+ assert_eq!(body_size, mem::size_of::<Option<Recv>>(), "Option<Recv>");
assert_eq!(
mem::size_of::<Sender>(),
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1340,7 +1340,7 @@ mod conn {
use tokio::io::{AsyncRead, AsyncReadExt as _, AsyncWrite, AsyncWriteExt as _, ReadBuf};
use tokio::net::{TcpListener as TkTcpListener, TcpStream};
- use hyper::body::HttpBody;
+ use hyper::body::Body;
use hyper::client::conn;
use hyper::upgrade::OnUpgrade;
use hyper::{self, Method, Recv, Request, Response, StatusCode};
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1449,7 +1449,7 @@ mod conn {
#[test]
fn incoming_content_length() {
- use hyper::body::HttpBody;
+ use hyper::body::Body;
let server = TcpListener::bind("127.0.0.1:0").unwrap();
let addr = server.local_addr().unwrap();
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -25,7 +25,7 @@ use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::{TcpListener as TkTcpListener, TcpListener, TcpStream as TkTcpStream};
-use hyper::body::HttpBody;
+use hyper::body::Body;
use hyper::server::conn::Http;
use hyper::service::service_fn;
use hyper::{Method, Recv, Request, Response, StatusCode, Uri, Version};
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -260,7 +260,7 @@ mod response_body_lengths {
fn auto_response_with_unknown_length() {
run_test(TestCase {
version: 1,
- // no headers means trying to guess from HttpBody
+ // no headers means trying to guess from Body
headers: &[],
body: Bd::Unknown("foo bar baz"),
expects_chunked: true,
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -272,7 +272,7 @@ mod response_body_lengths {
fn auto_response_with_known_length() {
run_test(TestCase {
version: 1,
- // no headers means trying to guess from HttpBody
+ // no headers means trying to guess from Body
headers: &[],
body: Bd::Known("foo bar baz"),
expects_chunked: false,
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -284,7 +284,7 @@ mod response_body_lengths {
fn auto_response_known_empty() {
run_test(TestCase {
version: 1,
- // no headers means trying to guess from HttpBody
+ // no headers means trying to guess from Body
headers: &[],
body: Bd::Known(""),
expects_chunked: false,
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -296,7 +296,7 @@ mod response_body_lengths {
fn http10_auto_response_with_unknown_length() {
run_test(TestCase {
version: 0,
- // no headers means trying to guess from HttpBody
+ // no headers means trying to guess from Body
headers: &[],
body: Bd::Unknown("foo bar baz"),
expects_chunked: false,
|
hyperium/hyper
|
6fd696e10974f10b2c6b9d16393bbbfa21c2333f
|
[
"2922"
] |
0.3
|
952756b916eff4fc9482f7d38c6fb606614c2c12
|
diff --git a/Cargo.toml b/Cargo.toml
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -181,11 +181,6 @@ name = "state"
path = "examples/state.rs"
required-features = ["full"]
-[[example]]
-name = "tower_server"
-path = "examples/tower_server.rs"
-required-features = ["full"]
-
[[example]]
name = "upgrades"
path = "examples/upgrades.rs"
diff --git a/benches/server.rs b/benches/server.rs
--- a/benches/server.rs
+++ b/benches/server.rs
@@ -8,8 +8,9 @@ use std::net::{SocketAddr, TcpListener, TcpStream};
use std::sync::mpsc;
use std::time::Duration;
+use bytes::Bytes;
use futures_util::{stream, StreamExt};
-use http_body_util::{BodyExt, StreamBody};
+use http_body_util::{BodyExt, Full, StreamBody};
use tokio::sync::oneshot;
use hyper::server::conn::Http;
diff --git a/benches/server.rs b/benches/server.rs
--- a/benches/server.rs
+++ b/benches/server.rs
@@ -87,8 +88,8 @@ macro_rules! bench_server {
}};
}
-fn body(b: &'static [u8]) -> hyper::Body {
- b.into()
+fn body(b: &'static [u8]) -> Full<Bytes> {
+ Full::new(b.into())
}
#[bench]
diff --git a/examples/client.rs b/examples/client.rs
--- a/examples/client.rs
+++ b/examples/client.rs
@@ -2,7 +2,9 @@
#![warn(rust_2018_idioms)]
use std::env;
-use hyper::{body::HttpBody as _, Body, Request};
+use bytes::Bytes;
+use http_body_util::Empty;
+use hyper::{body::HttpBody as _, Request};
use tokio::io::{self, AsyncWriteExt as _};
use tokio::net::TcpStream;
diff --git a/examples/client.rs b/examples/client.rs
--- a/examples/client.rs
+++ b/examples/client.rs
@@ -51,7 +53,7 @@ async fn fetch_url(url: hyper::Uri) -> Result<()> {
let req = Request::builder()
.uri(url)
.header(hyper::header::HOST, authority.as_str())
- .body(Body::empty())?;
+ .body(Empty::<Bytes>::new())?;
let mut res = sender.send_request(req).await?;
diff --git a/examples/client_json.rs b/examples/client_json.rs
--- a/examples/client_json.rs
+++ b/examples/client_json.rs
@@ -1,7 +1,8 @@
#![deny(warnings)]
#![warn(rust_2018_idioms)]
-use hyper::Body;
+use bytes::Bytes;
+use http_body_util::Empty;
use hyper::{body::Buf, Request};
use serde::Deserialize;
use tokio::net::TcpStream;
diff --git a/examples/client_json.rs b/examples/client_json.rs
--- a/examples/client_json.rs
+++ b/examples/client_json.rs
@@ -42,7 +43,7 @@ async fn fetch_json(url: hyper::Uri) -> Result<Vec<User>> {
let req = Request::builder()
.uri(url)
.header(hyper::header::HOST, authority.as_str())
- .body(Body::empty())?;
+ .body(Empty::<Bytes>::new())?;
let res = sender.send_request(req).await?;
diff --git a/examples/echo.rs b/examples/echo.rs
--- a/examples/echo.rs
+++ b/examples/echo.rs
@@ -2,6 +2,8 @@
use std::net::SocketAddr;
+use bytes::Bytes;
+use http_body_util::{combinators::BoxBody, BodyExt, Empty, Full};
use hyper::body::HttpBody as _;
use hyper::server::conn::Http;
use hyper::service::service_fn;
diff --git a/examples/echo.rs b/examples/echo.rs
--- a/examples/echo.rs
+++ b/examples/echo.rs
@@ -10,15 +12,15 @@ use tokio::net::TcpListener;
/// This is our service handler. It receives a Request, routes on its
/// path, and returns a Future of a Response.
-async fn echo(req: Request<Body>) -> Result<Response<Body>, hyper::Error> {
+async fn echo(req: Request<Body>) -> Result<Response<BoxBody<Bytes, hyper::Error>>, hyper::Error> {
match (req.method(), req.uri().path()) {
// Serve some instructions at /
- (&Method::GET, "/") => Ok(Response::new(Body::from(
+ (&Method::GET, "/") => Ok(Response::new(full(
"Try POSTing data to /echo such as: `curl localhost:3000/echo -XPOST -d 'hello world'`",
))),
// Simply echo the body back to the client.
- (&Method::POST, "/echo") => Ok(Response::new(req.into_body())),
+ (&Method::POST, "/echo") => Ok(Response::new(req.into_body().boxed())),
// TODO: Fix this, broken in PR #2896
// Convert to uppercase before sending back to client using a stream.
diff --git a/examples/echo.rs b/examples/echo.rs
--- a/examples/echo.rs
+++ b/examples/echo.rs
@@ -43,7 +45,7 @@ async fn echo(req: Request<Body>) -> Result<Response<Body>, hyper::Error> {
// 64kbs of data.
let max = req.body().size_hint().upper().unwrap_or(u64::MAX);
if max > 1024 * 64 {
- let mut resp = Response::new(Body::from("Body too big"));
+ let mut resp = Response::new(full("Body too big"));
*resp.status_mut() = hyper::StatusCode::PAYLOAD_TOO_LARGE;
return Ok(resp);
}
diff --git a/examples/echo.rs b/examples/echo.rs
--- a/examples/echo.rs
+++ b/examples/echo.rs
@@ -51,18 +53,30 @@ async fn echo(req: Request<Body>) -> Result<Response<Body>, hyper::Error> {
let whole_body = hyper::body::to_bytes(req.into_body()).await?;
let reversed_body = whole_body.iter().rev().cloned().collect::<Vec<u8>>();
- Ok(Response::new(Body::from(reversed_body)))
+ Ok(Response::new(full(reversed_body)))
}
// Return the 404 Not Found for other routes.
_ => {
- let mut not_found = Response::default();
+ let mut not_found = Response::new(empty());
*not_found.status_mut() = StatusCode::NOT_FOUND;
Ok(not_found)
}
}
}
+fn empty() -> BoxBody<Bytes, hyper::Error> {
+ Empty::<Bytes>::new()
+ .map_err(|never| match never {})
+ .boxed()
+}
+
+fn full<T: Into<Bytes>>(chunk: T) -> BoxBody<Bytes, hyper::Error> {
+ Full::new(chunk.into())
+ .map_err(|never| match never {})
+ .boxed()
+}
+
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let addr = SocketAddr::from(([127, 0, 0, 1], 3000));
diff --git a/examples/hello.rs b/examples/hello.rs
--- a/examples/hello.rs
+++ b/examples/hello.rs
@@ -3,13 +3,15 @@
use std::convert::Infallible;
use std::net::SocketAddr;
+use bytes::Bytes;
+use http_body_util::Full;
use hyper::server::conn::Http;
use hyper::service::service_fn;
use hyper::{Body, Request, Response};
use tokio::net::TcpListener;
-async fn hello(_: Request<Body>) -> Result<Response<Body>, Infallible> {
- Ok(Response::new(Body::from("Hello World!")))
+async fn hello(_: Request<Body>) -> Result<Response<Full<Bytes>>, Infallible> {
+ Ok(Response::new(Full::new(Bytes::from("Hello World!"))))
}
#[tokio::main]
diff --git a/examples/http_proxy.rs b/examples/http_proxy.rs
--- a/examples/http_proxy.rs
+++ b/examples/http_proxy.rs
@@ -2,6 +2,8 @@
use std::net::SocketAddr;
+use bytes::Bytes;
+use http_body_util::{combinators::BoxBody, BodyExt, Empty, Full};
use hyper::client::conn::Builder;
use hyper::server::conn::Http;
use hyper::service::service_fn;
diff --git a/examples/http_proxy.rs b/examples/http_proxy.rs
--- a/examples/http_proxy.rs
+++ b/examples/http_proxy.rs
@@ -41,7 +43,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
}
}
-async fn proxy(req: Request<Body>) -> Result<Response<Body>, hyper::Error> {
+async fn proxy(req: Request<Body>) -> Result<Response<BoxBody<Bytes, hyper::Error>>, hyper::Error> {
println!("req: {:?}", req);
if Method::CONNECT == req.method() {
diff --git a/examples/http_proxy.rs b/examples/http_proxy.rs
--- a/examples/http_proxy.rs
+++ b/examples/http_proxy.rs
@@ -70,10 +72,10 @@ async fn proxy(req: Request<Body>) -> Result<Response<Body>, hyper::Error> {
}
});
- Ok(Response::new(Body::empty()))
+ Ok(Response::new(empty()))
} else {
eprintln!("CONNECT host is not socket addr: {:?}", req.uri());
- let mut resp = Response::new(Body::from("CONNECT must be to a socket address"));
+ let mut resp = Response::new(full("CONNECT must be to a socket address"));
*resp.status_mut() = http::StatusCode::BAD_REQUEST;
Ok(resp)
diff --git a/examples/http_proxy.rs b/examples/http_proxy.rs
--- a/examples/http_proxy.rs
+++ b/examples/http_proxy.rs
@@ -96,7 +98,8 @@ async fn proxy(req: Request<Body>) -> Result<Response<Body>, hyper::Error> {
}
});
- sender.send_request(req).await
+ let resp = sender.send_request(req).await?;
+ Ok(resp.map(|b| b.boxed()))
}
}
diff --git a/examples/http_proxy.rs b/examples/http_proxy.rs
--- a/examples/http_proxy.rs
+++ b/examples/http_proxy.rs
@@ -104,6 +107,18 @@ fn host_addr(uri: &http::Uri) -> Option<String> {
uri.authority().and_then(|auth| Some(auth.to_string()))
}
+fn empty() -> BoxBody<Bytes, hyper::Error> {
+ Empty::<Bytes>::new()
+ .map_err(|never| match never {})
+ .boxed()
+}
+
+fn full<T: Into<Bytes>>(chunk: T) -> BoxBody<Bytes, hyper::Error> {
+ Full::new(chunk.into())
+ .map_err(|never| match never {})
+ .boxed()
+}
+
// Create a TCP connection to host:port, build a tunnel between the connection and
// the upgraded connection
async fn tunnel(mut upgraded: Upgraded, addr: String) -> std::io::Result<()> {
diff --git a/examples/multi_server.rs b/examples/multi_server.rs
--- a/examples/multi_server.rs
+++ b/examples/multi_server.rs
@@ -3,7 +3,9 @@
use std::net::SocketAddr;
+use bytes::Bytes;
use futures_util::future::join;
+use http_body_util::Full;
use hyper::server::conn::Http;
use hyper::service::service_fn;
use hyper::{Body, Request, Response};
diff --git a/examples/multi_server.rs b/examples/multi_server.rs
--- a/examples/multi_server.rs
+++ b/examples/multi_server.rs
@@ -12,12 +14,12 @@ use tokio::net::TcpListener;
static INDEX1: &[u8] = b"The 1st service!";
static INDEX2: &[u8] = b"The 2nd service!";
-async fn index1(_: Request<Body>) -> Result<Response<Body>, hyper::Error> {
- Ok(Response::new(Body::from(INDEX1)))
+async fn index1(_: Request<Body>) -> Result<Response<Full<Bytes>>, hyper::Error> {
+ Ok(Response::new(Full::new(Bytes::from(INDEX1))))
}
-async fn index2(_: Request<Body>) -> Result<Response<Body>, hyper::Error> {
- Ok(Response::new(Body::from(INDEX2)))
+async fn index2(_: Request<Body>) -> Result<Response<Full<Bytes>>, hyper::Error> {
+ Ok(Response::new(Full::new(Bytes::from(INDEX2))))
}
#[tokio::main]
diff --git a/examples/params.rs b/examples/params.rs
--- a/examples/params.rs
+++ b/examples/params.rs
@@ -1,12 +1,15 @@
// #![deny(warnings)] // FIXME: https://github.com/rust-lang/rust/issues/62411
#![warn(rust_2018_idioms)]
+use bytes::Bytes;
+use http_body_util::{combinators::BoxBody, BodyExt, Empty, Full};
use hyper::server::conn::Http;
use hyper::service::service_fn;
use hyper::{Body, Method, Request, Response, StatusCode};
use tokio::net::TcpListener;
use std::collections::HashMap;
+use std::convert::Infallible;
use std::net::SocketAddr;
use url::form_urlencoded;
diff --git a/examples/params.rs b/examples/params.rs
--- a/examples/params.rs
+++ b/examples/params.rs
@@ -15,9 +18,11 @@ static MISSING: &[u8] = b"Missing field";
static NOTNUMERIC: &[u8] = b"Number field is not numeric";
// Using service_fn, we can turn this function into a `Service`.
-async fn param_example(req: Request<Body>) -> Result<Response<Body>, hyper::Error> {
+async fn param_example(
+ req: Request<Body>,
+) -> Result<Response<BoxBody<Bytes, Infallible>>, hyper::Error> {
match (req.method(), req.uri().path()) {
- (&Method::GET, "/") | (&Method::GET, "/post") => Ok(Response::new(INDEX.into())),
+ (&Method::GET, "/") | (&Method::GET, "/post") => Ok(Response::new(full(INDEX))),
(&Method::POST, "/post") => {
// Concatenate the body...
let b = hyper::body::to_bytes(req).await?;
diff --git a/examples/params.rs b/examples/params.rs
--- a/examples/params.rs
+++ b/examples/params.rs
@@ -43,7 +48,7 @@ async fn param_example(req: Request<Body>) -> Result<Response<Body>, hyper::Erro
} else {
return Ok(Response::builder()
.status(StatusCode::UNPROCESSABLE_ENTITY)
- .body(MISSING.into())
+ .body(full(MISSING))
.unwrap());
};
let number = if let Some(n) = params.get("number") {
diff --git a/examples/params.rs b/examples/params.rs
--- a/examples/params.rs
+++ b/examples/params.rs
@@ -52,13 +57,13 @@ async fn param_example(req: Request<Body>) -> Result<Response<Body>, hyper::Erro
} else {
return Ok(Response::builder()
.status(StatusCode::UNPROCESSABLE_ENTITY)
- .body(NOTNUMERIC.into())
+ .body(full(NOTNUMERIC))
.unwrap());
}
} else {
return Ok(Response::builder()
.status(StatusCode::UNPROCESSABLE_ENTITY)
- .body(MISSING.into())
+ .body(full(MISSING))
.unwrap());
};
diff --git a/examples/params.rs b/examples/params.rs
--- a/examples/params.rs
+++ b/examples/params.rs
@@ -69,7 +74,7 @@ async fn param_example(req: Request<Body>) -> Result<Response<Body>, hyper::Erro
// responses such as InternalServiceError may be
// needed here, too.
let body = format!("Hello {}, your number is {}", name, number);
- Ok(Response::new(body.into()))
+ Ok(Response::new(full(body)))
}
(&Method::GET, "/get") => {
let query = if let Some(q) = req.uri().query() {
diff --git a/examples/params.rs b/examples/params.rs
--- a/examples/params.rs
+++ b/examples/params.rs
@@ -77,7 +82,7 @@ async fn param_example(req: Request<Body>) -> Result<Response<Body>, hyper::Erro
} else {
return Ok(Response::builder()
.status(StatusCode::UNPROCESSABLE_ENTITY)
- .body(MISSING.into())
+ .body(full(MISSING))
.unwrap());
};
let params = form_urlencoded::parse(query.as_bytes())
diff --git a/examples/params.rs b/examples/params.rs
--- a/examples/params.rs
+++ b/examples/params.rs
@@ -88,19 +93,27 @@ async fn param_example(req: Request<Body>) -> Result<Response<Body>, hyper::Erro
} else {
return Ok(Response::builder()
.status(StatusCode::UNPROCESSABLE_ENTITY)
- .body(MISSING.into())
+ .body(full(MISSING))
.unwrap());
};
let body = format!("You requested {}", page);
- Ok(Response::new(body.into()))
+ Ok(Response::new(full(body)))
}
_ => Ok(Response::builder()
.status(StatusCode::NOT_FOUND)
- .body(Body::empty())
+ .body(empty())
.unwrap()),
}
}
+fn empty() -> BoxBody<Bytes, Infallible> {
+ Empty::<Bytes>::new().boxed()
+}
+
+fn full<T: Into<Bytes>>(chunk: T) -> BoxBody<Bytes, Infallible> {
+ Full::new(chunk.into()).boxed()
+}
+
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
pretty_env_logger::init();
diff --git a/examples/send_file.rs b/examples/send_file.rs
--- a/examples/send_file.rs
+++ b/examples/send_file.rs
@@ -5,6 +5,8 @@ use std::net::SocketAddr;
use hyper::server::conn::Http;
use tokio::net::TcpListener;
+use bytes::Bytes;
+use http_body_util::Full;
use hyper::service::service_fn;
use hyper::{Body, Method, Request, Response, Result, StatusCode};
diff --git a/examples/send_file.rs b/examples/send_file.rs
--- a/examples/send_file.rs
+++ b/examples/send_file.rs
@@ -34,7 +36,7 @@ async fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
}
}
-async fn response_examples(req: Request<Body>) -> Result<Response<Body>> {
+async fn response_examples(req: Request<Body>) -> Result<Response<Full<Bytes>>> {
match (req.method(), req.uri().path()) {
(&Method::GET, "/") | (&Method::GET, "/index.html") => simple_file_send(INDEX).await,
(&Method::GET, "/no_file.html") => {
diff --git a/examples/send_file.rs b/examples/send_file.rs
--- a/examples/send_file.rs
+++ b/examples/send_file.rs
@@ -46,17 +48,17 @@ async fn response_examples(req: Request<Body>) -> Result<Response<Body>> {
}
/// HTTP status code 404
-fn not_found() -> Response<Body> {
+fn not_found() -> Response<Full<Bytes>> {
Response::builder()
.status(StatusCode::NOT_FOUND)
- .body(NOTFOUND.into())
+ .body(Full::new(NOTFOUND.into()))
.unwrap()
}
-async fn simple_file_send(filename: &str) -> Result<Response<Body>> {
+async fn simple_file_send(filename: &str) -> Result<Response<Full<Bytes>>> {
if let Ok(contents) = tokio::fs::read(filename).await {
let body = contents.into();
- return Ok(Response::new(body));
+ return Ok(Response::new(Full::new(body)));
}
Ok(not_found())
diff --git a/examples/service_struct_impl.rs b/examples/service_struct_impl.rs
--- a/examples/service_struct_impl.rs
+++ b/examples/service_struct_impl.rs
@@ -1,3 +1,5 @@
+use bytes::Bytes;
+use http_body_util::Full;
use hyper::server::conn::Http;
use hyper::service::Service;
use hyper::{Body, Request, Response};
diff --git a/examples/service_struct_impl.rs b/examples/service_struct_impl.rs
--- a/examples/service_struct_impl.rs
+++ b/examples/service_struct_impl.rs
@@ -36,7 +38,7 @@ struct Svc {
}
impl Service<Request<Body>> for Svc {
- type Response = Response<Body>;
+ type Response = Response<Full<Bytes>>;
type Error = hyper::Error;
type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send>>;
diff --git a/examples/service_struct_impl.rs b/examples/service_struct_impl.rs
--- a/examples/service_struct_impl.rs
+++ b/examples/service_struct_impl.rs
@@ -45,8 +47,8 @@ impl Service<Request<Body>> for Svc {
}
fn call(&mut self, req: Request<Body>) -> Self::Future {
- fn mk_response(s: String) -> Result<Response<Body>, hyper::Error> {
- Ok(Response::builder().body(Body::from(s)).unwrap())
+ fn mk_response(s: String) -> Result<Response<Full<Bytes>>, hyper::Error> {
+ Ok(Response::builder().body(Full::new(Bytes::from(s))).unwrap())
}
let res = match req.uri().path() {
diff --git a/examples/state.rs b/examples/state.rs
--- a/examples/state.rs
+++ b/examples/state.rs
@@ -6,8 +6,10 @@ use std::sync::{
Arc,
};
+use bytes::Bytes;
+use http_body_util::Full;
use hyper::{server::conn::Http, service::service_fn};
-use hyper::{Body, Error, Response};
+use hyper::{Error, Response};
use tokio::net::TcpListener;
#[tokio::main]
diff --git a/examples/state.rs b/examples/state.rs
--- a/examples/state.rs
+++ b/examples/state.rs
@@ -36,7 +38,12 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
// Get the current count, and also increment by 1, in a single
// atomic operation.
let count = counter.fetch_add(1, Ordering::AcqRel);
- async move { Ok::<_, Error>(Response::new(Body::from(format!("Request #{}", count)))) }
+ async move {
+ Ok::<_, Error>(Response::new(Full::new(Bytes::from(format!(
+ "Request #{}",
+ count
+ )))))
+ }
});
if let Err(err) = Http::new().serve_connection(stream, service).await {
diff --git a/examples/tower_server.rs /dev/null
--- a/examples/tower_server.rs
+++ /dev/null
@@ -1,60 +0,0 @@
-#![deny(warnings)]
-
-use std::net::SocketAddr;
-use std::task::{Context, Poll};
-
-use futures_util::future;
-use hyper::server::conn::Http;
-use hyper::service::Service;
-use hyper::{Body, Request, Response};
-use tokio::net::TcpListener;
-
-const ROOT: &str = "/";
-
-#[derive(Debug)]
-pub struct Svc;
-
-impl Service<Request<Body>> for Svc {
- type Response = Response<Body>;
- type Error = hyper::Error;
- type Future = future::Ready<Result<Self::Response, Self::Error>>;
-
- fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- Ok(()).into()
- }
-
- fn call(&mut self, req: Request<Body>) -> Self::Future {
- let rsp = Response::builder();
-
- let uri = req.uri();
- if uri.path() != ROOT {
- let body = Body::from(Vec::new());
- let rsp = rsp.status(404).body(body).unwrap();
- return future::ok(rsp);
- }
-
- let body = Body::from(Vec::from(&b"heyo!"[..]));
- let rsp = rsp.status(200).body(body).unwrap();
- future::ok(rsp)
- }
-}
-
-#[tokio::main]
-async fn main() -> Result<(), Box<dyn std::error::Error>> {
- pretty_env_logger::init();
-
- let addr: SocketAddr = "127.0.0.1:1337".parse().unwrap();
-
- let listener = TcpListener::bind(addr).await?;
- println!("Listening on http://{}", addr);
-
- loop {
- let (stream, _) = listener.accept().await?;
-
- tokio::task::spawn(async move {
- if let Err(err) = Http::new().serve_connection(stream, Svc).await {
- println!("Failed to serve connection: {:?}", err);
- }
- });
- }
-}
diff --git a/examples/upgrades.rs b/examples/upgrades.rs
--- a/examples/upgrades.rs
+++ b/examples/upgrades.rs
@@ -1,18 +1,20 @@
#![deny(warnings)]
// Note: `hyper::upgrade` docs link to this upgrade.
+use std::net::SocketAddr;
use std::str;
-use hyper::server::conn::Http;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::{TcpListener, TcpStream};
use tokio::sync::watch;
+use bytes::Bytes;
+use http_body_util::Empty;
use hyper::header::{HeaderValue, UPGRADE};
+use hyper::server::conn::Http;
use hyper::service::service_fn;
use hyper::upgrade::Upgraded;
use hyper::{Body, Request, Response, StatusCode};
-use std::net::SocketAddr;
// A simple type alias so as to DRY.
type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>;
diff --git a/examples/upgrades.rs b/examples/upgrades.rs
--- a/examples/upgrades.rs
+++ b/examples/upgrades.rs
@@ -36,8 +38,8 @@ async fn server_upgraded_io(mut upgraded: Upgraded) -> Result<()> {
}
/// Our server HTTP handler to initiate HTTP upgrades.
-async fn server_upgrade(mut req: Request<Body>) -> Result<Response<Body>> {
- let mut res = Response::new(Body::empty());
+async fn server_upgrade(mut req: Request<Body>) -> Result<Response<Empty<Bytes>>> {
+ let mut res = Response::new(Empty::new());
// Send a 400 to any request that doesn't have
// an `Upgrade` header.
diff --git a/examples/upgrades.rs b/examples/upgrades.rs
--- a/examples/upgrades.rs
+++ b/examples/upgrades.rs
@@ -91,7 +93,7 @@ async fn client_upgrade_request(addr: SocketAddr) -> Result<()> {
let req = Request::builder()
.uri(format!("http://{}/", addr))
.header(UPGRADE, "foobar")
- .body(Body::empty())
+ .body(Empty::<Bytes>::new())
.unwrap();
let stream = TcpStream::connect(addr).await?;
diff --git a/examples/web_api.rs b/examples/web_api.rs
--- a/examples/web_api.rs
+++ b/examples/web_api.rs
@@ -2,7 +2,8 @@
use std::net::SocketAddr;
-use bytes::Buf;
+use bytes::{Buf, Bytes};
+use http_body_util::{BodyExt, Full};
use hyper::server::conn::Http;
use hyper::service::service_fn;
use hyper::{header, Body, Method, Request, Response, StatusCode};
diff --git a/examples/web_api.rs b/examples/web_api.rs
--- a/examples/web_api.rs
+++ b/examples/web_api.rs
@@ -17,12 +19,12 @@ static NOTFOUND: &[u8] = b"Not Found";
static POST_DATA: &str = r#"{"original": "data"}"#;
static URL: &str = "http://127.0.0.1:1337/json_api";
-async fn client_request_response() -> Result<Response<Body>> {
+async fn client_request_response() -> Result<Response<BoxBody>> {
let req = Request::builder()
.method(Method::POST)
.uri(URL)
.header(header::CONTENT_TYPE, "application/json")
- .body(POST_DATA.into())
+ .body(Full::new(Bytes::from(POST_DATA)))
.unwrap();
let host = req.uri().host().expect("uri has no host");
diff --git a/examples/web_api.rs b/examples/web_api.rs
--- a/examples/web_api.rs
+++ b/examples/web_api.rs
@@ -39,12 +41,12 @@ async fn client_request_response() -> Result<Response<Body>> {
let web_res = sender.send_request(req).await?;
- let res_body = web_res.into_body();
+ let res_body = web_res.into_body().boxed();
Ok(Response::new(res_body))
}
-async fn api_post_response(req: Request<Body>) -> Result<Response<Body>> {
+async fn api_post_response(req: Request<Body>) -> Result<Response<BoxBody>> {
// Aggregate the body...
let whole_body = hyper::body::aggregate(req).await?;
// Decode as JSON...
diff --git a/examples/web_api.rs b/examples/web_api.rs
--- a/examples/web_api.rs
+++ b/examples/web_api.rs
@@ -85,12 +87,18 @@ async fn response_examples(req: Request<Body>) -> Result<Response<Body>> {
// Return 404 not found response.
Ok(Response::builder()
.status(StatusCode::NOT_FOUND)
- .body(NOTFOUND.into())
+ .body(full(NOTFOUND))
.unwrap())
}
}
}
+fn full<T: Into<Bytes>>(chunk: T) -> BoxBody {
+ Full::new(chunk.into())
+ .map_err(|never| match never {})
+ .boxed()
+}
+
#[tokio::main]
async fn main() -> Result<()> {
pretty_env_logger::init();
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -1,4 +1,3 @@
-use std::borrow::Cow;
use std::fmt;
use bytes::Bytes;
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -30,7 +29,8 @@ pub struct Body {
}
enum Kind {
- Once(Option<Bytes>),
+ #[allow(dead_code)]
+ Empty,
Chan {
content_length: DecodedLength,
want_tx: watch::Sender,
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -71,21 +71,6 @@ const WANT_PENDING: usize = 1;
const WANT_READY: usize = 2;
impl Body {
- /// Create an empty `Body` stream.
- ///
- /// # Example
- ///
- /// ```
- /// use hyper::{Body, Request};
- ///
- /// // create a `GET /` request
- /// let get = Request::new(Body::empty());
- /// ```
- #[inline]
- pub fn empty() -> Body {
- Body::new(Kind::Once(None))
- }
-
/// Create a `Body` stream with an associated sender half.
///
/// Useful when wanting to stream chunks from another thread.
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -123,6 +108,16 @@ impl Body {
Body { kind }
}
+ #[allow(dead_code)]
+ pub(crate) fn empty() -> Body {
+ Body::new(Kind::Empty)
+ }
+
+ #[cfg(feature = "ffi")]
+ pub(crate) fn ffi() -> Body {
+ Body::new(Kind::Ffi(crate::ffi::UserBody::new()))
+ }
+
#[cfg(all(feature = "http2", any(feature = "client", feature = "server")))]
pub(crate) fn h2(
recv: h2::RecvStream,
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -160,7 +155,7 @@ impl Body {
fn poll_inner(&mut self, cx: &mut task::Context<'_>) -> Poll<Option<crate::Result<Bytes>>> {
match self.kind {
- Kind::Once(ref mut val) => Poll::Ready(val.take().map(Ok)),
+ Kind::Empty => Poll::Ready(None),
Kind::Chan {
content_length: ref mut len,
ref mut data_rx,
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -197,23 +192,6 @@ impl Body {
Kind::Ffi(ref mut body) => body.poll_data(cx),
}
}
-
- #[cfg(feature = "http1")]
- pub(super) fn take_full_data(&mut self) -> Option<Bytes> {
- if let Kind::Once(ref mut chunk) = self.kind {
- chunk.take()
- } else {
- None
- }
- }
-}
-
-impl Default for Body {
- /// Returns `Body::empty()`.
- #[inline]
- fn default() -> Body {
- Body::empty()
- }
}
impl HttpBody for Body {
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -232,6 +210,7 @@ impl HttpBody for Body {
#[cfg_attr(not(feature = "http2"), allow(unused))] cx: &mut task::Context<'_>,
) -> Poll<Result<Option<HeaderMap>, Self::Error>> {
match self.kind {
+ Kind::Empty => Poll::Ready(Ok(None)),
#[cfg(all(feature = "http2", any(feature = "client", feature = "server")))]
Kind::H2 {
recv: ref mut h2,
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -253,13 +232,12 @@ impl HttpBody for Body {
},
#[cfg(feature = "ffi")]
Kind::Ffi(ref mut body) => body.poll_trailers(cx),
- _ => Poll::Ready(Ok(None)),
}
}
fn is_end_stream(&self) -> bool {
match self.kind {
- Kind::Once(ref val) => val.is_none(),
+ Kind::Empty => true,
Kind::Chan { content_length, .. } => content_length == DecodedLength::ZERO,
#[cfg(all(feature = "http2", any(feature = "client", feature = "server")))]
Kind::H2 { recv: ref h2, .. } => h2.is_end_stream(),
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -282,8 +260,7 @@ impl HttpBody for Body {
}
match self.kind {
- Kind::Once(Some(ref val)) => SizeHint::with_exact(val.len() as u64),
- Kind::Once(None) => SizeHint::with_exact(0),
+ Kind::Empty => SizeHint::with_exact(0),
Kind::Chan { content_length, .. } => opt_len!(content_length),
#[cfg(all(feature = "http2", any(feature = "client", feature = "server")))]
Kind::H2 { content_length, .. } => opt_len!(content_length),
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -299,13 +276,10 @@ impl fmt::Debug for Body {
struct Streaming;
#[derive(Debug)]
struct Empty;
- #[derive(Debug)]
- struct Full<'a>(&'a Bytes);
let mut builder = f.debug_tuple("Body");
match self.kind {
- Kind::Once(None) => builder.field(&Empty),
- Kind::Once(Some(ref chunk)) => builder.field(&Full(chunk)),
+ Kind::Empty => builder.field(&Empty),
_ => builder.field(&Streaming),
};
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -313,65 +287,6 @@ impl fmt::Debug for Body {
}
}
-impl From<Bytes> for Body {
- #[inline]
- fn from(chunk: Bytes) -> Body {
- if chunk.is_empty() {
- Body::empty()
- } else {
- Body::new(Kind::Once(Some(chunk)))
- }
- }
-}
-
-impl From<Vec<u8>> for Body {
- #[inline]
- fn from(vec: Vec<u8>) -> Body {
- Body::from(Bytes::from(vec))
- }
-}
-
-impl From<&'static [u8]> for Body {
- #[inline]
- fn from(slice: &'static [u8]) -> Body {
- Body::from(Bytes::from(slice))
- }
-}
-
-impl From<Cow<'static, [u8]>> for Body {
- #[inline]
- fn from(cow: Cow<'static, [u8]>) -> Body {
- match cow {
- Cow::Borrowed(b) => Body::from(b),
- Cow::Owned(o) => Body::from(o),
- }
- }
-}
-
-impl From<String> for Body {
- #[inline]
- fn from(s: String) -> Body {
- Body::from(Bytes::from(s.into_bytes()))
- }
-}
-
-impl From<&'static str> for Body {
- #[inline]
- fn from(slice: &'static str) -> Body {
- Body::from(Bytes::from(slice.as_bytes()))
- }
-}
-
-impl From<Cow<'static, str>> for Body {
- #[inline]
- fn from(cow: Cow<'static, str>) -> Body {
- match cow {
- Cow::Borrowed(b) => Body::from(b),
- Cow::Owned(o) => Body::from(o),
- }
- }
-}
-
impl Sender {
/// Check to see if this `Sender` can send more data.
pub fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<crate::Result<()>> {
diff --git a/src/body/mod.rs b/src/body/mod.rs
--- a/src/body/mod.rs
+++ b/src/body/mod.rs
@@ -29,33 +29,6 @@ mod body;
mod length;
mod to_bytes;
-/// An optimization to try to take a full body if immediately available.
-///
-/// This is currently limited to *only* `hyper::Body`s.
-#[cfg(feature = "http1")]
-pub(crate) fn take_full_data<T: HttpBody + 'static>(body: &mut T) -> Option<T::Data> {
- use std::any::{Any, TypeId};
-
- // This static type check can be optimized at compile-time.
- if TypeId::of::<T>() == TypeId::of::<Body>() {
- let mut full = (body as &mut dyn Any)
- .downcast_mut::<Body>()
- .expect("must be Body")
- .take_full_data();
- // This second cast is required to make the type system happy.
- // Without it, the compiler cannot reason that the type is actually
- // `T::Data`. Oh wells.
- //
- // It's still a measurable win!
- (&mut full as &mut dyn Any)
- .downcast_mut::<Option<T::Data>>()
- .expect("must be T::Data")
- .take()
- } else {
- None
- }
-}
-
fn _assert_send_sync() {
fn _assert_send<T: Send>() {}
fn _assert_sync<T: Sync>() {}
diff --git a/src/body/to_bytes.rs b/src/body/to_bytes.rs
--- a/src/body/to_bytes.rs
+++ b/src/body/to_bytes.rs
@@ -17,11 +17,10 @@ use super::HttpBody;
/// # Example
///
/// ```
-/// # async fn doc() -> hyper::Result<()> {
/// # use hyper::{Body, Response};
+/// # async fn doc(response: Response<Body>) -> hyper::Result<()> {
/// # use hyper::body::HttpBody;
-/// #
-/// let response = Response::new(Body::from("response body"));
+/// // let response: Response<Body> ...
///
/// const MAX_ALLOWED_RESPONSE_SIZE: u64 = 1024;
///
diff --git a/src/client/conn/http1.rs b/src/client/conn/http1.rs
--- a/src/client/conn/http1.rs
+++ b/src/client/conn/http1.rs
@@ -120,31 +120,6 @@ where
/// before calling this method.
/// - Since absolute-form `Uri`s are not required, if received, they will
/// be serialized as-is.
- ///
- /// # Example
- ///
- /// ```
- /// # use http::header::HOST;
- /// # use hyper::client::conn::SendRequest;
- /// # use hyper::Body;
- /// use hyper::Request;
- ///
- /// # async fn doc(mut tx: SendRequest<Body>) -> hyper::Result<()> {
- /// // build a Request
- /// let req = Request::builder()
- /// .uri("/foo/bar")
- /// .header(HOST, "hyper.rs")
- /// .body(Body::empty())
- /// .unwrap();
- ///
- /// // send it and await a Response
- /// let res = tx.send_request(req).await?;
- /// // assert the Response
- /// assert!(res.status().is_success());
- /// # Ok(())
- /// # }
- /// # fn main() {}
- /// ```
pub fn send_request(&mut self, req: Request<B>) -> impl Future<Output = crate::Result<Response<Body>>> {
let sent = self.dispatch.send(req);
diff --git a/src/client/conn/http2.rs b/src/client/conn/http2.rs
--- a/src/client/conn/http2.rs
+++ b/src/client/conn/http2.rs
@@ -114,31 +114,6 @@ where
/// before calling this method.
/// - Since absolute-form `Uri`s are not required, if received, they will
/// be serialized as-is.
- ///
- /// # Example
- ///
- /// ```
- /// # use http::header::HOST;
- /// # use hyper::client::conn::SendRequest;
- /// # use hyper::Body;
- /// use hyper::Request;
- ///
- /// # async fn doc(mut tx: SendRequest<Body>) -> hyper::Result<()> {
- /// // build a Request
- /// let req = Request::builder()
- /// .uri("/foo/bar")
- /// .header(HOST, "hyper.rs")
- /// .body(Body::empty())
- /// .unwrap();
- ///
- /// // send it and await a Response
- /// let res = tx.send_request(req).await?;
- /// // assert the Response
- /// assert!(res.status().is_success());
- /// # Ok(())
- /// # }
- /// # fn main() {}
- /// ```
pub fn send_request(&mut self, req: Request<B>) -> impl Future<Output = crate::Result<Response<Body>>> {
let sent = self.dispatch.send(req);
diff --git a/src/client/conn/mod.rs b/src/client/conn/mod.rs
--- a/src/client/conn/mod.rs
+++ b/src/client/conn/mod.rs
@@ -13,10 +13,12 @@
//! ```no_run
//! # #[cfg(all(feature = "client", feature = "http1", feature = "runtime"))]
//! # mod rt {
-//! use tower::ServiceExt;
+//! use bytes::Bytes;
//! use http::{Request, StatusCode};
+//! use http_body_util::Empty;
//! use hyper::{client::conn, Body};
//! use tokio::net::TcpStream;
+//! use tower::ServiceExt;
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
diff --git a/src/client/conn/mod.rs b/src/client/conn/mod.rs
--- a/src/client/conn/mod.rs
+++ b/src/client/conn/mod.rs
@@ -35,7 +37,7 @@
//! // We need to manually add the host header because SendRequest does not
//! .header("Host", "example.com")
//! .method("GET")
-//! .body(Body::from(""))?;
+//! .body(Empty::<Bytes>::new())?;
//! let response = request_sender.send_request(request).await?;
//! assert!(response.status() == StatusCode::OK);
//!
diff --git a/src/client/conn/mod.rs b/src/client/conn/mod.rs
--- a/src/client/conn/mod.rs
+++ b/src/client/conn/mod.rs
@@ -45,7 +47,7 @@
//! let request = Request::builder()
//! .header("Host", "example.com")
//! .method("GET")
-//! .body(Body::from(""))?;
+//! .body(Empty::<Bytes>::new())?;
//! let response = request_sender.send_request(request).await?;
//! assert!(response.status() == StatusCode::OK);
//! Ok(())
diff --git a/src/client/conn/mod.rs b/src/client/conn/mod.rs
--- a/src/client/conn/mod.rs
+++ b/src/client/conn/mod.rs
@@ -123,11 +125,14 @@ pin_project! {
///
/// This is a shortcut for `Builder::new().handshake(io)`.
/// See [`client::conn`](crate::client::conn) for more.
-pub async fn handshake<T>(
+pub async fn handshake<T, B>(
io: T,
-) -> crate::Result<(SendRequest<crate::Body>, Connection<T, crate::Body>)>
+) -> crate::Result<(SendRequest<B>, Connection<T, B>)>
where
T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
+ B: HttpBody + 'static,
+ B::Data: Send,
+ B::Error: Into<Box<dyn StdError + Send + Sync>>,
{
Builder::new().handshake(io).await
}
diff --git a/src/client/conn/mod.rs b/src/client/conn/mod.rs
--- a/src/client/conn/mod.rs
+++ b/src/client/conn/mod.rs
@@ -245,31 +250,6 @@ where
/// before calling this method.
/// - Since absolute-form `Uri`s are not required, if received, they will
/// be serialized as-is.
- ///
- /// # Example
- ///
- /// ```
- /// # use http::header::HOST;
- /// # use hyper::client::conn::SendRequest;
- /// # use hyper::Body;
- /// use hyper::Request;
- ///
- /// # async fn doc(mut tx: SendRequest<Body>) -> hyper::Result<()> {
- /// // build a Request
- /// let req = Request::builder()
- /// .uri("/foo/bar")
- /// .header(HOST, "hyper.rs")
- /// .body(Body::empty())
- /// .unwrap();
- ///
- /// // send it and await a Response
- /// let res = tx.send_request(req).await?;
- /// // assert the Response
- /// assert!(res.status().is_success());
- /// # Ok(())
- /// # }
- /// # fn main() {}
- /// ```
pub fn send_request(&mut self, req: Request<B>) -> ResponseFuture {
let inner = match self.dispatch.send(req) {
Ok(rx) => ResponseFutureState::Waiting(rx),
diff --git a/src/ffi/body.rs b/src/ffi/body.rs
--- a/src/ffi/body.rs
+++ b/src/ffi/body.rs
@@ -33,7 +33,7 @@ ffi_fn! {
///
/// If not configured, this body acts as an empty payload.
fn hyper_body_new() -> *mut hyper_body {
- Box::into_raw(Box::new(hyper_body(Body::empty())))
+ Box::into_raw(Box::new(hyper_body(Body::ffi())))
} ?= ptr::null_mut()
}
diff --git a/src/ffi/http_types.rs b/src/ffi/http_types.rs
--- a/src/ffi/http_types.rs
+++ b/src/ffi/http_types.rs
@@ -335,7 +335,7 @@ ffi_fn! {
///
/// It is safe to free the response even after taking ownership of its body.
fn hyper_response_body(resp: *mut hyper_response) -> *mut hyper_body {
- let body = std::mem::take(non_null!(&mut *resp ?= std::ptr::null_mut()).0.body_mut());
+ let body = std::mem::replace(non_null!(&mut *resp ?= std::ptr::null_mut()).0.body_mut(), crate::Body::empty());
Box::into_raw(Box::new(hyper_body(body)))
} ?= std::ptr::null_mut()
}
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -522,24 +522,6 @@ where
}
}
- pub(crate) fn write_full_msg(&mut self, head: MessageHead<T::Outgoing>, body: B) {
- if let Some(encoder) =
- self.encode_head(head, Some(BodyLength::Known(body.remaining() as u64)))
- {
- let is_last = encoder.is_last();
- // Make sure we don't write a body if we weren't actually allowed
- // to do so, like because its a HEAD request.
- if !encoder.is_eof() {
- encoder.danger_full_buf(body, self.io.write_buf());
- }
- self.state.writing = if is_last {
- Writing::Closed
- } else {
- Writing::KeepAlive
- }
- }
- }
-
fn encode_head(
&mut self,
mut head: MessageHead<T::Outgoing>,
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -8,9 +8,7 @@ use tracing::{debug, trace};
use super::{Http1Transaction, Wants};
use crate::body::{Body, DecodedLength, HttpBody};
use crate::common::{task, Future, Pin, Poll, Unpin};
-use crate::proto::{
- BodyLength, Conn, Dispatched, MessageHead, RequestHead,
-};
+use crate::proto::{BodyLength, Conn, Dispatched, MessageHead, RequestHead};
use crate::upgrade::OnUpgrade;
pub(crate) struct Dispatcher<D, Bs: HttpBody, I, T> {
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -295,16 +293,7 @@ where
&& self.dispatch.should_poll()
{
if let Some(msg) = ready!(Pin::new(&mut self.dispatch).poll_msg(cx)) {
- let (head, mut body) = msg.map_err(crate::Error::new_user_service)?;
-
- // Check if the body knows its full data immediately.
- //
- // If so, we can skip a bit of bookkeeping that streaming
- // bodies need to do.
- if let Some(full) = crate::body::take_full_data(&mut body) {
- self.conn.write_full_msg(head, full);
- return Poll::Ready(Ok(()));
- }
+ let (head, body) = msg.map_err(crate::Error::new_user_service)?;
let body_type = if body.is_end_stream() {
self.body_rx.set(None);
diff --git a/src/proto/h1/encode.rs b/src/proto/h1/encode.rs
--- a/src/proto/h1/encode.rs
+++ b/src/proto/h1/encode.rs
@@ -180,39 +180,6 @@ impl Encoder {
}
}
}
-
- /// Encodes the full body, without verifying the remaining length matches.
- ///
- /// This is used in conjunction with HttpBody::__hyper_full_data(), which
- /// means we can trust that the buf has the correct size (the buf itself
- /// was checked to make the headers).
- pub(super) fn danger_full_buf<B>(self, msg: B, dst: &mut WriteBuf<EncodedBuf<B>>)
- where
- B: Buf,
- {
- debug_assert!(msg.remaining() > 0, "encode() called with empty buf");
- debug_assert!(
- match self.kind {
- Kind::Length(len) => len == msg.remaining() as u64,
- _ => true,
- },
- "danger_full_buf length mismatches"
- );
-
- match self.kind {
- Kind::Chunked => {
- let len = msg.remaining();
- trace!("encoding chunked {}B", len);
- let buf = ChunkSize::new(len)
- .chain(msg)
- .chain(b"\r\n0\r\n\r\n" as &'static [u8]);
- dst.buffer(buf);
- }
- _ => {
- dst.buffer(msg);
- }
- }
- }
}
impl<B> Buf for EncodedBuf<B>
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -503,7 +503,6 @@ where
}
}
-
if !body.is_end_stream() {
// automatically set Content-Length from body...
if let Some(len) = body.size_hint().exact() {
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -11,7 +11,8 @@
//! # #[cfg(all(feature = "http1", feature = "runtime"))]
//! # mod rt {
//! use http::{Request, Response, StatusCode};
-//! use hyper::{server::conn::Http, service::service_fn, Body};
+//! use http_body_util::Full;
+//! use hyper::{server::conn::Http, service::service_fn, body::Bytes};
//! use std::{net::SocketAddr, convert::Infallible};
//! use tokio::net::TcpListener;
//!
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -34,8 +35,8 @@
//! }
//! }
//!
-//! async fn hello(_req: Request<Body>) -> Result<Response<Body>, Infallible> {
-//! Ok(Response::new(Body::from("Hello World!")))
+//! async fn hello(_req: Request<hyper::Body>) -> Result<Response<Full<Bytes>>, Infallible> {
+//! Ok(Response::new(Full::new(Bytes::from("Hello World!"))))
//! }
//! # }
//! ```
diff --git a/src/service/util.rs b/src/service/util.rs
--- a/src/service/util.rs
+++ b/src/service/util.rs
@@ -11,12 +11,14 @@ use crate::{Request, Response};
/// # Example
///
/// ```
+/// use bytes::Bytes;
/// use hyper::{Body, Request, Response, Version};
+/// use http_body_util::Full;
/// use hyper::service::service_fn;
///
/// let service = service_fn(|req: Request<Body>| async move {
/// if req.version() == Version::HTTP_11 {
-/// Ok(Response::new(Body::from("Hello World")))
+/// Ok(Response::new(Full::<Bytes>::from("Hello World")))
/// } else {
/// // Note: it's usually better to return a Response
/// // with an appropriate StatusCode instead of an Err.
|
It was noted that the FFI body type makes use of `Body::empty()`. I think there's 2 ways we can fix that:
1. Add a `pub(crate) fn ffi() -> Body` that just starts the type like this: https://github.com/hyperium/hyper/blob/509672aada0af68a91d963e69828c6e31c44cb7b/src/body/body.rs#L240
2. Or, keep an `Empty` variant. I don't think I like this option as much, but welcome feedback.
cc @Xuanwo
> 1. Add a `pub(crate) fn ffi() -> Body` that just starts the type like this
Let me give this a try!
|
2022-08-24T00:25:01Z
| 2,958
|
Remove Body's `Once` variant
|
hyperium__hyper-2958
|
diff --git a/benches/pipeline.rs b/benches/pipeline.rs
--- a/benches/pipeline.rs
+++ b/benches/pipeline.rs
@@ -3,17 +3,20 @@
extern crate test;
+use std::convert::Infallible;
use std::io::{Read, Write};
use std::net::{SocketAddr, TcpStream};
use std::sync::mpsc;
use std::time::Duration;
+use bytes::Bytes;
+use http_body_util::Full;
use tokio::net::TcpListener;
use tokio::sync::oneshot;
use hyper::server::conn::Http;
use hyper::service::service_fn;
-use hyper::{Body, Response};
+use hyper::Response;
const PIPELINED_REQUESTS: usize = 16;
diff --git a/benches/pipeline.rs b/benches/pipeline.rs
--- a/benches/pipeline.rs
+++ b/benches/pipeline.rs
@@ -43,7 +46,9 @@ fn hello_world_16(b: &mut test::Bencher) {
.serve_connection(
stream,
service_fn(|_| async {
- Ok::<_, hyper::Error>(Response::new(Body::from("Hello, World!")))
+ Ok::<_, Infallible>(Response::new(Full::new(Bytes::from(
+ "Hello, World!",
+ ))))
}),
)
.await
diff --git a/examples/web_api.rs b/examples/web_api.rs
--- a/examples/web_api.rs
+++ b/examples/web_api.rs
@@ -10,6 +11,7 @@ use tokio::net::{TcpListener, TcpStream};
type GenericError = Box<dyn std::error::Error + Send + Sync>;
type Result<T> = std::result::Result<T, GenericError>;
+type BoxBody = http_body_util::combinators::BoxBody<Bytes, hyper::Error>;
static INDEX: &[u8] = b"<a href=\"test.html\">test.html</a>";
static INTERNAL_SERVER_ERROR: &[u8] = b"Internal Server Error";
diff --git a/examples/web_api.rs b/examples/web_api.rs
--- a/examples/web_api.rs
+++ b/examples/web_api.rs
@@ -56,28 +58,28 @@ async fn api_post_response(req: Request<Body>) -> Result<Response<Body>> {
let response = Response::builder()
.status(StatusCode::OK)
.header(header::CONTENT_TYPE, "application/json")
- .body(Body::from(json))?;
+ .body(full(json))?;
Ok(response)
}
-async fn api_get_response() -> Result<Response<Body>> {
+async fn api_get_response() -> Result<Response<BoxBody>> {
let data = vec!["foo", "bar"];
let res = match serde_json::to_string(&data) {
Ok(json) => Response::builder()
.header(header::CONTENT_TYPE, "application/json")
- .body(Body::from(json))
+ .body(full(json))
.unwrap(),
Err(_) => Response::builder()
.status(StatusCode::INTERNAL_SERVER_ERROR)
- .body(INTERNAL_SERVER_ERROR.into())
+ .body(full(INTERNAL_SERVER_ERROR))
.unwrap(),
};
Ok(res)
}
-async fn response_examples(req: Request<Body>) -> Result<Response<Body>> {
+async fn response_examples(req: Request<Body>) -> Result<Response<BoxBody>> {
match (req.method(), req.uri().path()) {
- (&Method::GET, "/") | (&Method::GET, "/index.html") => Ok(Response::new(INDEX.into())),
+ (&Method::GET, "/") | (&Method::GET, "/index.html") => Ok(Response::new(full(INDEX))),
(&Method::GET, "/test.html") => client_request_response().await,
(&Method::POST, "/json_api") => api_post_response(req).await,
(&Method::GET, "/json_api") => api_get_response().await,
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -506,8 +421,6 @@ mod tests {
assert_eq!(a.upper(), b.upper(), "upper for {:?}", note);
}
- eq(Body::from("Hello"), SizeHint::with_exact(5), "from str");
-
eq(Body::empty(), SizeHint::with_exact(0), "empty");
eq(Body::channel().1, SizeHint::new(), "channel");
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -387,7 +387,7 @@ mod tests {
let (mut tx, mut rx) = channel::<Request<Body>, Response<Body>>();
b.iter(move || {
- let _ = tx.send(Request::default()).unwrap();
+ let _ = tx.send(Request::new(Body::empty())).unwrap();
rt.block_on(async {
loop {
let poll_once = PollOnce(&mut rx);
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -708,9 +697,15 @@ mod tests {
let dispatcher = Dispatcher::new(Client::new(rx), conn);
let _dispatcher = tokio::spawn(async move { dispatcher.await });
+ let body = {
+ let (mut tx, body) = crate::Body::new_channel(DecodedLength::new(4), false);
+ tx.try_send_data("reee".into()).unwrap();
+ body
+ };
+
let req = crate::Request::builder()
.method("POST")
- .body(crate::Body::from("reee"))
+ .body(body)
.unwrap();
let res = tx.try_send(req).unwrap().await.expect("response");
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1332,9 +1332,10 @@ mod conn {
use std::thread;
use std::time::Duration;
- use bytes::Buf;
+ use bytes::{Buf, Bytes};
use futures_channel::oneshot;
use futures_util::future::{self, poll_fn, FutureExt, TryFutureExt};
+ use http_body_util::Empty;
use hyper::upgrade::OnUpgrade;
use tokio::io::{AsyncRead, AsyncReadExt as _, AsyncWrite, AsyncWriteExt as _, ReadBuf};
use tokio::net::{TcpListener as TkTcpListener, TcpStream};
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1379,7 +1380,7 @@ mod conn {
let req = Request::builder()
.uri("/a")
- .body(Default::default())
+ .body(Empty::<Bytes>::new())
.unwrap();
let mut res = client.send_request(req).await.expect("send_request");
assert_eq!(res.status(), hyper::StatusCode::OK);
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1423,7 +1424,7 @@ mod conn {
let req = Request::builder()
.uri("/a")
- .body(Default::default())
+ .body(Empty::<Bytes>::new())
.unwrap();
let mut res = client.send_request(req).await.expect("send_request");
assert_eq!(res.status(), hyper::StatusCode::OK);
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1479,7 +1480,7 @@ mod conn {
let req = Request::builder()
.uri("/")
- .body(Default::default())
+ .body(Empty::<Bytes>::new())
.unwrap();
let res = client.send_request(req).and_then(move |mut res| {
assert_eq!(res.status(), hyper::StatusCode::OK);
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1576,7 +1577,7 @@ mod conn {
let req = Request::builder()
.uri("http://hyper.local/a")
- .body(Default::default())
+ .body(Empty::<Bytes>::new())
.unwrap();
let res = client.send_request(req).and_then(move |res| {
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1622,7 +1623,7 @@ mod conn {
let req = Request::builder()
.uri("/a")
.version(hyper::Version::HTTP_2)
- .body(Default::default())
+ .body(Empty::<Bytes>::new())
.unwrap();
let res = client.send_request(req).and_then(move |res| {
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1663,7 +1664,7 @@ mod conn {
let req = Request::builder()
.uri("/a")
- .body(Default::default())
+ .body(Empty::<Bytes>::new())
.unwrap();
let res1 = client.send_request(req).and_then(move |res| {
assert_eq!(res.status(), hyper::StatusCode::OK);
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1673,7 +1674,7 @@ mod conn {
// pipelined request will hit NotReady, and thus should return an Error::Cancel
let req = Request::builder()
.uri("/b")
- .body(Default::default())
+ .body(Empty::<Bytes>::new())
.unwrap();
let res2 = client.send_request(req).map(|result| {
let err = result.expect_err("res2");
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1734,7 +1735,7 @@ mod conn {
let req = Request::builder()
.uri("/a")
- .body(Default::default())
+ .body(Empty::<Bytes>::new())
.unwrap();
let res = client.send_request(req).and_then(move |res| {
assert_eq!(res.status(), hyper::StatusCode::SWITCHING_PROTOCOLS);
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1821,7 +1822,7 @@ mod conn {
let req = Request::builder()
.method("CONNECT")
.uri(addr.to_string())
- .body(Default::default())
+ .body(Empty::<Bytes>::new())
.unwrap();
let res = client
.send_request(req)
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1886,7 +1887,7 @@ mod conn {
res = listener.accept() => {
let (stream, _) = res.unwrap();
- let service = service_fn(|_:Request<Body>| future::ok::<Response<Body>, hyper::Error>(Response::new(Body::empty())));
+ let service = service_fn(|_:Request<Body>| future::ok::<_, hyper::Error>(Response::new(Empty::<Bytes>::new())));
let mut shdn_rx = shdn_rx.clone();
tokio::task::spawn(async move {
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1913,7 +1914,7 @@ mod conn {
let io = tcp_connect(&addr).await.expect("tcp connect");
let (mut client, conn) = conn::Builder::new()
.http2_only(true)
- .handshake::<_, Body>(io)
+ .handshake(io)
.await
.expect("http handshake");
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1928,7 +1929,7 @@ mod conn {
let req = Request::builder()
.uri(format!("http://{}/", addr))
- .body(Body::empty())
+ .body(Empty::<Bytes>::new())
.expect("request builder");
client.send_request(req).await.expect("req1 send");
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2046,7 +2047,7 @@ mod conn {
.http2_only(true)
.http2_keep_alive_interval(Duration::from_secs(1))
.http2_keep_alive_timeout(Duration::from_secs(1))
- .handshake::<_, Body>(io)
+ .handshake(io)
.await
.expect("http handshake");
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2055,7 +2056,7 @@ mod conn {
assert!(err.is_timeout());
});
- let req = http::Request::new(hyper::Body::empty());
+ let req = http::Request::new(Empty::<Bytes>::new());
let err = client
.send_request(req)
.await
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2098,7 +2099,7 @@ mod conn {
.await
.expect("server req body aggregate");
});
- Ok::<_, hyper::Error>(http::Response::new(hyper::Body::empty()))
+ Ok::<_, hyper::Error>(http::Response::new(Empty::<Bytes>::new()))
}),
)
.await
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2153,7 +2154,7 @@ mod conn {
let mut body = req.into_body();
- let mut send_stream = respond.send_response(Response::default(), false).unwrap();
+ let mut send_stream = respond.send_response(Response::new(()), false).unwrap();
send_stream.send_data("Bread?".into(), true).unwrap();
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2167,7 +2168,7 @@ mod conn {
let io = tcp_connect(&addr).await.expect("tcp connect");
let (mut client, conn) = conn::Builder::new()
.http2_only(true)
- .handshake::<_, Body>(io)
+ .handshake(io)
.await
.expect("http handshake");
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2176,7 +2177,7 @@ mod conn {
});
let req = Request::connect("localhost")
- .body(hyper::Body::empty())
+ .body(Empty::<Bytes>::new())
.unwrap();
let res = client.send_request(req).await.expect("send_request");
assert_eq!(res.status(), StatusCode::OK);
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2223,7 +2224,7 @@ mod conn {
let io = tcp_connect(&addr).await.expect("tcp connect");
let (mut client, conn) = conn::Builder::new()
.http2_only(true)
- .handshake::<_, Body>(io)
+ .handshake::<_, Empty<Bytes>>(io)
.await
.expect("http handshake");
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2231,9 +2232,7 @@ mod conn {
conn.await.expect("client conn shouldn't error");
});
- let req = Request::connect("localhost")
- .body(hyper::Body::empty())
- .unwrap();
+ let req = Request::connect("localhost").body(Empty::new()).unwrap();
let res = client.send_request(req).await.expect("send_request");
assert_eq!(res.status(), StatusCode::BAD_REQUEST);
assert!(res.extensions().get::<OnUpgrade>().is_none());
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -20,7 +20,7 @@ use futures_util::future::{self, Either, FutureExt, TryFutureExt};
use h2::client::SendRequest;
use h2::{RecvStream, SendStream};
use http::header::{HeaderName, HeaderValue};
-use http_body_util::{combinators::BoxBody, BodyExt, StreamBody};
+use http_body_util::{combinators::BoxBody, BodyExt, Empty, Full, StreamBody};
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::{TcpListener as TkTcpListener, TcpListener, TcpStream as TkTcpStream};
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -963,7 +963,7 @@ async fn expect_continue_waits_for_body_poll() {
drop(req);
Response::builder()
.status(StatusCode::BAD_REQUEST)
- .body(hyper::Body::empty())
+ .body(Empty::<Bytes>::new())
})
}),
)
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1256,7 +1256,7 @@ async fn http1_allow_half_close() {
socket,
service_fn(|_| {
tokio::time::sleep(Duration::from_millis(500))
- .map(|_| Ok::<_, hyper::Error>(Response::new(Body::empty())))
+ .map(|_| Ok::<_, hyper::Error>(Response::new(Empty::<Bytes>::new())))
}),
)
.await
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1317,7 +1317,7 @@ async fn returning_1xx_response_is_error() {
Ok::<_, hyper::Error>(
Response::builder()
.status(StatusCode::CONTINUE)
- .body(Body::empty())
+ .body(Empty::<Bytes>::new())
.unwrap(),
)
}),
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1382,7 +1382,7 @@ async fn header_read_timeout_slow_writes() {
service_fn(|_| {
let res = Response::builder()
.status(200)
- .body(hyper::Body::empty())
+ .body(Empty::<Bytes>::new())
.unwrap();
future::ready(Ok::<_, hyper::Error>(res))
}),
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1457,7 +1457,7 @@ async fn header_read_timeout_slow_writes_multiple_requests() {
service_fn(|_| {
let res = Response::builder()
.status(200)
- .body(hyper::Body::empty())
+ .body(Empty::<Bytes>::new())
.unwrap();
future::ready(Ok::<_, hyper::Error>(res))
}),
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1503,7 +1503,7 @@ async fn upgrades() {
let res = Response::builder()
.status(101)
.header("upgrade", "foobar")
- .body(hyper::Body::empty())
+ .body(Empty::<Bytes>::new())
.unwrap();
future::ready(Ok::<_, hyper::Error>(res))
}),
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1557,7 +1557,7 @@ async fn http_connect() {
service_fn(|_| {
let res = Response::builder()
.status(200)
- .body(hyper::Body::empty())
+ .body(Empty::<Bytes>::new())
.unwrap();
future::ready(Ok::<_, hyper::Error>(res))
}),
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1618,7 +1618,7 @@ async fn upgrades_new() {
Response::builder()
.status(101)
.header("upgrade", "foobar")
- .body(hyper::Body::empty())
+ .body(Empty::<Bytes>::new())
.unwrap(),
)
});
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1655,7 +1655,7 @@ async fn upgrades_ignored() {
tokio::spawn(async move {
let svc = service_fn(move |req: Request<Body>| {
assert_eq!(req.headers()["upgrade"], "yolo");
- future::ok::<_, hyper::Error>(Response::new(hyper::Body::empty()))
+ future::ok::<_, hyper::Error>(Response::new(Empty::<Bytes>::new()))
});
loop {
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1678,7 +1678,7 @@ async fn upgrades_ignored() {
.uri(&*url)
.header("upgrade", "yolo")
.header("connection", "upgrade")
- .body(hyper::Body::empty())
+ .body(Empty::<Bytes>::new())
.expect("make_req")
};
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1726,7 +1726,7 @@ async fn http_connect_new() {
future::ok::<_, hyper::Error>(
Response::builder()
.status(200)
- .body(hyper::Body::empty())
+ .body(Empty::<Bytes>::new())
.unwrap(),
)
});
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1808,7 +1808,7 @@ async fn h2_connect() {
future::ok::<_, hyper::Error>(
Response::builder()
.status(200)
- .body(hyper::Body::empty())
+ .body(Empty::<Bytes>::new())
.unwrap(),
)
});
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1920,7 +1920,7 @@ async fn h2_connect_multiplex() {
future::ok::<_, hyper::Error>(
Response::builder()
.status(200)
- .body(hyper::Body::empty())
+ .body(Empty::<Bytes>::new())
.unwrap(),
)
});
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1995,7 +1995,7 @@ async fn h2_connect_large_body() {
future::ok::<_, hyper::Error>(
Response::builder()
.status(200)
- .body(hyper::Body::empty())
+ .body(Empty::<Bytes>::new())
.unwrap(),
)
});
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2067,7 +2067,7 @@ async fn h2_connect_empty_frames() {
future::ok::<_, hyper::Error>(
Response::builder()
.status(200)
- .body(hyper::Body::empty())
+ .body(Empty::<Bytes>::new())
.unwrap(),
)
});
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2518,7 +2518,7 @@ async fn http2_keep_alive_with_responsive_client() {
let tcp = connect_async(addr).await;
let (mut client, conn) = hyper::client::conn::Builder::new()
.http2_only(true)
- .handshake::<_, Body>(tcp)
+ .handshake(tcp)
.await
.expect("http handshake");
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2528,7 +2528,7 @@ async fn http2_keep_alive_with_responsive_client() {
tokio::time::sleep(Duration::from_secs(4)).await;
- let req = http::Request::new(hyper::Body::empty());
+ let req = http::Request::new(Empty::<Bytes>::new());
client.send_request(req).await.expect("client.send_request");
}
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2849,16 +2849,16 @@ const HELLO: &str = "hello";
struct HelloWorld;
impl tower_service::Service<Request<Body>> for HelloWorld {
- type Response = Response<Body>;
+ type Response = Response<Full<Bytes>>;
type Error = hyper::Error;
- type Future = future::Ready<Result<Response<Body>, Self::Error>>;
+ type Future = future::Ready<Result<Self::Response, Self::Error>>;
fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Ok(()).into()
}
fn call(&mut self, _req: Request<Body>) -> Self::Future {
- let response = Response::new(HELLO.into());
+ let response = Response::new(Full::new(HELLO.into()));
future::ok(response)
}
}
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -3132,13 +3132,13 @@ impl TestClient {
Request::builder()
.uri(uri)
.method(Method::GET)
- .body(Body::empty())
+ .body(Empty::<Bytes>::new())
.unwrap(),
)
.await
}
- async fn request(&self, req: Request<Body>) -> Result<Response<Body>, hyper::Error> {
+ async fn request(&self, req: Request<Empty<Bytes>>) -> Result<Response<Body>, hyper::Error> {
let host = req.uri().host().expect("uri has no host");
let port = req.uri().port_u16().expect("uri has no port");
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -6,6 +6,8 @@ use std::sync::{
Arc, Mutex,
};
+use bytes::Bytes;
+use http_body_util::Full;
use hyper::client::conn::Builder;
use hyper::server::conn::Http;
use tokio::net::{TcpListener, TcpStream};
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -371,7 +373,7 @@ async fn async_test(cfg: __TestConfig) {
let mut res = Response::builder()
.status(sres.status)
- .body(Body::from(sres.body))
+ .body(Full::new(Bytes::from(sres.body)))
.expect("Response::build");
*res.headers_mut() = sres.headers;
res
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -405,7 +407,7 @@ async fn async_test(cfg: __TestConfig) {
.method(creq.method)
.uri(uri)
//.headers(creq.headers)
- .body(creq.body.into())
+ .body(Full::new(Bytes::from(creq.body)))
.expect("Request::build");
*req.headers_mut() = creq.headers;
let cstatus = cres.status;
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -417,7 +419,7 @@ async fn async_test(cfg: __TestConfig) {
let (mut sender, conn) = hyper::client::conn::Builder::new()
.http2_only(http2_only)
- .handshake::<TcpStream, Body>(stream)
+ .handshake(stream)
.await
.unwrap();
|
hyperium/hyper
|
6fd696e10974f10b2c6b9d16393bbbfa21c2333f
|
[
"2948"
] |
0.3
|
bb3af17ce1a3841e9170adabcce595c7c8743ea7
|
diff --git a/src/client/connect/http.rs /dev/null
--- a/src/client/connect/http.rs
+++ /dev/null
@@ -1,252 +0,0 @@
-use std::error::Error as StdError;
-use std::fmt;
-use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr};
-use std::sync::Arc;
-use std::time::Duration;
-
-//#[cfg(feature = "runtime")] use super::dns::TokioThreadpoolGaiResolver;
-
-/// A connector for the `http` scheme.
-///
-/// Performs DNS resolution in a thread pool, and then connects over TCP.
-///
-/// # Note
-///
-/// Sets the [`HttpInfo`](HttpInfo) value on responses, which includes
-/// transport information such as the remote socket address used.
-#[derive(Clone)]
-pub struct HttpConnector {
- config: Arc<Config>,
-}
-
-/// Extra information about the transport when an HttpConnector is used.
-///
-/// # Note
-///
-/// If a different connector is used besides [`HttpConnector`](HttpConnector),
-/// this value will not exist in the extensions. Consult that specific
-/// connector to see what "extra" information it might provide to responses.
-#[derive(Clone, Debug)]
-pub struct HttpInfo {
- remote_addr: SocketAddr,
- local_addr: SocketAddr,
-}
-
-#[derive(Clone)]
-struct Config {
- connect_timeout: Option<Duration>,
- enforce_http: bool,
- happy_eyeballs_timeout: Option<Duration>,
- keep_alive_timeout: Option<Duration>,
- local_address_ipv4: Option<Ipv4Addr>,
- local_address_ipv6: Option<Ipv6Addr>,
- nodelay: bool,
- reuse_address: bool,
- send_buffer_size: Option<usize>,
- recv_buffer_size: Option<usize>,
-}
-
-// ===== impl HttpConnector =====
-
-impl HttpConnector {
- /// Construct a new HttpConnector.
- pub fn new() -> HttpConnector {
- HttpConnector {
- config: Arc::new(Config {
- connect_timeout: None,
- enforce_http: true,
- happy_eyeballs_timeout: Some(Duration::from_millis(300)),
- keep_alive_timeout: None,
- local_address_ipv4: None,
- local_address_ipv6: None,
- nodelay: false,
- reuse_address: false,
- send_buffer_size: None,
- recv_buffer_size: None,
- }),
- }
- }
-}
-
-/*
-#[cfg(feature = "runtime")]
-impl HttpConnector<TokioThreadpoolGaiResolver> {
- /// Construct a new HttpConnector using the `TokioThreadpoolGaiResolver`.
- ///
- /// This resolver **requires** the threadpool runtime to be used.
- pub fn new_with_tokio_threadpool_resolver() -> Self {
- HttpConnector::new_with_resolver(TokioThreadpoolGaiResolver::new())
- }
-}
-*/
-
-impl HttpConnector {
- /// Option to enforce all `Uri`s have the `http` scheme.
- ///
- /// Enabled by default.
- #[inline]
- pub fn enforce_http(&mut self, is_enforced: bool) {
- self.config_mut().enforce_http = is_enforced;
- }
-
- /// Set that all sockets have `SO_KEEPALIVE` set with the supplied duration.
- ///
- /// If `None`, the option will not be set.
- ///
- /// Default is `None`.
- #[inline]
- pub fn set_keepalive(&mut self, dur: Option<Duration>) {
- self.config_mut().keep_alive_timeout = dur;
- }
-
- /// Set that all sockets have `SO_NODELAY` set to the supplied value `nodelay`.
- ///
- /// Default is `false`.
- #[inline]
- pub fn set_nodelay(&mut self, nodelay: bool) {
- self.config_mut().nodelay = nodelay;
- }
-
- /// Sets the value of the SO_SNDBUF option on the socket.
- #[inline]
- pub fn set_send_buffer_size(&mut self, size: Option<usize>) {
- self.config_mut().send_buffer_size = size;
- }
-
- /// Sets the value of the SO_RCVBUF option on the socket.
- #[inline]
- pub fn set_recv_buffer_size(&mut self, size: Option<usize>) {
- self.config_mut().recv_buffer_size = size;
- }
-
- /// Set that all sockets are bound to the configured address before connection.
- ///
- /// If `None`, the sockets will not be bound.
- ///
- /// Default is `None`.
- #[inline]
- pub fn set_local_address(&mut self, addr: Option<IpAddr>) {
- let (v4, v6) = match addr {
- Some(IpAddr::V4(a)) => (Some(a), None),
- Some(IpAddr::V6(a)) => (None, Some(a)),
- _ => (None, None),
- };
-
- let cfg = self.config_mut();
-
- cfg.local_address_ipv4 = v4;
- cfg.local_address_ipv6 = v6;
- }
-
- /// Set that all sockets are bound to the configured IPv4 or IPv6 address (depending on host's
- /// preferences) before connection.
- #[inline]
- pub fn set_local_addresses(&mut self, addr_ipv4: Ipv4Addr, addr_ipv6: Ipv6Addr) {
- let cfg = self.config_mut();
-
- cfg.local_address_ipv4 = Some(addr_ipv4);
- cfg.local_address_ipv6 = Some(addr_ipv6);
- }
-
- /// Set the connect timeout.
- ///
- /// If a domain resolves to multiple IP addresses, the timeout will be
- /// evenly divided across them.
- ///
- /// Default is `None`.
- #[inline]
- pub fn set_connect_timeout(&mut self, dur: Option<Duration>) {
- self.config_mut().connect_timeout = dur;
- }
-
- /// Set timeout for [RFC 6555 (Happy Eyeballs)][RFC 6555] algorithm.
- ///
- /// If hostname resolves to both IPv4 and IPv6 addresses and connection
- /// cannot be established using preferred address family before timeout
- /// elapses, then connector will in parallel attempt connection using other
- /// address family.
- ///
- /// If `None`, parallel connection attempts are disabled.
- ///
- /// Default is 300 milliseconds.
- ///
- /// [RFC 6555]: https://tools.ietf.org/html/rfc6555
- #[inline]
- pub fn set_happy_eyeballs_timeout(&mut self, dur: Option<Duration>) {
- self.config_mut().happy_eyeballs_timeout = dur;
- }
-
- /// Set that all socket have `SO_REUSEADDR` set to the supplied value `reuse_address`.
- ///
- /// Default is `false`.
- #[inline]
- pub fn set_reuse_address(&mut self, reuse_address: bool) -> &mut Self {
- self.config_mut().reuse_address = reuse_address;
- self
- }
-
- // private
-
- fn config_mut(&mut self) -> &mut Config {
- // If the are HttpConnector clones, this will clone the inner
- // config. So mutating the config won't ever affect previous
- // clones.
- Arc::make_mut(&mut self.config)
- }
-}
-
-// R: Debug required for now to allow adding it to debug output later...
-impl fmt::Debug for HttpConnector {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("HttpConnector").finish()
- }
-}
-
-impl HttpInfo {
- /// Get the remote address of the transport used.
- pub fn remote_addr(&self) -> SocketAddr {
- self.remote_addr
- }
-
- /// Get the local address of the transport used.
- pub fn local_addr(&self) -> SocketAddr {
- self.local_addr
- }
-}
-
-// Not publicly exported (so missing_docs doesn't trigger).
-pub(crate) struct ConnectError {
- msg: Box<str>,
- cause: Option<Box<dyn StdError + Send + Sync>>,
-}
-
-impl fmt::Debug for ConnectError {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- if let Some(ref cause) = self.cause {
- f.debug_tuple("ConnectError")
- .field(&self.msg)
- .field(cause)
- .finish()
- } else {
- self.msg.fmt(f)
- }
- }
-}
-
-impl fmt::Display for ConnectError {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.write_str(&self.msg)?;
-
- if let Some(ref cause) = self.cause {
- write!(f, ": {}", cause)?;
- }
-
- Ok(())
- }
-}
-
-impl StdError for ConnectError {
- fn source(&self) -> Option<&(dyn StdError + 'static)> {
- self.cause.as_ref().map(|e| &**e as _)
- }
-}
diff --git a/src/service/mod.rs b/src/service/mod.rs
--- a/src/service/mod.rs
+++ b/src/service/mod.rs
@@ -24,13 +24,9 @@
pub use tower_service::Service;
mod http;
-#[cfg(all(any(feature = "http1", feature = "http2"), feature = "client"))]
-mod oneshot;
mod util;
#[cfg(all(any(feature = "http1", feature = "http2"), feature = "server"))]
pub(super) use self::http::HttpService;
-#[cfg(all(any(feature = "http1", feature = "http2"), feature = "client"))]
-pub(super) use self::oneshot::{oneshot, Oneshot};
pub use self::util::service_fn;
diff --git a/src/service/oneshot.rs /dev/null
--- a/src/service/oneshot.rs
+++ /dev/null
@@ -1,73 +0,0 @@
-// TODO: Eventually to be replaced with tower_util::Oneshot.
-
-use pin_project_lite::pin_project;
-use tower_service::Service;
-
-use crate::common::{task, Future, Pin, Poll};
-
-pub(crate) fn oneshot<S, Req>(svc: S, req: Req) -> Oneshot<S, Req>
-where
- S: Service<Req>,
-{
- Oneshot {
- state: State::NotReady { svc, req },
- }
-}
-
-pin_project! {
- // A `Future` consuming a `Service` and request, waiting until the `Service`
- // is ready, and then calling `Service::call` with the request, and
- // waiting for that `Future`.
- #[allow(missing_debug_implementations)]
- pub struct Oneshot<S: Service<Req>, Req> {
- #[pin]
- state: State<S, Req>,
- }
-}
-
-pin_project! {
- #[project = StateProj]
- #[project_replace = StateProjOwn]
- enum State<S: Service<Req>, Req> {
- NotReady {
- svc: S,
- req: Req,
- },
- Called {
- #[pin]
- fut: S::Future,
- },
- Tmp,
- }
-}
-
-impl<S, Req> Future for Oneshot<S, Req>
-where
- S: Service<Req>,
-{
- type Output = Result<S::Response, S::Error>;
-
- fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
- let mut me = self.project();
-
- loop {
- match me.state.as_mut().project() {
- StateProj::NotReady { ref mut svc, .. } => {
- ready!(svc.poll_ready(cx))?;
- // fallthrough out of the match's borrow
- }
- StateProj::Called { fut } => {
- return fut.poll(cx);
- }
- StateProj::Tmp => unreachable!(),
- }
-
- match me.state.as_mut().project_replace(State::Tmp) {
- StateProjOwn::NotReady { mut svc, req } => {
- me.state.set(State::Called { fut: svc.call(req) });
- }
- _ => unreachable!(),
- }
- }
- }
-}
|
2022-08-17T00:51:26Z
| 2,949
|
Remove client connect module
|
hyperium__hyper-2949
|
diff --git a/src/client/connect/dns.rs /dev/null
--- a/src/client/connect/dns.rs
+++ /dev/null
@@ -1,239 +0,0 @@
-//! DNS Resolution used by the `HttpConnector`.
-//!
-//! This module contains:
-//!
-//! - A [`GaiResolver`](GaiResolver) that is the default resolver for the
-//! `HttpConnector`.
-//! - The `Name` type used as an argument to custom resolvers.
-//!
-//! # Resolvers are `Service`s
-//!
-//! A resolver is just a
-//! `Service<Name, Response = impl Iterator<Item = SocketAddr>>`.
-//!
-//! A simple resolver that ignores the name and always returns a specific
-//! address:
-//!
-//! ```rust,ignore
-//! use std::{convert::Infallible, iter, net::SocketAddr};
-//!
-//! let resolver = tower::service_fn(|_name| async {
-//! Ok::<_, Infallible>(iter::once(SocketAddr::from(([127, 0, 0, 1], 8080))))
-//! });
-//! ```
-use std::error::Error;
-use std::net::{SocketAddr};
-use std::str::FromStr;
-use std::{fmt, vec};
-
-/// A domain name to resolve into IP addresses.
-#[derive(Clone, Hash, Eq, PartialEq)]
-pub struct Name {
- host: Box<str>,
-}
-
-/// A resolver using blocking `getaddrinfo` calls in a threadpool.
-#[derive(Clone)]
-pub struct GaiResolver {
- _priv: (),
-}
-
-/// An iterator of IP addresses returned from `getaddrinfo`.
-pub struct GaiAddrs {
- inner: SocketAddrs,
-}
-
-impl Name {
- pub(super) fn new(host: Box<str>) -> Name {
- Name { host }
- }
-
- /// View the hostname as a string slice.
- pub fn as_str(&self) -> &str {
- &self.host
- }
-}
-
-impl fmt::Debug for Name {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- fmt::Debug::fmt(&self.host, f)
- }
-}
-
-impl fmt::Display for Name {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- fmt::Display::fmt(&self.host, f)
- }
-}
-
-impl FromStr for Name {
- type Err = InvalidNameError;
-
- fn from_str(host: &str) -> Result<Self, Self::Err> {
- // Possibly add validation later
- Ok(Name::new(host.into()))
- }
-}
-
-/// Error indicating a given string was not a valid domain name.
-#[derive(Debug)]
-pub struct InvalidNameError(());
-
-impl fmt::Display for InvalidNameError {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.write_str("Not a valid domain name")
- }
-}
-
-impl Error for InvalidNameError {}
-
-impl GaiResolver {
- /// Construct a new `GaiResolver`.
- pub fn new() -> Self {
- GaiResolver { _priv: () }
- }
-}
-
-impl fmt::Debug for GaiResolver {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.pad("GaiResolver")
- }
-}
-
-impl Iterator for GaiAddrs {
- type Item = SocketAddr;
-
- fn next(&mut self) -> Option<Self::Item> {
- self.inner.next()
- }
-}
-
-impl fmt::Debug for GaiAddrs {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.pad("GaiAddrs")
- }
-}
-
-pub(super) struct SocketAddrs {
- iter: vec::IntoIter<SocketAddr>,
-}
-
-impl Iterator for SocketAddrs {
- type Item = SocketAddr;
- #[inline]
- fn next(&mut self) -> Option<SocketAddr> {
- self.iter.next()
- }
-}
-
-/*
-/// A resolver using `getaddrinfo` calls via the `tokio_executor::threadpool::blocking` API.
-///
-/// Unlike the `GaiResolver` this will not spawn dedicated threads, but only works when running on the
-/// multi-threaded Tokio runtime.
-#[cfg(feature = "runtime")]
-#[derive(Clone, Debug)]
-pub struct TokioThreadpoolGaiResolver(());
-
-/// The future returned by `TokioThreadpoolGaiResolver`.
-#[cfg(feature = "runtime")]
-#[derive(Debug)]
-pub struct TokioThreadpoolGaiFuture {
- name: Name,
-}
-
-#[cfg(feature = "runtime")]
-impl TokioThreadpoolGaiResolver {
- /// Creates a new DNS resolver that will use tokio threadpool's blocking
- /// feature.
- ///
- /// **Requires** its futures to be run on the threadpool runtime.
- pub fn new() -> Self {
- TokioThreadpoolGaiResolver(())
- }
-}
-
-#[cfg(feature = "runtime")]
-impl Service<Name> for TokioThreadpoolGaiResolver {
- type Response = GaiAddrs;
- type Error = io::Error;
- type Future = TokioThreadpoolGaiFuture;
-
- fn poll_ready(&mut self, _cx: &mut task::Context<'_>) -> Poll<Result<(), io::Error>> {
- Poll::Ready(Ok(()))
- }
-
- fn call(&mut self, name: Name) -> Self::Future {
- TokioThreadpoolGaiFuture { name }
- }
-}
-
-#[cfg(feature = "runtime")]
-impl Future for TokioThreadpoolGaiFuture {
- type Output = Result<GaiAddrs, io::Error>;
-
- fn poll(self: Pin<&mut Self>, _cx: &mut task::Context<'_>) -> Poll<Self::Output> {
- match ready!(tokio_executor::threadpool::blocking(|| (
- self.name.as_str(),
- 0
- )
- .to_socket_addrs()))
- {
- Ok(Ok(iter)) => Poll::Ready(Ok(GaiAddrs {
- inner: IpAddrs { iter },
- })),
- Ok(Err(e)) => Poll::Ready(Err(e)),
- // a BlockingError, meaning not on a tokio_executor::threadpool :(
- Err(e) => Poll::Ready(Err(io::Error::new(io::ErrorKind::Other, e))),
- }
- }
-}
-*/
-
-mod sealed {
- use super::{Name, SocketAddr};
- use crate::common::{task, Future, Poll};
- use tower_service::Service;
-
- // "Trait alias" for `Service<Name, Response = Addrs>`
- pub(crate) trait Resolve {
- type Addrs: Iterator<Item = SocketAddr>;
- type Error: Into<Box<dyn std::error::Error + Send + Sync>>;
- type Future: Future<Output = Result<Self::Addrs, Self::Error>>;
-
- fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>>;
- fn resolve(&mut self, name: Name) -> Self::Future;
- }
-
- impl<S> Resolve for S
- where
- S: Service<Name>,
- S::Response: Iterator<Item = SocketAddr>,
- S::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
- {
- type Addrs = S::Response;
- type Error = S::Error;
- type Future = S::Future;
-
- fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>> {
- Service::poll_ready(self, cx)
- }
-
- fn resolve(&mut self, name: Name) -> Self::Future {
- Service::call(self, name)
- }
- }
-}
-
-#[cfg(test)]
-mod tests {
- use super::*;
-
- #[test]
- fn test_name_from_str() {
- const DOMAIN: &str = "test.example.com";
- let name = Name::from_str(DOMAIN).expect("Should be a valid domain");
- assert_eq!(name.as_str(), DOMAIN);
- assert_eq!(name.to_string(), DOMAIN);
- }
-}
diff --git a/src/client/connect/mod.rs /dev/null
--- a/src/client/connect/mod.rs
+++ /dev/null
@@ -1,391 +0,0 @@
-//! Connectors used by the `Client`.
-//!
-//! This module contains:
-//!
-//! - A default [`HttpConnector`][] that does DNS resolution and establishes
-//! connections over TCP.
-//! - Types to build custom connectors.
-//!
-//! # Connectors
-//!
-//! A "connector" is a [`Service`][] that takes a [`Uri`][] destination, and
-//! its `Response` is some type implementing [`AsyncRead`][], [`AsyncWrite`][],
-//! and [`Connection`][].
-//!
-//! ## Custom Connectors
-//!
-//! A simple connector that ignores the `Uri` destination and always returns
-//! a TCP connection to the same address could be written like this:
-//!
-//! ```rust,ignore
-//! let connector = tower::service_fn(|_dst| async {
-//! tokio::net::TcpStream::connect("127.0.0.1:1337")
-//! })
-//! ```
-//!
-//! Or, fully written out:
-//!
-//! ```
-//! # #[cfg(feature = "runtime")]
-//! # mod rt {
-//! use std::{future::Future, net::SocketAddr, pin::Pin, task::{self, Poll}};
-//! use hyper::{service::Service, Uri};
-//! use tokio::net::TcpStream;
-//!
-//! #[derive(Clone)]
-//! struct LocalConnector;
-//!
-//! impl Service<Uri> for LocalConnector {
-//! type Response = TcpStream;
-//! type Error = std::io::Error;
-//! // We can't "name" an `async` generated future.
-//! type Future = Pin<Box<
-//! dyn Future<Output = Result<Self::Response, Self::Error>> + Send
-//! >>;
-//!
-//! fn poll_ready(&mut self, _: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>> {
-//! // This connector is always ready, but others might not be.
-//! Poll::Ready(Ok(()))
-//! }
-//!
-//! fn call(&mut self, _: Uri) -> Self::Future {
-//! Box::pin(TcpStream::connect(SocketAddr::from(([127, 0, 0, 1], 1337))))
-//! }
-//! }
-//! # }
-//! ```
-//!
-//! [`Uri`]: ::http::Uri
-//! [`AsyncRead`]: tokio::io::AsyncRead
-//! [`AsyncWrite`]: tokio::io::AsyncWrite
-//! [`Connection`]: Connection
-//! [`Service`]: crate::service::Service
-use std::fmt;
-
-use ::http::Extensions;
-
-pub use self::http::{HttpConnector, HttpInfo};
-
-pub mod dns;
-mod http;
-
-cfg_feature! {
- #![any(feature = "http1", feature = "http2")]
-
- pub use self::sealed::Connect;
-}
-
-/// Describes a type returned by a connector.
-pub trait Connection {
- /// Return metadata describing the connection.
- fn connected(&self) -> Connected;
-}
-
-/// Extra information about the connected transport.
-///
-/// This can be used to inform recipients about things like if ALPN
-/// was used, or if connected to an HTTP proxy.
-#[derive(Debug)]
-pub struct Connected {
- pub(super) alpn: Alpn,
- pub(super) is_proxied: bool,
- pub(super) extra: Option<Extra>,
-}
-
-pub(super) struct Extra(Box<dyn ExtraInner>);
-
-#[derive(Clone, Copy, Debug, PartialEq)]
-pub(super) enum Alpn {
- H2,
- None,
-}
-
-impl Connected {
- /// Create new `Connected` type with empty metadata.
- pub fn new() -> Connected {
- Connected {
- alpn: Alpn::None,
- is_proxied: false,
- extra: None,
- }
- }
-
- /// Set whether the connected transport is to an HTTP proxy.
- ///
- /// This setting will affect if HTTP/1 requests written on the transport
- /// will have the request-target in absolute-form or origin-form:
- ///
- /// - When `proxy(false)`:
- ///
- /// ```http
- /// GET /guide HTTP/1.1
- /// ```
- ///
- /// - When `proxy(true)`:
- ///
- /// ```http
- /// GET http://hyper.rs/guide HTTP/1.1
- /// ```
- ///
- /// Default is `false`.
- pub fn proxy(mut self, is_proxied: bool) -> Connected {
- self.is_proxied = is_proxied;
- self
- }
-
- /// Determines if the connected transport is to an HTTP proxy.
- pub fn is_proxied(&self) -> bool {
- self.is_proxied
- }
-
- /// Set extra connection information to be set in the extensions of every `Response`.
- pub fn extra<T: Clone + Send + Sync + 'static>(mut self, extra: T) -> Connected {
- if let Some(prev) = self.extra {
- self.extra = Some(Extra(Box::new(ExtraChain(prev.0, extra))));
- } else {
- self.extra = Some(Extra(Box::new(ExtraEnvelope(extra))));
- }
- self
- }
-
- /// Copies the extra connection information into an `Extensions` map.
- pub fn get_extras(&self, extensions: &mut Extensions) {
- if let Some(extra) = &self.extra {
- extra.set(extensions);
- }
- }
-
- /// Set that the connected transport negotiated HTTP/2 as its next protocol.
- pub fn negotiated_h2(mut self) -> Connected {
- self.alpn = Alpn::H2;
- self
- }
-
- /// Determines if the connected transport negotiated HTTP/2 as its next protocol.
- pub fn is_negotiated_h2(&self) -> bool {
- self.alpn == Alpn::H2
- }
-
- /*
- // Don't public expose that `Connected` is `Clone`, unsure if we want to
- // keep that contract...
- #[cfg(feature = "http2")]
- pub(super) fn clone(&self) -> Connected {
- Connected {
- alpn: self.alpn.clone(),
- is_proxied: self.is_proxied,
- extra: self.extra.clone(),
- }
- }
- */
-}
-
-// ===== impl Extra =====
-
-impl Extra {
- pub(super) fn set(&self, res: &mut Extensions) {
- self.0.set(res);
- }
-}
-
-impl Clone for Extra {
- fn clone(&self) -> Extra {
- Extra(self.0.clone_box())
- }
-}
-
-impl fmt::Debug for Extra {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("Extra").finish()
- }
-}
-
-trait ExtraInner: Send + Sync {
- fn clone_box(&self) -> Box<dyn ExtraInner>;
- fn set(&self, res: &mut Extensions);
-}
-
-// This indirection allows the `Connected` to have a type-erased "extra" value,
-// while that type still knows its inner extra type. This allows the correct
-// TypeId to be used when inserting into `res.extensions_mut()`.
-#[derive(Clone)]
-struct ExtraEnvelope<T>(T);
-
-impl<T> ExtraInner for ExtraEnvelope<T>
-where
- T: Clone + Send + Sync + 'static,
-{
- fn clone_box(&self) -> Box<dyn ExtraInner> {
- Box::new(self.clone())
- }
-
- fn set(&self, res: &mut Extensions) {
- res.insert(self.0.clone());
- }
-}
-
-struct ExtraChain<T>(Box<dyn ExtraInner>, T);
-
-impl<T: Clone> Clone for ExtraChain<T> {
- fn clone(&self) -> Self {
- ExtraChain(self.0.clone_box(), self.1.clone())
- }
-}
-
-impl<T> ExtraInner for ExtraChain<T>
-where
- T: Clone + Send + Sync + 'static,
-{
- fn clone_box(&self) -> Box<dyn ExtraInner> {
- Box::new(self.clone())
- }
-
- fn set(&self, res: &mut Extensions) {
- self.0.set(res);
- res.insert(self.1.clone());
- }
-}
-
-#[cfg(any(feature = "http1", feature = "http2"))]
-pub(super) mod sealed {
- use std::error::Error as StdError;
-
- use ::http::Uri;
- use tokio::io::{AsyncRead, AsyncWrite};
-
- use super::Connection;
- use crate::common::{Future, Unpin};
-
- /// Connect to a destination, returning an IO transport.
- ///
- /// A connector receives a [`Uri`](::http::Uri) and returns a `Future` of the
- /// ready connection.
- ///
- /// # Trait Alias
- ///
- /// This is really just an *alias* for the `tower::Service` trait, with
- /// additional bounds set for convenience *inside* hyper. You don't actually
- /// implement this trait, but `tower::Service<Uri>` instead.
- // The `Sized` bound is to prevent creating `dyn Connect`, since they cannot
- // fit the `Connect` bounds because of the blanket impl for `Service`.
- pub trait Connect: Sealed + Sized {
- #[doc(hidden)]
- type _Svc: ConnectSvc;
- #[doc(hidden)]
- fn connect(self, internal_only: Internal, dst: Uri) -> <Self::_Svc as ConnectSvc>::Future;
- }
-
- pub trait ConnectSvc {
- type Connection: AsyncRead + AsyncWrite + Connection + Unpin + Send + 'static;
- type Error: Into<Box<dyn StdError + Send + Sync>>;
- type Future: Future<Output = Result<Self::Connection, Self::Error>> + Unpin + Send + 'static;
-
- fn connect(self, internal_only: Internal, dst: Uri) -> Self::Future;
- }
-
- impl<S, T> Connect for S
- where
- S: tower_service::Service<Uri, Response = T> + Send + 'static,
- S::Error: Into<Box<dyn StdError + Send + Sync>>,
- S::Future: Unpin + Send,
- T: AsyncRead + AsyncWrite + Connection + Unpin + Send + 'static,
- {
- type _Svc = S;
-
- fn connect(self, _: Internal, dst: Uri) -> crate::service::Oneshot<S, Uri> {
- crate::service::oneshot(self, dst)
- }
- }
-
- impl<S, T> ConnectSvc for S
- where
- S: tower_service::Service<Uri, Response = T> + Send + 'static,
- S::Error: Into<Box<dyn StdError + Send + Sync>>,
- S::Future: Unpin + Send,
- T: AsyncRead + AsyncWrite + Connection + Unpin + Send + 'static,
- {
- type Connection = T;
- type Error = S::Error;
- type Future = crate::service::Oneshot<S, Uri>;
-
- fn connect(self, _: Internal, dst: Uri) -> Self::Future {
- crate::service::oneshot(self, dst)
- }
- }
-
- impl<S, T> Sealed for S
- where
- S: tower_service::Service<Uri, Response = T> + Send,
- S::Error: Into<Box<dyn StdError + Send + Sync>>,
- S::Future: Unpin + Send,
- T: AsyncRead + AsyncWrite + Connection + Unpin + Send + 'static,
- {
- }
-
- pub trait Sealed {}
- #[allow(missing_debug_implementations)]
- pub struct Internal;
-}
-
-#[cfg(test)]
-mod tests {
- use super::Connected;
-
- #[derive(Clone, Debug, PartialEq)]
- struct Ex1(usize);
-
- #[derive(Clone, Debug, PartialEq)]
- struct Ex2(&'static str);
-
- #[derive(Clone, Debug, PartialEq)]
- struct Ex3(&'static str);
-
- #[test]
- fn test_connected_extra() {
- let c1 = Connected::new().extra(Ex1(41));
-
- let mut ex = ::http::Extensions::new();
-
- assert_eq!(ex.get::<Ex1>(), None);
-
- c1.extra.as_ref().expect("c1 extra").set(&mut ex);
-
- assert_eq!(ex.get::<Ex1>(), Some(&Ex1(41)));
- }
-
- #[test]
- fn test_connected_extra_chain() {
- // If a user composes connectors and at each stage, there's "extra"
- // info to attach, it shouldn't override the previous extras.
-
- let c1 = Connected::new()
- .extra(Ex1(45))
- .extra(Ex2("zoom"))
- .extra(Ex3("pew pew"));
-
- let mut ex1 = ::http::Extensions::new();
-
- assert_eq!(ex1.get::<Ex1>(), None);
- assert_eq!(ex1.get::<Ex2>(), None);
- assert_eq!(ex1.get::<Ex3>(), None);
-
- c1.extra.as_ref().expect("c1 extra").set(&mut ex1);
-
- assert_eq!(ex1.get::<Ex1>(), Some(&Ex1(45)));
- assert_eq!(ex1.get::<Ex2>(), Some(&Ex2("zoom")));
- assert_eq!(ex1.get::<Ex3>(), Some(&Ex3("pew pew")));
-
- // Just like extensions, inserting the same type overrides previous type.
- let c2 = Connected::new()
- .extra(Ex1(33))
- .extra(Ex2("hiccup"))
- .extra(Ex1(99));
-
- let mut ex2 = ::http::Extensions::new();
-
- c2.extra.as_ref().expect("c2 extra").set(&mut ex2);
-
- assert_eq!(ex2.get::<Ex1>(), Some(&Ex1(99)));
- assert_eq!(ex2.get::<Ex2>(), Some(&Ex2("hiccup")));
- }
-}
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -7,7 +7,6 @@
//! For a small example program simply fetching a URL, take a look at the
//! [full client example](https://github.com/hyperium/hyper/blob/master/examples/client.rs).
-pub mod connect;
#[cfg(all(test, feature = "runtime"))]
mod tests;
|
hyperium/hyper
|
6fd696e10974f10b2c6b9d16393bbbfa21c2333f
|
|
[
"2841"
] |
0.3
|
889fa2d87252108eb7668b8bf034ffcc30985117
|
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -10,8 +10,6 @@ use http_body::{Body as HttpBody, SizeHint};
use super::DecodedLength;
use crate::common::Future;
-#[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))]
-use crate::common::Never;
use crate::common::{task, watch, Pin, Poll};
#[cfg(all(feature = "http2", any(feature = "client", feature = "server")))]
use crate::proto::h2::ping;
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -29,9 +27,6 @@ type TrailersSender = oneshot::Sender<HeaderMap>;
#[must_use = "streams do nothing unless polled"]
pub struct Body {
kind: Kind,
- /// Keep the extra bits in an `Option<Box<Extra>>`, so that
- /// Body stays small in the common case (no extras needed).
- extra: Option<Box<Extra>>,
}
enum Kind {
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -52,34 +47,6 @@ enum Kind {
Ffi(crate::ffi::UserBody),
}
-struct Extra {
- /// Allow the client to pass a future to delay the `Body` from returning
- /// EOF. This allows the `Client` to try to put the idle connection
- /// back into the pool before the body is "finished".
- ///
- /// The reason for this is so that creating a new request after finishing
- /// streaming the body of a response could sometimes result in creating
- /// a brand new connection, since the pool didn't know about the idle
- /// connection yet.
- delayed_eof: Option<DelayEof>,
-}
-
-#[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))]
-type DelayEofUntil = oneshot::Receiver<Never>;
-
-enum DelayEof {
- /// Initial state, stream hasn't seen EOF yet.
- #[cfg(any(feature = "http1", feature = "http2"))]
- #[cfg(feature = "client")]
- NotEof(DelayEofUntil),
- /// Transitions to this state once we've seen `poll` try to
- /// return EOF (`None`). This future is then polled, and
- /// when it completes, the Body finally returns EOF (`None`).
- #[cfg(any(feature = "http1", feature = "http2"))]
- #[cfg(feature = "client")]
- Eof(DelayEofUntil),
-}
-
/// A sender half created through [`Body::channel()`].
///
/// Useful when wanting to stream chunks from another thread.
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -153,7 +120,7 @@ impl Body {
}
fn new(kind: Kind) -> Body {
- Body { kind, extra: None }
+ Body { kind }
}
#[cfg(all(feature = "http2", any(feature = "client", feature = "server")))]
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -176,62 +143,6 @@ impl Body {
body
}
- #[cfg(any(feature = "http1", feature = "http2"))]
- #[cfg(feature = "client")]
- pub(crate) fn delayed_eof(&mut self, fut: DelayEofUntil) {
- self.extra_mut().delayed_eof = Some(DelayEof::NotEof(fut));
- }
-
- fn take_delayed_eof(&mut self) -> Option<DelayEof> {
- self.extra
- .as_mut()
- .and_then(|extra| extra.delayed_eof.take())
- }
-
- #[cfg(any(feature = "http1", feature = "http2"))]
- fn extra_mut(&mut self) -> &mut Extra {
- self.extra
- .get_or_insert_with(|| Box::new(Extra { delayed_eof: None }))
- }
-
- fn poll_eof(&mut self, cx: &mut task::Context<'_>) -> Poll<Option<crate::Result<Bytes>>> {
- match self.take_delayed_eof() {
- #[cfg(any(feature = "http1", feature = "http2"))]
- #[cfg(feature = "client")]
- Some(DelayEof::NotEof(mut delay)) => match self.poll_inner(cx) {
- ok @ Poll::Ready(Some(Ok(..))) | ok @ Poll::Pending => {
- self.extra_mut().delayed_eof = Some(DelayEof::NotEof(delay));
- ok
- }
- Poll::Ready(None) => match Pin::new(&mut delay).poll(cx) {
- Poll::Ready(Ok(never)) => match never {},
- Poll::Pending => {
- self.extra_mut().delayed_eof = Some(DelayEof::Eof(delay));
- Poll::Pending
- }
- Poll::Ready(Err(_done)) => Poll::Ready(None),
- },
- Poll::Ready(Some(Err(e))) => Poll::Ready(Some(Err(e))),
- },
- #[cfg(any(feature = "http1", feature = "http2"))]
- #[cfg(feature = "client")]
- Some(DelayEof::Eof(mut delay)) => match Pin::new(&mut delay).poll(cx) {
- Poll::Ready(Ok(never)) => match never {},
- Poll::Pending => {
- self.extra_mut().delayed_eof = Some(DelayEof::Eof(delay));
- Poll::Pending
- }
- Poll::Ready(Err(_done)) => Poll::Ready(None),
- },
- #[cfg(any(
- not(any(feature = "http1", feature = "http2")),
- not(feature = "client")
- ))]
- Some(delay_eof) => match delay_eof {},
- None => self.poll_inner(cx),
- }
- }
-
#[cfg(feature = "ffi")]
pub(crate) fn as_ffi_mut(&mut self) -> &mut crate::ffi::UserBody {
match self.kind {
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -313,7 +224,7 @@ impl HttpBody for Body {
mut self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
) -> Poll<Option<Result<Self::Data, Self::Error>>> {
- self.poll_eof(cx)
+ self.poll_inner(cx)
}
fn poll_trailers(
diff --git a/src/client/conn/http2.rs b/src/client/conn/http2.rs
--- a/src/client/conn/http2.rs
+++ b/src/client/conn/http2.rs
@@ -22,7 +22,7 @@ use super::super::dispatch;
/// The sender side of an established connection.
pub struct SendRequest<B> {
- dispatch: dispatch::Sender<Request<B>, Response<Body>>,
+ dispatch: dispatch::UnboundedSender<Request<B>, Response<Body>>,
}
/// A future that processes all HTTP state for the IO object.
diff --git a/src/client/conn/http2.rs b/src/client/conn/http2.rs
--- a/src/client/conn/http2.rs
+++ b/src/client/conn/http2.rs
@@ -66,8 +66,12 @@ impl<B> SendRequest<B> {
/// Polls to determine whether this sender can be used yet for a request.
///
/// If the associated connection is closed, this returns an Error.
- pub fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<crate::Result<()>> {
- self.dispatch.poll_ready(cx)
+ pub fn poll_ready(&mut self, _cx: &mut task::Context<'_>) -> Poll<crate::Result<()>> {
+ if self.is_closed() {
+ Poll::Ready(Err(crate::Error::new_closed()))
+ } else {
+ Poll::Ready(Ok(()))
+ }
}
/*
diff --git a/src/client/conn/http2.rs b/src/client/conn/http2.rs
--- a/src/client/conn/http2.rs
+++ b/src/client/conn/http2.rs
@@ -83,11 +87,11 @@ impl<B> SendRequest<B> {
pub(super) fn is_ready(&self) -> bool {
self.dispatch.is_ready()
}
+ */
pub(super) fn is_closed(&self) -> bool {
self.dispatch.is_closed()
}
- */
}
impl<B> SendRequest<B>
diff --git a/src/client/conn/http2.rs b/src/client/conn/http2.rs
--- a/src/client/conn/http2.rs
+++ b/src/client/conn/http2.rs
@@ -423,7 +427,7 @@ impl Builder {
proto::h2::client::handshake(io, rx, &opts.h2_builder, opts.exec)
.await?;
Ok((
- SendRequest { dispatch: tx },
+ SendRequest { dispatch: tx.unbound() },
Connection { inner: (PhantomData, h2) },
))
}
diff --git a/src/client/conn/mod.rs b/src/client/conn/mod.rs
--- a/src/client/conn/mod.rs
+++ b/src/client/conn/mod.rs
@@ -63,7 +63,7 @@ use std::sync::Arc;
use std::time::Duration;
use bytes::Bytes;
-use futures_util::future::{self, Either, FutureExt as _};
+use futures_util::future;
use httparse::ParserConfig;
use pin_project_lite::pin_project;
use tokio::io::{AsyncRead, AsyncWrite};
diff --git a/src/client/conn/mod.rs b/src/client/conn/mod.rs
--- a/src/client/conn/mod.rs
+++ b/src/client/conn/mod.rs
@@ -214,16 +214,6 @@ pub struct Parts<T> {
_inner: (),
}
-// ========== internal client api
-
-// A `SendRequest` that can be cloned to send HTTP2 requests.
-// private for now, probably not a great idea of a type...
-#[must_use = "futures do nothing unless polled"]
-#[cfg(feature = "http2")]
-pub(super) struct Http2SendRequest<B> {
- dispatch: dispatch::UnboundedSender<Request<B>, Response<Body>>,
-}
-
// ===== impl SendRequest
impl<B> SendRequest<B> {
diff --git a/src/client/conn/mod.rs b/src/client/conn/mod.rs
--- a/src/client/conn/mod.rs
+++ b/src/client/conn/mod.rs
@@ -233,30 +223,6 @@ impl<B> SendRequest<B> {
pub fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<crate::Result<()>> {
self.dispatch.poll_ready(cx)
}
-
- pub(super) async fn when_ready(self) -> crate::Result<Self> {
- let mut me = Some(self);
- future::poll_fn(move |cx| {
- ready!(me.as_mut().unwrap().poll_ready(cx))?;
- Poll::Ready(Ok(me.take().unwrap()))
- })
- .await
- }
-
- pub(super) fn is_ready(&self) -> bool {
- self.dispatch.is_ready()
- }
-
- pub(super) fn is_closed(&self) -> bool {
- self.dispatch.is_closed()
- }
-
- #[cfg(feature = "http2")]
- pub(super) fn into_http2(self) -> Http2SendRequest<B> {
- Http2SendRequest {
- dispatch: self.dispatch.unbound(),
- }
- }
}
impl<B> SendRequest<B>
diff --git a/src/client/conn/mod.rs b/src/client/conn/mod.rs
--- a/src/client/conn/mod.rs
+++ b/src/client/conn/mod.rs
@@ -316,32 +282,6 @@ where
ResponseFuture { inner }
}
-
- pub(super) fn send_request_retryable(
- &mut self,
- req: Request<B>,
- ) -> impl Future<Output = Result<Response<Body>, (crate::Error, Option<Request<B>>)>> + Unpin
- where
- B: Send,
- {
- match self.dispatch.try_send(req) {
- Ok(rx) => {
- Either::Left(rx.then(move |res| {
- match res {
- Ok(Ok(res)) => future::ok(res),
- Ok(Err(err)) => future::err(err),
- // this is definite bug if it happens, but it shouldn't happen!
- Err(_) => panic!("dispatch dropped without returning error"),
- }
- }))
- }
- Err(req) => {
- debug!("connection was not ready");
- let err = crate::Error::new_canceled().with("connection was not ready");
- Either::Right(future::err((err, Some(req))))
- }
- }
- }
}
impl<B> Service<Request<B>> for SendRequest<B>
diff --git a/src/client/conn/mod.rs b/src/client/conn/mod.rs
--- a/src/client/conn/mod.rs
+++ b/src/client/conn/mod.rs
@@ -367,67 +307,6 @@ impl<B> fmt::Debug for SendRequest<B> {
}
}
-// ===== impl Http2SendRequest
-
-#[cfg(feature = "http2")]
-impl<B> Http2SendRequest<B> {
- pub(super) fn is_ready(&self) -> bool {
- self.dispatch.is_ready()
- }
-
- pub(super) fn is_closed(&self) -> bool {
- self.dispatch.is_closed()
- }
-}
-
-#[cfg(feature = "http2")]
-impl<B> Http2SendRequest<B>
-where
- B: HttpBody + 'static,
-{
- pub(super) fn send_request_retryable(
- &mut self,
- req: Request<B>,
- ) -> impl Future<Output = Result<Response<Body>, (crate::Error, Option<Request<B>>)>>
- where
- B: Send,
- {
- match self.dispatch.try_send(req) {
- Ok(rx) => {
- Either::Left(rx.then(move |res| {
- match res {
- Ok(Ok(res)) => future::ok(res),
- Ok(Err(err)) => future::err(err),
- // this is definite bug if it happens, but it shouldn't happen!
- Err(_) => panic!("dispatch dropped without returning error"),
- }
- }))
- }
- Err(req) => {
- debug!("connection was not ready");
- let err = crate::Error::new_canceled().with("connection was not ready");
- Either::Right(future::err((err, Some(req))))
- }
- }
- }
-}
-
-#[cfg(feature = "http2")]
-impl<B> fmt::Debug for Http2SendRequest<B> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("Http2SendRequest").finish()
- }
-}
-
-#[cfg(feature = "http2")]
-impl<B> Clone for Http2SendRequest<B> {
- fn clone(&self) -> Self {
- Http2SendRequest {
- dispatch: self.dispatch.clone(),
- }
- }
-}
-
// ===== impl Connection
impl<T, B> Connection<T, B>
diff --git a/src/client/connect/mod.rs b/src/client/connect/mod.rs
--- a/src/client/connect/mod.rs
+++ b/src/client/connect/mod.rs
@@ -166,6 +166,7 @@ impl Connected {
self.alpn == Alpn::H2
}
+ /*
// Don't public expose that `Connected` is `Clone`, unsure if we want to
// keep that contract...
#[cfg(feature = "http2")]
diff --git a/src/client/connect/mod.rs b/src/client/connect/mod.rs
--- a/src/client/connect/mod.rs
+++ b/src/client/connect/mod.rs
@@ -176,6 +177,7 @@ impl Connected {
extra: self.extra.clone(),
}
}
+ */
}
// ===== impl Extra =====
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -127,6 +135,14 @@ impl<T, U> UnboundedSender<T, U> {
.map(move |_| rx)
.map_err(|mut e| (e.0).0.take().expect("envelope not dropped").0)
}
+
+ pub(crate) fn send(&mut self, val: T) -> Result<Promise<U>, T> {
+ let (tx, rx) = oneshot::channel();
+ self.inner
+ .send(Envelope(Some((val, Callback::NoRetry(tx)))))
+ .map(move |_| rx)
+ .map_err(|mut e| (e.0).0.take().expect("envelope not dropped").0)
+ }
}
#[cfg(feature = "http2")]
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -198,6 +214,7 @@ impl<T, U> Drop for Envelope<T, U> {
}
pub(crate) enum Callback<T, U> {
+ #[allow(unused)]
Retry(oneshot::Sender<Result<U, (crate::Error, Option<T>)>>),
NoRetry(oneshot::Sender<Result<U, crate::Error>>),
}
diff --git a/src/common/lazy.rs /dev/null
--- a/src/common/lazy.rs
+++ /dev/null
@@ -1,76 +0,0 @@
-use pin_project_lite::pin_project;
-
-use super::{task, Future, Pin, Poll};
-
-pub(crate) trait Started: Future {
- fn started(&self) -> bool;
-}
-
-pub(crate) fn lazy<F, R>(func: F) -> Lazy<F, R>
-where
- F: FnOnce() -> R,
- R: Future + Unpin,
-{
- Lazy {
- inner: Inner::Init { func },
- }
-}
-
-// FIXME: allow() required due to `impl Trait` leaking types to this lint
-pin_project! {
- #[allow(missing_debug_implementations)]
- pub(crate) struct Lazy<F, R> {
- #[pin]
- inner: Inner<F, R>,
- }
-}
-
-pin_project! {
- #[project = InnerProj]
- #[project_replace = InnerProjReplace]
- enum Inner<F, R> {
- Init { func: F },
- Fut { #[pin] fut: R },
- Empty,
- }
-}
-
-impl<F, R> Started for Lazy<F, R>
-where
- F: FnOnce() -> R,
- R: Future,
-{
- fn started(&self) -> bool {
- match self.inner {
- Inner::Init { .. } => false,
- Inner::Fut { .. } | Inner::Empty => true,
- }
- }
-}
-
-impl<F, R> Future for Lazy<F, R>
-where
- F: FnOnce() -> R,
- R: Future,
-{
- type Output = R::Output;
-
- fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
- let mut this = self.project();
-
- if let InnerProj::Fut { fut } = this.inner.as_mut().project() {
- return fut.poll(cx);
- }
-
- match this.inner.as_mut().project_replace(Inner::Empty) {
- InnerProjReplace::Init { func } => {
- this.inner.set(Inner::Fut { fut: func() });
- if let InnerProj::Fut { fut } = this.inner.project() {
- return fut.poll(cx);
- }
- unreachable!()
- }
- _ => unreachable!("lazy state wrong"),
- }
- }
-}
diff --git a/src/common/mod.rs b/src/common/mod.rs
--- a/src/common/mod.rs
+++ b/src/common/mod.rs
@@ -13,16 +13,10 @@ pub(crate) mod date;
#[cfg(any(feature = "http1", feature = "http2", feature = "server"))]
pub(crate) mod exec;
pub(crate) mod io;
-#[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))]
-mod lazy;
mod never;
-#[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))]
-pub(crate) mod sync_wrapper;
pub(crate) mod task;
pub(crate) mod watch;
-#[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))]
-pub(crate) use self::lazy::{lazy, Started as Lazy};
#[cfg(any(feature = "http1", feature = "http2", feature = "runtime"))]
pub(crate) use self::never::Never;
pub(crate) use self::task::Poll;
diff --git a/src/common/sync_wrapper.rs /dev/null
--- a/src/common/sync_wrapper.rs
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * This is a copy of the sync_wrapper crate.
- */
-
-/// A mutual exclusion primitive that relies on static type information only
-///
-/// In some cases synchronization can be proven statically: whenever you hold an exclusive `&mut`
-/// reference, the Rust type system ensures that no other part of the program can hold another
-/// reference to the data. Therefore it is safe to access it even if the current thread obtained
-/// this reference via a channel. Whenever this is the case, the overhead of allocating and locking
-/// a [`Mutex`] can be avoided by using this static version.
-///
-/// One example where this is often applicable is [`Future`], which requires an exclusive reference
-/// for its [`poll`] method: While a given `Future` implementation may not be safe to access by
-/// multiple threads concurrently, the executor can only run the `Future` on one thread at any
-/// given time, making it [`Sync`] in practice as long as the implementation is `Send`. You can
-/// therefore use the sync wrapper to prove that your data structure is `Sync` even though it
-/// contains such a `Future`.
-///
-/// # Example
-///
-/// ```ignore
-/// use hyper::common::sync_wrapper::SyncWrapper;
-/// use std::future::Future;
-///
-/// struct MyThing {
-/// future: SyncWrapper<Box<dyn Future<Output = String> + Send>>,
-/// }
-///
-/// impl MyThing {
-/// // all accesses to `self.future` now require an exclusive reference or ownership
-/// }
-///
-/// fn assert_sync<T: Sync>() {}
-///
-/// assert_sync::<MyThing>();
-/// ```
-///
-/// [`Mutex`]: https://doc.rust-lang.org/std/sync/struct.Mutex.html
-/// [`Future`]: https://doc.rust-lang.org/std/future/trait.Future.html
-/// [`poll`]: https://doc.rust-lang.org/std/future/trait.Future.html#method.poll
-/// [`Sync`]: https://doc.rust-lang.org/std/marker/trait.Sync.html
-#[repr(transparent)]
-pub(crate) struct SyncWrapper<T>(T);
-
-impl<T> SyncWrapper<T> {
- /// Creates a new SyncWrapper containing the given value.
- ///
- /// # Examples
- ///
- /// ```ignore
- /// use hyper::common::sync_wrapper::SyncWrapper;
- ///
- /// let wrapped = SyncWrapper::new(42);
- /// ```
- pub(crate) fn new(value: T) -> Self {
- Self(value)
- }
-
- /// Acquires a reference to the protected value.
- ///
- /// This is safe because it requires an exclusive reference to the wrapper. Therefore this method
- /// neither panics nor does it return an error. This is in contrast to [`Mutex::get_mut`] which
- /// returns an error if another thread panicked while holding the lock. It is not recommended
- /// to send an exclusive reference to a potentially damaged value to another thread for further
- /// processing.
- ///
- /// [`Mutex::get_mut`]: https://doc.rust-lang.org/std/sync/struct.Mutex.html#method.get_mut
- ///
- /// # Examples
- ///
- /// ```ignore
- /// use hyper::common::sync_wrapper::SyncWrapper;
- ///
- /// let mut wrapped = SyncWrapper::new(42);
- /// let value = wrapped.get_mut();
- /// *value = 0;
- /// assert_eq!(*wrapped.get_mut(), 0);
- /// ```
- pub(crate) fn get_mut(&mut self) -> &mut T {
- &mut self.0
- }
-
- /// Consumes this wrapper, returning the underlying data.
- ///
- /// This is safe because it requires ownership of the wrapper, aherefore this method will neither
- /// panic nor does it return an error. This is in contrast to [`Mutex::into_inner`] which
- /// returns an error if another thread panicked while holding the lock. It is not recommended
- /// to send an exclusive reference to a potentially damaged value to another thread for further
- /// processing.
- ///
- /// [`Mutex::into_inner`]: https://doc.rust-lang.org/std/sync/struct.Mutex.html#method.into_inner
- ///
- /// # Examples
- ///
- /// ```ignore
- /// use hyper::common::sync_wrapper::SyncWrapper;
- ///
- /// let mut wrapped = SyncWrapper::new(42);
- /// assert_eq!(wrapped.into_inner(), 42);
- /// ```
- #[allow(dead_code)]
- pub(crate) fn into_inner(self) -> T {
- self.0
- }
-}
-
-// this is safe because the only operations permitted on this data structure require exclusive
-// access or ownership
-unsafe impl<T: Send> Sync for SyncWrapper<T> {}
diff --git a/src/error.rs b/src/error.rs
--- a/src/error.rs
+++ b/src/error.rs
@@ -34,9 +34,6 @@ pub(super) enum Kind {
/// An `io::Error` that occurred while trying to read or write to a network stream.
#[cfg(any(feature = "http1", feature = "http2"))]
Io,
- /// Error occurred while connecting.
- #[allow(unused)]
- Connect,
/// Error creating a TcpListener.
#[cfg(all(feature = "tcp", feature = "server"))]
Listen,
diff --git a/src/error.rs b/src/error.rs
--- a/src/error.rs
+++ b/src/error.rs
@@ -101,22 +98,10 @@ pub(super) enum User {
#[cfg(any(feature = "http1", feature = "http2"))]
#[cfg(feature = "server")]
UnexpectedHeader,
- /// User tried to create a Request with bad version.
- #[cfg(any(feature = "http1", feature = "http2"))]
- #[cfg(feature = "client")]
- UnsupportedVersion,
- /// User tried to create a CONNECT Request with the Client.
- #[cfg(any(feature = "http1", feature = "http2"))]
- #[cfg(feature = "client")]
- UnsupportedRequestMethod,
/// User tried to respond with a 1xx (not 101) response code.
#[cfg(feature = "http1")]
#[cfg(feature = "server")]
UnsupportedStatusCode,
- /// User tried to send a Request with Client with non-absolute URI.
- #[cfg(any(feature = "http1", feature = "http2"))]
- #[cfg(feature = "client")]
- AbsoluteUriRequired,
/// User tried polling for an upgrade that doesn't exist.
NoUpgrade,
diff --git a/src/error.rs b/src/error.rs
--- a/src/error.rs
+++ b/src/error.rs
@@ -173,11 +158,6 @@ impl Error {
matches!(self.inner.kind, Kind::ChannelClosed)
}
- /// Returns true if this was an error from `Connect`.
- pub fn is_connect(&self) -> bool {
- matches!(self.inner.kind, Kind::Connect)
- }
-
/// Returns true if the connection closed before a message could complete.
pub fn is_incomplete_message(&self) -> bool {
matches!(self.inner.kind, Kind::IncompleteMessage)
diff --git a/src/error.rs b/src/error.rs
--- a/src/error.rs
+++ b/src/error.rs
@@ -270,12 +250,6 @@ impl Error {
Error::new(Kind::Listen).with(cause)
}
- #[cfg(any(feature = "http1", feature = "http2"))]
- #[cfg(feature = "client")]
- pub(super) fn new_connect<E: Into<Cause>>(cause: E) -> Error {
- Error::new(Kind::Connect).with(cause)
- }
-
pub(super) fn new_closed() -> Error {
Error::new(Kind::ChannelClosed)
}
diff --git a/src/error.rs b/src/error.rs
--- a/src/error.rs
+++ b/src/error.rs
@@ -309,30 +283,12 @@ impl Error {
Error::new(Kind::HeaderTimeout)
}
- #[cfg(any(feature = "http1", feature = "http2"))]
- #[cfg(feature = "client")]
- pub(super) fn new_user_unsupported_version() -> Error {
- Error::new_user(User::UnsupportedVersion)
- }
-
- #[cfg(any(feature = "http1", feature = "http2"))]
- #[cfg(feature = "client")]
- pub(super) fn new_user_unsupported_request_method() -> Error {
- Error::new_user(User::UnsupportedRequestMethod)
- }
-
#[cfg(feature = "http1")]
#[cfg(feature = "server")]
pub(super) fn new_user_unsupported_status_code() -> Error {
Error::new_user(User::UnsupportedStatusCode)
}
- #[cfg(any(feature = "http1", feature = "http2"))]
- #[cfg(feature = "client")]
- pub(super) fn new_user_absolute_uri_required() -> Error {
- Error::new_user(User::AbsoluteUriRequired)
- }
-
pub(super) fn new_user_no_upgrade() -> Error {
Error::new_user(User::NoUpgrade)
}
diff --git a/src/error.rs b/src/error.rs
--- a/src/error.rs
+++ b/src/error.rs
@@ -411,7 +367,6 @@ impl Error {
#[cfg(feature = "http1")]
Kind::UnexpectedMessage => "received unexpected message from connection",
Kind::ChannelClosed => "channel closed",
- Kind::Connect => "error trying to connect",
Kind::Canceled => "operation was canceled",
#[cfg(all(feature = "server", feature = "tcp"))]
Kind::Listen => "error creating server listener",
diff --git a/src/error.rs b/src/error.rs
--- a/src/error.rs
+++ b/src/error.rs
@@ -436,20 +391,11 @@ impl Error {
#[cfg(any(feature = "http1", feature = "http2"))]
#[cfg(feature = "server")]
Kind::User(User::UnexpectedHeader) => "user sent unexpected header",
- #[cfg(any(feature = "http1", feature = "http2"))]
- #[cfg(feature = "client")]
- Kind::User(User::UnsupportedVersion) => "request has unsupported HTTP version",
- #[cfg(any(feature = "http1", feature = "http2"))]
- #[cfg(feature = "client")]
- Kind::User(User::UnsupportedRequestMethod) => "request has unsupported HTTP method",
#[cfg(feature = "http1")]
#[cfg(feature = "server")]
Kind::User(User::UnsupportedStatusCode) => {
"response has 1xx status code, not supported by server"
}
- #[cfg(any(feature = "http1", feature = "http2"))]
- #[cfg(feature = "client")]
- Kind::User(User::AbsoluteUriRequired) => "client requires absolute-form URIs",
Kind::User(User::NoUpgrade) => "no upgrade available",
#[cfg(feature = "http1")]
Kind::User(User::ManualUpgrade) => "upgrade expected but low level API in use",
diff --git a/src/lib.rs b/src/lib.rs
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -93,9 +93,6 @@ cfg_feature! {
#![feature = "client"]
pub mod client;
- #[cfg(any(feature = "http1", feature = "http2"))]
- #[doc(no_inline)]
- pub use crate::client::Client;
}
cfg_feature! {
|
2022-08-13T00:19:51Z
| 2,941
|
Remove high-level pooling Client
The `hyper::Client` and support modules `hyper::client::connect` should be removed from hyper, and parts should reappear in hyper-util.
- [x] #2860
- [x] #2859
|
hyperium__hyper-2941
|
diff --git a/src/client/client.rs /dev/null
--- a/src/client/client.rs
+++ /dev/null
@@ -1,1356 +0,0 @@
-use std::error::Error as StdError;
-use std::fmt;
-use std::mem;
-use std::time::Duration;
-
-use futures_channel::oneshot;
-use futures_util::future::{self, Either, FutureExt as _, TryFutureExt as _};
-use http::header::{HeaderValue, HOST};
-use http::uri::{Port, Scheme};
-use http::{Method, Request, Response, Uri, Version};
-use tracing::{debug, trace, warn};
-
-use super::conn;
-use super::connect::{self, sealed::Connect, Alpn, Connected, Connection};
-use super::pool::{
- self, CheckoutIsClosedError, Key as PoolKey, Pool, Poolable, Pooled, Reservation,
-};
-use crate::body::{Body, HttpBody};
-use crate::common::{
- exec::BoxSendFuture, lazy as hyper_lazy, sync_wrapper::SyncWrapper, task, Future, Lazy, Pin,
- Poll,
-};
-use crate::rt::Executor;
-
-/// A Client to make outgoing HTTP requests.
-///
-/// `Client` is cheap to clone and cloning is the recommended way to share a `Client`. The
-/// underlying connection pool will be reused.
-#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))]
-pub struct Client<C, B = Body> {
- config: Config,
- conn_builder: conn::Builder,
- connector: C,
- pool: Pool<PoolClient<B>>,
-}
-
-#[derive(Clone, Copy, Debug)]
-struct Config {
- retry_canceled_requests: bool,
- set_host: bool,
- ver: Ver,
-}
-
-/// A `Future` that will resolve to an HTTP Response.
-///
-/// This is returned by `Client::request` (and `Client::get`).
-#[must_use = "futures do nothing unless polled"]
-pub struct ResponseFuture {
- inner: SyncWrapper<Pin<Box<dyn Future<Output = crate::Result<Response<Body>>> + Send>>>,
-}
-
-// ===== impl Client =====
-
-impl Client<(), Body> {
- /// Create a builder to configure a new `Client`.
- #[inline]
- pub fn builder() -> Builder {
- Builder::default()
- }
-}
-
-impl<C, B> Client<C, B>
-where
- C: Connect + Clone + Send + Sync + 'static,
- B: HttpBody + Send + 'static,
- B::Data: Send,
- B::Error: Into<Box<dyn StdError + Send + Sync>>,
-{
- /// Send a `GET` request to the supplied `Uri`.
- ///
- /// # Note
- ///
- /// This requires that the `HttpBody` type have a `Default` implementation.
- /// It *should* return an "empty" version of itself, such that
- /// `HttpBody::is_end_stream` is `true`.
- pub fn get(&self, uri: Uri) -> ResponseFuture
- where
- B: Default,
- {
- let body = B::default();
- if !body.is_end_stream() {
- warn!("default HttpBody used for get() does not return true for is_end_stream");
- }
-
- let mut req = Request::new(body);
- *req.uri_mut() = uri;
- self.request(req)
- }
-
- /// Send a constructed `Request` using this `Client`.
- pub fn request(&self, mut req: Request<B>) -> ResponseFuture {
- let is_http_connect = req.method() == Method::CONNECT;
- match req.version() {
- Version::HTTP_11 => (),
- Version::HTTP_10 => {
- if is_http_connect {
- warn!("CONNECT is not allowed for HTTP/1.0");
- return ResponseFuture::new(future::err(
- crate::Error::new_user_unsupported_request_method(),
- ));
- }
- }
- Version::HTTP_2 => (),
- // completely unsupported HTTP version (like HTTP/0.9)!
- other => return ResponseFuture::error_version(other),
- };
-
- let pool_key = match extract_domain(req.uri_mut(), is_http_connect) {
- Ok(s) => s,
- Err(err) => {
- return ResponseFuture::new(future::err(err));
- }
- };
-
- ResponseFuture::new(self.clone().retryably_send_request(req, pool_key))
- }
-
- async fn retryably_send_request(
- self,
- mut req: Request<B>,
- pool_key: PoolKey,
- ) -> crate::Result<Response<Body>> {
- let uri = req.uri().clone();
-
- loop {
- req = match self.send_request(req, pool_key.clone()).await {
- Ok(resp) => return Ok(resp),
- Err(ClientError::Normal(err)) => return Err(err),
- Err(ClientError::Canceled {
- connection_reused,
- mut req,
- reason,
- }) => {
- if !self.config.retry_canceled_requests || !connection_reused {
- // if client disabled, don't retry
- // a fresh connection means we definitely can't retry
- return Err(reason);
- }
-
- trace!(
- "unstarted request canceled, trying again (reason={:?})",
- reason
- );
- *req.uri_mut() = uri.clone();
- req
- }
- }
- }
- }
-
- async fn send_request(
- &self,
- mut req: Request<B>,
- pool_key: PoolKey,
- ) -> Result<Response<Body>, ClientError<B>> {
- let mut pooled = match self.connection_for(pool_key).await {
- Ok(pooled) => pooled,
- Err(ClientConnectError::Normal(err)) => return Err(ClientError::Normal(err)),
- Err(ClientConnectError::H2CheckoutIsClosed(reason)) => {
- return Err(ClientError::Canceled {
- connection_reused: true,
- req,
- reason,
- })
- }
- };
-
- if pooled.is_http1() {
- if req.version() == Version::HTTP_2 {
- warn!("Connection is HTTP/1, but request requires HTTP/2");
- return Err(ClientError::Normal(
- crate::Error::new_user_unsupported_version(),
- ));
- }
-
- if self.config.set_host {
- let uri = req.uri().clone();
- req.headers_mut().entry(HOST).or_insert_with(|| {
- let hostname = uri.host().expect("authority implies host");
- if let Some(port) = get_non_default_port(&uri) {
- let s = format!("{}:{}", hostname, port);
- HeaderValue::from_str(&s)
- } else {
- HeaderValue::from_str(hostname)
- }
- .expect("uri host is valid header value")
- });
- }
-
- // CONNECT always sends authority-form, so check it first...
- if req.method() == Method::CONNECT {
- authority_form(req.uri_mut());
- } else if pooled.conn_info.is_proxied {
- absolute_form(req.uri_mut());
- } else {
- origin_form(req.uri_mut());
- }
- } else if req.method() == Method::CONNECT {
- authority_form(req.uri_mut());
- }
-
- let fut = pooled
- .send_request_retryable(req)
- .map_err(ClientError::map_with_reused(pooled.is_reused()));
-
- // If the Connector included 'extra' info, add to Response...
- let extra_info = pooled.conn_info.extra.clone();
- let fut = fut.map_ok(move |mut res| {
- if let Some(extra) = extra_info {
- extra.set(res.extensions_mut());
- }
- res
- });
-
- // As of futures@0.1.21, there is a race condition in the mpsc
- // channel, such that sending when the receiver is closing can
- // result in the message being stuck inside the queue. It won't
- // ever notify until the Sender side is dropped.
- //
- // To counteract this, we must check if our senders 'want' channel
- // has been closed after having tried to send. If so, error out...
- if pooled.is_closed() {
- return fut.await;
- }
-
- let mut res = fut.await?;
-
- // If pooled is HTTP/2, we can toss this reference immediately.
- //
- // when pooled is dropped, it will try to insert back into the
- // pool. To delay that, spawn a future that completes once the
- // sender is ready again.
- //
- // This *should* only be once the related `Connection` has polled
- // for a new request to start.
- //
- // It won't be ready if there is a body to stream.
- if pooled.is_http2() || !pooled.is_pool_enabled() || pooled.is_ready() {
- drop(pooled);
- } else if !res.body().is_end_stream() {
- let (delayed_tx, delayed_rx) = oneshot::channel();
- res.body_mut().delayed_eof(delayed_rx);
- let on_idle = future::poll_fn(move |cx| pooled.poll_ready(cx)).map(move |_| {
- // At this point, `pooled` is dropped, and had a chance
- // to insert into the pool (if conn was idle)
- drop(delayed_tx);
- });
-
- self.conn_builder.exec.execute(on_idle);
- } else {
- // There's no body to delay, but the connection isn't
- // ready yet. Only re-insert when it's ready
- let on_idle = future::poll_fn(move |cx| pooled.poll_ready(cx)).map(|_| ());
-
- self.conn_builder.exec.execute(on_idle);
- }
-
- Ok(res)
- }
-
- async fn connection_for(
- &self,
- pool_key: PoolKey,
- ) -> Result<Pooled<PoolClient<B>>, ClientConnectError> {
- // This actually races 2 different futures to try to get a ready
- // connection the fastest, and to reduce connection churn.
- //
- // - If the pool has an idle connection waiting, that's used
- // immediately.
- // - Otherwise, the Connector is asked to start connecting to
- // the destination Uri.
- // - Meanwhile, the pool Checkout is watching to see if any other
- // request finishes and tries to insert an idle connection.
- // - If a new connection is started, but the Checkout wins after
- // (an idle connection became available first), the started
- // connection future is spawned into the runtime to complete,
- // and then be inserted into the pool as an idle connection.
- let checkout = self.pool.checkout(pool_key.clone());
- let connect = self.connect_to(pool_key);
- let is_ver_h2 = self.config.ver == Ver::Http2;
-
- // The order of the `select` is depended on below...
-
- match future::select(checkout, connect).await {
- // Checkout won, connect future may have been started or not.
- //
- // If it has, let it finish and insert back into the pool,
- // so as to not waste the socket...
- Either::Left((Ok(checked_out), connecting)) => {
- // This depends on the `select` above having the correct
- // order, such that if the checkout future were ready
- // immediately, the connect future will never have been
- // started.
- //
- // If it *wasn't* ready yet, then the connect future will
- // have been started...
- if connecting.started() {
- let bg = connecting
- .map_err(|err| {
- trace!("background connect error: {}", err);
- })
- .map(|_pooled| {
- // dropping here should just place it in
- // the Pool for us...
- });
- // An execute error here isn't important, we're just trying
- // to prevent a waste of a socket...
- self.conn_builder.exec.execute(bg);
- }
- Ok(checked_out)
- }
- // Connect won, checkout can just be dropped.
- Either::Right((Ok(connected), _checkout)) => Ok(connected),
- // Either checkout or connect could get canceled:
- //
- // 1. Connect is canceled if this is HTTP/2 and there is
- // an outstanding HTTP/2 connecting task.
- // 2. Checkout is canceled if the pool cannot deliver an
- // idle connection reliably.
- //
- // In both cases, we should just wait for the other future.
- Either::Left((Err(err), connecting)) => {
- if err.is_canceled() {
- connecting.await.map_err(ClientConnectError::Normal)
- } else {
- Err(ClientConnectError::Normal(err))
- }
- }
- Either::Right((Err(err), checkout)) => {
- if err.is_canceled() {
- checkout.await.map_err(move |err| {
- if is_ver_h2
- && err.is_canceled()
- && err.find_source::<CheckoutIsClosedError>().is_some()
- {
- ClientConnectError::H2CheckoutIsClosed(err)
- } else {
- ClientConnectError::Normal(err)
- }
- })
- } else {
- Err(ClientConnectError::Normal(err))
- }
- }
- }
- }
-
- fn connect_to(
- &self,
- pool_key: PoolKey,
- ) -> impl Lazy<Output = crate::Result<Pooled<PoolClient<B>>>> + Unpin {
- let executor = self.conn_builder.exec.clone();
- let pool = self.pool.clone();
- #[cfg(not(feature = "http2"))]
- let conn_builder = self.conn_builder.clone();
- #[cfg(feature = "http2")]
- let mut conn_builder = self.conn_builder.clone();
- let ver = self.config.ver;
- let is_ver_h2 = ver == Ver::Http2;
- let connector = self.connector.clone();
- let dst = domain_as_uri(pool_key.clone());
- hyper_lazy(move || {
- // Try to take a "connecting lock".
- //
- // If the pool_key is for HTTP/2, and there is already a
- // connection being established, then this can't take a
- // second lock. The "connect_to" future is Canceled.
- let connecting = match pool.connecting(&pool_key, ver) {
- Some(lock) => lock,
- None => {
- let canceled =
- crate::Error::new_canceled().with("HTTP/2 connection in progress");
- return Either::Right(future::err(canceled));
- }
- };
- Either::Left(
- connector
- .connect(connect::sealed::Internal, dst)
- .map_err(crate::Error::new_connect)
- .and_then(move |io| {
- let connected = io.connected();
- // If ALPN is h2 and we aren't http2_only already,
- // then we need to convert our pool checkout into
- // a single HTTP2 one.
- let connecting = if connected.alpn == Alpn::H2 && !is_ver_h2 {
- match connecting.alpn_h2(&pool) {
- Some(lock) => {
- trace!("ALPN negotiated h2, updating pool");
- lock
- }
- None => {
- // Another connection has already upgraded,
- // the pool checkout should finish up for us.
- let canceled = crate::Error::new_canceled()
- .with("ALPN upgraded to HTTP/2");
- return Either::Right(future::err(canceled));
- }
- }
- } else {
- connecting
- };
-
- #[cfg_attr(not(feature = "http2"), allow(unused))]
- let is_h2 = is_ver_h2 || connected.alpn == Alpn::H2;
- #[cfg(feature = "http2")]
- {
- conn_builder.http2_only(is_h2);
- }
-
- Either::Left(Box::pin(async move {
- let (tx, conn) = conn_builder.handshake(io).await?;
-
- trace!("handshake complete, spawning background dispatcher task");
- executor.execute(
- conn.map_err(|e| debug!("client connection error: {}", e))
- .map(|_| ()),
- );
-
- // Wait for 'conn' to ready up before we
- // declare this tx as usable
- let tx = tx.when_ready().await?;
-
- let tx = {
- #[cfg(feature = "http2")]
- {
- if is_h2 {
- PoolTx::Http2(tx.into_http2())
- } else {
- PoolTx::Http1(tx)
- }
- }
- #[cfg(not(feature = "http2"))]
- PoolTx::Http1(tx)
- };
-
- Ok(pool.pooled(
- connecting,
- PoolClient {
- conn_info: connected,
- tx,
- },
- ))
- }))
- }),
- )
- })
- }
-}
-
-impl<C, B> tower_service::Service<Request<B>> for Client<C, B>
-where
- C: Connect + Clone + Send + Sync + 'static,
- B: HttpBody + Send + 'static,
- B::Data: Send,
- B::Error: Into<Box<dyn StdError + Send + Sync>>,
-{
- type Response = Response<Body>;
- type Error = crate::Error;
- type Future = ResponseFuture;
-
- fn poll_ready(&mut self, _: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>> {
- Poll::Ready(Ok(()))
- }
-
- fn call(&mut self, req: Request<B>) -> Self::Future {
- self.request(req)
- }
-}
-
-impl<C, B> tower_service::Service<Request<B>> for &'_ Client<C, B>
-where
- C: Connect + Clone + Send + Sync + 'static,
- B: HttpBody + Send + 'static,
- B::Data: Send,
- B::Error: Into<Box<dyn StdError + Send + Sync>>,
-{
- type Response = Response<Body>;
- type Error = crate::Error;
- type Future = ResponseFuture;
-
- fn poll_ready(&mut self, _: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>> {
- Poll::Ready(Ok(()))
- }
-
- fn call(&mut self, req: Request<B>) -> Self::Future {
- self.request(req)
- }
-}
-
-impl<C: Clone, B> Clone for Client<C, B> {
- fn clone(&self) -> Client<C, B> {
- Client {
- config: self.config.clone(),
- conn_builder: self.conn_builder.clone(),
- connector: self.connector.clone(),
- pool: self.pool.clone(),
- }
- }
-}
-
-impl<C, B> fmt::Debug for Client<C, B> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("Client").finish()
- }
-}
-
-// ===== impl ResponseFuture =====
-
-impl ResponseFuture {
- fn new<F>(value: F) -> Self
- where
- F: Future<Output = crate::Result<Response<Body>>> + Send + 'static,
- {
- Self {
- inner: SyncWrapper::new(Box::pin(value)),
- }
- }
-
- fn error_version(ver: Version) -> Self {
- warn!("Request has unsupported version \"{:?}\"", ver);
- ResponseFuture::new(Box::pin(future::err(
- crate::Error::new_user_unsupported_version(),
- )))
- }
-}
-
-impl fmt::Debug for ResponseFuture {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.pad("Future<Response>")
- }
-}
-
-impl Future for ResponseFuture {
- type Output = crate::Result<Response<Body>>;
-
- fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
- self.inner.get_mut().as_mut().poll(cx)
- }
-}
-
-// ===== impl PoolClient =====
-
-// FIXME: allow() required due to `impl Trait` leaking types to this lint
-#[allow(missing_debug_implementations)]
-struct PoolClient<B> {
- conn_info: Connected,
- tx: PoolTx<B>,
-}
-
-enum PoolTx<B> {
- Http1(conn::SendRequest<B>),
- #[cfg(feature = "http2")]
- Http2(conn::Http2SendRequest<B>),
-}
-
-impl<B> PoolClient<B> {
- fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<crate::Result<()>> {
- match self.tx {
- PoolTx::Http1(ref mut tx) => tx.poll_ready(cx),
- #[cfg(feature = "http2")]
- PoolTx::Http2(_) => Poll::Ready(Ok(())),
- }
- }
-
- fn is_http1(&self) -> bool {
- !self.is_http2()
- }
-
- fn is_http2(&self) -> bool {
- match self.tx {
- PoolTx::Http1(_) => false,
- #[cfg(feature = "http2")]
- PoolTx::Http2(_) => true,
- }
- }
-
- fn is_ready(&self) -> bool {
- match self.tx {
- PoolTx::Http1(ref tx) => tx.is_ready(),
- #[cfg(feature = "http2")]
- PoolTx::Http2(ref tx) => tx.is_ready(),
- }
- }
-
- fn is_closed(&self) -> bool {
- match self.tx {
- PoolTx::Http1(ref tx) => tx.is_closed(),
- #[cfg(feature = "http2")]
- PoolTx::Http2(ref tx) => tx.is_closed(),
- }
- }
-}
-
-impl<B: HttpBody + 'static> PoolClient<B> {
- fn send_request_retryable(
- &mut self,
- req: Request<B>,
- ) -> impl Future<Output = Result<Response<Body>, (crate::Error, Option<Request<B>>)>>
- where
- B: Send,
- {
- match self.tx {
- #[cfg(not(feature = "http2"))]
- PoolTx::Http1(ref mut tx) => tx.send_request_retryable(req),
- #[cfg(feature = "http2")]
- PoolTx::Http1(ref mut tx) => Either::Left(tx.send_request_retryable(req)),
- #[cfg(feature = "http2")]
- PoolTx::Http2(ref mut tx) => Either::Right(tx.send_request_retryable(req)),
- }
- }
-}
-
-impl<B> Poolable for PoolClient<B>
-where
- B: Send + 'static,
-{
- fn is_open(&self) -> bool {
- match self.tx {
- PoolTx::Http1(ref tx) => tx.is_ready(),
- #[cfg(feature = "http2")]
- PoolTx::Http2(ref tx) => tx.is_ready(),
- }
- }
-
- fn reserve(self) -> Reservation<Self> {
- match self.tx {
- PoolTx::Http1(tx) => Reservation::Unique(PoolClient {
- conn_info: self.conn_info,
- tx: PoolTx::Http1(tx),
- }),
- #[cfg(feature = "http2")]
- PoolTx::Http2(tx) => {
- let b = PoolClient {
- conn_info: self.conn_info.clone(),
- tx: PoolTx::Http2(tx.clone()),
- };
- let a = PoolClient {
- conn_info: self.conn_info,
- tx: PoolTx::Http2(tx),
- };
- Reservation::Shared(a, b)
- }
- }
- }
-
- fn can_share(&self) -> bool {
- self.is_http2()
- }
-}
-
-// ===== impl ClientError =====
-
-// FIXME: allow() required due to `impl Trait` leaking types to this lint
-#[allow(missing_debug_implementations)]
-enum ClientError<B> {
- Normal(crate::Error),
- Canceled {
- connection_reused: bool,
- req: Request<B>,
- reason: crate::Error,
- },
-}
-
-impl<B> ClientError<B> {
- fn map_with_reused(conn_reused: bool) -> impl Fn((crate::Error, Option<Request<B>>)) -> Self {
- move |(err, orig_req)| {
- if let Some(req) = orig_req {
- ClientError::Canceled {
- connection_reused: conn_reused,
- reason: err,
- req,
- }
- } else {
- ClientError::Normal(err)
- }
- }
- }
-}
-
-enum ClientConnectError {
- Normal(crate::Error),
- H2CheckoutIsClosed(crate::Error),
-}
-
-/// A marker to identify what version a pooled connection is.
-#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
-pub(super) enum Ver {
- Auto,
- Http2,
-}
-
-fn origin_form(uri: &mut Uri) {
- let path = match uri.path_and_query() {
- Some(path) if path.as_str() != "/" => {
- let mut parts = ::http::uri::Parts::default();
- parts.path_and_query = Some(path.clone());
- Uri::from_parts(parts).expect("path is valid uri")
- }
- _none_or_just_slash => {
- debug_assert!(Uri::default() == "/");
- Uri::default()
- }
- };
- *uri = path
-}
-
-fn absolute_form(uri: &mut Uri) {
- debug_assert!(uri.scheme().is_some(), "absolute_form needs a scheme");
- debug_assert!(
- uri.authority().is_some(),
- "absolute_form needs an authority"
- );
- // If the URI is to HTTPS, and the connector claimed to be a proxy,
- // then it *should* have tunneled, and so we don't want to send
- // absolute-form in that case.
- if uri.scheme() == Some(&Scheme::HTTPS) {
- origin_form(uri);
- }
-}
-
-fn authority_form(uri: &mut Uri) {
- if let Some(path) = uri.path_and_query() {
- // `https://hyper.rs` would parse with `/` path, don't
- // annoy people about that...
- if path != "/" {
- warn!("HTTP/1.1 CONNECT request stripping path: {:?}", path);
- }
- }
- *uri = match uri.authority() {
- Some(auth) => {
- let mut parts = ::http::uri::Parts::default();
- parts.authority = Some(auth.clone());
- Uri::from_parts(parts).expect("authority is valid")
- }
- None => {
- unreachable!("authority_form with relative uri");
- }
- };
-}
-
-fn extract_domain(uri: &mut Uri, is_http_connect: bool) -> crate::Result<PoolKey> {
- let uri_clone = uri.clone();
- match (uri_clone.scheme(), uri_clone.authority()) {
- (Some(scheme), Some(auth)) => Ok((scheme.clone(), auth.clone())),
- (None, Some(auth)) if is_http_connect => {
- let scheme = match auth.port_u16() {
- Some(443) => {
- set_scheme(uri, Scheme::HTTPS);
- Scheme::HTTPS
- }
- _ => {
- set_scheme(uri, Scheme::HTTP);
- Scheme::HTTP
- }
- };
- Ok((scheme, auth.clone()))
- }
- _ => {
- debug!("Client requires absolute-form URIs, received: {:?}", uri);
- Err(crate::Error::new_user_absolute_uri_required())
- }
- }
-}
-
-fn domain_as_uri((scheme, auth): PoolKey) -> Uri {
- http::uri::Builder::new()
- .scheme(scheme)
- .authority(auth)
- .path_and_query("/")
- .build()
- .expect("domain is valid Uri")
-}
-
-fn set_scheme(uri: &mut Uri, scheme: Scheme) {
- debug_assert!(
- uri.scheme().is_none(),
- "set_scheme expects no existing scheme"
- );
- let old = mem::replace(uri, Uri::default());
- let mut parts: ::http::uri::Parts = old.into();
- parts.scheme = Some(scheme);
- parts.path_and_query = Some("/".parse().expect("slash is a valid path"));
- *uri = Uri::from_parts(parts).expect("scheme is valid");
-}
-
-fn get_non_default_port(uri: &Uri) -> Option<Port<&str>> {
- match (uri.port().map(|p| p.as_u16()), is_schema_secure(uri)) {
- (Some(443), true) => None,
- (Some(80), false) => None,
- _ => uri.port(),
- }
-}
-
-fn is_schema_secure(uri: &Uri) -> bool {
- uri.scheme_str()
- .map(|scheme_str| matches!(scheme_str, "wss" | "https"))
- .unwrap_or_default()
-}
-
-/// A builder to configure a new [`Client`](Client).
-#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))]
-#[derive(Clone)]
-pub struct Builder {
- client_config: Config,
- conn_builder: conn::Builder,
- pool_config: pool::Config,
-}
-
-impl Default for Builder {
- fn default() -> Self {
- Self {
- client_config: Config {
- retry_canceled_requests: true,
- set_host: true,
- ver: Ver::Auto,
- },
- conn_builder: conn::Builder::new(),
- pool_config: pool::Config {
- idle_timeout: Some(Duration::from_secs(90)),
- max_idle_per_host: std::usize::MAX,
- },
- }
- }
-}
-
-impl Builder {
- #[doc(hidden)]
- #[deprecated(
- note = "name is confusing, to disable the connection pool, call pool_max_idle_per_host(0)"
- )]
- pub fn keep_alive(&mut self, val: bool) -> &mut Self {
- if !val {
- // disable
- self.pool_max_idle_per_host(0)
- } else if self.pool_config.max_idle_per_host == 0 {
- // enable
- self.pool_max_idle_per_host(std::usize::MAX)
- } else {
- // already enabled
- self
- }
- }
-
- #[doc(hidden)]
- #[deprecated(note = "renamed to `pool_idle_timeout`")]
- pub fn keep_alive_timeout<D>(&mut self, val: D) -> &mut Self
- where
- D: Into<Option<Duration>>,
- {
- self.pool_idle_timeout(val)
- }
-
- /// Set an optional timeout for idle sockets being kept-alive.
- ///
- /// Pass `None` to disable timeout.
- ///
- /// Default is 90 seconds.
- pub fn pool_idle_timeout<D>(&mut self, val: D) -> &mut Self
- where
- D: Into<Option<Duration>>,
- {
- self.pool_config.idle_timeout = val.into();
- self
- }
-
- #[doc(hidden)]
- #[deprecated(note = "renamed to `pool_max_idle_per_host`")]
- pub fn max_idle_per_host(&mut self, max_idle: usize) -> &mut Self {
- self.pool_config.max_idle_per_host = max_idle;
- self
- }
-
- /// Sets the maximum idle connection per host allowed in the pool.
- ///
- /// Default is `usize::MAX` (no limit).
- pub fn pool_max_idle_per_host(&mut self, max_idle: usize) -> &mut Self {
- self.pool_config.max_idle_per_host = max_idle;
- self
- }
-
- // HTTP/1 options
-
- /// Sets the exact size of the read buffer to *always* use.
- ///
- /// Note that setting this option unsets the `http1_max_buf_size` option.
- ///
- /// Default is an adaptive read buffer.
- pub fn http1_read_buf_exact_size(&mut self, sz: usize) -> &mut Self {
- self.conn_builder.http1_read_buf_exact_size(Some(sz));
- self
- }
-
- /// Set the maximum buffer size for the connection.
- ///
- /// Default is ~400kb.
- ///
- /// Note that setting this option unsets the `http1_read_exact_buf_size` option.
- ///
- /// # Panics
- ///
- /// The minimum value allowed is 8192. This method panics if the passed `max` is less than the minimum.
- #[cfg(feature = "http1")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
- pub fn http1_max_buf_size(&mut self, max: usize) -> &mut Self {
- self.conn_builder.http1_max_buf_size(max);
- self
- }
-
- /// Set whether HTTP/1 connections will accept spaces between header names
- /// and the colon that follow them in responses.
- ///
- /// Newline codepoints (`\r` and `\n`) will be transformed to spaces when
- /// parsing.
- ///
- /// You probably don't need this, here is what [RFC 7230 Section 3.2.4.] has
- /// to say about it:
- ///
- /// > No whitespace is allowed between the header field-name and colon. In
- /// > the past, differences in the handling of such whitespace have led to
- /// > security vulnerabilities in request routing and response handling. A
- /// > server MUST reject any received request message that contains
- /// > whitespace between a header field-name and colon with a response code
- /// > of 400 (Bad Request). A proxy MUST remove any such whitespace from a
- /// > response message before forwarding the message downstream.
- ///
- /// Note that this setting does not affect HTTP/2.
- ///
- /// Default is false.
- ///
- /// [RFC 7230 Section 3.2.4.]: https://tools.ietf.org/html/rfc7230#section-3.2.4
- pub fn http1_allow_spaces_after_header_name_in_responses(&mut self, val: bool) -> &mut Self {
- self.conn_builder
- .http1_allow_spaces_after_header_name_in_responses(val);
- self
- }
-
- /// Set whether HTTP/1 connections will accept obsolete line folding for
- /// header values.
- ///
- /// You probably don't need this, here is what [RFC 7230 Section 3.2.4.] has
- /// to say about it:
- ///
- /// > A server that receives an obs-fold in a request message that is not
- /// > within a message/http container MUST either reject the message by
- /// > sending a 400 (Bad Request), preferably with a representation
- /// > explaining that obsolete line folding is unacceptable, or replace
- /// > each received obs-fold with one or more SP octets prior to
- /// > interpreting the field value or forwarding the message downstream.
- ///
- /// > A proxy or gateway that receives an obs-fold in a response message
- /// > that is not within a message/http container MUST either discard the
- /// > message and replace it with a 502 (Bad Gateway) response, preferably
- /// > with a representation explaining that unacceptable line folding was
- /// > received, or replace each received obs-fold with one or more SP
- /// > octets prior to interpreting the field value or forwarding the
- /// > message downstream.
- ///
- /// > A user agent that receives an obs-fold in a response message that is
- /// > not within a message/http container MUST replace each received
- /// > obs-fold with one or more SP octets prior to interpreting the field
- /// > value.
- ///
- /// Note that this setting does not affect HTTP/2.
- ///
- /// Default is false.
- ///
- /// [RFC 7230 Section 3.2.4.]: https://tools.ietf.org/html/rfc7230#section-3.2.4
- pub fn http1_allow_obsolete_multiline_headers_in_responses(&mut self, val: bool) -> &mut Self {
- self.conn_builder
- .http1_allow_obsolete_multiline_headers_in_responses(val);
- self
- }
-
- /// Set whether HTTP/1 connections should try to use vectored writes,
- /// or always flatten into a single buffer.
- ///
- /// Note that setting this to false may mean more copies of body data,
- /// but may also improve performance when an IO transport doesn't
- /// support vectored writes well, such as most TLS implementations.
- ///
- /// Setting this to true will force hyper to use queued strategy
- /// which may eliminate unnecessary cloning on some TLS backends
- ///
- /// Default is `auto`. In this mode hyper will try to guess which
- /// mode to use
- pub fn http1_writev(&mut self, enabled: bool) -> &mut Builder {
- self.conn_builder.http1_writev(enabled);
- self
- }
-
- /// Set whether HTTP/1 connections will write header names as title case at
- /// the socket level.
- ///
- /// Note that this setting does not affect HTTP/2.
- ///
- /// Default is false.
- pub fn http1_title_case_headers(&mut self, val: bool) -> &mut Self {
- self.conn_builder.http1_title_case_headers(val);
- self
- }
-
- /// Set whether to support preserving original header cases.
- ///
- /// Currently, this will record the original cases received, and store them
- /// in a private extension on the `Response`. It will also look for and use
- /// such an extension in any provided `Request`.
- ///
- /// Since the relevant extension is still private, there is no way to
- /// interact with the original cases. The only effect this can have now is
- /// to forward the cases in a proxy-like fashion.
- ///
- /// Note that this setting does not affect HTTP/2.
- ///
- /// Default is false.
- pub fn http1_preserve_header_case(&mut self, val: bool) -> &mut Self {
- self.conn_builder.http1_preserve_header_case(val);
- self
- }
-
- /// Set whether HTTP/0.9 responses should be tolerated.
- ///
- /// Default is false.
- pub fn http09_responses(&mut self, val: bool) -> &mut Self {
- self.conn_builder.http09_responses(val);
- self
- }
-
- /// Set whether the connection **must** use HTTP/2.
- ///
- /// The destination must either allow HTTP2 Prior Knowledge, or the
- /// `Connect` should be configured to do use ALPN to upgrade to `h2`
- /// as part of the connection process. This will not make the `Client`
- /// utilize ALPN by itself.
- ///
- /// Note that setting this to true prevents HTTP/1 from being allowed.
- ///
- /// Default is false.
- #[cfg(feature = "http2")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_only(&mut self, val: bool) -> &mut Self {
- self.client_config.ver = if val { Ver::Http2 } else { Ver::Auto };
- self
- }
-
- /// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2
- /// stream-level flow control.
- ///
- /// Passing `None` will do nothing.
- ///
- /// If not set, hyper will use a default.
- ///
- /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_INITIAL_WINDOW_SIZE
- #[cfg(feature = "http2")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_initial_stream_window_size(&mut self, sz: impl Into<Option<u32>>) -> &mut Self {
- self.conn_builder
- .http2_initial_stream_window_size(sz.into());
- self
- }
-
- /// Sets the max connection-level flow control for HTTP2
- ///
- /// Passing `None` will do nothing.
- ///
- /// If not set, hyper will use a default.
- #[cfg(feature = "http2")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_initial_connection_window_size(
- &mut self,
- sz: impl Into<Option<u32>>,
- ) -> &mut Self {
- self.conn_builder
- .http2_initial_connection_window_size(sz.into());
- self
- }
-
- /// Sets whether to use an adaptive flow control.
- ///
- /// Enabling this will override the limits set in
- /// `http2_initial_stream_window_size` and
- /// `http2_initial_connection_window_size`.
- #[cfg(feature = "http2")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_adaptive_window(&mut self, enabled: bool) -> &mut Self {
- self.conn_builder.http2_adaptive_window(enabled);
- self
- }
-
- /// Sets the maximum frame size to use for HTTP2.
- ///
- /// Passing `None` will do nothing.
- ///
- /// If not set, hyper will use a default.
- #[cfg(feature = "http2")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_max_frame_size(&mut self, sz: impl Into<Option<u32>>) -> &mut Self {
- self.conn_builder.http2_max_frame_size(sz);
- self
- }
-
- /// Sets an interval for HTTP2 Ping frames should be sent to keep a
- /// connection alive.
- ///
- /// Pass `None` to disable HTTP2 keep-alive.
- ///
- /// Default is currently disabled.
- ///
- /// # Cargo Feature
- ///
- /// Requires the `runtime` cargo feature to be enabled.
- #[cfg(feature = "runtime")]
- #[cfg(feature = "http2")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_keep_alive_interval(
- &mut self,
- interval: impl Into<Option<Duration>>,
- ) -> &mut Self {
- self.conn_builder.http2_keep_alive_interval(interval);
- self
- }
-
- /// Sets a timeout for receiving an acknowledgement of the keep-alive ping.
- ///
- /// If the ping is not acknowledged within the timeout, the connection will
- /// be closed. Does nothing if `http2_keep_alive_interval` is disabled.
- ///
- /// Default is 20 seconds.
- ///
- /// # Cargo Feature
- ///
- /// Requires the `runtime` cargo feature to be enabled.
- #[cfg(feature = "runtime")]
- #[cfg(feature = "http2")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_keep_alive_timeout(&mut self, timeout: Duration) -> &mut Self {
- self.conn_builder.http2_keep_alive_timeout(timeout);
- self
- }
-
- /// Sets whether HTTP2 keep-alive should apply while the connection is idle.
- ///
- /// If disabled, keep-alive pings are only sent while there are open
- /// request/responses streams. If enabled, pings are also sent when no
- /// streams are active. Does nothing if `http2_keep_alive_interval` is
- /// disabled.
- ///
- /// Default is `false`.
- ///
- /// # Cargo Feature
- ///
- /// Requires the `runtime` cargo feature to be enabled.
- #[cfg(feature = "runtime")]
- #[cfg(feature = "http2")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_keep_alive_while_idle(&mut self, enabled: bool) -> &mut Self {
- self.conn_builder.http2_keep_alive_while_idle(enabled);
- self
- }
-
- /// Sets the maximum number of HTTP2 concurrent locally reset streams.
- ///
- /// See the documentation of [`h2::client::Builder::max_concurrent_reset_streams`] for more
- /// details.
- ///
- /// The default value is determined by the `h2` crate.
- ///
- /// [`h2::client::Builder::max_concurrent_reset_streams`]: https://docs.rs/h2/client/struct.Builder.html#method.max_concurrent_reset_streams
- #[cfg(feature = "http2")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_max_concurrent_reset_streams(&mut self, max: usize) -> &mut Self {
- self.conn_builder.http2_max_concurrent_reset_streams(max);
- self
- }
-
- /// Set the maximum write buffer size for each HTTP/2 stream.
- ///
- /// Default is currently 1MB, but may change.
- ///
- /// # Panics
- ///
- /// The value must be no larger than `u32::MAX`.
- #[cfg(feature = "http2")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_max_send_buf_size(&mut self, max: usize) -> &mut Self {
- self.conn_builder.http2_max_send_buf_size(max);
- self
- }
-
- /// Set whether to retry requests that get disrupted before ever starting
- /// to write.
- ///
- /// This means a request that is queued, and gets given an idle, reused
- /// connection, and then encounters an error immediately as the idle
- /// connection was found to be unusable.
- ///
- /// When this is set to `false`, the related `ResponseFuture` would instead
- /// resolve to an `Error::Cancel`.
- ///
- /// Default is `true`.
- #[inline]
- pub fn retry_canceled_requests(&mut self, val: bool) -> &mut Self {
- self.client_config.retry_canceled_requests = val;
- self
- }
-
- /// Set whether to automatically add the `Host` header to requests.
- ///
- /// If true, and a request does not include a `Host` header, one will be
- /// added automatically, derived from the authority of the `Uri`.
- ///
- /// Default is `true`.
- #[inline]
- pub fn set_host(&mut self, val: bool) -> &mut Self {
- self.client_config.set_host = val;
- self
- }
-
- /// Provide an executor to execute background `Connection` tasks.
- pub fn executor<E>(&mut self, exec: E) -> &mut Self
- where
- E: Executor<BoxSendFuture> + Send + Sync + 'static,
- {
- self.conn_builder.executor(exec);
- self
- }
-
- /// Combine the configuration of this builder with a connector to create a `Client`.
- pub fn build<C, B>(&self, connector: C) -> Client<C, B>
- where
- C: Connect + Clone,
- B: HttpBody + Send,
- B::Data: Send,
- {
- Client {
- config: self.client_config,
- conn_builder: self.conn_builder.clone(),
- connector,
- pool: Pool::new(self.pool_config, &self.conn_builder.exec),
- }
- }
-}
-
-impl fmt::Debug for Builder {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("Builder")
- .field("client_config", &self.client_config)
- .field("conn_builder", &self.conn_builder)
- .field("pool_config", &self.pool_config)
- .finish()
- }
-}
-
-#[cfg(test)]
-mod unit_tests {
- use super::*;
-
- #[test]
- fn response_future_is_sync() {
- fn assert_sync<T: Sync>() {}
- assert_sync::<ResponseFuture>();
- }
-
- #[test]
- fn set_relative_uri_with_implicit_path() {
- let mut uri = "http://hyper.rs".parse().unwrap();
- origin_form(&mut uri);
- assert_eq!(uri.to_string(), "/");
- }
-
- #[test]
- fn test_origin_form() {
- let mut uri = "http://hyper.rs/guides".parse().unwrap();
- origin_form(&mut uri);
- assert_eq!(uri.to_string(), "/guides");
-
- let mut uri = "http://hyper.rs/guides?foo=bar".parse().unwrap();
- origin_form(&mut uri);
- assert_eq!(uri.to_string(), "/guides?foo=bar");
- }
-
- #[test]
- fn test_absolute_form() {
- let mut uri = "http://hyper.rs/guides".parse().unwrap();
- absolute_form(&mut uri);
- assert_eq!(uri.to_string(), "http://hyper.rs/guides");
-
- let mut uri = "https://hyper.rs/guides".parse().unwrap();
- absolute_form(&mut uri);
- assert_eq!(uri.to_string(), "/guides");
- }
-
- #[test]
- fn test_authority_form() {
- let _ = pretty_env_logger::try_init();
-
- let mut uri = "http://hyper.rs".parse().unwrap();
- authority_form(&mut uri);
- assert_eq!(uri.to_string(), "hyper.rs");
-
- let mut uri = "hyper.rs".parse().unwrap();
- authority_form(&mut uri);
- assert_eq!(uri.to_string(), "hyper.rs");
- }
-
- #[test]
- fn test_extract_domain_connect_no_port() {
- let mut uri = "hyper.rs".parse().unwrap();
- let (scheme, host) = extract_domain(&mut uri, true).expect("extract domain");
- assert_eq!(scheme, *"http");
- assert_eq!(host, "hyper.rs");
- }
-
- #[test]
- fn test_is_secure() {
- assert_eq!(
- is_schema_secure(&"http://hyper.rs".parse::<Uri>().unwrap()),
- false
- );
- assert_eq!(is_schema_secure(&"hyper.rs".parse::<Uri>().unwrap()), false);
- assert_eq!(
- is_schema_secure(&"wss://hyper.rs".parse::<Uri>().unwrap()),
- true
- );
- assert_eq!(
- is_schema_secure(&"ws://hyper.rs".parse::<Uri>().unwrap()),
- false
- );
- }
-
- #[test]
- fn test_get_non_default_port() {
- assert!(get_non_default_port(&"http://hyper.rs".parse::<Uri>().unwrap()).is_none());
- assert!(get_non_default_port(&"http://hyper.rs:80".parse::<Uri>().unwrap()).is_none());
- assert!(get_non_default_port(&"https://hyper.rs:443".parse::<Uri>().unwrap()).is_none());
- assert!(get_non_default_port(&"hyper.rs:80".parse::<Uri>().unwrap()).is_none());
-
- assert_eq!(
- get_non_default_port(&"http://hyper.rs:123".parse::<Uri>().unwrap())
- .unwrap()
- .as_u16(),
- 123
- );
- assert_eq!(
- get_non_default_port(&"https://hyper.rs:80".parse::<Uri>().unwrap())
- .unwrap()
- .as_u16(),
- 80
- );
- assert_eq!(
- get_non_default_port(&"hyper.rs:123".parse::<Uri>().unwrap())
- .unwrap()
- .as_u16(),
- 123
- );
- }
-}
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -7,6 +7,7 @@ use tokio::sync::{mpsc, oneshot};
use crate::common::Pin;
use crate::common::{task, Poll};
+#[cfg(test)]
pub(crate) type RetryPromise<T, U> = oneshot::Receiver<Result<U, (crate::Error, Option<T>)>>;
pub(crate) type Promise<T> = oneshot::Receiver<Result<T, crate::Error>>;
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -58,13 +59,16 @@ impl<T, U> Sender<T, U> {
.map_err(|_| crate::Error::new_closed())
}
+ #[cfg(test)]
pub(crate) fn is_ready(&self) -> bool {
self.giver.is_wanting()
}
+ /*
pub(crate) fn is_closed(&self) -> bool {
self.giver.is_canceled()
}
+ */
fn can_send(&mut self) -> bool {
if self.giver.give() || !self.buffered_once {
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -79,6 +83,7 @@ impl<T, U> Sender<T, U> {
}
}
+ #[cfg(test)]
pub(crate) fn try_send(&mut self, val: T) -> Result<RetryPromise<T, U>, T> {
if !self.can_send() {
return Err(val);
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -112,14 +117,17 @@ impl<T, U> Sender<T, U> {
#[cfg(feature = "http2")]
impl<T, U> UnboundedSender<T, U> {
+ /*
pub(crate) fn is_ready(&self) -> bool {
!self.giver.is_canceled()
}
+ */
pub(crate) fn is_closed(&self) -> bool {
self.giver.is_canceled()
}
+ #[cfg(test)]
pub(crate) fn try_send(&mut self, val: T) -> Result<RetryPromise<T, U>, T> {
let (tx, rx) = oneshot::channel();
self.inner
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -1,31 +1,11 @@
//! HTTP Client
//!
-//! There are two levels of APIs provided for construct HTTP clients:
-//!
-//! - The higher-level [`Client`](Client) type.
-//! - The lower-level [`conn`](conn) module.
-//!
-//! # Client
-//!
-//! The [`Client`](Client) is the main way to send HTTP requests to a server.
-//! The default `Client` provides these things on top of the lower-level API:
-//!
-//! - A default **connector**, able to resolve hostnames and connect to
-//! destinations over plain-text TCP.
-//! - A **pool** of existing connections, allowing better performance when
-//! making multiple requests to the same hostname.
-//! - Automatic setting of the `Host` header, based on the request `Uri`.
-//! - Automatic request **retries** when a pooled connection is closed by the
-//! server before any bytes have been written.
-//!
-//! Many of these features can configured, by making use of
-//! [`Client::builder`](Client::builder).
+//! hyper provides HTTP over a single connection. See the [`conn`](conn) module.
//!
//! ## Example
//!
//! For a small example program simply fetching a URL, take a look at the
//! [full client example](https://github.com/hyperium/hyper/blob/master/examples/client.rs).
-//!
pub mod connect;
#[cfg(all(test, feature = "runtime"))]
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -34,10 +14,6 @@ mod tests;
cfg_feature! {
#![any(feature = "http1", feature = "http2")]
- pub use self::client::{Builder, Client, ResponseFuture};
-
- mod client;
pub mod conn;
pub(super) mod dispatch;
- mod pool;
}
diff --git a/src/client/pool.rs /dev/null
--- a/src/client/pool.rs
+++ /dev/null
@@ -1,1044 +0,0 @@
-use std::collections::{HashMap, HashSet, VecDeque};
-use std::error::Error as StdError;
-use std::fmt;
-use std::ops::{Deref, DerefMut};
-use std::sync::{Arc, Mutex, Weak};
-
-#[cfg(not(feature = "runtime"))]
-use std::time::{Duration, Instant};
-
-use futures_channel::oneshot;
-#[cfg(feature = "runtime")]
-use tokio::time::{Duration, Instant, Interval};
-use tracing::{debug, trace};
-
-use super::client::Ver;
-use crate::common::{exec::Exec, task, Future, Pin, Poll, Unpin};
-
-// FIXME: allow() required due to `impl Trait` leaking types to this lint
-#[allow(missing_debug_implementations)]
-pub(super) struct Pool<T> {
- // If the pool is disabled, this is None.
- inner: Option<Arc<Mutex<PoolInner<T>>>>,
-}
-
-// Before using a pooled connection, make sure the sender is not dead.
-//
-// This is a trait to allow the `client::pool::tests` to work for `i32`.
-//
-// See https://github.com/hyperium/hyper/issues/1429
-pub(super) trait Poolable: Unpin + Send + Sized + 'static {
- fn is_open(&self) -> bool;
- /// Reserve this connection.
- ///
- /// Allows for HTTP/2 to return a shared reservation.
- fn reserve(self) -> Reservation<Self>;
- fn can_share(&self) -> bool;
-}
-
-/// When checking out a pooled connection, it might be that the connection
-/// only supports a single reservation, or it might be usable for many.
-///
-/// Specifically, HTTP/1 requires a unique reservation, but HTTP/2 can be
-/// used for multiple requests.
-// FIXME: allow() required due to `impl Trait` leaking types to this lint
-#[allow(missing_debug_implementations)]
-pub(super) enum Reservation<T> {
- /// This connection could be used multiple times, the first one will be
- /// reinserted into the `idle` pool, and the second will be given to
- /// the `Checkout`.
- #[cfg(feature = "http2")]
- Shared(T, T),
- /// This connection requires unique access. It will be returned after
- /// use is complete.
- Unique(T),
-}
-
-/// Simple type alias in case the key type needs to be adjusted.
-pub(super) type Key = (http::uri::Scheme, http::uri::Authority); //Arc<String>;
-
-struct PoolInner<T> {
- // A flag that a connection is being established, and the connection
- // should be shared. This prevents making multiple HTTP/2 connections
- // to the same host.
- connecting: HashSet<Key>,
- // These are internal Conns sitting in the event loop in the KeepAlive
- // state, waiting to receive a new Request to send on the socket.
- idle: HashMap<Key, Vec<Idle<T>>>,
- max_idle_per_host: usize,
- // These are outstanding Checkouts that are waiting for a socket to be
- // able to send a Request one. This is used when "racing" for a new
- // connection.
- //
- // The Client starts 2 tasks, 1 to connect a new socket, and 1 to wait
- // for the Pool to receive an idle Conn. When a Conn becomes idle,
- // this list is checked for any parked Checkouts, and tries to notify
- // them that the Conn could be used instead of waiting for a brand new
- // connection.
- waiters: HashMap<Key, VecDeque<oneshot::Sender<T>>>,
- // A oneshot channel is used to allow the interval to be notified when
- // the Pool completely drops. That way, the interval can cancel immediately.
- #[cfg(feature = "runtime")]
- idle_interval_ref: Option<oneshot::Sender<crate::common::Never>>,
- #[cfg(feature = "runtime")]
- exec: Exec,
- timeout: Option<Duration>,
-}
-
-// This is because `Weak::new()` *allocates* space for `T`, even if it
-// doesn't need it!
-struct WeakOpt<T>(Option<Weak<T>>);
-
-#[derive(Clone, Copy, Debug)]
-pub(super) struct Config {
- pub(super) idle_timeout: Option<Duration>,
- pub(super) max_idle_per_host: usize,
-}
-
-impl Config {
- pub(super) fn is_enabled(&self) -> bool {
- self.max_idle_per_host > 0
- }
-}
-
-impl<T> Pool<T> {
- pub(super) fn new(config: Config, __exec: &Exec) -> Pool<T> {
- let inner = if config.is_enabled() {
- Some(Arc::new(Mutex::new(PoolInner {
- connecting: HashSet::new(),
- idle: HashMap::new(),
- #[cfg(feature = "runtime")]
- idle_interval_ref: None,
- max_idle_per_host: config.max_idle_per_host,
- waiters: HashMap::new(),
- #[cfg(feature = "runtime")]
- exec: __exec.clone(),
- timeout: config.idle_timeout,
- })))
- } else {
- None
- };
-
- Pool { inner }
- }
-
- fn is_enabled(&self) -> bool {
- self.inner.is_some()
- }
-
- #[cfg(test)]
- pub(super) fn no_timer(&self) {
- // Prevent an actual interval from being created for this pool...
- #[cfg(feature = "runtime")]
- {
- let mut inner = self.inner.as_ref().unwrap().lock().unwrap();
- assert!(inner.idle_interval_ref.is_none(), "timer already spawned");
- let (tx, _) = oneshot::channel();
- inner.idle_interval_ref = Some(tx);
- }
- }
-}
-
-impl<T: Poolable> Pool<T> {
- /// Returns a `Checkout` which is a future that resolves if an idle
- /// connection becomes available.
- pub(super) fn checkout(&self, key: Key) -> Checkout<T> {
- Checkout {
- key,
- pool: self.clone(),
- waiter: None,
- }
- }
-
- /// Ensure that there is only ever 1 connecting task for HTTP/2
- /// connections. This does nothing for HTTP/1.
- pub(super) fn connecting(&self, key: &Key, ver: Ver) -> Option<Connecting<T>> {
- if ver == Ver::Http2 {
- if let Some(ref enabled) = self.inner {
- let mut inner = enabled.lock().unwrap();
- return if inner.connecting.insert(key.clone()) {
- let connecting = Connecting {
- key: key.clone(),
- pool: WeakOpt::downgrade(enabled),
- };
- Some(connecting)
- } else {
- trace!("HTTP/2 connecting already in progress for {:?}", key);
- None
- };
- }
- }
-
- // else
- Some(Connecting {
- key: key.clone(),
- // in HTTP/1's case, there is never a lock, so we don't
- // need to do anything in Drop.
- pool: WeakOpt::none(),
- })
- }
-
- #[cfg(test)]
- fn locked(&self) -> std::sync::MutexGuard<'_, PoolInner<T>> {
- self.inner.as_ref().expect("enabled").lock().expect("lock")
- }
-
- /* Used in client/tests.rs...
- #[cfg(feature = "runtime")]
- #[cfg(test)]
- pub(super) fn h1_key(&self, s: &str) -> Key {
- Arc::new(s.to_string())
- }
-
- #[cfg(feature = "runtime")]
- #[cfg(test)]
- pub(super) fn idle_count(&self, key: &Key) -> usize {
- self
- .locked()
- .idle
- .get(key)
- .map(|list| list.len())
- .unwrap_or(0)
- }
- */
-
- pub(super) fn pooled(
- &self,
- #[cfg_attr(not(feature = "http2"), allow(unused_mut))] mut connecting: Connecting<T>,
- value: T,
- ) -> Pooled<T> {
- let (value, pool_ref) = if let Some(ref enabled) = self.inner {
- match value.reserve() {
- #[cfg(feature = "http2")]
- Reservation::Shared(to_insert, to_return) => {
- let mut inner = enabled.lock().unwrap();
- inner.put(connecting.key.clone(), to_insert, enabled);
- // Do this here instead of Drop for Connecting because we
- // already have a lock, no need to lock the mutex twice.
- inner.connected(&connecting.key);
- // prevent the Drop of Connecting from repeating inner.connected()
- connecting.pool = WeakOpt::none();
-
- // Shared reservations don't need a reference to the pool,
- // since the pool always keeps a copy.
- (to_return, WeakOpt::none())
- }
- Reservation::Unique(value) => {
- // Unique reservations must take a reference to the pool
- // since they hope to reinsert once the reservation is
- // completed
- (value, WeakOpt::downgrade(enabled))
- }
- }
- } else {
- // If pool is not enabled, skip all the things...
-
- // The Connecting should have had no pool ref
- debug_assert!(connecting.pool.upgrade().is_none());
-
- (value, WeakOpt::none())
- };
- Pooled {
- key: connecting.key.clone(),
- is_reused: false,
- pool: pool_ref,
- value: Some(value),
- }
- }
-
- fn reuse(&self, key: &Key, value: T) -> Pooled<T> {
- debug!("reuse idle connection for {:?}", key);
- // TODO: unhack this
- // In Pool::pooled(), which is used for inserting brand new connections,
- // there's some code that adjusts the pool reference taken depending
- // on if the Reservation can be shared or is unique. By the time
- // reuse() is called, the reservation has already been made, and
- // we just have the final value, without knowledge of if this is
- // unique or shared. So, the hack is to just assume Ver::Http2 means
- // shared... :(
- let mut pool_ref = WeakOpt::none();
- if !value.can_share() {
- if let Some(ref enabled) = self.inner {
- pool_ref = WeakOpt::downgrade(enabled);
- }
- }
-
- Pooled {
- is_reused: true,
- key: key.clone(),
- pool: pool_ref,
- value: Some(value),
- }
- }
-}
-
-/// Pop off this list, looking for a usable connection that hasn't expired.
-struct IdlePopper<'a, T> {
- key: &'a Key,
- list: &'a mut Vec<Idle<T>>,
-}
-
-impl<'a, T: Poolable + 'a> IdlePopper<'a, T> {
- fn pop(self, expiration: &Expiration) -> Option<Idle<T>> {
- while let Some(entry) = self.list.pop() {
- // If the connection has been closed, or is older than our idle
- // timeout, simply drop it and keep looking...
- if !entry.value.is_open() {
- trace!("removing closed connection for {:?}", self.key);
- continue;
- }
- // TODO: Actually, since the `idle` list is pushed to the end always,
- // that would imply that if *this* entry is expired, then anything
- // "earlier" in the list would *have* to be expired also... Right?
- //
- // In that case, we could just break out of the loop and drop the
- // whole list...
- if expiration.expires(entry.idle_at) {
- trace!("removing expired connection for {:?}", self.key);
- continue;
- }
-
- let value = match entry.value.reserve() {
- #[cfg(feature = "http2")]
- Reservation::Shared(to_reinsert, to_checkout) => {
- self.list.push(Idle {
- idle_at: Instant::now(),
- value: to_reinsert,
- });
- to_checkout
- }
- Reservation::Unique(unique) => unique,
- };
-
- return Some(Idle {
- idle_at: entry.idle_at,
- value,
- });
- }
-
- None
- }
-}
-
-impl<T: Poolable> PoolInner<T> {
- fn put(&mut self, key: Key, value: T, __pool_ref: &Arc<Mutex<PoolInner<T>>>) {
- if value.can_share() && self.idle.contains_key(&key) {
- trace!("put; existing idle HTTP/2 connection for {:?}", key);
- return;
- }
- trace!("put; add idle connection for {:?}", key);
- let mut remove_waiters = false;
- let mut value = Some(value);
- if let Some(waiters) = self.waiters.get_mut(&key) {
- while let Some(tx) = waiters.pop_front() {
- if !tx.is_canceled() {
- let reserved = value.take().expect("value already sent");
- let reserved = match reserved.reserve() {
- #[cfg(feature = "http2")]
- Reservation::Shared(to_keep, to_send) => {
- value = Some(to_keep);
- to_send
- }
- Reservation::Unique(uniq) => uniq,
- };
- match tx.send(reserved) {
- Ok(()) => {
- if value.is_none() {
- break;
- } else {
- continue;
- }
- }
- Err(e) => {
- value = Some(e);
- }
- }
- }
-
- trace!("put; removing canceled waiter for {:?}", key);
- }
- remove_waiters = waiters.is_empty();
- }
- if remove_waiters {
- self.waiters.remove(&key);
- }
-
- match value {
- Some(value) => {
- // borrow-check scope...
- {
- let idle_list = self.idle.entry(key.clone()).or_insert_with(Vec::new);
- if self.max_idle_per_host <= idle_list.len() {
- trace!("max idle per host for {:?}, dropping connection", key);
- return;
- }
-
- debug!("pooling idle connection for {:?}", key);
- idle_list.push(Idle {
- value,
- idle_at: Instant::now(),
- });
- }
-
- #[cfg(feature = "runtime")]
- {
- self.spawn_idle_interval(__pool_ref);
- }
- }
- None => trace!("put; found waiter for {:?}", key),
- }
- }
-
- /// A `Connecting` task is complete. Not necessarily successfully,
- /// but the lock is going away, so clean up.
- fn connected(&mut self, key: &Key) {
- let existed = self.connecting.remove(key);
- debug_assert!(existed, "Connecting dropped, key not in pool.connecting");
- // cancel any waiters. if there are any, it's because
- // this Connecting task didn't complete successfully.
- // those waiters would never receive a connection.
- self.waiters.remove(key);
- }
-
- #[cfg(feature = "runtime")]
- fn spawn_idle_interval(&mut self, pool_ref: &Arc<Mutex<PoolInner<T>>>) {
- let (dur, rx) = {
- if self.idle_interval_ref.is_some() {
- return;
- }
-
- if let Some(dur) = self.timeout {
- let (tx, rx) = oneshot::channel();
- self.idle_interval_ref = Some(tx);
- (dur, rx)
- } else {
- return;
- }
- };
-
- let interval = IdleTask {
- interval: tokio::time::interval(dur),
- pool: WeakOpt::downgrade(pool_ref),
- pool_drop_notifier: rx,
- };
-
- self.exec.execute(interval);
- }
-}
-
-impl<T> PoolInner<T> {
- /// Any `FutureResponse`s that were created will have made a `Checkout`,
- /// and possibly inserted into the pool that it is waiting for an idle
- /// connection. If a user ever dropped that future, we need to clean out
- /// those parked senders.
- fn clean_waiters(&mut self, key: &Key) {
- let mut remove_waiters = false;
- if let Some(waiters) = self.waiters.get_mut(key) {
- waiters.retain(|tx| !tx.is_canceled());
- remove_waiters = waiters.is_empty();
- }
- if remove_waiters {
- self.waiters.remove(key);
- }
- }
-}
-
-#[cfg(feature = "runtime")]
-impl<T: Poolable> PoolInner<T> {
- /// This should *only* be called by the IdleTask
- fn clear_expired(&mut self) {
- let dur = self.timeout.expect("interval assumes timeout");
-
- let now = Instant::now();
- //self.last_idle_check_at = now;
-
- self.idle.retain(|key, values| {
- values.retain(|entry| {
- if !entry.value.is_open() {
- trace!("idle interval evicting closed for {:?}", key);
- return false;
- }
-
- // Avoid `Instant::sub` to avoid issues like rust-lang/rust#86470.
- if now.saturating_duration_since(entry.idle_at) > dur {
- trace!("idle interval evicting expired for {:?}", key);
- return false;
- }
-
- // Otherwise, keep this value...
- true
- });
-
- // returning false evicts this key/val
- !values.is_empty()
- });
- }
-}
-
-impl<T> Clone for Pool<T> {
- fn clone(&self) -> Pool<T> {
- Pool {
- inner: self.inner.clone(),
- }
- }
-}
-
-/// A wrapped poolable value that tries to reinsert to the Pool on Drop.
-// Note: The bounds `T: Poolable` is needed for the Drop impl.
-pub(super) struct Pooled<T: Poolable> {
- value: Option<T>,
- is_reused: bool,
- key: Key,
- pool: WeakOpt<Mutex<PoolInner<T>>>,
-}
-
-impl<T: Poolable> Pooled<T> {
- pub(super) fn is_reused(&self) -> bool {
- self.is_reused
- }
-
- pub(super) fn is_pool_enabled(&self) -> bool {
- self.pool.0.is_some()
- }
-
- fn as_ref(&self) -> &T {
- self.value.as_ref().expect("not dropped")
- }
-
- fn as_mut(&mut self) -> &mut T {
- self.value.as_mut().expect("not dropped")
- }
-}
-
-impl<T: Poolable> Deref for Pooled<T> {
- type Target = T;
- fn deref(&self) -> &T {
- self.as_ref()
- }
-}
-
-impl<T: Poolable> DerefMut for Pooled<T> {
- fn deref_mut(&mut self) -> &mut T {
- self.as_mut()
- }
-}
-
-impl<T: Poolable> Drop for Pooled<T> {
- fn drop(&mut self) {
- if let Some(value) = self.value.take() {
- if !value.is_open() {
- // If we *already* know the connection is done here,
- // it shouldn't be re-inserted back into the pool.
- return;
- }
-
- if let Some(pool) = self.pool.upgrade() {
- if let Ok(mut inner) = pool.lock() {
- inner.put(self.key.clone(), value, &pool);
- }
- } else if !value.can_share() {
- trace!("pool dropped, dropping pooled ({:?})", self.key);
- }
- // Ver::Http2 is already in the Pool (or dead), so we wouldn't
- // have an actual reference to the Pool.
- }
- }
-}
-
-impl<T: Poolable> fmt::Debug for Pooled<T> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("Pooled").field("key", &self.key).finish()
- }
-}
-
-struct Idle<T> {
- idle_at: Instant,
- value: T,
-}
-
-// FIXME: allow() required due to `impl Trait` leaking types to this lint
-#[allow(missing_debug_implementations)]
-pub(super) struct Checkout<T> {
- key: Key,
- pool: Pool<T>,
- waiter: Option<oneshot::Receiver<T>>,
-}
-
-#[derive(Debug)]
-pub(super) struct CheckoutIsClosedError;
-
-impl StdError for CheckoutIsClosedError {}
-
-impl fmt::Display for CheckoutIsClosedError {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.write_str("checked out connection was closed")
- }
-}
-
-impl<T: Poolable> Checkout<T> {
- fn poll_waiter(
- &mut self,
- cx: &mut task::Context<'_>,
- ) -> Poll<Option<crate::Result<Pooled<T>>>> {
- if let Some(mut rx) = self.waiter.take() {
- match Pin::new(&mut rx).poll(cx) {
- Poll::Ready(Ok(value)) => {
- if value.is_open() {
- Poll::Ready(Some(Ok(self.pool.reuse(&self.key, value))))
- } else {
- Poll::Ready(Some(Err(
- crate::Error::new_canceled().with(CheckoutIsClosedError)
- )))
- }
- }
- Poll::Pending => {
- self.waiter = Some(rx);
- Poll::Pending
- }
- Poll::Ready(Err(_canceled)) => Poll::Ready(Some(Err(
- crate::Error::new_canceled().with("request has been canceled")
- ))),
- }
- } else {
- Poll::Ready(None)
- }
- }
-
- fn checkout(&mut self, cx: &mut task::Context<'_>) -> Option<Pooled<T>> {
- let entry = {
- let mut inner = self.pool.inner.as_ref()?.lock().unwrap();
- let expiration = Expiration::new(inner.timeout);
- let maybe_entry = inner.idle.get_mut(&self.key).and_then(|list| {
- trace!("take? {:?}: expiration = {:?}", self.key, expiration.0);
- // A block to end the mutable borrow on list,
- // so the map below can check is_empty()
- {
- let popper = IdlePopper {
- key: &self.key,
- list,
- };
- popper.pop(&expiration)
- }
- .map(|e| (e, list.is_empty()))
- });
-
- let (entry, empty) = if let Some((e, empty)) = maybe_entry {
- (Some(e), empty)
- } else {
- // No entry found means nuke the list for sure.
- (None, true)
- };
- if empty {
- //TODO: This could be done with the HashMap::entry API instead.
- inner.idle.remove(&self.key);
- }
-
- if entry.is_none() && self.waiter.is_none() {
- let (tx, mut rx) = oneshot::channel();
- trace!("checkout waiting for idle connection: {:?}", self.key);
- inner
- .waiters
- .entry(self.key.clone())
- .or_insert_with(VecDeque::new)
- .push_back(tx);
-
- // register the waker with this oneshot
- assert!(Pin::new(&mut rx).poll(cx).is_pending());
- self.waiter = Some(rx);
- }
-
- entry
- };
-
- entry.map(|e| self.pool.reuse(&self.key, e.value))
- }
-}
-
-impl<T: Poolable> Future for Checkout<T> {
- type Output = crate::Result<Pooled<T>>;
-
- fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
- if let Some(pooled) = ready!(self.poll_waiter(cx)?) {
- return Poll::Ready(Ok(pooled));
- }
-
- if let Some(pooled) = self.checkout(cx) {
- Poll::Ready(Ok(pooled))
- } else if !self.pool.is_enabled() {
- Poll::Ready(Err(crate::Error::new_canceled().with("pool is disabled")))
- } else {
- // There's a new waiter, already registered in self.checkout()
- debug_assert!(self.waiter.is_some());
- Poll::Pending
- }
- }
-}
-
-impl<T> Drop for Checkout<T> {
- fn drop(&mut self) {
- if self.waiter.take().is_some() {
- trace!("checkout dropped for {:?}", self.key);
- if let Some(Ok(mut inner)) = self.pool.inner.as_ref().map(|i| i.lock()) {
- inner.clean_waiters(&self.key);
- }
- }
- }
-}
-
-// FIXME: allow() required due to `impl Trait` leaking types to this lint
-#[allow(missing_debug_implementations)]
-pub(super) struct Connecting<T: Poolable> {
- key: Key,
- pool: WeakOpt<Mutex<PoolInner<T>>>,
-}
-
-impl<T: Poolable> Connecting<T> {
- pub(super) fn alpn_h2(self, pool: &Pool<T>) -> Option<Self> {
- debug_assert!(
- self.pool.0.is_none(),
- "Connecting::alpn_h2 but already Http2"
- );
-
- pool.connecting(&self.key, Ver::Http2)
- }
-}
-
-impl<T: Poolable> Drop for Connecting<T> {
- fn drop(&mut self) {
- if let Some(pool) = self.pool.upgrade() {
- // No need to panic on drop, that could abort!
- if let Ok(mut inner) = pool.lock() {
- inner.connected(&self.key);
- }
- }
- }
-}
-
-struct Expiration(Option<Duration>);
-
-impl Expiration {
- fn new(dur: Option<Duration>) -> Expiration {
- Expiration(dur)
- }
-
- fn expires(&self, instant: Instant) -> bool {
- match self.0 {
- // Avoid `Instant::elapsed` to avoid issues like rust-lang/rust#86470.
- Some(timeout) => Instant::now().saturating_duration_since(instant) > timeout,
- None => false,
- }
- }
-}
-
-#[cfg(feature = "runtime")]
-pin_project_lite::pin_project! {
- struct IdleTask<T> {
- #[pin]
- interval: Interval,
- pool: WeakOpt<Mutex<PoolInner<T>>>,
- // This allows the IdleTask to be notified as soon as the entire
- // Pool is fully dropped, and shutdown. This channel is never sent on,
- // but Err(Canceled) will be received when the Pool is dropped.
- #[pin]
- pool_drop_notifier: oneshot::Receiver<crate::common::Never>,
- }
-}
-
-#[cfg(feature = "runtime")]
-impl<T: Poolable + 'static> Future for IdleTask<T> {
- type Output = ();
-
- fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
- let mut this = self.project();
- loop {
- match this.pool_drop_notifier.as_mut().poll(cx) {
- Poll::Ready(Ok(n)) => match n {},
- Poll::Pending => (),
- Poll::Ready(Err(_canceled)) => {
- trace!("pool closed, canceling idle interval");
- return Poll::Ready(());
- }
- }
-
- ready!(this.interval.as_mut().poll_tick(cx));
-
- if let Some(inner) = this.pool.upgrade() {
- if let Ok(mut inner) = inner.lock() {
- trace!("idle interval checking for expired");
- inner.clear_expired();
- continue;
- }
- }
- return Poll::Ready(());
- }
- }
-}
-
-impl<T> WeakOpt<T> {
- fn none() -> Self {
- WeakOpt(None)
- }
-
- fn downgrade(arc: &Arc<T>) -> Self {
- WeakOpt(Some(Arc::downgrade(arc)))
- }
-
- fn upgrade(&self) -> Option<Arc<T>> {
- self.0.as_ref().and_then(Weak::upgrade)
- }
-}
-
-#[cfg(test)]
-mod tests {
- use std::task::Poll;
- use std::time::Duration;
-
- use super::{Connecting, Key, Pool, Poolable, Reservation, WeakOpt};
- use crate::common::{exec::Exec, task, Future, Pin};
-
- /// Test unique reservations.
- #[derive(Debug, PartialEq, Eq)]
- struct Uniq<T>(T);
-
- impl<T: Send + 'static + Unpin> Poolable for Uniq<T> {
- fn is_open(&self) -> bool {
- true
- }
-
- fn reserve(self) -> Reservation<Self> {
- Reservation::Unique(self)
- }
-
- fn can_share(&self) -> bool {
- false
- }
- }
-
- fn c<T: Poolable>(key: Key) -> Connecting<T> {
- Connecting {
- key,
- pool: WeakOpt::none(),
- }
- }
-
- fn host_key(s: &str) -> Key {
- (http::uri::Scheme::HTTP, s.parse().expect("host key"))
- }
-
- fn pool_no_timer<T>() -> Pool<T> {
- pool_max_idle_no_timer(::std::usize::MAX)
- }
-
- fn pool_max_idle_no_timer<T>(max_idle: usize) -> Pool<T> {
- let pool = Pool::new(
- super::Config {
- idle_timeout: Some(Duration::from_millis(100)),
- max_idle_per_host: max_idle,
- },
- &Exec::Default,
- );
- pool.no_timer();
- pool
- }
-
- #[tokio::test]
- async fn test_pool_checkout_smoke() {
- let pool = pool_no_timer();
- let key = host_key("foo");
- let pooled = pool.pooled(c(key.clone()), Uniq(41));
-
- drop(pooled);
-
- match pool.checkout(key).await {
- Ok(pooled) => assert_eq!(*pooled, Uniq(41)),
- Err(_) => panic!("not ready"),
- };
- }
-
- /// Helper to check if the future is ready after polling once.
- struct PollOnce<'a, F>(&'a mut F);
-
- impl<F, T, U> Future for PollOnce<'_, F>
- where
- F: Future<Output = Result<T, U>> + Unpin,
- {
- type Output = Option<()>;
-
- fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
- match Pin::new(&mut self.0).poll(cx) {
- Poll::Ready(Ok(_)) => Poll::Ready(Some(())),
- Poll::Ready(Err(_)) => Poll::Ready(Some(())),
- Poll::Pending => Poll::Ready(None),
- }
- }
- }
-
- #[tokio::test]
- async fn test_pool_checkout_returns_none_if_expired() {
- let pool = pool_no_timer();
- let key = host_key("foo");
- let pooled = pool.pooled(c(key.clone()), Uniq(41));
-
- drop(pooled);
- tokio::time::sleep(pool.locked().timeout.unwrap()).await;
- let mut checkout = pool.checkout(key);
- let poll_once = PollOnce(&mut checkout);
- let is_not_ready = poll_once.await.is_none();
- assert!(is_not_ready);
- }
-
- #[cfg(feature = "runtime")]
- #[tokio::test]
- async fn test_pool_checkout_removes_expired() {
- let pool = pool_no_timer();
- let key = host_key("foo");
-
- pool.pooled(c(key.clone()), Uniq(41));
- pool.pooled(c(key.clone()), Uniq(5));
- pool.pooled(c(key.clone()), Uniq(99));
-
- assert_eq!(
- pool.locked().idle.get(&key).map(|entries| entries.len()),
- Some(3)
- );
- tokio::time::sleep(pool.locked().timeout.unwrap()).await;
-
- let mut checkout = pool.checkout(key.clone());
- let poll_once = PollOnce(&mut checkout);
- // checkout.await should clean out the expired
- poll_once.await;
- assert!(pool.locked().idle.get(&key).is_none());
- }
-
- #[test]
- fn test_pool_max_idle_per_host() {
- let pool = pool_max_idle_no_timer(2);
- let key = host_key("foo");
-
- pool.pooled(c(key.clone()), Uniq(41));
- pool.pooled(c(key.clone()), Uniq(5));
- pool.pooled(c(key.clone()), Uniq(99));
-
- // pooled and dropped 3, max_idle should only allow 2
- assert_eq!(
- pool.locked().idle.get(&key).map(|entries| entries.len()),
- Some(2)
- );
- }
-
- #[cfg(feature = "runtime")]
- #[tokio::test]
- async fn test_pool_timer_removes_expired() {
- let _ = pretty_env_logger::try_init();
- tokio::time::pause();
-
- let pool = Pool::new(
- super::Config {
- idle_timeout: Some(Duration::from_millis(10)),
- max_idle_per_host: std::usize::MAX,
- },
- &Exec::Default,
- );
-
- let key = host_key("foo");
-
- pool.pooled(c(key.clone()), Uniq(41));
- pool.pooled(c(key.clone()), Uniq(5));
- pool.pooled(c(key.clone()), Uniq(99));
-
- assert_eq!(
- pool.locked().idle.get(&key).map(|entries| entries.len()),
- Some(3)
- );
-
- // Let the timer tick passed the expiration...
- tokio::time::advance(Duration::from_millis(30)).await;
- // Yield so the Interval can reap...
- tokio::task::yield_now().await;
-
- assert!(pool.locked().idle.get(&key).is_none());
- }
-
- #[tokio::test]
- async fn test_pool_checkout_task_unparked() {
- use futures_util::future::join;
- use futures_util::FutureExt;
-
- let pool = pool_no_timer();
- let key = host_key("foo");
- let pooled = pool.pooled(c(key.clone()), Uniq(41));
-
- let checkout = join(pool.checkout(key), async {
- // the checkout future will park first,
- // and then this lazy future will be polled, which will insert
- // the pooled back into the pool
- //
- // this test makes sure that doing so will unpark the checkout
- drop(pooled);
- })
- .map(|(entry, _)| entry);
-
- assert_eq!(*checkout.await.unwrap(), Uniq(41));
- }
-
- #[tokio::test]
- async fn test_pool_checkout_drop_cleans_up_waiters() {
- let pool = pool_no_timer::<Uniq<i32>>();
- let key = host_key("foo");
-
- let mut checkout1 = pool.checkout(key.clone());
- let mut checkout2 = pool.checkout(key.clone());
-
- let poll_once1 = PollOnce(&mut checkout1);
- let poll_once2 = PollOnce(&mut checkout2);
-
- // first poll needed to get into Pool's parked
- poll_once1.await;
- assert_eq!(pool.locked().waiters.get(&key).unwrap().len(), 1);
- poll_once2.await;
- assert_eq!(pool.locked().waiters.get(&key).unwrap().len(), 2);
-
- // on drop, clean up Pool
- drop(checkout1);
- assert_eq!(pool.locked().waiters.get(&key).unwrap().len(), 1);
-
- drop(checkout2);
- assert!(pool.locked().waiters.get(&key).is_none());
- }
-
- #[derive(Debug)]
- struct CanClose {
- #[allow(unused)]
- val: i32,
- closed: bool,
- }
-
- impl Poolable for CanClose {
- fn is_open(&self) -> bool {
- !self.closed
- }
-
- fn reserve(self) -> Reservation<Self> {
- Reservation::Unique(self)
- }
-
- fn can_share(&self) -> bool {
- false
- }
- }
-
- #[test]
- fn pooled_drop_if_closed_doesnt_reinsert() {
- let pool = pool_no_timer();
- let key = host_key("foo");
- pool.pooled(
- c(key.clone()),
- CanClose {
- val: 57,
- closed: true,
- },
- );
-
- assert!(!pool.locked().idle.contains_key(&key));
- }
-}
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -9,7 +9,6 @@ use std::fmt;
use std::io::{Read, Write};
use std::net::{SocketAddr, TcpListener};
use std::pin::Pin;
-use std::task::{Context, Poll};
use std::thread;
use std::time::Duration;
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -17,11 +16,11 @@ use http::uri::PathAndQuery;
use http_body_util::{BodyExt, StreamBody};
use hyper::body::to_bytes as concat;
use hyper::header::HeaderValue;
-use hyper::{Body, Method, Request, StatusCode, Uri, Version};
+use hyper::{Method, Request, StatusCode, Uri, Version};
use bytes::Bytes;
use futures_channel::oneshot;
-use futures_core::{Future, Stream, TryFuture};
+use futures_core::{Future, TryFuture};
use futures_util::future::{self, FutureExt, TryFutureExt};
use tokio::net::TcpStream;
mod support;
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1325,1035 +1324,6 @@ test! {
body: None,
}
-mod dispatch_impl {
- use super::*;
- use std::io::{self, Read, Write};
- use std::sync::atomic::{AtomicUsize, Ordering};
- use std::sync::Arc;
- use std::thread;
- use std::time::Duration;
-
- use futures_channel::{mpsc, oneshot};
- use futures_core::{self, Future};
- use futures_util::future::{FutureExt, TryFutureExt};
- use futures_util::stream::StreamExt;
- use http::Uri;
- use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
- use tokio::net::TcpStream;
-
- use super::support;
- use hyper::body::HttpBody;
- use hyper::client::connect::{Connected, Connection};
- use hyper::Client;
-
- #[test]
- fn drop_body_before_eof_closes_connection() {
- // https://github.com/hyperium/hyper/issues/1353
- let _ = pretty_env_logger::try_init();
-
- let server = TcpListener::bind("127.0.0.1:0").unwrap();
- let addr = server.local_addr().unwrap();
- let rt = support::runtime();
- let (closes_tx, closes) = mpsc::channel(10);
- let client = Client::builder().build(DebugConnector::with_closes(closes_tx));
-
- let (tx1, rx1) = oneshot::channel();
-
- thread::spawn(move || {
- let mut sock = server.accept().unwrap().0;
- sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap();
- sock.set_write_timeout(Some(Duration::from_secs(5)))
- .unwrap();
- let mut buf = [0; 4096];
- sock.read(&mut buf).expect("read 1");
- let body = vec![b'x'; 1024 * 128];
- write!(
- sock,
- "HTTP/1.1 200 OK\r\nContent-Length: {}\r\n\r\n",
- body.len()
- )
- .expect("write head");
- let _ = sock.write_all(&body);
- let _ = tx1.send(());
- });
-
- let req = Request::builder()
- .uri(&*format!("http://{}/a", addr))
- .body(Body::empty())
- .unwrap();
- let res = client.request(req).map_ok(move |res| {
- assert_eq!(res.status(), hyper::StatusCode::OK);
- });
- let rx = rx1.expect("thread panicked");
- rt.block_on(async move {
- let (res, ()) = future::join(res, rx).await;
- res.unwrap();
- tokio::time::sleep(Duration::from_secs(1)).await;
- });
-
- rt.block_on(closes.into_future()).0.expect("closes");
- }
-
- #[test]
- fn dropped_client_closes_connection() {
- // https://github.com/hyperium/hyper/issues/1353
- let _ = pretty_env_logger::try_init();
-
- let server = TcpListener::bind("127.0.0.1:0").unwrap();
- let addr = server.local_addr().unwrap();
- let rt = support::runtime();
- let (closes_tx, closes) = mpsc::channel(10);
-
- let (tx1, rx1) = oneshot::channel();
-
- thread::spawn(move || {
- let mut sock = server.accept().unwrap().0;
- sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap();
- sock.set_write_timeout(Some(Duration::from_secs(5)))
- .unwrap();
- let mut buf = [0; 4096];
- sock.read(&mut buf).expect("read 1");
- let body = [b'x'; 64];
- write!(
- sock,
- "HTTP/1.1 200 OK\r\nContent-Length: {}\r\n\r\n",
- body.len()
- )
- .expect("write head");
- let _ = sock.write_all(&body);
- let _ = tx1.send(());
- });
-
- let res = {
- let client = Client::builder().build(DebugConnector::with_closes(closes_tx));
-
- let req = Request::builder()
- .uri(&*format!("http://{}/a", addr))
- .body(Body::empty())
- .unwrap();
- client
- .request(req)
- .and_then(move |res| {
- assert_eq!(res.status(), hyper::StatusCode::OK);
- concat(res)
- })
- .map_ok(|_| ())
- };
- // client is dropped
- let rx = rx1.expect("thread panicked");
- rt.block_on(async move {
- let (res, ()) = future::join(res, rx).await;
- res.unwrap();
- tokio::time::sleep(Duration::from_secs(1)).await;
- });
-
- rt.block_on(closes.into_future()).0.expect("closes");
- }
-
- #[tokio::test]
- async fn drop_client_closes_idle_connections() {
- use futures_util::future;
-
- let _ = pretty_env_logger::try_init();
-
- let server = TcpListener::bind("127.0.0.1:0").unwrap();
- let addr = server.local_addr().unwrap();
- let (closes_tx, mut closes) = mpsc::channel(10);
-
- let (tx1, rx1) = oneshot::channel();
- let (_client_drop_tx, client_drop_rx) = oneshot::channel::<()>();
-
- thread::spawn(move || {
- let mut sock = server.accept().unwrap().0;
- sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap();
- sock.set_write_timeout(Some(Duration::from_secs(5)))
- .unwrap();
- let mut buf = [0; 4096];
- sock.read(&mut buf).expect("read 1");
- let body = [b'x'; 64];
- write!(
- sock,
- "HTTP/1.1 200 OK\r\nContent-Length: {}\r\n\r\n",
- body.len()
- )
- .expect("write head");
- let _ = sock.write_all(&body);
- let _ = tx1.send(());
-
- // prevent this thread from closing until end of test, so the connection
- // stays open and idle until Client is dropped
- support::runtime().block_on(client_drop_rx.into_future())
- });
-
- let client = Client::builder().build(DebugConnector::with_closes(closes_tx));
-
- let req = Request::builder()
- .uri(&*format!("http://{}/a", addr))
- .body(Body::empty())
- .unwrap();
- let res = client.request(req).and_then(move |res| {
- assert_eq!(res.status(), hyper::StatusCode::OK);
- concat(res)
- });
- let rx = rx1.expect("thread panicked");
-
- let (res, ()) = future::join(res, rx).await;
- res.unwrap();
-
- // not closed yet, just idle
- future::poll_fn(|ctx| {
- assert!(Pin::new(&mut closes).poll_next(ctx).is_pending());
- Poll::Ready(())
- })
- .await;
-
- // drop to start the connections closing
- drop(client);
-
- // and wait a few ticks for the connections to close
- let t = tokio::time::sleep(Duration::from_millis(100)).map(|_| panic!("time out"));
- futures_util::pin_mut!(t);
- let close = closes.into_future().map(|(opt, _)| opt.expect("closes"));
- future::select(t, close).await;
- }
-
- #[tokio::test]
- async fn drop_response_future_closes_in_progress_connection() {
- let _ = pretty_env_logger::try_init();
-
- let server = TcpListener::bind("127.0.0.1:0").unwrap();
- let addr = server.local_addr().unwrap();
- let (closes_tx, closes) = mpsc::channel(10);
-
- let (tx1, rx1) = oneshot::channel();
- let (_client_drop_tx, client_drop_rx) = std::sync::mpsc::channel::<()>();
-
- thread::spawn(move || {
- let mut sock = server.accept().unwrap().0;
- sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap();
- sock.set_write_timeout(Some(Duration::from_secs(5)))
- .unwrap();
- let mut buf = [0; 4096];
- sock.read(&mut buf).expect("read 1");
- // we never write a response head
- // simulates a slow server operation
- let _ = tx1.send(());
-
- // prevent this thread from closing until end of test, so the connection
- // stays open and idle until Client is dropped
- let _ = client_drop_rx.recv();
- });
-
- let res = {
- let client = Client::builder().build(DebugConnector::with_closes(closes_tx));
-
- let req = Request::builder()
- .uri(&*format!("http://{}/a", addr))
- .body(Body::empty())
- .unwrap();
- client.request(req).map(|_| unreachable!())
- };
-
- future::select(res, rx1).await;
-
- // res now dropped
- let t = tokio::time::sleep(Duration::from_millis(100)).map(|_| panic!("time out"));
- futures_util::pin_mut!(t);
- let close = closes.into_future().map(|(opt, _)| opt.expect("closes"));
- future::select(t, close).await;
- }
-
- #[tokio::test]
- async fn drop_response_body_closes_in_progress_connection() {
- let _ = pretty_env_logger::try_init();
-
- let server = TcpListener::bind("127.0.0.1:0").unwrap();
- let addr = server.local_addr().unwrap();
- let (closes_tx, closes) = mpsc::channel(10);
-
- let (tx1, rx1) = oneshot::channel();
- let (_client_drop_tx, client_drop_rx) = std::sync::mpsc::channel::<()>();
-
- thread::spawn(move || {
- let mut sock = server.accept().unwrap().0;
- sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap();
- sock.set_write_timeout(Some(Duration::from_secs(5)))
- .unwrap();
- let mut buf = [0; 4096];
- sock.read(&mut buf).expect("read 1");
- write!(
- sock,
- "HTTP/1.1 200 OK\r\nTransfer-Encoding: chunked\r\n\r\n"
- )
- .expect("write head");
- let _ = tx1.send(());
-
- // prevent this thread from closing until end of test, so the connection
- // stays open and idle until Client is dropped
- let _ = client_drop_rx.recv();
- });
-
- let rx = rx1.expect("thread panicked");
- let res = {
- let client = Client::builder().build(DebugConnector::with_closes(closes_tx));
-
- let req = Request::builder()
- .uri(&*format!("http://{}/a", addr))
- .body(Body::empty())
- .unwrap();
- // notably, haven't read body yet
- client.request(req)
- };
-
- let (res, ()) = future::join(res, rx).await;
- // drop the body
- res.unwrap();
-
- // and wait a few ticks to see the connection drop
- let t = tokio::time::sleep(Duration::from_millis(100)).map(|_| panic!("time out"));
- futures_util::pin_mut!(t);
- let close = closes.into_future().map(|(opt, _)| opt.expect("closes"));
- future::select(t, close).await;
- }
-
- #[tokio::test]
- async fn no_keep_alive_closes_connection() {
- // https://github.com/hyperium/hyper/issues/1383
- let _ = pretty_env_logger::try_init();
-
- let server = TcpListener::bind("127.0.0.1:0").unwrap();
- let addr = server.local_addr().unwrap();
- let (closes_tx, closes) = mpsc::channel(10);
-
- let (tx1, rx1) = oneshot::channel();
- let (_tx2, rx2) = std::sync::mpsc::channel::<()>();
-
- thread::spawn(move || {
- let mut sock = server.accept().unwrap().0;
- sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap();
- sock.set_write_timeout(Some(Duration::from_secs(5)))
- .unwrap();
- let mut buf = [0; 4096];
- sock.read(&mut buf).expect("read 1");
- sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n")
- .unwrap();
- let _ = tx1.send(());
-
- // prevent this thread from closing until end of test, so the connection
- // stays open and idle until Client is dropped
- let _ = rx2.recv();
- });
-
- let client = Client::builder()
- .pool_max_idle_per_host(0)
- .build(DebugConnector::with_closes(closes_tx));
-
- let req = Request::builder()
- .uri(&*format!("http://{}/a", addr))
- .body(Body::empty())
- .unwrap();
- let res = client.request(req).and_then(move |res| {
- assert_eq!(res.status(), hyper::StatusCode::OK);
- concat(res)
- });
- let rx = rx1.expect("thread panicked");
-
- let (res, ()) = future::join(res, rx).await;
- res.unwrap();
-
- let t = tokio::time::sleep(Duration::from_millis(100)).map(|_| panic!("time out"));
- futures_util::pin_mut!(t);
- let close = closes.into_future().map(|(opt, _)| opt.expect("closes"));
- future::select(t, close).await;
- }
-
- #[tokio::test]
- async fn socket_disconnect_closes_idle_conn() {
- // notably when keep-alive is enabled
- let _ = pretty_env_logger::try_init();
-
- let server = TcpListener::bind("127.0.0.1:0").unwrap();
- let addr = server.local_addr().unwrap();
- let (closes_tx, closes) = mpsc::channel(10);
-
- let (tx1, rx1) = oneshot::channel();
-
- thread::spawn(move || {
- let mut sock = server.accept().unwrap().0;
- sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap();
- sock.set_write_timeout(Some(Duration::from_secs(5)))
- .unwrap();
- let mut buf = [0; 4096];
- sock.read(&mut buf).expect("read 1");
- sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n")
- .unwrap();
- let _ = tx1.send(());
- });
-
- let client = Client::builder().build(DebugConnector::with_closes(closes_tx));
-
- let req = Request::builder()
- .uri(&*format!("http://{}/a", addr))
- .body(Body::empty())
- .unwrap();
- let res = client.request(req).and_then(move |res| {
- assert_eq!(res.status(), hyper::StatusCode::OK);
- concat(res)
- });
- let rx = rx1.expect("thread panicked");
-
- let (res, ()) = future::join(res, rx).await;
- res.unwrap();
-
- let t = tokio::time::sleep(Duration::from_millis(100)).map(|_| panic!("time out"));
- futures_util::pin_mut!(t);
- let close = closes.into_future().map(|(opt, _)| opt.expect("closes"));
- future::select(t, close).await;
- }
-
- #[test]
- fn connect_call_is_lazy() {
- // We especially don't want connects() triggered if there's
- // idle connections that the Checkout would have found
- let _ = pretty_env_logger::try_init();
-
- let _rt = support::runtime();
- let connector = DebugConnector::new();
- let connects = connector.connects.clone();
-
- let client = Client::builder().build(connector);
-
- assert_eq!(connects.load(Ordering::Relaxed), 0);
- let req = Request::builder()
- .uri("http://hyper.local/a")
- .body(Body::empty())
- .unwrap();
- let _fut = client.request(req);
- // internal Connect::connect should have been lazy, and not
- // triggered an actual connect yet.
- assert_eq!(connects.load(Ordering::Relaxed), 0);
- }
-
- #[test]
- fn client_keep_alive_0() {
- let _ = pretty_env_logger::try_init();
- let server = TcpListener::bind("127.0.0.1:0").unwrap();
- let addr = server.local_addr().unwrap();
- let rt = support::runtime();
- let connector = DebugConnector::new();
- let connects = connector.connects.clone();
-
- let client = Client::builder().build(connector);
-
- let (tx1, rx1) = oneshot::channel();
- let (tx2, rx2) = oneshot::channel();
- thread::spawn(move || {
- let mut sock = server.accept().unwrap().0;
- //drop(server);
- sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap();
- sock.set_write_timeout(Some(Duration::from_secs(5)))
- .unwrap();
- let mut buf = [0; 4096];
- sock.read(&mut buf).expect("read 1");
- sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n")
- .expect("write 1");
- let _ = tx1.send(());
-
- let n2 = sock.read(&mut buf).expect("read 2");
- assert_ne!(n2, 0);
- let second_get = "GET /b HTTP/1.1\r\n";
- assert_eq!(s(&buf[..second_get.len()]), second_get);
- sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n")
- .expect("write 2");
- let _ = tx2.send(());
- });
-
- assert_eq!(connects.load(Ordering::SeqCst), 0);
-
- let rx = rx1.expect("thread panicked");
- let req = Request::builder()
- .uri(&*format!("http://{}/a", addr))
- .body(Body::empty())
- .unwrap();
- let res = client.request(req);
- rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap();
-
- assert_eq!(connects.load(Ordering::SeqCst), 1);
-
- // sleep real quick to let the threadpool put connection in ready
- // state and back into client pool
- thread::sleep(Duration::from_millis(50));
-
- let rx = rx2.expect("thread panicked");
- let req = Request::builder()
- .uri(&*format!("http://{}/b", addr))
- .body(Body::empty())
- .unwrap();
- let res = client.request(req);
- rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap();
-
- assert_eq!(
- connects.load(Ordering::SeqCst),
- 1,
- "second request should still only have 1 connect"
- );
- drop(client);
- }
-
- #[test]
- fn client_keep_alive_extra_body() {
- let _ = pretty_env_logger::try_init();
- let server = TcpListener::bind("127.0.0.1:0").unwrap();
- let addr = server.local_addr().unwrap();
- let rt = support::runtime();
-
- let connector = DebugConnector::new();
- let connects = connector.connects.clone();
-
- let client = Client::builder().build(connector);
-
- let (tx1, rx1) = oneshot::channel();
- let (tx2, rx2) = oneshot::channel();
- thread::spawn(move || {
- let mut sock = server.accept().unwrap().0;
- sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap();
- sock.set_write_timeout(Some(Duration::from_secs(5)))
- .unwrap();
- let mut buf = [0; 4096];
- sock.read(&mut buf).expect("read 1");
- sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 5\r\n\r\nhello")
- .expect("write 1");
- // the body "hello", while ignored because its a HEAD request, should mean the connection
- // cannot be put back in the pool
- let _ = tx1.send(());
-
- let mut sock2 = server.accept().unwrap().0;
- let n2 = sock2.read(&mut buf).expect("read 2");
- assert_ne!(n2, 0);
- let second_get = "GET /b HTTP/1.1\r\n";
- assert_eq!(s(&buf[..second_get.len()]), second_get);
- sock2
- .write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n")
- .expect("write 2");
- let _ = tx2.send(());
- });
-
- assert_eq!(connects.load(Ordering::Relaxed), 0);
-
- let rx = rx1.expect("thread panicked");
- let req = Request::builder()
- .method("HEAD")
- .uri(&*format!("http://{}/a", addr))
- .body(Body::empty())
- .unwrap();
- let res = client.request(req);
- rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap();
-
- assert_eq!(connects.load(Ordering::Relaxed), 1);
-
- let rx = rx2.expect("thread panicked");
- let req = Request::builder()
- .uri(&*format!("http://{}/b", addr))
- .body(Body::empty())
- .unwrap();
- let res = client.request(req);
- rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap();
-
- assert_eq!(connects.load(Ordering::Relaxed), 2);
- }
-
- #[test]
- fn client_keep_alive_when_response_before_request_body_ends() {
- let _ = pretty_env_logger::try_init();
- let server = TcpListener::bind("127.0.0.1:0").unwrap();
- let addr = server.local_addr().unwrap();
- let rt = support::runtime();
-
- let connector = DebugConnector::new();
- let connects = connector.connects.clone();
-
- let client = Client::builder().build(connector);
-
- let (tx1, rx1) = oneshot::channel();
- let (tx2, rx2) = oneshot::channel();
- let (tx3, rx3) = oneshot::channel();
- thread::spawn(move || {
- let mut sock = server.accept().unwrap().0;
- sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap();
- sock.set_write_timeout(Some(Duration::from_secs(5)))
- .unwrap();
- let mut buf = [0; 4096];
- sock.read(&mut buf).expect("read 1");
- sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n")
- .expect("write 1");
- // after writing the response, THEN stream the body
- let _ = tx1.send(());
-
- sock.read(&mut buf).expect("read 2");
- let _ = tx2.send(());
-
- let n2 = sock.read(&mut buf).expect("read 3");
- assert_ne!(n2, 0);
- let second_get = "GET /b HTTP/1.1\r\n";
- assert_eq!(s(&buf[..second_get.len()]), second_get);
- sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n")
- .expect("write 2");
- let _ = tx3.send(());
- });
-
- assert_eq!(connects.load(Ordering::Relaxed), 0);
-
- let delayed_body = rx1
- .then(|_| tokio::time::sleep(Duration::from_millis(200)))
- .map(|_| Ok::<_, ()>(Bytes::from("hello a")))
- .map_err(|_| -> std::convert::Infallible { panic!("rx1") })
- .into_stream();
-
- let rx = rx2.expect("thread panicked");
- let req = Request::builder()
- .method("POST")
- .uri(&*format!("http://{}/a", addr))
- .body(BodyExt::boxed(StreamBody::new(delayed_body)))
- .unwrap();
- let client2 = client.clone();
-
- // req 1
- let fut = future::join(client.request(req), rx)
- .then(|_| tokio::time::sleep(Duration::from_millis(200)))
- // req 2
- .then(move |()| {
- let rx = rx3.expect("thread panicked");
- let req = Request::builder()
- .uri(&*format!("http://{}/b", addr))
- .body(BodyExt::boxed(http_body_util::Empty::new()))
- .unwrap();
- future::join(client2.request(req), rx).map(|r| r.0)
- });
-
- rt.block_on(fut).unwrap();
-
- assert_eq!(connects.load(Ordering::Relaxed), 1);
- }
-
- #[tokio::test]
- async fn client_keep_alive_eager_when_chunked() {
- // If a response body has been read to completion, with completion
- // determined by some other factor, like decompression, and thus
- // it is in't polled a final time to clear the final 0-len chunk,
- // try to eagerly clear it so the connection can still be used.
-
- let _ = pretty_env_logger::try_init();
- let server = TcpListener::bind("127.0.0.1:0").unwrap();
- let addr = server.local_addr().unwrap();
- let connector = DebugConnector::new();
- let connects = connector.connects.clone();
-
- let client = Client::builder().build(connector);
-
- let (tx1, rx1) = oneshot::channel();
- let (tx2, rx2) = oneshot::channel();
- thread::spawn(move || {
- let mut sock = server.accept().unwrap().0;
- //drop(server);
- sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap();
- sock.set_write_timeout(Some(Duration::from_secs(5)))
- .unwrap();
- let mut buf = [0; 4096];
- sock.read(&mut buf).expect("read 1");
- sock.write_all(
- b"\
- HTTP/1.1 200 OK\r\n\
- transfer-encoding: chunked\r\n\
- \r\n\
- 5\r\n\
- hello\r\n\
- 0\r\n\r\n\
- ",
- )
- .expect("write 1");
- let _ = tx1.send(());
-
- let n2 = sock.read(&mut buf).expect("read 2");
- assert_ne!(n2, 0, "bytes of second request");
- let second_get = "GET /b HTTP/1.1\r\n";
- assert_eq!(s(&buf[..second_get.len()]), second_get);
- sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n")
- .expect("write 2");
- let _ = tx2.send(());
- });
-
- assert_eq!(connects.load(Ordering::SeqCst), 0);
-
- let rx = rx1.expect("thread panicked");
- let req = Request::builder()
- .uri(&*format!("http://{}/a", addr))
- .body(Body::empty())
- .unwrap();
- let fut = client.request(req);
-
- let mut resp = future::join(fut, rx).map(|r| r.0).await.unwrap();
- assert_eq!(connects.load(Ordering::SeqCst), 1);
- assert_eq!(resp.status(), 200);
- assert_eq!(resp.headers()["transfer-encoding"], "chunked");
-
- // Read the "hello" chunk...
- let chunk = resp.body_mut().data().await.unwrap().unwrap();
- assert_eq!(chunk, "hello");
-
- // With our prior knowledge, we know that's the end of the body.
- // So just drop the body, without polling for the `0\r\n\r\n` end.
- drop(resp);
-
- // sleep real quick to let the threadpool put connection in ready
- // state and back into client pool
- tokio::time::sleep(Duration::from_millis(50)).await;
-
- let rx = rx2.expect("thread panicked");
- let req = Request::builder()
- .uri(&*format!("http://{}/b", addr))
- .body(Body::empty())
- .unwrap();
- let fut = client.request(req);
- future::join(fut, rx).map(|r| r.0).await.unwrap();
-
- assert_eq!(
- connects.load(Ordering::SeqCst),
- 1,
- "second request should still only have 1 connect"
- );
- drop(client);
- }
-
- #[test]
- fn connect_proxy_sends_absolute_uri() {
- let _ = pretty_env_logger::try_init();
- let server = TcpListener::bind("127.0.0.1:0").unwrap();
- let addr = server.local_addr().unwrap();
- let rt = support::runtime();
- let connector = DebugConnector::new().proxy();
-
- let client = Client::builder().build(connector);
-
- let (tx1, rx1) = oneshot::channel();
- thread::spawn(move || {
- let mut sock = server.accept().unwrap().0;
- //drop(server);
- sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap();
- sock.set_write_timeout(Some(Duration::from_secs(5)))
- .unwrap();
- let mut buf = [0; 4096];
- let n = sock.read(&mut buf).expect("read 1");
- let expected = format!(
- "GET http://{addr}/foo/bar HTTP/1.1\r\nhost: {addr}\r\n\r\n",
- addr = addr
- );
- assert_eq!(s(&buf[..n]), expected);
-
- sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n")
- .expect("write 1");
- let _ = tx1.send(());
- });
-
- let rx = rx1.expect("thread panicked");
- let req = Request::builder()
- .uri(&*format!("http://{}/foo/bar", addr))
- .body(Body::empty())
- .unwrap();
- let res = client.request(req);
- rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap();
- }
-
- #[test]
- fn connect_proxy_http_connect_sends_authority_form() {
- let _ = pretty_env_logger::try_init();
- let server = TcpListener::bind("127.0.0.1:0").unwrap();
- let addr = server.local_addr().unwrap();
- let rt = support::runtime();
- let connector = DebugConnector::new().proxy();
-
- let client = Client::builder().build(connector);
-
- let (tx1, rx1) = oneshot::channel();
- thread::spawn(move || {
- let mut sock = server.accept().unwrap().0;
- //drop(server);
- sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap();
- sock.set_write_timeout(Some(Duration::from_secs(5)))
- .unwrap();
- let mut buf = [0; 4096];
- let n = sock.read(&mut buf).expect("read 1");
- let expected = format!(
- "CONNECT {addr} HTTP/1.1\r\nhost: {addr}\r\n\r\n",
- addr = addr
- );
- assert_eq!(s(&buf[..n]), expected);
-
- sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n")
- .expect("write 1");
- let _ = tx1.send(());
- });
-
- let rx = rx1.expect("thread panicked");
- let req = Request::builder()
- .method("CONNECT")
- .uri(&*format!("http://{}/useless/path", addr))
- .body(Body::empty())
- .unwrap();
- let res = client.request(req);
- rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap();
- }
-
- #[test]
- fn client_upgrade() {
- use tokio::io::{AsyncReadExt, AsyncWriteExt};
-
- let _ = pretty_env_logger::try_init();
- let server = TcpListener::bind("127.0.0.1:0").unwrap();
- let addr = server.local_addr().unwrap();
- let rt = support::runtime();
-
- let connector = DebugConnector::new();
-
- let client = Client::builder().build(connector);
-
- let (tx1, rx1) = oneshot::channel();
- thread::spawn(move || {
- let mut sock = server.accept().unwrap().0;
- sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap();
- sock.set_write_timeout(Some(Duration::from_secs(5)))
- .unwrap();
- let mut buf = [0; 4096];
- sock.read(&mut buf).expect("read 1");
- sock.write_all(
- b"\
- HTTP/1.1 101 Switching Protocols\r\n\
- Upgrade: foobar\r\n\
- \r\n\
- foobar=ready\
- ",
- )
- .unwrap();
- let _ = tx1.send(());
-
- let n = sock.read(&mut buf).expect("read 2");
- assert_eq!(&buf[..n], b"foo=bar");
- sock.write_all(b"bar=foo").expect("write 2");
- });
-
- let rx = rx1.expect("thread panicked");
-
- let req = Request::builder()
- .method("GET")
- .uri(&*format!("http://{}/up", addr))
- .body(Body::empty())
- .unwrap();
-
- let res = client.request(req);
- let res = rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap();
-
- assert_eq!(res.status(), 101);
- let upgraded = rt.block_on(hyper::upgrade::on(res)).expect("on_upgrade");
-
- let parts = upgraded.downcast::<DebugStream>().unwrap();
- assert_eq!(s(&parts.read_buf), "foobar=ready");
-
- let mut io = parts.io;
- rt.block_on(io.write_all(b"foo=bar")).unwrap();
- let mut vec = vec![];
- rt.block_on(io.read_to_end(&mut vec)).unwrap();
- assert_eq!(vec, b"bar=foo");
- }
-
- #[test]
- fn alpn_h2() {
- use hyper::server::conn::Http;
- use hyper::service::service_fn;
- use hyper::Response;
- use tokio::net::TcpListener;
-
- let _ = pretty_env_logger::try_init();
- let rt = support::runtime();
- let listener = rt
- .block_on(TcpListener::bind(SocketAddr::from(([127, 0, 0, 1], 0))))
- .unwrap();
- let addr = listener.local_addr().unwrap();
- let mut connector = DebugConnector::new();
- connector.alpn_h2 = true;
- let connects = connector.connects.clone();
-
- let client = Client::builder().build::<_, ::hyper::Body>(connector);
-
- rt.spawn(async move {
- let (socket, _addr) = listener.accept().await.expect("accept");
- Http::new()
- .http2_only(true)
- .serve_connection(
- socket,
- service_fn(|req| async move {
- assert_eq!(req.headers().get("host"), None);
- Ok::<_, hyper::Error>(Response::new(Body::empty()))
- }),
- )
- .await
- .expect("server");
- });
-
- assert_eq!(connects.load(Ordering::SeqCst), 0);
-
- let url = format!("http://{}/a", addr)
- .parse::<::hyper::Uri>()
- .unwrap();
- let res1 = client.get(url.clone());
- let res2 = client.get(url.clone());
- let res3 = client.get(url.clone());
- rt.block_on(future::try_join3(res1, res2, res3)).unwrap();
-
- // Since the client doesn't know it can ALPN at first, it will have
- // started 3 connections. But, the server above will only handle 1,
- // so the unwrapped responses futures show it still worked.
- assert_eq!(connects.load(Ordering::SeqCst), 3);
-
- let res4 = client.get(url.clone());
- rt.block_on(res4).unwrap();
-
- // HTTP/2 request allowed
- let res5 = client.request(
- Request::builder()
- .uri(url)
- .version(hyper::Version::HTTP_2)
- .body(Default::default())
- .unwrap(),
- );
- rt.block_on(res5).unwrap();
-
- assert_eq!(
- connects.load(Ordering::SeqCst),
- 3,
- "after ALPN, no more connects"
- );
- drop(client);
- }
-
- #[derive(Clone)]
- struct DebugConnector {
- closes: mpsc::Sender<()>,
- connects: Arc<AtomicUsize>,
- is_proxy: bool,
- alpn_h2: bool,
- }
-
- impl DebugConnector {
- fn new() -> DebugConnector {
- let (tx, _) = mpsc::channel(10);
- DebugConnector::with_closes(tx)
- }
-
- fn with_closes(closes: mpsc::Sender<()>) -> DebugConnector {
- DebugConnector {
- closes,
- connects: Arc::new(AtomicUsize::new(0)),
- is_proxy: false,
- alpn_h2: false,
- }
- }
-
- fn proxy(mut self) -> Self {
- self.is_proxy = true;
- self
- }
- }
-
- impl hyper::service::Service<Uri> for DebugConnector {
- type Response = DebugStream;
- type Error = std::io::Error;
- type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send>>;
-
- fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- Poll::Ready(Ok(()))
- }
-
- fn call(&mut self, dst: Uri) -> Self::Future {
- self.connects.fetch_add(1, Ordering::SeqCst);
- let closes = self.closes.clone();
- let is_proxy = self.is_proxy;
- let is_alpn_h2 = self.alpn_h2;
-
- Box::pin(async move {
- let host = dst.host().expect("no host in uri");
- let port = dst.port_u16().expect("no port in uri");
-
- let stream = TcpStream::connect(format!("{}:{}", host, port)).await?;
-
- Ok(DebugStream {
- tcp: stream,
- on_drop: closes,
- is_alpn_h2,
- is_proxy,
- })
- })
- }
- }
-
- struct DebugStream {
- tcp: TcpStream,
- on_drop: mpsc::Sender<()>,
- is_alpn_h2: bool,
- is_proxy: bool,
- }
-
- impl Drop for DebugStream {
- fn drop(&mut self) {
- let _ = self.on_drop.try_send(());
- }
- }
-
- impl AsyncWrite for DebugStream {
- fn poll_shutdown(
- mut self: Pin<&mut Self>,
- cx: &mut Context<'_>,
- ) -> Poll<Result<(), io::Error>> {
- Pin::new(&mut self.tcp).poll_shutdown(cx)
- }
-
- fn poll_flush(
- mut self: Pin<&mut Self>,
- cx: &mut Context<'_>,
- ) -> Poll<Result<(), io::Error>> {
- Pin::new(&mut self.tcp).poll_flush(cx)
- }
-
- fn poll_write(
- mut self: Pin<&mut Self>,
- cx: &mut Context<'_>,
- buf: &[u8],
- ) -> Poll<Result<usize, io::Error>> {
- Pin::new(&mut self.tcp).poll_write(cx, buf)
- }
- }
-
- impl AsyncRead for DebugStream {
- fn poll_read(
- mut self: Pin<&mut Self>,
- cx: &mut Context<'_>,
- buf: &mut ReadBuf<'_>,
- ) -> Poll<io::Result<()>> {
- Pin::new(&mut self.tcp).poll_read(cx, buf)
- }
- }
-
- impl Connection for DebugStream {
- fn connected(&self) -> Connected {
- let connected = Connected::new().proxy(self.is_proxy);
-
- if self.is_alpn_h2 {
- connected.negotiated_h2()
- } else {
- connected
- }
- }
- }
-}
-
mod conn {
use std::io::{self, Read, Write};
use std::net::{SocketAddr, TcpListener};
|
hyperium/hyper
|
6fd696e10974f10b2c6b9d16393bbbfa21c2333f
|
|
[
"2849"
] |
0.3
|
491b076bca945ad9b62d49d0c14a4b989cb0106c
|
diff --git a/src/common/exec.rs b/src/common/exec.rs
--- a/src/common/exec.rs
+++ b/src/common/exec.rs
@@ -3,28 +3,17 @@ use std::future::Future;
use std::pin::Pin;
use std::sync::Arc;
-#[cfg(all(feature = "server", any(feature = "http1", feature = "http2")))]
-use crate::body::Body;
#[cfg(feature = "server")]
use crate::body::HttpBody;
#[cfg(all(feature = "http2", feature = "server"))]
use crate::proto::h2::server::H2Stream;
use crate::rt::Executor;
-#[cfg(all(feature = "server", any(feature = "http1", feature = "http2")))]
-use crate::server::server::{new_svc::NewSvcTask, Watcher};
-#[cfg(all(feature = "server", any(feature = "http1", feature = "http2")))]
-use crate::service::HttpService;
#[cfg(feature = "server")]
pub trait ConnStreamExec<F, B: HttpBody>: Clone {
fn execute_h2stream(&mut self, fut: H2Stream<F, B>);
}
-#[cfg(all(feature = "server", any(feature = "http1", feature = "http2")))]
-pub trait NewSvcExec<I, N, S: HttpService<Body>, E, W: Watcher<I, S, E>>: Clone {
- fn execute_new_svc(&mut self, fut: NewSvcTask<I, N, S, E, W>);
-}
-
pub(crate) type BoxSendFuture = Pin<Box<dyn Future<Output = ()> + Send>>;
// Either the user provides an executor for background tasks, or we use
diff --git a/src/common/exec.rs b/src/common/exec.rs
--- a/src/common/exec.rs
+++ b/src/common/exec.rs
@@ -78,18 +67,6 @@ where
}
}
-#[cfg(all(feature = "server", any(feature = "http1", feature = "http2")))]
-impl<I, N, S, E, W> NewSvcExec<I, N, S, E, W> for Exec
-where
- NewSvcTask<I, N, S, E, W>: Future<Output = ()> + Send + 'static,
- S: HttpService<Body>,
- W: Watcher<I, S, E>,
-{
- fn execute_new_svc(&mut self, fut: NewSvcTask<I, N, S, E, W>) {
- self.execute(fut)
- }
-}
-
// ==== impl Executor =====
#[cfg(feature = "server")]
diff --git a/src/common/exec.rs b/src/common/exec.rs
--- a/src/common/exec.rs
+++ b/src/common/exec.rs
@@ -104,19 +81,6 @@ where
}
}
-#[cfg(all(feature = "server", any(feature = "http1", feature = "http2")))]
-impl<I, N, S, E, W> NewSvcExec<I, N, S, E, W> for E
-where
- E: Executor<NewSvcTask<I, N, S, E, W>> + Clone,
- NewSvcTask<I, N, S, E, W>: Future<Output = ()>,
- S: HttpService<Body>,
- W: Watcher<I, S, E>,
-{
- fn execute_new_svc(&mut self, fut: NewSvcTask<I, N, S, E, W>) {
- self.execute(fut)
- }
-}
-
// If http2 is not enable, we just have a stub here, so that the trait bounds
// that *would* have been needed are still checked. Why?
//
diff --git a/src/common/mod.rs b/src/common/mod.rs
--- a/src/common/mod.rs
+++ b/src/common/mod.rs
@@ -10,8 +10,6 @@ macro_rules! ready {
pub(crate) mod buf;
#[cfg(all(feature = "server", any(feature = "http1", feature = "http2")))]
pub(crate) mod date;
-#[cfg(all(feature = "server", any(feature = "http1", feature = "http2")))]
-pub(crate) mod drain;
#[cfg(any(feature = "http1", feature = "http2", feature = "server"))]
pub(crate) mod exec;
pub(crate) mod io;
diff --git a/src/error.rs b/src/error.rs
--- a/src/error.rs
+++ b/src/error.rs
@@ -40,10 +40,6 @@ pub(super) enum Kind {
/// Error creating a TcpListener.
#[cfg(all(feature = "tcp", feature = "server"))]
Listen,
- /// Error accepting on an Incoming stream.
- #[cfg(any(feature = "http1", feature = "http2"))]
- #[cfg(feature = "server")]
- Accept,
/// User took too long to send headers
#[cfg(all(feature = "http1", feature = "server", feature = "runtime"))]
HeaderTimeout,
diff --git a/src/error.rs b/src/error.rs
--- a/src/error.rs
+++ b/src/error.rs
@@ -96,10 +92,6 @@ pub(super) enum User {
Body,
/// The user aborted writing of the outgoing body.
BodyWriteAborted,
- /// Error calling user's MakeService.
- #[cfg(any(feature = "http1", feature = "http2"))]
- #[cfg(feature = "server")]
- MakeService,
/// Error from future of user's Service.
#[cfg(any(feature = "http1", feature = "http2"))]
Service,
diff --git a/src/error.rs b/src/error.rs
--- a/src/error.rs
+++ b/src/error.rs
@@ -278,12 +270,6 @@ impl Error {
Error::new(Kind::Listen).with(cause)
}
- #[cfg(any(feature = "http1", feature = "http2"))]
- #[cfg(feature = "server")]
- pub(super) fn new_accept<E: Into<Cause>>(cause: E) -> Error {
- Error::new(Kind::Accept).with(cause)
- }
-
#[cfg(any(feature = "http1", feature = "http2"))]
#[cfg(feature = "client")]
pub(super) fn new_connect<E: Into<Cause>>(cause: E) -> Error {
diff --git a/src/error.rs b/src/error.rs
--- a/src/error.rs
+++ b/src/error.rs
@@ -356,12 +342,6 @@ impl Error {
Error::new_user(User::ManualUpgrade)
}
- #[cfg(any(feature = "http1", feature = "http2"))]
- #[cfg(feature = "server")]
- pub(super) fn new_user_make_service<E: Into<Cause>>(cause: E) -> Error {
- Error::new_user(User::MakeService).with(cause)
- }
-
#[cfg(any(feature = "http1", feature = "http2"))]
pub(super) fn new_user_service<E: Into<Cause>>(cause: E) -> Error {
Error::new_user(User::Service).with(cause)
diff --git a/src/error.rs b/src/error.rs
--- a/src/error.rs
+++ b/src/error.rs
@@ -435,9 +415,6 @@ impl Error {
Kind::Canceled => "operation was canceled",
#[cfg(all(feature = "server", feature = "tcp"))]
Kind::Listen => "error creating server listener",
- #[cfg(any(feature = "http1", feature = "http2"))]
- #[cfg(feature = "server")]
- Kind::Accept => "error accepting connection",
#[cfg(all(feature = "http1", feature = "server", feature = "runtime"))]
Kind::HeaderTimeout => "read header from client timeout",
#[cfg(any(feature = "http1", feature = "http2"))]
diff --git a/src/error.rs b/src/error.rs
--- a/src/error.rs
+++ b/src/error.rs
@@ -455,9 +432,6 @@ impl Error {
Kind::User(User::Body) => "error from user's HttpBody stream",
Kind::User(User::BodyWriteAborted) => "user body write aborted",
#[cfg(any(feature = "http1", feature = "http2"))]
- #[cfg(feature = "server")]
- Kind::User(User::MakeService) => "error from user's MakeService",
- #[cfg(any(feature = "http1", feature = "http2"))]
Kind::User(User::Service) => "error from user's Service",
#[cfg(any(feature = "http1", feature = "http2"))]
#[cfg(feature = "server")]
diff --git a/src/lib.rs b/src/lib.rs
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -102,6 +102,4 @@ cfg_feature! {
#![feature = "server"]
pub mod server;
- #[doc(no_inline)]
- pub use crate::server::Server;
}
diff --git a/src/server/accept.rs /dev/null
--- a/src/server/accept.rs
+++ /dev/null
@@ -1,71 +0,0 @@
-//! The `Accept` trait and supporting types.
-//!
-//! This module contains:
-//!
-//! - The [`Accept`](Accept) trait used to asynchronously accept incoming
-//! connections.
-//! - Utilities like `poll_fn` to ease creating a custom `Accept`.
-
-use crate::common::{
- task::{self, Poll},
- Pin,
-};
-
-/// Asynchronously accept incoming connections.
-pub trait Accept {
- /// The connection type that can be accepted.
- type Conn;
- /// The error type that can occur when accepting a connection.
- type Error;
-
- /// Poll to accept the next connection.
- fn poll_accept(
- self: Pin<&mut Self>,
- cx: &mut task::Context<'_>,
- ) -> Poll<Option<Result<Self::Conn, Self::Error>>>;
-}
-
-/// Create an `Accept` with a polling function.
-///
-/// # Example
-///
-/// ```
-/// use std::task::Poll;
-/// use hyper::server::{accept, Server};
-///
-/// # let mock_conn = ();
-/// // If we created some mocked connection...
-/// let mut conn = Some(mock_conn);
-///
-/// // And accept just the mocked conn once...
-/// let once = accept::poll_fn(move |cx| {
-/// Poll::Ready(conn.take().map(Ok::<_, ()>))
-/// });
-///
-/// let builder = Server::builder(once);
-/// ```
-pub fn poll_fn<F, IO, E>(func: F) -> impl Accept<Conn = IO, Error = E>
-where
- F: FnMut(&mut task::Context<'_>) -> Poll<Option<Result<IO, E>>>,
-{
- struct PollFn<F>(F);
-
- // The closure `F` is never pinned
- impl<F> Unpin for PollFn<F> {}
-
- impl<F, IO, E> Accept for PollFn<F>
- where
- F: FnMut(&mut task::Context<'_>) -> Poll<Option<Result<IO, E>>>,
- {
- type Conn = IO;
- type Error = E;
- fn poll_accept(
- self: Pin<&mut Self>,
- cx: &mut task::Context<'_>,
- ) -> Poll<Option<Result<Self::Conn, Self::Error>>> {
- (self.get_mut().0)(cx)
- }
- }
-
- PollFn(func)
-}
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -5,9 +5,6 @@
//! are not handled at this level. This module provides the building blocks to
//! customize those things externally.
//!
-//! If you don't have need to manage connections yourself, consider using the
-//! higher-level [Server](super) API.
-//!
//! ## Example
//! A simple example that uses the `Http` struct to talk HTTP over a Tokio TCP stream
//! ```no_run
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -69,7 +66,6 @@ cfg_feature! {
use tokio::io::{AsyncRead, AsyncWrite};
use tracing::trace;
- pub use super::server::Connecting;
use crate::body::{Body, HttpBody};
use crate::common::{task, Future, Pin, Poll, Unpin};
#[cfg(not(all(feature = "http1", feature = "http2")))]
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -84,9 +80,6 @@ cfg_feature! {
/// A lower-level configuration of the HTTP protocol.
///
/// This structure is used to configure options for an HTTP server connection.
-///
-/// If you don't have need to manage connections yourself, consider using the
-/// higher-level [Server](super) API.
#[derive(Clone, Debug)]
#[cfg(any(feature = "http1", feature = "http2"))]
#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))]
diff --git a/src/server/mod.rs b/src/server/mod.rs
--- a/src/server/mod.rs
+++ b/src/server/mod.rs
@@ -1,37 +1,10 @@
//! HTTP Server
//!
-//! A `Server` is created to listen on a port, parse HTTP requests, and hand
-//! them off to a `Service`.
+//! A "server" is usually created by listening on a port for new connections,
+//! parse HTTP requests, and hand them off to a `Service`.
//!
-//! There are two levels of APIs provide for constructing HTTP servers:
-//!
-//! - The higher-level [`Server`](Server) type.
-//! - The lower-level [`conn`](conn) module.
-//!
-//! # Server
-//!
-//! The [`Server`](Server) is main way to start listening for HTTP requests.
-//! It wraps a listener with a [`MakeService`](crate::service), and then should
-//! be executed to start serving requests.
-//!
-//! [`Server`](Server) accepts connections in both HTTP1 and HTTP2 by default.
-pub mod accept;
+//! How exactly you choose to listen for connections is not something hyper
+//! concerns itself with. After you have a connection, you can handle HTTP over
+//! it with the types in the [`conn`](conn) module.
pub mod conn;
-pub use self::server::Server;
-
-cfg_feature! {
- #![any(feature = "http1", feature = "http2")]
-
- pub(crate) mod server;
- pub use self::server::Builder;
-
- mod shutdown;
-}
-
-cfg_feature! {
- #![not(any(feature = "http1", feature = "http2"))]
-
- mod server_stub;
- use server_stub as server;
-}
diff --git a/src/server/server.rs /dev/null
--- a/src/server/server.rs
+++ /dev/null
@@ -1,622 +0,0 @@
-use std::error::Error as StdError;
-use std::fmt;
-#[cfg(feature = "http1")]
-use std::time::Duration;
-
-use pin_project_lite::pin_project;
-use tokio::io::{AsyncRead, AsyncWrite};
-use tracing::trace;
-
-use super::accept::Accept;
-use crate::body::{Body, HttpBody};
-use crate::common::exec::Exec;
-use crate::common::exec::{ConnStreamExec, NewSvcExec};
-use crate::common::{task, Future, Pin, Poll, Unpin};
-// Renamed `Http` as `Http_` for now so that people upgrading don't see an
-// error that `hyper::server::Http` is private...
-use super::conn::{Connection, Http as Http_, UpgradeableConnection};
-use super::shutdown::{Graceful, GracefulWatcher};
-use crate::service::{HttpService, MakeServiceRef};
-
-use self::new_svc::NewSvcTask;
-
-pin_project! {
- /// A listening HTTP server that accepts connections in both HTTP1 and HTTP2 by default.
- ///
- /// `Server` is a `Future` mapping a bound listener with a set of service
- /// handlers. It is built using the [`Builder`](Builder), and the future
- /// completes when the server has been shutdown. It should be run by an
- /// `Executor`.
- pub struct Server<I, S, E = Exec> {
- #[pin]
- incoming: I,
- make_service: S,
- protocol: Http_<E>,
- }
-}
-
-/// A builder for a [`Server`](Server).
-#[derive(Debug)]
-#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))]
-pub struct Builder<I, E = Exec> {
- incoming: I,
- protocol: Http_<E>,
-}
-
-// ===== impl Server =====
-
-#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))]
-impl<I> Server<I, ()> {
- /// Starts a [`Builder`](Builder) with the provided incoming stream.
- pub fn builder(incoming: I) -> Builder<I> {
- Builder {
- incoming,
- protocol: Http_::new(),
- }
- }
-}
-
-#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))]
-impl<I, IO, IE, S, E, B> Server<I, S, E>
-where
- I: Accept<Conn = IO, Error = IE>,
- IE: Into<Box<dyn StdError + Send + Sync>>,
- IO: AsyncRead + AsyncWrite + Unpin + Send + 'static,
- S: MakeServiceRef<IO, Body, ResBody = B>,
- S::Error: Into<Box<dyn StdError + Send + Sync>>,
- B: HttpBody + 'static,
- B::Error: Into<Box<dyn StdError + Send + Sync>>,
- E: ConnStreamExec<<S::Service as HttpService<Body>>::Future, B>,
-{
- /// Prepares a server to handle graceful shutdown when the provided future
- /// completes.
- pub fn with_graceful_shutdown<F>(self, signal: F) -> Graceful<I, S, F, E>
- where
- F: Future<Output = ()>,
- E: NewSvcExec<IO, S::Future, S::Service, E, GracefulWatcher>,
- {
- Graceful::new(self, signal)
- }
-
- fn poll_next_(
- self: Pin<&mut Self>,
- cx: &mut task::Context<'_>,
- ) -> Poll<Option<crate::Result<Connecting<IO, S::Future, E>>>> {
- let me = self.project();
- match ready!(me.make_service.poll_ready_ref(cx)) {
- Ok(()) => (),
- Err(e) => {
- trace!("make_service closed");
- return Poll::Ready(Some(Err(crate::Error::new_user_make_service(e))));
- }
- }
-
- if let Some(item) = ready!(me.incoming.poll_accept(cx)) {
- let io = item.map_err(crate::Error::new_accept)?;
- let new_fut = me.make_service.make_service_ref(&io);
- Poll::Ready(Some(Ok(Connecting {
- future: new_fut,
- io: Some(io),
- protocol: me.protocol.clone(),
- })))
- } else {
- Poll::Ready(None)
- }
- }
-
- pub(super) fn poll_watch<W>(
- mut self: Pin<&mut Self>,
- cx: &mut task::Context<'_>,
- watcher: &W,
- ) -> Poll<crate::Result<()>>
- where
- E: NewSvcExec<IO, S::Future, S::Service, E, W>,
- W: Watcher<IO, S::Service, E>,
- {
- loop {
- if let Some(connecting) = ready!(self.as_mut().poll_next_(cx)?) {
- let fut = NewSvcTask::new(connecting, watcher.clone());
- self.as_mut().project().protocol.exec.execute_new_svc(fut);
- } else {
- return Poll::Ready(Ok(()));
- }
- }
- }
-}
-
-#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))]
-impl<I, IO, IE, S, B, E> Future for Server<I, S, E>
-where
- I: Accept<Conn = IO, Error = IE>,
- IE: Into<Box<dyn StdError + Send + Sync>>,
- IO: AsyncRead + AsyncWrite + Unpin + Send + 'static,
- S: MakeServiceRef<IO, Body, ResBody = B>,
- S::Error: Into<Box<dyn StdError + Send + Sync>>,
- B: HttpBody + 'static,
- B::Error: Into<Box<dyn StdError + Send + Sync>>,
- E: ConnStreamExec<<S::Service as HttpService<Body>>::Future, B>,
- E: NewSvcExec<IO, S::Future, S::Service, E, NoopWatcher>,
-{
- type Output = crate::Result<()>;
-
- fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
- self.poll_watch(cx, &NoopWatcher)
- }
-}
-
-impl<I: fmt::Debug, S: fmt::Debug> fmt::Debug for Server<I, S> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- let mut st = f.debug_struct("Server");
- st.field("listener", &self.incoming);
- st.finish()
- }
-}
-
-// ===== impl Builder =====
-
-#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))]
-impl<I, E> Builder<I, E> {
- /// Start a new builder, wrapping an incoming stream and low-level options.
- pub fn new(incoming: I, protocol: Http_<E>) -> Self {
- Builder { incoming, protocol }
- }
-
- /// Sets whether to use keep-alive for HTTP/1 connections.
- ///
- /// Default is `true`.
- #[cfg(feature = "http1")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
- pub fn http1_keepalive(mut self, val: bool) -> Self {
- self.protocol.http1_keep_alive(val);
- self
- }
-
- /// Set whether HTTP/1 connections should support half-closures.
- ///
- /// Clients can chose to shutdown their write-side while waiting
- /// for the server to respond. Setting this to `true` will
- /// prevent closing the connection immediately if `read`
- /// detects an EOF in the middle of a request.
- ///
- /// Default is `false`.
- #[cfg(feature = "http1")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
- pub fn http1_half_close(mut self, val: bool) -> Self {
- self.protocol.http1_half_close(val);
- self
- }
-
- /// Set the maximum buffer size.
- ///
- /// Default is ~ 400kb.
- #[cfg(feature = "http1")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
- pub fn http1_max_buf_size(mut self, val: usize) -> Self {
- self.protocol.max_buf_size(val);
- self
- }
-
- // Sets whether to bunch up HTTP/1 writes until the read buffer is empty.
- //
- // This isn't really desirable in most cases, only really being useful in
- // silly pipeline benchmarks.
- #[doc(hidden)]
- #[cfg(feature = "http1")]
- pub fn http1_pipeline_flush(mut self, val: bool) -> Self {
- self.protocol.pipeline_flush(val);
- self
- }
-
- /// Set whether HTTP/1 connections should try to use vectored writes,
- /// or always flatten into a single buffer.
- ///
- /// Note that setting this to false may mean more copies of body data,
- /// but may also improve performance when an IO transport doesn't
- /// support vectored writes well, such as most TLS implementations.
- ///
- /// Setting this to true will force hyper to use queued strategy
- /// which may eliminate unnecessary cloning on some TLS backends
- ///
- /// Default is `auto`. In this mode hyper will try to guess which
- /// mode to use
- #[cfg(feature = "http1")]
- pub fn http1_writev(mut self, enabled: bool) -> Self {
- self.protocol.http1_writev(enabled);
- self
- }
-
- /// Set whether HTTP/1 connections will write header names as title case at
- /// the socket level.
- ///
- /// Note that this setting does not affect HTTP/2.
- ///
- /// Default is false.
- #[cfg(feature = "http1")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
- pub fn http1_title_case_headers(mut self, val: bool) -> Self {
- self.protocol.http1_title_case_headers(val);
- self
- }
-
- /// Set whether to support preserving original header cases.
- ///
- /// Currently, this will record the original cases received, and store them
- /// in a private extension on the `Request`. It will also look for and use
- /// such an extension in any provided `Response`.
- ///
- /// Since the relevant extension is still private, there is no way to
- /// interact with the original cases. The only effect this can have now is
- /// to forward the cases in a proxy-like fashion.
- ///
- /// Note that this setting does not affect HTTP/2.
- ///
- /// Default is false.
- #[cfg(feature = "http1")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
- pub fn http1_preserve_header_case(mut self, val: bool) -> Self {
- self.protocol.http1_preserve_header_case(val);
- self
- }
-
- /// Set a timeout for reading client request headers. If a client does not
- /// transmit the entire header within this time, the connection is closed.
- ///
- /// Default is None.
- #[cfg(all(feature = "http1", feature = "runtime"))]
- #[cfg_attr(docsrs, doc(cfg(all(feature = "http1", feature = "runtime"))))]
- pub fn http1_header_read_timeout(mut self, read_timeout: Duration) -> Self {
- self.protocol.http1_header_read_timeout(read_timeout);
- self
- }
-
- /// Sets whether HTTP/1 is required.
- ///
- /// Default is `false`.
- #[cfg(feature = "http1")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
- pub fn http1_only(mut self, val: bool) -> Self {
- self.protocol.http1_only(val);
- self
- }
-
- /// Sets whether HTTP/2 is required.
- ///
- /// Default is `false`.
- #[cfg(feature = "http2")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_only(mut self, val: bool) -> Self {
- self.protocol.http2_only(val);
- self
- }
-
- /// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2
- /// stream-level flow control.
- ///
- /// Passing `None` will do nothing.
- ///
- /// If not set, hyper will use a default.
- ///
- /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_INITIAL_WINDOW_SIZE
- #[cfg(feature = "http2")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_initial_stream_window_size(mut self, sz: impl Into<Option<u32>>) -> Self {
- self.protocol.http2_initial_stream_window_size(sz.into());
- self
- }
-
- /// Sets the max connection-level flow control for HTTP2
- ///
- /// Passing `None` will do nothing.
- ///
- /// If not set, hyper will use a default.
- #[cfg(feature = "http2")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_initial_connection_window_size(mut self, sz: impl Into<Option<u32>>) -> Self {
- self.protocol
- .http2_initial_connection_window_size(sz.into());
- self
- }
-
- /// Sets whether to use an adaptive flow control.
- ///
- /// Enabling this will override the limits set in
- /// `http2_initial_stream_window_size` and
- /// `http2_initial_connection_window_size`.
- #[cfg(feature = "http2")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_adaptive_window(mut self, enabled: bool) -> Self {
- self.protocol.http2_adaptive_window(enabled);
- self
- }
-
- /// Sets the maximum frame size to use for HTTP2.
- ///
- /// Passing `None` will do nothing.
- ///
- /// If not set, hyper will use a default.
- #[cfg(feature = "http2")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_max_frame_size(mut self, sz: impl Into<Option<u32>>) -> Self {
- self.protocol.http2_max_frame_size(sz);
- self
- }
-
- /// Sets the [`SETTINGS_MAX_CONCURRENT_STREAMS`][spec] option for HTTP2
- /// connections.
- ///
- /// Default is no limit (`std::u32::MAX`). Passing `None` will do nothing.
- ///
- /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_MAX_CONCURRENT_STREAMS
- #[cfg(feature = "http2")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_max_concurrent_streams(mut self, max: impl Into<Option<u32>>) -> Self {
- self.protocol.http2_max_concurrent_streams(max.into());
- self
- }
-
- /// Sets an interval for HTTP2 Ping frames should be sent to keep a
- /// connection alive.
- ///
- /// Pass `None` to disable HTTP2 keep-alive.
- ///
- /// Default is currently disabled.
- ///
- /// # Cargo Feature
- ///
- /// Requires the `runtime` cargo feature to be enabled.
- #[cfg(all(feature = "runtime", feature = "http2"))]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_keep_alive_interval(mut self, interval: impl Into<Option<Duration>>) -> Self {
- self.protocol.http2_keep_alive_interval(interval);
- self
- }
-
- /// Sets a timeout for receiving an acknowledgement of the keep-alive ping.
- ///
- /// If the ping is not acknowledged within the timeout, the connection will
- /// be closed. Does nothing if `http2_keep_alive_interval` is disabled.
- ///
- /// Default is 20 seconds.
- ///
- /// # Cargo Feature
- ///
- /// Requires the `runtime` cargo feature to be enabled.
- #[cfg(all(feature = "runtime", feature = "http2"))]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_keep_alive_timeout(mut self, timeout: Duration) -> Self {
- self.protocol.http2_keep_alive_timeout(timeout);
- self
- }
-
- /// Set the maximum write buffer size for each HTTP/2 stream.
- ///
- /// Default is currently ~400KB, but may change.
- ///
- /// # Panics
- ///
- /// The value must be no larger than `u32::MAX`.
- #[cfg(feature = "http2")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_max_send_buf_size(mut self, max: usize) -> Self {
- self.protocol.http2_max_send_buf_size(max);
- self
- }
-
- /// Enables the [extended CONNECT protocol].
- ///
- /// [extended CONNECT protocol]: https://datatracker.ietf.org/doc/html/rfc8441#section-4
- #[cfg(feature = "http2")]
- pub fn http2_enable_connect_protocol(mut self) -> Self {
- self.protocol.http2_enable_connect_protocol();
- self
- }
-
- /// Sets the `Executor` to deal with connection tasks.
- ///
- /// Default is `tokio::spawn`.
- pub fn executor<E2>(self, executor: E2) -> Builder<I, E2> {
- Builder {
- incoming: self.incoming,
- protocol: self.protocol.with_executor(executor),
- }
- }
-
- /// Consume this `Builder`, creating a [`Server`](Server).
- pub fn serve<S, B>(self, make_service: S) -> Server<I, S, E>
- where
- I: Accept,
- I::Error: Into<Box<dyn StdError + Send + Sync>>,
- I::Conn: AsyncRead + AsyncWrite + Unpin + Send + 'static,
- S: MakeServiceRef<I::Conn, Body, ResBody = B>,
- S::Error: Into<Box<dyn StdError + Send + Sync>>,
- B: HttpBody + 'static,
- B::Error: Into<Box<dyn StdError + Send + Sync>>,
- E: NewSvcExec<I::Conn, S::Future, S::Service, E, NoopWatcher>,
- E: ConnStreamExec<<S::Service as HttpService<Body>>::Future, B>,
- {
- Server {
- incoming: self.incoming,
- make_service,
- protocol: self.protocol.clone(),
- }
- }
-}
-
-// Used by `Server` to optionally watch a `Connection` future.
-//
-// The regular `hyper::Server` just uses a `NoopWatcher`, which does
-// not need to watch anything, and so returns the `Connection` untouched.
-//
-// The `Server::with_graceful_shutdown` needs to keep track of all active
-// connections, and signal that they start to shutdown when prompted, so
-// it has a `GracefulWatcher` implementation to do that.
-pub trait Watcher<I, S: HttpService<Body>, E>: Clone {
- type Future: Future<Output = crate::Result<()>>;
-
- fn watch(&self, conn: UpgradeableConnection<I, S, E>) -> Self::Future;
-}
-
-#[allow(missing_debug_implementations)]
-#[derive(Copy, Clone)]
-pub struct NoopWatcher;
-
-impl<I, S, E> Watcher<I, S, E> for NoopWatcher
-where
- I: AsyncRead + AsyncWrite + Unpin + Send + 'static,
- S: HttpService<Body>,
- E: ConnStreamExec<S::Future, S::ResBody>,
- S::ResBody: 'static,
- <S::ResBody as HttpBody>::Error: Into<Box<dyn StdError + Send + Sync>>,
-{
- type Future = UpgradeableConnection<I, S, E>;
-
- fn watch(&self, conn: UpgradeableConnection<I, S, E>) -> Self::Future {
- conn
- }
-}
-
-// used by exec.rs
-pub(crate) mod new_svc {
- use std::error::Error as StdError;
- use tokio::io::{AsyncRead, AsyncWrite};
- use tracing::debug;
-
- use super::{Connecting, Watcher};
- use crate::body::{Body, HttpBody};
- use crate::common::exec::ConnStreamExec;
- use crate::common::{task, Future, Pin, Poll, Unpin};
- use crate::service::HttpService;
- use pin_project_lite::pin_project;
-
- // This is a `Future<Item=(), Error=()>` spawned to an `Executor` inside
- // the `Server`. By being a nameable type, we can be generic over the
- // user's `Service::Future`, and thus an `Executor` can execute it.
- //
- // Doing this allows for the server to conditionally require `Send` futures,
- // depending on the `Executor` configured.
- //
- // Users cannot import this type, nor the associated `NewSvcExec`. Instead,
- // a blanket implementation for `Executor<impl Future>` is sufficient.
-
- pin_project! {
- #[allow(missing_debug_implementations)]
- pub struct NewSvcTask<I, N, S: HttpService<Body>, E, W: Watcher<I, S, E>> {
- #[pin]
- state: State<I, N, S, E, W>,
- }
- }
-
- pin_project! {
- #[project = StateProj]
- pub(super) enum State<I, N, S: HttpService<Body>, E, W: Watcher<I, S, E>> {
- Connecting {
- #[pin]
- connecting: Connecting<I, N, E>,
- watcher: W,
- },
- Connected {
- #[pin]
- future: W::Future,
- },
- }
- }
-
- impl<I, N, S: HttpService<Body>, E, W: Watcher<I, S, E>> NewSvcTask<I, N, S, E, W> {
- pub(super) fn new(connecting: Connecting<I, N, E>, watcher: W) -> Self {
- NewSvcTask {
- state: State::Connecting {
- connecting,
- watcher,
- },
- }
- }
- }
-
- impl<I, N, S, NE, B, E, W> Future for NewSvcTask<I, N, S, E, W>
- where
- I: AsyncRead + AsyncWrite + Unpin + Send + 'static,
- N: Future<Output = Result<S, NE>>,
- NE: Into<Box<dyn StdError + Send + Sync>>,
- S: HttpService<Body, ResBody = B>,
- B: HttpBody + 'static,
- B::Error: Into<Box<dyn StdError + Send + Sync>>,
- E: ConnStreamExec<S::Future, B>,
- W: Watcher<I, S, E>,
- {
- type Output = ();
-
- fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
- // If it weren't for needing to name this type so the `Send` bounds
- // could be projected to the `Serve` executor, this could just be
- // an `async fn`, and much safer. Woe is me.
-
- let mut me = self.project();
- loop {
- let next = {
- match me.state.as_mut().project() {
- StateProj::Connecting {
- connecting,
- watcher,
- } => {
- let res = ready!(connecting.poll(cx));
- let conn = match res {
- Ok(conn) => conn,
- Err(err) => {
- let err = crate::Error::new_user_make_service(err);
- debug!("connecting error: {}", err);
- return Poll::Ready(());
- }
- };
- let future = watcher.watch(conn.with_upgrades());
- State::Connected { future }
- }
- StateProj::Connected { future } => {
- return future.poll(cx).map(|res| {
- if let Err(err) = res {
- debug!("connection error: {}", err);
- }
- });
- }
- }
- };
-
- me.state.set(next);
- }
- }
- }
-}
-
-pin_project! {
- /// A future building a new `Service` to a `Connection`.
- ///
- /// Wraps the future returned from `MakeService` into one that returns
- /// a `Connection`.
- #[must_use = "futures do nothing unless polled"]
- #[derive(Debug)]
- #[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))]
- pub struct Connecting<I, F, E = Exec> {
- #[pin]
- future: F,
- io: Option<I>,
- protocol: Http_<E>,
- }
-}
-
-impl<I, F, S, FE, E, B> Future for Connecting<I, F, E>
-where
- I: AsyncRead + AsyncWrite + Unpin,
- F: Future<Output = Result<S, FE>>,
- S: HttpService<Body, ResBody = B>,
- B: HttpBody + 'static,
- B::Error: Into<Box<dyn StdError + Send + Sync>>,
- E: ConnStreamExec<S::Future, B>,
-{
- type Output = Result<Connection<I, S, E>, FE>;
-
- fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
- let mut me = self.project();
- let service = ready!(me.future.poll(cx))?;
- let io = Option::take(&mut me.io).expect("polled after complete");
- Poll::Ready(Ok(me.protocol.serve_connection(io, service)))
- }
-}
diff --git a/src/server/server_stub.rs /dev/null
--- a/src/server/server_stub.rs
+++ /dev/null
@@ -1,16 +0,0 @@
-use std::fmt;
-
-use crate::common::exec::Exec;
-
-/// A listening HTTP server that accepts connections in both HTTP1 and HTTP2 by default.
-///
-/// Needs at least one of the `http1` and `http2` features to be activated to actually be useful.
-pub struct Server<I, S, E = Exec> {
- _marker: std::marker::PhantomData<(I, S, E)>,
-}
-
-impl<I: fmt::Debug, S: fmt::Debug> fmt::Debug for Server<I, S> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("Server").finish()
- }
-}
diff --git a/src/server/shutdown.rs /dev/null
--- a/src/server/shutdown.rs
+++ /dev/null
@@ -1,128 +0,0 @@
-use std::error::Error as StdError;
-
-use pin_project_lite::pin_project;
-use tokio::io::{AsyncRead, AsyncWrite};
-use tracing::debug;
-
-use super::accept::Accept;
-use super::conn::UpgradeableConnection;
-use super::server::{Server, Watcher};
-use crate::body::{Body, HttpBody};
-use crate::common::drain::{self, Draining, Signal, Watch, Watching};
-use crate::common::exec::{ConnStreamExec, NewSvcExec};
-use crate::common::{task, Future, Pin, Poll, Unpin};
-use crate::service::{HttpService, MakeServiceRef};
-
-pin_project! {
- #[allow(missing_debug_implementations)]
- pub struct Graceful<I, S, F, E> {
- #[pin]
- state: State<I, S, F, E>,
- }
-}
-
-pin_project! {
- #[project = StateProj]
- pub(super) enum State<I, S, F, E> {
- Running {
- drain: Option<(Signal, Watch)>,
- #[pin]
- server: Server<I, S, E>,
- #[pin]
- signal: F,
- },
- Draining { draining: Draining },
- }
-}
-
-impl<I, S, F, E> Graceful<I, S, F, E> {
- pub(super) fn new(server: Server<I, S, E>, signal: F) -> Self {
- let drain = Some(drain::channel());
- Graceful {
- state: State::Running {
- drain,
- server,
- signal,
- },
- }
- }
-}
-
-impl<I, IO, IE, S, B, F, E> Future for Graceful<I, S, F, E>
-where
- I: Accept<Conn = IO, Error = IE>,
- IE: Into<Box<dyn StdError + Send + Sync>>,
- IO: AsyncRead + AsyncWrite + Unpin + Send + 'static,
- S: MakeServiceRef<IO, Body, ResBody = B>,
- S::Error: Into<Box<dyn StdError + Send + Sync>>,
- B: HttpBody + 'static,
- B::Error: Into<Box<dyn StdError + Send + Sync>>,
- F: Future<Output = ()>,
- E: ConnStreamExec<<S::Service as HttpService<Body>>::Future, B>,
- E: NewSvcExec<IO, S::Future, S::Service, E, GracefulWatcher>,
-{
- type Output = crate::Result<()>;
-
- fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
- let mut me = self.project();
- loop {
- let next = {
- match me.state.as_mut().project() {
- StateProj::Running {
- drain,
- server,
- signal,
- } => match signal.poll(cx) {
- Poll::Ready(()) => {
- debug!("signal received, starting graceful shutdown");
- let sig = drain.take().expect("drain channel").0;
- State::Draining {
- draining: sig.drain(),
- }
- }
- Poll::Pending => {
- let watch = drain.as_ref().expect("drain channel").1.clone();
- return server.poll_watch(cx, &GracefulWatcher(watch));
- }
- },
- StateProj::Draining { ref mut draining } => {
- return Pin::new(draining).poll(cx).map(Ok);
- }
- }
- };
- me.state.set(next);
- }
- }
-}
-
-#[allow(missing_debug_implementations)]
-#[derive(Clone)]
-pub struct GracefulWatcher(Watch);
-
-impl<I, S, E> Watcher<I, S, E> for GracefulWatcher
-where
- I: AsyncRead + AsyncWrite + Unpin + Send + 'static,
- S: HttpService<Body>,
- E: ConnStreamExec<S::Future, S::ResBody>,
- S::ResBody: 'static,
- <S::ResBody as HttpBody>::Error: Into<Box<dyn StdError + Send + Sync>>,
-{
- type Future =
- Watching<UpgradeableConnection<I, S, E>, fn(Pin<&mut UpgradeableConnection<I, S, E>>)>;
-
- fn watch(&self, conn: UpgradeableConnection<I, S, E>) -> Self::Future {
- self.0.clone().watch(conn, on_drain)
- }
-}
-
-fn on_drain<I, S, E>(conn: Pin<&mut UpgradeableConnection<I, S, E>>)
-where
- S: HttpService<Body>,
- S::Error: Into<Box<dyn StdError + Send + Sync>>,
- I: AsyncRead + AsyncWrite + Unpin,
- S::ResBody: HttpBody + 'static,
- <S::ResBody as HttpBody>::Error: Into<Box<dyn StdError + Send + Sync>>,
- E: ConnStreamExec<S::Future, S::ResBody>,
-{
- conn.graceful_shutdown()
-}
diff --git a/src/service/make.rs b/src/service/make.rs
--- a/src/service/make.rs
+++ b/src/service/make.rs
@@ -1,10 +1,6 @@
-use std::error::Error as StdError;
-use std::fmt;
-
use tokio::io::{AsyncRead, AsyncWrite};
-use super::{HttpService, Service};
-use crate::body::HttpBody;
+use super::Service;
use crate::common::{task, Future, Poll};
// The same "trait alias" as tower::MakeConnection, but inlined to reduce
diff --git a/src/service/make.rs b/src/service/make.rs
--- a/src/service/make.rs
+++ b/src/service/make.rs
@@ -38,115 +34,6 @@ where
}
}
-// Just a sort-of "trait alias" of `MakeService`, not to be implemented
-// by anyone, only used as bounds.
-pub trait MakeServiceRef<Target, ReqBody>: self::sealed::Sealed<(Target, ReqBody)> {
- type ResBody: HttpBody;
- type Error: Into<Box<dyn StdError + Send + Sync>>;
- type Service: HttpService<ReqBody, ResBody = Self::ResBody, Error = Self::Error>;
- type MakeError: Into<Box<dyn StdError + Send + Sync>>;
- type Future: Future<Output = Result<Self::Service, Self::MakeError>>;
-
- // Acting like a #[non_exhaustive] for associated types of this trait.
- //
- // Basically, no one outside of hyper should be able to set this type
- // or declare bounds on it, so it should prevent people from creating
- // trait objects or otherwise writing code that requires using *all*
- // of the associated types.
- //
- // Why? So we can add new associated types to this alias in the future,
- // if necessary.
- type __DontNameMe: self::sealed::CantImpl;
-
- fn poll_ready_ref(&mut self, cx: &mut task::Context<'_>) -> Poll<Result<(), Self::MakeError>>;
-
- fn make_service_ref(&mut self, target: &Target) -> Self::Future;
-}
-
-impl<T, Target, E, ME, S, F, IB, OB> MakeServiceRef<Target, IB> for T
-where
- T: for<'a> Service<&'a Target, Error = ME, Response = S, Future = F>,
- E: Into<Box<dyn StdError + Send + Sync>>,
- ME: Into<Box<dyn StdError + Send + Sync>>,
- S: HttpService<IB, ResBody = OB, Error = E>,
- F: Future<Output = Result<S, ME>>,
- IB: HttpBody,
- OB: HttpBody,
-{
- type Error = E;
- type Service = S;
- type ResBody = OB;
- type MakeError = ME;
- type Future = F;
-
- type __DontNameMe = self::sealed::CantName;
-
- fn poll_ready_ref(&mut self, cx: &mut task::Context<'_>) -> Poll<Result<(), Self::MakeError>> {
- self.poll_ready(cx)
- }
-
- fn make_service_ref(&mut self, target: &Target) -> Self::Future {
- self.call(target)
- }
-}
-
-impl<T, Target, S, B1, B2> self::sealed::Sealed<(Target, B1)> for T
-where
- T: for<'a> Service<&'a Target, Response = S>,
- S: HttpService<B1, ResBody = B2>,
- B1: HttpBody,
- B2: HttpBody,
-{
-}
-
-/// Create a `MakeService` from a function.
-pub fn make_service_fn<F, Target, Ret>(f: F) -> MakeServiceFn<F>
-where
- F: FnMut(&Target) -> Ret,
- Ret: Future,
-{
- MakeServiceFn { f }
-}
-
-/// `MakeService` returned from [`make_service_fn`]
-#[derive(Clone, Copy)]
-pub struct MakeServiceFn<F> {
- f: F,
-}
-
-impl<'t, F, Ret, Target, Svc, MkErr> Service<&'t Target> for MakeServiceFn<F>
-where
- F: FnMut(&Target) -> Ret,
- Ret: Future<Output = Result<Svc, MkErr>>,
- MkErr: Into<Box<dyn StdError + Send + Sync>>,
-{
- type Error = MkErr;
- type Response = Svc;
- type Future = Ret;
-
- fn poll_ready(&mut self, _cx: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>> {
- Poll::Ready(Ok(()))
- }
-
- fn call(&mut self, target: &'t Target) -> Self::Future {
- (self.f)(target)
- }
-}
-
-impl<F> fmt::Debug for MakeServiceFn<F> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("MakeServiceFn").finish()
- }
-}
-
mod sealed {
pub trait Sealed<X> {}
-
- #[allow(unreachable_pub)] // This is intentional.
- pub trait CantImpl {}
-
- #[allow(missing_debug_implementations)]
- pub enum CantName {}
-
- impl CantImpl for CantName {}
}
diff --git a/src/service/mod.rs b/src/service/mod.rs
--- a/src/service/mod.rs
+++ b/src/service/mod.rs
@@ -10,8 +10,6 @@
//!
//! - `HttpService`: This is blanketly implemented for all types that
//! implement `Service<http::Request<B1>, Response = http::Response<B2>>`.
-//! - `MakeService`: When a `Service` returns a new `Service` as its "response",
-//! we consider it a `MakeService`. Again, blanketly implemented in those cases.
//! - `MakeConnection`: A `Service` that returns a "connection", a type that
//! implements `AsyncRead` and `AsyncWrite`.
//!
diff --git a/src/service/mod.rs b/src/service/mod.rs
--- a/src/service/mod.rs
+++ b/src/service/mod.rs
@@ -24,16 +22,6 @@
//! The helper [`service_fn`](service_fn) should be sufficient for most cases, but
//! if you need to implement `Service` for a type manually, you can follow the example
//! in `service_struct_impl.rs`.
-//!
-//! # MakeService
-//!
-//! Since a `Service` is bound to a single connection, a [`Server`](crate::Server)
-//! needs a way to make them as it accepts connections. This is what a
-//! `MakeService` does.
-//!
-//! Resources that need to be shared by all `Service`s can be put into a
-//! `MakeService`, and then passed to individual `Service`s when `call`
-//! is called.
pub use tower_service::Service;
diff --git a/src/service/mod.rs b/src/service/mod.rs
--- a/src/service/mod.rs
+++ b/src/service/mod.rs
@@ -43,13 +31,11 @@ mod make;
mod oneshot;
mod util;
+#[cfg(all(any(feature = "http1", feature = "http2"), feature = "server"))]
pub(super) use self::http::HttpService;
#[cfg(all(any(feature = "http1", feature = "http2"), feature = "client"))]
pub(super) use self::make::MakeConnection;
-#[cfg(all(any(feature = "http1", feature = "http2"), feature = "server"))]
-pub(super) use self::make::MakeServiceRef;
#[cfg(all(any(feature = "http1", feature = "http2"), feature = "client"))]
pub(super) use self::oneshot::{oneshot, Oneshot};
-pub use self::make::make_service_fn;
pub use self::util::service_fn;
|
2022-07-29T23:41:48Z
| 2,932
|
Remove hyper::Server
Remove the `hyper::Server` and `Builder` types, and `Accept` trait from hyper. It's possible we can move something similar to hyper-util, but [the `Accept` trait needs some reworking](https://github.com/hyperium/hyper/blob/master/docs/ROADMAP.md#higher-level-client-and-server-problems).
|
hyperium__hyper-2932
|
diff --git a/src/common/drain.rs /dev/null
--- a/src/common/drain.rs
+++ /dev/null
@@ -1,217 +0,0 @@
-use std::mem;
-
-use pin_project_lite::pin_project;
-use tokio::sync::watch;
-
-use super::{task, Future, Pin, Poll};
-
-pub(crate) fn channel() -> (Signal, Watch) {
- let (tx, rx) = watch::channel(());
- (Signal { tx }, Watch { rx })
-}
-
-pub(crate) struct Signal {
- tx: watch::Sender<()>,
-}
-
-pub(crate) struct Draining(Pin<Box<dyn Future<Output = ()> + Send + Sync>>);
-
-#[derive(Clone)]
-pub(crate) struct Watch {
- rx: watch::Receiver<()>,
-}
-
-pin_project! {
- #[allow(missing_debug_implementations)]
- pub struct Watching<F, FN> {
- #[pin]
- future: F,
- state: State<FN>,
- watch: Pin<Box<dyn Future<Output = ()> + Send + Sync>>,
- _rx: watch::Receiver<()>,
- }
-}
-
-enum State<F> {
- Watch(F),
- Draining,
-}
-
-impl Signal {
- pub(crate) fn drain(self) -> Draining {
- let _ = self.tx.send(());
- Draining(Box::pin(async move { self.tx.closed().await }))
- }
-}
-
-impl Future for Draining {
- type Output = ();
-
- fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
- Pin::new(&mut self.as_mut().0).poll(cx)
- }
-}
-
-impl Watch {
- pub(crate) fn watch<F, FN>(self, future: F, on_drain: FN) -> Watching<F, FN>
- where
- F: Future,
- FN: FnOnce(Pin<&mut F>),
- {
- let Self { mut rx } = self;
- let _rx = rx.clone();
- Watching {
- future,
- state: State::Watch(on_drain),
- watch: Box::pin(async move {
- let _ = rx.changed().await;
- }),
- // Keep the receiver alive until the future completes, so that
- // dropping it can signal that draining has completed.
- _rx,
- }
- }
-}
-
-impl<F, FN> Future for Watching<F, FN>
-where
- F: Future,
- FN: FnOnce(Pin<&mut F>),
-{
- type Output = F::Output;
-
- fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
- let mut me = self.project();
- loop {
- match mem::replace(me.state, State::Draining) {
- State::Watch(on_drain) => {
- match Pin::new(&mut me.watch).poll(cx) {
- Poll::Ready(()) => {
- // Drain has been triggered!
- on_drain(me.future.as_mut());
- }
- Poll::Pending => {
- *me.state = State::Watch(on_drain);
- return me.future.poll(cx);
- }
- }
- }
- State::Draining => return me.future.poll(cx),
- }
- }
- }
-}
-
-#[cfg(test)]
-mod tests {
- use super::*;
-
- struct TestMe {
- draining: bool,
- finished: bool,
- poll_cnt: usize,
- }
-
- impl Future for TestMe {
- type Output = ();
-
- fn poll(mut self: Pin<&mut Self>, _: &mut task::Context<'_>) -> Poll<Self::Output> {
- self.poll_cnt += 1;
- if self.finished {
- Poll::Ready(())
- } else {
- Poll::Pending
- }
- }
- }
-
- #[test]
- fn watch() {
- let mut mock = tokio_test::task::spawn(());
- mock.enter(|cx, _| {
- let (tx, rx) = channel();
- let fut = TestMe {
- draining: false,
- finished: false,
- poll_cnt: 0,
- };
-
- let mut watch = rx.watch(fut, |mut fut| {
- fut.draining = true;
- });
-
- assert_eq!(watch.future.poll_cnt, 0);
-
- // First poll should poll the inner future
- assert!(Pin::new(&mut watch).poll(cx).is_pending());
- assert_eq!(watch.future.poll_cnt, 1);
-
- // Second poll should poll the inner future again
- assert!(Pin::new(&mut watch).poll(cx).is_pending());
- assert_eq!(watch.future.poll_cnt, 2);
-
- let mut draining = tx.drain();
- // Drain signaled, but needs another poll to be noticed.
- assert!(!watch.future.draining);
- assert_eq!(watch.future.poll_cnt, 2);
-
- // Now, poll after drain has been signaled.
- assert!(Pin::new(&mut watch).poll(cx).is_pending());
- assert_eq!(watch.future.poll_cnt, 3);
- assert!(watch.future.draining);
-
- // Draining is not ready until watcher completes
- assert!(Pin::new(&mut draining).poll(cx).is_pending());
-
- // Finishing up the watch future
- watch.future.finished = true;
- assert!(Pin::new(&mut watch).poll(cx).is_ready());
- assert_eq!(watch.future.poll_cnt, 4);
- drop(watch);
-
- assert!(Pin::new(&mut draining).poll(cx).is_ready());
- })
- }
-
- #[test]
- fn watch_clones() {
- let mut mock = tokio_test::task::spawn(());
- mock.enter(|cx, _| {
- let (tx, rx) = channel();
-
- let fut1 = TestMe {
- draining: false,
- finished: false,
- poll_cnt: 0,
- };
- let fut2 = TestMe {
- draining: false,
- finished: false,
- poll_cnt: 0,
- };
-
- let watch1 = rx.clone().watch(fut1, |mut fut| {
- fut.draining = true;
- });
- let watch2 = rx.watch(fut2, |mut fut| {
- fut.draining = true;
- });
-
- let mut draining = tx.drain();
-
- // Still 2 outstanding watchers
- assert!(Pin::new(&mut draining).poll(cx).is_pending());
-
- // drop 1 for whatever reason
- drop(watch1);
-
- // Still not ready, 1 other watcher still pending
- assert!(Pin::new(&mut draining).poll(cx).is_pending());
-
- drop(watch2);
-
- // Now all watchers are gone, draining is complete
- assert!(Pin::new(&mut draining).poll(cx).is_ready());
- });
- }
-}
|
hyperium/hyper
|
6fd696e10974f10b2c6b9d16393bbbfa21c2333f
|
|
[
"2856"
] |
0.3
|
d4b5bd4ee6af0ae8924cf05ab799cc3e19a3c62d
|
diff --git a/Cargo.toml b/Cargo.toml
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -94,12 +94,6 @@ server = []
# Tokio support
runtime = [
- "tcp",
- "tokio/rt",
- "tokio/time",
-]
-tcp = [
- "socket2",
"tokio/net",
"tokio/rt",
"tokio/time",
diff --git a/examples/client.rs b/examples/client.rs
--- a/examples/client.rs
+++ b/examples/client.rs
@@ -2,8 +2,9 @@
#![warn(rust_2018_idioms)]
use std::env;
-use hyper::{body::HttpBody as _, Client};
+use hyper::{body::HttpBody as _, Body, Request};
use tokio::io::{self, AsyncWriteExt as _};
+use tokio::net::TcpStream;
// A simple type alias so as to DRY.
type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>;
diff --git a/examples/client.rs b/examples/client.rs
--- a/examples/client.rs
+++ b/examples/client.rs
@@ -33,9 +34,20 @@ async fn main() -> Result<()> {
}
async fn fetch_url(url: hyper::Uri) -> Result<()> {
- let client = Client::new();
+ let host = url.host().expect("uri has no host");
+ let port = url.port_u16().unwrap_or(80);
+ let addr = format!("{}:{}", host, port);
+ let stream = TcpStream::connect(addr).await?;
- let mut res = client.get(url).await?;
+ let (mut sender, conn) = hyper::client::conn::handshake(stream).await?;
+ tokio::task::spawn(async move {
+ if let Err(err) = conn.await {
+ println!("Connection failed: {:?}", err);
+ }
+ });
+
+ let req = Request::builder().uri(url).body(Body::empty()).unwrap();
+ let mut res = sender.send_request(req).await?;
println!("Response: {}", res.status());
println!("Headers: {:#?}\n", res.headers());
diff --git a/examples/client_json.rs b/examples/client_json.rs
--- a/examples/client_json.rs
+++ b/examples/client_json.rs
@@ -1,9 +1,10 @@
#![deny(warnings)]
#![warn(rust_2018_idioms)]
-use hyper::body::Buf;
-use hyper::Client;
+use hyper::Body;
+use hyper::{body::Buf, Request};
use serde::Deserialize;
+use tokio::net::TcpStream;
// A simple type alias so as to DRY.
type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>;
diff --git a/examples/client_json.rs b/examples/client_json.rs
--- a/examples/client_json.rs
+++ b/examples/client_json.rs
@@ -22,10 +23,22 @@ async fn main() -> Result<()> {
}
async fn fetch_json(url: hyper::Uri) -> Result<Vec<User>> {
- let client = Client::new();
+ let host = url.host().expect("uri has no host");
+ let port = url.port_u16().unwrap_or(80);
+ let addr = format!("{}:{}", host, port);
+
+ let stream = TcpStream::connect(addr).await?;
+
+ let (mut sender, conn) = hyper::client::conn::handshake(stream).await?;
+ tokio::task::spawn(async move {
+ if let Err(err) = conn.await {
+ println!("Connection failed: {:?}", err);
+ }
+ });
// Fetch the url...
- let res = client.get(url).await?;
+ let req = Request::builder().uri(url).body(Body::empty()).unwrap();
+ let res = sender.send_request(req).await?;
// asynchronously aggregate the chunks of the body
let body = hyper::body::aggregate(res).await?;
diff --git a/examples/echo.rs b/examples/echo.rs
--- a/examples/echo.rs
+++ b/examples/echo.rs
@@ -1,7 +1,11 @@
#![deny(warnings)]
-use hyper::service::{make_service_fn, service_fn};
-use hyper::{Body, Method, Request, Response, Server, StatusCode};
+use std::net::SocketAddr;
+
+use hyper::server::conn::Http;
+use hyper::service::service_fn;
+use hyper::{Body, Method, Request, Response, StatusCode};
+use tokio::net::TcpListener;
/// This is our service handler. It receives a Request, routes on its
/// path, and returns a Future of a Response.
diff --git a/examples/echo.rs b/examples/echo.rs
--- a/examples/echo.rs
+++ b/examples/echo.rs
@@ -51,15 +55,17 @@ async fn echo(req: Request<Body>) -> Result<Response<Body>, hyper::Error> {
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
- let addr = ([127, 0, 0, 1], 3000).into();
-
- let service = make_service_fn(|_| async { Ok::<_, hyper::Error>(service_fn(echo)) });
-
- let server = Server::bind(&addr).serve(service);
+ let addr = SocketAddr::from(([127, 0, 0, 1], 3000));
+ let listener = TcpListener::bind(addr).await?;
println!("Listening on http://{}", addr);
+ loop {
+ let (stream, _) = listener.accept().await?;
- server.await?;
-
- Ok(())
+ tokio::task::spawn(async move {
+ if let Err(err) = Http::new().serve_connection(stream, service_fn(echo)).await {
+ println!("Error serving connection: {:?}", err);
+ }
+ });
+ }
}
diff --git a/examples/gateway.rs b/examples/gateway.rs
--- a/examples/gateway.rs
+++ b/examples/gateway.rs
@@ -1,51 +1,63 @@
#![deny(warnings)]
-use hyper::service::{make_service_fn, service_fn};
-use hyper::{Client, Error, Server};
+use hyper::{server::conn::Http, service::service_fn};
use std::net::SocketAddr;
+use tokio::net::{TcpListener, TcpStream};
#[tokio::main]
-async fn main() {
+async fn main() -> Result<(), Box<dyn std::error::Error>> {
pretty_env_logger::init();
- let in_addr = ([127, 0, 0, 1], 3001).into();
+ let in_addr: SocketAddr = ([127, 0, 0, 1], 3001).into();
let out_addr: SocketAddr = ([127, 0, 0, 1], 3000).into();
- let client_main = Client::new();
-
let out_addr_clone = out_addr.clone();
- // The closure inside `make_service_fn` is run for each connection,
- // creating a 'service' to handle requests for that specific connection.
- let make_service = make_service_fn(move |_| {
- let client = client_main.clone();
-
- async move {
- // This is the `Service` that will handle the connection.
- // `service_fn` is a helper to convert a function that
- // returns a Response into a `Service`.
- Ok::<_, Error>(service_fn(move |mut req| {
- let uri_string = format!(
- "http://{}{}",
- out_addr_clone,
- req.uri()
- .path_and_query()
- .map(|x| x.as_str())
- .unwrap_or("/")
- );
- let uri = uri_string.parse().unwrap();
- *req.uri_mut() = uri;
- client.request(req)
- }))
- }
- });
-
- let server = Server::bind(&in_addr).serve(make_service);
+ let listener = TcpListener::bind(in_addr).await?;
println!("Listening on http://{}", in_addr);
println!("Proxying on http://{}", out_addr);
- if let Err(e) = server.await {
- eprintln!("server error: {}", e);
+ loop {
+ let (stream, _) = listener.accept().await?;
+
+ // This is the `Service` that will handle the connection.
+ // `service_fn` is a helper to convert a function that
+ // returns a Response into a `Service`.
+ let service = service_fn(move |mut req| {
+ let uri_string = format!(
+ "http://{}{}",
+ out_addr_clone,
+ req.uri()
+ .path_and_query()
+ .map(|x| x.as_str())
+ .unwrap_or("/")
+ );
+ let uri = uri_string.parse().unwrap();
+ *req.uri_mut() = uri;
+
+ let host = req.uri().host().expect("uri has no host");
+ let port = req.uri().port_u16().unwrap_or(80);
+ let addr = format!("{}:{}", host, port);
+
+ async move {
+ let client_stream = TcpStream::connect(addr).await.unwrap();
+
+ let (mut sender, conn) = hyper::client::conn::handshake(client_stream).await?;
+ tokio::task::spawn(async move {
+ if let Err(err) = conn.await {
+ println!("Connection failed: {:?}", err);
+ }
+ });
+
+ sender.send_request(req).await
+ }
+ });
+
+ tokio::task::spawn(async move {
+ if let Err(err) = Http::new().serve_connection(stream, service).await {
+ println!("Failed to servce connection: {:?}", err);
+ }
+ });
}
}
diff --git a/examples/hello.rs b/examples/hello.rs
--- a/examples/hello.rs
+++ b/examples/hello.rs
@@ -1,9 +1,12 @@
#![deny(warnings)]
use std::convert::Infallible;
+use std::net::SocketAddr;
-use hyper::service::{make_service_fn, service_fn};
-use hyper::{Body, Request, Response, Server};
+use hyper::server::conn::Http;
+use hyper::service::service_fn;
+use hyper::{Body, Request, Response};
+use tokio::net::TcpListener;
async fn hello(_: Request<Body>) -> Result<Response<Body>, Infallible> {
Ok(Response::new(Body::from("Hello World!")))
diff --git a/examples/hello.rs b/examples/hello.rs
--- a/examples/hello.rs
+++ b/examples/hello.rs
@@ -13,22 +16,20 @@ async fn hello(_: Request<Body>) -> Result<Response<Body>, Infallible> {
pub async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
pretty_env_logger::init();
- // For every connection, we must make a `Service` to handle all
- // incoming HTTP requests on said connection.
- let make_svc = make_service_fn(|_conn| {
- // This is the `Service` that will handle the connection.
- // `service_fn` is a helper to convert a function that
- // returns a Response into a `Service`.
- async { Ok::<_, Infallible>(service_fn(hello)) }
- });
-
- let addr = ([127, 0, 0, 1], 3000).into();
-
- let server = Server::bind(&addr).serve(make_svc);
+ let addr: SocketAddr = ([127, 0, 0, 1], 3000).into();
+ let listener = TcpListener::bind(addr).await?;
println!("Listening on http://{}", addr);
-
- server.await?;
-
- Ok(())
+ loop {
+ let (stream, _) = listener.accept().await?;
+
+ tokio::task::spawn(async move {
+ if let Err(err) = Http::new()
+ .serve_connection(stream, service_fn(hello))
+ .await
+ {
+ println!("Error serving connection: {:?}", err);
+ }
+ });
+ }
}
diff --git a/examples/http_proxy.rs b/examples/http_proxy.rs
--- a/examples/http_proxy.rs
+++ b/examples/http_proxy.rs
@@ -1,15 +1,14 @@
#![deny(warnings)]
-use std::convert::Infallible;
use std::net::SocketAddr;
-use hyper::service::{make_service_fn, service_fn};
+use hyper::client::conn::Builder;
+use hyper::server::conn::Http;
+use hyper::service::service_fn;
use hyper::upgrade::Upgraded;
-use hyper::{Body, Client, Method, Request, Response, Server};
+use hyper::{Body, Method, Request, Response};
-use tokio::net::TcpStream;
-
-type HttpClient = Client<hyper::client::HttpConnector>;
+use tokio::net::{TcpListener, TcpStream};
// To try this example:
// 1. cargo run --example http_proxy
diff --git a/examples/http_proxy.rs b/examples/http_proxy.rs
--- a/examples/http_proxy.rs
+++ b/examples/http_proxy.rs
@@ -19,32 +18,29 @@ type HttpClient = Client<hyper::client::HttpConnector>;
// 3. send requests
// $ curl -i https://www.some_domain.com/
#[tokio::main]
-async fn main() {
+async fn main() -> Result<(), Box<dyn std::error::Error>> {
let addr = SocketAddr::from(([127, 0, 0, 1], 8100));
- let client = Client::builder()
- .http1_title_case_headers(true)
- .http1_preserve_header_case(true)
- .build_http();
-
- let make_service = make_service_fn(move |_| {
- let client = client.clone();
- async move { Ok::<_, Infallible>(service_fn(move |req| proxy(client.clone(), req))) }
- });
-
- let server = Server::bind(&addr)
- .http1_preserve_header_case(true)
- .http1_title_case_headers(true)
- .serve(make_service);
-
+ let listener = TcpListener::bind(addr).await?;
println!("Listening on http://{}", addr);
- if let Err(e) = server.await {
- eprintln!("server error: {}", e);
+ loop {
+ let (stream, _) = listener.accept().await?;
+
+ tokio::task::spawn(async move {
+ if let Err(err) = Http::new()
+ .http1_preserve_header_case(true)
+ .http1_title_case_headers(true)
+ .serve_connection(stream, service_fn(proxy))
+ .await
+ {
+ println!("Failed to serve connection: {:?}", err);
+ }
+ });
}
}
-async fn proxy(client: HttpClient, req: Request<Body>) -> Result<Response<Body>, hyper::Error> {
+async fn proxy(req: Request<Body>) -> Result<Response<Body>, hyper::Error> {
println!("req: {:?}", req);
if Method::CONNECT == req.method() {
diff --git a/examples/http_proxy.rs b/examples/http_proxy.rs
--- a/examples/http_proxy.rs
+++ b/examples/http_proxy.rs
@@ -82,7 +78,24 @@ async fn proxy(client: HttpClient, req: Request<Body>) -> Result<Response<Body>,
Ok(resp)
}
} else {
- client.request(req).await
+ let host = req.uri().host().expect("uri has no host");
+ let port = req.uri().port_u16().unwrap_or(80);
+ let addr = format!("{}:{}", host, port);
+
+ let stream = TcpStream::connect(addr).await.unwrap();
+
+ let (mut sender, conn) = Builder::new()
+ .http1_preserve_header_case(true)
+ .http1_title_case_headers(true)
+ .handshake(stream)
+ .await?;
+ tokio::task::spawn(async move {
+ if let Err(err) = conn.await {
+ println!("Connection failed: {:?}", err);
+ }
+ });
+
+ sender.send_request(req).await
}
}
diff --git a/examples/multi_server.rs b/examples/multi_server.rs
--- a/examples/multi_server.rs
+++ b/examples/multi_server.rs
@@ -1,9 +1,13 @@
#![deny(warnings)]
#![warn(rust_2018_idioms)]
+use std::net::SocketAddr;
+
use futures_util::future::join;
-use hyper::service::{make_service_fn, service_fn};
-use hyper::{Body, Request, Response, Server};
+use hyper::server::conn::Http;
+use hyper::service::service_fn;
+use hyper::{Body, Request, Response};
+use tokio::net::TcpListener;
static INDEX1: &[u8] = b"The 1st service!";
static INDEX2: &[u8] = b"The 2nd service!";
diff --git a/examples/multi_server.rs b/examples/multi_server.rs
--- a/examples/multi_server.rs
+++ b/examples/multi_server.rs
@@ -20,16 +24,40 @@ async fn index2(_: Request<Body>) -> Result<Response<Body>, hyper::Error> {
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
pretty_env_logger::init();
- let addr1 = ([127, 0, 0, 1], 1337).into();
- let addr2 = ([127, 0, 0, 1], 1338).into();
+ let addr1: SocketAddr = ([127, 0, 0, 1], 1337).into();
+ let addr2: SocketAddr = ([127, 0, 0, 1], 1338).into();
+
+ let srv1 = async move {
+ let listener = TcpListener::bind(addr1).await.unwrap();
+ loop {
+ let (stream, _) = listener.accept().await.unwrap();
+
+ tokio::task::spawn(async move {
+ if let Err(err) = Http::new()
+ .serve_connection(stream, service_fn(index1))
+ .await
+ {
+ println!("Error serving connection: {:?}", err);
+ }
+ });
+ }
+ };
- let srv1 = Server::bind(&addr1).serve(make_service_fn(|_| async {
- Ok::<_, hyper::Error>(service_fn(index1))
- }));
+ let srv2 = async move {
+ let listener = TcpListener::bind(addr2).await.unwrap();
+ loop {
+ let (stream, _) = listener.accept().await.unwrap();
- let srv2 = Server::bind(&addr2).serve(make_service_fn(|_| async {
- Ok::<_, hyper::Error>(service_fn(index2))
- }));
+ tokio::task::spawn(async move {
+ if let Err(err) = Http::new()
+ .serve_connection(stream, service_fn(index2))
+ .await
+ {
+ println!("Error serving connection: {:?}", err);
+ }
+ });
+ }
+ };
println!("Listening on http://{} and http://{}", addr1, addr2);
diff --git a/examples/params.rs b/examples/params.rs
--- a/examples/params.rs
+++ b/examples/params.rs
@@ -1,10 +1,13 @@
// #![deny(warnings)] // FIXME: https://github.com/rust-lang/rust/issues/62411
#![warn(rust_2018_idioms)]
-use hyper::service::{make_service_fn, service_fn};
-use hyper::{Body, Method, Request, Response, Server, StatusCode};
+use hyper::server::conn::Http;
+use hyper::service::service_fn;
+use hyper::{Body, Method, Request, Response, StatusCode};
+use tokio::net::TcpListener;
use std::collections::HashMap;
+use std::net::SocketAddr;
use url::form_urlencoded;
static INDEX: &[u8] = b"<html><body><form action=\"post\" method=\"post\">Name: <input type=\"text\" name=\"name\"><br>Number: <input type=\"text\" name=\"number\"><br><input type=\"submit\"></body></html>";
diff --git a/examples/params.rs b/examples/params.rs
--- a/examples/params.rs
+++ b/examples/params.rs
@@ -102,15 +105,20 @@ async fn param_example(req: Request<Body>) -> Result<Response<Body>, hyper::Erro
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
pretty_env_logger::init();
- let addr = ([127, 0, 0, 1], 1337).into();
-
- let server = Server::bind(&addr).serve(make_service_fn(|_| async {
- Ok::<_, hyper::Error>(service_fn(param_example))
- }));
+ let addr: SocketAddr = ([127, 0, 0, 1], 1337).into();
+ let listener = TcpListener::bind(addr).await?;
println!("Listening on http://{}", addr);
+ loop {
+ let (stream, _) = listener.accept().await?;
- server.await?;
-
- Ok(())
+ tokio::task::spawn(async move {
+ if let Err(err) = Http::new()
+ .serve_connection(stream, service_fn(param_example))
+ .await
+ {
+ println!("Error serving connection: {:?}", err);
+ }
+ });
+ }
}
diff --git a/examples/send_file.rs b/examples/send_file.rs
--- a/examples/send_file.rs
+++ b/examples/send_file.rs
@@ -1,26 +1,36 @@
#![deny(warnings)]
-use hyper::service::{make_service_fn, service_fn};
-use hyper::{Body, Method, Request, Response, Result, Server, StatusCode};
+use std::net::SocketAddr;
+
+use hyper::server::conn::Http;
+use tokio::net::TcpListener;
+
+use hyper::service::service_fn;
+use hyper::{Body, Method, Request, Response, Result, StatusCode};
static INDEX: &str = "examples/send_file_index.html";
static NOTFOUND: &[u8] = b"Not Found";
#[tokio::main]
-async fn main() {
+async fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
pretty_env_logger::init();
- let addr = "127.0.0.1:1337".parse().unwrap();
-
- let make_service =
- make_service_fn(|_| async { Ok::<_, hyper::Error>(service_fn(response_examples)) });
-
- let server = Server::bind(&addr).serve(make_service);
+ let addr: SocketAddr = "127.0.0.1:1337".parse().unwrap();
+ let listener = TcpListener::bind(addr).await?;
println!("Listening on http://{}", addr);
- if let Err(e) = server.await {
- eprintln!("server error: {}", e);
+ loop {
+ let (stream, _) = listener.accept().await?;
+
+ tokio::task::spawn(async move {
+ if let Err(err) = Http::new()
+ .serve_connection(stream, service_fn(response_examples))
+ .await
+ {
+ println!("Failed to serve connection: {:?}", err);
+ }
+ });
}
}
diff --git a/examples/service_struct_impl.rs b/examples/service_struct_impl.rs
--- a/examples/service_struct_impl.rs
+++ b/examples/service_struct_impl.rs
@@ -1,7 +1,10 @@
+use hyper::server::conn::Http;
use hyper::service::Service;
-use hyper::{Body, Request, Response, Server};
+use hyper::{Body, Request, Response};
+use tokio::net::TcpListener;
use std::future::Future;
+use std::net::SocketAddr;
use std::pin::Pin;
use std::task::{Context, Poll};
diff --git a/examples/service_struct_impl.rs b/examples/service_struct_impl.rs
--- a/examples/service_struct_impl.rs
+++ b/examples/service_struct_impl.rs
@@ -9,13 +12,23 @@ type Counter = i32;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
- let addr = ([127, 0, 0, 1], 3000).into();
+ let addr: SocketAddr = ([127, 0, 0, 1], 3000).into();
- let server = Server::bind(&addr).serve(MakeSvc { counter: 81818 });
+ let listener = TcpListener::bind(addr).await?;
println!("Listening on http://{}", addr);
- server.await?;
- Ok(())
+ loop {
+ let (stream, _) = listener.accept().await?;
+
+ tokio::task::spawn(async move {
+ if let Err(err) = Http::new()
+ .serve_connection(stream, Svc { counter: 81818 })
+ .await
+ {
+ println!("Failed to serve connection: {:?}", err);
+ }
+ });
+ }
}
struct Svc {
diff --git a/examples/service_struct_impl.rs b/examples/service_struct_impl.rs
--- a/examples/service_struct_impl.rs
+++ b/examples/service_struct_impl.rs
@@ -54,23 +67,3 @@ impl Service<Request<Body>> for Svc {
Box::pin(async { res })
}
}
-
-struct MakeSvc {
- counter: Counter,
-}
-
-impl<T> Service<T> for MakeSvc {
- type Response = Svc;
- type Error = hyper::Error;
- type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send>>;
-
- fn poll_ready(&mut self, _: &mut Context) -> Poll<Result<(), Self::Error>> {
- Poll::Ready(Ok(()))
- }
-
- fn call(&mut self, _: T) -> Self::Future {
- let counter = self.counter.clone();
- let fut = async move { Ok(Svc { counter }) };
- Box::pin(fut)
- }
-}
diff --git a/examples/single_threaded.rs b/examples/single_threaded.rs
--- a/examples/single_threaded.rs
+++ b/examples/single_threaded.rs
@@ -1,13 +1,15 @@
#![deny(warnings)]
+use hyper::server::conn::Http;
use std::cell::Cell;
+use std::net::SocketAddr;
use std::rc::Rc;
-use tokio::sync::oneshot;
+use tokio::net::TcpListener;
use hyper::body::{Bytes, HttpBody};
use hyper::header::{HeaderMap, HeaderValue};
-use hyper::service::{make_service_fn, service_fn};
-use hyper::{Error, Response, Server};
+use hyper::service::service_fn;
+use hyper::{Error, Response};
use std::marker::PhantomData;
use std::pin::Pin;
use std::task::{Context, Poll};
diff --git a/examples/single_threaded.rs b/examples/single_threaded.rs
--- a/examples/single_threaded.rs
+++ b/examples/single_threaded.rs
@@ -46,7 +48,7 @@ impl HttpBody for Body {
}
}
-fn main() {
+fn main() -> Result<(), Box<dyn std::error::Error>> {
pretty_env_logger::init();
// Configure a runtime that runs everything on the current thread
diff --git a/examples/single_threaded.rs b/examples/single_threaded.rs
--- a/examples/single_threaded.rs
+++ b/examples/single_threaded.rs
@@ -57,43 +59,39 @@ fn main() {
// Combine it with a `LocalSet, which means it can spawn !Send futures...
let local = tokio::task::LocalSet::new();
- local.block_on(&rt, run());
+ local.block_on(&rt, run())
}
-async fn run() {
- let addr = ([127, 0, 0, 1], 3000).into();
+async fn run() -> Result<(), Box<dyn std::error::Error>> {
+ let addr: SocketAddr = ([127, 0, 0, 1], 3000).into();
// Using a !Send request counter is fine on 1 thread...
let counter = Rc::new(Cell::new(0));
- let make_service = make_service_fn(move |_| {
+ let listener = TcpListener::bind(addr).await?;
+ println!("Listening on http://{}", addr);
+ loop {
+ let (stream, _) = listener.accept().await?;
+
// For each connection, clone the counter to use in our service...
let cnt = counter.clone();
- async move {
- Ok::<_, Error>(service_fn(move |_| {
- let prev = cnt.get();
- cnt.set(prev + 1);
- let value = cnt.get();
- async move { Ok::<_, Error>(Response::new(Body::from(format!("Request #{}", value)))) }
- }))
- }
- });
-
- let server = Server::bind(&addr).executor(LocalExec).serve(make_service);
-
- // Just shows that with_graceful_shutdown compiles with !Send,
- // !Sync HttpBody.
- let (_tx, rx) = oneshot::channel::<()>();
- let server = server.with_graceful_shutdown(async move {
- rx.await.ok();
- });
-
- println!("Listening on http://{}", addr);
-
- // The server would block on current thread to await !Send futures.
- if let Err(e) = server.await {
- eprintln!("server error: {}", e);
+ let service = service_fn(move |_| {
+ let prev = cnt.get();
+ cnt.set(prev + 1);
+ let value = cnt.get();
+ async move { Ok::<_, Error>(Response::new(Body::from(format!("Request #{}", value)))) }
+ });
+
+ tokio::task::spawn_local(async move {
+ if let Err(err) = Http::new()
+ .with_executor(LocalExec)
+ .serve_connection(stream, service)
+ .await
+ {
+ println!("Error serving connection: {:?}", err);
+ }
+ });
}
}
diff --git a/examples/state.rs b/examples/state.rs
--- a/examples/state.rs
+++ b/examples/state.rs
@@ -1,52 +1,46 @@
#![deny(warnings)]
+use std::net::SocketAddr;
use std::sync::{
atomic::{AtomicUsize, Ordering},
Arc,
};
-use hyper::service::{make_service_fn, service_fn};
-use hyper::{Body, Error, Response, Server};
+use hyper::{server::conn::Http, service::service_fn};
+use hyper::{Body, Error, Response};
+use tokio::net::TcpListener;
#[tokio::main]
-async fn main() {
+async fn main() -> Result<(), Box<dyn std::error::Error>> {
pretty_env_logger::init();
- let addr = ([127, 0, 0, 1], 3000).into();
+ let addr: SocketAddr = ([127, 0, 0, 1], 3000).into();
// For the most basic of state, we just share a counter, that increments
// with each request, and we send its value back in the response.
let counter = Arc::new(AtomicUsize::new(0));
- // The closure inside `make_service_fn` is run for each connection,
- // creating a 'service' to handle requests for that specific connection.
- let make_service = make_service_fn(move |_| {
- // While the state was moved into the make_service closure,
- // we need to clone it here because this closure is called
- // once for every connection.
- //
+ let listener = TcpListener::bind(addr).await?;
+ println!("Listening on http://{}", addr);
+ loop {
+ let (stream, _) = listener.accept().await?;
+
// Each connection could send multiple requests, so
// the `Service` needs a clone to handle later requests.
let counter = counter.clone();
- async move {
- // This is the `Service` that will handle the connection.
- // `service_fn` is a helper to convert a function that
- // returns a Response into a `Service`.
- Ok::<_, Error>(service_fn(move |_req| {
- // Get the current count, and also increment by 1, in a single
- // atomic operation.
- let count = counter.fetch_add(1, Ordering::AcqRel);
- async move { Ok::<_, Error>(Response::new(Body::from(format!("Request #{}", count)))) }
- }))
+ // This is the `Service` that will handle the connection.
+ // `service_fn` is a helper to convert a function that
+ // returns a Response into a `Service`.
+ let service = service_fn(move |_req| {
+ // Get the current count, and also increment by 1, in a single
+ // atomic operation.
+ let count = counter.fetch_add(1, Ordering::AcqRel);
+ async move { Ok::<_, Error>(Response::new(Body::from(format!("Request #{}", count)))) }
+ });
+
+ if let Err(err) = Http::new().serve_connection(stream, service).await {
+ println!("Error serving connection: {:?}", err);
}
- });
-
- let server = Server::bind(&addr).serve(make_service);
-
- println!("Listening on http://{}", addr);
-
- if let Err(e) = server.await {
- eprintln!("server error: {}", e);
}
}
diff --git a/examples/tower_client.rs b/examples/tower_client.rs
--- a/examples/tower_client.rs
+++ b/examples/tower_client.rs
@@ -1,20 +1,20 @@
#![deny(warnings)]
-use hyper::client::conn::Builder;
-use hyper::client::connect::HttpConnector;
-use hyper::client::service::Connect;
+use std::future::Future;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
use hyper::service::Service;
-use hyper::{Body, Request};
+use hyper::{Body, Request, Response};
+use tokio::net::TcpStream;
#[tokio::main]
-async fn main() -> Result<(), Box<dyn std::error::Error>> {
+async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync + 'static>> {
pretty_env_logger::init();
- let mut mk_svc = Connect::new(HttpConnector::new(), Builder::new());
-
let uri = "http://127.0.0.1:8080".parse::<http::Uri>()?;
- let mut svc = mk_svc.call(uri.clone()).await?;
+ let mut svc = Connector;
let body = Body::empty();
diff --git a/examples/tower_client.rs b/examples/tower_client.rs
--- a/examples/tower_client.rs
+++ b/examples/tower_client.rs
@@ -25,3 +25,35 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
Ok(())
}
+
+struct Connector;
+
+impl Service<Request<Body>> for Connector {
+ type Response = Response<Body>;
+ type Error = Box<dyn std::error::Error + Send + Sync + 'static>;
+ type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>>>>;
+
+ fn poll_ready(&mut self, _cx: &mut Context<'_>) -> std::task::Poll<Result<(), Self::Error>> {
+ Poll::Ready(Ok(()))
+ }
+
+ fn call(&mut self, req: Request<Body>) -> Self::Future {
+ Box::pin(async move {
+ let host = req.uri().host().expect("no host in uri");
+ let port = req.uri().port_u16().expect("no port in uri");
+
+ let stream = TcpStream::connect(format!("{}:{}", host, port)).await?;
+
+ let (mut sender, conn) = hyper::client::conn::handshake(stream).await?;
+
+ tokio::task::spawn(async move {
+ if let Err(err) = conn.await {
+ println!("Connection error: {:?}", err);
+ }
+ });
+
+ let res = sender.send_request(req).await?;
+ Ok(res)
+ })
+ }
+}
diff --git a/examples/tower_server.rs b/examples/tower_server.rs
--- a/examples/tower_server.rs
+++ b/examples/tower_server.rs
@@ -1,10 +1,13 @@
#![deny(warnings)]
+use std::net::SocketAddr;
use std::task::{Context, Poll};
use futures_util::future;
+use hyper::server::conn::Http;
use hyper::service::Service;
-use hyper::{Body, Request, Response, Server};
+use hyper::{Body, Request, Response};
+use tokio::net::TcpListener;
const ROOT: &str = "/";
diff --git a/examples/tower_server.rs b/examples/tower_server.rs
--- a/examples/tower_server.rs
+++ b/examples/tower_server.rs
@@ -36,33 +39,22 @@ impl Service<Request<Body>> for Svc {
}
}
-pub struct MakeSvc;
-
-impl<T> Service<T> for MakeSvc {
- type Response = Svc;
- type Error = std::io::Error;
- type Future = future::Ready<Result<Self::Response, Self::Error>>;
-
- fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- Ok(()).into()
- }
-
- fn call(&mut self, _: T) -> Self::Future {
- future::ok(Svc)
- }
-}
-
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
pretty_env_logger::init();
- let addr = "127.0.0.1:1337".parse().unwrap();
-
- let server = Server::bind(&addr).serve(MakeSvc);
+ let addr: SocketAddr = "127.0.0.1:1337".parse().unwrap();
+ let listener = TcpListener::bind(addr).await?;
println!("Listening on http://{}", addr);
- server.await?;
+ loop {
+ let (stream, _) = listener.accept().await?;
- Ok(())
+ tokio::task::spawn(async move {
+ if let Err(err) = Http::new().serve_connection(stream, Svc).await {
+ println!("Failed to serve connection: {:?}", err);
+ }
+ });
+ }
}
diff --git a/examples/upgrades.rs b/examples/upgrades.rs
--- a/examples/upgrades.rs
+++ b/examples/upgrades.rs
@@ -3,13 +3,15 @@
// Note: `hyper::upgrade` docs link to this upgrade.
use std::str;
+use hyper::server::conn::Http;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
-use tokio::sync::oneshot;
+use tokio::net::{TcpListener, TcpStream};
+use tokio::sync::watch;
use hyper::header::{HeaderValue, UPGRADE};
-use hyper::service::{make_service_fn, service_fn};
+use hyper::service::service_fn;
use hyper::upgrade::Upgraded;
-use hyper::{Body, Client, Request, Response, Server, StatusCode};
+use hyper::{Body, Request, Response, StatusCode};
use std::net::SocketAddr;
// A simple type alias so as to DRY.
diff --git a/examples/upgrades.rs b/examples/upgrades.rs
--- a/examples/upgrades.rs
+++ b/examples/upgrades.rs
@@ -92,7 +94,17 @@ async fn client_upgrade_request(addr: SocketAddr) -> Result<()> {
.body(Body::empty())
.unwrap();
- let res = Client::new().request(req).await?;
+ let stream = TcpStream::connect(addr).await?;
+ let (mut sender, conn) = hyper::client::conn::handshake(stream).await?;
+
+ tokio::task::spawn(async move {
+ if let Err(err) = conn.await {
+ println!("Connection failed: {:?}", err);
+ }
+ });
+
+ let res = sender.send_request(req).await?;
+
if res.status() != StatusCode::SWITCHING_PROTOCOLS {
panic!("Our server didn't upgrade: {}", res.status());
}
diff --git a/examples/upgrades.rs b/examples/upgrades.rs
--- a/examples/upgrades.rs
+++ b/examples/upgrades.rs
@@ -114,28 +126,52 @@ async fn main() {
// For this example, we just make a server and our own client to talk to
// it, so the exact port isn't important. Instead, let the OS give us an
// unused port.
- let addr = ([127, 0, 0, 1], 0).into();
-
- let make_service =
- make_service_fn(|_| async { Ok::<_, hyper::Error>(service_fn(server_upgrade)) });
+ let addr: SocketAddr = ([127, 0, 0, 1], 0).into();
- let server = Server::bind(&addr).serve(make_service);
+ let listener = TcpListener::bind(addr).await.expect("failed to bind");
// We need the assigned address for the client to send it messages.
- let addr = server.local_addr();
+ let addr = listener.local_addr().unwrap();
// For this example, a oneshot is used to signal that after 1 request,
// the server should be shutdown.
- let (tx, rx) = oneshot::channel::<()>();
- let server = server.with_graceful_shutdown(async move {
- rx.await.ok();
- });
+ let (tx, mut rx) = watch::channel(false);
// Spawn server on the default executor,
// which is usually a thread-pool from tokio default runtime.
tokio::task::spawn(async move {
- if let Err(e) = server.await {
- eprintln!("server error: {}", e);
+ loop {
+ tokio::select! {
+ res = listener.accept() => {
+ let (stream, _) = res.expect("Failed to accept");
+
+ let mut rx = rx.clone();
+ tokio::task::spawn(async move {
+ let conn = Http::new().serve_connection(stream, service_fn(server_upgrade));
+
+ // Don't forget to enable upgrades on the connection.
+ let mut conn = conn.with_upgrades();
+
+ let mut conn = Pin::new(&mut conn);
+
+ tokio::select! {
+ res = &mut conn => {
+ if let Err(err) = res {
+ println!("Error serving connection: {:?}", err);
+ return;
+ }
+ }
+ // Continue polling the connection after enabling graceful shutdown.
+ _ = rx.changed() => {
+ conn.graceful_shutdown();
+ }
+ }
+ });
+ }
+ _ = rx.changed() => {
+ break;
+ }
+ }
}
});
diff --git a/examples/upgrades.rs b/examples/upgrades.rs
--- a/examples/upgrades.rs
+++ b/examples/upgrades.rs
@@ -147,5 +183,5 @@ async fn main() {
// Complete the oneshot so that the server stops
// listening and the process can close down.
- let _ = tx.send(());
+ let _ = tx.send(true);
}
diff --git a/examples/web_api.rs b/examples/web_api.rs
--- a/examples/web_api.rs
+++ b/examples/web_api.rs
@@ -1,9 +1,12 @@
#![deny(warnings)]
+use std::net::SocketAddr;
+
use bytes::Buf;
-use hyper::client::HttpConnector;
-use hyper::service::{make_service_fn, service_fn};
-use hyper::{header, Body, Client, Method, Request, Response, Server, StatusCode};
+use hyper::server::conn::Http;
+use hyper::service::service_fn;
+use hyper::{header, Body, Method, Request, Response, StatusCode};
+use tokio::net::{TcpListener, TcpStream};
type GenericError = Box<dyn std::error::Error + Send + Sync>;
type Result<T> = std::result::Result<T, GenericError>;
diff --git a/examples/web_api.rs b/examples/web_api.rs
--- a/examples/web_api.rs
+++ b/examples/web_api.rs
@@ -14,7 +17,7 @@ static NOTFOUND: &[u8] = b"Not Found";
static POST_DATA: &str = r#"{"original": "data"}"#;
static URL: &str = "http://127.0.0.1:1337/json_api";
-async fn client_request_response(client: &Client<HttpConnector>) -> Result<Response<Body>> {
+async fn client_request_response() -> Result<Response<Body>> {
let req = Request::builder()
.method(Method::POST)
.uri(URL)
diff --git a/examples/web_api.rs b/examples/web_api.rs
--- a/examples/web_api.rs
+++ b/examples/web_api.rs
@@ -22,7 +25,19 @@ async fn client_request_response(client: &Client<HttpConnector>) -> Result<Respo
.body(POST_DATA.into())
.unwrap();
- let web_res = client.request(req).await?;
+ let host = req.uri().host().expect("uri has no host");
+ let port = req.uri().port_u16().expect("uri has no port");
+ let stream = TcpStream::connect(format!("{}:{}", host, port)).await?;
+
+ let (mut sender, conn) = hyper::client::conn::handshake(stream).await?;
+
+ tokio::task::spawn(async move {
+ if let Err(err) = conn.await {
+ println!("Connection error: {:?}", err);
+ }
+ });
+
+ let web_res = sender.send_request(req).await?;
let res_body = web_res.into_body();
diff --git a/examples/web_api.rs b/examples/web_api.rs
--- a/examples/web_api.rs
+++ b/examples/web_api.rs
@@ -83,27 +95,19 @@ async fn response_examples(
async fn main() -> Result<()> {
pretty_env_logger::init();
- let addr = "127.0.0.1:1337".parse().unwrap();
-
- // Share a `Client` with all `Service`s
- let client = Client::new();
-
- let new_service = make_service_fn(move |_| {
- // Move a clone of `client` into the `service_fn`.
- let client = client.clone();
- async {
- Ok::<_, GenericError>(service_fn(move |req| {
- // Clone again to ensure that client outlives this closure.
- response_examples(req, client.to_owned())
- }))
- }
- });
-
- let server = Server::bind(&addr).serve(new_service);
+ let addr: SocketAddr = "127.0.0.1:1337".parse().unwrap();
+ let listener = TcpListener::bind(&addr).await?;
println!("Listening on http://{}", addr);
+ loop {
+ let (stream, _) = listener.accept().await?;
- server.await?;
+ tokio::task::spawn(async move {
+ let service = service_fn(move |req| response_examples(req));
- Ok(())
+ if let Err(err) = Http::new().serve_connection(stream, service).await {
+ println!("Failed to serve connection: {:?}", err);
+ }
+ });
+ }
}
diff --git a/src/body/to_bytes.rs b/src/body/to_bytes.rs
--- a/src/body/to_bytes.rs
+++ b/src/body/to_bytes.rs
@@ -17,17 +17,11 @@ use super::HttpBody;
/// # Example
///
/// ```
-/// # #[cfg(all(feature = "client", feature = "tcp", any(feature = "http1", feature = "http2")))]
/// # async fn doc() -> hyper::Result<()> {
-/// use hyper::{body::HttpBody};
-///
-/// # let request = hyper::Request::builder()
-/// # .method(hyper::Method::POST)
-/// # .uri("http://httpbin.org/post")
-/// # .header("content-type", "application/json")
-/// # .body(hyper::Body::from(r#"{"library":"hyper"}"#)).unwrap();
-/// # let client = hyper::Client::new();
-/// let response = client.request(request).await?;
+/// # use hyper::{Body, Response};
+/// # use hyper::body::HttpBody;
+/// #
+/// let response = Response::new(Body::from("response body"));
///
/// const MAX_ALLOWED_RESPONSE_SIZE: u64 = 1024;
///
diff --git a/src/client/client.rs b/src/client/client.rs
--- a/src/client/client.rs
+++ b/src/client/client.rs
@@ -15,10 +15,11 @@ use super::connect::{self, sealed::Connect, Alpn, Connected, Connection};
use super::pool::{
self, CheckoutIsClosedError, Key as PoolKey, Pool, Poolable, Pooled, Reservation,
};
-#[cfg(feature = "tcp")]
-use super::HttpConnector;
use crate::body::{Body, HttpBody};
-use crate::common::{exec::BoxSendFuture, sync_wrapper::SyncWrapper, lazy as hyper_lazy, task, Future, Lazy, Pin, Poll};
+use crate::common::{
+ exec::BoxSendFuture, lazy as hyper_lazy, sync_wrapper::SyncWrapper, task, Future, Lazy, Pin,
+ Poll,
+};
use crate::rt::Executor;
/// A Client to make outgoing HTTP requests.
diff --git a/src/client/client.rs b/src/client/client.rs
--- a/src/client/client.rs
+++ b/src/client/client.rs
@@ -50,49 +51,8 @@ pub struct ResponseFuture {
// ===== impl Client =====
-#[cfg(feature = "tcp")]
-impl Client<HttpConnector, Body> {
- /// Create a new Client with the default [config](Builder).
- ///
- /// # Note
- ///
- /// The default connector does **not** handle TLS. Speaking to `https`
- /// destinations will require [configuring a connector that implements
- /// TLS](https://hyper.rs/guides/client/configuration).
- #[cfg_attr(docsrs, doc(cfg(feature = "tcp")))]
- #[inline]
- pub fn new() -> Client<HttpConnector, Body> {
- Builder::default().build_http()
- }
-}
-
-#[cfg(feature = "tcp")]
-impl Default for Client<HttpConnector, Body> {
- fn default() -> Client<HttpConnector, Body> {
- Client::new()
- }
-}
-
impl Client<(), Body> {
/// Create a builder to configure a new `Client`.
- ///
- /// # Example
- ///
- /// ```
- /// # #[cfg(feature = "runtime")]
- /// # fn run () {
- /// use std::time::Duration;
- /// use hyper::Client;
- ///
- /// let client = Client::builder()
- /// .pool_idle_timeout(Duration::from_secs(30))
- /// .http2_only(true)
- /// .build_http();
- /// # let infer: Client<_, hyper::Body> = client;
- /// # drop(infer);
- /// # }
- /// # fn main() {}
- /// ```
#[inline]
pub fn builder() -> Builder {
Builder::default()
diff --git a/src/client/client.rs b/src/client/client.rs
--- a/src/client/client.rs
+++ b/src/client/client.rs
@@ -113,20 +73,6 @@ where
/// This requires that the `HttpBody` type have a `Default` implementation.
/// It *should* return an "empty" version of itself, such that
/// `HttpBody::is_end_stream` is `true`.
- ///
- /// # Example
- ///
- /// ```
- /// # #[cfg(feature = "runtime")]
- /// # fn run () {
- /// use hyper::{Client, Uri};
- ///
- /// let client = Client::new();
- ///
- /// let future = client.get(Uri::from_static("http://httpbin.org/ip"));
- /// # }
- /// # fn main() {}
- /// ```
pub fn get(&self, uri: Uri) -> ResponseFuture
where
B: Default,
diff --git a/src/client/client.rs b/src/client/client.rs
--- a/src/client/client.rs
+++ b/src/client/client.rs
@@ -142,26 +88,6 @@ where
}
/// Send a constructed `Request` using this `Client`.
- ///
- /// # Example
- ///
- /// ```
- /// # #[cfg(feature = "runtime")]
- /// # fn run () {
- /// use hyper::{Body, Method, Client, Request};
- ///
- /// let client = Client::new();
- ///
- /// let req = Request::builder()
- /// .method(Method::POST)
- /// .uri("http://httpbin.org/post")
- /// .body(Body::from("Hallo!"))
- /// .expect("request builder");
- ///
- /// let future = client.request(req);
- /// # }
- /// # fn main() {}
- /// ```
pub fn request(&self, mut req: Request<B>) -> ResponseFuture {
let is_http_connect = req.method() == Method::CONNECT;
match req.version() {
diff --git a/src/client/client.rs b/src/client/client.rs
--- a/src/client/client.rs
+++ b/src/client/client.rs
@@ -586,7 +512,7 @@ impl ResponseFuture {
F: Future<Output = crate::Result<Response<Body>>> + Send + 'static,
{
Self {
- inner: SyncWrapper::new(Box::pin(value))
+ inner: SyncWrapper::new(Box::pin(value)),
}
}
diff --git a/src/client/client.rs b/src/client/client.rs
--- a/src/client/client.rs
+++ b/src/client/client.rs
@@ -872,24 +798,6 @@ fn is_schema_secure(uri: &Uri) -> bool {
}
/// A builder to configure a new [`Client`](Client).
-///
-/// # Example
-///
-/// ```
-/// # #[cfg(feature = "runtime")]
-/// # fn run () {
-/// use std::time::Duration;
-/// use hyper::Client;
-///
-/// let client = Client::builder()
-/// .pool_idle_timeout(Duration::from_secs(30))
-/// .http2_only(true)
-/// .build_http();
-/// # let infer: Client<_, hyper::Body> = client;
-/// # drop(infer);
-/// # }
-/// # fn main() {}
-/// ```
#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))]
#[derive(Clone)]
pub struct Builder {
diff --git a/src/client/client.rs b/src/client/client.rs
--- a/src/client/client.rs
+++ b/src/client/client.rs
@@ -1316,20 +1224,6 @@ impl Builder {
self
}
- /// Builder a client with this configuration and the default `HttpConnector`.
- #[cfg(feature = "tcp")]
- pub fn build_http<B>(&self) -> Client<HttpConnector, B>
- where
- B: HttpBody + Send,
- B::Data: Send,
- {
- let mut connector = HttpConnector::new();
- if self.pool_config.is_enabled() {
- connector.set_keepalive(self.pool_config.idle_timeout);
- }
- self.build(connector)
- }
-
/// Combine the configuration of this builder with a connector to create a `Client`.
pub fn build<C, B>(&self, connector: C) -> Client<C, B>
where
diff --git a/src/client/connect/dns.rs b/src/client/connect/dns.rs
--- a/src/client/connect/dns.rs
+++ b/src/client/connect/dns.rs
@@ -22,18 +22,9 @@
//! });
//! ```
use std::error::Error;
-use std::future::Future;
-use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6, ToSocketAddrs};
-use std::pin::Pin;
+use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr};
use std::str::FromStr;
-use std::task::{self, Poll};
-use std::{fmt, io, vec};
-
-use tokio::task::JoinHandle;
-use tower_service::Service;
-use tracing::debug;
-
-pub(super) use self::sealed::Resolve;
+use std::{fmt, vec};
/// A domain name to resolve into IP addresses.
#[derive(Clone, Hash, Eq, PartialEq)]
diff --git a/src/client/connect/dns.rs b/src/client/connect/dns.rs
--- a/src/client/connect/dns.rs
+++ b/src/client/connect/dns.rs
@@ -52,11 +43,6 @@ pub struct GaiAddrs {
inner: SocketAddrs,
}
-/// A future to resolve a name returned by `GaiResolver`.
-pub struct GaiFuture {
- inner: JoinHandle<Result<SocketAddrs, io::Error>>,
-}
-
impl Name {
pub(super) fn new(host: Box<str>) -> Name {
Name { host }
diff --git a/src/client/connect/dns.rs b/src/client/connect/dns.rs
--- a/src/client/connect/dns.rs
+++ b/src/client/connect/dns.rs
@@ -108,63 +94,12 @@ impl GaiResolver {
}
}
-impl Service<Name> for GaiResolver {
- type Response = GaiAddrs;
- type Error = io::Error;
- type Future = GaiFuture;
-
- fn poll_ready(&mut self, _cx: &mut task::Context<'_>) -> Poll<Result<(), io::Error>> {
- Poll::Ready(Ok(()))
- }
-
- fn call(&mut self, name: Name) -> Self::Future {
- let blocking = tokio::task::spawn_blocking(move || {
- debug!("resolving host={:?}", name.host);
- (&*name.host, 0)
- .to_socket_addrs()
- .map(|i| SocketAddrs { iter: i })
- });
-
- GaiFuture { inner: blocking }
- }
-}
-
impl fmt::Debug for GaiResolver {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.pad("GaiResolver")
}
}
-impl Future for GaiFuture {
- type Output = Result<GaiAddrs, io::Error>;
-
- fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
- Pin::new(&mut self.inner).poll(cx).map(|res| match res {
- Ok(Ok(addrs)) => Ok(GaiAddrs { inner: addrs }),
- Ok(Err(err)) => Err(err),
- Err(join_err) => {
- if join_err.is_cancelled() {
- Err(io::Error::new(io::ErrorKind::Interrupted, join_err))
- } else {
- panic!("gai background task failed: {:?}", join_err)
- }
- }
- })
- }
-}
-
-impl fmt::Debug for GaiFuture {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.pad("GaiFuture")
- }
-}
-
-impl Drop for GaiFuture {
- fn drop(&mut self) {
- self.inner.abort();
- }
-}
-
impl Iterator for GaiAddrs {
type Item = SocketAddr;
diff --git a/src/client/connect/dns.rs b/src/client/connect/dns.rs
--- a/src/client/connect/dns.rs
+++ b/src/client/connect/dns.rs
@@ -190,22 +125,6 @@ impl SocketAddrs {
}
}
- pub(super) fn try_parse(host: &str, port: u16) -> Option<SocketAddrs> {
- if let Ok(addr) = host.parse::<Ipv4Addr>() {
- let addr = SocketAddrV4::new(addr, port);
- return Some(SocketAddrs {
- iter: vec![SocketAddr::V4(addr)].into_iter(),
- });
- }
- if let Ok(addr) = host.parse::<Ipv6Addr>() {
- let addr = SocketAddrV6::new(addr, port, 0, 0);
- return Some(SocketAddrs {
- iter: vec![SocketAddr::V6(addr)].into_iter(),
- });
- }
- None
- }
-
#[inline]
fn filter(self, predicate: impl FnMut(&SocketAddr) -> bool) -> SocketAddrs {
SocketAddrs::new(self.iter.filter(predicate).collect())
diff --git a/src/client/connect/dns.rs b/src/client/connect/dns.rs
--- a/src/client/connect/dns.rs
+++ b/src/client/connect/dns.rs
@@ -239,10 +158,6 @@ impl SocketAddrs {
pub(super) fn is_empty(&self) -> bool {
self.iter.as_slice().is_empty()
}
-
- pub(super) fn len(&self) -> usize {
- self.iter.as_slice().len()
- }
}
impl Iterator for SocketAddrs {
diff --git a/src/client/connect/dns.rs b/src/client/connect/dns.rs
--- a/src/client/connect/dns.rs
+++ b/src/client/connect/dns.rs
@@ -318,12 +233,12 @@ impl Future for TokioThreadpoolGaiFuture {
*/
mod sealed {
- use super::{SocketAddr, Name};
+ use super::{Name, SocketAddr};
use crate::common::{task, Future, Poll};
use tower_service::Service;
// "Trait alias" for `Service<Name, Response = Addrs>`
- pub trait Resolve {
+ pub(crate) trait Resolve {
type Addrs: Iterator<Item = SocketAddr>;
type Error: Into<Box<dyn std::error::Error + Send + Sync>>;
type Future: Future<Output = Result<Self::Addrs, Self::Error>>;
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -1,23 +1,9 @@
use std::error::Error as StdError;
use std::fmt;
-use std::future::Future;
-use std::io;
-use std::marker::PhantomData;
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr};
-use std::pin::Pin;
use std::sync::Arc;
-use std::task::{self, Poll};
use std::time::Duration;
-use futures_util::future::Either;
-use http::uri::{Scheme, Uri};
-use pin_project_lite::pin_project;
-use tokio::net::{TcpSocket, TcpStream};
-use tokio::time::Sleep;
-use tracing::{debug, trace, warn};
-
-use super::dns::{self, resolve, GaiResolver, Resolve};
-use super::{Connected, Connection};
//#[cfg(feature = "runtime")] use super::dns::TokioThreadpoolGaiResolver;
/// A connector for the `http` scheme.
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -28,36 +14,13 @@ use super::{Connected, Connection};
///
/// Sets the [`HttpInfo`](HttpInfo) value on responses, which includes
/// transport information such as the remote socket address used.
-#[cfg_attr(docsrs, doc(cfg(feature = "tcp")))]
#[derive(Clone)]
-pub struct HttpConnector<R = GaiResolver> {
+pub struct HttpConnector {
config: Arc<Config>,
- resolver: R,
}
/// Extra information about the transport when an HttpConnector is used.
///
-/// # Example
-///
-/// ```
-/// # async fn doc() -> hyper::Result<()> {
-/// use hyper::Uri;
-/// use hyper::client::{Client, connect::HttpInfo};
-///
-/// let client = Client::new();
-/// let uri = Uri::from_static("http://example.com");
-///
-/// let res = client.get(uri).await?;
-/// res
-/// .extensions()
-/// .get::<HttpInfo>()
-/// .map(|info| {
-/// println!("remote addr = {}", info.remote_addr());
-/// });
-/// # Ok(())
-/// # }
-/// ```
-///
/// # Note
///
/// If a different connector is used besides [`HttpConnector`](HttpConnector),
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -88,27 +51,6 @@ struct Config {
impl HttpConnector {
/// Construct a new HttpConnector.
pub fn new() -> HttpConnector {
- HttpConnector::new_with_resolver(GaiResolver::new())
- }
-}
-
-/*
-#[cfg(feature = "runtime")]
-impl HttpConnector<TokioThreadpoolGaiResolver> {
- /// Construct a new HttpConnector using the `TokioThreadpoolGaiResolver`.
- ///
- /// This resolver **requires** the threadpool runtime to be used.
- pub fn new_with_tokio_threadpool_resolver() -> Self {
- HttpConnector::new_with_resolver(TokioThreadpoolGaiResolver::new())
- }
-}
-*/
-
-impl<R> HttpConnector<R> {
- /// Construct a new HttpConnector.
- ///
- /// Takes a [`Resolver`](crate::client::connect::dns#resolvers-are-services) to handle DNS lookups.
- pub fn new_with_resolver(resolver: R) -> HttpConnector<R> {
HttpConnector {
config: Arc::new(Config {
connect_timeout: None,
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -122,10 +64,23 @@ impl<R> HttpConnector<R> {
send_buffer_size: None,
recv_buffer_size: None,
}),
- resolver,
}
}
+}
+
+/*
+#[cfg(feature = "runtime")]
+impl HttpConnector<TokioThreadpoolGaiResolver> {
+ /// Construct a new HttpConnector using the `TokioThreadpoolGaiResolver`.
+ ///
+ /// This resolver **requires** the threadpool runtime to be used.
+ pub fn new_with_tokio_threadpool_resolver() -> Self {
+ HttpConnector::new_with_resolver(TokioThreadpoolGaiResolver::new())
+ }
+}
+*/
+impl HttpConnector {
/// Option to enforce all `Uri`s have the `http` scheme.
///
/// Enabled by default.
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -240,135 +195,13 @@ impl<R> HttpConnector<R> {
}
}
-static INVALID_NOT_HTTP: &str = "invalid URL, scheme is not http";
-static INVALID_MISSING_SCHEME: &str = "invalid URL, scheme is missing";
-static INVALID_MISSING_HOST: &str = "invalid URL, host is missing";
-
// R: Debug required for now to allow adding it to debug output later...
-impl<R: fmt::Debug> fmt::Debug for HttpConnector<R> {
+impl fmt::Debug for HttpConnector {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("HttpConnector").finish()
}
}
-impl<R> tower_service::Service<Uri> for HttpConnector<R>
-where
- R: Resolve + Clone + Send + Sync + 'static,
- R::Future: Send,
-{
- type Response = TcpStream;
- type Error = ConnectError;
- type Future = HttpConnecting<R>;
-
- fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>> {
- ready!(self.resolver.poll_ready(cx)).map_err(ConnectError::dns)?;
- Poll::Ready(Ok(()))
- }
-
- fn call(&mut self, dst: Uri) -> Self::Future {
- let mut self_ = self.clone();
- HttpConnecting {
- fut: Box::pin(async move { self_.call_async(dst).await }),
- _marker: PhantomData,
- }
- }
-}
-
-fn get_host_port<'u>(config: &Config, dst: &'u Uri) -> Result<(&'u str, u16), ConnectError> {
- trace!(
- "Http::connect; scheme={:?}, host={:?}, port={:?}",
- dst.scheme(),
- dst.host(),
- dst.port(),
- );
-
- if config.enforce_http {
- if dst.scheme() != Some(&Scheme::HTTP) {
- return Err(ConnectError {
- msg: INVALID_NOT_HTTP.into(),
- cause: None,
- });
- }
- } else if dst.scheme().is_none() {
- return Err(ConnectError {
- msg: INVALID_MISSING_SCHEME.into(),
- cause: None,
- });
- }
-
- let host = match dst.host() {
- Some(s) => s,
- None => {
- return Err(ConnectError {
- msg: INVALID_MISSING_HOST.into(),
- cause: None,
- })
- }
- };
- let port = match dst.port() {
- Some(port) => port.as_u16(),
- None => {
- if dst.scheme() == Some(&Scheme::HTTPS) {
- 443
- } else {
- 80
- }
- }
- };
-
- Ok((host, port))
-}
-
-impl<R> HttpConnector<R>
-where
- R: Resolve,
-{
- async fn call_async(&mut self, dst: Uri) -> Result<TcpStream, ConnectError> {
- let config = &self.config;
-
- let (host, port) = get_host_port(config, &dst)?;
- let host = host.trim_start_matches('[').trim_end_matches(']');
-
- // If the host is already an IP addr (v4 or v6),
- // skip resolving the dns and start connecting right away.
- let addrs = if let Some(addrs) = dns::SocketAddrs::try_parse(host, port) {
- addrs
- } else {
- let addrs = resolve(&mut self.resolver, dns::Name::new(host.into()))
- .await
- .map_err(ConnectError::dns)?;
- let addrs = addrs
- .map(|mut addr| {
- addr.set_port(port);
- addr
- })
- .collect();
- dns::SocketAddrs::new(addrs)
- };
-
- let c = ConnectingTcp::new(addrs, config);
-
- let sock = c.connect().await?;
-
- if let Err(e) = sock.set_nodelay(config.nodelay) {
- warn!("tcp set_nodelay error: {}", e);
- }
-
- Ok(sock)
- }
-}
-
-impl Connection for TcpStream {
- fn connected(&self) -> Connected {
- let connected = Connected::new();
- if let (Ok(remote_addr), Ok(local_addr)) = (self.peer_addr(), self.local_addr()) {
- connected.extra(HttpInfo { remote_addr, local_addr })
- } else {
- connected
- }
- }
-}
-
impl HttpInfo {
/// Get the remote address of the transport used.
pub fn remote_addr(&self) -> SocketAddr {
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -381,66 +214,12 @@ impl HttpInfo {
}
}
-pin_project! {
- // Not publicly exported (so missing_docs doesn't trigger).
- //
- // We return this `Future` instead of the `Pin<Box<dyn Future>>` directly
- // so that users don't rely on it fitting in a `Pin<Box<dyn Future>>` slot
- // (and thus we can change the type in the future).
- #[must_use = "futures do nothing unless polled"]
- #[allow(missing_debug_implementations)]
- pub struct HttpConnecting<R> {
- #[pin]
- fut: BoxConnecting,
- _marker: PhantomData<R>,
- }
-}
-
-type ConnectResult = Result<TcpStream, ConnectError>;
-type BoxConnecting = Pin<Box<dyn Future<Output = ConnectResult> + Send>>;
-
-impl<R: Resolve> Future for HttpConnecting<R> {
- type Output = ConnectResult;
-
- fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
- self.project().fut.poll(cx)
- }
-}
-
// Not publicly exported (so missing_docs doesn't trigger).
-pub struct ConnectError {
+pub(crate) struct ConnectError {
msg: Box<str>,
cause: Option<Box<dyn StdError + Send + Sync>>,
}
-impl ConnectError {
- fn new<S, E>(msg: S, cause: E) -> ConnectError
- where
- S: Into<Box<str>>,
- E: Into<Box<dyn StdError + Send + Sync>>,
- {
- ConnectError {
- msg: msg.into(),
- cause: Some(cause.into()),
- }
- }
-
- fn dns<E>(cause: E) -> ConnectError
- where
- E: Into<Box<dyn StdError + Send + Sync>>,
- {
- ConnectError::new("dns error", cause)
- }
-
- fn m<S, E>(msg: S) -> impl FnOnce(E) -> ConnectError
- where
- S: Into<Box<str>>,
- E: Into<Box<dyn StdError + Send + Sync>>,
- {
- move |cause| ConnectError::new(msg, cause)
- }
-}
-
impl fmt::Debug for ConnectError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if let Some(ref cause) = self.cause {
diff --git a/src/client/connect/mod.rs b/src/client/connect/mod.rs
--- a/src/client/connect/mod.rs
+++ b/src/client/connect/mod.rs
@@ -55,42 +55,19 @@
//! # }
//! ```
//!
-//! It's worth noting that for `TcpStream`s, the [`HttpConnector`][] is a
-//! better starting place to extend from.
-//!
-//! Using either of the above connector examples, it can be used with the
-//! `Client` like this:
-//!
-//! ```
-//! # #[cfg(feature = "runtime")]
-//! # fn rt () {
-//! # let connector = hyper::client::HttpConnector::new();
-//! // let connector = ...
-//!
-//! let client = hyper::Client::builder()
-//! .build::<_, hyper::Body>(connector);
-//! # }
-//! ```
-//!
-//!
-//! [`HttpConnector`]: HttpConnector
-//! [`Service`]: crate::service::Service
//! [`Uri`]: ::http::Uri
//! [`AsyncRead`]: tokio::io::AsyncRead
//! [`AsyncWrite`]: tokio::io::AsyncWrite
//! [`Connection`]: Connection
+//! [`Service`]: crate::service::Service
use std::fmt;
use ::http::Extensions;
-cfg_feature! {
- #![feature = "tcp"]
-
- pub use self::http::{HttpConnector, HttpInfo};
+pub use self::http::{HttpConnector, HttpInfo};
- pub mod dns;
- mod http;
-}
+pub mod dns;
+mod http;
cfg_feature! {
#![any(feature = "http1", feature = "http2")]
diff --git a/src/common/exec.rs b/src/common/exec.rs
--- a/src/common/exec.rs
+++ b/src/common/exec.rs
@@ -44,13 +44,13 @@ impl Exec {
{
match *self {
Exec::Default => {
- #[cfg(feature = "tcp")]
+ #[cfg(feature = "runtime")]
{
tokio::task::spawn(fut);
}
- #[cfg(not(feature = "tcp"))]
+
+ #[cfg(not(feature = "runtime"))]
{
- // If no runtime, we need an executor!
panic!("executor must be set")
}
}
diff --git a/src/lib.rs b/src/lib.rs
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -51,7 +51,6 @@
//! - `server`: Enables the HTTP `server`.
//! - `runtime`: Enables convenient integration with `tokio`, providing
//! connectors and acceptors for TCP, and a default executor.
-//! - `tcp`: Enables convenient implementations over TCP (using tokio).
//!
//! [feature flags]: https://doc.rust-lang.org/cargo/reference/manifest.html#the-features-section
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -324,7 +324,7 @@ impl<E> Http<E> {
self
}
- /// Set a timeout for reading client request headers. If a client does not
+ /// Set a timeout for reading client request headers. If a client does not
/// transmit the entire header within this time, the connection is closed.
///
/// Default is None.
diff --git a/src/server/server.rs b/src/server/server.rs
--- a/src/server/server.rs
+++ b/src/server/server.rs
@@ -1,8 +1,6 @@
use std::error::Error as StdError;
use std::fmt;
-#[cfg(feature = "tcp")]
-use std::net::{SocketAddr, TcpListener as StdTcpListener};
-#[cfg(any(feature = "tcp", feature = "http1"))]
+#[cfg(feature = "http1")]
use std::time::Duration;
use pin_project_lite::pin_project;
diff --git a/src/server/server.rs b/src/server/server.rs
--- a/src/server/server.rs
+++ b/src/server/server.rs
@@ -10,8 +8,6 @@ use tokio::io::{AsyncRead, AsyncWrite};
use tracing::trace;
use super::accept::Accept;
-#[cfg(all(feature = "tcp"))]
-use super::tcp::AddrIncoming;
use crate::body::{Body, HttpBody};
use crate::common::exec::Exec;
use crate::common::exec::{ConnStreamExec, NewSvcExec};
diff --git a/src/server/server.rs b/src/server/server.rs
--- a/src/server/server.rs
+++ b/src/server/server.rs
@@ -60,48 +56,6 @@ impl<I> Server<I, ()> {
}
}
-#[cfg(feature = "tcp")]
-#[cfg_attr(
- docsrs,
- doc(cfg(all(feature = "tcp", any(feature = "http1", feature = "http2"))))
-)]
-impl Server<AddrIncoming, ()> {
- /// Binds to the provided address, and returns a [`Builder`](Builder).
- ///
- /// # Panics
- ///
- /// This method will panic if binding to the address fails. For a method
- /// to bind to an address and return a `Result`, see `Server::try_bind`.
- pub fn bind(addr: &SocketAddr) -> Builder<AddrIncoming> {
- let incoming = AddrIncoming::new(addr).unwrap_or_else(|e| {
- panic!("error binding to {}: {}", addr, e);
- });
- Server::builder(incoming)
- }
-
- /// Tries to bind to the provided address, and returns a [`Builder`](Builder).
- pub fn try_bind(addr: &SocketAddr) -> crate::Result<Builder<AddrIncoming>> {
- AddrIncoming::new(addr).map(Server::builder)
- }
-
- /// Create a new instance from a `std::net::TcpListener` instance.
- pub fn from_tcp(listener: StdTcpListener) -> Result<Builder<AddrIncoming>, crate::Error> {
- AddrIncoming::from_std(listener).map(Server::builder)
- }
-}
-
-#[cfg(feature = "tcp")]
-#[cfg_attr(
- docsrs,
- doc(cfg(all(feature = "tcp", any(feature = "http1", feature = "http2"))))
-)]
-impl<S, E> Server<AddrIncoming, S, E> {
- /// Returns the local address that this server is bound to.
- pub fn local_addr(&self) -> SocketAddr {
- self.incoming.local_addr()
- }
-}
-
#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))]
impl<I, IO, IE, S, E, B> Server<I, S, E>
where
diff --git a/src/server/server.rs b/src/server/server.rs
--- a/src/server/server.rs
+++ b/src/server/server.rs
@@ -116,40 +70,6 @@ where
{
/// Prepares a server to handle graceful shutdown when the provided future
/// completes.
- ///
- /// # Example
- ///
- /// ```
- /// # fn main() {}
- /// # #[cfg(feature = "tcp")]
- /// # async fn run() {
- /// # use hyper::{Body, Response, Server, Error};
- /// # use hyper::service::{make_service_fn, service_fn};
- /// # let make_service = make_service_fn(|_| async {
- /// # Ok::<_, Error>(service_fn(|_req| async {
- /// # Ok::<_, Error>(Response::new(Body::from("Hello World")))
- /// # }))
- /// # });
- /// // Make a server from the previous examples...
- /// let server = Server::bind(&([127, 0, 0, 1], 3000).into())
- /// .serve(make_service);
- ///
- /// // Prepare some signal for when the server should start shutting down...
- /// let (tx, rx) = tokio::sync::oneshot::channel::<()>();
- /// let graceful = server
- /// .with_graceful_shutdown(async {
- /// rx.await.ok();
- /// });
- ///
- /// // Await the `server` receiving the signal...
- /// if let Err(e) = graceful.await {
- /// eprintln!("server error: {}", e);
- /// }
- ///
- /// // And later, trigger the signal by calling `tx.send(())`.
- /// let _ = tx.send(());
- /// # }
- /// ```
pub fn with_graceful_shutdown<F>(self, signal: F) -> Graceful<I, S, F, E>
where
F: Future<Output = ()>,
diff --git a/src/server/server.rs b/src/server/server.rs
--- a/src/server/server.rs
+++ b/src/server/server.rs
@@ -237,8 +157,6 @@ impl<I: fmt::Debug, S: fmt::Debug> fmt::Debug for Server<I, S> {
#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))]
impl<I, E> Builder<I, E> {
/// Start a new builder, wrapping an incoming stream and low-level options.
- ///
- /// For a more convenient constructor, see [`Server::bind`](Server::bind).
pub fn new(incoming: I, protocol: Http_<E>) -> Self {
Builder { incoming, protocol }
}
diff --git a/src/server/server.rs b/src/server/server.rs
--- a/src/server/server.rs
+++ b/src/server/server.rs
@@ -504,35 +422,6 @@ impl<I, E> Builder<I, E> {
}
/// Consume this `Builder`, creating a [`Server`](Server).
- ///
- /// # Example
- ///
- /// ```
- /// # #[cfg(feature = "tcp")]
- /// # async fn run() {
- /// use hyper::{Body, Error, Response, Server};
- /// use hyper::service::{make_service_fn, service_fn};
- ///
- /// // Construct our SocketAddr to listen on...
- /// let addr = ([127, 0, 0, 1], 3000).into();
- ///
- /// // And a MakeService to handle each connection...
- /// let make_svc = make_service_fn(|_| async {
- /// Ok::<_, Error>(service_fn(|_req| async {
- /// Ok::<_, Error>(Response::new(Body::from("Hello World")))
- /// }))
- /// });
- ///
- /// // Then bind and serve...
- /// let server = Server::bind(&addr)
- /// .serve(make_svc);
- ///
- /// // Run forever-ish...
- /// if let Err(err) = server.await {
- /// eprintln!("server error: {}", err);
- /// }
- /// # }
- /// ```
pub fn serve<S, B>(self, make_service: S) -> Server<I, S, E>
where
I: Accept,
diff --git a/src/server/server.rs b/src/server/server.rs
--- a/src/server/server.rs
+++ b/src/server/server.rs
@@ -553,49 +442,6 @@ impl<I, E> Builder<I, E> {
}
}
-#[cfg(feature = "tcp")]
-#[cfg_attr(
- docsrs,
- doc(cfg(all(feature = "tcp", any(feature = "http1", feature = "http2"))))
-)]
-impl<E> Builder<AddrIncoming, E> {
- /// Set whether TCP keepalive messages are enabled on accepted connections.
- ///
- /// If `None` is specified, keepalive is disabled, otherwise the duration
- /// specified will be the time to remain idle before sending TCP keepalive
- /// probes.
- pub fn tcp_keepalive(mut self, keepalive: Option<Duration>) -> Self {
- self.incoming.set_keepalive(keepalive);
- self
- }
-
- /// Set the value of `TCP_NODELAY` option for accepted connections.
- pub fn tcp_nodelay(mut self, enabled: bool) -> Self {
- self.incoming.set_nodelay(enabled);
- self
- }
-
- /// Set whether to sleep on accept errors.
- ///
- /// A possible scenario is that the process has hit the max open files
- /// allowed, and so trying to accept a new connection will fail with
- /// EMFILE. In some cases, it's preferable to just wait for some time, if
- /// the application will likely close some files (or connections), and try
- /// to accept the connection again. If this option is true, the error will
- /// be logged at the error level, since it is still a big deal, and then
- /// the listener will sleep for 1 second.
- ///
- /// In other cases, hitting the max open files should be treat similarly
- /// to being out-of-memory, and simply error (and shutdown). Setting this
- /// option to false will allow that.
- ///
- /// For more details see [`AddrIncoming::set_sleep_on_errors`]
- pub fn tcp_sleep_on_accept_errors(mut self, val: bool) -> Self {
- self.incoming.set_sleep_on_errors(val);
- self
- }
-}
-
// Used by `Server` to optionally watch a `Connection` future.
//
// The regular `hyper::Server` just uses a `NoopWatcher`, which does
diff --git a/src/server/tcp.rs /dev/null
--- a/src/server/tcp.rs
+++ /dev/null
@@ -1,192 +0,0 @@
-use std::fmt;
-use std::io;
-use std::net::{SocketAddr, TcpListener as StdTcpListener};
-use std::time::Duration;
-
-use tokio::net::{TcpListener, TcpStream};
-use tokio::time::Sleep;
-use tracing::{debug, error, trace};
-
-use crate::common::{task, Future, Pin, Poll};
-
-use super::accept::Accept;
-
-/// A stream of connections from binding to an address.
-#[must_use = "streams do nothing unless polled"]
-pub struct AddrIncoming {
- addr: SocketAddr,
- listener: TcpListener,
- sleep_on_errors: bool,
- tcp_keepalive_timeout: Option<Duration>,
- tcp_nodelay: bool,
- timeout: Option<Pin<Box<Sleep>>>,
-}
-
-impl AddrIncoming {
- pub(super) fn new(addr: &SocketAddr) -> crate::Result<Self> {
- let std_listener = StdTcpListener::bind(addr).map_err(crate::Error::new_listen)?;
-
- AddrIncoming::from_std(std_listener)
- }
-
- pub(super) fn from_std(std_listener: StdTcpListener) -> crate::Result<Self> {
- // TcpListener::from_std doesn't set O_NONBLOCK
- std_listener
- .set_nonblocking(true)
- .map_err(crate::Error::new_listen)?;
- let listener = TcpListener::from_std(std_listener).map_err(crate::Error::new_listen)?;
- AddrIncoming::from_listener(listener)
- }
-
- /// Creates a new `AddrIncoming` binding to provided socket address.
- pub fn bind(addr: &SocketAddr) -> crate::Result<Self> {
- AddrIncoming::new(addr)
- }
-
- /// Creates a new `AddrIncoming` from an existing `tokio::net::TcpListener`.
- pub fn from_listener(listener: TcpListener) -> crate::Result<Self> {
- let addr = listener.local_addr().map_err(crate::Error::new_listen)?;
- Ok(AddrIncoming {
- listener,
- addr,
- sleep_on_errors: true,
- tcp_keepalive_timeout: None,
- tcp_nodelay: false,
- timeout: None,
- })
- }
-
- /// Get the local address bound to this listener.
- pub fn local_addr(&self) -> SocketAddr {
- self.addr
- }
-
- /// Set whether TCP keepalive messages are enabled on accepted connections.
- ///
- /// If `None` is specified, keepalive is disabled, otherwise the duration
- /// specified will be the time to remain idle before sending TCP keepalive
- /// probes.
- pub fn set_keepalive(&mut self, keepalive: Option<Duration>) -> &mut Self {
- self.tcp_keepalive_timeout = keepalive;
- self
- }
-
- /// Set the value of `TCP_NODELAY` option for accepted connections.
- pub fn set_nodelay(&mut self, enabled: bool) -> &mut Self {
- self.tcp_nodelay = enabled;
- self
- }
-
- /// Set whether to sleep on accept errors.
- ///
- /// A possible scenario is that the process has hit the max open files
- /// allowed, and so trying to accept a new connection will fail with
- /// `EMFILE`. In some cases, it's preferable to just wait for some time, if
- /// the application will likely close some files (or connections), and try
- /// to accept the connection again. If this option is `true`, the error
- /// will be logged at the `error` level, since it is still a big deal,
- /// and then the listener will sleep for 1 second.
- ///
- /// In other cases, hitting the max open files should be treat similarly
- /// to being out-of-memory, and simply error (and shutdown). Setting
- /// this option to `false` will allow that.
- ///
- /// Default is `true`.
- pub fn set_sleep_on_errors(&mut self, val: bool) {
- self.sleep_on_errors = val;
- }
-
- fn poll_next_(&mut self, cx: &mut task::Context<'_>) -> Poll<io::Result<TcpStream>> {
- // Check if a previous timeout is active that was set by IO errors.
- if let Some(ref mut to) = self.timeout {
- ready!(Pin::new(to).poll(cx));
- }
- self.timeout = None;
-
- loop {
- match ready!(self.listener.poll_accept(cx)) {
- Ok((socket, _)) => {
- if let Some(dur) = self.tcp_keepalive_timeout {
- let socket = socket2::SockRef::from(&socket);
- let conf = socket2::TcpKeepalive::new().with_time(dur);
- if let Err(e) = socket.set_tcp_keepalive(&conf) {
- trace!("error trying to set TCP keepalive: {}", e);
- }
- }
- if let Err(e) = socket.set_nodelay(self.tcp_nodelay) {
- trace!("error trying to set TCP nodelay: {}", e);
- }
- return Poll::Ready(Ok(socket));
- }
- Err(e) => {
- // Connection errors can be ignored directly, continue by
- // accepting the next request.
- if is_connection_error(&e) {
- debug!("accepted connection already errored: {}", e);
- continue;
- }
-
- if self.sleep_on_errors {
- error!("accept error: {}", e);
-
- // Sleep 1s.
- let mut timeout = Box::pin(tokio::time::sleep(Duration::from_secs(1)));
-
- match timeout.as_mut().poll(cx) {
- Poll::Ready(()) => {
- // Wow, it's been a second already? Ok then...
- continue;
- }
- Poll::Pending => {
- self.timeout = Some(timeout);
- return Poll::Pending;
- }
- }
- } else {
- return Poll::Ready(Err(e));
- }
- }
- }
- }
- }
-}
-
-impl Accept for AddrIncoming {
- type Conn = TcpStream;
- type Error = io::Error;
-
- fn poll_accept(
- mut self: Pin<&mut Self>,
- cx: &mut task::Context<'_>,
- ) -> Poll<Option<Result<Self::Conn, Self::Error>>> {
- let result = ready!(self.poll_next_(cx));
- Poll::Ready(Some(result))
- }
-}
-
-/// This function defines errors that are per-connection. Which basically
-/// means that if we get this error from `accept()` system call it means
-/// next connection might be ready to be accepted.
-///
-/// All other errors will incur a timeout before next `accept()` is performed.
-/// The timeout is useful to handle resource exhaustion errors like ENFILE
-/// and EMFILE. Otherwise, could enter into tight loop.
-fn is_connection_error(e: &io::Error) -> bool {
- matches!(
- e.kind(),
- io::ErrorKind::ConnectionRefused
- | io::ErrorKind::ConnectionAborted
- | io::ErrorKind::ConnectionReset
- )
-}
-
-impl fmt::Debug for AddrIncoming {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("AddrIncoming")
- .field("addr", &self.addr)
- .field("sleep_on_errors", &self.sleep_on_errors)
- .field("tcp_keepalive_timeout", &self.tcp_keepalive_timeout)
- .field("tcp_nodelay", &self.tcp_nodelay)
- .finish()
- }
-}
diff --git a/src/service/make.rs b/src/service/make.rs
--- a/src/service/make.rs
+++ b/src/service/make.rs
@@ -100,41 +100,6 @@ where
}
/// Create a `MakeService` from a function.
-///
-/// # Example
-///
-/// ```
-/// # #[cfg(feature = "runtime")]
-/// # async fn run() {
-/// use std::convert::Infallible;
-/// use hyper::{Body, Request, Response, Server};
-/// use tokio::net::TcpStream;
-/// use hyper::service::{make_service_fn, service_fn};
-///
-/// let addr = ([127, 0, 0, 1], 3000).into();
-///
-/// let make_svc = make_service_fn(|socket: &TcpStream| {
-/// let remote_addr = socket.peer_addr().unwrap();
-/// async move {
-/// Ok::<_, Infallible>(service_fn(move |_: Request<Body>| async move {
-/// Ok::<_, Infallible>(
-/// Response::new(Body::from(format!("Hello, {}!", remote_addr)))
-/// )
-/// }))
-/// }
-/// });
-///
-/// // Then bind and serve...
-/// let server = Server::bind(&addr)
-/// .serve(make_svc);
-///
-/// // Finally, spawn `server` onto an Executor...
-/// if let Err(e) = server.await {
-/// eprintln!("server error: {}", e);
-/// }
-/// # }
-/// # fn main() {}
-/// ```
pub fn make_service_fn<F, Target, Ret>(f: F) -> MakeServiceFn<F>
where
F: FnMut(&Target) -> Ret,
|
2022-07-28T20:21:55Z
| 2,929
|
Remove the `tcp` cargo feature
Remove the `tcp` cargo feature, and related `tokio::net::TcpStream` integration (`TcpStream::connect` and `TcpListener`) (since we are already removing `client::connect` and `server::accept`).
|
hyperium__hyper-2929
|
diff --git a/benches/connect.rs b/benches/connect.rs
--- a/benches/connect.rs
+++ b/benches/connect.rs
@@ -3,35 +3,38 @@
extern crate test;
-use http::Uri;
-use hyper::client::connect::HttpConnector;
-use hyper::service::Service;
-use std::net::SocketAddr;
-use tokio::net::TcpListener;
+// TODO: Reimplement http_connector bench using hyper::client::conn
+// (instead of removed HttpConnector).
-#[bench]
-fn http_connector(b: &mut test::Bencher) {
- let _ = pretty_env_logger::try_init();
- let rt = tokio::runtime::Builder::new_current_thread()
- .enable_all()
- .build()
- .expect("rt build");
- let listener = rt
- .block_on(TcpListener::bind(&SocketAddr::from(([127, 0, 0, 1], 0))))
- .expect("bind");
- let addr = listener.local_addr().expect("local_addr");
- let dst: Uri = format!("http://{}/", addr).parse().expect("uri parse");
- let mut connector = HttpConnector::new();
+// use http::Uri;
+// use hyper::client::connect::HttpConnector;
+// use hyper::service::Service;
+// use std::net::SocketAddr;
+// use tokio::net::TcpListener;
- rt.spawn(async move {
- loop {
- let _ = listener.accept().await;
- }
- });
+// #[bench]
+// fn http_connector(b: &mut test::Bencher) {
+// let _ = pretty_env_logger::try_init();
+// let rt = tokio::runtime::Builder::new_current_thread()
+// .enable_all()
+// .build()
+// .expect("rt build");
+// let listener = rt
+// .block_on(TcpListener::bind(&SocketAddr::from(([127, 0, 0, 1], 0))))
+// .expect("bind");
+// let addr = listener.local_addr().expect("local_addr");
+// let dst: Uri = format!("http://{}/", addr).parse().expect("uri parse");
+// let mut connector = HttpConnector::new();
- b.iter(|| {
- rt.block_on(async {
- connector.call(dst.clone()).await.expect("connect");
- });
- });
-}
+// rt.spawn(async move {
+// loop {
+// let _ = listener.accept().await;
+// }
+// });
+
+// b.iter(|| {
+// rt.block_on(async {
+// connector.call(dst.clone()).await.expect("connect");
+// });
+// });
+// }
diff --git a/benches/end_to_end.rs b/benches/end_to_end.rs
--- a/benches/end_to_end.rs
+++ b/benches/end_to_end.rs
@@ -3,380 +3,383 @@
extern crate test;
-use std::net::SocketAddr;
-
-use futures_util::future::join_all;
-
-use hyper::client::HttpConnector;
-use hyper::{body::HttpBody as _, Body, Method, Request, Response, Server};
-
-// HTTP1
-
-#[bench]
-fn http1_consecutive_x1_empty(b: &mut test::Bencher) {
- opts().bench(b)
-}
-
-#[bench]
-fn http1_consecutive_x1_req_10b(b: &mut test::Bencher) {
- opts()
- .method(Method::POST)
- .request_body(&[b's'; 10])
- .bench(b)
-}
-
-#[bench]
-fn http1_consecutive_x1_both_100kb(b: &mut test::Bencher) {
- let body = &[b'x'; 1024 * 100];
- opts()
- .method(Method::POST)
- .request_body(body)
- .response_body(body)
- .bench(b)
-}
-
-#[bench]
-fn http1_consecutive_x1_both_10mb(b: &mut test::Bencher) {
- let body = &[b'x'; 1024 * 1024 * 10];
- opts()
- .method(Method::POST)
- .request_body(body)
- .response_body(body)
- .bench(b)
-}
-
-#[bench]
-fn http1_parallel_x10_empty(b: &mut test::Bencher) {
- opts().parallel(10).bench(b)
-}
-
-#[bench]
-fn http1_parallel_x10_req_10mb(b: &mut test::Bencher) {
- let body = &[b'x'; 1024 * 1024 * 10];
- opts()
- .parallel(10)
- .method(Method::POST)
- .request_body(body)
- .bench(b)
-}
-
-#[bench]
-fn http1_parallel_x10_req_10kb_100_chunks(b: &mut test::Bencher) {
- let body = &[b'x'; 1024 * 10];
- opts()
- .parallel(10)
- .method(Method::POST)
- .request_chunks(body, 100)
- .bench(b)
-}
-
-#[bench]
-fn http1_parallel_x10_res_1mb(b: &mut test::Bencher) {
- let body = &[b'x'; 1024 * 1024 * 1];
- opts().parallel(10).response_body(body).bench(b)
-}
-
-#[bench]
-fn http1_parallel_x10_res_10mb(b: &mut test::Bencher) {
- let body = &[b'x'; 1024 * 1024 * 10];
- opts().parallel(10).response_body(body).bench(b)
-}
-
-// HTTP2
-
-const HTTP2_MAX_WINDOW: u32 = std::u32::MAX >> 1;
-
-#[bench]
-fn http2_consecutive_x1_empty(b: &mut test::Bencher) {
- opts().http2().bench(b)
-}
-
-#[bench]
-fn http2_consecutive_x1_req_10b(b: &mut test::Bencher) {
- opts()
- .http2()
- .method(Method::POST)
- .request_body(&[b's'; 10])
- .bench(b)
-}
-
-#[bench]
-fn http2_consecutive_x1_req_100kb(b: &mut test::Bencher) {
- let body = &[b'x'; 1024 * 100];
- opts()
- .http2()
- .method(Method::POST)
- .request_body(body)
- .bench(b)
-}
-
-#[bench]
-fn http2_parallel_x10_empty(b: &mut test::Bencher) {
- opts().http2().parallel(10).bench(b)
-}
-
-#[bench]
-fn http2_parallel_x10_req_10mb(b: &mut test::Bencher) {
- let body = &[b'x'; 1024 * 1024 * 10];
- opts()
- .http2()
- .parallel(10)
- .method(Method::POST)
- .request_body(body)
- .http2_stream_window(HTTP2_MAX_WINDOW)
- .http2_conn_window(HTTP2_MAX_WINDOW)
- .bench(b)
-}
-
-#[bench]
-fn http2_parallel_x10_req_10kb_100_chunks(b: &mut test::Bencher) {
- let body = &[b'x'; 1024 * 10];
- opts()
- .http2()
- .parallel(10)
- .method(Method::POST)
- .request_chunks(body, 100)
- .bench(b)
-}
-
-#[bench]
-fn http2_parallel_x10_req_10kb_100_chunks_adaptive_window(b: &mut test::Bencher) {
- let body = &[b'x'; 1024 * 10];
- opts()
- .http2()
- .parallel(10)
- .method(Method::POST)
- .request_chunks(body, 100)
- .http2_adaptive_window()
- .bench(b)
-}
-
-#[bench]
-fn http2_parallel_x10_req_10kb_100_chunks_max_window(b: &mut test::Bencher) {
- let body = &[b'x'; 1024 * 10];
- opts()
- .http2()
- .parallel(10)
- .method(Method::POST)
- .request_chunks(body, 100)
- .http2_stream_window(HTTP2_MAX_WINDOW)
- .http2_conn_window(HTTP2_MAX_WINDOW)
- .bench(b)
-}
-
-#[bench]
-fn http2_parallel_x10_res_1mb(b: &mut test::Bencher) {
- let body = &[b'x'; 1024 * 1024 * 1];
- opts()
- .http2()
- .parallel(10)
- .response_body(body)
- .http2_stream_window(HTTP2_MAX_WINDOW)
- .http2_conn_window(HTTP2_MAX_WINDOW)
- .bench(b)
-}
-
-#[bench]
-fn http2_parallel_x10_res_10mb(b: &mut test::Bencher) {
- let body = &[b'x'; 1024 * 1024 * 10];
- opts()
- .http2()
- .parallel(10)
- .response_body(body)
- .http2_stream_window(HTTP2_MAX_WINDOW)
- .http2_conn_window(HTTP2_MAX_WINDOW)
- .bench(b)
-}
-
-// ==== Benchmark Options =====
-
-struct Opts {
- http2: bool,
- http2_stream_window: Option<u32>,
- http2_conn_window: Option<u32>,
- http2_adaptive_window: bool,
- parallel_cnt: u32,
- request_method: Method,
- request_body: Option<&'static [u8]>,
- request_chunks: usize,
- response_body: &'static [u8],
-}
-
-fn opts() -> Opts {
- Opts {
- http2: false,
- http2_stream_window: None,
- http2_conn_window: None,
- http2_adaptive_window: false,
- parallel_cnt: 1,
- request_method: Method::GET,
- request_body: None,
- request_chunks: 0,
- response_body: b"",
- }
-}
-
-impl Opts {
- fn http2(mut self) -> Self {
- self.http2 = true;
- self
- }
-
- fn http2_stream_window(mut self, sz: impl Into<Option<u32>>) -> Self {
- assert!(!self.http2_adaptive_window);
- self.http2_stream_window = sz.into();
- self
- }
-
- fn http2_conn_window(mut self, sz: impl Into<Option<u32>>) -> Self {
- assert!(!self.http2_adaptive_window);
- self.http2_conn_window = sz.into();
- self
- }
-
- fn http2_adaptive_window(mut self) -> Self {
- assert!(self.http2_stream_window.is_none());
- assert!(self.http2_conn_window.is_none());
- self.http2_adaptive_window = true;
- self
- }
-
- fn method(mut self, m: Method) -> Self {
- self.request_method = m;
- self
- }
-
- fn request_body(mut self, body: &'static [u8]) -> Self {
- self.request_body = Some(body);
- self
- }
-
- fn request_chunks(mut self, chunk: &'static [u8], cnt: usize) -> Self {
- assert!(cnt > 0);
- self.request_body = Some(chunk);
- self.request_chunks = cnt;
- self
- }
-
- fn response_body(mut self, body: &'static [u8]) -> Self {
- self.response_body = body;
- self
- }
-
- fn parallel(mut self, cnt: u32) -> Self {
- assert!(cnt > 0, "parallel count must be larger than 0");
- self.parallel_cnt = cnt;
- self
- }
-
- fn bench(self, b: &mut test::Bencher) {
- use std::sync::Arc;
- let _ = pretty_env_logger::try_init();
- // Create a runtime of current thread.
- let rt = Arc::new(
- tokio::runtime::Builder::new_current_thread()
- .enable_all()
- .build()
- .expect("rt build"),
- );
- let exec = rt.clone();
-
- let req_len = self.request_body.map(|b| b.len()).unwrap_or(0) as u64;
- let req_len = if self.request_chunks > 0 {
- req_len * self.request_chunks as u64
- } else {
- req_len
- };
- let bytes_per_iter = (req_len + self.response_body.len() as u64) * self.parallel_cnt as u64;
- b.bytes = bytes_per_iter;
-
- let addr = spawn_server(&rt, &self);
-
- let connector = HttpConnector::new();
- let client = hyper::Client::builder()
- .http2_only(self.http2)
- .http2_initial_stream_window_size(self.http2_stream_window)
- .http2_initial_connection_window_size(self.http2_conn_window)
- .http2_adaptive_window(self.http2_adaptive_window)
- .build::<_, Body>(connector);
-
- let url: hyper::Uri = format!("http://{}/hello", addr).parse().unwrap();
-
- let make_request = || {
- let chunk_cnt = self.request_chunks;
- let body = if chunk_cnt > 0 {
- let (mut tx, body) = Body::channel();
- let chunk = self
- .request_body
- .expect("request_chunks means request_body");
- exec.spawn(async move {
- for _ in 0..chunk_cnt {
- tx.send_data(chunk.into()).await.expect("send_data");
- }
- });
- body
- } else {
- self.request_body
- .map(Body::from)
- .unwrap_or_else(Body::empty)
- };
- let mut req = Request::new(body);
- *req.method_mut() = self.request_method.clone();
- *req.uri_mut() = url.clone();
- req
- };
-
- let send_request = |req: Request<Body>| {
- let fut = client.request(req);
- async {
- let res = fut.await.expect("client wait");
- let mut body = res.into_body();
- while let Some(_chunk) = body.data().await {}
- }
- };
-
- if self.parallel_cnt == 1 {
- b.iter(|| {
- let req = make_request();
- rt.block_on(send_request(req));
- });
- } else {
- b.iter(|| {
- let futs = (0..self.parallel_cnt).map(|_| {
- let req = make_request();
- send_request(req)
- });
- // Await all spawned futures becoming completed.
- rt.block_on(join_all(futs));
- });
- }
- }
-}
-
-fn spawn_server(rt: &tokio::runtime::Runtime, opts: &Opts) -> SocketAddr {
- use hyper::service::{make_service_fn, service_fn};
- let addr = "127.0.0.1:0".parse().unwrap();
-
- let body = opts.response_body;
- let srv = rt.block_on(async move {
- Server::bind(&addr)
- .http2_only(opts.http2)
- .http2_initial_stream_window_size(opts.http2_stream_window)
- .http2_initial_connection_window_size(opts.http2_conn_window)
- .http2_adaptive_window(opts.http2_adaptive_window)
- .serve(make_service_fn(move |_| async move {
- Ok::<_, hyper::Error>(service_fn(move |req: Request<Body>| async move {
- let mut req_body = req.into_body();
- while let Some(_chunk) = req_body.data().await {}
- Ok::<_, hyper::Error>(Response::new(Body::from(body)))
- }))
- }))
- });
- let addr = srv.local_addr();
- rt.spawn(async {
- if let Err(err) = srv.await {
- panic!("server error: {}", err);
- }
- });
- addr
-}
+// TODO: Reimplement Opts::bench using hyper::server::conn and hyper::client::conn
+// (instead of Server and HttpClient).
+
+// use std::net::SocketAddr;
+
+// use futures_util::future::join_all;
+
+// use hyper::client::HttpConnector;
+// use hyper::{body::HttpBody as _, Body, Method, Request, Response, Server};
+
+// // HTTP1
+
+// #[bench]
+// fn http1_consecutive_x1_empty(b: &mut test::Bencher) {
+// opts().bench(b)
+// }
+
+// #[bench]
+// fn http1_consecutive_x1_req_10b(b: &mut test::Bencher) {
+// opts()
+// .method(Method::POST)
+// .request_body(&[b's'; 10])
+// .bench(b)
+// }
+
+// #[bench]
+// fn http1_consecutive_x1_both_100kb(b: &mut test::Bencher) {
+// let body = &[b'x'; 1024 * 100];
+// opts()
+// .method(Method::POST)
+// .request_body(body)
+// .response_body(body)
+// .bench(b)
+// }
+
+// #[bench]
+// fn http1_consecutive_x1_both_10mb(b: &mut test::Bencher) {
+// let body = &[b'x'; 1024 * 1024 * 10];
+// opts()
+// .method(Method::POST)
+// .request_body(body)
+// .response_body(body)
+// .bench(b)
+// }
+
+// #[bench]
+// fn http1_parallel_x10_empty(b: &mut test::Bencher) {
+// opts().parallel(10).bench(b)
+// }
+
+// #[bench]
+// fn http1_parallel_x10_req_10mb(b: &mut test::Bencher) {
+// let body = &[b'x'; 1024 * 1024 * 10];
+// opts()
+// .parallel(10)
+// .method(Method::POST)
+// .request_body(body)
+// .bench(b)
+// }
+
+// #[bench]
+// fn http1_parallel_x10_req_10kb_100_chunks(b: &mut test::Bencher) {
+// let body = &[b'x'; 1024 * 10];
+// opts()
+// .parallel(10)
+// .method(Method::POST)
+// .request_chunks(body, 100)
+// .bench(b)
+// }
+
+// #[bench]
+// fn http1_parallel_x10_res_1mb(b: &mut test::Bencher) {
+// let body = &[b'x'; 1024 * 1024 * 1];
+// opts().parallel(10).response_body(body).bench(b)
+// }
+
+// #[bench]
+// fn http1_parallel_x10_res_10mb(b: &mut test::Bencher) {
+// let body = &[b'x'; 1024 * 1024 * 10];
+// opts().parallel(10).response_body(body).bench(b)
+// }
+
+// // HTTP2
+
+// const HTTP2_MAX_WINDOW: u32 = std::u32::MAX >> 1;
+
+// #[bench]
+// fn http2_consecutive_x1_empty(b: &mut test::Bencher) {
+// opts().http2().bench(b)
+// }
+
+// #[bench]
+// fn http2_consecutive_x1_req_10b(b: &mut test::Bencher) {
+// opts()
+// .http2()
+// .method(Method::POST)
+// .request_body(&[b's'; 10])
+// .bench(b)
+// }
+
+// #[bench]
+// fn http2_consecutive_x1_req_100kb(b: &mut test::Bencher) {
+// let body = &[b'x'; 1024 * 100];
+// opts()
+// .http2()
+// .method(Method::POST)
+// .request_body(body)
+// .bench(b)
+// }
+
+// #[bench]
+// fn http2_parallel_x10_empty(b: &mut test::Bencher) {
+// opts().http2().parallel(10).bench(b)
+// }
+
+// #[bench]
+// fn http2_parallel_x10_req_10mb(b: &mut test::Bencher) {
+// let body = &[b'x'; 1024 * 1024 * 10];
+// opts()
+// .http2()
+// .parallel(10)
+// .method(Method::POST)
+// .request_body(body)
+// .http2_stream_window(HTTP2_MAX_WINDOW)
+// .http2_conn_window(HTTP2_MAX_WINDOW)
+// .bench(b)
+// }
+
+// #[bench]
+// fn http2_parallel_x10_req_10kb_100_chunks(b: &mut test::Bencher) {
+// let body = &[b'x'; 1024 * 10];
+// opts()
+// .http2()
+// .parallel(10)
+// .method(Method::POST)
+// .request_chunks(body, 100)
+// .bench(b)
+// }
+
+// #[bench]
+// fn http2_parallel_x10_req_10kb_100_chunks_adaptive_window(b: &mut test::Bencher) {
+// let body = &[b'x'; 1024 * 10];
+// opts()
+// .http2()
+// .parallel(10)
+// .method(Method::POST)
+// .request_chunks(body, 100)
+// .http2_adaptive_window()
+// .bench(b)
+// }
+
+// #[bench]
+// fn http2_parallel_x10_req_10kb_100_chunks_max_window(b: &mut test::Bencher) {
+// let body = &[b'x'; 1024 * 10];
+// opts()
+// .http2()
+// .parallel(10)
+// .method(Method::POST)
+// .request_chunks(body, 100)
+// .http2_stream_window(HTTP2_MAX_WINDOW)
+// .http2_conn_window(HTTP2_MAX_WINDOW)
+// .bench(b)
+// }
+
+// #[bench]
+// fn http2_parallel_x10_res_1mb(b: &mut test::Bencher) {
+// let body = &[b'x'; 1024 * 1024 * 1];
+// opts()
+// .http2()
+// .parallel(10)
+// .response_body(body)
+// .http2_stream_window(HTTP2_MAX_WINDOW)
+// .http2_conn_window(HTTP2_MAX_WINDOW)
+// .bench(b)
+// }
+
+// #[bench]
+// fn http2_parallel_x10_res_10mb(b: &mut test::Bencher) {
+// let body = &[b'x'; 1024 * 1024 * 10];
+// opts()
+// .http2()
+// .parallel(10)
+// .response_body(body)
+// .http2_stream_window(HTTP2_MAX_WINDOW)
+// .http2_conn_window(HTTP2_MAX_WINDOW)
+// .bench(b)
+// }
+
+// // ==== Benchmark Options =====
+
+// struct Opts {
+// http2: bool,
+// http2_stream_window: Option<u32>,
+// http2_conn_window: Option<u32>,
+// http2_adaptive_window: bool,
+// parallel_cnt: u32,
+// request_method: Method,
+// request_body: Option<&'static [u8]>,
+// request_chunks: usize,
+// response_body: &'static [u8],
+// }
+
+// fn opts() -> Opts {
+// Opts {
+// http2: false,
+// http2_stream_window: None,
+// http2_conn_window: None,
+// http2_adaptive_window: false,
+// parallel_cnt: 1,
+// request_method: Method::GET,
+// request_body: None,
+// request_chunks: 0,
+// response_body: b"",
+// }
+// }
+
+// impl Opts {
+// fn http2(mut self) -> Self {
+// self.http2 = true;
+// self
+// }
+
+// fn http2_stream_window(mut self, sz: impl Into<Option<u32>>) -> Self {
+// assert!(!self.http2_adaptive_window);
+// self.http2_stream_window = sz.into();
+// self
+// }
+
+// fn http2_conn_window(mut self, sz: impl Into<Option<u32>>) -> Self {
+// assert!(!self.http2_adaptive_window);
+// self.http2_conn_window = sz.into();
+// self
+// }
+
+// fn http2_adaptive_window(mut self) -> Self {
+// assert!(self.http2_stream_window.is_none());
+// assert!(self.http2_conn_window.is_none());
+// self.http2_adaptive_window = true;
+// self
+// }
+
+// fn method(mut self, m: Method) -> Self {
+// self.request_method = m;
+// self
+// }
+
+// fn request_body(mut self, body: &'static [u8]) -> Self {
+// self.request_body = Some(body);
+// self
+// }
+
+// fn request_chunks(mut self, chunk: &'static [u8], cnt: usize) -> Self {
+// assert!(cnt > 0);
+// self.request_body = Some(chunk);
+// self.request_chunks = cnt;
+// self
+// }
+
+// fn response_body(mut self, body: &'static [u8]) -> Self {
+// self.response_body = body;
+// self
+// }
+
+// fn parallel(mut self, cnt: u32) -> Self {
+// assert!(cnt > 0, "parallel count must be larger than 0");
+// self.parallel_cnt = cnt;
+// self
+// }
+
+// fn bench(self, b: &mut test::Bencher) {
+// use std::sync::Arc;
+// let _ = pretty_env_logger::try_init();
+// // Create a runtime of current thread.
+// let rt = Arc::new(
+// tokio::runtime::Builder::new_current_thread()
+// .enable_all()
+// .build()
+// .expect("rt build"),
+// );
+// let exec = rt.clone();
+
+// let req_len = self.request_body.map(|b| b.len()).unwrap_or(0) as u64;
+// let req_len = if self.request_chunks > 0 {
+// req_len * self.request_chunks as u64
+// } else {
+// req_len
+// };
+// let bytes_per_iter = (req_len + self.response_body.len() as u64) * self.parallel_cnt as u64;
+// b.bytes = bytes_per_iter;
+
+// let addr = spawn_server(&rt, &self);
+
+// let connector = HttpConnector::new();
+// let client = hyper::Client::builder()
+// .http2_only(self.http2)
+// .http2_initial_stream_window_size(self.http2_stream_window)
+// .http2_initial_connection_window_size(self.http2_conn_window)
+// .http2_adaptive_window(self.http2_adaptive_window)
+// .build::<_, Body>(connector);
+
+// let url: hyper::Uri = format!("http://{}/hello", addr).parse().unwrap();
+
+// let make_request = || {
+// let chunk_cnt = self.request_chunks;
+// let body = if chunk_cnt > 0 {
+// let (mut tx, body) = Body::channel();
+// let chunk = self
+// .request_body
+// .expect("request_chunks means request_body");
+// exec.spawn(async move {
+// for _ in 0..chunk_cnt {
+// tx.send_data(chunk.into()).await.expect("send_data");
+// }
+// });
+// body
+// } else {
+// self.request_body
+// .map(Body::from)
+// .unwrap_or_else(Body::empty)
+// };
+// let mut req = Request::new(body);
+// *req.method_mut() = self.request_method.clone();
+// *req.uri_mut() = url.clone();
+// req
+// };
+
+// let send_request = |req: Request<Body>| {
+// let fut = client.request(req);
+// async {
+// let res = fut.await.expect("client wait");
+// let mut body = res.into_body();
+// while let Some(_chunk) = body.data().await {}
+// }
+// };
+
+// if self.parallel_cnt == 1 {
+// b.iter(|| {
+// let req = make_request();
+// rt.block_on(send_request(req));
+// });
+// } else {
+// b.iter(|| {
+// let futs = (0..self.parallel_cnt).map(|_| {
+// let req = make_request();
+// send_request(req)
+// });
+// // Await all spawned futures becoming completed.
+// rt.block_on(join_all(futs));
+// });
+// }
+// }
+// }
+
+// fn spawn_server(rt: &tokio::runtime::Runtime, opts: &Opts) -> SocketAddr {
+// use hyper::service::{make_service_fn, service_fn};
+// let addr = "127.0.0.1:0".parse().unwrap();
+
+// let body = opts.response_body;
+// let srv = rt.block_on(async move {
+// Server::bind(&addr)
+// .http2_only(opts.http2)
+// .http2_initial_stream_window_size(opts.http2_stream_window)
+// .http2_initial_connection_window_size(opts.http2_conn_window)
+// .http2_adaptive_window(opts.http2_adaptive_window)
+// .serve(make_service_fn(move |_| async move {
+// Ok::<_, hyper::Error>(service_fn(move |req: Request<Body>| async move {
+// let mut req_body = req.into_body();
+// while let Some(_chunk) = req_body.data().await {}
+// Ok::<_, hyper::Error>(Response::new(Body::from(body)))
+// }))
+// }))
+// });
+// let addr = srv.local_addr();
+// rt.spawn(async {
+// if let Err(err) = srv.await {
+// panic!("server error: {}", err);
+// }
+// });
+// addr
+// }
diff --git a/benches/pipeline.rs b/benches/pipeline.rs
--- a/benches/pipeline.rs
+++ b/benches/pipeline.rs
@@ -3,84 +3,87 @@
extern crate test;
-use std::io::{Read, Write};
-use std::net::TcpStream;
-use std::sync::mpsc;
-use std::time::Duration;
-
-use tokio::sync::oneshot;
-
-use hyper::service::{make_service_fn, service_fn};
-use hyper::{Body, Response, Server};
-
-const PIPELINED_REQUESTS: usize = 16;
-
-#[bench]
-fn hello_world_16(b: &mut test::Bencher) {
- let _ = pretty_env_logger::try_init();
- let (_until_tx, until_rx) = oneshot::channel::<()>();
-
- let addr = {
- let (addr_tx, addr_rx) = mpsc::channel();
- std::thread::spawn(move || {
- let addr = "127.0.0.1:0".parse().unwrap();
-
- let make_svc = make_service_fn(|_| async {
- Ok::<_, hyper::Error>(service_fn(|_| async {
- Ok::<_, hyper::Error>(Response::new(Body::from("Hello, World!")))
- }))
- });
-
- let rt = tokio::runtime::Builder::new_current_thread()
- .enable_all()
- .build()
- .expect("rt build");
- let srv = rt.block_on(async move {
- Server::bind(&addr)
- .http1_pipeline_flush(true)
- .serve(make_svc)
- });
-
- addr_tx.send(srv.local_addr()).unwrap();
-
- let graceful = srv.with_graceful_shutdown(async {
- until_rx.await.ok();
- });
-
- rt.block_on(async {
- if let Err(e) = graceful.await {
- panic!("server error: {}", e);
- }
- });
- });
-
- addr_rx.recv().unwrap()
- };
-
- let mut pipelined_reqs = Vec::new();
- for _ in 0..PIPELINED_REQUESTS {
- pipelined_reqs.extend_from_slice(b"GET / HTTP/1.1\r\nHost: localhost\r\n\r\n");
- }
-
- let total_bytes = {
- let mut tcp = TcpStream::connect(addr).unwrap();
- tcp.write_all(b"GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n")
- .unwrap();
- let mut buf = Vec::new();
- tcp.read_to_end(&mut buf).unwrap()
- } * PIPELINED_REQUESTS;
-
- let mut tcp = TcpStream::connect(addr).unwrap();
- tcp.set_read_timeout(Some(Duration::from_secs(3))).unwrap();
- let mut buf = [0u8; 8192];
-
- b.bytes = (pipelined_reqs.len() + total_bytes) as u64;
- b.iter(|| {
- tcp.write_all(&pipelined_reqs).unwrap();
- let mut sum = 0;
- while sum < total_bytes {
- sum += tcp.read(&mut buf).unwrap();
- }
- assert_eq!(sum, total_bytes);
- });
-}
+// TODO: Reimplement hello_world_16 bench using hyper::server::conn
+// (instead of Server).
+
+// use std::io::{Read, Write};
+// use std::net::TcpStream;
+// use std::sync::mpsc;
+// use std::time::Duration;
+
+// use tokio::sync::oneshot;
+
+// use hyper::service::{make_service_fn, service_fn};
+// use hyper::{Body, Response, Server};
+
+// const PIPELINED_REQUESTS: usize = 16;
+
+// #[bench]
+// fn hello_world_16(b: &mut test::Bencher) {
+// let _ = pretty_env_logger::try_init();
+// let (_until_tx, until_rx) = oneshot::channel::<()>();
+
+// let addr = {
+// let (addr_tx, addr_rx) = mpsc::channel();
+// std::thread::spawn(move || {
+// let addr = "127.0.0.1:0".parse().unwrap();
+
+// let make_svc = make_service_fn(|_| async {
+// Ok::<_, hyper::Error>(service_fn(|_| async {
+// Ok::<_, hyper::Error>(Response::new(Body::from("Hello, World!")))
+// }))
+// });
+
+// let rt = tokio::runtime::Builder::new_current_thread()
+// .enable_all()
+// .build()
+// .expect("rt build");
+// let srv = rt.block_on(async move {
+// Server::bind(&addr)
+// .http1_pipeline_flush(true)
+// .serve(make_svc)
+// });
+
+// addr_tx.send(srv.local_addr()).unwrap();
+
+// let graceful = srv.with_graceful_shutdown(async {
+// until_rx.await.ok();
+// });
+
+// rt.block_on(async {
+// if let Err(e) = graceful.await {
+// panic!("server error: {}", e);
+// }
+// });
+// });
+
+// addr_rx.recv().unwrap()
+// };
+
+// let mut pipelined_reqs = Vec::new();
+// for _ in 0..PIPELINED_REQUESTS {
+// pipelined_reqs.extend_from_slice(b"GET / HTTP/1.1\r\nHost: localhost\r\n\r\n");
+// }
+
+// let total_bytes = {
+// let mut tcp = TcpStream::connect(addr).unwrap();
+// tcp.write_all(b"GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n")
+// .unwrap();
+// let mut buf = Vec::new();
+// tcp.read_to_end(&mut buf).unwrap()
+// } * PIPELINED_REQUESTS;
+
+// let mut tcp = TcpStream::connect(addr).unwrap();
+// tcp.set_read_timeout(Some(Duration::from_secs(3))).unwrap();
+// let mut buf = [0u8; 8192];
+
+// b.bytes = (pipelined_reqs.len() + total_bytes) as u64;
+// b.iter(|| {
+// tcp.write_all(&pipelined_reqs).unwrap();
+// let mut sum = 0;
+// while sum < total_bytes {
+// sum += tcp.read(&mut buf).unwrap();
+// }
+// assert_eq!(sum, total_bytes);
+// });
+// }
diff --git a/benches/server.rs b/benches/server.rs
--- a/benches/server.rs
+++ b/benches/server.rs
@@ -3,130 +3,133 @@
extern crate test;
+// TODO: Reimplement bench_server using hyper::server::conn (instead
+// of removed Server).
+
use std::io::{Read, Write};
use std::net::{TcpListener, TcpStream};
use std::sync::mpsc;
-use std::time::Duration;
-
-use futures_util::{stream, StreamExt};
-use http_body_util::StreamBody;
-use tokio::sync::oneshot;
-
-use hyper::service::{make_service_fn, service_fn};
-use hyper::{Response, Server};
-
-macro_rules! bench_server {
- ($b:ident, $header:expr, $body:expr) => {{
- let _ = pretty_env_logger::try_init();
- let (_until_tx, until_rx) = oneshot::channel::<()>();
- let addr = {
- let (addr_tx, addr_rx) = mpsc::channel();
- std::thread::spawn(move || {
- let addr = "127.0.0.1:0".parse().unwrap();
- let make_svc = make_service_fn(|_| async {
- Ok::<_, hyper::Error>(service_fn(|_| async {
- Ok::<_, hyper::Error>(
- Response::builder()
- .header($header.0, $header.1)
- .header("content-type", "text/plain")
- .body($body())
- .unwrap(),
- )
- }))
- });
-
- let rt = tokio::runtime::Builder::new_current_thread()
- .enable_all()
- .build()
- .expect("rt build");
-
- let srv = rt.block_on(async move { Server::bind(&addr).serve(make_svc) });
-
- addr_tx.send(srv.local_addr()).unwrap();
-
- let graceful = srv.with_graceful_shutdown(async {
- until_rx.await.ok();
- });
- rt.block_on(async move {
- if let Err(e) = graceful.await {
- panic!("server error: {}", e);
- }
- });
- });
-
- addr_rx.recv().unwrap()
- };
-
- let total_bytes = {
- let mut tcp = TcpStream::connect(addr).unwrap();
- tcp.write_all(b"GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n")
- .unwrap();
- let mut buf = Vec::new();
- tcp.read_to_end(&mut buf).unwrap()
- };
-
- let mut tcp = TcpStream::connect(addr).unwrap();
- tcp.set_read_timeout(Some(Duration::from_secs(3))).unwrap();
- let mut buf = [0u8; 8192];
-
- $b.bytes = 35 + total_bytes as u64;
- $b.iter(|| {
- tcp.write_all(b"GET / HTTP/1.1\r\nHost: localhost\r\n\r\n")
- .unwrap();
- let mut sum = 0;
- while sum < total_bytes {
- sum += tcp.read(&mut buf).unwrap();
- }
- assert_eq!(sum, total_bytes);
- });
- }};
-}
-
-fn body(b: &'static [u8]) -> hyper::Body {
- b.into()
-}
-
-#[bench]
-fn throughput_fixedsize_small_payload(b: &mut test::Bencher) {
- bench_server!(b, ("content-length", "13"), || body(b"Hello, World!"))
-}
-
-#[bench]
-fn throughput_fixedsize_large_payload(b: &mut test::Bencher) {
- bench_server!(b, ("content-length", "1000000"), || body(
- &[b'x'; 1_000_000]
- ))
-}
-
-#[bench]
-fn throughput_fixedsize_many_chunks(b: &mut test::Bencher) {
- bench_server!(b, ("content-length", "1000000"), || {
- static S: &[&[u8]] = &[&[b'x'; 1_000] as &[u8]; 1_000] as _;
- StreamBody::new(stream::iter(S.iter()).map(|&s| Ok::<_, String>(s)))
- })
-}
-
-#[bench]
-fn throughput_chunked_small_payload(b: &mut test::Bencher) {
- bench_server!(b, ("transfer-encoding", "chunked"), || body(
- b"Hello, World!"
- ))
-}
-
-#[bench]
-fn throughput_chunked_large_payload(b: &mut test::Bencher) {
- bench_server!(b, ("transfer-encoding", "chunked"), || body(
- &[b'x'; 1_000_000]
- ))
-}
-
-#[bench]
-fn throughput_chunked_many_chunks(b: &mut test::Bencher) {
- bench_server!(b, ("transfer-encoding", "chunked"), || {
- static S: &[&[u8]] = &[&[b'x'; 1_000] as &[u8]; 1_000] as _;
- StreamBody::new(stream::iter(S.iter()).map(|&s| Ok::<_, String>(s)))
- })
-}
+// use std::time::Duration;
+
+// use futures_util::{stream, StreamExt};
+// use http_body_util::StreamBody;
+// use tokio::sync::oneshot;
+
+// use hyper::service::{make_service_fn, service_fn};
+// use hyper::{Response, Server};
+
+// macro_rules! bench_server {
+// ($b:ident, $header:expr, $body:expr) => {{
+// let _ = pretty_env_logger::try_init();
+// let (_until_tx, until_rx) = oneshot::channel::<()>();
+// let addr = {
+// let (addr_tx, addr_rx) = mpsc::channel();
+// std::thread::spawn(move || {
+// let addr = "127.0.0.1:0".parse().unwrap();
+// let make_svc = make_service_fn(|_| async {
+// Ok::<_, hyper::Error>(service_fn(|_| async {
+// Ok::<_, hyper::Error>(
+// Response::builder()
+// .header($header.0, $header.1)
+// .header("content-type", "text/plain")
+// .body($body())
+// .unwrap(),
+// )
+// }))
+// });
+
+// let rt = tokio::runtime::Builder::new_current_thread()
+// .enable_all()
+// .build()
+// .expect("rt build");
+
+// let srv = rt.block_on(async move { Server::bind(&addr).serve(make_svc) });
+
+// addr_tx.send(srv.local_addr()).unwrap();
+
+// let graceful = srv.with_graceful_shutdown(async {
+// until_rx.await.ok();
+// });
+// rt.block_on(async move {
+// if let Err(e) = graceful.await {
+// panic!("server error: {}", e);
+// }
+// });
+// });
+
+// addr_rx.recv().unwrap()
+// };
+
+// let total_bytes = {
+// let mut tcp = TcpStream::connect(addr).unwrap();
+// tcp.write_all(b"GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n")
+// .unwrap();
+// let mut buf = Vec::new();
+// tcp.read_to_end(&mut buf).unwrap()
+// };
+
+// let mut tcp = TcpStream::connect(addr).unwrap();
+// tcp.set_read_timeout(Some(Duration::from_secs(3))).unwrap();
+// let mut buf = [0u8; 8192];
+
+// $b.bytes = 35 + total_bytes as u64;
+// $b.iter(|| {
+// tcp.write_all(b"GET / HTTP/1.1\r\nHost: localhost\r\n\r\n")
+// .unwrap();
+// let mut sum = 0;
+// while sum < total_bytes {
+// sum += tcp.read(&mut buf).unwrap();
+// }
+// assert_eq!(sum, total_bytes);
+// });
+// }};
+// }
+
+// fn body(b: &'static [u8]) -> hyper::Body {
+// b.into()
+// }
+
+// #[bench]
+// fn throughput_fixedsize_small_payload(b: &mut test::Bencher) {
+// bench_server!(b, ("content-length", "13"), || body(b"Hello, World!"))
+// }
+
+// #[bench]
+// fn throughput_fixedsize_large_payload(b: &mut test::Bencher) {
+// bench_server!(b, ("content-length", "1000000"), || body(
+// &[b'x'; 1_000_000]
+// ))
+// }
+
+// #[bench]
+// fn throughput_fixedsize_many_chunks(b: &mut test::Bencher) {
+// bench_server!(b, ("content-length", "1000000"), || {
+// static S: &[&[u8]] = &[&[b'x'; 1_000] as &[u8]; 1_000] as _;
+// StreamBody::new(stream::iter(S.iter()).map(|&s| Ok::<_, String>(s)))
+// })
+// }
+
+// #[bench]
+// fn throughput_chunked_small_payload(b: &mut test::Bencher) {
+// bench_server!(b, ("transfer-encoding", "chunked"), || body(
+// b"Hello, World!"
+// ))
+// }
+
+// #[bench]
+// fn throughput_chunked_large_payload(b: &mut test::Bencher) {
+// bench_server!(b, ("transfer-encoding", "chunked"), || body(
+// &[b'x'; 1_000_000]
+// ))
+// }
+
+// #[bench]
+// fn throughput_chunked_many_chunks(b: &mut test::Bencher) {
+// bench_server!(b, ("transfer-encoding", "chunked"), || {
+// static S: &[&[u8]] = &[&[b'x'; 1_000] as &[u8]; 1_000] as _;
+// StreamBody::new(stream::iter(S.iter()).map(|&s| Ok::<_, String>(s)))
+// })
+// }
#[bench]
fn raw_tcp_throughput_small_payload(b: &mut test::Bencher) {
diff --git a/examples/web_api.rs b/examples/web_api.rs
--- a/examples/web_api.rs
+++ b/examples/web_api.rs
@@ -60,13 +75,10 @@ async fn api_get_response() -> Result<Response<Body>> {
Ok(res)
}
-async fn response_examples(
- req: Request<Body>,
- client: Client<HttpConnector>,
-) -> Result<Response<Body>> {
+async fn response_examples(req: Request<Body>) -> Result<Response<Body>> {
match (req.method(), req.uri().path()) {
(&Method::GET, "/") | (&Method::GET, "/index.html") => Ok(Response::new(INDEX.into())),
- (&Method::GET, "/test.html") => client_request_response(&client).await,
+ (&Method::GET, "/test.html") => client_request_response().await,
(&Method::POST, "/json_api") => api_post_response(req).await,
(&Method::GET, "/json_api") => api_get_response().await,
_ => {
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -608,6 +608,7 @@ mod tests {
);
}
+ #[cfg(not(miri))]
#[tokio::test]
async fn channel_abort() {
let (tx, mut rx) = Body::channel();
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -618,6 +619,7 @@ mod tests {
assert!(err.is_body_write_aborted(), "{:?}", err);
}
+ #[cfg(not(miri))]
#[tokio::test]
async fn channel_abort_when_buffer_is_full() {
let (mut tx, mut rx) = Body::channel();
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -644,6 +646,7 @@ mod tests {
assert_eq!(chunk2, "chunk 2");
}
+ #[cfg(not(miri))]
#[tokio::test]
async fn channel_empty() {
let (_, mut rx) = Body::channel();
diff --git a/src/client/connect/dns.rs b/src/client/connect/dns.rs
--- a/src/client/connect/dns.rs
+++ b/src/client/connect/dns.rs
@@ -352,14 +267,6 @@ mod sealed {
}
}
-pub(super) async fn resolve<R>(resolver: &mut R, name: Name) -> Result<R::Addrs, R::Error>
-where
- R: Resolve,
-{
- futures_util::future::poll_fn(|cx| resolver.poll_ready(cx)).await?;
- resolver.resolve(name).await
-}
-
#[cfg(test)]
mod tests {
use super::*;
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -471,537 +250,3 @@ impl StdError for ConnectError {
self.cause.as_ref().map(|e| &**e as _)
}
}
-
-struct ConnectingTcp<'a> {
- preferred: ConnectingTcpRemote,
- fallback: Option<ConnectingTcpFallback>,
- config: &'a Config,
-}
-
-impl<'a> ConnectingTcp<'a> {
- fn new(remote_addrs: dns::SocketAddrs, config: &'a Config) -> Self {
- if let Some(fallback_timeout) = config.happy_eyeballs_timeout {
- let (preferred_addrs, fallback_addrs) = remote_addrs
- .split_by_preference(config.local_address_ipv4, config.local_address_ipv6);
- if fallback_addrs.is_empty() {
- return ConnectingTcp {
- preferred: ConnectingTcpRemote::new(preferred_addrs, config.connect_timeout),
- fallback: None,
- config,
- };
- }
-
- ConnectingTcp {
- preferred: ConnectingTcpRemote::new(preferred_addrs, config.connect_timeout),
- fallback: Some(ConnectingTcpFallback {
- delay: tokio::time::sleep(fallback_timeout),
- remote: ConnectingTcpRemote::new(fallback_addrs, config.connect_timeout),
- }),
- config,
- }
- } else {
- ConnectingTcp {
- preferred: ConnectingTcpRemote::new(remote_addrs, config.connect_timeout),
- fallback: None,
- config,
- }
- }
- }
-}
-
-struct ConnectingTcpFallback {
- delay: Sleep,
- remote: ConnectingTcpRemote,
-}
-
-struct ConnectingTcpRemote {
- addrs: dns::SocketAddrs,
- connect_timeout: Option<Duration>,
-}
-
-impl ConnectingTcpRemote {
- fn new(addrs: dns::SocketAddrs, connect_timeout: Option<Duration>) -> Self {
- let connect_timeout = connect_timeout.map(|t| t / (addrs.len() as u32));
-
- Self {
- addrs,
- connect_timeout,
- }
- }
-}
-
-impl ConnectingTcpRemote {
- async fn connect(&mut self, config: &Config) -> Result<TcpStream, ConnectError> {
- let mut err = None;
- for addr in &mut self.addrs {
- debug!("connecting to {}", addr);
- match connect(&addr, config, self.connect_timeout)?.await {
- Ok(tcp) => {
- debug!("connected to {}", addr);
- return Ok(tcp);
- }
- Err(e) => {
- trace!("connect error for {}: {:?}", addr, e);
- err = Some(e);
- }
- }
- }
-
- match err {
- Some(e) => Err(e),
- None => Err(ConnectError::new(
- "tcp connect error",
- std::io::Error::new(std::io::ErrorKind::NotConnected, "Network unreachable"),
- )),
- }
- }
-}
-
-fn bind_local_address(
- socket: &socket2::Socket,
- dst_addr: &SocketAddr,
- local_addr_ipv4: &Option<Ipv4Addr>,
- local_addr_ipv6: &Option<Ipv6Addr>,
-) -> io::Result<()> {
- match (*dst_addr, local_addr_ipv4, local_addr_ipv6) {
- (SocketAddr::V4(_), Some(addr), _) => {
- socket.bind(&SocketAddr::new(addr.clone().into(), 0).into())?;
- }
- (SocketAddr::V6(_), _, Some(addr)) => {
- socket.bind(&SocketAddr::new(addr.clone().into(), 0).into())?;
- }
- _ => {
- if cfg!(windows) {
- // Windows requires a socket be bound before calling connect
- let any: SocketAddr = match *dst_addr {
- SocketAddr::V4(_) => ([0, 0, 0, 0], 0).into(),
- SocketAddr::V6(_) => ([0, 0, 0, 0, 0, 0, 0, 0], 0).into(),
- };
- socket.bind(&any.into())?;
- }
- }
- }
-
- Ok(())
-}
-
-fn connect(
- addr: &SocketAddr,
- config: &Config,
- connect_timeout: Option<Duration>,
-) -> Result<impl Future<Output = Result<TcpStream, ConnectError>>, ConnectError> {
- // TODO(eliza): if Tokio's `TcpSocket` gains support for setting the
- // keepalive timeout, it would be nice to use that instead of socket2,
- // and avoid the unsafe `into_raw_fd`/`from_raw_fd` dance...
- use socket2::{Domain, Protocol, Socket, TcpKeepalive, Type};
- use std::convert::TryInto;
-
- let domain = Domain::for_address(*addr);
- let socket = Socket::new(domain, Type::STREAM, Some(Protocol::TCP))
- .map_err(ConnectError::m("tcp open error"))?;
-
- // When constructing a Tokio `TcpSocket` from a raw fd/socket, the user is
- // responsible for ensuring O_NONBLOCK is set.
- socket
- .set_nonblocking(true)
- .map_err(ConnectError::m("tcp set_nonblocking error"))?;
-
- if let Some(dur) = config.keep_alive_timeout {
- let conf = TcpKeepalive::new().with_time(dur);
- if let Err(e) = socket.set_tcp_keepalive(&conf) {
- warn!("tcp set_keepalive error: {}", e);
- }
- }
-
- bind_local_address(
- &socket,
- addr,
- &config.local_address_ipv4,
- &config.local_address_ipv6,
- )
- .map_err(ConnectError::m("tcp bind local error"))?;
-
- #[cfg(unix)]
- let socket = unsafe {
- // Safety: `from_raw_fd` is only safe to call if ownership of the raw
- // file descriptor is transferred. Since we call `into_raw_fd` on the
- // socket2 socket, it gives up ownership of the fd and will not close
- // it, so this is safe.
- use std::os::unix::io::{FromRawFd, IntoRawFd};
- TcpSocket::from_raw_fd(socket.into_raw_fd())
- };
- #[cfg(windows)]
- let socket = unsafe {
- // Safety: `from_raw_socket` is only safe to call if ownership of the raw
- // Windows SOCKET is transferred. Since we call `into_raw_socket` on the
- // socket2 socket, it gives up ownership of the SOCKET and will not close
- // it, so this is safe.
- use std::os::windows::io::{FromRawSocket, IntoRawSocket};
- TcpSocket::from_raw_socket(socket.into_raw_socket())
- };
-
- if config.reuse_address {
- if let Err(e) = socket.set_reuseaddr(true) {
- warn!("tcp set_reuse_address error: {}", e);
- }
- }
-
- if let Some(size) = config.send_buffer_size {
- if let Err(e) = socket.set_send_buffer_size(size.try_into().unwrap_or(std::u32::MAX)) {
- warn!("tcp set_buffer_size error: {}", e);
- }
- }
-
- if let Some(size) = config.recv_buffer_size {
- if let Err(e) = socket.set_recv_buffer_size(size.try_into().unwrap_or(std::u32::MAX)) {
- warn!("tcp set_recv_buffer_size error: {}", e);
- }
- }
-
- let connect = socket.connect(*addr);
- Ok(async move {
- match connect_timeout {
- Some(dur) => match tokio::time::timeout(dur, connect).await {
- Ok(Ok(s)) => Ok(s),
- Ok(Err(e)) => Err(e),
- Err(e) => Err(io::Error::new(io::ErrorKind::TimedOut, e)),
- },
- None => connect.await,
- }
- .map_err(ConnectError::m("tcp connect error"))
- })
-}
-
-impl ConnectingTcp<'_> {
- async fn connect(mut self) -> Result<TcpStream, ConnectError> {
- match self.fallback {
- None => self.preferred.connect(self.config).await,
- Some(mut fallback) => {
- let preferred_fut = self.preferred.connect(self.config);
- futures_util::pin_mut!(preferred_fut);
-
- let fallback_fut = fallback.remote.connect(self.config);
- futures_util::pin_mut!(fallback_fut);
-
- let fallback_delay = fallback.delay;
- futures_util::pin_mut!(fallback_delay);
-
- let (result, future) =
- match futures_util::future::select(preferred_fut, fallback_delay).await {
- Either::Left((result, _fallback_delay)) => {
- (result, Either::Right(fallback_fut))
- }
- Either::Right(((), preferred_fut)) => {
- // Delay is done, start polling both the preferred and the fallback
- futures_util::future::select(preferred_fut, fallback_fut)
- .await
- .factor_first()
- }
- };
-
- if result.is_err() {
- // Fallback to the remaining future (could be preferred or fallback)
- // if we get an error
- future.await
- } else {
- result
- }
- }
- }
- }
-}
-
-#[cfg(test)]
-mod tests {
- use std::io;
-
- use ::http::Uri;
-
- use super::super::sealed::{Connect, ConnectSvc};
- use super::{Config, ConnectError, HttpConnector};
-
- async fn connect<C>(
- connector: C,
- dst: Uri,
- ) -> Result<<C::_Svc as ConnectSvc>::Connection, <C::_Svc as ConnectSvc>::Error>
- where
- C: Connect,
- {
- connector.connect(super::super::sealed::Internal, dst).await
- }
-
- #[tokio::test]
- async fn test_errors_enforce_http() {
- let dst = "https://example.domain/foo/bar?baz".parse().unwrap();
- let connector = HttpConnector::new();
-
- let err = connect(connector, dst).await.unwrap_err();
- assert_eq!(&*err.msg, super::INVALID_NOT_HTTP);
- }
-
- #[cfg(any(target_os = "linux", target_os = "macos"))]
- fn get_local_ips() -> (Option<std::net::Ipv4Addr>, Option<std::net::Ipv6Addr>) {
- use std::net::{IpAddr, TcpListener};
-
- let mut ip_v4 = None;
- let mut ip_v6 = None;
-
- let ips = pnet_datalink::interfaces()
- .into_iter()
- .flat_map(|i| i.ips.into_iter().map(|n| n.ip()));
-
- for ip in ips {
- match ip {
- IpAddr::V4(ip) if TcpListener::bind((ip, 0)).is_ok() => ip_v4 = Some(ip),
- IpAddr::V6(ip) if TcpListener::bind((ip, 0)).is_ok() => ip_v6 = Some(ip),
- _ => (),
- }
-
- if ip_v4.is_some() && ip_v6.is_some() {
- break;
- }
- }
-
- (ip_v4, ip_v6)
- }
-
- #[tokio::test]
- async fn test_errors_missing_scheme() {
- let dst = "example.domain".parse().unwrap();
- let mut connector = HttpConnector::new();
- connector.enforce_http(false);
-
- let err = connect(connector, dst).await.unwrap_err();
- assert_eq!(&*err.msg, super::INVALID_MISSING_SCHEME);
- }
-
- // NOTE: pnet crate that we use in this test doesn't compile on Windows
- #[cfg(any(target_os = "linux", target_os = "macos"))]
- #[tokio::test]
- async fn local_address() {
- use std::net::{IpAddr, TcpListener};
- let _ = pretty_env_logger::try_init();
-
- let (bind_ip_v4, bind_ip_v6) = get_local_ips();
- let server4 = TcpListener::bind("127.0.0.1:0").unwrap();
- let port = server4.local_addr().unwrap().port();
- let server6 = TcpListener::bind(&format!("[::1]:{}", port)).unwrap();
-
- let assert_client_ip = |dst: String, server: TcpListener, expected_ip: IpAddr| async move {
- let mut connector = HttpConnector::new();
-
- match (bind_ip_v4, bind_ip_v6) {
- (Some(v4), Some(v6)) => connector.set_local_addresses(v4, v6),
- (Some(v4), None) => connector.set_local_address(Some(v4.into())),
- (None, Some(v6)) => connector.set_local_address(Some(v6.into())),
- _ => unreachable!(),
- }
-
- connect(connector, dst.parse().unwrap()).await.unwrap();
-
- let (_, client_addr) = server.accept().unwrap();
-
- assert_eq!(client_addr.ip(), expected_ip);
- };
-
- if let Some(ip) = bind_ip_v4 {
- assert_client_ip(format!("http://127.0.0.1:{}", port), server4, ip.into()).await;
- }
-
- if let Some(ip) = bind_ip_v6 {
- assert_client_ip(format!("http://[::1]:{}", port), server6, ip.into()).await;
- }
- }
-
- #[test]
- #[cfg_attr(not(feature = "__internal_happy_eyeballs_tests"), ignore)]
- fn client_happy_eyeballs() {
- use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, TcpListener};
- use std::time::{Duration, Instant};
-
- use super::dns;
- use super::ConnectingTcp;
-
- let _ = pretty_env_logger::try_init();
- let server4 = TcpListener::bind("127.0.0.1:0").unwrap();
- let addr = server4.local_addr().unwrap();
- let _server6 = TcpListener::bind(&format!("[::1]:{}", addr.port())).unwrap();
- let rt = tokio::runtime::Builder::new_current_thread()
- .enable_all()
- .build()
- .unwrap();
-
- let local_timeout = Duration::default();
- let unreachable_v4_timeout = measure_connect(unreachable_ipv4_addr()).1;
- let unreachable_v6_timeout = measure_connect(unreachable_ipv6_addr()).1;
- let fallback_timeout = std::cmp::max(unreachable_v4_timeout, unreachable_v6_timeout)
- + Duration::from_millis(250);
-
- let scenarios = &[
- // Fast primary, without fallback.
- (&[local_ipv4_addr()][..], 4, local_timeout, false),
- (&[local_ipv6_addr()][..], 6, local_timeout, false),
- // Fast primary, with (unused) fallback.
- (
- &[local_ipv4_addr(), local_ipv6_addr()][..],
- 4,
- local_timeout,
- false,
- ),
- (
- &[local_ipv6_addr(), local_ipv4_addr()][..],
- 6,
- local_timeout,
- false,
- ),
- // Unreachable + fast primary, without fallback.
- (
- &[unreachable_ipv4_addr(), local_ipv4_addr()][..],
- 4,
- unreachable_v4_timeout,
- false,
- ),
- (
- &[unreachable_ipv6_addr(), local_ipv6_addr()][..],
- 6,
- unreachable_v6_timeout,
- false,
- ),
- // Unreachable + fast primary, with (unused) fallback.
- (
- &[
- unreachable_ipv4_addr(),
- local_ipv4_addr(),
- local_ipv6_addr(),
- ][..],
- 4,
- unreachable_v4_timeout,
- false,
- ),
- (
- &[
- unreachable_ipv6_addr(),
- local_ipv6_addr(),
- local_ipv4_addr(),
- ][..],
- 6,
- unreachable_v6_timeout,
- true,
- ),
- // Slow primary, with (used) fallback.
- (
- &[slow_ipv4_addr(), local_ipv4_addr(), local_ipv6_addr()][..],
- 6,
- fallback_timeout,
- false,
- ),
- (
- &[slow_ipv6_addr(), local_ipv6_addr(), local_ipv4_addr()][..],
- 4,
- fallback_timeout,
- true,
- ),
- // Slow primary, with (used) unreachable + fast fallback.
- (
- &[slow_ipv4_addr(), unreachable_ipv6_addr(), local_ipv6_addr()][..],
- 6,
- fallback_timeout + unreachable_v6_timeout,
- false,
- ),
- (
- &[slow_ipv6_addr(), unreachable_ipv4_addr(), local_ipv4_addr()][..],
- 4,
- fallback_timeout + unreachable_v4_timeout,
- true,
- ),
- ];
-
- // Scenarios for IPv6 -> IPv4 fallback require that host can access IPv6 network.
- // Otherwise, connection to "slow" IPv6 address will error-out immediately.
- let ipv6_accessible = measure_connect(slow_ipv6_addr()).0;
-
- for &(hosts, family, timeout, needs_ipv6_access) in scenarios {
- if needs_ipv6_access && !ipv6_accessible {
- continue;
- }
-
- let (start, stream) = rt
- .block_on(async move {
- let addrs = hosts
- .iter()
- .map(|host| (host.clone(), addr.port()).into())
- .collect();
- let cfg = Config {
- local_address_ipv4: None,
- local_address_ipv6: None,
- connect_timeout: None,
- keep_alive_timeout: None,
- happy_eyeballs_timeout: Some(fallback_timeout),
- nodelay: false,
- reuse_address: false,
- enforce_http: false,
- send_buffer_size: None,
- recv_buffer_size: None,
- };
- let connecting_tcp = ConnectingTcp::new(dns::SocketAddrs::new(addrs), &cfg);
- let start = Instant::now();
- Ok::<_, ConnectError>((start, ConnectingTcp::connect(connecting_tcp).await?))
- })
- .unwrap();
- let res = if stream.peer_addr().unwrap().is_ipv4() {
- 4
- } else {
- 6
- };
- let duration = start.elapsed();
-
- // Allow actual duration to be +/- 150ms off.
- let min_duration = if timeout >= Duration::from_millis(150) {
- timeout - Duration::from_millis(150)
- } else {
- Duration::default()
- };
- let max_duration = timeout + Duration::from_millis(150);
-
- assert_eq!(res, family);
- assert!(duration >= min_duration);
- assert!(duration <= max_duration);
- }
-
- fn local_ipv4_addr() -> IpAddr {
- Ipv4Addr::new(127, 0, 0, 1).into()
- }
-
- fn local_ipv6_addr() -> IpAddr {
- Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1).into()
- }
-
- fn unreachable_ipv4_addr() -> IpAddr {
- Ipv4Addr::new(127, 0, 0, 2).into()
- }
-
- fn unreachable_ipv6_addr() -> IpAddr {
- Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 2).into()
- }
-
- fn slow_ipv4_addr() -> IpAddr {
- // RFC 6890 reserved IPv4 address.
- Ipv4Addr::new(198, 18, 0, 25).into()
- }
-
- fn slow_ipv6_addr() -> IpAddr {
- // RFC 6890 reserved IPv6 address.
- Ipv6Addr::new(2001, 2, 0, 0, 0, 0, 0, 254).into()
- }
-
- fn measure_connect(addr: IpAddr) -> (bool, Duration) {
- let start = Instant::now();
- let result =
- std::net::TcpStream::connect_timeout(&(addr, 80).into(), Duration::from_secs(1));
-
- let reachable = result.is_ok() || result.unwrap_err().kind() == io::ErrorKind::TimedOut;
- let duration = start.elapsed();
- (reachable, duration)
- }
- }
-}
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -301,6 +301,7 @@ mod tests {
}
}
+ #[cfg(not(miri))]
#[tokio::test]
async fn drop_receiver_sends_cancel_errors() {
let _ = pretty_env_logger::try_init();
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -323,6 +324,7 @@ mod tests {
}
}
+ #[cfg(not(miri))]
#[tokio::test]
async fn sender_checks_for_want_on_send() {
let (mut tx, mut rx) = channel::<Custom, ()>();
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -363,7 +365,6 @@ mod tests {
use crate::{Body, Request, Response};
let rt = tokio::runtime::Builder::new_current_thread()
- .enable_all()
.build()
.unwrap();
let (mut tx, mut rx) = channel::<Request<Body>, Response<Body>>();
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -386,7 +387,6 @@ mod tests {
#[bench]
fn giver_queue_not_ready(b: &mut test::Bencher) {
let rt = tokio::runtime::Builder::new_current_thread()
- .enable_all()
.build()
.unwrap();
let (_tx, mut rx) = channel::<i32, ()>();
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -26,30 +26,6 @@
//! For a small example program simply fetching a URL, take a look at the
//! [full client example](https://github.com/hyperium/hyper/blob/master/examples/client.rs).
//!
-//! ```
-//! # #[cfg(all(feature = "tcp", feature = "client", any(feature = "http1", feature = "http2")))]
-//! # async fn fetch_httpbin() -> hyper::Result<()> {
-//! use hyper::{body::HttpBody as _, Client, Uri};
-//!
-//! let client = Client::new();
-//!
-//! // Make a GET /ip to 'http://httpbin.org'
-//! let res = client.get(Uri::from_static("http://httpbin.org/ip")).await?;
-//!
-//! // And then, if the request gets a response...
-//! println!("status: {}", res.status());
-//!
-//! // Concatenate the body stream into a single buffer...
-//! let buf = hyper::body::to_bytes(res).await?;
-//!
-//! println!("body: {:?}", buf);
-//! # Ok(())
-//! # }
-//! # fn main () {}
-//! ```
-
-#[cfg(feature = "tcp")]
-pub use self::connect::HttpConnector;
pub mod connect;
#[cfg(all(test, feature = "runtime"))]
diff --git a/src/client/tests.rs b/src/client/tests.rs
--- a/src/client/tests.rs
+++ b/src/client/tests.rs
@@ -1,28 +1,3 @@
-use std::io;
-
-use futures_util::future;
-use tokio::net::TcpStream;
-
-use super::Client;
-
-#[tokio::test]
-async fn client_connect_uri_argument() {
- let connector = tower::service_fn(|dst: http::Uri| {
- assert_eq!(dst.scheme(), Some(&http::uri::Scheme::HTTP));
- assert_eq!(dst.host(), Some("example.local"));
- assert_eq!(dst.port(), None);
- assert_eq!(dst.path(), "/", "path should be removed");
-
- future::err::<TcpStream, _>(io::Error::new(io::ErrorKind::Other, "expect me"))
- });
-
- let client = Client::builder().build::<_, crate::Body>(connector);
- let _ = client
- .get("http://example.local/and/a/path".parse().unwrap())
- .await
- .expect_err("response should fail");
-}
-
/*
// FIXME: re-implement tests with `async/await`
#[test]
diff --git a/src/server/mod.rs b/src/server/mod.rs
--- a/src/server/mod.rs
+++ b/src/server/mod.rs
@@ -15,143 +15,8 @@
//! be executed to start serving requests.
//!
//! [`Server`](Server) accepts connections in both HTTP1 and HTTP2 by default.
-//!
-//! ## Examples
-//!
-//! ```no_run
-//! use std::convert::Infallible;
-//! use std::net::SocketAddr;
-//! use hyper::{Body, Request, Response, Server};
-//! use hyper::service::{make_service_fn, service_fn};
-//!
-//! async fn handle(_req: Request<Body>) -> Result<Response<Body>, Infallible> {
-//! Ok(Response::new(Body::from("Hello World")))
-//! }
-//!
-//! # #[cfg(feature = "runtime")]
-//! #[tokio::main]
-//! async fn main() {
-//! // Construct our SocketAddr to listen on...
-//! let addr = SocketAddr::from(([127, 0, 0, 1], 3000));
-//!
-//! // And a MakeService to handle each connection...
-//! let make_service = make_service_fn(|_conn| async {
-//! Ok::<_, Infallible>(service_fn(handle))
-//! });
-//!
-//! // Then bind and serve...
-//! let server = Server::bind(&addr).serve(make_service);
-//!
-//! // And run forever...
-//! if let Err(e) = server.await {
-//! eprintln!("server error: {}", e);
-//! }
-//! }
-//! # #[cfg(not(feature = "runtime"))]
-//! # fn main() {}
-//! ```
-//!
-//! If you don't need the connection and your service implements `Clone` you can use
-//! [`tower::make::Shared`] instead of `make_service_fn` which is a bit simpler:
-//!
-//! ```no_run
-//! # use std::convert::Infallible;
-//! # use std::net::SocketAddr;
-//! # use hyper::{Body, Request, Response, Server};
-//! # use hyper::service::{make_service_fn, service_fn};
-//! # use tower::make::Shared;
-//! # async fn handle(_req: Request<Body>) -> Result<Response<Body>, Infallible> {
-//! # Ok(Response::new(Body::from("Hello World")))
-//! # }
-//! # #[cfg(feature = "runtime")]
-//! #[tokio::main]
-//! async fn main() {
-//! // Construct our SocketAddr to listen on...
-//! let addr = SocketAddr::from(([127, 0, 0, 1], 3000));
-//!
-//! // Shared is a MakeService that produces services by cloning an inner service...
-//! let make_service = Shared::new(service_fn(handle));
-//!
-//! // Then bind and serve...
-//! let server = Server::bind(&addr).serve(make_service);
-//!
-//! // And run forever...
-//! if let Err(e) = server.await {
-//! eprintln!("server error: {}", e);
-//! }
-//! }
-//! # #[cfg(not(feature = "runtime"))]
-//! # fn main() {}
-//! ```
-//!
-//! Passing data to your request handler can be done like so:
-//!
-//! ```no_run
-//! use std::convert::Infallible;
-//! use std::net::SocketAddr;
-//! use hyper::{Body, Request, Response, Server};
-//! use hyper::service::{make_service_fn, service_fn};
-//! # #[cfg(feature = "runtime")]
-//! use tokio::net::TcpStream;
-//!
-//! #[derive(Clone)]
-//! struct AppContext {
-//! // Whatever data your application needs can go here
-//! }
-//!
-//! async fn handle(
-//! context: AppContext,
-//! addr: SocketAddr,
-//! req: Request<Body>
-//! ) -> Result<Response<Body>, Infallible> {
-//! Ok(Response::new(Body::from("Hello World")))
-//! }
-//!
-//! # #[cfg(feature = "runtime")]
-//! #[tokio::main]
-//! async fn main() {
-//! let context = AppContext {
-//! // ...
-//! };
-//!
-//! // A `MakeService` that produces a `Service` to handle each connection.
-//! let make_service = make_service_fn(move |conn: &TcpStream| {
-//! // We have to clone the context to share it with each invocation of
-//! // `make_service`. If your data doesn't implement `Clone` consider using
-//! // an `std::sync::Arc`.
-//! let context = context.clone();
-//!
-//! // You can grab the address of the incoming connection like so.
-//! let addr = conn.peer_addr().unwrap();
-//!
-//! // Create a `Service` for responding to the request.
-//! let service = service_fn(move |req| {
-//! handle(context.clone(), addr, req)
-//! });
-//!
-//! // Return the service to hyper.
-//! async move { Ok::<_, Infallible>(service) }
-//! });
-//!
-//! // Run the server like above...
-//! let addr = SocketAddr::from(([127, 0, 0, 1], 3000));
-//!
-//! let server = Server::bind(&addr).serve(make_service);
-//!
-//! if let Err(e) = server.await {
-//! eprintln!("server error: {}", e);
-//! }
-//! }
-//! # #[cfg(not(feature = "runtime"))]
-//! # fn main() {}
-//! ```
-//!
-//! [`tower::make::Shared`]: https://docs.rs/tower/latest/tower/make/struct.Shared.html
-
pub mod accept;
pub mod conn;
-#[cfg(feature = "tcp")]
-mod tcp;
pub use self::server::Server;
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -5,6 +5,7 @@
extern crate matches;
use std::convert::Infallible;
+use std::fmt;
use std::io::{Read, Write};
use std::net::{SocketAddr, TcpListener};
use std::pin::Pin;
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -12,9 +13,11 @@ use std::task::{Context, Poll};
use std::thread;
use std::time::Duration;
+use http::uri::PathAndQuery;
use http_body_util::{BodyExt, StreamBody};
use hyper::body::to_bytes as concat;
-use hyper::{Body, Client, Method, Request, StatusCode};
+use hyper::header::HeaderValue;
+use hyper::{Body, Method, Request, StatusCode, Uri, Version};
use bytes::Bytes;
use futures_channel::oneshot;
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -31,6 +34,71 @@ fn tcp_connect(addr: &SocketAddr) -> impl Future<Output = std::io::Result<TcpStr
TcpStream::connect(*addr)
}
+struct HttpInfo {
+ remote_addr: SocketAddr,
+}
+
+#[derive(Debug)]
+enum Error {
+ Io(std::io::Error),
+ Hyper(hyper::Error),
+ AbsoluteUriRequired,
+ UnsupportedVersion,
+}
+
+impl Error {
+ fn is_incomplete_message(&self) -> bool {
+ match self {
+ Self::Hyper(err) => err.is_incomplete_message(),
+ _ => false,
+ }
+ }
+
+ fn is_parse(&self) -> bool {
+ match self {
+ Self::Hyper(err) => err.is_parse(),
+ _ => false,
+ }
+ }
+
+ fn is_parse_too_large(&self) -> bool {
+ match self {
+ Self::Hyper(err) => err.is_parse_too_large(),
+ _ => false,
+ }
+ }
+
+ fn is_parse_status(&self) -> bool {
+ match self {
+ Self::Hyper(err) => err.is_parse_status(),
+ _ => false,
+ }
+ }
+}
+
+impl fmt::Display for Error {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ Self::Io(err) => err.fmt(fmt),
+ Self::Hyper(err) => err.fmt(fmt),
+ Self::AbsoluteUriRequired => write!(fmt, "client requires absolute-form URIs"),
+ Self::UnsupportedVersion => write!(fmt, "request has unsupported HTTP version"),
+ }
+ }
+}
+
+impl From<std::io::Error> for Error {
+ fn from(err: std::io::Error) -> Self {
+ Self::Io(err)
+ }
+}
+
+impl From<hyper::Error> for Error {
+ fn from(err: hyper::Error) -> Self {
+ Self::Hyper(err)
+ }
+}
+
macro_rules! test {
(
name: $name:ident,
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -110,7 +178,7 @@ macro_rules! test {
let _ = pretty_env_logger::try_init();
let rt = support::runtime();
- let err: ::hyper::Error = test! {
+ let err: Error = test! {
INNER;
name: $name,
runtime: &rt,
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -123,7 +191,7 @@ macro_rules! test {
)*},
}.unwrap_err();
- fn infer_closure<F: FnOnce(&::hyper::Error) -> bool>(f: F) -> F { f }
+ fn infer_closure<F: FnOnce(&Error) -> bool>(f: F) -> F { f }
let closure = infer_closure($err);
if !closure(&err) {
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -151,22 +219,123 @@ macro_rules! test {
let addr = server.local_addr().expect("local_addr");
let rt = $runtime;
- let connector = ::hyper::client::HttpConnector::new();
- let client = Client::builder()
- $($(.$c_opt_prop($c_opt_val))*)?
- .build(connector);
-
#[allow(unused_assignments, unused_mut)]
let mut body = BodyExt::boxed(http_body_util::Empty::<bytes::Bytes>::new());
let mut req_builder = Request::builder();
$(
test!(@client_request; req_builder, body, addr, $c_req_prop: $c_req_val);
)*
- let req = req_builder
+ let mut req = req_builder
.body(body)
.expect("request builder");
- let res = client.request(req);
+ let res = async move {
+ // Wrapper around hyper::client::conn::Builder with set_host field to mimic
+ // hyper::client::Builder.
+ struct Builder {
+ inner: hyper::client::conn::Builder,
+ set_host: bool,
+ http09_responses: bool,
+ http2_only: bool,
+ }
+
+ impl Builder {
+ fn new() -> Self {
+ Self {
+ inner: hyper::client::conn::Builder::new(),
+ set_host: true,
+ http09_responses: false,
+ http2_only: false,
+ }
+ }
+
+ #[allow(unused)]
+ fn set_host(&mut self, val: bool) -> &mut Self {
+ self.set_host = val;
+ self
+ }
+
+ #[allow(unused)]
+ fn http09_responses(&mut self, val: bool) -> &mut Self {
+ self.http09_responses = val;
+ self.inner.http09_responses(val);
+ self
+ }
+
+ #[allow(unused)]
+ fn http2_only(&mut self, val: bool) -> &mut Self {
+ self.http2_only = val;
+ self.inner.http2_only(val);
+ self
+ }
+ }
+
+ impl std::ops::Deref for Builder {
+ type Target = hyper::client::conn::Builder;
+
+ fn deref(&self) -> &Self::Target {
+ &self.inner
+ }
+ }
+
+ impl std::ops::DerefMut for Builder {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.inner
+ }
+ }
+
+ #[allow(unused_mut)]
+ let mut builder = Builder::new();
+ $(builder$(.$c_opt_prop($c_opt_val))*;)?
+
+
+ if req.version() == Version::HTTP_09 && !builder.http09_responses {
+ return Err(Error::UnsupportedVersion);
+ }
+
+ if req.version() == Version::HTTP_2 && !builder.http2_only {
+ return Err(Error::UnsupportedVersion);
+ }
+
+ let host = req.uri().host().ok_or(Error::AbsoluteUriRequired)?;
+ let port = req.uri().port_u16().unwrap_or(80);
+
+ let stream = TcpStream::connect(format!("{}:{}", host, port)).await?;
+
+ let extra = HttpInfo {
+ remote_addr: stream.peer_addr().unwrap(),
+ };
+
+ if builder.set_host {
+ let host = req.uri().host().expect("no host in uri");
+ let port = req.uri().port_u16().expect("no port in uri");
+
+ let host = format!("{}:{}", host, port);
+
+ req.headers_mut().append("Host", HeaderValue::from_str(&host).unwrap());
+ }
+
+ let (mut sender, conn) = builder.handshake(stream).await?;
+
+ tokio::task::spawn(async move {
+ if let Err(err) = conn.await {
+ panic!("{}", err);
+ }
+ });
+
+ let mut builder = Uri::builder();
+ if req.method() == Method::CONNECT {
+ builder = builder.path_and_query(format!("{}:{}", req.uri().host().unwrap(), req.uri().port_u16().unwrap()));
+ } else {
+ builder = builder.path_and_query(req.uri().path_and_query().cloned().unwrap_or(PathAndQuery::from_static("/")));
+ }
+ *req.uri_mut() = builder.build().unwrap();
+
+ let mut resp = sender.send_request(req).await?;
+
+ resp.extensions_mut().insert(extra);
+ Ok(resp)
+ };
let (tx, rx) = oneshot::channel();
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -188,7 +357,7 @@ macro_rules! test {
assert_eq!(s(&buf[..n]), expected);
inc.write_all($server_reply.as_ref()).expect("write_all");
- let _ = tx.send(Ok::<_, hyper::Error>(()));
+ let _ = tx.send(Ok::<_, Error>(()));
}).expect("thread spawn");
let rx = rx.expect("thread panicked");
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -197,10 +366,10 @@ macro_rules! test {
// Always check that HttpConnector has set the "extra" info...
let extra = resp
.extensions_mut()
- .remove::<::hyper::client::connect::HttpInfo>()
+ .remove::<HttpInfo>()
.expect("HttpConnector should set HttpInfo");
- assert_eq!(extra.remote_addr(), addr, "HttpInfo should have server addr");
+ assert_eq!(extra.remote_addr, addr, "HttpInfo should have server addr");
resp
})
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1174,7 +1343,7 @@ mod dispatch_impl {
use super::support;
use hyper::body::HttpBody;
- use hyper::client::connect::{Connected, Connection, HttpConnector};
+ use hyper::client::connect::{Connected, Connection};
use hyper::Client;
#[test]
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1186,10 +1355,7 @@ mod dispatch_impl {
let addr = server.local_addr().unwrap();
let rt = support::runtime();
let (closes_tx, closes) = mpsc::channel(10);
- let client = Client::builder().build(DebugConnector::with_http_and_closes(
- HttpConnector::new(),
- closes_tx,
- ));
+ let client = Client::builder().build(DebugConnector::with_closes(closes_tx));
let (tx1, rx1) = oneshot::channel();
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1259,10 +1425,7 @@ mod dispatch_impl {
});
let res = {
- let client = Client::builder().build(DebugConnector::with_http_and_closes(
- HttpConnector::new(),
- closes_tx,
- ));
+ let client = Client::builder().build(DebugConnector::with_closes(closes_tx));
let req = Request::builder()
.uri(&*format!("http://{}/a", addr))
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1322,10 +1485,7 @@ mod dispatch_impl {
support::runtime().block_on(client_drop_rx.into_future())
});
- let client = Client::builder().build(DebugConnector::with_http_and_closes(
- HttpConnector::new(),
- closes_tx,
- ));
+ let client = Client::builder().build(DebugConnector::with_closes(closes_tx));
let req = Request::builder()
.uri(&*format!("http://{}/a", addr))
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1385,10 +1545,7 @@ mod dispatch_impl {
});
let res = {
- let client = Client::builder().build(DebugConnector::with_http_and_closes(
- HttpConnector::new(),
- closes_tx,
- ));
+ let client = Client::builder().build(DebugConnector::with_closes(closes_tx));
let req = Request::builder()
.uri(&*format!("http://{}/a", addr))
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1438,10 +1595,7 @@ mod dispatch_impl {
let rx = rx1.expect("thread panicked");
let res = {
- let client = Client::builder().build(DebugConnector::with_http_and_closes(
- HttpConnector::new(),
- closes_tx,
- ));
+ let client = Client::builder().build(DebugConnector::with_closes(closes_tx));
let req = Request::builder()
.uri(&*format!("http://{}/a", addr))
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1490,9 +1644,9 @@ mod dispatch_impl {
let _ = rx2.recv();
});
- let client = Client::builder().pool_max_idle_per_host(0).build(
- DebugConnector::with_http_and_closes(HttpConnector::new(), closes_tx),
- );
+ let client = Client::builder()
+ .pool_max_idle_per_host(0)
+ .build(DebugConnector::with_closes(closes_tx));
let req = Request::builder()
.uri(&*format!("http://{}/a", addr))
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1536,10 +1690,7 @@ mod dispatch_impl {
let _ = tx1.send(());
});
- let client = Client::builder().build(DebugConnector::with_http_and_closes(
- HttpConnector::new(),
- closes_tx,
- ));
+ let client = Client::builder().build(DebugConnector::with_closes(closes_tx));
let req = Request::builder()
.uri(&*format!("http://{}/a", addr))
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2085,7 +2236,6 @@ mod dispatch_impl {
#[derive(Clone)]
struct DebugConnector {
- http: HttpConnector,
closes: mpsc::Sender<()>,
connects: Arc<AtomicUsize>,
is_proxy: bool,
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2094,14 +2244,12 @@ mod dispatch_impl {
impl DebugConnector {
fn new() -> DebugConnector {
- let http = HttpConnector::new();
let (tx, _) = mpsc::channel(10);
- DebugConnector::with_http_and_closes(http, tx)
+ DebugConnector::with_closes(tx)
}
- fn with_http_and_closes(http: HttpConnector, closes: mpsc::Sender<()>) -> DebugConnector {
+ fn with_closes(closes: mpsc::Sender<()>) -> DebugConnector {
DebugConnector {
- http,
closes,
connects: Arc::new(AtomicUsize::new(0)),
is_proxy: false,
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2117,12 +2265,11 @@ mod dispatch_impl {
impl hyper::service::Service<Uri> for DebugConnector {
type Response = DebugStream;
- type Error = <HttpConnector as hyper::service::Service<Uri>>::Error;
+ type Error = std::io::Error;
type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send>>;
- fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
- // don't forget to check inner service is ready :)
- hyper::service::Service::<Uri>::poll_ready(&mut self.http, cx)
+ fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ Poll::Ready(Ok(()))
}
fn call(&mut self, dst: Uri) -> Self::Future {
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2130,12 +2277,20 @@ mod dispatch_impl {
let closes = self.closes.clone();
let is_proxy = self.is_proxy;
let is_alpn_h2 = self.alpn_h2;
- Box::pin(self.http.call(dst).map_ok(move |tcp| DebugStream {
- tcp,
- on_drop: closes,
- is_alpn_h2,
- is_proxy,
- }))
+
+ Box::pin(async move {
+ let host = dst.host().expect("no host in uri");
+ let port = dst.port_u16().expect("no port in uri");
+
+ let stream = TcpStream::connect(format!("{}:{}", host, port)).await?;
+
+ Ok(DebugStream {
+ tcp: stream,
+ on_drop: closes,
+ is_alpn_h2,
+ is_proxy,
+ })
+ })
}
}
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2188,7 +2343,7 @@ mod dispatch_impl {
impl Connection for DebugStream {
fn connected(&self) -> Connected {
- let connected = self.tcp.connected().proxy(self.is_proxy);
+ let connected = Connected::new().proxy(self.is_proxy);
if self.is_alpn_h2 {
connected.negotiated_h2()
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2744,27 +2899,45 @@ mod conn {
#[tokio::test]
async fn http2_detect_conn_eof() {
use futures_util::future;
- use hyper::service::{make_service_fn, service_fn};
- use hyper::{Response, Server};
let _ = pretty_env_logger::try_init();
- let server = Server::bind(&([127, 0, 0, 1], 0).into())
- .http2_only(true)
- .serve(make_service_fn(|_| async move {
- Ok::<_, hyper::Error>(service_fn(|_req| {
- future::ok::<_, hyper::Error>(Response::new(Body::empty()))
- }))
- }));
- let addr = server.local_addr();
- let (shdn_tx, shdn_rx) = oneshot::channel();
+ let addr = SocketAddr::from(([127, 0, 0, 1], 0));
+ let listener = TkTcpListener::bind(addr).await.unwrap();
+
+ let addr = listener.local_addr().unwrap();
+ let (shdn_tx, mut shdn_rx) = tokio::sync::watch::channel(false);
tokio::task::spawn(async move {
- server
- .with_graceful_shutdown(async move {
- let _ = shdn_rx.await;
- })
- .await
- .expect("server")
+ use hyper::server::conn::Http;
+ use hyper::service::service_fn;
+
+ loop {
+ tokio::select! {
+ res = listener.accept() => {
+ let (stream, _) = res.unwrap();
+
+ let service = service_fn(|_:Request<Body>| future::ok::<Response<Body>, hyper::Error>(Response::new(Body::empty())));
+
+ let mut shdn_rx = shdn_rx.clone();
+ tokio::task::spawn(async move {
+ let mut conn = Http::new().http2_only(true).serve_connection(stream, service);
+
+ tokio::select! {
+ res = &mut conn => {
+ res.unwrap();
+ }
+ _ = shdn_rx.changed() => {
+ Pin::new(&mut conn).graceful_shutdown();
+ conn.await.unwrap();
+ }
+ }
+ });
+ }
+ _ = shdn_rx.changed() => {
+ break;
+ }
+ }
+ }
});
let io = tcp_connect(&addr).await.expect("tcp connect");
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2796,7 +2969,7 @@ mod conn {
.expect("client poll ready after");
// Trigger the server shutdown...
- let _ = shdn_tx.send(());
+ let _ = shdn_tx.send(true);
// Allow time for graceful shutdown roundtrips...
tokio::time::sleep(Duration::from_millis(100)).await;
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -21,15 +21,14 @@ use h2::client::SendRequest;
use h2::{RecvStream, SendStream};
use http::header::{HeaderName, HeaderValue};
use http_body_util::{combinators::BoxBody, BodyExt, StreamBody};
-use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, ReadBuf};
-use tokio::net::{TcpListener, TcpStream as TkTcpStream};
+use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
+use tokio::io::{AsyncReadExt, AsyncWriteExt};
+use tokio::net::{TcpListener as TkTcpListener, TcpListener, TcpStream as TkTcpStream};
-use hyper::body::HttpBody as _;
-use hyper::client::Client;
+use hyper::body::HttpBody;
use hyper::server::conn::Http;
-use hyper::server::Server;
-use hyper::service::{make_service_fn, service_fn};
-use hyper::{Body, Request, Response, StatusCode, Version};
+use hyper::service::service_fn;
+use hyper::{Body, Method, Request, Response, StatusCode, Uri, Version};
mod support;
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -320,15 +319,11 @@ mod response_body_lengths {
#[tokio::test]
async fn http2_auto_response_with_known_length() {
- use http_body::Body;
-
let server = serve();
let addr_str = format!("http://{}", server.addr());
server.reply().body("Hello, World!");
- let client = Client::builder()
- .http2_only(true)
- .build_http::<hyper::Body>();
+ let client = TestClient::new().http2_only();
let uri = addr_str
.parse::<hyper::Uri>()
.expect("server addr should parse");
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -340,8 +335,6 @@ mod response_body_lengths {
#[tokio::test]
async fn http2_auto_response_with_conflicting_lengths() {
- use http_body::Body;
-
let server = serve();
let addr_str = format!("http://{}", server.addr());
server
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -349,9 +342,7 @@ mod response_body_lengths {
.header("content-length", "10")
.body("Hello, World!");
- let client = Client::builder()
- .http2_only(true)
- .build_http::<hyper::Body>();
+ let client = TestClient::new().http2_only();
let uri = addr_str
.parse::<hyper::Uri>()
.expect("server addr should parse");
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -363,15 +354,11 @@ mod response_body_lengths {
#[tokio::test]
async fn http2_implicit_empty_size_hint() {
- use http_body::Body;
-
let server = serve();
let addr_str = format!("http://{}", server.addr());
server.reply();
- let client = Client::builder()
- .http2_only(true)
- .build_http::<hyper::Body>();
+ let client = TestClient::new().http2_only();
let uri = addr_str
.parse::<hyper::Uri>()
.expect("server addr should parse");
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1480,8 +1467,6 @@ async fn header_read_timeout_slow_writes_multiple_requests() {
#[tokio::test]
async fn upgrades() {
- use tokio::io::{AsyncReadExt, AsyncWriteExt};
-
let _ = pretty_env_logger::try_init();
let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
let addr = listener.local_addr().unwrap();
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1539,8 +1524,6 @@ async fn upgrades() {
#[tokio::test]
async fn http_connect() {
- use tokio::io::{AsyncReadExt, AsyncWriteExt};
-
let _ = pretty_env_logger::try_init();
let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
let addr = listener.local_addr().unwrap();
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1675,15 +1658,19 @@ async fn upgrades_ignored() {
future::ok::<_, hyper::Error>(Response::new(hyper::Body::empty()))
});
- let (socket, _) = listener.accept().await.unwrap();
- Http::new()
- .serve_connection(socket, svc)
- .with_upgrades()
- .await
- .expect("server task");
+ loop {
+ let (socket, _) = listener.accept().await.unwrap();
+ tokio::task::spawn(async move {
+ Http::new()
+ .serve_connection(socket, svc)
+ .with_upgrades()
+ .await
+ .expect("server task");
+ });
+ }
});
- let client = hyper::Client::new();
+ let client = TestClient::new();
let url = format!("http://{}/", addr);
let make_req = || {
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1705,8 +1692,6 @@ async fn upgrades_ignored() {
#[tokio::test]
async fn http_connect_new() {
- use tokio::io::{AsyncReadExt, AsyncWriteExt};
-
let _ = pretty_env_logger::try_init();
let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
let addr = listener.local_addr().unwrap();
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1771,8 +1756,6 @@ async fn http_connect_new() {
#[tokio::test]
async fn h2_connect() {
- use tokio::io::{AsyncReadExt, AsyncWriteExt};
-
let _ = pretty_env_logger::try_init();
let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
let addr = listener.local_addr().unwrap();
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1843,7 +1826,6 @@ async fn h2_connect() {
async fn h2_connect_multiplex() {
use futures_util::stream::FuturesUnordered;
use futures_util::StreamExt;
- use tokio::io::{AsyncReadExt, AsyncWriteExt};
let _ = pretty_env_logger::try_init();
let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1954,8 +1936,6 @@ async fn h2_connect_multiplex() {
#[tokio::test]
async fn h2_connect_large_body() {
- use tokio::io::{AsyncReadExt, AsyncWriteExt};
-
let _ = pretty_env_logger::try_init();
let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
let addr = listener.local_addr().unwrap();
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2031,8 +2011,6 @@ async fn h2_connect_large_body() {
#[tokio::test]
async fn h2_connect_empty_frames() {
- use tokio::io::{AsyncReadExt, AsyncWriteExt};
-
let _ = pretty_env_logger::try_init();
let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
let addr = listener.local_addr().unwrap();
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2225,8 +2203,8 @@ fn http1_response_with_http2_version() {
server.reply().version(hyper::Version::HTTP_2);
+ let client = TestClient::new();
rt.block_on({
- let client = Client::new();
let uri = addr_str.parse().expect("server addr should parse");
client.get(uri)
})
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2240,10 +2218,8 @@ fn try_h2() {
let rt = support::runtime();
+ let client = TestClient::new().http2_only();
rt.block_on({
- let client = Client::builder()
- .http2_only(true)
- .build_http::<hyper::Body>();
let uri = addr_str.parse().expect("server addr should parse");
client.get(uri).map_ok(|_| ()).map_err(|_e| ())
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2260,10 +2236,8 @@ fn http1_only() {
let rt = support::runtime();
+ let client = TestClient::new().http2_only();
rt.block_on({
- let client = Client::builder()
- .http2_only(true)
- .build_http::<hyper::Body>();
let uri = addr_str.parse().expect("server addr should parse");
client.get(uri)
})
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2283,9 +2257,8 @@ async fn http2_service_error_sends_reset_reason() {
let uri = addr_str.parse().expect("server addr should parse");
dbg!("start");
- let err = dbg!(Client::builder()
- .http2_only(true)
- .build_http::<hyper::Body>()
+ let err = dbg!(TestClient::new()
+ .http2_only()
.get(uri)
.await
.expect_err("client.get"));
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2314,9 +2287,8 @@ fn http2_body_user_error_sends_reset_reason() {
let err: hyper::Error = rt
.block_on(async move {
- let client = Client::builder()
- .http2_only(true)
- .build_http::<hyper::Body>();
+ let client = TestClient::new().http2_only();
+
let uri = addr_str.parse().expect("server addr should parse");
let mut res = client.get(uri).await?;
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2363,22 +2335,33 @@ async fn http2_service_poll_ready_error_sends_goaway() {
let _ = pretty_env_logger::try_init();
- let server = hyper::Server::bind(&([127, 0, 0, 1], 0).into())
- .http2_only(true)
- .serve(make_service_fn(|_| async move {
- Ok::<_, BoxError>(Http2ReadyErrorSvc)
- }));
+ let listener = TkTcpListener::bind(SocketAddr::from(([127, 0, 0, 1], 0)))
+ .await
+ .unwrap();
- let addr_str = format!("http://{}", server.local_addr());
+ let addr_str = format!("http://{}", listener.local_addr().unwrap());
tokio::task::spawn(async move {
- server.await.expect("server");
+ loop {
+ tokio::select! {
+ res = listener.accept() => {
+ let (stream, _) = res.unwrap();
+
+ tokio::task::spawn(async move {
+ let mut http = Http::new();
+ http.http2_only(true);
+
+ let service = Http2ReadyErrorSvc;
+ http.serve_connection(stream, service).await.unwrap();
+ });
+ }
+ }
+ }
});
let uri = addr_str.parse().expect("server addr should parse");
- let err = dbg!(Client::builder()
- .http2_only(true)
- .build_http::<hyper::Body>()
+ let err = dbg!(TestClient::new()
+ .http2_only()
.get(uri)
.await
.expect_err("client.get should fail"));
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2948,9 +2931,9 @@ impl ServeOptions {
let (addr_tx, addr_rx) = mpsc::channel();
let (msg_tx, msg_rx) = mpsc::channel();
let (reply_tx, reply_rx) = spmc::channel();
- let (shutdown_tx, shutdown_rx) = oneshot::channel();
+ let (shutdown_tx, mut shutdown_rx) = oneshot::channel();
- let addr = ([127, 0, 0, 1], 0).into();
+ let addr: SocketAddr = ([127, 0, 0, 1], 0).into();
let thread_name = format!(
"test-server-{}",
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2961,36 +2944,46 @@ impl ServeOptions {
let thread = thread::Builder::new()
.name(thread_name)
.spawn(move || {
- support::runtime()
- .block_on(async move {
- let service = make_service_fn(|_| {
- let msg_tx = msg_tx.clone();
- let reply_rx = reply_rx.clone();
- future::ok::<_, BoxError>(TestService {
- tx: msg_tx,
- reply: reply_rx,
- })
- });
-
- let builder = Server::bind(&addr);
-
- #[cfg(feature = "http1")]
- let builder = builder
- .http1_only(_options.http1_only)
- .http1_keepalive(_options.keep_alive)
- .http1_pipeline_flush(_options.pipeline);
-
- let server = builder.serve(service);
-
- addr_tx.send(server.local_addr()).expect("server addr tx");
-
- server
- .with_graceful_shutdown(async {
- let _ = shutdown_rx.await;
- })
- .await
- })
- .expect("serve()");
+ support::runtime().block_on(async move {
+ let listener = TkTcpListener::bind(addr).await.unwrap();
+
+ addr_tx
+ .send(listener.local_addr().unwrap())
+ .expect("server addr tx");
+
+ loop {
+ let msg_tx = msg_tx.clone();
+ let reply_rx = reply_rx.clone();
+
+ tokio::select! {
+ res = listener.accept() => {
+ let (stream, _) = res.unwrap();
+
+ tokio::task::spawn(async move {
+ let mut http = Http::new();
+
+ #[cfg(feature = "http1")]
+ let http = http
+ .http1_only(_options.http1_only)
+ .http1_keep_alive(_options.keep_alive)
+ .pipeline_flush(_options.pipeline);
+
+ let msg_tx = msg_tx.clone();
+ let reply_rx = reply_rx.clone();
+ let service = TestService {
+ tx: msg_tx,
+ reply: reply_rx,
+ };
+
+ http.serve_connection(stream, service).await.unwrap();
+ });
+ }
+ _ = &mut shutdown_rx => {
+ break;
+ }
+ }
+ }
+ })
})
.expect("thread spawn");
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -3119,3 +3112,49 @@ impl Drop for Dropped {
self.0.store(true, Ordering::SeqCst);
}
}
+
+struct TestClient {
+ http2_only: bool,
+}
+
+impl TestClient {
+ fn new() -> Self {
+ Self { http2_only: false }
+ }
+
+ fn http2_only(mut self) -> Self {
+ self.http2_only = true;
+ self
+ }
+
+ async fn get(&self, uri: Uri) -> Result<Response<Body>, hyper::Error> {
+ self.request(
+ Request::builder()
+ .uri(uri)
+ .method(Method::GET)
+ .body(Body::empty())
+ .unwrap(),
+ )
+ .await
+ }
+
+ async fn request(&self, req: Request<Body>) -> Result<Response<Body>, hyper::Error> {
+ let host = req.uri().host().expect("uri has no host");
+ let port = req.uri().port_u16().expect("uri has no port");
+
+ let mut builder = hyper::client::conn::Builder::new();
+ builder.http2_only(self.http2_only);
+
+ let stream = TkTcpStream::connect(format!("{}:{}", host, port))
+ .await
+ .unwrap();
+
+ let (mut sender, conn) = builder.handshake(stream).await.unwrap();
+
+ tokio::task::spawn(async move {
+ conn.await.unwrap();
+ });
+
+ sender.send_request(req).await
+ }
+}
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -6,9 +6,12 @@ use std::sync::{
Arc, Mutex,
};
-use hyper::client::HttpConnector;
-use hyper::service::{make_service_fn, service_fn};
-use hyper::{Body, Client, Request, Response, Server, Version};
+use hyper::client::conn::Builder;
+use hyper::server::conn::Http;
+use tokio::net::{TcpListener, TcpStream};
+
+use hyper::service::service_fn;
+use hyper::{Body, Request, Response, Version};
pub use futures_util::{
future, FutureExt as _, StreamExt as _, TryFutureExt as _, TryStreamExt as _,
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -326,16 +329,20 @@ async fn async_test(cfg: __TestConfig) {
Version::HTTP_11
};
- let connector = HttpConnector::new();
- let client = Client::builder()
- .http2_only(cfg.client_version == 2)
- .build::<_, Body>(connector);
+ let http2_only = cfg.server_version == 2;
let serve_handles = Arc::new(Mutex::new(cfg.server_msgs));
+ let listener = TcpListener::bind(&SocketAddr::from(([127, 0, 0, 1], 0)))
+ .await
+ .unwrap();
+
+ let mut addr = listener.local_addr().unwrap();
+
let expected_connections = cfg.connections;
- let mut cnt = 0;
- let new_service = make_service_fn(move |_| {
+ tokio::task::spawn(async move {
+ let mut cnt = 0;
+
cnt += 1;
assert!(
cnt <= expected_connections,
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -344,98 +351,108 @@ async fn async_test(cfg: __TestConfig) {
cnt
);
- // Move a clone into the service_fn
- let serve_handles = serve_handles.clone();
- future::ok::<_, hyper::Error>(service_fn(move |req: Request<Body>| {
- let (sreq, sres) = serve_handles.lock().unwrap().remove(0);
-
- assert_eq!(req.uri().path(), sreq.uri, "client path");
- assert_eq!(req.method(), &sreq.method, "client method");
- assert_eq!(req.version(), version, "client version");
- for func in &sreq.headers {
- func(&req.headers());
- }
- let sbody = sreq.body;
- hyper::body::to_bytes(req).map_ok(move |body| {
- assert_eq!(body.as_ref(), sbody.as_slice(), "client body");
-
- let mut res = Response::builder()
- .status(sres.status)
- .body(Body::from(sres.body))
- .expect("Response::build");
- *res.headers_mut() = sres.headers;
- res
- })
- }))
- });
+ loop {
+ let (stream, _) = listener.accept().await.expect("server error");
- let server = hyper::Server::bind(&SocketAddr::from(([127, 0, 0, 1], 0)))
- .http2_only(cfg.server_version == 2)
- .serve(new_service);
+ // Move a clone into the service_fn
+ let serve_handles = serve_handles.clone();
+ let service = service_fn(move |req: Request<Body>| {
+ let (sreq, sres) = serve_handles.lock().unwrap().remove(0);
- let mut addr = server.local_addr();
+ assert_eq!(req.uri().path(), sreq.uri, "client path");
+ assert_eq!(req.method(), &sreq.method, "client method");
+ assert_eq!(req.version(), version, "client version");
+ for func in &sreq.headers {
+ func(&req.headers());
+ }
+ let sbody = sreq.body;
+ hyper::body::to_bytes(req).map_ok(move |body| {
+ assert_eq!(body.as_ref(), sbody.as_slice(), "client body");
+
+ let mut res = Response::builder()
+ .status(sres.status)
+ .body(Body::from(sres.body))
+ .expect("Response::build");
+ *res.headers_mut() = sres.headers;
+ res
+ })
+ });
- tokio::task::spawn(server.map(|result| {
- result.expect("server error");
- }));
+ tokio::task::spawn(async move {
+ Http::new()
+ .http2_only(http2_only)
+ .serve_connection(stream, service)
+ .await
+ .expect("server error");
+ });
+ }
+ });
if cfg.proxy {
let (proxy_addr, proxy) = naive_proxy(ProxyConfig {
connections: cfg.connections,
dst: addr,
version: cfg.server_version,
- });
+ })
+ .await;
tokio::task::spawn(proxy);
addr = proxy_addr;
}
- let make_request = Arc::new(
- move |client: &Client<HttpConnector>, creq: __CReq, cres: __CRes| {
- let uri = format!("http://{}{}", addr, creq.uri);
- let mut req = Request::builder()
- .method(creq.method)
- .uri(uri)
- //.headers(creq.headers)
- .body(creq.body.into())
- .expect("Request::build");
- *req.headers_mut() = creq.headers;
- let cstatus = cres.status;
- let cheaders = cres.headers;
- let cbody = cres.body;
-
- client
- .request(req)
- .and_then(move |res| {
- assert_eq!(res.status(), cstatus, "server status");
- assert_eq!(res.version(), version, "server version");
- for func in &cheaders {
- func(&res.headers());
- }
- hyper::body::to_bytes(res)
- })
- .map_ok(move |body| {
- assert_eq!(body.as_ref(), cbody.as_slice(), "server body");
- })
- .map(|res| res.expect("client error"))
- },
- );
+ let make_request = Arc::new(move |creq: __CReq, cres: __CRes| {
+ let uri = format!("http://{}{}", addr, creq.uri);
+ let mut req = Request::builder()
+ .method(creq.method)
+ .uri(uri)
+ //.headers(creq.headers)
+ .body(creq.body.into())
+ .expect("Request::build");
+ *req.headers_mut() = creq.headers;
+ let cstatus = cres.status;
+ let cheaders = cres.headers;
+ let cbody = cres.body;
+
+ async move {
+ let stream = TcpStream::connect(addr).await.unwrap();
+
+ let (mut sender, conn) = hyper::client::conn::Builder::new()
+ .http2_only(http2_only)
+ .handshake::<TcpStream, Body>(stream)
+ .await
+ .unwrap();
+
+ tokio::task::spawn(async move {
+ if let Err(err) = conn.await {
+ panic!("{:?}", err);
+ }
+ });
+
+ let res = sender.send_request(req).await.unwrap();
+
+ assert_eq!(res.status(), cstatus, "server status");
+ assert_eq!(res.version(), version, "server version");
+ for func in &cheaders {
+ func(&res.headers());
+ }
+
+ let body = hyper::body::to_bytes(res).await.unwrap();
+
+ assert_eq!(body.as_ref(), cbody.as_slice(), "server body");
+ }
+ });
let client_futures: Pin<Box<dyn Future<Output = ()> + Send>> = if cfg.parallel {
let mut client_futures = vec![];
for (creq, cres) in cfg.client_msgs {
- client_futures.push(make_request(&client, creq, cres));
+ client_futures.push(make_request(creq, cres));
}
- drop(client);
Box::pin(future::join_all(client_futures).map(|_| ()))
} else {
- let mut client_futures: Pin<Box<dyn Future<Output = Client<HttpConnector>> + Send>> =
- Box::pin(future::ready(client));
+ let mut client_futures: Pin<Box<dyn Future<Output = ()> + Send>> =
+ Box::pin(future::ready(()));
for (creq, cres) in cfg.client_msgs {
let mk_request = make_request.clone();
- client_futures = Box::pin(client_futures.then(move |client| {
- let fut = mk_request(&client, creq, cres);
- fut.map(move |()| client)
- }));
+ client_futures = Box::pin(client_futures.then(move |_| mk_request(creq, cres)));
}
Box::pin(client_futures.map(|_| ()))
};
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -449,27 +466,75 @@ struct ProxyConfig {
version: usize,
}
-fn naive_proxy(cfg: ProxyConfig) -> (SocketAddr, impl Future<Output = ()>) {
- let client = Client::builder()
- .http2_only(cfg.version == 2)
- .build_http::<Body>();
-
+async fn naive_proxy(cfg: ProxyConfig) -> (SocketAddr, impl Future<Output = ()>) {
let dst_addr = cfg.dst;
let max_connections = cfg.connections;
let counter = AtomicUsize::new(0);
+ let http2_only = cfg.version == 2;
+
+ let listener = TcpListener::bind(SocketAddr::from(([127, 0, 0, 1], 0)))
+ .await
+ .unwrap();
+
+ let proxy_addr = listener.local_addr().unwrap();
+
+ let fut = async move {
+ tokio::task::spawn(async move {
+ let prev = counter.fetch_add(1, Ordering::Relaxed);
+ assert!(max_connections > prev, "proxy max connections");
+
+ loop {
+ let (stream, _) = listener.accept().await.unwrap();
+
+ let service = service_fn(move |mut req| {
+ async move {
+ let uri = format!("http://{}{}", dst_addr, req.uri().path())
+ .parse()
+ .expect("proxy new uri parse");
+ *req.uri_mut() = uri;
+
+ // Make the client request
+ let uri = req.uri().host().expect("uri has no host");
+ let port = req.uri().port_u16().expect("uri has no port");
+
+ let stream = TcpStream::connect(format!("{}:{}", uri, port))
+ .await
+ .unwrap();
+
+ let mut builder = Builder::new();
+ builder.http2_only(http2_only);
+ let (mut sender, conn) = builder.handshake(stream).await.unwrap();
+
+ tokio::task::spawn(async move {
+ if let Err(err) = conn.await {
+ panic!("{:?}", err);
+ }
+ });
+
+ let resp = sender.send_request(req).await?;
+
+ let (mut parts, body) = resp.into_parts();
+
+ // Remove the Connection header for HTTP/1.1 proxy connections.
+ if !http2_only {
+ parts.headers.remove("Connection");
+ }
+
+ let mut builder = Response::builder().status(parts.status);
+ *builder.headers_mut().unwrap() = parts.headers;
+
+ Result::<Response<Body>, hyper::Error>::Ok(builder.body(body).unwrap())
+ }
+ });
+
+ Http::new()
+ .http2_only(http2_only)
+ .serve_connection(stream, service)
+ .await
+ .unwrap();
+ }
+ });
+ };
- let srv = Server::bind(&([127, 0, 0, 1], 0).into()).serve(make_service_fn(move |_| {
- let prev = counter.fetch_add(1, Ordering::Relaxed);
- assert!(max_connections > prev, "proxy max connections");
- let client = client.clone();
- future::ok::<_, hyper::Error>(service_fn(move |mut req| {
- let uri = format!("http://{}{}", dst_addr, req.uri().path())
- .parse()
- .expect("proxy new uri parse");
- *req.uri_mut() = uri;
- client.request(req)
- }))
- }));
- let proxy_addr = srv.local_addr();
- (proxy_addr, srv.map(|res| res.expect("proxy error")))
+ (proxy_addr, fut)
}
|
hyperium/hyper
|
6fd696e10974f10b2c6b9d16393bbbfa21c2333f
|
|
[
"2835"
] |
0.14
|
faf24c6ad8eee1c3d5ccc9a4d4835717b8e2903f
|
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -10,6 +8,8 @@ use bytes::BytesMut;
use http::header::ValueIter;
use http::header::{self, Entry, HeaderName, HeaderValue};
use http::{HeaderMap, Method, StatusCode, Version};
+#[cfg(all(feature = "server", feature = "runtime"))]
+use tokio::time::Instant;
use tracing::{debug, error, trace, trace_span, warn};
use crate::body::DecodedLength;
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -487,6 +487,10 @@ impl Server {
}
}
+ fn can_have_implicit_zero_content_length(method: &Option<Method>, status: StatusCode) -> bool {
+ Server::can_have_content_length(method, status) && method != &Some(Method::HEAD)
+ }
+
fn encode_headers_with_lower_case(
msg: Encode<'_, StatusCode>,
dst: &mut Vec<u8>,
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -839,7 +843,10 @@ impl Server {
}
}
None | Some(BodyLength::Known(0)) => {
- if Server::can_have_content_length(msg.req_method, msg.head.subject) {
+ if Server::can_have_implicit_zero_content_length(
+ msg.req_method,
+ msg.head.subject,
+ ) {
header_name_writer.write_full_header_line(
dst,
"content-length: 0\r\n",
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1033,7 +1040,7 @@ impl Http1Transaction for Client {
}
#[cfg(feature = "ffi")]
- if let Some(ref mut header_order) = header_order {
+ if let Some(ref mut header_order) = header_order {
header_order.append(&name);
}
|
2022-05-18T13:35:31Z
| 2,836
|
Server incorrectly adds content-length: 0 to Http1 Head responses
**Version**
0.14.18
**Platform**
```
Darwin C02DN4K3MD6R 21.4.0 Darwin Kernel Version 21.4.0: Fri Mar 18 00:45:05 PDT 2022; root:xnu-8020.101.4~15/RELEASE_X86_64 x86_64 i386 MacBookPro16,1 Darwin
```
**Description**
Hyper incorrectly sets `content-length: 0` on Head responses without content-length.
According to https://www.ietf.org/archive/id/draft-ietf-httpbis-semantics-19.html#name-content-length:
"A server MAY send a Content-Length header field in a response to a HEAD request ([Section 9.3.2](https://www.ietf.org/archive/id/draft-ietf-httpbis-semantics-19.html#HEAD)); a server MUST NOT send Content-Length in such a response unless its field value equals the decimal number of octets that would have been sent in the content of a response if the same request had used the GET method."
I tried this code:
```
use std::net::SocketAddr;
use hyper::{
header::HOST,
service::{make_service_fn, service_fn},
Body, Client, Request, Server, Version,
};
type Error = Box<dyn std::error::Error + Send + Sync + 'static>;
#[tokio::main]
async fn main() -> Result<(), Error> {
let on_request = service_fn(|r| {
let uri = format!("http://{}", r.headers()[HOST].to_str().unwrap());
let req = Request::builder()
.method(r.method())
.uri(uri)
.version(Version::HTTP_11)
.body(Body::empty())
.unwrap();
let client = Client::new();
client.request(req)
});
let make_svc = make_service_fn(move |_| async move { Ok::<_, Error>(on_request) });
let addr = SocketAddr::from(([127, 0, 0, 1], 3000));
let server = Server::bind(&addr).serve(make_svc);
if let Err(e) = server.await {
eprintln!("server error: {}", e);
}
Ok(())
}
```
and
```
curl http://localhost:3000 -H "Host: eaufavor.net" -I
curl http://eaufavor.net -I
```
I expected to see this happen:
Both curl responses have no `content-length` header.
Instead, this happened:
The response for the request that goes through hyper has `content-length: 0`
|
hyperium__hyper-2836
|
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1,8 +1,6 @@
use std::fmt::{self, Write};
use std::mem::MaybeUninit;
-#[cfg(all(feature = "server", feature = "runtime"))]
-use tokio::time::Instant;
#[cfg(any(test, feature = "server", feature = "ffi"))]
use bytes::Bytes;
use bytes::BytesMut;
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2421,6 +2421,26 @@ fn skips_content_length_and_body_for_304_responses() {
assert_eq!(lines.next(), None);
}
+#[test]
+fn no_implicit_zero_content_length_for_head_responses() {
+ let server = serve();
+ server.reply().status(hyper::StatusCode::OK).body([]);
+ let mut req = connect(server.addr());
+ req.write_all(
+ b"\
+ HEAD / HTTP/1.1\r\n\
+ Host: example.domain\r\n\
+ Connection: close\r\n\
+ \r\n\
+ ",
+ )
+ .unwrap();
+
+ let mut response = String::new();
+ req.read_to_string(&mut response).unwrap();
+ assert!(!response.contains("content-length:"));
+}
+
#[tokio::test]
async fn http2_keep_alive_detects_unresponsive_client() {
let _ = pretty_env_logger::try_init();
|
hyperium/hyper
|
a24f0c0af8e1f4c6b7cc3a47c83eb6e4af88aca6
|
|
[
"2649"
] |
0.14
|
287d712483aec6671427438d60ed2a72f856fd9f
|
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -86,7 +86,7 @@ impl<T, U> Sender<T, U> {
}
let (tx, rx) = oneshot::channel();
self.inner
- .send(Envelope(Some((val, Callback::Retry(tx)))))
+ .send(Envelope(Some((val, Callback::Retry(Some(tx))))))
.map(move |_| rx)
.map_err(|mut e| (e.0).0.take().expect("envelope not dropped").0)
}
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -97,7 +97,7 @@ impl<T, U> Sender<T, U> {
}
let (tx, rx) = oneshot::channel();
self.inner
- .send(Envelope(Some((val, Callback::NoRetry(tx)))))
+ .send(Envelope(Some((val, Callback::NoRetry(Some(tx))))))
.map(move |_| rx)
.map_err(|mut e| (e.0).0.take().expect("envelope not dropped").0)
}
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -124,7 +124,7 @@ impl<T, U> UnboundedSender<T, U> {
pub(crate) fn try_send(&mut self, val: T) -> Result<RetryPromise<T, U>, T> {
let (tx, rx) = oneshot::channel();
self.inner
- .send(Envelope(Some((val, Callback::Retry(tx)))))
+ .send(Envelope(Some((val, Callback::Retry(Some(tx))))))
.map(move |_| rx)
.map_err(|mut e| (e.0).0.take().expect("envelope not dropped").0)
}
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -198,33 +198,59 @@ impl<T, U> Drop for Envelope<T, U> {
}
pub(crate) enum Callback<T, U> {
- Retry(oneshot::Sender<Result<U, (crate::Error, Option<T>)>>),
- NoRetry(oneshot::Sender<Result<U, crate::Error>>),
+ Retry(Option<oneshot::Sender<Result<U, (crate::Error, Option<T>)>>>),
+ NoRetry(Option<oneshot::Sender<Result<U, crate::Error>>>),
+}
+
+impl<T, U> Drop for Callback<T, U> {
+ fn drop(&mut self) {
+ // FIXME(nox): What errors do we want here?
+ let error = crate::Error::new_user_dispatch_gone().with(if std::thread::panicking() {
+ "user code panicked"
+ } else {
+ "runtime dropped the dispatch task"
+ });
+
+ match self {
+ Callback::Retry(tx) => {
+ if let Some(tx) = tx.take() {
+ let _ = tx.send(Err((error, None)));
+ }
+ }
+ Callback::NoRetry(tx) => {
+ if let Some(tx) = tx.take() {
+ let _ = tx.send(Err(error));
+ }
+ }
+ }
+ }
}
impl<T, U> Callback<T, U> {
#[cfg(feature = "http2")]
pub(crate) fn is_canceled(&self) -> bool {
match *self {
- Callback::Retry(ref tx) => tx.is_closed(),
- Callback::NoRetry(ref tx) => tx.is_closed(),
+ Callback::Retry(Some(ref tx)) => tx.is_closed(),
+ Callback::NoRetry(Some(ref tx)) => tx.is_closed(),
+ _ => unreachable!(),
}
}
pub(crate) fn poll_canceled(&mut self, cx: &mut task::Context<'_>) -> Poll<()> {
match *self {
- Callback::Retry(ref mut tx) => tx.poll_closed(cx),
- Callback::NoRetry(ref mut tx) => tx.poll_closed(cx),
+ Callback::Retry(Some(ref mut tx)) => tx.poll_closed(cx),
+ Callback::NoRetry(Some(ref mut tx)) => tx.poll_closed(cx),
+ _ => unreachable!(),
}
}
- pub(crate) fn send(self, val: Result<U, (crate::Error, Option<T>)>) {
+ pub(crate) fn send(mut self, val: Result<U, (crate::Error, Option<T>)>) {
match self {
- Callback::Retry(tx) => {
- let _ = tx.send(val);
+ Callback::Retry(ref mut tx) => {
+ let _ = tx.take().unwrap().send(val);
}
- Callback::NoRetry(tx) => {
- let _ = tx.send(val.map_err(|e| e.0));
+ Callback::NoRetry(ref mut tx) => {
+ let _ = tx.take().unwrap().send(val.map_err(|e| e.0));
}
}
}
diff --git a/src/error.rs b/src/error.rs
--- a/src/error.rs
+++ b/src/error.rs
@@ -137,6 +137,10 @@ pub(super) enum User {
#[cfg(feature = "server")]
WithoutShutdownNonHttp1,
+ /// The dispatch task is gone.
+ #[cfg(feature = "client")]
+ DispatchGone,
+
/// User aborted in an FFI callback.
#[cfg(feature = "ffi")]
AbortedByCallback,
diff --git a/src/error.rs b/src/error.rs
--- a/src/error.rs
+++ b/src/error.rs
@@ -387,6 +391,11 @@ impl Error {
Error::new_user(User::AbortedByCallback)
}
+ #[cfg(feature = "client")]
+ pub(super) fn new_user_dispatch_gone() -> Error {
+ Error::new(Kind::User(User::DispatchGone))
+ }
+
#[cfg(feature = "http2")]
pub(super) fn new_h2(cause: ::h2::Error) -> Error {
if cause.is_io() {
diff --git a/src/error.rs b/src/error.rs
--- a/src/error.rs
+++ b/src/error.rs
@@ -483,6 +492,8 @@ impl Error {
Kind::User(User::WithoutShutdownNonHttp1) => {
"without_shutdown() called on a non-HTTP/1 connection"
}
+ #[cfg(feature = "client")]
+ Kind::User(User::DispatchGone) => "dispatch task is gone",
#[cfg(feature = "ffi")]
Kind::User(User::AbortedByCallback) => "operation aborted by an application callback",
}
diff --git a/src/ext.rs b/src/ext.rs
--- a/src/ext.rs
+++ b/src/ext.rs
@@ -40,6 +40,7 @@ impl Protocol {
self.inner.as_str()
}
+ #[cfg(feature = "server")]
pub(crate) fn from_inner(inner: h2::ext::Protocol) -> Self {
Self { inner }
}
|
Yea, we can do better. We probably can't completely remove the panic, since the `Canceled` error is part of the oneshot contract. But we can improve two the existing cases that _can_ happen, to try to eliminate it so that panic never _can_ happen.
- The runtime is dropped, killing the dispatch task.
- The dispatch task panics, due to user code.
We can add a `Guard` to the `oneshot::Sender` (`Callback`) to make it send an appropriate `hyper::Error` in drop, if not consumed. If `std::thread::panicking()`, then we can send an error about the dispatch task having panicked. If not, we can send an error about an unexpected runtime dropping the task.
I'm seeing this panic in real code, it would be nice to avoid it somehow. Not sure how it happens.

|
2022-03-21T14:23:11Z
| 2,790
|
Consider removing "dispatch dropped without returning error"
**Is your feature request related to a problem? Please describe.**
Hyper panics with "dispatch dropped without returning error" if the background dispatch task goes away, either because the runtime was dropped or some user-provided code such as a HttpBody implementation panicked
**Describe the solution you'd like**
Hyper should catch the panic and return a proper error instead of panicking with "dispatch dropped without returning error".
Cc @seanmonstar
|
hyperium__hyper-2790
|
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -3114,6 +3114,44 @@ mod conn {
done_tx.send(()).unwrap();
}
+ #[tokio::test]
+ async fn test_body_panics() {
+ use hyper::body::HttpBody;
+
+ let _ = pretty_env_logger::try_init();
+
+ let listener = TkTcpListener::bind(SocketAddr::from(([127, 0, 0, 1], 0)))
+ .await
+ .unwrap();
+ let addr = listener.local_addr().unwrap();
+
+ // spawn a server that reads but doesn't write
+ tokio::spawn(async move {
+ let sock = listener.accept().await.unwrap().0;
+ drain_til_eof(sock).await.expect("server read");
+ });
+
+ let io = tcp_connect(&addr).await.expect("tcp connect");
+
+ let (mut client, conn) = conn::Builder::new().handshake(io).await.expect("handshake");
+
+ tokio::spawn(async move {
+ conn.await.expect("client conn shouldn't error");
+ });
+
+ let req = Request::post("/a")
+ .body(Body::from("baguette").map_data::<_, &[u8]>(|_| panic!("oopsie")))
+ .unwrap();
+
+ let error = client.send_request(req).await.unwrap_err();
+
+ assert!(error.is_user());
+ assert_eq!(
+ error.to_string(),
+ "dispatch task is gone: user code panicked"
+ );
+ }
+
async fn drain_til_eof<T: AsyncRead + Unpin>(mut sock: T) -> io::Result<()> {
let mut buf = [0u8; 1024];
loop {
|
hyperium/hyper
|
a24f0c0af8e1f4c6b7cc3a47c83eb6e4af88aca6
|
[
"2712"
] |
0.14
|
ce8242571fc4dd6db8ccaad76eadab80ac009d32
|
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -204,9 +204,14 @@ impl Body {
#[cfg(all(feature = "http2", any(feature = "client", feature = "server")))]
pub(crate) fn h2(
recv: h2::RecvStream,
- content_length: DecodedLength,
+ mut content_length: DecodedLength,
ping: ping::Recorder,
) -> Self {
+ // If the stream is already EOS, then the "unknown length" is clearly
+ // actually ZERO.
+ if !content_length.is_exact() && recv.is_end_stream() {
+ content_length = DecodedLength::ZERO;
+ }
let body = Body::new(Kind::H2 {
ping,
content_length,
diff --git a/src/body/length.rs b/src/body/length.rs
--- a/src/body/length.rs
+++ b/src/body/length.rs
@@ -68,6 +68,16 @@ impl DecodedLength {
}
}
}
+
+ /// Returns whether this represents an exact length.
+ ///
+ /// This includes 0, which of course is an exact known length.
+ ///
+ /// It would return false if "chunked" or otherwise size-unknown.
+ #[cfg(feature = "http2")]
+ pub(crate) fn is_exact(&self) -> bool {
+ self.0 <= MAX_LEN
+ }
}
impl fmt::Debug for DecodedLength {
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -484,12 +484,13 @@ where
}
}
- // automatically set Content-Length from body...
- if let Some(len) = body.size_hint().exact() {
- headers::set_content_length_if_missing(res.headers_mut(), len);
- }
if !body.is_end_stream() {
+ // automatically set Content-Length from body...
+ if let Some(len) = body.size_hint().exact() {
+ headers::set_content_length_if_missing(res.headers_mut(), len);
+ }
+
let body_tx = reply!(me, res, false);
H2StreamState::Body {
pipe: PipeToSendStream::new(body, body_tx),
|
Whether the body size can be corrected based on is_end_stream to determine whether the request is empty.
```
/// Returns true if the receive half has reached the end of stream.
///
/// A return value of `true` means that calls to `poll` and `poll_trailers`
/// will both return `None`.
pub fn is_end_stream(&self) -> bool {
self.inner.inner.is_end_stream()
}
```
I'm not sure what you mean. The code you pasted looks correct. What exactly is wrong?
@seanmonstar When a client sends a request without content, the size_hint can obtain the correct body size of 0 in the HTTP1 scenario, but the size_hint cannot determine the body size of the request in the HTTP2 scenario.
```
DELETE /rest/test/nobody HTTP/1.1\r\n
User-Agent: insomnia/2021.6.0\r\n
Accept: */*\r\n\r\n
```
|
2021-12-06T18:44:07Z
| 2,715
|
body size
If a request without a body does not carry content-length or chunk, the size_hint method of the body can obtain the correct value in the HTTP1 scenario but cannot obtain the correct value in the H2 scenario.
```
fn size_hint(&self) -> SizeHint {
match self.kind {
Kind::Once(Some(ref val)) => SizeHint::with_exact(val.len() as u64),
//HTTP1
Kind::Once(None) => SizeHint::with_exact(0),
#[cfg(feature = "stream")]
Kind::Wrapped(..) => SizeHint::default(),
//HTTP2
Kind::Chan { content_length, .. } | Kind::H2 { content_length, .. } => {
let mut hint = SizeHint::default();
if let Some(content_length) = content_length.into_opt() {
hint.set_exact(content_length);
}
hint
}
}
}
```
|
hyperium__hyper-2715
|
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -361,6 +361,26 @@ mod response_body_lengths {
assert_eq!(res.headers().get("content-length").unwrap(), "10");
assert_eq!(res.body().size_hint().exact(), Some(10));
}
+
+ #[tokio::test]
+ async fn http2_implicit_empty_size_hint() {
+ use http_body::Body;
+
+ let server = serve();
+ let addr_str = format!("http://{}", server.addr());
+ server.reply();
+
+ let client = Client::builder()
+ .http2_only(true)
+ .build_http::<hyper::Body>();
+ let uri = addr_str
+ .parse::<hyper::Uri>()
+ .expect("server addr should parse");
+
+ let res = client.get(uri).await.unwrap();
+ assert_eq!(res.headers().get("content-length"), None);
+ assert_eq!(res.body().size_hint().exact(), Some(0));
+ }
}
#[test]
|
hyperium/hyper
|
a24f0c0af8e1f4c6b7cc3a47c83eb6e4af88aca6
|
[
"2701"
] |
0.14
|
1010614a0de032b43306ef9dc6da5a3e8ebf7a09
|
diff --git a/src/error.rs b/src/error.rs
--- a/src/error.rs
+++ b/src/error.rs
@@ -71,6 +71,7 @@ pub(super) enum Parse {
#[cfg(feature = "http1")]
VersionH2,
Uri,
+ UriTooLong,
Header(Header),
TooLarge,
Status,
diff --git a/src/error.rs b/src/error.rs
--- a/src/error.rs
+++ b/src/error.rs
@@ -152,7 +153,10 @@ impl Error {
/// Returns true if this was an HTTP parse error caused by a message that was too large.
pub fn is_parse_too_large(&self) -> bool {
- matches!(self.inner.kind, Kind::Parse(Parse::TooLarge))
+ matches!(
+ self.inner.kind,
+ Kind::Parse(Parse::TooLarge) | Kind::Parse(Parse::UriTooLong)
+ )
}
/// Returns true if this was an HTTP parse error caused by an invalid response status code or
diff --git a/src/error.rs b/src/error.rs
--- a/src/error.rs
+++ b/src/error.rs
@@ -398,6 +402,7 @@ impl Error {
#[cfg(feature = "http1")]
Kind::Parse(Parse::VersionH2) => "invalid HTTP version parsed (found HTTP2 preface)",
Kind::Parse(Parse::Uri) => "invalid URI",
+ Kind::Parse(Parse::UriTooLong) => "URI too long",
Kind::Parse(Parse::Header(Header::Token)) => "invalid HTTP header parsed",
#[cfg(feature = "http1")]
Kind::Parse(Parse::Header(Header::ContentLengthInvalid)) => {
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -25,6 +25,7 @@ use crate::proto::{BodyLength, MessageHead, RequestHead, RequestLine};
const MAX_HEADERS: usize = 100;
const AVERAGE_HEADER_SIZE: usize = 30; // totally scientific
+const MAX_URI_LEN: usize = (u16::MAX - 1) as usize;
macro_rules! header_name {
($bytes:expr) => {{
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -148,9 +149,13 @@ impl Http1Transaction for Server {
Ok(httparse::Status::Complete(parsed_len)) => {
trace!("Request.parse Complete({})", parsed_len);
len = parsed_len;
+ let uri = req.path.unwrap();
+ if uri.len() > MAX_URI_LEN {
+ return Err(Parse::UriTooLong);
+ }
subject = RequestLine(
Method::from_bytes(req.method.unwrap().as_bytes())?,
- req.path.unwrap().parse()?,
+ uri.parse()?,
);
version = if req.version.unwrap() == 1 {
keep_alive = true;
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -408,6 +413,7 @@ impl Http1Transaction for Server {
| Kind::Parse(Parse::Uri)
| Kind::Parse(Parse::Version) => StatusCode::BAD_REQUEST,
Kind::Parse(Parse::TooLarge) => StatusCode::REQUEST_HEADER_FIELDS_TOO_LARGE,
+ Kind::Parse(Parse::UriTooLong) => StatusCode::URI_TOO_LONG,
_ => return None,
};
|
Good catch! We could add a check for the length just before calling `Uri::from_shared`, and if too long, return a `Parse` error. An integration test in `tests/server.rs` would be good too.
|
2021-11-23T03:58:28Z
| 2,706
|
When the URL contains more than 65534 characters, the error code returned by Hyper is not 414.
The length of the from_shared method of the third-party library http::Uri is verified, The maximum length is (u16::MAX - 1).
|
hyperium__hyper-2706
|
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1025,6 +1025,23 @@ fn http_10_request_receives_http_10_response() {
assert_eq!(s(&buf[..expected.len()]), expected);
}
+#[test]
+fn http_11_uri_too_long() {
+ let server = serve();
+
+ let long_path = "a".repeat(65534);
+ let request_line = format!("GET /{} HTTP/1.1\r\n\r\n", long_path);
+
+ let mut req = connect(server.addr());
+ req.write_all(request_line.as_bytes()).unwrap();
+
+ let expected = "HTTP/1.1 414 URI Too Long\r\ncontent-length: 0\r\n";
+ let mut buf = [0; 256];
+ let n = req.read(&mut buf).unwrap();
+ assert!(n >= expected.len(), "read: {:?} >= {:?}", n, expected.len());
+ assert_eq!(s(&buf[..expected.len()]), expected);
+}
+
#[tokio::test]
async fn disable_keep_alive_mid_request() {
let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
|
hyperium/hyper
|
a24f0c0af8e1f4c6b7cc3a47c83eb6e4af88aca6
|
[
"2676"
] |
0.14
|
b5022f3854d1f9ed3e76233dd63f319efc3b8f47
|
diff --git a/src/client/client.rs b/src/client/client.rs
--- a/src/client/client.rs
+++ b/src/client/client.rs
@@ -1022,6 +1022,23 @@ impl Builder {
self
}
+ /// Set whether HTTP/1 connections should try to use vectored writes,
+ /// or always flatten into a single buffer.
+ ///
+ /// Note that setting this to false may mean more copies of body data,
+ /// but may also improve performance when an IO transport doesn't
+ /// support vectored writes well, such as most TLS implementations.
+ ///
+ /// Setting this to true will force hyper to use queued strategy
+ /// which may eliminate unnecessary cloning on some TLS backends
+ ///
+ /// Default is `auto`. In this mode hyper will try to guess which
+ /// mode to use
+ pub fn http1_writev(&mut self, enabled: bool) -> &mut Builder {
+ self.conn_builder.http1_writev(enabled);
+ self
+ }
+
/// Set whether HTTP/1 connections will write header names as title case at
/// the socket level.
///
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -153,6 +153,7 @@ pub struct Builder {
pub(super) exec: Exec,
h09_responses: bool,
h1_parser_config: ParserConfig,
+ h1_writev: Option<bool>,
h1_title_case_headers: bool,
h1_preserve_header_case: bool,
h1_read_buf_exact_size: Option<usize>,
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -535,6 +536,7 @@ impl Builder {
Builder {
exec: Exec::Default,
h09_responses: false,
+ h1_writev: None,
h1_read_buf_exact_size: None,
h1_parser_config: Default::default(),
h1_title_case_headers: false,
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -596,6 +598,23 @@ impl Builder {
self
}
+ /// Set whether HTTP/1 connections should try to use vectored writes,
+ /// or always flatten into a single buffer.
+ ///
+ /// Note that setting this to false may mean more copies of body data,
+ /// but may also improve performance when an IO transport doesn't
+ /// support vectored writes well, such as most TLS implementations.
+ ///
+ /// Setting this to true will force hyper to use queued strategy
+ /// which may eliminate unnecessary cloning on some TLS backends
+ ///
+ /// Default is `auto`. In this mode hyper will try to guess which
+ /// mode to use
+ pub fn http1_writev(&mut self, enabled: bool) -> &mut Builder {
+ self.h1_writev = Some(enabled);
+ self
+ }
+
/// Set whether HTTP/1 connections will write header names as title case at
/// the socket level.
///
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -837,6 +856,13 @@ impl Builder {
Proto::Http1 => {
let mut conn = proto::Conn::new(io);
conn.set_h1_parser_config(opts.h1_parser_config);
+ if let Some(writev) = opts.h1_writev {
+ if writev {
+ conn.set_write_strategy_queue();
+ } else {
+ conn.set_write_strategy_flatten();
+ }
+ }
if opts.h1_title_case_headers {
conn.set_title_case_headers();
}
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -85,6 +84,10 @@ where
self.io.set_read_buf_exact_size(sz);
}
+ pub(crate) fn set_write_strategy_flatten(&mut self) {
+ self.io.set_write_strategy_flatten();
+ }
+
#[cfg(feature = "client")]
pub(crate) fn set_h1_parser_config(&mut self, parser_config: ParserConfig) {
self.state.h1_parser_config = parser_config;
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -520,7 +521,6 @@ impl<B> WriteBuf<B>
where
B: Buf,
{
- #[cfg(feature = "server")]
fn set_strategy(&mut self, strategy: WriteStrategy) {
self.strategy = strategy;
}
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -103,6 +103,7 @@ pub struct Http<E = Exec> {
h1_keep_alive: bool,
h1_title_case_headers: bool,
h1_preserve_header_case: bool,
+ h1_writev: Option<bool>,
#[cfg(feature = "http2")]
h2_builder: proto::h2::server::Config,
mode: ConnectionMode,
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -284,6 +285,7 @@ impl Http {
h1_keep_alive: true,
h1_title_case_headers: false,
h1_preserve_header_case: false,
+ h1_writev: None,
#[cfg(feature = "http2")]
h2_builder: Default::default(),
mode: ConnectionMode::default(),
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -363,6 +365,26 @@ impl<E> Http<E> {
self
}
+ /// Set whether HTTP/1 connections should try to use vectored writes,
+ /// or always flatten into a single buffer.
+ ///
+ /// Note that setting this to false may mean more copies of body data,
+ /// but may also improve performance when an IO transport doesn't
+ /// support vectored writes well, such as most TLS implementations.
+ ///
+ /// Setting this to true will force hyper to use queued strategy
+ /// which may eliminate unnecessary cloning on some TLS backends
+ ///
+ /// Default is `auto`. In this mode hyper will try to guess which
+ /// mode to use
+ #[inline]
+ #[cfg(feature = "http1")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
+ pub fn http1_writev(&mut self, val: bool) -> &mut Self {
+ self.h1_writev = Some(val);
+ self
+ }
+
/// Sets whether HTTP2 is required.
///
/// Default is false
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -538,6 +560,7 @@ impl<E> Http<E> {
h1_keep_alive: self.h1_keep_alive,
h1_title_case_headers: self.h1_title_case_headers,
h1_preserve_header_case: self.h1_preserve_header_case,
+ h1_writev: self.h1_writev,
#[cfg(feature = "http2")]
h2_builder: self.h2_builder,
mode: self.mode,
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -599,6 +622,13 @@ impl<E> Http<E> {
if self.h1_preserve_header_case {
conn.set_preserve_header_case();
}
+ if let Some(writev) = self.h1_writev {
+ if writev {
+ conn.set_write_strategy_queue();
+ } else {
+ conn.set_write_strategy_flatten();
+ }
+ }
conn.set_flush_pipeline(self.pipeline_flush);
if let Some(max) = self.max_buf_size {
conn.set_max_buf_size(max);
diff --git a/src/server/server.rs b/src/server/server.rs
--- a/src/server/server.rs
+++ b/src/server/server.rs
@@ -258,6 +258,24 @@ impl<I, E> Builder<I, E> {
self
}
+ /// Set whether HTTP/1 connections should try to use vectored writes,
+ /// or always flatten into a single buffer.
+ ///
+ /// Note that setting this to false may mean more copies of body data,
+ /// but may also improve performance when an IO transport doesn't
+ /// support vectored writes well, such as most TLS implementations.
+ ///
+ /// Setting this to true will force hyper to use queued strategy
+ /// which may eliminate unnecessary cloning on some TLS backends
+ ///
+ /// Default is `auto`. In this mode hyper will try to guess which
+ /// mode to use
+ #[cfg(feature = "http1")]
+ pub fn http1_writev(mut self, enabled: bool) -> Self {
+ self.protocol.http1_writev(enabled);
+ self
+ }
+
/// Set whether HTTP/1 connections will write header names as title case at
/// the socket level.
///
|
2021-10-27T12:13:00Z
| 2,677
|
Provide a way to force the queue write strategy, again
Once upon a time, there was [a way](https://github.com/hyperium/hyper/issues/2282) to force Hyper to use the queue write strategy.
This functionality was then [removed](https://github.com/hyperium/hyper/pull/2338) from Hyper 0.14.0.
Unfortunately, the heuristic to decide whether to use flatten or queue strategy has its limits. In my case, I need to buffer the output of an egress `HttpBody` implementation until a few MBs, and then I pass that big buffer to Hyper to write it over a h1 stream. When I pass it, Hyper is using the flatten strategy, so [it ends up copying the entire big buffer again](https://github.com/hyperium/hyper/blob/b5022f3854d1f9ed3e76233dd63f319efc3b8f47/src/proto/h1/io.rs#L548).
I'll make a PR to restore the builder setter to force queue strategy.
|
hyperium__hyper-2677
|
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -71,7 +71,6 @@ where
self.io.set_flush_pipeline(enabled);
}
- #[cfg(test)]
pub(crate) fn set_write_strategy_queue(&mut self) {
self.io.set_write_strategy_queue();
}
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -97,16 +97,17 @@ where
self.read_buf_strategy = ReadStrategy::Exact(sz);
}
- #[cfg(feature = "server")]
- fn set_write_strategy_flatten(&mut self) {
+ pub(crate) fn set_write_strategy_flatten(&mut self) {
// this should always be called only at construction time,
// so this assert is here to catch myself
debug_assert!(self.write_buf.queue.bufs_cnt() == 0);
self.write_buf.set_strategy(WriteStrategy::Flatten);
}
- #[cfg(test)]
pub(crate) fn set_write_strategy_queue(&mut self) {
+ // this should always be called only at construction time,
+ // so this assert is here to catch myself
+ debug_assert!(self.write_buf.queue.bufs_cnt() == 0);
self.write_buf.set_strategy(WriteStrategy::Queue);
}
|
hyperium/hyper
|
a24f0c0af8e1f4c6b7cc3a47c83eb6e4af88aca6
|
|
[
"2457",
"2661"
] |
0.14
|
d0b1d9ed3a10013ab356bc7d9b283e179857a672
|
diff --git a/Cargo.toml b/Cargo.toml
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -100,6 +100,7 @@ stream = []
runtime = [
"tcp",
"tokio/rt",
+ "tokio/time",
]
tcp = [
"socket2",
diff --git a/src/error.rs b/src/error.rs
--- a/src/error.rs
+++ b/src/error.rs
@@ -44,6 +44,9 @@ pub(super) enum Kind {
#[cfg(any(feature = "http1", feature = "http2"))]
#[cfg(feature = "server")]
Accept,
+ /// User took too long to send headers
+ #[cfg(all(feature = "http1", feature = "server", feature = "runtime"))]
+ HeaderTimeout,
/// Error while reading a body from connection.
#[cfg(any(feature = "http1", feature = "http2", feature = "stream"))]
Body,
diff --git a/src/error.rs b/src/error.rs
--- a/src/error.rs
+++ b/src/error.rs
@@ -310,6 +313,11 @@ impl Error {
Error::new_user(User::UnexpectedHeader)
}
+ #[cfg(all(feature = "http1", feature = "server", feature = "runtime"))]
+ pub(super) fn new_header_timeout() -> Error {
+ Error::new(Kind::HeaderTimeout)
+ }
+
#[cfg(any(feature = "http1", feature = "http2"))]
#[cfg(feature = "client")]
pub(super) fn new_user_unsupported_version() -> Error {
diff --git a/src/error.rs b/src/error.rs
--- a/src/error.rs
+++ b/src/error.rs
@@ -419,6 +427,8 @@ impl Error {
#[cfg(any(feature = "http1", feature = "http2"))]
#[cfg(feature = "server")]
Kind::Accept => "error accepting connection",
+ #[cfg(all(feature = "http1", feature = "server", feature = "runtime"))]
+ Kind::HeaderTimeout => "read header from client timeout",
#[cfg(any(feature = "http1", feature = "http2", feature = "stream"))]
Kind::Body => "error reading a body from connection",
#[cfg(any(feature = "http1", feature = "http2"))]
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -1,12 +1,15 @@
use std::fmt;
use std::io;
use std::marker::PhantomData;
+use std::time::Duration;
use bytes::{Buf, Bytes};
use http::header::{HeaderValue, CONNECTION};
use http::{HeaderMap, Method, Version};
use httparse::ParserConfig;
use tokio::io::{AsyncRead, AsyncWrite};
+#[cfg(all(feature = "server", feature = "runtime"))]
+use tokio::time::Sleep;
use tracing::{debug, error, trace};
use super::io::Buffered;
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -47,6 +50,12 @@ where
keep_alive: KA::Busy,
method: None,
h1_parser_config: ParserConfig::default(),
+ #[cfg(all(feature = "server", feature = "runtime"))]
+ h1_header_read_timeout: None,
+ #[cfg(all(feature = "server", feature = "runtime"))]
+ h1_header_read_timeout_fut: None,
+ #[cfg(all(feature = "server", feature = "runtime"))]
+ h1_header_read_timeout_running: false,
preserve_header_case: false,
title_case_headers: false,
h09_responses: false,
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -106,6 +115,11 @@ where
self.state.h09_responses = true;
}
+ #[cfg(all(feature = "server", feature = "runtime"))]
+ pub(crate) fn set_http1_header_read_timeout(&mut self, val: Duration) {
+ self.state.h1_header_read_timeout = Some(val);
+ }
+
#[cfg(feature = "server")]
pub(crate) fn set_allow_half_close(&mut self) {
self.state.allow_half_close = true;
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -178,6 +192,12 @@ where
cached_headers: &mut self.state.cached_headers,
req_method: &mut self.state.method,
h1_parser_config: self.state.h1_parser_config.clone(),
+ #[cfg(all(feature = "server", feature = "runtime"))]
+ h1_header_read_timeout: self.state.h1_header_read_timeout,
+ #[cfg(all(feature = "server", feature = "runtime"))]
+ h1_header_read_timeout_fut: &mut self.state.h1_header_read_timeout_fut,
+ #[cfg(all(feature = "server", feature = "runtime"))]
+ h1_header_read_timeout_running: &mut self.state.h1_header_read_timeout_running,
preserve_header_case: self.state.preserve_header_case,
h09_responses: self.state.h09_responses,
#[cfg(feature = "ffi")]
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -798,6 +818,12 @@ struct State {
/// a body or not.
method: Option<Method>,
h1_parser_config: ParserConfig,
+ #[cfg(all(feature = "server", feature = "runtime"))]
+ h1_header_read_timeout: Option<Duration>,
+ #[cfg(all(feature = "server", feature = "runtime"))]
+ h1_header_read_timeout_fut: Option<Pin<Box<Sleep>>>,
+ #[cfg(all(feature = "server", feature = "runtime"))]
+ h1_header_read_timeout_running: bool,
preserve_header_case: bool,
title_case_headers: bool,
h09_responses: bool,
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -3,10 +3,15 @@ use std::fmt;
use std::io::{self, IoSlice};
use std::marker::Unpin;
use std::mem::MaybeUninit;
+use std::future::Future;
+#[cfg(all(feature = "server", feature = "runtime"))]
+use std::time::Duration;
+#[cfg(all(feature = "server", feature = "runtime"))]
+use tokio::time::Instant;
use bytes::{Buf, BufMut, Bytes, BytesMut};
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
-use tracing::{debug, trace};
+use tracing::{debug, warn, trace};
use super::{Http1Transaction, ParseContext, ParsedMessage};
use crate::common::buf::BufList;
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -181,6 +186,12 @@ where
cached_headers: parse_ctx.cached_headers,
req_method: parse_ctx.req_method,
h1_parser_config: parse_ctx.h1_parser_config.clone(),
+ #[cfg(all(feature = "server", feature = "runtime"))]
+ h1_header_read_timeout: parse_ctx.h1_header_read_timeout,
+ #[cfg(all(feature = "server", feature = "runtime"))]
+ h1_header_read_timeout_fut: parse_ctx.h1_header_read_timeout_fut,
+ #[cfg(all(feature = "server", feature = "runtime"))]
+ h1_header_read_timeout_running: parse_ctx.h1_header_read_timeout_running,
preserve_header_case: parse_ctx.preserve_header_case,
h09_responses: parse_ctx.h09_responses,
#[cfg(feature = "ffi")]
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -191,6 +202,16 @@ where
)? {
Some(msg) => {
debug!("parsed {} headers", msg.head.headers.len());
+
+ #[cfg(all(feature = "server", feature = "runtime"))]
+ {
+ *parse_ctx.h1_header_read_timeout_running = false;
+
+ if let Some(h1_header_read_timeout_fut) = parse_ctx.h1_header_read_timeout_fut {
+ // Reset the timer in order to avoid woken up when the timeout finishes
+ h1_header_read_timeout_fut.as_mut().reset(Instant::now() + Duration::from_secs(30 * 24 * 60 * 60));
+ }
+ }
return Poll::Ready(Ok(msg));
}
None => {
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -199,6 +220,18 @@ where
debug!("max_buf_size ({}) reached, closing", max);
return Poll::Ready(Err(crate::Error::new_too_large()));
}
+
+ #[cfg(all(feature = "server", feature = "runtime"))]
+ if *parse_ctx.h1_header_read_timeout_running {
+ if let Some(h1_header_read_timeout_fut) = parse_ctx.h1_header_read_timeout_fut {
+ if Pin::new( h1_header_read_timeout_fut).poll(cx).is_ready() {
+ *parse_ctx.h1_header_read_timeout_running = false;
+
+ warn!("read header from client timeout");
+ return Poll::Ready(Err(crate::Error::new_header_timeout()))
+ }
+ }
+ }
}
}
if ready!(self.poll_read_from_io(cx)).map_err(crate::Error::new_io)? == 0 {
diff --git a/src/proto/h1/mod.rs b/src/proto/h1/mod.rs
--- a/src/proto/h1/mod.rs
+++ b/src/proto/h1/mod.rs
@@ -1,6 +1,11 @@
+use std::pin::Pin;
+use std::time::Duration;
+
use bytes::BytesMut;
use http::{HeaderMap, Method};
use httparse::ParserConfig;
+#[cfg(all(feature = "server", feature = "runtime"))]
+use tokio::time::Sleep;
use crate::body::DecodedLength;
use crate::proto::{BodyLength, MessageHead};
diff --git a/src/proto/h1/mod.rs b/src/proto/h1/mod.rs
--- a/src/proto/h1/mod.rs
+++ b/src/proto/h1/mod.rs
@@ -72,6 +77,12 @@ pub(crate) struct ParseContext<'a> {
cached_headers: &'a mut Option<HeaderMap>,
req_method: &'a mut Option<Method>,
h1_parser_config: ParserConfig,
+ #[cfg(all(feature = "server", feature = "runtime"))]
+ h1_header_read_timeout: Option<Duration>,
+ #[cfg(all(feature = "server", feature = "runtime"))]
+ h1_header_read_timeout_fut: &'a mut Option<Pin<Box<Sleep>>>,
+ #[cfg(all(feature = "server", feature = "runtime"))]
+ h1_header_read_timeout_running: &'a mut bool,
preserve_header_case: bool,
h09_responses: bool,
#[cfg(feature = "ffi")]
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -69,6 +71,25 @@ where
let span = trace_span!("parse_headers");
let _s = span.enter();
+
+ #[cfg(all(feature = "server", feature = "runtime"))]
+ if !*ctx.h1_header_read_timeout_running {
+ if let Some(h1_header_read_timeout) = ctx.h1_header_read_timeout {
+ let deadline = Instant::now() + h1_header_read_timeout;
+
+ match ctx.h1_header_read_timeout_fut {
+ Some(h1_header_read_timeout_fut) => {
+ debug!("resetting h1 header read timeout timer");
+ h1_header_read_timeout_fut.as_mut().reset(deadline);
+ },
+ None => {
+ debug!("setting h1 header read timeout timer");
+ *ctx.h1_header_read_timeout_fut = Some(Box::pin(tokio::time::sleep_until(deadline)));
+ }
+ }
+ }
+}
+
T::parse(bytes, ctx)
}
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -50,7 +50,6 @@
use std::marker::PhantomData;
#[cfg(feature = "tcp")]
use std::net::SocketAddr;
-#[cfg(all(feature = "runtime", feature = "http2"))]
use std::time::Duration;
#[cfg(feature = "http2")]
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -103,6 +102,8 @@ pub struct Http<E = Exec> {
h1_keep_alive: bool,
h1_title_case_headers: bool,
h1_preserve_header_case: bool,
+ #[cfg(all(feature = "http1", feature = "runtime"))]
+ h1_header_read_timeout: Option<Duration>,
h1_writev: Option<bool>,
#[cfg(feature = "http2")]
h2_builder: proto::h2::server::Config,
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -285,6 +286,8 @@ impl Http {
h1_keep_alive: true,
h1_title_case_headers: false,
h1_preserve_header_case: false,
+ #[cfg(all(feature = "http1", feature = "runtime"))]
+ h1_header_read_timeout: None,
h1_writev: None,
#[cfg(feature = "http2")]
h2_builder: Default::default(),
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -372,6 +375,17 @@ impl<E> Http<E> {
self
}
+ /// Set a timeout for reading client request headers. If a client does not
+ /// transmit the entire header within this time, the connection is closed.
+ ///
+ /// Default is None.
+ #[cfg(all(feature = "http1", feature = "runtime"))]
+ #[cfg_attr(docsrs, doc(cfg(all(feature = "http1", feature = "runtime"))))]
+ pub fn http1_header_read_timeout(&mut self, read_timeout: Duration) -> &mut Self {
+ self.h1_header_read_timeout = Some(read_timeout);
+ self
+ }
+
/// Set whether HTTP/1 connections should try to use vectored writes,
/// or always flatten into a single buffer.
///
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -567,6 +581,8 @@ impl<E> Http<E> {
h1_keep_alive: self.h1_keep_alive,
h1_title_case_headers: self.h1_title_case_headers,
h1_preserve_header_case: self.h1_preserve_header_case,
+ #[cfg(all(feature = "http1", feature = "runtime"))]
+ h1_header_read_timeout: self.h1_header_read_timeout,
h1_writev: self.h1_writev,
#[cfg(feature = "http2")]
h2_builder: self.h2_builder,
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -629,6 +645,10 @@ impl<E> Http<E> {
if self.h1_preserve_header_case {
conn.set_preserve_header_case();
}
+ #[cfg(all(feature = "http1", feature = "runtime"))]
+ if let Some(header_read_timeout) = self.h1_header_read_timeout {
+ conn.set_http1_header_read_timeout(header_read_timeout);
+ }
if let Some(writev) = self.h1_writev {
if writev {
conn.set_write_strategy_queue();
diff --git a/src/server/server.rs b/src/server/server.rs
--- a/src/server/server.rs
+++ b/src/server/server.rs
@@ -1,7 +1,7 @@
use std::fmt;
#[cfg(feature = "tcp")]
use std::net::{SocketAddr, TcpListener as StdTcpListener};
-#[cfg(feature = "tcp")]
+#[cfg(any(feature = "tcp", feature = "http1"))]
use std::time::Duration;
#[cfg(all(feature = "tcp", any(feature = "http1", feature = "http2")))]
diff --git a/src/server/server.rs b/src/server/server.rs
--- a/src/server/server.rs
+++ b/src/server/server.rs
@@ -309,6 +309,17 @@ impl<I, E> Builder<I, E> {
self
}
+ /// Set a timeout for reading client request headers. If a client does not
+ /// transmit the entire header within this time, the connection is closed.
+ ///
+ /// Default is None.
+ #[cfg(all(feature = "http1", feature = "runtime"))]
+ #[cfg_attr(docsrs, doc(cfg(all(feature = "http1", feature = "runtime"))))]
+ pub fn http1_header_read_timeout(mut self, read_timeout: Duration) -> Self {
+ self.protocol.http1_header_read_timeout(read_timeout);
+ self
+ }
+
/// Sets whether HTTP/1 is required.
///
/// Default is `false`.
|
2021-10-26T09:05:32Z
| 2,675
|
Defends against slow HTTP attacks.
I think hyper needs to add capabilities such as client_header_timeout and client_body_timeout to defend against slow attacks.
The Nginx defense method is as follows:
https://www.nginx.com/blog/mitigating-ddos-attacks-with-nginx-and-nginx-plus/#client_header_timeout
feat(h1): add h1_header_read_timeout
The original MR cannot be opened. #2655
|
hyperium__hyper-2675
|
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -693,6 +726,9 @@ mod tests {
cached_headers: &mut None,
req_method: &mut None,
h1_parser_config: Default::default(),
+ h1_header_read_timeout: None,
+ h1_header_read_timeout_fut: &mut None,
+ h1_header_read_timeout_running: &mut false,
preserve_header_case: false,
h09_responses: false,
#[cfg(feature = "ffi")]
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1,6 +1,8 @@
use std::fmt::{self, Write};
use std::mem::MaybeUninit;
+#[cfg(all(feature = "server", feature = "runtime"))]
+use tokio::time::Instant;
#[cfg(any(test, feature = "server", feature = "ffi"))]
use bytes::Bytes;
use bytes::BytesMut;
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1428,6 +1449,9 @@ mod tests {
cached_headers: &mut None,
req_method: &mut method,
h1_parser_config: Default::default(),
+ h1_header_read_timeout: None,
+ h1_header_read_timeout_fut: &mut None,
+ h1_header_read_timeout_running: &mut false,
preserve_header_case: false,
h09_responses: false,
#[cfg(feature = "ffi")]
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1455,6 +1479,9 @@ mod tests {
cached_headers: &mut None,
req_method: &mut Some(crate::Method::GET),
h1_parser_config: Default::default(),
+ h1_header_read_timeout: None,
+ h1_header_read_timeout_fut: &mut None,
+ h1_header_read_timeout_running: &mut false,
preserve_header_case: false,
h09_responses: false,
#[cfg(feature = "ffi")]
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1477,6 +1504,9 @@ mod tests {
cached_headers: &mut None,
req_method: &mut None,
h1_parser_config: Default::default(),
+ h1_header_read_timeout: None,
+ h1_header_read_timeout_fut: &mut None,
+ h1_header_read_timeout_running: &mut false,
preserve_header_case: false,
h09_responses: false,
#[cfg(feature = "ffi")]
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1497,6 +1527,9 @@ mod tests {
cached_headers: &mut None,
req_method: &mut Some(crate::Method::GET),
h1_parser_config: Default::default(),
+ h1_header_read_timeout: None,
+ h1_header_read_timeout_fut: &mut None,
+ h1_header_read_timeout_running: &mut false,
preserve_header_case: false,
h09_responses: true,
#[cfg(feature = "ffi")]
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1519,6 +1552,9 @@ mod tests {
cached_headers: &mut None,
req_method: &mut Some(crate::Method::GET),
h1_parser_config: Default::default(),
+ h1_header_read_timeout: None,
+ h1_header_read_timeout_fut: &mut None,
+ h1_header_read_timeout_running: &mut false,
preserve_header_case: false,
h09_responses: false,
#[cfg(feature = "ffi")]
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1545,6 +1581,9 @@ mod tests {
cached_headers: &mut None,
req_method: &mut Some(crate::Method::GET),
h1_parser_config,
+ h1_header_read_timeout: None,
+ h1_header_read_timeout_fut: &mut None,
+ h1_header_read_timeout_running: &mut false,
preserve_header_case: false,
h09_responses: false,
#[cfg(feature = "ffi")]
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1568,6 +1607,9 @@ mod tests {
cached_headers: &mut None,
req_method: &mut Some(crate::Method::GET),
h1_parser_config: Default::default(),
+ h1_header_read_timeout: None,
+ h1_header_read_timeout_fut: &mut None,
+ h1_header_read_timeout_running: &mut false,
preserve_header_case: false,
h09_responses: false,
#[cfg(feature = "ffi")]
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1586,6 +1628,9 @@ mod tests {
cached_headers: &mut None,
req_method: &mut None,
h1_parser_config: Default::default(),
+ h1_header_read_timeout: None,
+ h1_header_read_timeout_fut: &mut None,
+ h1_header_read_timeout_running: &mut false,
preserve_header_case: true,
h09_responses: false,
#[cfg(feature = "ffi")]
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1625,6 +1670,9 @@ mod tests {
cached_headers: &mut None,
req_method: &mut None,
h1_parser_config: Default::default(),
+ h1_header_read_timeout: None,
+ h1_header_read_timeout_fut: &mut None,
+ h1_header_read_timeout_running: &mut false,
preserve_header_case: false,
h09_responses: false,
#[cfg(feature = "ffi")]
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1645,6 +1693,9 @@ mod tests {
cached_headers: &mut None,
req_method: &mut None,
h1_parser_config: Default::default(),
+ h1_header_read_timeout: None,
+ h1_header_read_timeout_fut: &mut None,
+ h1_header_read_timeout_running: &mut false,
preserve_header_case: false,
h09_responses: false,
#[cfg(feature = "ffi")]
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1874,6 +1925,9 @@ mod tests {
cached_headers: &mut None,
req_method: &mut Some(Method::GET),
h1_parser_config: Default::default(),
+ h1_header_read_timeout: None,
+ h1_header_read_timeout_fut: &mut None,
+ h1_header_read_timeout_running: &mut false,
preserve_header_case: false,
h09_responses: false,
#[cfg(feature = "ffi")]
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1894,6 +1948,9 @@ mod tests {
cached_headers: &mut None,
req_method: &mut Some(m),
h1_parser_config: Default::default(),
+ h1_header_read_timeout: None,
+ h1_header_read_timeout_fut: &mut None,
+ h1_header_read_timeout_running: &mut false,
preserve_header_case: false,
h09_responses: false,
#[cfg(feature = "ffi")]
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1914,6 +1971,9 @@ mod tests {
cached_headers: &mut None,
req_method: &mut Some(Method::GET),
h1_parser_config: Default::default(),
+ h1_header_read_timeout: None,
+ h1_header_read_timeout_fut: &mut None,
+ h1_header_read_timeout_running: &mut false,
preserve_header_case: false,
h09_responses: false,
#[cfg(feature = "ffi")]
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -2411,6 +2471,9 @@ mod tests {
cached_headers: &mut None,
req_method: &mut Some(Method::GET),
h1_parser_config: Default::default(),
+ h1_header_read_timeout: None,
+ h1_header_read_timeout_fut: &mut None,
+ h1_header_read_timeout_running: &mut false,
preserve_header_case: false,
h09_responses: false,
#[cfg(feature = "ffi")]
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -2495,6 +2558,9 @@ mod tests {
cached_headers: &mut headers,
req_method: &mut None,
h1_parser_config: Default::default(),
+ h1_header_read_timeout: None,
+ h1_header_read_timeout_fut: &mut None,
+ h1_header_read_timeout_running: &mut false,
preserve_header_case: false,
h09_responses: false,
#[cfg(feature = "ffi")]
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -2535,6 +2601,9 @@ mod tests {
cached_headers: &mut headers,
req_method: &mut None,
h1_parser_config: Default::default(),
+ h1_header_read_timeout: None,
+ h1_header_read_timeout_fut: &mut None,
+ h1_header_read_timeout_running: &mut false,
preserve_header_case: false,
h09_responses: false,
#[cfg(feature = "ffi")]
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1261,6 +1261,127 @@ fn header_name_too_long() {
assert!(s(&buf[..n]).starts_with("HTTP/1.1 431 Request Header Fields Too Large\r\n"));
}
+#[tokio::test]
+async fn header_read_timeout_slow_writes() {
+ let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
+ let addr = listener.local_addr().unwrap();
+
+ thread::spawn(move || {
+ let mut tcp = connect(&addr);
+ tcp.write_all(
+ b"\
+ GET / HTTP/1.1\r\n\
+ ",
+ )
+ .expect("write 1");
+ thread::sleep(Duration::from_secs(3));
+ tcp.write_all(
+ b"\
+ Something: 1\r\n\
+ \r\n\
+ ",
+ )
+ .expect("write 2");
+ thread::sleep(Duration::from_secs(6));
+ tcp.write_all(
+ b"\
+ Works: 0\r\n\
+ ",
+ )
+ .expect_err("write 3");
+ });
+
+ let (socket, _) = listener.accept().await.unwrap();
+ let conn = Http::new()
+ .http1_header_read_timeout(Duration::from_secs(5))
+ .serve_connection(
+ socket,
+ service_fn(|_| {
+ let res = Response::builder()
+ .status(200)
+ .body(hyper::Body::empty())
+ .unwrap();
+ future::ready(Ok::<_, hyper::Error>(res))
+ }),
+ );
+ conn.without_shutdown().await.expect_err("header timeout");
+}
+
+#[tokio::test]
+async fn header_read_timeout_slow_writes_multiple_requests() {
+ let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
+ let addr = listener.local_addr().unwrap();
+
+ thread::spawn(move || {
+ let mut tcp = connect(&addr);
+
+ tcp.write_all(
+ b"\
+ GET / HTTP/1.1\r\n\
+ ",
+ )
+ .expect("write 1");
+ thread::sleep(Duration::from_secs(3));
+ tcp.write_all(
+ b"\
+ Something: 1\r\n\
+ \r\n\
+ ",
+ )
+ .expect("write 2");
+
+ thread::sleep(Duration::from_secs(3));
+
+ tcp.write_all(
+ b"\
+ GET / HTTP/1.1\r\n\
+ ",
+ )
+ .expect("write 3");
+ thread::sleep(Duration::from_secs(3));
+ tcp.write_all(
+ b"\
+ Something: 1\r\n\
+ \r\n\
+ ",
+ )
+ .expect("write 4");
+
+ thread::sleep(Duration::from_secs(6));
+
+ tcp.write_all(
+ b"\
+ GET / HTTP/1.1\r\n\
+ Something: 1\r\n\
+ \r\n\
+ ",
+ )
+ .expect("write 5");
+ thread::sleep(Duration::from_secs(6));
+ tcp.write_all(
+ b"\
+ Works: 0\r\n\
+ ",
+ )
+ .expect_err("write 6");
+ });
+
+ let (socket, _) = listener.accept().await.unwrap();
+ let conn = Http::new()
+ .http1_header_read_timeout(Duration::from_secs(5))
+ .serve_connection(
+ socket,
+ service_fn(|_| {
+ let res = Response::builder()
+ .status(200)
+ .body(hyper::Body::empty())
+ .unwrap();
+ future::ready(Ok::<_, hyper::Error>(res))
+ }),
+ );
+ conn.without_shutdown().await.expect_err("header timeout");
+}
+
#[tokio::test]
async fn upgrades() {
use tokio::io::{AsyncReadExt, AsyncWriteExt};
|
hyperium/hyper
|
a24f0c0af8e1f4c6b7cc3a47c83eb6e4af88aca6
|
|
[
"2643"
] |
0.14
|
e3ab409808a6aa06ebacaaa936cb926785913d24
|
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -461,7 +466,7 @@ where
}
}
match self.state.writing {
- Writing::Init => true,
+ Writing::Init => self.io.can_headers_buf(),
_ => false,
}
}
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -121,6 +126,15 @@ where
self.read_buf.capacity() - self.read_buf.len()
}
+ /// Return whether we can append to the headers buffer.
+ ///
+ /// Reasons we can't:
+ /// - The write buf is in queue mode, and some of the past body is still
+ /// needing to be flushed.
+ pub(crate) fn can_headers_buf(&self) -> bool {
+ !self.write_buf.queue.has_remaining()
+ }
+
pub(crate) fn headers_buf(&mut self) -> &mut Vec<u8> {
let buf = self.write_buf.headers_mut();
&mut buf.bytes
|
Thanks for the report, yep, definitely a bug! I'll take a look-see.
Huh, so I set up an example just as you did, but with hyper as the server, and instead of a panic, I see an IO error since the hyper server closed the connection after writing the 200 OK. I'm guessing actix does something a little different, and that's why we haven't noticed here. Looking further....
Yea, that was it. Found it. Cool, _cool_...
Oh, nice! Seems like you won't need it anymore, but here I set up a project to reproduce the bug: https://github.com/Niklas-Eckhoff/hyper-bug-report
|
2021-09-14T23:23:02Z
| 2,646
|
Panic via reqwest when sending large body in debug mode
**Version**
hyper-0.14.12
reqwest-0.11.4 (blocking)
**Platform**
Manjaro Linux x86_64
**Description**
When sending large requests **repeatedly too fast**, a panic happens in debug mode.
I tried this code in debug mode:
```
fn main() {
let client = reqwest::blocking::Client::new();
let body = "x".repeat(10_000_000);
loop {
match client
.post("http://localhost:8000")
.body(body.clone())
.send()
{
Ok(_) => println!("Success!"),
Err(e) => eprintln!("{}", e),
}
}
}
```
For the server, I used this actix-web script:
```
use actix_web::{post, App, HttpResponse, HttpServer, Responder};
use std::io;
#[post("/")]
async fn root() -> impl Responder {
HttpResponse::Ok()
}
#[actix_web::main]
async fn main() -> io::Result<()> {
HttpServer::new(|| App::new().service(root))
.bind("localhost:8000")?
.run()
.await
}
```
I expected to see this happen: "Success!" is repeatedly output
Instead, this happened: "Success!" is output once, then the following:
```
thread 'reqwest-internal-sync-runtime' panicked at 'assertion failed: !self.queue.has_remaining()', /home/<redacted>/.cargo/registry/src/github.com-1ecc6299db9ec823/hyper-0.14.12/src/proto/h1/io.rs:559:9
note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace
thread 'reqwest-internal-sync-runtime' panicked at 'dispatch dropped without returning error', /home/<redacted>/.cargo/registry/src/github.com-1ecc6299db9ec823/hyper-0.14.12/src/client/conn.rs:316:35
thread 'main' panicked at 'event loop thread panicked', /home/<redacted>/.cargo/registry/src/github.com-1ecc6299db9ec823/reqwest-0.11.4/src/blocking/client.rs:1028:5
```
Since [this here](https://github.com/hyperium/hyper/blob/master/src/client/conn.rs#L316-L317) states that if this panic is triggered there is a "definite bug", I thought this should be reported.
|
hyperium__hyper-2646
|
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -71,6 +71,11 @@ where
self.io.set_flush_pipeline(enabled);
}
+ #[cfg(test)]
+ pub(crate) fn set_write_strategy_queue(&mut self) {
+ self.io.set_write_strategy_queue();
+ }
+
pub(crate) fn set_max_buf_size(&mut self, max: usize) {
self.io.set_max_buf_size(max);
}
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -665,7 +665,6 @@ mod tests {
// Block at 0 for now, but we will release this response before
// the request is ready to write later...
- //let io = AsyncIo::new_buf(b"HTTP/1.1 200 OK\r\n\r\n".to_vec(), 0);
let (mut tx, rx) = crate::client::dispatch::channel();
let conn = Conn::<_, bytes::Bytes, ClientTransaction>::new(io);
let mut dispatcher = Dispatcher::new(Client::new(rx), conn);
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -692,6 +691,34 @@ mod tests {
});
}
+ #[tokio::test]
+ async fn client_flushing_is_not_ready_for_next_request() {
+ let _ = pretty_env_logger::try_init();
+
+ let (io, _handle) = tokio_test::io::Builder::new()
+ .write(b"POST / HTTP/1.1\r\ncontent-length: 4\r\n\r\n")
+ .read(b"HTTP/1.1 200 OK\r\ncontent-length: 0\r\n\r\n")
+ .wait(std::time::Duration::from_secs(2))
+ .build_with_handle();
+
+ let (mut tx, rx) = crate::client::dispatch::channel();
+ let mut conn = Conn::<_, bytes::Bytes, ClientTransaction>::new(io);
+ conn.set_write_strategy_queue();
+
+ let dispatcher = Dispatcher::new(Client::new(rx), conn);
+ let _dispatcher = tokio::spawn(async move { dispatcher.await });
+
+ let req = crate::Request::builder()
+ .method("POST")
+ .body(crate::Body::from("reee"))
+ .unwrap();
+
+ let res = tx.try_send(req).unwrap().await.expect("response");
+ drop(res);
+
+ assert!(!tx.is_ready());
+ }
+
#[tokio::test]
async fn body_empty_chunks_ignored() {
let _ = pretty_env_logger::try_init();
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -98,13 +98,18 @@ where
}
#[cfg(feature = "server")]
- pub(crate) fn set_write_strategy_flatten(&mut self) {
+ fn set_write_strategy_flatten(&mut self) {
// this should always be called only at construction time,
// so this assert is here to catch myself
debug_assert!(self.write_buf.queue.bufs_cnt() == 0);
self.write_buf.set_strategy(WriteStrategy::Flatten);
}
+ #[cfg(test)]
+ pub(crate) fn set_write_strategy_queue(&mut self) {
+ self.write_buf.set_strategy(WriteStrategy::Queue);
+ }
+
pub(crate) fn read_buf(&self) -> &[u8] {
self.read_buf.as_ref()
}
|
hyperium/hyper
|
a24f0c0af8e1f4c6b7cc3a47c83eb6e4af88aca6
|
[
"2575"
] |
0.14
|
08b2138e4036c5ae3e4c6f5c85763d45fb869922
|
diff --git a/capi/include/hyper.h b/capi/include/hyper.h
--- a/capi/include/hyper.h
+++ b/capi/include/hyper.h
@@ -371,6 +371,17 @@ void hyper_clientconn_options_exec(struct hyper_clientconn_options *opts,
*/
enum hyper_code hyper_clientconn_options_http2(struct hyper_clientconn_options *opts, int enabled);
+/*
+ Set the whether to include a copy of the raw headers in responses
+ received on this connection.
+
+ Pass `0` to disable, `1` to enable.
+
+ If enabled, see `hyper_response_headers_raw()` for usage.
+ */
+enum hyper_code hyper_clientconn_options_headers_raw(struct hyper_clientconn_options *opts,
+ int enabled);
+
/*
Frees a `hyper_error`.
*/
diff --git a/capi/include/hyper.h b/capi/include/hyper.h
--- a/capi/include/hyper.h
+++ b/capi/include/hyper.h
@@ -475,6 +486,21 @@ const uint8_t *hyper_response_reason_phrase(const struct hyper_response *resp);
*/
size_t hyper_response_reason_phrase_len(const struct hyper_response *resp);
+/*
+ Get a reference to the full raw headers of this response.
+
+ You must have enabled `hyper_clientconn_options_headers_raw()`, or this
+ will return NULL.
+
+ The returned `hyper_buf *` is just a reference, owned by the response.
+ You need to make a copy if you wish to use it after freeing the
+ response.
+
+ The buffer is not null-terminated, see the `hyper_buf` functions for
+ getting the bytes and length.
+ */
+const struct hyper_buf *hyper_response_headers_raw(const struct hyper_response *resp);
+
/*
Get the HTTP version used by this response.
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -147,6 +147,8 @@ pub struct Builder {
h1_preserve_header_case: bool,
h1_read_buf_exact_size: Option<usize>,
h1_max_buf_size: Option<usize>,
+ #[cfg(feature = "ffi")]
+ h1_headers_raw: bool,
#[cfg(feature = "http2")]
h2_builder: proto::h2::client::Config,
version: Proto,
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -528,6 +530,8 @@ impl Builder {
h1_title_case_headers: false,
h1_preserve_header_case: false,
h1_max_buf_size: None,
+ #[cfg(feature = "ffi")]
+ h1_headers_raw: false,
#[cfg(feature = "http2")]
h2_builder: Default::default(),
#[cfg(feature = "http1")]
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -588,6 +592,12 @@ impl Builder {
self
}
+ #[cfg(feature = "ffi")]
+ pub(crate) fn http1_headers_raw(&mut self, enabled: bool) -> &mut Self {
+ self.h1_headers_raw = enabled;
+ self
+ }
+
/// Sets whether HTTP2 is required.
///
/// Default is false.
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -773,6 +783,10 @@ impl Builder {
if opts.h09_responses {
conn.set_h09_responses();
}
+
+ #[cfg(feature = "ffi")]
+ conn.set_raw_headers(opts.h1_headers_raw);
+
if let Some(sz) = opts.h1_read_buf_exact_size {
conn.set_read_buf_exact_size(sz);
}
diff --git a/src/ffi/body.rs b/src/ffi/body.rs
--- a/src/ffi/body.rs
+++ b/src/ffi/body.rs
@@ -14,7 +14,7 @@ use crate::body::{Body, Bytes, HttpBody as _};
pub struct hyper_body(pub(super) Body);
/// A buffer of bytes that is sent or received on a `hyper_body`.
-pub struct hyper_buf(pub(super) Bytes);
+pub struct hyper_buf(pub(crate) Bytes);
pub(crate) struct UserBody {
data_func: hyper_body_data_callback,
diff --git a/src/ffi/client.rs b/src/ffi/client.rs
--- a/src/ffi/client.rs
+++ b/src/ffi/client.rs
@@ -159,3 +159,17 @@ ffi_fn! {
}
}
}
+
+ffi_fn! {
+ /// Set the whether to include a copy of the raw headers in responses
+ /// received on this connection.
+ ///
+ /// Pass `0` to disable, `1` to enable.
+ ///
+ /// If enabled, see `hyper_response_headers_raw()` for usage.
+ fn hyper_clientconn_options_headers_raw(opts: *mut hyper_clientconn_options, enabled: c_int) -> hyper_code {
+ let opts = unsafe { &mut *opts };
+ opts.builder.http1_headers_raw(enabled != 0);
+ hyper_code::HYPERE_OK
+ }
+}
diff --git a/src/ffi/http_types.rs b/src/ffi/http_types.rs
--- a/src/ffi/http_types.rs
+++ b/src/ffi/http_types.rs
@@ -2,7 +2,7 @@ use bytes::Bytes;
use libc::{c_int, size_t};
use std::ffi::c_void;
-use super::body::hyper_body;
+use super::body::{hyper_body, hyper_buf};
use super::error::hyper_code;
use super::task::{hyper_task_return_type, AsTaskType};
use super::HYPER_ITER_CONTINUE;
diff --git a/src/ffi/http_types.rs b/src/ffi/http_types.rs
--- a/src/ffi/http_types.rs
+++ b/src/ffi/http_types.rs
@@ -27,6 +27,8 @@ pub struct hyper_headers {
#[derive(Debug)]
pub(crate) struct ReasonPhrase(pub(crate) Bytes);
+pub(crate) struct RawHeaders(pub(crate) hyper_buf);
+
// ===== impl hyper_request =====
ffi_fn! {
diff --git a/src/ffi/http_types.rs b/src/ffi/http_types.rs
--- a/src/ffi/http_types.rs
+++ b/src/ffi/http_types.rs
@@ -178,6 +180,26 @@ ffi_fn! {
}
}
+ffi_fn! {
+ /// Get a reference to the full raw headers of this response.
+ ///
+ /// You must have enabled `hyper_clientconn_options_headers_raw()`, or this
+ /// will return NULL.
+ ///
+ /// The returned `hyper_buf *` is just a reference, owned by the response.
+ /// You need to make a copy if you wish to use it after freeing the
+ /// response.
+ ///
+ /// The buffer is not null-terminated, see the `hyper_buf` functions for
+ /// getting the bytes and length.
+ fn hyper_response_headers_raw(resp: *const hyper_response) -> *const hyper_buf {
+ match unsafe { &*resp }.0.extensions().get::<RawHeaders>() {
+ Some(raw) => &raw.0,
+ None => std::ptr::null(),
+ }
+ } ?= std::ptr::null()
+}
+
ffi_fn! {
/// Get the HTTP version used by this response.
///
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -49,6 +49,8 @@ where
preserve_header_case: false,
title_case_headers: false,
h09_responses: false,
+ #[cfg(feature = "ffi")]
+ raw_headers: false,
notify_read: false,
reading: Reading::Init,
writing: Writing::Init,
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -98,6 +100,11 @@ where
self.state.allow_half_close = true;
}
+ #[cfg(feature = "ffi")]
+ pub(crate) fn set_raw_headers(&mut self, enabled: bool) {
+ self.state.raw_headers = enabled;
+ }
+
pub(crate) fn into_inner(self) -> (I, Bytes) {
self.io.into_inner()
}
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -162,6 +169,8 @@ where
h1_parser_config: self.state.h1_parser_config.clone(),
preserve_header_case: self.state.preserve_header_case,
h09_responses: self.state.h09_responses,
+ #[cfg(feature = "ffi")]
+ raw_headers: self.state.raw_headers,
}
)) {
Ok(msg) => msg,
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -766,6 +775,8 @@ struct State {
preserve_header_case: bool,
title_case_headers: bool,
h09_responses: bool,
+ #[cfg(feature = "ffi")]
+ raw_headers: bool,
/// Set to true when the Dispatcher should poll read operations
/// again. See the `maybe_notify` method for more.
notify_read: bool,
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -167,6 +167,8 @@ where
h1_parser_config: parse_ctx.h1_parser_config.clone(),
preserve_header_case: parse_ctx.preserve_header_case,
h09_responses: parse_ctx.h09_responses,
+ #[cfg(feature = "ffi")]
+ raw_headers: parse_ctx.raw_headers,
},
)? {
Some(msg) => {
diff --git a/src/proto/h1/mod.rs b/src/proto/h1/mod.rs
--- a/src/proto/h1/mod.rs
+++ b/src/proto/h1/mod.rs
@@ -74,6 +74,8 @@ pub(crate) struct ParseContext<'a> {
h1_parser_config: ParserConfig,
preserve_header_case: bool,
h09_responses: bool,
+ #[cfg(feature = "ffi")]
+ raw_headers: bool,
}
/// Passed to Http1Transaction::encode
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -970,6 +970,11 @@ impl Http1Transaction for Client {
#[cfg(not(feature = "ffi"))]
drop(reason);
+ #[cfg(feature = "ffi")]
+ if ctx.raw_headers {
+ extensions.insert(crate::ffi::RawHeaders(crate::ffi::hyper_buf(slice)));
+ }
+
let head = MessageHead {
version,
subject: status,
|
2021-06-11T21:13:44Z
| 2,576
|
C API: Provide a way to get the raw response header bytes
What if... the C API provided an option to ask for the original header data, as a single unaltered buffer? It'd be cheap in hyper, just a ref count bump on that buffer, and in curl you wouldn't need to re-stitch together the headers into lines, or have a discrepancy of whether it was `\r\n` or just `\n`, and the order would what was sent.
It could just be an option set on either `hyper_request *` or `hyper_clientconn_options *` (perhaps the conn since you only need to set it once per connection). And then you could fetch it from a `hyper_response *`:
```c
struct hyper_buf *hyper_response_headers_raw(struct hyper_response *resp);
```
|
hyperium__hyper-2576
|
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -675,6 +677,8 @@ mod tests {
h1_parser_config: Default::default(),
preserve_header_case: false,
h09_responses: false,
+ #[cfg(feature = "ffi")]
+ raw_headers: false,
};
assert!(buffered
.parse::<ClientTransaction>(cx, parse_ctx)
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1424,6 +1429,8 @@ mod tests {
h1_parser_config: Default::default(),
preserve_header_case: false,
h09_responses: false,
+ #[cfg(feature = "ffi")]
+ raw_headers: false,
},
)
.unwrap()
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1447,6 +1454,8 @@ mod tests {
h1_parser_config: Default::default(),
preserve_header_case: false,
h09_responses: false,
+ #[cfg(feature = "ffi")]
+ raw_headers: false,
};
let msg = Client::parse(&mut raw, ctx).unwrap().unwrap();
assert_eq!(raw.len(), 0);
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1465,6 +1474,8 @@ mod tests {
h1_parser_config: Default::default(),
preserve_header_case: false,
h09_responses: false,
+ #[cfg(feature = "ffi")]
+ raw_headers: false,
};
Server::parse(&mut raw, ctx).unwrap_err();
}
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1481,6 +1492,8 @@ mod tests {
h1_parser_config: Default::default(),
preserve_header_case: false,
h09_responses: true,
+ #[cfg(feature = "ffi")]
+ raw_headers: false,
};
let msg = Client::parse(&mut raw, ctx).unwrap().unwrap();
assert_eq!(raw, H09_RESPONSE);
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1499,6 +1512,8 @@ mod tests {
h1_parser_config: Default::default(),
preserve_header_case: false,
h09_responses: false,
+ #[cfg(feature = "ffi")]
+ raw_headers: false,
};
Client::parse(&mut raw, ctx).unwrap_err();
assert_eq!(raw, H09_RESPONSE);
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1521,6 +1536,8 @@ mod tests {
h1_parser_config,
preserve_header_case: false,
h09_responses: false,
+ #[cfg(feature = "ffi")]
+ raw_headers: false,
};
let msg = Client::parse(&mut raw, ctx).unwrap().unwrap();
assert_eq!(raw.len(), 0);
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1540,6 +1557,8 @@ mod tests {
h1_parser_config: Default::default(),
preserve_header_case: false,
h09_responses: false,
+ #[cfg(feature = "ffi")]
+ raw_headers: false,
};
Client::parse(&mut raw, ctx).unwrap_err();
}
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1554,6 +1573,8 @@ mod tests {
h1_parser_config: Default::default(),
preserve_header_case: true,
h09_responses: false,
+ #[cfg(feature = "ffi")]
+ raw_headers: false,
};
let parsed_message = Server::parse(&mut raw, ctx).unwrap().unwrap();
let orig_headers = parsed_message
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1589,6 +1610,8 @@ mod tests {
h1_parser_config: Default::default(),
preserve_header_case: false,
h09_responses: false,
+ #[cfg(feature = "ffi")]
+ raw_headers: false,
},
)
.expect("parse ok")
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1605,6 +1628,8 @@ mod tests {
h1_parser_config: Default::default(),
preserve_header_case: false,
h09_responses: false,
+ #[cfg(feature = "ffi")]
+ raw_headers: false,
},
)
.expect_err(comment)
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1820,6 +1845,8 @@ mod tests {
h1_parser_config: Default::default(),
preserve_header_case: false,
h09_responses: false,
+ #[cfg(feature = "ffi")]
+ raw_headers: false,
}
)
.expect("parse ok")
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1836,6 +1863,8 @@ mod tests {
h1_parser_config: Default::default(),
preserve_header_case: false,
h09_responses: false,
+ #[cfg(feature = "ffi")]
+ raw_headers: false,
},
)
.expect("parse ok")
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1852,6 +1881,8 @@ mod tests {
h1_parser_config: Default::default(),
preserve_header_case: false,
h09_responses: false,
+ #[cfg(feature = "ffi")]
+ raw_headers: false,
},
)
.expect_err("parse should err")
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -2335,6 +2366,8 @@ mod tests {
h1_parser_config: Default::default(),
preserve_header_case: false,
h09_responses: false,
+ #[cfg(feature = "ffi")]
+ raw_headers: false,
},
)
.expect("parse ok")
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -2415,6 +2448,8 @@ mod tests {
h1_parser_config: Default::default(),
preserve_header_case: false,
h09_responses: false,
+ #[cfg(feature = "ffi")]
+ raw_headers: false,
},
)
.unwrap()
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -2451,6 +2486,8 @@ mod tests {
h1_parser_config: Default::default(),
preserve_header_case: false,
h09_responses: false,
+ #[cfg(feature = "ffi")]
+ raw_headers: false,
},
)
.unwrap()
|
hyperium/hyper
|
a24f0c0af8e1f4c6b7cc3a47c83eb6e4af88aca6
|
|
[
"2532"
] |
0.14
|
684f2fa76d44fa2b1b063ad0443a1b0d16dfad0e
|
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -115,17 +111,23 @@ impl Http1Transaction for Server {
// but we *never* read any of it until after httparse has assigned
// values into it. By not zeroing out the stack memory, this saves
// a good ~5% on pipeline benchmarks.
- let mut headers_indices: [HeaderIndices; MAX_HEADERS] = unsafe { mem::uninitialized() };
+ let mut headers_indices: [MaybeUninit<HeaderIndices>; MAX_HEADERS] = unsafe {
+ // SAFETY: We can go safely from MaybeUninit array to array of MaybeUninit
+ MaybeUninit::uninit().assume_init()
+ };
{
- let mut headers: [httparse::Header<'_>; MAX_HEADERS] = unsafe { mem::uninitialized() };
+ /* SAFETY: it is safe to go from MaybeUninit array to array of MaybeUninit */
+ let mut headers: [MaybeUninit<httparse::Header<'_>>; MAX_HEADERS] = unsafe {
+ MaybeUninit::uninit().assume_init()
+ };
trace!(
"Request.parse([Header; {}], [u8; {}])",
headers.len(),
buf.len()
);
- let mut req = httparse::Request::new(&mut headers);
+ let mut req = httparse::Request::new(&mut []);
let bytes = buf.as_ref();
- match req.parse(bytes) {
+ match req.parse_with_uninit_headers(bytes, &mut headers) {
Ok(httparse::Status::Complete(parsed_len)) => {
trace!("Request.parse Complete({})", parsed_len);
len = parsed_len;
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -194,6 +196,8 @@ impl Http1Transaction for Server {
headers.reserve(headers_len);
for header in &headers_indices[..headers_len] {
+ // SAFETY: array is valid up to `headers_len`
+ let header = unsafe { &*header.as_ptr() };
let name = header_name!(&slice[header.name.0..header.name.1]);
let value = header_value!(slice.slice(header.value.0..header.value.1));
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -867,18 +871,24 @@ impl Http1Transaction for Client {
// Loop to skip information status code headers (100 Continue, etc).
loop {
// Unsafe: see comment in Server Http1Transaction, above.
- let mut headers_indices: [HeaderIndices; MAX_HEADERS] = unsafe { mem::uninitialized() };
+ let mut headers_indices: [MaybeUninit<HeaderIndices>; MAX_HEADERS] = unsafe {
+ // SAFETY: We can go safely from MaybeUninit array to array of MaybeUninit
+ MaybeUninit::uninit().assume_init()
+ };
let (len, status, reason, version, headers_len) = {
- let mut headers: [httparse::Header<'_>; MAX_HEADERS] =
- unsafe { mem::uninitialized() };
+ // SAFETY: We can go safely from MaybeUninit array to array of MaybeUninit
+ let mut headers: [MaybeUninit<httparse::Header<'_>>; MAX_HEADERS] =
+ unsafe { MaybeUninit::uninit().assume_init() };
trace!(
"Response.parse([Header; {}], [u8; {}])",
headers.len(),
buf.len()
);
- let mut res = httparse::Response::new(&mut headers);
+ let mut res = httparse::Response::new(&mut []);
let bytes = buf.as_ref();
- match ctx.h1_parser_config.parse_response(&mut res, bytes) {
+ match ctx.h1_parser_config
+ .parse_response_with_uninit_headers(&mut res, bytes, &mut headers)
+ {
Ok(httparse::Status::Complete(len)) => {
trace!("Response.parse Complete({})", len);
let status = StatusCode::from_u16(res.code.unwrap())?;
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -934,6 +944,8 @@ impl Http1Transaction for Client {
headers.reserve(headers_len);
for header in &headers_indices[..headers_len] {
+ // SAFETY: array is valid up to `headers_len`
+ let header = unsafe { &*header.as_ptr() };
let name = header_name!(&slice[header.name.0..header.name.1]);
let value = header_value!(slice.slice(header.value.0..header.value.1));
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1288,7 +1300,7 @@ struct HeaderIndices {
fn record_header_indices(
bytes: &[u8],
headers: &[httparse::Header<'_>],
- indices: &mut [HeaderIndices],
+ indices: &mut [MaybeUninit<HeaderIndices>],
) -> Result<(), crate::error::Parse> {
let bytes_ptr = bytes.as_ptr() as usize;
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1299,10 +1311,19 @@ fn record_header_indices(
}
let name_start = header.name.as_ptr() as usize - bytes_ptr;
let name_end = name_start + header.name.len();
- indices.name = (name_start, name_end);
let value_start = header.value.as_ptr() as usize - bytes_ptr;
let value_end = value_start + header.value.len();
- indices.value = (value_start, value_end);
+
+ // FIXME(maybe_uninit_extra)
+ // FIXME(addr_of)
+ // Currently we don't have `ptr::addr_of_mut` in stable rust or
+ // MaybeUninit::write, so this is some way of assigning into a MaybeUninit
+ // safely
+ let new_header_indices = HeaderIndices {
+ name: (name_start, name_end),
+ value: (value_start, value_end),
+ };
+ *indices = MaybeUninit::new(new_header_indices);
}
Ok(())
|
Good call!
Refactored the code.
|
2021-05-11T12:01:41Z
| 2,545
|
Replace usage of `mem::uninitialized` with `MaybeUninit`
When the following comment was added in #1845, hyper's minimum supported rust version was `1.27.0`
https://github.com/hyperium/hyper/blob/4e9a006498c7bdb5bb2ccb76a4c877f6da7e23b2/src/proto/h1/role.rs#L1-L3
which I assume is why [`std::mem::MaybeUninit`](https://doc.rust-lang.org/std/mem/union.MaybeUninit.html), stabilized in `1.36.0`, could not be used yet. Today, hyper's MSRV is `1.46.0`, so it should now be possible to investigate removing the `#![allow(deprecated)]`.
|
hyperium__hyper-2545
|
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1,9 +1,5 @@
-// `mem::uninitialized` replaced with `mem::MaybeUninit`,
-// can't upgrade yet
-#![allow(deprecated)]
-
use std::fmt::{self, Write};
-use std::mem;
+use std::mem::{self, MaybeUninit};
#[cfg(any(test, feature = "server", feature = "ffi"))]
use bytes::Bytes;
|
hyperium/hyper
|
a24f0c0af8e1f4c6b7cc3a47c83eb6e4af88aca6
|
[
"2508"
] |
0.14
|
be9677a1e782d33c4402772e0fc4ef0a4c49d507
|
diff --git a/Cargo.toml b/Cargo.toml
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -31,7 +31,7 @@ http = "0.2"
http-body = "0.4"
httpdate = "1.0"
httparse = "1.4"
-h2 = { version = "0.3", optional = true }
+h2 = { version = "0.3.3", optional = true }
itoa = "0.4.1"
tracing = { version = "0.1", default-features = false, features = ["std"] }
pin-project = "1.0"
diff --git a/src/client/client.rs b/src/client/client.rs
--- a/src/client/client.rs
+++ b/src/client/client.rs
@@ -254,12 +254,9 @@ where
absolute_form(req.uri_mut());
} else {
origin_form(req.uri_mut());
- };
+ }
} else if req.method() == Method::CONNECT {
- debug!("client does not support CONNECT requests over HTTP2");
- return Err(ClientError::Normal(
- crate::Error::new_user_unsupported_request_method(),
- ));
+ authority_form(req.uri_mut());
}
let fut = pooled
diff --git a/src/error.rs b/src/error.rs
--- a/src/error.rs
+++ b/src/error.rs
@@ -90,7 +90,7 @@ pub(super) enum User {
/// User tried to send a certain header in an unexpected context.
///
/// For example, sending both `content-length` and `transfer-encoding`.
- #[cfg(feature = "http1")]
+ #[cfg(any(feature = "http1", feature = "http2"))]
#[cfg(feature = "server")]
UnexpectedHeader,
/// User tried to create a Request with bad version.
diff --git a/src/error.rs b/src/error.rs
--- a/src/error.rs
+++ b/src/error.rs
@@ -290,7 +290,7 @@ impl Error {
Error::new(Kind::User(user))
}
- #[cfg(feature = "http1")]
+ #[cfg(any(feature = "http1", feature = "http2"))]
#[cfg(feature = "server")]
pub(super) fn new_user_header() -> Error {
Error::new_user(User::UnexpectedHeader)
diff --git a/src/error.rs b/src/error.rs
--- a/src/error.rs
+++ b/src/error.rs
@@ -405,7 +405,7 @@ impl Error {
Kind::User(User::MakeService) => "error from user's MakeService",
#[cfg(any(feature = "http1", feature = "http2"))]
Kind::User(User::Service) => "error from user's Service",
- #[cfg(feature = "http1")]
+ #[cfg(any(feature = "http1", feature = "http2"))]
#[cfg(feature = "server")]
Kind::User(User::UnexpectedHeader) => "user sent unexpected header",
#[cfg(any(feature = "http1", feature = "http2"))]
diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs
--- a/src/proto/h2/client.rs
+++ b/src/proto/h2/client.rs
@@ -2,17 +2,21 @@ use std::error::Error as StdError;
#[cfg(feature = "runtime")]
use std::time::Duration;
+use bytes::Bytes;
use futures_channel::{mpsc, oneshot};
use futures_util::future::{self, Either, FutureExt as _, TryFutureExt as _};
use futures_util::stream::StreamExt as _;
use h2::client::{Builder, SendRequest};
+use http::{Method, StatusCode};
use tokio::io::{AsyncRead, AsyncWrite};
-use super::{decode_content_length, ping, PipeToSendStream, SendBuf};
+use super::{ping, H2Upgraded, PipeToSendStream, SendBuf};
use crate::body::HttpBody;
use crate::common::{exec::Exec, task, Future, Never, Pin, Poll};
use crate::headers;
+use crate::proto::h2::UpgradedSendStream;
use crate::proto::Dispatched;
+use crate::upgrade::Upgraded;
use crate::{Body, Request, Response};
type ClientRx<B> = crate::client::dispatch::Receiver<Request<B>, Response<Body>>;
diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs
--- a/src/proto/h2/client.rs
+++ b/src/proto/h2/client.rs
@@ -233,8 +237,25 @@ where
headers::set_content_length_if_missing(req.headers_mut(), len);
}
}
+
+ let is_connect = req.method() == Method::CONNECT;
let eos = body.is_end_stream();
- let (fut, body_tx) = match self.h2_tx.send_request(req, eos) {
+ let ping = self.ping.clone();
+
+ if is_connect {
+ if headers::content_length_parse_all(req.headers())
+ .map_or(false, |len| len != 0)
+ {
+ warn!("h2 connect request with non-zero body not supported");
+ cb.send(Err((
+ crate::Error::new_h2(h2::Reason::INTERNAL_ERROR.into()),
+ None,
+ )));
+ continue;
+ }
+ }
+
+ let (fut, body_tx) = match self.h2_tx.send_request(req, !is_connect && eos) {
Ok(ok) => ok,
Err(err) => {
debug!("client send request error: {}", err);
diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs
--- a/src/proto/h2/client.rs
+++ b/src/proto/h2/client.rs
@@ -243,45 +264,81 @@ where
}
};
- let ping = self.ping.clone();
- if !eos {
- let mut pipe = Box::pin(PipeToSendStream::new(body, body_tx)).map(|res| {
- if let Err(e) = res {
- debug!("client request body error: {}", e);
- }
- });
-
- // eagerly see if the body pipe is ready and
- // can thus skip allocating in the executor
- match Pin::new(&mut pipe).poll(cx) {
- Poll::Ready(_) => (),
- Poll::Pending => {
- let conn_drop_ref = self.conn_drop_ref.clone();
- // keep the ping recorder's knowledge of an
- // "open stream" alive while this body is
- // still sending...
- let ping = ping.clone();
- let pipe = pipe.map(move |x| {
- drop(conn_drop_ref);
- drop(ping);
- x
+ let send_stream = if !is_connect {
+ if !eos {
+ let mut pipe =
+ Box::pin(PipeToSendStream::new(body, body_tx)).map(|res| {
+ if let Err(e) = res {
+ debug!("client request body error: {}", e);
+ }
});
- self.executor.execute(pipe);
+
+ // eagerly see if the body pipe is ready and
+ // can thus skip allocating in the executor
+ match Pin::new(&mut pipe).poll(cx) {
+ Poll::Ready(_) => (),
+ Poll::Pending => {
+ let conn_drop_ref = self.conn_drop_ref.clone();
+ // keep the ping recorder's knowledge of an
+ // "open stream" alive while this body is
+ // still sending...
+ let ping = ping.clone();
+ let pipe = pipe.map(move |x| {
+ drop(conn_drop_ref);
+ drop(ping);
+ x
+ });
+ self.executor.execute(pipe);
+ }
}
}
- }
+
+ None
+ } else {
+ Some(body_tx)
+ };
let fut = fut.map(move |result| match result {
Ok(res) => {
// record that we got the response headers
ping.record_non_data();
- let content_length = decode_content_length(res.headers());
- let res = res.map(|stream| {
- let ping = ping.for_stream(&stream);
- crate::Body::h2(stream, content_length, ping)
- });
- Ok(res)
+ let content_length = headers::content_length_parse_all(res.headers());
+ if let (Some(mut send_stream), StatusCode::OK) =
+ (send_stream, res.status())
+ {
+ if content_length.map_or(false, |len| len != 0) {
+ warn!("h2 connect response with non-zero body not supported");
+
+ send_stream.send_reset(h2::Reason::INTERNAL_ERROR);
+ return Err((
+ crate::Error::new_h2(h2::Reason::INTERNAL_ERROR.into()),
+ None,
+ ));
+ }
+ let (parts, recv_stream) = res.into_parts();
+ let mut res = Response::from_parts(parts, Body::empty());
+
+ let (pending, on_upgrade) = crate::upgrade::pending();
+ let io = H2Upgraded {
+ ping,
+ send_stream: unsafe { UpgradedSendStream::new(send_stream) },
+ recv_stream,
+ buf: Bytes::new(),
+ };
+ let upgraded = Upgraded::new(io, Bytes::new());
+
+ pending.fulfill(upgraded);
+ res.extensions_mut().insert(on_upgrade);
+
+ Ok(res)
+ } else {
+ let res = res.map(|stream| {
+ let ping = ping.for_stream(&stream);
+ crate::Body::h2(stream, content_length.into(), ping)
+ });
+ Ok(res)
+ }
}
Err(err) => {
ping.ensure_not_timed_out().map_err(|e| (e, None))?;
diff --git a/src/proto/h2/mod.rs b/src/proto/h2/mod.rs
--- a/src/proto/h2/mod.rs
+++ b/src/proto/h2/mod.rs
@@ -1,5 +1,5 @@
-use bytes::Buf;
-use h2::SendStream;
+use bytes::{Buf, Bytes};
+use h2::{RecvStream, SendStream};
use http::header::{
HeaderName, CONNECTION, PROXY_AUTHENTICATE, PROXY_AUTHORIZATION, TE, TRAILER,
TRANSFER_ENCODING, UPGRADE,
diff --git a/src/proto/h2/mod.rs b/src/proto/h2/mod.rs
--- a/src/proto/h2/mod.rs
+++ b/src/proto/h2/mod.rs
@@ -7,11 +7,14 @@ use http::header::{
use http::HeaderMap;
use pin_project::pin_project;
use std::error::Error as StdError;
-use std::io::IoSlice;
+use std::io::{self, Cursor, IoSlice};
+use std::mem;
+use std::task::Context;
+use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
-use crate::body::{DecodedLength, HttpBody};
+use crate::body::HttpBody;
use crate::common::{task, Future, Pin, Poll};
-use crate::headers::content_length_parse_all;
+use crate::proto::h2::ping::Recorder;
pub(crate) mod ping;
diff --git a/src/proto/h2/mod.rs b/src/proto/h2/mod.rs
--- a/src/proto/h2/mod.rs
+++ b/src/proto/h2/mod.rs
@@ -83,15 +86,6 @@ fn strip_connection_headers(headers: &mut HeaderMap, is_request: bool) {
}
}
-fn decode_content_length(headers: &HeaderMap) -> DecodedLength {
- if let Some(len) = content_length_parse_all(headers) {
- // If the length is u64::MAX, oh well, just reported chunked.
- DecodedLength::checked_new(len).unwrap_or_else(|_| DecodedLength::CHUNKED)
- } else {
- DecodedLength::CHUNKED
- }
-}
-
// body adapters used by both Client and Server
#[pin_project]
diff --git a/src/proto/h2/mod.rs b/src/proto/h2/mod.rs
--- a/src/proto/h2/mod.rs
+++ b/src/proto/h2/mod.rs
@@ -172,7 +166,7 @@ where
is_eos,
);
- let buf = SendBuf(Some(chunk));
+ let buf = SendBuf::Buf(chunk);
me.body_tx
.send_data(buf, is_eos)
.map_err(crate::Error::new_body_write)?;
diff --git a/src/proto/h2/mod.rs b/src/proto/h2/mod.rs
--- a/src/proto/h2/mod.rs
+++ b/src/proto/h2/mod.rs
@@ -243,32 +237,202 @@ impl<B: Buf> SendStreamExt for SendStream<SendBuf<B>> {
fn send_eos_frame(&mut self) -> crate::Result<()> {
trace!("send body eos");
- self.send_data(SendBuf(None), true)
+ self.send_data(SendBuf::None, true)
.map_err(crate::Error::new_body_write)
}
}
-struct SendBuf<B>(Option<B>);
+#[repr(usize)]
+enum SendBuf<B> {
+ Buf(B),
+ Cursor(Cursor<Box<[u8]>>),
+ None,
+}
impl<B: Buf> Buf for SendBuf<B> {
#[inline]
fn remaining(&self) -> usize {
- self.0.as_ref().map(|b| b.remaining()).unwrap_or(0)
+ match *self {
+ Self::Buf(ref b) => b.remaining(),
+ Self::Cursor(ref c) => c.remaining(),
+ Self::None => 0,
+ }
}
#[inline]
fn chunk(&self) -> &[u8] {
- self.0.as_ref().map(|b| b.chunk()).unwrap_or(&[])
+ match *self {
+ Self::Buf(ref b) => b.chunk(),
+ Self::Cursor(ref c) => c.chunk(),
+ Self::None => &[],
+ }
}
#[inline]
fn advance(&mut self, cnt: usize) {
- if let Some(b) = self.0.as_mut() {
- b.advance(cnt)
+ match *self {
+ Self::Buf(ref mut b) => b.advance(cnt),
+ Self::Cursor(ref mut c) => c.advance(cnt),
+ Self::None => {}
}
}
fn chunks_vectored<'a>(&'a self, dst: &mut [IoSlice<'a>]) -> usize {
- self.0.as_ref().map(|b| b.chunks_vectored(dst)).unwrap_or(0)
+ match *self {
+ Self::Buf(ref b) => b.chunks_vectored(dst),
+ Self::Cursor(ref c) => c.chunks_vectored(dst),
+ Self::None => 0,
+ }
+ }
+}
+
+struct H2Upgraded<B>
+where
+ B: Buf,
+{
+ ping: Recorder,
+ send_stream: UpgradedSendStream<B>,
+ recv_stream: RecvStream,
+ buf: Bytes,
+}
+
+impl<B> AsyncRead for H2Upgraded<B>
+where
+ B: Buf,
+{
+ fn poll_read(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ read_buf: &mut ReadBuf<'_>,
+ ) -> Poll<Result<(), io::Error>> {
+ if self.buf.is_empty() {
+ self.buf = loop {
+ match ready!(self.recv_stream.poll_data(cx)) {
+ None => return Poll::Ready(Ok(())),
+ Some(Ok(buf)) if buf.is_empty() && !self.recv_stream.is_end_stream() => {
+ continue
+ }
+ Some(Ok(buf)) => {
+ self.ping.record_data(buf.len());
+ break buf;
+ }
+ Some(Err(e)) => {
+ return Poll::Ready(Err(h2_to_io_error(e)));
+ }
+ }
+ };
+ }
+ let cnt = std::cmp::min(self.buf.len(), read_buf.remaining());
+ read_buf.put_slice(&self.buf[..cnt]);
+ self.buf.advance(cnt);
+ let _ = self.recv_stream.flow_control().release_capacity(cnt);
+ Poll::Ready(Ok(()))
+ }
+}
+
+impl<B> AsyncWrite for H2Upgraded<B>
+where
+ B: Buf,
+{
+ fn poll_write(
+ mut self: Pin<&mut Self>,
+ cx: &mut Context<'_>,
+ buf: &[u8],
+ ) -> Poll<Result<usize, io::Error>> {
+ if let Poll::Ready(reset) = self.send_stream.poll_reset(cx) {
+ return Poll::Ready(Err(h2_to_io_error(match reset {
+ Ok(reason) => reason.into(),
+ Err(e) => e,
+ })));
+ }
+ if buf.is_empty() {
+ return Poll::Ready(Ok(0));
+ }
+ self.send_stream.reserve_capacity(buf.len());
+ Poll::Ready(match ready!(self.send_stream.poll_capacity(cx)) {
+ None => Ok(0),
+ Some(Ok(cnt)) => self.send_stream.write(&buf[..cnt], false).map(|()| cnt),
+ Some(Err(e)) => Err(h2_to_io_error(e)),
+ })
+ }
+
+ fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
+ Poll::Ready(Ok(()))
+ }
+
+ fn poll_shutdown(
+ mut self: Pin<&mut Self>,
+ _cx: &mut Context<'_>,
+ ) -> Poll<Result<(), io::Error>> {
+ Poll::Ready(self.send_stream.write(&[], true))
+ }
+}
+
+fn h2_to_io_error(e: h2::Error) -> io::Error {
+ if e.is_io() {
+ e.into_io().unwrap()
+ } else {
+ io::Error::new(io::ErrorKind::Other, e)
+ }
+}
+
+struct UpgradedSendStream<B>(SendStream<SendBuf<Neutered<B>>>);
+
+impl<B> UpgradedSendStream<B>
+where
+ B: Buf,
+{
+ unsafe fn new(inner: SendStream<SendBuf<B>>) -> Self {
+ assert_eq!(mem::size_of::<B>(), mem::size_of::<Neutered<B>>());
+ Self(mem::transmute(inner))
+ }
+
+ fn reserve_capacity(&mut self, cnt: usize) {
+ unsafe { self.as_inner_unchecked().reserve_capacity(cnt) }
+ }
+
+ fn poll_capacity(&mut self, cx: &mut Context<'_>) -> Poll<Option<Result<usize, h2::Error>>> {
+ unsafe { self.as_inner_unchecked().poll_capacity(cx) }
+ }
+
+ fn poll_reset(&mut self, cx: &mut Context<'_>) -> Poll<Result<h2::Reason, h2::Error>> {
+ unsafe { self.as_inner_unchecked().poll_reset(cx) }
+ }
+
+ fn write(&mut self, buf: &[u8], end_of_stream: bool) -> Result<(), io::Error> {
+ let send_buf = SendBuf::Cursor(Cursor::new(buf.into()));
+ unsafe {
+ self.as_inner_unchecked()
+ .send_data(send_buf, end_of_stream)
+ .map_err(h2_to_io_error)
+ }
+ }
+
+ unsafe fn as_inner_unchecked(&mut self) -> &mut SendStream<SendBuf<B>> {
+ &mut *(&mut self.0 as *mut _ as *mut _)
+ }
+}
+
+#[repr(transparent)]
+struct Neutered<B> {
+ _inner: B,
+ impossible: Impossible,
+}
+
+enum Impossible {}
+
+unsafe impl<B> Send for Neutered<B> {}
+
+impl<B> Buf for Neutered<B> {
+ fn remaining(&self) -> usize {
+ match self.impossible {}
+ }
+
+ fn chunk(&self) -> &[u8] {
+ match self.impossible {}
+ }
+
+ fn advance(&mut self, _cnt: usize) {
+ match self.impossible {}
}
}
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -3,19 +3,24 @@ use std::marker::Unpin;
#[cfg(feature = "runtime")]
use std::time::Duration;
+use bytes::Bytes;
use h2::server::{Connection, Handshake, SendResponse};
-use h2::Reason;
+use h2::{Reason, RecvStream};
+use http::{Method, Request};
use pin_project::pin_project;
use tokio::io::{AsyncRead, AsyncWrite};
-use super::{decode_content_length, ping, PipeToSendStream, SendBuf};
+use super::{ping, PipeToSendStream, SendBuf};
use crate::body::HttpBody;
use crate::common::exec::ConnStreamExec;
use crate::common::{date, task, Future, Pin, Poll};
use crate::headers;
+use crate::proto::h2::ping::Recorder;
+use crate::proto::h2::{H2Upgraded, UpgradedSendStream};
use crate::proto::Dispatched;
use crate::service::HttpService;
+use crate::upgrade::{OnUpgrade, Pending, Upgraded};
use crate::{Body, Response};
// Our defaults are chosen for the "majority" case, which usually are not
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -255,9 +260,9 @@ where
// When the service is ready, accepts an incoming request.
match ready!(self.conn.poll_accept(cx)) {
- Some(Ok((req, respond))) => {
+ Some(Ok((req, mut respond))) => {
trace!("incoming request");
- let content_length = decode_content_length(req.headers());
+ let content_length = headers::content_length_parse_all(req.headers());
let ping = self
.ping
.as_ref()
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -267,8 +272,36 @@ where
// Record the headers received
ping.record_non_data();
- let req = req.map(|stream| crate::Body::h2(stream, content_length, ping));
- let fut = H2Stream::new(service.call(req), respond);
+ let is_connect = req.method() == Method::CONNECT;
+ let (mut parts, stream) = req.into_parts();
+ let (req, connect_parts) = if !is_connect {
+ (
+ Request::from_parts(
+ parts,
+ crate::Body::h2(stream, content_length.into(), ping),
+ ),
+ None,
+ )
+ } else {
+ if content_length.map_or(false, |len| len != 0) {
+ warn!("h2 connect request with non-zero body not supported");
+ respond.send_reset(h2::Reason::INTERNAL_ERROR);
+ return Poll::Ready(Ok(()));
+ }
+ let (pending, upgrade) = crate::upgrade::pending();
+ debug_assert!(parts.extensions.get::<OnUpgrade>().is_none());
+ parts.extensions.insert(upgrade);
+ (
+ Request::from_parts(parts, crate::Body::empty()),
+ Some(ConnectParts {
+ pending,
+ ping,
+ recv_stream: stream,
+ }),
+ )
+ };
+
+ let fut = H2Stream::new(service.call(req), connect_parts, respond);
exec.execute_h2stream(fut);
}
Some(Err(e)) => {
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -331,18 +364,28 @@ enum H2StreamState<F, B>
where
B: HttpBody,
{
- Service(#[pin] F),
+ Service(#[pin] F, Option<ConnectParts>),
Body(#[pin] PipeToSendStream<B>),
}
+struct ConnectParts {
+ pending: Pending,
+ ping: Recorder,
+ recv_stream: RecvStream,
+}
+
impl<F, B> H2Stream<F, B>
where
B: HttpBody,
{
- fn new(fut: F, respond: SendResponse<SendBuf<B::Data>>) -> H2Stream<F, B> {
+ fn new(
+ fut: F,
+ connect_parts: Option<ConnectParts>,
+ respond: SendResponse<SendBuf<B::Data>>,
+ ) -> H2Stream<F, B> {
H2Stream {
reply: respond,
- state: H2StreamState::Service(fut),
+ state: H2StreamState::Service(fut, connect_parts),
}
}
}
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -364,6 +407,7 @@ impl<F, B, E> H2Stream<F, B>
where
F: Future<Output = Result<Response<B>, E>>,
B: HttpBody,
+ B::Data: 'static,
B::Error: Into<Box<dyn StdError + Send + Sync>>,
E: Into<Box<dyn StdError + Send + Sync>>,
{
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -371,7 +415,7 @@ where
let mut me = self.project();
loop {
let next = match me.state.as_mut().project() {
- H2StreamStateProj::Service(h) => {
+ H2StreamStateProj::Service(h, connect_parts) => {
let res = match h.poll(cx) {
Poll::Ready(Ok(r)) => r,
Poll::Pending => {
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -402,6 +446,29 @@ where
.entry(::http::header::DATE)
.or_insert_with(date::update_and_header_value);
+ if let Some(connect_parts) = connect_parts.take() {
+ if res.status().is_success() {
+ if headers::content_length_parse_all(res.headers())
+ .map_or(false, |len| len != 0)
+ {
+ warn!("h2 successful response to CONNECT request with body not supported");
+ me.reply.send_reset(h2::Reason::INTERNAL_ERROR);
+ return Poll::Ready(Err(crate::Error::new_user_header()));
+ }
+ let send_stream = reply!(me, res, false);
+ connect_parts.pending.fulfill(Upgraded::new(
+ H2Upgraded {
+ ping: connect_parts.ping,
+ recv_stream: connect_parts.recv_stream,
+ send_stream: unsafe { UpgradedSendStream::new(send_stream) },
+ buf: Bytes::new(),
+ },
+ Bytes::new(),
+ ));
+ return Poll::Ready(Ok(()));
+ }
+ }
+
// automatically set Content-Length from body...
if let Some(len) = body.size_hint().exact() {
headers::set_content_length_if_missing(res.headers_mut(), len);
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -428,6 +495,7 @@ impl<F, B, E> Future for H2Stream<F, B>
where
F: Future<Output = Result<Response<B>, E>>,
B: HttpBody,
+ B::Data: 'static,
B::Error: Into<Box<dyn StdError + Send + Sync>>,
E: Into<Box<dyn StdError + Send + Sync>>,
{
diff --git a/src/upgrade.rs b/src/upgrade.rs
--- a/src/upgrade.rs
+++ b/src/upgrade.rs
@@ -62,12 +62,12 @@ pub fn on<T: sealed::CanUpgrade>(msg: T) -> OnUpgrade {
msg.on_upgrade()
}
-#[cfg(feature = "http1")]
+#[cfg(any(feature = "http1", feature = "http2"))]
pub(super) struct Pending {
tx: oneshot::Sender<crate::Result<Upgraded>>,
}
-#[cfg(feature = "http1")]
+#[cfg(any(feature = "http1", feature = "http2"))]
pub(super) fn pending() -> (Pending, OnUpgrade) {
let (tx, rx) = oneshot::channel();
(Pending { tx }, OnUpgrade { rx: Some(rx) })
diff --git a/src/upgrade.rs b/src/upgrade.rs
--- a/src/upgrade.rs
+++ b/src/upgrade.rs
@@ -187,13 +187,14 @@ impl fmt::Debug for OnUpgrade {
// ===== impl Pending =====
-#[cfg(feature = "http1")]
+#[cfg(any(feature = "http1", feature = "http2"))]
impl Pending {
pub(super) fn fulfill(self, upgraded: Upgraded) {
trace!("pending upgrade fulfill");
let _ = self.tx.send(Ok(upgraded));
}
+ #[cfg(feature = "http1")]
/// Don't fulfill the pending Upgrade, but instead signal that
/// upgrades are handled manually.
pub(super) fn manual(self) {
|
AFAICT for the client part we mostly want to patch this match arm:
https://github.com/hyperium/hyper/blob/117cc492a62c4051c75e7eec0f624b30db8a20e5/src/proto/h2/client.rs#L217-L290
Specifically, ultimately we want to patch this snippet so that it inserts an `OnUpgrade` extension in the returned response, with its inner oneshot channel already sent to with an `Upgraded` built from the `SendStream<_>` that was returned by `self.h2_tx.send_request` and the `RecvStream` returned from the `ResponseFuture`:
https://github.com/hyperium/hyper/blob/117cc492a62c4051c75e7eec0f624b30db8a20e5/src/proto/h2/client.rs#L270-L280
The first issue I'm experiencing is that the `SendStream<_>` is a `SendStream<SendBuf<<B as Body>::Data>>` and we need this to be the `AsyncWrite` half of the `Upgraded` IO object and I have no damn clue how we can possibly do that.
I guess I can change the struct `SendBuf` to an enum like this:
```rust
enum SendBuf<B> {
Buf(B),
Cursor(Cursor<Box<[u8]>>),
None,
}
```
where `SendBuf::Cursor` is only ever used in the impl of `AsyncWrite` for the h2 upgraded stream.
To implement `AsyncRead` over `RecvStream`, I need https://github.com/hyperium/h2/pull/532.
I'm also not exactly sure what `poll_flush` and `poll_shutdown` should do.
AFAIK `poll_shutdown` can just send an empty data frame with the EOS flag set, but we definitely need something in h2 to properly implement `poll_flush`.
So, I implemented `poll_flush` as a noop and tried adding upgrades to the server-side of Hyper, and I encountered a quite pesky blocker.
To implement `Io` over an H2 stream, I wrote this:
```rust
struct H2Upgraded<B>
where
B: Buf,
{
ping: Recorder,
send_stream: SendStream<SendBuf<B>>,
recv_stream: RecvStream,
buf: Bytes,
}
impl<B> AsyncRead for H2Upgraded<B>
where
B: Buf,
{
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
read_buf: &mut ReadBuf<'_>,
) -> Poll<Result<(), io::Error>> {
let mut polled_stream = false;
Poll::Ready(loop {
if !self.buf.is_empty() {
let cnt = std::cmp::min(self.buf.len(), read_buf.remaining());
read_buf.put_slice(&self.buf[..cnt]);
self.buf.advance(cnt);
let _ = self.recv_stream.flow_control().release_capacity(cnt);
}
if polled_stream || read_buf.remaining() == 0 {
break Ok(());
}
debug_assert!(!self.buf.is_empty());
self.buf = match ready!(self.recv_stream.poll_data(cx)) {
None => break Ok(()),
Some(Ok(buf)) => {
self.ping.record_data(buf.len());
buf
}
Some(Err(e)) => {
return Poll::Ready(Err(h2_to_io_error(e)));
}
};
polled_stream = true;
})
}
}
impl<B> AsyncWrite for H2Upgraded<B>
where
B: Buf,
{
fn poll_write(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<Result<usize, io::Error>> {
if buf.is_empty() {
return Poll::Ready(Ok(0));
}
self.send_stream.reserve_capacity(buf.len());
Poll::Ready(match ready!(self.send_stream.poll_capacity(cx)) {
None => Ok(0),
Some(Ok(cnt)) => self.write(&buf[..cnt], false).map(|()| cnt),
Some(Err(e)) => Err(h2_to_io_error(e)),
})
}
fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
Poll::Ready(Ok(()))
}
fn poll_shutdown(
mut self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Result<(), io::Error>> {
Poll::Ready(self.write(&[], true))
}
}
```
So far so good, right?
Then in the server code, I pass an `H2Upgraded` to `Upgraded::new` and this is where I am stuck.
`Upgraded` wants a `Send` I/O object, but all we get from the `h2` crate is a `SendStream<SendBuf<<B as Body>::Data>>`, and the system has no `Send` bound on `B::Data`. I'll try to propagate this bound everywhere but AFAIK that's a breaking change.
Ah right, it also wants `B::Data` to be `'static`.
@nox Thanks for implementing this -- is it complete so I can give it a try?
I kind of worked around this by issuing a lower-case "connect" request and passing around a streaming body for the "connect" request. The streaming body is basically one end of the pipe (I used tokio's DuplexStream, but I just need a HalfDuplexStream). The other end of this pipe, and the response's streaming body become a custom H2Upgraded struct that satisfies the trait bound `AsyncRead+AsyncWrite+Unpin+'static`, so I can use it anywhere to spawn servers or whatever.
This PR only includes support for CONNECT over h2 on the server side, not the client, but yeah it works!
|
2021-04-28T09:22:09Z
| 2,523
|
Support CONNECT over h2
There is [a h2 issue](https://github.com/hyperium/h2/issues/234) but even if support for CONNECT was perfect in h2, there are still details to sort out on hyper itself too. I'm filing this issue because I started working on that today and will probably need some help.
|
hyperium__hyper-2523
|
diff --git a/src/body/length.rs b/src/body/length.rs
--- a/src/body/length.rs
+++ b/src/body/length.rs
@@ -3,6 +3,17 @@ use std::fmt;
#[derive(Clone, Copy, PartialEq, Eq)]
pub(crate) struct DecodedLength(u64);
+#[cfg(any(feature = "http1", feature = "http2"))]
+impl From<Option<u64>> for DecodedLength {
+ fn from(len: Option<u64>) -> Self {
+ len.and_then(|len| {
+ // If the length is u64::MAX, oh well, just reported chunked.
+ Self::checked_new(len).ok()
+ })
+ .unwrap_or(DecodedLength::CHUNKED)
+ }
+}
+
#[cfg(any(feature = "http1", feature = "http2", test))]
const MAX_LEN: u64 = std::u64::MAX - 2;
diff --git a/src/upgrade.rs b/src/upgrade.rs
--- a/src/upgrade.rs
+++ b/src/upgrade.rs
@@ -76,7 +76,7 @@ pub(super) fn pending() -> (Pending, OnUpgrade) {
// ===== impl Upgraded =====
impl Upgraded {
- #[cfg(any(feature = "http1", test))]
+ #[cfg(any(feature = "http1", feature = "http2", test))]
pub(super) fn new<T>(io: T, read_buf: Bytes) -> Self
where
T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2261,14 +2261,16 @@ mod conn {
use std::thread;
use std::time::Duration;
+ use bytes::Buf;
use futures_channel::oneshot;
use futures_util::future::{self, poll_fn, FutureExt, TryFutureExt};
use futures_util::StreamExt;
+ use hyper::upgrade::OnUpgrade;
use tokio::io::{AsyncRead, AsyncReadExt as _, AsyncWrite, AsyncWriteExt as _, ReadBuf};
use tokio::net::{TcpListener as TkTcpListener, TcpStream};
use hyper::client::conn;
- use hyper::{self, Body, Method, Request};
+ use hyper::{self, Body, Method, Request, Response, StatusCode};
use super::{concat, s, support, tcp_connect, FutureHyperExt};
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2984,6 +2986,125 @@ mod conn {
.expect("client should be open");
}
+ #[tokio::test]
+ async fn h2_connect() {
+ let _ = pretty_env_logger::try_init();
+
+ let listener = TkTcpListener::bind(SocketAddr::from(([127, 0, 0, 1], 0)))
+ .await
+ .unwrap();
+ let addr = listener.local_addr().unwrap();
+
+ // Spawn an HTTP2 server that asks for bread and responds with baguette.
+ tokio::spawn(async move {
+ let sock = listener.accept().await.unwrap().0;
+ let mut h2 = h2::server::handshake(sock).await.unwrap();
+
+ let (req, mut respond) = h2.accept().await.unwrap().unwrap();
+ tokio::spawn(async move {
+ poll_fn(|cx| h2.poll_closed(cx)).await.unwrap();
+ });
+ assert_eq!(req.method(), Method::CONNECT);
+
+ let mut body = req.into_body();
+
+ let mut send_stream = respond.send_response(Response::default(), false).unwrap();
+
+ send_stream.send_data("Bread?".into(), true).unwrap();
+
+ let bytes = body.data().await.unwrap().unwrap();
+ assert_eq!(&bytes[..], b"Baguette!");
+ let _ = body.flow_control().release_capacity(bytes.len());
+
+ assert!(body.data().await.is_none());
+ });
+
+ let io = tcp_connect(&addr).await.expect("tcp connect");
+ let (mut client, conn) = conn::Builder::new()
+ .http2_only(true)
+ .handshake::<_, Body>(io)
+ .await
+ .expect("http handshake");
+
+ tokio::spawn(async move {
+ conn.await.expect("client conn shouldn't error");
+ });
+
+ let req = Request::connect("localhost")
+ .body(hyper::Body::empty())
+ .unwrap();
+ let res = client.send_request(req).await.expect("send_request");
+ assert_eq!(res.status(), StatusCode::OK);
+
+ let mut upgraded = hyper::upgrade::on(res).await.unwrap();
+
+ let mut vec = vec![];
+ upgraded.read_to_end(&mut vec).await.unwrap();
+ assert_eq!(s(&vec), "Bread?");
+
+ upgraded.write_all(b"Baguette!").await.unwrap();
+
+ upgraded.shutdown().await.unwrap();
+ }
+
+ #[tokio::test]
+ async fn h2_connect_rejected() {
+ let _ = pretty_env_logger::try_init();
+
+ let listener = TkTcpListener::bind(SocketAddr::from(([127, 0, 0, 1], 0)))
+ .await
+ .unwrap();
+ let addr = listener.local_addr().unwrap();
+ let (done_tx, done_rx) = oneshot::channel();
+
+ tokio::spawn(async move {
+ let sock = listener.accept().await.unwrap().0;
+ let mut h2 = h2::server::handshake(sock).await.unwrap();
+
+ let (req, mut respond) = h2.accept().await.unwrap().unwrap();
+ tokio::spawn(async move {
+ poll_fn(|cx| h2.poll_closed(cx)).await.unwrap();
+ });
+ assert_eq!(req.method(), Method::CONNECT);
+
+ let res = Response::builder().status(400).body(()).unwrap();
+ let mut send_stream = respond.send_response(res, false).unwrap();
+ send_stream
+ .send_data("No bread for you!".into(), true)
+ .unwrap();
+ done_rx.await.unwrap();
+ });
+
+ let io = tcp_connect(&addr).await.expect("tcp connect");
+ let (mut client, conn) = conn::Builder::new()
+ .http2_only(true)
+ .handshake::<_, Body>(io)
+ .await
+ .expect("http handshake");
+
+ tokio::spawn(async move {
+ conn.await.expect("client conn shouldn't error");
+ });
+
+ let req = Request::connect("localhost")
+ .body(hyper::Body::empty())
+ .unwrap();
+ let res = client.send_request(req).await.expect("send_request");
+ assert_eq!(res.status(), StatusCode::BAD_REQUEST);
+ assert!(res.extensions().get::<OnUpgrade>().is_none());
+
+ let mut body = String::new();
+ hyper::body::aggregate(res.into_body())
+ .await
+ .unwrap()
+ .reader()
+ .read_to_string(&mut body)
+ .unwrap();
+ assert_eq!(body, "No bread for you!");
+
+ done_tx.send(()).unwrap();
+ }
+
async fn drain_til_eof<T: AsyncRead + Unpin>(mut sock: T) -> io::Result<()> {
let mut buf = [0u8; 1024];
loop {
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -13,10 +13,13 @@ use std::task::{Context, Poll};
use std::thread;
use std::time::Duration;
+use bytes::Bytes;
use futures_channel::oneshot;
use futures_util::future::{self, Either, FutureExt, TryFutureExt};
#[cfg(feature = "stream")]
use futures_util::stream::StreamExt as _;
+use h2::client::SendRequest;
+use h2::{RecvStream, SendStream};
use http::header::{HeaderName, HeaderValue};
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, ReadBuf};
use tokio::net::{TcpListener, TcpStream as TkTcpStream};
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1482,6 +1485,339 @@ async fn http_connect_new() {
assert_eq!(s(&vec), "bar=foo");
}
+#[tokio::test]
+async fn h2_connect() {
+ use tokio::io::{AsyncReadExt, AsyncWriteExt};
+
+ let _ = pretty_env_logger::try_init();
+ let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
+ let addr = listener.local_addr().unwrap();
+ let conn = connect_async(addr).await;
+
+ let (h2, connection) = h2::client::handshake(conn).await.unwrap();
+ tokio::spawn(async move {
+ connection.await.unwrap();
+ });
+ let mut h2 = h2.ready().await.unwrap();
+
+ async fn connect_and_recv_bread(
+ h2: &mut SendRequest<Bytes>,
+ ) -> (RecvStream, SendStream<Bytes>) {
+ let request = Request::connect("localhost").body(()).unwrap();
+ let (response, send_stream) = h2.send_request(request, false).unwrap();
+ let response = response.await.unwrap();
+ assert_eq!(response.status(), StatusCode::OK);
+
+ let mut body = response.into_body();
+ let bytes = body.data().await.unwrap().unwrap();
+ assert_eq!(&bytes[..], b"Bread?");
+ let _ = body.flow_control().release_capacity(bytes.len());
+
+ (body, send_stream)
+ }
+
+ tokio::spawn(async move {
+ let (mut recv_stream, mut send_stream) = connect_and_recv_bread(&mut h2).await;
+
+ send_stream.send_data("Baguette!".into(), true).unwrap();
+
+ assert!(recv_stream.data().await.unwrap().unwrap().is_empty());
+ });
+
+ let svc = service_fn(move |req: Request<Body>| {
+ let on_upgrade = hyper::upgrade::on(req);
+
+ tokio::spawn(async move {
+ let mut upgraded = on_upgrade.await.expect("on_upgrade");
+ upgraded.write_all(b"Bread?").await.unwrap();
+
+ let mut vec = vec![];
+ upgraded.read_to_end(&mut vec).await.unwrap();
+ assert_eq!(s(&vec), "Baguette!");
+
+ upgraded.shutdown().await.unwrap();
+ });
+
+ future::ok::<_, hyper::Error>(
+ Response::builder()
+ .status(200)
+ .body(hyper::Body::empty())
+ .unwrap(),
+ )
+ });
+
+ let (socket, _) = listener.accept().await.unwrap();
+ Http::new()
+ .http2_only(true)
+ .serve_connection(socket, svc)
+ .with_upgrades()
+ .await
+ .unwrap();
+}
+
+#[tokio::test]
+async fn h2_connect_multiplex() {
+ use futures_util::stream::FuturesUnordered;
+ use tokio::io::{AsyncReadExt, AsyncWriteExt};
+
+ let _ = pretty_env_logger::try_init();
+ let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
+ let addr = listener.local_addr().unwrap();
+ let conn = connect_async(addr).await;
+
+ let (h2, connection) = h2::client::handshake(conn).await.unwrap();
+ tokio::spawn(async move {
+ connection.await.unwrap();
+ });
+ let mut h2 = h2.ready().await.unwrap();
+
+ tokio::spawn(async move {
+ let mut streams = vec![];
+ for i in 0..80 {
+ let request = Request::connect(format!("localhost_{}", i % 4))
+ .body(())
+ .unwrap();
+ let (response, send_stream) = h2.send_request(request, false).unwrap();
+ streams.push((i, response, send_stream));
+ }
+
+ let futures = streams
+ .into_iter()
+ .map(|(i, response, mut send_stream)| async move {
+ if i % 4 == 0 {
+ return;
+ }
+
+ let response = response.await.unwrap();
+ assert_eq!(response.status(), StatusCode::OK);
+
+ if i % 4 == 1 {
+ return;
+ }
+
+ let mut body = response.into_body();
+ let bytes = body.data().await.unwrap().unwrap();
+ assert_eq!(&bytes[..], b"Bread?");
+ let _ = body.flow_control().release_capacity(bytes.len());
+
+ if i % 4 == 2 {
+ return;
+ }
+
+ send_stream.send_data("Baguette!".into(), true).unwrap();
+
+ assert!(body.data().await.unwrap().unwrap().is_empty());
+ })
+ .collect::<FuturesUnordered<_>>();
+
+ futures.for_each(future::ready).await;
+ });
+
+ let svc = service_fn(move |req: Request<Body>| {
+ let authority = req.uri().authority().unwrap().to_string();
+ let on_upgrade = hyper::upgrade::on(req);
+
+ tokio::spawn(async move {
+ let upgrade_res = on_upgrade.await;
+ if authority == "localhost_0" {
+ assert!(upgrade_res.expect_err("upgrade cancelled").is_canceled());
+ return;
+ }
+ let mut upgraded = upgrade_res.expect("upgrade successful");
+
+ upgraded.write_all(b"Bread?").await.unwrap();
+
+ let mut vec = vec![];
+ let read_res = upgraded.read_to_end(&mut vec).await;
+
+ if authority == "localhost_1" || authority == "localhost_2" {
+ let err = read_res.expect_err("read failed");
+ assert_eq!(err.kind(), io::ErrorKind::Other);
+ assert_eq!(
+ err.get_ref()
+ .unwrap()
+ .downcast_ref::<h2::Error>()
+ .unwrap()
+ .reason(),
+ Some(h2::Reason::CANCEL),
+ );
+ return;
+ }
+
+ read_res.unwrap();
+ assert_eq!(s(&vec), "Baguette!");
+
+ upgraded.shutdown().await.unwrap();
+ });
+
+ future::ok::<_, hyper::Error>(
+ Response::builder()
+ .status(200)
+ .body(hyper::Body::empty())
+ .unwrap(),
+ )
+ });
+
+ let (socket, _) = listener.accept().await.unwrap();
+ Http::new()
+ .http2_only(true)
+ .serve_connection(socket, svc)
+ .with_upgrades()
+ .await
+ .unwrap();
+}
+
+#[tokio::test]
+async fn h2_connect_large_body() {
+ use tokio::io::{AsyncReadExt, AsyncWriteExt};
+
+ let _ = pretty_env_logger::try_init();
+ let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
+ let addr = listener.local_addr().unwrap();
+ let conn = connect_async(addr).await;
+
+ let (h2, connection) = h2::client::handshake(conn).await.unwrap();
+ tokio::spawn(async move {
+ connection.await.unwrap();
+ });
+ let mut h2 = h2.ready().await.unwrap();
+
+ const NO_BREAD: &str = "All work and no bread makes nox a dull boy.\n";
+
+ async fn connect_and_recv_bread(
+ h2: &mut SendRequest<Bytes>,
+ ) -> (RecvStream, SendStream<Bytes>) {
+ let request = Request::connect("localhost").body(()).unwrap();
+ let (response, send_stream) = h2.send_request(request, false).unwrap();
+ let response = response.await.unwrap();
+ assert_eq!(response.status(), StatusCode::OK);
+
+ let mut body = response.into_body();
+ let bytes = body.data().await.unwrap().unwrap();
+ assert_eq!(&bytes[..], b"Bread?");
+ let _ = body.flow_control().release_capacity(bytes.len());
+
+ (body, send_stream)
+ }
+
+ tokio::spawn(async move {
+ let (mut recv_stream, mut send_stream) = connect_and_recv_bread(&mut h2).await;
+
+ let large_body = Bytes::from(NO_BREAD.repeat(9000));
+
+ send_stream.send_data(large_body.clone(), false).unwrap();
+ send_stream.send_data(large_body, true).unwrap();
+
+ assert!(recv_stream.data().await.unwrap().unwrap().is_empty());
+ });
+
+ let svc = service_fn(move |req: Request<Body>| {
+ let on_upgrade = hyper::upgrade::on(req);
+
+ tokio::spawn(async move {
+ let mut upgraded = on_upgrade.await.expect("on_upgrade");
+ upgraded.write_all(b"Bread?").await.unwrap();
+
+ let mut vec = vec![];
+ if upgraded.read_to_end(&mut vec).await.is_err() {
+ return;
+ }
+ assert_eq!(vec.len(), NO_BREAD.len() * 9000 * 2);
+
+ upgraded.shutdown().await.unwrap();
+ });
+
+ future::ok::<_, hyper::Error>(
+ Response::builder()
+ .status(200)
+ .body(hyper::Body::empty())
+ .unwrap(),
+ )
+ });
+
+ let (socket, _) = listener.accept().await.unwrap();
+ Http::new()
+ .http2_only(true)
+ .serve_connection(socket, svc)
+ .with_upgrades()
+ .await
+ .unwrap();
+}
+
+#[tokio::test]
+async fn h2_connect_empty_frames() {
+ use tokio::io::{AsyncReadExt, AsyncWriteExt};
+
+ let _ = pretty_env_logger::try_init();
+ let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
+ let addr = listener.local_addr().unwrap();
+ let conn = connect_async(addr).await;
+
+ let (h2, connection) = h2::client::handshake(conn).await.unwrap();
+ tokio::spawn(async move {
+ connection.await.unwrap();
+ });
+ let mut h2 = h2.ready().await.unwrap();
+
+ async fn connect_and_recv_bread(
+ h2: &mut SendRequest<Bytes>,
+ ) -> (RecvStream, SendStream<Bytes>) {
+ let request = Request::connect("localhost").body(()).unwrap();
+ let (response, send_stream) = h2.send_request(request, false).unwrap();
+ let response = response.await.unwrap();
+ assert_eq!(response.status(), StatusCode::OK);
+
+ let mut body = response.into_body();
+ let bytes = body.data().await.unwrap().unwrap();
+ assert_eq!(&bytes[..], b"Bread?");
+ let _ = body.flow_control().release_capacity(bytes.len());
+
+ (body, send_stream)
+ }
+
+ tokio::spawn(async move {
+ let (mut recv_stream, mut send_stream) = connect_and_recv_bread(&mut h2).await;
+
+ send_stream.send_data("".into(), false).unwrap();
+ send_stream.send_data("".into(), false).unwrap();
+ send_stream.send_data("".into(), false).unwrap();
+ send_stream.send_data("Baguette!".into(), false).unwrap();
+ send_stream.send_data("".into(), true).unwrap();
+
+ assert!(recv_stream.data().await.unwrap().unwrap().is_empty());
+ });
+
+ let svc = service_fn(move |req: Request<Body>| {
+ let on_upgrade = hyper::upgrade::on(req);
+
+ tokio::spawn(async move {
+ let mut upgraded = on_upgrade.await.expect("on_upgrade");
+ upgraded.write_all(b"Bread?").await.unwrap();
+
+ let mut vec = vec![];
+ upgraded.read_to_end(&mut vec).await.unwrap();
+ assert_eq!(s(&vec), "Baguette!");
+
+ upgraded.shutdown().await.unwrap();
+ });
+
+ future::ok::<_, hyper::Error>(
+ Response::builder()
+ .status(200)
+ .body(hyper::Body::empty())
+ .unwrap(),
+ )
+ });
+
+ let (socket, _) = listener.accept().await.unwrap();
+ Http::new()
+ .http2_only(true)
+ .serve_connection(socket, svc)
+ .with_upgrades()
+ .await
+ .unwrap();
+}
+
#[tokio::test]
async fn parse_errors_send_4xx_response() {
let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
|
hyperium/hyper
|
a24f0c0af8e1f4c6b7cc3a47c83eb6e4af88aca6
|
[
"2483"
] |
0.14
|
c7ab1aace102688ca3ad1bba6b5a7c5fd93b21b6
|
diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml
--- a/.github/workflows/CI.yml
+++ b/.github/workflows/CI.yml
@@ -147,10 +146,6 @@ jobs:
command: build
args: --features client,http1,http2,ffi
- # TODO: re-enable check once figuring out how to get it working in CI
- # - name: Verify cbindgen
- # run: ./capi/gen_header.sh --verify
-
- name: Make Examples
run: cd capi/examples && make client
diff --git a/capi/cbindgen.toml b/capi/cbindgen.toml
--- a/capi/cbindgen.toml
+++ b/capi/cbindgen.toml
@@ -1,4 +1,10 @@
+# See https://github.com/eqrion/cbindgen/blob/master/docs.md#cbindgentoml for
+# a list of possible configuration values.
language = "C"
+header = """/*
+ * Copyright 2021 Sean McArthur. MIT License.
+ * Generated by gen_header.sh. Do not edit directly.
+ */"""
include_guard = "_HYPER_H"
no_includes = true
sys_includes = ["stdint.h", "stddef.h"]
diff --git a/capi/gen_header.sh b/capi/gen_header.sh
--- a/capi/gen_header.sh
+++ b/capi/gen_header.sh
@@ -1,9 +1,13 @@
#!/usr/bin/env bash
-CAPI_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+# This script regenerates hyper.h. As of April 2021, it only works with the
+# nightly build of Rust.
+
+set -e
-WORK_DIR=`mktemp -d`
+CAPI_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+WORK_DIR=$(mktemp -d)
# check if tmp dir was created
if [[ ! "$WORK_DIR" || ! -d "$WORK_DIR" ]]; then
diff --git a/capi/gen_header.sh b/capi/gen_header.sh
--- a/capi/gen_header.sh
+++ b/capi/gen_header.sh
@@ -14,9 +18,8 @@ fi
header_file_backup="$CAPI_DIR/include/hyper.h.backup"
function cleanup {
- #echo "$WORK_DIR"
rm -rf "$WORK_DIR"
- rm "$header_file_backup"
+ rm "$header_file_backup" || true
}
trap cleanup EXIT
diff --git a/capi/gen_header.sh b/capi/gen_header.sh
--- a/capi/gen_header.sh
+++ b/capi/gen_header.sh
@@ -44,10 +47,14 @@ cp "$CAPI_DIR/include/hyper.h" "$header_file_backup"
#cargo metadata --no-default-features --features ffi --format-version 1 > "$WORK_DIR/metadata.json"
-cd $WORK_DIR
+cd "${WORK_DIR}" || exit 2
# Expand just the ffi module
-cargo rustc -- -Z unstable-options --pretty=expanded > expanded.rs 2>/dev/null
+if ! output=$(cargo rustc -- -Z unstable-options --pretty=expanded 2>&1 > expanded.rs); then
+ # As of April 2021 the script above prints a lot of warnings/errors, and
+ # exits with a nonzero return code, but hyper.h still gets generated.
+ echo "$output"
+fi
# Replace the previous copy with the single expanded file
rm -rf ./src
diff --git a/capi/gen_header.sh b/capi/gen_header.sh
--- a/capi/gen_header.sh
+++ b/capi/gen_header.sh
@@ -56,17 +63,17 @@ mv expanded.rs src/lib.rs
# Bindgen!
-cbindgen\
- -c "$CAPI_DIR/cbindgen.toml"\
- --lockfile "$CAPI_DIR/../Cargo.lock"\
- -o "$CAPI_DIR/include/hyper.h"\
- $1
-
-bindgen_exit_code=$?
-
-if [[ "--verify" == "$1" && "$bindgen_exit_code" != 0 ]]; then
- echo "diff generated (<) vs backup (>)"
- diff "$CAPI_DIR/include/hyper.h" "$header_file_backup"
+if ! cbindgen \
+ --config "$CAPI_DIR/cbindgen.toml" \
+ --lockfile "$CAPI_DIR/../Cargo.lock" \
+ --output "$CAPI_DIR/include/hyper.h" \
+ "${@}"; then
+ bindgen_exit_code=$?
+ if [[ "--verify" == "$1" ]]; then
+ echo "diff generated (<) vs backup (>)"
+ diff "$CAPI_DIR/include/hyper.h" "$header_file_backup"
+ fi
+ exit $bindgen_exit_code
fi
-exit $bindgen_exit_code
+exit 0
diff --git a/capi/include/hyper.h b/capi/include/hyper.h
--- a/capi/include/hyper.h
+++ b/capi/include/hyper.h
@@ -1,32 +1,78 @@
+/*
+ * Copyright 2021 Sean McArthur. MIT License.
+ * Generated by gen_header.sh. Do not edit directly.
+ */
+
#ifndef _HYPER_H
#define _HYPER_H
#include <stdint.h>
#include <stddef.h>
+/*
+ Return in iter functions to continue iterating.
+ */
#define HYPER_ITER_CONTINUE 0
+/*
+ Return in iter functions to stop iterating.
+ */
#define HYPER_ITER_BREAK 1
+/*
+ An HTTP Version that is unspecified.
+ */
#define HYPER_HTTP_VERSION_NONE 0
+/*
+ The HTTP/1.0 version.
+ */
#define HYPER_HTTP_VERSION_1_0 10
+/*
+ The HTTP/1.1 version.
+ */
#define HYPER_HTTP_VERSION_1_1 11
+/*
+ The HTTP/2 version.
+ */
#define HYPER_HTTP_VERSION_2 20
+/*
+ Sentinel value to return from a read or write callback that the operation
+ is pending.
+ */
#define HYPER_IO_PENDING 4294967295
+/*
+ Sentinel value to return from a read or write callback that the operation
+ has errored.
+ */
#define HYPER_IO_ERROR 4294967294
+/*
+ Return in a poll function to indicate it was ready.
+ */
#define HYPER_POLL_READY 0
+/*
+ Return in a poll function to indicate it is still pending.
+
+ The passed in `hyper_waker` should be registered to wake up the task at
+ some later point.
+ */
#define HYPER_POLL_PENDING 1
+/*
+ Return in a poll function indicate an error.
+ */
#define HYPER_POLL_ERROR 3
-typedef enum {
+/*
+ A return code for many of hyper's methods.
+ */
+typedef enum hyper_code {
/*
All is well.
*/
diff --git a/capi/include/hyper.h b/capi/include/hyper.h
--- a/capi/include/hyper.h
+++ b/capi/include/hyper.h
@@ -60,7 +106,10 @@ typedef enum {
HYPERE_INVALID_PEER_MESSAGE,
} hyper_code;
-typedef enum {
+/*
+ A descriptor for what type a `hyper_task` value is.
+ */
+typedef enum hyper_task_return_type {
/*
The value of this task is null (does not imply an error).
*/
diff --git a/capi/include/hyper.h b/capi/include/hyper.h
--- a/capi/include/hyper.h
+++ b/capi/include/hyper.h
@@ -83,41 +132,86 @@ typedef enum {
HYPER_TASK_BUF,
} hyper_task_return_type;
-typedef struct hyper_executor hyper_executor;
-
-typedef struct hyper_io hyper_io;
-
-typedef struct hyper_task hyper_task;
-
+/*
+ A streaming HTTP body.
+ */
typedef struct hyper_body hyper_body;
+/*
+ A buffer of bytes that is sent or received on a `hyper_body`.
+ */
typedef struct hyper_buf hyper_buf;
+/*
+ An HTTP client connection handle.
+
+ These are used to send a request on a single connection. It's possible to
+ send multiple requests on a single connection, such as when HTTP/1
+ keep-alive or HTTP/2 is used.
+ */
typedef struct hyper_clientconn hyper_clientconn;
+/*
+ An options builder to configure an HTTP client connection.
+ */
typedef struct hyper_clientconn_options hyper_clientconn_options;
+/*
+ An async context for a task that contains the related waker.
+ */
typedef struct hyper_context hyper_context;
+/*
+ A more detailed error object returned by some hyper functions.
+ */
typedef struct hyper_error hyper_error;
+/*
+ A task executor for `hyper_task`s.
+ */
+typedef struct hyper_executor hyper_executor;
+
+/*
+ An HTTP header map.
+
+ These can be part of a request or response.
+ */
typedef struct hyper_headers hyper_headers;
+/*
+ An IO object used to represent a socket or similar concept.
+ */
+typedef struct hyper_io hyper_io;
+
+/*
+ An HTTP request.
+ */
typedef struct hyper_request hyper_request;
+/*
+ An HTTP response.
+ */
typedef struct hyper_response hyper_response;
+/*
+ An async task.
+ */
+typedef struct hyper_task hyper_task;
+
+/*
+ A waker that is saved and used to waken a pending task.
+ */
typedef struct hyper_waker hyper_waker;
-typedef int (*hyper_body_foreach_callback)(void*, const hyper_buf*);
+typedef int (*hyper_body_foreach_callback)(void*, const struct hyper_buf*);
-typedef int (*hyper_body_data_callback)(void*, hyper_context*, hyper_buf**);
+typedef int (*hyper_body_data_callback)(void*, struct hyper_context*, struct hyper_buf**);
typedef int (*hyper_headers_foreach_callback)(void*, const uint8_t*, size_t, const uint8_t*, size_t);
-typedef size_t (*hyper_io_read_callback)(void*, hyper_context*, uint8_t*, size_t);
+typedef size_t (*hyper_io_read_callback)(void*, struct hyper_context*, uint8_t*, size_t);
-typedef size_t (*hyper_io_write_callback)(void*, hyper_context*, const uint8_t*, size_t);
+typedef size_t (*hyper_io_write_callback)(void*, struct hyper_context*, const uint8_t*, size_t);
#ifdef __cplusplus
extern "C" {
diff --git a/capi/include/hyper.h b/capi/include/hyper.h
--- a/capi/include/hyper.h
+++ b/capi/include/hyper.h
@@ -133,12 +227,12 @@ const char *hyper_version(void);
If not configured, this body acts as an empty payload.
*/
-hyper_body *hyper_body_new(void);
+struct hyper_body *hyper_body_new(void);
/*
Free a `hyper_body *`.
*/
-void hyper_body_free(hyper_body *body);
+void hyper_body_free(struct hyper_body *body);
/*
Return a task that will poll the body for the next buffer of data.
diff --git a/capi/include/hyper.h b/capi/include/hyper.h
--- a/capi/include/hyper.h
+++ b/capi/include/hyper.h
@@ -152,7 +246,7 @@ void hyper_body_free(hyper_body *body);
This does not consume the `hyper_body *`, so it may be used to again.
However, it MUST NOT be used or freed until the related task completes.
*/
-hyper_task *hyper_body_data(hyper_body *body);
+struct hyper_task *hyper_body_data(struct hyper_body *body);
/*
Return a task that will poll the body and execute the callback with each
diff --git a/capi/include/hyper.h b/capi/include/hyper.h
--- a/capi/include/hyper.h
+++ b/capi/include/hyper.h
@@ -166,12 +260,14 @@ hyper_task *hyper_body_data(hyper_body *body);
This will consume the `hyper_body *`, you shouldn't use it anymore or free it.
*/
-hyper_task *hyper_body_foreach(hyper_body *body, hyper_body_foreach_callback func, void *userdata);
+struct hyper_task *hyper_body_foreach(struct hyper_body *body,
+ hyper_body_foreach_callback func,
+ void *userdata);
/*
Set userdata on this body, which will be passed to callback functions.
*/
-void hyper_body_set_userdata(hyper_body *body, void *userdata);
+void hyper_body_set_userdata(struct hyper_body *body, void *userdata);
/*
Set the data callback for this body.
diff --git a/capi/include/hyper.h b/capi/include/hyper.h
--- a/capi/include/hyper.h
+++ b/capi/include/hyper.h
@@ -194,7 +290,7 @@ void hyper_body_set_userdata(hyper_body *body, void *userdata);
If some error has occurred, you can return `HYPER_POLL_ERROR` to abort
the body.
*/
-void hyper_body_set_data_func(hyper_body *body, hyper_body_data_callback func);
+void hyper_body_set_data_func(struct hyper_body *body, hyper_body_data_callback func);
/*
Create a new `hyper_buf *` by copying the provided bytes.
diff --git a/capi/include/hyper.h b/capi/include/hyper.h
--- a/capi/include/hyper.h
+++ b/capi/include/hyper.h
@@ -202,7 +298,7 @@ void hyper_body_set_data_func(hyper_body *body, hyper_body_data_callback func);
This makes an owned copy of the bytes, so the `buf` argument can be
freed or changed afterwards.
*/
-hyper_buf *hyper_buf_copy(const uint8_t *buf, size_t len);
+struct hyper_buf *hyper_buf_copy(const uint8_t *buf, size_t len);
/*
Get a pointer to the bytes in this buffer.
diff --git a/capi/include/hyper.h b/capi/include/hyper.h
--- a/capi/include/hyper.h
+++ b/capi/include/hyper.h
@@ -213,17 +309,17 @@ hyper_buf *hyper_buf_copy(const uint8_t *buf, size_t len);
This pointer is borrowed data, and not valid once the `hyper_buf` is
consumed/freed.
*/
-const uint8_t *hyper_buf_bytes(const hyper_buf *buf);
+const uint8_t *hyper_buf_bytes(const struct hyper_buf *buf);
/*
Get the length of the bytes this buffer contains.
*/
-size_t hyper_buf_len(const hyper_buf *buf);
+size_t hyper_buf_len(const struct hyper_buf *buf);
/*
Free this buffer.
*/
-void hyper_buf_free(hyper_buf *buf);
+void hyper_buf_free(struct hyper_buf *buf);
/*
Starts an HTTP client connection handshake using the provided IO transport
diff --git a/capi/include/hyper.h b/capi/include/hyper.h
--- a/capi/include/hyper.h
+++ b/capi/include/hyper.h
@@ -234,7 +330,8 @@ void hyper_buf_free(hyper_buf *buf);
The returned `hyper_task *` must be polled with an executor until the
handshake completes, at which point the value can be taken.
*/
-hyper_task *hyper_clientconn_handshake(hyper_io *io, hyper_clientconn_options *options);
+struct hyper_task *hyper_clientconn_handshake(struct hyper_io *io,
+ struct hyper_clientconn_options *options);
/*
Send a request on the client connection.
diff --git a/capi/include/hyper.h b/capi/include/hyper.h
--- a/capi/include/hyper.h
+++ b/capi/include/hyper.h
@@ -242,46 +339,47 @@ hyper_task *hyper_clientconn_handshake(hyper_io *io, hyper_clientconn_options *o
Returns a task that needs to be polled until it is ready. When ready, the
task yields a `hyper_response *`.
*/
-hyper_task *hyper_clientconn_send(hyper_clientconn *conn, hyper_request *req);
+struct hyper_task *hyper_clientconn_send(struct hyper_clientconn *conn, struct hyper_request *req);
/*
Free a `hyper_clientconn *`.
*/
-void hyper_clientconn_free(hyper_clientconn *conn);
+void hyper_clientconn_free(struct hyper_clientconn *conn);
/*
Creates a new set of HTTP clientconn options to be used in a handshake.
*/
-hyper_clientconn_options *hyper_clientconn_options_new(void);
+struct hyper_clientconn_options *hyper_clientconn_options_new(void);
/*
Free a `hyper_clientconn_options *`.
*/
-void hyper_clientconn_options_free(hyper_clientconn_options *opts);
+void hyper_clientconn_options_free(struct hyper_clientconn_options *opts);
/*
Set the client background task executor.
This does not consume the `options` or the `exec`.
*/
-void hyper_clientconn_options_exec(hyper_clientconn_options *opts, const hyper_executor *exec);
+void hyper_clientconn_options_exec(struct hyper_clientconn_options *opts,
+ const struct hyper_executor *exec);
/*
Set the whether to use HTTP2.
Pass `0` to disable, `1` to enable.
*/
-hyper_code hyper_clientconn_options_http2(hyper_clientconn_options *opts, int enabled);
+enum hyper_code hyper_clientconn_options_http2(struct hyper_clientconn_options *opts, int enabled);
/*
Frees a `hyper_error`.
*/
-void hyper_error_free(hyper_error *err);
+void hyper_error_free(struct hyper_error *err);
/*
Get an equivalent `hyper_code` from this error.
*/
-hyper_code hyper_error_code(const hyper_error *err);
+enum hyper_code hyper_error_code(const struct hyper_error *err);
/*
Print the details of this error to a buffer.
diff --git a/capi/include/hyper.h b/capi/include/hyper.h
--- a/capi/include/hyper.h
+++ b/capi/include/hyper.h
@@ -291,27 +389,31 @@ hyper_code hyper_error_code(const hyper_error *err);
The return value is number of bytes that were written to `dst`.
*/
-size_t hyper_error_print(const hyper_error *err, uint8_t *dst, size_t dst_len);
+size_t hyper_error_print(const struct hyper_error *err, uint8_t *dst, size_t dst_len);
/*
Construct a new HTTP request.
*/
-hyper_request *hyper_request_new(void);
+struct hyper_request *hyper_request_new(void);
/*
Free an HTTP request if not going to send it on a client.
*/
-void hyper_request_free(hyper_request *req);
+void hyper_request_free(struct hyper_request *req);
/*
Set the HTTP Method of the request.
*/
-hyper_code hyper_request_set_method(hyper_request *req, const uint8_t *method, size_t method_len);
+enum hyper_code hyper_request_set_method(struct hyper_request *req,
+ const uint8_t *method,
+ size_t method_len);
/*
Set the URI of the request.
*/
-hyper_code hyper_request_set_uri(hyper_request *req, const uint8_t *uri, size_t uri_len);
+enum hyper_code hyper_request_set_uri(struct hyper_request *req,
+ const uint8_t *uri,
+ size_t uri_len);
/*
Set the preferred HTTP version of the request.
diff --git a/capi/include/hyper.h b/capi/include/hyper.h
--- a/capi/include/hyper.h
+++ b/capi/include/hyper.h
@@ -321,7 +423,7 @@ hyper_code hyper_request_set_uri(hyper_request *req, const uint8_t *uri, size_t
Note that this won't change the major HTTP version of the connection,
since that is determined at the handshake step.
*/
-hyper_code hyper_request_set_version(hyper_request *req, int version);
+enum hyper_code hyper_request_set_version(struct hyper_request *req, int version);
/*
Gets a reference to the HTTP headers of this request
diff --git a/capi/include/hyper.h b/capi/include/hyper.h
--- a/capi/include/hyper.h
+++ b/capi/include/hyper.h
@@ -329,7 +431,7 @@ hyper_code hyper_request_set_version(hyper_request *req, int version);
This is not an owned reference, so it should not be accessed after the
`hyper_request` has been consumed.
*/
-hyper_headers *hyper_request_headers(hyper_request *req);
+struct hyper_headers *hyper_request_headers(struct hyper_request *req);
/*
Set the body of the request.
diff --git a/capi/include/hyper.h b/capi/include/hyper.h
--- a/capi/include/hyper.h
+++ b/capi/include/hyper.h
@@ -339,19 +441,19 @@ hyper_headers *hyper_request_headers(hyper_request *req);
This takes ownership of the `hyper_body *`, you must not use it or
free it after setting it on the request.
*/
-hyper_code hyper_request_set_body(hyper_request *req, hyper_body *body);
+enum hyper_code hyper_request_set_body(struct hyper_request *req, struct hyper_body *body);
/*
Free an HTTP response after using it.
*/
-void hyper_response_free(hyper_response *resp);
+void hyper_response_free(struct hyper_response *resp);
/*
Get the HTTP-Status code of this response.
It will always be within the range of 100-599.
*/
-uint16_t hyper_response_status(const hyper_response *resp);
+uint16_t hyper_response_status(const struct hyper_response *resp);
/*
Get a pointer to the reason-phrase of this response.
diff --git a/capi/include/hyper.h b/capi/include/hyper.h
--- a/capi/include/hyper.h
+++ b/capi/include/hyper.h
@@ -364,14 +466,14 @@ uint16_t hyper_response_status(const hyper_response *resp);
Use `hyper_response_reason_phrase_len()` to get the length of this
buffer.
*/
-const uint8_t *hyper_response_reason_phrase(const hyper_response *resp);
+const uint8_t *hyper_response_reason_phrase(const struct hyper_response *resp);
/*
Get the length of the reason-phrase of this response.
Use `hyper_response_reason_phrase()` to get the buffer pointer.
*/
-size_t hyper_response_reason_phrase_len(const hyper_response *resp);
+size_t hyper_response_reason_phrase_len(const struct hyper_response *resp);
/*
Get the HTTP version used by this response.
diff --git a/capi/include/hyper.h b/capi/include/hyper.h
--- a/capi/include/hyper.h
+++ b/capi/include/hyper.h
@@ -383,7 +485,7 @@ size_t hyper_response_reason_phrase_len(const hyper_response *resp);
- `HYPER_HTTP_VERSION_2`
- `HYPER_HTTP_VERSION_NONE` if newer (or older).
*/
-int hyper_response_version(const hyper_response *resp);
+int hyper_response_version(const struct hyper_response *resp);
/*
Gets a reference to the HTTP headers of this response.
diff --git a/capi/include/hyper.h b/capi/include/hyper.h
--- a/capi/include/hyper.h
+++ b/capi/include/hyper.h
@@ -391,14 +493,14 @@ int hyper_response_version(const hyper_response *resp);
This is not an owned reference, so it should not be accessed after the
`hyper_response` has been freed.
*/
-hyper_headers *hyper_response_headers(hyper_response *resp);
+struct hyper_headers *hyper_response_headers(struct hyper_response *resp);
/*
Take ownership of the body of this response.
It is safe to free the response even after taking ownership of its body.
*/
-hyper_body *hyper_response_body(hyper_response *resp);
+struct hyper_body *hyper_response_body(struct hyper_response *resp);
/*
Iterates the headers passing each name and value pair to the callback.
diff --git a/capi/include/hyper.h b/capi/include/hyper.h
--- a/capi/include/hyper.h
+++ b/capi/include/hyper.h
@@ -408,7 +510,7 @@ hyper_body *hyper_response_body(hyper_response *resp);
The callback should return `HYPER_ITER_CONTINUE` to keep iterating, or
`HYPER_ITER_BREAK` to stop.
*/
-void hyper_headers_foreach(const hyper_headers *headers,
+void hyper_headers_foreach(const struct hyper_headers *headers,
hyper_headers_foreach_callback func,
void *userdata);
diff --git a/capi/include/hyper.h b/capi/include/hyper.h
--- a/capi/include/hyper.h
+++ b/capi/include/hyper.h
@@ -417,11 +519,11 @@ void hyper_headers_foreach(const hyper_headers *headers,
This overwrites any previous value set for the header.
*/
-hyper_code hyper_headers_set(hyper_headers *headers,
- const uint8_t *name,
- size_t name_len,
- const uint8_t *value,
- size_t value_len);
+enum hyper_code hyper_headers_set(struct hyper_headers *headers,
+ const uint8_t *name,
+ size_t name_len,
+ const uint8_t *value,
+ size_t value_len);
/*
Adds the provided value to the list of the provided name.
diff --git a/capi/include/hyper.h b/capi/include/hyper.h
--- a/capi/include/hyper.h
+++ b/capi/include/hyper.h
@@ -429,11 +531,11 @@ hyper_code hyper_headers_set(hyper_headers *headers,
If there were already existing values for the name, this will append the
new value to the internal list.
*/
-hyper_code hyper_headers_add(hyper_headers *headers,
- const uint8_t *name,
- size_t name_len,
- const uint8_t *value,
- size_t value_len);
+enum hyper_code hyper_headers_add(struct hyper_headers *headers,
+ const uint8_t *name,
+ size_t name_len,
+ const uint8_t *value,
+ size_t value_len);
/*
Create a new IO type used to represent a transport.
diff --git a/capi/include/hyper.h b/capi/include/hyper.h
--- a/capi/include/hyper.h
+++ b/capi/include/hyper.h
@@ -441,7 +543,7 @@ hyper_code hyper_headers_add(hyper_headers *headers,
The read and write functions of this transport should be set with
`hyper_io_set_read` and `hyper_io_set_write`.
*/
-hyper_io *hyper_io_new(void);
+struct hyper_io *hyper_io_new(void);
/*
Free an unused `hyper_io *`.
diff --git a/capi/include/hyper.h b/capi/include/hyper.h
--- a/capi/include/hyper.h
+++ b/capi/include/hyper.h
@@ -449,14 +551,14 @@ hyper_io *hyper_io_new(void);
This is typically only useful if you aren't going to pass ownership
of the IO handle to hyper, such as with `hyper_clientconn_handshake()`.
*/
-void hyper_io_free(hyper_io *io);
+void hyper_io_free(struct hyper_io *io);
/*
Set the user data pointer for this IO to some value.
This value is passed as an argument to the read and write callbacks.
*/
-void hyper_io_set_userdata(hyper_io *io, void *data);
+void hyper_io_set_userdata(struct hyper_io *io, void *data);
/*
Set the read function for this IO transport.
diff --git a/capi/include/hyper.h b/capi/include/hyper.h
--- a/capi/include/hyper.h
+++ b/capi/include/hyper.h
@@ -476,7 +578,7 @@ void hyper_io_set_userdata(hyper_io *io, void *data);
If there is an irrecoverable error reading data, then `HYPER_IO_ERROR`
should be the return value.
*/
-void hyper_io_set_read(hyper_io *io, hyper_io_read_callback func);
+void hyper_io_set_read(struct hyper_io *io, hyper_io_read_callback func);
/*
Set the write function for this IO transport.
diff --git a/capi/include/hyper.h b/capi/include/hyper.h
--- a/capi/include/hyper.h
+++ b/capi/include/hyper.h
@@ -493,17 +595,17 @@ void hyper_io_set_read(hyper_io *io, hyper_io_read_callback func);
If there is an irrecoverable error reading data, then `HYPER_IO_ERROR`
should be the return value.
*/
-void hyper_io_set_write(hyper_io *io, hyper_io_write_callback func);
+void hyper_io_set_write(struct hyper_io *io, hyper_io_write_callback func);
/*
Creates a new task executor.
*/
-const hyper_executor *hyper_executor_new(void);
+const struct hyper_executor *hyper_executor_new(void);
/*
Frees an executor and any incomplete tasks still part of it.
*/
-void hyper_executor_free(const hyper_executor *exec);
+void hyper_executor_free(const struct hyper_executor *exec);
/*
Push a task onto the executor.
diff --git a/capi/include/hyper.h b/capi/include/hyper.h
--- a/capi/include/hyper.h
+++ b/capi/include/hyper.h
@@ -511,7 +613,7 @@ void hyper_executor_free(const hyper_executor *exec);
The executor takes ownership of the task, it should not be accessed
again unless returned back to the user with `hyper_executor_poll`.
*/
-hyper_code hyper_executor_push(const hyper_executor *exec, hyper_task *task);
+enum hyper_code hyper_executor_push(const struct hyper_executor *exec, struct hyper_task *task);
/*
Polls the executor, trying to make progress on any tasks that have notified
diff --git a/capi/include/hyper.h b/capi/include/hyper.h
--- a/capi/include/hyper.h
+++ b/capi/include/hyper.h
@@ -521,12 +623,12 @@ hyper_code hyper_executor_push(const hyper_executor *exec, hyper_task *task);
If there are no ready tasks, this returns `NULL`.
*/
-hyper_task *hyper_executor_poll(const hyper_executor *exec);
+struct hyper_task *hyper_executor_poll(const struct hyper_executor *exec);
/*
Free a task.
*/
-void hyper_task_free(hyper_task *task);
+void hyper_task_free(struct hyper_task *task);
/*
Takes the output value of this task.
diff --git a/capi/include/hyper.h b/capi/include/hyper.h
--- a/capi/include/hyper.h
+++ b/capi/include/hyper.h
@@ -536,12 +638,12 @@ void hyper_task_free(hyper_task *task);
Use `hyper_task_type` to determine the type of the `void *` return value.
*/
-void *hyper_task_value(hyper_task *task);
+void *hyper_task_value(struct hyper_task *task);
/*
Query the return type of this task.
*/
-hyper_task_return_type hyper_task_type(hyper_task *task);
+enum hyper_task_return_type hyper_task_type(struct hyper_task *task);
/*
Set a user data pointer to be associated with this task.
diff --git a/capi/include/hyper.h b/capi/include/hyper.h
--- a/capi/include/hyper.h
+++ b/capi/include/hyper.h
@@ -549,27 +651,27 @@ hyper_task_return_type hyper_task_type(hyper_task *task);
This value will be passed to task callbacks, and can be checked later
with `hyper_task_userdata`.
*/
-void hyper_task_set_userdata(hyper_task *task, void *userdata);
+void hyper_task_set_userdata(struct hyper_task *task, void *userdata);
/*
Retrieve the userdata that has been set via `hyper_task_set_userdata`.
*/
-void *hyper_task_userdata(hyper_task *task);
+void *hyper_task_userdata(struct hyper_task *task);
/*
Copies a waker out of the task context.
*/
-hyper_waker *hyper_context_waker(hyper_context *cx);
+struct hyper_waker *hyper_context_waker(struct hyper_context *cx);
/*
Free a waker that hasn't been woken.
*/
-void hyper_waker_free(hyper_waker *waker);
+void hyper_waker_free(struct hyper_waker *waker);
/*
Free a waker that hasn't been woken.
*/
-void hyper_waker_wake(hyper_waker *waker);
+void hyper_waker_wake(struct hyper_waker *waker);
#ifdef __cplusplus
} // extern "C"
|
I was able to get past that issue by adding the following patch to capi/gen_header.sh
```patch
diff --git a/capi/gen_header.sh b/capi/gen_header.sh
index 4cd1a26c..27f2c4cf 100755
--- a/capi/gen_header.sh
+++ b/capi/gen_header.sh
@@ -1,5 +1,7 @@
#!/usr/bin/env bash
+set -ex
+
CAPI_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
WORK_DIR=`mktemp -d`
@@ -38,6 +40,18 @@ edition = "2018"
publish = false
[dependencies]
+libc = { version = "0.2", optional = true }
+
+[features]
+default = [
+ "client",
+ "ffi",
+ "http1",
+]
+
+http1 = []
+client = []
+ffi = ["libc"]
EOF
cp "$CAPI_DIR/include/hyper.h" "$header_file_backup"
@@ -47,7 +61,7 @@ cp "$CAPI_DIR/include/hyper.h" "$header_file_backup"
cd $WORK_DIR
# Expand just the ffi module
-cargo rustc -- -Z unstable-options --pretty=expanded > expanded.rs 2>/dev/null
+cargo rustc --features client,http1,ffi -- -Z unstable-options --pretty=expanded > expanded.rs
# Replace the previous copy with the single expanded file
rm -rf ./src
```
I'm getting fewer errors now, but no idea if I'm on the right track or if fixing them takes me further down the wrong track
```
error[E0432]: unresolved import `crate::body`
--> /Users/kevin/src/github.com/hyperium/hyper/capi/../src/ffi/body.rs:11:12
|
11 | use crate::body::{Body, Bytes, HttpBody as _};
| ^^^^
| |
| unresolved import
| help: a similar path exists: `crate::ffi::body`
error[E0432]: unresolved import `crate::client`
--> /Users/kevin/src/github.com/hyperium/hyper/capi/../src/ffi/client.rs:5:12
|
5 | use crate::client::conn;
| ^^^^^^
| |
| unresolved import
| help: a similar path exists: `crate::ffi::client`
error[E0432]: unresolved import `crate::rt`
--> /Users/kevin/src/github.com/hyperium/hyper/capi/../src/ffi/client.rs:6:12
|
6 | use crate::rt::Executor as _;
| ^^
| |
| unresolved import
| help: a similar path exists: `std::rt`
```
|
2021-04-01T17:30:18Z
| 2,488
|
Errors running gen_header.sh
I'm trying to fix some of the errors you get when you run the curl test suite with the Hyper backend. I tried to pick an easy one to start - Curl expects you to return CURLE_UNSUPPORTED_PROTOCOL when a server returns HTTP/1.2, but Hyper/c-hyper.c currently doesn't.
So I wanted to add an extra constant to ffi/error.rs, which would let me read that constant in hyper-c.c in the Curl code and return the appropriate error.
```diff
--- a/src/ffi/error.rs
+++ b/src/ffi/error.rs
@@ -24,6 +24,8 @@ pub enum hyper_code {
HYPERE_FEATURE_NOT_ENABLED,
/// The peer sent an HTTP message that could not be parsed.
HYPERE_INVALID_PEER_MESSAGE,
+ /// The peer sent an HTTP version header that we cannot parse
+ HYPERE_INVALID_HTTP_VERSION,
}
// ===== impl hyper_error =====
@@ -31,9 +33,11 @@ pub enum hyper_code {
impl hyper_error {
fn code(&self) -> hyper_code {
use crate::error::Kind as ErrorKind;
+ use crate::error::Parse;
use crate::error::User;
match self.0.kind() {
+ ErrorKind::Parse(Parse::Version) => hyper_code::HYPERE_INVALID_HTTP_VERSION,
ErrorKind::Parse(_) => hyper_code::HYPERE_INVALID_PEER_MESSAGE,
ErrorKind::IncompleteMessage => hyper_code::HYPERE_UNEXPECTED_EOF,
ErrorKind::User(User::AbortedByCallback) => hyper_code::HYPERE_ABORTED_BY_CALLBACK,
```
However, in order to pick this up I need to run gen_header.sh. I ran into some errors running this script.
First I wasn't using the nightly but because `cargo rustc -- -Z unstable-options --pretty=expanded` swallows errors I didn't see that that was the issue - I had to modify the script in order to get it to work.
Now I'm running into this issue:
```
+ cargo rustc -- -Z unstable-options --pretty=expanded
Compiling hyper v0.0.0 (/private/var/folders/s1/909yt58s4wj8h_7v7frr8rkm0000gn/T/tmp.O3fSWUSrBt)
error: The `ffi` feature currently requires the `client` and `http1` features.
--> /Users/kevin/src/github.com/hyperium/hyper/capi/../src/ffi/mod.rs:37:1
|
37 | compile_error!("The `ffi` feature currently requires the `client` and `http1` features.");
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
```
Which I think is due to a poor interaction between the "fake" module and the "real" ffi module - I've been unsuccessful at modifying the `cargo rustc` to add those features. I can keep trying but my guess is that this is something that would take me an hour to fix and someone else two minutes to tell me the right place to look.
|
hyperium__hyper-2488
|
diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml
--- a/.github/workflows/CI.yml
+++ b/.github/workflows/CI.yml
@@ -17,6 +17,7 @@ jobs:
- test
- features
- ffi
+ - ffi-header
- doc
steps:
- run: exit 0
diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml
--- a/.github/workflows/CI.yml
+++ b/.github/workflows/CI.yml
@@ -119,9 +120,7 @@ jobs:
ffi:
name: Test C API (FFI)
needs: [style]
-
runs-on: ubuntu-latest
-
steps:
- name: Checkout
uses: actions/checkout@v1
diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml
--- a/.github/workflows/CI.yml
+++ b/.github/workflows/CI.yml
@@ -162,6 +157,39 @@ jobs:
command: test
args: --features full,ffi --lib
+ ffi-header:
+ name: Verify hyper.h is up to date
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v1
+
+ - name: Install Rust
+ uses: actions-rs/toolchain@v1
+ with:
+ profile: minimal
+ toolchain: nightly
+ default: true
+ override: true
+ components: cargo
+
+ - name: Install cbindgen
+ uses: actions-rs/cargo@v1
+ with:
+ command: install
+ args: cbindgen
+
+ - name: Build FFI
+ uses: actions-rs/cargo@v1
+ env:
+ RUSTFLAGS: --cfg hyper_unstable_ffi
+ with:
+ command: build
+ args: --features client,http1,http2,ffi
+
+ - name: Ensure that hyper.h is up to date
+ run: ./capi/gen_header.sh --verify
+
doc:
name: Build docs
needs: [style, test]
|
hyperium/hyper
|
a24f0c0af8e1f4c6b7cc3a47c83eb6e4af88aca6
|
[
"2482"
] |
0.14
|
98e7e0bd15642cf9e4bf07b3b03d8b4e538623ba
|
diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml
--- a/.github/workflows/CI.yml
+++ b/.github/workflows/CI.yml
@@ -66,6 +66,8 @@ jobs:
- rust: nightly
features: "--features full,nightly"
benches: true
+ - rust: 1.46
+ features: "--features full"
runs-on: ${{ matrix.os }}
|
Oh shucks. Probably need to revert that commit for now, unless socket2 can reduce it's dependent features, but I doubt it.
cc @Thomasdezeeuw
I'm afraid socket2's MSRV can't easily be lowered. Rust 1.46 was released August 27th, if that isn't old enough then I guess it needs to be reverted.
You could just pin socket2 v0.3 for now? v0.3.X is still actively maintained in a separate branch, afaik.
> You could just pin socket2 v0.3 for now? v0.3.X is still actively maintained in a separate branch, afaik.
v0.3 is still being maintained, but I prefer the ecosystem to move to v0.4.
~Fair! Works for me with Rust 1.50, but not 1.49, fwiw..~
@johanot are you sure? Socket2 should work with 1.46 and up.
Ok, that's embarrassing. My shell environment was polluted. Can confirm it works fine with 1.49 as well. Sorry, don't mind me :)
That's a fair point, we've mostly adopted Tokio's policy of working on rustc at least 6 months old. It could be the answer is just bump CI.
I could send a pr to bump the MSRV to 1.46. @nickelc do you need 1.45 support?
> I could send a pr to bump the MSRV to 1.46. @nickelc do you need 1.45 support?
no, i only noticed the issue.
|
2021-03-31T11:49:43Z
| 2,486
|
The update to socket2 v0.4 raised the MSRV to 1.46
CI didn't catch it because MSRV is tested only with default features.
```
$ cargo +1.45.2 c --features tcp
Checking socket2 v0.4.0
Checking tokio v1.4.0
error[E0658]: `match` is not allowed in a `const fn`
--> /.cargo/registry/src/github.com-1ecc6299db9ec823/socket2-0.4.0/src/lib.rs:156:9
|
156 | / match address {
157 | | SocketAddr::V4(_) => Domain::IPV4,
158 | | SocketAddr::V6(_) => Domain::IPV6,
159 | | }
| |_________^
|
= note: see issue #49146 <https://github.com/rust-lang/rust/issues/49146> for more information
```
|
hyperium__hyper-2486
|
diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml
--- a/.github/workflows/CI.yml
+++ b/.github/workflows/CI.yml
@@ -51,7 +51,7 @@ jobs:
- stable
- beta
- nightly
- - 1.45.2
+ - 1.46
os:
- ubuntu-latest
|
hyperium/hyper
|
a24f0c0af8e1f4c6b7cc3a47c83eb6e4af88aca6
|
[
"2313"
] |
0.14
|
117cc492a62c4051c75e7eec0f624b30db8a20e5
|
diff --git a/src/client/client.rs b/src/client/client.rs
--- a/src/client/client.rs
+++ b/src/client/client.rs
@@ -997,6 +997,17 @@ impl Builder {
self
}
+ /// Set whether HTTP/1 connections will write header names as provided
+ /// at the socket level.
+ ///
+ /// Note that this setting does not affect HTTP/2.
+ ///
+ /// Default is false.
+ pub fn http1_preserve_header_case(&mut self, val: bool) -> &mut Self {
+ self.conn_builder.h1_preserve_header_case(val);
+ self
+ }
+
/// Set whether HTTP/0.9 responses should be tolerated.
///
/// Default is false.
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -126,6 +126,7 @@ pub struct Builder {
h09_responses: bool,
h1_parser_config: ParserConfig,
h1_title_case_headers: bool,
+ h1_preserve_header_case: bool,
h1_read_buf_exact_size: Option<usize>,
h1_max_buf_size: Option<usize>,
#[cfg(feature = "http2")]
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -500,6 +501,7 @@ impl Builder {
h1_read_buf_exact_size: None,
h1_parser_config: Default::default(),
h1_title_case_headers: false,
+ h1_preserve_header_case: false,
h1_max_buf_size: None,
#[cfg(feature = "http2")]
h2_builder: Default::default(),
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -537,6 +539,11 @@ impl Builder {
self
}
+ pub(crate) fn h1_preserve_header_case(&mut self, enabled: bool) -> &mut Builder {
+ self.h1_preserve_header_case = enabled;
+ self
+ }
+
pub(super) fn h1_read_buf_exact_size(&mut self, sz: Option<usize>) -> &mut Builder {
self.h1_read_buf_exact_size = sz;
self.h1_max_buf_size = None;
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -719,6 +726,9 @@ impl Builder {
if opts.h1_title_case_headers {
conn.set_title_case_headers();
}
+ if opts.h1_preserve_header_case {
+ conn.set_preserve_header_case();
+ }
if opts.h09_responses {
conn.set_h09_responses();
}
diff --git a/src/ffi/client.rs b/src/ffi/client.rs
--- a/src/ffi/client.rs
+++ b/src/ffi/client.rs
@@ -106,8 +106,11 @@ unsafe impl AsTaskType for hyper_clientconn {
ffi_fn! {
/// Creates a new set of HTTP clientconn options to be used in a handshake.
fn hyper_clientconn_options_new() -> *mut hyper_clientconn_options {
+ let mut builder = conn::Builder::new();
+ builder.h1_preserve_header_case(true);
+
Box::into_raw(Box::new(hyper_clientconn_options {
- builder: conn::Builder::new(),
+ builder,
exec: WeakExec::new(),
}))
} ?= std::ptr::null_mut()
diff --git a/src/ffi/http_types.rs b/src/ffi/http_types.rs
--- a/src/ffi/http_types.rs
+++ b/src/ffi/http_types.rs
@@ -6,6 +6,7 @@ use super::body::hyper_body;
use super::error::hyper_code;
use super::task::{hyper_task_return_type, AsTaskType};
use super::HYPER_ITER_CONTINUE;
+use crate::ext::HeaderCaseMap;
use crate::header::{HeaderName, HeaderValue};
use crate::{Body, HeaderMap, Method, Request, Response, Uri};
diff --git a/src/ffi/http_types.rs b/src/ffi/http_types.rs
--- a/src/ffi/http_types.rs
+++ b/src/ffi/http_types.rs
@@ -18,16 +19,11 @@ pub struct hyper_response(pub(super) Response<Body>);
/// An HTTP header map.
///
/// These can be part of a request or response.
-#[derive(Default)]
pub struct hyper_headers {
pub(super) headers: HeaderMap,
orig_casing: HeaderCaseMap,
}
-// Will probably be moved to `hyper::ext::http1`
-#[derive(Debug, Default)]
-pub(crate) struct HeaderCaseMap(HeaderMap<Bytes>);
-
#[derive(Debug)]
pub(crate) struct ReasonPhrase(pub(crate) Bytes);
diff --git a/src/ffi/http_types.rs b/src/ffi/http_types.rs
--- a/src/ffi/http_types.rs
+++ b/src/ffi/http_types.rs
@@ -229,7 +225,7 @@ impl hyper_response {
let orig_casing = resp
.extensions_mut()
.remove::<HeaderCaseMap>()
- .unwrap_or_default();
+ .unwrap_or_else(HeaderCaseMap::default);
resp.extensions_mut().insert(hyper_headers {
headers,
orig_casing,
diff --git a/src/ffi/http_types.rs b/src/ffi/http_types.rs
--- a/src/ffi/http_types.rs
+++ b/src/ffi/http_types.rs
@@ -265,10 +261,7 @@ type hyper_headers_foreach_callback =
impl hyper_headers {
pub(super) fn get_or_default(ext: &mut http::Extensions) -> &mut hyper_headers {
if let None = ext.get_mut::<hyper_headers>() {
- ext.insert(hyper_headers {
- headers: Default::default(),
- orig_casing: Default::default(),
- });
+ ext.insert(hyper_headers::default());
}
ext.get_mut::<hyper_headers>().unwrap()
diff --git a/src/ffi/http_types.rs b/src/ffi/http_types.rs
--- a/src/ffi/http_types.rs
+++ b/src/ffi/http_types.rs
@@ -290,11 +283,11 @@ ffi_fn! {
//
// TODO: consider adding http::HeaderMap::entries() iterator
for name in headers.headers.keys() {
- let mut names = headers.orig_casing.get_all(name).iter();
+ let mut names = headers.orig_casing.get_all(name);
for value in headers.headers.get_all(name) {
let (name_ptr, name_len) = if let Some(orig_name) = names.next() {
- (orig_name.as_ptr(), orig_name.len())
+ (orig_name.as_ref().as_ptr(), orig_name.as_ref().len())
} else {
(
name.as_str().as_bytes().as_ptr(),
diff --git a/src/ffi/http_types.rs b/src/ffi/http_types.rs
--- a/src/ffi/http_types.rs
+++ b/src/ffi/http_types.rs
@@ -349,6 +342,15 @@ ffi_fn! {
}
}
+impl Default for hyper_headers {
+ fn default() -> Self {
+ Self {
+ headers: Default::default(),
+ orig_casing: HeaderCaseMap::default(),
+ }
+ }
+}
+
unsafe fn raw_name_value(
name: *const u8,
name_len: size_t,
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -46,7 +46,6 @@ where
keep_alive: KA::Busy,
method: None,
h1_parser_config: ParserConfig::default(),
- #[cfg(feature = "ffi")]
preserve_header_case: false,
title_case_headers: false,
h09_responses: false,
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -77,13 +76,16 @@ where
}
#[cfg(feature = "client")]
+ pub(crate) fn set_h1_parser_config(&mut self, parser_config: ParserConfig) {
+ self.state.h1_parser_config = parser_config;
+ }
+
pub(crate) fn set_title_case_headers(&mut self) {
self.state.title_case_headers = true;
}
- #[cfg(feature = "client")]
- pub(crate) fn set_h1_parser_config(&mut self, parser_config: ParserConfig) {
- self.state.h1_parser_config = parser_config;
+ pub(crate) fn set_preserve_header_case(&mut self) {
+ self.state.preserve_header_case = true;
}
#[cfg(feature = "client")]
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -158,7 +160,6 @@ where
cached_headers: &mut self.state.cached_headers,
req_method: &mut self.state.method,
h1_parser_config: self.state.h1_parser_config.clone(),
- #[cfg(feature = "ffi")]
preserve_header_case: self.state.preserve_header_case,
h09_responses: self.state.h09_responses,
}
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -499,16 +500,6 @@ where
self.enforce_version(&mut head);
- // Maybe check if we should preserve header casing on received
- // message headers...
- #[cfg(feature = "ffi")]
- {
- if T::is_client() && !self.state.preserve_header_case {
- self.state.preserve_header_case =
- head.extensions.get::<crate::ffi::HeaderCaseMap>().is_some();
- }
- }
-
let buf = self.io.headers_buf();
match super::role::encode_headers::<T>(
Encode {
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -772,7 +763,6 @@ struct State {
/// a body or not.
method: Option<Method>,
h1_parser_config: ParserConfig,
- #[cfg(feature = "ffi")]
preserve_header_case: bool,
title_case_headers: bool,
h09_responses: bool,
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -160,7 +160,6 @@ where
cached_headers: parse_ctx.cached_headers,
req_method: parse_ctx.req_method,
h1_parser_config: parse_ctx.h1_parser_config.clone(),
- #[cfg(feature = "ffi")]
preserve_header_case: parse_ctx.preserve_header_case,
h09_responses: parse_ctx.h09_responses,
},
diff --git a/src/proto/h1/mod.rs b/src/proto/h1/mod.rs
--- a/src/proto/h1/mod.rs
+++ b/src/proto/h1/mod.rs
@@ -72,7 +72,6 @@ pub(crate) struct ParseContext<'a> {
cached_headers: &'a mut Option<HeaderMap>,
req_method: &'a mut Option<Method>,
h1_parser_config: ParserConfig,
- #[cfg(feature = "ffi")]
preserve_header_case: bool,
h09_responses: bool,
}
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -191,6 +194,12 @@ impl Http1Transaction for Server {
let mut is_te_chunked = false;
let mut wants_upgrade = subject.0 == Method::CONNECT;
+ let mut header_case_map = if ctx.preserve_header_case {
+ Some(HeaderCaseMap::default())
+ } else {
+ None
+ };
+
let mut headers = ctx.cached_headers.take().unwrap_or_else(HeaderMap::new);
headers.reserve(headers_len);
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -260,6 +269,10 @@ impl Http1Transaction for Server {
_ => (),
}
+ if let Some(ref mut header_case_map) = header_case_map {
+ header_case_map.append(&name, slice.slice(header.name.0..header.name.1));
+ }
+
headers.append(name, value);
}
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -268,6 +281,12 @@ impl Http1Transaction for Server {
return Err(Parse::Header);
}
+ let mut extensions = http::Extensions::default();
+
+ if let Some(header_case_map) = header_case_map {
+ extensions.insert(header_case_map);
+ }
+
*ctx.req_method = Some(subject.0.clone());
Ok(Some(ParsedMessage {
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -275,7 +294,7 @@ impl Http1Transaction for Server {
version,
subject,
headers,
- extensions: http::Extensions::default(),
+ extensions,
},
decode: decoder,
expect_continue,
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -284,20 +303,13 @@ impl Http1Transaction for Server {
}))
}
- fn encode(
- mut msg: Encode<'_, Self::Outgoing>,
- mut dst: &mut Vec<u8>,
- ) -> crate::Result<Encoder> {
+ fn encode(mut msg: Encode<'_, Self::Outgoing>, dst: &mut Vec<u8>) -> crate::Result<Encoder> {
trace!(
"Server::encode status={:?}, body={:?}, req_method={:?}",
msg.head.subject,
msg.body,
msg.req_method
);
- debug_assert!(
- !msg.title_case_headers,
- "no server config for title case headers"
- );
let mut wrote_len = false;
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -305,7 +317,7 @@ impl Http1Transaction for Server {
// This is because Service only allows returning a single Response, and
// so if you try to reply with a e.g. 100 Continue, you have no way of
// replying with the latter status code response.
- let (ret, mut is_last) = if msg.head.subject == StatusCode::SWITCHING_PROTOCOLS {
+ let (ret, is_last) = if msg.head.subject == StatusCode::SWITCHING_PROTOCOLS {
(Ok(()), true)
} else if msg.req_method == &Some(Method::CONNECT) && msg.head.subject.is_success() {
// Sending content-length or transfer-encoding header on 2xx response
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -326,9 +338,6 @@ impl Http1Transaction for Server {
// pushing some bytes onto the `dst`. In those cases, we don't want to send
// the half-pushed message, so rewind to before.
let orig_len = dst.len();
- let rewind = |dst: &mut Vec<u8>| {
- dst.truncate(orig_len);
- };
let init_cap = 30 + msg.head.headers.len() * AVERAGE_HEADER_SIZE;
dst.reserve(init_cap);
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -359,6 +368,217 @@ impl Http1Transaction for Server {
extend(dst, b"\r\n");
}
+ let orig_headers;
+ let extensions = mem::take(&mut msg.head.extensions);
+ let orig_headers = match extensions.get::<HeaderCaseMap>() {
+ None if msg.title_case_headers => {
+ orig_headers = HeaderCaseMap::default();
+ Some(&orig_headers)
+ }
+ orig_headers => orig_headers,
+ };
+ let encoder = if let Some(orig_headers) = orig_headers {
+ Self::encode_headers_with_original_case(
+ msg,
+ dst,
+ is_last,
+ orig_len,
+ wrote_len,
+ orig_headers,
+ )?
+ } else {
+ Self::encode_headers_with_lower_case(msg, dst, is_last, orig_len, wrote_len)?
+ };
+
+ ret.map(|()| encoder)
+ }
+
+ fn on_error(err: &crate::Error) -> Option<MessageHead<Self::Outgoing>> {
+ use crate::error::Kind;
+ let status = match *err.kind() {
+ Kind::Parse(Parse::Method)
+ | Kind::Parse(Parse::Header)
+ | Kind::Parse(Parse::Uri)
+ | Kind::Parse(Parse::Version) => StatusCode::BAD_REQUEST,
+ Kind::Parse(Parse::TooLarge) => StatusCode::REQUEST_HEADER_FIELDS_TOO_LARGE,
+ _ => return None,
+ };
+
+ debug!("sending automatic response ({}) for parse error", status);
+ let mut msg = MessageHead::default();
+ msg.subject = status;
+ Some(msg)
+ }
+
+ fn is_server() -> bool {
+ true
+ }
+
+ fn update_date() {
+ date::update();
+ }
+}
+
+#[cfg(feature = "server")]
+impl Server {
+ fn can_have_body(method: &Option<Method>, status: StatusCode) -> bool {
+ Server::can_chunked(method, status)
+ }
+
+ fn can_chunked(method: &Option<Method>, status: StatusCode) -> bool {
+ if method == &Some(Method::HEAD) || method == &Some(Method::CONNECT) && status.is_success()
+ {
+ false
+ } else if status.is_informational() {
+ false
+ } else {
+ match status {
+ StatusCode::NO_CONTENT | StatusCode::NOT_MODIFIED => false,
+ _ => true,
+ }
+ }
+ }
+
+ fn can_have_content_length(method: &Option<Method>, status: StatusCode) -> bool {
+ if status.is_informational() || method == &Some(Method::CONNECT) && status.is_success() {
+ false
+ } else {
+ match status {
+ StatusCode::NO_CONTENT | StatusCode::NOT_MODIFIED => false,
+ _ => true,
+ }
+ }
+ }
+
+ fn encode_headers_with_lower_case(
+ msg: Encode<'_, StatusCode>,
+ dst: &mut Vec<u8>,
+ is_last: bool,
+ orig_len: usize,
+ wrote_len: bool,
+ ) -> crate::Result<Encoder> {
+ struct LowercaseWriter;
+
+ impl HeaderNameWriter for LowercaseWriter {
+ #[inline]
+ fn write_full_header_line(
+ &mut self,
+ dst: &mut Vec<u8>,
+ line: &str,
+ _: (HeaderName, &str),
+ ) {
+ extend(dst, line.as_bytes())
+ }
+
+ #[inline]
+ fn write_header_name_with_colon(
+ &mut self,
+ dst: &mut Vec<u8>,
+ name_with_colon: &str,
+ _: HeaderName,
+ ) {
+ extend(dst, name_with_colon.as_bytes())
+ }
+
+ #[inline]
+ fn write_header_name(&mut self, dst: &mut Vec<u8>, name: &HeaderName) {
+ extend(dst, name.as_str().as_bytes())
+ }
+ }
+
+ Self::encode_headers(msg, dst, is_last, orig_len, wrote_len, LowercaseWriter)
+ }
+
+ #[cold]
+ #[inline(never)]
+ fn encode_headers_with_original_case(
+ msg: Encode<'_, StatusCode>,
+ dst: &mut Vec<u8>,
+ is_last: bool,
+ orig_len: usize,
+ wrote_len: bool,
+ orig_headers: &HeaderCaseMap,
+ ) -> crate::Result<Encoder> {
+ struct OrigCaseWriter<'map> {
+ map: &'map HeaderCaseMap,
+ current: Option<(HeaderName, ValueIter<'map, Bytes>)>,
+ title_case_headers: bool,
+ }
+
+ impl HeaderNameWriter for OrigCaseWriter<'_> {
+ #[inline]
+ fn write_full_header_line(
+ &mut self,
+ dst: &mut Vec<u8>,
+ _: &str,
+ (name, rest): (HeaderName, &str),
+ ) {
+ self.write_header_name(dst, &name);
+ extend(dst, rest.as_bytes());
+ }
+
+ #[inline]
+ fn write_header_name_with_colon(
+ &mut self,
+ dst: &mut Vec<u8>,
+ _: &str,
+ name: HeaderName,
+ ) {
+ self.write_header_name(dst, &name);
+ extend(dst, b": ");
+ }
+
+ #[inline]
+ fn write_header_name(&mut self, dst: &mut Vec<u8>, name: &HeaderName) {
+ let Self {
+ map,
+ ref mut current,
+ title_case_headers,
+ } = *self;
+ if current.as_ref().map_or(true, |(last, _)| last != name) {
+ *current = None;
+ }
+ let (_, values) =
+ current.get_or_insert_with(|| (name.clone(), map.get_all_internal(name)));
+
+ if let Some(orig_name) = values.next() {
+ extend(dst, orig_name);
+ } else if title_case_headers {
+ title_case(dst, name.as_str().as_bytes());
+ } else {
+ extend(dst, name.as_str().as_bytes());
+ }
+ }
+ }
+
+ let header_name_writer = OrigCaseWriter {
+ map: orig_headers,
+ current: None,
+ title_case_headers: msg.title_case_headers,
+ };
+
+ Self::encode_headers(msg, dst, is_last, orig_len, wrote_len, header_name_writer)
+ }
+
+ #[inline]
+ fn encode_headers<W>(
+ msg: Encode<'_, StatusCode>,
+ mut dst: &mut Vec<u8>,
+ mut is_last: bool,
+ orig_len: usize,
+ mut wrote_len: bool,
+ mut header_name_writer: W,
+ ) -> crate::Result<Encoder>
+ where
+ W: HeaderNameWriter,
+ {
+ // In some error cases, we don't know about the invalid message until already
+ // pushing some bytes onto the `dst`. In those cases, we don't want to send
+ // the half-pushed message, so rewind to before.
+ let rewind = |dst: &mut Vec<u8>| {
+ dst.truncate(orig_len);
+ };
+
let mut encoder = Encoder::length(0);
let mut wrote_date = false;
let mut cur_name = None;
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -422,7 +642,11 @@ impl Http1Transaction for Server {
if !is_name_written {
encoder = Encoder::length(known_len);
- extend(dst, b"content-length: ");
+ header_name_writer.write_header_name_with_colon(
+ dst,
+ "content-length: ",
+ header::CONTENT_LENGTH,
+ );
extend(dst, value.as_bytes());
wrote_len = true;
is_name_written = true;
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -450,7 +674,11 @@ impl Http1Transaction for Server {
} else {
// we haven't written content-length yet!
encoder = Encoder::length(len);
- extend(dst, b"content-length: ");
+ header_name_writer.write_header_name_with_colon(
+ dst,
+ "content-length: ",
+ header::CONTENT_LENGTH,
+ );
extend(dst, value.as_bytes());
wrote_len = true;
is_name_written = true;
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -505,7 +733,11 @@ impl Http1Transaction for Server {
if !is_name_written {
encoder = Encoder::chunked();
is_name_written = true;
- extend(dst, b"transfer-encoding: ");
+ header_name_writer.write_header_name_with_colon(
+ dst,
+ "transfer-encoding: ",
+ header::TRANSFER_ENCODING,
+ );
extend(dst, value.as_bytes());
} else {
extend(dst, b", ");
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -519,7 +751,11 @@ impl Http1Transaction for Server {
}
if !is_name_written {
is_name_written = true;
- extend(dst, b"connection: ");
+ header_name_writer.write_header_name_with_colon(
+ dst,
+ "connection: ",
+ header::CONNECTION,
+ );
extend(dst, value.as_bytes());
} else {
extend(dst, b", ");
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -541,7 +777,7 @@ impl Http1Transaction for Server {
"{:?} set is_name_written and didn't continue loop",
name,
);
- extend(dst, name.as_str().as_bytes());
+ header_name_writer.write_header_name(dst, name);
extend(dst, b": ");
extend(dst, value.as_bytes());
extend(dst, b"\r\n");
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -557,13 +793,21 @@ impl Http1Transaction for Server {
{
Encoder::close_delimited()
} else {
- extend(dst, b"transfer-encoding: chunked\r\n");
+ header_name_writer.write_full_header_line(
+ dst,
+ "transfer-encoding: chunked\r\n",
+ (header::TRANSFER_ENCODING, ": chunked\r\n"),
+ );
Encoder::chunked()
}
}
None | Some(BodyLength::Known(0)) => {
if Server::can_have_content_length(msg.req_method, msg.head.subject) {
- extend(dst, b"content-length: 0\r\n");
+ header_name_writer.write_full_header_line(
+ dst,
+ "content-length: 0\r\n",
+ (header::CONTENT_LENGTH, ": 0\r\n"),
+ )
}
Encoder::length(0)
}
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -571,7 +815,11 @@ impl Http1Transaction for Server {
if !Server::can_have_content_length(msg.req_method, msg.head.subject) {
Encoder::length(0)
} else {
- extend(dst, b"content-length: ");
+ header_name_writer.write_header_name_with_colon(
+ dst,
+ "content-length: ",
+ header::CONTENT_LENGTH,
+ );
let _ = ::itoa::write(&mut dst, len);
extend(dst, b"\r\n");
Encoder::length(len)
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -592,72 +840,32 @@ impl Http1Transaction for Server {
// cached date is much faster than formatting every request
if !wrote_date {
dst.reserve(date::DATE_VALUE_LENGTH + 8);
- extend(dst, b"date: ");
+ header_name_writer.write_header_name_with_colon(dst, "date: ", header::DATE);
date::extend(dst);
extend(dst, b"\r\n\r\n");
} else {
extend(dst, b"\r\n");
}
- ret.map(|()| encoder.set_last(is_last))
- }
-
- fn on_error(err: &crate::Error) -> Option<MessageHead<Self::Outgoing>> {
- use crate::error::Kind;
- let status = match *err.kind() {
- Kind::Parse(Parse::Method)
- | Kind::Parse(Parse::Header)
- | Kind::Parse(Parse::Uri)
- | Kind::Parse(Parse::Version) => StatusCode::BAD_REQUEST,
- Kind::Parse(Parse::TooLarge) => StatusCode::REQUEST_HEADER_FIELDS_TOO_LARGE,
- _ => return None,
- };
-
- debug!("sending automatic response ({}) for parse error", status);
- let mut msg = MessageHead::default();
- msg.subject = status;
- Some(msg)
- }
-
- fn is_server() -> bool {
- true
- }
-
- fn update_date() {
- date::update();
+ Ok(encoder.set_last(is_last))
}
}
#[cfg(feature = "server")]
-impl Server {
- fn can_have_body(method: &Option<Method>, status: StatusCode) -> bool {
- Server::can_chunked(method, status)
- }
-
- fn can_chunked(method: &Option<Method>, status: StatusCode) -> bool {
- if method == &Some(Method::HEAD) || method == &Some(Method::CONNECT) && status.is_success()
- {
- false
- } else if status.is_informational() {
- false
- } else {
- match status {
- StatusCode::NO_CONTENT | StatusCode::NOT_MODIFIED => false,
- _ => true,
- }
- }
- }
-
- fn can_have_content_length(method: &Option<Method>, status: StatusCode) -> bool {
- if status.is_informational() || method == &Some(Method::CONNECT) && status.is_success() {
- false
- } else {
- match status {
- StatusCode::NO_CONTENT | StatusCode::NOT_MODIFIED => false,
- _ => true,
- }
- }
- }
+trait HeaderNameWriter {
+ fn write_full_header_line(
+ &mut self,
+ dst: &mut Vec<u8>,
+ line: &str,
+ name_value_pair: (HeaderName, &str),
+ );
+ fn write_header_name_with_colon(
+ &mut self,
+ dst: &mut Vec<u8>,
+ name_with_colon: &str,
+ name: HeaderName,
+ );
+ fn write_header_name(&mut self, dst: &mut Vec<u8>, name: &HeaderName);
}
#[cfg(feature = "client")]
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -732,8 +940,11 @@ impl Http1Transaction for Client {
let mut keep_alive = version == Version::HTTP_11;
- #[cfg(feature = "ffi")]
- let mut header_case_map = crate::ffi::HeaderCaseMap::default();
+ let mut header_case_map = if ctx.preserve_header_case {
+ Some(HeaderCaseMap::default())
+ } else {
+ None
+ };
headers.reserve(headers_len);
for header in &headers_indices[..headers_len] {
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -751,19 +962,16 @@ impl Http1Transaction for Client {
}
}
- #[cfg(feature = "ffi")]
- if ctx.preserve_header_case {
+ if let Some(ref mut header_case_map) = header_case_map {
header_case_map.append(&name, slice.slice(header.name.0..header.name.1));
}
headers.append(name, value);
}
- #[allow(unused_mut)]
let mut extensions = http::Extensions::default();
- #[cfg(feature = "ffi")]
- if ctx.preserve_header_case {
+ if let Some(header_case_map) = header_case_map {
extensions.insert(header_case_map);
}
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -830,26 +1038,17 @@ impl Http1Transaction for Client {
}
extend(dst, b"\r\n");
- #[cfg(feature = "ffi")]
- {
- if msg.title_case_headers {
- write_headers_title_case(&msg.head.headers, dst);
- } else if let Some(orig_headers) =
- msg.head.extensions.get::<crate::ffi::HeaderCaseMap>()
- {
- write_headers_original_case(&msg.head.headers, orig_headers, dst);
- } else {
- write_headers(&msg.head.headers, dst);
- }
- }
-
- #[cfg(not(feature = "ffi"))]
- {
- if msg.title_case_headers {
- write_headers_title_case(&msg.head.headers, dst);
- } else {
- write_headers(&msg.head.headers, dst);
- }
+ if let Some(orig_headers) = msg.head.extensions.get::<HeaderCaseMap>() {
+ write_headers_original_case(
+ &msg.head.headers,
+ orig_headers,
+ dst,
+ msg.title_case_headers,
+ );
+ } else if msg.title_case_headers {
+ write_headers_title_case(&msg.head.headers, dst);
+ } else {
+ write_headers(&msg.head.headers, dst);
}
extend(dst, b"\r\n");
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1162,12 +1361,12 @@ fn write_headers(headers: &HeaderMap, dst: &mut Vec<u8>) {
}
}
-#[cfg(feature = "ffi")]
#[cold]
fn write_headers_original_case(
headers: &HeaderMap,
- orig_case: &crate::ffi::HeaderCaseMap,
+ orig_case: &HeaderCaseMap,
dst: &mut Vec<u8>,
+ title_case_headers: bool,
) {
// For each header name/value pair, there may be a value in the casemap
// that corresponds to the HeaderValue. So, we iterator all the keys,
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1175,11 +1374,13 @@ fn write_headers_original_case(
//
// TODO: consider adding http::HeaderMap::entries() iterator
for name in headers.keys() {
- let mut names = orig_case.get_all(name).iter();
+ let mut names = orig_case.get_all(name);
for value in headers.get_all(name) {
if let Some(orig_name) = names.next() {
- extend(dst, orig_name);
+ extend(dst, orig_name.as_ref());
+ } else if title_case_headers {
+ title_case(dst, name.as_str().as_bytes());
} else {
extend(dst, name.as_str().as_bytes());
}
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -88,6 +88,7 @@ pub struct Http<E = Exec> {
exec: E,
h1_half_close: bool,
h1_keep_alive: bool,
+ h1_title_case_headers: bool,
#[cfg(feature = "http2")]
h2_builder: proto::h2::server::Config,
mode: ConnectionMode,
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -234,6 +235,7 @@ impl Http {
exec: Exec::Default,
h1_half_close: false,
h1_keep_alive: true,
+ h1_title_case_headers: false,
#[cfg(feature = "http2")]
h2_builder: Default::default(),
mode: ConnectionMode::default(),
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -286,6 +288,19 @@ impl<E> Http<E> {
self
}
+ /// Set whether HTTP/1 connections will write header names as title case at
+ /// the socket level.
+ ///
+ /// Note that this setting does not affect HTTP/2.
+ ///
+ /// Default is false.
+ #[cfg(feature = "http1")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
+ pub fn http1_title_case_headers(&mut self, enabled: bool) -> &mut Self {
+ self.h1_title_case_headers = enabled;
+ self
+ }
+
/// Sets whether HTTP2 is required.
///
/// Default is false
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -459,6 +474,7 @@ impl<E> Http<E> {
exec,
h1_half_close: self.h1_half_close,
h1_keep_alive: self.h1_keep_alive,
+ h1_title_case_headers: self.h1_title_case_headers,
#[cfg(feature = "http2")]
h2_builder: self.h2_builder,
mode: self.mode,
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -514,6 +530,9 @@ impl<E> Http<E> {
if self.h1_half_close {
conn.set_allow_half_close();
}
+ if self.h1_title_case_headers {
+ conn.set_title_case_headers();
+ }
conn.set_flush_pipeline(self.pipeline_flush);
if let Some(max) = self.max_buf_size {
conn.set_max_buf_size(max);
diff --git a/src/server/server.rs b/src/server/server.rs
--- a/src/server/server.rs
+++ b/src/server/server.rs
@@ -231,6 +231,19 @@ impl<I, E> Builder<I, E> {
self
}
+ /// Set whether HTTP/1 connections will write header names as title case at
+ /// the socket level.
+ ///
+ /// Note that this setting does not affect HTTP/2.
+ ///
+ /// Default is false.
+ #[cfg(feature = "http1")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
+ pub fn http1_title_case_headers(&mut self, val: bool) -> &mut Self {
+ self.protocol.http1_title_case_headers(val);
+ self
+ }
+
/// Sets whether HTTP/1 is required.
///
/// Default is `false`.
|
As always, I've suggest filing a bug with the thing that expects a certain casing, since in HTTP/1 header casing is defined as case-insensitive, and in HTTP2 and 3, it's forced to lowercase. So the Switch is definitely wrong here.
That said, I realize it's not always controllable, and so I'm exploring providing support in the C API for curl, and it might be possible to expose it as part of Rust's API as well... So far, my idea is to include an extra type in the request `Extensions` which is a mapping of `HeaderName`s to the original casing. If that type exists, it would use that to serialize instead of `HeaderName`, and then hyper would also collect a map for the response and put that in the response `Extensions`.
Isn't there now code to do that behind the `ffi` feature?
There is indeed code in hyper now, currently behind the `ffi` feature. If someone wants to contribute this, it'd be good to propose the API that is exposed to users, and then converting the code to be not just for the `ffi` feature.
|
2021-03-26T11:26:07Z
| 2,480
|
Option to preserve header's title case
I am building a HTTP proxy with hyper, and I realize that headers are all converted to lower case for HTTP/1.x.
There was an issue about adding `http1_title_case_headers` for clients: https://github.com/hyperium/hyper/issues/1492 , but we don't have a way to preserve the **response headers'** title case from remote servers.
Some client applications are expecting to receive header with name that is exactly the same as the server returns, for example:
- Nintendo Switch's connection checking will sent a GET request to `http://ctest.cdn.nintendo.net/` and expecting a response with header `X-Organization: Nintendo`.
|
hyperium__hyper-2480
|
diff --git /dev/null b/src/ext.rs
new file mode 100644
--- /dev/null
+++ b/src/ext.rs
@@ -0,0 +1,64 @@
+//! HTTP extensions
+
+use bytes::Bytes;
+#[cfg(feature = "http1")]
+use http::header::{HeaderName, IntoHeaderName, ValueIter};
+use http::HeaderMap;
+
+/// A map from header names to their original casing as received in an HTTP message.
+///
+/// If an HTTP/1 response `res` is parsed on a connection whose option
+/// [`http1_preserve_header_case`] was set to true and the response included
+/// the following headers:
+///
+/// ```ignore
+/// x-Bread: Baguette
+/// X-BREAD: Pain
+/// x-bread: Ficelle
+/// ```
+///
+/// Then `res.extensions().get::<HeaderCaseMap>()` will return a map with:
+///
+/// ```ignore
+/// HeaderCaseMap({
+/// "x-bread": ["x-Bread", "X-BREAD", "x-bread"],
+/// })
+/// ```
+///
+/// [`http1_preserve_header_case`]: /client/struct.Client.html#method.http1_preserve_header_case
+#[derive(Clone, Debug)]
+pub(crate) struct HeaderCaseMap(HeaderMap<Bytes>);
+
+#[cfg(feature = "http1")]
+impl HeaderCaseMap {
+ /// Returns a view of all spellings associated with that header name,
+ /// in the order they were found.
+ pub(crate) fn get_all<'a>(
+ &'a self,
+ name: &HeaderName,
+ ) -> impl Iterator<Item = impl AsRef<[u8]> + 'a> + 'a {
+ self.get_all_internal(name).into_iter()
+ }
+
+ /// Returns a view of all spellings associated with that header name,
+ /// in the order they were found.
+ pub(crate) fn get_all_internal<'a>(&'a self, name: &HeaderName) -> ValueIter<'_, Bytes> {
+ self.0.get_all(name).into_iter()
+ }
+
+ pub(crate) fn default() -> Self {
+ Self(Default::default())
+ }
+
+ #[cfg(any(test, feature = "ffi"))]
+ pub(crate) fn insert(&mut self, name: HeaderName, orig: Bytes) {
+ self.0.insert(name, orig);
+ }
+
+ pub(crate) fn append<N>(&mut self, name: N, orig: Bytes)
+ where
+ N: IntoHeaderName,
+ {
+ self.0.append(name, orig);
+ }
+}
diff --git a/src/ffi/http_types.rs b/src/ffi/http_types.rs
--- a/src/ffi/http_types.rs
+++ b/src/ffi/http_types.rs
@@ -370,25 +372,6 @@ unsafe fn raw_name_value(
Ok((name, value, orig_name))
}
-// ===== impl HeaderCaseMap =====
-
-impl HeaderCaseMap {
- pub(crate) fn get_all(&self, name: &HeaderName) -> http::header::GetAll<'_, Bytes> {
- self.0.get_all(name)
- }
-
- pub(crate) fn insert(&mut self, name: HeaderName, orig: Bytes) {
- self.0.insert(name, orig);
- }
-
- pub(crate) fn append<N>(&mut self, name: N, orig: Bytes)
- where
- N: http::header::IntoHeaderName,
- {
- self.0.append(name, orig);
- }
-}
-
#[cfg(test)]
mod tests {
use super::*;
diff --git a/src/lib.rs b/src/lib.rs
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -80,6 +80,7 @@ mod cfg;
mod common;
pub mod body;
mod error;
+mod ext;
#[cfg(test)]
mod mock;
#[cfg(any(feature = "http1", feature = "http2",))]
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -644,7 +643,6 @@ mod tests {
cached_headers: &mut None,
req_method: &mut None,
h1_parser_config: Default::default(),
- #[cfg(feature = "ffi")]
preserve_header_case: false,
h09_responses: false,
};
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -5,16 +5,19 @@
use std::fmt::{self, Write};
use std::mem;
-#[cfg(feature = "ffi")]
+#[cfg(any(test, feature = "server", feature = "ffi"))]
use bytes::Bytes;
use bytes::BytesMut;
use http::header::{self, Entry, HeaderName, HeaderValue};
+#[cfg(feature = "server")]
+use http::header::ValueIter;
use http::{HeaderMap, Method, StatusCode, Version};
use crate::body::DecodedLength;
#[cfg(feature = "server")]
use crate::common::date;
use crate::error::Parse;
+use crate::ext::HeaderCaseMap;
use crate::headers;
use crate::proto::h1::{
Encode, Encoder, Http1Transaction, ParseContext, ParseResult, ParsedMessage,
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1233,7 +1434,6 @@ mod tests {
cached_headers: &mut None,
req_method: &mut method,
h1_parser_config: Default::default(),
- #[cfg(feature = "ffi")]
preserve_header_case: false,
h09_responses: false,
},
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1257,7 +1457,6 @@ mod tests {
cached_headers: &mut None,
req_method: &mut Some(crate::Method::GET),
h1_parser_config: Default::default(),
- #[cfg(feature = "ffi")]
preserve_header_case: false,
h09_responses: false,
};
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1276,7 +1475,6 @@ mod tests {
cached_headers: &mut None,
req_method: &mut None,
h1_parser_config: Default::default(),
- #[cfg(feature = "ffi")]
preserve_header_case: false,
h09_responses: false,
};
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1293,7 +1491,6 @@ mod tests {
cached_headers: &mut None,
req_method: &mut Some(crate::Method::GET),
h1_parser_config: Default::default(),
- #[cfg(feature = "ffi")]
preserve_header_case: false,
h09_responses: true,
};
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1312,7 +1509,6 @@ mod tests {
cached_headers: &mut None,
req_method: &mut Some(crate::Method::GET),
h1_parser_config: Default::default(),
- #[cfg(feature = "ffi")]
preserve_header_case: false,
h09_responses: false,
};
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1335,7 +1531,6 @@ mod tests {
cached_headers: &mut None,
req_method: &mut Some(crate::Method::GET),
h1_parser_config,
- #[cfg(feature = "ffi")]
preserve_header_case: false,
h09_responses: false,
};
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1355,13 +1550,45 @@ mod tests {
cached_headers: &mut None,
req_method: &mut Some(crate::Method::GET),
h1_parser_config: Default::default(),
- #[cfg(feature = "ffi")]
preserve_header_case: false,
h09_responses: false,
};
Client::parse(&mut raw, ctx).unwrap_err();
}
+ #[test]
+ fn test_parse_preserve_header_case_in_request() {
+ let mut raw =
+ BytesMut::from("GET / HTTP/1.1\r\nHost: hyper.rs\r\nX-BREAD: baguette\r\n\r\n");
+ let ctx = ParseContext {
+ cached_headers: &mut None,
+ req_method: &mut None,
+ h1_parser_config: Default::default(),
+ preserve_header_case: true,
+ h09_responses: false,
+ };
+ let parsed_message = Server::parse(&mut raw, ctx).unwrap().unwrap();
+ let orig_headers = parsed_message
+ .head
+ .extensions
+ .get::<HeaderCaseMap>()
+ .unwrap();
+ assert_eq!(
+ orig_headers
+ .get_all_internal(&HeaderName::from_static("host"))
+ .into_iter()
+ .collect::<Vec<_>>(),
+ vec![&Bytes::from("Host")]
+ );
+ assert_eq!(
+ orig_headers
+ .get_all_internal(&HeaderName::from_static("x-bread"))
+ .into_iter()
+ .collect::<Vec<_>>(),
+ vec![&Bytes::from("X-BREAD")]
+ );
+ }
+
#[test]
fn test_decoder_request() {
fn parse(s: &str) -> ParsedMessage<RequestLine> {
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1372,7 +1599,6 @@ mod tests {
cached_headers: &mut None,
req_method: &mut None,
h1_parser_config: Default::default(),
- #[cfg(feature = "ffi")]
preserve_header_case: false,
h09_responses: false,
},
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1389,7 +1615,6 @@ mod tests {
cached_headers: &mut None,
req_method: &mut None,
h1_parser_config: Default::default(),
- #[cfg(feature = "ffi")]
preserve_header_case: false,
h09_responses: false,
},
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1605,7 +1830,6 @@ mod tests {
cached_headers: &mut None,
req_method: &mut Some(Method::GET),
h1_parser_config: Default::default(),
- #[cfg(feature = "ffi")]
preserve_header_case: false,
h09_responses: false,
}
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1622,7 +1846,6 @@ mod tests {
cached_headers: &mut None,
req_method: &mut Some(m),
h1_parser_config: Default::default(),
- #[cfg(feature = "ffi")]
preserve_header_case: false,
h09_responses: false,
},
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1639,7 +1862,6 @@ mod tests {
cached_headers: &mut None,
req_method: &mut Some(Method::GET),
h1_parser_config: Default::default(),
- #[cfg(feature = "ffi")]
preserve_header_case: false,
h09_responses: false,
},
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1927,6 +2149,75 @@ mod tests {
assert_eq!(vec, b"GET / HTTP/1.1\r\nContent-Length: 10\r\nContent-Type: application/json\r\n*-*: o_o\r\n\r\n".to_vec());
}
+ #[test]
+ fn test_client_request_encode_orig_case() {
+ use crate::proto::BodyLength;
+ use http::header::{HeaderValue, CONTENT_LENGTH};
+
+ let mut head = MessageHead::default();
+ head.headers
+ .insert("content-length", HeaderValue::from_static("10"));
+ head.headers
+ .insert("content-type", HeaderValue::from_static("application/json"));
+
+ let mut orig_headers = HeaderCaseMap::default();
+ orig_headers.insert(CONTENT_LENGTH, "CONTENT-LENGTH".into());
+ head.extensions.insert(orig_headers);
+
+ let mut vec = Vec::new();
+ Client::encode(
+ Encode {
+ head: &mut head,
+ body: Some(BodyLength::Known(10)),
+ keep_alive: true,
+ req_method: &mut None,
+ title_case_headers: false,
+ },
+ &mut vec,
+ )
+ .unwrap();
+
+ assert_eq!(
+ &*vec,
+ b"GET / HTTP/1.1\r\nCONTENT-LENGTH: 10\r\ncontent-type: application/json\r\n\r\n"
+ .as_ref(),
+ );
+ }
+ #[test]
+ fn test_client_request_encode_orig_and_title_case() {
+ use crate::proto::BodyLength;
+ use http::header::{HeaderValue, CONTENT_LENGTH};
+
+ let mut head = MessageHead::default();
+ head.headers
+ .insert("content-length", HeaderValue::from_static("10"));
+ head.headers
+ .insert("content-type", HeaderValue::from_static("application/json"));
+
+ let mut orig_headers = HeaderCaseMap::default();
+ orig_headers.insert(CONTENT_LENGTH, "CONTENT-LENGTH".into());
+ head.extensions.insert(orig_headers);
+
+ let mut vec = Vec::new();
+ Client::encode(
+ Encode {
+ head: &mut head,
+ body: Some(BodyLength::Known(10)),
+ keep_alive: true,
+ req_method: &mut None,
+ title_case_headers: true,
+ },
+ &mut vec,
+ )
+ .unwrap();
+
+ assert_eq!(
+ &*vec,
+ b"GET / HTTP/1.1\r\nCONTENT-LENGTH: 10\r\nContent-Type: application/json\r\n\r\n"
+ .as_ref(),
+ );
+ }
+
#[test]
fn test_server_encode_connect_method() {
let mut head = MessageHead::default();
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1947,6 +2238,104 @@ mod tests {
assert!(encoder.is_last());
}
+ #[test]
+ fn test_server_response_encode_title_case() {
+ use crate::proto::BodyLength;
+ use http::header::HeaderValue;
+
+ let mut head = MessageHead::default();
+ head.headers
+ .insert("content-length", HeaderValue::from_static("10"));
+ head.headers
+ .insert("content-type", HeaderValue::from_static("application/json"));
+
+ let mut vec = Vec::new();
+ Server::encode(
+ Encode {
+ head: &mut head,
+ body: Some(BodyLength::Known(10)),
+ keep_alive: true,
+ req_method: &mut None,
+ title_case_headers: true,
+ },
+ &mut vec,
+ )
+ .unwrap();
+
+ let expected_response =
+ b"HTTP/1.1 200 OK\r\nContent-Length: 10\r\nContent-Type: application/json\r\n";
+
+ assert_eq!(&vec[..expected_response.len()], &expected_response[..]);
+ }
+
+ #[test]
+ fn test_server_response_encode_orig_case() {
+ use crate::proto::BodyLength;
+ use http::header::{HeaderValue, CONTENT_LENGTH};
+
+ let mut head = MessageHead::default();
+ head.headers
+ .insert("content-length", HeaderValue::from_static("10"));
+ head.headers
+ .insert("content-type", HeaderValue::from_static("application/json"));
+
+ let mut orig_headers = HeaderCaseMap::default();
+ orig_headers.insert(CONTENT_LENGTH, "CONTENT-LENGTH".into());
+ head.extensions.insert(orig_headers);
+
+ let mut vec = Vec::new();
+ Server::encode(
+ Encode {
+ head: &mut head,
+ body: Some(BodyLength::Known(10)),
+ keep_alive: true,
+ req_method: &mut None,
+ title_case_headers: false,
+ },
+ &mut vec,
+ )
+ .unwrap();
+
+ let expected_response =
+ b"HTTP/1.1 200 OK\r\nCONTENT-LENGTH: 10\r\ncontent-type: application/json\r\ndate: ";
+
+ assert_eq!(&vec[..expected_response.len()], &expected_response[..]);
+ }
+
+ #[test]
+ fn test_server_response_encode_orig_and_title_case() {
+ use crate::proto::BodyLength;
+ use http::header::{HeaderValue, CONTENT_LENGTH};
+
+ let mut head = MessageHead::default();
+ head.headers
+ .insert("content-length", HeaderValue::from_static("10"));
+ head.headers
+ .insert("content-type", HeaderValue::from_static("application/json"));
+
+ let mut orig_headers = HeaderCaseMap::default();
+ orig_headers.insert(CONTENT_LENGTH, "CONTENT-LENGTH".into());
+ head.extensions.insert(orig_headers);
+
+ let mut vec = Vec::new();
+ Server::encode(
+ Encode {
+ head: &mut head,
+ body: Some(BodyLength::Known(10)),
+ keep_alive: true,
+ req_method: &mut None,
+ title_case_headers: true,
+ },
+ &mut vec,
+ )
+ .unwrap();
+
+ let expected_response =
+ b"HTTP/1.1 200 OK\r\nCONTENT-LENGTH: 10\r\nContent-Type: application/json\r\nDate: ";
+
+ assert_eq!(&vec[..expected_response.len()], &expected_response[..]);
+ }
+
#[test]
fn parse_header_htabs() {
let mut bytes = BytesMut::from("HTTP/1.1 200 OK\r\nserver: hello\tworld\r\n\r\n");
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1956,7 +2345,6 @@ mod tests {
cached_headers: &mut None,
req_method: &mut Some(Method::GET),
h1_parser_config: Default::default(),
- #[cfg(feature = "ffi")]
preserve_header_case: false,
h09_responses: false,
},
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1967,17 +2355,16 @@ mod tests {
assert_eq!(parsed.head.headers["server"], "hello\tworld");
}
- #[cfg(feature = "ffi")]
#[test]
fn test_write_headers_orig_case_empty_value() {
let mut headers = HeaderMap::new();
let name = http::header::HeaderName::from_static("x-empty");
headers.insert(&name, "".parse().expect("parse empty"));
- let mut orig_cases = crate::ffi::HeaderCaseMap::default();
+ let mut orig_cases = HeaderCaseMap::default();
orig_cases.insert(name, Bytes::from_static(b"X-EmptY"));
let mut dst = Vec::new();
- super::write_headers_original_case(&headers, &orig_cases, &mut dst);
+ super::write_headers_original_case(&headers, &orig_cases, &mut dst, false);
assert_eq!(
dst, b"X-EmptY:\r\n",
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1985,7 +2372,6 @@ mod tests {
);
}
- #[cfg(feature = "ffi")]
#[test]
fn test_write_headers_orig_case_multiple_entries() {
let mut headers = HeaderMap::new();
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1993,12 +2379,12 @@ mod tests {
headers.insert(&name, "a".parse().unwrap());
headers.append(&name, "b".parse().unwrap());
- let mut orig_cases = crate::ffi::HeaderCaseMap::default();
+ let mut orig_cases = HeaderCaseMap::default();
orig_cases.insert(name.clone(), Bytes::from_static(b"X-Empty"));
orig_cases.append(name, Bytes::from_static(b"X-EMPTY"));
let mut dst = Vec::new();
- super::write_headers_original_case(&headers, &orig_cases, &mut dst);
+ super::write_headers_original_case(&headers, &orig_cases, &mut dst, false);
assert_eq!(dst, b"X-Empty: a\r\nX-EMPTY: b\r\n");
}
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -2039,7 +2425,6 @@ mod tests {
cached_headers: &mut headers,
req_method: &mut None,
h1_parser_config: Default::default(),
- #[cfg(feature = "ffi")]
preserve_header_case: false,
h09_responses: false,
},
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -2076,7 +2461,6 @@ mod tests {
cached_headers: &mut headers,
req_method: &mut None,
h1_parser_config: Default::default(),
- #[cfg(feature = "ffi")]
preserve_header_case: false,
h09_responses: false,
},
|
hyperium/hyper
|
a24f0c0af8e1f4c6b7cc3a47c83eb6e4af88aca6
|
[
"2468"
] |
0.14
|
41f99578a53845e5e0bb999c101bef8307e1ce5f
|
diff --git a/src/client/client.rs b/src/client/client.rs
--- a/src/client/client.rs
+++ b/src/client/client.rs
@@ -972,6 +972,14 @@ impl Builder {
self
}
+ /// Set whether HTTP/0.9 responses should be tolerated.
+ ///
+ /// Default is false.
+ pub fn http09_responses(&mut self, val: bool) -> &mut Self {
+ self.conn_builder.h09_responses(val);
+ self
+ }
+
/// Set whether the connection **must** use HTTP/2.
///
/// The destination must either allow HTTP2 Prior Knowledge, or the
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -122,6 +122,7 @@ where
#[derive(Clone, Debug)]
pub struct Builder {
pub(super) exec: Exec,
+ h09_responses: bool,
h1_title_case_headers: bool,
h1_read_buf_exact_size: Option<usize>,
h1_max_buf_size: Option<usize>,
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -493,6 +494,7 @@ impl Builder {
pub fn new() -> Builder {
Builder {
exec: Exec::Default,
+ h09_responses: false,
h1_read_buf_exact_size: None,
h1_title_case_headers: false,
h1_max_buf_size: None,
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -514,6 +516,11 @@ impl Builder {
self
}
+ pub(super) fn h09_responses(&mut self, enabled: bool) -> &mut Builder {
+ self.h09_responses = enabled;
+ self
+ }
+
pub(super) fn h1_title_case_headers(&mut self, enabled: bool) -> &mut Builder {
self.h1_title_case_headers = enabled;
self
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -700,6 +707,9 @@ impl Builder {
if opts.h1_title_case_headers {
conn.set_title_case_headers();
}
+ if opts.h09_responses {
+ conn.set_h09_responses();
+ }
if let Some(sz) = opts.h1_read_buf_exact_size {
conn.set_read_buf_exact_size(sz);
}
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -47,6 +47,7 @@ where
#[cfg(feature = "ffi")]
preserve_header_case: false,
title_case_headers: false,
+ h09_responses: false,
notify_read: false,
reading: Reading::Init,
writing: Writing::Init,
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -78,6 +79,11 @@ where
self.state.title_case_headers = true;
}
+ #[cfg(feature = "client")]
+ pub(crate) fn set_h09_responses(&mut self) {
+ self.state.h09_responses = true;
+ }
+
#[cfg(feature = "server")]
pub(crate) fn set_allow_half_close(&mut self) {
self.state.allow_half_close = true;
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -146,6 +152,7 @@ where
req_method: &mut self.state.method,
#[cfg(feature = "ffi")]
preserve_header_case: self.state.preserve_header_case,
+ h09_responses: self.state.h09_responses,
}
)) {
Ok(msg) => msg,
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -157,6 +164,9 @@ where
debug!("incoming body is {}", msg.decode);
+ // Prevent accepting HTTP/0.9 responses after the initial one, if any.
+ self.state.h09_responses = false;
+
self.state.busy();
self.state.keep_alive &= msg.keep_alive;
self.state.version = msg.head.version;
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -753,6 +763,7 @@ struct State {
#[cfg(feature = "ffi")]
preserve_header_case: bool,
title_case_headers: bool,
+ h09_responses: bool,
/// Set to true when the Dispatcher should poll read operations
/// again. See the `maybe_notify` method for more.
notify_read: bool,
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -161,6 +161,7 @@ where
req_method: parse_ctx.req_method,
#[cfg(feature = "ffi")]
preserve_header_case: parse_ctx.preserve_header_case,
+ h09_responses: parse_ctx.h09_responses,
},
)? {
Some(msg) => {
diff --git a/src/proto/h1/mod.rs b/src/proto/h1/mod.rs
--- a/src/proto/h1/mod.rs
+++ b/src/proto/h1/mod.rs
@@ -72,6 +72,7 @@ pub(crate) struct ParseContext<'a> {
req_method: &'a mut Option<Method>,
#[cfg(feature = "ffi")]
preserve_header_case: bool,
+ h09_responses: bool,
}
/// Passed to Http1Transaction::encode
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -683,8 +683,8 @@ impl Http1Transaction for Client {
);
let mut res = httparse::Response::new(&mut headers);
let bytes = buf.as_ref();
- match res.parse(bytes)? {
- httparse::Status::Complete(len) => {
+ match res.parse(bytes) {
+ Ok(httparse::Status::Complete(len)) => {
trace!("Response.parse Complete({})", len);
let status = StatusCode::from_u16(res.code.unwrap())?;
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -710,7 +710,18 @@ impl Http1Transaction for Client {
let headers_len = res.headers.len();
(len, status, reason, version, headers_len)
}
- httparse::Status::Partial => return Ok(None),
+ Ok(httparse::Status::Partial) => return Ok(None),
+ Err(httparse::Error::Version) if ctx.h09_responses => {
+ trace!("Response.parse accepted HTTP/0.9 response");
+
+ #[cfg(not(feature = "ffi"))]
+ let reason = ();
+ #[cfg(feature = "ffi")]
+ let reason = None;
+
+ (0, StatusCode::OK, reason, Version::HTTP_09, 0)
+ }
+ Err(e) => return Err(e.into()),
}
};
|
👀 This is the first time I've seen HTTP/0.9 support asked for. I would have assumed that since HTTP/1.0 came out (double checks) over 25 years ago, people would have moved on. Oh well.
If a client sends a request to a server that happens to not speak HTTP at all, what's the proposed way to notice that versus just assuming it's HTTP/0.9?
Firefox does this: https://searchfox.org/mozilla-central/rev/f07a609a76136ef779c65185165ff5ac513cc172/netwerk/protocol/http/nsHttpTransaction.cpp#2014-2031 So more or less if it doesn't find some "HTTP/1|2|3" or "ICY " in the first 11 bytes, it will assume this is a head-less HTTP/0.9 response. Note that Firefox explicitly rejects HTTP/0.9 responses to PUT requests.
The logic for Chrome is here: https://chromium.googlesource.com/chromium/src/net/+/8cc942b38e929612f0d85a685f04e73b3203dbb1/http/http_stream_parser.cc#966 AFAICT it also tries to find "HTTP" near the start of the incoming data, and if it doesn't it assumes HTTP/0.9. Note that Chrome explicitly rejects HTTP/0.9 responses not made against default scheme ports, except if it find "ICY " ([see this code](https://chromium.googlesource.com/chromium/src/net/+/8cc942b38e929612f0d85a685f04e73b3203dbb1/http/http_stream_parser.cc#1019)).
I suppose what we could do in hyper is add a builder option, default off. If enabled, then do as you propose: try to parse as HTTP/1, if it fails and the response doesn't look like `HTTP/1.X`, return it as HTTP/0.9. How does that sound?
That's fine by me, whatever you think would be best. We have yet to see any response that didn't start immediately at the first byte we are looking at, so that plan would be good I think.
|
2021-03-19T13:57:22Z
| 2,473
|
Support HTTP/0.9 responses in the client
There are servers out there that sometimes reply with HTTP/0.9 responses, even modern-ish stack (we saw such responses from MSFT servers for example).
I'm filing this here and not on the httparse repo because this is a substantial change worth discussing in the open, and because we need to think about how to support it. Firefox for example has some logic to content-sniff the start of an actual `HTTP/1.*` response in the first 4KB of received data on the first request made on the connection, and this is obviously not code that should live in httparse.
As a start, I feel like Hyper should check whether the response starts with "HTTP" at the beginning of the data it received, and if not, maybe it should pretend it saw `HTTP/1.0 200 OK\r\n\r\n`.
I say `HTTP/1.0` and not `HTTP/0.9` because it is literally impossible for `httparse::Response` to represent `0.9` in its version field.
|
hyperium__hyper-2473
|
diff --git a/src/lib.rs b/src/lib.rs
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -1,4 +1,3 @@
-#![doc(html_root_url = "https://docs.rs/hyper/0.14.4")]
#![deny(missing_docs)]
#![deny(missing_debug_implementations)]
#![cfg_attr(test, deny(rust_2018_idioms))]
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -640,6 +641,7 @@ mod tests {
req_method: &mut None,
#[cfg(feature = "ffi")]
preserve_header_case: false,
+ h09_responses: false,
};
assert!(buffered
.parse::<ClientTransaction>(cx, parse_ctx)
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1222,6 +1233,7 @@ mod tests {
req_method: &mut method,
#[cfg(feature = "ffi")]
preserve_header_case: false,
+ h09_responses: false,
},
)
.unwrap()
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1244,6 +1256,7 @@ mod tests {
req_method: &mut Some(crate::Method::GET),
#[cfg(feature = "ffi")]
preserve_header_case: false,
+ h09_responses: false,
};
let msg = Client::parse(&mut raw, ctx).unwrap().unwrap();
assert_eq!(raw.len(), 0);
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1261,10 +1274,46 @@ mod tests {
req_method: &mut None,
#[cfg(feature = "ffi")]
preserve_header_case: false,
+ h09_responses: false,
};
Server::parse(&mut raw, ctx).unwrap_err();
}
+ const H09_RESPONSE: &'static str = "Baguettes are super delicious, don't you agree?";
+
+ #[test]
+ fn test_parse_response_h09_allowed() {
+ let _ = pretty_env_logger::try_init();
+ let mut raw = BytesMut::from(H09_RESPONSE);
+ let ctx = ParseContext {
+ cached_headers: &mut None,
+ req_method: &mut Some(crate::Method::GET),
+ #[cfg(feature = "ffi")]
+ preserve_header_case: false,
+ h09_responses: true,
+ };
+ let msg = Client::parse(&mut raw, ctx).unwrap().unwrap();
+ assert_eq!(raw, H09_RESPONSE);
+ assert_eq!(msg.head.subject, crate::StatusCode::OK);
+ assert_eq!(msg.head.version, crate::Version::HTTP_09);
+ assert_eq!(msg.head.headers.len(), 0);
+ }
+
+ #[test]
+ fn test_parse_response_h09_rejected() {
+ let _ = pretty_env_logger::try_init();
+ let mut raw = BytesMut::from(H09_RESPONSE);
+ let ctx = ParseContext {
+ cached_headers: &mut None,
+ req_method: &mut Some(crate::Method::GET),
+ #[cfg(feature = "ffi")]
+ preserve_header_case: false,
+ h09_responses: false,
+ };
+ Client::parse(&mut raw, ctx).unwrap_err();
+ assert_eq!(raw, H09_RESPONSE);
+ }
+
#[test]
fn test_decoder_request() {
fn parse(s: &str) -> ParsedMessage<RequestLine> {
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1276,6 +1325,7 @@ mod tests {
req_method: &mut None,
#[cfg(feature = "ffi")]
preserve_header_case: false,
+ h09_responses: false,
},
)
.expect("parse ok")
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1291,6 +1341,7 @@ mod tests {
req_method: &mut None,
#[cfg(feature = "ffi")]
preserve_header_case: false,
+ h09_responses: false,
},
)
.expect_err(comment)
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1505,6 +1556,7 @@ mod tests {
req_method: &mut Some(Method::GET),
#[cfg(feature = "ffi")]
preserve_header_case: false,
+ h09_responses: false,
}
)
.expect("parse ok")
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1520,6 +1572,7 @@ mod tests {
req_method: &mut Some(m),
#[cfg(feature = "ffi")]
preserve_header_case: false,
+ h09_responses: false,
},
)
.expect("parse ok")
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1535,6 +1588,7 @@ mod tests {
req_method: &mut Some(Method::GET),
#[cfg(feature = "ffi")]
preserve_header_case: false,
+ h09_responses: false,
},
)
.expect_err("parse should err")
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1850,6 +1904,7 @@ mod tests {
req_method: &mut Some(Method::GET),
#[cfg(feature = "ffi")]
preserve_header_case: false,
+ h09_responses: false,
},
)
.expect("parse ok")
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1931,6 +1986,7 @@ mod tests {
req_method: &mut None,
#[cfg(feature = "ffi")]
preserve_header_case: false,
+ h09_responses: false,
},
)
.unwrap()
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1966,6 +2022,7 @@ mod tests {
req_method: &mut None,
#[cfg(feature = "ffi")]
preserve_header_case: false,
+ h09_responses: false,
},
)
.unwrap()
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -112,6 +112,43 @@ macro_rules! test {
headers: { $($response_header_name:expr => $response_header_val:expr,)* },
body: $response_body:expr,
) => (
+ test! {
+ name: $name,
+ server:
+ expected: $server_expected,
+ reply: $server_reply,
+ client:
+ set_host: $set_host,
+ title_case_headers: $title_case_headers,
+ allow_h09_responses: false,
+ request: {$(
+ $c_req_prop: $c_req_val,
+ )*},
+
+ response:
+ status: $client_status,
+ headers: { $($response_header_name => $response_header_val,)* },
+ body: $response_body,
+ }
+ );
+ (
+ name: $name:ident,
+ server:
+ expected: $server_expected:expr,
+ reply: $server_reply:expr,
+ client:
+ set_host: $set_host:expr,
+ title_case_headers: $title_case_headers:expr,
+ allow_h09_responses: $allow_h09_responses:expr,
+ request: {$(
+ $c_req_prop:ident: $c_req_val:tt,
+ )*},
+
+ response:
+ status: $client_status:ident,
+ headers: { $($response_header_name:expr => $response_header_val:expr,)* },
+ body: $response_body:expr,
+ ) => (
#[test]
fn $name() {
let _ = pretty_env_logger::try_init();
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -127,6 +164,7 @@ macro_rules! test {
client:
set_host: $set_host,
title_case_headers: $title_case_headers,
+ allow_h09_responses: $allow_h09_responses,
request: {$(
$c_req_prop: $c_req_val,
)*},
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -181,6 +219,7 @@ macro_rules! test {
client:
set_host: true,
title_case_headers: false,
+ allow_h09_responses: false,
request: {$(
$c_req_prop: $c_req_val,
)*},
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -205,6 +244,7 @@ macro_rules! test {
client:
set_host: $set_host:expr,
title_case_headers: $title_case_headers:expr,
+ allow_h09_responses: $allow_h09_responses:expr,
request: {$(
$c_req_prop:ident: $c_req_val:tt,
)*},
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -217,6 +257,7 @@ macro_rules! test {
let client = Client::builder()
.set_host($set_host)
.http1_title_case_headers($title_case_headers)
+ .http09_responses($allow_h09_responses)
.build(connector);
#[allow(unused_assignments, unused_mut)]
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1067,6 +1108,31 @@ test! {
body: &b"abc"[..],
}
+test! {
+ name: client_allows_http09_when_requested,
+
+ server:
+ expected: "\
+ GET / HTTP/1.1\r\n\
+ Host: {addr}\r\n\
+ \r\n\
+ ",
+ reply: "Mmmmh, baguettes.",
+
+ client:
+ set_host: true,
+ title_case_headers: true,
+ allow_h09_responses: true,
+ request: {
+ method: GET,
+ url: "http://{addr}/",
+ },
+ response:
+ status: OK,
+ headers: {},
+ body: &b"Mmmmh, baguettes."[..],
+}
+
mod dispatch_impl {
use super::*;
use std::io::{self, Read, Write};
|
hyperium/hyper
|
a24f0c0af8e1f4c6b7cc3a47c83eb6e4af88aca6
|
[
"2470"
] |
0.14
|
eb0e7186963bbe4fa1ad4478ac493f75ddc92ae5
|
diff --git a/src/headers.rs b/src/headers.rs
--- a/src/headers.rs
+++ b/src/headers.rs
@@ -42,26 +42,26 @@ pub(super) fn content_length_parse_all_values(values: ValueIter<'_, HeaderValue>
// be alright if they all contain the same value, and all parse
// correctly. If not, then it's an error.
- let folded = values.fold(None, |prev, line| match prev {
- Some(Ok(prev)) => Some(
- line.to_str()
- .map_err(|_| ())
- .and_then(|s| s.parse().map_err(|_| ()))
- .and_then(|n| if prev == n { Ok(n) } else { Err(()) }),
- ),
- None => Some(
- line.to_str()
- .map_err(|_| ())
- .and_then(|s| s.parse().map_err(|_| ())),
- ),
- Some(Err(())) => Some(Err(())),
- });
-
- if let Some(Ok(n)) = folded {
- Some(n)
- } else {
- None
+ let mut content_length: Option<u64> = None;
+ for h in values {
+ if let Ok(line) = h.to_str() {
+ for v in line.split(',') {
+ if let Some(n) = v.trim().parse().ok() {
+ if content_length.is_none() {
+ content_length = Some(n)
+ } else if content_length != Some(n) {
+ return None;
+ }
+ } else {
+ return None
+ }
+ }
+ } else {
+ return None
+ }
}
+
+ return content_length
}
#[cfg(all(feature = "http2", feature = "client"))]
|
I coulda sworn hyper supported this, and so dug a little. hyper _does_ correctly support if the multiple values are on multiple lines, but not if comma-separated. So, this works:
```
HTTP/1.1 200 OK
content-length: 3
content-length: 3
hey
```
But not `content-length: 3,3`. Seems like something to fix, and add a test case for.
The relevant code is this function: https://github.com/hyperium/hyper/blob/eb0e7186963bbe4fa1ad4478ac493f75ddc92ae5/src/headers.rs#L40
Ok, i'll provide a PR
|
2021-03-17T21:17:40Z
| 2,471
|
Support multiple Content-Length values
Hi,
I try to work on some [failing tests](https://github.com/servo/servo/blob/master/tests/wpt/web-platform-tests/fetch/content-length/resources/content-lengths.json) of servo. To summarize the issue : the `fetch` specification has been recently updated to handle multiple values for the [`Content-Length` header](https://fetch.spec.whatwg.org/#content-length-header). I found Hyper client does not handle the header as expected by the specification. To be more precise, hyper does not handle the multiple values into only one header entry : `Content-Length: 42,42`. I don't know if we want to update hyper to follow this specification or not.
I reproduced the error with the [example client](https://github.com/hyperium/hyper/blob/master/examples/client.rs) and the following node http server code:
```js
var http = require('http');
http.createServer(function (req, res) {
res.writeHead(200, {'Content-Length': '42,42'});
res.write('012345678901234567890123456789012345678901');
res.end();
}).listen(8080);
```
|
hyperium__hyper-2471
|
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1043,6 +1043,30 @@ test! {
error: |err| err.to_string() == "request has unsupported HTTP version",
}
+test! {
+ name: client_handles_contentlength_values_on_same_line,
+
+ server:
+ expected: "GET /foo HTTP/1.1\r\nhost: {addr}\r\n\r\n",
+ reply: "\
+ HTTP/1.1 200 OK\r\n\
+ Content-Length: 3,3\r\n\
+ Content-Length: 3,3\r\n\
+ \r\n\
+ abc\r\n",
+
+ client:
+ request: {
+ method: GET,
+ url: "http://{addr}/foo",
+ },
+ response:
+ status: OK,
+ headers: {
+ },
+ body: &b"abc"[..],
+}
+
mod dispatch_impl {
use super::*;
use std::io::{self, Read, Write};
|
hyperium/hyper
|
a24f0c0af8e1f4c6b7cc3a47c83eb6e4af88aca6
|
[
"2421"
] |
0.14
|
48d4594930da4e227039cfa254411b85c98b63c5
|
diff --git a/Cargo.toml b/Cargo.toml
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -34,7 +34,7 @@ httparse = "1.0"
h2 = { version = "0.3", optional = true }
itoa = "0.4.1"
tracing = { version = "0.1", default-features = false, features = ["std"] }
-pin-project-lite = "0.2.4"
+pin-project = "1.0"
tower-service = "0.3"
tokio = { version = "1", features = ["sync"] }
want = "0.3"
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -56,7 +56,7 @@ use std::time::Duration;
use bytes::Bytes;
use futures_util::future::{self, Either, FutureExt as _};
-use pin_project_lite::pin_project;
+use pin_project::pin_project;
use tokio::io::{AsyncRead, AsyncWrite};
use tower_service::Service;
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -75,23 +75,15 @@ use crate::{Body, Request, Response};
#[cfg(feature = "http1")]
type Http1Dispatcher<T, B, R> = proto::dispatch::Dispatcher<proto::dispatch::Client<B>, B, T, R>;
-pin_project! {
- #[project = ProtoClientProj]
- enum ProtoClient<T, B>
- where
- B: HttpBody,
- {
- #[cfg(feature = "http1")]
- H1 {
- #[pin]
- h1: Http1Dispatcher<T, B, proto::h1::ClientTransaction>,
- },
- #[cfg(feature = "http2")]
- H2 {
- #[pin]
- h2: proto::h2::ClientTask<B>, _phantom: PhantomData<fn(T)>,
- },
- }
+#[pin_project(project = ProtoClientProj)]
+enum ProtoClient<T, B>
+where
+ B: HttpBody,
+{
+ #[cfg(feature = "http1")]
+ H1(#[pin] Http1Dispatcher<T, B, proto::h1::ClientTransaction>),
+ #[cfg(feature = "http2")]
+ H2(#[pin] proto::h2::ClientTask<B>, PhantomData<fn(T)>),
}
/// Returns a handshake future over some IO.
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -408,7 +400,7 @@ where
pub fn into_parts(self) -> Parts<T> {
match self.inner.expect("already upgraded") {
#[cfg(feature = "http1")]
- ProtoClient::H1 { h1 } => {
+ ProtoClient::H1(h1) => {
let (io, read_buf, _) = h1.into_inner();
Parts {
io,
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -417,7 +409,7 @@ where
}
}
#[cfg(feature = "http2")]
- ProtoClient::H2 { .. } => {
+ ProtoClient::H2(..) => {
panic!("http2 cannot into_inner");
}
}
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -437,9 +429,9 @@ where
pub fn poll_without_shutdown(&mut self, cx: &mut task::Context<'_>) -> Poll<crate::Result<()>> {
match *self.inner.as_mut().expect("already upgraded") {
#[cfg(feature = "http1")]
- ProtoClient::H1 { ref mut h1 } => h1.poll_without_shutdown(cx),
+ ProtoClient::H1(ref mut h1) => h1.poll_without_shutdown(cx),
#[cfg(feature = "http2")]
- ProtoClient::H2 { ref mut h2, .. } => Pin::new(h2).poll(cx).map_ok(|_| ()),
+ ProtoClient::H2(ref mut h2, _) => Pin::new(h2).poll(cx).map_ok(|_| ()),
}
}
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -468,7 +460,7 @@ where
proto::Dispatched::Shutdown => Poll::Ready(Ok(())),
#[cfg(feature = "http1")]
proto::Dispatched::Upgrade(pending) => match self.inner.take() {
- Some(ProtoClient::H1 { h1 }) => {
+ Some(ProtoClient::H1(h1)) => {
let (io, buf, _) = h1.into_inner();
pending.fulfill(Upgraded::new(io, buf));
Poll::Ready(Ok(()))
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -715,17 +707,14 @@ impl Builder {
}
let cd = proto::h1::dispatch::Client::new(rx);
let dispatch = proto::h1::Dispatcher::new(cd, conn);
- ProtoClient::H1 { h1: dispatch }
+ ProtoClient::H1(dispatch)
}
#[cfg(feature = "http2")]
Proto::Http2 => {
let h2 =
proto::h2::client::handshake(io, rx, &opts.h2_builder, opts.exec.clone())
.await?;
- ProtoClient::H2 {
- h2,
- _phantom: PhantomData,
- }
+ ProtoClient::H2(h2, PhantomData)
}
};
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -779,9 +768,9 @@ where
fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
match self.project() {
#[cfg(feature = "http1")]
- ProtoClientProj::H1 { h1 } => h1.poll(cx),
+ ProtoClientProj::H1(c) => c.poll(cx),
#[cfg(feature = "http2")]
- ProtoClientProj::H2 { h2, .. } => h2.poll(cx),
+ ProtoClientProj::H2(c, _) => c.poll(cx),
}
}
}
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -11,7 +11,7 @@ use std::time::Duration;
use futures_util::future::Either;
use http::uri::{Scheme, Uri};
-use pin_project_lite::pin_project;
+use pin_project::pin_project;
use tokio::net::{TcpSocket, TcpStream};
use tokio::time::Sleep;
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -373,19 +373,18 @@ impl HttpInfo {
}
}
-pin_project! {
- // Not publicly exported (so missing_docs doesn't trigger).
- //
- // We return this `Future` instead of the `Pin<Box<dyn Future>>` directly
- // so that users don't rely on it fitting in a `Pin<Box<dyn Future>>` slot
- // (and thus we can change the type in the future).
- #[must_use = "futures do nothing unless polled"]
- #[allow(missing_debug_implementations)]
- pub struct HttpConnecting<R> {
- #[pin]
- fut: BoxConnecting,
- _marker: PhantomData<R>,
- }
+// Not publicly exported (so missing_docs doesn't trigger).
+//
+// We return this `Future` instead of the `Pin<Box<dyn Future>>` directly
+// so that users don't rely on it fitting in a `Pin<Box<dyn Future>>` slot
+// (and thus we can change the type in the future).
+#[must_use = "futures do nothing unless polled"]
+#[pin_project]
+#[allow(missing_debug_implementations)]
+pub struct HttpConnecting<R> {
+ #[pin]
+ fut: BoxConnecting,
+ _marker: PhantomData<R>,
}
type ConnectResult = Result<TcpStream, ConnectError>;
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -4,7 +4,9 @@ use std::future::Future;
use futures_util::FutureExt;
use tokio::sync::{mpsc, oneshot};
-use crate::common::{task, Pin, Poll};
+use crate::common::{task, Poll};
+#[cfg(feature = "http2")]
+use crate::common::Pin;
pub(crate) type RetryPromise<T, U> = oneshot::Receiver<Result<U, (crate::Error, Option<T>)>>;
pub(crate) type Promise<T> = oneshot::Receiver<Result<T, crate::Error>>;
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -11,7 +11,7 @@ use futures_channel::oneshot;
use tokio::time::{Duration, Instant, Interval};
use super::client::Ver;
-use crate::common::{exec::Exec, task, Future, Pin, Poll, Unpin};
+use crate::common::{task, exec::Exec, Future, Pin, Poll, Unpin};
// FIXME: allow() required due to `impl Trait` leaking types to this lint
#[allow(missing_debug_implementations)]
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -714,17 +714,16 @@ impl Expiration {
}
#[cfg(feature = "runtime")]
-pin_project_lite::pin_project! {
- struct IdleTask<T> {
- #[pin]
- interval: Interval,
- pool: WeakOpt<Mutex<PoolInner<T>>>,
- // This allows the IdleTask to be notified as soon as the entire
- // Pool is fully dropped, and shutdown. This channel is never sent on,
- // but Err(Canceled) will be received when the Pool is dropped.
- #[pin]
- pool_drop_notifier: oneshot::Receiver<crate::common::Never>,
- }
+#[pin_project::pin_project]
+struct IdleTask<T> {
+ #[pin]
+ interval: Interval,
+ pool: WeakOpt<Mutex<PoolInner<T>>>,
+ // This allows the IdleTask to be notified as soon as the entire
+ // Pool is fully dropped, and shutdown. This channel is never sent on,
+ // but Err(Canceled) will be received when the Pool is dropped.
+ #[pin]
+ pool_drop_notifier: oneshot::Receiver<crate::common::Never>,
}
#[cfg(feature = "runtime")]
diff --git a/src/common/drain.rs b/src/common/drain.rs
--- a/src/common/drain.rs
+++ b/src/common/drain.rs
@@ -1,6 +1,6 @@
use std::mem;
-use pin_project_lite::pin_project;
+use pin_project::pin_project;
use tokio::sync::watch;
use super::{task, Future, Pin, Poll};
diff --git a/src/common/drain.rs b/src/common/drain.rs
--- a/src/common/drain.rs
+++ b/src/common/drain.rs
@@ -21,15 +21,14 @@ pub(crate) struct Watch {
rx: watch::Receiver<()>,
}
-pin_project! {
- #[allow(missing_debug_implementations)]
- pub struct Watching<F, FN> {
- #[pin]
- future: F,
- state: State<FN>,
- watch: Pin<Box<dyn Future<Output = ()> + Send + Sync>>,
- _rx: watch::Receiver<()>,
- }
+#[allow(missing_debug_implementations)]
+#[pin_project]
+pub struct Watching<F, FN> {
+ #[pin]
+ future: F,
+ state: State<FN>,
+ watch: Pin<Box<dyn Future<Output = ()> + Send + Sync>>,
+ _rx: watch::Receiver<()>,
}
enum State<F> {
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -44,13 +44,10 @@ cfg_server! {
}
cfg_client! {
- pin_project_lite::pin_project! {
- pub(crate) struct Client<B> {
- callback: Option<crate::client::dispatch::Callback<Request<B>, http::Response<Body>>>,
- #[pin]
- rx: ClientRx<B>,
- rx_closed: bool,
- }
+ pub(crate) struct Client<B> {
+ callback: Option<crate::client::dispatch::Callback<Request<B>, http::Response<Body>>>,
+ rx: ClientRx<B>,
+ rx_closed: bool,
}
type ClientRx<B> = crate::client::dispatch::Receiver<Request<B>, http::Response<Body>>;
diff --git a/src/proto/h2/mod.rs b/src/proto/h2/mod.rs
--- a/src/proto/h2/mod.rs
+++ b/src/proto/h2/mod.rs
@@ -5,7 +5,7 @@ use http::header::{
TRANSFER_ENCODING, UPGRADE,
};
use http::HeaderMap;
-use pin_project_lite::pin_project;
+use pin_project::pin_project;
use std::error::Error as StdError;
use std::io::IoSlice;
diff --git a/src/proto/h2/mod.rs b/src/proto/h2/mod.rs
--- a/src/proto/h2/mod.rs
+++ b/src/proto/h2/mod.rs
@@ -94,16 +94,15 @@ fn decode_content_length(headers: &HeaderMap) -> DecodedLength {
// body adapters used by both Client and Server
-pin_project! {
- struct PipeToSendStream<S>
- where
- S: HttpBody,
- {
- body_tx: SendStream<SendBuf<S::Data>>,
- data_done: bool,
- #[pin]
- stream: S,
- }
+#[pin_project]
+struct PipeToSendStream<S>
+where
+ S: HttpBody,
+{
+ body_tx: SendStream<SendBuf<S::Data>>,
+ data_done: bool,
+ #[pin]
+ stream: S,
}
impl<S> PipeToSendStream<S>
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -5,7 +5,7 @@ use std::time::Duration;
use h2::server::{Connection, Handshake, SendResponse};
use h2::Reason;
-use pin_project_lite::pin_project;
+use pin_project::pin_project;
use tokio::io::{AsyncRead, AsyncWrite};
use super::{decode_content_length, ping, PipeToSendStream, SendBuf};
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -57,16 +57,15 @@ impl Default for Config {
}
}
-pin_project! {
- pub(crate) struct Server<T, S, B, E>
- where
- S: HttpService<Body>,
- B: HttpBody,
- {
- exec: E,
- service: S,
- state: State<T, B>,
- }
+#[pin_project]
+pub(crate) struct Server<T, S, B, E>
+where
+ S: HttpService<Body>,
+ B: HttpBody,
+{
+ exec: E,
+ service: S,
+ state: State<T, B>,
}
enum State<T, B>
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -316,33 +315,24 @@ where
}
}
-pin_project! {
- #[allow(missing_debug_implementations)]
- pub struct H2Stream<F, B>
- where
- B: HttpBody,
- {
- reply: SendResponse<SendBuf<B::Data>>,
- #[pin]
- state: H2StreamState<F, B>,
- }
+#[allow(missing_debug_implementations)]
+#[pin_project]
+pub struct H2Stream<F, B>
+where
+ B: HttpBody,
+{
+ reply: SendResponse<SendBuf<B::Data>>,
+ #[pin]
+ state: H2StreamState<F, B>,
}
-pin_project! {
- #[project = H2StreamStateProj]
- enum H2StreamState<F, B>
- where
- B: HttpBody,
- {
- Service {
- #[pin]
- fut: F,
- },
- Body {
- #[pin]
- pipe: PipeToSendStream<B>,
- },
- }
+#[pin_project(project = H2StreamStateProj)]
+enum H2StreamState<F, B>
+where
+ B: HttpBody,
+{
+ Service(#[pin] F),
+ Body(#[pin] PipeToSendStream<B>),
}
impl<F, B> H2Stream<F, B>
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -352,7 +342,7 @@ where
fn new(fut: F, respond: SendResponse<SendBuf<B::Data>>) -> H2Stream<F, B> {
H2Stream {
reply: respond,
- state: H2StreamState::Service { fut },
+ state: H2StreamState::Service(fut),
}
}
}
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -381,7 +371,7 @@ where
let mut me = self.project();
loop {
let next = match me.state.as_mut().project() {
- H2StreamStateProj::Service { fut: h } => {
+ H2StreamStateProj::Service(h) => {
let res = match h.poll(cx) {
Poll::Ready(Ok(r)) => r,
Poll::Pending => {
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -419,15 +409,13 @@ where
if !body.is_end_stream() {
let body_tx = reply!(me, res, false);
- H2StreamState::Body {
- pipe: PipeToSendStream::new(body, body_tx),
- }
+ H2StreamState::Body(PipeToSendStream::new(body, body_tx))
} else {
reply!(me, res, true);
return Poll::Ready(Ok(()));
}
}
- H2StreamStateProj::Body { pipe } => {
+ H2StreamStateProj::Body(pipe) => {
return pipe.poll(cx);
}
};
diff --git a/src/server/accept.rs b/src/server/accept.rs
--- a/src/server/accept.rs
+++ b/src/server/accept.rs
@@ -9,7 +9,7 @@
#[cfg(feature = "stream")]
use futures_core::Stream;
#[cfg(feature = "stream")]
-use pin_project_lite::pin_project;
+use pin_project::pin_project;
use crate::common::{
task::{self, Poll},
diff --git a/src/server/accept.rs b/src/server/accept.rs
--- a/src/server/accept.rs
+++ b/src/server/accept.rs
@@ -86,12 +86,8 @@ pub fn from_stream<S, IO, E>(stream: S) -> impl Accept<Conn = IO, Error = E>
where
S: Stream<Item = Result<IO, E>>,
{
- pin_project! {
- struct FromStream<S> {
- #[pin]
- stream: S,
- }
- }
+ #[pin_project]
+ struct FromStream<S>(#[pin] S);
impl<S, IO, E> Accept for FromStream<S>
where
diff --git a/src/server/accept.rs b/src/server/accept.rs
--- a/src/server/accept.rs
+++ b/src/server/accept.rs
@@ -103,9 +99,9 @@ where
self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
) -> Poll<Option<Result<Self::Conn, Self::Error>>> {
- self.project().stream.poll_next(cx)
+ self.project().0.poll_next(cx)
}
}
- FromStream { stream }
+ FromStream(stream)
}
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -45,6 +45,7 @@
use std::error::Error as StdError;
use std::fmt;
+#[cfg(feature = "http1")]
use std::marker::PhantomData;
#[cfg(feature = "tcp")]
use std::net::SocketAddr;
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -52,7 +53,7 @@ use std::net::SocketAddr;
use std::time::Duration;
use bytes::Bytes;
-use pin_project_lite::pin_project;
+use pin_project::pin_project;
use tokio::io::{AsyncRead, AsyncWrite};
use super::accept::Accept;
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -108,85 +109,77 @@ enum ConnectionMode {
Fallback,
}
-pin_project! {
- /// A stream mapping incoming IOs to new services.
- ///
- /// Yields `Connecting`s that are futures that should be put on a reactor.
- #[must_use = "streams do nothing unless polled"]
- #[derive(Debug)]
- pub(super) struct Serve<I, S, E = Exec> {
- #[pin]
- incoming: I,
- make_service: S,
- protocol: Http<E>,
- }
+/// A stream mapping incoming IOs to new services.
+///
+/// Yields `Connecting`s that are futures that should be put on a reactor.
+#[must_use = "streams do nothing unless polled"]
+#[pin_project]
+#[derive(Debug)]
+pub(super) struct Serve<I, S, E = Exec> {
+ #[pin]
+ incoming: I,
+ make_service: S,
+ protocol: Http<E>,
}
-pin_project! {
- /// A future building a new `Service` to a `Connection`.
- ///
- /// Wraps the future returned from `MakeService` into one that returns
- /// a `Connection`.
- #[must_use = "futures do nothing unless polled"]
- #[derive(Debug)]
- pub struct Connecting<I, F, E = Exec> {
- #[pin]
- future: F,
- io: Option<I>,
- protocol: Http<E>,
- }
+/// A future building a new `Service` to a `Connection`.
+///
+/// Wraps the future returned from `MakeService` into one that returns
+/// a `Connection`.
+#[must_use = "futures do nothing unless polled"]
+#[pin_project]
+#[derive(Debug)]
+pub struct Connecting<I, F, E = Exec> {
+ #[pin]
+ future: F,
+ io: Option<I>,
+ protocol: Http<E>,
}
-pin_project! {
- #[must_use = "futures do nothing unless polled"]
- #[derive(Debug)]
- pub(super) struct SpawnAll<I, S, E> {
- // TODO: re-add `pub(super)` once rustdoc can handle this.
- //
- // See https://github.com/rust-lang/rust/issues/64705
- #[pin]
- pub(super) serve: Serve<I, S, E>,
- }
+#[must_use = "futures do nothing unless polled"]
+#[pin_project]
+#[derive(Debug)]
+pub(super) struct SpawnAll<I, S, E> {
+ // TODO: re-add `pub(super)` once rustdoc can handle this.
+ //
+ // See https://github.com/rust-lang/rust/issues/64705
+ #[pin]
+ pub(super) serve: Serve<I, S, E>,
}
-pin_project! {
- /// A future binding a connection with a Service.
- ///
- /// Polling this future will drive HTTP forward.
- #[must_use = "futures do nothing unless polled"]
- pub struct Connection<T, S, E = Exec>
- where
- S: HttpService<Body>,
- {
- pub(super) conn: Option<ProtoServer<T, S::ResBody, S, E>>,
- fallback: Fallback<E>,
- }
+/// A future binding a connection with a Service.
+///
+/// Polling this future will drive HTTP forward.
+#[must_use = "futures do nothing unless polled"]
+#[pin_project]
+pub struct Connection<T, S, E = Exec>
+where
+ S: HttpService<Body>,
+{
+ pub(super) conn: Option<ProtoServer<T, S::ResBody, S, E>>,
+ #[cfg(all(feature = "http1", feature = "http2"))]
+ fallback: Fallback<E>,
}
-pin_project! {
- #[project = ProtoServerProj]
- pub(super) enum ProtoServer<T, B, S, E = Exec>
- where
- S: HttpService<Body>,
- B: HttpBody,
- {
- #[cfg(feature = "http1")]
- H1 {
- #[pin]
- h1: proto::h1::Dispatcher<
- proto::h1::dispatch::Server<S, Body>,
- B,
- T,
- proto::ServerTransaction,
- >,
- _phantom: PhantomData<E>,
- },
- #[cfg(feature = "http2")]
- H2 {
- #[pin]
- h2: proto::h2::Server<Rewind<T>, S, B, E>,
- },
- }
+#[pin_project(project = ProtoServerProj)]
+pub(super) enum ProtoServer<T, B, S, E = Exec>
+where
+ S: HttpService<Body>,
+ B: HttpBody,
+{
+ #[cfg(feature = "http1")]
+ H1(
+ #[pin]
+ proto::h1::Dispatcher<
+ proto::h1::dispatch::Server<S, Body>,
+ B,
+ T,
+ proto::ServerTransaction,
+ >,
+ PhantomData<E>,
+ ),
+ #[cfg(feature = "http2")]
+ H2(#[pin] proto::h2::Server<Rewind<T>, S, B, E>),
}
#[cfg(all(feature = "http1", feature = "http2"))]
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -196,10 +189,6 @@ enum Fallback<E> {
Http1Only,
}
-#[cfg(not(all(feature = "http1", feature = "http2")))]
-#[derive(Clone, Debug)]
-struct Fallback<E>(PhantomData<E>);
-
#[cfg(all(feature = "http1", feature = "http2"))]
impl<E> Fallback<E> {
fn to_h2(&self) -> bool {
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -530,10 +519,7 @@ impl<E> Http<E> {
conn.set_max_buf_size(max);
}
let sd = proto::h1::dispatch::Server::new(service);
- ProtoServer::H1 {
- h1: proto::h1::Dispatcher::new(sd, conn),
- _phantom: PhantomData,
- }
+ ProtoServer::H1(proto::h1::Dispatcher::new(sd, conn), PhantomData)
}};
}
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -549,7 +535,7 @@ impl<E> Http<E> {
let rewind_io = Rewind::new(io);
let h2 =
proto::h2::Server::new(rewind_io, service, &self.h2_builder, self.exec.clone());
- ProtoServer::H2 { h2 }
+ ProtoServer::H2(h2)
}
};
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -604,14 +590,14 @@ where
/// This should only be called while the `Connection` future is still
/// pending. If called after `Connection::poll` has resolved, this does
/// nothing.
- pub fn graceful_shutdown(mut self: Pin<&mut Self>) {
- match self.conn {
+ pub fn graceful_shutdown(self: Pin<&mut Self>) {
+ match self.project().conn {
#[cfg(feature = "http1")]
- Some(ProtoServer::H1 { ref mut h1, .. }) => {
+ Some(ProtoServer::H1(ref mut h1, _)) => {
h1.disable_keep_alive();
}
#[cfg(feature = "http2")]
- Some(ProtoServer::H2 { ref mut h2 }) => {
+ Some(ProtoServer::H2(ref mut h2)) => {
h2.graceful_shutdown();
}
None => (),
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -638,7 +624,7 @@ where
pub fn try_into_parts(self) -> Option<Parts<I, S>> {
match self.conn.unwrap() {
#[cfg(feature = "http1")]
- ProtoServer::H1 { h1, .. } => {
+ ProtoServer::H1(h1, _) => {
let (io, read_buf, dispatch) = h1.into_inner();
Some(Parts {
io,
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -648,7 +634,7 @@ where
})
}
#[cfg(feature = "http2")]
- ProtoServer::H2 { .. } => None,
+ ProtoServer::H2(_h2) => None,
}
}
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -672,7 +658,7 @@ where
loop {
match *self.conn.as_mut().unwrap() {
#[cfg(feature = "http1")]
- ProtoServer::H1 { ref mut h1, .. } => match ready!(h1.poll_without_shutdown(cx)) {
+ ProtoServer::H1(ref mut h1, _) => match ready!(h1.poll_without_shutdown(cx)) {
Ok(()) => return Poll::Ready(Ok(())),
Err(e) => {
#[cfg(feature = "http2")]
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -688,7 +674,7 @@ where
}
},
#[cfg(feature = "http2")]
- ProtoServer::H2 { ref mut h2 } => return Pin::new(h2).poll(cx).map_ok(|_| ()),
+ ProtoServer::H2(ref mut h2) => return Pin::new(h2).poll(cx).map_ok(|_| ()),
};
}
}
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -714,8 +700,8 @@ where
let conn = self.conn.take();
let (io, read_buf, dispatch) = match conn.unwrap() {
- ProtoServer::H1 { h1, .. } => h1.into_inner(),
- ProtoServer::H2 { .. } => {
+ ProtoServer::H1(h1, _) => h1.into_inner(),
+ ProtoServer::H2(_h2) => {
panic!("h2 cannot into_inner");
}
};
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -728,7 +714,7 @@ where
let h2 = proto::h2::Server::new(rewind_io, dispatch.into_service(), builder, exec.clone());
debug_assert!(self.conn.is_none());
- self.conn = Some(ProtoServer::H2 { h2 });
+ self.conn = Some(ProtoServer::H2(h2));
}
/// Enable this connection to support higher-level HTTP upgrades.
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -962,9 +948,9 @@ where
fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
match self.project() {
#[cfg(feature = "http1")]
- ProtoServerProj::H1 { h1, .. } => h1.poll(cx),
+ ProtoServerProj::H1(s, _) => s.poll(cx),
#[cfg(feature = "http2")]
- ProtoServerProj::H2 { h2 } => h2.poll(cx),
+ ProtoServerProj::H2(s) => s.poll(cx),
}
}
}
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -978,7 +964,7 @@ pub(crate) mod spawn_all {
use crate::common::exec::ConnStreamExec;
use crate::common::{task, Future, Pin, Poll, Unpin};
use crate::service::HttpService;
- use pin_project_lite::pin_project;
+ use pin_project::pin_project;
// Used by `SpawnAll` to optionally watch a `Connection` future.
//
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -1023,36 +1009,23 @@ pub(crate) mod spawn_all {
// Users cannot import this type, nor the associated `NewSvcExec`. Instead,
// a blanket implementation for `Executor<impl Future>` is sufficient.
- pin_project! {
- #[allow(missing_debug_implementations)]
- pub struct NewSvcTask<I, N, S: HttpService<Body>, E, W: Watcher<I, S, E>> {
- #[pin]
- state: State<I, N, S, E, W>,
- }
+ #[pin_project]
+ #[allow(missing_debug_implementations)]
+ pub struct NewSvcTask<I, N, S: HttpService<Body>, E, W: Watcher<I, S, E>> {
+ #[pin]
+ state: State<I, N, S, E, W>,
}
- pin_project! {
- #[project = StateProj]
- pub(super) enum State<I, N, S: HttpService<Body>, E, W: Watcher<I, S, E>> {
- Connecting {
- #[pin]
- connecting: Connecting<I, N, E>,
- watcher: W,
- },
- Connected {
- #[pin]
- future: W::Future,
- },
- }
+ #[pin_project(project = StateProj)]
+ pub(super) enum State<I, N, S: HttpService<Body>, E, W: Watcher<I, S, E>> {
+ Connecting(#[pin] Connecting<I, N, E>, W),
+ Connected(#[pin] W::Future),
}
impl<I, N, S: HttpService<Body>, E, W: Watcher<I, S, E>> NewSvcTask<I, N, S, E, W> {
pub(super) fn new(connecting: Connecting<I, N, E>, watcher: W) -> Self {
NewSvcTask {
- state: State::Connecting {
- connecting,
- watcher,
- },
+ state: State::Connecting(connecting, watcher),
}
}
}
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -1079,10 +1052,7 @@ pub(crate) mod spawn_all {
loop {
let next = {
match me.state.as_mut().project() {
- StateProj::Connecting {
- connecting,
- watcher,
- } => {
+ StateProj::Connecting(connecting, watcher) => {
let res = ready!(connecting.poll(cx));
let conn = match res {
Ok(conn) => conn,
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -1092,10 +1062,10 @@ pub(crate) mod spawn_all {
return Poll::Ready(());
}
};
- let future = watcher.watch(conn.with_upgrades());
- State::Connected { future }
+ let connected = watcher.watch(conn.with_upgrades());
+ State::Connected(connected)
}
- StateProj::Connected { future } => {
+ StateProj::Connected(future) => {
return future.poll(cx).map(|res| {
if let Err(err) = res {
debug!("connection error: {}", err);
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -1163,7 +1133,7 @@ mod upgrades {
#[cfg(feature = "http1")]
Ok(proto::Dispatched::Upgrade(pending)) => {
match self.inner.conn.take() {
- Some(ProtoServer::H1 { h1, .. }) => {
+ Some(ProtoServer::H1(h1, _)) => {
let (io, buf, _) = h1.into_inner();
pending.fulfill(Upgraded::new(io, buf));
return Poll::Ready(Ok(()));
diff --git a/src/server/server.rs b/src/server/server.rs
--- a/src/server/server.rs
+++ b/src/server/server.rs
@@ -6,7 +6,7 @@ use std::net::{SocketAddr, TcpListener as StdTcpListener};
#[cfg(feature = "tcp")]
use std::time::Duration;
-use pin_project_lite::pin_project;
+use pin_project::pin_project;
use tokio::io::{AsyncRead, AsyncWrite};
use super::accept::Accept;
diff --git a/src/server/server.rs b/src/server/server.rs
--- a/src/server/server.rs
+++ b/src/server/server.rs
@@ -21,17 +21,16 @@ use super::shutdown::{Graceful, GracefulWatcher};
#[cfg(feature = "tcp")]
use super::tcp::AddrIncoming;
-pin_project! {
- /// A listening HTTP server that accepts connections in both HTTP1 and HTTP2 by default.
- ///
- /// `Server` is a `Future` mapping a bound listener with a set of service
- /// handlers. It is built using the [`Builder`](Builder), and the future
- /// completes when the server has been shutdown. It should be run by an
- /// `Executor`.
- pub struct Server<I, S, E = Exec> {
- #[pin]
- spawn_all: SpawnAll<I, S, E>,
- }
+/// A listening HTTP server that accepts connections in both HTTP1 and HTTP2 by default.
+///
+/// `Server` is a `Future` mapping a bound listener with a set of service
+/// handlers. It is built using the [`Builder`](Builder), and the future
+/// completes when the server has been shutdown. It should be run by an
+/// `Executor`.
+#[pin_project]
+pub struct Server<I, S, E = Exec> {
+ #[pin]
+ spawn_all: SpawnAll<I, S, E>,
}
/// A builder for a [`Server`](Server).
diff --git a/src/server/shutdown.rs b/src/server/shutdown.rs
--- a/src/server/shutdown.rs
+++ b/src/server/shutdown.rs
@@ -1,36 +1,33 @@
use std::error::Error as StdError;
-use pin_project_lite::pin_project;
+use pin_project::pin_project;
use tokio::io::{AsyncRead, AsyncWrite};
-use super::accept::Accept;
use super::conn::{SpawnAll, UpgradeableConnection, Watcher};
+use super::accept::Accept;
use crate::body::{Body, HttpBody};
use crate::common::drain::{self, Draining, Signal, Watch, Watching};
use crate::common::exec::{ConnStreamExec, NewSvcExec};
use crate::common::{task, Future, Pin, Poll, Unpin};
use crate::service::{HttpService, MakeServiceRef};
-pin_project! {
- #[allow(missing_debug_implementations)]
- pub struct Graceful<I, S, F, E> {
- #[pin]
- state: State<I, S, F, E>,
- }
+#[allow(missing_debug_implementations)]
+#[pin_project]
+pub struct Graceful<I, S, F, E> {
+ #[pin]
+ state: State<I, S, F, E>,
}
-pin_project! {
- #[project = StateProj]
- pub(super) enum State<I, S, F, E> {
- Running {
- drain: Option<(Signal, Watch)>,
- #[pin]
- spawn_all: SpawnAll<I, S, E>,
- #[pin]
- signal: F,
- },
- Draining { draining: Draining },
- }
+#[pin_project(project = StateProj)]
+pub(super) enum State<I, S, F, E> {
+ Running {
+ drain: Option<(Signal, Watch)>,
+ #[pin]
+ spawn_all: SpawnAll<I, S, E>,
+ #[pin]
+ signal: F,
+ },
+ Draining(Draining),
}
impl<I, S, F, E> Graceful<I, S, F, E> {
diff --git a/src/server/shutdown.rs b/src/server/shutdown.rs
--- a/src/server/shutdown.rs
+++ b/src/server/shutdown.rs
@@ -74,16 +71,14 @@ where
Poll::Ready(()) => {
debug!("signal received, starting graceful shutdown");
let sig = drain.take().expect("drain channel").0;
- State::Draining {
- draining: sig.drain(),
- }
+ State::Draining(sig.drain())
}
Poll::Pending => {
let watch = drain.as_ref().expect("drain channel").1.clone();
return spawn_all.poll_watch(cx, &GracefulWatcher(watch));
}
},
- StateProj::Draining { ref mut draining } => {
+ StateProj::Draining(ref mut draining) => {
return Pin::new(draining).poll(cx).map(Ok);
}
}
diff --git a/src/server/tcp.rs b/src/server/tcp.rs
--- a/src/server/tcp.rs
+++ b/src/server/tcp.rs
@@ -229,14 +229,13 @@ mod addr_stream {
use crate::common::{task, Pin, Poll};
- pin_project_lite::pin_project! {
- /// A transport returned yieled by `AddrIncoming`.
- #[derive(Debug)]
- pub struct AddrStream {
- #[pin]
- inner: TcpStream,
- pub(super) remote_addr: SocketAddr,
- }
+ /// A transport returned yieled by `AddrIncoming`.
+ #[pin_project::pin_project]
+ #[derive(Debug)]
+ pub struct AddrStream {
+ #[pin]
+ inner: TcpStream,
+ pub(super) remote_addr: SocketAddr,
}
impl AddrStream {
diff --git a/src/service/oneshot.rs b/src/service/oneshot.rs
--- a/src/service/oneshot.rs
+++ b/src/service/oneshot.rs
@@ -1,6 +1,6 @@
// TODO: Eventually to be replaced with tower_util::Oneshot.
-use pin_project_lite::pin_project;
+use pin_project::pin_project;
use tower_service::Service;
use crate::common::{task, Future, Pin, Poll};
diff --git a/src/service/oneshot.rs b/src/service/oneshot.rs
--- a/src/service/oneshot.rs
+++ b/src/service/oneshot.rs
@@ -10,35 +10,25 @@ where
S: Service<Req>,
{
Oneshot {
- state: State::NotReady { svc, req },
+ state: State::NotReady(svc, req),
}
}
-pin_project! {
- // A `Future` consuming a `Service` and request, waiting until the `Service`
- // is ready, and then calling `Service::call` with the request, and
- // waiting for that `Future`.
- #[allow(missing_debug_implementations)]
- pub struct Oneshot<S: Service<Req>, Req> {
- #[pin]
- state: State<S, Req>,
- }
+// A `Future` consuming a `Service` and request, waiting until the `Service`
+// is ready, and then calling `Service::call` with the request, and
+// waiting for that `Future`.
+#[allow(missing_debug_implementations)]
+#[pin_project]
+pub struct Oneshot<S: Service<Req>, Req> {
+ #[pin]
+ state: State<S, Req>,
}
-pin_project! {
- #[project = StateProj]
- #[project_replace = StateProjOwn]
- enum State<S: Service<Req>, Req> {
- NotReady {
- svc: S,
- req: Req,
- },
- Called {
- #[pin]
- fut: S::Future,
- },
- Tmp,
- }
+#[pin_project(project = StateProj, project_replace = StateProjOwn)]
+enum State<S: Service<Req>, Req> {
+ NotReady(S, Req),
+ Called(#[pin] S::Future),
+ Tmp,
}
impl<S, Req> Future for Oneshot<S, Req>
diff --git a/src/service/oneshot.rs b/src/service/oneshot.rs
--- a/src/service/oneshot.rs
+++ b/src/service/oneshot.rs
@@ -52,19 +42,19 @@ where
loop {
match me.state.as_mut().project() {
- StateProj::NotReady { ref mut svc, .. } => {
+ StateProj::NotReady(ref mut svc, _) => {
ready!(svc.poll_ready(cx))?;
// fallthrough out of the match's borrow
}
- StateProj::Called { fut } => {
+ StateProj::Called(fut) => {
return fut.poll(cx);
}
StateProj::Tmp => unreachable!(),
}
match me.state.as_mut().project_replace(State::Tmp) {
- StateProjOwn::NotReady { mut svc, req } => {
- me.state.set(State::Called { fut: svc.call(req) });
+ StateProjOwn::NotReady(mut svc, req) => {
+ me.state.set(State::Called(svc.call(req)));
}
_ => unreachable!(),
}
|
This is interesting. Did it work fine for v0.14.2? What features do you have enabled?
@seanmonstar
Yes, it completed with v0.14.2, in this run: https://github.com/spruceid/didkit/runs/1842355682#step:6:434
Features are: `["server", "client", "http1", "stream"]`
Edit: I can reproduce the errors locally using these features. Seems to work after adding `http2`.
Huh, I see it here too. Seems our CI feature checking doesn't check all the combinations. Enabling `http2` will fix you immediately, we'll need to get this fixed I suppose. Yay...
|
2021-02-05T23:41:20Z
| 2,422
|
Build failure with v0.14.3
Hi,
I get some build errors in CI with the newly released hyper v0.14.3:
In https://github.com/spruceid/didkit/runs/1842461977#step:6:437
```
error[E0433]: failed to resolve: could not find `h2` in `proto`
--> /home/runner/.cargo/registry/src/github.com-1ecc6299db9ec823/hyper-0.14.3/src/client/conn.rs:92:24
|
92 | h2: proto::h2::ClientTask<B>, _phantom: PhantomData<fn(T)>,
| ^^ could not find `h2` in `proto`
error[E0433]: failed to resolve: could not find `h2` in `proto`
--> /home/runner/.cargo/registry/src/github.com-1ecc6299db9ec823/hyper-0.14.3/src/server/conn.rs:187:24
|
187 | h2: proto::h2::Server<Rewind<T>, S, B, E>,
| ^^ could not find `h2` in `proto`
error[E0412]: cannot find type `PhantomData` in this scope
--> /home/runner/.cargo/registry/src/github.com-1ecc6299db9ec823/hyper-0.14.3/src/client/conn.rs:92:53
|
92 | h2: proto::h2::ClientTask<B>, _phantom: PhantomData<fn(T)>,
| ^^^^^^^^^^^ not found in this scope
|
help: consider importing one of these items
|
49 | use core::marker::PhantomData;
|
49 | use crate::client::service::PhantomData;
|
49 | use pin_project_lite::__private::PhantomData;
|
49 | use std::marker::PhantomData;
|
error[E0412]: cannot find type `Rewind` in this scope
--> /home/runner/.cargo/registry/src/github.com-1ecc6299db9ec823/hyper-0.14.3/src/server/conn.rs:187:35
|
187 | h2: proto::h2::Server<Rewind<T>, S, B, E>,
| ^^^^^^ not found in this scope
|
help: consider importing this struct
|
46 | use crate::common::io::Rewind;
|
error[E0599]: no variant named `H2` found for enum `server::conn::ProtoServer<T, B, S, E>`
--> /home/runner/.cargo/registry/src/github.com-1ecc6299db9ec823/hyper-0.14.3/src/server/conn.rs:185:9
|
166 | / pin_project! {
167 | | #[project = ProtoServerProj]
168 | | pub(super) enum ProtoServer<T, B, S, E = Exec>
169 | | where
... |
185 | | H2 {
| | ^^ help: there is a variant with a similar name: `H1`
... |
189 | | }
190 | | }
| |_- variant `H2` not found here
error[E0599]: no variant named `H2` found for enum `ProtoClient<T, B>`
--> /home/runner/.cargo/registry/src/github.com-1ecc6299db9ec823/hyper-0.14.3/src/client/conn.rs:90:9
|
78 | / pin_project! {
79 | | #[project = ProtoClientProj]
80 | | enum ProtoClient<T, B>
81 | | where
... |
90 | | H2 {
| | ^^ help: there is a variant with a similar name: `H1`
... |
94 | | }
95 | | }
| |_- variant `H2` not found here
error[E0063]: missing field `fallback` in initializer of `server::conn::Connection<_, _, _>`
--> /home/runner/.cargo/registry/src/github.com-1ecc6299db9ec823/hyper-0.14.3/src/server/conn.rs:556:9
|
556 | Connection {
| ^^^^^^^^^^ missing `fallback`
error: aborting due to 7 previous errors
Some errors have detailed explanations: E0063, E0412, E0433, E0599.
For more information about an error, try `rustc --explain E0063`.
error: could not compile `hyper`
```
Is there a feature needed? I have not yet been able to reproduce the issue locally, just saw it in CI.
|
hyperium__hyper-2422
|
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -777,7 +776,7 @@ mod tests {
use std::time::Duration;
use super::{Connecting, Key, Pool, Poolable, Reservation, WeakOpt};
- use crate::common::{exec::Exec, task, Future, Pin};
+ use crate::common::{task, exec::Exec, Future, Pin};
/// Test unique reservations.
#[derive(Debug, PartialEq, Eq)]
|
hyperium/hyper
|
a24f0c0af8e1f4c6b7cc3a47c83eb6e4af88aca6
|
[
"2409"
] |
0.14
|
1928682b33f98244435ba6d574677546205a15ec
|
diff --git a/src/headers.rs b/src/headers.rs
--- a/src/headers.rs
+++ b/src/headers.rs
@@ -117,9 +117,9 @@ pub(super) fn add_chunked(mut entry: http::header::OccupiedEntry<'_, HeaderValue
// + 2 for ", "
let new_cap = line.as_bytes().len() + CHUNKED.len() + 2;
let mut buf = BytesMut::with_capacity(new_cap);
- buf.copy_from_slice(line.as_bytes());
- buf.copy_from_slice(b", ");
- buf.copy_from_slice(CHUNKED.as_bytes());
+ buf.extend_from_slice(line.as_bytes());
+ buf.extend_from_slice(b", ");
+ buf.extend_from_slice(CHUNKED.as_bytes());
*line = HeaderValue::from_maybe_shared(buf.freeze())
.expect("original header value plus ascii is valid");
|
2021-01-26T18:04:04Z
| 2,410
|
"Transfer-Encoding" correction code causes a panic
Repro code:
```rust
use hyper::server::Server;
use hyper::service::{make_service_fn, service_fn};
use hyper::{Body, Client, Request, Response};
use std::convert::Infallible;
#[tokio::main]
async fn main() {
let make_service = make_service_fn(|_conn| async {
Ok::<_, Infallible>(service_fn(|_| async {
Ok::<_, Infallible>(Response::builder().body(Body::empty()).unwrap())
}))
});
let server = Server::bind(&"127.0.0.1:0".parse().unwrap()).serve(make_service);
let addr = server.local_addr();
tokio::spawn(async move {
server.await.unwrap();
});
let req = Request::builder()
.method("GET")
.header("Transfer-Encoding", "foo")
.uri(format!("http://{}", addr))
.body(Body::from("foobar"))
.unwrap();
assert!(Client::new().request(req).await.is_ok());
}
```
This panics with "source slice length (3) does not match destination slice length (0)". Apparently the reason is that only capacity is set for the target buffer here: https://github.com/hyperium/hyper/blob/f0ddb669328163001fd18a4a21109e95047848bf/src/headers.rs#L119, but the buffer itself is not filled, so slice copying fails.
|
hyperium__hyper-2410
|
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -401,6 +401,36 @@ test! {
body: None,
}
+test! {
+ name: client_transfer_encoding_repair,
+
+ server:
+ expected: "\
+ GET / HTTP/1.1\r\n\
+ transfer-encoding: foo, chunked\r\n\
+ host: {addr}\r\n\
+ \r\n\
+ 5\r\n\
+ hello\r\n\
+ 0\r\n\r\n\
+ ",
+ reply: REPLY_OK,
+
+ client:
+ request: {
+ method: GET,
+ url: "http://{addr}/",
+ headers: {
+ "transfer-encoding" => "foo",
+ },
+ body: "hello", // not Body::empty
+ },
+ response:
+ status: OK,
+ headers: {},
+ body: None,
+}
+
test! {
name: client_get_req_body_chunked_http10,
|
hyperium/hyper
|
a24f0c0af8e1f4c6b7cc3a47c83eb6e4af88aca6
|
|
[
"2390"
] |
0.14
|
a15f3f7f0f536c74d51636bbc00f6b5ec110472b
|
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -272,7 +272,7 @@ where
ResponseFuture { inner }
}
- pub(crate) fn send_request_retryable(
+ pub(super) fn send_request_retryable(
&mut self,
req: Request<B>,
) -> impl Future<Output = Result<Response<Body>, (crate::Error, Option<Request<B>>)>> + Unpin
diff --git a/src/client/connect/dns.rs b/src/client/connect/dns.rs
--- a/src/client/connect/dns.rs
+++ b/src/client/connect/dns.rs
@@ -346,7 +346,7 @@ mod sealed {
}
}
-pub(crate) async fn resolve<R>(resolver: &mut R, name: Name) -> Result<R::Addrs, R::Error>
+pub(super) async fn resolve<R>(resolver: &mut R, name: Name) -> Result<R::Addrs, R::Error>
where
R: Resolve,
{
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -6,10 +6,10 @@ use tokio::sync::{mpsc, oneshot};
use crate::common::{task, Pin, Poll};
-pub type RetryPromise<T, U> = oneshot::Receiver<Result<U, (crate::Error, Option<T>)>>;
-pub type Promise<T> = oneshot::Receiver<Result<T, crate::Error>>;
+pub(crate) type RetryPromise<T, U> = oneshot::Receiver<Result<U, (crate::Error, Option<T>)>>;
+pub(crate) type Promise<T> = oneshot::Receiver<Result<T, crate::Error>>;
-pub fn channel<T, U>() -> (Sender<T, U>, Receiver<T, U>) {
+pub(crate) fn channel<T, U>() -> (Sender<T, U>, Receiver<T, U>) {
let (tx, rx) = mpsc::unbounded_channel();
let (giver, taker) = want::new();
let tx = Sender {
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -25,7 +25,7 @@ pub fn channel<T, U>() -> (Sender<T, U>, Receiver<T, U>) {
///
/// While the inner sender is unbounded, the Giver is used to determine
/// if the Receiver is ready for another request.
-pub struct Sender<T, U> {
+pub(crate) struct Sender<T, U> {
/// One message is always allowed, even if the Receiver hasn't asked
/// for it yet. This boolean keeps track of whether we've sent one
/// without notice.
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -44,24 +44,24 @@ pub struct Sender<T, U> {
/// Cannot poll the Giver, but can still use it to determine if the Receiver
/// has been dropped. However, this version can be cloned.
#[cfg(feature = "http2")]
-pub struct UnboundedSender<T, U> {
+pub(crate) struct UnboundedSender<T, U> {
/// Only used for `is_closed`, since mpsc::UnboundedSender cannot be checked.
giver: want::SharedGiver,
inner: mpsc::UnboundedSender<Envelope<T, U>>,
}
impl<T, U> Sender<T, U> {
- pub fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<crate::Result<()>> {
+ pub(crate) fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<crate::Result<()>> {
self.giver
.poll_want(cx)
.map_err(|_| crate::Error::new_closed())
}
- pub fn is_ready(&self) -> bool {
+ pub(crate) fn is_ready(&self) -> bool {
self.giver.is_wanting()
}
- pub fn is_closed(&self) -> bool {
+ pub(crate) fn is_closed(&self) -> bool {
self.giver.is_canceled()
}
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -78,7 +78,7 @@ impl<T, U> Sender<T, U> {
}
}
- pub fn try_send(&mut self, val: T) -> Result<RetryPromise<T, U>, T> {
+ pub(crate) fn try_send(&mut self, val: T) -> Result<RetryPromise<T, U>, T> {
if !self.can_send() {
return Err(val);
}
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -89,7 +89,7 @@ impl<T, U> Sender<T, U> {
.map_err(|mut e| (e.0).0.take().expect("envelope not dropped").0)
}
- pub fn send(&mut self, val: T) -> Result<Promise<U>, T> {
+ pub(crate) fn send(&mut self, val: T) -> Result<Promise<U>, T> {
if !self.can_send() {
return Err(val);
}
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -101,7 +101,7 @@ impl<T, U> Sender<T, U> {
}
#[cfg(feature = "http2")]
- pub fn unbound(self) -> UnboundedSender<T, U> {
+ pub(crate) fn unbound(self) -> UnboundedSender<T, U> {
UnboundedSender {
giver: self.giver.shared(),
inner: self.inner,
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -111,15 +111,15 @@ impl<T, U> Sender<T, U> {
#[cfg(feature = "http2")]
impl<T, U> UnboundedSender<T, U> {
- pub fn is_ready(&self) -> bool {
+ pub(crate) fn is_ready(&self) -> bool {
!self.giver.is_canceled()
}
- pub fn is_closed(&self) -> bool {
+ pub(crate) fn is_closed(&self) -> bool {
self.giver.is_canceled()
}
- pub fn try_send(&mut self, val: T) -> Result<RetryPromise<T, U>, T> {
+ pub(crate) fn try_send(&mut self, val: T) -> Result<RetryPromise<T, U>, T> {
let (tx, rx) = oneshot::channel();
self.inner
.send(Envelope(Some((val, Callback::Retry(tx)))))
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -139,7 +139,7 @@ impl<T, U> Clone for UnboundedSender<T, U> {
}
#[pin_project::pin_project(PinnedDrop)]
-pub struct Receiver<T, U> {
+pub(crate) struct Receiver<T, U> {
#[pin]
inner: mpsc::UnboundedReceiver<Envelope<T, U>>,
taker: want::Taker,
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -199,7 +199,7 @@ impl<T, U> Drop for Envelope<T, U> {
}
}
-pub enum Callback<T, U> {
+pub(crate) enum Callback<T, U> {
Retry(oneshot::Sender<Result<U, (crate::Error, Option<T>)>>),
NoRetry(oneshot::Sender<Result<U, crate::Error>>),
}
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -63,7 +63,7 @@ cfg_feature! {
mod client;
pub mod conn;
- pub(crate) mod dispatch;
+ pub(super) mod dispatch;
mod pool;
pub mod service;
}
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -100,7 +100,7 @@ impl Config {
}
impl<T> Pool<T> {
- pub fn new(config: Config, __exec: &Exec) -> Pool<T> {
+ pub(super) fn new(config: Config, __exec: &Exec) -> Pool<T> {
let inner = if config.is_enabled() {
Some(Arc::new(Mutex::new(PoolInner {
connecting: HashSet::new(),
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -140,7 +140,7 @@ impl<T> Pool<T> {
impl<T: Poolable> Pool<T> {
/// Returns a `Checkout` which is a future that resolves if an idle
/// connection becomes available.
- pub fn checkout(&self, key: Key) -> Checkout<T> {
+ pub(super) fn checkout(&self, key: Key) -> Checkout<T> {
Checkout {
key,
pool: self.clone(),
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -489,11 +489,11 @@ pub(super) struct Pooled<T: Poolable> {
}
impl<T: Poolable> Pooled<T> {
- pub fn is_reused(&self) -> bool {
+ pub(super) fn is_reused(&self) -> bool {
self.is_reused
}
- pub fn is_pool_enabled(&self) -> bool {
+ pub(super) fn is_pool_enabled(&self) -> bool {
self.pool.0.is_some()
}
diff --git a/src/common/date.rs b/src/common/date.rs
--- a/src/common/date.rs
+++ b/src/common/date.rs
@@ -8,17 +8,17 @@ use http::header::HeaderValue;
use httpdate::HttpDate;
// "Sun, 06 Nov 1994 08:49:37 GMT".len()
-pub const DATE_VALUE_LENGTH: usize = 29;
+pub(crate) const DATE_VALUE_LENGTH: usize = 29;
#[cfg(feature = "http1")]
-pub fn extend(dst: &mut Vec<u8>) {
+pub(crate) fn extend(dst: &mut Vec<u8>) {
CACHED.with(|cache| {
dst.extend_from_slice(cache.borrow().buffer());
})
}
#[cfg(feature = "http1")]
-pub fn update() {
+pub(crate) fn update() {
CACHED.with(|cache| {
cache.borrow_mut().check();
})
diff --git a/src/common/drain.rs b/src/common/drain.rs
--- a/src/common/drain.rs
+++ b/src/common/drain.rs
@@ -5,19 +5,19 @@ use tokio::sync::watch;
use super::{task, Future, Pin, Poll};
-pub fn channel() -> (Signal, Watch) {
+pub(crate) fn channel() -> (Signal, Watch) {
let (tx, rx) = watch::channel(());
(Signal { tx }, Watch { rx })
}
-pub struct Signal {
+pub(crate) struct Signal {
tx: watch::Sender<()>,
}
-pub struct Draining(Pin<Box<dyn Future<Output = ()> + Send + Sync>>);
+pub(crate) struct Draining(Pin<Box<dyn Future<Output = ()> + Send + Sync>>);
#[derive(Clone)]
-pub struct Watch {
+pub(crate) struct Watch {
rx: watch::Receiver<()>,
}
diff --git a/src/common/drain.rs b/src/common/drain.rs
--- a/src/common/drain.rs
+++ b/src/common/drain.rs
@@ -37,7 +37,7 @@ enum State<F> {
}
impl Signal {
- pub fn drain(self) -> Draining {
+ pub(crate) fn drain(self) -> Draining {
let _ = self.tx.send(());
Draining(Box::pin(async move { self.tx.closed().await }))
}
diff --git a/src/common/drain.rs b/src/common/drain.rs
--- a/src/common/drain.rs
+++ b/src/common/drain.rs
@@ -52,7 +52,7 @@ impl Future for Draining {
}
impl Watch {
- pub fn watch<F, FN>(self, future: F, on_drain: FN) -> Watching<F, FN>
+ pub(crate) fn watch<F, FN>(self, future: F, on_drain: FN) -> Watching<F, FN>
where
F: Future,
FN: FnOnce(Pin<&mut F>),
diff --git a/src/common/exec.rs b/src/common/exec.rs
--- a/src/common/exec.rs
+++ b/src/common/exec.rs
@@ -24,7 +24,7 @@ pub trait NewSvcExec<I, N, S: HttpService<Body>, E, W: Watcher<I, S, E>>: Clone
fn execute_new_svc(&mut self, fut: NewSvcTask<I, N, S, E, W>);
}
-pub type BoxSendFuture = Pin<Box<dyn Future<Output = ()> + Send>>;
+pub(crate) type BoxSendFuture = Pin<Box<dyn Future<Output = ()> + Send>>;
// Either the user provides an executor for background tasks, or we use
// `tokio::spawn`.
diff --git a/src/common/mod.rs b/src/common/mod.rs
--- a/src/common/mod.rs
+++ b/src/common/mod.rs
@@ -29,7 +29,13 @@ pub(crate) mod watch;
#[cfg(any(feature = "http1", feature = "http2"))]
#[cfg(feature = "client")]
pub(crate) use self::lazy::{lazy, Started as Lazy};
-pub use self::never::Never;
+#[cfg(any(
+ feature = "client",
+ feature = "http1",
+ feature = "http2",
+ feature = "runtime"
+))]
+pub(crate) use self::never::Never;
pub(crate) use self::task::Poll;
// group up types normally needed for `Future`
diff --git a/src/common/never.rs b/src/common/never.rs
--- a/src/common/never.rs
+++ b/src/common/never.rs
@@ -6,7 +6,7 @@ use std::error::Error;
use std::fmt;
#[derive(Debug)]
-pub enum Never {}
+pub(crate) enum Never {}
impl fmt::Display for Never {
fn fmt(&self, _: &mut fmt::Formatter<'_>) -> fmt::Result {
diff --git a/src/common/sync_wrapper.rs b/src/common/sync_wrapper.rs
--- a/src/common/sync_wrapper.rs
+++ b/src/common/sync_wrapper.rs
@@ -46,7 +46,7 @@
/// [`poll`]: https://doc.rust-lang.org/std/future/trait.Future.html#method.poll
/// [`Sync`]: https://doc.rust-lang.org/std/marker/trait.Sync.html
#[repr(transparent)]
-pub struct SyncWrapper<T>(T);
+pub(crate) struct SyncWrapper<T>(T);
impl<T> SyncWrapper<T> {
/// Creates a new SyncWrapper containing the given value.
diff --git a/src/common/sync_wrapper.rs b/src/common/sync_wrapper.rs
--- a/src/common/sync_wrapper.rs
+++ b/src/common/sync_wrapper.rs
@@ -58,7 +58,7 @@ impl<T> SyncWrapper<T> {
///
/// let wrapped = SyncWrapper::new(42);
/// ```
- pub fn new(value: T) -> Self {
+ pub(crate) fn new(value: T) -> Self {
Self(value)
}
diff --git a/src/common/sync_wrapper.rs b/src/common/sync_wrapper.rs
--- a/src/common/sync_wrapper.rs
+++ b/src/common/sync_wrapper.rs
@@ -82,7 +82,7 @@ impl<T> SyncWrapper<T> {
/// *value = 0;
/// assert_eq!(*wrapped.get_mut(), 0);
/// ```
- pub fn get_mut(&mut self) -> &mut T {
+ pub(crate) fn get_mut(&mut self) -> &mut T {
&mut self.0
}
diff --git a/src/common/sync_wrapper.rs b/src/common/sync_wrapper.rs
--- a/src/common/sync_wrapper.rs
+++ b/src/common/sync_wrapper.rs
@@ -105,7 +105,7 @@ impl<T> SyncWrapper<T> {
/// assert_eq!(wrapped.into_inner(), 42);
/// ```
#[allow(dead_code)]
- pub fn into_inner(self) -> T {
+ pub(crate) fn into_inner(self) -> T {
self.0
}
}
diff --git a/src/error.rs b/src/error.rs
--- a/src/error.rs
+++ b/src/error.rs
@@ -18,7 +18,7 @@ struct ErrorImpl {
}
#[derive(Debug, PartialEq)]
-pub(crate) enum Kind {
+pub(super) enum Kind {
Parse(Parse),
User(User),
/// A message reached EOF, but is not complete.
diff --git a/src/error.rs b/src/error.rs
--- a/src/error.rs
+++ b/src/error.rs
@@ -64,7 +64,7 @@ pub(crate) enum Kind {
}
#[derive(Debug, PartialEq)]
-pub(crate) enum Parse {
+pub(super) enum Parse {
Method,
Version,
#[cfg(feature = "http1")]
diff --git a/src/error.rs b/src/error.rs
--- a/src/error.rs
+++ b/src/error.rs
@@ -76,7 +76,7 @@ pub(crate) enum Parse {
}
#[derive(Debug, PartialEq)]
-pub(crate) enum User {
+pub(super) enum User {
/// Error calling user's HttpBody::poll_data().
#[cfg(any(feature = "http1", feature = "http2"))]
Body,
diff --git a/src/error.rs b/src/error.rs
--- a/src/error.rs
+++ b/src/error.rs
@@ -124,7 +124,7 @@ pub(crate) enum User {
// Sentinel type to indicate the error was caused by a timeout.
#[derive(Debug)]
-pub(crate) struct TimedOut;
+pub(super) struct TimedOut;
impl Error {
/// Returns true if this was an HTTP parse error.
diff --git a/src/error.rs b/src/error.rs
--- a/src/error.rs
+++ b/src/error.rs
@@ -172,19 +172,19 @@ impl Error {
self.inner.cause
}
- pub(crate) fn new(kind: Kind) -> Error {
+ pub(super) fn new(kind: Kind) -> Error {
Error {
inner: Box::new(ErrorImpl { kind, cause: None }),
}
}
- pub(crate) fn with<C: Into<Cause>>(mut self, cause: C) -> Error {
+ pub(super) fn with<C: Into<Cause>>(mut self, cause: C) -> Error {
self.inner.cause = Some(cause.into());
self
}
#[cfg(any(all(feature = "http1", feature = "server"), feature = "ffi"))]
- pub(crate) fn kind(&self) -> &Kind {
+ pub(super) fn kind(&self) -> &Kind {
&self.inner.kind
}
diff --git a/src/error.rs b/src/error.rs
--- a/src/error.rs
+++ b/src/error.rs
@@ -202,7 +202,7 @@ impl Error {
}
#[cfg(feature = "http2")]
- pub(crate) fn h2_reason(&self) -> h2::Reason {
+ pub(super) fn h2_reason(&self) -> h2::Reason {
// Find an h2::Reason somewhere in the cause stack, if it exists,
// otherwise assume an INTERNAL_ERROR.
self.find_source::<h2::Error>()
diff --git a/src/error.rs b/src/error.rs
--- a/src/error.rs
+++ b/src/error.rs
@@ -210,68 +210,68 @@ impl Error {
.unwrap_or(h2::Reason::INTERNAL_ERROR)
}
- pub(crate) fn new_canceled() -> Error {
+ pub(super) fn new_canceled() -> Error {
Error::new(Kind::Canceled)
}
#[cfg(feature = "http1")]
- pub(crate) fn new_incomplete() -> Error {
+ pub(super) fn new_incomplete() -> Error {
Error::new(Kind::IncompleteMessage)
}
#[cfg(feature = "http1")]
- pub(crate) fn new_too_large() -> Error {
+ pub(super) fn new_too_large() -> Error {
Error::new(Kind::Parse(Parse::TooLarge))
}
#[cfg(feature = "http1")]
- pub(crate) fn new_version_h2() -> Error {
+ pub(super) fn new_version_h2() -> Error {
Error::new(Kind::Parse(Parse::VersionH2))
}
#[cfg(feature = "http1")]
- pub(crate) fn new_unexpected_message() -> Error {
+ pub(super) fn new_unexpected_message() -> Error {
Error::new(Kind::UnexpectedMessage)
}
#[cfg(any(feature = "http1", feature = "http2"))]
- pub(crate) fn new_io(cause: std::io::Error) -> Error {
+ pub(super) fn new_io(cause: std::io::Error) -> Error {
Error::new(Kind::Io).with(cause)
}
#[cfg(all(any(feature = "http1", feature = "http2"), feature = "tcp"))]
#[cfg(feature = "server")]
- pub(crate) fn new_listen<E: Into<Cause>>(cause: E) -> Error {
+ pub(super) fn new_listen<E: Into<Cause>>(cause: E) -> Error {
Error::new(Kind::Listen).with(cause)
}
#[cfg(any(feature = "http1", feature = "http2"))]
#[cfg(feature = "server")]
- pub(crate) fn new_accept<E: Into<Cause>>(cause: E) -> Error {
+ pub(super) fn new_accept<E: Into<Cause>>(cause: E) -> Error {
Error::new(Kind::Accept).with(cause)
}
#[cfg(any(feature = "http1", feature = "http2"))]
#[cfg(feature = "client")]
- pub(crate) fn new_connect<E: Into<Cause>>(cause: E) -> Error {
+ pub(super) fn new_connect<E: Into<Cause>>(cause: E) -> Error {
Error::new(Kind::Connect).with(cause)
}
- pub(crate) fn new_closed() -> Error {
+ pub(super) fn new_closed() -> Error {
Error::new(Kind::ChannelClosed)
}
#[cfg(any(feature = "http1", feature = "http2", feature = "stream"))]
- pub(crate) fn new_body<E: Into<Cause>>(cause: E) -> Error {
+ pub(super) fn new_body<E: Into<Cause>>(cause: E) -> Error {
Error::new(Kind::Body).with(cause)
}
#[cfg(any(feature = "http1", feature = "http2"))]
- pub(crate) fn new_body_write<E: Into<Cause>>(cause: E) -> Error {
+ pub(super) fn new_body_write<E: Into<Cause>>(cause: E) -> Error {
Error::new(Kind::BodyWrite).with(cause)
}
- pub(crate) fn new_body_write_aborted() -> Error {
+ pub(super) fn new_body_write_aborted() -> Error {
Error::new(Kind::BodyWriteAborted)
}
diff --git a/src/error.rs b/src/error.rs
--- a/src/error.rs
+++ b/src/error.rs
@@ -281,71 +281,71 @@ impl Error {
#[cfg(feature = "http1")]
#[cfg(feature = "server")]
- pub(crate) fn new_user_header() -> Error {
+ pub(super) fn new_user_header() -> Error {
Error::new_user(User::UnexpectedHeader)
}
#[cfg(any(feature = "http1", feature = "http2"))]
#[cfg(feature = "client")]
- pub(crate) fn new_user_unsupported_version() -> Error {
+ pub(super) fn new_user_unsupported_version() -> Error {
Error::new_user(User::UnsupportedVersion)
}
#[cfg(any(feature = "http1", feature = "http2"))]
#[cfg(feature = "client")]
- pub(crate) fn new_user_unsupported_request_method() -> Error {
+ pub(super) fn new_user_unsupported_request_method() -> Error {
Error::new_user(User::UnsupportedRequestMethod)
}
#[cfg(feature = "http1")]
#[cfg(feature = "server")]
- pub(crate) fn new_user_unsupported_status_code() -> Error {
+ pub(super) fn new_user_unsupported_status_code() -> Error {
Error::new_user(User::UnsupportedStatusCode)
}
#[cfg(any(feature = "http1", feature = "http2"))]
#[cfg(feature = "client")]
- pub(crate) fn new_user_absolute_uri_required() -> Error {
+ pub(super) fn new_user_absolute_uri_required() -> Error {
Error::new_user(User::AbsoluteUriRequired)
}
- pub(crate) fn new_user_no_upgrade() -> Error {
+ pub(super) fn new_user_no_upgrade() -> Error {
Error::new_user(User::NoUpgrade)
}
#[cfg(feature = "http1")]
- pub(crate) fn new_user_manual_upgrade() -> Error {
+ pub(super) fn new_user_manual_upgrade() -> Error {
Error::new_user(User::ManualUpgrade)
}
#[cfg(any(feature = "http1", feature = "http2"))]
#[cfg(feature = "server")]
- pub(crate) fn new_user_make_service<E: Into<Cause>>(cause: E) -> Error {
+ pub(super) fn new_user_make_service<E: Into<Cause>>(cause: E) -> Error {
Error::new_user(User::MakeService).with(cause)
}
#[cfg(any(feature = "http1", feature = "http2"))]
- pub(crate) fn new_user_service<E: Into<Cause>>(cause: E) -> Error {
+ pub(super) fn new_user_service<E: Into<Cause>>(cause: E) -> Error {
Error::new_user(User::Service).with(cause)
}
#[cfg(any(feature = "http1", feature = "http2"))]
- pub(crate) fn new_user_body<E: Into<Cause>>(cause: E) -> Error {
+ pub(super) fn new_user_body<E: Into<Cause>>(cause: E) -> Error {
Error::new_user(User::Body).with(cause)
}
#[cfg(feature = "http1")]
- pub(crate) fn new_shutdown(cause: std::io::Error) -> Error {
+ pub(super) fn new_shutdown(cause: std::io::Error) -> Error {
Error::new(Kind::Shutdown).with(cause)
}
#[cfg(feature = "ffi")]
- pub(crate) fn new_user_aborted_by_callback() -> Error {
+ pub(super) fn new_user_aborted_by_callback() -> Error {
Error::new_user(User::AbortedByCallback)
}
#[cfg(feature = "http2")]
- pub(crate) fn new_h2(cause: ::h2::Error) -> Error {
+ pub(super) fn new_h2(cause: ::h2::Error) -> Error {
if cause.is_io() {
Error::new_io(cause.into_io().expect("h2::Error::is_io"))
} else {
diff --git a/src/ffi/mod.rs b/src/ffi/mod.rs
--- a/src/ffi/mod.rs
+++ b/src/ffi/mod.rs
@@ -1,5 +1,7 @@
// We have a lot of c-types in here, stop warning about their names!
#![allow(non_camel_case_types)]
+// unreachable_pub warns `#[no_mangle] pub extern fn` in private mod.
+#![allow(unreachable_pub)]
// We may eventually allow the FFI to be enabled without `client` or `http1`,
// that is why we don't auto enable them as `ffi = ["client", "http1"]` in
diff --git a/src/headers.rs b/src/headers.rs
--- a/src/headers.rs
+++ b/src/headers.rs
@@ -8,12 +8,12 @@ use http::Method;
use http::HeaderMap;
#[cfg(feature = "http1")]
-pub fn connection_keep_alive(value: &HeaderValue) -> bool {
+pub(super) fn connection_keep_alive(value: &HeaderValue) -> bool {
connection_has(value, "keep-alive")
}
#[cfg(feature = "http1")]
-pub fn connection_close(value: &HeaderValue) -> bool {
+pub(super) fn connection_close(value: &HeaderValue) -> bool {
connection_has(value, "close")
}
diff --git a/src/headers.rs b/src/headers.rs
--- a/src/headers.rs
+++ b/src/headers.rs
@@ -31,15 +31,15 @@ fn connection_has(value: &HeaderValue, needle: &str) -> bool {
#[cfg(feature = "http1")]
#[cfg(feature = "server")]
-pub fn content_length_parse(value: &HeaderValue) -> Option<u64> {
+pub(super) fn content_length_parse(value: &HeaderValue) -> Option<u64> {
value.to_str().ok().and_then(|s| s.parse().ok())
}
-pub fn content_length_parse_all(headers: &HeaderMap) -> Option<u64> {
+pub(super) fn content_length_parse_all(headers: &HeaderMap) -> Option<u64> {
content_length_parse_all_values(headers.get_all(CONTENT_LENGTH).into_iter())
}
-pub fn content_length_parse_all_values(values: ValueIter<'_, HeaderValue>) -> Option<u64> {
+pub(super) fn content_length_parse_all_values(values: ValueIter<'_, HeaderValue>) -> Option<u64> {
// If multiple Content-Length headers were sent, everything can still
// be alright if they all contain the same value, and all parse
// correctly. If not, then it's an error.
diff --git a/src/headers.rs b/src/headers.rs
--- a/src/headers.rs
+++ b/src/headers.rs
@@ -68,7 +68,7 @@ pub fn content_length_parse_all_values(values: ValueIter<'_, HeaderValue>) -> Op
#[cfg(feature = "http2")]
#[cfg(feature = "client")]
-pub fn method_has_defined_payload_semantics(method: &Method) -> bool {
+pub(super) fn method_has_defined_payload_semantics(method: &Method) -> bool {
match *method {
Method::GET | Method::HEAD | Method::DELETE | Method::CONNECT => false,
_ => true,
diff --git a/src/headers.rs b/src/headers.rs
--- a/src/headers.rs
+++ b/src/headers.rs
@@ -76,19 +76,19 @@ pub fn method_has_defined_payload_semantics(method: &Method) -> bool {
}
#[cfg(feature = "http2")]
-pub fn set_content_length_if_missing(headers: &mut HeaderMap, len: u64) {
+pub(super) fn set_content_length_if_missing(headers: &mut HeaderMap, len: u64) {
headers
.entry(CONTENT_LENGTH)
.or_insert_with(|| HeaderValue::from(len));
}
#[cfg(feature = "http1")]
-pub fn transfer_encoding_is_chunked(headers: &HeaderMap) -> bool {
+pub(super) fn transfer_encoding_is_chunked(headers: &HeaderMap) -> bool {
is_chunked(headers.get_all(http::header::TRANSFER_ENCODING).into_iter())
}
#[cfg(feature = "http1")]
-pub fn is_chunked(mut encodings: ValueIter<'_, HeaderValue>) -> bool {
+pub(super) fn is_chunked(mut encodings: ValueIter<'_, HeaderValue>) -> bool {
// chunked must always be the last encoding, according to spec
if let Some(line) = encodings.next_back() {
return is_chunked_(line);
diff --git a/src/headers.rs b/src/headers.rs
--- a/src/headers.rs
+++ b/src/headers.rs
@@ -98,7 +98,7 @@ pub fn is_chunked(mut encodings: ValueIter<'_, HeaderValue>) -> bool {
}
#[cfg(feature = "http1")]
-pub fn is_chunked_(value: &HeaderValue) -> bool {
+pub(super) fn is_chunked_(value: &HeaderValue) -> bool {
// chunked must always be the last encoding, according to spec
if let Ok(s) = value.to_str() {
if let Some(encoding) = s.rsplit(',').next() {
diff --git a/src/headers.rs b/src/headers.rs
--- a/src/headers.rs
+++ b/src/headers.rs
@@ -110,7 +110,7 @@ pub fn is_chunked_(value: &HeaderValue) -> bool {
}
#[cfg(feature = "http1")]
-pub fn add_chunked(mut entry: http::header::OccupiedEntry<'_, HeaderValue>) {
+pub(super) fn add_chunked(mut entry: http::header::OccupiedEntry<'_, HeaderValue>) {
const CHUNKED: &str = "chunked";
if let Some(line) = entry.iter_mut().next_back() {
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -35,7 +35,7 @@ where
B: Buf,
T: Http1Transaction,
{
- pub fn new(io: I) -> Conn<I, B, T> {
+ pub(crate) fn new(io: I) -> Conn<I, B, T> {
Conn {
io: Buffered::new(io),
state: State {
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -60,21 +60,21 @@ where
}
#[cfg(feature = "server")]
- pub fn set_flush_pipeline(&mut self, enabled: bool) {
+ pub(crate) fn set_flush_pipeline(&mut self, enabled: bool) {
self.io.set_flush_pipeline(enabled);
}
- pub fn set_max_buf_size(&mut self, max: usize) {
+ pub(crate) fn set_max_buf_size(&mut self, max: usize) {
self.io.set_max_buf_size(max);
}
#[cfg(feature = "client")]
- pub fn set_read_buf_exact_size(&mut self, sz: usize) {
+ pub(crate) fn set_read_buf_exact_size(&mut self, sz: usize) {
self.io.set_read_buf_exact_size(sz);
}
#[cfg(feature = "client")]
- pub fn set_title_case_headers(&mut self) {
+ pub(crate) fn set_title_case_headers(&mut self) {
self.state.title_case_headers = true;
}
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -83,23 +83,23 @@ where
self.state.allow_half_close = true;
}
- pub fn into_inner(self) -> (I, Bytes) {
+ pub(crate) fn into_inner(self) -> (I, Bytes) {
self.io.into_inner()
}
- pub fn pending_upgrade(&mut self) -> Option<crate::upgrade::Pending> {
+ pub(crate) fn pending_upgrade(&mut self) -> Option<crate::upgrade::Pending> {
self.state.upgrade.take()
}
- pub fn is_read_closed(&self) -> bool {
+ pub(crate) fn is_read_closed(&self) -> bool {
self.state.is_read_closed()
}
- pub fn is_write_closed(&self) -> bool {
+ pub(crate) fn is_write_closed(&self) -> bool {
self.state.is_write_closed()
}
- pub fn can_read_head(&self) -> bool {
+ pub(crate) fn can_read_head(&self) -> bool {
match self.state.reading {
Reading::Init => {
if T::should_read_first() {
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -115,7 +115,7 @@ where
}
}
- pub fn can_read_body(&self) -> bool {
+ pub(crate) fn can_read_body(&self) -> bool {
match self.state.reading {
Reading::Body(..) | Reading::Continue(..) => true,
_ => false,
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -211,7 +211,7 @@ where
}
}
- pub fn poll_read_body(
+ pub(crate) fn poll_read_body(
&mut self,
cx: &mut task::Context<'_>,
) -> Poll<Option<io::Result<Bytes>>> {
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -268,13 +268,13 @@ where
ret
}
- pub fn wants_read_again(&mut self) -> bool {
+ pub(crate) fn wants_read_again(&mut self) -> bool {
let ret = self.state.notify_read;
self.state.notify_read = false;
ret
}
- pub fn poll_read_keep_alive(&mut self, cx: &mut task::Context<'_>) -> Poll<crate::Result<()>> {
+ pub(crate) fn poll_read_keep_alive(&mut self, cx: &mut task::Context<'_>) -> Poll<crate::Result<()>> {
debug_assert!(!self.can_read_head() && !self.can_read_body());
if self.is_read_closed() {
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -412,7 +412,7 @@ where
self.maybe_notify(cx);
}
- pub fn can_write_head(&self) -> bool {
+ pub(crate) fn can_write_head(&self) -> bool {
if !T::should_read_first() {
if let Reading::Closed = self.state.reading {
return false;
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -424,18 +424,18 @@ where
}
}
- pub fn can_write_body(&self) -> bool {
+ pub(crate) fn can_write_body(&self) -> bool {
match self.state.writing {
Writing::Body(..) => true,
Writing::Init | Writing::KeepAlive | Writing::Closed => false,
}
}
- pub fn can_buffer_body(&self) -> bool {
+ pub(crate) fn can_buffer_body(&self) -> bool {
self.io.can_buffer()
}
- pub fn write_head(&mut self, head: MessageHead<T::Outgoing>, body: Option<BodyLength>) {
+ pub(crate) fn write_head(&mut self, head: MessageHead<T::Outgoing>, body: Option<BodyLength>) {
if let Some(encoder) = self.encode_head(head, body) {
self.state.writing = if !encoder.is_eof() {
Writing::Body(encoder)
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -447,7 +447,7 @@ where
}
}
- pub fn write_full_msg(&mut self, head: MessageHead<T::Outgoing>, body: B) {
+ pub(crate) fn write_full_msg(&mut self, head: MessageHead<T::Outgoing>, body: B) {
if let Some(encoder) =
self.encode_head(head, Some(BodyLength::Known(body.remaining() as u64)))
{
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -555,7 +555,7 @@ where
// the user's headers be.
}
- pub fn write_body(&mut self, chunk: B) {
+ pub(crate) fn write_body(&mut self, chunk: B) {
debug_assert!(self.can_write_body() && self.can_buffer_body());
// empty chunks should be discarded at Dispatcher level
debug_assert!(chunk.remaining() != 0);
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -580,7 +580,7 @@ where
self.state.writing = state;
}
- pub fn write_body_and_end(&mut self, chunk: B) {
+ pub(crate) fn write_body_and_end(&mut self, chunk: B) {
debug_assert!(self.can_write_body() && self.can_buffer_body());
// empty chunks should be discarded at Dispatcher level
debug_assert!(chunk.remaining() != 0);
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -600,7 +600,7 @@ where
self.state.writing = state;
}
- pub fn end_body(&mut self) -> crate::Result<()> {
+ pub(crate) fn end_body(&mut self) -> crate::Result<()> {
debug_assert!(self.can_write_body());
let mut res = Ok(());
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -657,14 +657,14 @@ where
Err(err)
}
- pub fn poll_flush(&mut self, cx: &mut task::Context<'_>) -> Poll<io::Result<()>> {
+ pub(crate) fn poll_flush(&mut self, cx: &mut task::Context<'_>) -> Poll<io::Result<()>> {
ready!(Pin::new(&mut self.io).poll_flush(cx))?;
self.try_keep_alive(cx);
trace!("flushed({}): {:?}", T::LOG, self.state);
Poll::Ready(Ok(()))
}
- pub fn poll_shutdown(&mut self, cx: &mut task::Context<'_>) -> Poll<io::Result<()>> {
+ pub(crate) fn poll_shutdown(&mut self, cx: &mut task::Context<'_>) -> Poll<io::Result<()>> {
match ready!(Pin::new(self.io.io_mut()).poll_shutdown(cx)) {
Ok(()) => {
trace!("shut down IO complete");
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -691,16 +691,16 @@ where
}
}
- pub fn close_read(&mut self) {
+ pub(crate) fn close_read(&mut self) {
self.state.close_read();
}
- pub fn close_write(&mut self) {
+ pub(crate) fn close_write(&mut self) {
self.state.close_write();
}
#[cfg(feature = "server")]
- pub fn disable_keep_alive(&mut self) {
+ pub(crate) fn disable_keep_alive(&mut self) {
if self.state.is_idle() {
trace!("disable_keep_alive; closing idle connection");
self.state.close();
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -710,7 +710,7 @@ where
}
}
- pub fn take_error(&mut self) -> crate::Result<()> {
+ pub(crate) fn take_error(&mut self) -> crate::Result<()> {
if let Some(err) = self.state.error.take() {
Err(err)
} else {
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -17,7 +17,7 @@ use self::Kind::{Chunked, Eof, Length};
/// If a message body does not include a Transfer-Encoding, it *should*
/// include a Content-Length header.
#[derive(Clone, PartialEq)]
-pub struct Decoder {
+pub(crate) struct Decoder {
kind: Kind,
}
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -65,19 +65,19 @@ enum ChunkedState {
impl Decoder {
// constructors
- pub fn length(x: u64) -> Decoder {
+ pub(crate) fn length(x: u64) -> Decoder {
Decoder {
kind: Kind::Length(x),
}
}
- pub fn chunked() -> Decoder {
+ pub(crate) fn chunked() -> Decoder {
Decoder {
kind: Kind::Chunked(ChunkedState::Size, 0),
}
}
- pub fn eof() -> Decoder {
+ pub(crate) fn eof() -> Decoder {
Decoder {
kind: Kind::Eof(false),
}
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -93,11 +93,11 @@ impl Decoder {
// methods
- pub fn is_eof(&self) -> bool {
+ pub(crate) fn is_eof(&self) -> bool {
matches!(self.kind, Length(0) | Chunked(ChunkedState::End, _) | Eof(true))
}
- pub fn decode<R: MemRead>(
+ pub(crate) fn decode<R: MemRead>(
&mut self,
cx: &mut task::Context<'_>,
body: &mut R,
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -37,7 +37,7 @@ pub(crate) trait Dispatch {
cfg_server! {
use crate::service::HttpService;
- pub struct Server<S: HttpService<B>, B> {
+ pub(crate) struct Server<S: HttpService<B>, B> {
in_flight: Pin<Box<Option<S::Future>>>,
pub(crate) service: S,
}
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -45,7 +45,7 @@ cfg_server! {
cfg_client! {
#[pin_project::pin_project]
- pub struct Client<B> {
+ pub(crate) struct Client<B> {
callback: Option<crate::client::dispatch::Callback<Request<B>, http::Response<Body>>>,
#[pin]
rx: ClientRx<B>,
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -68,7 +68,7 @@ where
Bs: HttpBody + 'static,
Bs::Error: Into<Box<dyn StdError + Send + Sync>>,
{
- pub fn new(dispatch: D, conn: Conn<I, Bs::Data, T>) -> Self {
+ pub(crate) fn new(dispatch: D, conn: Conn<I, Bs::Data, T>) -> Self {
Dispatcher {
conn,
dispatch,
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -79,14 +79,14 @@ where
}
#[cfg(feature = "server")]
- pub fn disable_keep_alive(&mut self) {
+ pub(crate) fn disable_keep_alive(&mut self) {
self.conn.disable_keep_alive();
if self.conn.is_write_closed() {
self.close();
}
}
- pub fn into_inner(self) -> (I, Bytes, D) {
+ pub(crate) fn into_inner(self) -> (I, Bytes, D) {
let (io, buf) = self.conn.into_inner();
(io, buf, self.dispatch)
}
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -454,14 +454,14 @@ cfg_server! {
where
S: HttpService<B>,
{
- pub fn new(service: S) -> Server<S, B> {
+ pub(crate) fn new(service: S) -> Server<S, B> {
Server {
in_flight: Box::pin(None),
service,
}
}
- pub fn into_service(self) -> S {
+ pub(crate) fn into_service(self) -> S {
self.service
}
}
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -538,7 +538,7 @@ cfg_server! {
cfg_client! {
impl<B> Client<B> {
- pub fn new(rx: ClientRx<B>) -> Client<B> {
+ pub(crate) fn new(rx: ClientRx<B>) -> Client<B> {
Client {
callback: None,
rx,
diff --git a/src/proto/h1/encode.rs b/src/proto/h1/encode.rs
--- a/src/proto/h1/encode.rs
+++ b/src/proto/h1/encode.rs
@@ -10,18 +10,18 @@ type StaticBuf = &'static [u8];
/// Encoders to handle different Transfer-Encodings.
#[derive(Debug, Clone, PartialEq)]
-pub struct Encoder {
+pub(crate) struct Encoder {
kind: Kind,
is_last: bool,
}
#[derive(Debug)]
-pub struct EncodedBuf<B> {
+pub(crate) struct EncodedBuf<B> {
kind: BufKind<B>,
}
#[derive(Debug)]
-pub struct NotEof;
+pub(crate) struct NotEof;
#[derive(Debug, PartialEq, Clone)]
enum Kind {
diff --git a/src/proto/h1/encode.rs b/src/proto/h1/encode.rs
--- a/src/proto/h1/encode.rs
+++ b/src/proto/h1/encode.rs
@@ -54,34 +54,34 @@ impl Encoder {
is_last: false,
}
}
- pub fn chunked() -> Encoder {
+ pub(crate) fn chunked() -> Encoder {
Encoder::new(Kind::Chunked)
}
- pub fn length(len: u64) -> Encoder {
+ pub(crate) fn length(len: u64) -> Encoder {
Encoder::new(Kind::Length(len))
}
#[cfg(feature = "server")]
- pub fn close_delimited() -> Encoder {
+ pub(crate) fn close_delimited() -> Encoder {
Encoder::new(Kind::CloseDelimited)
}
- pub fn is_eof(&self) -> bool {
+ pub(crate) fn is_eof(&self) -> bool {
matches!(self.kind, Kind::Length(0))
}
#[cfg(feature = "server")]
- pub fn set_last(mut self, is_last: bool) -> Self {
+ pub(crate) fn set_last(mut self, is_last: bool) -> Self {
self.is_last = is_last;
self
}
- pub fn is_last(&self) -> bool {
+ pub(crate) fn is_last(&self) -> bool {
self.is_last
}
- pub fn is_close_delimited(&self) -> bool {
+ pub(crate) fn is_close_delimited(&self) -> bool {
match self.kind {
#[cfg(feature = "server")]
Kind::CloseDelimited => true,
diff --git a/src/proto/h1/encode.rs b/src/proto/h1/encode.rs
--- a/src/proto/h1/encode.rs
+++ b/src/proto/h1/encode.rs
@@ -89,7 +89,7 @@ impl Encoder {
}
}
- pub fn end<B>(&self) -> Result<Option<EncodedBuf<B>>, NotEof> {
+ pub(crate) fn end<B>(&self) -> Result<Option<EncodedBuf<B>>, NotEof> {
match self.kind {
Kind::Length(0) => Ok(None),
Kind::Chunked => Ok(Some(EncodedBuf {
diff --git a/src/proto/h1/encode.rs b/src/proto/h1/encode.rs
--- a/src/proto/h1/encode.rs
+++ b/src/proto/h1/encode.rs
@@ -101,7 +101,7 @@ impl Encoder {
}
}
- pub fn encode<B>(&mut self, msg: B) -> EncodedBuf<B>
+ pub(crate) fn encode<B>(&mut self, msg: B) -> EncodedBuf<B>
where
B: Buf,
{
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -15,7 +15,7 @@ use crate::common::{task, Pin, Poll};
pub(crate) const INIT_BUFFER_SIZE: usize = 8192;
/// The minimum value that can be set to max buffer size.
-pub const MINIMUM_MAX_BUFFER_SIZE: usize = INIT_BUFFER_SIZE;
+pub(crate) const MINIMUM_MAX_BUFFER_SIZE: usize = INIT_BUFFER_SIZE;
/// The default maximum read buffer size. If the buffer gets this big and
/// a message is still not complete, a `TooLarge` error is triggered.
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -29,7 +29,7 @@ pub(crate) const DEFAULT_MAX_BUFFER_SIZE: usize = 8192 + 4096 * 100;
/// forces a flush if the queue gets this big.
const MAX_BUF_LIST_BUFFERS: usize = 16;
-pub struct Buffered<T, B> {
+pub(crate) struct Buffered<T, B> {
flush_pipeline: bool,
io: T,
read_blocked: bool,
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -55,7 +55,7 @@ where
T: AsyncRead + AsyncWrite + Unpin,
B: Buf,
{
- pub fn new(io: T) -> Buffered<T, B> {
+ pub(crate) fn new(io: T) -> Buffered<T, B> {
let write_buf = WriteBuf::new(&io);
Buffered {
flush_pipeline: false,
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -68,7 +68,7 @@ where
}
#[cfg(feature = "server")]
- pub fn set_flush_pipeline(&mut self, enabled: bool) {
+ pub(crate) fn set_flush_pipeline(&mut self, enabled: bool) {
debug_assert!(!self.write_buf.has_remaining());
self.flush_pipeline = enabled;
if enabled {
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -76,7 +76,7 @@ where
}
}
- pub fn set_max_buf_size(&mut self, max: usize) {
+ pub(crate) fn set_max_buf_size(&mut self, max: usize) {
assert!(
max >= MINIMUM_MAX_BUFFER_SIZE,
"The max_buf_size cannot be smaller than {}.",
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -87,19 +87,19 @@ where
}
#[cfg(feature = "client")]
- pub fn set_read_buf_exact_size(&mut self, sz: usize) {
+ pub(crate) fn set_read_buf_exact_size(&mut self, sz: usize) {
self.read_buf_strategy = ReadStrategy::Exact(sz);
}
#[cfg(feature = "server")]
- pub fn set_write_strategy_flatten(&mut self) {
+ pub(crate) fn set_write_strategy_flatten(&mut self) {
// this should always be called only at construction time,
// so this assert is here to catch myself
debug_assert!(self.write_buf.queue.bufs_cnt() == 0);
self.write_buf.set_strategy(WriteStrategy::Flatten);
}
- pub fn read_buf(&self) -> &[u8] {
+ pub(crate) fn read_buf(&self) -> &[u8] {
self.read_buf.as_ref()
}
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -115,7 +115,7 @@ where
self.read_buf.capacity() - self.read_buf.len()
}
- pub fn headers_buf(&mut self) -> &mut Vec<u8> {
+ pub(crate) fn headers_buf(&mut self) -> &mut Vec<u8> {
let buf = self.write_buf.headers_mut();
&mut buf.bytes
}
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -124,15 +124,15 @@ where
&mut self.write_buf
}
- pub fn buffer<BB: Buf + Into<B>>(&mut self, buf: BB) {
+ pub(crate) fn buffer<BB: Buf + Into<B>>(&mut self, buf: BB) {
self.write_buf.buffer(buf)
}
- pub fn can_buffer(&self) -> bool {
+ pub(crate) fn can_buffer(&self) -> bool {
self.flush_pipeline || self.write_buf.can_buffer()
}
- pub fn consume_leading_lines(&mut self) {
+ pub(crate) fn consume_leading_lines(&mut self) {
if !self.read_buf.is_empty() {
let mut i = 0;
while i < self.read_buf.len() {
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -182,7 +182,7 @@ where
}
}
- pub fn poll_read_from_io(&mut self, cx: &mut task::Context<'_>) -> Poll<io::Result<usize>> {
+ pub(crate) fn poll_read_from_io(&mut self, cx: &mut task::Context<'_>) -> Poll<io::Result<usize>> {
self.read_blocked = false;
let next = self.read_buf_strategy.next();
if self.read_buf_remaining_mut() < next {
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -212,19 +212,19 @@ where
}
}
- pub fn into_inner(self) -> (T, Bytes) {
+ pub(crate) fn into_inner(self) -> (T, Bytes) {
(self.io, self.read_buf.freeze())
}
- pub fn io_mut(&mut self) -> &mut T {
+ pub(crate) fn io_mut(&mut self) -> &mut T {
&mut self.io
}
- pub fn is_read_blocked(&self) -> bool {
+ pub(crate) fn is_read_blocked(&self) -> bool {
self.read_blocked
}
- pub fn poll_flush(&mut self, cx: &mut task::Context<'_>) -> Poll<io::Result<()>> {
+ pub(crate) fn poll_flush(&mut self, cx: &mut task::Context<'_>) -> Poll<io::Result<()>> {
if self.flush_pipeline && !self.read_buf.is_empty() {
Poll::Ready(Ok(()))
} else if self.write_buf.remaining() == 0 {
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -293,7 +293,7 @@ where
impl<T: Unpin, B> Unpin for Buffered<T, B> {}
// TODO: This trait is old... at least rename to PollBytes or something...
-pub trait MemRead {
+pub(crate) trait MemRead {
fn read_mem(&mut self, cx: &mut task::Context<'_>, len: usize) -> Poll<io::Result<Bytes>>;
}
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -402,7 +402,7 @@ impl Default for ReadStrategy {
}
#[derive(Clone)]
-pub struct Cursor<T> {
+pub(crate) struct Cursor<T> {
bytes: T,
pos: usize,
}
diff --git a/src/proto/h1/mod.rs b/src/proto/h1/mod.rs
--- a/src/proto/h1/mod.rs
+++ b/src/proto/h1/mod.rs
@@ -5,11 +5,11 @@ use crate::body::DecodedLength;
use crate::proto::{BodyLength, MessageHead};
pub(crate) use self::conn::Conn;
-pub use self::decode::Decoder;
+pub(crate) use self::decode::Decoder;
pub(crate) use self::dispatch::Dispatcher;
-pub use self::encode::{EncodedBuf, Encoder};
-pub use self::io::Cursor; //TODO: move out of h1::io
-pub use self::io::MINIMUM_MAX_BUFFER_SIZE;
+pub(crate) use self::encode::{EncodedBuf, Encoder};
+ //TODO: move out of h1::io
+pub(crate) use self::io::MINIMUM_MAX_BUFFER_SIZE;
mod conn;
mod decode;
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -136,7 +136,7 @@ where
}
}
- pub fn graceful_shutdown(&mut self) {
+ pub(crate) fn graceful_shutdown(&mut self) {
trace!("graceful_shutdown");
match self.state {
State::Handshaking { .. } => {
diff --git a/src/proto/mod.rs b/src/proto/mod.rs
--- a/src/proto/mod.rs
+++ b/src/proto/mod.rs
@@ -17,33 +17,33 @@ cfg_http2! {
/// An Incoming Message head. Includes request/status line, and headers.
#[derive(Debug, Default)]
-pub struct MessageHead<S> {
+pub(crate) struct MessageHead<S> {
/// HTTP version of the message.
- pub version: http::Version,
+ pub(crate) version: http::Version,
/// Subject (request line or status line) of Incoming message.
- pub subject: S,
+ pub(crate) subject: S,
/// Headers of the Incoming message.
- pub headers: http::HeaderMap,
+ pub(crate) headers: http::HeaderMap,
/// Extensions.
extensions: http::Extensions,
}
/// An incoming request message.
#[cfg(feature = "http1")]
-pub type RequestHead = MessageHead<RequestLine>;
+pub(crate) type RequestHead = MessageHead<RequestLine>;
#[derive(Debug, Default, PartialEq)]
#[cfg(feature = "http1")]
-pub struct RequestLine(pub http::Method, pub http::Uri);
+pub(crate) struct RequestLine(pub(crate) http::Method, pub(crate) http::Uri);
/// An incoming response message.
#[cfg(feature = "http1")]
#[cfg(feature = "client")]
-pub type ResponseHead = MessageHead<http::StatusCode>;
+pub(crate) type ResponseHead = MessageHead<http::StatusCode>;
#[derive(Debug)]
#[cfg(feature = "http1")]
-pub enum BodyLength {
+pub(crate) enum BodyLength {
/// Content-Length
Known(u64),
/// Transfer-Encoding: chunked (if h1)
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -147,7 +147,7 @@ pub(super) struct SpawnAll<I, S, E> {
//
// See https://github.com/rust-lang/rust/issues/64705
#[pin]
- pub serve: Serve<I, S, E>,
+ pub(super) serve: Serve<I, S, E>,
}
/// A future binding a connection with a Service.
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -815,7 +815,7 @@ impl Default for ConnectionMode {
impl<I, S, E> Serve<I, S, E> {
/// Get a reference to the incoming stream.
#[inline]
- pub fn incoming_ref(&self) -> &I {
+ pub(super) fn incoming_ref(&self) -> &I {
&self.incoming
}
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -1025,7 +1025,7 @@ pub(crate) mod spawn_all {
}
#[pin_project(project = StateProj)]
- pub enum State<I, N, S: HttpService<Body>, E, W: Watcher<I, S, E>> {
+ pub(super) enum State<I, N, S: HttpService<Body>, E, W: Watcher<I, S, E>> {
Connecting(#[pin] Connecting<I, N, E>, W),
Connected(#[pin] W::Future),
}
diff --git a/src/server/tcp.rs b/src/server/tcp.rs
--- a/src/server/tcp.rs
+++ b/src/server/tcp.rs
@@ -8,6 +8,7 @@ use tokio::time::Sleep;
use crate::common::{task, Future, Pin, Poll};
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
pub use self::addr_stream::AddrStream;
use super::accept::Accept;
diff --git a/src/service/make.rs b/src/service/make.rs
--- a/src/service/make.rs
+++ b/src/service/make.rs
@@ -177,6 +177,7 @@ impl<F> fmt::Debug for MakeServiceFn<F> {
mod sealed {
pub trait Sealed<X> {}
+ #[allow(unreachable_pub)] // This is intentional.
pub trait CantImpl {}
#[allow(missing_debug_implementations)]
diff --git a/src/service/mod.rs b/src/service/mod.rs
--- a/src/service/mod.rs
+++ b/src/service/mod.rs
@@ -44,16 +44,16 @@ mod make;
mod oneshot;
mod util;
-pub(crate) use self::http::HttpService;
+pub(super) use self::http::HttpService;
#[cfg(any(feature = "http1", feature = "http2"))]
#[cfg(feature = "client")]
-pub(crate) use self::make::MakeConnection;
+pub(super) use self::make::MakeConnection;
#[cfg(any(feature = "http1", feature = "http2"))]
#[cfg(feature = "server")]
-pub(crate) use self::make::MakeServiceRef;
+pub(super) use self::make::MakeServiceRef;
#[cfg(any(feature = "http1", feature = "http2"))]
#[cfg(feature = "client")]
-pub(crate) use self::oneshot::{oneshot, Oneshot};
+pub(super) use self::oneshot::{oneshot, Oneshot};
pub use self::make::make_service_fn;
pub use self::util::service_fn;
diff --git a/src/upgrade.rs b/src/upgrade.rs
--- a/src/upgrade.rs
+++ b/src/upgrade.rs
@@ -63,12 +63,12 @@ pub fn on<T: sealed::CanUpgrade>(msg: T) -> OnUpgrade {
}
#[cfg(feature = "http1")]
-pub(crate) struct Pending {
+pub(super) struct Pending {
tx: oneshot::Sender<crate::Result<Upgraded>>,
}
#[cfg(feature = "http1")]
-pub(crate) fn pending() -> (Pending, OnUpgrade) {
+pub(super) fn pending() -> (Pending, OnUpgrade) {
let (tx, rx) = oneshot::channel();
(Pending { tx }, OnUpgrade { rx: Some(rx) })
}
diff --git a/src/upgrade.rs b/src/upgrade.rs
--- a/src/upgrade.rs
+++ b/src/upgrade.rs
@@ -154,12 +154,12 @@ impl fmt::Debug for Upgraded {
// ===== impl OnUpgrade =====
impl OnUpgrade {
- pub(crate) fn none() -> Self {
+ pub(super) fn none() -> Self {
OnUpgrade { rx: None }
}
#[cfg(feature = "http1")]
- pub(crate) fn is_none(&self) -> bool {
+ pub(super) fn is_none(&self) -> bool {
self.rx.is_none()
}
}
diff --git a/src/upgrade.rs b/src/upgrade.rs
--- a/src/upgrade.rs
+++ b/src/upgrade.rs
@@ -189,14 +189,14 @@ impl fmt::Debug for OnUpgrade {
#[cfg(feature = "http1")]
impl Pending {
- pub(crate) fn fulfill(self, upgraded: Upgraded) {
+ pub(super) fn fulfill(self, upgraded: Upgraded) {
trace!("pending upgrade fulfill");
let _ = self.tx.send(Ok(upgraded));
}
/// Don't fulfill the pending Upgrade, but instead signal that
/// upgrades are handled manually.
- pub(crate) fn manual(self) {
+ pub(super) fn manual(self) {
trace!("pending upgrade handled manually");
let _ = self.tx.send(Err(crate::Error::new_user_manual_upgrade()));
}
diff --git a/src/upgrade.rs b/src/upgrade.rs
--- a/src/upgrade.rs
+++ b/src/upgrade.rs
@@ -221,7 +221,7 @@ impl StdError for UpgradeExpected {}
// ===== impl Io =====
-pub(crate) trait Io: AsyncRead + AsyncWrite + Unpin + 'static {
+pub(super) trait Io: AsyncRead + AsyncWrite + Unpin + 'static {
fn __hyper_type_id(&self) -> TypeId {
TypeId::of::<Self>()
}
|
2021-01-13T11:12:22Z
| 2,400
|
Lint for unreachable_pub
This is a good clean up to help detect any types or methods that seem public but aren't really. By adjusting their privacy, we can reduce accidentally making things public in case we later on made some module the re-exports them public.
To do this, start by adding `#![deny(unreachable_pub)]`, and then fixing all failures by making them `pub(crate)` or `pub(super)` as appropriate (prefer `super` to `crate` when possible).
|
hyperium__hyper-2400
|
diff --git a/src/lib.rs b/src/lib.rs
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -2,6 +2,7 @@
#![deny(missing_docs)]
#![deny(missing_debug_implementations)]
#![cfg_attr(test, deny(rust_2018_idioms))]
+#![cfg_attr(all(test, feature = "full"), deny(unreachable_pub))]
#![cfg_attr(test, deny(warnings))]
#![cfg_attr(all(test, feature = "nightly"), feature(test))]
#![cfg_attr(docsrs, feature(doc_cfg))]
diff --git a/src/upgrade.rs b/src/upgrade.rs
--- a/src/upgrade.rs
+++ b/src/upgrade.rs
@@ -77,7 +77,7 @@ pub(crate) fn pending() -> (Pending, OnUpgrade) {
impl Upgraded {
#[cfg(any(feature = "http1", test))]
- pub(crate) fn new<T>(io: T, read_buf: Bytes) -> Self
+ pub(super) fn new<T>(io: T, read_buf: Bytes) -> Self
where
T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
{
|
hyperium/hyper
|
a24f0c0af8e1f4c6b7cc3a47c83eb6e4af88aca6
|
|
[
"2388"
] |
0.14
|
2c8121f1735aa8efeb0d5e4ef595363c373ba470
|
diff --git a/Cargo.toml b/Cargo.toml
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -34,7 +34,7 @@ httparse = "1.0"
h2 = { version = "0.3", optional = true }
itoa = "0.4.1"
tracing = { version = "0.1", default-features = false, features = ["std"] }
-pin-project = "1.0"
+pin-project-lite = "0.2.4"
tower-service = "0.3"
tokio = { version = "1", features = ["sync"] }
want = "0.3"
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -16,8 +16,7 @@ use super::DecodedLength;
#[cfg(feature = "stream")]
use crate::common::sync_wrapper::SyncWrapper;
use crate::common::Future;
-#[cfg(any(feature = "http1", feature = "http2"))]
-#[cfg(feature = "client")]
+#[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))]
use crate::common::Never;
use crate::common::{task, watch, Pin, Poll};
#[cfg(all(feature = "http2", any(feature = "client", feature = "server")))]
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -74,8 +73,7 @@ struct Extra {
delayed_eof: Option<DelayEof>,
}
-#[cfg(any(feature = "http1", feature = "http2"))]
-#[cfg(feature = "client")]
+#[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))]
type DelayEofUntil = oneshot::Receiver<Never>;
enum DelayEof {
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -51,19 +51,21 @@ use std::fmt;
#[cfg(feature = "http2")]
use std::marker::PhantomData;
use std::sync::Arc;
-#[cfg(feature = "runtime")]
-#[cfg(feature = "http2")]
+#[cfg(all(feature = "runtime", feature = "http2"))]
use std::time::Duration;
use bytes::Bytes;
use futures_util::future::{self, Either, FutureExt as _};
-use pin_project::pin_project;
+use pin_project_lite::pin_project;
use tokio::io::{AsyncRead, AsyncWrite};
use tower_service::Service;
use super::dispatch;
use crate::body::HttpBody;
-use crate::common::{task, exec::{BoxSendFuture, Exec}, Future, Pin, Poll};
+use crate::common::{
+ exec::{BoxSendFuture, Exec},
+ task, Future, Pin, Poll,
+};
use crate::proto;
use crate::rt::Executor;
#[cfg(feature = "http1")]
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -73,15 +75,23 @@ use crate::{Body, Request, Response};
#[cfg(feature = "http1")]
type Http1Dispatcher<T, B, R> = proto::dispatch::Dispatcher<proto::dispatch::Client<B>, B, T, R>;
-#[pin_project(project = ProtoClientProj)]
-enum ProtoClient<T, B>
-where
- B: HttpBody,
-{
- #[cfg(feature = "http1")]
- H1(#[pin] Http1Dispatcher<T, B, proto::h1::ClientTransaction>),
- #[cfg(feature = "http2")]
- H2(#[pin] proto::h2::ClientTask<B>, PhantomData<fn(T)>),
+pin_project! {
+ #[project = ProtoClientProj]
+ enum ProtoClient<T, B>
+ where
+ B: HttpBody,
+ {
+ #[cfg(feature = "http1")]
+ H1 {
+ #[pin]
+ h1: Http1Dispatcher<T, B, proto::h1::ClientTransaction>,
+ },
+ #[cfg(feature = "http2")]
+ H2 {
+ #[pin]
+ h2: proto::h2::ClientTask<B>, _phantom: PhantomData<fn(T)>,
+ },
+ }
}
/// Returns a handshake future over some IO.
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -398,7 +408,7 @@ where
pub fn into_parts(self) -> Parts<T> {
match self.inner.expect("already upgraded") {
#[cfg(feature = "http1")]
- ProtoClient::H1(h1) => {
+ ProtoClient::H1 { h1 } => {
let (io, read_buf, _) = h1.into_inner();
Parts {
io,
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -407,7 +417,7 @@ where
}
}
#[cfg(feature = "http2")]
- ProtoClient::H2(..) => {
+ ProtoClient::H2 { .. } => {
panic!("http2 cannot into_inner");
}
}
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -427,9 +437,9 @@ where
pub fn poll_without_shutdown(&mut self, cx: &mut task::Context<'_>) -> Poll<crate::Result<()>> {
match *self.inner.as_mut().expect("already upgraded") {
#[cfg(feature = "http1")]
- ProtoClient::H1(ref mut h1) => h1.poll_without_shutdown(cx),
+ ProtoClient::H1 { ref mut h1 } => h1.poll_without_shutdown(cx),
#[cfg(feature = "http2")]
- ProtoClient::H2(ref mut h2, _) => Pin::new(h2).poll(cx).map_ok(|_| ()),
+ ProtoClient::H2 { ref mut h2, .. } => Pin::new(h2).poll(cx).map_ok(|_| ()),
}
}
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -458,7 +468,7 @@ where
proto::Dispatched::Shutdown => Poll::Ready(Ok(())),
#[cfg(feature = "http1")]
proto::Dispatched::Upgrade(pending) => match self.inner.take() {
- Some(ProtoClient::H1(h1)) => {
+ Some(ProtoClient::H1 { h1 }) => {
let (io, buf, _) = h1.into_inner();
pending.fulfill(Upgraded::new(io, buf));
Poll::Ready(Ok(()))
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -705,14 +715,17 @@ impl Builder {
}
let cd = proto::h1::dispatch::Client::new(rx);
let dispatch = proto::h1::Dispatcher::new(cd, conn);
- ProtoClient::H1(dispatch)
+ ProtoClient::H1 { h1: dispatch }
}
#[cfg(feature = "http2")]
Proto::Http2 => {
let h2 =
proto::h2::client::handshake(io, rx, &opts.h2_builder, opts.exec.clone())
.await?;
- ProtoClient::H2(h2, PhantomData)
+ ProtoClient::H2 {
+ h2,
+ _phantom: PhantomData,
+ }
}
};
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -766,9 +779,9 @@ where
fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
match self.project() {
#[cfg(feature = "http1")]
- ProtoClientProj::H1(c) => c.poll(cx),
+ ProtoClientProj::H1 { h1 } => h1.poll(cx),
#[cfg(feature = "http2")]
- ProtoClientProj::H2(c, _) => c.poll(cx),
+ ProtoClientProj::H2 { h2, .. } => h2.poll(cx),
}
}
}
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -11,7 +11,7 @@ use std::time::Duration;
use futures_util::future::Either;
use http::uri::{Scheme, Uri};
-use pin_project::pin_project;
+use pin_project_lite::pin_project;
use tokio::net::{TcpSocket, TcpStream};
use tokio::time::Sleep;
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -373,18 +373,19 @@ impl HttpInfo {
}
}
-// Not publicly exported (so missing_docs doesn't trigger).
-//
-// We return this `Future` instead of the `Pin<Box<dyn Future>>` directly
-// so that users don't rely on it fitting in a `Pin<Box<dyn Future>>` slot
-// (and thus we can change the type in the future).
-#[must_use = "futures do nothing unless polled"]
-#[pin_project]
-#[allow(missing_debug_implementations)]
-pub struct HttpConnecting<R> {
- #[pin]
- fut: BoxConnecting,
- _marker: PhantomData<R>,
+pin_project! {
+ // Not publicly exported (so missing_docs doesn't trigger).
+ //
+ // We return this `Future` instead of the `Pin<Box<dyn Future>>` directly
+ // so that users don't rely on it fitting in a `Pin<Box<dyn Future>>` slot
+ // (and thus we can change the type in the future).
+ #[must_use = "futures do nothing unless polled"]
+ #[allow(missing_debug_implementations)]
+ pub struct HttpConnecting<R> {
+ #[pin]
+ fut: BoxConnecting,
+ _marker: PhantomData<R>,
+ }
}
type ConnectResult = Result<TcpStream, ConnectError>;
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -11,7 +11,7 @@ use futures_channel::oneshot;
use tokio::time::{Duration, Instant, Interval};
use super::client::Ver;
-use crate::common::{task, exec::Exec, Future, Pin, Poll, Unpin};
+use crate::common::{exec::Exec, task, Future, Pin, Poll, Unpin};
// FIXME: allow() required due to `impl Trait` leaking types to this lint
#[allow(missing_debug_implementations)]
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -714,16 +714,17 @@ impl Expiration {
}
#[cfg(feature = "runtime")]
-#[pin_project::pin_project]
-struct IdleTask<T> {
- #[pin]
- interval: Interval,
- pool: WeakOpt<Mutex<PoolInner<T>>>,
- // This allows the IdleTask to be notified as soon as the entire
- // Pool is fully dropped, and shutdown. This channel is never sent on,
- // but Err(Canceled) will be received when the Pool is dropped.
- #[pin]
- pool_drop_notifier: oneshot::Receiver<crate::common::Never>,
+pin_project_lite::pin_project! {
+ struct IdleTask<T> {
+ #[pin]
+ interval: Interval,
+ pool: WeakOpt<Mutex<PoolInner<T>>>,
+ // This allows the IdleTask to be notified as soon as the entire
+ // Pool is fully dropped, and shutdown. This channel is never sent on,
+ // but Err(Canceled) will be received when the Pool is dropped.
+ #[pin]
+ pool_drop_notifier: oneshot::Receiver<crate::common::Never>,
+ }
}
#[cfg(feature = "runtime")]
diff --git a/src/common/drain.rs b/src/common/drain.rs
--- a/src/common/drain.rs
+++ b/src/common/drain.rs
@@ -1,6 +1,6 @@
use std::mem;
-use pin_project::pin_project;
+use pin_project_lite::pin_project;
use tokio::sync::watch;
use super::{task, Future, Pin, Poll};
diff --git a/src/common/drain.rs b/src/common/drain.rs
--- a/src/common/drain.rs
+++ b/src/common/drain.rs
@@ -21,14 +21,15 @@ pub(crate) struct Watch {
rx: watch::Receiver<()>,
}
-#[allow(missing_debug_implementations)]
-#[pin_project]
-pub struct Watching<F, FN> {
- #[pin]
- future: F,
- state: State<FN>,
- watch: Pin<Box<dyn Future<Output = ()> + Send + Sync>>,
- _rx: watch::Receiver<()>,
+pin_project! {
+ #[allow(missing_debug_implementations)]
+ pub struct Watching<F, FN> {
+ #[pin]
+ future: F,
+ state: State<FN>,
+ watch: Pin<Box<dyn Future<Output = ()> + Send + Sync>>,
+ _rx: watch::Receiver<()>,
+ }
}
enum State<F> {
diff --git a/src/common/exec.rs b/src/common/exec.rs
--- a/src/common/exec.rs
+++ b/src/common/exec.rs
@@ -5,8 +5,7 @@ use std::sync::Arc;
#[cfg(feature = "server")]
use crate::body::{Body, HttpBody};
-#[cfg(feature = "http2")]
-#[cfg(feature = "server")]
+#[cfg(all(feature = "http2", feature = "server"))]
use crate::proto::h2::server::H2Stream;
use crate::rt::Executor;
#[cfg(feature = "server")]
diff --git a/src/common/mod.rs b/src/common/mod.rs
--- a/src/common/mod.rs
+++ b/src/common/mod.rs
@@ -8,17 +8,14 @@ macro_rules! ready {
}
pub(crate) mod buf;
-#[cfg(any(feature = "http1", feature = "http2"))]
-#[cfg(feature = "server")]
+#[cfg(all(feature = "server", any(feature = "http1", feature = "http2")))]
pub(crate) mod date;
-#[cfg(any(feature = "http1", feature = "http2"))]
-#[cfg(feature = "server")]
+#[cfg(all(feature = "server", any(feature = "http1", feature = "http2")))]
pub(crate) mod drain;
#[cfg(any(feature = "http1", feature = "http2"))]
pub(crate) mod exec;
pub(crate) mod io;
-#[cfg(any(feature = "http1", feature = "http2"))]
-#[cfg(feature = "client")]
+#[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))]
mod lazy;
mod never;
#[cfg(feature = "stream")]
diff --git a/src/common/mod.rs b/src/common/mod.rs
--- a/src/common/mod.rs
+++ b/src/common/mod.rs
@@ -26,8 +23,7 @@ pub(crate) mod sync_wrapper;
pub(crate) mod task;
pub(crate) mod watch;
-#[cfg(any(feature = "http1", feature = "http2"))]
-#[cfg(feature = "client")]
+#[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))]
pub(crate) use self::lazy::{lazy, Started as Lazy};
#[cfg(any(
feature = "client",
diff --git a/src/headers.rs b/src/headers.rs
--- a/src/headers.rs
+++ b/src/headers.rs
@@ -2,10 +2,9 @@
use bytes::BytesMut;
use http::header::CONTENT_LENGTH;
use http::header::{HeaderValue, ValueIter};
-#[cfg(feature = "http2")]
-#[cfg(feature = "client")]
-use http::Method;
use http::HeaderMap;
+#[cfg(all(feature = "http2", feature = "client"))]
+use http::Method;
#[cfg(feature = "http1")]
pub(super) fn connection_keep_alive(value: &HeaderValue) -> bool {
diff --git a/src/headers.rs b/src/headers.rs
--- a/src/headers.rs
+++ b/src/headers.rs
@@ -29,8 +28,7 @@ fn connection_has(value: &HeaderValue, needle: &str) -> bool {
false
}
-#[cfg(feature = "http1")]
-#[cfg(feature = "server")]
+#[cfg(all(feature = "http1", feature = "server"))]
pub(super) fn content_length_parse(value: &HeaderValue) -> Option<u64> {
value.to_str().ok().and_then(|s| s.parse().ok())
}
diff --git a/src/headers.rs b/src/headers.rs
--- a/src/headers.rs
+++ b/src/headers.rs
@@ -66,8 +64,7 @@ pub(super) fn content_length_parse_all_values(values: ValueIter<'_, HeaderValue>
}
}
-#[cfg(feature = "http2")]
-#[cfg(feature = "client")]
+#[cfg(all(feature = "http2", feature = "client"))]
pub(super) fn method_has_defined_payload_semantics(method: &Method) -> bool {
match *method {
Method::GET | Method::HEAD | Method::DELETE | Method::CONNECT => false,
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -44,10 +44,13 @@ cfg_server! {
}
cfg_client! {
- pub(crate) struct Client<B> {
- callback: Option<crate::client::dispatch::Callback<Request<B>, http::Response<Body>>>,
- rx: ClientRx<B>,
- rx_closed: bool,
+ pin_project_lite::pin_project! {
+ pub(crate) struct Client<B> {
+ callback: Option<crate::client::dispatch::Callback<Request<B>, http::Response<Body>>>,
+ #[pin]
+ rx: ClientRx<B>,
+ rx_closed: bool,
+ }
}
type ClientRx<B> = crate::client::dispatch::Receiver<Request<B>, http::Response<Body>>;
diff --git a/src/proto/h2/mod.rs b/src/proto/h2/mod.rs
--- a/src/proto/h2/mod.rs
+++ b/src/proto/h2/mod.rs
@@ -5,7 +5,7 @@ use http::header::{
TRANSFER_ENCODING, UPGRADE,
};
use http::HeaderMap;
-use pin_project::pin_project;
+use pin_project_lite::pin_project;
use std::error::Error as StdError;
use std::io::IoSlice;
diff --git a/src/proto/h2/mod.rs b/src/proto/h2/mod.rs
--- a/src/proto/h2/mod.rs
+++ b/src/proto/h2/mod.rs
@@ -94,15 +94,16 @@ fn decode_content_length(headers: &HeaderMap) -> DecodedLength {
// body adapters used by both Client and Server
-#[pin_project]
-struct PipeToSendStream<S>
-where
- S: HttpBody,
-{
- body_tx: SendStream<SendBuf<S::Data>>,
- data_done: bool,
- #[pin]
- stream: S,
+pin_project! {
+ struct PipeToSendStream<S>
+ where
+ S: HttpBody,
+ {
+ body_tx: SendStream<SendBuf<S::Data>>,
+ data_done: bool,
+ #[pin]
+ stream: S,
+ }
}
impl<S> PipeToSendStream<S>
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -5,7 +5,7 @@ use std::time::Duration;
use h2::server::{Connection, Handshake, SendResponse};
use h2::Reason;
-use pin_project::pin_project;
+use pin_project_lite::pin_project;
use tokio::io::{AsyncRead, AsyncWrite};
use super::{decode_content_length, ping, PipeToSendStream, SendBuf};
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -57,15 +57,16 @@ impl Default for Config {
}
}
-#[pin_project]
-pub(crate) struct Server<T, S, B, E>
-where
- S: HttpService<Body>,
- B: HttpBody,
-{
- exec: E,
- service: S,
- state: State<T, B>,
+pin_project! {
+ pub(crate) struct Server<T, S, B, E>
+ where
+ S: HttpService<Body>,
+ B: HttpBody,
+ {
+ exec: E,
+ service: S,
+ state: State<T, B>,
+ }
}
enum State<T, B>
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -315,24 +316,33 @@ where
}
}
-#[allow(missing_debug_implementations)]
-#[pin_project]
-pub struct H2Stream<F, B>
-where
- B: HttpBody,
-{
- reply: SendResponse<SendBuf<B::Data>>,
- #[pin]
- state: H2StreamState<F, B>,
+pin_project! {
+ #[allow(missing_debug_implementations)]
+ pub struct H2Stream<F, B>
+ where
+ B: HttpBody,
+ {
+ reply: SendResponse<SendBuf<B::Data>>,
+ #[pin]
+ state: H2StreamState<F, B>,
+ }
}
-#[pin_project(project = H2StreamStateProj)]
-enum H2StreamState<F, B>
-where
- B: HttpBody,
-{
- Service(#[pin] F),
- Body(#[pin] PipeToSendStream<B>),
+pin_project! {
+ #[project = H2StreamStateProj]
+ enum H2StreamState<F, B>
+ where
+ B: HttpBody,
+ {
+ Service {
+ #[pin]
+ fut: F,
+ },
+ Body {
+ #[pin]
+ pipe: PipeToSendStream<B>,
+ },
+ }
}
impl<F, B> H2Stream<F, B>
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -342,7 +352,7 @@ where
fn new(fut: F, respond: SendResponse<SendBuf<B::Data>>) -> H2Stream<F, B> {
H2Stream {
reply: respond,
- state: H2StreamState::Service(fut),
+ state: H2StreamState::Service { fut },
}
}
}
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -371,7 +381,7 @@ where
let mut me = self.project();
loop {
let next = match me.state.as_mut().project() {
- H2StreamStateProj::Service(h) => {
+ H2StreamStateProj::Service { fut: h } => {
let res = match h.poll(cx) {
Poll::Ready(Ok(r)) => r,
Poll::Pending => {
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -409,13 +419,15 @@ where
if !body.is_end_stream() {
let body_tx = reply!(me, res, false);
- H2StreamState::Body(PipeToSendStream::new(body, body_tx))
+ H2StreamState::Body {
+ pipe: PipeToSendStream::new(body, body_tx),
+ }
} else {
reply!(me, res, true);
return Poll::Ready(Ok(()));
}
}
- H2StreamStateProj::Body(pipe) => {
+ H2StreamStateProj::Body { pipe } => {
return pipe.poll(cx);
}
};
diff --git a/src/proto/mod.rs b/src/proto/mod.rs
--- a/src/proto/mod.rs
+++ b/src/proto/mod.rs
@@ -37,8 +37,7 @@ pub(crate) type RequestHead = MessageHead<RequestLine>;
pub(crate) struct RequestLine(pub(crate) http::Method, pub(crate) http::Uri);
/// An incoming response message.
-#[cfg(feature = "http1")]
-#[cfg(feature = "client")]
+#[cfg(all(feature = "http1", feature = "client"))]
pub(crate) type ResponseHead = MessageHead<http::StatusCode>;
#[derive(Debug)]
diff --git a/src/server/accept.rs b/src/server/accept.rs
--- a/src/server/accept.rs
+++ b/src/server/accept.rs
@@ -9,7 +9,7 @@
#[cfg(feature = "stream")]
use futures_core::Stream;
#[cfg(feature = "stream")]
-use pin_project::pin_project;
+use pin_project_lite::pin_project;
use crate::common::{
task::{self, Poll},
diff --git a/src/server/accept.rs b/src/server/accept.rs
--- a/src/server/accept.rs
+++ b/src/server/accept.rs
@@ -86,8 +86,12 @@ pub fn from_stream<S, IO, E>(stream: S) -> impl Accept<Conn = IO, Error = E>
where
S: Stream<Item = Result<IO, E>>,
{
- #[pin_project]
- struct FromStream<S>(#[pin] S);
+ pin_project! {
+ struct FromStream<S> {
+ #[pin]
+ stream: S,
+ }
+ }
impl<S, IO, E> Accept for FromStream<S>
where
diff --git a/src/server/accept.rs b/src/server/accept.rs
--- a/src/server/accept.rs
+++ b/src/server/accept.rs
@@ -99,9 +103,9 @@ where
self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
) -> Poll<Option<Result<Self::Conn, Self::Error>>> {
- self.project().0.poll_next(cx)
+ self.project().stream.poll_next(cx)
}
}
- FromStream(stream)
+ FromStream { stream }
}
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -45,16 +45,14 @@
use std::error::Error as StdError;
use std::fmt;
-#[cfg(feature = "http1")]
use std::marker::PhantomData;
#[cfg(feature = "tcp")]
use std::net::SocketAddr;
-#[cfg(feature = "runtime")]
-#[cfg(feature = "http2")]
+#[cfg(all(feature = "runtime", feature = "http2"))]
use std::time::Duration;
use bytes::Bytes;
-use pin_project::pin_project;
+use pin_project_lite::pin_project;
use tokio::io::{AsyncRead, AsyncWrite};
use super::accept::Accept;
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -63,8 +61,7 @@ use crate::common::exec::{ConnStreamExec, Exec, NewSvcExec};
#[cfg(feature = "http2")]
use crate::common::io::Rewind;
use crate::common::{task, Future, Pin, Poll, Unpin};
-#[cfg(feature = "http1")]
-#[cfg(feature = "http2")]
+#[cfg(all(feature = "http1", feature = "http2"))]
use crate::error::{Kind, Parse};
use crate::proto;
use crate::service::{HttpService, MakeServiceRef};
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -107,95 +104,103 @@ enum ConnectionMode {
#[cfg(feature = "http2")]
H2Only,
/// Use HTTP/1 and try to upgrade to h2 when a parse error occurs.
- #[cfg(feature = "http1")]
- #[cfg(feature = "http2")]
+ #[cfg(all(feature = "http1", feature = "http2"))]
Fallback,
}
-/// A stream mapping incoming IOs to new services.
-///
-/// Yields `Connecting`s that are futures that should be put on a reactor.
-#[must_use = "streams do nothing unless polled"]
-#[pin_project]
-#[derive(Debug)]
-pub(super) struct Serve<I, S, E = Exec> {
- #[pin]
- incoming: I,
- make_service: S,
- protocol: Http<E>,
+pin_project! {
+ /// A stream mapping incoming IOs to new services.
+ ///
+ /// Yields `Connecting`s that are futures that should be put on a reactor.
+ #[must_use = "streams do nothing unless polled"]
+ #[derive(Debug)]
+ pub(super) struct Serve<I, S, E = Exec> {
+ #[pin]
+ incoming: I,
+ make_service: S,
+ protocol: Http<E>,
+ }
}
-/// A future building a new `Service` to a `Connection`.
-///
-/// Wraps the future returned from `MakeService` into one that returns
-/// a `Connection`.
-#[must_use = "futures do nothing unless polled"]
-#[pin_project]
-#[derive(Debug)]
-pub struct Connecting<I, F, E = Exec> {
- #[pin]
- future: F,
- io: Option<I>,
- protocol: Http<E>,
+pin_project! {
+ /// A future building a new `Service` to a `Connection`.
+ ///
+ /// Wraps the future returned from `MakeService` into one that returns
+ /// a `Connection`.
+ #[must_use = "futures do nothing unless polled"]
+ #[derive(Debug)]
+ pub struct Connecting<I, F, E = Exec> {
+ #[pin]
+ future: F,
+ io: Option<I>,
+ protocol: Http<E>,
+ }
}
-#[must_use = "futures do nothing unless polled"]
-#[pin_project]
-#[derive(Debug)]
-pub(super) struct SpawnAll<I, S, E> {
- // TODO: re-add `pub(super)` once rustdoc can handle this.
- //
- // See https://github.com/rust-lang/rust/issues/64705
- #[pin]
- pub(super) serve: Serve<I, S, E>,
+pin_project! {
+ #[must_use = "futures do nothing unless polled"]
+ #[derive(Debug)]
+ pub(super) struct SpawnAll<I, S, E> {
+ // TODO: re-add `pub(super)` once rustdoc can handle this.
+ //
+ // See https://github.com/rust-lang/rust/issues/64705
+ #[pin]
+ pub(super) serve: Serve<I, S, E>,
+ }
}
-/// A future binding a connection with a Service.
-///
-/// Polling this future will drive HTTP forward.
-#[must_use = "futures do nothing unless polled"]
-#[pin_project]
-pub struct Connection<T, S, E = Exec>
-where
- S: HttpService<Body>,
-{
- pub(super) conn: Option<ProtoServer<T, S::ResBody, S, E>>,
- #[cfg(feature = "http1")]
- #[cfg(feature = "http2")]
- fallback: Fallback<E>,
+pin_project! {
+ /// A future binding a connection with a Service.
+ ///
+ /// Polling this future will drive HTTP forward.
+ #[must_use = "futures do nothing unless polled"]
+ pub struct Connection<T, S, E = Exec>
+ where
+ S: HttpService<Body>,
+ {
+ pub(super) conn: Option<ProtoServer<T, S::ResBody, S, E>>,
+ fallback: Fallback<E>,
+ }
}
-#[pin_project(project = ProtoServerProj)]
-pub(super) enum ProtoServer<T, B, S, E = Exec>
-where
- S: HttpService<Body>,
- B: HttpBody,
-{
- #[cfg(feature = "http1")]
- H1(
- #[pin]
- proto::h1::Dispatcher<
- proto::h1::dispatch::Server<S, Body>,
- B,
- T,
- proto::ServerTransaction,
- >,
- PhantomData<E>,
- ),
- #[cfg(feature = "http2")]
- H2(#[pin] proto::h2::Server<Rewind<T>, S, B, E>),
+pin_project! {
+ #[project = ProtoServerProj]
+ pub(super) enum ProtoServer<T, B, S, E = Exec>
+ where
+ S: HttpService<Body>,
+ B: HttpBody,
+ {
+ #[cfg(feature = "http1")]
+ H1 {
+ #[pin]
+ h1: proto::h1::Dispatcher<
+ proto::h1::dispatch::Server<S, Body>,
+ B,
+ T,
+ proto::ServerTransaction,
+ >,
+ _phantom: PhantomData<E>,
+ },
+ #[cfg(feature = "http2")]
+ H2 {
+ #[pin]
+ h2: proto::h2::Server<Rewind<T>, S, B, E>,
+ },
+ }
}
-#[cfg(feature = "http1")]
-#[cfg(feature = "http2")]
+#[cfg(all(feature = "http1", feature = "http2"))]
#[derive(Clone, Debug)]
enum Fallback<E> {
ToHttp2(proto::h2::server::Config, E),
Http1Only,
}
-#[cfg(feature = "http1")]
-#[cfg(feature = "http2")]
+#[cfg(not(all(feature = "http1", feature = "http2")))]
+#[derive(Clone, Debug)]
+struct Fallback<E>(PhantomData<E>);
+
+#[cfg(all(feature = "http1", feature = "http2"))]
impl<E> Fallback<E> {
fn to_h2(&self) -> bool {
match *self {
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -205,8 +210,7 @@ impl<E> Fallback<E> {
}
}
-#[cfg(feature = "http1")]
-#[cfg(feature = "http2")]
+#[cfg(all(feature = "http1", feature = "http2"))]
impl<E> Unpin for Fallback<E> {}
/// Deconstructed parts of a `Connection`.
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -526,7 +530,10 @@ impl<E> Http<E> {
conn.set_max_buf_size(max);
}
let sd = proto::h1::dispatch::Server::new(service);
- ProtoServer::H1(proto::h1::Dispatcher::new(sd, conn), PhantomData)
+ ProtoServer::H1 {
+ h1: proto::h1::Dispatcher::new(sd, conn),
+ _phantom: PhantomData,
+ }
}};
}
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -542,7 +549,7 @@ impl<E> Http<E> {
let rewind_io = Rewind::new(io);
let h2 =
proto::h2::Server::new(rewind_io, service, &self.h2_builder, self.exec.clone());
- ProtoServer::H2(h2)
+ ProtoServer::H2 { h2 }
}
};
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -597,14 +604,14 @@ where
/// This should only be called while the `Connection` future is still
/// pending. If called after `Connection::poll` has resolved, this does
/// nothing.
- pub fn graceful_shutdown(self: Pin<&mut Self>) {
- match self.project().conn {
+ pub fn graceful_shutdown(mut self: Pin<&mut Self>) {
+ match self.conn {
#[cfg(feature = "http1")]
- Some(ProtoServer::H1(ref mut h1, _)) => {
+ Some(ProtoServer::H1 { ref mut h1, .. }) => {
h1.disable_keep_alive();
}
#[cfg(feature = "http2")]
- Some(ProtoServer::H2(ref mut h2)) => {
+ Some(ProtoServer::H2 { ref mut h2 }) => {
h2.graceful_shutdown();
}
None => (),
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -631,7 +638,7 @@ where
pub fn try_into_parts(self) -> Option<Parts<I, S>> {
match self.conn.unwrap() {
#[cfg(feature = "http1")]
- ProtoServer::H1(h1, _) => {
+ ProtoServer::H1 { h1, .. } => {
let (io, read_buf, dispatch) = h1.into_inner();
Some(Parts {
io,
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -641,7 +648,7 @@ where
})
}
#[cfg(feature = "http2")]
- ProtoServer::H2(_h2) => None,
+ ProtoServer::H2 { .. } => None,
}
}
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -665,7 +672,7 @@ where
loop {
match *self.conn.as_mut().unwrap() {
#[cfg(feature = "http1")]
- ProtoServer::H1(ref mut h1, _) => match ready!(h1.poll_without_shutdown(cx)) {
+ ProtoServer::H1 { ref mut h1, .. } => match ready!(h1.poll_without_shutdown(cx)) {
Ok(()) => return Poll::Ready(Ok(())),
Err(e) => {
#[cfg(feature = "http2")]
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -681,7 +688,7 @@ where
}
},
#[cfg(feature = "http2")]
- ProtoServer::H2(ref mut h2) => return Pin::new(h2).poll(cx).map_ok(|_| ()),
+ ProtoServer::H2 { ref mut h2 } => return Pin::new(h2).poll(cx).map_ok(|_| ()),
};
}
}
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -701,15 +708,14 @@ where
})
}
- #[cfg(feature = "http1")]
- #[cfg(feature = "http2")]
+ #[cfg(all(feature = "http1", feature = "http2"))]
fn upgrade_h2(&mut self) {
trace!("Trying to upgrade connection to h2");
let conn = self.conn.take();
let (io, read_buf, dispatch) = match conn.unwrap() {
- ProtoServer::H1(h1, _) => h1.into_inner(),
- ProtoServer::H2(_h2) => {
+ ProtoServer::H1 { h1, .. } => h1.into_inner(),
+ ProtoServer::H2 { .. } => {
panic!("h2 cannot into_inner");
}
};
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -722,7 +728,7 @@ where
let h2 = proto::h2::Server::new(rewind_io, dispatch.into_service(), builder, exec.clone());
debug_assert!(self.conn.is_none());
- self.conn = Some(ProtoServer::H2(h2));
+ self.conn = Some(ProtoServer::H2 { h2 });
}
/// Enable this connection to support higher-level HTTP upgrades.
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -956,9 +962,9 @@ where
fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
match self.project() {
#[cfg(feature = "http1")]
- ProtoServerProj::H1(s, _) => s.poll(cx),
+ ProtoServerProj::H1 { h1, .. } => h1.poll(cx),
#[cfg(feature = "http2")]
- ProtoServerProj::H2(s) => s.poll(cx),
+ ProtoServerProj::H2 { h2 } => h2.poll(cx),
}
}
}
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -972,7 +978,7 @@ pub(crate) mod spawn_all {
use crate::common::exec::ConnStreamExec;
use crate::common::{task, Future, Pin, Poll, Unpin};
use crate::service::HttpService;
- use pin_project::pin_project;
+ use pin_project_lite::pin_project;
// Used by `SpawnAll` to optionally watch a `Connection` future.
//
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -1017,23 +1023,36 @@ pub(crate) mod spawn_all {
// Users cannot import this type, nor the associated `NewSvcExec`. Instead,
// a blanket implementation for `Executor<impl Future>` is sufficient.
- #[pin_project]
- #[allow(missing_debug_implementations)]
- pub struct NewSvcTask<I, N, S: HttpService<Body>, E, W: Watcher<I, S, E>> {
- #[pin]
- state: State<I, N, S, E, W>,
+ pin_project! {
+ #[allow(missing_debug_implementations)]
+ pub struct NewSvcTask<I, N, S: HttpService<Body>, E, W: Watcher<I, S, E>> {
+ #[pin]
+ state: State<I, N, S, E, W>,
+ }
}
- #[pin_project(project = StateProj)]
- pub(super) enum State<I, N, S: HttpService<Body>, E, W: Watcher<I, S, E>> {
- Connecting(#[pin] Connecting<I, N, E>, W),
- Connected(#[pin] W::Future),
+ pin_project! {
+ #[project = StateProj]
+ pub(super) enum State<I, N, S: HttpService<Body>, E, W: Watcher<I, S, E>> {
+ Connecting {
+ #[pin]
+ connecting: Connecting<I, N, E>,
+ watcher: W,
+ },
+ Connected {
+ #[pin]
+ future: W::Future,
+ },
+ }
}
impl<I, N, S: HttpService<Body>, E, W: Watcher<I, S, E>> NewSvcTask<I, N, S, E, W> {
pub(super) fn new(connecting: Connecting<I, N, E>, watcher: W) -> Self {
NewSvcTask {
- state: State::Connecting(connecting, watcher),
+ state: State::Connecting {
+ connecting,
+ watcher,
+ },
}
}
}
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -1060,7 +1079,10 @@ pub(crate) mod spawn_all {
loop {
let next = {
match me.state.as_mut().project() {
- StateProj::Connecting(connecting, watcher) => {
+ StateProj::Connecting {
+ connecting,
+ watcher,
+ } => {
let res = ready!(connecting.poll(cx));
let conn = match res {
Ok(conn) => conn,
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -1070,10 +1092,10 @@ pub(crate) mod spawn_all {
return Poll::Ready(());
}
};
- let connected = watcher.watch(conn.with_upgrades());
- State::Connected(connected)
+ let future = watcher.watch(conn.with_upgrades());
+ State::Connected { future }
}
- StateProj::Connected(future) => {
+ StateProj::Connected { future } => {
return future.poll(cx).map(|res| {
if let Err(err) = res {
debug!("connection error: {}", err);
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -1141,7 +1163,7 @@ mod upgrades {
#[cfg(feature = "http1")]
Ok(proto::Dispatched::Upgrade(pending)) => {
match self.inner.conn.take() {
- Some(ProtoServer::H1(h1, _)) => {
+ Some(ProtoServer::H1 { h1, .. }) => {
let (io, buf, _) = h1.into_inner();
pending.fulfill(Upgraded::new(io, buf));
return Poll::Ready(Ok(()));
diff --git a/src/server/server.rs b/src/server/server.rs
--- a/src/server/server.rs
+++ b/src/server/server.rs
@@ -6,7 +6,7 @@ use std::net::{SocketAddr, TcpListener as StdTcpListener};
#[cfg(feature = "tcp")]
use std::time::Duration;
-use pin_project::pin_project;
+use pin_project_lite::pin_project;
use tokio::io::{AsyncRead, AsyncWrite};
use super::accept::Accept;
diff --git a/src/server/server.rs b/src/server/server.rs
--- a/src/server/server.rs
+++ b/src/server/server.rs
@@ -21,16 +21,17 @@ use super::shutdown::{Graceful, GracefulWatcher};
#[cfg(feature = "tcp")]
use super::tcp::AddrIncoming;
-/// A listening HTTP server that accepts connections in both HTTP1 and HTTP2 by default.
-///
-/// `Server` is a `Future` mapping a bound listener with a set of service
-/// handlers. It is built using the [`Builder`](Builder), and the future
-/// completes when the server has been shutdown. It should be run by an
-/// `Executor`.
-#[pin_project]
-pub struct Server<I, S, E = Exec> {
- #[pin]
- spawn_all: SpawnAll<I, S, E>,
+pin_project! {
+ /// A listening HTTP server that accepts connections in both HTTP1 and HTTP2 by default.
+ ///
+ /// `Server` is a `Future` mapping a bound listener with a set of service
+ /// handlers. It is built using the [`Builder`](Builder), and the future
+ /// completes when the server has been shutdown. It should be run by an
+ /// `Executor`.
+ pub struct Server<I, S, E = Exec> {
+ #[pin]
+ spawn_all: SpawnAll<I, S, E>,
+ }
}
/// A builder for a [`Server`](Server).
diff --git a/src/server/shutdown.rs b/src/server/shutdown.rs
--- a/src/server/shutdown.rs
+++ b/src/server/shutdown.rs
@@ -1,33 +1,36 @@
use std::error::Error as StdError;
-use pin_project::pin_project;
+use pin_project_lite::pin_project;
use tokio::io::{AsyncRead, AsyncWrite};
-use super::conn::{SpawnAll, UpgradeableConnection, Watcher};
use super::accept::Accept;
+use super::conn::{SpawnAll, UpgradeableConnection, Watcher};
use crate::body::{Body, HttpBody};
use crate::common::drain::{self, Draining, Signal, Watch, Watching};
use crate::common::exec::{ConnStreamExec, NewSvcExec};
use crate::common::{task, Future, Pin, Poll, Unpin};
use crate::service::{HttpService, MakeServiceRef};
-#[allow(missing_debug_implementations)]
-#[pin_project]
-pub struct Graceful<I, S, F, E> {
- #[pin]
- state: State<I, S, F, E>,
+pin_project! {
+ #[allow(missing_debug_implementations)]
+ pub struct Graceful<I, S, F, E> {
+ #[pin]
+ state: State<I, S, F, E>,
+ }
}
-#[pin_project(project = StateProj)]
-pub(super) enum State<I, S, F, E> {
- Running {
- drain: Option<(Signal, Watch)>,
- #[pin]
- spawn_all: SpawnAll<I, S, E>,
- #[pin]
- signal: F,
- },
- Draining(Draining),
+pin_project! {
+ #[project = StateProj]
+ pub(super) enum State<I, S, F, E> {
+ Running {
+ drain: Option<(Signal, Watch)>,
+ #[pin]
+ spawn_all: SpawnAll<I, S, E>,
+ #[pin]
+ signal: F,
+ },
+ Draining { draining: Draining },
+ }
}
impl<I, S, F, E> Graceful<I, S, F, E> {
diff --git a/src/server/shutdown.rs b/src/server/shutdown.rs
--- a/src/server/shutdown.rs
+++ b/src/server/shutdown.rs
@@ -71,14 +74,16 @@ where
Poll::Ready(()) => {
debug!("signal received, starting graceful shutdown");
let sig = drain.take().expect("drain channel").0;
- State::Draining(sig.drain())
+ State::Draining {
+ draining: sig.drain(),
+ }
}
Poll::Pending => {
let watch = drain.as_ref().expect("drain channel").1.clone();
return spawn_all.poll_watch(cx, &GracefulWatcher(watch));
}
},
- StateProj::Draining(ref mut draining) => {
+ StateProj::Draining { ref mut draining } => {
return Pin::new(draining).poll(cx).map(Ok);
}
}
diff --git a/src/server/tcp.rs b/src/server/tcp.rs
--- a/src/server/tcp.rs
+++ b/src/server/tcp.rs
@@ -229,13 +229,14 @@ mod addr_stream {
use crate::common::{task, Pin, Poll};
- /// A transport returned yieled by `AddrIncoming`.
- #[pin_project::pin_project]
- #[derive(Debug)]
- pub struct AddrStream {
- #[pin]
- inner: TcpStream,
- pub(super) remote_addr: SocketAddr,
+ pin_project_lite::pin_project! {
+ /// A transport returned yieled by `AddrIncoming`.
+ #[derive(Debug)]
+ pub struct AddrStream {
+ #[pin]
+ inner: TcpStream,
+ pub(super) remote_addr: SocketAddr,
+ }
}
impl AddrStream {
diff --git a/src/service/mod.rs b/src/service/mod.rs
--- a/src/service/mod.rs
+++ b/src/service/mod.rs
@@ -39,20 +39,16 @@ pub use tower_service::Service;
mod http;
mod make;
-#[cfg(any(feature = "http1", feature = "http2"))]
-#[cfg(feature = "client")]
+#[cfg(all(any(feature = "http1", feature = "http2"), feature = "client"))]
mod oneshot;
mod util;
pub(super) use self::http::HttpService;
-#[cfg(any(feature = "http1", feature = "http2"))]
-#[cfg(feature = "client")]
+#[cfg(all(any(feature = "http1", feature = "http2"), feature = "client"))]
pub(super) use self::make::MakeConnection;
-#[cfg(any(feature = "http1", feature = "http2"))]
-#[cfg(feature = "server")]
+#[cfg(all(any(feature = "http1", feature = "http2"), feature = "server"))]
pub(super) use self::make::MakeServiceRef;
-#[cfg(any(feature = "http1", feature = "http2"))]
-#[cfg(feature = "client")]
+#[cfg(all(any(feature = "http1", feature = "http2"), feature = "client"))]
pub(super) use self::oneshot::{oneshot, Oneshot};
pub use self::make::make_service_fn;
diff --git a/src/service/oneshot.rs b/src/service/oneshot.rs
--- a/src/service/oneshot.rs
+++ b/src/service/oneshot.rs
@@ -1,6 +1,6 @@
// TODO: Eventually to be replaced with tower_util::Oneshot.
-use pin_project::pin_project;
+use pin_project_lite::pin_project;
use tower_service::Service;
use crate::common::{task, Future, Pin, Poll};
diff --git a/src/service/oneshot.rs b/src/service/oneshot.rs
--- a/src/service/oneshot.rs
+++ b/src/service/oneshot.rs
@@ -10,25 +10,35 @@ where
S: Service<Req>,
{
Oneshot {
- state: State::NotReady(svc, req),
+ state: State::NotReady { svc, req },
}
}
-// A `Future` consuming a `Service` and request, waiting until the `Service`
-// is ready, and then calling `Service::call` with the request, and
-// waiting for that `Future`.
-#[allow(missing_debug_implementations)]
-#[pin_project]
-pub struct Oneshot<S: Service<Req>, Req> {
- #[pin]
- state: State<S, Req>,
+pin_project! {
+ // A `Future` consuming a `Service` and request, waiting until the `Service`
+ // is ready, and then calling `Service::call` with the request, and
+ // waiting for that `Future`.
+ #[allow(missing_debug_implementations)]
+ pub struct Oneshot<S: Service<Req>, Req> {
+ #[pin]
+ state: State<S, Req>,
+ }
}
-#[pin_project(project = StateProj, project_replace = StateProjOwn)]
-enum State<S: Service<Req>, Req> {
- NotReady(S, Req),
- Called(#[pin] S::Future),
- Tmp,
+pin_project! {
+ #[project = StateProj]
+ #[project_replace = StateProjOwn]
+ enum State<S: Service<Req>, Req> {
+ NotReady {
+ svc: S,
+ req: Req,
+ },
+ Called {
+ #[pin]
+ fut: S::Future,
+ },
+ Tmp,
+ }
}
impl<S, Req> Future for Oneshot<S, Req>
diff --git a/src/service/oneshot.rs b/src/service/oneshot.rs
--- a/src/service/oneshot.rs
+++ b/src/service/oneshot.rs
@@ -42,19 +52,19 @@ where
loop {
match me.state.as_mut().project() {
- StateProj::NotReady(ref mut svc, _) => {
+ StateProj::NotReady { ref mut svc, .. } => {
ready!(svc.poll_ready(cx))?;
// fallthrough out of the match's borrow
}
- StateProj::Called(fut) => {
+ StateProj::Called { fut } => {
return fut.poll(cx);
}
StateProj::Tmp => unreachable!(),
}
match me.state.as_mut().project_replace(State::Tmp) {
- StateProjOwn::NotReady(mut svc, req) => {
- me.state.set(State::Called(svc.call(req)));
+ StateProjOwn::NotReady { mut svc, req } => {
+ me.state.set(State::Called { fut: svc.call(req) });
}
_ => unreachable!(),
}
|
Considered, and agreed! It should be done!
This may require https://github.com/taiki-e/pin-project-lite/pull/43.
UPDATE: merged and released in pin-project-lite 0.2.4.
I'll take a stab at implementing this.
Depends on https://github.com/taiki-e/pin-project-lite/pull/25 because of https://github.com/hyperium/hyper/blob/fad42acc79b54ce38adf99c58c894f29fa2665ad/src/client/dispatch.rs#L180
Just found that out myself. Thanks for the link 🙂
|
2021-01-11T23:13:37Z
| 2,393
|
Switch from pin-project to pin-project-lite
futures-rs [just switched](https://github.com/rust-lang/futures-rs/pull/2273) and tokio has been using the `-lite` one since 0.2.0. I haven't used either so I don't actually know how hard switching is, but it seems like a good idea since it greatly improves compile times for people who don't depend on syn & quote otherwise.
|
hyperium__hyper-2393
|
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -52,8 +52,7 @@
pub use self::connect::HttpConnector;
pub mod connect;
-#[cfg(test)]
-#[cfg(feature = "runtime")]
+#[cfg(all(test, feature = "runtime"))]
mod tests;
cfg_feature! {
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -776,7 +777,7 @@ mod tests {
use std::time::Duration;
use super::{Connecting, Key, Pool, Poolable, Reservation, WeakOpt};
- use crate::common::{task, exec::Exec, Future, Pin};
+ use crate::common::{exec::Exec, task, Future, Pin};
/// Test unique reservations.
#[derive(Debug, PartialEq, Eq)]
|
hyperium/hyper
|
a24f0c0af8e1f4c6b7cc3a47c83eb6e4af88aca6
|
[
"2260"
] |
0.14
|
257d6a99193c9404ce055727833e1490c23a7197
|
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -5,8 +5,6 @@ use std::fmt;
use bytes::Bytes;
use futures_channel::mpsc;
-#[cfg(any(feature = "http1", feature = "http2"))]
-#[cfg(feature = "client")]
use futures_channel::oneshot;
use futures_core::Stream; // for mpsc::Receiver
#[cfg(feature = "stream")]
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -17,14 +15,16 @@ use http_body::{Body as HttpBody, SizeHint};
use super::DecodedLength;
#[cfg(feature = "stream")]
use crate::common::sync_wrapper::SyncWrapper;
-use crate::common::{task, watch, Pin, Poll};
+use crate::common::Future;
#[cfg(any(feature = "http1", feature = "http2"))]
#[cfg(feature = "client")]
-use crate::common::{Future, Never};
+use crate::common::Never;
+use crate::common::{task, watch, Pin, Poll};
#[cfg(all(feature = "http2", any(feature = "client", feature = "server")))]
use crate::proto::h2::ping;
type BodySender = mpsc::Sender<Result<Bytes, crate::Error>>;
+type TrailersSender = oneshot::Sender<HeaderMap>;
/// A stream of `Bytes`, used when receiving bodies.
///
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -43,7 +43,8 @@ enum Kind {
Chan {
content_length: DecodedLength,
want_tx: watch::Sender,
- rx: mpsc::Receiver<Result<Bytes, crate::Error>>,
+ data_rx: mpsc::Receiver<Result<Bytes, crate::Error>>,
+ trailers_rx: oneshot::Receiver<HeaderMap>,
},
#[cfg(all(feature = "http2", any(feature = "client", feature = "server")))]
H2 {
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -106,7 +107,8 @@ enum DelayEof {
#[must_use = "Sender does nothing unless sent on"]
pub struct Sender {
want_rx: watch::Receiver,
- tx: BodySender,
+ data_tx: BodySender,
+ trailers_tx: Option<TrailersSender>,
}
const WANT_PENDING: usize = 1;
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -137,7 +139,8 @@ impl Body {
}
pub(crate) fn new_channel(content_length: DecodedLength, wanter: bool) -> (Sender, Body) {
- let (tx, rx) = mpsc::channel(0);
+ let (data_tx, data_rx) = mpsc::channel(0);
+ let (trailers_tx, trailers_rx) = oneshot::channel();
// If wanter is true, `Sender::poll_ready()` won't becoming ready
// until the `Body` has been polled for data once.
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -145,11 +148,16 @@ impl Body {
let (want_tx, want_rx) = watch::channel(want);
- let tx = Sender { want_rx, tx };
+ let tx = Sender {
+ want_rx,
+ data_tx,
+ trailers_tx: Some(trailers_tx),
+ };
let rx = Body::new(Kind::Chan {
content_length,
want_tx,
- rx,
+ data_rx,
+ trailers_rx,
});
(tx, rx)
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -282,12 +290,13 @@ impl Body {
Kind::Once(ref mut val) => Poll::Ready(val.take().map(Ok)),
Kind::Chan {
content_length: ref mut len,
- ref mut rx,
+ ref mut data_rx,
ref mut want_tx,
+ ..
} => {
want_tx.send(WANT_READY);
- match ready!(Pin::new(rx).poll_next(cx)?) {
+ match ready!(Pin::new(data_rx).poll_next(cx)?) {
Some(chunk) => {
len.sub_if(chunk.len() as u64);
Poll::Ready(Some(Ok(chunk)))
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -368,10 +377,15 @@ impl HttpBody for Body {
}
Err(e) => Poll::Ready(Err(crate::Error::new_h2(e))),
},
-
+ Kind::Chan {
+ ref mut trailers_rx,
+ ..
+ } => match ready!(Pin::new(trailers_rx).poll(cx)) {
+ Ok(t) => Poll::Ready(Ok(Some(t))),
+ Err(_) => Poll::Ready(Ok(None)),
+ },
#[cfg(feature = "ffi")]
Kind::Ffi(ref mut body) => body.poll_trailers(cx),
-
_ => Poll::Ready(Ok(None)),
}
}
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -527,7 +541,7 @@ impl Sender {
pub fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<crate::Result<()>> {
// Check if the receiver end has tried polling for the body yet
ready!(self.poll_want(cx)?);
- self.tx
+ self.data_tx
.poll_ready(cx)
.map_err(|_| crate::Error::new_closed())
}
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -545,14 +559,23 @@ impl Sender {
futures_util::future::poll_fn(|cx| self.poll_ready(cx)).await
}
- /// Send data on this channel when it is ready.
+ /// Send data on data channel when it is ready.
pub async fn send_data(&mut self, chunk: Bytes) -> crate::Result<()> {
self.ready().await?;
- self.tx
+ self.data_tx
.try_send(Ok(chunk))
.map_err(|_| crate::Error::new_closed())
}
+ /// Send trailers on trailers channel.
+ pub async fn send_trailers(&mut self, trailers: HeaderMap) -> crate::Result<()> {
+ let tx = match self.trailers_tx.take() {
+ Some(tx) => tx,
+ None => return Err(crate::Error::new_closed()),
+ };
+ tx.send(trailers).map_err(|_| crate::Error::new_closed())
+ }
+
/// Try to send data on this channel.
///
/// # Errors
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -566,7 +589,7 @@ impl Sender {
/// that doesn't have an async context. If in an async context, prefer
/// `send_data()` instead.
pub fn try_send_data(&mut self, chunk: Bytes) -> Result<(), Bytes> {
- self.tx
+ self.data_tx
.try_send(Ok(chunk))
.map_err(|err| err.into_inner().expect("just sent Ok"))
}
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -574,7 +597,7 @@ impl Sender {
/// Aborts the body in an abnormal fashion.
pub fn abort(self) {
let _ = self
- .tx
+ .data_tx
// clone so the send works even if buffer is full
.clone()
.try_send(Err(crate::Error::new_body_write_aborted()));
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -582,7 +605,7 @@ impl Sender {
#[cfg(feature = "http1")]
pub(crate) fn send_error(&mut self, err: crate::Error) {
- let _ = self.tx.try_send(Err(err));
+ let _ = self.data_tx.try_send(Err(err));
}
}
|
2021-01-06T15:58:46Z
| 2,387
|
Add sending trailers on Body channel
The `Body::channel()` variant is a common simple way to send data as a `Body`. It would be useful to also be able to send trailers on it.
### Implementation
The code to change is on the `hyper::body::Sender`, and the relevant `Kind` variant. I could imagine two possible ways to implement this, and I'm not sure which is better yet.
1. Change the internal mpsc channel to send `enum Msg { Data(Bytes), Trailers(HeaderMap) }`. However, the `Kind::Chan` variant would need to grow some cache slots, since `poll_data` could pop a `Msg::Trailers`, and then that would need to be cached so that `poll_trailers` returns it.
2. Add a `oneshot` channel to send the trailers. The downside here is the increased size of `Sender` and `Kind::Channel` (they both are carrying an extra pointer), and the extra allocation.
I think the 2nd option is probably going to be better, but measurements are king. We should compare the changes to the `benches/end_to_end` results to see if the added feature makes much of a difference. If it does, we could consider making an internal "skinnier channel" that is used by the h1 dispatcher.
|
hyperium__hyper-2387
|
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -628,7 +651,7 @@ mod tests {
assert_eq!(
mem::size_of::<Sender>(),
- mem::size_of::<usize>() * 4,
+ mem::size_of::<usize>() * 5,
"Sender"
);
|
hyperium/hyper
|
a24f0c0af8e1f4c6b7cc3a47c83eb6e4af88aca6
|
|
[
"2376"
] |
0.14
|
3d6bdbe85006e623eae83d7c3ce9a45680d6c2c8
|
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -27,6 +27,7 @@ use super::{Connected, Connection};
///
/// Sets the [`HttpInfo`](HttpInfo) value on responses, which includes
/// transport information such as the remote socket address used.
+#[cfg_attr(docsrs, doc(cfg(feature = "tcp")))]
#[derive(Clone)]
pub struct HttpConnector<R = GaiResolver> {
config: Arc<Config>,
diff --git a/src/client/connect/mod.rs b/src/client/connect/mod.rs
--- a/src/client/connect/mod.rs
+++ b/src/client/connect/mod.rs
@@ -83,13 +83,20 @@ use std::fmt;
use ::http::Extensions;
-#[cfg(feature = "tcp")]
-pub mod dns;
-#[cfg(feature = "tcp")]
-mod http;
-#[cfg(feature = "tcp")]
-pub use self::http::{HttpConnector, HttpInfo};
-pub use self::sealed::Connect;
+cfg_feature! {
+ #![feature = "tcp"]
+
+ pub use self::http::{HttpConnector, HttpInfo};
+
+ pub mod dns;
+ mod http;
+}
+
+cfg_feature! {
+ #![any(feature = "http1", feature = "http2")]
+
+ pub use self::sealed::Connect;
+}
/// Describes a type returned by a connector.
pub trait Connection {
diff --git a/src/client/connect/mod.rs b/src/client/connect/mod.rs
--- a/src/client/connect/mod.rs
+++ b/src/client/connect/mod.rs
@@ -260,6 +267,7 @@ where
}
}
+#[cfg(any(feature = "http1", feature = "http2"))]
pub(super) mod sealed {
use std::error::Error as StdError;
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -10,7 +10,7 @@ use futures_channel::oneshot;
#[cfg(feature = "runtime")]
use tokio::time::{Duration, Instant, Interval};
-use super::Ver;
+use super::client::Ver;
use crate::common::{task, exec::Exec, Future, Pin, Poll, Unpin};
// FIXME: allow() required due to `impl Trait` leaking types to this lint
diff --git a/src/lib.rs b/src/lib.rs
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -58,7 +58,11 @@
#[doc(hidden)]
pub use http;
-#[cfg(any(feature = "http1", feature = "http2"))]
+#[cfg(any(
+ feature = "http1",
+ feature = "http2",
+ all(feature = "client", feature = "tcp")
+))]
#[macro_use]
extern crate tracing;
diff --git a/src/lib.rs b/src/lib.rs
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -89,9 +93,11 @@ cfg_proto! {
}
cfg_feature! {
- #![all(feature = "client", any(feature = "http1", feature = "http2"))]
+ #![all(feature = "client")]
pub mod client;
+ #[cfg(any(feature = "http1", feature = "http2"))]
+ #[doc(no_inline)]
pub use crate::client::Client;
}
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -91,6 +91,7 @@ where
self.read_buf_strategy = ReadStrategy::Exact(sz);
}
+ #[cfg(feature = "server")]
pub fn set_write_strategy_flatten(&mut self) {
// this should always be called only at construction time,
// so this assert is here to catch myself
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -475,6 +476,7 @@ impl<B> WriteBuf<B>
where
B: Buf,
{
+ #[cfg(feature = "server")]
fn set_strategy(&mut self, strategy: WriteStrategy) {
self.strategy = strategy;
}
|
2020-12-24T23:14:58Z
| 2,377
|
Connector implementation crates have to pick http1/http2
The low-level connection APIs like `Connected`/`Connection`/`HttpConnector` all require one of `http1` or `http2` to be set. This means that external crates like `hyper-openssl` have to pick one of those features to enable, which is kind of out of scope for what they actually need to do. It would probably make sense for those bits of the client API to only require `client` and not `http1` or `http2` to make things less weird.
|
hyperium__hyper-2377
|
diff --git /dev/null b/src/client/client.rs
new file mode 100644
--- /dev/null
+++ b/src/client/client.rs
@@ -0,0 +1,1233 @@
+use std::error::Error as StdError;
+use std::fmt;
+use std::mem;
+use std::time::Duration;
+
+use futures_channel::oneshot;
+use futures_util::future::{self, Either, FutureExt as _, TryFutureExt as _};
+use http::header::{HeaderValue, HOST};
+use http::uri::Scheme;
+use http::{Method, Request, Response, Uri, Version};
+
+use super::conn;
+use super::connect::{self, sealed::Connect, Alpn, Connected, Connection};
+use super::pool::{self, Key as PoolKey, Pool, Poolable, Pooled, Reservation};
+#[cfg(feature = "tcp")]
+use super::HttpConnector;
+use crate::body::{Body, HttpBody};
+use crate::common::{exec::BoxSendFuture, lazy as hyper_lazy, task, Future, Lazy, Pin, Poll};
+use crate::rt::Executor;
+
+/// A Client to make outgoing HTTP requests.
+#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))]
+pub struct Client<C, B = Body> {
+ config: Config,
+ conn_builder: conn::Builder,
+ connector: C,
+ pool: Pool<PoolClient<B>>,
+}
+
+#[derive(Clone, Copy, Debug)]
+struct Config {
+ retry_canceled_requests: bool,
+ set_host: bool,
+ ver: Ver,
+}
+
+/// A `Future` that will resolve to an HTTP Response.
+///
+/// This is returned by `Client::request` (and `Client::get`).
+#[must_use = "futures do nothing unless polled"]
+pub struct ResponseFuture {
+ inner: Pin<Box<dyn Future<Output = crate::Result<Response<Body>>> + Send>>,
+}
+
+// ===== impl Client =====
+
+#[cfg(feature = "tcp")]
+impl Client<HttpConnector, Body> {
+ /// Create a new Client with the default [config](Builder).
+ ///
+ /// # Note
+ ///
+ /// The default connector does **not** handle TLS. Speaking to `https`
+ /// destinations will require [configuring a connector that implements
+ /// TLS](https://hyper.rs/guides/client/configuration).
+ #[inline]
+ pub fn new() -> Client<HttpConnector, Body> {
+ Builder::default().build_http()
+ }
+}
+
+#[cfg(feature = "tcp")]
+impl Default for Client<HttpConnector, Body> {
+ fn default() -> Client<HttpConnector, Body> {
+ Client::new()
+ }
+}
+
+impl Client<(), Body> {
+ /// Create a builder to configure a new `Client`.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # #[cfg(feature = "runtime")]
+ /// # fn run () {
+ /// use std::time::Duration;
+ /// use hyper::Client;
+ ///
+ /// let client = Client::builder()
+ /// .pool_idle_timeout(Duration::from_secs(30))
+ /// .http2_only(true)
+ /// .build_http();
+ /// # let infer: Client<_, hyper::Body> = client;
+ /// # drop(infer);
+ /// # }
+ /// # fn main() {}
+ /// ```
+ #[inline]
+ pub fn builder() -> Builder {
+ Builder::default()
+ }
+}
+
+impl<C, B> Client<C, B>
+where
+ C: Connect + Clone + Send + Sync + 'static,
+ B: HttpBody + Send + 'static,
+ B::Data: Send,
+ B::Error: Into<Box<dyn StdError + Send + Sync>>,
+{
+ /// Send a `GET` request to the supplied `Uri`.
+ ///
+ /// # Note
+ ///
+ /// This requires that the `HttpBody` type have a `Default` implementation.
+ /// It *should* return an "empty" version of itself, such that
+ /// `HttpBody::is_end_stream` is `true`.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # #[cfg(feature = "runtime")]
+ /// # fn run () {
+ /// use hyper::{Client, Uri};
+ ///
+ /// let client = Client::new();
+ ///
+ /// let future = client.get(Uri::from_static("http://httpbin.org/ip"));
+ /// # }
+ /// # fn main() {}
+ /// ```
+ pub fn get(&self, uri: Uri) -> ResponseFuture
+ where
+ B: Default,
+ {
+ let body = B::default();
+ if !body.is_end_stream() {
+ warn!("default HttpBody used for get() does not return true for is_end_stream");
+ }
+
+ let mut req = Request::new(body);
+ *req.uri_mut() = uri;
+ self.request(req)
+ }
+
+ /// Send a constructed `Request` using this `Client`.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # #[cfg(feature = "runtime")]
+ /// # fn run () {
+ /// use hyper::{Body, Client, Request};
+ ///
+ /// let client = Client::new();
+ ///
+ /// let req = Request::builder()
+ /// .method("POST")
+ /// .uri("http://httpin.org/post")
+ /// .body(Body::from("Hallo!"))
+ /// .expect("request builder");
+ ///
+ /// let future = client.request(req);
+ /// # }
+ /// # fn main() {}
+ /// ```
+ pub fn request(&self, mut req: Request<B>) -> ResponseFuture {
+ let is_http_connect = req.method() == Method::CONNECT;
+ match req.version() {
+ Version::HTTP_11 => (),
+ Version::HTTP_10 => {
+ if is_http_connect {
+ warn!("CONNECT is not allowed for HTTP/1.0");
+ return ResponseFuture::new(Box::new(future::err(
+ crate::Error::new_user_unsupported_request_method(),
+ )));
+ }
+ }
+ other_h2 @ Version::HTTP_2 => {
+ if self.config.ver != Ver::Http2 {
+ return ResponseFuture::error_version(other_h2);
+ }
+ }
+ // completely unsupported HTTP version (like HTTP/0.9)!
+ other => return ResponseFuture::error_version(other),
+ };
+
+ let pool_key = match extract_domain(req.uri_mut(), is_http_connect) {
+ Ok(s) => s,
+ Err(err) => {
+ return ResponseFuture::new(Box::new(future::err(err)));
+ }
+ };
+
+ ResponseFuture::new(Box::new(self.retryably_send_request(req, pool_key)))
+ }
+
+ fn retryably_send_request(
+ &self,
+ req: Request<B>,
+ pool_key: PoolKey,
+ ) -> impl Future<Output = crate::Result<Response<Body>>> {
+ let client = self.clone();
+ let uri = req.uri().clone();
+
+ let mut send_fut = client.send_request(req, pool_key.clone());
+ future::poll_fn(move |cx| loop {
+ match ready!(Pin::new(&mut send_fut).poll(cx)) {
+ Ok(resp) => return Poll::Ready(Ok(resp)),
+ Err(ClientError::Normal(err)) => return Poll::Ready(Err(err)),
+ Err(ClientError::Canceled {
+ connection_reused,
+ mut req,
+ reason,
+ }) => {
+ if !client.config.retry_canceled_requests || !connection_reused {
+ // if client disabled, don't retry
+ // a fresh connection means we definitely can't retry
+ return Poll::Ready(Err(reason));
+ }
+
+ trace!(
+ "unstarted request canceled, trying again (reason={:?})",
+ reason
+ );
+ *req.uri_mut() = uri.clone();
+ send_fut = client.send_request(req, pool_key.clone());
+ }
+ }
+ })
+ }
+
+ fn send_request(
+ &self,
+ mut req: Request<B>,
+ pool_key: PoolKey,
+ ) -> impl Future<Output = Result<Response<Body>, ClientError<B>>> + Unpin {
+ let conn = self.connection_for(pool_key);
+
+ let set_host = self.config.set_host;
+ let executor = self.conn_builder.exec.clone();
+ conn.and_then(move |mut pooled| {
+ if pooled.is_http1() {
+ if set_host {
+ let uri = req.uri().clone();
+ req.headers_mut().entry(HOST).or_insert_with(|| {
+ let hostname = uri.host().expect("authority implies host");
+ if let Some(port) = uri.port() {
+ let s = format!("{}:{}", hostname, port);
+ HeaderValue::from_str(&s)
+ } else {
+ HeaderValue::from_str(hostname)
+ }
+ .expect("uri host is valid header value")
+ });
+ }
+
+ // CONNECT always sends authority-form, so check it first...
+ if req.method() == Method::CONNECT {
+ authority_form(req.uri_mut());
+ } else if pooled.conn_info.is_proxied {
+ absolute_form(req.uri_mut());
+ } else {
+ origin_form(req.uri_mut());
+ };
+ } else if req.method() == Method::CONNECT {
+ debug!("client does not support CONNECT requests over HTTP2");
+ return Either::Left(future::err(ClientError::Normal(
+ crate::Error::new_user_unsupported_request_method(),
+ )));
+ }
+
+ let fut = pooled
+ .send_request_retryable(req)
+ .map_err(ClientError::map_with_reused(pooled.is_reused()));
+
+ // If the Connector included 'extra' info, add to Response...
+ let extra_info = pooled.conn_info.extra.clone();
+ let fut = fut.map_ok(move |mut res| {
+ if let Some(extra) = extra_info {
+ extra.set(res.extensions_mut());
+ }
+ res
+ });
+
+ // As of futures@0.1.21, there is a race condition in the mpsc
+ // channel, such that sending when the receiver is closing can
+ // result in the message being stuck inside the queue. It won't
+ // ever notify until the Sender side is dropped.
+ //
+ // To counteract this, we must check if our senders 'want' channel
+ // has been closed after having tried to send. If so, error out...
+ if pooled.is_closed() {
+ return Either::Right(Either::Left(fut));
+ }
+
+ Either::Right(Either::Right(fut.map_ok(move |mut res| {
+ // If pooled is HTTP/2, we can toss this reference immediately.
+ //
+ // when pooled is dropped, it will try to insert back into the
+ // pool. To delay that, spawn a future that completes once the
+ // sender is ready again.
+ //
+ // This *should* only be once the related `Connection` has polled
+ // for a new request to start.
+ //
+ // It won't be ready if there is a body to stream.
+ if pooled.is_http2() || !pooled.is_pool_enabled() || pooled.is_ready() {
+ drop(pooled);
+ } else if !res.body().is_end_stream() {
+ let (delayed_tx, delayed_rx) = oneshot::channel();
+ res.body_mut().delayed_eof(delayed_rx);
+ let on_idle = future::poll_fn(move |cx| pooled.poll_ready(cx)).map(move |_| {
+ // At this point, `pooled` is dropped, and had a chance
+ // to insert into the pool (if conn was idle)
+ drop(delayed_tx);
+ });
+
+ executor.execute(on_idle);
+ } else {
+ // There's no body to delay, but the connection isn't
+ // ready yet. Only re-insert when it's ready
+ let on_idle = future::poll_fn(move |cx| pooled.poll_ready(cx)).map(|_| ());
+
+ executor.execute(on_idle);
+ }
+ res
+ })))
+ })
+ }
+
+ fn connection_for(
+ &self,
+ pool_key: PoolKey,
+ ) -> impl Future<Output = Result<Pooled<PoolClient<B>>, ClientError<B>>> {
+ // This actually races 2 different futures to try to get a ready
+ // connection the fastest, and to reduce connection churn.
+ //
+ // - If the pool has an idle connection waiting, that's used
+ // immediately.
+ // - Otherwise, the Connector is asked to start connecting to
+ // the destination Uri.
+ // - Meanwhile, the pool Checkout is watching to see if any other
+ // request finishes and tries to insert an idle connection.
+ // - If a new connection is started, but the Checkout wins after
+ // (an idle connection became available first), the started
+ // connection future is spawned into the runtime to complete,
+ // and then be inserted into the pool as an idle connection.
+ let checkout = self.pool.checkout(pool_key.clone());
+ let connect = self.connect_to(pool_key);
+
+ let executor = self.conn_builder.exec.clone();
+ // The order of the `select` is depended on below...
+ future::select(checkout, connect).then(move |either| match either {
+ // Checkout won, connect future may have been started or not.
+ //
+ // If it has, let it finish and insert back into the pool,
+ // so as to not waste the socket...
+ Either::Left((Ok(checked_out), connecting)) => {
+ // This depends on the `select` above having the correct
+ // order, such that if the checkout future were ready
+ // immediately, the connect future will never have been
+ // started.
+ //
+ // If it *wasn't* ready yet, then the connect future will
+ // have been started...
+ if connecting.started() {
+ let bg = connecting
+ .map_err(|err| {
+ trace!("background connect error: {}", err);
+ })
+ .map(|_pooled| {
+ // dropping here should just place it in
+ // the Pool for us...
+ });
+ // An execute error here isn't important, we're just trying
+ // to prevent a waste of a socket...
+ executor.execute(bg);
+ }
+ Either::Left(future::ok(checked_out))
+ }
+ // Connect won, checkout can just be dropped.
+ Either::Right((Ok(connected), _checkout)) => Either::Left(future::ok(connected)),
+ // Either checkout or connect could get canceled:
+ //
+ // 1. Connect is canceled if this is HTTP/2 and there is
+ // an outstanding HTTP/2 connecting task.
+ // 2. Checkout is canceled if the pool cannot deliver an
+ // idle connection reliably.
+ //
+ // In both cases, we should just wait for the other future.
+ Either::Left((Err(err), connecting)) => Either::Right(Either::Left({
+ if err.is_canceled() {
+ Either::Left(connecting.map_err(ClientError::Normal))
+ } else {
+ Either::Right(future::err(ClientError::Normal(err)))
+ }
+ })),
+ Either::Right((Err(err), checkout)) => Either::Right(Either::Right({
+ if err.is_canceled() {
+ Either::Left(checkout.map_err(ClientError::Normal))
+ } else {
+ Either::Right(future::err(ClientError::Normal(err)))
+ }
+ })),
+ })
+ }
+
+ fn connect_to(
+ &self,
+ pool_key: PoolKey,
+ ) -> impl Lazy<Output = crate::Result<Pooled<PoolClient<B>>>> + Unpin {
+ let executor = self.conn_builder.exec.clone();
+ let pool = self.pool.clone();
+ #[cfg(not(feature = "http2"))]
+ let conn_builder = self.conn_builder.clone();
+ #[cfg(feature = "http2")]
+ let mut conn_builder = self.conn_builder.clone();
+ let ver = self.config.ver;
+ let is_ver_h2 = ver == Ver::Http2;
+ let connector = self.connector.clone();
+ let dst = domain_as_uri(pool_key.clone());
+ hyper_lazy(move || {
+ // Try to take a "connecting lock".
+ //
+ // If the pool_key is for HTTP/2, and there is already a
+ // connection being established, then this can't take a
+ // second lock. The "connect_to" future is Canceled.
+ let connecting = match pool.connecting(&pool_key, ver) {
+ Some(lock) => lock,
+ None => {
+ let canceled =
+ crate::Error::new_canceled().with("HTTP/2 connection in progress");
+ return Either::Right(future::err(canceled));
+ }
+ };
+ Either::Left(
+ connector
+ .connect(connect::sealed::Internal, dst)
+ .map_err(crate::Error::new_connect)
+ .and_then(move |io| {
+ let connected = io.connected();
+ // If ALPN is h2 and we aren't http2_only already,
+ // then we need to convert our pool checkout into
+ // a single HTTP2 one.
+ let connecting = if connected.alpn == Alpn::H2 && !is_ver_h2 {
+ match connecting.alpn_h2(&pool) {
+ Some(lock) => {
+ trace!("ALPN negotiated h2, updating pool");
+ lock
+ }
+ None => {
+ // Another connection has already upgraded,
+ // the pool checkout should finish up for us.
+ let canceled = crate::Error::new_canceled()
+ .with("ALPN upgraded to HTTP/2");
+ return Either::Right(future::err(canceled));
+ }
+ }
+ } else {
+ connecting
+ };
+
+ #[cfg_attr(not(feature = "http2"), allow(unused))]
+ let is_h2 = is_ver_h2 || connected.alpn == Alpn::H2;
+ #[cfg(feature = "http2")]
+ {
+ conn_builder.http2_only(is_h2);
+ }
+
+ Either::Left(Box::pin(
+ conn_builder
+ .handshake(io)
+ .and_then(move |(tx, conn)| {
+ trace!(
+ "handshake complete, spawning background dispatcher task"
+ );
+ executor.execute(
+ conn.map_err(|e| debug!("client connection error: {}", e))
+ .map(|_| ()),
+ );
+
+ // Wait for 'conn' to ready up before we
+ // declare this tx as usable
+ tx.when_ready()
+ })
+ .map_ok(move |tx| {
+ let tx = {
+ #[cfg(feature = "http2")]
+ {
+ if is_h2 {
+ PoolTx::Http2(tx.into_http2())
+ } else {
+ PoolTx::Http1(tx)
+ }
+ }
+ #[cfg(not(feature = "http2"))]
+ PoolTx::Http1(tx)
+ };
+ pool.pooled(
+ connecting,
+ PoolClient {
+ conn_info: connected,
+ tx,
+ },
+ )
+ }),
+ ))
+ }),
+ )
+ })
+ }
+}
+
+impl<C, B> tower_service::Service<Request<B>> for Client<C, B>
+where
+ C: Connect + Clone + Send + Sync + 'static,
+ B: HttpBody + Send + 'static,
+ B::Data: Send,
+ B::Error: Into<Box<dyn StdError + Send + Sync>>,
+{
+ type Response = Response<Body>;
+ type Error = crate::Error;
+ type Future = ResponseFuture;
+
+ fn poll_ready(&mut self, _: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>> {
+ Poll::Ready(Ok(()))
+ }
+
+ fn call(&mut self, req: Request<B>) -> Self::Future {
+ self.request(req)
+ }
+}
+
+impl<C, B> tower_service::Service<Request<B>> for &'_ Client<C, B>
+where
+ C: Connect + Clone + Send + Sync + 'static,
+ B: HttpBody + Send + 'static,
+ B::Data: Send,
+ B::Error: Into<Box<dyn StdError + Send + Sync>>,
+{
+ type Response = Response<Body>;
+ type Error = crate::Error;
+ type Future = ResponseFuture;
+
+ fn poll_ready(&mut self, _: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>> {
+ Poll::Ready(Ok(()))
+ }
+
+ fn call(&mut self, req: Request<B>) -> Self::Future {
+ self.request(req)
+ }
+}
+
+impl<C: Clone, B> Clone for Client<C, B> {
+ fn clone(&self) -> Client<C, B> {
+ Client {
+ config: self.config.clone(),
+ conn_builder: self.conn_builder.clone(),
+ connector: self.connector.clone(),
+ pool: self.pool.clone(),
+ }
+ }
+}
+
+impl<C, B> fmt::Debug for Client<C, B> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Client").finish()
+ }
+}
+
+// ===== impl ResponseFuture =====
+
+impl ResponseFuture {
+ fn new(fut: Box<dyn Future<Output = crate::Result<Response<Body>>> + Send>) -> Self {
+ Self { inner: fut.into() }
+ }
+
+ fn error_version(ver: Version) -> Self {
+ warn!("Request has unsupported version \"{:?}\"", ver);
+ ResponseFuture::new(Box::new(future::err(
+ crate::Error::new_user_unsupported_version(),
+ )))
+ }
+}
+
+impl fmt::Debug for ResponseFuture {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.pad("Future<Response>")
+ }
+}
+
+impl Future for ResponseFuture {
+ type Output = crate::Result<Response<Body>>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
+ Pin::new(&mut self.inner).poll(cx)
+ }
+}
+
+// ===== impl PoolClient =====
+
+// FIXME: allow() required due to `impl Trait` leaking types to this lint
+#[allow(missing_debug_implementations)]
+struct PoolClient<B> {
+ conn_info: Connected,
+ tx: PoolTx<B>,
+}
+
+enum PoolTx<B> {
+ Http1(conn::SendRequest<B>),
+ #[cfg(feature = "http2")]
+ Http2(conn::Http2SendRequest<B>),
+}
+
+impl<B> PoolClient<B> {
+ fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<crate::Result<()>> {
+ match self.tx {
+ PoolTx::Http1(ref mut tx) => tx.poll_ready(cx),
+ #[cfg(feature = "http2")]
+ PoolTx::Http2(_) => Poll::Ready(Ok(())),
+ }
+ }
+
+ fn is_http1(&self) -> bool {
+ !self.is_http2()
+ }
+
+ fn is_http2(&self) -> bool {
+ match self.tx {
+ PoolTx::Http1(_) => false,
+ #[cfg(feature = "http2")]
+ PoolTx::Http2(_) => true,
+ }
+ }
+
+ fn is_ready(&self) -> bool {
+ match self.tx {
+ PoolTx::Http1(ref tx) => tx.is_ready(),
+ #[cfg(feature = "http2")]
+ PoolTx::Http2(ref tx) => tx.is_ready(),
+ }
+ }
+
+ fn is_closed(&self) -> bool {
+ match self.tx {
+ PoolTx::Http1(ref tx) => tx.is_closed(),
+ #[cfg(feature = "http2")]
+ PoolTx::Http2(ref tx) => tx.is_closed(),
+ }
+ }
+}
+
+impl<B: HttpBody + 'static> PoolClient<B> {
+ fn send_request_retryable(
+ &mut self,
+ req: Request<B>,
+ ) -> impl Future<Output = Result<Response<Body>, (crate::Error, Option<Request<B>>)>>
+ where
+ B: Send,
+ {
+ match self.tx {
+ #[cfg(not(feature = "http2"))]
+ PoolTx::Http1(ref mut tx) => tx.send_request_retryable(req),
+ #[cfg(feature = "http2")]
+ PoolTx::Http1(ref mut tx) => Either::Left(tx.send_request_retryable(req)),
+ #[cfg(feature = "http2")]
+ PoolTx::Http2(ref mut tx) => Either::Right(tx.send_request_retryable(req)),
+ }
+ }
+}
+
+impl<B> Poolable for PoolClient<B>
+where
+ B: Send + 'static,
+{
+ fn is_open(&self) -> bool {
+ match self.tx {
+ PoolTx::Http1(ref tx) => tx.is_ready(),
+ #[cfg(feature = "http2")]
+ PoolTx::Http2(ref tx) => tx.is_ready(),
+ }
+ }
+
+ fn reserve(self) -> Reservation<Self> {
+ match self.tx {
+ PoolTx::Http1(tx) => Reservation::Unique(PoolClient {
+ conn_info: self.conn_info,
+ tx: PoolTx::Http1(tx),
+ }),
+ #[cfg(feature = "http2")]
+ PoolTx::Http2(tx) => {
+ let b = PoolClient {
+ conn_info: self.conn_info.clone(),
+ tx: PoolTx::Http2(tx.clone()),
+ };
+ let a = PoolClient {
+ conn_info: self.conn_info,
+ tx: PoolTx::Http2(tx),
+ };
+ Reservation::Shared(a, b)
+ }
+ }
+ }
+
+ fn can_share(&self) -> bool {
+ self.is_http2()
+ }
+}
+
+// ===== impl ClientError =====
+
+// FIXME: allow() required due to `impl Trait` leaking types to this lint
+#[allow(missing_debug_implementations)]
+enum ClientError<B> {
+ Normal(crate::Error),
+ Canceled {
+ connection_reused: bool,
+ req: Request<B>,
+ reason: crate::Error,
+ },
+}
+
+impl<B> ClientError<B> {
+ fn map_with_reused(conn_reused: bool) -> impl Fn((crate::Error, Option<Request<B>>)) -> Self {
+ move |(err, orig_req)| {
+ if let Some(req) = orig_req {
+ ClientError::Canceled {
+ connection_reused: conn_reused,
+ reason: err,
+ req,
+ }
+ } else {
+ ClientError::Normal(err)
+ }
+ }
+ }
+}
+
+/// A marker to identify what version a pooled connection is.
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
+pub(super) enum Ver {
+ Auto,
+ Http2,
+}
+
+fn origin_form(uri: &mut Uri) {
+ let path = match uri.path_and_query() {
+ Some(path) if path.as_str() != "/" => {
+ let mut parts = ::http::uri::Parts::default();
+ parts.path_and_query = Some(path.clone());
+ Uri::from_parts(parts).expect("path is valid uri")
+ }
+ _none_or_just_slash => {
+ debug_assert!(Uri::default() == "/");
+ Uri::default()
+ }
+ };
+ *uri = path
+}
+
+fn absolute_form(uri: &mut Uri) {
+ debug_assert!(uri.scheme().is_some(), "absolute_form needs a scheme");
+ debug_assert!(
+ uri.authority().is_some(),
+ "absolute_form needs an authority"
+ );
+ // If the URI is to HTTPS, and the connector claimed to be a proxy,
+ // then it *should* have tunneled, and so we don't want to send
+ // absolute-form in that case.
+ if uri.scheme() == Some(&Scheme::HTTPS) {
+ origin_form(uri);
+ }
+}
+
+fn authority_form(uri: &mut Uri) {
+ if let Some(path) = uri.path_and_query() {
+ // `https://hyper.rs` would parse with `/` path, don't
+ // annoy people about that...
+ if path != "/" {
+ warn!("HTTP/1.1 CONNECT request stripping path: {:?}", path);
+ }
+ }
+ *uri = match uri.authority() {
+ Some(auth) => {
+ let mut parts = ::http::uri::Parts::default();
+ parts.authority = Some(auth.clone());
+ Uri::from_parts(parts).expect("authority is valid")
+ }
+ None => {
+ unreachable!("authority_form with relative uri");
+ }
+ };
+}
+
+fn extract_domain(uri: &mut Uri, is_http_connect: bool) -> crate::Result<PoolKey> {
+ let uri_clone = uri.clone();
+ match (uri_clone.scheme(), uri_clone.authority()) {
+ (Some(scheme), Some(auth)) => Ok((scheme.clone(), auth.clone())),
+ (None, Some(auth)) if is_http_connect => {
+ let scheme = match auth.port_u16() {
+ Some(443) => {
+ set_scheme(uri, Scheme::HTTPS);
+ Scheme::HTTPS
+ }
+ _ => {
+ set_scheme(uri, Scheme::HTTP);
+ Scheme::HTTP
+ }
+ };
+ Ok((scheme, auth.clone()))
+ }
+ _ => {
+ debug!("Client requires absolute-form URIs, received: {:?}", uri);
+ Err(crate::Error::new_user_absolute_uri_required())
+ }
+ }
+}
+
+fn domain_as_uri((scheme, auth): PoolKey) -> Uri {
+ http::uri::Builder::new()
+ .scheme(scheme)
+ .authority(auth)
+ .path_and_query("/")
+ .build()
+ .expect("domain is valid Uri")
+}
+
+fn set_scheme(uri: &mut Uri, scheme: Scheme) {
+ debug_assert!(
+ uri.scheme().is_none(),
+ "set_scheme expects no existing scheme"
+ );
+ let old = mem::replace(uri, Uri::default());
+ let mut parts: ::http::uri::Parts = old.into();
+ parts.scheme = Some(scheme);
+ parts.path_and_query = Some("/".parse().expect("slash is a valid path"));
+ *uri = Uri::from_parts(parts).expect("scheme is valid");
+}
+
+/// A builder to configure a new [`Client`](Client).
+///
+/// # Example
+///
+/// ```
+/// # #[cfg(feature = "runtime")]
+/// # fn run () {
+/// use std::time::Duration;
+/// use hyper::Client;
+///
+/// let client = Client::builder()
+/// .pool_idle_timeout(Duration::from_secs(30))
+/// .http2_only(true)
+/// .build_http();
+/// # let infer: Client<_, hyper::Body> = client;
+/// # drop(infer);
+/// # }
+/// # fn main() {}
+/// ```
+#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))]
+#[derive(Clone)]
+pub struct Builder {
+ client_config: Config,
+ conn_builder: conn::Builder,
+ pool_config: pool::Config,
+}
+
+impl Default for Builder {
+ fn default() -> Self {
+ Self {
+ client_config: Config {
+ retry_canceled_requests: true,
+ set_host: true,
+ ver: Ver::Auto,
+ },
+ conn_builder: conn::Builder::new(),
+ pool_config: pool::Config {
+ idle_timeout: Some(Duration::from_secs(90)),
+ max_idle_per_host: std::usize::MAX,
+ },
+ }
+ }
+}
+
+impl Builder {
+ #[doc(hidden)]
+ #[deprecated(
+ note = "name is confusing, to disable the connection pool, call pool_max_idle_per_host(0)"
+ )]
+ pub fn keep_alive(&mut self, val: bool) -> &mut Self {
+ if !val {
+ // disable
+ self.pool_max_idle_per_host(0)
+ } else if self.pool_config.max_idle_per_host == 0 {
+ // enable
+ self.pool_max_idle_per_host(std::usize::MAX)
+ } else {
+ // already enabled
+ self
+ }
+ }
+
+ #[doc(hidden)]
+ #[deprecated(note = "renamed to `pool_idle_timeout`")]
+ pub fn keep_alive_timeout<D>(&mut self, val: D) -> &mut Self
+ where
+ D: Into<Option<Duration>>,
+ {
+ self.pool_idle_timeout(val)
+ }
+
+ /// Set an optional timeout for idle sockets being kept-alive.
+ ///
+ /// Pass `None` to disable timeout.
+ ///
+ /// Default is 90 seconds.
+ pub fn pool_idle_timeout<D>(&mut self, val: D) -> &mut Self
+ where
+ D: Into<Option<Duration>>,
+ {
+ self.pool_config.idle_timeout = val.into();
+ self
+ }
+
+ #[doc(hidden)]
+ #[deprecated(note = "renamed to `pool_max_idle_per_host`")]
+ pub fn max_idle_per_host(&mut self, max_idle: usize) -> &mut Self {
+ self.pool_config.max_idle_per_host = max_idle;
+ self
+ }
+
+ /// Sets the maximum idle connection per host allowed in the pool.
+ ///
+ /// Default is `usize::MAX` (no limit).
+ pub fn pool_max_idle_per_host(&mut self, max_idle: usize) -> &mut Self {
+ self.pool_config.max_idle_per_host = max_idle;
+ self
+ }
+
+ // HTTP/1 options
+
+ /// Sets the exact size of the read buffer to *always* use.
+ ///
+ /// Note that setting this option unsets the `http1_max_buf_size` option.
+ ///
+ /// Default is an adaptive read buffer.
+ pub fn http1_read_buf_exact_size(&mut self, sz: usize) -> &mut Self {
+ self.conn_builder.h1_read_buf_exact_size(Some(sz));
+ self
+ }
+
+ /// Set the maximum buffer size for the connection.
+ ///
+ /// Default is ~400kb.
+ ///
+    /// Note that setting this option unsets the `http1_read_buf_exact_size` option.
+ ///
+ /// # Panics
+ ///
+ /// The minimum value allowed is 8192. This method panics if the passed `max` is less than the minimum.
+ #[cfg(feature = "http1")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
+ pub fn http1_max_buf_size(&mut self, max: usize) -> &mut Self {
+ self.conn_builder.h1_max_buf_size(max);
+ self
+ }
+
+ /// Set whether HTTP/1 connections will write header names as title case at
+ /// the socket level.
+ ///
+ /// Note that this setting does not affect HTTP/2.
+ ///
+ /// Default is false.
+ pub fn http1_title_case_headers(&mut self, val: bool) -> &mut Self {
+ self.conn_builder.h1_title_case_headers(val);
+ self
+ }
+
+ /// Set whether the connection **must** use HTTP/2.
+ ///
+ /// The destination must either allow HTTP2 Prior Knowledge, or the
+ /// `Connect` should be configured to do use ALPN to upgrade to `h2`
+ /// as part of the connection process. This will not make the `Client`
+ /// utilize ALPN by itself.
+ ///
+ /// Note that setting this to true prevents HTTP/1 from being allowed.
+ ///
+ /// Default is false.
+ #[cfg(feature = "http2")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
+ pub fn http2_only(&mut self, val: bool) -> &mut Self {
+ self.client_config.ver = if val { Ver::Http2 } else { Ver::Auto };
+ self
+ }
+
+ /// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2
+ /// stream-level flow control.
+ ///
+ /// Passing `None` will do nothing.
+ ///
+ /// If not set, hyper will use a default.
+ ///
+ /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_INITIAL_WINDOW_SIZE
+ #[cfg(feature = "http2")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
+ pub fn http2_initial_stream_window_size(&mut self, sz: impl Into<Option<u32>>) -> &mut Self {
+ self.conn_builder
+ .http2_initial_stream_window_size(sz.into());
+ self
+ }
+
+ /// Sets the max connection-level flow control for HTTP2
+ ///
+ /// Passing `None` will do nothing.
+ ///
+ /// If not set, hyper will use a default.
+ #[cfg(feature = "http2")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
+ pub fn http2_initial_connection_window_size(
+ &mut self,
+ sz: impl Into<Option<u32>>,
+ ) -> &mut Self {
+ self.conn_builder
+ .http2_initial_connection_window_size(sz.into());
+ self
+ }
+
+ /// Sets whether to use an adaptive flow control.
+ ///
+ /// Enabling this will override the limits set in
+ /// `http2_initial_stream_window_size` and
+ /// `http2_initial_connection_window_size`.
+ #[cfg(feature = "http2")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
+ pub fn http2_adaptive_window(&mut self, enabled: bool) -> &mut Self {
+ self.conn_builder.http2_adaptive_window(enabled);
+ self
+ }
+
+ /// Sets the maximum frame size to use for HTTP2.
+ ///
+ /// Passing `None` will do nothing.
+ ///
+ /// If not set, hyper will use a default.
+ #[cfg(feature = "http2")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
+ pub fn http2_max_frame_size(&mut self, sz: impl Into<Option<u32>>) -> &mut Self {
+ self.conn_builder.http2_max_frame_size(sz);
+ self
+ }
+
+ /// Sets an interval for HTTP2 Ping frames should be sent to keep a
+ /// connection alive.
+ ///
+ /// Pass `None` to disable HTTP2 keep-alive.
+ ///
+ /// Default is currently disabled.
+ ///
+ /// # Cargo Feature
+ ///
+ /// Requires the `runtime` cargo feature to be enabled.
+ #[cfg(feature = "runtime")]
+ #[cfg(feature = "http2")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
+ pub fn http2_keep_alive_interval(
+ &mut self,
+ interval: impl Into<Option<Duration>>,
+ ) -> &mut Self {
+ self.conn_builder.http2_keep_alive_interval(interval);
+ self
+ }
+
+ /// Sets a timeout for receiving an acknowledgement of the keep-alive ping.
+ ///
+ /// If the ping is not acknowledged within the timeout, the connection will
+ /// be closed. Does nothing if `http2_keep_alive_interval` is disabled.
+ ///
+ /// Default is 20 seconds.
+ ///
+ /// # Cargo Feature
+ ///
+ /// Requires the `runtime` cargo feature to be enabled.
+ #[cfg(feature = "runtime")]
+ #[cfg(feature = "http2")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
+ pub fn http2_keep_alive_timeout(&mut self, timeout: Duration) -> &mut Self {
+ self.conn_builder.http2_keep_alive_timeout(timeout);
+ self
+ }
+
+ /// Sets whether HTTP2 keep-alive should apply while the connection is idle.
+ ///
+ /// If disabled, keep-alive pings are only sent while there are open
+ /// request/responses streams. If enabled, pings are also sent when no
+ /// streams are active. Does nothing if `http2_keep_alive_interval` is
+ /// disabled.
+ ///
+ /// Default is `false`.
+ ///
+ /// # Cargo Feature
+ ///
+ /// Requires the `runtime` cargo feature to be enabled.
+ #[cfg(feature = "runtime")]
+ #[cfg(feature = "http2")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
+ pub fn http2_keep_alive_while_idle(&mut self, enabled: bool) -> &mut Self {
+ self.conn_builder.http2_keep_alive_while_idle(enabled);
+ self
+ }
+
+ /// Set whether to retry requests that get disrupted before ever starting
+ /// to write.
+ ///
+ /// This means a request that is queued, and gets given an idle, reused
+ /// connection, and then encounters an error immediately as the idle
+ /// connection was found to be unusable.
+ ///
+ /// When this is set to `false`, the related `ResponseFuture` would instead
+ /// resolve to an `Error::Cancel`.
+ ///
+ /// Default is `true`.
+ #[inline]
+ pub fn retry_canceled_requests(&mut self, val: bool) -> &mut Self {
+ self.client_config.retry_canceled_requests = val;
+ self
+ }
+
+ /// Set whether to automatically add the `Host` header to requests.
+ ///
+ /// If true, and a request does not include a `Host` header, one will be
+ /// added automatically, derived from the authority of the `Uri`.
+ ///
+ /// Default is `true`.
+ #[inline]
+ pub fn set_host(&mut self, val: bool) -> &mut Self {
+ self.client_config.set_host = val;
+ self
+ }
+
+ /// Provide an executor to execute background `Connection` tasks.
+ pub fn executor<E>(&mut self, exec: E) -> &mut Self
+ where
+ E: Executor<BoxSendFuture> + Send + Sync + 'static,
+ {
+ self.conn_builder.executor(exec);
+ self
+ }
+
+    /// Build a client with this configuration and the default `HttpConnector`.
+ #[cfg(feature = "tcp")]
+ pub fn build_http<B>(&self) -> Client<HttpConnector, B>
+ where
+ B: HttpBody + Send,
+ B::Data: Send,
+ {
+ let mut connector = HttpConnector::new();
+ if self.pool_config.is_enabled() {
+ connector.set_keepalive(self.pool_config.idle_timeout);
+ }
+ self.build(connector)
+ }
+
+ /// Combine the configuration of this builder with a connector to create a `Client`.
+ pub fn build<C, B>(&self, connector: C) -> Client<C, B>
+ where
+ C: Connect + Clone,
+ B: HttpBody + Send,
+ B::Data: Send,
+ {
+ Client {
+ config: self.client_config,
+ conn_builder: self.conn_builder.clone(),
+ connector,
+ pool: Pool::new(self.pool_config, &self.conn_builder.exec),
+ }
+ }
+}
+
+impl fmt::Debug for Builder {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Builder")
+ .field("client_config", &self.client_config)
+ .field("conn_builder", &self.conn_builder)
+ .field("pool_config", &self.pool_config)
+ .finish()
+ }
+}
+
+#[cfg(test)]
+mod unit_tests {
+ use super::*;
+
+ #[test]
+ fn set_relative_uri_with_implicit_path() {
+ let mut uri = "http://hyper.rs".parse().unwrap();
+ origin_form(&mut uri);
+ assert_eq!(uri.to_string(), "/");
+ }
+
+ #[test]
+ fn test_origin_form() {
+ let mut uri = "http://hyper.rs/guides".parse().unwrap();
+ origin_form(&mut uri);
+ assert_eq!(uri.to_string(), "/guides");
+
+ let mut uri = "http://hyper.rs/guides?foo=bar".parse().unwrap();
+ origin_form(&mut uri);
+ assert_eq!(uri.to_string(), "/guides?foo=bar");
+ }
+
+ #[test]
+ fn test_absolute_form() {
+ let mut uri = "http://hyper.rs/guides".parse().unwrap();
+ absolute_form(&mut uri);
+ assert_eq!(uri.to_string(), "http://hyper.rs/guides");
+
+ let mut uri = "https://hyper.rs/guides".parse().unwrap();
+ absolute_form(&mut uri);
+ assert_eq!(uri.to_string(), "/guides");
+ }
+
+ #[test]
+ fn test_authority_form() {
+ let _ = pretty_env_logger::try_init();
+
+ let mut uri = "http://hyper.rs".parse().unwrap();
+ authority_form(&mut uri);
+ assert_eq!(uri.to_string(), "hyper.rs");
+
+ let mut uri = "hyper.rs".parse().unwrap();
+ authority_form(&mut uri);
+ assert_eq!(uri.to_string(), "hyper.rs");
+ }
+
+ #[test]
+ fn test_extract_domain_connect_no_port() {
+ let mut uri = "hyper.rs".parse().unwrap();
+ let (scheme, host) = extract_domain(&mut uri, true).expect("extract domain");
+ assert_eq!(scheme, *"http");
+ assert_eq!(host, "hyper.rs");
+ }
+}
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -48,1243 +48,22 @@
//! # fn main () {}
//! ```
-use std::error::Error as StdError;
-use std::fmt;
-use std::mem;
-use std::time::Duration;
-
-use futures_channel::oneshot;
-use futures_util::future::{self, Either, FutureExt as _, TryFutureExt as _};
-use http::header::{HeaderValue, HOST};
-use http::uri::Scheme;
-use http::{Method, Request, Response, Uri, Version};
-
-use self::connect::{sealed::Connect, Alpn, Connected, Connection};
-use self::pool::{Key as PoolKey, Pool, Poolable, Pooled, Reservation};
-use crate::body::{Body, HttpBody};
-use crate::common::{exec::BoxSendFuture, lazy as hyper_lazy, task, Future, Lazy, Pin, Poll};
-use crate::rt::Executor;
-
#[cfg(feature = "tcp")]
pub use self::connect::HttpConnector;
-pub mod conn;
pub mod connect;
-pub(crate) mod dispatch;
-mod pool;
-pub mod service;
#[cfg(test)]
#[cfg(feature = "runtime")]
mod tests;
-/// A Client to make outgoing HTTP requests.
-pub struct Client<C, B = Body> {
- config: Config,
- conn_builder: conn::Builder,
- connector: C,
- pool: Pool<PoolClient<B>>,
-}
-
-#[derive(Clone, Copy, Debug)]
-struct Config {
- retry_canceled_requests: bool,
- set_host: bool,
- ver: Ver,
-}
-
-/// A `Future` that will resolve to an HTTP Response.
-///
-/// This is returned by `Client::request` (and `Client::get`).
-#[must_use = "futures do nothing unless polled"]
-pub struct ResponseFuture {
- inner: Pin<Box<dyn Future<Output = crate::Result<Response<Body>>> + Send>>,
-}
-
-// ===== impl Client =====
-
-#[cfg(feature = "tcp")]
-impl Client<HttpConnector, Body> {
- /// Create a new Client with the default [config](Builder).
- ///
- /// # Note
- ///
- /// The default connector does **not** handle TLS. Speaking to `https`
- /// destinations will require [configuring a connector that implements
- /// TLS](https://hyper.rs/guides/client/configuration).
- #[inline]
- pub fn new() -> Client<HttpConnector, Body> {
- Builder::default().build_http()
- }
-}
-
-#[cfg(feature = "tcp")]
-impl Default for Client<HttpConnector, Body> {
- fn default() -> Client<HttpConnector, Body> {
- Client::new()
- }
-}
-
-impl Client<(), Body> {
- /// Create a builder to configure a new `Client`.
- ///
- /// # Example
- ///
- /// ```
- /// # #[cfg(feature = "runtime")]
- /// # fn run () {
- /// use std::time::Duration;
- /// use hyper::Client;
- ///
- /// let client = Client::builder()
- /// .pool_idle_timeout(Duration::from_secs(30))
- /// .http2_only(true)
- /// .build_http();
- /// # let infer: Client<_, hyper::Body> = client;
- /// # drop(infer);
- /// # }
- /// # fn main() {}
- /// ```
- #[inline]
- pub fn builder() -> Builder {
- Builder::default()
- }
-}
-
-impl<C, B> Client<C, B>
-where
- C: Connect + Clone + Send + Sync + 'static,
- B: HttpBody + Send + 'static,
- B::Data: Send,
- B::Error: Into<Box<dyn StdError + Send + Sync>>,
-{
- /// Send a `GET` request to the supplied `Uri`.
- ///
- /// # Note
- ///
- /// This requires that the `HttpBody` type have a `Default` implementation.
- /// It *should* return an "empty" version of itself, such that
- /// `HttpBody::is_end_stream` is `true`.
- ///
- /// # Example
- ///
- /// ```
- /// # #[cfg(feature = "runtime")]
- /// # fn run () {
- /// use hyper::{Client, Uri};
- ///
- /// let client = Client::new();
- ///
- /// let future = client.get(Uri::from_static("http://httpbin.org/ip"));
- /// # }
- /// # fn main() {}
- /// ```
- pub fn get(&self, uri: Uri) -> ResponseFuture
- where
- B: Default,
- {
- let body = B::default();
- if !body.is_end_stream() {
- warn!("default HttpBody used for get() does not return true for is_end_stream");
- }
-
- let mut req = Request::new(body);
- *req.uri_mut() = uri;
- self.request(req)
- }
-
- /// Send a constructed `Request` using this `Client`.
- ///
- /// # Example
- ///
- /// ```
- /// # #[cfg(feature = "runtime")]
- /// # fn run () {
- /// use hyper::{Body, Client, Request};
- ///
- /// let client = Client::new();
- ///
- /// let req = Request::builder()
- /// .method("POST")
- /// .uri("http://httpin.org/post")
- /// .body(Body::from("Hallo!"))
- /// .expect("request builder");
- ///
- /// let future = client.request(req);
- /// # }
- /// # fn main() {}
- /// ```
- pub fn request(&self, mut req: Request<B>) -> ResponseFuture {
- let is_http_connect = req.method() == Method::CONNECT;
- match req.version() {
- Version::HTTP_11 => (),
- Version::HTTP_10 => {
- if is_http_connect {
- warn!("CONNECT is not allowed for HTTP/1.0");
- return ResponseFuture::new(Box::new(future::err(
- crate::Error::new_user_unsupported_request_method(),
- )));
- }
- }
- other_h2 @ Version::HTTP_2 => {
- if self.config.ver != Ver::Http2 {
- return ResponseFuture::error_version(other_h2);
- }
- }
- // completely unsupported HTTP version (like HTTP/0.9)!
- other => return ResponseFuture::error_version(other),
- };
-
- let pool_key = match extract_domain(req.uri_mut(), is_http_connect) {
- Ok(s) => s,
- Err(err) => {
- return ResponseFuture::new(Box::new(future::err(err)));
- }
- };
-
- ResponseFuture::new(Box::new(self.retryably_send_request(req, pool_key)))
- }
-
- fn retryably_send_request(
- &self,
- req: Request<B>,
- pool_key: PoolKey,
- ) -> impl Future<Output = crate::Result<Response<Body>>> {
- let client = self.clone();
- let uri = req.uri().clone();
-
- let mut send_fut = client.send_request(req, pool_key.clone());
- future::poll_fn(move |cx| loop {
- match ready!(Pin::new(&mut send_fut).poll(cx)) {
- Ok(resp) => return Poll::Ready(Ok(resp)),
- Err(ClientError::Normal(err)) => return Poll::Ready(Err(err)),
- Err(ClientError::Canceled {
- connection_reused,
- mut req,
- reason,
- }) => {
- if !client.config.retry_canceled_requests || !connection_reused {
- // if client disabled, don't retry
- // a fresh connection means we definitely can't retry
- return Poll::Ready(Err(reason));
- }
-
- trace!(
- "unstarted request canceled, trying again (reason={:?})",
- reason
- );
- *req.uri_mut() = uri.clone();
- send_fut = client.send_request(req, pool_key.clone());
- }
- }
- })
- }
-
- fn send_request(
- &self,
- mut req: Request<B>,
- pool_key: PoolKey,
- ) -> impl Future<Output = Result<Response<Body>, ClientError<B>>> + Unpin {
- let conn = self.connection_for(pool_key);
-
- let set_host = self.config.set_host;
- let executor = self.conn_builder.exec.clone();
- conn.and_then(move |mut pooled| {
- if pooled.is_http1() {
- if set_host {
- let uri = req.uri().clone();
- req.headers_mut().entry(HOST).or_insert_with(|| {
- let hostname = uri.host().expect("authority implies host");
- if let Some(port) = uri.port() {
- let s = format!("{}:{}", hostname, port);
- HeaderValue::from_str(&s)
- } else {
- HeaderValue::from_str(hostname)
- }
- .expect("uri host is valid header value")
- });
- }
-
- // CONNECT always sends authority-form, so check it first...
- if req.method() == Method::CONNECT {
- authority_form(req.uri_mut());
- } else if pooled.conn_info.is_proxied {
- absolute_form(req.uri_mut());
- } else {
- origin_form(req.uri_mut());
- };
- } else if req.method() == Method::CONNECT {
- debug!("client does not support CONNECT requests over HTTP2");
- return Either::Left(future::err(ClientError::Normal(
- crate::Error::new_user_unsupported_request_method(),
- )));
- }
-
- let fut = pooled
- .send_request_retryable(req)
- .map_err(ClientError::map_with_reused(pooled.is_reused()));
-
- // If the Connector included 'extra' info, add to Response...
- let extra_info = pooled.conn_info.extra.clone();
- let fut = fut.map_ok(move |mut res| {
- if let Some(extra) = extra_info {
- extra.set(res.extensions_mut());
- }
- res
- });
-
- // As of futures@0.1.21, there is a race condition in the mpsc
- // channel, such that sending when the receiver is closing can
- // result in the message being stuck inside the queue. It won't
- // ever notify until the Sender side is dropped.
- //
- // To counteract this, we must check if our senders 'want' channel
- // has been closed after having tried to send. If so, error out...
- if pooled.is_closed() {
- return Either::Right(Either::Left(fut));
- }
-
- Either::Right(Either::Right(fut.map_ok(move |mut res| {
- // If pooled is HTTP/2, we can toss this reference immediately.
- //
- // when pooled is dropped, it will try to insert back into the
- // pool. To delay that, spawn a future that completes once the
- // sender is ready again.
- //
- // This *should* only be once the related `Connection` has polled
- // for a new request to start.
- //
- // It won't be ready if there is a body to stream.
- if pooled.is_http2() || !pooled.is_pool_enabled() || pooled.is_ready() {
- drop(pooled);
- } else if !res.body().is_end_stream() {
- let (delayed_tx, delayed_rx) = oneshot::channel();
- res.body_mut().delayed_eof(delayed_rx);
- let on_idle = future::poll_fn(move |cx| pooled.poll_ready(cx)).map(move |_| {
- // At this point, `pooled` is dropped, and had a chance
- // to insert into the pool (if conn was idle)
- drop(delayed_tx);
- });
-
- executor.execute(on_idle);
- } else {
- // There's no body to delay, but the connection isn't
- // ready yet. Only re-insert when it's ready
- let on_idle = future::poll_fn(move |cx| pooled.poll_ready(cx)).map(|_| ());
-
- executor.execute(on_idle);
- }
- res
- })))
- })
- }
-
- fn connection_for(
- &self,
- pool_key: PoolKey,
- ) -> impl Future<Output = Result<Pooled<PoolClient<B>>, ClientError<B>>> {
- // This actually races 2 different futures to try to get a ready
- // connection the fastest, and to reduce connection churn.
- //
- // - If the pool has an idle connection waiting, that's used
- // immediately.
- // - Otherwise, the Connector is asked to start connecting to
- // the destination Uri.
- // - Meanwhile, the pool Checkout is watching to see if any other
- // request finishes and tries to insert an idle connection.
- // - If a new connection is started, but the Checkout wins after
- // (an idle connection became available first), the started
- // connection future is spawned into the runtime to complete,
- // and then be inserted into the pool as an idle connection.
- let checkout = self.pool.checkout(pool_key.clone());
- let connect = self.connect_to(pool_key);
-
- let executor = self.conn_builder.exec.clone();
- // The order of the `select` is depended on below...
- future::select(checkout, connect).then(move |either| match either {
- // Checkout won, connect future may have been started or not.
- //
- // If it has, let it finish and insert back into the pool,
- // so as to not waste the socket...
- Either::Left((Ok(checked_out), connecting)) => {
- // This depends on the `select` above having the correct
- // order, such that if the checkout future were ready
- // immediately, the connect future will never have been
- // started.
- //
- // If it *wasn't* ready yet, then the connect future will
- // have been started...
- if connecting.started() {
- let bg = connecting
- .map_err(|err| {
- trace!("background connect error: {}", err);
- })
- .map(|_pooled| {
- // dropping here should just place it in
- // the Pool for us...
- });
- // An execute error here isn't important, we're just trying
- // to prevent a waste of a socket...
- executor.execute(bg);
- }
- Either::Left(future::ok(checked_out))
- }
- // Connect won, checkout can just be dropped.
- Either::Right((Ok(connected), _checkout)) => Either::Left(future::ok(connected)),
- // Either checkout or connect could get canceled:
- //
- // 1. Connect is canceled if this is HTTP/2 and there is
- // an outstanding HTTP/2 connecting task.
- // 2. Checkout is canceled if the pool cannot deliver an
- // idle connection reliably.
- //
- // In both cases, we should just wait for the other future.
- Either::Left((Err(err), connecting)) => Either::Right(Either::Left({
- if err.is_canceled() {
- Either::Left(connecting.map_err(ClientError::Normal))
- } else {
- Either::Right(future::err(ClientError::Normal(err)))
- }
- })),
- Either::Right((Err(err), checkout)) => Either::Right(Either::Right({
- if err.is_canceled() {
- Either::Left(checkout.map_err(ClientError::Normal))
- } else {
- Either::Right(future::err(ClientError::Normal(err)))
- }
- })),
- })
- }
-
- fn connect_to(
- &self,
- pool_key: PoolKey,
- ) -> impl Lazy<Output = crate::Result<Pooled<PoolClient<B>>>> + Unpin {
- let executor = self.conn_builder.exec.clone();
- let pool = self.pool.clone();
- #[cfg(not(feature = "http2"))]
- let conn_builder = self.conn_builder.clone();
- #[cfg(feature = "http2")]
- let mut conn_builder = self.conn_builder.clone();
- let ver = self.config.ver;
- let is_ver_h2 = ver == Ver::Http2;
- let connector = self.connector.clone();
- let dst = domain_as_uri(pool_key.clone());
- hyper_lazy(move || {
- // Try to take a "connecting lock".
- //
- // If the pool_key is for HTTP/2, and there is already a
- // connection being established, then this can't take a
- // second lock. The "connect_to" future is Canceled.
- let connecting = match pool.connecting(&pool_key, ver) {
- Some(lock) => lock,
- None => {
- let canceled =
- crate::Error::new_canceled().with("HTTP/2 connection in progress");
- return Either::Right(future::err(canceled));
- }
- };
- Either::Left(
- connector
- .connect(connect::sealed::Internal, dst)
- .map_err(crate::Error::new_connect)
- .and_then(move |io| {
- let connected = io.connected();
- // If ALPN is h2 and we aren't http2_only already,
- // then we need to convert our pool checkout into
- // a single HTTP2 one.
- let connecting = if connected.alpn == Alpn::H2 && !is_ver_h2 {
- match connecting.alpn_h2(&pool) {
- Some(lock) => {
- trace!("ALPN negotiated h2, updating pool");
- lock
- }
- None => {
- // Another connection has already upgraded,
- // the pool checkout should finish up for us.
- let canceled = crate::Error::new_canceled()
- .with("ALPN upgraded to HTTP/2");
- return Either::Right(future::err(canceled));
- }
- }
- } else {
- connecting
- };
-
- #[cfg_attr(not(feature = "http2"), allow(unused))]
- let is_h2 = is_ver_h2 || connected.alpn == Alpn::H2;
- #[cfg(feature = "http2")]
- {
- conn_builder.http2_only(is_h2);
- }
-
- Either::Left(Box::pin(
- conn_builder
- .handshake(io)
- .and_then(move |(tx, conn)| {
- trace!(
- "handshake complete, spawning background dispatcher task"
- );
- executor.execute(
- conn.map_err(|e| debug!("client connection error: {}", e))
- .map(|_| ()),
- );
-
- // Wait for 'conn' to ready up before we
- // declare this tx as usable
- tx.when_ready()
- })
- .map_ok(move |tx| {
- let tx = {
- #[cfg(feature = "http2")]
- {
- if is_h2 {
- PoolTx::Http2(tx.into_http2())
- } else {
- PoolTx::Http1(tx)
- }
- }
- #[cfg(not(feature = "http2"))]
- PoolTx::Http1(tx)
- };
- pool.pooled(
- connecting,
- PoolClient {
- conn_info: connected,
- tx,
- },
- )
- }),
- ))
- }),
- )
- })
- }
-}
-
-impl<C, B> tower_service::Service<Request<B>> for Client<C, B>
-where
- C: Connect + Clone + Send + Sync + 'static,
- B: HttpBody + Send + 'static,
- B::Data: Send,
- B::Error: Into<Box<dyn StdError + Send + Sync>>,
-{
- type Response = Response<Body>;
- type Error = crate::Error;
- type Future = ResponseFuture;
-
- fn poll_ready(&mut self, _: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>> {
- Poll::Ready(Ok(()))
- }
-
- fn call(&mut self, req: Request<B>) -> Self::Future {
- self.request(req)
- }
-}
-
-impl<C, B> tower_service::Service<Request<B>> for &'_ Client<C, B>
-where
- C: Connect + Clone + Send + Sync + 'static,
- B: HttpBody + Send + 'static,
- B::Data: Send,
- B::Error: Into<Box<dyn StdError + Send + Sync>>,
-{
- type Response = Response<Body>;
- type Error = crate::Error;
- type Future = ResponseFuture;
-
- fn poll_ready(&mut self, _: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>> {
- Poll::Ready(Ok(()))
- }
-
- fn call(&mut self, req: Request<B>) -> Self::Future {
- self.request(req)
- }
-}
-
-impl<C: Clone, B> Clone for Client<C, B> {
- fn clone(&self) -> Client<C, B> {
- Client {
- config: self.config.clone(),
- conn_builder: self.conn_builder.clone(),
- connector: self.connector.clone(),
- pool: self.pool.clone(),
- }
- }
-}
-
-impl<C, B> fmt::Debug for Client<C, B> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("Client").finish()
- }
-}
-
-// ===== impl ResponseFuture =====
-
-impl ResponseFuture {
- fn new(fut: Box<dyn Future<Output = crate::Result<Response<Body>>> + Send>) -> Self {
- Self { inner: fut.into() }
- }
-
- fn error_version(ver: Version) -> Self {
- warn!("Request has unsupported version \"{:?}\"", ver);
- ResponseFuture::new(Box::new(future::err(
- crate::Error::new_user_unsupported_version(),
- )))
- }
-}
-
-impl fmt::Debug for ResponseFuture {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.pad("Future<Response>")
- }
-}
-
-impl Future for ResponseFuture {
- type Output = crate::Result<Response<Body>>;
-
- fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
- Pin::new(&mut self.inner).poll(cx)
- }
-}
-
-// ===== impl PoolClient =====
-
-// FIXME: allow() required due to `impl Trait` leaking types to this lint
-#[allow(missing_debug_implementations)]
-struct PoolClient<B> {
- conn_info: Connected,
- tx: PoolTx<B>,
-}
-
-enum PoolTx<B> {
- Http1(conn::SendRequest<B>),
- #[cfg(feature = "http2")]
- Http2(conn::Http2SendRequest<B>),
-}
-
-impl<B> PoolClient<B> {
- fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<crate::Result<()>> {
- match self.tx {
- PoolTx::Http1(ref mut tx) => tx.poll_ready(cx),
- #[cfg(feature = "http2")]
- PoolTx::Http2(_) => Poll::Ready(Ok(())),
- }
- }
-
- fn is_http1(&self) -> bool {
- !self.is_http2()
- }
-
- fn is_http2(&self) -> bool {
- match self.tx {
- PoolTx::Http1(_) => false,
- #[cfg(feature = "http2")]
- PoolTx::Http2(_) => true,
- }
- }
-
- fn is_ready(&self) -> bool {
- match self.tx {
- PoolTx::Http1(ref tx) => tx.is_ready(),
- #[cfg(feature = "http2")]
- PoolTx::Http2(ref tx) => tx.is_ready(),
- }
- }
-
- fn is_closed(&self) -> bool {
- match self.tx {
- PoolTx::Http1(ref tx) => tx.is_closed(),
- #[cfg(feature = "http2")]
- PoolTx::Http2(ref tx) => tx.is_closed(),
- }
- }
-}
-
-impl<B: HttpBody + 'static> PoolClient<B> {
- fn send_request_retryable(
- &mut self,
- req: Request<B>,
- ) -> impl Future<Output = Result<Response<Body>, (crate::Error, Option<Request<B>>)>>
- where
- B: Send,
- {
- match self.tx {
- #[cfg(not(feature = "http2"))]
- PoolTx::Http1(ref mut tx) => tx.send_request_retryable(req),
- #[cfg(feature = "http2")]
- PoolTx::Http1(ref mut tx) => Either::Left(tx.send_request_retryable(req)),
- #[cfg(feature = "http2")]
- PoolTx::Http2(ref mut tx) => Either::Right(tx.send_request_retryable(req)),
- }
- }
-}
-
-impl<B> Poolable for PoolClient<B>
-where
- B: Send + 'static,
-{
- fn is_open(&self) -> bool {
- match self.tx {
- PoolTx::Http1(ref tx) => tx.is_ready(),
- #[cfg(feature = "http2")]
- PoolTx::Http2(ref tx) => tx.is_ready(),
- }
- }
-
- fn reserve(self) -> Reservation<Self> {
- match self.tx {
- PoolTx::Http1(tx) => Reservation::Unique(PoolClient {
- conn_info: self.conn_info,
- tx: PoolTx::Http1(tx),
- }),
- #[cfg(feature = "http2")]
- PoolTx::Http2(tx) => {
- let b = PoolClient {
- conn_info: self.conn_info.clone(),
- tx: PoolTx::Http2(tx.clone()),
- };
- let a = PoolClient {
- conn_info: self.conn_info,
- tx: PoolTx::Http2(tx),
- };
- Reservation::Shared(a, b)
- }
- }
- }
-
- fn can_share(&self) -> bool {
- self.is_http2()
- }
-}
-
-// ===== impl ClientError =====
-
-// FIXME: allow() required due to `impl Trait` leaking types to this lint
-#[allow(missing_debug_implementations)]
-enum ClientError<B> {
- Normal(crate::Error),
- Canceled {
- connection_reused: bool,
- req: Request<B>,
- reason: crate::Error,
- },
-}
-
-impl<B> ClientError<B> {
- fn map_with_reused(conn_reused: bool) -> impl Fn((crate::Error, Option<Request<B>>)) -> Self {
- move |(err, orig_req)| {
- if let Some(req) = orig_req {
- ClientError::Canceled {
- connection_reused: conn_reused,
- reason: err,
- req,
- }
- } else {
- ClientError::Normal(err)
- }
- }
- }
-}
-
-/// A marker to identify what version a pooled connection is.
-#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
-enum Ver {
- Auto,
- Http2,
-}
-
-fn origin_form(uri: &mut Uri) {
- let path = match uri.path_and_query() {
- Some(path) if path.as_str() != "/" => {
- let mut parts = ::http::uri::Parts::default();
- parts.path_and_query = Some(path.clone());
- Uri::from_parts(parts).expect("path is valid uri")
- }
- _none_or_just_slash => {
- debug_assert!(Uri::default() == "/");
- Uri::default()
- }
- };
- *uri = path
-}
-
-fn absolute_form(uri: &mut Uri) {
- debug_assert!(uri.scheme().is_some(), "absolute_form needs a scheme");
- debug_assert!(
- uri.authority().is_some(),
- "absolute_form needs an authority"
- );
- // If the URI is to HTTPS, and the connector claimed to be a proxy,
- // then it *should* have tunneled, and so we don't want to send
- // absolute-form in that case.
- if uri.scheme() == Some(&Scheme::HTTPS) {
- origin_form(uri);
- }
-}
-
-fn authority_form(uri: &mut Uri) {
- if let Some(path) = uri.path_and_query() {
- // `https://hyper.rs` would parse with `/` path, don't
- // annoy people about that...
- if path != "/" {
- warn!("HTTP/1.1 CONNECT request stripping path: {:?}", path);
- }
- }
- *uri = match uri.authority() {
- Some(auth) => {
- let mut parts = ::http::uri::Parts::default();
- parts.authority = Some(auth.clone());
- Uri::from_parts(parts).expect("authority is valid")
- }
- None => {
- unreachable!("authority_form with relative uri");
- }
- };
-}
-
-fn extract_domain(uri: &mut Uri, is_http_connect: bool) -> crate::Result<PoolKey> {
- let uri_clone = uri.clone();
- match (uri_clone.scheme(), uri_clone.authority()) {
- (Some(scheme), Some(auth)) => Ok((scheme.clone(), auth.clone())),
- (None, Some(auth)) if is_http_connect => {
- let scheme = match auth.port_u16() {
- Some(443) => {
- set_scheme(uri, Scheme::HTTPS);
- Scheme::HTTPS
- }
- _ => {
- set_scheme(uri, Scheme::HTTP);
- Scheme::HTTP
- }
- };
- Ok((scheme, auth.clone()))
- }
- _ => {
- debug!("Client requires absolute-form URIs, received: {:?}", uri);
- Err(crate::Error::new_user_absolute_uri_required())
- }
- }
-}
-
-fn domain_as_uri((scheme, auth): PoolKey) -> Uri {
- http::uri::Builder::new()
- .scheme(scheme)
- .authority(auth)
- .path_and_query("/")
- .build()
- .expect("domain is valid Uri")
-}
-
-fn set_scheme(uri: &mut Uri, scheme: Scheme) {
- debug_assert!(
- uri.scheme().is_none(),
- "set_scheme expects no existing scheme"
- );
- let old = mem::replace(uri, Uri::default());
- let mut parts: ::http::uri::Parts = old.into();
- parts.scheme = Some(scheme);
- parts.path_and_query = Some("/".parse().expect("slash is a valid path"));
- *uri = Uri::from_parts(parts).expect("scheme is valid");
-}
-
-/// A builder to configure a new [`Client`](Client).
-///
-/// # Example
-///
-/// ```
-/// # #[cfg(feature = "runtime")]
-/// # fn run () {
-/// use std::time::Duration;
-/// use hyper::Client;
-///
-/// let client = Client::builder()
-/// .pool_idle_timeout(Duration::from_secs(30))
-/// .http2_only(true)
-/// .build_http();
-/// # let infer: Client<_, hyper::Body> = client;
-/// # drop(infer);
-/// # }
-/// # fn main() {}
-/// ```
-#[derive(Clone)]
-pub struct Builder {
- client_config: Config,
- conn_builder: conn::Builder,
- pool_config: pool::Config,
-}
-
-impl Default for Builder {
- fn default() -> Self {
- Self {
- client_config: Config {
- retry_canceled_requests: true,
- set_host: true,
- ver: Ver::Auto,
- },
- conn_builder: conn::Builder::new(),
- pool_config: pool::Config {
- idle_timeout: Some(Duration::from_secs(90)),
- max_idle_per_host: std::usize::MAX,
- },
- }
- }
-}
-
-impl Builder {
- #[doc(hidden)]
- #[deprecated(
- note = "name is confusing, to disable the connection pool, call pool_max_idle_per_host(0)"
- )]
- pub fn keep_alive(&mut self, val: bool) -> &mut Self {
- if !val {
- // disable
- self.pool_max_idle_per_host(0)
- } else if self.pool_config.max_idle_per_host == 0 {
- // enable
- self.pool_max_idle_per_host(std::usize::MAX)
- } else {
- // already enabled
- self
- }
- }
-
- #[doc(hidden)]
- #[deprecated(note = "renamed to `pool_idle_timeout`")]
- pub fn keep_alive_timeout<D>(&mut self, val: D) -> &mut Self
- where
- D: Into<Option<Duration>>,
- {
- self.pool_idle_timeout(val)
- }
-
- /// Set an optional timeout for idle sockets being kept-alive.
- ///
- /// Pass `None` to disable timeout.
- ///
- /// Default is 90 seconds.
- pub fn pool_idle_timeout<D>(&mut self, val: D) -> &mut Self
- where
- D: Into<Option<Duration>>,
- {
- self.pool_config.idle_timeout = val.into();
- self
- }
-
- #[doc(hidden)]
- #[deprecated(note = "renamed to `pool_max_idle_per_host`")]
- pub fn max_idle_per_host(&mut self, max_idle: usize) -> &mut Self {
- self.pool_config.max_idle_per_host = max_idle;
- self
- }
-
- /// Sets the maximum idle connection per host allowed in the pool.
- ///
- /// Default is `usize::MAX` (no limit).
- pub fn pool_max_idle_per_host(&mut self, max_idle: usize) -> &mut Self {
- self.pool_config.max_idle_per_host = max_idle;
- self
- }
-
- // HTTP/1 options
-
- /// Sets the exact size of the read buffer to *always* use.
- ///
- /// Note that setting this option unsets the `http1_max_buf_size` option.
- ///
- /// Default is an adaptive read buffer.
- pub fn http1_read_buf_exact_size(&mut self, sz: usize) -> &mut Self {
- self.conn_builder.h1_read_buf_exact_size(Some(sz));
- self
- }
-
- /// Set the maximum buffer size for the connection.
- ///
- /// Default is ~400kb.
- ///
- /// Note that setting this option unsets the `http1_read_exact_buf_size` option.
- ///
- /// # Panics
- ///
- /// The minimum value allowed is 8192. This method panics if the passed `max` is less than the minimum.
- #[cfg(feature = "http1")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
- pub fn http1_max_buf_size(&mut self, max: usize) -> &mut Self {
- self.conn_builder.h1_max_buf_size(max);
- self
- }
-
- /// Set whether HTTP/1 connections will write header names as title case at
- /// the socket level.
- ///
- /// Note that this setting does not affect HTTP/2.
- ///
- /// Default is false.
- pub fn http1_title_case_headers(&mut self, val: bool) -> &mut Self {
- self.conn_builder.h1_title_case_headers(val);
- self
- }
-
- /// Set whether the connection **must** use HTTP/2.
- ///
- /// The destination must either allow HTTP2 Prior Knowledge, or the
- /// `Connect` should be configured to do use ALPN to upgrade to `h2`
- /// as part of the connection process. This will not make the `Client`
- /// utilize ALPN by itself.
- ///
- /// Note that setting this to true prevents HTTP/1 from being allowed.
- ///
- /// Default is false.
- #[cfg(feature = "http2")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_only(&mut self, val: bool) -> &mut Self {
- self.client_config.ver = if val { Ver::Http2 } else { Ver::Auto };
- self
- }
-
- /// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2
- /// stream-level flow control.
- ///
- /// Passing `None` will do nothing.
- ///
- /// If not set, hyper will use a default.
- ///
- /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_INITIAL_WINDOW_SIZE
- #[cfg(feature = "http2")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_initial_stream_window_size(&mut self, sz: impl Into<Option<u32>>) -> &mut Self {
- self.conn_builder
- .http2_initial_stream_window_size(sz.into());
- self
- }
-
- /// Sets the max connection-level flow control for HTTP2
- ///
- /// Passing `None` will do nothing.
- ///
- /// If not set, hyper will use a default.
- #[cfg(feature = "http2")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_initial_connection_window_size(
- &mut self,
- sz: impl Into<Option<u32>>,
- ) -> &mut Self {
- self.conn_builder
- .http2_initial_connection_window_size(sz.into());
- self
- }
-
- /// Sets whether to use an adaptive flow control.
- ///
- /// Enabling this will override the limits set in
- /// `http2_initial_stream_window_size` and
- /// `http2_initial_connection_window_size`.
- #[cfg(feature = "http2")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_adaptive_window(&mut self, enabled: bool) -> &mut Self {
- self.conn_builder.http2_adaptive_window(enabled);
- self
- }
-
- /// Sets the maximum frame size to use for HTTP2.
- ///
- /// Passing `None` will do nothing.
- ///
- /// If not set, hyper will use a default.
- #[cfg(feature = "http2")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_max_frame_size(&mut self, sz: impl Into<Option<u32>>) -> &mut Self {
- self.conn_builder.http2_max_frame_size(sz);
- self
- }
-
- /// Sets an interval for HTTP2 Ping frames should be sent to keep a
- /// connection alive.
- ///
- /// Pass `None` to disable HTTP2 keep-alive.
- ///
- /// Default is currently disabled.
- ///
- /// # Cargo Feature
- ///
- /// Requires the `runtime` cargo feature to be enabled.
- #[cfg(feature = "runtime")]
- #[cfg(feature = "http2")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_keep_alive_interval(
- &mut self,
- interval: impl Into<Option<Duration>>,
- ) -> &mut Self {
- self.conn_builder.http2_keep_alive_interval(interval);
- self
- }
-
- /// Sets a timeout for receiving an acknowledgement of the keep-alive ping.
- ///
- /// If the ping is not acknowledged within the timeout, the connection will
- /// be closed. Does nothing if `http2_keep_alive_interval` is disabled.
- ///
- /// Default is 20 seconds.
- ///
- /// # Cargo Feature
- ///
- /// Requires the `runtime` cargo feature to be enabled.
- #[cfg(feature = "runtime")]
- #[cfg(feature = "http2")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_keep_alive_timeout(&mut self, timeout: Duration) -> &mut Self {
- self.conn_builder.http2_keep_alive_timeout(timeout);
- self
- }
-
- /// Sets whether HTTP2 keep-alive should apply while the connection is idle.
- ///
- /// If disabled, keep-alive pings are only sent while there are open
- /// request/responses streams. If enabled, pings are also sent when no
- /// streams are active. Does nothing if `http2_keep_alive_interval` is
- /// disabled.
- ///
- /// Default is `false`.
- ///
- /// # Cargo Feature
- ///
- /// Requires the `runtime` cargo feature to be enabled.
- #[cfg(feature = "runtime")]
- #[cfg(feature = "http2")]
- #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
- pub fn http2_keep_alive_while_idle(&mut self, enabled: bool) -> &mut Self {
- self.conn_builder.http2_keep_alive_while_idle(enabled);
- self
- }
-
- /// Set whether to retry requests that get disrupted before ever starting
- /// to write.
- ///
- /// This means a request that is queued, and gets given an idle, reused
- /// connection, and then encounters an error immediately as the idle
- /// connection was found to be unusable.
- ///
- /// When this is set to `false`, the related `ResponseFuture` would instead
- /// resolve to an `Error::Cancel`.
- ///
- /// Default is `true`.
- #[inline]
- pub fn retry_canceled_requests(&mut self, val: bool) -> &mut Self {
- self.client_config.retry_canceled_requests = val;
- self
- }
-
- /// Set whether to automatically add the `Host` header to requests.
- ///
- /// If true, and a request does not include a `Host` header, one will be
- /// added automatically, derived from the authority of the `Uri`.
- ///
- /// Default is `true`.
- #[inline]
- pub fn set_host(&mut self, val: bool) -> &mut Self {
- self.client_config.set_host = val;
- self
- }
-
- /// Provide an executor to execute background `Connection` tasks.
- pub fn executor<E>(&mut self, exec: E) -> &mut Self
- where
- E: Executor<BoxSendFuture> + Send + Sync + 'static,
- {
- self.conn_builder.executor(exec);
- self
- }
-
- /// Builder a client with this configuration and the default `HttpConnector`.
- #[cfg(feature = "tcp")]
- pub fn build_http<B>(&self) -> Client<HttpConnector, B>
- where
- B: HttpBody + Send,
- B::Data: Send,
- {
- let mut connector = HttpConnector::new();
- if self.pool_config.is_enabled() {
- connector.set_keepalive(self.pool_config.idle_timeout);
- }
- self.build(connector)
- }
-
- /// Combine the configuration of this builder with a connector to create a `Client`.
- pub fn build<C, B>(&self, connector: C) -> Client<C, B>
- where
- C: Connect + Clone,
- B: HttpBody + Send,
- B::Data: Send,
- {
- Client {
- config: self.client_config,
- conn_builder: self.conn_builder.clone(),
- connector,
- pool: Pool::new(self.pool_config, &self.conn_builder.exec),
- }
- }
-}
-
-impl fmt::Debug for Builder {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("Builder")
- .field("client_config", &self.client_config)
- .field("conn_builder", &self.conn_builder)
- .field("pool_config", &self.pool_config)
- .finish()
- }
-}
-
-#[cfg(test)]
-mod unit_tests {
- use super::*;
-
- #[test]
- fn set_relative_uri_with_implicit_path() {
- let mut uri = "http://hyper.rs".parse().unwrap();
- origin_form(&mut uri);
- assert_eq!(uri.to_string(), "/");
- }
-
- #[test]
- fn test_origin_form() {
- let mut uri = "http://hyper.rs/guides".parse().unwrap();
- origin_form(&mut uri);
- assert_eq!(uri.to_string(), "/guides");
-
- let mut uri = "http://hyper.rs/guides?foo=bar".parse().unwrap();
- origin_form(&mut uri);
- assert_eq!(uri.to_string(), "/guides?foo=bar");
- }
-
- #[test]
- fn test_absolute_form() {
- let mut uri = "http://hyper.rs/guides".parse().unwrap();
- absolute_form(&mut uri);
- assert_eq!(uri.to_string(), "http://hyper.rs/guides");
-
- let mut uri = "https://hyper.rs/guides".parse().unwrap();
- absolute_form(&mut uri);
- assert_eq!(uri.to_string(), "/guides");
- }
-
- #[test]
- fn test_authority_form() {
- let _ = pretty_env_logger::try_init();
-
- let mut uri = "http://hyper.rs".parse().unwrap();
- authority_form(&mut uri);
- assert_eq!(uri.to_string(), "hyper.rs");
+cfg_feature! {
+ #![any(feature = "http1", feature = "http2")]
- let mut uri = "hyper.rs".parse().unwrap();
- authority_form(&mut uri);
- assert_eq!(uri.to_string(), "hyper.rs");
- }
+ pub use self::client::{Builder, Client, ResponseFuture};
- #[test]
- fn test_extract_domain_connect_no_port() {
- let mut uri = "hyper.rs".parse().unwrap();
- let (scheme, host) = extract_domain(&mut uri, true).expect("extract domain");
- assert_eq!(scheme, *"http");
- assert_eq!(host, "hyper.rs");
- }
+ mod client;
+ pub mod conn;
+ pub(crate) mod dispatch;
+ mod pool;
+ pub mod service;
}
|
hyperium/hyper
|
a24f0c0af8e1f4c6b7cc3a47c83eb6e4af88aca6
|
|
[
"2373"
] |
0.14
|
c784a10174deb461b2eaa59ed9fabae5c315eb68
|
diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml
--- a/.github/workflows/CI.yml
+++ b/.github/workflows/CI.yml
@@ -132,4 +132,4 @@ jobs:
uses: actions-rs/cargo@v1
with:
command: rustdoc
- args: --features full -- -D broken-intra-doc-links
+ args: --features full -- --cfg docsrs -D broken-intra-doc-links
|
2020-12-23T19:35:19Z
| 2,372
|
0.14 docs failed to build
https://docs.rs/crate/hyper/0.14.0
|
hyperium__hyper-2372
|
diff --git a/src/lib.rs b/src/lib.rs
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -4,6 +4,7 @@
#![cfg_attr(test, deny(rust_2018_idioms))]
#![cfg_attr(test, deny(warnings))]
#![cfg_attr(all(test, feature = "nightly"), feature(test))]
+#![cfg_attr(docsrs, feature(doc_cfg))]
//! # hyper
//!
|
hyperium/hyper
|
a24f0c0af8e1f4c6b7cc3a47c83eb6e4af88aca6
|
|
[
"2352"
] |
0.3
|
fad42acc79b54ce38adf99c58c894f29fa2665ad
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,75 @@
+## v0.14.0 (2020-12-23)
+
+
+#### Bug Fixes
+
+* **client:** log socket option errors instead of returning error (#2361) ([dad5c879](https://github.com/hyperium/hyper/commit/dad5c8792fec7b586b41b5237bc161d8f0c09f72), closes [#2359](https://github.com/hyperium/hyper/issues/2359))
+* **http1:**
+ * ignore chunked trailers (#2357) ([1dd761c8](https://github.com/hyperium/hyper/commit/1dd761c87de226261599ff2518fe9d231ba1c82d), closes [#2171](https://github.com/hyperium/hyper/issues/2171))
+ * ending close-delimited body should close (#2322) ([71f34024](https://github.com/hyperium/hyper/commit/71f340242120f1ea52c7446b4bae37b894b83912))
+
+
+#### Features
+
+* **client:**
+ * change DNS Resolver to resolve to SocketAddrs (#2346) ([b4e24332](https://github.com/hyperium/hyper/commit/b4e24332a0cd44068a806081d51686f50c086056), closes [#1937](https://github.com/hyperium/hyper/issues/1937))
+ * Make `client` an optional feature ([4e55583d](https://github.com/hyperium/hyper/commit/4e55583d30a597884883f1a51b678f5c57c76765))
+* **http1:** Make HTTP/1 support an optional feature ([2a19ab74](https://github.com/hyperium/hyper/commit/2a19ab74ed69bc776da25544e98979c9fb6e1834))
+* **http2:** Make HTTP/2 support an optional feature ([b819b428](https://github.com/hyperium/hyper/commit/b819b428d314f2203642a015545967601b8e518a))
+* **lib:**
+ * Upgrade to Tokio 1.0, Bytes 1.0, http-body 0.4 (#2369) ([fad42acc](https://github.com/hyperium/hyper/commit/fad42acc79b54ce38adf99c58c894f29fa2665ad), closes [#2370](https://github.com/hyperium/hyper/issues/2370))
+ * remove dependency on `tracing`'s `log` feature (#2342) ([db32e105](https://github.com/hyperium/hyper/commit/db32e1050cf1eae63af0365c97e920f1295b6bea), closes [#2326](https://github.com/hyperium/hyper/issues/2326))
+ * disable all optional features by default (#2336) ([ed2b22a7](https://github.com/hyperium/hyper/commit/ed2b22a7f66899d338691552fbcb6c0f2f4e06b9))
+* **server:** Make the `server` code an optional feature (#2334) ([bdb5e5d6](https://github.com/hyperium/hyper/commit/bdb5e5d6946f4e3f8115a6b1683aff6a04df73de))
+* **upgrade:** Moved HTTP upgrades off `Body` to a new API (#2337) ([121c3313](https://github.com/hyperium/hyper/commit/121c33132c0950aaa422848cdc43f6691ddf5785), closes [#2086](https://github.com/hyperium/hyper/issues/2086))
+
+
+#### Breaking Changes
+
+* hyper depends on `tokio` v1 and `bytes` v1.
+* Custom resolvers used with `HttpConnector` must change
+ to resolving to an iterator of `SocketAddr`s instead of `IpAddr`s.
+ ([b4e24332](https://github.com/hyperium/hyper/commit/b4e24332a0cd44068a806081d51686f50c086056))
+* hyper no longer emits `log` records automatically.
+ If you need hyper to integrate with a `log` logger (as opposed to `tracing`),
+ you can add `tracing = { version = "0.1", features = ["log"] }` to activate them.
+ ([db32e105](https://github.com/hyperium/hyper/commit/db32e1050cf1eae63af0365c97e920f1295b6bea))
+* Removed `http1_writev` methods from `client::Builder`,
+ `client::conn::Builder`, `server::Builder`, and `server::conn::Builder`.
+
+ Vectored writes are now enabled based on whether the `AsyncWrite`
+ implementation in use supports them, rather than though adaptive
+ detection. To explicitly disable vectored writes, users may wrap the IO
+ in a newtype that implements `AsyncRead` and `AsyncWrite` and returns
+ `false` from its `AsyncWrite::is_write_vectored` method.
+ ([d6aadb83](https://github.com/hyperium/hyper/commit/d6aadb830072959497f414c01bcdba4c8e681088))
+* The method `Body::on_upgrade()` is gone. It is
+ essentially replaced with `hyper::upgrade::on(msg)`.
+ ([121c3313](https://github.com/hyperium/hyper/commit/121c33132c0950aaa422848cdc43f6691ddf5785))
+* All optional features have been disabled by default.
+ ([ed2b22a7](https://github.com/hyperium/hyper/commit/ed2b22a7f66899d338691552fbcb6c0f2f4e06b9))
+* The HTTP server code is now an optional feature. To
+ enable the server, add `features = ["server"]` to the dependency in
+ your `Cargo.toml`.
+ ([bdb5e5d6](https://github.com/hyperium/hyper/commit/bdb5e5d6946f4e3f8115a6b1683aff6a04df73de))
+* The HTTP client of hyper is now an optional feature. To
+ enable the client, add `features = ["client"]` to the dependency in
+ your `Cargo.toml`.
+ ([4e55583d](https://github.com/hyperium/hyper/commit/4e55583d30a597884883f1a51b678f5c57c76765))
+* This puts all HTTP/1 methods and support behind an
+ `http1` cargo feature, which will not be enabled by default. To use
+ HTTP/1, add `features = ["http1"]` to the hyper dependency in your
+ `Cargo.toml`.
+
+ ([2a19ab74](https://github.com/hyperium/hyper/commit/2a19ab74ed69bc776da25544e98979c9fb6e1834))
+* This puts all HTTP/2 methods and support behind an
+ `http2` cargo feature, which will not be enabled by default. To use
+ HTTP/2, add `features = ["http2"]` to the hyper dependency in your
+ `Cargo.toml`.
+
+ ([b819b428](https://github.com/hyperium/hyper/commit/b819b428d314f2203642a015545967601b8e518a))
+
+
### v0.13.9 (2020-11-02)
diff --git a/Cargo.toml b/Cargo.toml
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "hyper"
-version = "0.14.0-dev" # don't forget to update html_root_url
+version = "0.14.0" # don't forget to update html_root_url
description = "A fast and correct HTTP library."
readme = "README.md"
homepage = "https://hyper.rs"
diff --git a/Cargo.toml b/Cargo.toml
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -12,8 +12,6 @@ keywords = ["http", "hyper", "hyperium"]
categories = ["network-programming", "web-programming::http-client", "web-programming::http-server"]
edition = "2018"
-publish = false
-
include = [
"Cargo.toml",
"LICENSE",
|
Here's an update on the exact release situation. This is based on the expectation that Tokio 1.0 will be released by EOY 2020. Considering that plan, I see 3 options:
1. Wait the 2 weeks for Tokio 1.0, and release hyper 0.14 at the same time (with Tokio 1.0 support).
2. Release hyper v0.14 right now, and then release hyper v0.15 in a couple weeks with Tokio 1.0.
3. Release an alpha version of hyper v0.14.0-alpha.1 right now, and then update it to work with Tokio 1.0 and release hyper v0.14.
I think both 2 and 3 have downsides of additional churn, requiring preparing 2 sets of releases for hyper in a very short period of time (which is a load on the maintainers), and forces 2 "breaking changes" releases on hyper users in as many weeks.
Seeing as Tokio 0.3 was largely meant as a "preview" of Tokio 1.0, the main point of it being to check that Tokio itself didn't need any other major API changes, I think the value gained by a hyper v0.14 release at this point is very low compared to the cost. For those that wish to *try out* what hyper looks like with Tokio 0.3, the git master branch supports it.
For those reasons, I so far feel that option 1 is best, which realistically means to most people: **hyper v0.14 release will coincide with Tokio 1.0 in a couple weeks.**
@seanmonstar
I think your reasoning makes sense and I have no problem waiting for an extra two weeks.
I fear however that, as with every software project ever, the Tokio 1.0 release deadline will slip.
Would you consider making a release if Tokio 1.0 were *not* released by the end of 2020?
Tokio 1.0 is shipping by EOY. We are pretty much code complete at this point. Only a couple small tweaks left.
I'm sure you didn't intend it that way, but FWIW David, your comment
came across as pretty snarky and passive aggressive.
On Sat, Dec 19, 2020 at 5:02 PM David Craven <notifications@github.com> wrote:
>
> Glad you're finally catching up to async-std
>
> —
> You are receiving this because you are subscribed to this thread.
> Reply to this email directly, view it on GitHub, or unsubscribe.
--
All that is necessary for evil to succeed is for good people to do nothing.
Well I doubt that there will ever be a solution in std because everyone wants to have their own secret sauce, so maybe. Also I doubt there is that much technical difference these days that would warrant that.
@seanmonstar contrary to earlier objections, I think your plan makes a lot of sense.
To put in my 2c: I would much prefer to have another release of h2 / hyper / reqwest / ... now.
I am personally not upgrading anything that depends on both tokio and one of the hyperium crates to tokio 0.3 because I don't see the point of the additional work of adding the compat helper methods where necessary, and have also not seen many others using the compat helpers.
Switching to an alpha release of hyper and/or some other crate(s) seems like it would be much less work for lots of people and thus encourage more testing of tokio 0.3 (which really is the whole point of that release, right?).
|
2020-12-23T18:45:34Z
| 2,371
|
v0.14 Release Checklist
The [v0.14 milestone](https://github.com/hyperium/hyper/milestone/6) is complete, meaning all major features are merged. This is a checklist of some administrata to have a smooth release!
- [x] Release Tokio 1.0, and then upgrade hyper (https://github.com/hyperium/hyper/issues/2370)
- [x] Release `h2` v0.3
- [x] Release `http-body` v0.4
- [ ] Release `http` with updated `bytes` (can be a minor version)
- [x] Blog post
- [x] Blast off 🚀
|
hyperium__hyper-2371
|
diff --git a/src/lib.rs b/src/lib.rs
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -1,4 +1,4 @@
-#![doc(html_root_url = "https://docs.rs/hyper/0.13.9")]
+#![doc(html_root_url = "https://docs.rs/hyper/0.14.0")]
#![deny(missing_docs)]
#![deny(missing_debug_implementations)]
#![cfg_attr(test, deny(rust_2018_idioms))]
|
hyperium/hyper
|
6fd696e10974f10b2c6b9d16393bbbfa21c2333f
|
[
"2370"
] |
0.3
|
dad5c8792fec7b586b41b5237bc161d8f0c09f72
|
diff --git a/Cargo.toml b/Cargo.toml
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -22,20 +22,20 @@ include = [
]
[dependencies]
-bytes = "0.6"
+bytes = "1"
futures-core = { version = "0.3", default-features = false }
futures-channel = "0.3"
futures-util = { version = "0.3", default-features = false }
http = "0.2"
-http-body = { git = "https://github.com/hyperium/http-body" }
+http-body = "0.4"
httpdate = "0.3"
httparse = "1.0"
-h2 = { git = "https://github.com/hyperium/h2", optional = true }
+h2 = { version = "0.3", optional = true }
itoa = "0.4.1"
tracing = { version = "0.1", default-features = false, features = ["std"] }
pin-project = "1.0"
tower-service = "0.3"
-tokio = { version = "0.3.4", features = ["sync", "stream"] }
+tokio = { version = "1", features = ["sync"] }
want = "0.3"
# Optional
diff --git a/Cargo.toml b/Cargo.toml
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -51,7 +51,7 @@ spmc = "0.3"
serde = "1.0"
serde_derive = "1.0"
serde_json = "1.0"
-tokio = { version = "0.3", features = [
+tokio = { version = "1", features = [
"fs",
"macros",
"io-std",
diff --git a/src/body/to_bytes.rs b/src/body/to_bytes.rs
--- a/src/body/to_bytes.rs
+++ b/src/body/to_bytes.rs
@@ -23,7 +23,7 @@ where
let second = if let Some(buf) = body.data().await {
buf?
} else {
- return Ok(first.copy_to_bytes(first.bytes().len()));
+ return Ok(first.copy_to_bytes(first.remaining()));
};
// With more than 1 buf, we gotta flatten into a Vec first.
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -667,8 +667,11 @@ impl ConnectingTcp<'_> {
let fallback_fut = fallback.remote.connect(self.config);
futures_util::pin_mut!(fallback_fut);
+ let fallback_delay = fallback.delay;
+ futures_util::pin_mut!(fallback_delay);
+
let (result, future) =
- match futures_util::future::select(preferred_fut, fallback.delay).await {
+ match futures_util::future::select(preferred_fut, fallback_delay).await {
Either::Left((result, _fallback_delay)) => {
(result, Either::Right(fallback_fut))
}
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -1,7 +1,7 @@
#[cfg(feature = "http2")]
use std::future::Future;
-use tokio::stream::Stream;
+use futures_util::FutureExt;
use tokio::sync::{mpsc, oneshot};
use crate::common::{task, Pin, Poll};
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -150,8 +150,8 @@ impl<T, U> Receiver<T, U> {
self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
) -> Poll<Option<(T, Callback<T, U>)>> {
- let this = self.project();
- match this.inner.poll_next(cx) {
+ let mut this = self.project();
+ match this.inner.poll_recv(cx) {
Poll::Ready(item) => {
Poll::Ready(item.map(|mut env| env.0.take().expect("envelope not dropped")))
}
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -170,9 +170,9 @@ impl<T, U> Receiver<T, U> {
#[cfg(feature = "http1")]
pub(crate) fn try_recv(&mut self) -> Option<(T, Callback<T, U>)> {
- match self.inner.try_recv() {
- Ok(mut env) => env.0.take(),
- Err(_) => None,
+ match self.inner.recv().now_or_never() {
+ Some(Some(mut env)) => env.0.take(),
+ _ => None,
}
}
}
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -731,7 +731,6 @@ impl<T: Poolable + 'static> Future for IdleTask<T> {
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
- use tokio::stream::Stream;
let mut this = self.project();
loop {
match this.pool_drop_notifier.as_mut().poll(cx) {
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -743,7 +742,7 @@ impl<T: Poolable + 'static> Future for IdleTask<T> {
}
}
- ready!(this.interval.as_mut().poll_next(cx));
+ ready!(this.interval.as_mut().poll_tick(cx));
if let Some(inner) = this.pool.upgrade() {
if let Ok(mut inner) = inner.lock() {
diff --git a/src/common/buf.rs b/src/common/buf.rs
--- a/src/common/buf.rs
+++ b/src/common/buf.rs
@@ -34,8 +34,8 @@ impl<T: Buf> Buf for BufList<T> {
}
#[inline]
- fn bytes(&self) -> &[u8] {
- self.bufs.front().map(Buf::bytes).unwrap_or_default()
+ fn chunk(&self) -> &[u8] {
+ self.bufs.front().map(Buf::chunk).unwrap_or_default()
}
#[inline]
diff --git a/src/common/buf.rs b/src/common/buf.rs
--- a/src/common/buf.rs
+++ b/src/common/buf.rs
@@ -57,13 +57,13 @@ impl<T: Buf> Buf for BufList<T> {
}
#[inline]
- fn bytes_vectored<'t>(&'t self, dst: &mut [IoSlice<'t>]) -> usize {
+ fn chunks_vectored<'t>(&'t self, dst: &mut [IoSlice<'t>]) -> usize {
if dst.is_empty() {
return 0;
}
let mut vecs = 0;
for buf in &self.bufs {
- vecs += buf.bytes_vectored(&mut dst[vecs..]);
+ vecs += buf.chunks_vectored(&mut dst[vecs..]);
if vecs == dst.len() {
break;
}
diff --git a/src/common/io/mod.rs b/src/common/io/mod.rs
--- a/src/common/io/mod.rs
+++ b/src/common/io/mod.rs
@@ -1,4 +1,3 @@
mod rewind;
pub(crate) use self::rewind::Rewind;
-pub(crate) const MAX_WRITEV_BUFS: usize = 64;
diff --git a/src/proto/h1/encode.rs b/src/proto/h1/encode.rs
--- a/src/proto/h1/encode.rs
+++ b/src/proto/h1/encode.rs
@@ -229,12 +229,12 @@ where
}
#[inline]
- fn bytes(&self) -> &[u8] {
+ fn chunk(&self) -> &[u8] {
match self.kind {
- BufKind::Exact(ref b) => b.bytes(),
- BufKind::Limited(ref b) => b.bytes(),
- BufKind::Chunked(ref b) => b.bytes(),
- BufKind::ChunkedEnd(ref b) => b.bytes(),
+ BufKind::Exact(ref b) => b.chunk(),
+ BufKind::Limited(ref b) => b.chunk(),
+ BufKind::Chunked(ref b) => b.chunk(),
+ BufKind::ChunkedEnd(ref b) => b.chunk(),
}
}
diff --git a/src/proto/h1/encode.rs b/src/proto/h1/encode.rs
--- a/src/proto/h1/encode.rs
+++ b/src/proto/h1/encode.rs
@@ -249,12 +249,12 @@ where
}
#[inline]
- fn bytes_vectored<'t>(&'t self, dst: &mut [IoSlice<'t>]) -> usize {
+ fn chunks_vectored<'t>(&'t self, dst: &mut [IoSlice<'t>]) -> usize {
match self.kind {
- BufKind::Exact(ref b) => b.bytes_vectored(dst),
- BufKind::Limited(ref b) => b.bytes_vectored(dst),
- BufKind::Chunked(ref b) => b.bytes_vectored(dst),
- BufKind::ChunkedEnd(ref b) => b.bytes_vectored(dst),
+ BufKind::Exact(ref b) => b.chunks_vectored(dst),
+ BufKind::Limited(ref b) => b.chunks_vectored(dst),
+ BufKind::Chunked(ref b) => b.chunks_vectored(dst),
+ BufKind::ChunkedEnd(ref b) => b.chunks_vectored(dst),
}
}
}
diff --git a/src/proto/h1/encode.rs b/src/proto/h1/encode.rs
--- a/src/proto/h1/encode.rs
+++ b/src/proto/h1/encode.rs
@@ -295,7 +295,7 @@ impl Buf for ChunkSize {
}
#[inline]
- fn bytes(&self) -> &[u8] {
+ fn chunk(&self) -> &[u8] {
&self.bytes[self.pos.into()..self.len.into()]
}
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -186,7 +186,7 @@ where
self.read_buf.reserve(next);
}
- let dst = self.read_buf.bytes_mut();
+ let dst = self.read_buf.chunk_mut();
let dst = unsafe { &mut *(dst as *mut _ as *mut [MaybeUninit<u8>]) };
let mut buf = ReadBuf::uninit(dst);
match Pin::new(&mut self.io).poll_read(cx, &mut buf) {
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -231,10 +231,11 @@ where
return self.poll_flush_flattened(cx);
}
+ const MAX_WRITEV_BUFS: usize = 64;
loop {
let n = {
- let mut iovs = [IoSlice::new(&[]); crate::common::io::MAX_WRITEV_BUFS];
- let len = self.write_buf.bytes_vectored(&mut iovs);
+ let mut iovs = [IoSlice::new(&[]); MAX_WRITEV_BUFS];
+ let len = self.write_buf.chunks_vectored(&mut iovs);
ready!(Pin::new(&mut self.io).poll_write_vectored(cx, &iovs[..len]))?
};
// TODO(eliza): we have to do this manually because
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -262,7 +263,7 @@ where
/// that skips some bookkeeping around using multiple buffers.
fn poll_flush_flattened(&mut self, cx: &mut task::Context<'_>) -> Poll<io::Result<()>> {
loop {
- let n = ready!(Pin::new(&mut self.io).poll_write(cx, self.write_buf.headers.bytes()))?;
+ let n = ready!(Pin::new(&mut self.io).poll_write(cx, self.write_buf.headers.chunk()))?;
debug!("flushed {} bytes", n);
self.write_buf.headers.advance(n);
if self.write_buf.headers.remaining() == 0 {
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -433,7 +434,7 @@ impl<T: AsRef<[u8]>> Buf for Cursor<T> {
}
#[inline]
- fn bytes(&self) -> &[u8] {
+ fn chunk(&self) -> &[u8] {
&self.bytes.as_ref()[self.pos..]
}
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -487,7 +488,7 @@ where
//but accomplishes the same result.
loop {
let adv = {
- let slice = buf.bytes();
+ let slice = buf.chunk();
if slice.is_empty() {
return;
}
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -534,12 +535,12 @@ impl<B: Buf> Buf for WriteBuf<B> {
}
#[inline]
- fn bytes(&self) -> &[u8] {
- let headers = self.headers.bytes();
+ fn chunk(&self) -> &[u8] {
+ let headers = self.headers.chunk();
if !headers.is_empty() {
headers
} else {
- self.queue.bytes()
+ self.queue.chunk()
}
}
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -559,9 +560,9 @@ impl<B: Buf> Buf for WriteBuf<B> {
}
#[inline]
- fn bytes_vectored<'t>(&'t self, dst: &mut [IoSlice<'t>]) -> usize {
- let n = self.headers.bytes_vectored(dst);
- self.queue.bytes_vectored(&mut dst[n..]) + n
+ fn chunks_vectored<'t>(&'t self, dst: &mut [IoSlice<'t>]) -> usize {
+ let n = self.headers.chunks_vectored(dst);
+ self.queue.chunks_vectored(&mut dst[n..]) + n
}
}
diff --git a/src/proto/h2/mod.rs b/src/proto/h2/mod.rs
--- a/src/proto/h2/mod.rs
+++ b/src/proto/h2/mod.rs
@@ -257,8 +257,8 @@ impl<B: Buf> Buf for SendBuf<B> {
}
#[inline]
- fn bytes(&self) -> &[u8] {
- self.0.as_ref().map(|b| b.bytes()).unwrap_or(&[])
+ fn chunk(&self) -> &[u8] {
+ self.0.as_ref().map(|b| b.chunk()).unwrap_or(&[])
}
#[inline]
diff --git a/src/proto/h2/mod.rs b/src/proto/h2/mod.rs
--- a/src/proto/h2/mod.rs
+++ b/src/proto/h2/mod.rs
@@ -268,7 +268,7 @@ impl<B: Buf> Buf for SendBuf<B> {
}
}
- fn bytes_vectored<'a>(&'a self, dst: &mut [IoSlice<'a>]) -> usize {
- self.0.as_ref().map(|b| b.bytes_vectored(dst)).unwrap_or(0)
+ fn chunks_vectored<'a>(&'a self, dst: &mut [IoSlice<'a>]) -> usize {
+ self.0.as_ref().map(|b| b.chunks_vectored(dst)).unwrap_or(0)
}
}
diff --git a/src/proto/h2/ping.rs b/src/proto/h2/ping.rs
--- a/src/proto/h2/ping.rs
+++ b/src/proto/h2/ping.rs
@@ -60,7 +60,7 @@ pub(super) fn channel(ping_pong: PingPong, config: Config) -> (Recorder, Ponger)
interval,
timeout: config.keep_alive_timeout,
while_idle: config.keep_alive_while_idle,
- timer: tokio::time::sleep(interval),
+ timer: Box::pin(tokio::time::sleep(interval)),
state: KeepAliveState::Init,
});
diff --git a/src/proto/h2/ping.rs b/src/proto/h2/ping.rs
--- a/src/proto/h2/ping.rs
+++ b/src/proto/h2/ping.rs
@@ -156,7 +156,7 @@ struct KeepAlive {
while_idle: bool,
state: KeepAliveState,
- timer: Sleep,
+ timer: Pin<Box<Sleep>>,
}
#[cfg(feature = "runtime")]
diff --git a/src/proto/h2/ping.rs b/src/proto/h2/ping.rs
--- a/src/proto/h2/ping.rs
+++ b/src/proto/h2/ping.rs
@@ -441,7 +441,7 @@ impl KeepAlive {
self.state = KeepAliveState::Scheduled;
let interval = shared.last_read_at() + self.interval;
- self.timer.reset(interval);
+ self.timer.as_mut().reset(interval);
}
KeepAliveState::PingSent => {
if shared.is_ping_sent() {
diff --git a/src/proto/h2/ping.rs b/src/proto/h2/ping.rs
--- a/src/proto/h2/ping.rs
+++ b/src/proto/h2/ping.rs
@@ -450,7 +450,7 @@ impl KeepAlive {
self.state = KeepAliveState::Scheduled;
let interval = shared.last_read_at() + self.interval;
- self.timer.reset(interval);
+ self.timer.as_mut().reset(interval);
}
KeepAliveState::Scheduled => (),
}
diff --git a/src/proto/h2/ping.rs b/src/proto/h2/ping.rs
--- a/src/proto/h2/ping.rs
+++ b/src/proto/h2/ping.rs
@@ -472,7 +472,7 @@ impl KeepAlive {
shared.send_ping();
self.state = KeepAliveState::PingSent;
let timeout = Instant::now() + self.timeout;
- self.timer.reset(timeout);
+ self.timer.as_mut().reset(timeout);
}
KeepAliveState::Init | KeepAliveState::PingSent => (),
}
diff --git a/src/server/tcp.rs b/src/server/tcp.rs
--- a/src/server/tcp.rs
+++ b/src/server/tcp.rs
@@ -19,7 +19,7 @@ pub struct AddrIncoming {
sleep_on_errors: bool,
tcp_keepalive_timeout: Option<Duration>,
tcp_nodelay: bool,
- timeout: Option<Sleep>,
+ timeout: Option<Pin<Box<Sleep>>>,
}
impl AddrIncoming {
diff --git a/src/server/tcp.rs b/src/server/tcp.rs
--- a/src/server/tcp.rs
+++ b/src/server/tcp.rs
@@ -160,9 +160,9 @@ impl AddrIncoming {
error!("accept error: {}", e);
// Sleep 1s.
- let mut timeout = tokio::time::sleep(Duration::from_secs(1));
+ let mut timeout = Box::pin(tokio::time::sleep(Duration::from_secs(1)));
- match Pin::new(&mut timeout).poll(cx) {
+ match timeout.as_mut().poll(cx) {
Poll::Ready(()) => {
// Wow, it's been a second already? Ok then...
continue;
diff --git a/src/server/tcp.rs b/src/server/tcp.rs
--- a/src/server/tcp.rs
+++ b/src/server/tcp.rs
@@ -263,7 +263,7 @@ mod addr_stream {
pub fn poll_peek(
&mut self,
cx: &mut task::Context<'_>,
- buf: &mut [u8],
+ buf: &mut tokio::io::ReadBuf<'_>,
) -> Poll<io::Result<usize>> {
self.inner.poll_peek(cx, buf)
}
diff --git a/src/upgrade.rs b/src/upgrade.rs
--- a/src/upgrade.rs
+++ b/src/upgrade.rs
@@ -11,7 +11,7 @@ use std::fmt;
use std::io;
use std::marker::Unpin;
-use bytes::{Buf, Bytes};
+use bytes::Bytes;
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
use tokio::sync::oneshot;
diff --git a/src/upgrade.rs b/src/upgrade.rs
--- a/src/upgrade.rs
+++ b/src/upgrade.rs
@@ -82,7 +82,7 @@ impl Upgraded {
T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
{
Upgraded {
- io: Rewind::new_buffered(Box::new(ForwardsWriteBuf(io)), read_buf),
+ io: Rewind::new_buffered(Box::new(io), read_buf),
}
}
diff --git a/src/upgrade.rs b/src/upgrade.rs
--- a/src/upgrade.rs
+++ b/src/upgrade.rs
@@ -92,9 +92,9 @@ impl Upgraded {
/// `Upgraded` back.
pub fn downcast<T: AsyncRead + AsyncWrite + Unpin + 'static>(self) -> Result<Parts<T>, Self> {
let (io, buf) = self.io.into_inner();
- match io.__hyper_downcast::<ForwardsWriteBuf<T>>() {
+ match io.__hyper_downcast() {
Ok(t) => Ok(Parts {
- io: t.0,
+ io: *t,
read_buf: buf,
_inner: (),
}),
diff --git a/src/upgrade.rs b/src/upgrade.rs
--- a/src/upgrade.rs
+++ b/src/upgrade.rs
@@ -221,20 +221,14 @@ impl StdError for UpgradeExpected {}
// ===== impl Io =====
-struct ForwardsWriteBuf<T>(T);
-
pub(crate) trait Io: AsyncRead + AsyncWrite + Unpin + 'static {
- fn poll_write_dyn_buf(
- &mut self,
- cx: &mut task::Context<'_>,
- buf: &mut dyn Buf,
- ) -> Poll<io::Result<usize>>;
-
fn __hyper_type_id(&self) -> TypeId {
TypeId::of::<Self>()
}
}
+impl<T: AsyncRead + AsyncWrite + Unpin + 'static> Io for T {}
+
impl dyn Io + Send {
fn __hyper_is<T: Io>(&self) -> bool {
let t = TypeId::of::<T>();
diff --git a/src/upgrade.rs b/src/upgrade.rs
--- a/src/upgrade.rs
+++ b/src/upgrade.rs
@@ -254,61 +248,6 @@ impl dyn Io + Send {
}
}
-impl<T: AsyncRead + Unpin> AsyncRead for ForwardsWriteBuf<T> {
- fn poll_read(
- mut self: Pin<&mut Self>,
- cx: &mut task::Context<'_>,
- buf: &mut ReadBuf<'_>,
- ) -> Poll<io::Result<()>> {
- Pin::new(&mut self.0).poll_read(cx, buf)
- }
-}
-
-impl<T: AsyncWrite + Unpin> AsyncWrite for ForwardsWriteBuf<T> {
- fn poll_write(
- mut self: Pin<&mut Self>,
- cx: &mut task::Context<'_>,
- buf: &[u8],
- ) -> Poll<io::Result<usize>> {
- Pin::new(&mut self.0).poll_write(cx, buf)
- }
-
- fn poll_write_vectored(
- mut self: Pin<&mut Self>,
- cx: &mut task::Context<'_>,
- bufs: &[io::IoSlice<'_>],
- ) -> Poll<io::Result<usize>> {
- Pin::new(&mut self.0).poll_write_vectored(cx, bufs)
- }
-
- fn poll_flush(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<io::Result<()>> {
- Pin::new(&mut self.0).poll_flush(cx)
- }
-
- fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<io::Result<()>> {
- Pin::new(&mut self.0).poll_shutdown(cx)
- }
-
- fn is_write_vectored(&self) -> bool {
- self.0.is_write_vectored()
- }
-}
-
-impl<T: AsyncRead + AsyncWrite + Unpin + 'static> Io for ForwardsWriteBuf<T> {
- fn poll_write_dyn_buf(
- &mut self,
- cx: &mut task::Context<'_>,
- buf: &mut dyn Buf,
- ) -> Poll<io::Result<usize>> {
- if self.0.is_write_vectored() {
- let mut bufs = [io::IoSlice::new(&[]); crate::common::io::MAX_WRITEV_BUFS];
- let cnt = buf.bytes_vectored(&mut bufs);
- return Pin::new(&mut self.0).poll_write_vectored(cx, &bufs[..cnt]);
- }
- Pin::new(&mut self.0).poll_write(cx, buf.bytes())
- }
-}
-
mod sealed {
use super::OnUpgrade;
|
2020-12-22T20:18:36Z
| 2,369
|
Upgrade to Tokio 1.0
It's not released yet, but will be *very* soon.
Relevant PRs:
- [x] http-body: https://github.com/hyperium/http-body/pull/31
- [x] h2: https://github.com/hyperium/h2/pull/504
- [x] hyper: https://github.com/hyperium/hyper/pull/2369
|
hyperium__hyper-2369
|
diff --git a/Cargo.toml b/Cargo.toml
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -62,8 +62,8 @@ tokio = { version = "0.3", features = [
"time",
"test-util",
] }
-tokio-test = "0.3"
-tokio-util = { version = "0.5", features = ["codec"] }
+tokio-test = "0.4"
+tokio-util = { version = "0.6", features = ["codec"] }
tower-util = "0.3"
url = "1.0"
diff --git a/src/upgrade.rs b/src/upgrade.rs
--- a/src/upgrade.rs
+++ b/src/upgrade.rs
@@ -352,7 +291,6 @@ mod sealed {
#[cfg(test)]
mod tests {
use super::*;
- use tokio::io::AsyncWriteExt;
#[test]
fn upgraded_downcast() {
diff --git a/src/upgrade.rs b/src/upgrade.rs
--- a/src/upgrade.rs
+++ b/src/upgrade.rs
@@ -363,15 +301,6 @@ mod tests {
upgraded.downcast::<Mock>().unwrap();
}
- #[tokio::test]
- async fn upgraded_forwards_write_buf() {
- // sanity check that the underlying IO implements write_buf
- Mock.write_buf(&mut "hello".as_bytes()).await.unwrap();
-
- let mut upgraded = Upgraded::new(Mock, Bytes::new());
- upgraded.write_buf(&mut "hello".as_bytes()).await.unwrap();
- }
-
// TODO: replace with tokio_test::io when it can test write_buf
struct Mock;
diff --git a/src/upgrade.rs b/src/upgrade.rs
--- a/src/upgrade.rs
+++ b/src/upgrade.rs
@@ -395,17 +324,6 @@ mod tests {
Poll::Ready(Ok(buf.len()))
}
- // TODO(eliza): :(
- // fn poll_write_buf<B: Buf>(
- // self: Pin<&mut Self>,
- // _cx: &mut task::Context<'_>,
- // buf: &mut B,
- // ) -> Poll<io::Result<usize>> {
- // let n = buf.remaining();
- // buf.advance(n);
- // Poll::Ready(Ok(n))
- // }
-
fn poll_flush(self: Pin<&mut Self>, _cx: &mut task::Context<'_>) -> Poll<io::Result<()>> {
unreachable!("Mock::poll_flush")
}
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1209,6 +1209,7 @@ mod dispatch_impl {
// and wait a few ticks for the connections to close
let t = tokio::time::sleep(Duration::from_millis(100)).map(|_| panic!("time out"));
+ futures_util::pin_mut!(t);
let close = closes.into_future().map(|(opt, _)| opt.expect("closes"));
future::select(t, close).await;
}
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1257,6 +1258,7 @@ mod dispatch_impl {
// res now dropped
let t = tokio::time::sleep(Duration::from_millis(100)).map(|_| panic!("time out"));
+ futures_util::pin_mut!(t);
let close = closes.into_future().map(|(opt, _)| opt.expect("closes"));
future::select(t, close).await;
}
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1312,6 +1314,7 @@ mod dispatch_impl {
// and wait a few ticks to see the connection drop
let t = tokio::time::sleep(Duration::from_millis(100)).map(|_| panic!("time out"));
+ futures_util::pin_mut!(t);
let close = closes.into_future().map(|(opt, _)| opt.expect("closes"));
future::select(t, close).await;
}
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1362,6 +1365,7 @@ mod dispatch_impl {
res.unwrap();
let t = tokio::time::sleep(Duration::from_millis(100)).map(|_| panic!("time out"));
+ futures_util::pin_mut!(t);
let close = closes.into_future().map(|(opt, _)| opt.expect("closes"));
future::select(t, close).await;
}
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1408,6 +1412,7 @@ mod dispatch_impl {
res.unwrap();
let t = tokio::time::sleep(Duration::from_millis(100)).map(|_| panic!("time out"));
+ futures_util::pin_mut!(t);
let close = closes.into_future().map(|(opt, _)| opt.expect("closes"));
future::select(t, close).await;
}
|
hyperium/hyper
|
6fd696e10974f10b2c6b9d16393bbbfa21c2333f
|
|
[
"2171"
] |
0.13
|
42560c7c40d8f934658624114fda4eb819cefda8
|
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -55,6 +55,8 @@ enum ChunkedState {
Body,
BodyCr,
BodyLf,
+ Trailer,
+ TrailerLf,
EndCr,
EndLf,
End,
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -196,6 +198,8 @@ impl ChunkedState {
Body => ChunkedState::read_body(cx, body, size, buf),
BodyCr => ChunkedState::read_body_cr(cx, body),
BodyLf => ChunkedState::read_body_lf(cx, body),
+ Trailer => ChunkedState::read_trailer(cx, body),
+ TrailerLf => ChunkedState::read_trailer_lf(cx, body),
EndCr => ChunkedState::read_end_cr(cx, body),
EndLf => ChunkedState::read_end_lf(cx, body),
End => Poll::Ready(Ok(ChunkedState::End)),
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -340,18 +344,38 @@ impl ChunkedState {
}
}
- fn read_end_cr<R: MemRead>(
+ fn read_trailer<R: MemRead>(
cx: &mut task::Context<'_>,
rdr: &mut R,
) -> Poll<Result<ChunkedState, io::Error>> {
+ trace!("read_trailer");
match byte!(rdr, cx) {
- b'\r' => Poll::Ready(Ok(ChunkedState::EndLf)),
+ b'\r' => Poll::Ready(Ok(ChunkedState::TrailerLf)),
+ _ => Poll::Ready(Ok(ChunkedState::Trailer)),
+ }
+ }
+ fn read_trailer_lf<R: MemRead>(
+ cx: &mut task::Context<'_>,
+ rdr: &mut R,
+ ) -> Poll<Result<ChunkedState, io::Error>> {
+ match byte!(rdr, cx) {
+ b'\n' => Poll::Ready(Ok(ChunkedState::EndCr)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
- "Invalid chunk end CR",
+ "Invalid trailer end LF",
))),
}
}
+
+ fn read_end_cr<R: MemRead>(
+ cx: &mut task::Context<'_>,
+ rdr: &mut R,
+ ) -> Poll<Result<ChunkedState, io::Error>> {
+ match byte!(rdr, cx) {
+ b'\r' => Poll::Ready(Ok(ChunkedState::EndLf)),
+ _ => Poll::Ready(Ok(ChunkedState::Trailer)),
+ }
+ }
fn read_end_lf<R: MemRead>(
cx: &mut task::Context<'_>,
rdr: &mut R,
|
That is actually valid chunked encoding, using chunked trailers. hyper just hasn't supported them, because practically _nothing_ has ever used them and they are annoying to deal with.
gRPC of HTTP/1.1 decided "oh hey, we can do trailers with this obscure detail of chunked encoding". Grumble.
Checking [RFC7230](https://tools.ietf.org/html/rfc7230#section-4.3), a server shouldn't send chunked trailers if the client didn't include a `TE: trailers` header. Does your request include one?
Perhaps hyper should strip that header as long as it doesn't have h1 trailers support.
> That is actually valid chunked encoding, using chunked trailers. hyper just hasn't supported them, because practically nothing has ever used them and they are annoying to deal with.
> gRPC of HTTP/1.1 decided "oh hey, we can do trailers with this obscure detail of chunked encoding". Grumble.
That is really interesting, I didn't know that :)
> Checking [RFC7230](https://tools.ietf.org/html/rfc7230#section-4.3), a server shouldn't send chunked trailers if the client didn't include a `TE: trailers` header. Does your request include one?
>
> Perhaps hyper should strip that header as long as it doesn't have h1 trailers support.
No my request did not include such header. I guess that results in two follow-up actions.
I will open an issue at the [grpc-gateway](https://github.com/grpc-ecosystem/grpc-gateway) repository to inform them about the spec mismatch.
I have the same problem. Original bug report is on [linkerd2 repo](https://github.com/linkerd/linkerd2/issues/4714)
@thomaseizinger any news about that?
> Even if this is an invalid response, it would be nice if hyper could be robust here and just ignore everything that is coming after the 0 chunk size indicator.
Agree with you.
> Checking [RFC7230](https://tools.ietf.org/html/rfc7230#section-4.3), a server shouldn't send chunked trailers if the client didn't include a `TE: trailers` header. Does your request include one?
>
> Perhaps hyper should strip that header as long as it doesn't have h1 trailers support.
Actually hyper strips that trailer, but after this i am getting ```net::ERR_INCOMPLETE_CHUNKED_ENCODING``` error in Google Chrome.
The only difference is after hyper strips that header, it strips last CR LF too. But probably should not. Please check attachments (wireshark screenshots with linkerd enabled and direct request).


hyper doesn't have any handling of the `te: trailers` header in its HTTP/1 code.
> hyper doesn't have any handling of the `te: trailers` header in its HTTP/1 code.
Helllo @seanmonstar! I think we are misunderstood each other.
It's not about handing that header, it's about the response with trailers passed through the hyper becomes invalid. Please take a look on screenshots above. On the first screenshot we can see a response passed through the hyper, pay attention on the end of response body. The are missing CR LF on the end and the trailer which exists in original response. On the second screenshot we can see an original response.
Just stumbled on this ourselves. We're proxying a response from a nginx instance that uses `add_trailer` and this appears to close the connection prematurely. HTTP clients tend to complain, like curl:
```
* transfer closed with outstanding read data remaining
100 76655 0 76655 0 0 467k 0 --:--:-- --:--:-- --:--:-- 467k
* Closing connection 0
curl: (18) transfer closed with outstanding read data remaining
```
76655 is the full size of the transfer, but since it produces an unexpected EOF, that curl errors.
I know this way of doing things is not very common, but some of our customers might eventually do it and stumble on this bug. It wasn't very easy to troubleshoot. My only clue was "error reading a body from connection: Invalid chunk end CR" which led me here.
|
2020-12-16T16:25:18Z
| 2,363
|
Decoding response from gRPC REST proxy results in "Invalid chunk end CR" error
I am integrating with a go application that uses the gRPC REST proxy to provide an HTTP API.
My HTTP client is `reqwest` which uses `hyper:0.13.3` at this stage.
The go application sends the following response:
```
HTTP/1.1 500 Internal Server Error
Content-Type: application/json
Trailer: Grpc-Trailer-Content-Type
Date: Wed, 01 Apr 2020 00:16:52 GMT
Transfer-Encoding: chunked
5e
{"error":"there are no existing invoices","message":"there are no existing invoices","code":2}
0
Grpc-Trailer-Content-Type: application/grpc
{"error":"there are no existing invoices","message":"there are no existing invoices","code":2}%
```
I've added a test to the hyper test suite that reproduces the issue: https://github.com/thomaseizinger/hyper/commit/a608b28dd9d93a0b0754653c859f262c14d9fd59
I don't know whether this is a valid HTTP response. Reading through the HTTP spec, it seems like this grpc trailer stuff is violating the `Transfer-Encoding: chunked` spec because the chunked is advertised as 0 bytes which from what I understand means the response is over, yet there are more bytes following.
Interestingly, `curl` is able to handle the response just fine but that could just be an artifact of curl being very robust to these kind of mistakes.
Even if this is an invalid response, it would be nice if `hyper` could be robust here and just ignore everything that is coming after the `0` chunk size indicator.
|
hyperium__hyper-2363
|
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -537,6 +561,15 @@ mod tests {
assert_eq!("1234567890abcdef", &result);
}
+ #[tokio::test]
+ async fn test_read_chunked_trailer_with_missing_lf() {
+ let mut mock_buf = &b"10\r\n1234567890abcdef\r\n0\r\nbad\r\r\n"[..];
+ let mut decoder = Decoder::chunked();
+ decoder.decode_fut(&mut mock_buf).await.expect("decode");
+ let e = decoder.decode_fut(&mut mock_buf).await.unwrap_err();
+ assert_eq!(e.kind(), io::ErrorKind::InvalidInput);
+ }
+
#[tokio::test]
async fn test_read_chunked_after_eof() {
let mut mock_buf = &b"10\r\n1234567890abcdef\r\n0\r\n\r\n"[..];
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -430,6 +430,69 @@ test! {
body: None,
}
+test! {
+ name: client_get_req_body_chunked_with_trailer,
+
+ server:
+ expected: "\
+ GET / HTTP/1.1\r\n\
+ host: {addr}\r\n\
+ \r\n\
+ ",
+ reply: "\
+ HTTP/1.1 200 OK\r\n\
+ Transfer-Encoding: chunked\r\n\
+ \r\n\
+ 5\r\n\
+ hello\r\n\
+ 0\r\n\
+ Trailer: value\r\n\
+ \r\n\
+ ",
+
+ client:
+ request: {
+ method: GET,
+ url: "http://{addr}/",
+ },
+ response:
+ status: OK,
+ headers: {},
+ body: &b"hello"[..],
+}
+
+test! {
+ name: client_get_req_body_chunked_with_multiple_trailers,
+
+ server:
+ expected: "\
+ GET / HTTP/1.1\r\n\
+ host: {addr}\r\n\
+ \r\n\
+ ",
+ reply: "\
+ HTTP/1.1 200 OK\r\n\
+ Transfer-Encoding: chunked\r\n\
+ \r\n\
+ 5\r\n\
+ hello\r\n\
+ 0\r\n\
+ Trailer: value\r\n\
+ another-trainer: another-value\r\n\
+ \r\n\
+ ",
+
+ client:
+ request: {
+ method: GET,
+ url: "http://{addr}/",
+ },
+ response:
+ status: OK,
+ headers: {},
+ body: &b"hello"[..],
+}
+
test! {
name: client_get_req_body_sized,
|
hyperium/hyper
|
42560c7c40d8f934658624114fda4eb819cefda8
|
[
"2171"
] |
0.3
|
7d9a5806e146798d0cbe67672bbe3ad5ae680393
|
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -55,6 +55,8 @@ enum ChunkedState {
Body,
BodyCr,
BodyLf,
+ Trailer,
+ TrailerLf,
EndCr,
EndLf,
End,
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -196,6 +198,8 @@ impl ChunkedState {
Body => ChunkedState::read_body(cx, body, size, buf),
BodyCr => ChunkedState::read_body_cr(cx, body),
BodyLf => ChunkedState::read_body_lf(cx, body),
+ Trailer => ChunkedState::read_trailer(cx, body),
+ TrailerLf => ChunkedState::read_trailer_lf(cx, body),
EndCr => ChunkedState::read_end_cr(cx, body),
EndLf => ChunkedState::read_end_lf(cx, body),
End => Poll::Ready(Ok(ChunkedState::End)),
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -340,18 +344,38 @@ impl ChunkedState {
}
}
- fn read_end_cr<R: MemRead>(
+ fn read_trailer<R: MemRead>(
cx: &mut task::Context<'_>,
rdr: &mut R,
) -> Poll<Result<ChunkedState, io::Error>> {
+ trace!("read_trailer");
match byte!(rdr, cx) {
- b'\r' => Poll::Ready(Ok(ChunkedState::EndLf)),
+ b'\r' => Poll::Ready(Ok(ChunkedState::TrailerLf)),
+ _ => Poll::Ready(Ok(ChunkedState::Trailer)),
+ }
+ }
+ fn read_trailer_lf<R: MemRead>(
+ cx: &mut task::Context<'_>,
+ rdr: &mut R,
+ ) -> Poll<Result<ChunkedState, io::Error>> {
+ match byte!(rdr, cx) {
+ b'\n' => Poll::Ready(Ok(ChunkedState::EndCr)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
- "Invalid chunk end CR",
+ "Invalid trailer end LF",
))),
}
}
+
+ fn read_end_cr<R: MemRead>(
+ cx: &mut task::Context<'_>,
+ rdr: &mut R,
+ ) -> Poll<Result<ChunkedState, io::Error>> {
+ match byte!(rdr, cx) {
+ b'\r' => Poll::Ready(Ok(ChunkedState::EndLf)),
+ _ => Poll::Ready(Ok(ChunkedState::Trailer)),
+ }
+ }
fn read_end_lf<R: MemRead>(
cx: &mut task::Context<'_>,
rdr: &mut R,
|
That is actually valid chunked encoding, using chunked trailers. hyper just hasn't supported them, because practically _nothing_ has ever used them and they are annoying to deal with.
gRPC of HTTP/1.1 decided "oh hey, we can do trailers with this obscure detail of chunked encoding". Grumble.
Checking [RFC7230](https://tools.ietf.org/html/rfc7230#section-4.3), a server shouldn't send chunked trailers if the client didn't include a `TE: trailers` header. Does your request include one?
Perhaps hyper should strip that header as long as it doesn't have h1 trailers support.
> That is actually valid chunked encoding, using chunked trailers. hyper just hasn't supported them, because practically nothing has ever used them and they are annoying to deal with.
> gRPC of HTTP/1.1 decided "oh hey, we can do trailers with this obscure detail of chunked encoding". Grumble.
That is really interesting, I didn't know that :)
> Checking [RFC7230](https://tools.ietf.org/html/rfc7230#section-4.3), a server shouldn't send chunked trailers if the client didn't include a `TE: trailers` header. Does your request include one?
>
> Perhaps hyper should strip that header as long as it doesn't have h1 trailers support.
No my request did not include such header. I guess that results in two follow-up actions.
I will open an issue at the [grpc-gateway](https://github.com/grpc-ecosystem/grpc-gateway) repository to inform them about the spec mismatch.
I have the same problem. Original bug report is on [linkerd2 repo](https://github.com/linkerd/linkerd2/issues/4714)
@thomaseizinger any news about that?
> Even if this is an invalid response, it would be nice if hyper could be robust here and just ignore everything that is coming after the 0 chunk size indicator.
Agree with you.
> Checking [RFC7230](https://tools.ietf.org/html/rfc7230#section-4.3), a server shouldn't send chunked trailers if the client didn't include a `TE: trailers` header. Does your request include one?
>
> Perhaps hyper should strip that header as long as it doesn't have h1 trailers support.
Actually hyper strips that trailer, but after this i am getting ```net::ERR_INCOMPLETE_CHUNKED_ENCODING``` error in Google Chrome.
The only difference is after hyper strips that header, it strips last CR LF too. But probably should not. Please check attachments (wireshark screenshots with linkerd enabled and direct request).


hyper doesn't have any handling of the `te: trailers` header in its HTTP/1 code.
> hyper doesn't have any handling of the `te: trailers` header in its HTTP/1 code.
Helllo @seanmonstar! I think we are misunderstood each other.
It's not about handing that header, it's about the response with trailers passed through the hyper becomes invalid. Please take a look on screenshots above. On the first screenshot we can see a response passed through the hyper, pay attention on the end of response body. The are missing CR LF on the end and the trailer which exists in original response. On the second screenshot we can see an original response.
Just stumbled on this ourselves. We're proxying a response from a nginx instance that uses `add_trailer` and this appears to close the connection prematurely. HTTP clients tend to complain, like curl:
```
* transfer closed with outstanding read data remaining
100 76655 0 76655 0 0 467k 0 --:--:-- --:--:-- --:--:-- 467k
* Closing connection 0
curl: (18) transfer closed with outstanding read data remaining
```
76655 is the full size of the transfer, but since it produces an unexpected EOF, that curl errors.
I know this way of doing things is not very common, but some of our customers might eventually do it and stumble on this bug. It wasn't very easy to troubleshoot. My only clue was "error reading a body from connection: Invalid chunk end CR" which led me here.
|
2020-12-13T05:51:51Z
| 2,357
|
Decoding response from gRPC REST proxy results in "Invalid chunk end CR" error
I am integrating with a go application that uses the gRPC REST proxy to provide an HTTP API.
My HTTP client is `reqwest` which uses `hyper:0.13.3` at this stage.
The go application sends the following response:
```
HTTP/1.1 500 Internal Server Error
Content-Type: application/json
Trailer: Grpc-Trailer-Content-Type
Date: Wed, 01 Apr 2020 00:16:52 GMT
Transfer-Encoding: chunked
5e
{"error":"there are no existing invoices","message":"there are no existing invoices","code":2}
0
Grpc-Trailer-Content-Type: application/grpc
{"error":"there are no existing invoices","message":"there are no existing invoices","code":2}%
```
I've added a test to the hyper test suite that reproduces the issue: https://github.com/thomaseizinger/hyper/commit/a608b28dd9d93a0b0754653c859f262c14d9fd59
I don't know whether this is a valid HTTP response. Reading through the HTTP spec, it seems like this grpc trailer stuff is violating the `Transfer-Encoding: chunked` spec because the chunked is advertised as 0 bytes which from what I understand means the response is over, yet there are more bytes following.
Interestingly, `curl` is able to handle the response just fine but that could just be an artifact of curl being very robust to these kind of mistakes.
Even if this is an invalid response, it would be nice if `hyper` could be robust here and just ignore everything that is coming after the `0` chunk size indicator.
|
hyperium__hyper-2357
|
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -538,6 +562,15 @@ mod tests {
assert_eq!("1234567890abcdef", &result);
}
+ #[tokio::test]
+ async fn test_read_chunked_trailer_with_missing_lf() {
+ let mut mock_buf = &b"10\r\n1234567890abcdef\r\n0\r\nbad\r\r\n"[..];
+ let mut decoder = Decoder::chunked();
+ decoder.decode_fut(&mut mock_buf).await.expect("decode");
+ let e = decoder.decode_fut(&mut mock_buf).await.unwrap_err();
+ assert_eq!(e.kind(), io::ErrorKind::InvalidInput);
+ }
+
#[tokio::test]
async fn test_read_chunked_after_eof() {
let mut mock_buf = &b"10\r\n1234567890abcdef\r\n0\r\n\r\n"[..];
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -430,6 +430,69 @@ test! {
body: None,
}
+test! {
+ name: client_get_req_body_chunked_with_trailer,
+
+ server:
+ expected: "\
+ GET / HTTP/1.1\r\n\
+ host: {addr}\r\n\
+ \r\n\
+ ",
+ reply: "\
+ HTTP/1.1 200 OK\r\n\
+ Transfer-Encoding: chunked\r\n\
+ \r\n\
+ 5\r\n\
+ hello\r\n\
+ 0\r\n\
+ Trailer: value\r\n\
+ \r\n\
+ ",
+
+ client:
+ request: {
+ method: GET,
+ url: "http://{addr}/",
+ },
+ response:
+ status: OK,
+ headers: {},
+ body: &b"hello"[..],
+}
+
+test! {
+ name: client_get_req_body_chunked_with_multiple_trailers,
+
+ server:
+ expected: "\
+ GET / HTTP/1.1\r\n\
+ host: {addr}\r\n\
+ \r\n\
+ ",
+ reply: "\
+ HTTP/1.1 200 OK\r\n\
+ Transfer-Encoding: chunked\r\n\
+ \r\n\
+ 5\r\n\
+ hello\r\n\
+ 0\r\n\
+ Trailer: value\r\n\
+ another-trainer: another-value\r\n\
+ \r\n\
+ ",
+
+ client:
+ request: {
+ method: GET,
+ url: "http://{addr}/",
+ },
+ response:
+ status: OK,
+ headers: {},
+ body: &b"hello"[..],
+}
+
test! {
name: client_get_req_body_sized,
|
hyperium/hyper
|
6fd696e10974f10b2c6b9d16393bbbfa21c2333f
|
[
"2347"
] |
0.3
|
a470446deb2cb2c0e3700f67d9f70097d0d7d75f
|
diff --git a/Cargo.toml b/Cargo.toml
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -68,7 +68,7 @@ tower-util = "0.3"
url = "1.0"
[target.'cfg(any(target_os = "linux", target_os = "macos"))'.dev-dependencies]
-pnet = "0.25.0"
+pnet_datalink = "0.27.2"
[features]
# Nothing by default
|
> **Fixed in**: >=0.26.0
> **Issue**: [pnet GitHub issue #449](https://github.com/libpnet/libpnet/issues/449)
This doesn't really seem to have been fixed yet. (https://github.com/libpnet/libpnet/pull/455)
> > **Fixed in**: >=0.26.0
>
> > **Issue**: [pnet GitHub issue #449](https://github.com/libpnet/libpnet/issues/449)
>
> This doesn't really seem to have been fixed yet. ([libpnet/libpnet#455](https://github.com/libpnet/libpnet/pull/455))
Good catch, I will file an update to the security advisory database.
Thanks for the report, definitely worth getting a fix in. Good news is that its only used in hyper's unit tests, so not part of the actually library
Additionally, it looks like the unit tests do not use any of the code in libpnet which is affected by this vulnerability.
|
2020-11-27T21:36:22Z
| 2,348
|
Known vulnerability in dependency 'pnet' v 0.25.0
I ran `cargo audit` on the current master and got a match for a known vulnerability in pnet 0.25.0:
__ID__: RUSTSEC-2019-0037
__Package__: pnet
__Version__: 0.25.0
__Fixed in__: not yet fixed
__Title__: Compiler optimisation for next_with_timeout in pnet::transport::IcmpTransportChannelIterator flaws to SEGFAULT
__Description__: Affected versions of this crate were optimized out by compiler, which caused dereference of uninitialized file descriptor which caused segfault.
__Issue__: [pnet GitHub issue #449](https://github.com/libpnet/libpnet/issues/449)
|
hyperium__hyper-2348
|
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -723,7 +723,7 @@ mod tests {
let mut ip_v4 = None;
let mut ip_v6 = None;
- let ips = pnet::datalink::interfaces()
+ let ips = pnet_datalink::interfaces()
.into_iter()
.flat_map(|i| i.ips.into_iter().map(|n| n.ip()));
|
hyperium/hyper
|
6fd696e10974f10b2c6b9d16393bbbfa21c2333f
|
[
"1937"
] |
0.3
|
a470446deb2cb2c0e3700f67d9f70097d0d7d75f
|
diff --git a/src/client/connect/dns.rs b/src/client/connect/dns.rs
--- a/src/client/connect/dns.rs
+++ b/src/client/connect/dns.rs
@@ -9,21 +9,21 @@
//! # Resolvers are `Service`s
//!
//! A resolver is just a
-//! `Service<Name, Response = impl Iterator<Item = IpAddr>>`.
+//! `Service<Name, Response = impl Iterator<Item = SocketAddr>>`.
//!
//! A simple resolver that ignores the name and always returns a specific
//! address:
//!
//! ```rust,ignore
-//! use std::{convert::Infallible, iter, net::IpAddr};
+//! use std::{convert::Infallible, iter, net::SocketAddr};
//!
//! let resolver = tower::service_fn(|_name| async {
-//! Ok::<_, Infallible>(iter::once(IpAddr::from([127, 0, 0, 1])))
+//! Ok::<_, Infallible>(iter::once(SocketAddr::from(([127, 0, 0, 1], 8080))))
//! });
//! ```
use std::error::Error;
use std::future::Future;
-use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6, ToSocketAddrs};
+use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6, ToSocketAddrs};
use std::pin::Pin;
use std::str::FromStr;
use std::task::{self, Poll};
diff --git a/src/client/connect/dns.rs b/src/client/connect/dns.rs
--- a/src/client/connect/dns.rs
+++ b/src/client/connect/dns.rs
@@ -48,12 +48,12 @@ pub struct GaiResolver {
/// An iterator of IP addresses returned from `getaddrinfo`.
pub struct GaiAddrs {
- inner: IpAddrs,
+ inner: SocketAddrs,
}
/// A future to resolve a name returned by `GaiResolver`.
pub struct GaiFuture {
- inner: JoinHandle<Result<IpAddrs, io::Error>>,
+ inner: JoinHandle<Result<SocketAddrs, io::Error>>,
}
impl Name {
diff --git a/src/client/connect/dns.rs b/src/client/connect/dns.rs
--- a/src/client/connect/dns.rs
+++ b/src/client/connect/dns.rs
@@ -121,7 +121,7 @@ impl Service<Name> for GaiResolver {
debug!("resolving host={:?}", name.host);
(&*name.host, 0)
.to_socket_addrs()
- .map(|i| IpAddrs { iter: i })
+ .map(|i| SocketAddrs { iter: i })
});
GaiFuture { inner: blocking }
diff --git a/src/client/connect/dns.rs b/src/client/connect/dns.rs
--- a/src/client/connect/dns.rs
+++ b/src/client/connect/dns.rs
@@ -159,10 +159,10 @@ impl fmt::Debug for GaiFuture {
}
impl Iterator for GaiAddrs {
- type Item = IpAddr;
+ type Item = SocketAddr;
fn next(&mut self) -> Option<Self::Item> {
- self.inner.next().map(|sa| sa.ip())
+ self.inner.next()
}
}
diff --git a/src/client/connect/dns.rs b/src/client/connect/dns.rs
--- a/src/client/connect/dns.rs
+++ b/src/client/connect/dns.rs
@@ -172,28 +172,28 @@ impl fmt::Debug for GaiAddrs {
}
}
-pub(super) struct IpAddrs {
+pub(super) struct SocketAddrs {
iter: vec::IntoIter<SocketAddr>,
}
-impl IpAddrs {
+impl SocketAddrs {
pub(super) fn new(addrs: Vec<SocketAddr>) -> Self {
- IpAddrs {
+ SocketAddrs {
iter: addrs.into_iter(),
}
}
- pub(super) fn try_parse(host: &str, port: u16) -> Option<IpAddrs> {
+ pub(super) fn try_parse(host: &str, port: u16) -> Option<SocketAddrs> {
if let Ok(addr) = host.parse::<Ipv4Addr>() {
let addr = SocketAddrV4::new(addr, port);
- return Some(IpAddrs {
+ return Some(SocketAddrs {
iter: vec![SocketAddr::V4(addr)].into_iter(),
});
}
let host = host.trim_start_matches('[').trim_end_matches(']');
if let Ok(addr) = host.parse::<Ipv6Addr>() {
let addr = SocketAddrV6::new(addr, port, 0, 0);
- return Some(IpAddrs {
+ return Some(SocketAddrs {
iter: vec![SocketAddr::V6(addr)].into_iter(),
});
}
diff --git a/src/client/connect/dns.rs b/src/client/connect/dns.rs
--- a/src/client/connect/dns.rs
+++ b/src/client/connect/dns.rs
@@ -201,18 +201,18 @@ impl IpAddrs {
}
#[inline]
- fn filter(self, predicate: impl FnMut(&SocketAddr) -> bool) -> IpAddrs {
- IpAddrs::new(self.iter.filter(predicate).collect())
+ fn filter(self, predicate: impl FnMut(&SocketAddr) -> bool) -> SocketAddrs {
+ SocketAddrs::new(self.iter.filter(predicate).collect())
}
pub(super) fn split_by_preference(
self,
local_addr_ipv4: Option<Ipv4Addr>,
local_addr_ipv6: Option<Ipv6Addr>,
- ) -> (IpAddrs, IpAddrs) {
+ ) -> (SocketAddrs, SocketAddrs) {
match (local_addr_ipv4, local_addr_ipv6) {
- (Some(_), None) => (self.filter(SocketAddr::is_ipv4), IpAddrs::new(vec![])),
- (None, Some(_)) => (self.filter(SocketAddr::is_ipv6), IpAddrs::new(vec![])),
+ (Some(_), None) => (self.filter(SocketAddr::is_ipv4), SocketAddrs::new(vec![])),
+ (None, Some(_)) => (self.filter(SocketAddr::is_ipv6), SocketAddrs::new(vec![])),
_ => {
let preferring_v6 = self
.iter
diff --git a/src/client/connect/dns.rs b/src/client/connect/dns.rs
--- a/src/client/connect/dns.rs
+++ b/src/client/connect/dns.rs
@@ -225,7 +225,7 @@ impl IpAddrs {
.iter
.partition::<Vec<_>, _>(|addr| addr.is_ipv6() == preferring_v6);
- (IpAddrs::new(preferred), IpAddrs::new(fallback))
+ (SocketAddrs::new(preferred), SocketAddrs::new(fallback))
}
}
}
diff --git a/src/client/connect/dns.rs b/src/client/connect/dns.rs
--- a/src/client/connect/dns.rs
+++ b/src/client/connect/dns.rs
@@ -239,7 +239,7 @@ impl IpAddrs {
}
}
-impl Iterator for IpAddrs {
+impl Iterator for SocketAddrs {
type Item = SocketAddr;
#[inline]
fn next(&mut self) -> Option<SocketAddr> {
diff --git a/src/client/connect/dns.rs b/src/client/connect/dns.rs
--- a/src/client/connect/dns.rs
+++ b/src/client/connect/dns.rs
@@ -312,13 +312,13 @@ impl Future for TokioThreadpoolGaiFuture {
*/
mod sealed {
- use super::{IpAddr, Name};
+ use super::{SocketAddr, Name};
use crate::common::{task, Future, Poll};
use tower_service::Service;
// "Trait alias" for `Service<Name, Response = Addrs>`
pub trait Resolve {
- type Addrs: Iterator<Item = IpAddr>;
+ type Addrs: Iterator<Item = SocketAddr>;
type Error: Into<Box<dyn std::error::Error + Send + Sync>>;
type Future: Future<Output = Result<Self::Addrs, Self::Error>>;
diff --git a/src/client/connect/dns.rs b/src/client/connect/dns.rs
--- a/src/client/connect/dns.rs
+++ b/src/client/connect/dns.rs
@@ -329,7 +329,7 @@ mod sealed {
impl<S> Resolve for S
where
S: Service<Name>,
- S::Response: Iterator<Item = IpAddr>,
+ S::Response: Iterator<Item = SocketAddr>,
S::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
{
type Addrs = S::Response;
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -321,14 +321,17 @@ where
// If the host is already an IP addr (v4 or v6),
// skip resolving the dns and start connecting right away.
- let addrs = if let Some(addrs) = dns::IpAddrs::try_parse(host, port) {
+ let addrs = if let Some(addrs) = dns::SocketAddrs::try_parse(host, port) {
addrs
} else {
let addrs = resolve(&mut self.resolver, dns::Name::new(host.into()))
.await
.map_err(ConnectError::dns)?;
- let addrs = addrs.map(|addr| SocketAddr::new(addr, port)).collect();
- dns::IpAddrs::new(addrs)
+ let addrs = addrs.map(|mut addr| {
+ addr.set_port(port);
+ addr
+ }).collect();
+ dns::SocketAddrs::new(addrs)
};
let c = ConnectingTcp::new(addrs, config);
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -457,7 +460,7 @@ struct ConnectingTcp<'a> {
}
impl<'a> ConnectingTcp<'a> {
- fn new(remote_addrs: dns::IpAddrs, config: &'a Config) -> Self {
+ fn new(remote_addrs: dns::SocketAddrs, config: &'a Config) -> Self {
if let Some(fallback_timeout) = config.happy_eyeballs_timeout {
let (preferred_addrs, fallback_addrs) = remote_addrs
.split_by_preference(config.local_address_ipv4, config.local_address_ipv6);
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -493,12 +496,12 @@ struct ConnectingTcpFallback {
}
struct ConnectingTcpRemote {
- addrs: dns::IpAddrs,
+ addrs: dns::SocketAddrs,
connect_timeout: Option<Duration>,
}
impl ConnectingTcpRemote {
- fn new(addrs: dns::IpAddrs, connect_timeout: Option<Duration>) -> Self {
+ fn new(addrs: dns::SocketAddrs, connect_timeout: Option<Duration>) -> Self {
let connect_timeout = connect_timeout.map(|t| t / (addrs.len() as u32));
Self {
|
Note: requires https://github.com/hyperium/http/pull/343 to work (patch in Cargo.toml)
Addressed the review comments, so this is ready for review again.
Uploaded as a separate commit, I can of course squash/rebase if desired.
I'm not up-to-speed on IPv6 zone IDs. It identifies a specific port? Or, why is this changing `Resolve` to return a `SocketAddr` instead of an `IpAddr`?
It returns a `SocketAddr` because it needs `SocketAddrV6.scope_id` to be set correctly. https://doc.rust-lang.org/std/net/struct.SocketAddrV6.html#method.scope_id , which is used to determine which network interface to use to make the connection. Since generally all interfaces have `fe80::/64` link-local addresses, and link-local addresses are non-routable, there needs to be some disambiguation.
The only standard way I found that can turn a zone_id (on Linux, that's the interface name) into a scope_id (on Linux, the interface index) is getaddrinfo.
Some relevant info I found in man pages while I was digging: http://man7.org/linux/man-pages/man7/ipv6.7.html
> sin6_scope_id is an ID depending on the scope of the address. It is new in Linux 2.4. Linux supports it only for link-local addresses, in that case sin6_scope_id contains the interface index (see netdevice(7))
The interface index can also be found in /proc/net/if_inet6 (where it's the second column). I don't use that in this CL since I'd rather use standard APIs, and getaddrinfo seems to do the trick.
> It returns a SocketAddr because it needs SocketAddrV6.scope_id to be set correctly. https://doc.rust-lang.org/std/net/struct.SocketAddrV6.html#method.scope_id , which is used to determine which network interface to use to make the connection.
I see, thanks for explaining! So, does this mean IPv6 addresses essentially allow encoding [`HttpConnector::set_local_addresses`](https://docs.rs/hyper/0.12.*/hyper/client/struct.HttpConnector.html#method.set_local_address) in a per-destination way?
Since this has a breaking change, it'd need to be merged before 0.13 is released (target is early next week). I still don't have my head around it entirely, but it sounds like this extra info cannot exist in an `IpAddr`, only a `SocketAddr`?
Is this change still on the table? Without it, it doesn't seem possible to use a link-local address with hyper with more than one link-local nic.
> I still don't have my head around it entirely, but it sounds like this extra info cannot exist in an `IpAddr`, only a `SocketAddr`?
Per the rust implementation of SocketAddrV6, the scope id is only contained in there. It's not necessarily IP info but "connection setup" info which describes which interface to use for the connection, which is necessary when multiple link-local interfaces are present.
> I see, thanks for explaining! So, does this mean IPv6 addresses essentially allow encoding HttpConnector::set_local_addresses in a per-destination way?
If there were an equivalent `HttpConnector::set_local_interface(iface_id)`, technically that is what this notation does per destination. By specifying the zone/interface, it's possible to select which interface to use for the connection which is necessary to disambiguate where you want to connect to.
Ref: https://tools.ietf.org/html/rfc4007#section-11
Ref: https://en.wikipedia.org/wiki/Link-local_address#IPv6
> Is this change still on the table?
I'm still open to the change, it was mentioned by contributors they wanted to add tests. Also, at this point, it'd be a breaking change to the "resolver" API, so either a way to provide it without breakage is needed, or this needs to wait till the next major version.
> If there were an equivalent `HttpConnector::set_local_interface(iface_id)`, technically that is what this notation does per destination.
Thanks, I understand now. Sounds useful!
|
2020-11-26T01:33:40Z
| 2,346
|
feat(client): allow connecting to IPv6 link-local addresses
|
hyperium__hyper-2346
|
diff --git a/src/client/connect/dns.rs b/src/client/connect/dns.rs
--- a/src/client/connect/dns.rs
+++ b/src/client/connect/dns.rs
@@ -366,42 +366,42 @@ mod tests {
let v4_addr = (ip_v4, 80).into();
let v6_addr = (ip_v6, 80).into();
- let (mut preferred, mut fallback) = IpAddrs {
+ let (mut preferred, mut fallback) = SocketAddrs {
iter: vec![v4_addr, v6_addr].into_iter(),
}
.split_by_preference(None, None);
assert!(preferred.next().unwrap().is_ipv4());
assert!(fallback.next().unwrap().is_ipv6());
- let (mut preferred, mut fallback) = IpAddrs {
+ let (mut preferred, mut fallback) = SocketAddrs {
iter: vec![v6_addr, v4_addr].into_iter(),
}
.split_by_preference(None, None);
assert!(preferred.next().unwrap().is_ipv6());
assert!(fallback.next().unwrap().is_ipv4());
- let (mut preferred, mut fallback) = IpAddrs {
+ let (mut preferred, mut fallback) = SocketAddrs {
iter: vec![v4_addr, v6_addr].into_iter(),
}
.split_by_preference(Some(ip_v4), Some(ip_v6));
assert!(preferred.next().unwrap().is_ipv4());
assert!(fallback.next().unwrap().is_ipv6());
- let (mut preferred, mut fallback) = IpAddrs {
+ let (mut preferred, mut fallback) = SocketAddrs {
iter: vec![v6_addr, v4_addr].into_iter(),
}
.split_by_preference(Some(ip_v4), Some(ip_v6));
assert!(preferred.next().unwrap().is_ipv6());
assert!(fallback.next().unwrap().is_ipv4());
- let (mut preferred, fallback) = IpAddrs {
+ let (mut preferred, fallback) = SocketAddrs {
iter: vec![v4_addr, v6_addr].into_iter(),
}
.split_by_preference(Some(ip_v4), None);
assert!(preferred.next().unwrap().is_ipv4());
assert!(fallback.is_empty());
- let (mut preferred, fallback) = IpAddrs {
+ let (mut preferred, fallback) = SocketAddrs {
iter: vec![v4_addr, v6_addr].into_iter(),
}
.split_by_preference(None, Some(ip_v6));
diff --git a/src/client/connect/dns.rs b/src/client/connect/dns.rs
--- a/src/client/connect/dns.rs
+++ b/src/client/connect/dns.rs
@@ -422,7 +422,7 @@ mod tests {
let dst = ::http::Uri::from_static("http://[::1]:8080/");
let mut addrs =
- IpAddrs::try_parse(dst.host().expect("host"), dst.port_u16().expect("port"))
+ SocketAddrs::try_parse(dst.host().expect("host"), dst.port_u16().expect("port"))
.expect("try_parse");
let expected = "[::1]:8080".parse::<SocketAddr>().expect("expected");
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -920,7 +923,7 @@ mod tests {
send_buffer_size: None,
recv_buffer_size: None,
};
- let connecting_tcp = ConnectingTcp::new(dns::IpAddrs::new(addrs), &cfg);
+ let connecting_tcp = ConnectingTcp::new(dns::SocketAddrs::new(addrs), &cfg);
let start = Instant::now();
Ok::<_, ConnectError>((start, ConnectingTcp::connect(connecting_tcp).await?))
})
|
hyperium/hyper
|
6fd696e10974f10b2c6b9d16393bbbfa21c2333f
|
[
"2086"
] |
0.3
|
751c122589cfd9935e8e3239cd0d692e573784c5
|
diff --git a/examples/http_proxy.rs b/examples/http_proxy.rs
--- a/examples/http_proxy.rs
+++ b/examples/http_proxy.rs
@@ -58,7 +58,7 @@ async fn proxy(client: HttpClient, req: Request<Body>) -> Result<Response<Body>,
// `on_upgrade` future.
if let Some(addr) = host_addr(req.uri()) {
tokio::task::spawn(async move {
- match req.into_body().on_upgrade().await {
+ match hyper::upgrade::on(req).await {
Ok(upgraded) => {
if let Err(e) = tunnel(upgraded, addr).await {
eprintln!("server io error: {}", e);
diff --git a/examples/upgrades.rs b/examples/upgrades.rs
--- a/examples/upgrades.rs
+++ b/examples/upgrades.rs
@@ -34,7 +34,7 @@ async fn server_upgraded_io(mut upgraded: Upgraded) -> Result<()> {
}
/// Our server HTTP handler to initiate HTTP upgrades.
-async fn server_upgrade(req: Request<Body>) -> Result<Response<Body>> {
+async fn server_upgrade(mut req: Request<Body>) -> Result<Response<Body>> {
let mut res = Response::new(Body::empty());
// Send a 400 to any request that doesn't have
diff --git a/examples/upgrades.rs b/examples/upgrades.rs
--- a/examples/upgrades.rs
+++ b/examples/upgrades.rs
@@ -52,7 +52,7 @@ async fn server_upgrade(req: Request<Body>) -> Result<Response<Body>> {
// is returned below, so it's better to spawn this future instead
// waiting for it to complete to then return a response.
tokio::task::spawn(async move {
- match req.into_body().on_upgrade().await {
+ match hyper::upgrade::on(&mut req).await {
Ok(upgraded) => {
if let Err(e) = server_upgraded_io(upgraded).await {
eprintln!("server foobar io error: {}", e)
diff --git a/examples/upgrades.rs b/examples/upgrades.rs
--- a/examples/upgrades.rs
+++ b/examples/upgrades.rs
@@ -97,7 +97,7 @@ async fn client_upgrade_request(addr: SocketAddr) -> Result<()> {
panic!("Our server didn't upgrade: {}", res.status());
}
- match res.into_body().on_upgrade().await {
+ match hyper::upgrade::on(res).await {
Ok(upgraded) => {
if let Err(e) = client_upgraded_io(upgraded).await {
eprintln!("client foobar io error: {}", e)
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -187,13 +187,15 @@ impl Body {
Body::new(Kind::Wrapped(SyncWrapper::new(Box::pin(mapped))))
}
- /// Converts this `Body` into a `Future` of a pending HTTP upgrade.
- ///
- /// See [the `upgrade` module](crate::upgrade) for more.
- pub fn on_upgrade(self) -> OnUpgrade {
- self.extra
- .map(|ex| ex.on_upgrade)
- .unwrap_or_else(OnUpgrade::none)
+ // TODO: Eventually the pending upgrade should be stored in the
+ // `Extensions`, and all these pieces can be removed. In v0.14, we made
+ // the breaking changes, so now this TODO can be done without breakage.
+ pub(crate) fn take_upgrade(&mut self) -> OnUpgrade {
+ if let Some(ref mut extra) = self.extra {
+ std::mem::replace(&mut extra.on_upgrade, OnUpgrade::none())
+ } else {
+ OnUpgrade::none()
+ }
}
fn new(kind: Kind) -> Body {
diff --git a/src/upgrade.rs b/src/upgrade.rs
--- a/src/upgrade.rs
+++ b/src/upgrade.rs
@@ -57,18 +57,16 @@ pub struct Parts<T> {
_inner: (),
}
+/// Gets a pending HTTP upgrade from this message.
+pub fn on<T: sealed::CanUpgrade>(msg: T) -> OnUpgrade {
+ msg.on_upgrade()
+}
+
#[cfg(feature = "http1")]
pub(crate) struct Pending {
tx: oneshot::Sender<crate::Result<Upgraded>>,
}
-/// Error cause returned when an upgrade was expected but canceled
-/// for whatever reason.
-///
-/// This likely means the actual `Conn` future wasn't polled and upgraded.
-#[derive(Debug)]
-struct UpgradeExpected(());
-
#[cfg(feature = "http1")]
pub(crate) fn pending() -> (Pending, OnUpgrade) {
let (tx, rx) = oneshot::channel();
diff --git a/src/upgrade.rs b/src/upgrade.rs
--- a/src/upgrade.rs
+++ b/src/upgrade.rs
@@ -162,9 +160,7 @@ impl Future for OnUpgrade {
Some(ref mut rx) => Pin::new(rx).poll(cx).map(|res| match res {
Ok(Ok(upgraded)) => Ok(upgraded),
Ok(Err(err)) => Err(err),
- Err(_oneshot_canceled) => {
- Err(crate::Error::new_canceled().with(UpgradeExpected(())))
- }
+ Err(_oneshot_canceled) => Err(crate::Error::new_canceled().with(UpgradeExpected)),
}),
None => Poll::Ready(Err(crate::Error::new_user_no_upgrade())),
}
diff --git a/src/upgrade.rs b/src/upgrade.rs
--- a/src/upgrade.rs
+++ b/src/upgrade.rs
@@ -196,9 +192,16 @@ impl Pending {
// ===== impl UpgradeExpected =====
+/// Error cause returned when an upgrade was expected but canceled
+/// for whatever reason.
+///
+/// This likely means the actual `Conn` future wasn't polled and upgraded.
+#[derive(Debug)]
+struct UpgradeExpected;
+
impl fmt::Display for UpgradeExpected {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- write!(f, "upgrade expected but not completed")
+ f.write_str("upgrade expected but not completed")
}
}
|
Just my 2¢, and I don't know how you or others would feel about it, but at this early stage it should be fine IMO to just release a 0.14 with it changed to the way you believe it should be. I suspect that only a few of us users have gone far upgrading to 0.13 yet and and its just another version to bump in our Cargo.toml. Said from the opposite direction a quick 0.14 turnover is less concerning than breaking changes in 0.13.4 or whatever.
Maybe 0.13 just wasn't your lucky number?
If I had managed to release anything with an "^0.13" dependency already I might feel a little different, but not much.
A related question: should any of this updated API actually go in the _http_ crate?
Should support for intermediate 1xx responses also be added?
https://tools.ietf.org/html/rfc7231#section-6.2
This could be useful for 103: Early Hints, or something else added in the future.
|
2020-11-19T22:30:09Z
| 2,337
|
Move HTTP upgrades API off the Body type
Introduced in https://github.com/hyperium/hyper/pull/1563, the HTTP Upgrade/`CONNECT` API currently is accessed via `hyper::Body::on_upgrade`. This has some downsides that I'd like to address with a modified API:
- It makes the `Body` type fatter.
- It requires people keep the `Body` around, meaning they can't use stream combinators to read the body first, *and then* wait for the upgrade to finish. This also makes it more annoying for users who may wish adjust their `http::Request<Body>` into some `http::Request<Doodad>`.
## Proposed API
Similar to the proposal in #1586, the change would be to pass the `http::Request` or `http::Response` to a free function.
- `hyper::upgrade::on(req_or_resp) -> OnUpgrade`
- We can also allow passing just a mutable reference, thus not consuming the request or response.
- Optional other functions could be added, but not necessary: `has_upgrade`, etc.
## Implementation Plan
The types would be inserted into the `http::Extensions` of the request/response (though with a private wrapper type to hide the exact details).
- Add the new `hyper::upgrade::on` function.
- The `Body::on_upgrade` method would become `#[deprecated]`.
- So as to not be a behavioral breaking change, the `OnUpgrade` would need to be placed in both the `Body` *and* the `Extensions`, wrapped in some `Arc<Lock>` that only allows taking it once.
I'd like to in a future release be able to *not* put it in an `Arc<Lock>` and place it in the `Body`, but I'm not sure how that could really be done in 0.13.x. (I knew I'd find something I forgot to change after finally releasing XD).
|
hyperium__hyper-2337
|
diff --git a/src/upgrade.rs b/src/upgrade.rs
--- a/src/upgrade.rs
+++ b/src/upgrade.rs
@@ -277,6 +280,38 @@ impl<T: AsyncRead + AsyncWrite + Unpin + 'static> Io for ForwardsWriteBuf<T> {
}
}
+mod sealed {
+ use super::OnUpgrade;
+
+ pub trait CanUpgrade {
+ fn on_upgrade(self) -> OnUpgrade;
+ }
+
+ impl CanUpgrade for http::Request<crate::Body> {
+ fn on_upgrade(self) -> OnUpgrade {
+ self.into_body().take_upgrade()
+ }
+ }
+
+ impl CanUpgrade for &'_ mut http::Request<crate::Body> {
+ fn on_upgrade(self) -> OnUpgrade {
+ self.body_mut().take_upgrade()
+ }
+ }
+
+ impl CanUpgrade for http::Response<crate::Body> {
+ fn on_upgrade(self) -> OnUpgrade {
+ self.into_body().take_upgrade()
+ }
+ }
+
+ impl CanUpgrade for &'_ mut http::Response<crate::Body> {
+ fn on_upgrade(self) -> OnUpgrade {
+ self.body_mut().take_upgrade()
+ }
+ }
+}
+
#[cfg(test)]
mod tests {
use super::*;
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1790,9 +1790,7 @@ mod dispatch_impl {
let res = rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap();
assert_eq!(res.status(), 101);
- let upgraded = rt
- .block_on(res.into_body().on_upgrade())
- .expect("on_upgrade");
+ let upgraded = rt.block_on(hyper::upgrade::on(res)).expect("on_upgrade");
let parts = upgraded.downcast::<DebugStream>().unwrap();
assert_eq!(s(&parts.read_buf), "foobar=ready");
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1341,7 +1341,7 @@ async fn upgrades_new() {
let (upgrades_tx, upgrades_rx) = mpsc::channel();
let svc = service_fn(move |req: Request<Body>| {
- let on_upgrade = req.into_body().on_upgrade();
+ let on_upgrade = hyper::upgrade::on(req);
let _ = upgrades_tx.send(on_upgrade);
future::ok::<_, hyper::Error>(
Response::builder()
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1448,7 +1448,7 @@ async fn http_connect_new() {
let (upgrades_tx, upgrades_rx) = mpsc::channel();
let svc = service_fn(move |req: Request<Body>| {
- let on_upgrade = req.into_body().on_upgrade();
+ let on_upgrade = hyper::upgrade::on(req);
let _ = upgrades_tx.send(on_upgrade);
future::ok::<_, hyper::Error>(
Response::builder()
|
hyperium/hyper
|
6fd696e10974f10b2c6b9d16393bbbfa21c2333f
|
[
"2302"
] |
0.2
|
cc7d3058e84b63d26697026f44cdc153b208de9a
|
diff --git a/Cargo.toml b/Cargo.toml
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,3 +1,4 @@
+
[package]
name = "hyper"
version = "0.14.0-dev" # don't forget to update html_root_url
diff --git a/Cargo.toml b/Cargo.toml
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -30,12 +31,12 @@ http = "0.2"
http-body = "0.3.1"
httpdate = "0.3"
httparse = "1.0"
-h2 = "0.2.2"
+h2 = { git = "https://github.com/hyperium/h2" }
itoa = "0.4.1"
tracing = { version = "0.1", default-features = false, features = ["log", "std"] }
pin-project = "1.0"
tower-service = "0.3"
-tokio = { version = "0.2.11", features = ["sync"] }
+tokio = { version = "0.3", features = ["sync", "stream"] }
want = "0.3"
# Optional
diff --git a/Cargo.toml b/Cargo.toml
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -67,12 +77,12 @@ default = [
]
runtime = [
"tcp",
- "tokio/rt-core",
+ "tokio/rt",
]
tcp = [
"socket2",
- "tokio/blocking",
- "tokio/tcp",
+ "tokio/net",
+ "tokio/rt",
"tokio/time",
]
diff --git a/benches/body.rs b/benches/body.rs
--- a/benches/body.rs
+++ b/benches/body.rs
@@ -10,8 +10,7 @@ use hyper::body::Body;
macro_rules! bench_stream {
($bencher:ident, bytes: $bytes:expr, count: $count:expr, $total_ident:ident, $body_pat:pat, $block:expr) => {{
- let mut rt = tokio::runtime::Builder::new()
- .basic_scheduler()
+ let rt = tokio::runtime::Builder::new_current_thread()
.build()
.expect("rt build");
diff --git a/benches/end_to_end.rs b/benches/end_to_end.rs
--- a/benches/end_to_end.rs
+++ b/benches/end_to_end.rs
@@ -288,7 +290,7 @@ impl Opts {
let bytes_per_iter = (req_len + self.response_body.len() as u64) * self.parallel_cnt as u64;
b.bytes = bytes_per_iter;
- let addr = spawn_server(&mut rt, &self);
+ let addr = spawn_server(&rt, &self);
let connector = HttpConnector::new();
let client = hyper::Client::builder()
diff --git a/benches/end_to_end.rs b/benches/end_to_end.rs
--- a/benches/end_to_end.rs
+++ b/benches/end_to_end.rs
@@ -351,7 +353,7 @@ impl Opts {
}
}
-fn spawn_server(rt: &mut tokio::runtime::Runtime, opts: &Opts) -> SocketAddr {
+fn spawn_server(rt: &tokio::runtime::Runtime, opts: &Opts) -> SocketAddr {
use hyper::service::{make_service_fn, service_fn};
let addr = "127.0.0.1:0".parse().unwrap();
diff --git a/benches/server.rs b/benches/server.rs
--- a/benches/server.rs
+++ b/benches/server.rs
@@ -34,9 +34,8 @@ macro_rules! bench_server {
}))
});
- let mut rt = tokio::runtime::Builder::new()
+ let rt = tokio::runtime::Builder::new_current_thread()
.enable_all()
- .basic_scheduler()
.build()
.expect("rt build");
diff --git a/examples/single_threaded.rs b/examples/single_threaded.rs
--- a/examples/single_threaded.rs
+++ b/examples/single_threaded.rs
@@ -10,15 +10,14 @@ fn main() {
pretty_env_logger::init();
// Configure a runtime that runs everything on the current thread
- let mut rt = tokio::runtime::Builder::new()
+ let rt = tokio::runtime::Builder::new_current_thread()
.enable_all()
- .basic_scheduler()
.build()
.expect("build runtime");
// Combine it with a `LocalSet, which means it can spawn !Send futures...
let local = tokio::task::LocalSet::new();
- local.block_on(&mut rt, run());
+ local.block_on(&rt, run());
}
async fn run() {
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -12,8 +12,8 @@ use std::time::Duration;
use futures_util::future::Either;
use http::uri::{Scheme, Uri};
use pin_project::pin_project;
-use tokio::net::TcpStream;
-use tokio::time::Delay;
+use tokio::net::{TcpSocket, TcpStream};
+use tokio::time::Sleep;
use super::dns::{self, resolve, GaiResolver, Resolve};
use super::{Connected, Connection};
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -331,34 +331,9 @@ where
dns::IpAddrs::new(addrs)
};
- let c = ConnectingTcp::new(
- config.local_address_ipv4,
- config.local_address_ipv6,
- addrs,
- config.connect_timeout,
- config.happy_eyeballs_timeout,
- config.reuse_address,
- );
-
- let sock = c
- .connect()
- .await
- .map_err(ConnectError::m("tcp connect error"))?;
-
- if let Some(dur) = config.keep_alive_timeout {
- sock.set_keepalive(Some(dur))
- .map_err(ConnectError::m("tcp set_keepalive error"))?;
- }
-
- if let Some(size) = config.send_buffer_size {
- sock.set_send_buffer_size(size)
- .map_err(ConnectError::m("tcp set_send_buffer_size error"))?;
- }
+ let c = ConnectingTcp::new(addrs, config);
- if let Some(size) = config.recv_buffer_size {
- sock.set_recv_buffer_size(size)
- .map_err(ConnectError::m("tcp set_recv_buffer_size error"))?;
- }
+ let sock = c.connect().await?;
sock.set_nodelay(config.nodelay)
.map_err(ConnectError::m("tcp set_nodelay error"))?;
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -475,60 +450,45 @@ impl StdError for ConnectError {
}
}
-struct ConnectingTcp {
- local_addr_ipv4: Option<Ipv4Addr>,
- local_addr_ipv6: Option<Ipv6Addr>,
+struct ConnectingTcp<'a> {
preferred: ConnectingTcpRemote,
fallback: Option<ConnectingTcpFallback>,
- reuse_address: bool,
+ config: &'a Config,
}
-impl ConnectingTcp {
- fn new(
- local_addr_ipv4: Option<Ipv4Addr>,
- local_addr_ipv6: Option<Ipv6Addr>,
- remote_addrs: dns::IpAddrs,
- connect_timeout: Option<Duration>,
- fallback_timeout: Option<Duration>,
- reuse_address: bool,
- ) -> ConnectingTcp {
- if let Some(fallback_timeout) = fallback_timeout {
- let (preferred_addrs, fallback_addrs) =
- remote_addrs.split_by_preference(local_addr_ipv4, local_addr_ipv6);
+impl<'a> ConnectingTcp<'a> {
+ fn new(remote_addrs: dns::IpAddrs, config: &'a Config) -> Self {
+ if let Some(fallback_timeout) = config.happy_eyeballs_timeout {
+ let (preferred_addrs, fallback_addrs) = remote_addrs
+ .split_by_preference(config.local_address_ipv4, config.local_address_ipv6);
if fallback_addrs.is_empty() {
return ConnectingTcp {
- local_addr_ipv4,
- local_addr_ipv6,
- preferred: ConnectingTcpRemote::new(preferred_addrs, connect_timeout),
+ preferred: ConnectingTcpRemote::new(preferred_addrs, config.connect_timeout),
fallback: None,
- reuse_address,
+ config,
};
}
ConnectingTcp {
- local_addr_ipv4,
- local_addr_ipv6,
- preferred: ConnectingTcpRemote::new(preferred_addrs, connect_timeout),
+ preferred: ConnectingTcpRemote::new(preferred_addrs, config.connect_timeout),
fallback: Some(ConnectingTcpFallback {
- delay: tokio::time::delay_for(fallback_timeout),
- remote: ConnectingTcpRemote::new(fallback_addrs, connect_timeout),
+ delay: tokio::time::sleep(fallback_timeout),
+ remote: ConnectingTcpRemote::new(fallback_addrs, config.connect_timeout),
}),
- reuse_address,
+ config,
}
} else {
ConnectingTcp {
- local_addr_ipv4,
- local_addr_ipv6,
- preferred: ConnectingTcpRemote::new(remote_addrs, connect_timeout),
+ preferred: ConnectingTcpRemote::new(remote_addrs, config.connect_timeout),
fallback: None,
- reuse_address,
+ config,
}
}
}
}
struct ConnectingTcpFallback {
- delay: Delay,
+ delay: Sleep,
remote: ConnectingTcpRemote,
}
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -549,24 +509,11 @@ impl ConnectingTcpRemote {
}
impl ConnectingTcpRemote {
- async fn connect(
- &mut self,
- local_addr_ipv4: &Option<Ipv4Addr>,
- local_addr_ipv6: &Option<Ipv6Addr>,
- reuse_address: bool,
- ) -> io::Result<TcpStream> {
+ async fn connect(&mut self, config: &Config) -> Result<TcpStream, ConnectError> {
let mut err = None;
for addr in &mut self.addrs {
debug!("connecting to {}", addr);
- match connect(
- &addr,
- local_addr_ipv4,
- local_addr_ipv6,
- reuse_address,
- self.connect_timeout,
- )?
- .await
- {
+ match connect(&addr, config, self.connect_timeout)?.await {
Ok(tcp) => {
debug!("connected to {}", addr);
return Ok(tcp);
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -580,9 +527,9 @@ impl ConnectingTcpRemote {
match err {
Some(e) => Err(e),
- None => Err(std::io::Error::new(
- std::io::ErrorKind::NotConnected,
- "Network unreachable",
+ None => Err(ConnectError::new(
+ "tcp connect error",
+ std::io::Error::new(std::io::ErrorKind::NotConnected, "Network unreachable"),
)),
}
}
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -618,30 +565,79 @@ fn bind_local_address(
fn connect(
addr: &SocketAddr,
- local_addr_ipv4: &Option<Ipv4Addr>,
- local_addr_ipv6: &Option<Ipv6Addr>,
- reuse_address: bool,
+ config: &Config,
connect_timeout: Option<Duration>,
-) -> io::Result<impl Future<Output = io::Result<TcpStream>>> {
+) -> Result<impl Future<Output = Result<TcpStream, ConnectError>>, ConnectError> {
+ // TODO(eliza): if Tokio's `TcpSocket` gains support for setting the
+ // keepalive timeout and send/recv buffer size, it would be nice to use that
+ // instead of socket2, and avoid the unsafe `into_raw_fd`/`from_raw_fd`
+ // dance...
use socket2::{Domain, Protocol, Socket, Type};
let domain = match *addr {
SocketAddr::V4(_) => Domain::ipv4(),
SocketAddr::V6(_) => Domain::ipv6(),
};
- let socket = Socket::new(domain, Type::stream(), Some(Protocol::tcp()))?;
-
- if reuse_address {
- socket.set_reuse_address(true)?;
- }
-
- bind_local_address(&socket, addr, local_addr_ipv4, local_addr_ipv6)?;
-
- let addr = *addr;
-
- let std_tcp = socket.into_tcp_stream();
-
+ let socket = Socket::new(domain, Type::stream(), Some(Protocol::tcp()))
+ .map_err(ConnectError::m("tcp open error"))?;
+
+ if config.reuse_address {
+ socket
+ .set_reuse_address(true)
+ .map_err(ConnectError::m("tcp set_reuse_address error"))?;
+ }
+
+ // When constructing a Tokio `TcpSocket` from a raw fd/socket, the user is
+ // responsible for ensuring O_NONBLOCK is set.
+ socket
+ .set_nonblocking(true)
+ .map_err(ConnectError::m("tcp set_nonblocking error"))?;
+
+ bind_local_address(
+ &socket,
+ addr,
+ &config.local_address_ipv4,
+ &config.local_address_ipv6,
+ )
+ .map_err(ConnectError::m("tcp bind local error"))?;
+
+ if let Some(dur) = config.keep_alive_timeout {
+ socket
+ .set_keepalive(Some(dur))
+ .map_err(ConnectError::m("tcp set_keepalive error"))?;
+ }
+
+ if let Some(size) = config.send_buffer_size {
+ socket
+ .set_send_buffer_size(size)
+ .map_err(ConnectError::m("tcp set_send_buffer_size error"))?;
+ }
+
+ if let Some(size) = config.recv_buffer_size {
+ socket
+ .set_recv_buffer_size(size)
+ .map_err(ConnectError::m("tcp set_recv_buffer_size error"))?;
+ }
+
+ #[cfg(unix)]
+ let socket = unsafe {
+ // Safety: `from_raw_fd` is only safe to call if ownership of the raw
+ // file descriptor is transferred. Since we call `into_raw_fd` on the
+ // socket2 socket, it gives up ownership of the fd and will not close
+ // it, so this is safe.
+ use std::os::unix::io::{FromRawFd, IntoRawFd};
+ TcpSocket::from_raw_fd(socket.into_raw_fd())
+ };
+ #[cfg(windows)]
+ let socket = unsafe {
+ // Safety: `from_raw_socket` is only safe to call if ownership of the raw
+ // Windows SOCKET is transferred. Since we call `into_raw_socket` on the
+ // socket2 socket, it gives up ownership of the SOCKET and will not close
+ // it, so this is safe.
+ use std::os::windows::io::{FromRawSocket, IntoRawSocket};
+ TcpSocket::from_raw_socket(socket.into_raw_socket())
+ };
+ let connect = socket.connect(*addr);
Ok(async move {
- let connect = TcpStream::connect_std(std_tcp, &addr);
match connect_timeout {
Some(dur) => match tokio::time::timeout(dur, connect).await {
Ok(Ok(s)) => Ok(s),
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -650,33 +646,19 @@ fn connect(
},
None => connect.await,
}
+ .map_err(ConnectError::m("tcp connect error"))
})
}
-impl ConnectingTcp {
- async fn connect(mut self) -> io::Result<TcpStream> {
- let Self {
- ref local_addr_ipv4,
- ref local_addr_ipv6,
- reuse_address,
- ..
- } = self;
+impl ConnectingTcp<'_> {
+ async fn connect(mut self) -> Result<TcpStream, ConnectError> {
match self.fallback {
- None => {
- self.preferred
- .connect(local_addr_ipv4, local_addr_ipv6, reuse_address)
- .await
- }
+ None => self.preferred.connect(self.config).await,
Some(mut fallback) => {
- let preferred_fut =
- self.preferred
- .connect(local_addr_ipv4, local_addr_ipv6, reuse_address);
+ let preferred_fut = self.preferred.connect(self.config);
futures_util::pin_mut!(preferred_fut);
- let fallback_fut =
- fallback
- .remote
- .connect(local_addr_ipv4, local_addr_ipv6, reuse_address);
+ let fallback_fut = fallback.remote.connect(self.config);
futures_util::pin_mut!(fallback_fut);
let (result, future) =
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -1,4 +1,5 @@
use futures_util::future;
+use tokio::stream::Stream;
use tokio::sync::{mpsc, oneshot};
use crate::common::{task, Future, Pin, Poll};
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -131,22 +132,25 @@ impl<T, U> Clone for UnboundedSender<T, U> {
}
}
+#[pin_project::pin_project(PinnedDrop)]
pub struct Receiver<T, U> {
+ #[pin]
inner: mpsc::UnboundedReceiver<Envelope<T, U>>,
taker: want::Taker,
}
impl<T, U> Receiver<T, U> {
pub(crate) fn poll_next(
- &mut self,
+ self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
) -> Poll<Option<(T, Callback<T, U>)>> {
- match self.inner.poll_recv(cx) {
+ let this = self.project();
+ match this.inner.poll_next(cx) {
Poll::Ready(item) => {
Poll::Ready(item.map(|mut env| env.0.take().expect("envelope not dropped")))
}
Poll::Pending => {
- self.taker.want();
+ this.taker.want();
Poll::Pending
}
}
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -165,11 +169,12 @@ impl<T, U> Receiver<T, U> {
}
}
-impl<T, U> Drop for Receiver<T, U> {
- fn drop(&mut self) {
+#[pin_project::pinned_drop]
+impl<T, U> PinnedDrop for Receiver<T, U> {
+ fn drop(mut self: Pin<&mut Self>) {
// Notify the giver about the closure first, before dropping
// the mpsc::Receiver.
- self.taker.cancel();
+ self.as_mut().taker.cancel();
}
}
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -706,12 +706,15 @@ impl Expiration {
}
#[cfg(feature = "runtime")]
+#[pin_project::pin_project]
struct IdleTask<T> {
+ #[pin]
interval: Interval,
pool: WeakOpt<Mutex<PoolInner<T>>>,
// This allows the IdleTask to be notified as soon as the entire
// Pool is fully dropped, and shutdown. This channel is never sent on,
// but Err(Canceled) will be received when the Pool is dropped.
+ #[pin]
pool_drop_notifier: oneshot::Receiver<crate::common::Never>,
}
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -719,9 +722,11 @@ struct IdleTask<T> {
impl<T: Poolable + 'static> Future for IdleTask<T> {
type Output = ();
- fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
+ fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
+ use tokio::stream::Stream;
+ let mut this = self.project();
loop {
- match Pin::new(&mut self.pool_drop_notifier).poll(cx) {
+ match this.pool_drop_notifier.as_mut().poll(cx) {
Poll::Ready(Ok(n)) => match n {},
Poll::Pending => (),
Poll::Ready(Err(_canceled)) => {
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -730,9 +735,9 @@ impl<T: Poolable + 'static> Future for IdleTask<T> {
}
}
- ready!(self.interval.poll_tick(cx));
+ ready!(this.interval.as_mut().poll_next(cx));
- if let Some(inner) = self.pool.upgrade() {
+ if let Some(inner) = this.pool.upgrade() {
if let Ok(mut inner) = inner.lock() {
trace!("idle interval checking for expired");
inner.clear_expired();
diff --git a/src/common/drain.rs b/src/common/drain.rs
--- a/src/common/drain.rs
+++ b/src/common/drain.rs
@@ -1,20 +1,13 @@
use std::mem;
use pin_project::pin_project;
+use tokio::stream::Stream;
use tokio::sync::{mpsc, watch};
use super::{task, Future, Never, Pin, Poll};
-// Sentinel value signaling that the watch is still open
-#[derive(Clone, Copy)]
-enum Action {
- Open,
- // Closed isn't sent via the `Action` type, but rather once
- // the watch::Sender is dropped.
-}
-
pub fn channel() -> (Signal, Watch) {
- let (tx, rx) = watch::channel(Action::Open);
+ let (tx, rx) = watch::channel(());
let (drained_tx, drained_rx) = mpsc::channel(1);
(
Signal {
diff --git a/src/common/drain.rs b/src/common/drain.rs
--- a/src/common/drain.rs
+++ b/src/common/drain.rs
@@ -27,17 +20,19 @@ pub fn channel() -> (Signal, Watch) {
pub struct Signal {
drained_rx: mpsc::Receiver<Never>,
- _tx: watch::Sender<Action>,
+ _tx: watch::Sender<()>,
}
+#[pin_project::pin_project]
pub struct Draining {
+ #[pin]
drained_rx: mpsc::Receiver<Never>,
}
#[derive(Clone)]
pub struct Watch {
drained_tx: mpsc::Sender<Never>,
- rx: watch::Receiver<Action>,
+ rx: watch::Receiver<()>,
}
#[allow(missing_debug_implementations)]
diff --git a/src/common/drain.rs b/src/common/drain.rs
--- a/src/common/drain.rs
+++ b/src/common/drain.rs
@@ -46,7 +41,8 @@ pub struct Watching<F, FN> {
#[pin]
future: F,
state: State<FN>,
- watch: Watch,
+ watch: Pin<Box<dyn Future<Output = ()> + Send + Sync>>,
+ _drained_tx: mpsc::Sender<Never>,
}
enum State<F> {
diff --git a/src/common/drain.rs b/src/common/drain.rs
--- a/src/common/drain.rs
+++ b/src/common/drain.rs
@@ -66,8 +62,8 @@ impl Signal {
impl Future for Draining {
type Output = ();
- fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
- match ready!(self.drained_rx.poll_recv(cx)) {
+ fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
+ match ready!(self.project().drained_rx.poll_next(cx)) {
Some(never) => match never {},
None => Poll::Ready(()),
}
diff --git a/src/common/drain.rs b/src/common/drain.rs
--- a/src/common/drain.rs
+++ b/src/common/drain.rs
@@ -80,10 +76,14 @@ impl Watch {
F: Future,
FN: FnOnce(Pin<&mut F>),
{
+ let Self { drained_tx, mut rx } = self;
Watching {
future,
state: State::Watch(on_drain),
- watch: self,
+ watch: Box::pin(async move {
+ let _ = rx.changed().await;
+ }),
+ _drained_tx: drained_tx,
}
}
}
diff --git a/src/common/drain.rs b/src/common/drain.rs
--- a/src/common/drain.rs
+++ b/src/common/drain.rs
@@ -100,12 +100,12 @@ where
loop {
match mem::replace(me.state, State::Draining) {
State::Watch(on_drain) => {
- match me.watch.rx.poll_recv_ref(cx) {
- Poll::Ready(None) => {
+ match Pin::new(&mut me.watch).poll(cx) {
+ Poll::Ready(()) => {
// Drain has been triggered!
on_drain(me.future.as_mut());
}
- Poll::Ready(Some(_ /*State::Open*/)) | Poll::Pending => {
+ Poll::Pending => {
*me.state = State::Watch(on_drain);
return me.future.poll(cx);
}
diff --git a/src/common/io/rewind.rs b/src/common/io/rewind.rs
--- a/src/common/io/rewind.rs
+++ b/src/common/io/rewind.rs
@@ -2,7 +2,7 @@ use std::marker::Unpin;
use std::{cmp, io};
use bytes::{Buf, Bytes};
-use tokio::io::{AsyncRead, AsyncWrite};
+use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
use crate::common::{task, Pin, Poll};
diff --git a/src/common/io/rewind.rs b/src/common/io/rewind.rs
--- a/src/common/io/rewind.rs
+++ b/src/common/io/rewind.rs
@@ -37,36 +37,33 @@ impl<T> Rewind<T> {
(self.inner, self.pre.unwrap_or_else(Bytes::new))
}
- pub(crate) fn get_mut(&mut self) -> &mut T {
- &mut self.inner
- }
+ // pub(crate) fn get_mut(&mut self) -> &mut T {
+ // &mut self.inner
+ // }
}
impl<T> AsyncRead for Rewind<T>
where
T: AsyncRead + Unpin,
{
- #[inline]
- unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [std::mem::MaybeUninit<u8>]) -> bool {
- self.inner.prepare_uninitialized_buffer(buf)
- }
-
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
- buf: &mut [u8],
- ) -> Poll<io::Result<usize>> {
+ buf: &mut ReadBuf<'_>,
+ ) -> Poll<io::Result<()>> {
if let Some(mut prefix) = self.pre.take() {
// If there are no remaining bytes, let the bytes get dropped.
if !prefix.is_empty() {
- let copy_len = cmp::min(prefix.len(), buf.len());
- prefix.copy_to_slice(&mut buf[..copy_len]);
+ let copy_len = cmp::min(prefix.len(), buf.remaining());
+ // TODO: There should be a way to do following two lines cleaner...
+ buf.put_slice(&prefix[..copy_len]);
+ prefix.advance(copy_len);
// Put back whats left
if !prefix.is_empty() {
self.pre = Some(prefix);
}
- return Poll::Ready(Ok(copy_len));
+ return Poll::Ready(Ok(()));
}
}
Pin::new(&mut self.inner).poll_read(cx, buf)
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -27,7 +27,7 @@ pub(crate) trait Dispatch {
type PollError;
type RecvItem;
fn poll_msg(
- &mut self,
+ self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
) -> Poll<Option<Result<(Self::PollItem, Self::PollBody), Self::PollError>>>;
fn recv_msg(&mut self, msg: crate::Result<(Self::RecvItem, Body)>) -> crate::Result<()>;
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -40,8 +40,10 @@ pub struct Server<S: HttpService<B>, B> {
pub(crate) service: S,
}
+#[pin_project::pin_project]
pub struct Client<B> {
callback: Option<crate::client::dispatch::Callback<Request<B>, Response<Body>>>,
+ #[pin]
rx: ClientRx<B>,
rx_closed: bool,
}
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -281,7 +283,7 @@ where
&& self.conn.can_write_head()
&& self.dispatch.should_poll()
{
- if let Some(msg) = ready!(self.dispatch.poll_msg(cx)) {
+ if let Some(msg) = ready!(Pin::new(&mut self.dispatch).poll_msg(cx)) {
let (head, mut body) = msg.map_err(crate::Error::new_user_service)?;
// Check if the body knows its full data immediately.
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -469,10 +471,11 @@ where
type RecvItem = RequestHead;
fn poll_msg(
- &mut self,
+ mut self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
) -> Poll<Option<Result<(Self::PollItem, Self::PollBody), Self::PollError>>> {
- let ret = if let Some(ref mut fut) = self.in_flight.as_mut().as_pin_mut() {
+ let mut this = self.as_mut();
+ let ret = if let Some(ref mut fut) = this.in_flight.as_mut().as_pin_mut() {
let resp = ready!(fut.as_mut().poll(cx)?);
let (parts, body) = resp.into_parts();
let head = MessageHead {
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -486,7 +489,7 @@ where
};
// Since in_flight finished, remove it
- self.in_flight.set(None);
+ this.in_flight.set(None);
ret
}
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -540,11 +543,12 @@ where
type RecvItem = ResponseHead;
fn poll_msg(
- &mut self,
+ self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
) -> Poll<Option<Result<(Self::PollItem, Self::PollBody), Never>>> {
- debug_assert!(!self.rx_closed);
- match self.rx.poll_next(cx) {
+ let this = self.project();
+ debug_assert!(!*this.rx_closed);
+ match this.rx.poll_next(cx) {
Poll::Ready(Some((req, mut cb))) => {
// check that future hasn't been canceled already
match cb.poll_canceled(cx) {
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -559,7 +563,7 @@ where
subject: RequestLine(parts.method, parts.uri),
headers: parts.headers,
};
- self.callback = Some(cb);
+ *this.callback = Some(cb);
Poll::Ready(Some(Ok((head, body))))
}
}
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -567,7 +571,7 @@ where
Poll::Ready(None) => {
// user has dropped sender handle
trace!("client tx closed");
- self.rx_closed = true;
+ *this.rx_closed = true;
Poll::Ready(None)
}
Poll::Pending => Poll::Pending,
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -4,7 +4,7 @@ use std::fmt;
use std::io::{self, IoSlice};
use bytes::{Buf, BufMut, Bytes, BytesMut};
-use tokio::io::{AsyncRead, AsyncWrite};
+use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
use super::{Http1Transaction, ParseContext, ParsedMessage};
use crate::common::buf::BufList;
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -188,9 +188,16 @@ where
if self.read_buf_remaining_mut() < next {
self.read_buf.reserve(next);
}
- match Pin::new(&mut self.io).poll_read_buf(cx, &mut self.read_buf) {
- Poll::Ready(Ok(n)) => {
- debug!("read {} bytes", n);
+ let mut buf = ReadBuf::uninit(&mut self.read_buf.bytes_mut()[..]);
+ match Pin::new(&mut self.io).poll_read(cx, &mut buf) {
+ Poll::Ready(Ok(_)) => {
+ let n = buf.filled().len();
+ unsafe {
+ // Safety: we just read that many bytes into the
+ // uninitialized part of the buffer, so this is okay.
+ // @tokio pls give me back `poll_read_buf` thanks
+ self.read_buf.advance_mut(n);
+ }
self.read_buf_strategy.record(n);
Poll::Ready(Ok(n))
}
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -224,8 +231,16 @@ where
return self.poll_flush_flattened(cx);
}
loop {
- let n =
- ready!(Pin::new(&mut self.io).poll_write_buf(cx, &mut self.write_buf.auto()))?;
+ // TODO(eliza): this basically ignores all of `WriteBuf`...put
+ // back vectored IO and `poll_write_buf` when the appropriate Tokio
+ // changes land...
+ let n = ready!(Pin::new(&mut self.io)
+ // .poll_write_buf(cx, &mut self.write_buf.auto()))?;
+ .poll_write(cx, self.write_buf.auto().bytes()))?;
+ // TODO(eliza): we have to do this manually because
+ // `poll_write_buf` doesn't exist in Tokio 0.3 yet...when
+ // `poll_write_buf` comes back, the manual advance will need to leave!
+ self.write_buf.advance(n);
debug!("flushed {} bytes", n);
if self.write_buf.remaining() == 0 {
break;
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -452,6 +467,7 @@ where
self.strategy = strategy;
}
+ // TODO(eliza): put back writev!
#[inline]
fn auto(&mut self) -> WriteBufAuto<'_, B> {
WriteBufAuto::new(self)
diff --git a/src/proto/h2/ping.rs b/src/proto/h2/ping.rs
--- a/src/proto/h2/ping.rs
+++ b/src/proto/h2/ping.rs
@@ -33,7 +33,7 @@ use std::time::Instant;
use h2::{Ping, PingPong};
#[cfg(feature = "runtime")]
-use tokio::time::{Delay, Instant};
+use tokio::time::{Instant, Sleep};
type WindowSize = u32;
diff --git a/src/proto/h2/ping.rs b/src/proto/h2/ping.rs
--- a/src/proto/h2/ping.rs
+++ b/src/proto/h2/ping.rs
@@ -60,7 +60,7 @@ pub(super) fn channel(ping_pong: PingPong, config: Config) -> (Recorder, Ponger)
interval,
timeout: config.keep_alive_timeout,
while_idle: config.keep_alive_while_idle,
- timer: tokio::time::delay_for(interval),
+ timer: tokio::time::sleep(interval),
state: KeepAliveState::Init,
});
diff --git a/src/proto/h2/ping.rs b/src/proto/h2/ping.rs
--- a/src/proto/h2/ping.rs
+++ b/src/proto/h2/ping.rs
@@ -156,7 +156,7 @@ struct KeepAlive {
while_idle: bool,
state: KeepAliveState,
- timer: Delay,
+ timer: Sleep,
}
#[cfg(feature = "runtime")]
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -809,9 +809,9 @@ where
type Output = Result<Connection<I, S, E>, FE>;
fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
- let me = self.project();
+ let mut me = self.project();
let service = ready!(me.future.poll(cx))?;
- let io = me.io.take().expect("polled after complete");
+ let io = Option::take(&mut me.io).expect("polled after complete");
Poll::Ready(Ok(me.protocol.serve_connection(io, service)))
}
}
diff --git a/src/server/tcp.rs b/src/server/tcp.rs
--- a/src/server/tcp.rs
+++ b/src/server/tcp.rs
@@ -4,7 +4,7 @@ use std::net::{SocketAddr, TcpListener as StdTcpListener};
use std::time::Duration;
use tokio::net::TcpListener;
-use tokio::time::Delay;
+use tokio::time::Sleep;
use crate::common::{task, Future, Pin, Poll};
diff --git a/src/server/tcp.rs b/src/server/tcp.rs
--- a/src/server/tcp.rs
+++ b/src/server/tcp.rs
@@ -19,7 +19,7 @@ pub struct AddrIncoming {
sleep_on_errors: bool,
tcp_keepalive_timeout: Option<Duration>,
tcp_nodelay: bool,
- timeout: Option<Delay>,
+ timeout: Option<Sleep>,
}
impl AddrIncoming {
diff --git a/src/server/tcp.rs b/src/server/tcp.rs
--- a/src/server/tcp.rs
+++ b/src/server/tcp.rs
@@ -30,6 +30,10 @@ impl AddrIncoming {
}
pub(super) fn from_std(std_listener: StdTcpListener) -> crate::Result<Self> {
+ // TcpListener::from_std doesn't set O_NONBLOCK
+ std_listener
+ .set_nonblocking(true)
+ .map_err(crate::Error::new_listen)?;
let listener = TcpListener::from_std(std_listener).map_err(crate::Error::new_listen)?;
let addr = listener.local_addr().map_err(crate::Error::new_listen)?;
Ok(AddrIncoming {
diff --git a/src/server/tcp.rs b/src/server/tcp.rs
--- a/src/server/tcp.rs
+++ b/src/server/tcp.rs
@@ -98,9 +102,46 @@ impl AddrIncoming {
match ready!(self.listener.poll_accept(cx)) {
Ok((socket, addr)) => {
if let Some(dur) = self.tcp_keepalive_timeout {
+ // Convert the Tokio `TcpStream` into a `socket2` socket
+ // so we can call `set_keepalive`.
+ // TODO(eliza): if Tokio's `TcpSocket` API grows a few
+ // more methods in the future, hopefully we shouldn't
+ // have to do the `from_raw_fd` dance any longer...
+ #[cfg(unix)]
+ let socket = unsafe {
+ // Safety: `socket2`'s socket will try to close the
+ // underlying fd when it's dropped. However, we
+ // can't take ownership of the fd from the tokio
+ // TcpStream, so instead we will call `into_raw_fd`
+ // on the socket2 socket before dropping it. This
+ // prevents it from trying to close the fd.
+ use std::os::unix::io::{AsRawFd, FromRawFd};
+ socket2::Socket::from_raw_fd(socket.as_raw_fd())
+ };
+ #[cfg(windows)]
+ let socket = unsafe {
+ // Safety: `socket2`'s socket will try to close the
+ // underlying SOCKET when it's dropped. However, we
+ // can't take ownership of the SOCKET from the tokio
+ // TcpStream, so instead we will call `into_raw_socket`
+ // on the socket2 socket before dropping it. This
+ // prevents it from trying to close the SOCKET.
+ use std::os::windows::io::{AsRawSocket, FromRawSocket};
+ socket2::Socket::from_raw_socket(socket.as_raw_socket())
+ };
+
+ // Actually set the TCP keepalive timeout.
if let Err(e) = socket.set_keepalive(Some(dur)) {
trace!("error trying to set TCP keepalive: {}", e);
}
+
+ // Take ownershop of the fd/socket back from the socket2
+ // `Socket`, so that socket2 doesn't try to close it
+ // when it's dropped.
+ #[cfg(unix)]
+ drop(std::os::unix::io::IntoRawFd::into_raw_fd(socket));
+ #[cfg(windows)]
+ drop(std::os::windows::io::IntoRawSocket::into_raw_socket(socket));
}
if let Err(e) = socket.set_nodelay(self.tcp_nodelay) {
trace!("error trying to set TCP nodelay: {}", e);
diff --git a/src/server/tcp.rs b/src/server/tcp.rs
--- a/src/server/tcp.rs
+++ b/src/server/tcp.rs
@@ -119,7 +160,7 @@ impl AddrIncoming {
error!("accept error: {}", e);
// Sleep 1s.
- let mut timeout = tokio::time::delay_for(Duration::from_secs(1));
+ let mut timeout = tokio::time::sleep(Duration::from_secs(1));
match Pin::new(&mut timeout).poll(cx) {
Poll::Ready(()) => {
diff --git a/src/server/tcp.rs b/src/server/tcp.rs
--- a/src/server/tcp.rs
+++ b/src/server/tcp.rs
@@ -181,19 +222,20 @@ impl fmt::Debug for AddrIncoming {
}
mod addr_stream {
- use bytes::{Buf, BufMut};
use std::io;
use std::net::SocketAddr;
#[cfg(unix)]
use std::os::unix::io::{AsRawFd, RawFd};
- use tokio::io::{AsyncRead, AsyncWrite};
+ use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
use tokio::net::TcpStream;
use crate::common::{task, Pin, Poll};
/// A transport returned yieled by `AddrIncoming`.
+ #[pin_project::pin_project]
#[derive(Debug)]
pub struct AddrStream {
+ #[pin]
inner: TcpStream,
pub(super) remote_addr: SocketAddr,
}
diff --git a/src/server/tcp.rs b/src/server/tcp.rs
--- a/src/server/tcp.rs
+++ b/src/server/tcp.rs
@@ -231,49 +273,24 @@ mod addr_stream {
}
impl AsyncRead for AddrStream {
- unsafe fn prepare_uninitialized_buffer(
- &self,
- buf: &mut [std::mem::MaybeUninit<u8>],
- ) -> bool {
- self.inner.prepare_uninitialized_buffer(buf)
- }
-
#[inline]
fn poll_read(
- mut self: Pin<&mut Self>,
- cx: &mut task::Context<'_>,
- buf: &mut [u8],
- ) -> Poll<io::Result<usize>> {
- Pin::new(&mut self.inner).poll_read(cx, buf)
- }
-
- #[inline]
- fn poll_read_buf<B: BufMut>(
- mut self: Pin<&mut Self>,
+ self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
- buf: &mut B,
- ) -> Poll<io::Result<usize>> {
- Pin::new(&mut self.inner).poll_read_buf(cx, buf)
+ buf: &mut ReadBuf<'_>,
+ ) -> Poll<io::Result<()>> {
+ self.project().inner.poll_read(cx, buf)
}
}
impl AsyncWrite for AddrStream {
#[inline]
fn poll_write(
- mut self: Pin<&mut Self>,
+ self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>> {
- Pin::new(&mut self.inner).poll_write(cx, buf)
- }
-
- #[inline]
- fn poll_write_buf<B: Buf>(
- mut self: Pin<&mut Self>,
- cx: &mut task::Context<'_>,
- buf: &mut B,
- ) -> Poll<io::Result<usize>> {
- Pin::new(&mut self.inner).poll_write_buf(cx, buf)
+ self.project().inner.poll_write(cx, buf)
}
#[inline]
diff --git a/src/server/tcp.rs b/src/server/tcp.rs
--- a/src/server/tcp.rs
+++ b/src/server/tcp.rs
@@ -283,11 +300,8 @@ mod addr_stream {
}
#[inline]
- fn poll_shutdown(
- mut self: Pin<&mut Self>,
- cx: &mut task::Context<'_>,
- ) -> Poll<io::Result<()>> {
- Pin::new(&mut self.inner).poll_shutdown(cx)
+ fn poll_shutdown(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<io::Result<()>> {
+ self.project().inner.poll_shutdown(cx)
}
}
diff --git a/src/upgrade.rs b/src/upgrade.rs
--- a/src/upgrade.rs
+++ b/src/upgrade.rs
@@ -12,7 +12,7 @@ use std::io;
use std::marker::Unpin;
use bytes::{Buf, Bytes};
-use tokio::io::{AsyncRead, AsyncWrite};
+use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
use tokio::sync::oneshot;
use crate::common::io::Rewind;
diff --git a/src/upgrade.rs b/src/upgrade.rs
--- a/src/upgrade.rs
+++ b/src/upgrade.rs
@@ -105,15 +105,11 @@ impl Upgraded {
}
impl AsyncRead for Upgraded {
- unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [std::mem::MaybeUninit<u8>]) -> bool {
- self.io.prepare_uninitialized_buffer(buf)
- }
-
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
- buf: &mut [u8],
- ) -> Poll<io::Result<usize>> {
+ buf: &mut ReadBuf<'_>,
+ ) -> Poll<io::Result<()>> {
Pin::new(&mut self.io).poll_read(cx, buf)
}
}
diff --git a/src/upgrade.rs b/src/upgrade.rs
--- a/src/upgrade.rs
+++ b/src/upgrade.rs
@@ -127,14 +123,6 @@ impl AsyncWrite for Upgraded {
Pin::new(&mut self.io).poll_write(cx, buf)
}
- fn poll_write_buf<B: Buf>(
- mut self: Pin<&mut Self>,
- cx: &mut task::Context<'_>,
- buf: &mut B,
- ) -> Poll<io::Result<usize>> {
- Pin::new(self.io.get_mut()).poll_write_dyn_buf(cx, buf)
- }
-
fn poll_flush(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<io::Result<()>> {
Pin::new(&mut self.io).poll_flush(cx)
}
diff --git a/src/upgrade.rs b/src/upgrade.rs
--- a/src/upgrade.rs
+++ b/src/upgrade.rs
@@ -247,15 +235,11 @@ impl dyn Io + Send {
}
impl<T: AsyncRead + Unpin> AsyncRead for ForwardsWriteBuf<T> {
- unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [std::mem::MaybeUninit<u8>]) -> bool {
- self.0.prepare_uninitialized_buffer(buf)
- }
-
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut task::Context<'_>,
- buf: &mut [u8],
- ) -> Poll<io::Result<usize>> {
+ buf: &mut ReadBuf<'_>,
+ ) -> Poll<io::Result<()>> {
Pin::new(&mut self.0).poll_read(cx, buf)
}
}
diff --git a/src/upgrade.rs b/src/upgrade.rs
--- a/src/upgrade.rs
+++ b/src/upgrade.rs
@@ -269,14 +253,6 @@ impl<T: AsyncWrite + Unpin> AsyncWrite for ForwardsWriteBuf<T> {
Pin::new(&mut self.0).poll_write(cx, buf)
}
- fn poll_write_buf<B: Buf>(
- mut self: Pin<&mut Self>,
- cx: &mut task::Context<'_>,
- buf: &mut B,
- ) -> Poll<io::Result<usize>> {
- Pin::new(&mut self.0).poll_write_buf(cx, buf)
- }
-
fn poll_flush(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<io::Result<()>> {
Pin::new(&mut self.0).poll_flush(cx)
}
diff --git a/src/upgrade.rs b/src/upgrade.rs
--- a/src/upgrade.rs
+++ b/src/upgrade.rs
@@ -290,9 +266,9 @@ impl<T: AsyncRead + AsyncWrite + Unpin + 'static> Io for ForwardsWriteBuf<T> {
fn poll_write_dyn_buf(
&mut self,
cx: &mut task::Context<'_>,
- mut buf: &mut dyn Buf,
+ buf: &mut dyn Buf,
) -> Poll<io::Result<usize>> {
- Pin::new(&mut self.0).poll_write_buf(cx, &mut buf)
+ Pin::new(&mut self.0).poll_write(cx, buf.bytes())
}
}
|
https://github.com/hyperium/h2/pull/491 is a PR for updating `h2`.
> [hyperium/h2#491](https://github.com/hyperium/h2/pull/491) is a PR for updating `h2`.
The PR just landed 🎉
As soon it is part of a new release we can proceed here 😄
|
2020-11-05T22:07:58Z
| 2,319
|
Upgrade to Tokio v0.3
Tokio v0.3 [just released](https://tokio.rs/blog/2020-10-tokio-0-3), we should upgrade as part of hyper v0.14.
Prerequisites:
- [x] Upgrade `h2` to Tokio v0.3
- [ ] Add back functionality to Tokio v0.3
- [x] Reintroduce `oneshot::Sender::poll_closed` https://github.com/tokio-rs/tokio/pull/3032
- [ ] ~~Add back TCP socket options (`set_keepalive`, `set_send_buffer_size`, `set_recv_buffer_size`)~~
- [ ] Add back vectored write support
|
hyperium__hyper-2319
|
diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml
--- a/.github/workflows/CI.yml
+++ b/.github/workflows/CI.yml
@@ -39,7 +39,7 @@ jobs:
- stable
- beta
- nightly
- - 1.39.0
+ - 1.45.2
os:
- ubuntu-latest
diff --git a/Cargo.toml b/Cargo.toml
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -51,9 +52,18 @@ spmc = "0.3"
serde = "1.0"
serde_derive = "1.0"
serde_json = "1.0"
-tokio = { version = "0.2.2", features = ["fs", "macros", "io-std", "rt-util", "sync", "time", "test-util"] }
-tokio-test = "0.2"
-tokio-util = { version = "0.3", features = ["codec"] }
+tokio = { version = "0.3", features = [
+ "fs",
+ "macros",
+ "io-std",
+ "rt",
+ "rt-multi-thread", # so examples can use #[tokio::main]
+ "sync",
+ "time",
+ "test-util",
+] }
+tokio-test = "0.3"
+tokio-util = { version = "0.4", features = ["codec"] }
tower-util = "0.3"
url = "1.0"
diff --git a/Cargo.toml b/Cargo.toml
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -219,4 +229,3 @@ required-features = ["runtime", "stream"]
name = "server"
path = "tests/server.rs"
required-features = ["runtime"]
-
diff --git a/benches/connect.rs b/benches/connect.rs
--- a/benches/connect.rs
+++ b/benches/connect.rs
@@ -12,12 +12,11 @@ use tokio::net::TcpListener;
#[bench]
fn http_connector(b: &mut test::Bencher) {
let _ = pretty_env_logger::try_init();
- let mut rt = tokio::runtime::Builder::new()
+ let rt = tokio::runtime::Builder::new_current_thread()
.enable_all()
- .basic_scheduler()
.build()
.expect("rt build");
- let mut listener = rt
+ let listener = rt
.block_on(TcpListener::bind(&SocketAddr::from(([127, 0, 0, 1], 0))))
.expect("bind");
let addr = listener.local_addr().expect("local_addr");
diff --git a/benches/end_to_end.rs b/benches/end_to_end.rs
--- a/benches/end_to_end.rs
+++ b/benches/end_to_end.rs
@@ -270,14 +270,16 @@ impl Opts {
}
fn bench(self, b: &mut test::Bencher) {
+ use std::sync::Arc;
let _ = pretty_env_logger::try_init();
// Create a runtime of current thread.
- let mut rt = tokio::runtime::Builder::new()
- .enable_all()
- .basic_scheduler()
- .build()
- .expect("rt build");
- let exec = rt.handle().clone();
+ let rt = Arc::new(
+ tokio::runtime::Builder::new_current_thread()
+ .enable_all()
+ .build()
+ .expect("rt build"),
+ );
+ let exec = rt.clone();
let req_len = self.request_body.map(|b| b.len()).unwrap_or(0) as u64;
let req_len = if self.request_chunks > 0 {
diff --git a/benches/pipeline.rs b/benches/pipeline.rs
--- a/benches/pipeline.rs
+++ b/benches/pipeline.rs
@@ -31,9 +31,8 @@ fn hello_world(b: &mut test::Bencher) {
}))
});
- let mut rt = tokio::runtime::Builder::new()
+ let rt = tokio::runtime::Builder::new_current_thread()
.enable_all()
- .basic_scheduler()
.build()
.expect("rt build");
let srv = rt.block_on(async move {
diff --git a/benches/server.rs b/benches/server.rs
--- a/benches/server.rs
+++ b/benches/server.rs
@@ -185,6 +184,7 @@ fn raw_tcp_throughput_large_payload(b: &mut test::Bencher) {
let mut buf = [0u8; 8192];
while rx.try_recv().is_err() {
let r = sock.read(&mut buf).unwrap();
+ extern crate test;
if r == 0 {
break;
}
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -711,7 +693,7 @@ mod tests {
use ::http::Uri;
use super::super::sealed::{Connect, ConnectSvc};
- use super::HttpConnector;
+ use super::{Config, ConnectError, HttpConnector};
async fn connect<C>(
connector: C,
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -773,6 +755,7 @@ mod tests {
#[tokio::test]
async fn local_address() {
use std::net::{IpAddr, TcpListener};
+ let _ = pretty_env_logger::try_init();
let (bind_ip_v4, bind_ip_v6) = get_local_ips();
let server4 = TcpListener::bind("127.0.0.1:0").unwrap();
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -818,10 +801,8 @@ mod tests {
let server4 = TcpListener::bind("127.0.0.1:0").unwrap();
let addr = server4.local_addr().unwrap();
let _server6 = TcpListener::bind(&format!("[::1]:{}", addr.port())).unwrap();
- let mut rt = tokio::runtime::Builder::new()
- .enable_io()
- .enable_time()
- .basic_scheduler()
+ let rt = tokio::runtime::Builder::new_current_thread()
+ .enable_all()
.build()
.unwrap();
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -925,16 +906,21 @@ mod tests {
.iter()
.map(|host| (host.clone(), addr.port()).into())
.collect();
- let connecting_tcp = ConnectingTcp::new(
- None,
- None,
- dns::IpAddrs::new(addrs),
- None,
- Some(fallback_timeout),
- false,
- );
+ let cfg = Config {
+ local_address_ipv4: None,
+ local_address_ipv6: None,
+ connect_timeout: None,
+ keep_alive_timeout: None,
+ happy_eyeballs_timeout: Some(fallback_timeout),
+ nodelay: false,
+ reuse_address: false,
+ enforce_http: false,
+ send_buffer_size: None,
+ recv_buffer_size: None,
+ };
+ let connecting_tcp = ConnectingTcp::new(dns::IpAddrs::new(addrs), &cfg);
let start = Instant::now();
- Ok::<_, io::Error>((start, connecting_tcp.connect().await?))
+ Ok::<_, ConnectError>((start, ConnectingTcp::connect(connecting_tcp).await?))
})
.unwrap();
let res = if stream.peer_addr().unwrap().is_ipv4() {
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -262,7 +267,7 @@ mod tests {
impl<T, U> Future for Receiver<T, U> {
type Output = Option<(T, Callback<T, U>)>;
- fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
self.poll_next(cx)
}
}
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -344,9 +349,8 @@ mod tests {
fn giver_queue_throughput(b: &mut test::Bencher) {
use crate::{Body, Request, Response};
- let mut rt = tokio::runtime::Builder::new()
+ let rt = tokio::runtime::Builder::new_current_thread()
.enable_all()
- .basic_scheduler()
.build()
.unwrap();
let (mut tx, mut rx) = channel::<Request<Body>, Response<Body>>();
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -368,9 +372,8 @@ mod tests {
#[cfg(feature = "nightly")]
#[bench]
fn giver_queue_not_ready(b: &mut test::Bencher) {
- let mut rt = tokio::runtime::Builder::new()
+ let rt = tokio::runtime::Builder::new_current_thread()
.enable_all()
- .basic_scheduler()
.build()
.unwrap();
let (_tx, mut rx) = channel::<i32, ()>();
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -850,7 +855,7 @@ mod tests {
let pooled = pool.pooled(c(key.clone()), Uniq(41));
drop(pooled);
- tokio::time::delay_for(pool.locked().timeout.unwrap()).await;
+ tokio::time::sleep(pool.locked().timeout.unwrap()).await;
let mut checkout = pool.checkout(key);
let poll_once = PollOnce(&mut checkout);
let is_not_ready = poll_once.await.is_none();
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -871,7 +876,7 @@ mod tests {
pool.locked().idle.get(&key).map(|entries| entries.len()),
Some(3)
);
- tokio::time::delay_for(pool.locked().timeout.unwrap()).await;
+ tokio::time::sleep(pool.locked().timeout.unwrap()).await;
let mut checkout = pool.checkout(key.clone());
let poll_once = PollOnce(&mut checkout);
diff --git a/src/common/io/rewind.rs b/src/common/io/rewind.rs
--- a/src/common/io/rewind.rs
+++ b/src/common/io/rewind.rs
@@ -92,15 +89,6 @@ where
fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<io::Result<()>> {
Pin::new(&mut self.inner).poll_shutdown(cx)
}
-
- #[inline]
- fn poll_write_buf<B: Buf>(
- mut self: Pin<&mut Self>,
- cx: &mut task::Context<'_>,
- buf: &mut B,
- ) -> Poll<io::Result<usize>> {
- Pin::new(&mut self.inner).poll_write_buf(cx, buf)
- }
}
#[cfg(test)]
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -967,9 +967,8 @@ mod tests {
*conn.io.read_buf_mut() = ::bytes::BytesMut::from(&s[..]);
conn.state.cached_headers = Some(HeaderMap::with_capacity(2));
- let mut rt = tokio::runtime::Builder::new()
+ let rt = tokio::runtime::Builder::new_current_thread()
.enable_all()
- .basic_scheduler()
.build()
.unwrap();
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -382,7 +382,7 @@ mod tests {
use super::*;
use std::pin::Pin;
use std::time::Duration;
- use tokio::io::AsyncRead;
+ use tokio::io::{AsyncRead, ReadBuf};
impl<'a> MemRead for &'a [u8] {
fn read_mem(&mut self, _: &mut task::Context<'_>, len: usize) -> Poll<io::Result<Bytes>> {
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -401,8 +401,9 @@ mod tests {
impl<'a> MemRead for &'a mut (dyn AsyncRead + Unpin) {
fn read_mem(&mut self, cx: &mut task::Context<'_>, len: usize) -> Poll<io::Result<Bytes>> {
let mut v = vec![0; len];
- let n = ready!(Pin::new(self).poll_read(cx, &mut v)?);
- Poll::Ready(Ok(Bytes::copy_from_slice(&v[..n])))
+ let mut buf = ReadBuf::new(&mut v);
+ ready!(Pin::new(self).poll_read(cx, &mut buf)?);
+ Poll::Ready(Ok(Bytes::copy_from_slice(&buf.filled())))
}
}
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -623,7 +624,7 @@ mod tests {
#[cfg(feature = "nightly")]
#[bench]
fn bench_decode_chunked_1kb(b: &mut test::Bencher) {
- let mut rt = new_runtime();
+ let rt = new_runtime();
const LEN: usize = 1024;
let mut vec = Vec::new();
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -647,7 +648,7 @@ mod tests {
#[cfg(feature = "nightly")]
#[bench]
fn bench_decode_length_1kb(b: &mut test::Bencher) {
- let mut rt = new_runtime();
+ let rt = new_runtime();
const LEN: usize = 1024;
let content = Bytes::from(&[0; LEN][..]);
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -665,9 +666,8 @@ mod tests {
#[cfg(feature = "nightly")]
fn new_runtime() -> tokio::runtime::Runtime {
- tokio::runtime::Builder::new()
+ tokio::runtime::Builder::new_current_thread()
.enable_all()
- .basic_scheduler()
.build()
.expect("rt build")
}
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -628,28 +644,31 @@ mod tests {
*/
#[tokio::test]
+ #[ignore]
async fn iobuf_write_empty_slice() {
- // First, let's just check that the Mock would normally return an
- // error on an unexpected write, even if the buffer is empty...
- let mut mock = Mock::new().build();
- futures_util::future::poll_fn(|cx| {
- Pin::new(&mut mock).poll_write_buf(cx, &mut Cursor::new(&[]))
- })
- .await
- .expect_err("should be a broken pipe");
-
- // underlying io will return the logic error upon write,
- // so we are testing that the io_buf does not trigger a write
- // when there is nothing to flush
- let mock = Mock::new().build();
- let mut io_buf = Buffered::<_, Cursor<Vec<u8>>>::new(mock);
- io_buf.flush().await.expect("should short-circuit flush");
+ // TODO(eliza): can i have writev back pls T_T
+ // // First, let's just check that the Mock would normally return an
+ // // error on an unexpected write, even if the buffer is empty...
+ // let mut mock = Mock::new().build();
+ // futures_util::future::poll_fn(|cx| {
+ // Pin::new(&mut mock).poll_write_buf(cx, &mut Cursor::new(&[]))
+ // })
+ // .await
+ // .expect_err("should be a broken pipe");
+
+ // // underlying io will return the logic error upon write,
+ // // so we are testing that the io_buf does not trigger a write
+ // // when there is nothing to flush
+ // let mock = Mock::new().build();
+ // let mut io_buf = Buffered::<_, Cursor<Vec<u8>>>::new(mock);
+ // io_buf.flush().await.expect("should short-circuit flush");
}
#[tokio::test]
async fn parse_reads_until_blocked() {
use crate::proto::h1::ClientTransaction;
+ let _ = pretty_env_logger::try_init();
let mock = Mock::new()
// Split over multiple reads will read all of it
.read(b"HTTP/1.1 200 OK\r\n")
diff --git a/src/upgrade.rs b/src/upgrade.rs
--- a/src/upgrade.rs
+++ b/src/upgrade.rs
@@ -326,8 +302,8 @@ mod tests {
fn poll_read(
self: Pin<&mut Self>,
_cx: &mut task::Context<'_>,
- _buf: &mut [u8],
- ) -> Poll<io::Result<usize>> {
+ _buf: &mut ReadBuf<'_>,
+ ) -> Poll<io::Result<()>> {
unreachable!("Mock::poll_read")
}
}
diff --git a/src/upgrade.rs b/src/upgrade.rs
--- a/src/upgrade.rs
+++ b/src/upgrade.rs
@@ -335,21 +311,23 @@ mod tests {
impl AsyncWrite for Mock {
fn poll_write(
self: Pin<&mut Self>,
- _cx: &mut task::Context<'_>,
- _buf: &[u8],
+ _: &mut task::Context<'_>,
+ buf: &[u8],
) -> Poll<io::Result<usize>> {
- panic!("poll_write shouldn't be called");
+ // panic!("poll_write shouldn't be called");
+ Poll::Ready(Ok(buf.len()))
}
- fn poll_write_buf<B: Buf>(
- self: Pin<&mut Self>,
- _cx: &mut task::Context<'_>,
- buf: &mut B,
- ) -> Poll<io::Result<usize>> {
- let n = buf.remaining();
- buf.advance(n);
- Poll::Ready(Ok(n))
- }
+ // TODO(eliza): :(
+ // fn poll_write_buf<B: Buf>(
+ // self: Pin<&mut Self>,
+ // _cx: &mut task::Context<'_>,
+ // buf: &mut B,
+ // ) -> Poll<io::Result<usize>> {
+ // let n = buf.remaining();
+ // buf.advance(n);
+ // Poll::Ready(Ok(n))
+ // }
fn poll_flush(self: Pin<&mut Self>, _cx: &mut task::Context<'_>) -> Poll<io::Result<()>> {
unreachable!("Mock::poll_flush")
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -18,7 +18,7 @@ use futures_channel::oneshot;
use futures_core::{Future, Stream, TryFuture};
use futures_util::future::{self, FutureExt, TryFutureExt};
use tokio::net::TcpStream;
-use tokio::runtime::Runtime;
+mod support;
fn s(buf: &[u8]) -> &str {
std::str::from_utf8(buf).expect("from_utf8")
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -115,12 +115,12 @@ macro_rules! test {
#[test]
fn $name() {
let _ = pretty_env_logger::try_init();
- let mut rt = Runtime::new().expect("runtime new");
+ let rt = support::runtime();
let res = test! {
INNER;
name: $name,
- runtime: &mut rt,
+ runtime: &rt,
server:
expected: $server_expected,
reply: $server_reply,
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -169,12 +169,12 @@ macro_rules! test {
#[test]
fn $name() {
let _ = pretty_env_logger::try_init();
- let mut rt = Runtime::new().expect("runtime new");
+ let rt = support::runtime();
let err: ::hyper::Error = test! {
INNER;
name: $name,
- runtime: &mut rt,
+ runtime: &rt,
server:
expected: $server_expected,
reply: $server_reply,
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -963,10 +963,10 @@ mod dispatch_impl {
use futures_util::future::{FutureExt, TryFutureExt};
use futures_util::stream::StreamExt;
use http::Uri;
- use tokio::io::{AsyncRead, AsyncWrite};
+ use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
use tokio::net::TcpStream;
- use tokio::runtime::Runtime;
+ use super::support;
use hyper::body::HttpBody;
use hyper::client::connect::{Connected, Connection, HttpConnector};
use hyper::Client;
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -978,7 +978,7 @@ mod dispatch_impl {
let server = TcpListener::bind("127.0.0.1:0").unwrap();
let addr = server.local_addr().unwrap();
- let mut rt = Runtime::new().unwrap();
+ let rt = support::runtime();
let (closes_tx, closes) = mpsc::channel(10);
let client = Client::builder().build(DebugConnector::with_http_and_closes(
HttpConnector::new(),
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1016,7 +1016,7 @@ mod dispatch_impl {
rt.block_on(async move {
let (res, ()) = future::join(res, rx).await;
res.unwrap();
- tokio::time::delay_for(Duration::from_secs(1)).await;
+ tokio::time::sleep(Duration::from_secs(1)).await;
});
rt.block_on(closes.into_future()).0.expect("closes");
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1029,7 +1029,7 @@ mod dispatch_impl {
let server = TcpListener::bind("127.0.0.1:0").unwrap();
let addr = server.local_addr().unwrap();
- let mut rt = Runtime::new().unwrap();
+ let rt = support::runtime();
let (closes_tx, closes) = mpsc::channel(10);
let (tx1, rx1) = oneshot::channel();
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1075,7 +1075,7 @@ mod dispatch_impl {
rt.block_on(async move {
let (res, ()) = future::join(res, rx).await;
res.unwrap();
- tokio::time::delay_for(Duration::from_secs(1)).await;
+ tokio::time::sleep(Duration::from_secs(1)).await;
});
rt.block_on(closes.into_future()).0.expect("closes");
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1113,9 +1113,7 @@ mod dispatch_impl {
// prevent this thread from closing until end of test, so the connection
// stays open and idle until Client is dropped
- Runtime::new()
- .unwrap()
- .block_on(client_drop_rx.into_future())
+ support::runtime().block_on(client_drop_rx.into_future())
});
let client = Client::builder().build(DebugConnector::with_http_and_closes(
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1147,7 +1145,7 @@ mod dispatch_impl {
drop(client);
// and wait a few ticks for the connections to close
- let t = tokio::time::delay_for(Duration::from_millis(100)).map(|_| panic!("time out"));
+ let t = tokio::time::sleep(Duration::from_millis(100)).map(|_| panic!("time out"));
let close = closes.into_future().map(|(opt, _)| opt.expect("closes"));
future::select(t, close).await;
}
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1195,7 +1193,7 @@ mod dispatch_impl {
future::select(res, rx1).await;
// res now dropped
- let t = tokio::time::delay_for(Duration::from_millis(100)).map(|_| panic!("time out"));
+ let t = tokio::time::sleep(Duration::from_millis(100)).map(|_| panic!("time out"));
let close = closes.into_future().map(|(opt, _)| opt.expect("closes"));
future::select(t, close).await;
}
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1250,7 +1248,7 @@ mod dispatch_impl {
res.unwrap();
// and wait a few ticks to see the connection drop
- let t = tokio::time::delay_for(Duration::from_millis(100)).map(|_| panic!("time out"));
+ let t = tokio::time::sleep(Duration::from_millis(100)).map(|_| panic!("time out"));
let close = closes.into_future().map(|(opt, _)| opt.expect("closes"));
future::select(t, close).await;
}
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1300,7 +1298,7 @@ mod dispatch_impl {
let (res, ()) = future::join(res, rx).await;
res.unwrap();
- let t = tokio::time::delay_for(Duration::from_millis(100)).map(|_| panic!("time out"));
+ let t = tokio::time::sleep(Duration::from_millis(100)).map(|_| panic!("time out"));
let close = closes.into_future().map(|(opt, _)| opt.expect("closes"));
future::select(t, close).await;
}
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1346,7 +1344,7 @@ mod dispatch_impl {
let (res, ()) = future::join(res, rx).await;
res.unwrap();
- let t = tokio::time::delay_for(Duration::from_millis(100)).map(|_| panic!("time out"));
+ let t = tokio::time::sleep(Duration::from_millis(100)).map(|_| panic!("time out"));
let close = closes.into_future().map(|(opt, _)| opt.expect("closes"));
future::select(t, close).await;
}
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1357,7 +1355,7 @@ mod dispatch_impl {
// idle connections that the Checkout would have found
let _ = pretty_env_logger::try_init();
- let _rt = Runtime::new().unwrap();
+ let _rt = support::runtime();
let connector = DebugConnector::new();
let connects = connector.connects.clone();
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1379,7 +1377,7 @@ mod dispatch_impl {
let _ = pretty_env_logger::try_init();
let server = TcpListener::bind("127.0.0.1:0").unwrap();
let addr = server.local_addr().unwrap();
- let mut rt = Runtime::new().unwrap();
+ let rt = support::runtime();
let connector = DebugConnector::new();
let connects = connector.connects.clone();
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1445,7 +1443,7 @@ mod dispatch_impl {
let _ = pretty_env_logger::try_init();
let server = TcpListener::bind("127.0.0.1:0").unwrap();
let addr = server.local_addr().unwrap();
- let mut rt = Runtime::new().unwrap();
+ let rt = support::runtime();
let connector = DebugConnector::new();
let connects = connector.connects.clone();
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1507,7 +1505,7 @@ mod dispatch_impl {
let _ = pretty_env_logger::try_init();
let server = TcpListener::bind("127.0.0.1:0").unwrap();
let addr = server.local_addr().unwrap();
- let mut rt = Runtime::new().unwrap();
+ let rt = support::runtime();
let connector = DebugConnector::new();
let connects = connector.connects.clone();
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1544,7 +1542,7 @@ mod dispatch_impl {
assert_eq!(connects.load(Ordering::Relaxed), 0);
let delayed_body = rx1
- .then(|_| tokio::time::delay_for(Duration::from_millis(200)))
+ .then(|_| tokio::time::sleep(Duration::from_millis(200)))
.map(|_| Ok::<_, ()>("hello a"))
.map_err(|_| -> hyper::Error { panic!("rx1") })
.into_stream();
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1559,7 +1557,7 @@ mod dispatch_impl {
// req 1
let fut = future::join(client.request(req), rx)
- .then(|_| tokio::time::delay_for(Duration::from_millis(200)))
+ .then(|_| tokio::time::sleep(Duration::from_millis(200)))
// req 2
.then(move |()| {
let rx = rx3.expect("thread panicked");
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1646,7 +1644,7 @@ mod dispatch_impl {
// sleep real quick to let the threadpool put connection in ready
// state and back into client pool
- tokio::time::delay_for(Duration::from_millis(50)).await;
+ tokio::time::sleep(Duration::from_millis(50)).await;
let rx = rx2.expect("thread panicked");
let req = Request::builder()
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1669,7 +1667,7 @@ mod dispatch_impl {
let _ = pretty_env_logger::try_init();
let server = TcpListener::bind("127.0.0.1:0").unwrap();
let addr = server.local_addr().unwrap();
- let mut rt = Runtime::new().unwrap();
+ let rt = support::runtime();
let connector = DebugConnector::new().proxy();
let client = Client::builder().build(connector);
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1708,7 +1706,7 @@ mod dispatch_impl {
let _ = pretty_env_logger::try_init();
let server = TcpListener::bind("127.0.0.1:0").unwrap();
let addr = server.local_addr().unwrap();
- let mut rt = Runtime::new().unwrap();
+ let rt = support::runtime();
let connector = DebugConnector::new().proxy();
let client = Client::builder().build(connector);
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1750,7 +1748,7 @@ mod dispatch_impl {
let _ = pretty_env_logger::try_init();
let server = TcpListener::bind("127.0.0.1:0").unwrap();
let addr = server.local_addr().unwrap();
- let mut rt = Runtime::new().unwrap();
+ let rt = support::runtime();
let connector = DebugConnector::new();
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1814,8 +1812,8 @@ mod dispatch_impl {
use tokio::net::TcpListener;
let _ = pretty_env_logger::try_init();
- let mut rt = Runtime::new().unwrap();
- let mut listener = rt
+ let rt = support::runtime();
+ let listener = rt
.block_on(TcpListener::bind(SocketAddr::from(([127, 0, 0, 1], 0))))
.unwrap();
let addr = listener.local_addr().unwrap();
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1963,8 +1961,8 @@ mod dispatch_impl {
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
- buf: &mut [u8],
- ) -> Poll<Result<usize, io::Error>> {
+ buf: &mut ReadBuf<'_>,
+ ) -> Poll<io::Result<()>> {
Pin::new(&mut self.tcp).poll_read(cx, buf)
}
}
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1993,19 +1991,18 @@ mod conn {
use futures_channel::oneshot;
use futures_util::future::{self, poll_fn, FutureExt, TryFutureExt};
use futures_util::StreamExt;
- use tokio::io::{AsyncRead, AsyncReadExt as _, AsyncWrite, AsyncWriteExt as _};
+ use tokio::io::{AsyncRead, AsyncReadExt as _, AsyncWrite, AsyncWriteExt as _, ReadBuf};
use tokio::net::{TcpListener as TkTcpListener, TcpStream};
- use tokio::runtime::Runtime;
use hyper::client::conn;
use hyper::{self, Body, Method, Request};
- use super::{concat, s, tcp_connect, FutureHyperExt};
+ use super::{concat, s, support, tcp_connect, FutureHyperExt};
#[tokio::test]
async fn get() {
let _ = ::pretty_env_logger::try_init();
- let mut listener = TkTcpListener::bind(SocketAddr::from(([127, 0, 0, 1], 0)))
+ let listener = TkTcpListener::bind(SocketAddr::from(([127, 0, 0, 1], 0)))
.await
.unwrap();
let addr = listener.local_addr().unwrap();
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2052,7 +2049,7 @@ mod conn {
let server = TcpListener::bind("127.0.0.1:0").unwrap();
let addr = server.local_addr().unwrap();
- let mut rt = Runtime::new().unwrap();
+ let rt = support::runtime();
let (tx1, rx1) = oneshot::channel();
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2090,7 +2087,7 @@ mod conn {
});
let rx = rx1.expect("thread panicked");
- let rx = rx.then(|_| tokio::time::delay_for(Duration::from_millis(200)));
+ let rx = rx.then(|_| tokio::time::sleep(Duration::from_millis(200)));
let chunk = rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap();
assert_eq!(chunk.len(), 5);
}
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2100,7 +2097,7 @@ mod conn {
let _ = ::pretty_env_logger::try_init();
let server = TcpListener::bind("127.0.0.1:0").unwrap();
let addr = server.local_addr().unwrap();
- let mut rt = Runtime::new().unwrap();
+ let rt = support::runtime();
let (tx, rx) = oneshot::channel();
let server = thread::spawn(move || {
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2127,7 +2124,7 @@ mod conn {
let (mut sender, body) = Body::channel();
let sender = thread::spawn(move || {
sender.try_send_data("hello".into()).expect("try_send_data");
- Runtime::new().unwrap().block_on(rx).unwrap();
+ support::runtime().block_on(rx).unwrap();
sender.abort();
});
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2147,7 +2144,7 @@ mod conn {
fn uri_absolute_form() {
let server = TcpListener::bind("127.0.0.1:0").unwrap();
let addr = server.local_addr().unwrap();
- let mut rt = Runtime::new().unwrap();
+ let rt = support::runtime();
let (tx1, rx1) = oneshot::channel();
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2185,7 +2182,7 @@ mod conn {
concat(res)
});
let rx = rx1.expect("thread panicked");
- let rx = rx.then(|_| tokio::time::delay_for(Duration::from_millis(200)));
+ let rx = rx.then(|_| tokio::time::sleep(Duration::from_millis(200)));
rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap();
}
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2193,7 +2190,7 @@ mod conn {
fn http1_conn_coerces_http2_request() {
let server = TcpListener::bind("127.0.0.1:0").unwrap();
let addr = server.local_addr().unwrap();
- let mut rt = Runtime::new().unwrap();
+ let rt = support::runtime();
let (tx1, rx1) = oneshot::channel();
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2231,7 +2228,7 @@ mod conn {
concat(res)
});
let rx = rx1.expect("thread panicked");
- let rx = rx.then(|_| tokio::time::delay_for(Duration::from_millis(200)));
+ let rx = rx.then(|_| tokio::time::sleep(Duration::from_millis(200)));
rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap();
}
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2239,7 +2236,7 @@ mod conn {
fn pipeline() {
let server = TcpListener::bind("127.0.0.1:0").unwrap();
let addr = server.local_addr().unwrap();
- let mut rt = Runtime::new().unwrap();
+ let rt = support::runtime();
let (tx1, rx1) = oneshot::channel();
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2283,20 +2280,18 @@ mod conn {
});
let rx = rx1.expect("thread panicked");
- let rx = rx.then(|_| tokio::time::delay_for(Duration::from_millis(200)));
+ let rx = rx.then(|_| tokio::time::sleep(Duration::from_millis(200)));
rt.block_on(future::join3(res1, res2, rx).map(|r| r.0))
.unwrap();
}
#[test]
fn upgrade() {
- use tokio::io::{AsyncReadExt, AsyncWriteExt};
-
let _ = ::pretty_env_logger::try_init();
let server = TcpListener::bind("127.0.0.1:0").unwrap();
let addr = server.local_addr().unwrap();
- let mut rt = Runtime::new().unwrap();
+ let rt = support::runtime();
let (tx1, rx1) = oneshot::channel();
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2346,7 +2341,7 @@ mod conn {
});
let rx = rx1.expect("thread panicked");
- let rx = rx.then(|_| tokio::time::delay_for(Duration::from_millis(200)));
+ let rx = rx.then(|_| tokio::time::sleep(Duration::from_millis(200)));
rt.block_on(future::join3(until_upgrade, res, rx).map(|r| r.0))
.unwrap();
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2379,13 +2374,11 @@ mod conn {
#[test]
fn connect_method() {
- use tokio::io::{AsyncReadExt, AsyncWriteExt};
-
let _ = ::pretty_env_logger::try_init();
let server = TcpListener::bind("127.0.0.1:0").unwrap();
let addr = server.local_addr().unwrap();
- let mut rt = Runtime::new().unwrap();
+ let rt = support::runtime();
let (tx1, rx1) = oneshot::channel();
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2439,7 +2432,7 @@ mod conn {
});
let rx = rx1.expect("thread panicked");
- let rx = rx.then(|_| tokio::time::delay_for(Duration::from_millis(200)));
+ let rx = rx.then(|_| tokio::time::sleep(Duration::from_millis(200)));
rt.block_on(future::join3(until_tunneled, res, rx).map(|r| r.0))
.unwrap();
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2529,7 +2522,7 @@ mod conn {
let _ = shdn_tx.send(());
// Allow time for graceful shutdown roundtrips...
- tokio::time::delay_for(Duration::from_millis(100)).await;
+ tokio::time::sleep(Duration::from_millis(100)).await;
// After graceful shutdown roundtrips, the client should be closed...
future::poll_fn(|ctx| client.poll_ready(ctx))
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2541,7 +2534,7 @@ mod conn {
async fn http2_keep_alive_detects_unresponsive_server() {
let _ = pretty_env_logger::try_init();
- let mut listener = TkTcpListener::bind(SocketAddr::from(([127, 0, 0, 1], 0)))
+ let listener = TkTcpListener::bind(SocketAddr::from(([127, 0, 0, 1], 0)))
.await
.unwrap();
let addr = listener.local_addr().unwrap();
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2581,7 +2574,7 @@ mod conn {
let _ = pretty_env_logger::try_init();
- let mut listener = TkTcpListener::bind(SocketAddr::from(([127, 0, 0, 1], 0)))
+ let listener = TkTcpListener::bind(SocketAddr::from(([127, 0, 0, 1], 0)))
.await
.unwrap();
let addr = listener.local_addr().unwrap();
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2606,7 +2599,7 @@ mod conn {
});
// sleep longer than keepalive would trigger
- tokio::time::delay_for(Duration::from_secs(4)).await;
+ tokio::time::sleep(Duration::from_secs(4)).await;
future::poll_fn(|ctx| client.poll_ready(ctx))
.await
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2617,7 +2610,7 @@ mod conn {
async fn http2_keep_alive_closes_open_streams() {
let _ = pretty_env_logger::try_init();
- let mut listener = TkTcpListener::bind(SocketAddr::from(([127, 0, 0, 1], 0)))
+ let listener = TkTcpListener::bind(SocketAddr::from(([127, 0, 0, 1], 0)))
.await
.unwrap();
let addr = listener.local_addr().unwrap();
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2667,7 +2660,7 @@ mod conn {
let _ = pretty_env_logger::try_init();
- let mut listener = TkTcpListener::bind(SocketAddr::from(([127, 0, 0, 1], 0)))
+ let listener = TkTcpListener::bind(SocketAddr::from(([127, 0, 0, 1], 0)))
.await
.unwrap();
let addr = listener.local_addr().unwrap();
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2711,7 +2704,7 @@ mod conn {
let _resp = client.send_request(req1).await.expect("send_request");
// sleep longer than keepalive would trigger
- tokio::time::delay_for(Duration::from_secs(4)).await;
+ tokio::time::sleep(Duration::from_secs(4)).await;
future::poll_fn(|ctx| client.poll_ready(ctx))
.await
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2763,8 +2756,8 @@ mod conn {
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
- buf: &mut [u8],
- ) -> Poll<Result<usize, io::Error>> {
+ buf: &mut ReadBuf<'_>,
+ ) -> Poll<io::Result<()>> {
Pin::new(&mut self.tcp).poll_read(cx, buf)
}
}
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -18,9 +18,8 @@ use futures_util::future::{self, Either, FutureExt, TryFutureExt};
#[cfg(feature = "stream")]
use futures_util::stream::StreamExt as _;
use http::header::{HeaderName, HeaderValue};
-use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
+use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, ReadBuf};
use tokio::net::{TcpListener, TcpStream as TkTcpStream};
-use tokio::runtime::Runtime;
use hyper::body::HttpBody as _;
use hyper::client::Client;
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -29,6 +28,8 @@ use hyper::server::Server;
use hyper::service::{make_service_fn, service_fn};
use hyper::{Body, Request, Response, StatusCode, Version};
+mod support;
+
#[test]
fn get_should_ignore_body() {
let server = serve();
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -788,7 +789,7 @@ fn expect_continue_but_no_body_is_ignored() {
#[tokio::test]
async fn expect_continue_waits_for_body_poll() {
let _ = pretty_env_logger::try_init();
- let mut listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
+ let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
let addr = listener.local_addr().unwrap();
let child = thread::spawn(move || {
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -821,7 +822,7 @@ async fn expect_continue_waits_for_body_poll() {
service_fn(|req| {
assert_eq!(req.headers()["expect"], "100-continue");
// But! We're never going to poll the body!
- tokio::time::delay_for(Duration::from_millis(50)).map(move |_| {
+ tokio::time::sleep(Duration::from_millis(50)).map(move |_| {
// Move and drop the req, so we don't auto-close
drop(req);
Response::builder()
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -956,7 +957,7 @@ fn http_10_request_receives_http_10_response() {
#[tokio::test]
async fn disable_keep_alive_mid_request() {
- let mut listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
+ let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
let addr = listener.local_addr().unwrap();
let (tx1, rx1) = oneshot::channel();
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -994,7 +995,7 @@ async fn disable_keep_alive_mid_request() {
#[tokio::test]
async fn disable_keep_alive_post_request() {
let _ = pretty_env_logger::try_init();
- let mut listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
+ let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
let addr = listener.local_addr().unwrap();
let (tx1, rx1) = oneshot::channel();
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1046,7 +1047,7 @@ async fn disable_keep_alive_post_request() {
#[tokio::test]
async fn empty_parse_eof_does_not_return_error() {
let _ = pretty_env_logger::try_init();
- let mut listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
+ let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
let addr = listener.local_addr().unwrap();
thread::spawn(move || {
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1062,7 +1063,7 @@ async fn empty_parse_eof_does_not_return_error() {
#[tokio::test]
async fn nonempty_parse_eof_returns_error() {
- let mut listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
+ let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
let addr = listener.local_addr().unwrap();
thread::spawn(move || {
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1080,7 +1081,7 @@ async fn nonempty_parse_eof_returns_error() {
#[tokio::test]
async fn http1_allow_half_close() {
let _ = pretty_env_logger::try_init();
- let mut listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
+ let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
let addr = listener.local_addr().unwrap();
let t1 = thread::spawn(move || {
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1100,7 +1101,7 @@ async fn http1_allow_half_close() {
.serve_connection(
socket,
service_fn(|_| {
- tokio::time::delay_for(Duration::from_millis(500))
+ tokio::time::sleep(Duration::from_millis(500))
.map(|_| Ok::<_, hyper::Error>(Response::new(Body::empty())))
}),
)
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1113,7 +1114,7 @@ async fn http1_allow_half_close() {
#[tokio::test]
async fn disconnect_after_reading_request_before_responding() {
let _ = pretty_env_logger::try_init();
- let mut listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
+ let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
let addr = listener.local_addr().unwrap();
thread::spawn(move || {
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1127,7 +1128,7 @@ async fn disconnect_after_reading_request_before_responding() {
.serve_connection(
socket,
service_fn(|_| {
- tokio::time::delay_for(Duration::from_secs(2)).map(
+ tokio::time::sleep(Duration::from_secs(2)).map(
|_| -> Result<Response<Body>, hyper::Error> {
panic!("response future should have been dropped");
},
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1140,7 +1141,7 @@ async fn disconnect_after_reading_request_before_responding() {
#[tokio::test]
async fn returning_1xx_response_is_error() {
- let mut listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
+ let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
let addr = listener.local_addr().unwrap();
thread::spawn(move || {
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1193,7 +1194,7 @@ async fn upgrades() {
use tokio::io::{AsyncReadExt, AsyncWriteExt};
let _ = pretty_env_logger::try_init();
- let mut listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
+ let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
let addr = listener.local_addr().unwrap();
let (tx, rx) = oneshot::channel();
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1252,7 +1253,7 @@ async fn http_connect() {
use tokio::io::{AsyncReadExt, AsyncWriteExt};
let _ = pretty_env_logger::try_init();
- let mut listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
+ let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
let addr = listener.local_addr().unwrap();
let (tx, rx) = oneshot::channel();
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1308,7 +1309,7 @@ async fn upgrades_new() {
use tokio::io::{AsyncReadExt, AsyncWriteExt};
let _ = pretty_env_logger::try_init();
- let mut listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
+ let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
let addr = listener.local_addr().unwrap();
let (read_101_tx, read_101_rx) = oneshot::channel();
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1375,7 +1376,7 @@ async fn upgrades_new() {
#[tokio::test]
async fn upgrades_ignored() {
let _ = pretty_env_logger::try_init();
- let mut listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
+ let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
let addr = listener.local_addr().unwrap();
tokio::spawn(async move {
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1417,7 +1418,7 @@ async fn http_connect_new() {
use tokio::io::{AsyncReadExt, AsyncWriteExt};
let _ = pretty_env_logger::try_init();
- let mut listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
+ let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
let addr = listener.local_addr().unwrap();
let (read_200_tx, read_200_rx) = oneshot::channel();
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1480,7 +1481,7 @@ async fn http_connect_new() {
#[tokio::test]
async fn parse_errors_send_4xx_response() {
- let mut listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
+ let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
let addr = listener.local_addr().unwrap();
thread::spawn(move || {
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1502,7 +1503,7 @@ async fn parse_errors_send_4xx_response() {
#[tokio::test]
async fn illegal_request_length_returns_400_response() {
- let mut listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
+ let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
let addr = listener.local_addr().unwrap();
thread::spawn(move || {
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1538,7 +1539,7 @@ fn max_buf_size_no_panic() {
#[tokio::test]
async fn max_buf_size() {
let _ = pretty_env_logger::try_init();
- let mut listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
+ let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
let addr = listener.local_addr().unwrap();
const MAX: usize = 16_000;
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1592,7 +1593,7 @@ fn http1_response_with_http2_version() {
let server = serve();
let addr_str = format!("http://{}", server.addr());
- let mut rt = Runtime::new().expect("runtime new");
+ let rt = support::runtime();
server.reply().version(hyper::Version::HTTP_2);
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1609,7 +1610,7 @@ fn try_h2() {
let server = serve();
let addr_str = format!("http://{}", server.addr());
- let mut rt = Runtime::new().expect("runtime new");
+ let rt = support::runtime();
rt.block_on({
let client = Client::builder()
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1629,7 +1630,7 @@ fn http1_only() {
let server = serve_opts().http1_only().serve();
let addr_str = format!("http://{}", server.addr());
- let mut rt = Runtime::new().expect("runtime new");
+ let rt = support::runtime();
rt.block_on({
let client = Client::builder()
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1684,7 +1685,7 @@ fn http2_body_user_error_sends_reset_reason() {
server.reply().body_stream(b);
- let mut rt = Runtime::new().expect("runtime new");
+ let rt = support::runtime();
let err: hyper::Error = rt
.block_on(async move {
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1823,7 +1824,7 @@ fn skips_content_length_and_body_for_304_responses() {
async fn http2_keep_alive_detects_unresponsive_client() {
let _ = pretty_env_logger::try_init();
- let mut listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
+ let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
let addr = listener.local_addr().unwrap();
// Spawn a "client" conn that only reads until EOF
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1871,7 +1872,7 @@ async fn http2_keep_alive_detects_unresponsive_client() {
async fn http2_keep_alive_with_responsive_client() {
let _ = pretty_env_logger::try_init();
- let mut listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
+ let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
let addr = listener.local_addr().unwrap();
tokio::spawn(async move {
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1897,7 +1898,7 @@ async fn http2_keep_alive_with_responsive_client() {
conn.await.expect("client conn");
});
- tokio::time::delay_for(Duration::from_secs(4)).await;
+ tokio::time::sleep(Duration::from_secs(4)).await;
let req = http::Request::new(hyper::Body::empty());
client.send_request(req).await.expect("client.send_request");
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1938,7 +1939,7 @@ async fn write_pong_frame(conn: &mut TkTcpStream) {
async fn http2_keep_alive_count_server_pings() {
let _ = pretty_env_logger::try_init();
- let mut listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
+ let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
let addr = listener.local_addr().unwrap();
tokio::spawn(async move {
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2294,38 +2295,32 @@ impl ServeOptions {
let thread = thread::Builder::new()
.name(thread_name)
.spawn(move || {
- let mut rt = tokio::runtime::Builder::new()
- .enable_io()
- .enable_time()
- .basic_scheduler()
- .build()
- .expect("rt new");
-
- rt.block_on(async move {
- let service = make_service_fn(|_| {
- let msg_tx = msg_tx.clone();
- let reply_rx = reply_rx.clone();
- future::ok::<_, BoxError>(TestService {
- tx: msg_tx,
- reply: reply_rx,
- })
- });
-
- let server = Server::bind(&addr)
- .http1_only(options.http1_only)
- .http1_keepalive(options.keep_alive)
- .http1_pipeline_flush(options.pipeline)
- .serve(service);
-
- addr_tx.send(server.local_addr()).expect("server addr tx");
-
- server
- .with_graceful_shutdown(async {
- let _ = shutdown_rx.await;
- })
- .await
- })
- .expect("serve()");
+ support::runtime()
+ .block_on(async move {
+ let service = make_service_fn(|_| {
+ let msg_tx = msg_tx.clone();
+ let reply_rx = reply_rx.clone();
+ future::ok::<_, BoxError>(TestService {
+ tx: msg_tx,
+ reply: reply_rx,
+ })
+ });
+
+ let server = Server::bind(&addr)
+ .http1_only(options.http1_only)
+ .http1_keepalive(options.keep_alive)
+ .http1_pipeline_flush(options.pipeline)
+ .serve(service);
+
+ addr_tx.send(server.local_addr()).expect("server addr tx");
+
+ server
+ .with_graceful_shutdown(async {
+ let _ = shutdown_rx.await;
+ })
+ .await
+ })
+ .expect("serve()");
})
.expect("thread spawn");
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2353,6 +2348,7 @@ fn has_header(msg: &str, name: &str) -> bool {
fn tcp_bind(addr: &SocketAddr) -> ::tokio::io::Result<TcpListener> {
let std_listener = StdTcpListener::bind(addr).unwrap();
+ std_listener.set_nonblocking(true).unwrap();
TcpListener::from_std(std_listener)
}
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2429,8 +2425,8 @@ impl<T: AsyncRead + Unpin, D: Unpin> AsyncRead for DebugStream<T, D> {
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
- buf: &mut [u8],
- ) -> Poll<io::Result<usize>> {
+ buf: &mut ReadBuf<'_>,
+ ) -> Poll<io::Result<()>> {
Pin::new(&mut self.stream).poll_read(cx, buf)
}
}
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -1,3 +1,4 @@
+#![allow(dead_code)]
use std::future::Future;
use std::pin::Pin;
use std::sync::{
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -15,6 +16,7 @@ pub use futures_util::{
pub use hyper::{HeaderMap, StatusCode};
pub use std::net::SocketAddr;
+#[allow(unused_macros)]
macro_rules! t {
(
$name:ident,
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -303,15 +305,16 @@ pub struct __TestConfig {
pub proxy: bool,
}
-pub fn __run_test(cfg: __TestConfig) {
- let _ = pretty_env_logger::try_init();
- tokio::runtime::Builder::new()
- .enable_io()
- .enable_time()
- .basic_scheduler()
+pub fn runtime() -> tokio::runtime::Runtime {
+ tokio::runtime::Builder::new_current_thread()
+ .enable_all()
.build()
.expect("new rt")
- .block_on(async_test(cfg));
+}
+
+pub fn __run_test(cfg: __TestConfig) {
+ let _ = pretty_env_logger::try_init();
+ runtime().block_on(async_test(cfg));
}
async fn async_test(cfg: __TestConfig) {
|
hyperium/hyper
|
cc7d3058e84b63d26697026f44cdc153b208de9a
|
[
"2310"
] |
0.13
|
fb19f3a86997af1c8a31a7d5ce6f2b018c9b5a0d
|
diff --git a/src/proto/h2/ping.rs b/src/proto/h2/ping.rs
--- a/src/proto/h2/ping.rs
+++ b/src/proto/h2/ping.rs
@@ -442,7 +442,16 @@ impl KeepAlive {
let interval = shared.last_read_at() + self.interval;
self.timer.reset(interval);
}
- KeepAliveState::Scheduled | KeepAliveState::PingSent => (),
+ KeepAliveState::PingSent => {
+ if shared.is_ping_sent() {
+ return;
+ }
+
+ self.state = KeepAliveState::Scheduled;
+ let interval = shared.last_read_at() + self.interval;
+ self.timer.reset(interval);
+ }
+ KeepAliveState::Scheduled => (),
}
}
|
Thanks for the nice write-up! It does seem like there's a bug in here. Would be good to get this fixed, probably writing a test for this might get trickier...
Hi @seanmonstar ,
Can you help me to see if I'm on the right track here?
To test the fix I suggested above, we could write a test like below.
Basically we expect the client to receive 3 pings in less than 5 seconds.
We reply with pong (ping + ack) to every ping.
Thanks in advance!
```rust
fn is_ping_frame(buf: &[u8]) -> bool {
buf[3] == 6
}
fn assert_ping_frame(buf: &[u8], len: usize) {
// Assert the StreamId is zero
let mut ubuf = [0; 4];
ubuf.copy_from_slice(&buf[5..9]);
let unpacked = u32::from_be_bytes(ubuf);
assert_eq!(unpacked & !(1 << 31), 0);
// Assert ACK flag is unset (only set for PONG).
let flags = buf[4];
assert_eq!(flags & 0x1, 0);
// Assert total frame size
assert_eq!(len, 17);
}
async fn write_pong_frame(conn: &mut TkTcpStream) {
conn.write_all(&[
0, 0, 8, // len
6, // kind
0x1, // flag
0, 0, 0, 0, // stream id
0x3b, 0x7c, 0xdb, 0x7a, 0x0b, 0x87, 0x16, 0xb4, // payload
])
.await
.expect("client pong");
}
#[tokio::test]
async fn http2_keep_alive_count_server_pings() {
let _ = pretty_env_logger::try_init();
let mut listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
let addr = listener.local_addr().unwrap();
tokio::spawn(async move {
let (socket, _) = listener.accept().await.expect("accept");
Http::new()
.http2_only(true)
.http2_keep_alive_interval(Duration::from_secs(1))
.http2_keep_alive_timeout(Duration::from_secs(1))
.serve_connection(socket, unreachable_service())
.await
.expect("serve_connection");
});
// Spawn a "client" conn that only reads until EOF
let mut conn = connect_async(addr).await;
// write h2 magic preface and settings frame
conn.write_all(b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n")
.await
.expect("client preface");
conn.write_all(&[
0, 0, 0, // len
4, // kind
0, // flag
0, 0, 0, 0, // stream id
])
.await
.expect("client settings");
let read_pings = async {
// read until 3 pings are received
let mut pings = 0;
let mut buf = [0u8; 1024];
while pings < 3 {
let n = conn.read(&mut buf).await.expect("client.read");
assert!(n != 0);
if is_ping_frame(&buf) {
assert_ping_frame(&buf, n);
write_pong_frame(&mut conn).await;
pings += 1;
}
}
};
// Expect all pings to occurs under 5 seconds
tokio::time::timeout(Duration::from_secs(5), read_pings)
.await
.expect("timed out waiting for pings");
}
```
Yea that looks right to me, does it fail, and then with your suggested patch, succeed?
Indeed, just fixed the use of `tokio::time::timeout` to make sure it fails on regression.
I'll submit a PR if agreed.
Thanks!
Yep, let's do it!
|
2020-10-29T15:13:29Z
| 2,315
|
Only one HTTP2 Ping is sent with keep alive enabled
I believe there is a bug in the HTTP2 keep alive implementation, more concretely in `src/proto/h2/ping.rs`.
This issue was originally detected in https://github.com/hyperium/tonic/issues/474, credits to @alce.
It seems that when enabling HTTP2 server keep alive, only the first Ping is sent after the configured interval.
Afterwards, no more Pings seem to be sent.
## How to reproduce
I'm applying the following changes to the `echo` example:
```diff
--- a/examples/echo.rs
+++ b/examples/echo.rs
@@ -14,7 +14,10 @@ async fn echo(req: Request<Body>) -> Result<Response<Body>, hyper::Er>
))),
// Simply echo the body back to the client.
- (&Method::POST, "/echo") => Ok(Response::new(req.into_body())),
+ (&Method::POST, "/echo") => {
+ tokio::time::delay_for(std::time::Duration::from_secs(60)).await;
+ Ok(Response::new(req.into_body()))
+ },
// Convert to uppercase before sending back to client using a stream.
(&Method::POST, "/echo/uppercase") => {
@@ -55,7 +58,10 @@ async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>
let service = make_service_fn(|_| async { Ok::<_, hyper::Error>(service_fn(echo)) }>
- let server = Server::bind(&addr).serve(service);
+ let server = Server::bind(&addr)
+ .http2_keep_alive_interval(std::time::Duration::from_secs(10))
+ .http2_keep_alive_timeout(std::time::Duration::from_secs(5))
+ .serve(service);
println!("Listening on http://{}", addr);
```
Running the example with `cargo run --example echo`, sniffing with `tcpdump -i lo port 3000`, and making a request with `curl -v --http2-prior-knowledge http://localhost:3000/echo --data "foo"`, I get the following:
```tcpdump
20:03:21.621105 IP localhost.hbci > localhost.46506: Flags [P.], seq 22:44, ack 160, win 512, options [nop,nop,TS val 2305866698 ecr 2305866696], length 22
20:03:21.621120 IP localhost.46506 > localhost.hbci: Flags [.], ack 44, win 512, options [nop,nop,TS val 2305866698 ecr 2305866698], length 0
20:03:31.623825 IP localhost.hbci > localhost.46506: Flags [P.], seq 44:61, ack 160, win 512, options [nop,nop,TS val 2305876700 ecr 2305866698], length 17
20:03:31.623854 IP localhost.46506 > localhost.hbci: Flags [.], ack 61, win 512, options [nop,nop,TS val 2305876700 ecr 2305876700], length 0
20:03:31.623926 IP localhost.46506 > localhost.hbci: Flags [P.], seq 160:177, ack 61, win 512, options [nop,nop,TS val 2305876701 ecr 2305876700], length 17
20:03:31.623948 IP localhost.hbci > localhost.46506: Flags [.], ack 177, win 512, options [nop,nop,TS val 2305876701 ecr 2305876701], length 0
20:04:21.624549 IP localhost.hbci > localhost.46506: Flags [P.], seq 61:111, ack 177, win 512, options [nop,nop,TS val 2305926701 ecr 2305876701], length 50
20:04:21.624593 IP localhost.46506 > localhost.hbci: Flags [.], ack 111, win 512, options [nop,nop,TS val 2305926701 ecr 2305926701], length 0
20:04:21.625177 IP localhost.46506 > localhost.hbci: Flags [F.], seq 177, ack 111, win 512, options [nop,nop,TS val 2305926702 ecr 2305926701], length 0
20:04:21.625825 IP localhost.hbci > localhost.46506: Flags [F.], seq 111, ack 178, win 512, options [nop,nop,TS val 2305926702 ecr 2305926702], length 0
20:04:21.625858 IP localhost.46506 > localhost.hbci: Flags [.], ack 112, win 512, options [nop,nop,TS val 2305926702 ecr 2305926702], length 0
```
The **first batch** of packets belongs to the start of the request, the **second batch** is the first Ping+Pong, the **third batch** is the end of the request. As you can see the keep alive interval of 10seconds separates the first two batches. However no more pings occur between the second and last batch, as I would expect.
After taking a look at `src/proto/h2/ping.rs` and debugging a bit, it seems that once `KeepAlive` enters the state `KeepAliveState::PingSent` it does not go back to `KeepAliveState::Scheduled`, even after a Pong is received.
A possible fix would be to check if `ping_sent_at` has been cleared when the `KeepAlive::scheduled()` method is called:
```diff
--- a/src/proto/h2/ping.rs
+++ b/src/proto/h2/ping.rs
@@ -442,7 +442,16 @@ impl KeepAlive {
let interval = shared.last_read_at() + self.interval;
self.timer.reset(interval);
}
- KeepAliveState::Scheduled | KeepAliveState::PingSent => (),
+ KeepAliveState::PingSent => {
+ if shared.is_ping_sent() {
+ return;
+ }
+
+ self.state = KeepAliveState::Scheduled;
+ let interval = shared.last_read_at() + self.interval;
+ self.timer.reset(interval);
+ }
+ KeepAliveState::Scheduled => (),
}
}
```
I'm not sure if this was the intended behavior of the original implementation.
I'll be happy to submit a PR if agreed.
Thanks in advance and keep up the good work!
|
hyperium__hyper-2315
|
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1903,6 +1903,94 @@ async fn http2_keep_alive_with_responsive_client() {
client.send_request(req).await.expect("client.send_request");
}
+fn is_ping_frame(buf: &[u8]) -> bool {
+ buf[3] == 6
+}
+
+fn assert_ping_frame(buf: &[u8], len: usize) {
+ // Assert the StreamId is zero
+ let mut ubuf = [0; 4];
+ ubuf.copy_from_slice(&buf[5..9]);
+ let unpacked = u32::from_be_bytes(ubuf);
+ assert_eq!(unpacked & !(1 << 31), 0);
+
+ // Assert ACK flag is unset (only set for PONG).
+ let flags = buf[4];
+ assert_eq!(flags & 0x1, 0);
+
+ // Assert total frame size
+ assert_eq!(len, 17);
+}
+
+async fn write_pong_frame(conn: &mut TkTcpStream) {
+ conn.write_all(&[
+ 0, 0, 8, // len
+ 6, // kind
+ 0x1, // flag
+ 0, 0, 0, 0, // stream id
+ 0x3b, 0x7c, 0xdb, 0x7a, 0x0b, 0x87, 0x16, 0xb4, // payload
+ ])
+ .await
+ .expect("client pong");
+}
+
+#[tokio::test]
+async fn http2_keep_alive_count_server_pings() {
+ let _ = pretty_env_logger::try_init();
+
+ let mut listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
+ let addr = listener.local_addr().unwrap();
+
+ tokio::spawn(async move {
+ let (socket, _) = listener.accept().await.expect("accept");
+
+ Http::new()
+ .http2_only(true)
+ .http2_keep_alive_interval(Duration::from_secs(1))
+ .http2_keep_alive_timeout(Duration::from_secs(1))
+ .serve_connection(socket, unreachable_service())
+ .await
+ .expect("serve_connection");
+ });
+
+ // Spawn a "client" conn that only reads until EOF
+ let mut conn = connect_async(addr).await;
+
+ // write h2 magic preface and settings frame
+ conn.write_all(b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n")
+ .await
+ .expect("client preface");
+ conn.write_all(&[
+ 0, 0, 0, // len
+ 4, // kind
+ 0, // flag
+ 0, 0, 0, 0, // stream id
+ ])
+ .await
+ .expect("client settings");
+
+ let read_pings = async {
+ // read until 3 pings are received
+ let mut pings = 0;
+ let mut buf = [0u8; 1024];
+ while pings < 3 {
+ let n = conn.read(&mut buf).await.expect("client.read");
+ assert!(n != 0);
+
+ if is_ping_frame(&buf) {
+ assert_ping_frame(&buf, n);
+ write_pong_frame(&mut conn).await;
+ pings += 1;
+ }
+ }
+ };
+
+ // Expect all pings to occurs under 5 seconds
+ tokio::time::timeout(Duration::from_secs(5), read_pings)
+ .await
+ .expect("timed out waiting for pings");
+}
+
// -------------------------------------------------
// the Server that is used to run all the tests with
// -------------------------------------------------
|
hyperium/hyper
|
42560c7c40d8f934658624114fda4eb819cefda8
|
[
"2263"
] |
0.13
|
3de81c822e6ac5a5b0640059f53838d0906f68c4
|
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -12,6 +12,7 @@ use super::{Decoder, Encode, EncodedBuf, Encoder, Http1Transaction, ParseContext
use crate::common::{task, Pin, Poll, Unpin};
use crate::headers::connection_keep_alive;
use crate::proto::{BodyLength, DecodedLength, MessageHead};
+use crate::Result;
const H2_PREFACE: &[u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n";
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -584,7 +585,7 @@ where
self.state.writing = state;
}
- pub fn end_body(&mut self) {
+ pub fn end_body(&mut self) -> Result<()> {
debug_assert!(self.can_write_body());
let state = match self.state.writing {
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -601,13 +602,18 @@ where
Writing::KeepAlive
}
}
- Err(_not_eof) => Writing::Closed,
+ Err(_not_eof) => {
+ return Err(crate::Error::new_user_body(
+ crate::Error::new_body_write_aborted(),
+ ))
+ }
}
}
- _ => return,
+ _ => return Ok(()),
};
self.state.writing = state;
+ Ok(())
}
// When we get a parse error, depending on what side we are, we might be able
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -338,7 +338,7 @@ where
*clear_body = true;
if chunk.remaining() == 0 {
trace!("discarding empty chunk");
- self.conn.end_body();
+ self.conn.end_body()?;
} else {
self.conn.write_body_and_end(chunk);
}
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -351,7 +351,7 @@ where
}
} else {
*clear_body = true;
- self.conn.end_body();
+ self.conn.end_body()?;
}
} else {
return Poll::Pending;
diff --git a/src/proto/h1/encode.rs b/src/proto/h1/encode.rs
--- a/src/proto/h1/encode.rs
+++ b/src/proto/h1/encode.rs
@@ -87,7 +87,8 @@ impl Encoder {
Kind::Chunked => Ok(Some(EncodedBuf {
kind: BufKind::ChunkedEnd(b"0\r\n\r\n"),
})),
- _ => Err(NotEof),
+ Kind::CloseDelimited => Ok(None),
+ Kind::Length(_) => Err(NotEof),
}
}
|
Currently the writing state just gets closed, [here](https://github.com/hyperium/hyper/blob/3de81c822e6ac5a5b0640059f53838d0906f68c4/src/proto/h1/conn.rs#L604). Instead, `end_body` should probably be changed to return a `crate::Result<()>`, and that case should return an error that will shutdown the connection.
|
2020-08-10T23:08:55Z
| 2,264
|
Sending a streamed non-chunked request will hang when body sender is dropped early
When sending a streaming request (using `Body::channel`) without content-length, the request terminates when dropping the `Sender`. But when a content-length is set, dropping the `Sender` before all bytes are sent will result in a request being stuck.
```rust
use hyper::{self, Body};
#[tokio::main]
async fn main() {
let (tx, body) = Body::channel();
let req = hyper::Request::builder()
.uri("http://httpbin.org/post")
.method(hyper::Method::POST)
.header("content-length", 1337)
.body(body)
.unwrap();
let client = hyper::Client::new();
std::mem::drop(tx);
println!("Sending request...");
let res = client.request(req).await.unwrap();
println!("Response: {:?}", res);
}
```
The example above will terminate when the content-length header is removed, but like this it hangs until the server closes the connection.
On the other hand, when explicitly aborting the sender using `tx.abort()`, the request will terminate with an error.
|
hyperium__hyper-2264
|
diff --git a/src/proto/h1/encode.rs b/src/proto/h1/encode.rs
--- a/src/proto/h1/encode.rs
+++ b/src/proto/h1/encode.rs
@@ -405,7 +406,7 @@ mod tests {
assert_eq!(dst, b"foo bar");
assert!(!encoder.is_eof());
- encoder.end::<()>().unwrap_err();
+ encoder.end::<()>().unwrap();
let msg2 = b"baz".as_ref();
let buf2 = encoder.encode(msg2);
diff --git a/src/proto/h1/encode.rs b/src/proto/h1/encode.rs
--- a/src/proto/h1/encode.rs
+++ b/src/proto/h1/encode.rs
@@ -413,6 +414,6 @@ mod tests {
assert_eq!(dst, b"foo barbaz");
assert!(!encoder.is_eof());
- encoder.end::<()>().unwrap_err();
+ encoder.end::<()>().unwrap();
}
}
|
hyperium/hyper
|
42560c7c40d8f934658624114fda4eb819cefda8
|
[
"2215"
] |
0.14
|
f162ca2f2fd14681e11dd8b9ba8d1469b2b9271b
|
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -562,13 +562,13 @@ impl Http1Transaction for Server {
}
}
None | Some(BodyLength::Known(0)) => {
- if msg.head.subject != StatusCode::NOT_MODIFIED {
+ if Server::can_have_content_length(msg.req_method, msg.head.subject) {
extend(dst, b"content-length: 0\r\n");
}
Encoder::length(0)
}
Some(BodyLength::Known(len)) => {
- if msg.head.subject == StatusCode::NOT_MODIFIED {
+ if !Server::can_have_content_length(msg.req_method, msg.head.subject) {
Encoder::length(0)
} else {
extend(dst, b"content-length: ");
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -638,13 +638,22 @@ impl Server {
if method == &Some(Method::HEAD) || method == &Some(Method::CONNECT) && status.is_success()
{
false
+ } else if status.is_informational() {
+ false
+ } else {
+ match status {
+ StatusCode::NO_CONTENT | StatusCode::NOT_MODIFIED => false,
+ _ => true,
+ }
+ }
+ }
+
+ fn can_have_content_length(method: &Option<Method>, status: StatusCode) -> bool {
+ if status.is_informational() || method == &Some(Method::CONNECT) && status.is_success() {
+ false
} else {
match status {
- // TODO: support for 1xx codes needs improvement everywhere
- // would be 100...199 => false
- StatusCode::SWITCHING_PROTOCOLS
- | StatusCode::NO_CONTENT
- | StatusCode::NOT_MODIFIED => false,
+ StatusCode::NO_CONTENT | StatusCode::NOT_MODIFIED => false,
_ => true,
}
}
|
2020-06-03T09:57:53Z
| 2,216
|
Hyper should skip automatic Content-Length header for HTTP 1xx responses
Similar to issue #1797 hyper should not set a content-length header for a number of different scenario's. Quoting [RFC 7230, section 3.3.2](https://tools.ietf.org/html/rfc7230#section-3.3.2) 6th paragraph:
> A server MUST NOT send a Content-Length header field in any response
> with a status code of 1xx (Informational) or 204 (No Content). A
> server MUST NOT send a Content-Length header field in any 2xx
> (Successful) response to a CONNECT request (Section 4.3.6 of
> [RFC7231]).
At this moment the content-length header is set for 101 Switch Protocols responses. The resulting issue is that e.g. with a warp WebSocket server, a .Net client will drop the connection after 100 seconds: https://stackoverflow.com/a/44553521/85514 .
I believe the solution should be similar to [b342c38f], and I'm willing to make a PR to do that. However, I'm not familiar enough with the code base to see if that would fix the problem / has any other implications.
|
hyperium__hyper-2216
|
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1330,8 +1330,9 @@ async fn upgrades_new() {
let mut buf = [0; 256];
tcp.read(&mut buf).expect("read 1");
- let expected = "HTTP/1.1 101 Switching Protocols\r\n";
- assert_eq!(s(&buf[..expected.len()]), expected);
+ let response = s(&buf);
+ assert!(response.starts_with("HTTP/1.1 101 Switching Protocols\r\n"));
+ assert!(!has_header(&response, "content-length"));
let _ = read_101_tx.send(());
let n = tcp.read(&mut buf).expect("read 2");
|
hyperium/hyper
|
a24f0c0af8e1f4c6b7cc3a47c83eb6e4af88aca6
|
|
[
"2188"
] |
0.13
|
203621e3be6b0dab6b79d1914482c556075a19ff
|
diff --git a/src/body/mod.rs b/src/body/mod.rs
--- a/src/body/mod.rs
+++ b/src/body/mod.rs
@@ -22,17 +22,14 @@ pub use self::aggregate::aggregate;
pub use self::body::{Body, Sender};
pub use self::to_bytes::to_bytes;
-pub(crate) use self::payload::Payload;
-
mod aggregate;
mod body;
-mod payload;
mod to_bytes;
/// An optimization to try to take a full body if immediately available.
///
/// This is currently limited to *only* `hyper::Body`s.
-pub(crate) fn take_full_data<T: Payload + 'static>(body: &mut T) -> Option<T::Data> {
+pub(crate) fn take_full_data<T: HttpBody + 'static>(body: &mut T) -> Option<T::Data> {
use std::any::{Any, TypeId};
// This static type check can be optimized at compile-time.
diff --git a/src/body/payload.rs /dev/null
--- a/src/body/payload.rs
+++ /dev/null
@@ -1,139 +0,0 @@
-use std::error::Error as StdError;
-
-use bytes::Buf;
-use http::HeaderMap;
-
-use crate::common::{task, Pin, Poll};
-use http_body::{Body as HttpBody, SizeHint};
-
-/// This trait represents a streaming body of a `Request` or `Response`.
-///
-/// The built-in implementation of this trait is [`Body`](::Body), in case you
-/// don't need to customize a send stream for your own application.
-pub trait Payload: sealed::Sealed + Send + 'static {
- /// A buffer of bytes representing a single chunk of a body.
- type Data: Buf + Send;
-
- /// The error type of this stream.
- type Error: Into<Box<dyn StdError + Send + Sync>>;
-
- /// Poll for a `Data` buffer.
- ///
- /// Similar to `Stream::poll_next`, this yields `Some(Data)` until
- /// the body ends, when it yields `None`.
- fn poll_data(
- self: Pin<&mut Self>,
- cx: &mut task::Context<'_>,
- ) -> Poll<Option<Result<Self::Data, Self::Error>>>;
-
- /// Poll for an optional **single** `HeaderMap` of trailers.
- ///
- /// This should **only** be called after `poll_data` has ended.
- ///
- /// Note: Trailers aren't currently used for HTTP/1, only for HTTP/2.
- fn poll_trailers(
- self: Pin<&mut Self>,
- _cx: &mut task::Context<'_>,
- ) -> Poll<Result<Option<HeaderMap>, Self::Error>> {
- Poll::Ready(Ok(None))
- }
-
- /// A hint that the `Body` is complete, and doesn't need to be polled more.
- ///
- /// This can be useful to determine if the there is any body or trailers
- /// without having to poll. An empty `Body` could return `true` and hyper
- /// would be able to know that only the headers need to be sent. Or, it can
- /// also be checked after each `poll_data` call, to allow hyper to try to
- /// end the underlying stream with the last chunk, instead of needing to
- /// send an extra `DATA` frame just to mark the stream as finished.
- ///
- /// As a hint, it is used to try to optimize, and thus is OK for a default
- /// implementation to return `false`.
- fn is_end_stream(&self) -> bool {
- false
- }
-
- /// Returns a `SizeHint` providing an upper and lower bound on the possible size.
- ///
- /// If there is an exact size of bytes known, this would allow hyper to
- /// send a `Content-Length` header automatically, not needing to fall back to
- /// `TransferEncoding: chunked`.
- ///
- /// This does not need to be kept updated after polls, it will only be
- /// called once to create the headers.
- fn size_hint(&self) -> SizeHint {
- SizeHint::default()
- }
-}
-
-impl<T> Payload for T
-where
- T: HttpBody + Send + 'static,
- T::Data: Send,
- T::Error: Into<Box<dyn StdError + Send + Sync>>,
-{
- type Data = T::Data;
- type Error = T::Error;
-
- fn poll_data(
- self: Pin<&mut Self>,
- cx: &mut task::Context<'_>,
- ) -> Poll<Option<Result<Self::Data, Self::Error>>> {
- HttpBody::poll_data(self, cx)
- }
-
- fn poll_trailers(
- self: Pin<&mut Self>,
- cx: &mut task::Context<'_>,
- ) -> Poll<Result<Option<HeaderMap>, Self::Error>> {
- HttpBody::poll_trailers(self, cx)
- }
-
- fn is_end_stream(&self) -> bool {
- HttpBody::is_end_stream(self)
- }
-
- fn size_hint(&self) -> SizeHint {
- HttpBody::size_hint(self)
- }
-}
-
-impl<T> sealed::Sealed for T
-where
- T: HttpBody + Send + 'static,
- T::Data: Send,
- T::Error: Into<Box<dyn StdError + Send + Sync>>,
-{
-}
-
-mod sealed {
- pub trait Sealed {}
-}
-
-/*
-impl<E: Payload> Payload for Box<E> {
- type Data = E::Data;
- type Error = E::Error;
-
- fn poll_data(&mut self) -> Poll<Option<Self::Data>, Self::Error> {
- (**self).poll_data()
- }
-
- fn poll_trailers(&mut self) -> Poll<Option<HeaderMap>, Self::Error> {
- (**self).poll_trailers()
- }
-
- fn is_end_stream(&self) -> bool {
- (**self).is_end_stream()
- }
-
- fn content_length(&self) -> Option<u64> {
- (**self).content_length()
- }
-
- #[doc(hidden)]
- fn __hyper_full_data(&mut self, arg: FullDataArg) -> FullDataRet<Self::Data> {
- (**self).__hyper_full_data(arg)
- }
-}
-*/
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -8,6 +8,7 @@
//! If don't have need to manage connections yourself, consider using the
//! higher-level [Client](super) API.
+use std::error::Error as StdError;
use std::fmt;
use std::mem;
use std::sync::Arc;
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -21,7 +22,7 @@ use tokio::io::{AsyncRead, AsyncWrite};
use tower_service::Service;
use super::dispatch;
-use crate::body::Payload;
+use crate::body::HttpBody;
use crate::common::{task, BoxSendFuture, Exec, Executor, Future, Pin, Poll};
use crate::proto;
use crate::upgrade::Upgraded;
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -32,7 +33,7 @@ type Http1Dispatcher<T, B, R> = proto::dispatch::Dispatcher<proto::dispatch::Cli
#[pin_project]
enum ProtoClient<T, B>
where
- B: Payload,
+ B: HttpBody,
{
H1(#[pin] Http1Dispatcher<T, B, proto::h1::ClientTransaction>),
H2(#[pin] proto::h2::ClientTask<B>),
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -63,7 +64,7 @@ pub struct SendRequest<B> {
pub struct Connection<T, B>
where
T: AsyncRead + AsyncWrite + Send + 'static,
- B: Payload + 'static,
+ B: HttpBody + 'static,
{
inner: Option<ProtoClient<T, B>>,
}
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -160,7 +161,7 @@ impl<B> SendRequest<B> {
impl<B> SendRequest<B>
where
- B: Payload + 'static,
+ B: HttpBody + 'static,
{
/// Sends a `Request` on the associated connection.
///
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -245,7 +246,7 @@ where
impl<B> Service<Request<B>> for SendRequest<B>
where
- B: Payload + 'static,
+ B: HttpBody + 'static,
{
type Response = Response<Body>;
type Error = crate::Error;
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -280,7 +281,7 @@ impl<B> Http2SendRequest<B> {
impl<B> Http2SendRequest<B>
where
- B: Payload + 'static,
+ B: HttpBody + 'static,
{
pub(super) fn send_request_retryable(
&mut self,
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -328,7 +329,9 @@ impl<B> Clone for Http2SendRequest<B> {
impl<T, B> Connection<T, B>
where
T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
- B: Payload + Unpin + 'static,
+ B: HttpBody + Unpin + Send + 'static,
+ B::Data: Send,
+ B::Error: Into<Box<dyn StdError + Send + Sync>>,
{
/// Return the inner IO object, and additional information.
///
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -380,7 +383,9 @@ where
impl<T, B> Future for Connection<T, B>
where
T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
- B: Payload + 'static,
+ B: HttpBody + Send + 'static,
+ B::Data: Send,
+ B::Error: Into<Box<dyn StdError + Send + Sync>>,
{
type Output = crate::Result<()>;
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -404,7 +409,7 @@ where
impl<T, B> fmt::Debug for Connection<T, B>
where
T: AsyncRead + AsyncWrite + fmt::Debug + Send + 'static,
- B: Payload + 'static,
+ B: HttpBody + 'static,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Connection").finish()
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -580,7 +585,9 @@ impl Builder {
) -> impl Future<Output = crate::Result<(SendRequest<B>, Connection<T, B>)>>
where
T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
- B: Payload + 'static,
+ B: HttpBody + 'static,
+ B::Data: Send,
+ B::Error: Into<Box<dyn StdError + Send + Sync>>,
{
let opts = self.clone();
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -652,7 +659,9 @@ impl fmt::Debug for ResponseFuture {
impl<T, B> Future for ProtoClient<T, B>
where
T: AsyncRead + AsyncWrite + Send + Unpin + 'static,
- B: Payload + 'static,
+ B: HttpBody + Send + 'static,
+ B::Data: Send,
+ B::Error: Into<Box<dyn StdError + Send + Sync>>,
{
type Output = crate::Result<proto::Dispatched>;
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -678,7 +687,8 @@ impl<B: Send> AssertSendSync for SendRequest<B> {}
impl<T: Send, B: Send> AssertSend for Connection<T, B>
where
T: AsyncRead + AsyncWrite + Send + 'static,
- B: Payload + 'static,
+ B: HttpBody + 'static,
+ B::Data: Send,
{
}
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -686,7 +696,7 @@ where
impl<T: Send + Sync, B: Send + Sync> AssertSendSync for Connection<T, B>
where
T: AsyncRead + AsyncWrite + Send + 'static,
- B: Payload + 'static,
+ B: HttpBody + 'static,
B::Data: Send + Sync + 'static,
{
}
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -48,6 +48,7 @@
//! # fn main () {}
//! ```
+use std::error::Error as StdError;
use std::fmt;
use std::mem;
use std::time::Duration;
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -60,7 +61,7 @@ use http::{Method, Request, Response, Uri, Version};
use self::connect::{sealed::Connect, Alpn, Connected, Connection};
use self::pool::{Key as PoolKey, Pool, Poolable, Pooled, Reservation};
-use crate::body::{Body, Payload};
+use crate::body::{Body, HttpBody};
use crate::common::{lazy as hyper_lazy, task, BoxSendFuture, Executor, Future, Lazy, Pin, Poll};
#[cfg(feature = "tcp")]
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -150,16 +151,17 @@ impl Client<(), Body> {
impl<C, B> Client<C, B>
where
C: Connect + Clone + Send + Sync + 'static,
- B: Payload + Send + 'static,
+ B: HttpBody + Send + 'static,
B::Data: Send,
+ B::Error: Into<Box<dyn StdError + Send + Sync>>,
{
/// Send a `GET` request to the supplied `Uri`.
///
/// # Note
///
- /// This requires that the `Payload` type have a `Default` implementation.
+ /// This requires that the `HttpBody` type have a `Default` implementation.
/// It *should* return an "empty" version of itself, such that
- /// `Payload::is_end_stream` is `true`.
+ /// `HttpBody::is_end_stream` is `true`.
///
/// # Example
///
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -180,7 +182,7 @@ where
{
let body = B::default();
if !body.is_end_stream() {
- warn!("default Payload used for get() does not return true for is_end_stream");
+ warn!("default HttpBody used for get() does not return true for is_end_stream");
}
let mut req = Request::new(body);
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -543,8 +545,9 @@ where
impl<C, B> tower_service::Service<Request<B>> for Client<C, B>
where
C: Connect + Clone + Send + Sync + 'static,
- B: Payload + Send + 'static,
+ B: HttpBody + Send + 'static,
B::Data: Send,
+ B::Error: Into<Box<dyn StdError + Send + Sync>>,
{
type Response = Response<Body>;
type Error = crate::Error;
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -653,7 +656,7 @@ impl<B> PoolClient<B> {
}
}
-impl<B: Payload + 'static> PoolClient<B> {
+impl<B: HttpBody + 'static> PoolClient<B> {
fn send_request_retryable(
&mut self,
req: Request<B>,
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -1132,7 +1135,7 @@ impl Builder {
#[cfg(feature = "tcp")]
pub fn build_http<B>(&self) -> Client<HttpConnector, B>
where
- B: Payload + Send,
+ B: HttpBody + Send,
B::Data: Send,
{
let mut connector = HttpConnector::new();
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -1146,7 +1149,7 @@ impl Builder {
pub fn build<C, B>(&self, connector: C) -> Client<C, B>
where
C: Connect + Clone,
- B: Payload + Send,
+ B: HttpBody + Send,
B::Data: Send,
{
Client {
diff --git a/src/client/service.rs b/src/client/service.rs
--- a/src/client/service.rs
+++ b/src/client/service.rs
@@ -8,7 +8,7 @@ use std::marker::PhantomData;
use super::conn::{Builder, SendRequest};
use crate::{
- body::Payload,
+ body::HttpBody,
common::{task, Pin, Poll},
service::{MakeConnection, Service},
};
diff --git a/src/client/service.rs b/src/client/service.rs
--- a/src/client/service.rs
+++ b/src/client/service.rs
@@ -43,8 +43,9 @@ where
C::Connection: Unpin + Send + 'static,
C::Future: Send + 'static,
C::Error: Into<Box<dyn StdError + Send + Sync>> + Send,
- B: Payload + Unpin + 'static,
- B::Data: Unpin,
+ B: HttpBody + Unpin + Send + 'static,
+ B::Data: Send + Unpin,
+ B::Error: Into<Box<dyn StdError + Send + Sync>>,
{
type Response = SendRequest<B>;
type Error = crate::Error;
diff --git a/src/common/exec.rs b/src/common/exec.rs
--- a/src/common/exec.rs
+++ b/src/common/exec.rs
@@ -3,7 +3,7 @@ use std::future::Future;
use std::pin::Pin;
use std::sync::Arc;
-use crate::body::{Body, Payload};
+use crate::body::{Body, HttpBody};
use crate::proto::h2::server::H2Stream;
use crate::server::conn::spawn_all::{NewSvcTask, Watcher};
use crate::service::HttpService;
diff --git a/src/common/exec.rs b/src/common/exec.rs
--- a/src/common/exec.rs
+++ b/src/common/exec.rs
@@ -14,7 +14,7 @@ pub trait Executor<Fut> {
fn execute(&self, fut: Fut);
}
-pub trait H2Exec<F, B: Payload>: Clone {
+pub trait H2Exec<F, B: HttpBody>: Clone {
fn execute_h2stream(&mut self, fut: H2Stream<F, B>);
}
diff --git a/src/common/exec.rs b/src/common/exec.rs
--- a/src/common/exec.rs
+++ b/src/common/exec.rs
@@ -67,7 +67,7 @@ impl fmt::Debug for Exec {
impl<F, B> H2Exec<F, B> for Exec
where
H2Stream<F, B>: Future<Output = ()> + Send + 'static,
- B: Payload,
+ B: HttpBody,
{
fn execute_h2stream(&mut self, fut: H2Stream<F, B>) {
self.execute(fut)
diff --git a/src/common/exec.rs b/src/common/exec.rs
--- a/src/common/exec.rs
+++ b/src/common/exec.rs
@@ -91,7 +91,7 @@ impl<E, F, B> H2Exec<F, B> for E
where
E: Executor<H2Stream<F, B>> + Clone,
H2Stream<F, B>: Future<Output = ()>,
- B: Payload,
+ B: HttpBody,
{
fn execute_h2stream(&mut self, fut: H2Stream<F, B>) {
self.execute(fut)
diff --git a/src/error.rs b/src/error.rs
--- a/src/error.rs
+++ b/src/error.rs
@@ -65,7 +65,7 @@ pub(crate) enum Parse {
#[derive(Debug, PartialEq)]
pub(crate) enum User {
- /// Error calling user's Payload::poll_data().
+ /// Error calling user's HttpBody::poll_data().
Body,
/// Error calling user's MakeService.
MakeService,
diff --git a/src/error.rs b/src/error.rs
--- a/src/error.rs
+++ b/src/error.rs
@@ -316,7 +316,7 @@ impl Error {
Kind::Http2 => "http2 error",
Kind::Io => "connection error",
- Kind::User(User::Body) => "error from user's Payload stream",
+ Kind::User(User::Body) => "error from user's HttpBody stream",
Kind::User(User::MakeService) => "error from user's MakeService",
Kind::User(User::Service) => "error from user's Service",
Kind::User(User::UnexpectedHeader) => "user sent unexpected header",
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -5,7 +5,7 @@ use http::{Request, Response, StatusCode};
use tokio::io::{AsyncRead, AsyncWrite};
use super::{Http1Transaction, Wants};
-use crate::body::{Body, Payload};
+use crate::body::{Body, HttpBody};
use crate::common::{task, Future, Never, Pin, Poll, Unpin};
use crate::proto::{
BodyLength, Conn, DecodedLength, Dispatched, MessageHead, RequestHead, RequestLine,
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -13,7 +13,7 @@ use crate::proto::{
};
use crate::service::HttpService;
-pub(crate) struct Dispatcher<D, Bs: Payload, I, T> {
+pub(crate) struct Dispatcher<D, Bs: HttpBody, I, T> {
conn: Conn<I, Bs::Data, T>,
dispatch: D,
body_tx: Option<crate::body::Sender>,
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -58,7 +58,8 @@ where
D::PollError: Into<Box<dyn StdError + Send + Sync>>,
I: AsyncRead + AsyncWrite + Unpin,
T: Http1Transaction + Unpin,
- Bs: Payload,
+ Bs: HttpBody + 'static,
+ Bs::Error: Into<Box<dyn StdError + Send + Sync>>,
{
pub fn new(dispatch: D, conn: Conn<I, Bs::Data, T>) -> Self {
Dispatcher {
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -400,7 +401,8 @@ where
D::PollError: Into<Box<dyn StdError + Send + Sync>>,
I: AsyncRead + AsyncWrite + Unpin,
T: Http1Transaction + Unpin,
- Bs: Payload,
+ Bs: HttpBody + 'static,
+ Bs::Error: Into<Box<dyn StdError + Send + Sync>>,
{
type Output = crate::Result<Dispatched>;
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -459,7 +461,7 @@ impl<S, Bs> Dispatch for Server<S, Body>
where
S: HttpService<Body, ResBody = Bs>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
- Bs: Payload,
+ Bs: HttpBody,
{
type PollItem = MessageHead<StatusCode>;
type PollBody = Bs;
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -530,7 +532,7 @@ impl<B> Client<B> {
impl<B> Dispatch for Client<B>
where
- B: Payload,
+ B: HttpBody,
{
type PollItem = RequestHead;
type PollBody = B;
diff --git a/src/proto/h1/encode.rs b/src/proto/h1/encode.rs
--- a/src/proto/h1/encode.rs
+++ b/src/proto/h1/encode.rs
@@ -170,7 +170,7 @@ impl Encoder {
/// Encodes the full body, without verifying the remaining length matches.
///
- /// This is used in conjunction with Payload::__hyper_full_data(), which
+ /// This is used in conjunction with HttpBody::__hyper_full_data(), which
/// means we can trust that the buf has the correct size (the buf itself
/// was checked to make the headers).
pub(super) fn danger_full_buf<B>(self, msg: B, dst: &mut WriteBuf<EncodedBuf<B>>)
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -359,7 +359,7 @@ impl Http1Transaction for Server {
}
match msg.body {
Some(BodyLength::Known(known_len)) => {
- // The Payload claims to know a length, and
+ // The HttpBody claims to know a length, and
// the headers are already set. For performance
// reasons, we are just going to trust that
// the values match.
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -388,7 +388,7 @@ impl Http1Transaction for Server {
continue 'headers;
}
Some(BodyLength::Unknown) => {
- // The Payload impl didn't know how long the
+ // The HttpBody impl didn't know how long the
// body is, but a length header was included.
// We have to parse the value to return our
// Encoder...
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -825,7 +825,7 @@ impl Client {
let headers = &mut head.headers;
// If the user already set specific headers, we should respect them, regardless
- // of what the Payload knows about itself. They set them for a reason.
+ // of what the HttpBody knows about itself. They set them for a reason.
// Because of the borrow checker, we can't check the for an existing
// Content-Length header while holding an `Entry` for the Transfer-Encoding
diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs
--- a/src/proto/h2/client.rs
+++ b/src/proto/h2/client.rs
@@ -1,3 +1,4 @@
+use std::error::Error as StdError;
#[cfg(feature = "runtime")]
use std::time::Duration;
diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs
--- a/src/proto/h2/client.rs
+++ b/src/proto/h2/client.rs
@@ -8,7 +9,7 @@ use h2::client::{Builder, SendRequest};
use tokio::io::{AsyncRead, AsyncWrite};
use super::{decode_content_length, ping, PipeToSendStream, SendBuf};
-use crate::body::Payload;
+use crate::body::HttpBody;
use crate::common::{task, Exec, Future, Never, Pin, Poll};
use crate::headers;
use crate::proto::Dispatched;
diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs
--- a/src/proto/h2/client.rs
+++ b/src/proto/h2/client.rs
@@ -67,7 +68,8 @@ pub(crate) async fn handshake<T, B>(
) -> crate::Result<ClientTask<B>>
where
T: AsyncRead + AsyncWrite + Send + Unpin + 'static,
- B: Payload,
+ B: HttpBody,
+ B::Data: Send + 'static,
{
let (h2_tx, mut conn) = Builder::default()
.initial_window_size(config.initial_stream_window_size)
diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs
--- a/src/proto/h2/client.rs
+++ b/src/proto/h2/client.rs
@@ -167,7 +169,7 @@ where
pub(crate) struct ClientTask<B>
where
- B: Payload,
+ B: HttpBody,
{
ping: ping::Recorder,
conn_drop_ref: ConnDropRef,
diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs
--- a/src/proto/h2/client.rs
+++ b/src/proto/h2/client.rs
@@ -179,7 +181,9 @@ where
impl<B> Future for ClientTask<B>
where
- B: Payload + 'static,
+ B: HttpBody + Send + 'static,
+ B::Data: Send,
+ B::Error: Into<Box<dyn StdError + Send + Sync>>,
{
type Output = crate::Result<Dispatched>;
diff --git a/src/proto/h2/mod.rs b/src/proto/h2/mod.rs
--- a/src/proto/h2/mod.rs
+++ b/src/proto/h2/mod.rs
@@ -6,9 +6,10 @@ use http::header::{
};
use http::HeaderMap;
use pin_project::pin_project;
+use std::error::Error as StdError;
use super::DecodedLength;
-use crate::body::Payload;
+use crate::body::HttpBody;
use crate::common::{task, Future, Pin, Poll};
use crate::headers::content_length_parse_all;
diff --git a/src/proto/h2/mod.rs b/src/proto/h2/mod.rs
--- a/src/proto/h2/mod.rs
+++ b/src/proto/h2/mod.rs
@@ -91,7 +92,7 @@ fn decode_content_length(headers: &HeaderMap) -> DecodedLength {
#[pin_project]
struct PipeToSendStream<S>
where
- S: Payload,
+ S: HttpBody,
{
body_tx: SendStream<SendBuf<S::Data>>,
data_done: bool,
diff --git a/src/proto/h2/mod.rs b/src/proto/h2/mod.rs
--- a/src/proto/h2/mod.rs
+++ b/src/proto/h2/mod.rs
@@ -101,7 +102,7 @@ where
impl<S> PipeToSendStream<S>
where
- S: Payload,
+ S: HttpBody,
{
fn new(stream: S, tx: SendStream<SendBuf<S::Data>>) -> PipeToSendStream<S> {
PipeToSendStream {
diff --git a/src/proto/h2/mod.rs b/src/proto/h2/mod.rs
--- a/src/proto/h2/mod.rs
+++ b/src/proto/h2/mod.rs
@@ -114,7 +115,8 @@ where
impl<S> Future for PipeToSendStream<S>
where
- S: Payload,
+ S: HttpBody,
+ S::Error: Into<Box<dyn StdError + Send + Sync>>,
{
type Output = crate::Result<()>;
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -9,7 +9,7 @@ use pin_project::{pin_project, project};
use tokio::io::{AsyncRead, AsyncWrite};
use super::{decode_content_length, ping, PipeToSendStream, SendBuf};
-use crate::body::Payload;
+use crate::body::HttpBody;
use crate::common::exec::H2Exec;
use crate::common::{task, Future, Pin, Poll};
use crate::headers;
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -58,7 +58,7 @@ impl Default for Config {
pub(crate) struct Server<T, S, B, E>
where
S: HttpService<Body>,
- B: Payload,
+ B: HttpBody,
{
exec: E,
service: S,
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -67,7 +67,7 @@ where
enum State<T, B>
where
- B: Payload,
+ B: HttpBody,
{
Handshaking {
ping_config: ping::Config,
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -79,7 +79,7 @@ where
struct Serving<T, B>
where
- B: Payload,
+ B: HttpBody,
{
ping: Option<(ping::Recorder, ping::Ponger)>,
conn: Connection<T, SendBuf<B::Data>>,
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -91,7 +91,7 @@ where
T: AsyncRead + AsyncWrite + Unpin,
S: HttpService<Body, ResBody = B>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
- B: Payload,
+ B: HttpBody + 'static,
E: H2Exec<S::Future, B>,
{
pub(crate) fn new(io: T, service: S, config: &Config, exec: E) -> Server<T, S, B, E> {
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -157,7 +157,7 @@ where
T: AsyncRead + AsyncWrite + Unpin,
S: HttpService<Body, ResBody = B>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
- B: Payload,
+ B: HttpBody + 'static,
E: H2Exec<S::Future, B>,
{
type Output = crate::Result<Dispatched>;
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -201,7 +201,7 @@ where
impl<T, B> Serving<T, B>
where
T: AsyncRead + AsyncWrite + Unpin,
- B: Payload,
+ B: HttpBody + 'static,
{
fn poll_server<S, E>(
&mut self,
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -315,7 +315,7 @@ where
#[pin_project]
pub struct H2Stream<F, B>
where
- B: Payload,
+ B: HttpBody,
{
reply: SendResponse<SendBuf<B::Data>>,
#[pin]
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -325,7 +325,7 @@ where
#[pin_project]
enum H2StreamState<F, B>
where
- B: Payload,
+ B: HttpBody,
{
Service(#[pin] F),
Body(#[pin] PipeToSendStream<B>),
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -333,7 +333,7 @@ where
impl<F, B> H2Stream<F, B>
where
- B: Payload,
+ B: HttpBody,
{
fn new(fut: F, respond: SendResponse<SendBuf<B::Data>>) -> H2Stream<F, B> {
H2Stream {
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -359,7 +359,8 @@ macro_rules! reply {
impl<F, B, E> H2Stream<F, B>
where
F: Future<Output = Result<Response<B>, E>>,
- B: Payload,
+ B: HttpBody,
+ B::Error: Into<Box<dyn StdError + Send + Sync>>,
E: Into<Box<dyn StdError + Send + Sync>>,
{
#[project]
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -424,7 +425,8 @@ where
impl<F, B, E> Future for H2Stream<F, B>
where
F: Future<Output = Result<Response<B>, E>>,
- B: Payload,
+ B: HttpBody,
+ B::Error: Into<Box<dyn StdError + Send + Sync>>,
E: Into<Box<dyn StdError + Send + Sync>>,
{
type Output = ();
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -21,7 +21,7 @@ use pin_project::{pin_project, project};
use tokio::io::{AsyncRead, AsyncWrite};
use super::Accept;
-use crate::body::{Body, Payload};
+use crate::body::{Body, HttpBody};
use crate::common::exec::{Exec, H2Exec, NewSvcExec};
use crate::common::io::Rewind;
use crate::common::{task, Future, Pin, Poll, Unpin};
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -122,7 +122,7 @@ where
pub(super) enum ProtoServer<T, B, S, E = Exec>
where
S: HttpService<Body>,
- B: Payload,
+ B: HttpBody,
{
H1(
#[pin]
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -429,7 +429,8 @@ impl<E> Http<E> {
where
S: HttpService<Body, ResBody = Bd>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
- Bd: Payload,
+ Bd: HttpBody + 'static,
+ Bd::Error: Into<Box<dyn StdError + Send + Sync>>,
I: AsyncRead + AsyncWrite + Unpin,
E: H2Exec<S::Future, Bd>,
{
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -477,7 +478,7 @@ impl<E> Http<E> {
IO: AsyncRead + AsyncWrite + Unpin,
S: MakeServiceRef<IO, Body, ResBody = Bd>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
- Bd: Payload,
+ Bd: HttpBody,
E: H2Exec<<S::Service as HttpService<Body>>::Future, Bd>,
{
Serve {
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -495,7 +496,8 @@ where
S: HttpService<Body, ResBody = B>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
I: AsyncRead + AsyncWrite + Unpin,
- B: Payload + 'static,
+ B: HttpBody + 'static,
+ B::Error: Into<Box<dyn StdError + Send + Sync>>,
E: H2Exec<S::Future, B>,
{
/// Start a graceful shutdown process for this connection.
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -640,7 +642,8 @@ where
S: HttpService<Body, ResBody = B>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
I: AsyncRead + AsyncWrite + Unpin + 'static,
- B: Payload + 'static,
+ B: HttpBody + 'static,
+ B::Error: Into<Box<dyn StdError + Send + Sync>>,
E: H2Exec<S::Future, B>,
{
type Output = crate::Result<()>;
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -707,7 +710,7 @@ where
IO: AsyncRead + AsyncWrite + Unpin,
IE: Into<Box<dyn StdError + Send + Sync>>,
S: MakeServiceRef<IO, Body, ResBody = B>,
- B: Payload,
+ B: HttpBody,
E: H2Exec<<S::Service as HttpService<Body>>::Future, B>,
{
fn poll_next_(
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -744,7 +747,8 @@ where
I: AsyncRead + AsyncWrite + Unpin,
F: Future<Output = Result<S, FE>>,
S: HttpService<Body, ResBody = B>,
- B: Payload,
+ B: HttpBody + 'static,
+ B::Error: Into<Box<dyn StdError + Send + Sync>>,
E: H2Exec<S::Future, B>,
{
type Output = Result<Connection<I, S, E>, FE>;
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -778,7 +782,7 @@ where
IE: Into<Box<dyn StdError + Send + Sync>>,
IO: AsyncRead + AsyncWrite + Unpin + Send + 'static,
S: MakeServiceRef<IO, Body, ResBody = B>,
- B: Payload,
+ B: HttpBody,
E: H2Exec<<S::Service as HttpService<Body>>::Future, B>,
{
pub(super) fn poll_watch<W>(
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -814,7 +818,8 @@ where
T: AsyncRead + AsyncWrite + Unpin,
S: HttpService<Body, ResBody = B>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
- B: Payload,
+ B: HttpBody + 'static,
+ B::Error: Into<Box<dyn StdError + Send + Sync>>,
E: H2Exec<S::Future, B>,
{
type Output = crate::Result<proto::Dispatched>;
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -834,7 +839,7 @@ pub(crate) mod spawn_all {
use tokio::io::{AsyncRead, AsyncWrite};
use super::{Connecting, UpgradeableConnection};
- use crate::body::{Body, Payload};
+ use crate::body::{Body, HttpBody};
use crate::common::exec::H2Exec;
use crate::common::{task, Future, Pin, Poll, Unpin};
use crate::service::HttpService;
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -863,6 +868,8 @@ pub(crate) mod spawn_all {
I: AsyncRead + AsyncWrite + Unpin + Send + 'static,
S: HttpService<Body>,
E: H2Exec<S::Future, S::ResBody>,
+ S::ResBody: 'static,
+ <S::ResBody as HttpBody>::Error: Into<Box<dyn StdError + Send + Sync>>,
{
type Future = UpgradeableConnection<I, S, E>;
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -908,7 +915,8 @@ pub(crate) mod spawn_all {
N: Future<Output = Result<S, NE>>,
NE: Into<Box<dyn StdError + Send + Sync>>,
S: HttpService<Body, ResBody = B>,
- B: Payload,
+ B: HttpBody + 'static,
+ B::Error: Into<Box<dyn StdError + Send + Sync>>,
E: H2Exec<S::Future, B>,
W: Watcher<I, S, E>,
{
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -975,7 +983,8 @@ mod upgrades {
S: HttpService<Body, ResBody = B>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
I: AsyncRead + AsyncWrite + Unpin,
- B: Payload + 'static,
+ B: HttpBody + 'static,
+ B::Error: Into<Box<dyn StdError + Send + Sync>>,
E: H2Exec<S::Future, B>,
{
/// Start a graceful shutdown process for this connection.
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -992,7 +1001,8 @@ mod upgrades {
S: HttpService<Body, ResBody = B>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
I: AsyncRead + AsyncWrite + Unpin + Send + 'static,
- B: Payload + 'static,
+ B: HttpBody + 'static,
+ B::Error: Into<Box<dyn StdError + Send + Sync>>,
E: super::H2Exec<S::Future, B>,
{
type Output = crate::Result<()>;
diff --git a/src/server/mod.rs b/src/server/mod.rs
--- a/src/server/mod.rs
+++ b/src/server/mod.rs
@@ -69,7 +69,7 @@ use pin_project::pin_project;
use tokio::io::{AsyncRead, AsyncWrite};
use self::accept::Accept;
-use crate::body::{Body, Payload};
+use crate::body::{Body, HttpBody};
use crate::common::exec::{Exec, H2Exec, NewSvcExec};
use crate::common::{task, Future, Pin, Poll, Unpin};
use crate::service::{HttpService, MakeServiceRef};
diff --git a/src/server/mod.rs b/src/server/mod.rs
--- a/src/server/mod.rs
+++ b/src/server/mod.rs
@@ -152,7 +152,8 @@ where
IO: AsyncRead + AsyncWrite + Unpin + Send + 'static,
S: MakeServiceRef<IO, Body, ResBody = B>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
- B: Payload,
+ B: HttpBody + Send + Sync + 'static,
+ B::Error: Into<Box<dyn StdError + Send + Sync>>,
E: H2Exec<<S::Service as HttpService<Body>>::Future, B>,
E: NewSvcExec<IO, S::Future, S::Service, E, GracefulWatcher>,
{
diff --git a/src/server/mod.rs b/src/server/mod.rs
--- a/src/server/mod.rs
+++ b/src/server/mod.rs
@@ -207,7 +208,8 @@ where
IO: AsyncRead + AsyncWrite + Unpin + Send + 'static,
S: MakeServiceRef<IO, Body, ResBody = B>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
- B: Payload,
+ B: HttpBody + 'static,
+ B::Error: Into<Box<dyn StdError + Send + Sync>>,
E: H2Exec<<S::Service as HttpService<Body>>::Future, B>,
E: NewSvcExec<IO, S::Future, S::Service, E, NoopWatcher>,
{
diff --git a/src/server/mod.rs b/src/server/mod.rs
--- a/src/server/mod.rs
+++ b/src/server/mod.rs
@@ -430,7 +432,8 @@ impl<I, E> Builder<I, E> {
I::Conn: AsyncRead + AsyncWrite + Unpin + Send + 'static,
S: MakeServiceRef<I::Conn, Body, ResBody = B>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
- B: Payload,
+ B: HttpBody + 'static,
+ B::Error: Into<Box<dyn StdError + Send + Sync>>,
E: NewSvcExec<I::Conn, S::Future, S::Service, E, NoopWatcher>,
E: H2Exec<<S::Service as HttpService<Body>>::Future, B>,
{
diff --git a/src/server/shutdown.rs b/src/server/shutdown.rs
--- a/src/server/shutdown.rs
+++ b/src/server/shutdown.rs
@@ -5,7 +5,7 @@ use tokio::io::{AsyncRead, AsyncWrite};
use super::conn::{SpawnAll, UpgradeableConnection, Watcher};
use super::Accept;
-use crate::body::{Body, Payload};
+use crate::body::{Body, HttpBody};
use crate::common::drain::{self, Draining, Signal, Watch, Watching};
use crate::common::exec::{H2Exec, NewSvcExec};
use crate::common::{task, Future, Pin, Poll, Unpin};
diff --git a/src/server/shutdown.rs b/src/server/shutdown.rs
--- a/src/server/shutdown.rs
+++ b/src/server/shutdown.rs
@@ -50,7 +50,8 @@ where
IO: AsyncRead + AsyncWrite + Unpin + Send + 'static,
S: MakeServiceRef<IO, Body, ResBody = B>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
- B: Payload,
+ B: HttpBody + Send + Sync + 'static,
+ B::Error: Into<Box<dyn StdError + Send + Sync>>,
F: Future<Output = ()>,
E: H2Exec<<S::Service as HttpService<Body>>::Future, B>,
E: NewSvcExec<IO, S::Future, S::Service, E, GracefulWatcher>,
diff --git a/src/server/shutdown.rs b/src/server/shutdown.rs
--- a/src/server/shutdown.rs
+++ b/src/server/shutdown.rs
@@ -98,6 +99,8 @@ where
I: AsyncRead + AsyncWrite + Unpin + Send + 'static,
S: HttpService<Body>,
E: H2Exec<S::Future, S::ResBody>,
+ S::ResBody: Send + Sync + 'static,
+ <S::ResBody as HttpBody>::Error: Into<Box<dyn StdError + Send + Sync>>,
{
type Future =
Watching<UpgradeableConnection<I, S, E>, fn(Pin<&mut UpgradeableConnection<I, S, E>>)>;
diff --git a/src/server/shutdown.rs b/src/server/shutdown.rs
--- a/src/server/shutdown.rs
+++ b/src/server/shutdown.rs
@@ -112,7 +115,8 @@ where
S: HttpService<Body>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
I: AsyncRead + AsyncWrite + Unpin,
- S::ResBody: Payload + 'static,
+ S::ResBody: HttpBody + Send + 'static,
+ <S::ResBody as HttpBody>::Error: Into<Box<dyn StdError + Send + Sync>>,
E: H2Exec<S::Future, S::ResBody>,
{
conn.graceful_shutdown()
diff --git a/src/service/http.rs b/src/service/http.rs
--- a/src/service/http.rs
+++ b/src/service/http.rs
@@ -1,13 +1,13 @@
use std::error::Error as StdError;
-use crate::body::Payload;
+use crate::body::HttpBody;
use crate::common::{task, Future, Poll};
use crate::{Request, Response};
/// An asynchronous function from `Request` to `Response`.
pub trait HttpService<ReqBody>: sealed::Sealed<ReqBody> {
- /// The `Payload` body of the `http::Response`.
- type ResBody: Payload;
+ /// The `HttpBody` body of the `http::Response`.
+ type ResBody: HttpBody;
/// The error type that can occur within this `Service`.
///
diff --git a/src/service/http.rs b/src/service/http.rs
--- a/src/service/http.rs
+++ b/src/service/http.rs
@@ -29,7 +29,7 @@ pub trait HttpService<ReqBody>: sealed::Sealed<ReqBody> {
impl<T, B1, B2> HttpService<B1> for T
where
T: tower_service::Service<Request<B1>, Response = Response<B2>>,
- B2: Payload,
+ B2: HttpBody,
T::Error: Into<Box<dyn StdError + Send + Sync>>,
{
type ResBody = B2;
diff --git a/src/service/http.rs b/src/service/http.rs
--- a/src/service/http.rs
+++ b/src/service/http.rs
@@ -49,7 +49,7 @@ where
impl<T, B1, B2> sealed::Sealed<B1> for T
where
T: tower_service::Service<Request<B1>, Response = Response<B2>>,
- B2: Payload,
+ B2: HttpBody,
{
}
diff --git a/src/service/make.rs b/src/service/make.rs
--- a/src/service/make.rs
+++ b/src/service/make.rs
@@ -4,7 +4,7 @@ use std::fmt;
use tokio::io::{AsyncRead, AsyncWrite};
use super::{HttpService, Service};
-use crate::body::Payload;
+use crate::body::HttpBody;
use crate::common::{task, Future, Poll};
// The same "trait alias" as tower::MakeConnection, but inlined to reduce
diff --git a/src/service/make.rs b/src/service/make.rs
--- a/src/service/make.rs
+++ b/src/service/make.rs
@@ -41,7 +41,7 @@ where
// Just a sort-of "trait alias" of `MakeService`, not to be implemented
// by anyone, only used as bounds.
pub trait MakeServiceRef<Target, ReqBody>: self::sealed::Sealed<(Target, ReqBody)> {
- type ResBody: Payload;
+ type ResBody: HttpBody;
type Error: Into<Box<dyn StdError + Send + Sync>>;
type Service: HttpService<ReqBody, ResBody = Self::ResBody, Error = Self::Error>;
type MakeError: Into<Box<dyn StdError + Send + Sync>>;
diff --git a/src/service/make.rs b/src/service/make.rs
--- a/src/service/make.rs
+++ b/src/service/make.rs
@@ -70,8 +70,8 @@ where
ME: Into<Box<dyn StdError + Send + Sync>>,
S: HttpService<IB, ResBody = OB, Error = E>,
F: Future<Output = Result<S, ME>>,
- IB: Payload,
- OB: Payload,
+ IB: HttpBody,
+ OB: HttpBody,
{
type Error = E;
type Service = S;
diff --git a/src/service/make.rs b/src/service/make.rs
--- a/src/service/make.rs
+++ b/src/service/make.rs
@@ -94,8 +94,8 @@ impl<T, Target, S, B1, B2> self::sealed::Sealed<(Target, B1)> for T
where
T: for<'a> Service<&'a Target, Response = S>,
S: HttpService<B1, ResBody = B2>,
- B1: Payload,
- B2: Payload,
+ B1: HttpBody,
+ B2: HttpBody,
{
}
diff --git a/src/service/util.rs b/src/service/util.rs
--- a/src/service/util.rs
+++ b/src/service/util.rs
@@ -2,7 +2,7 @@ use std::error::Error as StdError;
use std::fmt;
use std::marker::PhantomData;
-use crate::body::Payload;
+use crate::body::HttpBody;
use crate::common::{task, Future, Poll};
use crate::{Request, Response};
diff --git a/src/service/util.rs b/src/service/util.rs
--- a/src/service/util.rs
+++ b/src/service/util.rs
@@ -45,10 +45,10 @@ impl<F, ReqBody, Ret, ResBody, E> tower_service::Service<crate::Request<ReqBody>
for ServiceFn<F, ReqBody>
where
F: FnMut(Request<ReqBody>) -> Ret,
- ReqBody: Payload,
+ ReqBody: HttpBody,
Ret: Future<Output = Result<Response<ResBody>, E>>,
E: Into<Box<dyn StdError + Send + Sync>>,
- ResBody: Payload,
+ ResBody: HttpBody,
{
type Response = crate::Response<ResBody>;
type Error = E;
|
2020-05-14T20:17:18Z
| 2,202
|
refactor: use HttpBody with extra bounds instead of Payload trait
I've done most of the legwork, but I'm confused about the interaction of the error types that the compiler is complaining about at this point. If someone else wants to take a look, that'd be great.
|
hyperium__hyper-2202
|
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -258,7 +258,7 @@ mod response_body_lengths {
fn auto_response_with_unknown_length() {
run_test(TestCase {
version: 1,
- // no headers means trying to guess from Payload
+ // no headers means trying to guess from HttpBody
headers: &[],
body: Bd::Unknown("foo bar baz"),
expects_chunked: true,
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -270,7 +270,7 @@ mod response_body_lengths {
fn auto_response_with_known_length() {
run_test(TestCase {
version: 1,
- // no headers means trying to guess from Payload
+ // no headers means trying to guess from HttpBody
headers: &[],
body: Bd::Known("foo bar baz"),
expects_chunked: false,
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -282,7 +282,7 @@ mod response_body_lengths {
fn auto_response_known_empty() {
run_test(TestCase {
version: 1,
- // no headers means trying to guess from Payload
+ // no headers means trying to guess from HttpBody
headers: &[],
body: Bd::Known(""),
expects_chunked: false,
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -294,7 +294,7 @@ mod response_body_lengths {
fn http10_auto_response_with_unknown_length() {
run_test(TestCase {
version: 0,
- // no headers means trying to guess from Payload
+ // no headers means trying to guess from HttpBody
headers: &[],
body: Bd::Unknown("foo bar baz"),
expects_chunked: false,
|
hyperium/hyper
|
42560c7c40d8f934658624114fda4eb819cefda8
|
|
[
"2176"
] |
0.13
|
e08a271eb93711f7209da98ab6261851f92da235
|
diff --git a/examples/README.md b/examples/README.md
--- a/examples/README.md
+++ b/examples/README.md
@@ -39,7 +39,7 @@ pretty_env_logger = "0.4"
* [`params`](params.rs) - A webserver that accept a form, with a name and a number, checks the parameters are presents and validates the input.
-* [`send_file`](send_file.rs) - A server that sends back content of files using tokio_fs to read the files asynchronously.
+* [`send_file`](send_file.rs) - A server that sends back content of files using tokio-util to read the files asynchronously.
* [`single_threaded`](single_threaded.rs) - A server only running on 1 thread, so it can make use of `!Send` app state (like an `Rc` counter).
diff --git a/examples/send_file.rs b/examples/send_file.rs
--- a/examples/send_file.rs
+++ b/examples/send_file.rs
@@ -1,13 +1,13 @@
#![deny(warnings)]
use tokio::fs::File;
-use tokio::io::AsyncReadExt;
+
+use tokio_util::codec::{BytesCodec, FramedRead};
use hyper::service::{make_service_fn, service_fn};
use hyper::{Body, Method, Request, Response, Result, Server, StatusCode};
static INDEX: &str = "examples/send_file_index.html";
-static INTERNAL_SERVER_ERROR: &[u8] = b"Internal Server Error";
static NOTFOUND: &[u8] = b"Not Found";
#[tokio::main]
diff --git a/examples/send_file.rs b/examples/send_file.rs
--- a/examples/send_file.rs
+++ b/examples/send_file.rs
@@ -30,9 +30,7 @@ async fn main() {
async fn response_examples(req: Request<Body>) -> Result<Response<Body>> {
match (req.method(), req.uri().path()) {
- (&Method::GET, "/") | (&Method::GET, "/index.html") | (&Method::GET, "/big_file.html") => {
- simple_file_send(INDEX).await
- }
+ (&Method::GET, "/") | (&Method::GET, "/index.html") => simple_file_send(INDEX).await,
(&Method::GET, "/no_file.html") => {
// Test what happens when file cannot be be found
simple_file_send("this_file_should_not_exist.html").await
diff --git a/examples/send_file.rs b/examples/send_file.rs
--- a/examples/send_file.rs
+++ b/examples/send_file.rs
@@ -49,26 +47,13 @@ fn not_found() -> Response<Body> {
.unwrap()
}
-/// HTTP status code 500
-fn internal_server_error() -> Response<Body> {
- Response::builder()
- .status(StatusCode::INTERNAL_SERVER_ERROR)
- .body(INTERNAL_SERVER_ERROR.into())
- .unwrap()
-}
-
async fn simple_file_send(filename: &str) -> Result<Response<Body>> {
- // Serve a file by asynchronously reading it entirely into memory.
- // Uses tokio_fs to open file asynchronously, then tokio::io::AsyncReadExt
- // to read into memory asynchronously.
-
- if let Ok(mut file) = File::open(filename).await {
- let mut buf = Vec::new();
- if let Ok(_) = file.read_to_end(&mut buf).await {
- return Ok(Response::new(buf.into()));
- }
+ // Serve a file by asynchronously reading it by chunks using tokio-util crate.
- return Ok(internal_server_error());
+ if let Ok(file) = File::open(filename).await {
+ let stream = FramedRead::new(file, BytesCodec::new());
+ let body = Body::wrap_stream(stream);
+ return Ok(Response::new(body));
}
Ok(not_found())
|
I think this is because futures-fs crate uses `Future`s and `Stream`s from [futures](https://crates.io/crates/futures) crate of version 0.1
But hyper 0.13 works with futures of version 0.3, these versions are incompatible.
There is a way to convert file into stream by [tokio-util](https://crates.io/crates/tokio-util) crate. I've mentioned it [here](https://github.com/hyperium/hyper/issues/2166#issuecomment-612363623).
|
2020-04-24T08:56:16Z
| 2,193
|
Serve a file through a readable stream
I want to send a file without putting the whole file in memory, for that i use futures-fs who creat a readable stream, but i cant give to a Hyper response, with the Body::wrap_stream function:
Code:
```rust
use hyper::service::{ make_service_fn, service_fn };
use hyper::{ Body, Request, Response, Server };
use futures_fs::FsPool;
type GenericError = Box<dyn std::error::Error + Send + Sync>;
type Result<T> = std::result::Result<T, GenericError>;
async fn serve(fs: FsPool, req: Request<Body>) -> Result<Response<Body>> {
// let mut not_found = Response::default();
let stream = fs.read("/tmp/streams/hello_world.html", Default::default());
Ok(Response::new(Body::wrap_stream(stream)))
}
#[tokio::main]
async fn main() -> Result<()> {
let addr = ([127, 0, 0, 1], 3000).into();
let fs = FsPool::default();
let service = make_service_fn(|_| {
let fs = fs.clone();
async move {
Ok::<_, hyper::Error>(service_fn(move |req| {
serve(fs.clone(), req)
}))
}
});
let server = Server::bind(&addr).serve(service);
server.await?;
Ok(())
}
```
I got the following error:
```bash
error[E0277]: the trait bound `futures_fs::read::FsReadStream: futures_core::stream::Stream` is not satisfied
--> src/main.rs:11:37
|
11 | Ok(Response::new(Body::wrap_stream(stream)))
| ^^^^^^ the trait `futures_core::stream::Stream` is not implemented for `futures_fs::read::FsReadStream`
|
::: /home/tet/.cargo/registry/src/github.com-1ecc6299db9ec823/hyper-0.13.4/src/body/body.rs:159:12
|
159 | S: Stream<Item = Result<O, E>> + Send + Sync + 'static,
| --------------------------- required by this bound in `hyper::body::body::Body::wrap_stream`
error: aborting due to previous error
```
|
hyperium__hyper-2193
|
diff --git a/Cargo.toml b/Cargo.toml
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -51,6 +51,7 @@ serde_derive = "1.0"
serde_json = "1.0"
tokio = { version = "0.2.2", features = ["fs", "macros", "io-std", "rt-util", "sync", "time", "test-util"] }
tokio-test = "0.2"
+tokio-util = { version = "0.3", features = ["codec"] }
tower-util = "0.3"
url = "1.0"
diff --git a/examples/send_file_index.html b/examples/send_file_index.html
--- a/examples/send_file_index.html
+++ b/examples/send_file_index.html
@@ -3,9 +3,8 @@
<title>Hyper responding example</title>
</head>
<body>
- <h1>Hyper responding example</h1>
+ <h1>Hyper responding example, streamed in chunks</h1>
<a href="index.html">index.html</a> Top Level<br>
- <a href="big_file.html">big_file.html</a> This page, streamed in chunks<br>
<a href="no_file.html">no_file.html</a> A 404 test, the requested file does not exist<br>
</body>
</html>
|
hyperium/hyper
|
42560c7c40d8f934658624114fda4eb819cefda8
|
[
"838"
] |
0.13
|
a354580e3f7f22dfebb872df0b9de67d0e1f1840
|
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -11,7 +11,7 @@ use futures_util::TryStreamExt;
use http::HeaderMap;
use http_body::{Body as HttpBody, SizeHint};
-use crate::common::{task, Future, Never, Pin, Poll};
+use crate::common::{task, watch, Future, Never, Pin, Poll};
use crate::proto::DecodedLength;
use crate::upgrade::OnUpgrade;
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -33,7 +33,7 @@ enum Kind {
Once(Option<Bytes>),
Chan {
content_length: DecodedLength,
- abort_rx: oneshot::Receiver<()>,
+ want_tx: watch::Sender,
rx: mpsc::Receiver<Result<Bytes, crate::Error>>,
},
H2 {
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -79,12 +79,14 @@ enum DelayEof {
/// Useful when wanting to stream chunks from another thread. See
/// [`Body::channel`](Body::channel) for more.
#[must_use = "Sender does nothing unless sent on"]
-#[derive(Debug)]
pub struct Sender {
- abort_tx: oneshot::Sender<()>,
+ want_rx: watch::Receiver,
tx: BodySender,
}
+const WANT_PENDING: usize = 1;
+const WANT_READY: usize = 2;
+
impl Body {
/// Create an empty `Body` stream.
///
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -106,17 +108,22 @@ impl Body {
/// Useful when wanting to stream chunks from another thread.
#[inline]
pub fn channel() -> (Sender, Body) {
- Self::new_channel(DecodedLength::CHUNKED)
+ Self::new_channel(DecodedLength::CHUNKED, /*wanter =*/ false)
}
- pub(crate) fn new_channel(content_length: DecodedLength) -> (Sender, Body) {
+ pub(crate) fn new_channel(content_length: DecodedLength, wanter: bool) -> (Sender, Body) {
let (tx, rx) = mpsc::channel(0);
- let (abort_tx, abort_rx) = oneshot::channel();
- let tx = Sender { abort_tx, tx };
+ // If wanter is true, `Sender::poll_ready()` won't becoming ready
+ // until the `Body` has been polled for data once.
+ let want = if wanter { WANT_PENDING } else { WANT_READY };
+
+ let (want_tx, want_rx) = watch::channel(want);
+
+ let tx = Sender { want_rx, tx };
let rx = Body::new(Kind::Chan {
content_length,
- abort_rx,
+ want_tx,
rx,
});
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -236,11 +243,9 @@ impl Body {
Kind::Chan {
content_length: ref mut len,
ref mut rx,
- ref mut abort_rx,
+ ref mut want_tx,
} => {
- if let Poll::Ready(Ok(())) = Pin::new(abort_rx).poll(cx) {
- return Poll::Ready(Some(Err(crate::Error::new_body_write_aborted())));
- }
+ want_tx.send(WANT_READY);
match ready!(Pin::new(rx).poll_next(cx)?) {
Some(chunk) => {
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -460,19 +465,29 @@ impl From<Cow<'static, str>> for Body {
impl Sender {
/// Check to see if this `Sender` can send more data.
pub fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<crate::Result<()>> {
- match self.abort_tx.poll_canceled(cx) {
- Poll::Ready(()) => return Poll::Ready(Err(crate::Error::new_closed())),
- Poll::Pending => (), // fallthrough
- }
-
+ // Check if the receiver end has tried polling for the body yet
+ ready!(self.poll_want(cx)?);
self.tx
.poll_ready(cx)
.map_err(|_| crate::Error::new_closed())
}
+ fn poll_want(&mut self, cx: &mut task::Context<'_>) -> Poll<crate::Result<()>> {
+ match self.want_rx.load(cx) {
+ WANT_READY => Poll::Ready(Ok(())),
+ WANT_PENDING => Poll::Pending,
+ watch::CLOSED => Poll::Ready(Err(crate::Error::new_closed())),
+ unexpected => unreachable!("want_rx value: {}", unexpected),
+ }
+ }
+
+ async fn ready(&mut self) -> crate::Result<()> {
+ futures_util::future::poll_fn(|cx| self.poll_ready(cx)).await
+ }
+
/// Send data on this channel when it is ready.
pub async fn send_data(&mut self, chunk: Bytes) -> crate::Result<()> {
- futures_util::future::poll_fn(|cx| self.poll_ready(cx)).await?;
+ self.ready().await?;
self.tx
.try_send(Ok(chunk))
.map_err(|_| crate::Error::new_closed())
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -498,8 +513,11 @@ impl Sender {
/// Aborts the body in an abnormal fashion.
pub fn abort(self) {
- // TODO(sean): this can just be `self.tx.clone().try_send()`
- let _ = self.abort_tx.send(());
+ let _ = self
+ .tx
+ // clone so the send works even if buffer is full
+ .clone()
+ .try_send(Err(crate::Error::new_body_write_aborted()));
}
pub(crate) fn send_error(&mut self, err: crate::Error) {
diff --git a/src/common/mod.rs b/src/common/mod.rs
--- a/src/common/mod.rs
+++ b/src/common/mod.rs
@@ -14,6 +14,7 @@ pub(crate) mod io;
mod lazy;
mod never;
pub(crate) mod task;
+pub(crate) mod watch;
pub use self::exec::Executor;
pub(crate) use self::exec::{BoxSendFuture, Exec};
diff --git /dev/null b/src/common/watch.rs
new file mode 100644
--- /dev/null
+++ b/src/common/watch.rs
@@ -0,0 +1,73 @@
+//! An SPSC broadcast channel.
+//!
+//! - The value can only be a `usize`.
+//! - The consumer is only notified if the value is different.
+//! - The value `0` is reserved for closed.
+
+use futures_util::task::AtomicWaker;
+use std::sync::{
+ atomic::{AtomicUsize, Ordering},
+ Arc,
+};
+use std::task;
+
+type Value = usize;
+
+pub(crate) const CLOSED: usize = 0;
+
+pub(crate) fn channel(initial: Value) -> (Sender, Receiver) {
+ debug_assert!(
+ initial != CLOSED,
+ "watch::channel initial state of 0 is reserved"
+ );
+
+ let shared = Arc::new(Shared {
+ value: AtomicUsize::new(initial),
+ waker: AtomicWaker::new(),
+ });
+
+ (
+ Sender {
+ shared: shared.clone(),
+ },
+ Receiver { shared },
+ )
+}
+
+pub(crate) struct Sender {
+ shared: Arc<Shared>,
+}
+
+pub(crate) struct Receiver {
+ shared: Arc<Shared>,
+}
+
+struct Shared {
+ value: AtomicUsize,
+ waker: AtomicWaker,
+}
+
+impl Sender {
+ pub(crate) fn send(&mut self, value: Value) {
+ if self.shared.value.swap(value, Ordering::SeqCst) != value {
+ self.shared.waker.wake();
+ }
+ }
+}
+
+impl Drop for Sender {
+ fn drop(&mut self) {
+ self.send(CLOSED);
+ }
+}
+
+impl Receiver {
+ pub(crate) fn load(&mut self, cx: &mut task::Context<'_>) -> Value {
+ self.shared.waker.register(cx.waker());
+ self.shared.value.load(Ordering::SeqCst)
+ }
+
+ pub(crate) fn peek(&self) -> Value {
+ self.shared.value.load(Ordering::Relaxed)
+ }
+}
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -8,7 +8,7 @@ use http::{HeaderMap, Method, Version};
use tokio::io::{AsyncRead, AsyncWrite};
use super::io::Buffered;
-use super::{/*Decode,*/ Decoder, Encode, EncodedBuf, Encoder, Http1Transaction, ParseContext,};
+use super::{Decoder, Encode, EncodedBuf, Encoder, Http1Transaction, ParseContext, Wants};
use crate::common::{task, Pin, Poll, Unpin};
use crate::headers::connection_keep_alive;
use crate::proto::{BodyLength, DecodedLength, MessageHead};
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -114,7 +114,7 @@ where
pub fn can_read_body(&self) -> bool {
match self.state.reading {
- Reading::Body(..) => true,
+ Reading::Body(..) | Reading::Continue(..) => true,
_ => false,
}
}
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -129,10 +129,10 @@ where
read_buf.len() >= 24 && read_buf[..24] == *H2_PREFACE
}
- pub fn poll_read_head(
+ pub(super) fn poll_read_head(
&mut self,
cx: &mut task::Context<'_>,
- ) -> Poll<Option<crate::Result<(MessageHead<T::Incoming>, DecodedLength, bool)>>> {
+ ) -> Poll<Option<crate::Result<(MessageHead<T::Incoming>, DecodedLength, Wants)>>> {
debug_assert!(self.can_read_head());
trace!("Conn::read_head");
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -156,23 +156,28 @@ where
self.state.keep_alive &= msg.keep_alive;
self.state.version = msg.head.version;
+ let mut wants = if msg.wants_upgrade {
+ Wants::UPGRADE
+ } else {
+ Wants::EMPTY
+ };
+
if msg.decode == DecodedLength::ZERO {
- if log_enabled!(log::Level::Debug) && msg.expect_continue {
+ if msg.expect_continue {
debug!("ignoring expect-continue since body is empty");
}
self.state.reading = Reading::KeepAlive;
if !T::should_read_first() {
self.try_keep_alive(cx);
}
+ } else if msg.expect_continue {
+ self.state.reading = Reading::Continue(Decoder::new(msg.decode));
+ wants = wants.add(Wants::EXPECT);
} else {
- if msg.expect_continue {
- let cont = b"HTTP/1.1 100 Continue\r\n\r\n";
- self.io.headers_buf().extend_from_slice(cont);
- }
self.state.reading = Reading::Body(Decoder::new(msg.decode));
- };
+ }
- Poll::Ready(Some(Ok((msg.head, msg.decode, msg.wants_upgrade))))
+ Poll::Ready(Some(Ok((msg.head, msg.decode, wants))))
}
fn on_read_head_error<Z>(&mut self, e: crate::Error) -> Poll<Option<crate::Result<Z>>> {
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -239,7 +244,19 @@ where
}
}
}
- _ => unreachable!("read_body invalid state: {:?}", self.state.reading),
+ Reading::Continue(ref decoder) => {
+ // Write the 100 Continue if not already responded...
+ if let Writing::Init = self.state.writing {
+ trace!("automatically sending 100 Continue");
+ let cont = b"HTTP/1.1 100 Continue\r\n\r\n";
+ self.io.headers_buf().extend_from_slice(cont);
+ }
+
+ // And now recurse once in the Reading::Body state...
+ self.state.reading = Reading::Body(decoder.clone());
+ return self.poll_read_body(cx);
+ }
+ _ => unreachable!("poll_read_body invalid state: {:?}", self.state.reading),
};
self.state.reading = reading;
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -346,7 +363,9 @@ where
// would finish.
match self.state.reading {
- Reading::Body(..) | Reading::KeepAlive | Reading::Closed => return,
+ Reading::Continue(..) | Reading::Body(..) | Reading::KeepAlive | Reading::Closed => {
+ return
+ }
Reading::Init => (),
};
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -711,6 +730,7 @@ struct State {
#[derive(Debug)]
enum Reading {
Init,
+ Continue(Decoder),
Body(Decoder),
KeepAlive,
Closed,
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -4,7 +4,7 @@ use bytes::{Buf, Bytes};
use http::{Request, Response, StatusCode};
use tokio::io::{AsyncRead, AsyncWrite};
-use super::Http1Transaction;
+use super::{Http1Transaction, Wants};
use crate::body::{Body, Payload};
use crate::common::{task, Future, Never, Pin, Poll, Unpin};
use crate::proto::{
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -235,16 +235,16 @@ where
}
// dispatch is ready for a message, try to read one
match ready!(self.conn.poll_read_head(cx)) {
- Some(Ok((head, body_len, wants_upgrade))) => {
+ Some(Ok((head, body_len, wants))) => {
let mut body = match body_len {
DecodedLength::ZERO => Body::empty(),
other => {
- let (tx, rx) = Body::new_channel(other);
+ let (tx, rx) = Body::new_channel(other, wants.contains(Wants::EXPECT));
self.body_tx = Some(tx);
rx
}
};
- if wants_upgrade {
+ if wants.contains(Wants::UPGRADE) {
body.set_on_upgrade(self.conn.on_upgrade());
}
self.dispatch.recv_msg(Ok((head, body)))?;
diff --git a/src/proto/h1/mod.rs b/src/proto/h1/mod.rs
--- a/src/proto/h1/mod.rs
+++ b/src/proto/h1/mod.rs
@@ -74,3 +74,22 @@ pub(crate) struct Encode<'a, T> {
req_method: &'a mut Option<Method>,
title_case_headers: bool,
}
+
+/// Extra flags that a request "wants", like expect-continue or upgrades.
+#[derive(Clone, Copy, Debug)]
+struct Wants(u8);
+
+impl Wants {
+ const EMPTY: Wants = Wants(0b00);
+ const EXPECT: Wants = Wants(0b01);
+ const UPGRADE: Wants = Wants(0b10);
+
+ #[must_use]
+ fn add(self, other: Wants) -> Wants {
+ Wants(self.0 | other.0)
+ }
+
+ fn contains(&self, other: Wants) -> bool {
+ (self.0 & other.0) == other.0
+ }
+}
|
> master - looks like this feature is forgotten.
Not so much forgotten, I just wanted to come up with a better design. What you outline here seems like it should work on master:
``` rust
fn on_request(&mut self, req: Request) -> Next {
match request.headers().get() {
Some(&ContentLength(len)) if len > 10_000_000 => {
self.status = StatusCode::PayloadTooLarge;
Next::write()
},
None => {
self.status = StatusCode::LengthRequired;
Next::write()
},
Some(&ContentLength(len)) => {
self.length = len;
// hyper looks for Expect-100, and sends 100 status because of this read()
Next::read()
}
}
}
```
Yes, about this behavior and I had in mind.
I'm sorry if offended. Loss (sometimes temporary) features on full code redesign is a normal and I did not mean anything bad. On the contrary, from my point of view this is a very good time to remind about corner cases :)
Not offended at all! Thanks for helping think this issue through.
On Mon, Jun 20, 2016, 11:42 PM Artem V. Navrotskiy notifications@github.com
wrote:
> Yes, about this behavior and I had in mind.
>
> I'm sorry if offended. Loss (sometimes temporary) features on full code
> redesign is a normal and I did not mean anything bad. On the contrary, from
> my point of view this is a very good time to remind about corner cases :)
>
> —
> You are receiving this because you commented.
>
> Reply to this email directly, view it on GitHub
> https://github.com/hyperium/hyper/issues/838#issuecomment-227353726, or mute
> the thread
> https://github.com/notifications/unsubscribe/AADJF2kg53rTezjlZGbnHTCzmF2ekBXDks5qN4ffgaJpZM4I5p6B
> .
Hey all,
I'm looking at implementing this on master with the tokio integration.
I've literally only been using rust a few days so it is hard for me to follow through the code with inferred type assignments and generics mixed together.
Anyway, Here is what I have so far.
```git.patch
diff --git a/src/http/conn.rs b/src/http/conn.rs
index 7954fadb..3e90c8ba 100644
--- a/src/http/conn.rs
+++ b/src/http/conn.rs
@@ -116,6 +116,7 @@ impl<I: Io, T: Http1Transaction, K: KeepAlive> Conn<I, T, K> {
}
};
self.state.busy();
+ let wants_continue = head.expecting_continue();
let wants_keep_alive = head.should_keep_alive();
self.state.keep_alive &= wants_keep_alive;
let (body, reading) = if decoder.is_eof() {
@@ -124,6 +125,10 @@ impl<I: Io, T: Http1Transaction, K: KeepAlive> Conn<I, T, K> {
(true, Reading::Body(decoder))
};
self.state.reading = reading;
+ if wants_continue {
+ self.state.reading = Reading::Init;
+ }
+
return Ok(Async::Ready(Some(Frame::Message { message: head, body: body })));
},
_ => {
@@ -674,6 +679,8 @@ mod tests {
Ok(())
}).wait();
}
+
+
#[test]
fn test_conn_closed_write() {
let io = AsyncIo::new_buf(vec![], 0);
diff --git a/src/http/mod.rs b/src/http/mod.rs
index 13c50119..a0ea5a27 100644
--- a/src/http/mod.rs
+++ b/src/http/mod.rs
@@ -2,7 +2,7 @@
use std::borrow::Cow;
use std::fmt;
-use header::{Connection, ConnectionOption};
+use header::{Connection, ConnectionOption, Expect};
use header::Headers;
use method::Method;
use status::StatusCode;
@@ -68,6 +68,10 @@ impl<S> MessageHead<S> {
pub fn should_keep_alive(&self) -> bool {
should_keep_alive(self.version, &self.headers)
}
+
+ pub fn expecting_continue(&self) -> bool {
+ expecting_continue(self.version, &self.headers)
+ }
}
/// The raw status code and reason-phrase.
@@ -115,6 +119,16 @@ pub fn should_keep_alive(version: HttpVersion, headers: &Headers) -> bool {
ret
}
+#[inline]
+pub fn expecting_continue(version: HttpVersion, headers: &Headers) -> bool {
+ let ret = match (version, headers.get::<Expect>()) {
+ (Http11, Some(expect)) if expect == &Expect::Continue => true,
+ _ => false
+ };
+ trace!("expecting_continue(version={:?}, header={:?}) = {:?}", version, headers.get::<Expect>(), ret);
+ ret
+}
+
#[derive(Debug)]
pub enum ServerTransaction {}
```
The function I added to `http/mod.rs` seems correct and with `RUST_LOG=trace` I get
```
TRACE:hyper::http: expecting_continue(version=Http11, header=Some(Continue)) = true
```
Ignoring the changes I made to `conn.rs` just for testing.
@seanmonstar can you give me some direction here?
I can gather that we would be using `Sink` and not a `Stream` for this but I am not sure about what would be the best way to go about integrating this into the code base.
Where should I be piping the data back etc.
I'm am assuming this is a good time for api breakages.
@martell You've only been using Rust a couple days? That makes this work even more impressive! I'd suggest opening a Pull Request with your work, it makes it easier to discuss changes.
As for your current diff, it definitely looks like the right direction. I imagine we would want to add some new states, of which I'm not entirely certain the best combination at the moment. I'll think more, but I can describe the expected behavior.
We would need to save somehow that the request would like a 100-continue response. We would also need a way to know whether we should respond with it, and also to know that we already have. A way we could decide to write `100-continue` is if the user decides that they wish to keep reading from the body, we should flush that response. If the user decides they don't want to read from the body, they'll just return a `Response`, and we can just write that.
```rust
if !req.headers().has::<ContentLength>()
return Response::new().with_status(StatusCode::LengthRequired);
} else {
req.body().for_each(|chunk| something(chunk))
}
```
In the above example, if the request didn't have a `Content-Length` header, the server wants to reject it immediately. In this case, hyper should not have responded with `100-continue`. We can do this because `Conn::poll()` will be called again if the user tries to poll on the request body (the `req.body().for_each`).
So, we can record that an expectation exists, and then in `Conn::poll()`, if were in this `expecting` state, we can write (and it needs to be flushed immediately) the `100-continue` response (record that we've since written that), and then try to keep reading the body. If `Conn::start_send()` is called to write a response, we can just remove the expectation state, since it no longer matters.
Does this all make sense?
Thanks for taking the time to look.
Most of that makes sense to me.
I have setup a WIP PR for this
We have to be compliant with the rfc spec to only send the `100-continue` when we get the `Content-Length` makes sense to me.
Where does the above code go in the code base however?
The use case for this is typically for a REST API server and we only really want to send the body after `Auth` has happened based on the header data, probably an oauth2 bearer token etc.
So when the user requests the body is an ideal time to send the `100-continue`
I see the `Conn::poll()` in `conn.rs`
I'm just not sure what states yet either but can play around with that to achieve the goal and refactor, from initial reading I see
```
Reading::Init
Reading::Body(..) in the function `can_read_body`
Reading::KeepAlive
Reading::Close
```
I think we want a state like `Reading::Head` because we are beyond the init stage but are not onto reading the body yet.
My biggest problem is I can't find where I can write to the `SINK`, this is probably a combination of my lack of rust knowledge and the fact that this is not my own code base :)
I've gone back and forth whether it should be `Reading::Expect` or `Writing::Expect`. It kind of affects both. Whichever is chosen, we'd want to update `read_body` to check that we've written the `100-continue`, and `write_head` would want to update the state that that there is no need to send a `100-continue`, since some other response is already being sent.
The content is written via `self.io.buffer(bytes)` and then flushed with `self.io.flush()`.
I have a PR open to implement this: #1232.
@sfackler Added support for immediately replying with `100-continue`
This is a good short term fix for getting support in tree.
Ideally we would only send this on the attempted read of the body.
Discussions on IRC lead to some work being done in tokio which needed a new future Sink to support this. https://github.com/alexcrichton/futures-rs/pull/414
This was then dropped afterwards.
It is still up in the air about how we are going to best solve this.
I wanted to get some context into this thread so we can proceed.
@seanmonstar did mention he might have an idea about how to help this in hyper?
This thread might give the best context on what is needed.
https://github.com/alexcrichton/futures-rs/issues/409
This should no longer be blocked on any dependencies, on master. We have our own dispatcher, and use a custom channel for the body.
I think it wouldn't be too hard to tie `want` into the body channel, to know when and if the user has polled for data on the body.
@seanmonstar was this not fixed by 26417fc24?
```rust
if msg.expect_continue {
let cont = b"HTTP/1.1 100 Continue\r\n\r\n";
self.io.headers_buf().extend_from_slice(cont);
}
@AlexDvorak no, that's adding the `100 Continue` response to the write buffer immediately, right after parsing. This issue is describing that it **shouldn't** add it immediately, but instead only if the user polls the `Body`. That way, if the user rejects the request for some reason, the client doesn't see a weird `HTTP/1.1 100 Continue\r\n\r\nHTTP/1.1 400 Bad Request`.
So, would modification to the `Body` struct like so be fine?
```rust
pub struct Body {
kind: Kind,
/// Keep the extra bits in an `Option<Box<Extra>>`, so that
/// Body stays small in the common case (no extras needed).
extra: Option<Box<Extra>>,
UserHasPolled: bool,
}
```
Or should the modification be happenning elsewhere?
If that's ok then how would one access the `Body` from the `proto/h1/conn.rs`?
That wouldn't work, since the `Conn` no longer has the `Body` side. When there's a request body, a channel is made (`Body::channel`). The channel implementation could be where this information is stored. The `client::dispatcher` already has a concept of knowing when the receiver side has been polled, by making use of `want`.
what are you referring to by channel implementation? I assume you're not referring to `mpsc::channel`
|
2020-01-27T23:30:38Z
| 2,119
|
Change 100-continue behavior to send when Body has been polled
Use of the 100 (Continue) Status (https://www.w3.org/Protocols/rfc2616/rfc2616-sec8.html#sec8.2.3):
> The purpose of the 100 (Continue) status (see section 10.1.1) is to allow a client that is sending a request message with a request body to determine if the origin server is willing to accept the request (based on the request headers) before the client sends the request body. In some cases, it might either be inappropriate or highly inefficient for the client to send the body if the server will reject the message without looking at the body.
In my case, for example, I can upload file or reject already uploaded file in one atomic request.
Current state:
- 0.9.x - handler can make a decision and allow/reject a client request via the additional `check_continue` method. This method is not supported by https://github.com/iron/iron and https://github.com/nickel-org/nickel.rs. Also this method requires duplicate logic (it is not called on HTTP/1.0) for routing and permission checks;
- master - looks like this feature is forgotten.
I think it would be much better to send the "100 Continue" status on the first Body::read() call (similar logic is implemented by the https://github.com/frewsxcv/tiny-http project).
In this case no additional logic/callback is needed to make a decision about "100 Continue":
- If you call the Body::read() method, then you need the request body and hyper sends "100 Continue" to the client;
- If you have enough information to send the final status without the body, then hyper sends the status without "100 Continue".
I tried to change the logic on the 0.9.x branch by sending the "100 Continue" status on the first read call, but I failed :(
|
hyperium__hyper-2119
|
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -507,11 +525,29 @@ impl Sender {
}
}
+impl fmt::Debug for Sender {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ #[derive(Debug)]
+ struct Open;
+ #[derive(Debug)]
+ struct Closed;
+
+ let mut builder = f.debug_tuple("Sender");
+ match self.want_rx.peek() {
+ watch::CLOSED => builder.field(&Closed),
+ _ => builder.field(&Open),
+ };
+
+ builder.finish()
+ }
+}
+
#[cfg(test)]
mod tests {
use std::mem;
+ use std::task::Poll;
- use super::{Body, Sender};
+ use super::{Body, DecodedLength, HttpBody, Sender};
#[test]
fn test_size_of() {
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -541,4 +577,97 @@ mod tests {
"Option<Sender>"
);
}
+
+ #[tokio::test]
+ async fn channel_abort() {
+ let (tx, mut rx) = Body::channel();
+
+ tx.abort();
+
+ let err = rx.data().await.unwrap().unwrap_err();
+ assert!(err.is_body_write_aborted(), "{:?}", err);
+ }
+
+ #[tokio::test]
+ async fn channel_abort_when_buffer_is_full() {
+ let (mut tx, mut rx) = Body::channel();
+
+ tx.try_send_data("chunk 1".into()).expect("send 1");
+ // buffer is full, but can still send abort
+ tx.abort();
+
+ let chunk1 = rx.data().await.expect("item 1").expect("chunk 1");
+ assert_eq!(chunk1, "chunk 1");
+
+ let err = rx.data().await.unwrap().unwrap_err();
+ assert!(err.is_body_write_aborted(), "{:?}", err);
+ }
+
+ #[test]
+ fn channel_buffers_one() {
+ let (mut tx, _rx) = Body::channel();
+
+ tx.try_send_data("chunk 1".into()).expect("send 1");
+
+ // buffer is now full
+ let chunk2 = tx.try_send_data("chunk 2".into()).expect_err("send 2");
+ assert_eq!(chunk2, "chunk 2");
+ }
+
+ #[tokio::test]
+ async fn channel_empty() {
+ let (_, mut rx) = Body::channel();
+
+ assert!(rx.data().await.is_none());
+ }
+
+ #[test]
+ fn channel_ready() {
+ let (mut tx, _rx) = Body::new_channel(DecodedLength::CHUNKED, /*wanter = */ false);
+
+ let mut tx_ready = tokio_test::task::spawn(tx.ready());
+
+ assert!(tx_ready.poll().is_ready(), "tx is ready immediately");
+ }
+
+ #[test]
+ fn channel_wanter() {
+ let (mut tx, mut rx) = Body::new_channel(DecodedLength::CHUNKED, /*wanter = */ true);
+
+ let mut tx_ready = tokio_test::task::spawn(tx.ready());
+ let mut rx_data = tokio_test::task::spawn(rx.data());
+
+ assert!(
+ tx_ready.poll().is_pending(),
+ "tx isn't ready before rx has been polled"
+ );
+
+ assert!(rx_data.poll().is_pending(), "poll rx.data");
+ assert!(tx_ready.is_woken(), "rx poll wakes tx");
+
+ assert!(
+ tx_ready.poll().is_ready(),
+ "tx is ready after rx has been polled"
+ );
+ }
+
+ #[test]
+ fn channel_notices_closure() {
+ let (mut tx, rx) = Body::new_channel(DecodedLength::CHUNKED, /*wanter = */ true);
+
+ let mut tx_ready = tokio_test::task::spawn(tx.ready());
+
+ assert!(
+ tx_ready.poll().is_pending(),
+ "tx isn't ready before rx has been polled"
+ );
+
+ drop(rx);
+ assert!(tx_ready.is_woken(), "dropping rx wakes tx");
+
+ match tx_ready.poll() {
+ Poll::Ready(Err(ref e)) if e.is_closed() => (),
+ unexpected => panic!("tx poll ready unexpected: {:?}", unexpected),
+ }
+ }
}
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -785,6 +785,57 @@ fn expect_continue_but_no_body_is_ignored() {
assert_eq!(&resp[..expected.len()], expected);
}
+#[tokio::test]
+async fn expect_continue_waits_for_body_poll() {
+ let _ = pretty_env_logger::try_init();
+ let mut listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
+ let addr = listener.local_addr().unwrap();
+
+ let child = thread::spawn(move || {
+ let mut tcp = connect(&addr);
+
+ tcp.write_all(
+ b"\
+ POST /foo HTTP/1.1\r\n\
+ Host: example.domain\r\n\
+ Expect: 100-continue\r\n\
+ Content-Length: 100\r\n\
+ Connection: Close\r\n\
+ \r\n\
+ ",
+ )
+ .expect("write");
+
+ let expected = "HTTP/1.1 400 Bad Request\r\n";
+ let mut resp = String::new();
+ tcp.read_to_string(&mut resp).expect("read");
+
+ assert_eq!(&resp[..expected.len()], expected);
+ });
+
+ let (socket, _) = listener.accept().await.expect("accept");
+
+ Http::new()
+ .serve_connection(
+ socket,
+ service_fn(|req| {
+ assert_eq!(req.headers()["expect"], "100-continue");
+ // But! We're never going to poll the body!
+ tokio::time::delay_for(Duration::from_millis(50)).map(move |_| {
+ // Move and drop the req, so we don't auto-close
+ drop(req);
+ Response::builder()
+ .status(StatusCode::BAD_REQUEST)
+ .body(hyper::Body::empty())
+ })
+ }),
+ )
+ .await
+ .expect("serve_connection");
+
+ child.join().expect("client thread");
+}
+
#[test]
fn pipeline_disabled() {
let server = serve();
|
hyperium/hyper
|
42560c7c40d8f934658624114fda4eb819cefda8
|
[
"2114"
] |
0.13
|
ba2a144f8b81042247088215425f91760d8694a1
|
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -902,7 +902,6 @@ impl State {
}
fn prepare_upgrade(&mut self) -> crate::upgrade::OnUpgrade {
- debug_assert!(self.upgrade.is_none());
let (tx, rx) = crate::upgrade::pending();
self.upgrade = Some(tx);
rx
|
Looks like a `debug_assert`, but I can't possibly remember why it's there...
|
2020-01-24T00:27:24Z
| 2,115
|
'assertion failed: self.upgrade.is_none()' when reusing connection with ignored upgrade header
Hyper threads panic (though the server remains running) when a client makes multiple requests with an upgrade header (which hyper ignores). The first request is handled without issue, but when the second request is made the hyper thread panics and the connection is terminated.
Steps to reproduce:
* Run the hello world example
* `cargo run --example hello`
* issue multiple requests over the same connection with an Upgrade header.
* `curl -v -H "Connection: Upgrade" -H "Upgrade: foo" http://127.0.0.1:3000 http://127.0.0.1:3000`
The hyper output:
```
Listening on http://127.0.0.1:3000
thread 'main' panicked at 'assertion failed: self.upgrade.is_none()', src/proto/h1/conn.rs:905:9
note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace.
```
The curl output:
```
* Trying 127.0.0.1:3000...
* TCP_NODELAY set
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0* Connected to 127.0.0.1 (127.0.0.1) port 3000 (#0)
> GET / HTTP/1.1
> Host: 127.0.0.1:3000
> User-Agent: curl/7.66.0
> Accept: */*
> Connection: Upgrade
> Upgrade: foo
>
* Mark bundle as not supporting multiuse
< HTTP/1.1 200 OK
< content-length: 12
< date: Thu, 23 Jan 2020 23:45:26 GMT
<
{ [12 bytes data]
100 12 100 12 0 0 12000 0 --:--:-- --:--:-- --:--:-- 12000
* Connection #0 to host 127.0.0.1 left intact
Hello World!* Found bundle for host 127.0.0.1: 0x5649a99f0170 [serially]
* Can not multiplex, even if we wanted to!
* Re-using existing connection! (#0) with host 127.0.0.1
* Connected to 127.0.0.1 (127.0.0.1) port 3000 (#0)
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0> GET / HTTP/1.1
> Host: 127.0.0.1:3000
> User-Agent: curl/7.66.0
> Accept: */*
> Connection: Upgrade
> Upgrade: foo
>
* Connection died, retrying a fresh connect
0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0
* Closing connection 0
* Issue another request to this URL: 'http://127.0.0.1:3000/'
* Hostname 127.0.0.1 was found in DNS cache
* Trying 127.0.0.1:3000...
* TCP_NODELAY set
* Connected to 127.0.0.1 (127.0.0.1) port 3000 (#1)
> GET / HTTP/1.1
> Host: 127.0.0.1:3000
> User-Agent: curl/7.66.0
> Accept: */*
> Connection: Upgrade
> Upgrade: foo
>
* Mark bundle as not supporting multiuse
< HTTP/1.1 200 OK
< content-length: 12
< date: Thu, 23 Jan 2020 23:45:26 GMT
<
{ [12 bytes data]
100 12 100 12 0 0 12000 0 --:--:-- --:--:-- --:--:-- 12000
* Connection #1 to host 127.0.0.1 left intact
Hello World!
```
You can see that the connection dies when the second request is issued (corresponding to the panic in the hyper output), curl then establishes a new connection and reissues the request which succeeds.
|
hyperium__hyper-2115
|
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1323,6 +1323,46 @@ async fn upgrades_new() {
assert_eq!(s(&vec), "bar=foo");
}
+#[tokio::test]
+async fn upgrades_ignored() {
+ let _ = pretty_env_logger::try_init();
+ let mut listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
+ let addr = listener.local_addr().unwrap();
+
+ tokio::spawn(async move {
+ let svc = service_fn(move |req: Request<Body>| {
+ assert_eq!(req.headers()["upgrade"], "yolo");
+ future::ok::<_, hyper::Error>(Response::new(hyper::Body::empty()))
+ });
+
+ let (socket, _) = listener.accept().await.unwrap();
+ Http::new()
+ .serve_connection(socket, svc)
+ .with_upgrades()
+ .await
+ .expect("server task");
+ });
+
+ let client = hyper::Client::new();
+ let url = format!("http://{}/", addr);
+
+ let make_req = || {
+ hyper::Request::builder()
+ .uri(&*url)
+ .header("upgrade", "yolo")
+ .header("connection", "upgrade")
+ .body(hyper::Body::empty())
+ .expect("make_req")
+ };
+
+ let res1 = client.request(make_req()).await.expect("req 1");
+ assert_eq!(res1.status(), 200);
+ drop(res1);
+
+ let res2 = client.request(make_req()).await.expect("req 2");
+ assert_eq!(res2.status(), 200);
+}
+
#[tokio::test]
async fn http_connect_new() {
use tokio::io::{AsyncReadExt, AsyncWriteExt};
|
hyperium/hyper
|
42560c7c40d8f934658624114fda4eb819cefda8
|
[
"2067"
] |
0.13
|
bfda3906170b3b7f184e586d18bdcfa2f7b8e941
|
diff --git a/Cargo.toml b/Cargo.toml
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -25,7 +25,7 @@ futures-core = { version = "0.3", default-features = false }
futures-channel = "0.3"
futures-util = { version = "0.3", default-features = false }
http = "0.2"
-http-body = "0.3"
+http-body = "0.3.1"
httparse = "1.0"
h2 = "0.2.1"
itoa = "0.4.1"
diff --git a/examples/client.rs b/examples/client.rs
--- a/examples/client.rs
+++ b/examples/client.rs
@@ -42,7 +42,7 @@ async fn fetch_url(url: hyper::Uri) -> Result<()> {
// Stream the body, writing each chunk to stdout as we get it
// (instead of buffering and printing at the end).
- while let Some(next) = res.body_mut().data().await {
+ while let Some(next) = res.data().await {
let chunk = next?;
io::stdout().write_all(&chunk).await?;
}
diff --git a/examples/client_json.rs b/examples/client_json.rs
--- a/examples/client_json.rs
+++ b/examples/client_json.rs
@@ -30,7 +30,7 @@ async fn fetch_json(url: hyper::Uri) -> Result<Vec<User>> {
let res = client.get(url).await?;
// asynchronously aggregate the chunks of the body
- let body = hyper::body::aggregate(res.into_body()).await?;
+ let body = hyper::body::aggregate(res).await?;
// try to parse as json with serde_json
let users = serde_json::from_reader(body.reader())?;
diff --git a/examples/params.rs b/examples/params.rs
--- a/examples/params.rs
+++ b/examples/params.rs
@@ -17,7 +17,7 @@ async fn param_example(req: Request<Body>) -> Result<Response<Body>, hyper::Erro
(&Method::GET, "/") | (&Method::GET, "/post") => Ok(Response::new(INDEX.into())),
(&Method::POST, "/post") => {
// Concatenate the body...
- let b = hyper::body::to_bytes(req.into_body()).await?;
+ let b = hyper::body::to_bytes(req).await?;
// Parse the request body. form_urlencoded::parse
// always succeeds, but in general parsing may
// fail (for example, an invalid post of json), so
diff --git a/examples/web_api.rs b/examples/web_api.rs
--- a/examples/web_api.rs
+++ b/examples/web_api.rs
@@ -40,7 +40,7 @@ async fn client_request_response(client: &Client<HttpConnector>) -> Result<Respo
async fn api_post_response(req: Request<Body>) -> Result<Response<Body>> {
// Aggregate the body...
- let whole_body = hyper::body::aggregate(req.into_body()).await?;
+ let whole_body = hyper::body::aggregate(req).await?;
// Decode as JSON...
let mut data: serde_json::Value = serde_json::from_reader(whole_body.reader())?;
// Change the JSON...
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -18,7 +18,7 @@ type BodySender = mpsc::Sender<Result<Bytes, crate::Error>>;
/// A stream of `Bytes`, used when receiving bodies.
///
-/// A good default [`HttpBody`](crates::body::HttpBody) to use in many
+/// A good default [`HttpBody`](crate::body::HttpBody) to use in many
/// applications.
#[must_use = "streams do nothing unless polled"]
pub struct Body {
diff --git a/src/body/to_bytes.rs b/src/body/to_bytes.rs
--- a/src/body/to_bytes.rs
+++ b/src/body/to_bytes.rs
@@ -5,7 +5,7 @@ use super::HttpBody;
/// Concatenate the buffers from a body into a single `Bytes` asynchronously.
///
/// This may require copying the data into a single buffer. If you don't need
-/// a contiguous buffer, prefer the [`aggregate`](crate::body::aggregate)
+/// a contiguous buffer, prefer the [`aggregate`](crate::body::aggregate())
/// function.
pub async fn to_bytes<T>(body: T) -> Result<Bytes, T::Error>
where
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -40,7 +40,7 @@
//! println!("status: {}", res.status());
//!
//! // Concatenate the body stream into a single buffer...
-//! let buf = hyper::body::to_bytes(res.into_body()).await?;
+//! let buf = hyper::body::to_bytes(res).await?;
//!
//! println!("body: {:?}", buf);
//! # Ok(())
|
I think this makes sense but forces http to be async I guess unless we put it behind a feature flag. Otherwise, I think this could be a great addition.
How does it force http to be async? It'd only implement in the `http-body` crate.
Ah you're right, if the impl is in the http-body crate then I am +1
|
2019-12-13T18:20:56Z
| 2,077
|
Consider implementing HttpBody for Request and Response
In https://github.com/hyperium/http/issues/107, we didn't implement `Stream` for `http::{Request, Response}` so as to not have the external dependency. However, in `http-body`, since we define the trait and import `http`, we could provide this implementation.
It'd make these slightly nicer:
```rust
while let Some(data) = req.data().await {}
let buf = hyper::body::aggregate(req);
```
|
hyperium__hyper-2077
|
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -146,7 +146,7 @@ macro_rules! test {
);
)*
- let body = rt.block_on(concat(res.into_body()))
+ let body = rt.block_on(concat(res))
.expect("body concat wait");
let expected_res_body = Option::<&[u8]>::from($response_body)
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1065,7 +1065,7 @@ mod dispatch_impl {
.request(req)
.and_then(move |res| {
assert_eq!(res.status(), hyper::StatusCode::OK);
- concat(res.into_body())
+ concat(res)
})
.map_ok(|_| ())
};
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1128,7 +1128,7 @@ mod dispatch_impl {
.unwrap();
let res = client.request(req).and_then(move |res| {
assert_eq!(res.status(), hyper::StatusCode::OK);
- concat(res.into_body())
+ concat(res)
});
let rx = rx1.expect("thread panicked");
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1296,7 +1296,7 @@ mod dispatch_impl {
.unwrap();
let res = client.request(req).and_then(move |res| {
assert_eq!(res.status(), hyper::StatusCode::OK);
- concat(res.into_body())
+ concat(res)
});
let rx = rx1.expect("thread panicked");
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1342,7 +1342,7 @@ mod dispatch_impl {
.unwrap();
let res = client.request(req).and_then(move |res| {
assert_eq!(res.status(), hyper::StatusCode::OK);
- concat(res.into_body())
+ concat(res)
});
let rx = rx1.expect("thread panicked");
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2098,7 +2098,7 @@ mod conn {
let res = client.send_request(req).and_then(move |res| {
assert_eq!(res.status(), hyper::StatusCode::OK);
- concat(res.into_body())
+ concat(res)
});
let rx = rx1.expect("thread panicked");
let rx = rx.then(|_| tokio::time::delay_for(Duration::from_millis(200)));
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2144,7 +2144,7 @@ mod conn {
let res = client.send_request(req).and_then(move |res| {
assert_eq!(res.status(), hyper::StatusCode::OK);
- concat(res.into_body())
+ concat(res)
});
let rx = rx1.expect("thread panicked");
let rx = rx.then(|_| tokio::time::delay_for(Duration::from_millis(200)));
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2184,7 +2184,7 @@ mod conn {
.unwrap();
let res1 = client.send_request(req).and_then(move |res| {
assert_eq!(res.status(), hyper::StatusCode::OK);
- concat(res.into_body())
+ concat(res)
});
// pipelined request will hit NotReady, and thus should return an Error::Cancel
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2258,7 +2258,7 @@ mod conn {
let res = client.send_request(req).and_then(move |res| {
assert_eq!(res.status(), hyper::StatusCode::SWITCHING_PROTOCOLS);
assert_eq!(res.headers()["Upgrade"], "foobar");
- concat(res.into_body())
+ concat(res)
});
let rx = rx1.expect("thread panicked");
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2348,7 +2348,7 @@ mod conn {
.send_request(req)
.and_then(move |res| {
assert_eq!(res.status(), hyper::StatusCode::OK);
- concat(res.into_body())
+ concat(res)
})
.map_ok(|body| {
assert_eq!(body.as_ref(), b"");
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1887,7 +1887,7 @@ impl tower_service::Service<Request<Body>> for TestService {
let replies = self.reply.clone();
Box::pin(async move {
- while let Some(chunk) = req.body_mut().data().await {
+ while let Some(chunk) = req.data().await {
match chunk {
Ok(chunk) => {
tx.send(Msg::Chunk(chunk.to_vec())).unwrap();
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -355,7 +355,7 @@ async fn async_test(cfg: __TestConfig) {
func(&req.headers());
}
let sbody = sreq.body;
- hyper::body::to_bytes(req.into_body()).map_ok(move |body| {
+ hyper::body::to_bytes(req).map_ok(move |body| {
assert_eq!(body.as_ref(), sbody.as_slice(), "client body");
let mut res = Response::builder()
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -410,7 +410,7 @@ async fn async_test(cfg: __TestConfig) {
for func in &cheaders {
func(&res.headers());
}
- hyper::body::to_bytes(res.into_body())
+ hyper::body::to_bytes(res)
})
.map_ok(move |body| {
assert_eq!(body.as_ref(), cbody.as_slice(), "server body");
|
hyperium/hyper
|
42560c7c40d8f934658624114fda4eb819cefda8
|
[
"2058"
] |
0.13
|
e12329054a6707d05bed342a249e1c75a932d52f
|
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -800,7 +800,7 @@ impl Client {
Ok(Some((DecodedLength::CHUNKED, false)))
} else {
trace!("not chunked, read till eof");
- Ok(Some((DecodedLength::CHUNKED, false)))
+ Ok(Some((DecodedLength::CLOSE_DELIMITED, false)))
}
} else if let Some(len) = headers::content_length_parse_all(&inc.headers) {
Ok(Some((DecodedLength::checked_new(len)?, false)))
diff --git a/src/proto/mod.rs b/src/proto/mod.rs
--- a/src/proto/mod.rs
+++ b/src/proto/mod.rs
@@ -47,7 +47,7 @@ pub(crate) enum Dispatched {
mod body_length {
use std::fmt;
- #[derive(Clone, Copy, Debug, PartialEq, Eq)]
+ #[derive(Clone, Copy, PartialEq, Eq)]
pub(crate) struct DecodedLength(u64);
const MAX_LEN: u64 = ::std::u64::MAX - 2;
diff --git a/src/proto/mod.rs b/src/proto/mod.rs
--- a/src/proto/mod.rs
+++ b/src/proto/mod.rs
@@ -92,6 +92,16 @@ mod body_length {
}
}
+ impl fmt::Debug for DecodedLength {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {
+ DecodedLength::CLOSE_DELIMITED => f.write_str("CLOSE_DELIMITED"),
+ DecodedLength::CHUNKED => f.write_str("CHUNKED"),
+ DecodedLength(n) => f.debug_tuple("DecodedLength").field(&n).finish(),
+ }
+ }
+ }
+
impl fmt::Display for DecodedLength {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
|
Yep, that line looks wrong indeed!
|
2019-12-12T21:04:25Z
| 2,075
|
Transfer-Encoding: identity
I have a web service that returns its content with "Transfer-Encoding: identity". If I read the spec correctly that means that the content should not be touched and interpreted as it is.
Unfortunately hyper seems to assume that whenever transfer encoding is present in a response header it interprets the content as if it would be chunked.
I modified the following line locally to CLOSE_DELIMITED and now it works for me (tm)
https://github.com/hyperium/hyper/blob/0.12.x/src/proto/h1/role.rs#L768
interestingly the code comment doesn't really match the actual code. not sure what the background of this is.
|
hyperium__hyper-2075
|
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1441,7 +1441,7 @@ mod tests {
",
);
- // transfer-encoding
+ // transfer-encoding: chunked
assert_eq!(
parse(
"\
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1454,6 +1454,19 @@ mod tests {
DecodedLength::CHUNKED
);
+ // transfer-encoding not-chunked is close-delimited
+ assert_eq!(
+ parse(
+ "\
+ HTTP/1.1 200 OK\r\n\
+ transfer-encoding: yolo\r\n\
+ \r\n\
+ "
+ )
+ .decode,
+ DecodedLength::CLOSE_DELIMITED
+ );
+
// transfer-encoding and content-length = chunked
assert_eq!(
parse(
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -655,6 +655,35 @@ test! {
body: None,
}
+test! {
+ name: client_response_transfer_encoding_not_chunked,
+
+ server:
+ expected: "\
+ GET /te-not-chunked HTTP/1.1\r\n\
+ host: {addr}\r\n\
+ \r\n\
+ ",
+ reply: "\
+ HTTP/1.1 200 OK\r\n\
+ transfer-encoding: yolo\r\n\
+ \r\n\
+ hallo\
+ ",
+
+ client:
+ request: {
+ method: GET,
+ url: "http://{addr}/te-not-chunked",
+ },
+ response:
+ status: OK,
+ headers: {
+ "transfer-encoding" => "yolo",
+ },
+ body: &b"hallo"[..],
+}
+
test! {
name: client_pipeline_responses_extra,
|
hyperium/hyper
|
42560c7c40d8f934658624114fda4eb819cefda8
|
[
"1931"
] |
0.2
|
c56ccfb03366036dcc48d037e4c212d1f0fc7eb9
|
diff --git a/examples/web_api.rs b/examples/web_api.rs
--- a/examples/web_api.rs
+++ b/examples/web_api.rs
@@ -3,7 +3,7 @@
use futures_util::{StreamExt, TryStreamExt};
use hyper::client::HttpConnector;
use hyper::service::{make_service_fn, service_fn};
-use hyper::{header, Body, Chunk, Client, Method, Request, Response, Server, StatusCode};
+use hyper::{header, Body, Client, Method, Request, Response, Server, StatusCode};
type GenericError = Box<dyn std::error::Error + Send + Sync>;
type Result<T> = std::result::Result<T, GenericError>;
diff --git a/examples/web_api.rs b/examples/web_api.rs
--- a/examples/web_api.rs
+++ b/examples/web_api.rs
@@ -25,11 +25,11 @@ async fn client_request_response(client: &Client<HttpConnector>) -> Result<Respo
let web_res = client.request(req).await?;
// Compare the JSON we sent (before) with what we received (after):
let body = Body::wrap_stream(web_res.into_body().map_ok(|b| {
- Chunk::from(format!(
+ format!(
"<b>POST request body</b>: {}<br><b>Response</b>: {}",
POST_DATA,
std::str::from_utf8(&b).unwrap()
- ))
+ )
}));
Ok(Response::new(body))
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -11,13 +11,12 @@ use futures_util::TryStreamExt;
use http::HeaderMap;
use http_body::{Body as HttpBody, SizeHint};
-use super::Chunk;
use crate::common::{task, Future, Never, Pin, Poll};
use crate::upgrade::OnUpgrade;
-type BodySender = mpsc::Sender<Result<Chunk, crate::Error>>;
+type BodySender = mpsc::Sender<Result<Bytes, crate::Error>>;
-/// A stream of `Chunk`s, used when receiving bodies.
+/// A stream of `Bytes`s, used when receiving bodies.
///
/// A good default `Payload` to use in many applications.
#[must_use = "streams do nothing unless polled"]
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -29,11 +28,11 @@ pub struct Body {
}
enum Kind {
- Once(Option<Chunk>),
+ Once(Option<Bytes>),
Chan {
content_length: Option<u64>,
abort_rx: oneshot::Receiver<()>,
- rx: mpsc::Receiver<Result<Chunk, crate::Error>>,
+ rx: mpsc::Receiver<Result<Bytes, crate::Error>>,
},
H2 {
content_length: Option<u64>,
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -45,7 +44,7 @@ enum Kind {
// See https://github.com/rust-lang/rust/issues/57017
#[cfg(feature = "stream")]
Wrapped(
- Pin<Box<dyn Stream<Item = Result<Chunk, Box<dyn StdError + Send + Sync>>> + Send + Sync>>,
+ Pin<Box<dyn Stream<Item = Result<Bytes, Box<dyn StdError + Send + Sync>>> + Send + Sync>>,
),
}
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -152,7 +151,7 @@ impl Body {
pub fn wrap_stream<S, O, E>(stream: S) -> Body
where
S: Stream<Item = Result<O, E>> + Send + Sync + 'static,
- O: Into<Chunk> + 'static,
+ O: Into<Bytes> + 'static,
E: Into<Box<dyn StdError + Send + Sync>> + 'static,
{
let mapped = stream.map_ok(Into::into).map_err(Into::into);
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -208,7 +207,7 @@ impl Body {
})
}
- fn poll_eof(&mut self, cx: &mut task::Context<'_>) -> Poll<Option<crate::Result<Chunk>>> {
+ fn poll_eof(&mut self, cx: &mut task::Context<'_>) -> Poll<Option<crate::Result<Bytes>>> {
match self.take_delayed_eof() {
Some(DelayEof::NotEof(mut delay)) => match self.poll_inner(cx) {
ok @ Poll::Ready(Some(Ok(..))) | ok @ Poll::Pending => {
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -237,7 +236,7 @@ impl Body {
}
}
- fn poll_inner(&mut self, cx: &mut task::Context<'_>) -> Poll<Option<crate::Result<Chunk>>> {
+ fn poll_inner(&mut self, cx: &mut task::Context<'_>) -> Poll<Option<crate::Result<Bytes>>> {
match self.kind {
Kind::Once(ref mut val) => Poll::Ready(val.take().map(Ok)),
Kind::Chan {
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -265,7 +264,7 @@ impl Body {
} => match ready!(h2.poll_data(cx)) {
Some(Ok(bytes)) => {
let _ = h2.flow_control().release_capacity(bytes.len());
- Poll::Ready(Some(Ok(Chunk::from(bytes))))
+ Poll::Ready(Some(Ok(bytes)))
}
Some(Err(e)) => Poll::Ready(Some(Err(crate::Error::new_body(e)))),
None => Poll::Ready(None),
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -279,7 +278,7 @@ impl Body {
}
}
- pub(super) fn take_full_data(&mut self) -> Option<Chunk> {
+ pub(super) fn take_full_data(&mut self) -> Option<Bytes> {
if let Kind::Once(ref mut chunk) = self.kind {
chunk.take()
} else {
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -297,7 +296,7 @@ impl Default for Body {
}
impl HttpBody for Body {
- type Data = Chunk;
+ type Data = Bytes;
type Error = crate::Error;
fn poll_data(
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -362,7 +361,7 @@ impl fmt::Debug for Body {
#[derive(Debug)]
struct Empty;
#[derive(Debug)]
- struct Full<'a>(&'a Chunk);
+ struct Full<'a>(&'a Bytes);
let mut builder = f.debug_tuple("Body");
match self.kind {
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -381,7 +380,7 @@ impl fmt::Debug for Body {
/// `Cargo.toml`.
#[cfg(feature = "stream")]
impl Stream for Body {
- type Item = crate::Result<Chunk>;
+ type Item = crate::Result<Bytes>;
fn poll_next(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Option<Self::Item>> {
HttpBody::poll_data(self, cx)
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -393,22 +392,22 @@ impl Stream for Body {
/// This function requires enabling the `stream` feature in your
/// `Cargo.toml`.
#[cfg(feature = "stream")]
-impl From<Box<dyn Stream<Item = Result<Chunk, Box<dyn StdError + Send + Sync>>> + Send + Sync>>
+impl From<Box<dyn Stream<Item = Result<Bytes, Box<dyn StdError + Send + Sync>>> + Send + Sync>>
for Body
{
#[inline]
fn from(
stream: Box<
- dyn Stream<Item = Result<Chunk, Box<dyn StdError + Send + Sync>>> + Send + Sync,
+ dyn Stream<Item = Result<Bytes, Box<dyn StdError + Send + Sync>>> + Send + Sync,
>,
) -> Body {
Body::new(Kind::Wrapped(stream.into()))
}
}
-impl From<Chunk> for Body {
+impl From<Bytes> for Body {
#[inline]
- fn from(chunk: Chunk) -> Body {
+ fn from(chunk: Bytes) -> Body {
if chunk.is_empty() {
Body::empty()
} else {
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -417,24 +416,17 @@ impl From<Chunk> for Body {
}
}
-impl From<Bytes> for Body {
- #[inline]
- fn from(bytes: Bytes) -> Body {
- Body::from(Chunk::from(bytes))
- }
-}
-
impl From<Vec<u8>> for Body {
#[inline]
fn from(vec: Vec<u8>) -> Body {
- Body::from(Chunk::from(vec))
+ Body::from(Bytes::from(vec))
}
}
impl From<&'static [u8]> for Body {
#[inline]
fn from(slice: &'static [u8]) -> Body {
- Body::from(Chunk::from(slice))
+ Body::from(Bytes::from(slice))
}
}
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -451,14 +443,14 @@ impl From<Cow<'static, [u8]>> for Body {
impl From<String> for Body {
#[inline]
fn from(s: String) -> Body {
- Body::from(Chunk::from(s.into_bytes()))
+ Body::from(Bytes::from(s.into_bytes()))
}
}
impl From<&'static str> for Body {
#[inline]
fn from(slice: &'static str) -> Body {
- Body::from(Chunk::from(slice.as_bytes()))
+ Body::from(Bytes::from(slice.as_bytes()))
}
}
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -486,7 +478,7 @@ impl Sender {
}
/// Send data on this channel when it is ready.
- pub async fn send_data(&mut self, chunk: Chunk) -> crate::Result<()> {
+ pub async fn send_data(&mut self, chunk: Bytes) -> crate::Result<()> {
futures_util::future::poll_fn(|cx| self.poll_ready(cx)).await?;
self.tx
.try_send(Ok(chunk))
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -497,15 +489,15 @@ impl Sender {
///
/// # Errors
///
- /// Returns `Err(Chunk)` if the channel could not (currently) accept
- /// another `Chunk`.
+ /// Returns `Err(Bytes)` if the channel could not (currently) accept
+ /// another `Bytes`.
///
/// # Note
///
/// This is mostly useful for when trying to send from some other thread
/// that doesn't have an async context. If in an async context, prefer
/// [`send_data`][] instead.
- pub fn try_send_data(&mut self, chunk: Chunk) -> Result<(), Chunk> {
+ pub fn try_send_data(&mut self, chunk: Bytes) -> Result<(), Bytes> {
self.tx
.try_send(Ok(chunk))
.map_err(|err| err.into_inner().expect("just sent Ok"))
diff --git a/src/body/mod.rs b/src/body/mod.rs
--- a/src/body/mod.rs
+++ b/src/body/mod.rs
@@ -15,14 +15,13 @@
//! requests and client responses). It is also a decent default implementation
//! if you don't have very custom needs of your send streams.
+pub use bytes::{Buf, Bytes};
pub use http_body::Body as HttpBody;
pub use self::body::{Body, Sender};
-pub use self::chunk::Chunk;
pub(crate) use self::payload::Payload;
mod body;
-mod chunk;
mod payload;
/// An optimization to try to take a full body if immediately available.
diff --git a/src/body/mod.rs b/src/body/mod.rs
--- a/src/body/mod.rs
+++ b/src/body/mod.rs
@@ -56,6 +55,5 @@ fn _assert_send_sync() {
fn _assert_sync<T: Sync>() {}
_assert_send::<Body>();
- _assert_send::<Chunk>();
- _assert_sync::<Chunk>();
+ _assert_sync::<Body>();
}
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -12,7 +12,6 @@ use super::{/*Decode,*/ Decoder, Encode, EncodedBuf, Encoder, Http1Transaction,
use crate::common::{task, Pin, Poll, Unpin};
use crate::headers::connection_keep_alive;
use crate::proto::{BodyLength, DecodedLength, MessageHead};
-use crate::Chunk;
const H2_PREFACE: &'static [u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n";
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -205,7 +204,7 @@ where
pub fn poll_read_body(
&mut self,
cx: &mut task::Context<'_>,
- ) -> Poll<Option<io::Result<Chunk>>> {
+ ) -> Poll<Option<io::Result<Bytes>>> {
debug_assert!(self.can_read_body());
let (reading, ret) = match self.state.reading {
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -217,7 +216,7 @@ where
(
Reading::KeepAlive,
if !slice.is_empty() {
- Some(Ok(Chunk::from(slice)))
+ Some(Ok(slice))
} else {
None
},
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -229,7 +228,7 @@ where
// an empty slice...
(Reading::Closed, None)
} else {
- return Poll::Ready(Some(Ok(Chunk::from(slice))));
+ return Poll::Ready(Some(Ok(slice)));
};
(reading, Poll::Ready(chunk))
}
|
I'm a firm +1 to this suggestion. I'd also be okay with _maybe_ replacing `Bytes` usage with _just_ `Vec<u8>`, but I feel less strongly about the latter point.
I haven't checked if `BytesMut` is reused internally once all strong refs are gone. I assumed that was the major reason for using `Bytes` in the first place.
I think it might be, but I’m not qualified to give an answer on that at the moment. Disregard my comment about Bytes!
This could work out. I'd just hope all the same convenient constructors match up (I think in bytes 0.4, `Bytes::from(static_str)` will make a copy, but that's fixed in 0.5.x).
When you say "same convenient constructors match up", are you referring to the constructors on Bytes in 0.5.x, or something else?
Yea, I think my concerns are fixed in 0.5.x, so I'd probably wait until that is available before making the change in hyper.
@seanmonstar , I interpreted this issue as implying that you wanted to make Chunk into a type alias, but it's also possible that you intended that Chunk should be removed entirely. I've just submitted a PR for the former.
|
2019-12-06T00:58:05Z
| 2,048
|
Remove Chunk in favor of using Bytes directly
Compared to `Bytes`, `Chunk` is pointless and kinda frustrating:
* It's a trivial wrapper around `Bytes` but `Bytes` is exposed via the `From`/`Into` impl anyways so it's not like it's keeping it out of the public API
* `Chunk` adds 0 new functionality over `Bytes`, all of its methods forwards to `Bytes` anyway
* There's no way to split a `Chunk` without round-tripping through `Bytes`
* Any third-party crate that wants to build on `Stream<Item = Bytes>` needs special adapters to work with `hyper::Body`
That last one is a real problem for me because I'm finally updating `multipart-async` and trying to maximize interop by generifying the API as much as possible. Because `hyper::Body` doesn't implement `AsyncRead` I've instead standardized around a `Stream<Item = impl BodyChunk>` API where `BodyChunk` covers two operations: splitting and dereffing to `&[u8]`. My final constraint is that I want to be able to return the same `impl BodyChunk` type, mostly to make it easy to drop `multipart-async` into existing request handlers.
Without implementing `BodyChunk` specifically for `hyper::Chunk` while still having an applicable impl for it, I would need an impl that's generic over `T: From<Bytes>, Bytes: From<T>, T: AsRef<[u8]>`. My hangup on this is that I now cannot implement `BodyChunk` for `&[u8]` directly, which complicates testing as I have to convert all bytestring literals to `Bytes`. (I also want to avoid a generic impl based on `Into<Bytes>` as that operation may perform implicit copies and I'm trying to be near-zero-copy.)
I realize `Chunk` exists because it used to be a custom implementation of `Bytes` but at this point it can be changed to just be a type alias and basically nothing will break (except usages of `Chunk::into_bytes()`, ironically).
|
hyperium__hyper-2048
|
diff --git a/src/body/chunk.rs /dev/null
--- a/src/body/chunk.rs
+++ /dev/null
@@ -1,175 +0,0 @@
-use std::fmt;
-
-use bytes::{Buf, Bytes};
-
-/// A piece of a message body.
-///
-/// These are returned by [`Body`](::Body). It is an efficient buffer type.
-///
-/// A `Chunk` can be easily created by many of Rust's standard types that
-/// represent a collection of bytes, using `Chunk::from`.
-pub struct Chunk {
- /// The buffer of bytes making up this body.
- bytes: Bytes,
-}
-
-// An unexported type to prevent locking `Chunk::into_iter()` to `Bytes::into_iter()`.
-#[derive(Debug)]
-pub struct IntoIter {
- inner: <Bytes as IntoIterator>::IntoIter,
-}
-
-impl Chunk {
- /// Converts this `Chunk` directly into the `Bytes` type without copies.
- ///
- /// This is simply an inherent alias for `Bytes::from(chunk)`, which exists,
- /// but doesn't appear in rustdocs.
- #[inline]
- pub fn into_bytes(self) -> Bytes {
- self.into()
- }
-}
-
-impl Buf for Chunk {
- #[inline]
- fn remaining(&self) -> usize {
- //perf: Bytes::len() isn't inline yet,
- //so it's slightly slower than checking
- //the length of the slice.
- self.bytes().len()
- }
-
- #[inline]
- fn bytes(&self) -> &[u8] {
- &self.bytes
- }
-
- #[inline]
- fn advance(&mut self, cnt: usize) {
- self.bytes.advance(cnt);
- }
-}
-
-impl From<Vec<u8>> for Chunk {
- #[inline]
- fn from(v: Vec<u8>) -> Chunk {
- Chunk::from(Bytes::from(v))
- }
-}
-
-impl From<&'static [u8]> for Chunk {
- #[inline]
- fn from(slice: &'static [u8]) -> Chunk {
- Chunk::from(Bytes::from_static(slice))
- }
-}
-
-impl From<String> for Chunk {
- #[inline]
- fn from(s: String) -> Chunk {
- s.into_bytes().into()
- }
-}
-
-impl From<&'static str> for Chunk {
- #[inline]
- fn from(slice: &'static str) -> Chunk {
- slice.as_bytes().into()
- }
-}
-
-impl From<Bytes> for Chunk {
- #[inline]
- fn from(bytes: Bytes) -> Chunk {
- Chunk { bytes: bytes }
- }
-}
-
-impl From<Chunk> for Bytes {
- #[inline]
- fn from(chunk: Chunk) -> Bytes {
- chunk.bytes
- }
-}
-
-impl ::std::ops::Deref for Chunk {
- type Target = [u8];
-
- #[inline]
- fn deref(&self) -> &Self::Target {
- self.as_ref()
- }
-}
-
-impl AsRef<[u8]> for Chunk {
- #[inline]
- fn as_ref(&self) -> &[u8] {
- &self.bytes
- }
-}
-
-impl fmt::Debug for Chunk {
- #[inline]
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- fmt::Debug::fmt(&self.bytes, f)
- }
-}
-
-impl Default for Chunk {
- #[inline]
- fn default() -> Chunk {
- Chunk::from(Bytes::new())
- }
-}
-
-impl IntoIterator for Chunk {
- type Item = u8;
- type IntoIter = IntoIter;
-
- #[inline]
- fn into_iter(self) -> Self::IntoIter {
- IntoIter {
- inner: self.bytes.into_iter(),
- }
- }
-}
-
-impl Iterator for IntoIter {
- type Item = u8;
-
- #[inline]
- fn next(&mut self) -> Option<Self::Item> {
- self.inner.next()
- }
-
- #[inline]
- fn size_hint(&self) -> (usize, Option<usize>) {
- self.inner.size_hint()
- }
-}
-
-impl ExactSizeIterator for IntoIter {}
-
-#[cfg(test)]
-mod tests {
- #[cfg(feature = "nightly")]
- use test::Bencher;
-
- #[cfg(feature = "nightly")]
- #[bench]
- fn bench_chunk_static_buf(b: &mut Bencher) {
- use bytes::BufMut;
-
- let s = "Hello, World!";
- b.bytes = s.len() as u64;
-
- let mut dst = Vec::with_capacity(128);
-
- b.iter(|| {
- let chunk = crate::Chunk::from(s);
- dst.put(chunk);
- ::test::black_box(&dst);
- dst.clear();
- })
- }
-}
diff --git a/src/lib.rs b/src/lib.rs
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -39,7 +39,7 @@ extern crate test;
pub use http::{header, HeaderMap, Method, Request, Response, StatusCode, Uri, Version};
-pub use crate::body::{Body, Chunk};
+pub use crate::body::Body;
pub use crate::client::Client;
pub use crate::error::{Error, Result};
pub use crate::server::Server;
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -930,7 +929,7 @@ mod tests {
// an empty IO, we'll be skipping and using the read buffer anyways
let io = tokio_test::io::Builder::new().build();
- let mut conn = Conn::<_, crate::Chunk, crate::proto::h1::ServerTransaction>::new(io);
+ let mut conn = Conn::<_, bytes::Bytes, crate::proto::h1::ServerTransaction>::new(io);
*conn.io.read_buf_mut() = ::bytes::BytesMut::from(&s[..]);
conn.state.cached_headers = Some(HeaderMap::with_capacity(2));
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -981,7 +980,7 @@ mod tests {
let good_message = b"GET / HTTP/1.1\r\n\r\n".to_vec();
let len = good_message.len();
let io = AsyncIo::new_buf(good_message, len);
- let mut conn = Conn::<_, proto::Chunk, ServerTransaction>::new(io);
+ let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io);
match conn.poll().unwrap() {
Async::Ready(Some(Frame::Message { message, body: false })) => {
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -999,7 +998,7 @@ mod tests {
let _: Result<(), ()> = future::lazy(|| {
let good_message = b"GET / HTTP/1.1\r\nHost: foo.bar\r\n\r\n".to_vec();
let io = AsyncIo::new_buf(good_message, 10);
- let mut conn = Conn::<_, proto::Chunk, ServerTransaction>::new(io);
+ let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io);
assert!(conn.poll().unwrap().is_not_ready());
conn.io.io_mut().block_in(50);
let async = conn.poll().unwrap();
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -1015,7 +1014,7 @@ mod tests {
#[test]
fn test_conn_init_read_eof_idle() {
let io = AsyncIo::new_buf(vec![], 1);
- let mut conn = Conn::<_, proto::Chunk, ServerTransaction>::new(io);
+ let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io);
conn.state.idle();
match conn.poll().unwrap() {
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -1027,7 +1026,7 @@ mod tests {
#[test]
fn test_conn_init_read_eof_idle_partial_parse() {
let io = AsyncIo::new_buf(b"GET / HTTP/1.1".to_vec(), 100);
- let mut conn = Conn::<_, proto::Chunk, ServerTransaction>::new(io);
+ let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io);
conn.state.idle();
match conn.poll() {
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -1041,7 +1040,7 @@ mod tests {
let _: Result<(), ()> = future::lazy(|| {
// server ignores
let io = AsyncIo::new_eof();
- let mut conn = Conn::<_, proto::Chunk, ServerTransaction>::new(io);
+ let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io);
conn.state.busy();
match conn.poll().unwrap() {
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -1051,7 +1050,7 @@ mod tests {
// client
let io = AsyncIo::new_eof();
- let mut conn = Conn::<_, proto::Chunk, ClientTransaction>::new(io);
+ let mut conn = Conn::<_, proto::Bytes, ClientTransaction>::new(io);
conn.state.busy();
match conn.poll() {
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -1066,7 +1065,7 @@ mod tests {
fn test_conn_body_finish_read_eof() {
let _: Result<(), ()> = future::lazy(|| {
let io = AsyncIo::new_eof();
- let mut conn = Conn::<_, proto::Chunk, ClientTransaction>::new(io);
+ let mut conn = Conn::<_, proto::Bytes, ClientTransaction>::new(io);
conn.state.busy();
conn.state.writing = Writing::KeepAlive;
conn.state.reading = Reading::Body(Decoder::length(0));
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -1091,7 +1090,7 @@ mod tests {
fn test_conn_message_empty_body_read_eof() {
let _: Result<(), ()> = future::lazy(|| {
let io = AsyncIo::new_buf(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n".to_vec(), 1024);
- let mut conn = Conn::<_, proto::Chunk, ClientTransaction>::new(io);
+ let mut conn = Conn::<_, proto::Bytes, ClientTransaction>::new(io);
conn.state.busy();
conn.state.writing = Writing::KeepAlive;
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -1115,7 +1114,7 @@ mod tests {
fn test_conn_read_body_end() {
let _: Result<(), ()> = future::lazy(|| {
let io = AsyncIo::new_buf(b"POST / HTTP/1.1\r\nContent-Length: 5\r\n\r\n12345".to_vec(), 1024);
- let mut conn = Conn::<_, proto::Chunk, ServerTransaction>::new(io);
+ let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io);
conn.state.busy();
match conn.poll() {
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -1145,7 +1144,7 @@ mod tests {
#[test]
fn test_conn_closed_read() {
let io = AsyncIo::new_buf(vec![], 0);
- let mut conn = Conn::<_, proto::Chunk, ServerTransaction>::new(io);
+ let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io);
conn.state.close();
match conn.poll().unwrap() {
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -1159,7 +1158,7 @@ mod tests {
let _ = pretty_env_logger::try_init();
let _: Result<(), ()> = future::lazy(|| {
let io = AsyncIo::new_buf(vec![], 0);
- let mut conn = Conn::<_, proto::Chunk, ServerTransaction>::new(io);
+ let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io);
let max = super::super::io::DEFAULT_MAX_BUFFER_SIZE + 4096;
conn.state.writing = Writing::Body(Encoder::length((max * 2) as u64));
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -1184,7 +1183,7 @@ mod tests {
fn test_conn_body_write_chunked() {
let _: Result<(), ()> = future::lazy(|| {
let io = AsyncIo::new_buf(vec![], 4096);
- let mut conn = Conn::<_, proto::Chunk, ServerTransaction>::new(io);
+ let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io);
conn.state.writing = Writing::Body(Encoder::chunked());
assert!(conn.start_send(Frame::Body { chunk: Some("headers".into()) }).unwrap().is_ready());
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -1197,7 +1196,7 @@ mod tests {
fn test_conn_body_flush() {
let _: Result<(), ()> = future::lazy(|| {
let io = AsyncIo::new_buf(vec![], 1024 * 1024 * 5);
- let mut conn = Conn::<_, proto::Chunk, ServerTransaction>::new(io);
+ let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io);
conn.state.writing = Writing::Body(Encoder::length(1024 * 1024));
assert!(conn.start_send(Frame::Body { chunk: Some(vec![b'a'; 1024 * 1024].into()) }).unwrap().is_ready());
assert!(!conn.can_buffer_body());
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -1234,7 +1233,7 @@ mod tests {
// test that once writing is done, unparks
let f = future::lazy(|| {
let io = AsyncIo::new_buf(vec![], 4096);
- let mut conn = Conn::<_, proto::Chunk, ServerTransaction>::new(io);
+ let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io);
conn.state.reading = Reading::KeepAlive;
assert!(conn.poll().unwrap().is_not_ready());
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -1248,7 +1247,7 @@ mod tests {
// test that flushing when not waiting on read doesn't unpark
let f = future::lazy(|| {
let io = AsyncIo::new_buf(vec![], 4096);
- let mut conn = Conn::<_, proto::Chunk, ServerTransaction>::new(io);
+ let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io);
conn.state.writing = Writing::KeepAlive;
assert!(conn.poll_complete().unwrap().is_ready());
Ok::<(), ()>(())
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -1259,7 +1258,7 @@ mod tests {
// test that flushing and writing isn't done doesn't unpark
let f = future::lazy(|| {
let io = AsyncIo::new_buf(vec![], 4096);
- let mut conn = Conn::<_, proto::Chunk, ServerTransaction>::new(io);
+ let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io);
conn.state.reading = Reading::KeepAlive;
assert!(conn.poll().unwrap().is_not_ready());
conn.state.writing = Writing::Body(Encoder::length(5_000));
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -1272,7 +1271,7 @@ mod tests {
#[test]
fn test_conn_closed_write() {
let io = AsyncIo::new_buf(vec![], 0);
- let mut conn = Conn::<_, proto::Chunk, ServerTransaction>::new(io);
+ let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io);
conn.state.close();
match conn.start_send(Frame::Body { chunk: Some(b"foobar".to_vec().into()) }) {
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -1286,7 +1285,7 @@ mod tests {
#[test]
fn test_conn_write_empty_chunk() {
let io = AsyncIo::new_buf(vec![], 0);
- let mut conn = Conn::<_, proto::Chunk, ServerTransaction>::new(io);
+ let mut conn = Conn::<_, proto::Bytes, ServerTransaction>::new(io);
conn.state.writing = Writing::KeepAlive;
assert!(conn.start_send(Frame::Body { chunk: None }).unwrap().is_ready());
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -646,7 +646,7 @@ mod tests {
// the request is ready to write later...
//let io = AsyncIo::new_buf(b"HTTP/1.1 200 OK\r\n\r\n".to_vec(), 0);
let (mut tx, rx) = crate::client::dispatch::channel();
- let conn = Conn::<_, crate::Chunk, ClientTransaction>::new(io);
+ let conn = Conn::<_, bytes::Bytes, ClientTransaction>::new(io);
let mut dispatcher = Dispatcher::new(Client::new(rx), conn);
// First poll is needed to allow tx to send...
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -681,7 +681,7 @@ mod tests {
.build();
let (mut tx, rx) = crate::client::dispatch::channel();
- let conn = Conn::<_, crate::Chunk, ClientTransaction>::new(io);
+ let conn = Conn::<_, bytes::Bytes, ClientTransaction>::new(io);
let mut dispatcher = tokio_test::task::spawn(Dispatcher::new(Client::new(rx), conn));
// First poll is needed to allow tx to send...
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -962,10 +962,10 @@ mod tests {
let s = "Hello, World!";
b.bytes = s.len() as u64;
- let mut write_buf = WriteBuf::<crate::Chunk>::new();
+ let mut write_buf = WriteBuf::<bytes::Bytes>::new();
write_buf.set_strategy(WriteStrategy::Flatten);
b.iter(|| {
- let chunk = crate::Chunk::from(s);
+ let chunk = bytes::Bytes::from(s);
write_buf.buffer(chunk);
::test::black_box(&write_buf);
write_buf.headers.bytes.clear();
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -28,7 +28,7 @@ fn tcp_connect(addr: &SocketAddr) -> impl Future<Output = std::io::Result<TcpStr
TcpStream::connect(*addr)
}
-async fn concat(mut body: Body) -> Result<hyper::Chunk, hyper::Error> {
+async fn concat(mut body: Body) -> Result<bytes::Bytes, hyper::Error> {
let mut vec = Vec::new();
while let Some(chunk) = body.next().await {
vec.extend_from_slice(&chunk?);
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -474,7 +474,7 @@ fn naive_proxy(cfg: ProxyConfig) -> (SocketAddr, impl Future<Output = ()>) {
(proxy_addr, srv.map(|res| res.expect("proxy error")))
}
-async fn concat(mut body: Body) -> Result<hyper::Chunk, hyper::Error> {
+async fn concat(mut body: Body) -> Result<bytes::Bytes, hyper::Error> {
let mut vec = Vec::new();
while let Some(chunk) = body.next().await {
vec.extend_from_slice(&chunk?);
|
hyperium/hyper
|
cc7d3058e84b63d26697026f44cdc153b208de9a
|
[
"1673"
] |
0.2
|
edbd10ac96c5cc6dbeca80ada80f143dbd13d118
|
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -2,11 +2,13 @@ use std::collections::{HashMap, HashSet, VecDeque};
use std::fmt;
use std::ops::{Deref, DerefMut};
use std::sync::{Arc, Mutex, Weak};
+
+#[cfg(not(feature = "runtime"))]
use std::time::{Duration, Instant};
use futures_channel::oneshot;
#[cfg(feature = "runtime")]
-use tokio::time::Interval;
+use tokio::time::{Duration, Instant, Interval};
use crate::common::{Exec, Future, Pin, Poll, Unpin, task};
use super::Ver;
|
I'm noticing this more and more, since AppVeyor won't build any jobs in parallel, nor branches, so if a couple PRs appear, it can take a couple hours to get run (and even just a single PR will run 4 jobs at around ~6-7minutes each, for ~30mins total).
Previous attempt: https://github.com/hyperium/hyper/pull/1690
Apparently [Azure Pipelines](https://azure.microsoft.com/en-us/services/devops/pipelines/?nav=min) allows up to 10 parallel builds for open source projects, so that might be an alternative. It also supports Windows, macOS and Linux.
|
2019-12-05T01:55:47Z
| 2,044
|
Switch from Travis and AppVeyor to GitHub Actions
AppVeyor is quite slow, and the current script in hyper tests Windows nightly, which can mean Windows testing can be flaky. It'd be nice to move to Travis-CI's new Windows support, running the tests on the same matrix as AppVeyor, but probably sticking with just stable Rust.
|
hyperium__hyper-2044
|
diff --git /dev/null b/.github/workflows/CI.yml
new file mode 100644
--- /dev/null
+++ b/.github/workflows/CI.yml
@@ -0,0 +1,76 @@
+name: CI
+on:
+ pull_request:
+ push:
+ branches:
+ - master
+
+env:
+ RUST_BACKTRACE: 1
+
+jobs:
+ test:
+ name: Test ${{ matrix.rust }} on ${{ matrix.os }}
+
+ strategy:
+ matrix:
+ rust:
+ - stable
+ - beta
+ - nightly
+ # - 1.39.0
+
+ os:
+ - ubuntu-latest
+ - windows-latest
+ - macOS-latest
+
+ include:
+ - rust: stable
+ features: ""
+ - rust: beta
+ features: ""
+ - rust: nightly
+ features: "--features nightly"
+ benches: true
+ # Limit the Happy Eyeballs tests to Linux
+ - rust: stable
+ os: ubuntu-latest
+ features: "--features __internal_happy_eyeballs_tests"
+ # - rust: 1.39.0
+ # features: "--no-default-features --features runtime"
+ # build-only: true
+
+ runs-on: ${{ matrix.os }}
+
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v1
+
+ - name: Install Rust (${{ matrix.rust }})
+ uses: actions-rs/toolchain@v1
+ with:
+ profile: minimal
+ toolchain: ${{ matrix.rust }}
+ override: true
+
+ - name: Build only
+ if: matrix.build-only
+ uses: actions-rs/cargo@v1
+ with:
+ command: build
+ args: ${{ matrix.features }}
+
+ - name: Test
+ if: matrix.build-only != true
+ uses: actions-rs/cargo@v1
+ with:
+ command: test
+ args: ${{ matrix.features }}
+
+ - name: Test all benches
+ if: matrix.benches && matrix.build-only != true
+ uses: actions-rs/cargo@v1
+ with:
+ command: test
+ args: --benches ${{ matrix.features }}
diff --git a/.travis.yml /dev/null
--- a/.travis.yml
+++ /dev/null
@@ -1,33 +0,0 @@
-language: rust
-sudo: true # Required for functional IPv6 (forces VM instead of Docker).
-dist: trusty
-matrix:
- fast_finish: true
- include:
- - rust: nightly
- env: FEATURES="--no-default-features --features runtime,stream,nightly"
- - rust: beta
- env: FEATURES="--no-default-features --features runtime,stream,__internal_happy_eyeballs_tests"
- #- rust: stable
- # env: FEATURES="--no-default-features --features runtime,stream,__internal_happy_eyeballs_tests"
- - rust: beta #stable
- env: FEATURES="--no-default-features"
- # Minimum Supported Rust Version
- #- rust: 1.39.0
- # env: FEATURES="--no-default-features --features runtime" BUILD_ONLY="1"
-
-before_script:
- # Add an IPv6 config - see the corresponding Travis issue
- # https://github.com/travis-ci/travis-ci/issues/83
- - if [ "${TRAVIS_OS_NAME}" == "linux" ]; then
- sudo sh -c 'echo 0 > /proc/sys/net/ipv6/conf/all/disable_ipv6';
- fi
-
-script:
- - 'if [ "$BUILD_ONLY" != "1" ]; then cargo test $FEATURES -- --test-threads=1; fi'
- - 'if [ $TRAVIS_RUST_VERSION = nightly ]; then cargo test --benches $FEATURES; fi'
-
-env:
- global:
- - RUST_BACKTRACE=1
- - secure: KipdEhZsGIrb2W0HsDbC95x8FJ1RKEWPq8uSK8wSZwGw6MtvoZDX0edfrtf4o3/skA0h84yn35ZWF/rpo1ZEesgFY1g+l+me+jtyGvMwEsXTGjNP4oNR2MrDizjO8eYDm4hRUCLEmJVvsq4j7oNVdLGHfdrcnwqk8/NxJsRzqXM=
diff --git a/Cargo.toml b/Cargo.toml
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -55,7 +55,6 @@ url = "1.0"
[features]
default = [
- "__internal_flaky_tests",
"runtime",
"stream",
]
diff --git a/Cargo.toml b/Cargo.toml
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -75,7 +74,6 @@ stream = []
# internal features used in CI
nightly = []
-__internal_flaky_tests = []
__internal_happy_eyeballs_tests = []
[package.metadata.docs.rs]
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -899,6 +901,9 @@ mod tests {
#[cfg(feature = "runtime")]
#[tokio::test]
async fn test_pool_timer_removes_expired() {
+ let _ = pretty_env_logger::try_init();
+ tokio::time::pause();
+
let pool = Pool::new(super::Config {
enabled: true,
keep_alive_timeout: Some(Duration::from_millis(10)),
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -916,7 +921,9 @@ mod tests {
assert_eq!(pool.locked().idle.get(&key).map(|entries| entries.len()), Some(3));
// Let the timer tick passed the expiration...
- tokio::time::delay_for(Duration::from_millis(50)).await;
+ tokio::time::advance(Duration::from_millis(30)).await;
+ // Yield so the Interval can reap...
+ tokio::task::yield_now().await;
assert!(pool.locked().idle.get(&key).is_none());
}
diff --git a/tests/integration.rs b/tests/integration.rs
--- a/tests/integration.rs
+++ b/tests/integration.rs
@@ -333,9 +333,6 @@ t! {
;
}
-// In rare cases, the h2 client connection does not shutdown, resulting
-// in this test simply hanging... :(
-#[cfg(feature = "__internal_flaky_tests")]
t! {
http2_parallel_10,
parallel: 0..10
|
hyperium/hyper
|
cc7d3058e84b63d26697026f44cdc153b208de9a
|
[
"2034"
] |
0.2
|
aa66de4f27748cad84efe8113744f607e7b6b79a
|
diff --git a/Cargo.toml b/Cargo.toml
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -69,8 +70,8 @@ tcp = [
"tokio/time",
]
-# unstable features
-unstable-stream = []
+# `impl Stream` for things
+stream = []
# internal features used in CI
nightly = []
diff --git a/Cargo.toml b/Cargo.toml
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -99,12 +100,12 @@ required-features = ["runtime"]
[[example]]
name = "client_json"
path = "examples/client_json.rs"
-required-features = ["runtime", "unstable-stream"]
+required-features = ["runtime", "stream"]
[[example]]
name = "echo"
path = "examples/echo.rs"
-required-features = ["runtime", "unstable-stream"]
+required-features = ["runtime", "stream"]
[[example]]
name = "hello"
diff --git a/Cargo.toml b/Cargo.toml
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -119,7 +120,7 @@ required-features = ["runtime"]
[[example]]
name = "params"
path = "examples/params.rs"
-required-features = ["runtime", "unstable-stream"]
+required-features = ["runtime", "stream"]
[[example]]
name = "proxy"
diff --git a/Cargo.toml b/Cargo.toml
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -160,7 +161,7 @@ required-features = ["runtime"]
[[example]]
name = "web_api"
path = "examples/web_api.rs"
-required-features = ["runtime", "unstable-stream"]
+required-features = ["runtime", "stream"]
[[bench]]
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -1,12 +1,12 @@
use std::borrow::Cow;
-#[cfg(feature = "unstable-stream")]
+#[cfg(feature = "stream")]
use std::error::Error as StdError;
use std::fmt;
use bytes::Bytes;
use futures_core::Stream; // for mpsc::Receiver
use futures_channel::{mpsc, oneshot};
-#[cfg(feature = "unstable-stream")]
+#[cfg(feature = "stream")]
use futures_util::TryStreamExt;
use http_body::{SizeHint, Body as HttpBody};
use http::HeaderMap;
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -43,7 +43,7 @@ enum Kind {
// while a borrow of a `Request<Body>` exists.
//
// See https://github.com/rust-lang/rust/issues/57017
- #[cfg(feature = "unstable-stream")]
+ #[cfg(feature = "stream")]
Wrapped(Pin<Box<dyn Stream<Item = Result<Chunk, Box<dyn StdError + Send + Sync>>> + Send + Sync>>),
}
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -142,11 +142,11 @@ impl Body {
/// # }
/// ```
///
- /// # Unstable
+ /// # Optional
///
- /// This function requires enabling the `unstable-stream` feature in your
+ /// This function requires enabling the `stream` feature in your
/// `Cargo.toml`.
- #[cfg(feature = "unstable-stream")]
+ #[cfg(feature = "stream")]
pub fn wrap_stream<S, O, E>(stream: S) -> Body
where
S: Stream<Item = Result<O, E>> + Send + Sync + 'static,
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -280,7 +280,7 @@ impl Body {
None => Poll::Ready(None),
},
- #[cfg(feature = "unstable-stream")]
+ #[cfg(feature = "stream")]
Kind::Wrapped(ref mut s) => {
match ready!(s.as_mut().poll_next(cx)) {
Some(res) => Poll::Ready(Some(res.map_err(crate::Error::new_body))),
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -330,7 +330,7 @@ impl HttpBody for Body {
Kind::Once(ref val) => val.is_none(),
Kind::Chan { content_length, .. } => content_length == Some(0),
Kind::H2 { recv: ref h2, .. } => h2.is_end_stream(),
- #[cfg(feature = "unstable-stream")]
+ #[cfg(feature = "stream")]
Kind::Wrapped(..) => false,
}
}
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -345,7 +345,7 @@ impl HttpBody for Body {
Kind::Once(None) => {
SizeHint::default()
},
- #[cfg(feature = "unstable-stream")]
+ #[cfg(feature = "stream")]
Kind::Wrapped(..) => SizeHint::default(),
Kind::Chan { content_length, .. } | Kind::H2 { content_length, .. } => {
let mut hint = SizeHint::default();
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -380,11 +380,11 @@ impl fmt::Debug for Body {
}
}
-/// # Unstable
+/// # Optional
///
-/// This function requires enabling the `unstable-stream` feature in your
+/// This function requires enabling the `stream` feature in your
/// `Cargo.toml`.
-#[cfg(feature = "unstable-stream")]
+#[cfg(feature = "stream")]
impl Stream for Body {
type Item = crate::Result<Chunk>;
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -394,11 +394,11 @@ impl Stream for Body {
}
-/// # Unstable
+/// # Optional
///
-/// This function requires enabling the `unstable-stream` feature in your
+/// This function requires enabling the `stream` feature in your
/// `Cargo.toml`.
-#[cfg(feature = "unstable-stream")]
+#[cfg(feature = "stream")]
impl
From<Box<dyn Stream<Item = Result<Chunk, Box<dyn StdError + Send + Sync>>> + Send + Sync>>
for Body
diff --git a/src/lib.rs b/src/lib.rs
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -27,10 +27,7 @@
//! executor.
//! - `tcp` (*enabled by default*): Enables convenient implementations over
//! TCP (using tokio).
-//! - `unstable-stream` (*unstable*): Provides `futures::Stream` capabilities.
-//!
-//! Due to the `Stream` trait not being stable, this feature is also
-//! unstable. It does not follow normal semver.
+//! - `stream` (*enabled by default*): Provides `futures::Stream` capabilities.
#[doc(hidden)] pub use http;
#[macro_use] extern crate log;
diff --git a/src/server/accept.rs b/src/server/accept.rs
--- a/src/server/accept.rs
+++ b/src/server/accept.rs
@@ -6,7 +6,7 @@
//! connections.
//! - Utilities like `poll_fn` to ease creating a custom `Accept`.
-#[cfg(feature = "unstable-stream")]
+#[cfg(feature = "stream")]
use futures_core::Stream;
use crate::common::{Pin, task::{self, Poll}};
diff --git a/src/server/accept.rs b/src/server/accept.rs
--- a/src/server/accept.rs
+++ b/src/server/accept.rs
@@ -68,11 +68,11 @@ where
/// Adapt a `Stream` of incoming connections into an `Accept`.
///
-/// # Unstable
+/// # Optional
///
-/// This function requires enabling the `unstable-stream` feature in your
+/// This function requires enabling the `stream` feature in your
/// `Cargo.toml`.
-#[cfg(feature = "unstable-stream")]
+#[cfg(feature = "stream")]
pub fn from_stream<S, IO, E>(stream: S) -> impl Accept<Conn = IO, Error = E>
where
S: Stream<Item = Result<IO, E>>,
|
To me, `Stream` seemed like a core trait, just like `Future`, so I was surprised when I had to switch it on in hyper.
I know that the reality around what’s core at the moment is a bit more complicated at the moment, but the above is just my initial reaction.
I would vote yes for `stream` as default, but perhaps there’s downsides I didn’t think of.
I think it should be. Worst comes to worst, Hyper can issue a smaller `0.14` release that walks back on the on-by-default `stream` feature.
I forgot to note this, but: I think it's reasonable to say that a hypothetical 0.14 release will come out in a half a year to a year's time. I don't believe `Stream` will be in the standard library six months from now.
|
2019-12-05T00:50:21Z
| 2,042
|
Rename `unstable-stream` feature to just `stream`
While hyper is pre-1.0, it's not really fair to say other pre-1.0 dependencies are unstable.
Should `stream` be enabled by default?
|
hyperium__hyper-2042
|
diff --git a/.travis.yml b/.travis.yml
--- a/.travis.yml
+++ b/.travis.yml
@@ -5,11 +5,11 @@ matrix:
fast_finish: true
include:
- rust: nightly
- env: FEATURES="--no-default-features --features runtime,unstable-stream,nightly"
+ env: FEATURES="--no-default-features --features runtime,stream,nightly"
- rust: beta
- env: FEATURES="--no-default-features --features runtime,unstable-stream,__internal_happy_eyeballs_tests"
+ env: FEATURES="--no-default-features --features runtime,stream,__internal_happy_eyeballs_tests"
#- rust: stable
- # env: FEATURES="--no-default-features --features runtime,unstable-stream,__internal_happy_eyeballs_tests"
+ # env: FEATURES="--no-default-features --features runtime,stream,__internal_happy_eyeballs_tests"
- rust: beta #stable
env: FEATURES="--no-default-features"
# Minimum Supported Rust Version
diff --git a/Cargo.toml b/Cargo.toml
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -57,6 +57,7 @@ url = "1.0"
default = [
"__internal_flaky_tests",
"runtime",
+ "stream",
]
runtime = [
"tcp",
diff --git a/Cargo.toml b/Cargo.toml
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -80,7 +81,7 @@ __internal_happy_eyeballs_tests = []
[package.metadata.docs.rs]
features = [
"runtime",
- "unstable-stream",
+ "stream",
]
[profile.release]
diff --git a/Cargo.toml b/Cargo.toml
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -181,18 +182,18 @@ required-features = ["runtime"]
[[bench]]
name = "server"
path = "benches/server.rs"
-required-features = ["runtime", "unstable-stream"]
+required-features = ["runtime", "stream"]
[[test]]
name = "client"
path = "tests/client.rs"
-required-features = ["runtime", "unstable-stream"]
+required-features = ["runtime", "stream"]
[[test]]
name = "integration"
path = "tests/integration.rs"
-required-features = ["runtime", "unstable-stream"]
+required-features = ["runtime", "stream"]
[[test]]
name = "server"
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -15,7 +15,7 @@ use std::time::Duration;
use futures_channel::oneshot;
use futures_util::future::{self, Either, FutureExt, TryFutureExt};
-#[cfg(feature = "unstable-stream")]
+#[cfg(feature = "stream")]
use futures_util::stream::StreamExt as _;
use http::header::{HeaderName, HeaderValue};
use tokio::net::{TcpListener, TcpStream as TkTcpStream};
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1383,7 +1383,7 @@ async fn max_buf_size() {
.expect_err("should TooLarge error");
}
-#[cfg(feature = "unstable-stream")]
+#[cfg(feature = "stream")]
#[test]
fn streaming_body() {
let _ = pretty_env_logger::try_init();
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1497,7 +1497,7 @@ async fn http2_service_error_sends_reset_reason() {
assert_eq!(h2_err.reason(), Some(h2::Reason::INADEQUATE_SECURITY));
}
-#[cfg(feature = "unstable-stream")]
+#[cfg(feature = "stream")]
#[test]
fn http2_body_user_error_sends_reset_reason() {
use std::error::Error;
|
hyperium/hyper
|
cc7d3058e84b63d26697026f44cdc153b208de9a
|
[
"2039"
] |
0.2
|
4d7a2266b88b2c5c92231bcd2bd75d5842198add
|
diff --git a/Cargo.toml b/Cargo.toml
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -25,7 +25,7 @@ futures-core = { version = "0.3", default-features = false }
futures-channel = "0.3"
futures-util = { version = "0.3", default-features = false }
http = "0.2"
-http-body = "0.2"
+http-body = "0.3"
httparse = "1.0"
h2 = "0.2"
itoa = "0.4.1"
diff --git a/benches/end_to_end.rs b/benches/end_to_end.rs
--- a/benches/end_to_end.rs
+++ b/benches/end_to_end.rs
@@ -319,7 +319,7 @@ impl Opts {
async {
let res = fut.await.expect("client wait");
let mut body = res.into_body();
- while let Some(_chunk) = body.next().await {}
+ while let Some(_chunk) = body.data().await {}
}
};
diff --git a/benches/end_to_end.rs b/benches/end_to_end.rs
--- a/benches/end_to_end.rs
+++ b/benches/end_to_end.rs
@@ -356,7 +356,7 @@ fn spawn_server(rt: &mut tokio::runtime::Runtime, opts: &Opts) -> SocketAddr {
.serve(make_service_fn( move |_| async move {
Ok::<_, hyper::Error>(service_fn(move |req: Request<Body>| async move {
let mut req_body = req.into_body();
- while let Some(_chunk) = req_body.next().await {}
+ while let Some(_chunk) = req_body.data().await {}
Ok::<_, hyper::Error>(Response::new(Body::from(body)))
}))
}))
diff --git a/examples/client.rs b/examples/client.rs
--- a/examples/client.rs
+++ b/examples/client.rs
@@ -1,9 +1,9 @@
#![deny(warnings)]
#![warn(rust_2018_idioms)]
use std::env;
-use std::io::{self, Write};
use hyper::{Client, body::HttpBody as _};
+use tokio::io::{self, AsyncWriteExt as _};
// A simple type alias so as to DRY.
type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>;
diff --git a/examples/client.rs b/examples/client.rs
--- a/examples/client.rs
+++ b/examples/client.rs
@@ -35,16 +35,14 @@ async fn main() -> Result<()> {
async fn fetch_url(url: hyper::Uri) -> Result<()> {
let client = Client::new();
- let res = client.get(url).await?;
+ let mut res = client.get(url).await?;
println!("Response: {}", res.status());
println!("Headers: {:#?}\n", res.headers());
- let mut body = res.into_body();
-
- while let Some(next) = body.next().await {
+ while let Some(next) = res.body_mut().data().await {
let chunk = next?;
- io::stdout().write_all(&chunk)?;
+ io::stdout().write_all(&chunk).await?;
}
println!("\n\nDone!");
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -42,7 +42,7 @@
//! // Concatenate the body stream into a single buffer...
//! let mut body = res.into_body();
//! let mut bytes = Vec::new();
-//! while let Some(next) = body.next().await {
+//! while let Some(next) = body.data().await {
//! let chunk = next?;
//! bytes.extend(chunk);
//! }
|
2019-12-05T00:30:16Z
| 2,040
|
Update to http-body 0.3
|
hyperium__hyper-2040
|
diff --git a/Cargo.toml b/Cargo.toml
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -49,7 +49,7 @@ spmc = "0.3"
serde = "1.0"
serde_derive = "1.0"
serde_json = "1.0"
-tokio = { version = "0.2.2", features = ["fs", "macros", "rt-util", "sync", "time", "test-util"] }
+tokio = { version = "0.2.2", features = ["fs", "macros", "io-std", "rt-util", "sync", "time", "test-util"] }
tokio-test = "0.2"
url = "1.0"
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -23,6 +23,7 @@ use tokio::runtime::Runtime;
use tokio::io::{AsyncRead, AsyncWrite};
use hyper::{Body, Request, Response, StatusCode, Version};
+use hyper::body::HttpBody as _;
use hyper::client::Client;
use hyper::server::conn::Http;
use hyper::server::Server;
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1779,7 +1780,7 @@ impl tower_service::Service<Request<Body>> for TestService {
let replies = self.reply.clone();
Box::pin(async move {
- while let Some(chunk) = hyper::body::HttpBody::next(req.body_mut()).await {
+ while let Some(chunk) = req.body_mut().data().await {
match chunk {
Ok(chunk) => {
tx.send(Msg::Chunk(chunk.to_vec())).unwrap();
|
hyperium/hyper
|
cc7d3058e84b63d26697026f44cdc153b208de9a
|
|
[
"2025"
] |
0.2
|
131962c86ab0a31c2413261cf4532eca88d67dcb
|
diff --git a/Cargo.toml b/Cargo.toml
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -20,35 +20,28 @@ include = [
]
[dependencies]
-bytes = "0.4.6"
-futures-core = "0.3.1"
-futures-channel = "0.3.1"
-futures-util = "0.3.1"
-http = "0.1.15"
-http-body = "=0.2.0-alpha.3"
+bytes = "0.5"
+futures-core = { version = "0.3", default-features = false }
+futures-channel = "0.3"
+futures-util = { version = "0.3", default-features = false }
+http = "0.2"
+http-body = "0.2"
httparse = "1.0"
-h2 = "=0.2.0-alpha.3"
-iovec = "0.1"
+h2 = "0.2"
itoa = "0.4.1"
log = "0.4"
pin-project = "0.4"
time = "0.1"
tower-service = "=0.3.0-alpha.2"
-tokio-executor = "=0.2.0-alpha.6"
-tokio-io = "=0.2.0-alpha.6"
-tokio-sync = "=0.2.0-alpha.6"
+tokio = { version = "0.2", features = ["sync"] }
want = "0.3"
# Optional
net2 = { version = "0.2.32", optional = true }
-tokio = { version = "=0.2.0-alpha.6", optional = true, default-features = false, features = ["rt-full"] }
-tokio-net = { version = "=0.2.0-alpha.6", optional = true, features = ["tcp"] }
-tokio-timer = { version = "=0.3.0-alpha.6", optional = true }
-
[dev-dependencies]
-futures-util-a19 = { version = "=0.3.0-alpha.19", package = "futures-util-preview" }
+futures-util = { version = "0.3", default-features = false, features = ["alloc"] }
matches = "0.1"
num_cpus = "1.0"
pretty_env_logger = "0.3"
diff --git a/Cargo.toml b/Cargo.toml
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -68,13 +60,13 @@ default = [
]
runtime = [
"tcp",
- "tokio",
+ "tokio/time",
]
tcp = [
"net2",
- "tokio-executor/blocking",
- "tokio-net",
- "tokio-timer",
+ "tokio/blocking",
+ "tokio/tcp",
+ "tokio/time",
]
# unstable features
diff --git a/benches/end_to_end.rs b/benches/end_to_end.rs
--- a/benches/end_to_end.rs
+++ b/benches/end_to_end.rs
@@ -297,7 +300,7 @@ impl Opts {
for _ in 0..chunk_cnt {
tx.send_data(chunk.into()).await.expect("send_data");
}
- }).expect("body tx spawn");
+ });
body
} else {
self
diff --git a/benches/end_to_end.rs b/benches/end_to_end.rs
--- a/benches/end_to_end.rs
+++ b/benches/end_to_end.rs
@@ -340,22 +343,24 @@ impl Opts {
}
}
-fn spawn_server(rt: &mut Runtime, opts: &Opts) -> SocketAddr {
+fn spawn_server(rt: &mut tokio::runtime::Runtime, opts: &Opts) -> SocketAddr {
use hyper::service::{make_service_fn, service_fn};
let addr = "127.0.0.1:0".parse().unwrap();
let body = opts.response_body;
- let srv = Server::bind(&addr)
- .http2_only(opts.http2)
- .http2_initial_stream_window_size(opts.http2_stream_window)
- .http2_initial_connection_window_size(opts.http2_conn_window)
- .serve(make_service_fn( move |_| async move {
- Ok::<_, hyper::Error>(service_fn(move |req: Request<Body>| async move {
- let mut req_body = req.into_body();
- while let Some(_chunk) = req_body.next().await {}
- Ok::<_, hyper::Error>(Response::new(Body::from(body)))
+ let srv = rt.block_on(async move {
+ Server::bind(&addr)
+ .http2_only(opts.http2)
+ .http2_initial_stream_window_size(opts.http2_stream_window)
+ .http2_initial_connection_window_size(opts.http2_conn_window)
+ .serve(make_service_fn( move |_| async move {
+ Ok::<_, hyper::Error>(service_fn(move |req: Request<Body>| async move {
+ let mut req_body = req.into_body();
+ while let Some(_chunk) = req_body.next().await {}
+ Ok::<_, hyper::Error>(Response::new(Body::from(body)))
+ }))
}))
- }));
+ });
let addr = srv.local_addr();
rt.spawn(async {
if let Err(err) = srv.await {
diff --git a/benches/pipeline.rs b/benches/pipeline.rs
--- a/benches/pipeline.rs
+++ b/benches/pipeline.rs
@@ -8,7 +8,6 @@ use std::net::{TcpStream};
use std::sync::mpsc;
use std::time::Duration;
-use tokio::runtime::current_thread;
use tokio::sync::oneshot;
use hyper::{Body, Response, Server};
diff --git a/benches/server.rs b/benches/server.rs
--- a/benches/server.rs
+++ b/benches/server.rs
@@ -9,7 +9,6 @@ use std::sync::mpsc;
use std::time::Duration;
use futures_util::{stream, StreamExt};
-use tokio::runtime::current_thread;
use tokio::sync::oneshot;
use hyper::{Body, Response, Server};
diff --git a/benches/server.rs b/benches/server.rs
--- a/benches/server.rs
+++ b/benches/server.rs
@@ -33,8 +32,17 @@ macro_rules! bench_server {
)
}))
});
- let srv = Server::bind(&addr)
- .serve(make_svc);
+
+ let mut rt = tokio::runtime::Builder::new()
+ .enable_all()
+ .basic_scheduler()
+ .build()
+ .expect("rt build");
+
+ let srv = rt.block_on(async move {
+ Server::bind(&addr)
+ .serve(make_svc)
+ });
addr_tx.send(srv.local_addr()).unwrap();
diff --git a/benches/server.rs b/benches/server.rs
--- a/benches/server.rs
+++ b/benches/server.rs
@@ -42,13 +50,11 @@ macro_rules! bench_server {
.with_graceful_shutdown(async {
until_rx.await.ok();
});
- let mut rt = current_thread::Runtime::new().unwrap();
- rt.spawn(async {
+ rt.block_on(async move {
if let Err(e) = graceful.await {
panic!("server error: {}", e);
}
});
- rt.run().unwrap();
});
addr_rx.recv().unwrap()
diff --git a/examples/client.rs b/examples/client.rs
--- a/examples/client.rs
+++ b/examples/client.rs
@@ -4,6 +4,7 @@ use std::env;
use std::io::{self, Write};
use hyper::Client;
+use futures_util::StreamExt;
// A simple type alias so as to DRY.
type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>;
diff --git a/examples/client.rs b/examples/client.rs
--- a/examples/client.rs
+++ b/examples/client.rs
@@ -24,7 +25,7 @@ async fn main() -> Result<()> {
// HTTPS requires picking a TLS implementation, so give a better
// warning if the user tries to request an 'https' URL.
let url = url.parse::<hyper::Uri>().unwrap();
- if url.scheme_part().map(|s| s.as_ref()) != Some("http") {
+ if url.scheme_str() != Some("http") {
println!("This example only works with 'http' URLs.");
return Ok(());
}
diff --git a/examples/client_json.rs b/examples/client_json.rs
--- a/examples/client_json.rs
+++ b/examples/client_json.rs
@@ -5,7 +5,7 @@
extern crate serde_derive;
use hyper::Client;
-use futures_util::TryStreamExt;
+use futures_util::StreamExt;
// A simple type alias so as to DRY.
type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>;
diff --git a/examples/client_json.rs b/examples/client_json.rs
--- a/examples/client_json.rs
+++ b/examples/client_json.rs
@@ -27,9 +27,12 @@ async fn fetch_json(url: hyper::Uri) -> Result<Vec<User>> {
let client = Client::new();
// Fetch the url...
- let res = client.get(url).await?;
+ let mut res = client.get(url).await?;
// asynchronously concatenate chunks of the body
- let body = res.into_body().try_concat().await?;
+ let mut body = Vec::new();
+ while let Some(chunk) = res.body_mut().next().await {
+ body.extend_from_slice(&chunk?);
+ }
// try to parse as json with serde_json
let users = serde_json::from_slice(&body)?;
diff --git a/examples/echo.rs b/examples/echo.rs
--- a/examples/echo.rs
+++ b/examples/echo.rs
@@ -2,12 +2,11 @@
use hyper::{Body, Method, Request, Response, Server, StatusCode};
use hyper::service::{make_service_fn, service_fn};
-use futures_util::TryStreamExt;
+use futures_util::{StreamExt, TryStreamExt};
/// This is our service handler. It receives a Request, routes on its
/// path, and returns a Future of a Response.
-async fn echo(req: Request<Body>) -> Result<Response<Body>, hyper::Error> {
-
+async fn echo(mut req: Request<Body>) -> Result<Response<Body>, hyper::Error> {
match (req.method(), req.uri().path()) {
// Serve some instructions at /
(&Method::GET, "/") => {
diff --git a/examples/echo.rs b/examples/echo.rs
--- a/examples/echo.rs
+++ b/examples/echo.rs
@@ -37,13 +36,17 @@ async fn echo(req: Request<Body>) -> Result<Response<Body>, hyper::Error> {
// So here we do `.await` on the future, waiting on concatenating the full body,
// then afterwards the content can be reversed. Only then can we return a `Response`.
(&Method::POST, "/echo/reversed") => {
- let whole_chunk = req.into_body().try_concat().await;
-
- let reversed_chunk = whole_chunk.map(move |chunk| {
- chunk.iter().rev().cloned().collect::<Vec<u8>>()
-
- })?;
- Ok(Response::new(Body::from(reversed_chunk)))
+ let mut whole_body = Vec::new();
+ while let Some(chunk) = req.body_mut().next().await {
+ whole_body.extend_from_slice(&chunk?);
+ }
+
+ let reversed_body = whole_body
+ .iter()
+ .rev()
+ .cloned()
+ .collect::<Vec<u8>>();
+ Ok(Response::new(Body::from(reversed_body)))
}
// Return the 404 Not Found for other routes.
diff --git a/examples/params.rs b/examples/params.rs
--- a/examples/params.rs
+++ b/examples/params.rs
@@ -6,20 +6,24 @@ use hyper::service::{service_fn, make_service_fn};
use std::collections::HashMap;
use url::form_urlencoded;
-use futures_util::TryStreamExt;
+use futures_util::StreamExt;
static INDEX: &[u8] = b"<html><body><form action=\"post\" method=\"post\">Name: <input type=\"text\" name=\"name\"><br>Number: <input type=\"text\" name=\"number\"><br><input type=\"submit\"></body></html>";
static MISSING: &[u8] = b"Missing field";
static NOTNUMERIC: &[u8] = b"Number field is not numeric";
// Using service_fn, we can turn this function into a `Service`.
-async fn param_example(req: Request<Body>) -> Result<Response<Body>, hyper::Error> {
+async fn param_example(mut req: Request<Body>) -> Result<Response<Body>, hyper::Error> {
match (req.method(), req.uri().path()) {
(&Method::GET, "/") | (&Method::GET, "/post") => {
Ok(Response::new(INDEX.into()))
},
(&Method::POST, "/post") => {
- let b = req.into_body().try_concat().await?;
+ // Concatenate the body...
+ let mut b = Vec::new();
+ while let Some(chunk) = req.body_mut().next().await {
+ b.extend_from_slice(&chunk?);
+ }
// Parse the request body. form_urlencoded::parse
// always succeeds, but in general parsing may
// fail (for example, an invalid post of json), so
diff --git a/examples/send_file.rs b/examples/send_file.rs
--- a/examples/send_file.rs
+++ b/examples/send_file.rs
@@ -1,7 +1,7 @@
#![deny(warnings)]
use tokio::io::AsyncReadExt;
-use tokio_fs::File;
+use tokio::fs::File;
use hyper::{Body, Method, Result, Request, Response, Server, StatusCode};
use hyper::service::{make_service_fn, service_fn};
diff --git a/examples/single_threaded.rs b/examples/single_threaded.rs
--- a/examples/single_threaded.rs
+++ b/examples/single_threaded.rs
@@ -1,3 +1,5 @@
+fn main() {}
+/*
#![deny(warnings)]
use std::cell::Cell;
diff --git a/examples/single_threaded.rs b/examples/single_threaded.rs
--- a/examples/single_threaded.rs
+++ b/examples/single_threaded.rs
@@ -5,14 +7,24 @@ use std::rc::Rc;
use hyper::{Body, Error, Response, Server};
use hyper::service::{make_service_fn, service_fn};
-use tokio::runtime::current_thread;
-// Configure a runtime that runs everything on the current thread,
-// which means it can spawn !Send futures...
-#[tokio::main(single_thread)]
-async fn main() {
+fn main() {
pretty_env_logger::init();
+ // Configure a runtime that runs everything on the current thread
+ let mut rt = tokio::runtime::Builder::new()
+ .enable_all()
+ .basic_scheduler()
+ .build()
+ .expect("build runtime");
+
+ // Combine it with a `LocalSet`, which means it can spawn !Send futures...
+ let local = tokio::task::LocalSet::new();
+ local.block_on(&mut rt, run());
+}
+
+async fn run() {
+
let addr = ([127, 0, 0, 1], 3000).into();
// Using a !Send request counter is fine on 1 thread...
diff --git a/examples/single_threaded.rs b/examples/single_threaded.rs
--- a/examples/single_threaded.rs
+++ b/examples/single_threaded.rs
@@ -36,12 +48,8 @@ async fn main() {
}
});
- // Since the Server needs to spawn some background tasks, we needed
- // to configure an Executor that can spawn !Send futures...
- let exec = current_thread::TaskExecutor::current();
-
let server = Server::bind(&addr)
- .executor(exec)
+ .executor(LocalExec)
.serve(make_service);
println!("Listening on http://{}", addr);
diff --git a/examples/single_threaded.rs b/examples/single_threaded.rs
--- a/examples/single_threaded.rs
+++ b/examples/single_threaded.rs
@@ -52,3 +60,18 @@ async fn main() {
}
}
+// Since the Server needs to spawn some background tasks, we needed
+// to configure an Executor that can spawn !Send futures...
+#[derive(Clone, Copy, Debug)]
+struct LocalExec;
+
+impl<F> hyper::rt::Executor<F> for LocalExec
+where
+ F: std::future::Future + 'static, // not requiring `Send`
+{
+ fn execute(&self, fut: F) {
+ // This will spawn into the currently running `LocalSet`.
+ tokio::task::spawn_local(fut);
+ }
+}
+*/
diff --git a/examples/tower_server.rs b/examples/tower_server.rs
--- a/examples/tower_server.rs
+++ b/examples/tower_server.rs
@@ -21,7 +21,7 @@ impl Service<Request<Body>> for Svc {
}
fn call(&mut self, req: Request<Body>) -> Self::Future {
- let mut rsp = Response::builder();
+ let rsp = Response::builder();
let uri = req.uri();
if uri.path() != ROOT {
diff --git a/examples/upgrades.rs b/examples/upgrades.rs
--- a/examples/upgrades.rs
+++ b/examples/upgrades.rs
@@ -51,7 +51,7 @@ async fn server_upgrade(req: Request<Body>) -> Result<Response<Body>> {
// Note: This can't possibly be fulfilled until the 101 response
// is returned below, so it's better to spawn this future instead
// waiting for it to complete to then return a response.
- hyper::rt::spawn(async move {
+ tokio::task::spawn(async move {
match req.into_body().on_upgrade().await {
Ok(upgraded) => {
if let Err(e) = server_upgraded_io(upgraded).await {
diff --git a/examples/upgrades.rs b/examples/upgrades.rs
--- a/examples/upgrades.rs
+++ b/examples/upgrades.rs
@@ -129,13 +129,13 @@ async fn main() {
// the server should be shutdown.
let (tx, rx) = oneshot::channel::<()>();
let server = server
- .with_graceful_shutdown(async {
+ .with_graceful_shutdown(async move {
rx.await.ok();
});
// Spawn server on the default executor,
// which is usually a thread-pool from tokio default runtime.
- hyper::rt::spawn(async {
+ tokio::task::spawn(async move {
if let Err(e) = server.await {
eprintln!("server error: {}", e);
}
diff --git a/examples/web_api.rs b/examples/web_api.rs
--- a/examples/web_api.rs
+++ b/examples/web_api.rs
@@ -3,7 +3,7 @@
use hyper::{Body, Chunk, Client, Method, Request, Response, Server, StatusCode, header};
use hyper::client::HttpConnector;
use hyper::service::{make_service_fn, service_fn};
-use futures_util::{TryStreamExt};
+use futures_util::{StreamExt, TryStreamExt};
type GenericError = Box<dyn std::error::Error + Send + Sync>;
type Result<T> = std::result::Result<T, GenericError>;
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -157,11 +157,6 @@ impl Body {
Body::new(Kind::Wrapped(Box::pin(mapped)))
}
- /// dox
- pub async fn next(&mut self) -> Option<crate::Result<Chunk>> {
- futures_util::future::poll_fn(|cx| self.poll_eof(cx)).await
- }
-
/// Converts this `Body` into a `Future` of a pending HTTP upgrade.
///
/// See [the `upgrade` module](::upgrade) for more.
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -278,7 +273,7 @@ impl Body {
recv: ref mut h2, ..
} => match ready!(h2.poll_data(cx)) {
Some(Ok(bytes)) => {
- let _ = h2.release_capacity().release_capacity(bytes.len());
+ let _ = h2.flow_control().release_capacity(bytes.len());
Poll::Ready(Some(Ok(Chunk::from(bytes))))
},
Some(Err(e)) => Poll::Ready(Some(Err(crate::Error::new_body(e)))),
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -528,24 +523,3 @@ impl Sender {
let _ = self.tx.try_send(Err(err));
}
}
-
-/*
-impl Sink for Sender {
- type SinkItem = Chunk;
- type SinkError = crate::Error;
-
- fn poll_complete(&mut self) -> Poll<(), Self::SinkError> {
- Poll::Ready(Ok(()))
- }
-
- fn start_send(&mut self, msg: Chunk) -> StartSend<Self::SinkItem, Self::SinkError> {
- match self.poll_ready()? {
- Async::Ready(_) => {
- self.send_data(msg).map_err(|_| crate::Error::new_closed())?;
- Ok(AsyncSink::Ready)
- }
- Async::NotReady => Ok(AsyncSink::NotReady(msg)),
- }
- }
-}
-*/
diff --git a/src/body/chunk.rs b/src/body/chunk.rs
--- a/src/body/chunk.rs
+++ b/src/body/chunk.rs
@@ -137,13 +137,6 @@ impl IntoIterator for Chunk {
}
}
-impl Extend<u8> for Chunk {
- #[inline]
- fn extend<T>(&mut self, iter: T) where T: IntoIterator<Item=u8> {
- self.bytes.extend(iter)
- }
-}
-
impl Iterator for IntoIter {
type Item = u8;
diff --git a/src/body/mod.rs b/src/body/mod.rs
--- a/src/body/mod.rs
+++ b/src/body/mod.rs
@@ -15,6 +15,9 @@
//! client responses). It is also a decent default implementation if you don't
//! have very custom needs of your send streams.
+#[doc(hidden)]
+pub use http_body::Body as HttpBody;
+
pub use self::body::{Body, Sender};
pub use self::chunk::Chunk;
pub use self::payload::Payload;
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -14,11 +14,11 @@ use std::sync::Arc;
use bytes::Bytes;
use futures_util::future::{self, Either, FutureExt as _};
use pin_project::{pin_project, project};
-use tokio_io::{AsyncRead, AsyncWrite};
+use tokio::io::{AsyncRead, AsyncWrite};
use tower_service::Service;
use crate::body::Payload;
-use crate::common::{Exec, Future, Pin, Poll, task};
+use crate::common::{BoxSendFuture, Exec, Executor, Future, Pin, Poll, task};
use crate::upgrade::Upgraded;
use crate::proto;
use super::dispatch;
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -458,8 +458,7 @@ impl Builder {
/// Provide an executor to execute background HTTP2 tasks.
pub fn executor<E>(&mut self, exec: E) -> &mut Builder
where
- for<'a> &'a E: tokio_executor::Executor,
- E: Send + Sync + 'static,
+ E: Executor<BoxSendFuture> + Send + Sync + 'static,
{
self.exec = Exec::Executor(Arc::new(exec));
self
diff --git a/src/client/connect/dns.rs b/src/client/connect/dns.rs
--- a/src/client/connect/dns.rs
+++ b/src/client/connect/dns.rs
@@ -30,6 +30,7 @@ use std::net::{
};
use std::str::FromStr;
+use tokio::task::JoinHandle;
use tower_service::Service;
use crate::common::{Future, Pin, Poll, task};
diff --git a/src/client/connect/dns.rs b/src/client/connect/dns.rs
--- a/src/client/connect/dns.rs
+++ b/src/client/connect/dns.rs
@@ -54,7 +55,7 @@ pub struct GaiAddrs {
/// A future to resolve a name returned by `GaiResolver`.
pub struct GaiFuture {
- inner: tokio_executor::blocking::Blocking<Result<IpAddrs, io::Error>>,
+ inner: JoinHandle<Result<IpAddrs, io::Error>>,
}
impl Name {
diff --git a/src/client/connect/dns.rs b/src/client/connect/dns.rs
--- a/src/client/connect/dns.rs
+++ b/src/client/connect/dns.rs
@@ -123,7 +124,7 @@ impl Service<Name> for GaiResolver {
}
fn call(&mut self, name: Name) -> Self::Future {
- let blocking = tokio_executor::blocking::run(move || {
+ let blocking = tokio::task::spawn_blocking(move || {
debug!("resolving host={:?}", name.host);
(&*name.host, 0).to_socket_addrs()
.map(|i| IpAddrs { iter: i })
diff --git a/src/client/connect/dns.rs b/src/client/connect/dns.rs
--- a/src/client/connect/dns.rs
+++ b/src/client/connect/dns.rs
@@ -146,8 +147,9 @@ impl Future for GaiFuture {
fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
Pin::new(&mut self.inner).poll(cx).map(|res| match res {
- Ok(addrs) => Ok(GaiAddrs { inner: addrs }),
- Err(err) => Err(err),
+ Ok(Ok(addrs)) => Ok(GaiAddrs { inner: addrs }),
+ Ok(Err(err)) => Err(err),
+ Err(join_err) => panic!("gai background task failed: {:?}", join_err),
})
}
}
diff --git a/src/client/connect/dns.rs b/src/client/connect/dns.rs
--- a/src/client/connect/dns.rs
+++ b/src/client/connect/dns.rs
@@ -232,6 +234,7 @@ impl Iterator for IpAddrs {
}
}
+/*
/// A resolver using `getaddrinfo` calls via the `tokio_executor::threadpool::blocking` API.
///
/// Unlike the `GaiResolver` this will not spawn dedicated threads, but only works when running on the
diff --git a/src/client/connect/dns.rs b/src/client/connect/dns.rs
--- a/src/client/connect/dns.rs
+++ b/src/client/connect/dns.rs
@@ -286,6 +289,7 @@ impl Future for TokioThreadpoolGaiFuture {
}
}
}
+*/
mod sealed {
use tower_service::Service;
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -7,17 +7,16 @@ use std::sync::Arc;
use std::time::Duration;
use http::uri::{Scheme, Uri};
-use futures_util::{TryFutureExt, FutureExt};
+use futures_util::{TryFutureExt};
use net2::TcpBuilder;
use pin_project::{pin_project, project};
-use tokio_net::driver::Handle;
-use tokio_net::tcp::TcpStream;
-use tokio_timer::{Delay, Timeout};
+use tokio::net::TcpStream;
+use tokio::time::Delay;
use crate::common::{Future, Pin, Poll, task};
use super::{Connected, Destination};
use super::dns::{self, GaiResolver, Resolve};
-#[cfg(feature = "runtime")] use super::dns::TokioThreadpoolGaiResolver;
+//#[cfg(feature = "runtime")] use super::dns::TokioThreadpoolGaiResolver;
// TODO: unbox me?
type ConnectFuture = Pin<Box<dyn Future<Output = io::Result<TcpStream>> + Send>>;
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -73,7 +72,6 @@ pub struct HttpInfo {
struct Config {
connect_timeout: Option<Duration>,
enforce_http: bool,
- handle: Option<Handle>,
happy_eyeballs_timeout: Option<Duration>,
keep_alive_timeout: Option<Duration>,
local_address: Option<IpAddr>,
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -92,6 +90,7 @@ impl HttpConnector {
}
}
+/*
#[cfg(feature = "runtime")]
impl HttpConnector<TokioThreadpoolGaiResolver> {
/// Construct a new HttpConnector using the `TokioThreadpoolGaiResolver`.
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -101,6 +100,7 @@ impl HttpConnector<TokioThreadpoolGaiResolver> {
HttpConnector::new_with_resolver(TokioThreadpoolGaiResolver::new())
}
}
+*/
impl<R> HttpConnector<R> {
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -112,7 +112,6 @@ impl<R> HttpConnector<R> {
config: Arc::new(Config {
connect_timeout: None,
enforce_http: true,
- handle: None,
happy_eyeballs_timeout: Some(Duration::from_millis(300)),
keep_alive_timeout: None,
local_address: None,
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -133,14 +132,6 @@ impl<R> HttpConnector<R> {
self.config_mut().enforce_http = is_enforced;
}
- /// Set a handle to a `Reactor` to register connections to.
- ///
- /// If `None`, the implicit default reactor will be used.
- #[inline]
- pub fn set_reactor(&mut self, handle: Option<Handle>) {
- self.config_mut().handle = handle;
- }
-
/// Set that all sockets have `SO_KEEPALIVE` set with the supplied duration.
///
/// If `None`, the option will not be set.
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -276,10 +267,10 @@ where
);
if self.config.enforce_http {
- if dst.uri.scheme_part() != Some(&Scheme::HTTP) {
+ if dst.uri.scheme() != Some(&Scheme::HTTP) {
return self.invalid_url(INVALID_NOT_HTTP);
}
- } else if dst.uri.scheme_part().is_none() {
+ } else if dst.uri.scheme().is_none() {
return self.invalid_url(INVALID_MISSING_SCHEME);
}
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -287,9 +278,9 @@ where
Some(s) => s,
None => return self.invalid_url(INVALID_MISSING_HOST),
};
- let port = match dst.uri.port_part() {
+ let port = match dst.uri.port() {
Some(port) => port.as_u16(),
- None => if dst.uri.scheme_part() == Some(&Scheme::HTTPS) { 443 } else { 80 },
+ None => if dst.uri.scheme() == Some(&Scheme::HTTPS) { 443 } else { 80 },
};
HttpConnecting {
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -314,10 +305,7 @@ where
}
fn call(&mut self, uri: Uri) -> Self::Future {
- self
- .call(Destination { uri })
- .map_ok(|(s, _)| s)
- .boxed()
+ Box::pin(self.call(Destination { uri }).map_ok(|(s, _)| s))
}
}
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -447,7 +435,7 @@ impl<R: Resolve> Future for HttpConnecting<R> {
config.local_address, addrs, config.connect_timeout, config.happy_eyeballs_timeout, config.reuse_address));
},
State::Connecting(ref mut c) => {
- let sock = ready!(c.poll(cx, &config.handle))
+ let sock = ready!(c.poll(cx))
.map_err(ConnectError::m("tcp connect error"))?;
if let Some(dur) = config.keep_alive_timeout {
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -515,7 +503,7 @@ impl ConnectingTcp {
local_addr,
preferred: ConnectingTcpRemote::new(preferred_addrs, connect_timeout),
fallback: Some(ConnectingTcpFallback {
- delay: tokio_timer::delay_for(fallback_timeout),
+ delay: tokio::time::delay_for(fallback_timeout),
remote: ConnectingTcpRemote::new(fallback_addrs, connect_timeout),
}),
reuse_address,
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -555,12 +543,10 @@ impl ConnectingTcpRemote {
}
impl ConnectingTcpRemote {
- // not a Future, since passing a &Handle to poll
fn poll(
&mut self,
cx: &mut task::Context<'_>,
local_addr: &Option<IpAddr>,
- handle: &Option<Handle>,
reuse_address: bool,
) -> Poll<io::Result<TcpStream>> {
let mut err = None;
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -577,14 +563,14 @@ impl ConnectingTcpRemote {
err = Some(e);
if let Some(addr) = self.addrs.next() {
debug!("connecting to {}", addr);
- *current = connect(&addr, local_addr, handle, reuse_address, self.connect_timeout)?;
+ *current = connect(&addr, local_addr, reuse_address, self.connect_timeout)?;
continue;
}
}
}
} else if let Some(addr) = self.addrs.next() {
debug!("connecting to {}", addr);
- self.current = Some(connect(&addr, local_addr, handle, reuse_address, self.connect_timeout)?);
+ self.current = Some(connect(&addr, local_addr, reuse_address, self.connect_timeout)?);
continue;
}
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -596,7 +582,6 @@ impl ConnectingTcpRemote {
fn connect(
addr: &SocketAddr,
local_addr: &Option<IpAddr>,
- handle: &Option<Handle>,
reuse_address: bool,
connect_timeout: Option<Duration>,
) -> io::Result<ConnectFuture> {
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -625,18 +610,14 @@ fn connect(
builder.bind(any)?;
}
- let handle = match *handle {
- Some(ref handle) => handle.clone(),
- None => Handle::default(),
- };
let addr = *addr;
let std_tcp = builder.to_tcp_stream()?;
Ok(Box::pin(async move {
- let connect = TcpStream::connect_std(std_tcp, &addr, &handle);
+ let connect = TcpStream::connect_std(std_tcp, &addr);
match connect_timeout {
- Some(timeout) => match Timeout::new(connect, timeout).await {
+ Some(dur) => match tokio::time::timeout(dur, connect).await {
Ok(Ok(s)) => Ok(s),
Ok(Err(e)) => Err(e),
Err(e) => Err(io::Error::new(io::ErrorKind::TimedOut, e)),
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -647,16 +628,16 @@ fn connect(
}
impl ConnectingTcp {
- fn poll(&mut self, cx: &mut task::Context<'_>, handle: &Option<Handle>) -> Poll<io::Result<TcpStream>> {
+ fn poll(&mut self, cx: &mut task::Context<'_>) -> Poll<io::Result<TcpStream>> {
match self.fallback.take() {
- None => self.preferred.poll(cx, &self.local_addr, handle, self.reuse_address),
- Some(mut fallback) => match self.preferred.poll(cx, &self.local_addr, handle, self.reuse_address) {
+ None => self.preferred.poll(cx, &self.local_addr, self.reuse_address),
+ Some(mut fallback) => match self.preferred.poll(cx, &self.local_addr, self.reuse_address) {
Poll::Ready(Ok(stream)) => {
// Preferred successful - drop fallback.
Poll::Ready(Ok(stream))
}
Poll::Pending => match Pin::new(&mut fallback.delay).poll(cx) {
- Poll::Ready(()) => match fallback.remote.poll(cx, &self.local_addr, handle, self.reuse_address) {
+ Poll::Ready(()) => match fallback.remote.poll(cx, &self.local_addr, self.reuse_address) {
Poll::Ready(Ok(stream)) => {
// Fallback successful - drop current preferred,
// but keep fallback as new preferred.
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -682,7 +663,7 @@ impl ConnectingTcp {
Poll::Ready(Err(_)) => {
// Preferred failed - use fallback as new preferred.
self.preferred = fallback.remote;
- self.preferred.poll(cx, &self.local_addr, handle, self.reuse_address)
+ self.preferred.poll(cx, &self.local_addr, self.reuse_address)
}
}
}
diff --git a/src/client/connect/mod.rs b/src/client/connect/mod.rs
--- a/src/client/connect/mod.rs
+++ b/src/client/connect/mod.rs
@@ -50,8 +50,8 @@ impl Destination {
/// Returns an error if the uri contains no authority or
/// no scheme.
pub fn try_from_uri(uri: Uri) -> crate::Result<Self> {
- uri.authority_part().ok_or(crate::error::Parse::Uri)?;
- uri.scheme_part().ok_or(crate::error::Parse::Uri)?;
+ uri.authority().ok_or(crate::error::Parse::Uri)?;
+ uri.scheme().ok_or(crate::error::Parse::Uri)?;
Ok(Destination { uri })
}
diff --git a/src/client/connect/mod.rs b/src/client/connect/mod.rs
--- a/src/client/connect/mod.rs
+++ b/src/client/connect/mod.rs
@@ -131,11 +131,11 @@ impl Destination {
}
let auth = if let Some(port) = self.port() {
let bytes = Bytes::from(format!("{}:{}", host, port));
- uri::Authority::from_shared(bytes)
+ uri::Authority::from_maybe_shared(bytes)
.map_err(crate::error::Parse::from)?
} else {
let auth = host.parse::<uri::Authority>().map_err(crate::error::Parse::from)?;
- if auth.port_part().is_some() { // std::uri::Authority::Uri
+ if auth.port().is_some() { // std::uri::Authority::Uri
return Err(crate::error::Parse::Uri.into());
}
auth
diff --git a/src/client/connect/mod.rs b/src/client/connect/mod.rs
--- a/src/client/connect/mod.rs
+++ b/src/client/connect/mod.rs
@@ -186,7 +186,7 @@ impl Destination {
write!(buf, "{}", port)
.expect("should have space for 5 digits");
- uri::Authority::from_shared(buf.freeze())
+ uri::Authority::from_maybe_shared(buf.freeze())
.expect("valid host + :port should be valid authority")
} else {
self.host().parse()
diff --git a/src/client/connect/mod.rs b/src/client/connect/mod.rs
--- a/src/client/connect/mod.rs
+++ b/src/client/connect/mod.rs
@@ -372,7 +372,7 @@ where
pub(super) mod sealed {
use std::error::Error as StdError;
- use tokio_io::{AsyncRead, AsyncWrite};
+ use tokio::io::{AsyncRead, AsyncWrite};
use crate::common::{Future, Unpin};
use super::{Connected, Destination};
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -27,7 +27,7 @@
//! [full client example](https://github.com/hyperium/hyper/blob/master/examples/client.rs).
//!
//! ```
-//! use hyper::{Client, Uri};
+//! use hyper::{body::HttpBody as _, Client, Uri};
//!
//! # #[cfg(feature = "tcp")]
//! # async fn fetch_httpbin() -> hyper::Result<()> {
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -70,7 +70,7 @@ use http::header::{HeaderValue, HOST};
use http::uri::Scheme;
use crate::body::{Body, Payload};
-use crate::common::{lazy as hyper_lazy, Lazy, Future, Pin, Poll, task};
+use crate::common::{lazy as hyper_lazy, BoxSendFuture, Executor, Lazy, Future, Pin, Poll, task};
use self::connect::{Alpn, sealed::Connect, Connected, Destination};
use self::pool::{Key as PoolKey, Pool, Poolable, Pooled, Reservation};
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -285,10 +285,9 @@ where C: Connect + Clone + Send + Sync + 'static,
req
.headers_mut()
.entry(HOST)
- .expect("HOST is always valid header name")
.or_insert_with(|| {
let hostname = uri.host().expect("authority implies host");
- if let Some(port) = uri.port_part() {
+ if let Some(port) = uri.port() {
let s = format!("{}:{}", hostname, port);
HeaderValue::from_str(&s)
} else {
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -359,10 +358,7 @@ where C: Connect + Clone + Send + Sync + 'static,
drop(delayed_tx);
});
- if let Err(err) = executor.execute(on_idle) {
- // This task isn't critical, so just log and ignore.
- warn!("error spawning task to insert idle connection: {}", err);
- }
+ executor.execute(on_idle);
} else {
// There's no body to delay, but the connection isn't
// ready yet. Only re-insert when it's ready
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -371,10 +367,7 @@ where C: Connect + Clone + Send + Sync + 'static,
})
.map(|_| ());
- if let Err(err) = executor.execute(on_idle) {
- // This task isn't critical, so just log and ignore.
- warn!("error spawning task to insert idle connection: {}", err);
- }
+ executor.execute(on_idle);
}
res
})))
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -513,20 +506,13 @@ where C: Connect + Clone + Send + Sync + 'static,
.handshake(io)
.and_then(move |(tx, conn)| {
trace!("handshake complete, spawning background dispatcher task");
- let bg = executor.execute(conn.map_err(|e| {
+ executor.execute(conn.map_err(|e| {
debug!("client connection error: {}", e)
}).map(|_| ()));
- // This task is critical, so an execute error
- // should be returned.
- if let Err(err) = bg {
- warn!("error spawning critical client task: {}", err);
- return Either::Left(future::err(err));
- }
-
// Wait for 'conn' to ready up before we
// declare this tx as usable
- Either::Right(tx.when_ready())
+ tx.when_ready()
})
.map_ok(move |tx| {
pool.pooled(connecting, PoolClient {
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -742,12 +728,12 @@ fn origin_form(uri: &mut Uri) {
}
fn absolute_form(uri: &mut Uri) {
- debug_assert!(uri.scheme_part().is_some(), "absolute_form needs a scheme");
- debug_assert!(uri.authority_part().is_some(), "absolute_form needs an authority");
+ debug_assert!(uri.scheme().is_some(), "absolute_form needs a scheme");
+ debug_assert!(uri.authority().is_some(), "absolute_form needs an authority");
// If the URI is to HTTPS, and the connector claimed to be a proxy,
// then it *should* have tunneled, and so we don't want to send
// absolute-form in that case.
- if uri.scheme_part() == Some(&Scheme::HTTPS) {
+ if uri.scheme() == Some(&Scheme::HTTPS) {
origin_form(uri);
}
}
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -765,7 +751,7 @@ fn authority_form(uri: &mut Uri) {
}
}
}
- *uri = match uri.authority_part() {
+ *uri = match uri.authority() {
Some(auth) => {
let mut parts = ::http::uri::Parts::default();
parts.authority = Some(auth.clone());
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -779,14 +765,13 @@ fn authority_form(uri: &mut Uri) {
fn extract_domain(uri: &mut Uri, is_http_connect: bool) -> crate::Result<String> {
let uri_clone = uri.clone();
- match (uri_clone.scheme_part(), uri_clone.authority_part()) {
+ match (uri_clone.scheme(), uri_clone.authority()) {
(Some(scheme), Some(auth)) => {
Ok(format!("{}://{}", scheme, auth))
}
(None, Some(auth)) if is_http_connect => {
- let port = auth.port_part();
- let scheme = match port.as_ref().map(|p| p.as_str()) {
- Some("443") => {
+ let scheme = match auth.port_u16() {
+ Some(443) => {
set_scheme(uri, Scheme::HTTPS);
"https"
}
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -805,7 +790,7 @@ fn extract_domain(uri: &mut Uri, is_http_connect: bool) -> crate::Result<String>
}
fn set_scheme(uri: &mut Uri, scheme: Scheme) {
- debug_assert!(uri.scheme_part().is_none(), "set_scheme expects no existing scheme");
+ debug_assert!(uri.scheme().is_none(), "set_scheme expects no existing scheme");
let old = mem::replace(uri, Uri::default());
let mut parts: ::http::uri::Parts = old.into();
parts.scheme = Some(scheme);
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -1013,8 +998,7 @@ impl Builder {
/// Provide an executor to execute background `Connection` tasks.
pub fn executor<E>(&mut self, exec: E) -> &mut Self
where
- for<'a> &'a E: tokio_executor::Executor,
- E: Send + Sync + 'static,
+ E: Executor<BoxSendFuture> + Send + Sync + 'static,
{
self.conn_builder.executor(exec);
self
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -6,7 +6,7 @@ use std::time::{Duration, Instant};
use futures_channel::oneshot;
#[cfg(feature = "runtime")]
-use tokio_timer::Interval;
+use tokio::time::Interval;
use crate::common::{Exec, Future, Pin, Poll, Unpin, task};
use super::Ver;
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -414,18 +414,13 @@ impl<T: Poolable> PoolInner<T> {
}
};
- let start = Instant::now() + dur;
-
let interval = IdleTask {
- interval: Interval::new(start, dur),
+ interval: tokio::time::interval(dur),
pool: WeakOpt::downgrade(pool_ref),
pool_drop_notifier: rx,
};
- if let Err(err) = self.exec.execute(interval) {
- // This task isn't critical, so simply log and ignore.
- warn!("error spawning connection pool idle interval: {}", err);
- }
+ self.exec.execute(interval);
}
}
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -743,7 +738,7 @@ impl<T: Poolable + 'static> Future for IdleTask<T> {
}
}
- ready!(Pin::new(&mut self.interval).poll_next(cx));
+ ready!(self.interval.poll_tick(cx));
if let Some(inner) = self.pool.upgrade() {
if let Ok(mut inner) = inner.lock() {
diff --git a/src/client/service.rs b/src/client/service.rs
--- a/src/client/service.rs
+++ b/src/client/service.rs
@@ -63,7 +63,7 @@ where
if let Err(e) = conn.await {
debug!("connection error: {:?}", e);
}
- })?;
+ });
Ok(sr)
},
Err(e) => Err(e)
diff --git a/src/common/buf.rs /dev/null
--- a/src/common/buf.rs
+++ /dev/null
@@ -1,34 +0,0 @@
-use bytes::Buf;
-use iovec::IoVec;
-
-/// A `Buf` wrapping a static byte slice.
-#[derive(Debug)]
-pub(crate) struct StaticBuf(pub(crate) &'static [u8]);
-
-impl Buf for StaticBuf {
- #[inline]
- fn remaining(&self) -> usize {
- self.0.len()
- }
-
- #[inline]
- fn bytes(&self) -> &[u8] {
- self.0
- }
-
- #[inline]
- fn advance(&mut self, cnt: usize) {
- self.0 = &self.0[cnt..];
- }
-
- #[inline]
- fn bytes_vec<'t>(&'t self, dst: &mut [&'t IoVec]) -> usize {
- if dst.is_empty() || self.0.is_empty() {
- 0
- } else {
- dst[0] = self.0.into();
- 1
- }
- }
-}
-
diff --git a/src/common/drain.rs b/src/common/drain.rs
--- a/src/common/drain.rs
+++ b/src/common/drain.rs
@@ -1,12 +1,12 @@
use std::mem;
-use futures_util::FutureExt as _;
-use tokio_sync::{mpsc, watch};
+use tokio::sync::{mpsc, watch};
use pin_project::pin_project;
use super::{Future, Never, Poll, Pin, task};
// Sentinel value signaling that the watch is still open
+#[derive(Clone, Copy)]
enum Action {
Open,
// Closed isn't sent via the `Action` type, but rather once
diff --git a/src/common/drain.rs b/src/common/drain.rs
--- a/src/common/drain.rs
+++ b/src/common/drain.rs
@@ -103,10 +103,7 @@ where
loop {
match mem::replace(me.state, State::Draining) {
State::Watch(on_drain) => {
- let recv = me.watch.rx.recv_ref();
- futures_util::pin_mut!(recv);
-
- match recv.poll_unpin(cx) {
+ match me.watch.rx.poll_recv_ref(cx) {
Poll::Ready(None) => {
// Drain has been triggered!
on_drain(me.future.as_mut());
diff --git a/src/common/exec.rs b/src/common/exec.rs
--- a/src/common/exec.rs
+++ b/src/common/exec.rs
@@ -3,48 +3,39 @@ use std::future::Future;
use std::pin::Pin;
use std::sync::Arc;
-use tokio_executor::{SpawnError, TypedExecutor};
-
use crate::body::{Payload, Body};
use crate::proto::h2::server::H2Stream;
use crate::server::conn::spawn_all::{NewSvcTask, Watcher};
use crate::service::HttpService;
+/// An executor of futures.
+pub trait Executor<Fut> {
+ /// Place the future into the executor to be run.
+ fn execute(&self, fut: Fut);
+}
+
pub trait H2Exec<F, B: Payload>: Clone {
- fn execute_h2stream(&mut self, fut: H2Stream<F, B>) -> crate::Result<()>;
+ fn execute_h2stream(&mut self, fut: H2Stream<F, B>);
}
pub trait NewSvcExec<I, N, S: HttpService<Body>, E, W: Watcher<I, S, E>>: Clone {
- fn execute_new_svc(&mut self, fut: NewSvcTask<I, N, S, E, W>) -> crate::Result<()>;
+ fn execute_new_svc(&mut self, fut: NewSvcTask<I, N, S, E, W>);
}
-type BoxFuture = Pin<Box<dyn Future<Output=()> + Send>>;
-
-pub trait SharedExecutor {
- fn shared_spawn(&self, future: BoxFuture) -> Result<(), SpawnError>;
-}
-
-impl<E> SharedExecutor for E
-where
- for<'a> &'a E: tokio_executor::Executor,
-{
- fn shared_spawn(mut self: &Self, future: BoxFuture) -> Result<(), SpawnError> {
- tokio_executor::Executor::spawn(&mut self, future)
- }
-}
+pub type BoxSendFuture = Pin<Box<dyn Future<Output=()> + Send>>;
// Either the user provides an executor for background tasks, or we use
// `tokio::spawn`.
#[derive(Clone)]
pub enum Exec {
Default,
- Executor(Arc<dyn SharedExecutor + Send + Sync>),
+ Executor(Arc<dyn Executor<BoxSendFuture> + Send + Sync>),
}
// ===== impl Exec =====
impl Exec {
- pub(crate) fn execute<F>(&self, fut: F) -> crate::Result<()>
+ pub(crate) fn execute<F>(&self, fut: F)
where
F: Future<Output=()> + Send + 'static,
{
diff --git a/src/common/exec.rs b/src/common/exec.rs
--- a/src/common/exec.rs
+++ b/src/common/exec.rs
@@ -52,34 +43,7 @@ impl Exec {
Exec::Default => {
#[cfg(feature = "tcp")]
{
- use std::error::Error as StdError;
-
- struct TokioSpawnError;
-
- impl fmt::Debug for TokioSpawnError {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- fmt::Debug::fmt("tokio::spawn failed (is a tokio runtime running this future?)", f)
- }
- }
-
- impl fmt::Display for TokioSpawnError {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- fmt::Display::fmt("tokio::spawn failed (is a tokio runtime running this future?)", f)
- }
- }
-
- impl StdError for TokioSpawnError {
- fn description(&self) -> &str {
- "tokio::spawn failed"
- }
- }
-
- ::tokio_executor::DefaultExecutor::current()
- .spawn(Box::pin(fut))
- .map_err(|err| {
- warn!("executor error: {:?}", err);
- crate::Error::new_execute(TokioSpawnError)
- })
+ tokio::task::spawn(fut);
}
#[cfg(not(feature = "tcp"))]
{
diff --git a/src/common/exec.rs b/src/common/exec.rs
--- a/src/common/exec.rs
+++ b/src/common/exec.rs
@@ -88,11 +52,7 @@ impl Exec {
}
},
Exec::Executor(ref e) => {
- e.shared_spawn(Box::pin(fut))
- .map_err(|err| {
- warn!("executor error: {:?}", err);
- crate::Error::new_execute("custom executor failed")
- })
+ e.execute(Box::pin(fut));
},
}
}
diff --git a/src/common/exec.rs b/src/common/exec.rs
--- a/src/common/exec.rs
+++ b/src/common/exec.rs
@@ -111,7 +71,7 @@ where
H2Stream<F, B>: Future<Output = ()> + Send + 'static,
B: Payload,
{
- fn execute_h2stream(&mut self, fut: H2Stream<F, B>) -> crate::Result<()> {
+ fn execute_h2stream(&mut self, fut: H2Stream<F, B>) {
self.execute(fut)
}
}
diff --git a/src/common/exec.rs b/src/common/exec.rs
--- a/src/common/exec.rs
+++ b/src/common/exec.rs
@@ -122,7 +82,7 @@ where
S: HttpService<Body>,
W: Watcher<I, S, E>,
{
- fn execute_new_svc(&mut self, fut: NewSvcTask<I, N, S, E, W>) -> crate::Result<()> {
+ fn execute_new_svc(&mut self, fut: NewSvcTask<I, N, S, E, W>) {
self.execute(fut)
}
}
diff --git a/src/common/exec.rs b/src/common/exec.rs
--- a/src/common/exec.rs
+++ b/src/common/exec.rs
@@ -131,34 +91,24 @@ where
impl<E, F, B> H2Exec<F, B> for E
where
- E: TypedExecutor<H2Stream<F, B>> + Clone,
+ E: Executor<H2Stream<F, B>> + Clone,
H2Stream<F, B>: Future<Output=()>,
B: Payload,
{
- fn execute_h2stream(&mut self, fut: H2Stream<F, B>) -> crate::Result<()> {
- self.spawn(fut)
- .map_err(|err| {
- warn!("executor error: {:?}", err);
- crate::Error::new_execute("custom executor failed")
- })
+ fn execute_h2stream(&mut self, fut: H2Stream<F, B>) {
+ self.execute(fut)
}
}
impl<I, N, S, E, W> NewSvcExec<I, N, S, E, W> for E
where
- E: TypedExecutor<NewSvcTask<I, N, S, E, W>> + Clone,
+ E: Executor<NewSvcTask<I, N, S, E, W>> + Clone,
NewSvcTask<I, N, S, E, W>: Future<Output=()>,
S: HttpService<Body>,
W: Watcher<I, S, E>,
{
- fn execute_new_svc(&mut self, fut: NewSvcTask<I, N, S, E, W>) -> crate::Result<()> {
- self.spawn(fut)
- .map_err(|err| {
- warn!("executor error: {:?}", err);
- crate::Error::new_execute("custom executor failed")
- })
+ fn execute_new_svc(&mut self, fut: NewSvcTask<I, N, S, E, W>) {
+ self.execute(fut)
}
}
-// ===== StdError impls =====
-
diff --git a/src/common/io/rewind.rs b/src/common/io/rewind.rs
--- a/src/common/io/rewind.rs
+++ b/src/common/io/rewind.rs
@@ -1,8 +1,8 @@
-use std::io::{self, Read};
+use std::{cmp, io};
use std::marker::Unpin;
-use bytes::{Buf, Bytes, IntoBuf};
-use tokio_io::{AsyncRead, AsyncWrite};
+use bytes::{Buf, Bytes};
+use tokio::io::{AsyncRead, AsyncWrite};
use crate::common::{Pin, Poll, task};
diff --git a/src/common/io/rewind.rs b/src/common/io/rewind.rs
--- a/src/common/io/rewind.rs
+++ b/src/common/io/rewind.rs
@@ -43,26 +43,22 @@ where
T: AsyncRead + Unpin,
{
#[inline]
- unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [u8]) -> bool {
+ unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [std::mem::MaybeUninit<u8>]) -> bool {
self.inner.prepare_uninitialized_buffer(buf)
}
fn poll_read(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>, buf: &mut [u8]) -> Poll<io::Result<usize>> {
- if let Some(pre_bs) = self.pre.take() {
+ if let Some(mut prefix) = self.pre.take() {
// If there are no remaining bytes, let the bytes get dropped.
- if pre_bs.len() > 0 {
- let mut pre_reader = pre_bs.into_buf().reader();
- let read_cnt = pre_reader.read(buf)?;
-
- let mut new_pre = pre_reader.into_inner().into_inner();
- new_pre.advance(read_cnt);
-
+ if prefix.len() > 0 {
+ let copy_len = cmp::min(prefix.len(), buf.len());
+ prefix.copy_to_slice(&mut buf[..copy_len]);
// Put back whats left
- if new_pre.len() > 0 {
- self.pre = Some(new_pre);
+ if prefix.len() > 0 {
+ self.pre = Some(prefix);
}
- return Poll::Ready(Ok(read_cnt));
+ return Poll::Ready(Ok(copy_len));
}
}
Pin::new(&mut self.inner).poll_read(cx, buf)
diff --git a/src/common/mod.rs b/src/common/mod.rs
--- a/src/common/mod.rs
+++ b/src/common/mod.rs
@@ -7,7 +7,6 @@ macro_rules! ready {
)
}
-mod buf;
pub(crate) mod drain;
pub(crate) mod exec;
pub(crate) mod io;
diff --git a/src/common/mod.rs b/src/common/mod.rs
--- a/src/common/mod.rs
+++ b/src/common/mod.rs
@@ -15,8 +14,8 @@ mod lazy;
mod never;
pub(crate) mod task;
-pub(crate) use self::buf::StaticBuf;
-pub(crate) use self::exec::Exec;
+pub(crate) use self::exec::{BoxSendFuture, Exec};
+pub use self::exec::Executor;
pub(crate) use self::lazy::{lazy, Started as Lazy};
pub use self::never::Never;
pub(crate) use self::task::Poll;
diff --git a/src/error.rs b/src/error.rs
--- a/src/error.rs
+++ b/src/error.rs
@@ -89,9 +89,6 @@ pub(crate) enum User {
/// User polled for an upgrade, but low-level API is not using upgrades.
ManualUpgrade,
-
- /// Error trying to call `Executor::execute`.
- Execute,
}
impl Error {
diff --git a/src/error.rs b/src/error.rs
--- a/src/error.rs
+++ b/src/error.rs
@@ -277,10 +274,6 @@ impl Error {
Error::new(Kind::Shutdown).with(cause)
}
- pub(crate) fn new_execute<E: Into<Cause>>(cause: E) -> Error {
- Error::new_user(User::Execute).with(cause)
- }
-
pub(crate) fn new_h2(cause: ::h2::Error) -> Error {
if cause.is_io() {
Error::new_io(cause.into_io().expect("h2::Error::is_io"))
diff --git a/src/error.rs b/src/error.rs
--- a/src/error.rs
+++ b/src/error.rs
@@ -346,7 +339,6 @@ impl StdError for Error {
Kind::User(User::AbsoluteUriRequired) => "client requires absolute-form URIs",
Kind::User(User::NoUpgrade) => "no upgrade available",
Kind::User(User::ManualUpgrade) => "upgrade expected but low level API in use",
- Kind::User(User::Execute) => "executor failed to spawn task",
}
}
diff --git a/src/error.rs b/src/error.rs
--- a/src/error.rs
+++ b/src/error.rs
@@ -398,12 +390,6 @@ impl From<http::uri::InvalidUri> for Parse {
}
}
-impl From<http::uri::InvalidUriBytes> for Parse {
- fn from(_: http::uri::InvalidUriBytes) -> Parse {
- Parse::Uri
- }
-}
-
impl From<http::uri::InvalidUriParts> for Parse {
fn from(_: http::uri::InvalidUriParts) -> Parse {
Parse::Uri
diff --git a/src/headers.rs b/src/headers.rs
--- a/src/headers.rs
+++ b/src/headers.rs
@@ -66,7 +66,6 @@ pub fn content_length_parse_all_values(values: ValueIter<'_, HeaderValue>) -> Op
pub fn set_content_length_if_missing(headers: &mut HeaderMap, len: u64) {
headers
.entry(CONTENT_LENGTH)
- .unwrap()
.or_insert_with(|| HeaderValue::from(len));
}
diff --git a/src/headers.rs b/src/headers.rs
--- a/src/headers.rs
+++ b/src/headers.rs
@@ -105,7 +104,7 @@ pub fn add_chunked(mut entry: OccupiedEntry<'_, HeaderValue>) {
buf.copy_from_slice(b", ");
buf.copy_from_slice(CHUNKED.as_bytes());
- *line = HeaderValue::from_shared(buf.freeze())
+ *line = HeaderValue::from_maybe_shared(buf.freeze())
.expect("original header value plus ascii is valid");
return;
}
diff --git a/src/lib.rs b/src/lib.rs
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -65,5 +65,5 @@ mod headers;
mod proto;
pub mod server;
pub mod service;
-#[cfg(feature = "runtime")] pub mod rt;
+pub mod rt;
pub mod upgrade;
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -5,7 +5,7 @@ use std::marker::PhantomData;
use bytes::{Buf, Bytes};
use http::{HeaderMap, Method, Version};
use http::header::{HeaderValue, CONNECTION};
-use tokio_io::{AsyncRead, AsyncWrite};
+use tokio::io::{AsyncRead, AsyncWrite};
use crate::Chunk;
use crate::common::{Pin, Poll, Unpin, task};
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -2,7 +2,7 @@ use std::error::Error as StdError;
use bytes::{Buf, Bytes};
use http::{Request, Response, StatusCode};
-use tokio_io::{AsyncRead, AsyncWrite};
+use tokio::io::{AsyncRead, AsyncWrite};
use crate::body::{Body, Payload};
use crate::common::{Future, Never, Poll, Pin, Unpin, task};
diff --git a/src/proto/h1/encode.rs b/src/proto/h1/encode.rs
--- a/src/proto/h1/encode.rs
+++ b/src/proto/h1/encode.rs
@@ -1,12 +1,13 @@
use std::fmt;
+use std::io::IoSlice;
-use bytes::{Buf, IntoBuf};
-use bytes::buf::{Chain, Take};
-use iovec::IoVec;
+use bytes::Buf;
+use bytes::buf::ext::{BufExt, Chain, Take};
-use crate::common::StaticBuf;
use super::io::WriteBuf;
+type StaticBuf = &'static [u8];
+
/// Encoders to handle different Transfer-Encodings.
#[derive(Debug, Clone, PartialEq)]
pub struct Encoder {
diff --git a/src/proto/h1/encode.rs b/src/proto/h1/encode.rs
--- a/src/proto/h1/encode.rs
+++ b/src/proto/h1/encode.rs
@@ -84,17 +85,16 @@ impl Encoder {
match self.kind {
Kind::Length(0) => Ok(None),
Kind::Chunked => Ok(Some(EncodedBuf {
- kind: BufKind::ChunkedEnd(StaticBuf(b"0\r\n\r\n")),
+ kind: BufKind::ChunkedEnd(b"0\r\n\r\n"),
})),
_ => Err(NotEof),
}
}
- pub fn encode<B>(&mut self, msg: B) -> EncodedBuf<B::Buf>
+ pub fn encode<B>(&mut self, msg: B) -> EncodedBuf<B>
where
- B: IntoBuf,
+ B: Buf,
{
- let msg = msg.into_buf();
let len = msg.remaining();
debug_assert!(len > 0, "encode() called with empty buf");
diff --git a/src/proto/h1/encode.rs b/src/proto/h1/encode.rs
--- a/src/proto/h1/encode.rs
+++ b/src/proto/h1/encode.rs
@@ -103,7 +103,7 @@ impl Encoder {
trace!("encoding chunked {}B", len);
let buf = ChunkSize::new(len)
.chain(msg)
- .chain(StaticBuf(b"\r\n"));
+ .chain(b"\r\n" as &'static [u8]);
BufKind::Chunked(buf)
},
Kind::Length(ref mut remaining) => {
diff --git a/src/proto/h1/encode.rs b/src/proto/h1/encode.rs
--- a/src/proto/h1/encode.rs
+++ b/src/proto/h1/encode.rs
@@ -127,11 +127,10 @@ impl Encoder {
}
}
- pub(super) fn encode_and_end<B>(&self, msg: B, dst: &mut WriteBuf<EncodedBuf<B::Buf>>) -> bool
+ pub(super) fn encode_and_end<B>(&self, msg: B, dst: &mut WriteBuf<EncodedBuf<B>>) -> bool
where
- B: IntoBuf,
+ B: Buf,
{
- let msg = msg.into_buf();
let len = msg.remaining();
debug_assert!(len > 0, "encode() called with empty buf");
diff --git a/src/proto/h1/encode.rs b/src/proto/h1/encode.rs
--- a/src/proto/h1/encode.rs
+++ b/src/proto/h1/encode.rs
@@ -140,7 +139,7 @@ impl Encoder {
trace!("encoding chunked {}B", len);
let buf = ChunkSize::new(len)
.chain(msg)
- .chain(StaticBuf(b"\r\n0\r\n\r\n"));
+ .chain(b"\r\n0\r\n\r\n" as &'static [u8]);
dst.buffer(buf);
!self.is_last
},
diff --git a/src/proto/h1/encode.rs b/src/proto/h1/encode.rs
--- a/src/proto/h1/encode.rs
+++ b/src/proto/h1/encode.rs
@@ -176,11 +175,10 @@ impl Encoder {
/// This is used in conjunction with Payload::__hyper_full_data(), which
/// means we can trust that the buf has the correct size (the buf itself
/// was checked to make the headers).
- pub(super) fn danger_full_buf<B>(self, msg: B, dst: &mut WriteBuf<EncodedBuf<B::Buf>>)
+ pub(super) fn danger_full_buf<B>(self, msg: B, dst: &mut WriteBuf<EncodedBuf<B>>)
where
- B: IntoBuf,
+ B: Buf,
{
- let msg = msg.into_buf();
debug_assert!(msg.remaining() > 0, "encode() called with empty buf");
debug_assert!(match self.kind {
Kind::Length(len) => len == msg.remaining() as u64,
diff --git a/src/proto/h1/encode.rs b/src/proto/h1/encode.rs
--- a/src/proto/h1/encode.rs
+++ b/src/proto/h1/encode.rs
@@ -193,7 +191,7 @@ impl Encoder {
trace!("encoding chunked {}B", len);
let buf = ChunkSize::new(len)
.chain(msg)
- .chain(StaticBuf(b"\r\n0\r\n\r\n"));
+ .chain(b"\r\n0\r\n\r\n" as &'static [u8]);
dst.buffer(buf);
},
_ => {
diff --git a/src/proto/h1/encode.rs b/src/proto/h1/encode.rs
--- a/src/proto/h1/encode.rs
+++ b/src/proto/h1/encode.rs
@@ -238,12 +236,12 @@ where
}
#[inline]
- fn bytes_vec<'t>(&'t self, dst: &mut [&'t IoVec]) -> usize {
+ fn bytes_vectored<'t>(&'t self, dst: &mut [IoSlice<'t>]) -> usize {
match self.kind {
- BufKind::Exact(ref b) => b.bytes_vec(dst),
- BufKind::Limited(ref b) => b.bytes_vec(dst),
- BufKind::Chunked(ref b) => b.bytes_vec(dst),
- BufKind::ChunkedEnd(ref b) => b.bytes_vec(dst),
+ BufKind::Exact(ref b) => b.bytes_vectored(dst),
+ BufKind::Limited(ref b) => b.bytes_vectored(dst),
+ BufKind::Chunked(ref b) => b.bytes_vectored(dst),
+ BufKind::ChunkedEnd(ref b) => b.bytes_vectored(dst),
}
}
}
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -2,11 +2,10 @@ use std::cell::Cell;
use std::cmp;
use std::collections::VecDeque;
use std::fmt;
-use std::io;
+use std::io::{self, IoSlice};
use bytes::{Buf, BufMut, Bytes, BytesMut};
-use iovec::IoVec;
-use tokio_io::{AsyncRead, AsyncWrite};
+use tokio::io::{AsyncRead, AsyncWrite};
use crate::common::{Pin, Poll, Unpin, task};
use super::{Http1Transaction, ParseContext, ParsedMessage};
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -105,6 +104,12 @@ where
&mut self.read_buf
}
+ /// Return the "allocated" available space, not the potential space
+ /// that could be allocated in the future.
+ fn read_buf_remaining_mut(&self) -> usize {
+ self.read_buf.capacity() - self.read_buf.len()
+ }
+
pub fn headers_buf(&mut self) -> &mut Vec<u8> {
let buf = self.write_buf.headers_mut();
&mut buf.bytes
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -170,7 +175,7 @@ where
pub fn poll_read_from_io(&mut self, cx: &mut task::Context<'_>) -> Poll<io::Result<usize>> {
self.read_blocked = false;
let next = self.read_buf_strategy.next();
- if self.read_buf.remaining_mut() < next {
+ if self.read_buf_remaining_mut() < next {
self.read_buf.reserve(next);
}
match Pin::new(&mut self.io).poll_read_buf(cx, &mut self.read_buf) {
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -520,9 +525,9 @@ impl<B: Buf> Buf for WriteBuf<B> {
}
#[inline]
- fn bytes_vec<'t>(&'t self, dst: &mut [&'t IoVec]) -> usize {
- let n = self.headers.bytes_vec(dst);
- self.queue.bytes_vec(&mut dst[n..]) + n
+ fn bytes_vectored<'t>(&'t self, dst: &mut [IoSlice<'t>]) -> usize {
+ let n = self.headers.bytes_vectored(dst);
+ self.queue.bytes_vectored(&mut dst[n..]) + n
}
}
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -562,9 +567,9 @@ impl<'a, B: Buf> Buf for WriteBufAuto<'a, B> {
}
#[inline]
- fn bytes_vec<'t>(&'t self, dst: &mut [&'t IoVec]) -> usize {
+ fn bytes_vectored<'t>(&'t self, dst: &mut [IoSlice<'t>]) -> usize {
self.bytes_vec_called.set(true);
- self.inner.bytes_vec(dst)
+ self.inner.bytes_vectored(dst)
}
}
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -638,13 +643,13 @@ impl<T: Buf> Buf for BufDeque<T> {
}
#[inline]
- fn bytes_vec<'t>(&'t self, dst: &mut [&'t IoVec]) -> usize {
+ fn bytes_vectored<'t>(&'t self, dst: &mut [IoSlice<'t>]) -> usize {
if dst.is_empty() {
return 0;
}
let mut vecs = 0;
for buf in &self.bufs {
- vecs += buf.bytes_vec(&mut dst[vecs..]);
+ vecs += buf.bytes_vectored(&mut dst[vecs..]);
if vecs == dst.len() {
break;
}
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -23,7 +23,7 @@ macro_rules! header_name {
{
match HeaderName::from_bytes($bytes) {
Ok(name) => name,
- Err(_) => panic!("illegal header name from httparse: {:?}", ::bytes::Bytes::from($bytes)),
+ Err(_) => panic!("illegal header name from httparse: {:?}", ::bytes::Bytes::copy_from_slice($bytes)),
}
}
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -40,7 +40,7 @@ macro_rules! header_value {
#[cfg(debug_assertions)]
{
let __hvb: ::bytes::Bytes = $bytes;
- match HeaderValue::from_shared(__hvb.clone()) {
+ match HeaderValue::from_maybe_shared(__hvb.clone()) {
Ok(name) => name,
Err(_) => panic!("illegal header value from httparse: {:?}", __hvb),
}
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -50,7 +50,7 @@ macro_rules! header_value {
{
// Unsafe: httparse already validated header value
unsafe {
- HeaderValue::from_shared_unchecked($bytes)
+ HeaderValue::from_maybe_shared_unchecked($bytes)
}
}
});
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -153,7 +153,7 @@ impl Http1Transaction for Server {
for header in &headers_indices[..headers_len] {
let name = header_name!(&slice[header.name.0..header.name.1]);
- let value = header_value!(slice.slice(header.value.0, header.value.1));
+ let value = header_value!(slice.slice(header.value.0..header.value.1));
match name {
header::TRANSFER_ENCODING => {
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -302,10 +302,40 @@ impl Http1Transaction for Server {
let mut encoder = Encoder::length(0);
let mut wrote_date = false;
- 'headers: for (name, mut values) in msg.head.headers.drain() {
- match name {
+ let mut cur_name = None;
+ let mut is_name_written = false;
+ let mut must_write_chunked = false;
+ let mut prev_con_len = None;
+
+ macro_rules! handle_is_name_written {
+ () => ({
+ if is_name_written {
+ // we need to clean up and write the newline
+ debug_assert_ne!(
+ &dst[dst.len() - 2 ..],
+ b"\r\n",
+ "previous header wrote newline but set is_name_written"
+ );
+
+ if must_write_chunked {
+ extend(dst, b", chunked\r\n");
+ } else {
+ extend(dst, b"\r\n");
+ }
+ }
+ })
+ }
+
+ 'headers: for (opt_name, value) in msg.head.headers.drain() {
+ if let Some(n) = opt_name {
+ cur_name = Some(n);
+ handle_is_name_written!();
+ is_name_written = false;
+ }
+ let name = cur_name.as_ref().expect("current header name");
+ match *name {
header::CONTENT_LENGTH => {
- if wrote_len {
+ if wrote_len && !is_name_written {
warn!("unexpected content-length found, canceling");
rewind(dst);
return Err(crate::Error::new_user_header());
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -319,77 +349,56 @@ impl Http1Transaction for Server {
//
// In debug builds, we'll assert they are the
// same to help developers find bugs.
- encoder = Encoder::length(known_len);
-
#[cfg(debug_assertions)]
{
- let mut folded = None::<(u64, HeaderValue)>;
- for value in values {
- if let Some(len) = headers::content_length_parse(&value) {
- if let Some(fold) = folded {
- if fold.0 != len {
- panic!("multiple Content-Length values found: [{}, {}]", fold.0, len);
- }
- folded = Some(fold);
- } else {
- folded = Some((len, value));
- }
- } else {
- panic!("illegal Content-Length value: {:?}", value);
- }
- }
- if let Some((len, value)) = folded {
+ if let Some(len) = headers::content_length_parse(&value) {
assert!(
len == known_len,
"payload claims content-length of {}, custom content-length header claims {}",
known_len,
len,
);
- extend(dst, b"content-length: ");
- extend(dst, value.as_bytes());
- extend(dst, b"\r\n");
- wrote_len = true;
- continue 'headers;
- } else {
- // No values in content-length... ignore?
- continue 'headers;
}
}
+
+ if !is_name_written {
+ encoder = Encoder::length(known_len);
+ extend(dst, b"content-length: ");
+ extend(dst, value.as_bytes());
+ wrote_len = true;
+ is_name_written = true;
+ }
+ continue 'headers;
},
Some(BodyLength::Unknown) => {
// The Payload impl didn't know how long the
// body is, but a length header was included.
// We have to parse the value to return our
// Encoder...
- let mut folded = None::<(u64, HeaderValue)>;
- for value in values {
- if let Some(len) = headers::content_length_parse(&value) {
- if let Some(fold) = folded {
- if fold.0 != len {
- warn!("multiple Content-Length values found: [{}, {}]", fold.0, len);
- rewind(dst);
- return Err(crate::Error::new_user_header());
- }
- folded = Some(fold);
- } else {
- folded = Some((len, value));
+
+ if let Some(len) = headers::content_length_parse(&value) {
+ if let Some(prev) = prev_con_len {
+ if prev != len {
+ warn!("multiple Content-Length values found: [{}, {}]", prev, len);
+ rewind(dst);
+ return Err(crate::Error::new_user_header());
}
+ debug_assert!(is_name_written);
+ continue 'headers;
} else {
- warn!("illegal Content-Length value: {:?}", value);
- rewind(dst);
- return Err(crate::Error::new_user_header());
+ // we haven't written content-length yet!
+ encoder = Encoder::length(len);
+ extend(dst, b"content-length: ");
+ extend(dst, value.as_bytes());
+ wrote_len = true;
+ is_name_written = true;
+ prev_con_len = Some(len);
+ continue 'headers;
}
- }
- if let Some((len, value)) = folded {
- encoder = Encoder::length(len);
- extend(dst, b"content-length: ");
- extend(dst, value.as_bytes());
- extend(dst, b"\r\n");
- wrote_len = true;
- continue 'headers;
} else {
- // No values in content-length... ignore?
- continue 'headers;
+ warn!("illegal Content-Length value: {:?}", value);
+ rewind(dst);
+ return Err(crate::Error::new_user_header());
}
},
None => {
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -402,10 +411,8 @@ impl Http1Transaction for Server {
if msg.req_method == &Some(Method::HEAD) {
debug_assert_eq!(encoder, Encoder::length(0));
} else {
- for value in values {
- if value.as_bytes() != b"0" {
- warn!("content-length value found, but empty body provided: {:?}", value);
- }
+ if value.as_bytes() != b"0" {
+ warn!("content-length value found, but empty body provided: {:?}", value);
}
continue 'headers;
}
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -414,7 +421,7 @@ impl Http1Transaction for Server {
wrote_len = true;
},
header::TRANSFER_ENCODING => {
- if wrote_len {
+ if wrote_len && !is_name_written {
warn!("unexpected transfer-encoding found, canceling");
rewind(dst);
return Err(crate::Error::new_user_header());
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -424,44 +431,36 @@ impl Http1Transaction for Server {
continue;
}
wrote_len = true;
- encoder = Encoder::chunked();
-
- extend(dst, b"transfer-encoding: ");
-
- let mut saw_chunked;
- if let Some(te) = values.next() {
- extend(dst, te.as_bytes());
- saw_chunked = headers::is_chunked_(&te);
- for value in values {
- extend(dst, b", ");
- extend(dst, value.as_bytes());
- saw_chunked = headers::is_chunked_(&value);
- }
- if !saw_chunked {
- extend(dst, b", chunked\r\n");
- } else {
- extend(dst, b"\r\n");
- }
+ // Must check each value, because `chunked` needs to be the
+ // last encoding, or else we add it.
+ must_write_chunked = !headers::is_chunked_(&value);
+
+ if !is_name_written {
+ encoder = Encoder::chunked();
+ is_name_written = true;
+ extend(dst, b"transfer-encoding: ");
+ extend(dst, value.as_bytes());
} else {
- // zero lines? add a chunked line then
- extend(dst, b"chunked\r\n");
+ extend(dst, b", ");
+ extend(dst, value.as_bytes());
}
continue 'headers;
},
header::CONNECTION => {
if !is_last {
- for value in values {
- extend(dst, name.as_str().as_bytes());
- extend(dst, b": ");
- extend(dst, value.as_bytes());
- extend(dst, b"\r\n");
-
- if headers::connection_close(&value) {
- is_last = true;
- }
+ if headers::connection_close(&value) {
+ is_last = true;
}
- continue 'headers;
}
+ if !is_name_written {
+ is_name_written = true;
+ extend(dst, b"connection: ");
+ extend(dst, value.as_bytes());
+ } else {
+ extend(dst, b", ");
+ extend(dst, value.as_bytes());
+ }
+ continue 'headers;
},
header::DATE => {
wrote_date = true;
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -470,14 +469,21 @@ impl Http1Transaction for Server {
}
//TODO: this should perhaps instead combine them into
//single lines, as RFC7230 suggests is preferable.
- for value in values {
- extend(dst, name.as_str().as_bytes());
- extend(dst, b": ");
- extend(dst, value.as_bytes());
- extend(dst, b"\r\n");
- }
+
+ // non-special write Name and Value
+ debug_assert!(
+ !is_name_written,
+ "{:?} set is_name_written and didn't continue loop",
+ name,
+ );
+ extend(dst, name.as_str().as_bytes());
+ extend(dst, b": ");
+ extend(dst, value.as_bytes());
+ extend(dst, b"\r\n");
}
+ handle_is_name_written!();
+
if !wrote_len {
encoder = match msg.body {
Some(BodyLength::Unknown) => {
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -629,7 +635,7 @@ impl Http1Transaction for Client {
headers.reserve(headers_len);
for header in &headers_indices[..headers_len] {
let name = header_name!(&slice[header.name.0..header.name.1]);
- let value = header_value!(slice.slice(header.value.0, header.value.1));
+ let value = header_value!(slice.slice(header.value.0..header.value.1));
if let header::CONNECTION = name {
// keep_alive was previously set to default for Version
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -820,8 +826,7 @@ impl Client {
// If the user set a transfer-encoding, respect that. Let's just
// make sure `chunked` is the final encoding.
- let encoder = match headers.entry(header::TRANSFER_ENCODING)
- .expect("TRANSFER_ENCODING is valid HeaderName") {
+ let encoder = match headers.entry(header::TRANSFER_ENCODING) {
Entry::Occupied(te) => {
should_remove_con_len = true;
if headers::is_chunked(te.iter()) {
diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs
--- a/src/proto/h2/client.rs
+++ b/src/proto/h2/client.rs
@@ -2,7 +2,7 @@ use futures_channel::{mpsc, oneshot};
use futures_util::future::{self, FutureExt as _, TryFutureExt as _, Either};
use futures_util::stream::StreamExt as _;
use h2::client::{Builder, SendRequest};
-use tokio_io::{AsyncRead, AsyncWrite};
+use tokio::io::{AsyncRead, AsyncWrite};
use crate::headers::content_length_parse_all;
use crate::body::Payload;
diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs
--- a/src/proto/h2/client.rs
+++ b/src/proto/h2/client.rs
@@ -71,7 +71,7 @@ where
}
};
- exec.execute(conn_task)?;
+ exec.execute(conn_task);
Ok(ClientTask {
conn_drop_ref,
diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs
--- a/src/proto/h2/client.rs
+++ b/src/proto/h2/client.rs
@@ -155,7 +155,7 @@ where
drop(conn_drop_ref);
x
});
- self.executor.execute(pipe)?;
+ self.executor.execute(pipe);
}
}
}
diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs
--- a/src/proto/h2/client.rs
+++ b/src/proto/h2/client.rs
@@ -175,7 +175,7 @@ where
}
}
});
- self.executor.execute(cb.send_when(fut))?;
+ self.executor.execute(cb.send_when(fut));
continue;
},
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -4,7 +4,7 @@ use std::marker::Unpin;
use pin_project::{pin_project, project};
use h2::Reason;
use h2::server::{Builder, Connection, Handshake, SendResponse};
-use tokio_io::{AsyncRead, AsyncWrite};
+use tokio::io::{AsyncRead, AsyncWrite};
use crate::body::Payload;
use crate::common::exec::H2Exec;
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -175,7 +175,7 @@ where
crate::Body::h2(stream, content_length)
});
let fut = H2Stream::new(service.call(req), respond);
- exec.execute_h2stream(fut)?;
+ exec.execute_h2stream(fut);
},
Some(Err(e)) => {
return Poll::Ready(Err(crate::Error::new_h2(e)));
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -285,7 +285,6 @@ where
res
.headers_mut()
.entry(::http::header::DATE)
- .expect("DATE is a valid HeaderName")
.or_insert_with(crate::proto::h1::date::update_and_header_value);
diff --git a/src/rt.rs b/src/rt.rs
--- a/src/rt.rs
+++ b/src/rt.rs
@@ -1,42 +1,8 @@
-//! Default runtime
+//! Runtime components
//!
-//! By default, hyper includes the [tokio](https://tokio.rs) runtime. To ease
-//! using it, several types are re-exported here.
+//! By default, hyper includes the [tokio](https://tokio.rs) runtime.
//!
-//! The inclusion of a default runtime can be disabled by turning off hyper's
-//! `runtime` Cargo feature.
+//! If the `runtime` feature is disabled, the types in this module can be used
+//! to plug in other runtimes.
-pub use std::future::Future;
-pub use futures_core::Stream;
-
-use self::inner::Spawn;
-
-/// Spawns a future on the default executor.
-///
-/// # Panics
-///
-/// This function will panic if the default executor is not set.
-///
-/// # Note
-///
-/// The `Spawn` return type is not currently meant for anything other than
-/// to reserve adding new trait implementations to it later. It can be
-/// ignored for now.
-pub fn spawn<F>(f: F) -> Spawn
-where
- F: Future<Output = ()> + Send + 'static,
-{
- tokio::spawn(f);
- Spawn {
- _inner: (),
- }
-}
-
-// Make the `Spawn` type an unnameable, so we can add
-// methods or trait impls to it later without a breaking change.
-mod inner {
- #[allow(missing_debug_implementations)]
- pub struct Spawn {
- pub(super) _inner: (),
- }
-}
+//pub use crate::common::Executor;
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -12,13 +12,11 @@ use std::error::Error as StdError;
use std::fmt;
use std::mem;
#[cfg(feature = "tcp")] use std::net::SocketAddr;
-#[cfg(feature = "tcp")] use std::time::Duration;
use bytes::Bytes;
use futures_core::Stream;
-use tokio_io::{AsyncRead, AsyncWrite};
+use tokio::io::{AsyncRead, AsyncWrite};
use pin_project::{pin_project, project};
-#[cfg(feature = "tcp")] use tokio_net::driver::Handle;
use crate::body::{Body, Payload};
use crate::common::exec::{Exec, H2Exec, NewSvcExec};
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -357,7 +355,7 @@ impl<E> Http<E> {
/// # use hyper::{Body, Request, Response};
/// # use hyper::service::Service;
/// # use hyper::server::conn::Http;
- /// # use tokio_io::{AsyncRead, AsyncWrite};
+ /// # use tokio::io::{AsyncRead, AsyncWrite};
/// # async fn run<I, S>(some_io: I, some_service: S)
/// # where
/// # I: AsyncRead + AsyncWrite + Unpin + Send + 'static,
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -419,74 +417,6 @@ impl<E> Http<E> {
}
}
- #[cfg(feature = "tcp")]
- #[doc(hidden)]
- #[deprecated]
- #[allow(deprecated)]
- pub fn serve_addr<S, Bd>(&self, addr: &SocketAddr, make_service: S) -> crate::Result<Serve<AddrIncoming, S, E>>
- where
- S: MakeServiceRef<
- AddrStream,
- Body,
- ResBody=Bd,
- >,
- S::Error: Into<Box<dyn StdError + Send + Sync>>,
- S::Service: HttpService<Body>,
- Bd: Payload,
- E: H2Exec<<S::Service as HttpService<Body>>::Future, Bd>,
- {
- let mut incoming = AddrIncoming::new(addr, None)?;
- if self.keep_alive {
- incoming.set_keepalive(Some(Duration::from_secs(90)));
- }
- Ok(self.serve_incoming(incoming, make_service))
- }
-
- #[cfg(feature = "tcp")]
- #[doc(hidden)]
- #[deprecated]
- #[allow(deprecated)]
- pub fn serve_addr_handle<S, Bd>(&self, addr: &SocketAddr, handle: &Handle, make_service: S) -> crate::Result<Serve<AddrIncoming, S, E>>
- where
- S: MakeServiceRef<
- AddrStream,
- Body,
- ResBody=Bd,
- >,
- S::Error: Into<Box<dyn StdError + Send + Sync>>,
- Bd: Payload,
- E: H2Exec<<S::Service as HttpService<Body>>::Future, Bd>,
- {
- let mut incoming = AddrIncoming::new(addr, Some(handle))?;
- if self.keep_alive {
- incoming.set_keepalive(Some(Duration::from_secs(90)));
- }
- Ok(self.serve_incoming(incoming, make_service))
- }
-
- #[doc(hidden)]
- #[deprecated]
- pub fn serve_incoming<I, IO, IE, S, Bd>(&self, incoming: I, make_service: S) -> Serve<I, S, E>
- where
- I: Accept<Conn=IO, Error=IE>,
- IE: Into<Box<dyn StdError + Send + Sync>>,
- IO: AsyncRead + AsyncWrite + Unpin,
- S: MakeServiceRef<
- IO,
- Body,
- ResBody=Bd,
- >,
- S::Error: Into<Box<dyn StdError + Send + Sync>>,
- Bd: Payload,
- E: H2Exec<<S::Service as HttpService<Body>>::Future, Bd>,
- {
- Serve {
- incoming,
- make_service,
- protocol: self.clone(),
- }
- }
-
pub(super) fn serve<I, IO, IE, S, Bd>(&self, incoming: I, make_service: S) -> Serve<I, S, E>
where
I: Accept<Conn=IO, Error=IE>,
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -843,7 +773,7 @@ where
loop {
if let Some(connecting) = ready!(me.serve.as_mut().poll_next_(cx)?) {
let fut = NewSvcTask::new(connecting, watcher.clone());
- me.serve.as_mut().project().protocol.exec.execute_new_svc(fut)?;
+ me.serve.as_mut().project().protocol.exec.execute_new_svc(fut);
} else {
return Poll::Ready(Ok(()));
}
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -876,7 +806,7 @@ where
pub(crate) mod spawn_all {
use std::error::Error as StdError;
- use tokio_io::{AsyncRead, AsyncWrite};
+ use tokio::io::{AsyncRead, AsyncWrite};
use crate::body::{Body, Payload};
use crate::common::exec::H2Exec;
diff --git a/src/server/mod.rs b/src/server/mod.rs
--- a/src/server/mod.rs
+++ b/src/server/mod.rs
@@ -59,7 +59,7 @@ use std::fmt;
#[cfg(feature = "tcp")] use std::time::Duration;
-use tokio_io::{AsyncRead, AsyncWrite};
+use tokio::io::{AsyncRead, AsyncWrite};
use pin_project::pin_project;
use crate::body::{Body, Payload};
diff --git a/src/server/mod.rs b/src/server/mod.rs
--- a/src/server/mod.rs
+++ b/src/server/mod.rs
@@ -113,7 +113,7 @@ impl Server<AddrIncoming, ()> {
/// This method will panic if binding to the address fails. For a method
/// to bind to an address and return a `Result`, see `Server::try_bind`.
pub fn bind(addr: &SocketAddr) -> Builder<AddrIncoming> {
- let incoming = AddrIncoming::new(addr, None)
+ let incoming = AddrIncoming::new(addr)
.unwrap_or_else(|e| {
panic!("error binding to {}: {}", addr, e);
});
diff --git a/src/server/mod.rs b/src/server/mod.rs
--- a/src/server/mod.rs
+++ b/src/server/mod.rs
@@ -122,14 +122,13 @@ impl Server<AddrIncoming, ()> {
/// Tries to bind to the provided address, and returns a [`Builder`](Builder).
pub fn try_bind(addr: &SocketAddr) -> crate::Result<Builder<AddrIncoming>> {
- AddrIncoming::new(addr, None)
+ AddrIncoming::new(addr)
.map(Server::builder)
}
/// Create a new instance from a `std::net::TcpListener` instance.
pub fn from_tcp(listener: StdTcpListener) -> Result<Builder<AddrIncoming>, crate::Error> {
- let handle = tokio_net::driver::Handle::default();
- AddrIncoming::from_std(listener, &handle)
+ AddrIncoming::from_std(listener)
.map(Server::builder)
}
}
diff --git a/src/server/shutdown.rs b/src/server/shutdown.rs
--- a/src/server/shutdown.rs
+++ b/src/server/shutdown.rs
@@ -1,6 +1,6 @@
use std::error::Error as StdError;
-use tokio_io::{AsyncRead, AsyncWrite};
+use tokio::io::{AsyncRead, AsyncWrite};
use pin_project::{pin_project, project};
use crate::body::{Body, Payload};
diff --git a/src/server/tcp.rs b/src/server/tcp.rs
--- a/src/server/tcp.rs
+++ b/src/server/tcp.rs
@@ -4,9 +4,8 @@ use std::net::{SocketAddr, TcpListener as StdTcpListener};
use std::time::Duration;
use futures_util::FutureExt as _;
-use tokio_net::driver::Handle;
-use tokio_net::tcp::TcpListener;
-use tokio_timer::Delay;
+use tokio::net::TcpListener;
+use tokio::time::Delay;
use crate::common::{Future, Pin, Poll, task};
diff --git a/src/server/tcp.rs b/src/server/tcp.rs
--- a/src/server/tcp.rs
+++ b/src/server/tcp.rs
@@ -25,20 +24,15 @@ pub struct AddrIncoming {
}
impl AddrIncoming {
- pub(super) fn new(addr: &SocketAddr, handle: Option<&Handle>) -> crate::Result<Self> {
+ pub(super) fn new(addr: &SocketAddr) -> crate::Result<Self> {
let std_listener = StdTcpListener::bind(addr)
.map_err(crate::Error::new_listen)?;
- if let Some(handle) = handle {
- AddrIncoming::from_std(std_listener, handle)
- } else {
- let handle = Handle::default();
- AddrIncoming::from_std(std_listener, &handle)
- }
+ AddrIncoming::from_std(std_listener)
}
- pub(super) fn from_std(std_listener: StdTcpListener, handle: &Handle) -> crate::Result<Self> {
- let listener = TcpListener::from_std(std_listener, &handle)
+ pub(super) fn from_std(std_listener: StdTcpListener) -> crate::Result<Self> {
+ let listener = TcpListener::from_std(std_listener)
.map_err(crate::Error::new_listen)?;
let addr = listener.local_addr().map_err(crate::Error::new_listen)?;
Ok(AddrIncoming {
diff --git a/src/server/tcp.rs b/src/server/tcp.rs
--- a/src/server/tcp.rs
+++ b/src/server/tcp.rs
@@ -53,7 +47,7 @@ impl AddrIncoming {
/// Creates a new `AddrIncoming` binding to provided socket address.
pub fn bind(addr: &SocketAddr) -> crate::Result<Self> {
- AddrIncoming::new(addr, None)
+ AddrIncoming::new(addr)
}
/// Get the local address bound to this listener.
diff --git a/src/server/tcp.rs b/src/server/tcp.rs
--- a/src/server/tcp.rs
+++ b/src/server/tcp.rs
@@ -135,7 +129,7 @@ impl AddrIncoming {
error!("accept error: {}", e);
// Sleep 1s.
- let mut timeout = tokio_timer::delay_for(Duration::from_secs(1));
+ let mut timeout = tokio::time::delay_for(Duration::from_secs(1));
match Pin::new(&mut timeout).poll(cx) {
Poll::Ready(()) => {
diff --git a/src/server/tcp.rs b/src/server/tcp.rs
--- a/src/server/tcp.rs
+++ b/src/server/tcp.rs
@@ -197,8 +191,8 @@ mod addr_stream {
use std::io;
use std::net::SocketAddr;
use bytes::{Buf, BufMut};
- use tokio_net::tcp::TcpStream;
- use tokio_io::{AsyncRead, AsyncWrite};
+ use tokio::net::TcpStream;
+ use tokio::io::{AsyncRead, AsyncWrite};
use crate::common::{Pin, Poll, task};
diff --git a/src/server/tcp.rs b/src/server/tcp.rs
--- a/src/server/tcp.rs
+++ b/src/server/tcp.rs
@@ -232,8 +226,7 @@ mod addr_stream {
}
impl AsyncRead for AddrStream {
- #[inline]
- unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [u8]) -> bool {
+ unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [std::mem::MaybeUninit<u8>]) -> bool {
self.inner.prepare_uninitialized_buffer(buf)
}
diff --git a/src/service/make.rs b/src/service/make.rs
--- a/src/service/make.rs
+++ b/src/service/make.rs
@@ -1,7 +1,7 @@
use std::error::Error as StdError;
use std::fmt;
-use tokio_io::{AsyncRead, AsyncWrite};
+use tokio::io::{AsyncRead, AsyncWrite};
use crate::body::Payload;
use crate::common::{Future, Poll, task};
diff --git a/src/upgrade.rs b/src/upgrade.rs
--- a/src/upgrade.rs
+++ b/src/upgrade.rs
@@ -12,8 +12,8 @@ use std::io;
use std::marker::Unpin;
use bytes::{/*Buf, BufMut, */Bytes};
-use tokio_io::{AsyncRead, AsyncWrite};
-use tokio_sync::oneshot;
+use tokio::io::{AsyncRead, AsyncWrite};
+use tokio::sync::oneshot;
use crate::common::io::Rewind;
use crate::common::{Future, Pin, Poll, task};
diff --git a/src/upgrade.rs b/src/upgrade.rs
--- a/src/upgrade.rs
+++ b/src/upgrade.rs
@@ -136,7 +136,7 @@ impl Upgraded {
}
impl AsyncRead for Upgraded {
- unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [u8]) -> bool {
+ unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [std::mem::MaybeUninit<u8>]) -> bool {
self.io.prepare_uninitialized_buffer(buf)
}
|
looks like the blocker is gone now? :)
The h2 change was just committed to the code repository, and no new release has been uploaded to `crates.io`. :disappointed:
A follow up change is needed to `h2` to restore all behavior: https://github.com/hyperium/h2/pull/429
Please keep in mind that in the US it's Thanksgiving weekend, so many folks will not be looking at GitHub, instead spending time with friends and family and overeating.
hyperium/h2#429 has merged, though for the remaining blocker of publishing a new h2 release let's utilize threads in the relevant repo: https://github.com/hyperium/h2/issues/430
Yep, `h2` updated its `tokio` dependency; there's one last remaining thing there: we need a new `http` release since `tokio` uses a newer `bytes`.
This is my highest priority this week!
|
2019-12-04T00:07:28Z
| 2,030
|
Update to tokio 0.2
- [ ] Blocked on `h2`: https://github.com/hyperium/h2/milestone/2
|
hyperium__hyper-2030
|
diff --git a/Cargo.toml b/Cargo.toml
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -56,9 +49,8 @@ spmc = "0.3"
serde = "1.0"
serde_derive = "1.0"
serde_json = "1.0"
-tokio = "=0.2.0-alpha.6" # using #[tokio::test] attributes
-tokio-fs = "=0.2.0-alpha.6"
-tokio-test = "=0.2.0-alpha.6"
+tokio = { version = "0.2.2", features = ["fs", "macros", "rt-util", "sync", "time", "test-util"] }
+tokio-test = "0.2"
url = "1.0"
[features]
diff --git a/Cargo.toml b/Cargo.toml
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -206,3 +198,4 @@ required-features = ["runtime", "unstable-stream"]
name = "server"
path = "tests/server.rs"
required-features = ["runtime"]
+
diff --git a/benches/connect.rs b/benches/connect.rs
--- a/benches/connect.rs
+++ b/benches/connect.rs
@@ -3,8 +3,8 @@
extern crate test;
+use std::net::SocketAddr;
use tokio::net::TcpListener;
-use tokio::runtime::current_thread::Runtime;
use hyper::client::connect::{Destination, HttpConnector};
use hyper::service::Service;
use http::Uri;
diff --git a/benches/connect.rs b/benches/connect.rs
--- a/benches/connect.rs
+++ b/benches/connect.rs
@@ -12,8 +12,12 @@ use http::Uri;
#[bench]
fn http_connector(b: &mut test::Bencher) {
let _ = pretty_env_logger::try_init();
- let mut rt = Runtime::new().unwrap();
- let mut listener = rt.block_on(TcpListener::bind("127.0.0.1:0")).expect("bind");
+ let mut rt = tokio::runtime::Builder::new()
+ .enable_all()
+ .basic_scheduler()
+ .build()
+ .expect("rt build");
+ let mut listener = rt.block_on(TcpListener::bind(&SocketAddr::from(([127, 0, 0, 1], 0)))).expect("bind");
let addr = listener.local_addr().expect("local_addr");
let uri: Uri = format!("http://{}/", addr).parse().expect("uri parse");
let dst = Destination::try_from_uri(uri).expect("destination");
diff --git a/benches/end_to_end.rs b/benches/end_to_end.rs
--- a/benches/end_to_end.rs
+++ b/benches/end_to_end.rs
@@ -6,9 +6,8 @@ extern crate test;
use std::net::SocketAddr;
use futures_util::future::join_all;
-use tokio::runtime::current_thread::Runtime;
-use hyper::{Body, Method, Request, Response, Server};
+use hyper::{body::HttpBody as _, Body, Method, Request, Response, Server};
use hyper::client::HttpConnector;
// HTTP1
diff --git a/benches/end_to_end.rs b/benches/end_to_end.rs
--- a/benches/end_to_end.rs
+++ b/benches/end_to_end.rs
@@ -264,8 +263,12 @@ impl Opts {
fn bench(self, b: &mut test::Bencher) {
let _ = pretty_env_logger::try_init();
// Create a runtime of current thread.
- let mut rt = Runtime::new().unwrap();
- let exec = rt.handle();
+ let mut rt = tokio::runtime::Builder::new()
+ .enable_all()
+ .basic_scheduler()
+ .build()
+ .expect("rt build");
+ let exec = rt.handle().clone();
let req_len = self.request_body.map(|b| b.len()).unwrap_or(0) as u64;
let req_len = if self.request_chunks > 0 {
diff --git a/benches/pipeline.rs b/benches/pipeline.rs
--- a/benches/pipeline.rs
+++ b/benches/pipeline.rs
@@ -31,9 +30,17 @@ fn hello_world(b: &mut test::Bencher) {
Ok::<_, hyper::Error>(Response::new(Body::from("Hello, World!")))
}))
});
- let srv = Server::bind(&addr)
- .http1_pipeline_flush(true)
- .serve(make_svc);
+
+ let mut rt = tokio::runtime::Builder::new()
+ .enable_all()
+ .basic_scheduler()
+ .build()
+ .expect("rt build");
+ let srv = rt.block_on(async move {
+ Server::bind(&addr)
+ .http1_pipeline_flush(true)
+ .serve(make_svc)
+ });
addr_tx.send(srv.local_addr()).unwrap();
diff --git a/benches/pipeline.rs b/benches/pipeline.rs
--- a/benches/pipeline.rs
+++ b/benches/pipeline.rs
@@ -42,13 +49,11 @@ fn hello_world(b: &mut test::Bencher) {
until_rx.await.ok();
});
- let mut rt = current_thread::Runtime::new().unwrap();
- rt.spawn(async {
+ rt.block_on(async {
if let Err(e) = graceful.await {
panic!("server error: {}", e);
}
});
- rt.run().unwrap();
});
addr_rx.recv().unwrap()
diff --git a/examples/web_api.rs b/examples/web_api.rs
--- a/examples/web_api.rs
+++ b/examples/web_api.rs
@@ -35,13 +35,17 @@ async fn client_request_response(
Ok(Response::new(body))
}
-async fn api_post_response(req: Request<Body>) -> Result<Response<Body>> {
- // A web api to run against
- let entire_body = req.into_body().try_concat().await?;
- // TODO: Replace all unwraps with proper error handling
- let str = String::from_utf8(entire_body.to_vec())?;
- let mut data : serde_json::Value = serde_json::from_str(&str)?;
+async fn api_post_response(mut req: Request<Body>) -> Result<Response<Body>> {
+ // Concatenate the body...
+ let mut whole_body = Vec::new();
+ while let Some(chunk) = req.body_mut().next().await {
+ whole_body.extend_from_slice(&chunk?);
+ }
+ // Decode as JSON...
+ let mut data: serde_json::Value = serde_json::from_slice(&whole_body)?;
+ // Change the JSON...
data["test"] = serde_json::Value::from("test_value");
+ // And respond with the new JSON.
let json = serde_json::to_string(&data)?;
let response = Response::builder()
.status(StatusCode::OK)
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -693,8 +674,6 @@ impl ConnectingTcp {
mod tests {
use std::io;
- use tokio_net::driver::Handle;
-
use super::{Connected, Destination, HttpConnector};
use super::super::sealed::Connect;
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -738,8 +717,6 @@ mod tests {
use std::task::Poll;
use std::time::{Duration, Instant};
- use tokio::runtime::current_thread::Runtime;
-
use crate::common::{Pin, task};
use super::dns;
use super::ConnectingTcp;
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -748,7 +725,12 @@ mod tests {
let server4 = TcpListener::bind("127.0.0.1:0").unwrap();
let addr = server4.local_addr().unwrap();
let _server6 = TcpListener::bind(&format!("[::1]:{}", addr.port())).unwrap();
- let mut rt = Runtime::new().unwrap();
+ let mut rt = tokio::runtime::Builder::new()
+ .enable_io()
+ .enable_time()
+ .basic_scheduler()
+ .build()
+ .unwrap();
let local_timeout = Duration::default();
let unreachable_v4_timeout = measure_connect(unreachable_ipv4_addr()).1;
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -804,12 +786,13 @@ mod tests {
}
let addrs = hosts.iter().map(|host| (host.clone(), addr.port()).into()).collect();
- let connecting_tcp = ConnectingTcp::new(None, dns::IpAddrs::new(addrs), None, Some(fallback_timeout), false);
- let fut = ConnectingTcpFuture(connecting_tcp);
-
- let start = Instant::now();
- let res = rt.block_on(fut).unwrap();
- let duration = start.elapsed();
+ let (res, duration) = rt.block_on(async move {
+ let connecting_tcp = ConnectingTcp::new(None, dns::IpAddrs::new(addrs), None, Some(fallback_timeout), false);
+ let fut = ConnectingTcpFuture(connecting_tcp);
+ let start = Instant::now();
+ let res = fut.await.unwrap();
+ (res, start.elapsed())
+ });
// Allow actual duration to be +/- 150ms off.
let min_duration = if timeout >= Duration::from_millis(150) {
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -830,7 +813,7 @@ mod tests {
type Output = Result<u8, std::io::Error>;
fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
- match self.0.poll(cx,&Some(Handle::default())) {
+ match self.0.poll(cx) {
Poll::Ready(Ok(stream)) => Poll::Ready(Ok(
if stream.peer_addr().unwrap().is_ipv4() { 4 } else { 6 }
)),
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -253,8 +253,6 @@ mod tests {
use std::pin::Pin;
use std::task::{Context, Poll};
- use tokio::runtime::current_thread::Runtime;
-
use super::{Callback, channel, Receiver};
#[derive(Debug)]
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -285,56 +283,43 @@ mod tests {
}
}
- #[test]
- fn drop_receiver_sends_cancel_errors() {
+ #[tokio::test]
+ async fn drop_receiver_sends_cancel_errors() {
let _ = pretty_env_logger::try_init();
- let mut rt = Runtime::new().unwrap();
let (mut tx, mut rx) = channel::<Custom, ()>();
// must poll once for try_send to succeed
- rt.block_on(async {
- let poll_once = PollOnce(&mut rx);
- assert!(poll_once.await.is_none(), "rx empty");
- });
+ assert!(PollOnce(&mut rx).await.is_none(), "rx empty");
let promise = tx.try_send(Custom(43)).unwrap();
drop(rx);
- rt.block_on(async {
- let fulfilled = promise.await;
- let err = fulfilled
- .expect("fulfilled")
- .expect_err("promise should error");
- match (err.0.kind(), err.1) {
- (&crate::error::Kind::Canceled, Some(_)) => (),
- e => panic!("expected Error::Cancel(_), found {:?}", e),
- }
- });
+ let fulfilled = promise.await;
+ let err = fulfilled
+ .expect("fulfilled")
+ .expect_err("promise should error");
+ match (err.0.kind(), err.1) {
+ (&crate::error::Kind::Canceled, Some(_)) => (),
+ e => panic!("expected Error::Cancel(_), found {:?}", e),
+ }
}
- #[test]
- fn sender_checks_for_want_on_send() {
- let mut rt = Runtime::new().unwrap();
+ #[tokio::test]
+ async fn sender_checks_for_want_on_send() {
let (mut tx, mut rx) = channel::<Custom, ()>();
// one is allowed to buffer, second is rejected
let _ = tx.try_send(Custom(1)).expect("1 buffered");
tx.try_send(Custom(2)).expect_err("2 not ready");
- rt.block_on(async {
- let poll_once = PollOnce(&mut rx);
- assert!(poll_once.await.is_some(), "rx empty");
- });
+ assert!(PollOnce(&mut rx).await.is_some(), "rx once");
// Even though 1 has been popped, only 1 could be buffered for the
// lifetime of the channel.
tx.try_send(Custom(2)).expect_err("2 still not ready");
- rt.block_on(async {
- let poll_once = PollOnce(&mut rx);
- assert!(poll_once.await.is_none(), "rx empty");
- });
+ assert!(PollOnce(&mut rx).await.is_none(), "rx empty");
let _ = tx.try_send(Custom(2)).expect("2 ready");
}
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -358,7 +343,11 @@ mod tests {
fn giver_queue_throughput(b: &mut test::Bencher) {
use crate::{Body, Request, Response};
- let mut rt = Runtime::new().unwrap();
+ let mut rt = tokio::runtime::Builder::new()
+ .enable_all()
+ .basic_scheduler()
+ .build()
+ .unwrap();
let (mut tx, mut rx) = channel::<Request<Body>, Response<Body>>();
b.iter(move || {
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -378,7 +367,11 @@ mod tests {
#[cfg(feature = "nightly")]
#[bench]
fn giver_queue_not_ready(b: &mut test::Bencher) {
- let mut rt = Runtime::new().unwrap();
+ let mut rt = tokio::runtime::Builder::new()
+ .enable_all()
+ .basic_scheduler()
+ .build()
+ .unwrap();
let (_tx, mut rx) = channel::<i32, ()>();
b.iter(move || {
rt.block_on(async {
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -779,8 +774,6 @@ mod tests {
use std::task::Poll;
use std::time::Duration;
- use tokio::runtime::current_thread::Runtime;
-
use crate::common::{Exec, Future, Pin, task};
use super::{Connecting, Key, Poolable, Pool, Reservation, WeakOpt};
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -825,21 +818,18 @@ mod tests {
pool
}
- #[test]
- fn test_pool_checkout_smoke() {
- let mut rt = Runtime::new().unwrap();
+ #[tokio::test]
+ async fn test_pool_checkout_smoke() {
let pool = pool_no_timer();
let key = Arc::new("foo".to_string());
let pooled = pool.pooled(c(key.clone()), Uniq(41));
drop(pooled);
- rt.block_on(async {
- match pool.checkout(key).await {
- Ok(pooled) => assert_eq!(*pooled, Uniq(41)),
- Err(_) => panic!("not ready"),
- };
- })
+ match pool.checkout(key).await {
+ Ok(pooled) => assert_eq!(*pooled, Uniq(41)),
+ Err(_) => panic!("not ready"),
+ };
}
/// Helper to check if the future is ready after polling once.
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -859,27 +849,23 @@ mod tests {
}
}
- #[test]
- fn test_pool_checkout_returns_none_if_expired() {
- let mut rt = Runtime::new().unwrap();
+ #[tokio::test]
+ async fn test_pool_checkout_returns_none_if_expired() {
let pool = pool_no_timer();
let key = Arc::new("foo".to_string());
let pooled = pool.pooled(c(key.clone()), Uniq(41));
drop(pooled);
- std::thread::sleep(pool.locked().timeout.unwrap());
- rt.block_on(async {
- let mut checkout = pool.checkout(key);
- let poll_once = PollOnce(&mut checkout);
- let is_not_ready = poll_once.await.is_none();
- assert!(is_not_ready);
- });
+ tokio::time::delay_for(pool.locked().timeout.unwrap()).await;
+ let mut checkout = pool.checkout(key);
+ let poll_once = PollOnce(&mut checkout);
+ let is_not_ready = poll_once.await.is_none();
+ assert!(is_not_ready);
}
#[cfg(feature = "runtime")]
- #[test]
- fn test_pool_checkout_removes_expired() {
- let mut rt = Runtime::new().unwrap();
+ #[tokio::test]
+ async fn test_pool_checkout_removes_expired() {
let pool = pool_no_timer();
let key = Arc::new("foo".to_string());
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -888,15 +874,13 @@ mod tests {
pool.pooled(c(key.clone()), Uniq(99));
assert_eq!(pool.locked().idle.get(&key).map(|entries| entries.len()), Some(3));
- std::thread::sleep(pool.locked().timeout.unwrap());
-
- rt.block_on(async {
- let mut checkout = pool.checkout(key.clone());
- let poll_once = PollOnce(&mut checkout);
- // checkout.await should clean out the expired
- poll_once.await;
- assert!(pool.locked().idle.get(&key).is_none());
- });
+ tokio::time::delay_for(pool.locked().timeout.unwrap()).await;
+
+ let mut checkout = pool.checkout(key.clone());
+ let poll_once = PollOnce(&mut checkout);
+ // checkout.await should clean out the expired
+ poll_once.await;
+ assert!(pool.locked().idle.get(&key).is_none());
}
#[test]
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -913,14 +897,11 @@ mod tests {
}
#[cfg(feature = "runtime")]
- #[test]
- fn test_pool_timer_removes_expired() {
- use std::time::Instant;
- use tokio_timer::delay;
- let mut rt = Runtime::new().unwrap();
+ #[tokio::test]
+ async fn test_pool_timer_removes_expired() {
let pool = Pool::new(super::Config {
enabled: true,
- keep_alive_timeout: Some(Duration::from_millis(100)),
+ keep_alive_timeout: Some(Duration::from_millis(10)),
max_idle_per_host: ::std::usize::MAX,
},
&Exec::Default,
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -928,32 +909,23 @@ mod tests {
let key = Arc::new("foo".to_string());
- // Since pool.pooled() will be calling spawn on drop, need to be sure
- // those drops are called while `rt` is the current executor. To do so,
- // call those inside a future.
- rt.block_on(async {
- pool.pooled(c(key.clone()), Uniq(41));
- pool.pooled(c(key.clone()), Uniq(5));
- pool.pooled(c(key.clone()), Uniq(99));
- });
+ pool.pooled(c(key.clone()), Uniq(41));
+ pool.pooled(c(key.clone()), Uniq(5));
+ pool.pooled(c(key.clone()), Uniq(99));
assert_eq!(pool.locked().idle.get(&key).map(|entries| entries.len()), Some(3));
// Let the timer tick passed the expiration...
- rt.block_on(async {
- let deadline = Instant::now() + Duration::from_millis(200);
- delay(deadline).await;
- });
+ tokio::time::delay_for(Duration::from_millis(50)).await;
assert!(pool.locked().idle.get(&key).is_none());
}
- #[test]
- fn test_pool_checkout_task_unparked() {
+ #[tokio::test]
+ async fn test_pool_checkout_task_unparked() {
use futures_util::future::join;
use futures_util::FutureExt;
- let mut rt = Runtime::new().unwrap();
let pool = pool_no_timer();
let key = Arc::new("foo".to_string());
let pooled = pool.pooled(c(key.clone()), Uniq(41));
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -970,14 +942,11 @@ mod tests {
},
).map(|(entry, _)| entry);
- rt.block_on(async {
- assert_eq!(*checkout.await.unwrap(), Uniq(41));
- });
+ assert_eq!(*checkout.await.unwrap(), Uniq(41));
}
- #[test]
- fn test_pool_checkout_drop_cleans_up_waiters() {
- let mut rt = Runtime::new().unwrap();
+ #[tokio::test]
+ async fn test_pool_checkout_drop_cleans_up_waiters() {
let pool = pool_no_timer::<Uniq<i32>>();
let key = Arc::new("localhost:12345".to_string());
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -988,12 +957,10 @@ mod tests {
let poll_once2 = PollOnce(&mut checkout2);
// first poll needed to get into Pool's parked
- rt.block_on(async {
- poll_once1.await;
- assert_eq!(pool.locked().waiters.get(&key).unwrap().len(), 1);
- poll_once2.await;
- assert_eq!(pool.locked().waiters.get(&key).unwrap().len(), 2);
- });
+ poll_once1.await;
+ assert_eq!(pool.locked().waiters.get(&key).unwrap().len(), 1);
+ poll_once2.await;
+ assert_eq!(pool.locked().waiters.get(&key).unwrap().len(), 2);
// on drop, clean up Pool
drop(checkout1);
diff --git a/src/common/drain.rs b/src/common/drain.rs
--- a/src/common/drain.rs
+++ b/src/common/drain.rs
@@ -151,7 +148,8 @@ mod tests {
#[test]
fn watch() {
- tokio_test::task::mock(|cx| {
+ let mut mock = tokio_test::task::spawn(());
+ mock.enter(|cx, _| {
let (tx, rx) = channel();
let fut = TestMe {
draining: false,
diff --git a/src/common/drain.rs b/src/common/drain.rs
--- a/src/common/drain.rs
+++ b/src/common/drain.rs
@@ -198,7 +196,8 @@ mod tests {
#[test]
fn watch_clones() {
- tokio_test::task::mock(|cx| {
+ let mut mock = tokio_test::task::spawn(());
+ mock.enter(|cx, _| {
let (tx, rx) = channel();
let fut1 = TestMe {
diff --git a/src/common/io/rewind.rs b/src/common/io/rewind.rs
--- a/src/common/io/rewind.rs
+++ b/src/common/io/rewind.rs
@@ -118,7 +114,7 @@ mod tests {
// Rewind the stream so that it is as if we never read in the first place.
- stream.rewind(Bytes::from(&buf[..]));
+ stream.rewind(Bytes::copy_from_slice(&buf[..]));
let mut buf = [0; 5];
stream
diff --git a/src/common/io/rewind.rs b/src/common/io/rewind.rs
--- a/src/common/io/rewind.rs
+++ b/src/common/io/rewind.rs
@@ -148,7 +144,7 @@ mod tests {
// Rewind the stream so that it is as if we never read in the first place.
- stream.rewind(Bytes::from(&buf[..]));
+ stream.rewind(Bytes::copy_from_slice(&buf[..]));
let mut buf = [0; 5];
stream
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -915,7 +915,11 @@ mod tests {
*conn.io.read_buf_mut() = ::bytes::BytesMut::from(&s[..]);
conn.state.cached_headers = Some(HeaderMap::with_capacity(2));
- let mut rt = tokio::runtime::current_thread::Runtime::new().unwrap();
+ let mut rt = tokio::runtime::Builder::new()
+ .enable_all()
+ .basic_scheduler()
+ .build()
+ .unwrap();
b.iter(|| {
rt.block_on(futures_util::future::poll_fn(|cx| {
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -328,7 +328,7 @@ impl StdError for IncompleteBody {
mod tests {
use std::time::Duration;
use std::pin::Pin;
- use tokio_io::AsyncRead;
+ use tokio::io::AsyncRead;
use super::*;
impl<'a> MemRead for &'a [u8] {
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -336,7 +336,7 @@ mod tests {
let n = ::std::cmp::min(len, self.len());
if n > 0 {
let (a, b) = self.split_at(n);
- let buf = Bytes::from(a);
+ let buf = Bytes::copy_from_slice(a);
*self = b;
Poll::Ready(Ok(buf))
} else {
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -349,7 +349,7 @@ mod tests {
fn read_mem(&mut self, cx: &mut task::Context<'_>, len: usize) -> Poll<io::Result<Bytes>> {
let mut v = vec![0; len];
let n = ready!(Pin::new(self).poll_read(cx, &mut v)?);
- Poll::Ready(Ok(Bytes::from(&v[..n])))
+ Poll::Ready(Ok(Bytes::copy_from_slice(&v[..n])))
}
}
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -605,7 +605,7 @@ mod tests {
fn client_read_bytes_before_writing_request() {
let _ = pretty_env_logger::try_init();
- tokio_test::task::mock(|cx| {
+ tokio_test::task::spawn(()).enter(|cx, _| {
let (io, mut handle) = tokio_test::io::Builder::new()
.build_with_handle();
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -637,36 +637,32 @@ mod tests {
});
}
- #[test]
- fn body_empty_chunks_ignored() {
+ #[tokio::test]
+ async fn body_empty_chunks_ignored() {
let _ = pretty_env_logger::try_init();
- tokio_test::clock::mock(|_timer| {
- tokio_test::task::mock(|cx| {
- let io = tokio_test::io::Builder::new()
- // no reading or writing, just be blocked for the test...
- .wait(Duration::from_secs(5))
- .build();
+ let io = tokio_test::io::Builder::new()
+ // no reading or writing, just be blocked for the test...
+ .wait(Duration::from_secs(5))
+ .build();
- let (mut tx, rx) = crate::client::dispatch::channel();
- let conn = Conn::<_, crate::Chunk, ClientTransaction>::new(io);
- let mut dispatcher = Dispatcher::new(Client::new(rx), conn);
+ let (mut tx, rx) = crate::client::dispatch::channel();
+ let conn = Conn::<_, crate::Chunk, ClientTransaction>::new(io);
+ let mut dispatcher = tokio_test::task::spawn(Dispatcher::new(Client::new(rx), conn));
- // First poll is needed to allow tx to send...
- assert!(Pin::new(&mut dispatcher).poll(cx).is_pending());
+ // First poll is needed to allow tx to send...
+ assert!(dispatcher.poll().is_pending());
- let body = {
- let (mut tx, body) = crate::Body::channel();
- tx.try_send_data("".into()).unwrap();
- body
- };
+ let body = {
+ let (mut tx, body) = crate::Body::channel();
+ tx.try_send_data("".into()).unwrap();
+ body
+ };
- let _res_rx = tx.try_send(crate::Request::new(body)).unwrap();
+ let _res_rx = tx.try_send(crate::Request::new(body)).unwrap();
- // Ensure conn.write_body wasn't called with the empty chunk.
- // If it is, it will trigger an assertion.
- assert!(Pin::new(&mut dispatcher).poll(cx).is_pending());
- });
- });
+ // Ensure conn.write_body wasn't called with the empty chunk.
+ // If it is, it will trigger an assertion.
+ assert!(dispatcher.poll().is_pending());
}
}
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -906,8 +911,7 @@ fn set_content_length(headers: &mut HeaderMap, len: u64) -> Encoder {
// so perhaps only do that while the user is developing/testing.
if cfg!(debug_assertions) {
- match headers.entry(header::CONTENT_LENGTH)
- .expect("CONTENT_LENGTH is valid HeaderName") {
+ match headers.entry(header::CONTENT_LENGTH) {
Entry::Occupied(mut cl) => {
// Internal sanity check, we should have already determined
// that the header was illegal before calling this function.
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1067,7 +1071,7 @@ mod tests {
#[test]
fn test_parse_request() {
let _ = pretty_env_logger::try_init();
- let mut raw = BytesMut::from(b"GET /echo HTTP/1.1\r\nHost: hyper.rs\r\n\r\n".to_vec());
+ let mut raw = BytesMut::from("GET /echo HTTP/1.1\r\nHost: hyper.rs\r\n\r\n");
let mut method = None;
let msg = Server::parse(&mut raw, ParseContext {
cached_headers: &mut None,
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1086,7 +1090,7 @@ mod tests {
#[test]
fn test_parse_response() {
let _ = pretty_env_logger::try_init();
- let mut raw = BytesMut::from(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n".to_vec());
+ let mut raw = BytesMut::from("HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n");
let ctx = ParseContext {
cached_headers: &mut None,
req_method: &mut Some(crate::Method::GET),
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1101,7 +1105,7 @@ mod tests {
#[test]
fn test_parse_request_errors() {
- let mut raw = BytesMut::from(b"GET htt:p// HTTP/1.1\r\nHost: hyper.rs\r\n\r\n".to_vec());
+ let mut raw = BytesMut::from("GET htt:p// HTTP/1.1\r\nHost: hyper.rs\r\n\r\n");
let ctx = ParseContext {
cached_headers: &mut None,
req_method: &mut None,
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1480,7 +1484,7 @@ mod tests {
#[bench]
fn bench_parse_incoming(b: &mut Bencher) {
let mut raw = BytesMut::from(
- b"GET /super_long_uri/and_whatever?what_should_we_talk_about/\
+ &b"GET /super_long_uri/and_whatever?what_should_we_talk_about/\
I_wonder/Hard_to_write_in_an_uri_after_all/you_have_to_make\
_up_the_punctuation_yourself/how_fun_is_that?test=foo&test1=\
foo1&test2=foo2&test3=foo3&test4=foo4 HTTP/1.1\r\nHost: \
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1496,7 +1500,7 @@ mod tests {
X-Content-Duration: None\r\nX-Content-Security-Policy: None\
\r\nX-DNSPrefetch-Control: None\r\nX-Frame-Options: \
Something important obviously\r\nX-Requested-With: Nothing\
- \r\n\r\n".to_vec()
+ \r\n\r\n"[..]
);
let len = raw.len();
let mut headers = Some(HeaderMap::new());
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1526,7 +1530,7 @@ mod tests {
#[bench]
fn bench_parse_short(b: &mut Bencher) {
let s = &b"GET / HTTP/1.1\r\nHost: localhost:8080\r\n\r\n"[..];
- let mut raw = BytesMut::from(s.to_vec());
+ let mut raw = BytesMut::from(s);
let len = raw.len();
let mut headers = Some(HeaderMap::new());
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -15,9 +15,9 @@ use hyper::{Body, Client, Method, Request, StatusCode};
use futures_core::{Future, Stream, TryFuture};
use futures_channel::oneshot;
use futures_util::future::{self, FutureExt, TryFutureExt};
-use futures_util::stream::TryStreamExt;
-use tokio::runtime::current_thread::Runtime;
-use tokio_net::tcp::TcpStream;
+use futures_util::StreamExt;
+use tokio::runtime::Runtime;
+use tokio::net::TcpStream;
fn s(buf: &[u8]) -> &str {
::std::str::from_utf8(buf).expect("from_utf8")
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -27,6 +27,14 @@ fn tcp_connect(addr: &SocketAddr) -> impl Future<Output = std::io::Result<TcpStr
TcpStream::connect(*addr)
}
+async fn concat(mut body: Body) -> Result<hyper::Chunk, hyper::Error> {
+ let mut vec = Vec::new();
+ while let Some(chunk) = body.next().await {
+ vec.extend_from_slice(&chunk?);
+ }
+ Ok(vec.into())
+}
+
macro_rules! test {
(
name: $name:ident,
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -145,7 +153,7 @@ macro_rules! test {
);
)*
- let body = rt.block_on(res.into_body().try_concat())
+ let body = rt.block_on(concat(res.into_body()))
.expect("body concat wait");
let expected_res_body = Option::<&[u8]>::from($response_body)
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -285,15 +293,15 @@ macro_rules! __client_req_prop {
});
($req_builder:ident, $body:ident, $addr:ident, method: $method:ident) => ({
- $req_builder.method(Method::$method);
+ $req_builder = $req_builder.method(Method::$method);
});
($req_builder:ident, $body:ident, $addr:ident, version: $version:ident) => ({
- $req_builder.version(hyper::Version::$version);
+ $req_builder = $req_builder.version(hyper::Version::$version);
});
($req_builder:ident, $body:ident, $addr:ident, url: $url:expr) => ({
- $req_builder.uri(format!($url, addr=$addr));
+ $req_builder = $req_builder.uri(format!($url, addr=$addr));
});
($req_builder:ident, $body:ident, $addr:ident, body: $body_e:expr) => ({
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -304,7 +312,7 @@ macro_rules! __client_req_prop {
macro_rules! __client_req_header {
($req_builder:ident, { $($name:expr => $val:expr,)* }) => {
$(
- $req_builder.header($name, $val);
+ $req_builder = $req_builder.header($name, $val);
)*
}
}
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -931,10 +939,10 @@ mod dispatch_impl {
use futures_core::{self, Future};
use futures_channel::{mpsc, oneshot};
use futures_util::future::{FutureExt, TryFutureExt};
- use futures_util::stream::{StreamExt, TryStreamExt};
- use tokio::runtime::current_thread::Runtime;
- use tokio_io::{AsyncRead, AsyncWrite};
- use tokio_net::tcp::TcpStream;
+ use futures_util::stream::{StreamExt};
+ use tokio::runtime::Runtime;
+ use tokio::io::{AsyncRead, AsyncWrite};
+ use tokio::net::TcpStream;
use hyper::client::connect::{Connected, Destination, HttpConnector};
use hyper::Client;
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -971,10 +979,13 @@ mod dispatch_impl {
.unwrap();
let res = client.request(req).map_ok(move |res| {
assert_eq!(res.status(), hyper::StatusCode::OK);
- tokio_timer::delay_for(Duration::from_secs(1))
});
let rx = rx1.expect("thread panicked");
- rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap();
+ rt.block_on(async move {
+ let (res, ()) = future::join(res, rx).await;
+ res.unwrap();
+ tokio::time::delay_for(Duration::from_secs(1)).await;
+ });
rt.block_on(closes.into_future()).0.expect("closes");
}
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1013,28 +1024,29 @@ mod dispatch_impl {
.unwrap();
client.request(req).and_then(move |res| {
assert_eq!(res.status(), hyper::StatusCode::OK);
- res.into_body().try_concat()
- }).map_ok(|_| {
- tokio_timer::delay_for(Duration::from_secs(1))
- })
+ concat(res.into_body())
+ }).map_ok(|_| ())
};
// client is dropped
let rx = rx1.expect("thread panicked");
- rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap();
+ rt.block_on(async move {
+ let (res, ()) = future::join(res, rx).await;
+ res.unwrap();
+ tokio::time::delay_for(Duration::from_secs(1)).await;
+ });
rt.block_on(closes.into_future()).0.expect("closes");
}
- #[test]
- fn drop_client_closes_idle_connections() {
+ #[tokio::test]
+ async fn drop_client_closes_idle_connections() {
use futures_util::future;
let _ = pretty_env_logger::try_init();
let server = TcpListener::bind("127.0.0.1:0").unwrap();
let addr = server.local_addr().unwrap();
- let mut rt = Runtime::new().unwrap();
let (closes_tx, mut closes) = mpsc::channel(10);
let (tx1, rx1) = oneshot::channel();
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1065,35 +1077,37 @@ mod dispatch_impl {
.unwrap();
let res = client.request(req).and_then(move |res| {
assert_eq!(res.status(), hyper::StatusCode::OK);
- res.into_body().try_concat()
+ concat(res.into_body())
});
let rx = rx1.expect("thread panicked");
- rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap();
+
+ let (res, ()) = future::join(res, rx).await;
+ res.unwrap();
// not closed yet, just idle
- {
- rt.block_on(future::poll_fn(|ctx| {
- assert!(Pin::new(&mut closes).poll_next(ctx).is_pending());
- Poll::Ready(Ok::<_, ()>(()))
- })).unwrap();
- }
+ future::poll_fn(|ctx| {
+ assert!(Pin::new(&mut closes).poll_next(ctx).is_pending());
+ Poll::Ready(())
+ }).await;
+
+ // drop to start the connections closing
drop(client);
- let t = tokio_timer::delay_for(Duration::from_millis(100))
+ // and wait a few ticks for the connections to close
+ let t = tokio::time::delay_for(Duration::from_millis(100))
.map(|_| panic!("time out"));
let close = closes
.into_future()
.map(|(opt, _)| opt.expect("closes"));
- let _ = rt.block_on(future::select(t, close));
+ future::select(t, close).await;
}
- #[test]
- fn drop_response_future_closes_in_progress_connection() {
+ #[tokio::test]
+ async fn drop_response_future_closes_in_progress_connection() {
let _ = pretty_env_logger::try_init();
let server = TcpListener::bind("127.0.0.1:0").unwrap();
let addr = server.local_addr().unwrap();
- let mut rt = Runtime::new().unwrap();
let (closes_tx, closes) = mpsc::channel(10);
let (tx1, rx1) = oneshot::channel();
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1122,27 +1136,28 @@ mod dispatch_impl {
.uri(&*format!("http://{}/a", addr))
.body(Body::empty())
.unwrap();
- client.request(req)
+ client
+ .request(req)
+ .map(|_| unreachable!())
};
- rt.block_on(future::select(res, rx1));
+ future::select(res, rx1).await;
// res now dropped
- let t = tokio_timer::delay_for(Duration::from_millis(100))
+ let t = tokio::time::delay_for(Duration::from_millis(100))
.map(|_| panic!("time out"));
let close = closes
.into_future()
.map(|(opt, _)| opt.expect("closes"));
- let _ = rt.block_on(future::select(t, close));
+ future::select(t, close).await;
}
- #[test]
- fn drop_response_body_closes_in_progress_connection() {
+ #[tokio::test]
+ async fn drop_response_body_closes_in_progress_connection() {
let _ = pretty_env_logger::try_init();
let server = TcpListener::bind("127.0.0.1:0").unwrap();
let addr = server.local_addr().unwrap();
- let mut rt = Runtime::new().unwrap();
let (closes_tx, closes) = mpsc::channel(10);
let (tx1, rx1) = oneshot::channel();
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1162,6 +1177,7 @@ mod dispatch_impl {
let _ = client_drop_rx.recv();
});
+ let rx = rx1.expect("thread panicked");
let res = {
let client = Client::builder()
.build(DebugConnector::with_http_and_closes(HttpConnector::new(), closes_tx));
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1174,25 +1190,27 @@ mod dispatch_impl {
client.request(req)
};
- let rx = rx1.expect("thread panicked");
- rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap();
- let t = tokio_timer::delay_for(Duration::from_millis(100))
+ let (res, ()) = future::join(res, rx).await;
+ // drop the body
+ res.unwrap();
+
+ // and wait a few ticks to see the connection drop
+ let t = tokio::time::delay_for(Duration::from_millis(100))
.map(|_| panic!("time out"));
let close = closes
.into_future()
.map(|(opt, _)| opt.expect("closes"));
- let _ = rt.block_on(future::select(t, close));
+ future::select(t, close).await;
}
- #[test]
- fn no_keep_alive_closes_connection() {
+ #[tokio::test]
+ async fn no_keep_alive_closes_connection() {
// https://github.com/hyperium/hyper/issues/1383
let _ = pretty_env_logger::try_init();
let server = TcpListener::bind("127.0.0.1:0").unwrap();
let addr = server.local_addr().unwrap();
- let mut rt = Runtime::new().unwrap();
let (closes_tx, closes) = mpsc::channel(10);
let (tx1, rx1) = oneshot::channel();
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1222,27 +1240,28 @@ mod dispatch_impl {
.unwrap();
let res = client.request(req).and_then(move |res| {
assert_eq!(res.status(), hyper::StatusCode::OK);
- res.into_body().try_concat()
+ concat(res.into_body())
});
let rx = rx1.expect("thread panicked");
- rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap();
- let t = tokio_timer::delay_for(Duration::from_millis(100))
+ let (res, ()) = future::join(res, rx).await;
+ res.unwrap();
+
+ let t = tokio::time::delay_for(Duration::from_millis(100))
.map(|_| panic!("time out"));
let close = closes
.into_future()
.map(|(opt, _)| opt.expect("closes"));
- let _ = rt.block_on(future::select(t, close));
+ future::select(t, close).await;
}
- #[test]
- fn socket_disconnect_closes_idle_conn() {
+ #[tokio::test]
+ async fn socket_disconnect_closes_idle_conn() {
// notably when keep-alive is enabled
let _ = pretty_env_logger::try_init();
let server = TcpListener::bind("127.0.0.1:0").unwrap();
let addr = server.local_addr().unwrap();
- let mut rt = Runtime::new().unwrap();
let (closes_tx, closes) = mpsc::channel(10);
let (tx1, rx1) = oneshot::channel();
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1266,17 +1285,19 @@ mod dispatch_impl {
.unwrap();
let res = client.request(req).and_then(move |res| {
assert_eq!(res.status(), hyper::StatusCode::OK);
- res.into_body().try_concat()
+ concat(res.into_body())
});
let rx = rx1.expect("thread panicked");
- rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap();
- let t = tokio_timer::delay_for(Duration::from_millis(100))
+ let (res, ()) = future::join(res, rx).await;
+ res.unwrap();
+
+ let t = tokio::time::delay_for(Duration::from_millis(100))
.map(|_| panic!("time out"));
let close = closes
.into_future()
.map(|(opt, _)| opt.expect("closes"));
- let _ = rt.block_on(future::select(t, close));
+ future::select(t, close).await;
}
#[test]
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1466,7 +1487,7 @@ mod dispatch_impl {
assert_eq!(connects.load(Ordering::Relaxed), 0);
let delayed_body = rx1
- .then(|_| tokio_timer::delay_for(Duration::from_millis(200)))
+ .then(|_| tokio::time::delay_for(Duration::from_millis(200)))
.map(|_| Ok::<_, ()>("hello a"))
.map_err(|_| -> hyper::Error { panic!("rx1") })
.into_stream();
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1481,7 +1502,7 @@ mod dispatch_impl {
// req 1
let fut = future::join(client.request(req), rx)
- .then(|_| tokio_timer::delay_for(Duration::from_millis(200)))
+ .then(|_| tokio::time::delay_for(Duration::from_millis(200)))
// req 2
.then(move |()| {
let rx = rx3.expect("thread panicked");
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1639,11 +1660,11 @@ mod dispatch_impl {
use hyper::Response;
use hyper::server::conn::Http;
use hyper::service::service_fn;
- use tokio_net::tcp::TcpListener;
+ use tokio::net::TcpListener;
let _ = pretty_env_logger::try_init();
let mut rt = Runtime::new().unwrap();
- let mut listener = rt.block_on(TcpListener::bind("127.0.0.1:0")).unwrap();
+ let mut listener = rt.block_on(TcpListener::bind(SocketAddr::from(([127, 0, 0, 1], 0)))).unwrap();
let addr = listener.local_addr().unwrap();
let mut connector = DebugConnector::new();
connector.alpn_h2 = true;
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1782,7 +1803,7 @@ mod dispatch_impl {
mod conn {
use std::io::{self, Read, Write};
- use std::net::TcpListener;
+ use std::net::{SocketAddr, TcpListener};
use std::pin::Pin;
use std::task::{Context, Poll};
use std::thread;
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1790,20 +1811,20 @@ mod conn {
use futures_channel::oneshot;
use futures_util::future::{self, poll_fn, FutureExt, TryFutureExt};
- use futures_util::stream::TryStreamExt;
- use tokio::runtime::current_thread::Runtime;
- use tokio_io::{AsyncRead, AsyncReadExt as _, AsyncWrite, AsyncWriteExt as _};
- use tokio_net::tcp::{TcpListener as TkTcpListener, TcpStream};
+ use futures_util::StreamExt;
+ use tokio::runtime::Runtime;
+ use tokio::io::{AsyncRead, AsyncReadExt as _, AsyncWrite, AsyncWriteExt as _};
+ use tokio::net::{TcpListener as TkTcpListener, TcpStream};
use hyper::{self, Request, Body, Method};
use hyper::client::conn;
- use super::{s, tcp_connect, FutureHyperExt};
+ use super::{concat, s, tcp_connect, FutureHyperExt};
#[tokio::test]
async fn get() {
let _ = ::pretty_env_logger::try_init();
- let mut listener = TkTcpListener::bind("127.0.0.1:0").await.unwrap();
+ let mut listener = TkTcpListener::bind(SocketAddr::from(([127, 0, 0, 1], 0))).await.unwrap();
let addr = listener.local_addr().unwrap();
let server = async move {
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1824,7 +1845,7 @@ mod conn {
let tcp = tcp_connect(&addr).await.expect("connect");
let (mut client, conn) = conn::handshake(tcp).await.expect("handshake");
- hyper::rt::spawn(async move {
+ tokio::task::spawn(async move {
conn.await.expect("http conn");
});
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1842,7 +1863,7 @@ mod conn {
#[test]
fn incoming_content_length() {
- use hyper::body::Payload;
+ use hyper::body::HttpBody;
let server = TcpListener::bind("127.0.0.1:0").unwrap();
let addr = server.local_addr().unwrap();
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1882,7 +1903,7 @@ mod conn {
});
let rx = rx1.expect("thread panicked");
- let rx = rx.then(|_| tokio_timer::delay_for(Duration::from_millis(200)));
+ let rx = rx.then(|_| tokio::time::delay_for(Duration::from_millis(200)));
let chunk = rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap();
assert_eq!(chunk.len(), 5);
}
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1971,10 +1992,10 @@ mod conn {
let res = client.send_request(req).and_then(move |res| {
assert_eq!(res.status(), hyper::StatusCode::OK);
- res.into_body().try_concat()
+ concat(res.into_body())
});
let rx = rx1.expect("thread panicked");
- let rx = rx.then(|_| tokio_timer::delay_for(Duration::from_millis(200)));
+ let rx = rx.then(|_| tokio::time::delay_for(Duration::from_millis(200)));
rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap();
}
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2015,10 +2036,10 @@ mod conn {
let res = client.send_request(req).and_then(move |res| {
assert_eq!(res.status(), hyper::StatusCode::OK);
- res.into_body().try_concat()
+ concat(res.into_body())
});
let rx = rx1.expect("thread panicked");
- let rx = rx.then(|_| tokio_timer::delay_for(Duration::from_millis(200)));
+ let rx = rx.then(|_| tokio::time::delay_for(Duration::from_millis(200)));
rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap();
}
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2053,7 +2074,7 @@ mod conn {
.unwrap();
let res1 = client.send_request(req).and_then(move |res| {
assert_eq!(res.status(), hyper::StatusCode::OK);
- res.into_body().try_concat()
+ concat(res.into_body())
});
// pipelined request will hit NotReady, and thus should return an Error::Cancel
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2070,7 +2091,7 @@ mod conn {
});
let rx = rx1.expect("thread panicked");
- let rx = rx.then(|_| tokio_timer::delay_for(Duration::from_millis(200)));
+ let rx = rx.then(|_| tokio::time::delay_for(Duration::from_millis(200)));
rt.block_on(future::join3(res1, res2, rx).map(|r| r.0)).unwrap();
}
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2126,11 +2147,11 @@ mod conn {
let res = client.send_request(req).and_then(move |res| {
assert_eq!(res.status(), hyper::StatusCode::SWITCHING_PROTOCOLS);
assert_eq!(res.headers()["Upgrade"], "foobar");
- res.into_body().try_concat()
+ concat(res.into_body())
});
let rx = rx1.expect("thread panicked");
- let rx = rx.then(|_| tokio_timer::delay_for(Duration::from_millis(200)));
+ let rx = rx.then(|_| tokio::time::delay_for(Duration::from_millis(200)));
rt.block_on(future::join3(until_upgrade, res, rx).map(|r| r.0)).unwrap();
// should not be ready now
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2211,14 +2232,14 @@ mod conn {
.send_request(req)
.and_then(move |res| {
assert_eq!(res.status(), hyper::StatusCode::OK);
- res.into_body().try_concat()
+ concat(res.into_body())
})
.map_ok(|body| {
assert_eq!(body.as_ref(), b"");
});
let rx = rx1.expect("thread panicked");
- let rx = rx.then(|_| tokio_timer::delay_for(Duration::from_millis(200)));
+ let rx = rx.then(|_| tokio::time::delay_for(Duration::from_millis(200)));
rt.block_on(future::join3(until_tunneled, res, rx).map(|r| r.0)).unwrap();
// should not be ready now
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2247,16 +2268,14 @@ mod conn {
assert_eq!(vec, b"bar=foo");
}
- #[test]
- fn http2_detect_conn_eof() {
+ #[tokio::test]
+ async fn http2_detect_conn_eof() {
use futures_util::future;
use hyper::{Response, Server};
use hyper::service::{make_service_fn, service_fn};
let _ = pretty_env_logger::try_init();
- let mut rt = Runtime::new().unwrap();
-
let server = Server::bind(&([127, 0, 0, 1], 0).into())
.http2_only(true)
.serve(make_service_fn(|_| async move {
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2264,40 +2283,53 @@ mod conn {
}));
let addr = server.local_addr();
let (shdn_tx, shdn_rx) = oneshot::channel();
- rt.spawn(server.with_graceful_shutdown(async {
- shdn_rx.await.ok();
- }).map(|_| ()));
+ tokio::task::spawn(async move {
+ server
+ .with_graceful_shutdown(async move {
+ let _ = shdn_rx.await;
+ })
+ .await
+ .expect("server")
+ });
- let io = rt.block_on(tcp_connect(&addr)).expect("tcp connect");
- let (mut client, conn) = rt.block_on(
- conn::Builder::new().http2_only(true).handshake::<_, Body>(io)
- ).expect("http handshake");
+ let io = tcp_connect(&addr).await.expect("tcp connect");
+ let (mut client, conn) = conn::Builder::new()
+ .http2_only(true)
+ .handshake::<_, Body>(io)
+ .await
+ .expect("http handshake");
- rt.spawn(conn
- .map_err(|e| panic!("client conn error: {:?}", e))
- .map(|_| ()));
+ tokio::task::spawn(async move {
+ conn.await.expect("client conn");
+ });
// Sanity check that client is ready
- rt.block_on(future::poll_fn(|ctx| client.poll_ready(ctx))).expect("client poll ready sanity");
+ future::poll_fn(|ctx| client.poll_ready(ctx))
+ .await
+ .expect("client poll ready sanity");
let req = Request::builder()
.uri(format!("http://{}/", addr))
.body(Body::empty())
.expect("request builder");
- rt.block_on(client.send_request(req)).expect("req1 send");
+ client.send_request(req).await.expect("req1 send");
// Sanity check that client is STILL ready
- rt.block_on(future::poll_fn(|ctx| client.poll_ready(ctx))).expect("client poll ready after");
+ future::poll_fn(|ctx| client.poll_ready(ctx))
+ .await
+ .expect("client poll ready after");
// Trigger the server shutdown...
let _ = shdn_tx.send(());
// Allow time for graceful shutdown roundtrips...
- rt.block_on(tokio_timer::delay_for(Duration::from_millis(100)));
+ tokio::time::delay_for(Duration::from_millis(100)).await;
// After graceful shutdown roundtrips, the client should be closed...
- rt.block_on(future::poll_fn(|ctx| client.poll_ready(ctx))).expect_err("client should be closed");
+ future::poll_fn(|ctx| client.poll_ready(ctx))
+ .await
+ .expect_err("client should be closed");
}
struct DebugStream {
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1,6 +1,7 @@
#![deny(warnings)]
#![deny(rust_2018_idioms)]
+use std::future::Future;
use std::net::{TcpStream, Shutdown, SocketAddr};
use std::io::{self, Read, Write};
use std::sync::atomic::{AtomicBool, Ordering};
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -13,18 +14,13 @@ use std::thread;
use std::time::Duration;
use futures_channel::oneshot;
-use futures_core::ready;
-use futures_core::future::BoxFuture;
use futures_util::future::{self, Either, FutureExt, TryFutureExt};
#[cfg(feature = "unstable-stream")]
use futures_util::stream::StreamExt as _;
-// TODO: remove once tokio is updated to futures 0.3
-use futures_util_a19::stream::StreamExt as _;
use http::header::{HeaderName, HeaderValue};
-use tokio_net::driver::Handle;
-use tokio_net::tcp::{TcpListener, TcpStream as TkTcpStream};
-use tokio::runtime::current_thread::Runtime;
-use tokio_io::{AsyncRead, AsyncWrite};
+use tokio::net::{TcpListener, TcpStream as TkTcpStream};
+use tokio::runtime::Runtime;
+use tokio::io::{AsyncRead, AsyncWrite};
use hyper::{Body, Request, Response, StatusCode, Version};
use hyper::client::Client;
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -120,19 +116,22 @@ mod response_body_lengths {
assert_eq!(
case.expects_chunked,
has_header(&body, "transfer-encoding:"),
- "expects_chunked"
+ "expects_chunked, headers = {:?}",
+ body
);
assert_eq!(
case.expects_chunked,
has_header(&body, "chunked\r\n"),
- "expects_chunked"
+ "expects_chunked, headers = {:?}",
+ body
);
assert_eq!(
case.expects_con_len,
has_header(&body, "content-length:"),
- "expects_con_len"
+ "expects_con_len, headers = {:?}",
+ body
);
let n = body.find("\r\n\r\n").unwrap() + 4;
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -295,36 +294,28 @@ mod response_body_lengths {
});
}
- #[test]
- fn http2_auto_response_with_known_length() {
+ #[tokio::test]
+ async fn http2_auto_response_with_known_length() {
use http_body::Body;
let server = serve();
let addr_str = format!("http://{}", server.addr());
server.reply().body("Hello, World!");
- let mut rt = Runtime::new().expect("rt new");
- rt.block_on({
- let client = Client::builder()
- .http2_only(true)
- .build_http::<hyper::Body>();
- let uri = addr_str
- .parse::<hyper::Uri>()
- .expect("server addr should parse");
-
- client
- .get(uri)
- .map_ok(|res| {
- assert_eq!(res.headers().get("content-length").unwrap(), "13");
- assert_eq!(res.body().size_hint().exact(), Some(13));
- ()
- })
- .map_err(|_e| ())
- }).unwrap();
+ let client = Client::builder()
+ .http2_only(true)
+ .build_http::<hyper::Body>();
+ let uri = addr_str
+ .parse::<hyper::Uri>()
+ .expect("server addr should parse");
+
+ let res = client.get(uri).await.unwrap();
+ assert_eq!(res.headers().get("content-length").unwrap(), "13");
+ assert_eq!(res.body().size_hint().exact(), Some(13));
}
- #[test]
- fn http2_auto_response_with_conflicting_lengths() {
+ #[tokio::test]
+ async fn http2_auto_response_with_conflicting_lengths() {
use http_body::Body;
let server = serve();
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -334,24 +325,16 @@ mod response_body_lengths {
.header("content-length", "10")
.body("Hello, World!");
- let mut rt = Runtime::new().expect("rt new");
- rt.block_on({
- let client = Client::builder()
- .http2_only(true)
- .build_http::<hyper::Body>();
- let uri = addr_str
- .parse::<hyper::Uri>()
- .expect("server addr should parse");
-
- client
- .get(uri)
- .map_ok(|res| {
- assert_eq!(res.headers().get("content-length").unwrap(), "10");
- assert_eq!(res.body().size_hint().exact(), Some(10));
- ()
- })
- .map_err(|_e| ())
- }).unwrap();
+ let client = Client::builder()
+ .http2_only(true)
+ .build_http::<hyper::Body>();
+ let uri = addr_str
+ .parse::<hyper::Uri>()
+ .expect("server addr should parse");
+
+ let res = client.get(uri).await.unwrap();
+ assert_eq!(res.headers().get("content-length").unwrap(), "10");
+ assert_eq!(res.body().size_hint().exact(), Some(10));
}
}
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -855,10 +838,9 @@ fn http_10_request_receives_http_10_response() {
assert_eq!(s(&buf[..expected.len()]), expected);
}
-#[test]
-fn disable_keep_alive_mid_request() {
- let mut rt = Runtime::new().unwrap();
- let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
+#[tokio::test]
+async fn disable_keep_alive_mid_request() {
+ let mut listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
let addr = listener.local_addr().unwrap();
let (tx1, rx1) = oneshot::channel();
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -874,36 +856,31 @@ fn disable_keep_alive_mid_request() {
req.read_to_end(&mut buf).unwrap();
});
- let mut incoming = listener.incoming();
- let fut = incoming.next()
- .map(Option::unwrap)
- .map_err(|_| unreachable!())
- .and_then(|socket| {
- let srv = Http::new().serve_connection(socket, HelloWorld);
- future::try_select(srv, rx1)
- .then(|r| {
- match r {
- Ok(Either::Left(_)) => panic!("expected rx first"),
- Ok(Either::Right(((), mut conn))) => {
- Pin::new(&mut conn).graceful_shutdown();
- tx2.send(()).unwrap();
- conn
- }
- Err(Either::Left((e, _))) => panic!("unexpected error {}", e),
- Err(Either::Right((e, _))) => panic!("unexpected error {}", e),
- }
- })
- });
+ let (socket, _) = listener.accept().await.unwrap();
+ let srv = Http::new().serve_connection(socket, HelloWorld);
+ future::try_select(srv, rx1)
+ .then(|r| {
+ match r {
+ Ok(Either::Left(_)) => panic!("expected rx first"),
+ Ok(Either::Right(((), mut conn))) => {
+ Pin::new(&mut conn).graceful_shutdown();
+ tx2.send(()).unwrap();
+ conn
+ }
+ Err(Either::Left((e, _))) => panic!("unexpected error {}", e),
+ Err(Either::Right((e, _))) => panic!("unexpected error {}", e),
+ }
+ })
+ .await
+ .unwrap();
- rt.block_on(fut).unwrap();
child.join().unwrap();
}
-#[test]
-fn disable_keep_alive_post_request() {
+#[tokio::test]
+async fn disable_keep_alive_post_request() {
let _ = pretty_env_logger::try_init();
- let mut rt = Runtime::new().unwrap();
- let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
+ let mut listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
let addr = listener.local_addr().unwrap();
let (tx1, rx1) = oneshot::channel();
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -929,66 +906,51 @@ fn disable_keep_alive_post_request() {
let dropped = Dropped::new();
let dropped2 = dropped.clone();
- let mut incoming = listener.incoming();
- let fut = incoming.next()
- .map(Option::unwrap)
- .map_err(|_| unreachable!())
- .and_then(|socket| {
- let transport = DebugStream {
- stream: socket,
- _debug: dropped2,
- };
- let server = Http::new().serve_connection(transport, HelloWorld);
- future::try_select(server, rx1)
- .then(|r| {
- match r {
- Ok(Either::Left(_)) => panic!("expected rx first"),
- Ok(Either::Right(((), mut conn))) => {
- Pin::new(&mut conn).graceful_shutdown();
- conn
- }
- Err(Either::Left((e, _))) => panic!("unexpected error {}", e),
- Err(Either::Right((e, _))) => panic!("unexpected error {}", e),
- }
- })
+ let (socket, _) = listener.accept().await.unwrap();
+ let transport = DebugStream {
+ stream: socket,
+ _debug: dropped2,
+ };
+ let server = Http::new().serve_connection(transport, HelloWorld);
+ let fut = future::try_select(server, rx1)
+ .then(|r| {
+ match r {
+ Ok(Either::Left(_)) => panic!("expected rx first"),
+ Ok(Either::Right(((), mut conn))) => {
+ Pin::new(&mut conn).graceful_shutdown();
+ conn
+ }
+ Err(Either::Left((e, _))) => panic!("unexpected error {}", e),
+ Err(Either::Right((e, _))) => panic!("unexpected error {}", e),
+ }
});
assert!(!dropped.load());
- rt.block_on(fut).unwrap();
- // we must poll the Core one more time in order for Windows to drop
- // the read-blocked socket.
- //
- // See https://github.com/carllerche/mio/issues/776
- let timeout = tokio_timer::delay_for(Duration::from_millis(10));
- rt.block_on(timeout);
+ fut.await.unwrap();
assert!(dropped.load());
child.join().unwrap();
}
-#[test]
-fn empty_parse_eof_does_not_return_error() {
+#[tokio::test]
+async fn empty_parse_eof_does_not_return_error() {
let _ = pretty_env_logger::try_init();
- let mut rt = Runtime::new().unwrap();
- let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
+ let mut listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
let addr = listener.local_addr().unwrap();
thread::spawn(move || {
let _tcp = connect(&addr);
});
- let mut incoming = listener.incoming();
- let fut = incoming.next()
- .map(Option::unwrap)
- .map_err(|_| unreachable!())
- .and_then(|socket| Http::new().serve_connection(socket, HelloWorld));
-
- rt.block_on(fut).expect("empty parse eof is ok");
+ let (socket, _) = listener.accept().await.unwrap();
+ Http::new()
+ .serve_connection(socket, HelloWorld)
+ .await
+ .expect("empty parse eof is ok");
}
-#[test]
-fn nonempty_parse_eof_returns_error() {
- let mut rt = Runtime::new().unwrap();
- let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
+#[tokio::test]
+async fn nonempty_parse_eof_returns_error() {
+ let mut listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
let addr = listener.local_addr().unwrap();
thread::spawn(move || {
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -996,20 +958,17 @@ fn nonempty_parse_eof_returns_error() {
tcp.write_all(b"GET / HTTP/1.1").unwrap();
});
- let mut incoming = listener.incoming();
- let fut = incoming.next()
- .map(Option::unwrap)
- .map_err(|_| unreachable!())
- .and_then(|socket| Http::new().serve_connection(socket, HelloWorld));
-
- rt.block_on(fut).expect_err("partial parse eof is error");
+ let (socket, _) = listener.accept().await.unwrap();
+ Http::new()
+ .serve_connection(socket, HelloWorld)
+ .await
+ .expect_err("partial parse eof is error");
}
-#[test]
-fn http1_allow_half_close() {
+#[tokio::test]
+async fn http1_allow_half_close() {
let _ = pretty_env_logger::try_init();
- let mut rt = Runtime::new().unwrap();
- let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
+ let mut listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
let addr = listener.local_addr().unwrap();
let t1 = thread::spawn(move || {
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1023,28 +982,23 @@ fn http1_allow_half_close() {
assert_eq!(s(&buf[..expected.len()]), expected);
});
- let mut incoming = listener.incoming();
- let fut = incoming.next()
- .map(Option::unwrap)
- .map_err(|_| unreachable!())
- .and_then(|socket| {
- Http::new()
- .http1_half_close(true)
- .serve_connection(socket, service_fn(|_| {
- tokio_timer::delay_for(Duration::from_millis(500))
- .map(|_| Ok::<_, hyper::Error>(Response::new(Body::empty())))
- }))
- });
+ let (socket, _) = listener.accept().await.unwrap();
+ Http::new()
+ .http1_half_close(true)
+ .serve_connection(socket, service_fn(|_| {
+ tokio::time::delay_for(Duration::from_millis(500))
+ .map(|_| Ok::<_, hyper::Error>(Response::new(Body::empty())))
+ }))
+ .await
+ .unwrap();
- rt.block_on(fut).unwrap();
t1.join().expect("client thread");
}
-#[test]
-fn disconnect_after_reading_request_before_responding() {
+#[tokio::test]
+async fn disconnect_after_reading_request_before_responding() {
let _ = pretty_env_logger::try_init();
- let mut rt = Runtime::new().unwrap();
- let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
+ let mut listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
let addr = listener.local_addr().unwrap();
thread::spawn(move || {
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1052,28 +1006,22 @@ fn disconnect_after_reading_request_before_responding() {
tcp.write_all(b"GET / HTTP/1.1\r\n\r\n").unwrap();
});
- let mut incoming = listener.incoming();
- let fut = incoming.next()
- .map(Option::unwrap)
- .map_err(|_| unreachable!())
- .and_then(|socket| {
- Http::new()
- .http1_half_close(false)
- .serve_connection(socket, service_fn(|_| {
- tokio_timer::delay_for(Duration::from_secs(2))
- .map(|_| -> Result<Response<Body>, hyper::Error> {
- panic!("response future should have been dropped");
- })
- }))
- });
-
- rt.block_on(fut).expect_err("socket disconnected");
+ let (socket, _) = listener.accept().await.unwrap();
+ Http::new()
+ .http1_half_close(false)
+ .serve_connection(socket, service_fn(|_| {
+ tokio::time::delay_for(Duration::from_secs(2))
+ .map(|_| -> Result<Response<Body>, hyper::Error> {
+ panic!("response future should have been dropped");
+ })
+ }))
+ .await
+ .expect_err("socket disconnected");
}
-#[test]
-fn returning_1xx_response_is_error() {
- let mut rt = Runtime::new().unwrap();
- let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
+#[tokio::test]
+async fn returning_1xx_response_is_error() {
+ let mut listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
let addr = listener.local_addr().unwrap();
thread::spawn(move || {
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1086,21 +1034,16 @@ fn returning_1xx_response_is_error() {
assert_eq!(s(&buf[..expected.len()]), expected);
});
- let mut incoming = listener.incoming();
- let fut = incoming.next()
- .map(Option::unwrap)
- .map_err(|_| unreachable!())
- .and_then(|socket| {
- Http::new()
- .serve_connection(socket, service_fn(|_| async move {
- Ok::<_, hyper::Error>(Response::builder()
- .status(StatusCode::CONTINUE)
- .body(Body::empty())
- .unwrap())
- }))
- });
-
- rt.block_on(fut).expect_err("1xx status code should error");
+ let (socket, _) = listener.accept().await.unwrap();
+ Http::new()
+ .serve_connection(socket, service_fn(|_| async move {
+ Ok::<_, hyper::Error>(Response::builder()
+ .status(StatusCode::CONTINUE)
+ .body(Body::empty())
+ .unwrap())
+ }))
+ .await
+ .expect_err("1xx status code should error");
}
#[test]
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1121,13 +1064,12 @@ fn header_name_too_long() {
assert!(s(&buf[..n]).starts_with("HTTP/1.1 431 Request Header Fields Too Large\r\n"));
}
-#[test]
-fn upgrades() {
+#[tokio::test]
+async fn upgrades() {
use tokio::io::{AsyncReadExt, AsyncWriteExt};
let _ = pretty_env_logger::try_init();
- let mut rt = Runtime::new().unwrap();
- let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
+ let mut listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
let addr = listener.local_addr().unwrap();
let (tx, rx) = oneshot::channel();
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1152,51 +1094,36 @@ fn upgrades() {
tcp.write_all(b"bar=foo").expect("write 2");
});
- let mut incoming = listener.incoming();
- let fut = incoming.next()
- .map(Option::unwrap)
- .map_err(|_| unreachable!())
- .and_then(|socket| {
- let conn = Http::new()
- .serve_connection(socket, service_fn(|_| {
- let res = Response::builder()
- .status(101)
- .header("upgrade", "foobar")
- .body(hyper::Body::empty())
- .unwrap();
- future::ready(Ok::<_, hyper::Error>(res))
- }));
-
- let mut conn_opt = Some(conn);
- future::poll_fn(move |ctx| {
- ready!(conn_opt.as_mut().unwrap().poll_without_shutdown(ctx)).unwrap();
- // conn is done with HTTP now
- Poll::Ready(Ok(conn_opt.take().unwrap()))
- })
- });
-
- let conn = rt.block_on(fut).unwrap();
+ let (socket, _) = listener.accept().await.unwrap();
+ let conn = Http::new()
+ .serve_connection(socket, service_fn(|_| {
+ let res = Response::builder()
+ .status(101)
+ .header("upgrade", "foobar")
+ .body(hyper::Body::empty())
+ .unwrap();
+ future::ready(Ok::<_, hyper::Error>(res))
+ }));
+
+ let parts = conn.without_shutdown().await.unwrap();
+ assert_eq!(parts.read_buf, "eagerly optimistic");
// wait so that we don't write until other side saw 101 response
- rt.block_on(rx).unwrap();
-
- let parts = conn.into_parts();
- assert_eq!(parts.read_buf, "eagerly optimistic");
+ rx.await.unwrap();
let mut io = parts.io;
- rt.block_on(io.write_all(b"foo=bar")).unwrap();
+ io.write_all(b"foo=bar").await.unwrap();
let mut vec = vec![];
- rt.block_on(io.read_to_end(&mut vec)).unwrap();
+ io.read_to_end(&mut vec).await.unwrap();
assert_eq!(vec, b"bar=foo");
}
-#[test]
-fn http_connect() {
+#[tokio::test]
+async fn http_connect() {
use tokio::io::{AsyncReadExt, AsyncWriteExt};
let _ = pretty_env_logger::try_init();
- let mut rt = Runtime::new().unwrap();
- let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
+ let mut listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
let addr = listener.local_addr().unwrap();
let (tx, rx) = oneshot::channel();
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1219,50 +1146,35 @@ fn http_connect() {
tcp.write_all(b"bar=foo").expect("write 2");
});
- let mut incoming = listener.incoming();
- let fut = incoming.next()
- .map(Option::unwrap)
- .map_err(|_| unreachable!())
- .and_then(|socket| {
- let conn = Http::new()
- .serve_connection(socket, service_fn(|_| {
- let res = Response::builder()
- .status(200)
- .body(hyper::Body::empty())
- .unwrap();
- future::ready(Ok::<_, hyper::Error>(res))
- }));
-
- let mut conn_opt = Some(conn);
- future::poll_fn(move |ctx| {
- ready!(conn_opt.as_mut().unwrap().poll_without_shutdown(ctx)).unwrap();
- // conn is done with HTTP now
- Poll::Ready(Ok(conn_opt.take().unwrap()))
- })
- });
-
- let conn = rt.block_on(fut).unwrap();
+ let (socket, _) = listener.accept().await.unwrap();
+ let conn = Http::new()
+ .serve_connection(socket, service_fn(|_| {
+ let res = Response::builder()
+ .status(200)
+ .body(hyper::Body::empty())
+ .unwrap();
+ future::ready(Ok::<_, hyper::Error>(res))
+ }));
+
+ let parts = conn.without_shutdown().await.unwrap();
+ assert_eq!(parts.read_buf, "eagerly optimistic");
// wait so that we don't write until other side saw 101 response
- rt.block_on(rx).unwrap();
-
- let parts = conn.into_parts();
- assert_eq!(parts.read_buf, "eagerly optimistic");
+ rx.await.unwrap();
let mut io = parts.io;
- rt.block_on(io.write_all(b"foo=bar")).unwrap();
+ io.write_all(b"foo=bar").await.unwrap();
let mut vec = vec![];
- rt.block_on(io.read_to_end(&mut vec)).unwrap();
+ io.read_to_end(&mut vec).await.unwrap();
assert_eq!(vec, b"bar=foo");
}
-#[test]
-fn upgrades_new() {
+#[tokio::test]
+async fn upgrades_new() {
use tokio::io::{AsyncReadExt, AsyncWriteExt};
let _ = pretty_env_logger::try_init();
- let mut rt = Runtime::new().unwrap();
- let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
+ let mut listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
let addr = listener.local_addr().unwrap();
let (read_101_tx, read_101_rx) = oneshot::channel();
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1300,38 +1212,35 @@ fn upgrades_new() {
.unwrap())
});
- let mut incoming = listener.incoming();
- let fut = incoming.next()
- .map(Option::unwrap)
- .map_err(|_| unreachable!())
- .and_then(|socket| {
- Http::new().serve_connection(socket, svc).with_upgrades()
- });
+ let (socket, _) = listener.accept().await.unwrap();
+ Http::new()
+ .serve_connection(socket, svc)
+ .with_upgrades()
+ .await
+ .unwrap();
- rt.block_on(fut).unwrap();
let on_upgrade = upgrades_rx.recv().unwrap();
// wait so that we don't write until other side saw 101 response
- rt.block_on(read_101_rx).unwrap();
+ read_101_rx.await.unwrap();
- let upgraded = rt.block_on(on_upgrade).unwrap();
+ let upgraded = on_upgrade.await.expect("on_upgrade");
let parts = upgraded.downcast::<TkTcpStream>().unwrap();
assert_eq!(parts.read_buf, "eagerly optimistic");
let mut io = parts.io;
- rt.block_on(io.write_all(b"foo=bar")).unwrap();
+ io.write_all(b"foo=bar").await.unwrap();
let mut vec = vec![];
- rt.block_on(io.read_to_end(&mut vec)).unwrap();
+ io.read_to_end(&mut vec).await.unwrap();
assert_eq!(s(&vec), "bar=foo");
}
-#[test]
-fn http_connect_new() {
+#[tokio::test]
+async fn http_connect_new() {
use tokio::io::{AsyncReadExt, AsyncWriteExt};
let _ = pretty_env_logger::try_init();
- let mut rt = Runtime::new().unwrap();
- let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
+ let mut listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
let addr = listener.local_addr().unwrap();
let (read_200_tx, read_200_rx) = oneshot::channel();
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1366,38 +1275,33 @@ fn http_connect_new() {
.unwrap())
});
- let mut incoming = listener.incoming();
- let fut = incoming.next()
- .map(Option::unwrap)
- .map_err(|_| unreachable!())
- .and_then(|socket| {
- Http::new().serve_connection(socket, svc).with_upgrades()
- });
+ let (socket, _) = listener.accept().await.unwrap();
+ Http::new()
+ .serve_connection(socket, svc)
+ .with_upgrades()
+ .await
+ .unwrap();
- rt.block_on(fut).unwrap();
let on_upgrade = upgrades_rx.recv().unwrap();
// wait so that we don't write until other side saw 200
- rt.block_on(read_200_rx).unwrap();
+ read_200_rx.await.unwrap();
- let upgraded = rt.block_on(on_upgrade).unwrap();
+ let upgraded = on_upgrade.await.expect("on_upgrade");
let parts = upgraded.downcast::<TkTcpStream>().unwrap();
assert_eq!(parts.read_buf, "eagerly optimistic");
let mut io = parts.io;
- rt.block_on(io.write_all(b"foo=bar")).unwrap();
+ io.write_all(b"foo=bar").await.unwrap();
let mut vec = vec![];
- rt.block_on(io.read_to_end(&mut vec)).unwrap();
+ io.read_to_end(&mut vec).await.unwrap();
assert_eq!(s(&vec), "bar=foo");
}
-#[test]
-fn parse_errors_send_4xx_response() {
-
-
- let mut rt = Runtime::new().unwrap();
- let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
+#[tokio::test]
+async fn parse_errors_send_4xx_response() {
+ let mut listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
let addr = listener.local_addr().unwrap();
thread::spawn(move || {
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1410,21 +1314,16 @@ fn parse_errors_send_4xx_response() {
assert_eq!(s(&buf[..expected.len()]), expected);
});
- let mut incoming = listener.incoming();
- let fut = incoming.next()
- .map(Option::unwrap)
- .map_err(|_| unreachable!())
- .and_then(|socket| Http::new().serve_connection(socket, HelloWorld));
-
- rt.block_on(fut).expect_err("HTTP parse error");
+ let (socket, _) = listener.accept().await.unwrap();
+ Http::new()
+ .serve_connection(socket, HelloWorld)
+ .await
+ .expect_err("HTTP parse error");
}
-#[test]
-fn illegal_request_length_returns_400_response() {
-
-
- let mut rt = Runtime::new().unwrap();
- let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
+#[tokio::test]
+async fn illegal_request_length_returns_400_response() {
+ let mut listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
let addr = listener.local_addr().unwrap();
thread::spawn(move || {
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1437,13 +1336,11 @@ fn illegal_request_length_returns_400_response() {
assert_eq!(s(&buf[..expected.len()]), expected);
});
- let mut incoming = listener.incoming();
- let fut = incoming.next()
- .map(Option::unwrap)
- .map_err(|_| unreachable!())
- .and_then(|socket| Http::new().serve_connection(socket, HelloWorld));
-
- rt.block_on(fut).expect_err("illegal Content-Length should error");
+ let (socket, _) = listener.accept().await.unwrap();
+ Http::new()
+ .serve_connection(socket, HelloWorld)
+ .await
+ .expect_err("illegal Content-Length should error");
}
#[test]
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1458,11 +1355,10 @@ fn max_buf_size_no_panic() {
Http::new().max_buf_size(MAX);
}
-#[test]
-fn max_buf_size() {
+#[tokio::test]
+async fn max_buf_size() {
let _ = pretty_env_logger::try_init();
- let mut rt = Runtime::new().unwrap();
- let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
+ let mut listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
let addr = listener.local_addr().unwrap();
const MAX: usize = 16_000;
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1478,17 +1374,12 @@ fn max_buf_size() {
assert_eq!(s(&buf[..expected.len()]), expected);
});
- let mut incoming = listener.incoming();
- let fut = incoming.next()
- .map(Option::unwrap)
- .map_err(|_| unreachable!())
- .and_then(|socket| {
- Http::new()
- .max_buf_size(MAX)
- .serve_connection(socket, HelloWorld)
- });
-
- rt.block_on(fut).expect_err("should TooLarge error");
+ let (socket, _) = listener.accept().await.unwrap();
+ Http::new()
+ .max_buf_size(MAX)
+ .serve_connection(socket, HelloWorld)
+ .await
+ .expect_err("should TooLarge error");
}
#[cfg(feature = "unstable-stream")]
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1576,8 +1467,8 @@ fn http1_only() {
}).unwrap_err();
}
-#[test]
-fn http2_service_error_sends_reset_reason() {
+#[tokio::test]
+async fn http2_service_error_sends_reset_reason() {
use std::error::Error;
let server = serve();
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1587,22 +1478,20 @@ fn http2_service_error_sends_reset_reason() {
.reply()
.error(h2::Error::from(h2::Reason::INADEQUATE_SECURITY));
- let mut rt = Runtime::new().expect("runtime new");
-
- let err = rt.block_on({
- let client = Client::builder()
- .http2_only(true)
- .build_http::<hyper::Body>();
- let uri = addr_str.parse().expect("server addr should parse");
-
- client.get(uri)
- }).unwrap_err();
+ let uri = addr_str.parse().expect("server addr should parse");
+ dbg!("start");
+ let err = dbg!(Client::builder()
+ .http2_only(true)
+ .build_http::<hyper::Body>()
+ .get(uri)
+ .await
+ .expect_err("client.get"));
let h2_err = err
.source()
- .unwrap()
+ .expect("err.source")
.downcast_ref::<h2::Error>()
- .unwrap();
+ .expect("downcast");
assert_eq!(h2_err.reason(), Some(h2::Reason::INADEQUATE_SECURITY));
}
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1648,9 +1537,9 @@ fn http2_body_user_error_sends_reset_reason() {
assert_eq!(h2_err.reason(), Some(h2::Reason::INADEQUATE_SECURITY));
}
-struct Svc;
+struct Http2ReadyErrorSvc;
-impl tower_service::Service<Request<Body>> for Svc {
+impl tower_service::Service<Request<Body>> for Http2ReadyErrorSvc {
type Response = Response<Body>;
type Error = h2::Error;
type Future = Box<dyn futures_core::Future<
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1666,31 +1555,30 @@ impl tower_service::Service<Request<Body>> for Svc {
}
}
-#[test]
-fn http2_service_poll_ready_error_sends_goaway() {
+#[tokio::test]
+#[ignore] // sometimes ECONNRESET wins the race
+async fn http2_service_poll_ready_error_sends_goaway() {
use std::error::Error;
let _ = pretty_env_logger::try_init();
let server = hyper::Server::bind(&([127, 0, 0, 1], 0).into())
.http2_only(true)
- .serve(make_service_fn(|_| async move { Ok::<_, BoxError>(Svc) }));
+ .serve(make_service_fn(|_| async move { Ok::<_, BoxError>(Http2ReadyErrorSvc) }));
let addr_str = format!("http://{}", server.local_addr());
- let mut rt = Runtime::new().expect("runtime new");
-
- rt.spawn(server
- .map_err(|e| unreachable!("server shouldn't error: {:?}", e))
- .map(|_| ()));
+ tokio::task::spawn(async move {
+ server.await.expect("server");
+ });
- let err = rt.block_on({
- let client = Client::builder()
- .http2_only(true)
- .build_http::<hyper::Body>();
- let uri = addr_str.parse().expect("server addr should parse");
- client.get(uri)
- }).unwrap_err();
+ let uri = addr_str.parse().expect("server addr should parse");
+ let err = dbg!(Client::builder()
+ .http2_only(true)
+ .build_http::<hyper::Body>()
+ .get(uri)
+ .await
+ .expect_err("client.get should fail"));
// client request should have gotten the specific GOAWAY error...
let h2_err = err
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1843,11 +1731,14 @@ impl<'a> Drop for ReplyBuilder<'a> {
impl Drop for Serve {
fn drop(&mut self) {
drop(self.shutdown_signal.take());
+ drop(self.thread.take());
+ /*
let r = self.thread.take().unwrap().join();
if let Err(ref e) = r {
println!("{:?}", e);
}
r.unwrap();
+ */
}
}
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1877,7 +1768,7 @@ enum Msg {
impl tower_service::Service<Request<Body>> for TestService {
type Response = Response<Body>;
type Error = BoxError;
- type Future = BoxFuture<'static, Result<Response<Body>, BoxError>>;
+ type Future = Pin<Box<dyn Future<Output = Result<Response<Body>, BoxError>> + Send>>;
fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Ok(()).into()
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1886,7 +1777,8 @@ impl tower_service::Service<Request<Body>> for TestService {
fn call(&mut self, mut req: Request<Body>) -> Self::Future {
let tx = self.tx.clone();
let replies = self.reply.clone();
- hyper::rt::spawn(async move {
+
+ Box::pin(async move {
while let Some(chunk) = req.body_mut().next().await {
match chunk {
Ok(chunk) => {
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1894,15 +1786,13 @@ impl tower_service::Service<Request<Body>> for TestService {
},
Err(err) => {
tx.send(Msg::Error(err)).unwrap();
- return;
+ return Err("req body error".into());
},
}
}
tx.send(Msg::End).unwrap();
- });
- Box::pin(async move {
TestService::build_reply(replies)
})
}
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1940,7 +1830,7 @@ struct HelloWorld;
impl tower_service::Service<Request<Body>> for HelloWorld {
type Response = Response<Body>;
type Error = hyper::Error;
- type Future = BoxFuture<'static, Result<Response<Body>, Self::Error>>;
+ type Future = future::Ready<Result<Response<Body>, Self::Error>>;
fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Ok(()).into()
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1948,7 +1838,7 @@ impl tower_service::Service<Request<Body>> for HelloWorld {
fn call(&mut self, _req: Request<Body>) -> Self::Future {
let response = Response::new(HELLO.into());
- future::ok(response).boxed()
+ future::ok(response)
}
}
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2021,34 +1911,39 @@ impl ServeOptions {
let thread = thread::Builder::new()
.name(thread_name)
.spawn(move || {
- let service = make_service_fn(|_| {
- let msg_tx = msg_tx.clone();
- let reply_rx = reply_rx.clone();
- future::ok::<_, BoxError>(TestService {
- tx: msg_tx.clone(),
- reply: reply_rx.clone(),
- })
- });
-
- let server = Server::bind(&addr)
- .http1_only(options.http1_only)
- .http1_keepalive(options.keep_alive)
- .http1_pipeline_flush(options.pipeline)
- .serve(service);
-
- addr_tx.send(
- server.local_addr()
- ).expect("server addr tx");
-
- let fut = server
- .with_graceful_shutdown(async {
- shutdown_rx.await.ok();
+ let mut rt = tokio::runtime::Builder::new()
+ .enable_io()
+ .enable_time()
+ .basic_scheduler()
+ .build()
+ .expect("rt new");
+
+ rt.block_on(async move {
+ let service = make_service_fn(|_| {
+ let msg_tx = msg_tx.clone();
+ let reply_rx = reply_rx.clone();
+ future::ok::<_, BoxError>(TestService {
+ tx: msg_tx.clone(),
+ reply: reply_rx.clone(),
+ })
});
- let mut rt = Runtime::new().expect("rt new");
- rt
- .block_on(fut)
- .unwrap();
+ let server = Server::bind(&addr)
+ .http1_only(options.http1_only)
+ .http1_keepalive(options.keep_alive)
+ .http1_pipeline_flush(options.pipeline)
+ .serve(service);
+
+ addr_tx.send(
+ server.local_addr()
+ ).expect("server addr tx");
+
+ server
+ .with_graceful_shutdown(async {
+ let _ = shutdown_rx.await;
+ })
+ .await
+ }).expect("serve()");
})
.expect("thread spawn");
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2076,7 +1971,7 @@ fn has_header(msg: &str, name: &str) -> bool {
fn tcp_bind(addr: &SocketAddr) -> ::tokio::io::Result<TcpListener> {
let std_listener = StdTcpListener::bind(addr).unwrap();
- TcpListener::from_std(std_listener, &Handle::default())
+ TcpListener::from_std(std_listener)
}
fn read_until<R, F>(io: &mut R, func: F) -> io::Result<Vec<u8>>
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -9,9 +9,7 @@ use hyper::service::{make_service_fn, service_fn};
pub use std::net::SocketAddr;
pub use futures_util::{future, FutureExt as _, StreamExt as _, TryFutureExt as _, TryStreamExt as _};
-//pub use self::futures_channel::oneshot;
pub use hyper::{HeaderMap, StatusCode};
-pub use tokio::runtime::current_thread::Runtime;
macro_rules! t {
(
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -303,8 +301,16 @@ pub struct __TestConfig {
pub fn __run_test(cfg: __TestConfig) {
let _ = pretty_env_logger::try_init();
- let mut rt = Runtime::new().expect("new rt");
+ tokio::runtime::Builder::new()
+ .enable_io()
+ .enable_time()
+ .basic_scheduler()
+ .build()
+ .expect("new rt")
+ .block_on(async_test(cfg));
+}
+async fn async_test(cfg: __TestConfig) {
assert_eq!(cfg.client_version, cfg.server_version);
let version = if cfg.client_version == 2 {
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -348,8 +354,7 @@ pub fn __run_test(cfg: __TestConfig) {
func(&req.headers());
}
let sbody = sreq.body;
- req.into_body()
- .try_concat()
+ concat(req.into_body())
.map_ok(move |body| {
assert_eq!(body.as_ref(), sbody.as_slice(), "client body");
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -369,7 +374,7 @@ pub fn __run_test(cfg: __TestConfig) {
let mut addr = server.local_addr();
- rt.spawn(server.map(|result| {
+ tokio::task::spawn(server.map(|result| {
let _ = result.expect("server error");
}));
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -379,7 +384,7 @@ pub fn __run_test(cfg: __TestConfig) {
dst: addr,
version: cfg.server_version,
});
- rt.spawn(proxy);
+ tokio::task::spawn(proxy);
addr = proxy_addr;
}
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -403,7 +408,7 @@ pub fn __run_test(cfg: __TestConfig) {
for func in &cheaders {
func(&res.headers());
}
- res.into_body().try_concat()
+ concat(res.into_body())
})
.map_ok(move |body| {
assert_eq!(body.as_ref(), cbody.as_slice(), "server body");
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -435,7 +440,7 @@ pub fn __run_test(cfg: __TestConfig) {
Box::pin(client_futures.map(|_| ()))
};
- rt.block_on(client_futures);
+ client_futures.await;
}
struct ProxyConfig {
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -470,3 +475,11 @@ fn naive_proxy(cfg: ProxyConfig) -> (SocketAddr, impl Future<Output = ()>) {
let proxy_addr = srv.local_addr();
(proxy_addr, srv.map(|res| res.expect("proxy error")))
}
+
+async fn concat(mut body: Body) -> Result<hyper::Chunk, hyper::Error> {
+ let mut vec = Vec::new();
+ while let Some(chunk) = body.next().await {
+ vec.extend_from_slice(&chunk?);
+ }
+ Ok(vec.into())
+}
|
hyperium/hyper
|
cc7d3058e84b63d26697026f44cdc153b208de9a
|
[
"1984"
] |
0.2
|
19a7aab51f4b70ef1561ecd912c988d9ddfc7532
|
diff --git a/src/client/connect/dns.rs b/src/client/connect/dns.rs
--- a/src/client/connect/dns.rs
+++ b/src/client/connect/dns.rs
@@ -21,18 +21,16 @@
//! Ok::<_, Infallible>(iter::once(IpAddr::from([127, 0, 0, 1])))
//! });
//! ```
-use std::{fmt, io, vec};
use std::error::Error;
-use std::net::{
- IpAddr, Ipv4Addr, Ipv6Addr,
- SocketAddr, ToSocketAddrs,
- SocketAddrV4, SocketAddrV6,
-};
+use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6, ToSocketAddrs};
use std::str::FromStr;
+use std::task::{self, Poll};
+use std::pin::Pin;
+use std::future::Future;
+use std::{fmt, io, vec};
use tokio::task::JoinHandle;
use tower_service::Service;
-use crate::common::{Future, Pin, Poll, task};
pub(super) use self::sealed::Resolve;
diff --git a/src/client/connect/dns.rs b/src/client/connect/dns.rs
--- a/src/client/connect/dns.rs
+++ b/src/client/connect/dns.rs
@@ -60,9 +58,7 @@ pub struct GaiFuture {
impl Name {
pub(super) fn new(host: String) -> Name {
- Name {
- host,
- }
+ Name { host }
}
/// View the hostname as a string slice.
diff --git a/src/client/connect/dns.rs b/src/client/connect/dns.rs
--- a/src/client/connect/dns.rs
+++ b/src/client/connect/dns.rs
@@ -104,13 +100,10 @@ impl fmt::Display for InvalidNameError {
impl Error for InvalidNameError {}
-
impl GaiResolver {
/// Construct a new `GaiResolver`.
pub fn new() -> Self {
- GaiResolver {
- _priv: (),
- }
+ GaiResolver { _priv: () }
}
}
diff --git a/src/client/connect/dns.rs b/src/client/connect/dns.rs
--- a/src/client/connect/dns.rs
+++ b/src/client/connect/dns.rs
@@ -126,13 +119,12 @@ impl Service<Name> for GaiResolver {
fn call(&mut self, name: Name) -> Self::Future {
let blocking = tokio::task::spawn_blocking(move || {
debug!("resolving host={:?}", name.host);
- (&*name.host, 0).to_socket_addrs()
+ (&*name.host, 0)
+ .to_socket_addrs()
.map(|i| IpAddrs { iter: i })
});
- GaiFuture {
- inner: blocking,
- }
+ GaiFuture { inner: blocking }
}
}
diff --git a/src/client/connect/dns.rs b/src/client/connect/dns.rs
--- a/src/client/connect/dns.rs
+++ b/src/client/connect/dns.rs
@@ -180,37 +172,46 @@ pub(super) struct IpAddrs {
impl IpAddrs {
pub(super) fn new(addrs: Vec<SocketAddr>) -> Self {
- IpAddrs { iter: addrs.into_iter() }
+ IpAddrs {
+ iter: addrs.into_iter(),
+ }
}
pub(super) fn try_parse(host: &str, port: u16) -> Option<IpAddrs> {
if let Ok(addr) = host.parse::<Ipv4Addr>() {
let addr = SocketAddrV4::new(addr, port);
- return Some(IpAddrs { iter: vec![SocketAddr::V4(addr)].into_iter() })
+ return Some(IpAddrs {
+ iter: vec![SocketAddr::V4(addr)].into_iter(),
+ });
}
let host = host.trim_start_matches('[').trim_end_matches(']');
if let Ok(addr) = host.parse::<Ipv6Addr>() {
let addr = SocketAddrV6::new(addr, port, 0, 0);
- return Some(IpAddrs { iter: vec![SocketAddr::V6(addr)].into_iter() })
+ return Some(IpAddrs {
+ iter: vec![SocketAddr::V6(addr)].into_iter(),
+ });
}
None
}
pub(super) fn split_by_preference(self, local_addr: Option<IpAddr>) -> (IpAddrs, IpAddrs) {
if let Some(local_addr) = local_addr {
- let preferred = self.iter
+ let preferred = self
+ .iter
.filter(|addr| addr.is_ipv6() == local_addr.is_ipv6())
.collect();
(IpAddrs::new(preferred), IpAddrs::new(vec![]))
} else {
- let preferring_v6 = self.iter
+ let preferring_v6 = self
+ .iter
.as_slice()
.first()
.map(SocketAddr::is_ipv6)
.unwrap_or(false);
- let (preferred, fallback) = self.iter
+ let (preferred, fallback) = self
+ .iter
.partition::<Vec<_>, _>(|addr| addr.is_ipv6() == preferring_v6);
(IpAddrs::new(preferred), IpAddrs::new(fallback))
diff --git a/src/client/connect/dns.rs b/src/client/connect/dns.rs
--- a/src/client/connect/dns.rs
+++ b/src/client/connect/dns.rs
@@ -281,8 +282,15 @@ impl Future for TokioThreadpoolGaiFuture {
type Output = Result<GaiAddrs, io::Error>;
fn poll(self: Pin<&mut Self>, _cx: &mut task::Context<'_>) -> Poll<Self::Output> {
- match ready!(tokio_executor::threadpool::blocking(|| (self.name.as_str(), 0).to_socket_addrs())) {
- Ok(Ok(iter)) => Poll::Ready(Ok(GaiAddrs { inner: IpAddrs { iter } })),
+ match ready!(tokio_executor::threadpool::blocking(|| (
+ self.name.as_str(),
+ 0
+ )
+ .to_socket_addrs()))
+ {
+ Ok(Ok(iter)) => Poll::Ready(Ok(GaiAddrs {
+ inner: IpAddrs { iter },
+ })),
Ok(Err(e)) => Poll::Ready(Err(e)),
// a BlockingError, meaning not on a tokio_executor::threadpool :(
Err(e) => Poll::Ready(Err(io::Error::new(io::ErrorKind::Other, e))),
diff --git a/src/client/connect/dns.rs b/src/client/connect/dns.rs
--- a/src/client/connect/dns.rs
+++ b/src/client/connect/dns.rs
@@ -292,15 +300,15 @@ impl Future for TokioThreadpoolGaiFuture {
*/
mod sealed {
- use tower_service::Service;
- use crate::common::{Future, Poll, task};
use super::{IpAddr, Name};
+ use crate::common::{task, Future, Poll};
+ use tower_service::Service;
// "Trait alias" for `Service<Name, Response = Addrs>`
pub trait Resolve {
- type Addrs: Iterator<Item=IpAddr>;
+ type Addrs: Iterator<Item = IpAddr>;
type Error: Into<Box<dyn std::error::Error + Send + Sync>>;
- type Future: Future<Output=Result<Self::Addrs, Self::Error>>;
+ type Future: Future<Output = Result<Self::Addrs, Self::Error>>;
fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>>;
fn resolve(&mut self, name: Name) -> Self::Future;
diff --git a/src/client/connect/dns.rs b/src/client/connect/dns.rs
--- a/src/client/connect/dns.rs
+++ b/src/client/connect/dns.rs
@@ -309,7 +317,7 @@ mod sealed {
impl<S> Resolve for S
where
S: Service<Name>,
- S::Response: Iterator<Item=IpAddr>,
+ S::Response: Iterator<Item = IpAddr>,
S::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
{
type Addrs = S::Response;
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -1,26 +1,23 @@
-use std::fmt;
use std::error::Error as StdError;
+use std::future::Future;
+use std::pin::Pin;
+use std::task::{self, Poll};
+use std::fmt;
use std::io;
-use std::mem;
use std::net::{IpAddr, SocketAddr};
use std::sync::Arc;
use std::time::Duration;
+use futures_util::future::Either;
use http::uri::{Scheme, Uri};
-use futures_util::{TryFutureExt};
use net2::TcpBuilder;
-use pin_project::{pin_project, project};
use tokio::net::TcpStream;
use tokio::time::Delay;
-use crate::common::{Future, Pin, Poll, task};
+use super::dns::{self, resolve, GaiResolver, Resolve};
use super::{Connected, Destination};
-use super::dns::{self, GaiResolver, Resolve};
//#[cfg(feature = "runtime")] use super::dns::TokioThreadpoolGaiResolver;
-// TODO: unbox me?
-type ConnectFuture = Pin<Box<dyn Future<Output = io::Result<TcpStream>> + Send>>;
-
/// A connector for the `http` scheme.
///
/// Performs DNS resolution in a thread pool, and then connects over TCP.
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -102,7 +99,6 @@ impl HttpConnector<TokioThreadpoolGaiResolver> {
}
*/
-
impl<R> HttpConnector<R> {
/// Construct a new HttpConnector.
///
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -223,35 +219,22 @@ static INVALID_NOT_HTTP: &str = "invalid URL, scheme is not http";
static INVALID_MISSING_SCHEME: &str = "invalid URL, scheme is missing";
static INVALID_MISSING_HOST: &str = "invalid URL, host is missing";
-impl<R: Resolve> HttpConnector<R> {
- fn invalid_url(&self, msg: impl Into<Box<str>>) -> HttpConnecting<R> {
- HttpConnecting {
- config: self.config.clone(),
- state: State::Error(Some(ConnectError {
- msg: msg.into(),
- cause: None,
- })),
- port: 0,
- }
- }
-}
-
// R: Debug required for now to allow adding it to debug output later...
impl<R: fmt::Debug> fmt::Debug for HttpConnector<R> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_struct("HttpConnector")
- .finish()
+ f.debug_struct("HttpConnector").finish()
}
}
impl<R> tower_service::Service<Destination> for HttpConnector<R>
where
- R: Resolve + Clone + Send + Sync,
+ R: Resolve + Clone + Send + Sync + 'static,
R::Future: Send,
{
type Response = (TcpStream, Connected);
type Error = ConnectError;
- type Future = HttpConnecting<R>;
+ type Future =
+ Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;
fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>> {
ready!(self.resolver.poll_ready(cx)).map_err(ConnectError::dns)?;
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -259,6 +242,19 @@ where
}
fn call(&mut self, dst: Destination) -> Self::Future {
+ let mut self_ = self.clone();
+ Box::pin(async move { self_.call_async(dst).await })
+ }
+}
+
+impl<R> HttpConnector<R>
+where
+ R: Resolve,
+{
+ async fn call_async(
+ &mut self,
+ dst: Destination,
+ ) -> Result<(TcpStream, Connected), ConnectError> {
trace!(
"Http::connect; scheme={}, host={}, port={:?}",
dst.scheme(),
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -268,26 +264,85 @@ where
if self.config.enforce_http {
if dst.uri.scheme() != Some(&Scheme::HTTP) {
- return self.invalid_url(INVALID_NOT_HTTP);
+ return Err(ConnectError {
+ msg: INVALID_NOT_HTTP.into(),
+ cause: None,
+ });
}
} else if dst.uri.scheme().is_none() {
- return self.invalid_url(INVALID_MISSING_SCHEME);
+ return Err(ConnectError {
+ msg: INVALID_MISSING_SCHEME.into(),
+ cause: None,
+ });
}
let host = match dst.uri.host() {
Some(s) => s,
- None => return self.invalid_url(INVALID_MISSING_HOST),
+ None => {
+ return Err(ConnectError {
+ msg: INVALID_MISSING_HOST.into(),
+ cause: None,
+ })
+ }
};
let port = match dst.uri.port() {
Some(port) => port.as_u16(),
None => if dst.uri.scheme() == Some(&Scheme::HTTPS) { 443 } else { 80 },
};
- HttpConnecting {
- config: self.config.clone(),
- state: State::Lazy(self.resolver.clone(), host.into()),
- port,
+ let config = &self.config;
+
+ // If the host is already an IP addr (v4 or v6),
+ // skip resolving the dns and start connecting right away.
+ let addrs = if let Some(addrs) = dns::IpAddrs::try_parse(host, port) {
+ addrs
+ } else {
+ let addrs = resolve(&mut self.resolver, dns::Name::new(host.into()))
+ .await
+ .map_err(ConnectError::dns)?;
+ let addrs = addrs.map(|addr| SocketAddr::new(addr, port)).collect();
+ dns::IpAddrs::new(addrs)
+ };
+
+ let c = ConnectingTcp::new(
+ config.local_address,
+ addrs,
+ config.connect_timeout,
+ config.happy_eyeballs_timeout,
+ config.reuse_address,
+ );
+
+ let sock = c
+ .connect()
+ .await
+ .map_err(ConnectError::m("tcp connect error"))?;
+
+ if let Some(dur) = config.keep_alive_timeout {
+ sock.set_keepalive(Some(dur))
+ .map_err(ConnectError::m("tcp set_keepalive error"))?;
+ }
+
+ if let Some(size) = config.send_buffer_size {
+ sock.set_send_buffer_size(size)
+ .map_err(ConnectError::m("tcp set_send_buffer_size error"))?;
+ }
+
+ if let Some(size) = config.recv_buffer_size {
+ sock.set_recv_buffer_size(size)
+ .map_err(ConnectError::m("tcp set_recv_buffer_size error"))?;
}
+
+ sock.set_nodelay(config.nodelay)
+ .map_err(ConnectError::m("tcp set_nodelay error"))?;
+
+ let extra = HttpInfo {
+ remote_addr: sock
+ .peer_addr()
+ .map_err(ConnectError::m("tcp peer_addr error"))?,
+ };
+ let connected = Connected::new().extra(extra);
+
+ Ok((sock, connected))
}
}
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -298,14 +353,16 @@ where
{
type Response = TcpStream;
type Error = ConnectError;
- type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;
+ type Future =
+ Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;
fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>> {
tower_service::Service::<Destination>::poll_ready(self, cx)
}
fn call(&mut self, uri: Uri) -> Self::Future {
- Box::pin(self.call(Destination { uri }).map_ok(|(s, _)| s))
+ let mut self_ = self.clone();
+ Box::pin(async move { self_.call_async(Destination { uri }).await.map(|(s, _)| s) })
}
}
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -346,9 +403,7 @@ impl ConnectError {
S: Into<Box<str>>,
E: Into<Box<dyn StdError + Send + Sync>>,
{
- move |cause| {
- ConnectError::new(msg, cause)
- }
+ move |cause| ConnectError::new(msg, cause)
}
}
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -383,96 +438,6 @@ impl StdError for ConnectError {
}
}
-/// A Future representing work to connect to a URL.
-#[must_use = "futures do nothing unless polled"]
-#[pin_project]
-pub struct HttpConnecting<R: Resolve = GaiResolver> {
- config: Arc<Config>,
- #[pin]
- state: State<R>,
- port: u16,
-}
-
-#[pin_project]
-enum State<R: Resolve> {
- Lazy(R, String),
- Resolving(#[pin] R::Future),
- Connecting(ConnectingTcp),
- Error(Option<ConnectError>),
-}
-
-impl<R: Resolve> Future for HttpConnecting<R> {
- type Output = Result<(TcpStream, Connected), ConnectError>;
-
- #[project]
- fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
- let mut me = self.project();
- let config: &Config = &me.config;
- loop {
- let state;
- #[project]
- match me.state.as_mut().project() {
- State::Lazy(ref mut resolver, ref mut host) => {
- // If the host is already an IP addr (v4 or v6),
- // skip resolving the dns and start connecting right away.
- if let Some(addrs) = dns::IpAddrs::try_parse(host, *me.port) {
- state = State::Connecting(ConnectingTcp::new(
- config.local_address, addrs, config.connect_timeout, config.happy_eyeballs_timeout, config.reuse_address));
- } else {
- ready!(resolver.poll_ready(cx)).map_err(ConnectError::dns)?;
- let name = dns::Name::new(mem::replace(host, String::new()));
- state = State::Resolving(resolver.resolve(name));
- }
- },
- State::Resolving(future) => {
- let addrs = ready!(future.poll(cx)).map_err(ConnectError::dns)?;
- let port = *me.port;
- let addrs = addrs
- .map(|addr| SocketAddr::new(addr, port))
- .collect();
- let addrs = dns::IpAddrs::new(addrs);
- state = State::Connecting(ConnectingTcp::new(
- config.local_address, addrs, config.connect_timeout, config.happy_eyeballs_timeout, config.reuse_address));
- },
- State::Connecting(ref mut c) => {
- let sock = ready!(c.poll(cx))
- .map_err(ConnectError::m("tcp connect error"))?;
-
- if let Some(dur) = config.keep_alive_timeout {
- sock.set_keepalive(Some(dur)).map_err(ConnectError::m("tcp set_keepalive error"))?;
- }
-
- if let Some(size) = config.send_buffer_size {
- sock.set_send_buffer_size(size).map_err(ConnectError::m("tcp set_send_buffer_size error"))?;
- }
-
- if let Some(size) = config.recv_buffer_size {
- sock.set_recv_buffer_size(size).map_err(ConnectError::m("tcp set_recv_buffer_size error"))?;
- }
-
- sock.set_nodelay(config.nodelay).map_err(ConnectError::m("tcp set_nodelay error"))?;
-
- let extra = HttpInfo {
- remote_addr: sock.peer_addr().map_err(ConnectError::m("tcp peer_addr error"))?,
- };
- let connected = Connected::new()
- .extra(extra);
-
- return Poll::Ready(Ok((sock, connected)));
- },
- State::Error(ref mut e) => return Poll::Ready(Err(e.take().expect("polled more than once"))),
- }
- me.state.set(state);
- }
- }
-}
-
-impl<R: Resolve + fmt::Debug> fmt::Debug for HttpConnecting<R> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.pad("HttpConnecting")
- }
-}
-
struct ConnectingTcp {
local_addr: Option<IpAddr>,
preferred: ConnectingTcpRemote,
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -527,7 +492,6 @@ struct ConnectingTcpFallback {
struct ConnectingTcpRemote {
addrs: dns::IpAddrs,
connect_timeout: Option<Duration>,
- current: Option<ConnectFuture>,
}
impl ConnectingTcpRemote {
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -537,45 +501,39 @@ impl ConnectingTcpRemote {
Self {
addrs,
connect_timeout,
- current: None,
}
}
}
impl ConnectingTcpRemote {
- fn poll(
+ async fn connect(
&mut self,
- cx: &mut task::Context<'_>,
local_addr: &Option<IpAddr>,
reuse_address: bool,
- ) -> Poll<io::Result<TcpStream>> {
+ ) -> io::Result<TcpStream> {
let mut err = None;
- loop {
- if let Some(ref mut current) = self.current {
- match current.as_mut().poll(cx) {
- Poll::Ready(Ok(tcp)) => {
- debug!("connected to {:?}", tcp.peer_addr().ok());
- return Poll::Ready(Ok(tcp));
- },
- Poll::Pending => return Poll::Pending,
- Poll::Ready(Err(e)) => {
- trace!("connect error {:?}", e);
- err = Some(e);
- if let Some(addr) = self.addrs.next() {
- debug!("connecting to {}", addr);
- *current = connect(&addr, local_addr, reuse_address, self.connect_timeout)?;
- continue;
- }
- }
+ for addr in &mut self.addrs {
+ debug!("connecting to {}", addr);
+ match connect(
+ &addr,
+ local_addr,
+ reuse_address,
+ self.connect_timeout,
+ )?
+ .await
+ {
+ Ok(tcp) => {
+ debug!("connected to {:?}", tcp.peer_addr().ok());
+ return Ok(tcp);
+ }
+ Err(e) => {
+ trace!("connect error {:?}", e);
+ err = Some(e);
}
- } else if let Some(addr) = self.addrs.next() {
- debug!("connecting to {}", addr);
- self.current = Some(connect(&addr, local_addr, reuse_address, self.connect_timeout)?);
- continue;
}
-
- return Poll::Ready(Err(err.take().expect("missing connect error")));
}
+
+ return Err(err.take().expect("missing connect error"));
}
}
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -584,7 +542,7 @@ fn connect(
local_addr: &Option<IpAddr>,
reuse_address: bool,
connect_timeout: Option<Duration>,
-) -> io::Result<ConnectFuture> {
+) -> io::Result<impl Future<Output = io::Result<TcpStream>>> {
let builder = match addr {
&SocketAddr::V4(_) => TcpBuilder::new_v4()?,
&SocketAddr::V6(_) => TcpBuilder::new_v6()?,
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -600,12 +558,8 @@ fn connect(
} else if cfg!(windows) {
// Windows requires a socket be bound before calling connect
let any: SocketAddr = match addr {
- &SocketAddr::V4(_) => {
- ([0, 0, 0, 0], 0).into()
- },
- &SocketAddr::V6(_) => {
- ([0, 0, 0, 0, 0, 0, 0, 0], 0).into()
- }
+ &SocketAddr::V4(_) => ([0, 0, 0, 0], 0).into(),
+ &SocketAddr::V6(_) => ([0, 0, 0, 0, 0, 0, 0, 0], 0).into(),
};
builder.bind(any)?;
}
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -614,56 +568,58 @@ fn connect(
let std_tcp = builder.to_tcp_stream()?;
- Ok(Box::pin(async move {
+ Ok(async move {
let connect = TcpStream::connect_std(std_tcp, &addr);
match connect_timeout {
Some(dur) => match tokio::time::timeout(dur, connect).await {
Ok(Ok(s)) => Ok(s),
Ok(Err(e)) => Err(e),
Err(e) => Err(io::Error::new(io::ErrorKind::TimedOut, e)),
- }
+ },
None => connect.await,
}
- }))
+ })
}
impl ConnectingTcp {
- fn poll(&mut self, cx: &mut task::Context<'_>) -> Poll<io::Result<TcpStream>> {
- match self.fallback.take() {
- None => self.preferred.poll(cx, &self.local_addr, self.reuse_address),
- Some(mut fallback) => match self.preferred.poll(cx, &self.local_addr, self.reuse_address) {
- Poll::Ready(Ok(stream)) => {
- // Preferred successful - drop fallback.
- Poll::Ready(Ok(stream))
- }
- Poll::Pending => match Pin::new(&mut fallback.delay).poll(cx) {
- Poll::Ready(()) => match fallback.remote.poll(cx, &self.local_addr, self.reuse_address) {
- Poll::Ready(Ok(stream)) => {
- // Fallback successful - drop current preferred,
- // but keep fallback as new preferred.
- self.preferred = fallback.remote;
- Poll::Ready(Ok(stream))
- }
- Poll::Pending => {
- // Neither preferred nor fallback are ready.
- self.fallback = Some(fallback);
- Poll::Pending
+ async fn connect(mut self) -> io::Result<TcpStream> {
+ let Self {
+ ref local_addr,
+ reuse_address,
+ ..
+ } = self;
+ match self.fallback {
+ None => {
+ self.preferred
+ .connect(local_addr, reuse_address)
+ .await
+ }
+ Some(mut fallback) => {
+ let preferred_fut = self.preferred.connect(local_addr, reuse_address);
+ futures_util::pin_mut!(preferred_fut);
+
+ let fallback_fut = fallback.remote.connect(local_addr, reuse_address);
+ futures_util::pin_mut!(fallback_fut);
+
+ let (result, future) =
+ match futures_util::future::select(preferred_fut, fallback.delay).await {
+ Either::Left((result, _fallback_delay)) => {
+ (result, Either::Right(fallback_fut))
}
- Poll::Ready(Err(_)) => {
- // Fallback failed - resume with preferred only.
- Poll::Pending
+ Either::Right(((), preferred_fut)) => {
+ // Delay is done, start polling both the preferred and the fallback
+ futures_util::future::select(preferred_fut, fallback_fut)
+ .await
+ .factor_first()
}
- },
- Poll::Pending => {
- // Too early to attempt fallback.
- self.fallback = Some(fallback);
- Poll::Pending
- }
- }
- Poll::Ready(Err(_)) => {
- // Preferred failed - use fallback as new preferred.
- self.preferred = fallback.remote;
- self.preferred.poll(cx, &self.local_addr, self.reuse_address)
+ };
+
+ if let Err(_) = result {
+ // Fallback to the remaining future (could be preferred or fallback)
+ // if we get an error
+ future.await
+ } else {
+ result
}
}
}
|
2019-11-19T17:39:03Z
| 2,019
|
Rewrite HttpConnector internals with async/await
The internal code handling the `HttpConnector` is messy from manual future implementations. I believe it could be much cleaner if written with `async fn` and `await`.
|
hyperium__hyper-2019
|
diff --git a/src/client/connect/dns.rs b/src/client/connect/dns.rs
--- a/src/client/connect/dns.rs
+++ b/src/client/connect/dns.rs
@@ -326,33 +334,49 @@ mod sealed {
}
}
+pub(crate) async fn resolve<R>(resolver: &mut R, name: Name) -> Result<R::Addrs, R::Error>
+where
+ R: Resolve,
+{
+ futures_util::future::poll_fn(|cx| resolver.poll_ready(cx)).await?;
+ resolver.resolve(name).await
+}
+
#[cfg(test)]
mod tests {
- use std::net::{Ipv4Addr, Ipv6Addr};
use super::*;
+ use std::net::{Ipv4Addr, Ipv6Addr};
#[test]
fn test_ip_addrs_split_by_preference() {
let v4_addr = (Ipv4Addr::new(127, 0, 0, 1), 80).into();
let v6_addr = (Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1), 80).into();
- let (mut preferred, mut fallback) =
- IpAddrs { iter: vec![v4_addr, v6_addr].into_iter() }.split_by_preference(None);
+ let (mut preferred, mut fallback) = IpAddrs {
+ iter: vec![v4_addr, v6_addr].into_iter(),
+ }
+ .split_by_preference(None);
assert!(preferred.next().unwrap().is_ipv4());
assert!(fallback.next().unwrap().is_ipv6());
- let (mut preferred, mut fallback) =
- IpAddrs { iter: vec![v6_addr, v4_addr].into_iter() }.split_by_preference(None);
+ let (mut preferred, mut fallback) = IpAddrs {
+ iter: vec![v6_addr, v4_addr].into_iter(),
+ }
+ .split_by_preference(None);
assert!(preferred.next().unwrap().is_ipv6());
assert!(fallback.next().unwrap().is_ipv4());
- let (mut preferred, fallback) =
- IpAddrs { iter: vec![v4_addr, v6_addr].into_iter() }.split_by_preference(Some(v4_addr.ip()));
+ let (mut preferred, fallback) = IpAddrs {
+ iter: vec![v4_addr, v6_addr].into_iter(),
+ }
+ .split_by_preference(Some(v4_addr.ip()));
assert!(preferred.next().unwrap().is_ipv4());
assert!(fallback.is_empty());
- let (mut preferred, fallback) =
- IpAddrs { iter: vec![v4_addr, v6_addr].into_iter() }.split_by_preference(Some(v6_addr.ip()));
+ let (mut preferred, fallback) = IpAddrs {
+ iter: vec![v4_addr, v6_addr].into_iter(),
+ }
+ .split_by_preference(Some(v6_addr.ip()));
assert!(preferred.next().unwrap().is_ipv6());
assert!(fallback.is_empty());
}
diff --git a/src/client/connect/dns.rs b/src/client/connect/dns.rs
--- a/src/client/connect/dns.rs
+++ b/src/client/connect/dns.rs
@@ -370,10 +394,8 @@ mod tests {
let uri = ::http::Uri::from_static("http://[::1]:8080/");
let dst = super::super::Destination { uri };
- let mut addrs = IpAddrs::try_parse(
- dst.host(),
- dst.port().expect("port")
- ).expect("try_parse");
+ let mut addrs =
+ IpAddrs::try_parse(dst.host(), dst.port().expect("port")).expect("try_parse");
let expected = "[::1]:8080".parse::<SocketAddr>().expect("expected");
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -674,10 +630,13 @@ impl ConnectingTcp {
mod tests {
use std::io;
- use super::{Connected, Destination, HttpConnector};
use super::super::sealed::Connect;
+ use super::{Connected, Destination, HttpConnector};
- async fn connect<C>(connector: C, dst: Destination) -> Result<(C::Transport, Connected), C::Error>
+ async fn connect<C>(
+ connector: C,
+ dst: Destination,
+ ) -> Result<(C::Transport, Connected), C::Error>
where
C: Connect,
{
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -687,9 +646,7 @@ mod tests {
#[tokio::test]
async fn test_errors_enforce_http() {
let uri = "https://example.domain/foo/bar?baz".parse().unwrap();
- let dst = Destination {
- uri,
- };
+ let dst = Destination { uri };
let connector = HttpConnector::new();
let err = connect(connector, dst).await.unwrap_err();
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -699,9 +656,7 @@ mod tests {
#[tokio::test]
async fn test_errors_missing_scheme() {
let uri = "example.domain".parse().unwrap();
- let dst = Destination {
- uri,
- };
+ let dst = Destination { uri };
let mut connector = HttpConnector::new();
connector.enforce_http(false);
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -712,12 +667,9 @@ mod tests {
#[test]
#[cfg_attr(not(feature = "__internal_happy_eyeballs_tests"), ignore)]
fn client_happy_eyeballs() {
- use std::future::Future;
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, TcpListener};
- use std::task::Poll;
use std::time::{Duration, Instant};
- use crate::common::{Pin, task};
use super::dns;
use super::ConnectingTcp;
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -740,40 +692,81 @@ mod tests {
let scenarios = &[
// Fast primary, without fallback.
- (&[local_ipv4_addr()][..],
- 4, local_timeout, false),
- (&[local_ipv6_addr()][..],
- 6, local_timeout, false),
-
+ (&[local_ipv4_addr()][..], 4, local_timeout, false),
+ (&[local_ipv6_addr()][..], 6, local_timeout, false),
// Fast primary, with (unused) fallback.
- (&[local_ipv4_addr(), local_ipv6_addr()][..],
- 4, local_timeout, false),
- (&[local_ipv6_addr(), local_ipv4_addr()][..],
- 6, local_timeout, false),
-
+ (
+ &[local_ipv4_addr(), local_ipv6_addr()][..],
+ 4,
+ local_timeout,
+ false,
+ ),
+ (
+ &[local_ipv6_addr(), local_ipv4_addr()][..],
+ 6,
+ local_timeout,
+ false,
+ ),
// Unreachable + fast primary, without fallback.
- (&[unreachable_ipv4_addr(), local_ipv4_addr()][..],
- 4, unreachable_v4_timeout, false),
- (&[unreachable_ipv6_addr(), local_ipv6_addr()][..],
- 6, unreachable_v6_timeout, false),
-
+ (
+ &[unreachable_ipv4_addr(), local_ipv4_addr()][..],
+ 4,
+ unreachable_v4_timeout,
+ false,
+ ),
+ (
+ &[unreachable_ipv6_addr(), local_ipv6_addr()][..],
+ 6,
+ unreachable_v6_timeout,
+ false,
+ ),
// Unreachable + fast primary, with (unused) fallback.
- (&[unreachable_ipv4_addr(), local_ipv4_addr(), local_ipv6_addr()][..],
- 4, unreachable_v4_timeout, false),
- (&[unreachable_ipv6_addr(), local_ipv6_addr(), local_ipv4_addr()][..],
- 6, unreachable_v6_timeout, true),
-
+ (
+ &[
+ unreachable_ipv4_addr(),
+ local_ipv4_addr(),
+ local_ipv6_addr(),
+ ][..],
+ 4,
+ unreachable_v4_timeout,
+ false,
+ ),
+ (
+ &[
+ unreachable_ipv6_addr(),
+ local_ipv6_addr(),
+ local_ipv4_addr(),
+ ][..],
+ 6,
+ unreachable_v6_timeout,
+ true,
+ ),
// Slow primary, with (used) fallback.
- (&[slow_ipv4_addr(), local_ipv4_addr(), local_ipv6_addr()][..],
- 6, fallback_timeout, false),
- (&[slow_ipv6_addr(), local_ipv6_addr(), local_ipv4_addr()][..],
- 4, fallback_timeout, true),
-
+ (
+ &[slow_ipv4_addr(), local_ipv4_addr(), local_ipv6_addr()][..],
+ 6,
+ fallback_timeout,
+ false,
+ ),
+ (
+ &[slow_ipv6_addr(), local_ipv6_addr(), local_ipv4_addr()][..],
+ 4,
+ fallback_timeout,
+ true,
+ ),
// Slow primary, with (used) unreachable + fast fallback.
- (&[slow_ipv4_addr(), unreachable_ipv6_addr(), local_ipv6_addr()][..],
- 6, fallback_timeout + unreachable_v6_timeout, false),
- (&[slow_ipv6_addr(), unreachable_ipv4_addr(), local_ipv4_addr()][..],
- 4, fallback_timeout + unreachable_v4_timeout, true),
+ (
+ &[slow_ipv4_addr(), unreachable_ipv6_addr(), local_ipv6_addr()][..],
+ 6,
+ fallback_timeout + unreachable_v6_timeout,
+ false,
+ ),
+ (
+ &[slow_ipv6_addr(), unreachable_ipv4_addr(), local_ipv4_addr()][..],
+ 4,
+ fallback_timeout + unreachable_v4_timeout,
+ true,
+ ),
];
// Scenarios for IPv6 -> IPv4 fallback require that host can access IPv6 network.
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -785,14 +778,30 @@ mod tests {
continue;
}
- let addrs = hosts.iter().map(|host| (host.clone(), addr.port()).into()).collect();
- let (res, duration) = rt.block_on(async move {
- let connecting_tcp = ConnectingTcp::new(None, dns::IpAddrs::new(addrs), None, Some(fallback_timeout), false);
- let fut = ConnectingTcpFuture(connecting_tcp);
- let start = Instant::now();
- let res = fut.await.unwrap();
- (res, start.elapsed())
- });
+
+ let (start, stream) = rt
+ .block_on(async move {
+ let addrs = hosts
+ .iter()
+ .map(|host| (host.clone(), addr.port()).into())
+ .collect();
+ let connecting_tcp = ConnectingTcp::new(
+ None,
+ dns::IpAddrs::new(addrs),
+ None,
+ Some(fallback_timeout),
+ false,
+ );
+ let start = Instant::now();
+ Ok::<_, io::Error>((start, connecting_tcp.connect().await?))
+ })
+ .unwrap();
+ let res = if stream.peer_addr().unwrap().is_ipv4() {
+ 4
+ } else {
+ 6
+ };
+ let duration = start.elapsed();
// Allow actual duration to be +/- 150ms off.
let min_duration = if timeout >= Duration::from_millis(150) {
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -807,22 +816,6 @@ mod tests {
assert!(duration <= max_duration);
}
- struct ConnectingTcpFuture(ConnectingTcp);
-
- impl Future for ConnectingTcpFuture {
- type Output = Result<u8, std::io::Error>;
-
- fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
- match self.0.poll(cx) {
- Poll::Ready(Ok(stream)) => Poll::Ready(Ok(
- if stream.peer_addr().unwrap().is_ipv4() { 4 } else { 6 }
- )),
- Poll::Ready(Err(e)) => Poll::Ready(Err(e)),
- Poll::Pending => Poll::Pending,
- }
- }
- }
-
fn local_ipv4_addr() -> IpAddr {
Ipv4Addr::new(127, 0, 0, 1).into()
}
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -851,8 +844,8 @@ mod tests {
fn measure_connect(addr: IpAddr) -> (bool, Duration) {
let start = Instant::now();
- let result = ::std::net::TcpStream::connect_timeout(
- &(addr, 80).into(), Duration::from_secs(1));
+ let result =
+ ::std::net::TcpStream::connect_timeout(&(addr, 80).into(), Duration::from_secs(1));
let reachable = result.is_ok() || result.unwrap_err().kind() == io::ErrorKind::TimedOut;
let duration = start.elapsed();
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -860,4 +853,3 @@ mod tests {
}
}
}
-
|
hyperium/hyper
|
cc7d3058e84b63d26697026f44cdc153b208de9a
|
|
[
"1903"
] |
0.2
|
039281b89cf1ab54a0ecc10c5e7fee56d4da0cf4
|
diff --git a/src/client/connect/dns.rs b/src/client/connect/dns.rs
--- a/src/client/connect/dns.rs
+++ b/src/client/connect/dns.rs
@@ -1,11 +1,26 @@
-//! The `Resolve` trait, support types, and some basic implementations.
+//! DNS Resolution used by the `HttpConnector`.
//!
//! This module contains:
//!
//! - A [`GaiResolver`](dns::GaiResolver) that is the default resolver for the
//! `HttpConnector`.
-//! - The [`Resolve`](dns::Resolve) trait and related types to build a custom
-//! resolver for use with the `HttpConnector`.
+//! - The `Name` type used as an argument to custom resolvers.
+//!
+//! # Resolvers are `Service`s
+//!
+//! A resolver is just a
+//! `Service<Name, Response = impl Iterator<Item = IpAddr>>`.
+//!
+//! A simple resolver that ignores the name and always returns a specific
+//! address:
+//!
+//! ```rust,ignore
+//! use std::{convert::Infallible, iter, net::IpAddr};
+//!
+//! let resolver = tower::service_fn(|_name| async {
+//! Ok::<_, Infallible>(iter::once(IpAddr::from([127, 0, 0, 1])))
+//! });
+//! ```
use std::{fmt, io, vec};
use std::error::Error;
use std::net::{
diff --git a/src/client/connect/dns.rs b/src/client/connect/dns.rs
--- a/src/client/connect/dns.rs
+++ b/src/client/connect/dns.rs
@@ -15,19 +30,10 @@ use std::net::{
};
use std::str::FromStr;
-use tokio_sync::{mpsc, oneshot};
+use tower_service::Service;
+use crate::common::{Future, Pin, Poll, task};
-use crate::common::{Future, Never, Pin, Poll, task};
-
-/// Resolve a hostname to a set of IP addresses.
-pub trait Resolve {
- /// The set of IP addresses to try to connect to.
- type Addrs: Iterator<Item=IpAddr>;
- /// A Future of the resolved set of addresses.
- type Future: Future<Output=Result<Self::Addrs, io::Error>>;
- /// Resolve a hostname.
- fn resolve(&self, name: Name) -> Self::Future;
-}
+pub(super) use self::sealed::Resolve;
/// A domain name to resolve into IP addresses.
#[derive(Clone, Hash, Eq, PartialEq)]
diff --git a/src/client/connect/dns.rs b/src/client/connect/dns.rs
--- a/src/client/connect/dns.rs
+++ b/src/client/connect/dns.rs
@@ -41,15 +47,12 @@ pub struct GaiResolver {
_priv: (),
}
-#[derive(Clone)]
-struct ThreadPoolKeepAlive(mpsc::Sender<Never>);
-
/// An iterator of IP addresses returned from `getaddrinfo`.
pub struct GaiAddrs {
inner: IpAddrs,
}
-/// A future to resole a name returned by `GaiResolver`.
+/// A future to resolve a name returned by `GaiResolver`.
pub struct GaiFuture {
inner: tokio_executor::blocking::Blocking<Result<IpAddrs, io::Error>>,
}
diff --git a/src/client/connect/dns.rs b/src/client/connect/dns.rs
--- a/src/client/connect/dns.rs
+++ b/src/client/connect/dns.rs
@@ -110,11 +113,16 @@ impl GaiResolver {
}
}
-impl Resolve for GaiResolver {
- type Addrs = GaiAddrs;
+impl Service<Name> for GaiResolver {
+ type Response = GaiAddrs;
+ type Error = io::Error;
type Future = GaiFuture;
- fn resolve(&self, name: Name) -> Self::Future {
+ fn poll_ready(&mut self, _cx: &mut task::Context<'_>) -> Poll<Result<(), io::Error>> {
+ Poll::Ready(Ok(()))
+ }
+
+ fn call(&mut self, name: Name) -> Self::Future {
let blocking = tokio_executor::blocking::run(move || {
debug!("resolving host={:?}", name.host);
(&*name.host, 0).to_socket_addrs()
diff --git a/src/client/connect/dns.rs b/src/client/connect/dns.rs
--- a/src/client/connect/dns.rs
+++ b/src/client/connect/dns.rs
@@ -164,39 +172,6 @@ impl fmt::Debug for GaiAddrs {
}
}
-
-pub(super) struct GaiBlocking {
- host: String,
- tx: Option<oneshot::Sender<io::Result<IpAddrs>>>,
-}
-
-impl GaiBlocking {
- fn block(&self) -> io::Result<IpAddrs> {
- debug!("resolving host={:?}", self.host);
- (&*self.host, 0).to_socket_addrs()
- .map(|i| IpAddrs { iter: i })
-
- }
-}
-
-impl Future for GaiBlocking {
- type Output = ();
-
- fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
- if self.tx.as_mut().expect("polled after complete").poll_closed(cx).is_ready() {
- trace!("resolve future canceled for {:?}", self.host);
- return Poll::Ready(());
- }
-
- let res = self.block();
-
- let tx = self.tx.take().expect("polled after complete");
- let _ = tx.send(res);
-
- Poll::Ready(())
- }
-}
-
pub(super) struct IpAddrs {
iter: vec::IntoIter<SocketAddr>,
}
diff --git a/src/client/connect/dns.rs b/src/client/connect/dns.rs
--- a/src/client/connect/dns.rs
+++ b/src/client/connect/dns.rs
@@ -276,11 +251,16 @@ impl TokioThreadpoolGaiResolver {
}
#[cfg(feature = "runtime")]
-impl Resolve for TokioThreadpoolGaiResolver {
- type Addrs = GaiAddrs;
+impl Service<Name> for TokioThreadpoolGaiResolver {
+ type Response = GaiAddrs;
+ type Error = io::Error;
type Future = TokioThreadpoolGaiFuture;
- fn resolve(&self, name: Name) -> TokioThreadpoolGaiFuture {
+ fn poll_ready(&mut self, _cx: &mut task::Context<'_>) -> Poll<Result<(), io::Error>> {
+ Poll::Ready(Ok(()))
+ }
+
+ fn call(&mut self, name: Name) -> Self::Future {
TokioThreadpoolGaiFuture { name }
}
}
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -228,11 +228,18 @@ impl<R> HttpConnector<R> {
}
}
+static INVALID_NOT_HTTP: &str = "invalid URL, scheme is not http";
+static INVALID_MISSING_SCHEME: &str = "invalid URL, scheme is missing";
+static INVALID_MISSING_HOST: &str = "invalid URL, host is missing";
+
impl<R: Resolve> HttpConnector<R> {
- fn invalid_url(&self, err: InvalidUrl) -> HttpConnecting<R> {
+ fn invalid_url(&self, msg: impl Into<Box<str>>) -> HttpConnecting<R> {
HttpConnecting {
config: self.config.clone(),
- state: State::Error(Some(io::Error::new(io::ErrorKind::InvalidInput, err))),
+ state: State::Error(Some(ConnectError {
+ msg: msg.into(),
+ cause: None,
+ })),
port: 0,
}
}
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -252,14 +259,11 @@ where
R::Future: Send,
{
type Response = (TcpStream, Connected);
- type Error = io::Error;
+ type Error = ConnectError;
type Future = HttpConnecting<R>;
fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>> {
- // For now, always ready.
- // TODO: When `Resolve` becomes an alias for `Service`, check
- // the resolver's readiness.
- drop(cx);
+ ready!(self.resolver.poll_ready(cx)).map_err(ConnectError::dns)?;
Poll::Ready(Ok(()))
}
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -273,15 +277,15 @@ where
if self.config.enforce_http {
if dst.uri.scheme_part() != Some(&Scheme::HTTP) {
- return self.invalid_url(InvalidUrl::NotHttp);
+ return self.invalid_url(INVALID_NOT_HTTP);
}
} else if dst.uri.scheme_part().is_none() {
- return self.invalid_url(InvalidUrl::MissingScheme);
+ return self.invalid_url(INVALID_MISSING_SCHEME);
}
let host = match dst.uri.host() {
Some(s) => s,
- None => return self.invalid_url(InvalidUrl::MissingAuthority),
+ None => return self.invalid_url(INVALID_MISSING_HOST),
};
let port = match dst.uri.port_part() {
Some(port) => port.as_u16(),
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -302,7 +306,7 @@ where
R::Future: Send,
{
type Response = TcpStream;
- type Error = io::Error;
+ type Error = ConnectError;
type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;
fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>> {
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -324,28 +328,73 @@ impl HttpInfo {
}
}
-#[derive(Debug, Clone, Copy)]
-enum InvalidUrl {
- MissingScheme,
- NotHttp,
- MissingAuthority,
+// Not publicly exported (so missing_docs doesn't trigger).
+pub struct ConnectError {
+ msg: Box<str>,
+ cause: Option<Box<dyn StdError + Send + Sync>>,
+}
+
+impl ConnectError {
+ fn new<S, E>(msg: S, cause: E) -> ConnectError
+ where
+ S: Into<Box<str>>,
+ E: Into<Box<dyn StdError + Send + Sync>>,
+ {
+ ConnectError {
+ msg: msg.into(),
+ cause: Some(cause.into()),
+ }
+ }
+
+ fn dns<E>(cause: E) -> ConnectError
+ where
+ E: Into<Box<dyn StdError + Send + Sync>>,
+ {
+ ConnectError::new("dns error", cause)
+ }
+
+ fn m<S, E>(msg: S) -> impl FnOnce(E) -> ConnectError
+ where
+ S: Into<Box<str>>,
+ E: Into<Box<dyn StdError + Send + Sync>>,
+ {
+ move |cause| {
+ ConnectError::new(msg, cause)
+ }
+ }
}
-impl fmt::Display for InvalidUrl {
+impl fmt::Debug for ConnectError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.write_str(self.description())
+ if let Some(ref cause) = self.cause {
+ f.debug_tuple("ConnectError")
+ .field(&self.msg)
+ .field(cause)
+ .finish()
+ } else {
+ self.msg.fmt(f)
+ }
}
}
-impl StdError for InvalidUrl {
- fn description(&self) -> &str {
- match *self {
- InvalidUrl::MissingScheme => "invalid URL, missing scheme",
- InvalidUrl::NotHttp => "invalid URL, scheme must be http",
- InvalidUrl::MissingAuthority => "invalid URL, missing domain",
+impl fmt::Display for ConnectError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str(&self.msg)?;
+
+ if let Some(ref cause) = self.cause {
+ write!(f, ": {}", cause)?;
}
+
+ Ok(())
}
}
+
+impl StdError for ConnectError {
+ fn source(&self) -> Option<&(dyn StdError + 'static)> {
+ self.cause.as_ref().map(|e| &**e as _)
+ }
+}
+
/// A Future representing work to connect to a URL.
#[must_use = "futures do nothing unless polled"]
#[pin_project]
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -361,11 +410,11 @@ enum State<R: Resolve> {
Lazy(R, String),
Resolving(#[pin] R::Future),
Connecting(ConnectingTcp),
- Error(Option<io::Error>),
+ Error(Option<ConnectError>),
}
impl<R: Resolve> Future for HttpConnecting<R> {
- type Output = Result<(TcpStream, Connected), io::Error>;
+ type Output = Result<(TcpStream, Connected), ConnectError>;
#[project]
fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -375,19 +424,20 @@ impl<R: Resolve> Future for HttpConnecting<R> {
let state;
#[project]
match me.state.as_mut().project() {
- State::Lazy(ref resolver, ref mut host) => {
+ State::Lazy(ref mut resolver, ref mut host) => {
// If the host is already an IP addr (v4 or v6),
// skip resolving the dns and start connecting right away.
if let Some(addrs) = dns::IpAddrs::try_parse(host, *me.port) {
state = State::Connecting(ConnectingTcp::new(
config.local_address, addrs, config.connect_timeout, config.happy_eyeballs_timeout, config.reuse_address));
} else {
+ ready!(resolver.poll_ready(cx)).map_err(ConnectError::dns)?;
let name = dns::Name::new(mem::replace(host, String::new()));
state = State::Resolving(resolver.resolve(name));
}
},
State::Resolving(future) => {
- let addrs = ready!(future.poll(cx))?;
+ let addrs = ready!(future.poll(cx)).map_err(ConnectError::dns)?;
let port = *me.port;
let addrs = addrs
.map(|addr| SocketAddr::new(addr, port))
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -397,24 +447,25 @@ impl<R: Resolve> Future for HttpConnecting<R> {
config.local_address, addrs, config.connect_timeout, config.happy_eyeballs_timeout, config.reuse_address));
},
State::Connecting(ref mut c) => {
- let sock = ready!(c.poll(cx, &config.handle))?;
+ let sock = ready!(c.poll(cx, &config.handle))
+ .map_err(ConnectError::m("tcp connect error"))?;
if let Some(dur) = config.keep_alive_timeout {
- sock.set_keepalive(Some(dur))?;
+ sock.set_keepalive(Some(dur)).map_err(ConnectError::m("tcp set_keepalive error"))?;
}
if let Some(size) = config.send_buffer_size {
- sock.set_send_buffer_size(size)?;
+ sock.set_send_buffer_size(size).map_err(ConnectError::m("tcp set_send_buffer_size error"))?;
}
if let Some(size) = config.recv_buffer_size {
- sock.set_recv_buffer_size(size)?;
+ sock.set_recv_buffer_size(size).map_err(ConnectError::m("tcp set_recv_buffer_size error"))?;
}
- sock.set_nodelay(config.nodelay)?;
+ sock.set_nodelay(config.nodelay).map_err(ConnectError::m("tcp set_nodelay error"))?;
let extra = HttpInfo {
- remote_addr: sock.peer_addr()?,
+ remote_addr: sock.peer_addr().map_err(ConnectError::m("tcp peer_addr error"))?,
};
let connected = Connected::new()
.extra(extra);
|
Now that #1792 is closed, what do you have in mind by "basically an alias"? I assume this isn't referring to waiting for https://github.com/rust-lang/rust/issues/41517 , so what should `Resolve` become?
It'd be similar to `HttpService`, basically just bounds that mean `R: Service<Name, Response = impl Iterator<Item = IpAddr>>`. As the top comment mentions, that'd allow composing `Service`s, such as easily adding in a timeout layer.
|
2019-11-12T20:08:57Z
| 2,011
|
Change `Resolve` to alias `Service`
After #1782 is done, the `Resolve` trait should be changed to basically an alias for a `Service<Name>`. This would allow more composability, being able to use ServiceExt to customize resolvers.
|
hyperium__hyper-2011
|
diff --git a/src/client/connect/dns.rs b/src/client/connect/dns.rs
--- a/src/client/connect/dns.rs
+++ b/src/client/connect/dns.rs
@@ -299,6 +279,41 @@ impl Future for TokioThreadpoolGaiFuture {
}
}
+mod sealed {
+ use tower_service::Service;
+ use crate::common::{Future, Poll, task};
+ use super::{IpAddr, Name};
+
+ // "Trait alias" for `Service<Name, Response = Addrs>`
+ pub trait Resolve {
+ type Addrs: Iterator<Item=IpAddr>;
+ type Error: Into<Box<dyn std::error::Error + Send + Sync>>;
+ type Future: Future<Output=Result<Self::Addrs, Self::Error>>;
+
+ fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>>;
+ fn resolve(&mut self, name: Name) -> Self::Future;
+ }
+
+ impl<S> Resolve for S
+ where
+ S: Service<Name>,
+ S::Response: Iterator<Item=IpAddr>,
+ S::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
+ {
+ type Addrs = S::Response;
+ type Error = S::Error;
+ type Future = S::Future;
+
+ fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>> {
+ Service::poll_ready(self, cx)
+ }
+
+ fn resolve(&mut self, name: Name) -> Self::Future {
+ Service::call(self, name)
+ }
+ }
+}
+
#[cfg(test)]
mod tests {
use std::net::{Ipv4Addr, Ipv6Addr};
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -642,7 +693,6 @@ impl ConnectingTcp {
mod tests {
use std::io;
- use tokio::runtime::current_thread::Runtime;
use tokio_net::driver::Handle;
use super::{Connected, Destination, HttpConnector};
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -655,55 +705,29 @@ mod tests {
connector.connect(super::super::sealed::Internal, dst).await
}
- #[test]
- fn test_errors_missing_authority() {
- let mut rt = Runtime::new().unwrap();
- let uri = "/foo/bar?baz".parse().unwrap();
- let dst = Destination {
- uri,
- };
- let connector = HttpConnector::new();
-
- rt.block_on(async {
- assert_eq!(
- connect(connector, dst).await.unwrap_err().kind(),
- io::ErrorKind::InvalidInput,
- );
- })
- }
-
- #[test]
- fn test_errors_enforce_http() {
- let mut rt = Runtime::new().unwrap();
+ #[tokio::test]
+ async fn test_errors_enforce_http() {
let uri = "https://example.domain/foo/bar?baz".parse().unwrap();
let dst = Destination {
uri,
};
let connector = HttpConnector::new();
- rt.block_on(async {
- assert_eq!(
- connect(connector, dst).await.unwrap_err().kind(),
- io::ErrorKind::InvalidInput,
- );
- })
+ let err = connect(connector, dst).await.unwrap_err();
+ assert_eq!(&*err.msg, super::INVALID_NOT_HTTP);
}
- #[test]
- fn test_errors_missing_scheme() {
- let mut rt = Runtime::new().unwrap();
+ #[tokio::test]
+ async fn test_errors_missing_scheme() {
let uri = "example.domain".parse().unwrap();
let dst = Destination {
uri,
};
- let connector = HttpConnector::new();
+ let mut connector = HttpConnector::new();
+ connector.enforce_http(false);
- rt.block_on(async {
- assert_eq!(
- connect(connector, dst).await.unwrap_err().kind(),
- io::ErrorKind::InvalidInput,
- );
- });
+ let err = connect(connector, dst).await.unwrap_err();
+ assert_eq!(&*err.msg, super::INVALID_MISSING_SCHEME);
}
#[test]
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1722,7 +1722,7 @@ mod dispatch_impl {
impl hyper::service::Service<Destination> for DebugConnector {
type Response = (DebugStream, Connected);
- type Error = io::Error;
+ type Error = <HttpConnector as hyper::service::Service<Destination>>::Error;
type Future = Pin<Box<dyn Future<
Output = Result<Self::Response, Self::Error>
> + Send>>;
|
hyperium/hyper
|
cc7d3058e84b63d26697026f44cdc153b208de9a
|
[
"1843"
] |
0.2
|
a1fe383c9d86a813fb2574cc1145ec654ee47e0d
|
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -151,7 +151,9 @@ where I: AsyncRead + AsyncWrite + Unpin,
self.state.version = msg.head.version;
if msg.decode == DecodedLength::ZERO {
- debug_assert!(!msg.expect_continue, "expect-continue needs a body");
+ if log_enabled!(log::Level::Debug) && msg.expect_continue {
+ debug!("ignoring expect-continue since body is empty");
+ }
self.state.reading = Reading::KeepAlive;
if !T::should_read_first() {
self.try_keep_alive(cx);
|
2019-10-18T22:34:09Z
| 1,983
|
server debug panic on `Expect: 100-continue` without `Content-Length`
This is related to #1842 but seems distinct enough to warrant its own issue.
When doing Icecast streaming, the client typically sends an authorization header and an `Expect: 100-continue` to wait for authorization before trying to upload a stream. However, because of #1842 the hyper server decides the client is not actually going to send any data, and if the server is compiled in debug mode, this causes a panic `expect-continue needs a body`. When built in release mode the server ignores the `Expect: 100-continue` and handles the message as if it has no body.
You can trigger this using curl: `curl -X PUT http://127.0.0.1:5000/ -d test -H 'Expect: 100-continue' -H 'Content-Length:'`.
|
hyperium__hyper-1983
|
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -724,6 +724,28 @@ fn expect_continue_sends_100() {
assert_eq!(body, msg);
}
+#[test]
+fn expect_continue_but_no_body_is_ignored() {
+ let server = serve();
+ let mut req = connect(server.addr());
+ server.reply();
+
+ // no content-length or transfer-encoding means no body!
+ req.write_all(b"\
+ POST /foo HTTP/1.1\r\n\
+ Host: example.domain\r\n\
+ Expect: 100-continue\r\n\
+ Connection: Close\r\n\
+ \r\n\
+ ").expect("write");
+
+ let expected = "HTTP/1.1 200 OK\r\n";
+ let mut resp = String::new();
+ req.read_to_string(&mut resp).expect("read");
+
+ assert_eq!(&resp[..expected.len()], expected);
+}
+
#[test]
fn pipeline_disabled() {
let server = serve();
|
hyperium/hyper
|
cc7d3058e84b63d26697026f44cdc153b208de9a
|
|
[
"1764"
] |
0.2
|
8e7ebd80cd81d3b1ad900debd150667812c8d8e3
|
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -38,7 +38,7 @@ where I: AsyncRead + AsyncWrite + Unpin,
Conn {
io: Buffered::new(io),
state: State {
- allow_half_close: true,
+ allow_half_close: false,
cached_headers: None,
error: None,
keep_alive: KA::Busy,
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -76,8 +76,8 @@ where I: AsyncRead + AsyncWrite + Unpin,
self.state.title_case_headers = true;
}
- pub(crate) fn set_disable_half_close(&mut self) {
- self.state.allow_half_close = false;
+ pub(crate) fn set_allow_half_close(&mut self) {
+ self.state.allow_half_close = true;
}
pub fn into_inner(self) -> (I, Bytes) {
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -172,7 +172,7 @@ where I: AsyncRead + AsyncWrite + Unpin,
// message should be reported as an error. If not, it is just
// the connection closing gracefully.
let must_error = self.should_error_on_eof();
- self.state.close_read();
+ self.close_read();
self.io.consume_leading_lines();
let was_mid_parse = e.is_parse() || !self.io.read_buf().is_empty();
if was_mid_parse || must_error {
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -185,6 +185,7 @@ where I: AsyncRead + AsyncWrite + Unpin,
}
} else {
debug!("read eof");
+ self.close_write();
Poll::Ready(None)
}
}
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -204,7 +205,7 @@ where I: AsyncRead + AsyncWrite + Unpin,
None
})
} else if slice.is_empty() {
- error!("decode stream unexpectedly ended");
+ error!("incoming body unexpectedly ended");
// This should be unreachable, since all 3 decoders
// either set eof=true or return an Err when reading
// an empty slice...
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -216,7 +217,7 @@ where I: AsyncRead + AsyncWrite + Unpin,
},
Poll::Pending => return Poll::Pending,
Poll::Ready(Err(e)) => {
- debug!("decode stream error: {}", e);
+ debug!("incoming body decode error: {}", e);
(Reading::Closed, Poll::Ready(Some(Err(e))))
},
}
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -294,6 +295,10 @@ where I: AsyncRead + AsyncWrite + Unpin,
return Poll::Pending;
}
+ if self.state.is_read_closed() {
+ return Poll::Ready(Err(crate::Error::new_incomplete()));
+ }
+
let num_read = ready!(self.force_io_read(cx)).map_err(crate::Error::new_io)?;
if num_read == 0 {
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -306,6 +311,8 @@ where I: AsyncRead + AsyncWrite + Unpin,
}
fn force_io_read(&mut self, cx: &mut task::Context<'_>) -> Poll<io::Result<usize>> {
+ debug_assert!(!self.state.is_read_closed());
+
let result = ready!(self.io.poll_read_from_io(cx));
Poll::Ready(result.map_err(|e| {
trace!("force_io_read; io error = {:?}", e);
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -619,8 +626,10 @@ where I: AsyncRead + AsyncWrite + Unpin,
pub fn disable_keep_alive(&mut self) {
if self.state.is_idle() {
- self.state.close_read();
+ trace!("disable_keep_alive; closing idle connection");
+ self.state.close();
} else {
+ trace!("disable_keep_alive; in-progress connection");
self.state.disable_keep_alive();
}
}
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -60,7 +60,10 @@ where
}
pub fn disable_keep_alive(&mut self) {
- self.conn.disable_keep_alive()
+ self.conn.disable_keep_alive();
+ if self.conn.is_write_closed() {
+ self.close();
+ }
}
pub fn into_inner(self) -> (I, Bytes, D) {
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -233,10 +236,17 @@ where
// if here, the dispatcher gave the user the error
// somewhere else. we still need to shutdown, but
// not as a second error.
+ self.close();
Poll::Ready(Ok(()))
},
None => {
- // read eof, conn will start to shutdown automatically
+ // read eof, the write side will have been closed too unless
+ // allow_read_close was set to true, in which case just do
+ // nothing...
+ debug_assert!(self.conn.is_read_closed());
+ if self.conn.is_write_closed() {
+ self.close();
+ }
Poll::Ready(Ok(()))
}
}
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -194,7 +194,7 @@ impl Http {
Http {
exec: Exec::Default,
- h1_half_close: true,
+ h1_half_close: false,
h1_writev: true,
h2_builder,
mode: ConnectionMode::Fallback,
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -221,12 +221,11 @@ impl<E> Http<E> {
/// Set whether HTTP/1 connections should support half-closures.
///
/// Clients can chose to shutdown their write-side while waiting
- /// for the server to respond. Setting this to `false` will
- /// automatically close any connection immediately if `read`
- /// detects an EOF.
+ /// for the server to respond. Setting this to `true` will
+ /// prevent closing the connection immediately if `read`
+ /// detects an EOF in the middle of a request.
///
- /// Default is `true`.
- #[inline]
+ /// Default is `false`.
pub fn http1_half_close(&mut self, val: bool) -> &mut Self {
self.h1_half_close = val;
self
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -390,8 +389,8 @@ impl<E> Http<E> {
if !self.keep_alive {
conn.disable_keep_alive();
}
- if !self.h1_half_close {
- conn.set_disable_half_close();
+ if self.h1_half_close {
+ conn.set_allow_half_close();
}
if !self.h1_writev {
conn.set_write_strategy_flatten();
diff --git a/src/server/mod.rs b/src/server/mod.rs
--- a/src/server/mod.rs
+++ b/src/server/mod.rs
@@ -252,11 +252,11 @@ impl<I, E> Builder<I, E> {
/// Set whether HTTP/1 connections should support half-closures.
///
/// Clients can chose to shutdown their write-side while waiting
- /// for the server to respond. Setting this to `false` will
- /// automatically close any connection immediately if `read`
- /// detects an EOF.
+ /// for the server to respond. Setting this to `true` will
+ /// prevent closing the connection immediately if `read`
+ /// detects an EOF in the middle of a request.
///
- /// Default is `true`.
+ /// Default is `false`.
pub fn http1_half_close(mut self, val: bool) -> Self {
self.protocol.http1_half_close(val);
self
|
I can see the same behaviour with the latest Hyper v0.12.24 (tokio v0.1.15, tokio-io v0.1.11). The application works correctly when I put it behind nginx proxy.
Hm, sounds like it's looping here? https://github.com/hyperium/hyper/blob/877606d5c81195374259561aa98b973a00fa6056/src/proto/h1/io.rs#L215
Does the log message after it trigger over and over?
Yes, I also think it's at this line. I'm not sure which log message you mean. If you mean the stack trace, then it's also correct. Every time I interrupt the process in GDB, I get this stack trace (or a stack trace corresponding to this one but reaching only one of the callers).
I meant the `debug!("flushed {} bytes", n);` that happens just after `write_buf` line. It'd be useful to know if there is something mis-behaving, since the loop will exit in any of these 3 conditions:
- The transport returned `WouldBlock` (or any other error).
- The transport said it wrote `Ok(0)` bytes, meaning it is closed.
- The buffer of chunks reports there is no more bytes to write.
Sorry, I did not have much time to analyse it. I got back to it today.
I've discovered that the flushed message appears regularly until I stop the Docker container with my client. Once the container is stopped, the message stops appearing entirely. This would probably suggest that the `self.io.write_buf(...)` method returns `NotReady` and the `try_ready!(...)` macro does early return over and over again.
I've managed to create a minimum working example (see the attachment below). It's a simple Hyper server that responds with an infinite streamed response to every request. The project contains also a directory called "test". The directory contains a simple Python script that polls data from the server and a Docker file for creating a Docker image with the Python script.
You can re-create the problem using the following steps:
```bash
# unzip the file and enter the project directory and then:
cargo build
# the server will by default listen on 0.0.0.0:12345:
target/debug/hyper-loop
```
in a separate terminal:
```bash
# enter the project directory and then:
cd test
docker build -t hyper-loop-test .
# note: replace 172.17.42.1 with the address of your docker network interface
docker run --name=kill-me hyper-loop-test python3 -u test.py http://172.17.42.1:12345/
```
and finally kill the "kill-me" Docker container from a separate terminal:
```bash
docker rm -f kill-me
```
The server will never print the "client has left" message as defined in main.rs on line 29 and it will get into an infinite loop.
I've also noticed that it's sensitive to the amount of data the server sends at once. The bigger chunks of data I send, the bigger is the chance for getting stuck in an infinite loop.
Attachment: [hyper-loop.zip](https://github.com/hyperium/hyper/files/2967567/hyper-loop.zip)
Thanks for more details! By chance, do you happen to know if it it was logging the debug line "flushed n bytes" in the loop?
As I mentioned, the line "flushed n bytes" stops appearing when I stop the Docker container with my client. Here is a trace log that I captured from the server example:
```
TRACE - loop process - 0 events, 0.000s
TRACE - loop process - 0 events, 0.000s
TRACE - -> wakeup; idx=3
TRACE - loop process - 0 events, 0.000s
TRACE - Notifier::notify; id=0x7fa820002e70
TRACE - -> submit internal; idx=3
TRACE - signal_work -- notify; idx=3
TRACE - -> wakeup; idx=3
TRACE - Task::run; state=Running
TRACE - encoding chunked 340000B
DEBUG - flushed 340009 bytes
TRACE - flushed({role=server}): State { reading: KeepAlive, writing: Body(Encoder { kind: Chunked, is_last: false }), keep_alive: Busy }
TRACE - -> not ready
TRACE - event Readable Token(4194303)
TRACE - loop process - 1 events, 0.000s
TRACE - Worker::sleep; worker=WorkerId(3)
TRACE - sleeping -- push to stack; idx=3
TRACE - -> starting to sleep; idx=3
TRACE - loop process - 0 events, 0.000s
TRACE - event Readable | Writable | Hup Token(4194305)
TRACE - loop process - 1 events, 0.000s
TRACE - -> wakeup; idx=3
TRACE - loop process - 0 events, 0.000s
TRACE - -> wakeup; idx=3
TRACE - loop process - 0 events, 0.000s
TRACE - Notifier::notify; id=0x7fa820002e70
TRACE - -> submit internal; idx=3
TRACE - signal_work -- notify; idx=3
TRACE - -> wakeup; idx=3
TRACE - Task::run; state=Running
TRACE - encoding chunked 340000B
DEBUG - flushed 340009 bytes
TRACE - flushed({role=server}): State { reading: KeepAlive, writing: Body(Encoder { kind: Chunked, is_last: false }), keep_alive: Busy }
TRACE - -> not ready
TRACE - event Readable Token(4194303)
TRACE - loop process - 1 events, 0.000s
TRACE - Worker::sleep; worker=WorkerId(3)
TRACE - sleeping -- push to stack; idx=3
TRACE - -> starting to sleep; idx=3
TRACE - loop process - 0 events, 0.000s
TRACE - loop process - 0 events, 0.000s
TRACE - -> wakeup; idx=3
TRACE - loop process - 0 events, 0.000s
TRACE - -> wakeup; idx=3
TRACE - loop process - 0 events, 0.000s
TRACE - Notifier::notify; id=0x7fa820002e70
TRACE - -> submit internal; idx=3
TRACE - signal_work -- notify; idx=3
TRACE - -> wakeup; idx=3
TRACE - Task::run; state=Running
TRACE - encoding chunked 340000B
DEBUG - flushed 271 bytes
TRACE - Notifier::notify; id=0x7fa820002e70
TRACE - -> not ready
TRACE - event Readable Token(4194303)
TRACE - loop process - 1 events, 0.000s
TRACE - Task::run; state=Running
TRACE - Notifier::notify; id=0x7fa820002e70
TRACE - -> not ready
TRACE - Task::run; state=Running
TRACE - Notifier::notify; id=0x7fa820002e70
TRACE - -> not ready
TRACE - Task::run; state=Running
TRACE - Notifier::notify; id=0x7fa820002e70
TRACE - -> not ready
...
```
The last three lines repeat over and over again. Clearly, there's a HUP event processed by the event loop (the line `TRACE - event Readable | Writable | Hup Token(4194305)`) but the application keeps calling the flush method even though it probably should not do it.
Ah yes, this is #1716. hyper by default allows for half-closed transports (calling `shutdown(Write)` sends a HUP). Often times, sockets will also error on write with `EPIPE`, I'm not sure why it wouldn't in your case.
Thank you for the info. Setting `http1_half_close` to `false` in the server builder helped.
I'm just wondering if this should not be the default behaviour. I realize it's a breaking change for some applications. However, if half-closed connections are enabled by default, it'll lead to this problem sooner or later. I guess that we don't want production applications to end up with 100% CPU usage for no apparent reason. It might be even considered a DoS vulnerability.
Regarding EPIPE - in my opinion, writing into a (half) closed socket does not necessarily have to end up with EPIPE as the remote peer might be behind firewall/NAT. In such case, the remote end would simply drop all incoming packets instead of sending an RST TCP packet.
|
2019-10-18T20:14:19Z
| 1,981
|
100% CPU usage on client disconnection when serving a streamed response
I ran into an issue when load testing my server application based on Hyper 0.12.23. The servers provides very long (video) streams to clients. The problem is that sometimes when a client disconnects, the server ends up in an infinite loop trying to flush the response.
I have a load testing script written in Python that simulates behaviour of these clients. When I run the script directly from command line and stop the script using SIGINT, server load decreases and everything works as expected. However, when I run the script in a Docker container and then I stop the container, the server application suddenly starts consuming a lot of CPU time.
I managed to get the following stack trace:
```
#0 0x00007fd1148b0ffd in writev () at ../sysdeps/unix/syscall-template.S:84
#1 0x0000560974aad6b7 in mio::net::tcp::TcpStream::write_bufs::hc89b0d02dfa8ddfa ()
#2 0x00005609745ba2f5 in _$LT$tokio_tcp..stream..TcpStream$u20$as$u20$tokio_io..async_write..AsyncWrite$GT$::write_buf::h6c9ba9b717fe0dfb ()
#3 0x0000560974620419 in _$LT$hyper..proto..h1..io..Buffered$LT$T$C$$u20$B$GT$$GT$::flush::hf6e9c87ae11953e9 ()
#4 0x00005609746e6605 in _$LT$hyper..proto..h1..dispatch..Dispatcher$LT$D$C$$u20$Bs$C$$u20$I$C$$u20$T$GT$$GT$::poll_flush::h788fa5e1ea237e85 ()
#5 0x00005609746e4769 in _$LT$hyper..proto..h1..dispatch..Dispatcher$LT$D$C$$u20$Bs$C$$u20$I$C$$u20$T$GT$$GT$::poll_catch::hf5cbb1e929b70897 ()
#6 0x00005609746ac614 in _$LT$hyper..server..conn..upgrades..UpgradeableConnection$LT$I$C$$u20$S$C$$u20$E$GT$$u20$as$u20$futures..future..Future$GT$::poll::h6c066c4ab3ebbcc7 ()
#7 0x000056097460f214 in _$LT$hyper..server..conn..spawn_all..NewSvcTask$LT$I$C$$u20$N$C$$u20$S$C$$u20$E$C$$u20$W$GT$$u20$as$u20$futures..future..Future$GT$::poll::hb0caae453b560ddd ()
#8 0x0000560974abe582 in futures::task_impl::std::set::h752dbb2b3f4dcdbd ()
#9 0x0000560974ac29b6 in _$LT$futures..task_impl..Spawn$LT$T$GT$$GT$::poll_future_notify::h0240129271f3aef3 ()
```
It's the same for all busy worker threads.
In my opinion, running the load testing script in a Docker container and then stopping it leaves some connections half open and Hyper is probably not able to recognize this. I'm not sure if this can be related to issue #1716.
|
hyperium__hyper-1981
|
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -945,6 +945,7 @@ fn disable_keep_alive_post_request() {
#[test]
fn empty_parse_eof_does_not_return_error() {
+ let _ = pretty_env_logger::try_init();
let mut rt = Runtime::new().unwrap();
let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
let addr = listener.local_addr().unwrap();
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -983,13 +984,13 @@ fn nonempty_parse_eof_returns_error() {
}
#[test]
-fn socket_half_closed() {
+fn http1_allow_half_close() {
let _ = pretty_env_logger::try_init();
let mut rt = Runtime::new().unwrap();
let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
let addr = listener.local_addr().unwrap();
- thread::spawn(move || {
+ let t1 = thread::spawn(move || {
let mut tcp = connect(&addr);
tcp.write_all(b"GET / HTTP/1.1\r\n\r\n").unwrap();
tcp.shutdown(::std::net::Shutdown::Write).expect("SHDN_WR");
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1005,13 +1006,16 @@ fn socket_half_closed() {
.map(Option::unwrap)
.map_err(|_| unreachable!())
.and_then(|socket| {
- Http::new().serve_connection(socket, service_fn(|_| {
+ Http::new()
+ .http1_half_close(true)
+ .serve_connection(socket, service_fn(|_| {
tokio_timer::delay_for(Duration::from_millis(500))
.map(|_| Ok::<_, hyper::Error>(Response::new(Body::empty())))
}))
});
rt.block_on(fut).unwrap();
+ t1.join().expect("client thread");
}
#[test]
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1852,28 +1856,28 @@ impl tower_service::Service<Request<Body>> for TestService {
Ok(()).into()
}
- fn call(&mut self, req: Request<Body>) -> Self::Future {
- let tx1 = self.tx.clone();
- let tx2 = self.tx.clone();
+ fn call(&mut self, mut req: Request<Body>) -> Self::Future {
+ let tx = self.tx.clone();
let replies = self.reply.clone();
- req
- .into_body()
- .try_concat()
- .map_ok(move |chunk| {
- tx1.send(Msg::Chunk(chunk.to_vec())).unwrap();
- ()
- })
- .map(move |result| {
- let msg = match result {
- Ok(()) => Msg::End,
- Err(e) => Msg::Error(e),
- };
- tx2.send(msg).unwrap();
- })
- .map(move |_| {
- TestService::build_reply(replies)
- })
- .boxed()
+ hyper::rt::spawn(async move {
+ while let Some(chunk) = req.body_mut().next().await {
+ match chunk {
+ Ok(chunk) => {
+ tx.send(Msg::Chunk(chunk.to_vec())).unwrap();
+ },
+ Err(err) => {
+ tx.send(Msg::Error(err)).unwrap();
+ return;
+ },
+ }
+ }
+
+ tx.send(Msg::End).unwrap();
+ });
+
+ Box::pin(async move {
+ TestService::build_reply(replies)
+ })
}
}
|
hyperium/hyper
|
cc7d3058e84b63d26697026f44cdc153b208de9a
|
[
"1945"
] |
0.2
|
dc54ee199f2d19d65913d224b900a61ab3bf2415
|
diff --git a/Cargo.toml b/Cargo.toml
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -27,21 +27,21 @@ futures-util-preview = { version = "=0.3.0-alpha.18" }
http = "0.1.15"
http-body = "=0.2.0-alpha.1"
httparse = "1.0"
-h2 = "=0.2.0-alpha.1"
+h2 = "=0.2.0-alpha.2"
iovec = "0.1"
itoa = "0.4.1"
log = "0.4"
net2 = { version = "0.2.32", optional = true }
pin-project = { version = "=0.4.0-alpha.11", features = ["project_attr"] }
time = "0.1"
-tokio = { version = "=0.2.0-alpha.4", optional = true, default-features = false, features = ["rt-full"] }
+tokio = { version = "=0.2.0-alpha.5", optional = true, default-features = false, features = ["rt-full"] }
tower-service = "=0.3.0-alpha.1"
-tower-make = { version = "=0.1.0-alpha.2", features = ['io'] }
-tokio-executor = { version = "=0.2.0-alpha.4", features = ["blocking"] }
-tokio-io = "=0.2.0-alpha.4"
-tokio-sync = "=0.2.0-alpha.4"
-tokio-net = { version = "=0.2.0-alpha.4", optional = true, features = ["tcp"] }
-tokio-timer = { version = "=0.3.0-alpha.4", optional = true }
+tower-make = { version = "=0.3.0-alpha.2", features = ['io'] }
+tokio-executor = { version = "=0.2.0-alpha.5", features = ["blocking"] }
+tokio-io = "=0.2.0-alpha.5"
+tokio-sync = "=0.2.0-alpha.5"
+tokio-net = { version = "=0.2.0-alpha.5", optional = true, features = ["tcp"] }
+tokio-timer = { version = "=0.3.0-alpha.5", optional = true }
want = "0.3"
[dev-dependencies]
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -470,7 +470,7 @@ impl ConnectingTcp {
local_addr,
preferred: ConnectingTcpRemote::new(preferred_addrs),
fallback: Some(ConnectingTcpFallback {
- delay: tokio_timer::sleep(fallback_timeout),
+ delay: tokio_timer::delay_for(fallback_timeout),
remote: ConnectingTcpRemote::new(fallback_addrs),
}),
reuse_address,
diff --git a/src/server/tcp.rs b/src/server/tcp.rs
--- a/src/server/tcp.rs
+++ b/src/server/tcp.rs
@@ -135,7 +135,7 @@ impl AddrIncoming {
error!("accept error: {}", e);
// Sleep 1s.
- let mut timeout = tokio_timer::sleep(Duration::from_secs(1));
+ let mut timeout = tokio_timer::delay_for(Duration::from_secs(1));
match Pin::new(&mut timeout).poll(cx) {
Poll::Ready(()) => {
|
h2 update: https://github.com/hyperium/h2/pull/409
|
2019-09-22T04:13:15Z
| 1,947
|
Update to tokio alpha 5
- [ ] update h2
- [ ] update hyper
|
hyperium__hyper-1947
|
diff --git a/Cargo.toml b/Cargo.toml
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -52,9 +52,9 @@ spmc = "0.3"
serde = "1.0"
serde_derive = "1.0"
serde_json = "1.0"
-tokio = "=0.2.0-alpha.4" # using #[tokio::test] attributes
-tokio-fs = "=0.2.0-alpha.4"
-tokio-test = "=0.2.0-alpha.4"
+tokio = "=0.2.0-alpha.5" # using #[tokio::test] attributes
+tokio-fs = "=0.2.0-alpha.5"
+tokio-test = "=0.2.0-alpha.5"
url = "1.0"
[features]
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -974,7 +974,7 @@ mod dispatch_impl {
.unwrap();
let res = client.request(req).map_ok(move |res| {
assert_eq!(res.status(), hyper::StatusCode::OK);
- tokio_timer::sleep(Duration::from_secs(1))
+ tokio_timer::delay_for(Duration::from_secs(1))
});
let rx = rx1.expect("thread panicked");
rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap();
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1018,7 +1018,7 @@ mod dispatch_impl {
assert_eq!(res.status(), hyper::StatusCode::OK);
res.into_body().try_concat()
}).map_ok(|_| {
- tokio_timer::sleep(Duration::from_secs(1))
+ tokio_timer::delay_for(Duration::from_secs(1))
})
};
// client is dropped
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1082,7 +1082,7 @@ mod dispatch_impl {
}
drop(client);
- let t = tokio_timer::sleep(Duration::from_millis(100))
+ let t = tokio_timer::delay_for(Duration::from_millis(100))
.map(|_| panic!("time out"));
let close = closes
.into_future()
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1131,7 +1131,7 @@ mod dispatch_impl {
rt.block_on(future::select(res, rx1));
// res now dropped
- let t = tokio_timer::sleep(Duration::from_millis(100))
+ let t = tokio_timer::delay_for(Duration::from_millis(100))
.map(|_| panic!("time out"));
let close = closes
.into_future()
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1180,7 +1180,7 @@ mod dispatch_impl {
let rx = rx1.expect("thread panicked");
rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap();
- let t = tokio_timer::sleep(Duration::from_millis(100))
+ let t = tokio_timer::delay_for(Duration::from_millis(100))
.map(|_| panic!("time out"));
let close = closes
.into_future()
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1230,7 +1230,7 @@ mod dispatch_impl {
let rx = rx1.expect("thread panicked");
rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap();
- let t = tokio_timer::sleep(Duration::from_millis(100))
+ let t = tokio_timer::delay_for(Duration::from_millis(100))
.map(|_| panic!("time out"));
let close = closes
.into_future()
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1274,7 +1274,7 @@ mod dispatch_impl {
let rx = rx1.expect("thread panicked");
rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap();
- let t = tokio_timer::sleep(Duration::from_millis(100))
+ let t = tokio_timer::delay_for(Duration::from_millis(100))
.map(|_| panic!("time out"));
let close = closes
.into_future()
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1469,7 +1469,7 @@ mod dispatch_impl {
assert_eq!(connects.load(Ordering::Relaxed), 0);
let delayed_body = rx1
- .then(|_| tokio_timer::sleep(Duration::from_millis(200)))
+ .then(|_| tokio_timer::delay_for(Duration::from_millis(200)))
.map(|_| Ok::<_, ()>("hello a"))
.map_err(|_| -> hyper::Error { panic!("rx1") })
.into_stream();
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1484,7 +1484,7 @@ mod dispatch_impl {
// req 1
let fut = future::join(client.request(req), rx)
- .then(|_| tokio_timer::sleep(Duration::from_millis(200)))
+ .then(|_| tokio_timer::delay_for(Duration::from_millis(200)))
// req 2
.then(move |()| {
let rx = rx3.expect("thread panicked");
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1838,7 +1838,7 @@ mod conn {
res.into_body().try_concat()
});
let rx = rx1.expect("thread panicked");
- let rx = rx.then(|_| tokio_timer::sleep(Duration::from_millis(200)));
+ let rx = rx.then(|_| tokio_timer::delay_for(Duration::from_millis(200)));
rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap();
}
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1884,7 +1884,7 @@ mod conn {
});
let rx = rx1.expect("thread panicked");
- let rx = rx.then(|_| tokio_timer::sleep(Duration::from_millis(200)));
+ let rx = rx.then(|_| tokio_timer::delay_for(Duration::from_millis(200)));
let chunk = rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap();
assert_eq!(chunk.len(), 5);
}
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1976,7 +1976,7 @@ mod conn {
res.into_body().try_concat()
});
let rx = rx1.expect("thread panicked");
- let rx = rx.then(|_| tokio_timer::sleep(Duration::from_millis(200)));
+ let rx = rx.then(|_| tokio_timer::delay_for(Duration::from_millis(200)));
rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap();
}
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2020,7 +2020,7 @@ mod conn {
res.into_body().try_concat()
});
let rx = rx1.expect("thread panicked");
- let rx = rx.then(|_| tokio_timer::sleep(Duration::from_millis(200)));
+ let rx = rx.then(|_| tokio_timer::delay_for(Duration::from_millis(200)));
rt.block_on(future::join(res, rx).map(|r| r.0)).unwrap();
}
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2072,7 +2072,7 @@ mod conn {
});
let rx = rx1.expect("thread panicked");
- let rx = rx.then(|_| tokio_timer::sleep(Duration::from_millis(200)));
+ let rx = rx.then(|_| tokio_timer::delay_for(Duration::from_millis(200)));
rt.block_on(future::join3(res1, res2, rx).map(|r| r.0)).unwrap();
}
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2132,7 +2132,7 @@ mod conn {
});
let rx = rx1.expect("thread panicked");
- let rx = rx.then(|_| tokio_timer::sleep(Duration::from_millis(200)));
+ let rx = rx.then(|_| tokio_timer::delay_for(Duration::from_millis(200)));
rt.block_on(future::join3(until_upgrade, res, rx).map(|r| r.0)).unwrap();
// should not be ready now
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2220,7 +2220,7 @@ mod conn {
});
let rx = rx1.expect("thread panicked");
- let rx = rx.then(|_| tokio_timer::sleep(Duration::from_millis(200)));
+ let rx = rx.then(|_| tokio_timer::delay_for(Duration::from_millis(200)));
rt.block_on(future::join3(until_tunneled, res, rx).map(|r| r.0)).unwrap();
// should not be ready now
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -2296,7 +2296,7 @@ mod conn {
let _ = shdn_tx.send(());
// Allow time for graceful shutdown roundtrips...
- rt.block_on(tokio_timer::sleep(Duration::from_millis(100)));
+ rt.block_on(tokio_timer::delay_for(Duration::from_millis(100)));
// After graceful shutdown roundtrips, the client should be closed...
rt.block_on(future::poll_fn(|ctx| client.poll_ready(ctx))).expect_err("client should be closed");
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -937,7 +937,7 @@ fn disable_keep_alive_post_request() {
// the read-blocked socket.
//
// See https://github.com/carllerche/mio/issues/776
- let timeout = tokio_timer::sleep(Duration::from_millis(10));
+ let timeout = tokio_timer::delay_for(Duration::from_millis(10));
rt.block_on(timeout);
assert!(dropped.load());
child.join().unwrap();
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1006,7 +1006,7 @@ fn socket_half_closed() {
.map_err(|_| unreachable!())
.and_then(|socket| {
Http::new().serve_connection(socket, service_fn(|_| {
- tokio_timer::sleep(Duration::from_millis(500))
+ tokio_timer::delay_for(Duration::from_millis(500))
.map(|_| Ok::<_, hyper::Error>(Response::new(Body::empty())))
}))
});
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1034,7 +1034,7 @@ fn disconnect_after_reading_request_before_responding() {
Http::new()
.http1_half_close(false)
.serve_connection(socket, service_fn(|_| {
- tokio_timer::sleep(Duration::from_secs(2))
+ tokio_timer::delay_for(Duration::from_secs(2))
.map(|_| -> Result<Response<Body>, hyper::Error> {
panic!("response future should have been dropped");
})
|
hyperium/hyper
|
cc7d3058e84b63d26697026f44cdc153b208de9a
|
[
"1925"
] |
0.12
|
a1609fbb332fccffbeb85d16f1cc0bf98c6ede21
|
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -781,31 +781,44 @@ impl Client {
impl Client {
fn set_length(head: &mut RequestHead, body: Option<BodyLength>) -> Encoder {
- if let Some(body) = body {
- let can_chunked = head.version == Version::HTTP_11
- && (head.subject.0 != Method::HEAD)
- && (head.subject.0 != Method::GET)
- && (head.subject.0 != Method::CONNECT);
- set_length(&mut head.headers, body, can_chunked)
+ let body = if let Some(body) = body {
+ body
} else {
head.headers.remove(header::TRANSFER_ENCODING);
- Encoder::length(0)
- }
- }
-}
+ return Encoder::length(0)
+ };
+
+ // HTTP/1.0 doesn't know about chunked
+ let can_chunked = head.version == Version::HTTP_11;
+ let headers = &mut head.headers;
+
+ // If the user already set specific headers, we should respect them, regardless
+ // of what the Payload knows about itself. They set them for a reason.
-fn set_length(headers: &mut HeaderMap, body: BodyLength, can_chunked: bool) -> Encoder {
- // If the user already set specific headers, we should respect them, regardless
- // of what the Payload knows about itself. They set them for a reason.
+ // Because of the borrow checker, we can't check the for an existing
+ // Content-Length header while holding an `Entry` for the Transfer-Encoding
+ // header, so unfortunately, we must do the check here, first.
- // Because of the borrow checker, we can't check the for an existing
- // Content-Length header while holding an `Entry` for the Transfer-Encoding
- // header, so unfortunately, we must do the check here, first.
+ let existing_con_len = headers::content_length_parse_all(headers);
+ let mut should_remove_con_len = false;
- let existing_con_len = headers::content_length_parse_all(headers);
- let mut should_remove_con_len = false;
+ if !can_chunked {
+ // Chunked isn't legal, so if it is set, we need to remove it.
+ if headers.remove(header::TRANSFER_ENCODING).is_some() {
+ trace!("removing illegal transfer-encoding header");
+ }
+
+ return if let Some(len) = existing_con_len {
+ Encoder::length(len)
+ } else if let BodyLength::Known(len) = body {
+ set_content_length(headers, len)
+ } else {
+ // HTTP/1.0 client requests without a content-length
+ // cannot have any body at all.
+ Encoder::length(0)
+ };
+ }
- if can_chunked {
// If the user set a transfer-encoding, respect that. Let's just
// make sure `chunked` is the final encoding.
let encoder = match headers.entry(header::TRANSFER_ENCODING)
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -841,9 +854,22 @@ fn set_length(headers: &mut HeaderMap, body: BodyLength, can_chunked: bool) -> E
if let Some(len) = existing_con_len {
Some(Encoder::length(len))
} else if let BodyLength::Unknown = body {
- should_remove_con_len = true;
- te.insert(HeaderValue::from_static("chunked"));
- Some(Encoder::chunked())
+ // GET, HEAD, and CONNECT almost never have bodies.
+ //
+ // So instead of sending a "chunked" body with a 0-chunk,
+ // assume no body here. If you *must* send a body,
+ // set the headers explicitly.
+ match head.subject.0 {
+ Method::GET |
+ Method::HEAD |
+ Method::CONNECT => {
+ Some(Encoder::length(0))
+ },
+ _ => {
+ te.insert(HeaderValue::from_static("chunked"));
+ Some(Encoder::chunked())
+ },
+ }
} else {
None
}
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -869,27 +895,6 @@ fn set_length(headers: &mut HeaderMap, body: BodyLength, can_chunked: bool) -> E
};
set_content_length(headers, len)
- } else {
- // Chunked isn't legal, so if it is set, we need to remove it.
- // Also, if it *is* set, then we shouldn't replace with a length,
- // since the user tried to imply there isn't a length.
- let encoder = if headers.remove(header::TRANSFER_ENCODING).is_some() {
- trace!("removing illegal transfer-encoding header");
- should_remove_con_len = true;
- Encoder::close_delimited()
- } else if let Some(len) = existing_con_len {
- Encoder::length(len)
- } else if let BodyLength::Known(len) = body {
- set_content_length(headers, len)
- } else {
- Encoder::close_delimited()
- };
-
- if should_remove_con_len && existing_con_len.is_some() {
- headers.remove(header::CONTENT_LENGTH);
- }
-
- encoder
}
}
|
2019-09-04T21:33:38Z
| 1,926
|
Client GET requests with transfer-encoding are wrongly stripped
The client wrongly strips `transfer-encoding: chunked` from GET requests, thinking that GET requests shouldn't have payloads. However, [that's not explicitly true](https://tools.ietf.org/html/rfc7231#section-4.3.1):
> A payload within a GET request message has no defined semantics;
> sending a payload body on a GET request might cause some existing
> implementations to reject the request.
The original implementation was trying to protect from empty `Body::wrap_stream(some_empty_stream)`s automatically adding `transfer-encoding: chunked` to a GET request.
**The fix** should probably still protect against that, but if the `transfer-encoding` header has been explicitly set on the `Request`, it should be forwarded as-is.
|
hyperium__hyper-1926
|
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -356,7 +356,7 @@ test! {
}
test! {
- name: client_get_implicitly_empty,
+ name: client_get_req_body_implicitly_empty,
server:
expected: "GET / HTTP/1.1\r\nhost: {addr}\r\n\r\n",
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -370,9 +370,153 @@ test! {
},
response:
status: OK,
+ headers: {},
+ body: None,
+}
+
+test! {
+ name: client_get_req_body_chunked,
+
+ server:
+ expected: "\
+ GET / HTTP/1.1\r\n\
+ transfer-encoding: chunked\r\n\
+ host: {addr}\r\n\
+ \r\n\
+ 5\r\n\
+ hello\r\n\
+ 0\r\n\r\n\
+ ",
+ reply: REPLY_OK,
+
+ client:
+ request: {
+ method: GET,
+ url: "http://{addr}/",
headers: {
- "Content-Length" => "0",
+ "transfer-encoding" => "chunked",
+ },
+ body: "hello", // not Body::empty
+ },
+ response:
+ status: OK,
+ headers: {},
+ body: None,
+}
+
+test! {
+ name: client_get_req_body_chunked_http10,
+
+ server:
+ expected: "\
+ GET / HTTP/1.0\r\n\
+ host: {addr}\r\n\
+ content-length: 5\r\n\
+ \r\n\
+ hello\
+ ",
+ reply: "HTTP/1.0 200 OK\r\ncontent-length: 0\r\n\r\n",
+
+ client:
+ request: {
+ method: GET,
+ url: "http://{addr}/",
+ headers: {
+ "transfer-encoding" => "chunked",
+ },
+ version: HTTP_10,
+ body: "hello",
+ },
+ response:
+ status: OK,
+ headers: {},
+ body: None,
+}
+
+test! {
+ name: client_get_req_body_sized,
+
+ server:
+ expected: "\
+ GET / HTTP/1.1\r\n\
+ content-length: 5\r\n\
+ host: {addr}\r\n\
+ \r\n\
+ hello\
+ ",
+ reply: REPLY_OK,
+
+ client:
+ request: {
+ method: GET,
+ url: "http://{addr}/",
+ headers: {
+ "Content-Length" => "5",
+ },
+ body: (Body::wrap_stream(Body::from("hello"))),
+ },
+ response:
+ status: OK,
+ headers: {},
+ body: None,
+}
+
+test! {
+ name: client_get_req_body_unknown,
+
+ server:
+ expected: "\
+ GET / HTTP/1.1\r\n\
+ host: {addr}\r\n\
+ \r\n\
+ ",
+ reply: REPLY_OK,
+
+ client:
+ request: {
+ method: GET,
+ url: "http://{addr}/",
+ // wrap_steam means we don't know the content-length,
+ // but we're wrapping a non-empty stream.
+ //
+ // But since the headers cannot tell us, and the method typically
+ // doesn't have a body, the body must be ignored.
+ body: (Body::wrap_stream(Body::from("hello"))),
+ },
+ response:
+ status: OK,
+ headers: {},
+ body: None,
+}
+
+test! {
+ name: client_get_req_body_unknown_http10,
+
+ server:
+ expected: "\
+ GET / HTTP/1.0\r\n\
+ host: {addr}\r\n\
+ \r\n\
+ ",
+ reply: "HTTP/1.0 200 OK\r\ncontent-length: 0\r\n\r\n",
+
+ client:
+ request: {
+ method: GET,
+ url: "http://{addr}/",
+ headers: {
+ "transfer-encoding" => "chunked",
},
+ version: HTTP_10,
+ // wrap_steam means we don't know the content-length,
+ // but we're wrapping a non-empty stream.
+ //
+ // But since the headers cannot tell us, the body must be ignored.
+ body: (Body::wrap_stream(Body::from("hello"))),
+ },
+ response:
+ status: OK,
+ headers: {},
body: None,
}
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -434,6 +578,33 @@ test! {
body: None,
}
+test! {
+ name: client_post_unknown,
+
+ server:
+ expected: "\
+ POST /chunks HTTP/1.1\r\n\
+ host: {addr}\r\n\
+ transfer-encoding: chunked\r\n\
+ \r\n\
+ B\r\n\
+ foo bar baz\r\n\
+ 0\r\n\r\n\
+ ",
+ reply: REPLY_OK,
+
+ client:
+ request: {
+ method: POST,
+ url: "http://{addr}/chunks",
+ body: (Body::wrap_stream(Body::from("foo bar baz"))),
+ },
+ response:
+ status: OK,
+ headers: {},
+ body: None,
+}
+
test! {
name: client_post_empty,
|
hyperium/hyper
|
a1609fbb332fccffbeb85d16f1cc0bf98c6ede21
|
|
[
"1902"
] |
0.2
|
4f2743991c227836c3886778512afe1297df3e5b
|
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -3,6 +3,7 @@ use std::error::Error as StdError;
use std::io;
use std::mem;
use std::net::{IpAddr, SocketAddr};
+use std::sync::Arc;
use std::time::Duration;
use http::uri::{Scheme, Uri};
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -13,7 +14,7 @@ use tokio_net::tcp::TcpStream;
use tokio_timer::{Delay, Timeout};
use crate::common::{Future, Pin, Poll, task};
-use super::{Connect, Connected, Destination};
+use super::{Connected, Destination};
use super::dns::{self, GaiResolver, Resolve};
#[cfg(feature = "runtime")] use super::dns::TokioThreadpoolGaiResolver;
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -30,17 +31,8 @@ type ConnectFuture = Pin<Box<dyn Future<Output = io::Result<TcpStream>> + Send>>
/// transport information such as the remote socket address used.
#[derive(Clone)]
pub struct HttpConnector<R = GaiResolver> {
- enforce_http: bool,
- handle: Option<Handle>,
- connect_timeout: Option<Duration>,
- happy_eyeballs_timeout: Option<Duration>,
- keep_alive_timeout: Option<Duration>,
- local_address: Option<IpAddr>,
- nodelay: bool,
+ config: Arc<Config>,
resolver: R,
- reuse_address: bool,
- send_buffer_size: Option<usize>,
- recv_buffer_size: Option<usize>,
}
/// Extra information about the transport when an HttpConnector is used.
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -76,6 +68,22 @@ pub struct HttpInfo {
remote_addr: SocketAddr,
}
+#[derive(Clone)]
+struct Config {
+ connect_timeout: Option<Duration>,
+ enforce_http: bool,
+ handle: Option<Handle>,
+ happy_eyeballs_timeout: Option<Duration>,
+ keep_alive_timeout: Option<Duration>,
+ local_address: Option<IpAddr>,
+ nodelay: bool,
+ reuse_address: bool,
+ send_buffer_size: Option<usize>,
+ recv_buffer_size: Option<usize>,
+}
+
+// ===== impl HttpConnector =====
+
impl HttpConnector {
/// Construct a new HttpConnector.
pub fn new() -> HttpConnector {
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -100,17 +108,19 @@ impl<R> HttpConnector<R> {
/// Takes a `Resolve` to handle DNS lookups.
pub fn new_with_resolver(resolver: R) -> HttpConnector<R> {
HttpConnector {
- enforce_http: true,
- handle: None,
- connect_timeout: None,
- happy_eyeballs_timeout: Some(Duration::from_millis(300)),
- keep_alive_timeout: None,
- local_address: None,
- nodelay: false,
+ config: Arc::new(Config {
+ connect_timeout: None,
+ enforce_http: true,
+ handle: None,
+ happy_eyeballs_timeout: Some(Duration::from_millis(300)),
+ keep_alive_timeout: None,
+ local_address: None,
+ nodelay: false,
+ reuse_address: false,
+ send_buffer_size: None,
+ recv_buffer_size: None,
+ }),
resolver,
- reuse_address: false,
- send_buffer_size: None,
- recv_buffer_size: None,
}
}
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -119,7 +129,7 @@ impl<R> HttpConnector<R> {
/// Enabled by default.
#[inline]
pub fn enforce_http(&mut self, is_enforced: bool) {
- self.enforce_http = is_enforced;
+ self.config_mut().enforce_http = is_enforced;
}
/// Set a handle to a `Reactor` to register connections to.
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -127,7 +137,7 @@ impl<R> HttpConnector<R> {
/// If `None`, the implicit default reactor will be used.
#[inline]
pub fn set_reactor(&mut self, handle: Option<Handle>) {
- self.handle = handle;
+ self.config_mut().handle = handle;
}
/// Set that all sockets have `SO_KEEPALIVE` set with the supplied duration.
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -137,7 +147,7 @@ impl<R> HttpConnector<R> {
/// Default is `None`.
#[inline]
pub fn set_keepalive(&mut self, dur: Option<Duration>) {
- self.keep_alive_timeout = dur;
+ self.config_mut().keep_alive_timeout = dur;
}
/// Set that all sockets have `SO_NODELAY` set to the supplied value `nodelay`.
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -145,19 +155,19 @@ impl<R> HttpConnector<R> {
/// Default is `false`.
#[inline]
pub fn set_nodelay(&mut self, nodelay: bool) {
- self.nodelay = nodelay;
+ self.config_mut().nodelay = nodelay;
}
/// Sets the value of the SO_SNDBUF option on the socket.
#[inline]
pub fn set_send_buffer_size(&mut self, size: Option<usize>) {
- self.send_buffer_size = size;
+ self.config_mut().send_buffer_size = size;
}
/// Sets the value of the SO_RCVBUF option on the socket.
#[inline]
pub fn set_recv_buffer_size(&mut self, size: Option<usize>) {
- self.recv_buffer_size = size;
+ self.config_mut().recv_buffer_size = size;
}
/// Set that all sockets are bound to the configured address before connection.
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -167,7 +177,7 @@ impl<R> HttpConnector<R> {
/// Default is `None`.
#[inline]
pub fn set_local_address(&mut self, addr: Option<IpAddr>) {
- self.local_address = addr;
+ self.config_mut().local_address = addr;
}
/// Set the connect timeout.
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -178,7 +188,7 @@ impl<R> HttpConnector<R> {
/// Default is `None`.
#[inline]
pub fn set_connect_timeout(&mut self, dur: Option<Duration>) {
- self.connect_timeout = dur;
+ self.config_mut().connect_timeout = dur;
}
/// Set timeout for [RFC 6555 (Happy Eyeballs)][RFC 6555] algorithm.
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -195,7 +205,7 @@ impl<R> HttpConnector<R> {
/// [RFC 6555]: https://tools.ietf.org/html/rfc6555
#[inline]
pub fn set_happy_eyeballs_timeout(&mut self, dur: Option<Duration>) {
- self.happy_eyeballs_timeout = dur;
+ self.config_mut().happy_eyeballs_timeout = dur;
}
/// Set that all socket have `SO_REUSEADDR` set to the supplied value `reuse_address`.
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -203,9 +213,18 @@ impl<R> HttpConnector<R> {
/// Default is `false`.
#[inline]
pub fn set_reuse_address(&mut self, reuse_address: bool) -> &mut Self {
- self.reuse_address = reuse_address;
+ self.config_mut().reuse_address = reuse_address;
self
}
+
+ // private
+
+ fn config_mut(&mut self) -> &mut Config {
+ // If the are HttpConnector clones, this will clone the inner
+ // config. So mutating the config won't ever affect previous
+ // clones.
+ Arc::make_mut(&mut self.config)
+ }
}
// R: Debug required for now to allow adding it to debug output later...
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -216,16 +235,24 @@ impl<R: fmt::Debug> fmt::Debug for HttpConnector<R> {
}
}
-impl<R> Connect for HttpConnector<R>
+impl<R> tower_service::Service<Destination> for HttpConnector<R>
where
R: Resolve + Clone + Send + Sync,
R::Future: Send,
{
- type Transport = TcpStream;
+ type Response = (TcpStream, Connected);
type Error = io::Error;
type Future = HttpConnecting<R>;
- fn connect(&self, dst: Destination) -> Self::Future {
+ fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>> {
+ // For now, always ready.
+ // TODO: When `Resolve` becomes an alias for `Service`, check
+ // the resolver's readiness.
+ drop(cx);
+ Poll::Ready(Ok(()))
+ }
+
+ fn call(&mut self, dst: Destination) -> Self::Future {
trace!(
"Http::connect; scheme={}, host={}, port={:?}",
dst.scheme(),
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -233,17 +260,17 @@ where
dst.port(),
);
- if self.enforce_http {
+ if self.config.enforce_http {
if dst.uri.scheme_part() != Some(&Scheme::HTTP) {
- return invalid_url(InvalidUrl::NotHttp, &self.handle);
+ return invalid_url(InvalidUrl::NotHttp, &self.config.handle);
}
} else if dst.uri.scheme_part().is_none() {
- return invalid_url(InvalidUrl::MissingScheme, &self.handle);
+ return invalid_url(InvalidUrl::MissingScheme, &self.config.handle);
}
let host = match dst.uri.host() {
Some(s) => s,
- None => return invalid_url(InvalidUrl::MissingAuthority, &self.handle),
+ None => return invalid_url(InvalidUrl::MissingAuthority, &self.config.handle),
};
let port = match dst.uri.port_part() {
Some(port) => port.as_u16(),
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -251,16 +278,16 @@ where
};
HttpConnecting {
- state: State::Lazy(self.resolver.clone(), host.into(), self.local_address),
- handle: self.handle.clone(),
- connect_timeout: self.connect_timeout,
- happy_eyeballs_timeout: self.happy_eyeballs_timeout,
- keep_alive_timeout: self.keep_alive_timeout,
- nodelay: self.nodelay,
+ state: State::Lazy(self.resolver.clone(), host.into(), self.config.local_address),
+ handle: self.config.handle.clone(),
+ connect_timeout: self.config.connect_timeout,
+ happy_eyeballs_timeout: self.config.happy_eyeballs_timeout,
+ keep_alive_timeout: self.config.keep_alive_timeout,
+ nodelay: self.config.nodelay,
port,
- reuse_address: self.reuse_address,
- send_buffer_size: self.send_buffer_size,
- recv_buffer_size: self.recv_buffer_size,
+ reuse_address: self.config.reuse_address,
+ send_buffer_size: self.config.send_buffer_size,
+ recv_buffer_size: self.config.recv_buffer_size,
}
}
}
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -289,17 +316,17 @@ where
dst.port(),
);
- if self.enforce_http {
+ if self.config.enforce_http {
if dst.uri.scheme_part() != Some(&Scheme::HTTP) {
- return invalid_url::<R>(InvalidUrl::NotHttp, &self.handle).map_ok(|(s, _)| s).boxed();
+ return invalid_url::<R>(InvalidUrl::NotHttp, &self.config.handle).map_ok(|(s, _)| s).boxed();
}
} else if dst.uri.scheme_part().is_none() {
- return invalid_url::<R>(InvalidUrl::MissingScheme, &self.handle).map_ok(|(s, _)| s).boxed();
+ return invalid_url::<R>(InvalidUrl::MissingScheme, &self.config.handle).map_ok(|(s, _)| s).boxed();
}
let host = match dst.uri.host() {
Some(s) => s,
- None => return invalid_url::<R>(InvalidUrl::MissingAuthority, &self.handle).map_ok(|(s, _)| s).boxed(),
+ None => return invalid_url::<R>(InvalidUrl::MissingAuthority, &self.config.handle).map_ok(|(s, _)| s).boxed(),
};
let port = match dst.uri.port_part() {
Some(port) => port.as_u16(),
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -307,16 +334,16 @@ where
};
let fut = HttpConnecting {
- state: State::Lazy(self.resolver.clone(), host.into(), self.local_address),
- handle: self.handle.clone(),
- connect_timeout: self.connect_timeout,
- happy_eyeballs_timeout: self.happy_eyeballs_timeout,
- keep_alive_timeout: self.keep_alive_timeout,
- nodelay: self.nodelay,
+ state: State::Lazy(self.resolver.clone(), host.into(), self.config.local_address),
+ handle: self.config.handle.clone(),
+ connect_timeout: self.config.connect_timeout,
+ happy_eyeballs_timeout: self.config.happy_eyeballs_timeout,
+ keep_alive_timeout: self.config.keep_alive_timeout,
+ nodelay: self.config.nodelay,
port,
- reuse_address: self.reuse_address,
- send_buffer_size: self.send_buffer_size,
- recv_buffer_size: self.recv_buffer_size,
+ reuse_address: self.config.reuse_address,
+ send_buffer_size: self.config.send_buffer_size,
+ recv_buffer_size: self.config.recv_buffer_size,
};
fut.map_ok(|(s, _)| s).boxed()
diff --git a/src/client/connect/mod.rs b/src/client/connect/mod.rs
--- a/src/client/connect/mod.rs
+++ b/src/client/connect/mod.rs
@@ -1,52 +1,23 @@
-//! The `Connect` trait, and supporting types.
+//! Connectors used by the `Client`.
//!
//! This module contains:
//!
//! - A default [`HttpConnector`](HttpConnector) that does DNS resolution and
//! establishes connections over TCP.
-//! - The [`Connect`](Connect) trait and related types to build custom connectors.
+//! - Types to build custom connectors.
use std::convert::TryFrom;
-use std::error::Error as StdError;
use std::{fmt, mem};
use bytes::{BufMut, Bytes, BytesMut};
use ::http::{uri, Response, Uri};
-use tokio_io::{AsyncRead, AsyncWrite};
-
-use crate::common::{Future, Unpin};
#[cfg(feature = "tcp")] pub mod dns;
#[cfg(feature = "tcp")] mod http;
#[cfg(feature = "tcp")] pub use self::http::{HttpConnector, HttpInfo};
-/// Connect to a destination, returning an IO transport.
-///
-/// A connector receives a [`Destination`](Destination) describing how a
-/// connection should be estabilished, and returns a `Future` of the
-/// ready connection.
-pub trait Connect: Send + Sync {
- /// The connected IO Stream.
- type Transport: AsyncRead + AsyncWrite + Unpin + Send + 'static;
- /// An error occured when trying to connect.
- type Error: Into<Box<dyn StdError + Send + Sync>>;
- /// A Future that will resolve to the connected Transport.
- type Future: Future<Output=Result<(Self::Transport, Connected), Self::Error>> + Unpin + Send;
- /// Connect to a destination.
- fn connect(&self, dst: Destination) -> Self::Future;
-}
-
-impl<T: Connect + ?Sized> Connect for Box<T> {
- type Transport = <T as Connect>::Transport;
- type Error = <T as Connect>::Error;
- type Future = <T as Connect>::Future;
- fn connect(&self, dst: Destination) -> Self::Future {
- <T as Connect>::connect(self, dst)
- }
-}
-
/// A set of properties to describe where and how to try to connect.
///
-/// This type is passed an argument for the [`Connect`](Connect) trait.
+/// This type is passed an argument to connectors.
#[derive(Clone, Debug)]
pub struct Destination {
pub(super) uri: Uri,
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -72,7 +72,7 @@ use http::uri::Scheme;
use crate::body::{Body, Payload};
use crate::common::{lazy as hyper_lazy, Lazy, Future, Pin, Poll, task};
-use self::connect::{Alpn, Connect, Connected, Destination};
+use self::connect::{Alpn, sealed::Connect, Connected, Destination};
use self::pool::{Key as PoolKey, Pool, Poolable, Pooled, Reservation};
#[cfg(feature = "tcp")] pub use self::connect::HttpConnector;
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -158,9 +158,9 @@ impl Client<(), Body> {
}
impl<C, B> Client<C, B>
-where C: Connect + Sync + 'static,
- C::Transport: 'static,
- C::Future: 'static,
+where C: Connect + Clone + Send + Sync + 'static,
+ C::Transport: Unpin + Send + 'static,
+ C::Future: Unpin + Send + 'static,
B: Payload + Unpin + Send + 'static,
B::Data: Send + Unpin,
{
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -486,7 +486,7 @@ where C: Connect + Sync + 'static,
return Either::Right(future::err(canceled));
}
};
- Either::Left(connector.connect(dst)
+ Either::Left(connector.connect(connect::sealed::Internal, dst)
.map_err(crate::Error::new_connect)
.and_then(move |(io, connected)| {
// If ALPN is h2 and we aren't http2_only already,
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -544,7 +544,7 @@ where C: Connect + Sync + 'static,
}
}
-impl<C, B> Clone for Client<C, B> {
+impl<C: Clone, B> Clone for Client<C, B> {
fn clone(&self) -> Client<C, B> {
Client {
config: self.config.clone(),
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -1038,16 +1038,14 @@ impl Builder {
/// Combine the configuration of this builder with a connector to create a `Client`.
pub fn build<C, B>(&self, connector: C) -> Client<C, B>
where
- C: Connect,
- C::Transport: 'static,
- C::Future: 'static,
+ C: Connect + Clone,
B: Payload + Send,
B::Data: Send,
{
Client {
config: self.client_config,
conn_builder: self.conn_builder.clone(),
- connector: Arc::new(connector),
+ connector,
pool: Pool::new(self.pool_config, &self.conn_builder.exec),
}
}
diff --git a/src/service/mod.rs b/src/service/mod.rs
--- a/src/service/mod.rs
+++ b/src/service/mod.rs
@@ -38,10 +38,13 @@ pub use tower_service::Service;
mod http;
mod make;
+mod oneshot;
mod util;
pub(crate) use self::make::{MakeConnection, MakeServiceRef};
pub(crate) use self::http::HttpService;
+pub(crate) use self::oneshot::{oneshot, Oneshot};
pub use self::make::make_service_fn;
pub use self::util::service_fn;
+
diff --git /dev/null b/src/service/oneshot.rs
new file mode 100644
--- /dev/null
+++ b/src/service/oneshot.rs
@@ -0,0 +1,70 @@
+// TODO: Eventually to be replaced with tower_util::Oneshot.
+
+use std::mem;
+use std::marker::Unpin;
+
+use tower_service::Service;
+
+use crate::common::{Future, Pin, Poll, task};
+
+pub(crate) fn oneshot<S, Req>(svc: S, req: Req) -> Oneshot<S, Req>
+where
+ S: Service<Req>,
+{
+ Oneshot {
+ state: State::NotReady(svc, req),
+ }
+}
+
+// A `Future` consuming a `Service` and request, waiting until the `Service`
+// is ready, and then calling `Service::call` with the request, and
+// waiting for that `Future`.
+#[allow(missing_debug_implementations)]
+pub struct Oneshot<S: Service<Req>, Req> {
+ state: State<S, Req>,
+}
+
+enum State<S: Service<Req>, Req> {
+ NotReady(S, Req),
+ Called(S::Future),
+ Tmp,
+}
+
+// Unpin is projected to S::Future, but never S.
+impl<S, Req> Unpin for Oneshot<S, Req>
+where
+ S: Service<Req>,
+ S::Future: Unpin,
+{}
+
+impl<S, Req> Future for Oneshot<S, Req>
+where
+ S: Service<Req>,
+{
+ type Output = Result<S::Response, S::Error>;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
+ // Safety: The service's future is never moved once we get one.
+ let mut me = unsafe { Pin::get_unchecked_mut(self) };
+
+ loop {
+ match me.state {
+ State::NotReady(ref mut svc, _) => {
+ ready!(svc.poll_ready(cx))?;
+ // fallthrough out of the match's borrow
+ },
+ State::Called(ref mut fut) => {
+ return unsafe { Pin::new_unchecked(fut) }.poll(cx);
+ },
+ State::Tmp => unreachable!(),
+ }
+
+ match mem::replace(&mut me.state, State::Tmp) {
+ State::NotReady(mut svc, req) => {
+ me.state = State::Called(svc.call(req));
+ },
+ _ => unreachable!(),
+ }
+ }
+ }
+}
|
I just remember there is also `MakeConnection` in tower, perhaps that can just be used instead. One difference is that hyper's `Connect` currently returns a `Connected` metadata type.
@seanmonstar I totally missed this issue! But we should drop the connected struct and use `http_connection::HttpConnection` trait. Which pretty much does the same but allows us to be a bit more flexible. I'm honestly in the boat that we should just drop Connect or have it be what https://github.com/tower-rs/tower-http/blob/master/tower-http-util/src/connection.rs#L15 was.
|
2019-08-21T22:19:24Z
| 1,912
|
Change `Connect` trait to alias `Service`
After #1782 is done, the `Connect` trait should be changed to basically an alias for a `Service<Destination>`. This would allow more composability, being able to use `ServiceExt` to customize connectors.
|
hyperium__hyper-1912
|
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -671,7 +698,15 @@ mod tests {
use tokio::runtime::current_thread::Runtime;
use tokio_net::driver::Handle;
- use super::{Connect, Destination, HttpConnector};
+ use super::{Connected, Destination, HttpConnector};
+ use super::super::sealed::Connect;
+
+ async fn connect<C>(connector: C, dst: Destination) -> Result<(C::Transport, Connected), C::Error>
+ where
+ C: Connect,
+ {
+ connector.connect(super::super::sealed::Internal, dst).await
+ }
#[test]
fn test_errors_missing_authority() {
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -684,7 +719,7 @@ mod tests {
rt.block_on(async {
assert_eq!(
- connector.connect(dst).await.unwrap_err().kind(),
+ connect(connector, dst).await.unwrap_err().kind(),
io::ErrorKind::InvalidInput,
);
})
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -701,7 +736,7 @@ mod tests {
rt.block_on(async {
assert_eq!(
- connector.connect(dst).await.unwrap_err().kind(),
+ connect(connector, dst).await.unwrap_err().kind(),
io::ErrorKind::InvalidInput,
);
})
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -718,7 +753,7 @@ mod tests {
rt.block_on(async {
assert_eq!(
- connector.connect(dst).await.unwrap_err().kind(),
+ connect(connector, dst).await.unwrap_err().kind(),
io::ErrorKind::InvalidInput,
);
});
diff --git a/src/client/connect/mod.rs b/src/client/connect/mod.rs
--- a/src/client/connect/mod.rs
+++ b/src/client/connect/mod.rs
@@ -398,6 +369,66 @@ where
}
}
+pub(super) mod sealed {
+ use std::error::Error as StdError;
+
+ use tokio_io::{AsyncRead, AsyncWrite};
+
+ use crate::common::{Future, Unpin};
+ use super::{Connected, Destination};
+
+ /// Connect to a destination, returning an IO transport.
+ ///
+ /// A connector receives a [`Destination`](Destination) describing how a
+ /// connection should be estabilished, and returns a `Future` of the
+ /// ready connection.
+ ///
+ /// # Trait Alias
+ ///
+ /// This is really just an *alias* for the `tower::Service` trait, with
+ /// additional bounds set for convenience *inside* hyper. You don't actually
+ /// implement this trait, but `tower::Service<Destination>` instead.
+ // The `Sized` bound is to prevent creating `dyn Connect`, since they cannot
+ // fit the `Connect` bounds because of the blanket impl for `Service`.
+ pub trait Connect: Sealed + Sized {
+ /// The connected IO Stream.
+ type Transport: AsyncRead + AsyncWrite;
+ /// An error occured when trying to connect.
+ type Error: Into<Box<dyn StdError + Send + Sync>>;
+ /// A Future that will resolve to the connected Transport.
+ type Future: Future<Output=Result<(Self::Transport, Connected), Self::Error>>;
+ #[doc(hidden)]
+ fn connect(self, internal_only: Internal, dst: Destination) -> Self::Future;
+ }
+
+ impl<S, T> Connect for S
+ where
+ S: tower_service::Service<Destination, Response=(T, Connected)> + Send,
+ S::Error: Into<Box<dyn StdError + Send + Sync>>,
+ S::Future: Unpin + Send,
+ T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
+ {
+ type Transport = T;
+ type Error = S::Error;
+ type Future = crate::service::Oneshot<S, Destination>;
+ fn connect(self, _: Internal, dst: Destination) -> Self::Future {
+ crate::service::oneshot(self, dst)
+ }
+ }
+
+ impl<S, T> Sealed for S
+ where
+ S: tower_service::Service<Destination, Response=(T, Connected)> + Send,
+ S::Error: Into<Box<dyn StdError + Send + Sync>>,
+ S::Future: Unpin + Send,
+ T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
+ {}
+
+ pub trait Sealed {}
+ #[allow(missing_debug_implementations)]
+ pub struct Internal;
+}
+
#[cfg(test)]
mod tests {
use super::{Connected, Destination, TryFrom};
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -89,7 +89,7 @@ mod tests;
pub struct Client<C, B = Body> {
config: Config,
conn_builder: conn::Builder,
- connector: Arc<C>,
+ connector: C,
pool: Pool<PoolClient<B>>,
}
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -939,7 +939,7 @@ mod dispatch_impl {
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_net::tcp::TcpStream;
- use hyper::client::connect::{Connect, Connected, Destination, HttpConnector};
+ use hyper::client::connect::{Connected, Destination, HttpConnector};
use hyper::Client;
#[test]
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1688,6 +1688,7 @@ mod dispatch_impl {
}
+ #[derive(Clone)]
struct DebugConnector {
http: HttpConnector,
closes: mpsc::Sender<()>,
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1719,19 +1720,24 @@ mod dispatch_impl {
}
}
- impl Connect for DebugConnector {
- type Transport = DebugStream;
+ impl hyper::service::Service<Destination> for DebugConnector {
+ type Response = (DebugStream, Connected);
type Error = io::Error;
type Future = Pin<Box<dyn Future<
- Output = Result<(DebugStream, Connected), io::Error>
+ Output = Result<Self::Response, Self::Error>
> + Send>>;
- fn connect(&self, dst: Destination) -> Self::Future {
+ fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+ // don't forget to check inner service is ready :)
+ hyper::service::Service::<Destination>::poll_ready(&mut self.http, cx)
+ }
+
+ fn call(&mut self, dst: Destination) -> Self::Future {
self.connects.fetch_add(1, Ordering::SeqCst);
let closes = self.closes.clone();
let is_proxy = self.is_proxy;
let is_alpn_h2 = self.alpn_h2;
- Box::pin(self.http.connect(dst).map_ok(move |(s, mut c)| {
+ Box::pin(self.http.call(dst).map_ok(move |(s, mut c)| {
if is_alpn_h2 {
c = c.negotiated_h2();
}
|
hyperium/hyper
|
cc7d3058e84b63d26697026f44cdc153b208de9a
|
[
"1851"
] |
0.2
|
4920f5e264c57f87dc8b91e7ed9a575359cc093c
|
diff --git a/Cargo.toml b/Cargo.toml
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -29,7 +29,7 @@ futures-util-preview = { version = "0.3.0-alpha.17" }
http = "0.1.15"
http-body = "0.1"
httparse = "1.0"
-h2 = "0.1.10"
+h2 = { git = "https://github.com/hyperium/h2" }
iovec = "0.1"
itoa = "0.4.1"
log = "0.4"
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -269,25 +269,17 @@ impl Body {
}
None => Poll::Ready(None),
}
- }
+ },
Kind::H2 {
- /*recv: ref mut h2,*/ ..
- } => {
- unimplemented!("h2.poll_inner");
- /*
- h2
- .poll()
- .map(|r#async| {
- r#async.map(|opt| {
- opt.map(|bytes| {
- let _ = h2.release_capacity().release_capacity(bytes.len());
- Chunk::from(bytes)
- })
- })
- })
- .map_err(crate::Error::new_body)
- */
- }
+ recv: ref mut h2, ..
+ } => match ready!(Pin::new(&mut *h2).poll_next(cx)) {
+ Some(Ok(bytes)) => {
+ let _ = h2.release_capacity().release_capacity(bytes.len());
+ Poll::Ready(Some(Ok(Chunk::from(bytes))))
+ },
+ Some(Err(e)) => Poll::Ready(Some(Err(crate::Error::new_body(e)))),
+ None => Poll::Ready(None),
+ },
Kind::Wrapped(ref mut s) => {
match ready!(s.as_mut().poll_next(cx)) {
Some(res) => Poll::Ready(Some(res.map_err(crate::Error::new_body))),
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -314,11 +306,12 @@ impl Payload for Body {
self.poll_eof(cx)
}
- fn poll_trailers(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Option<Result<HeaderMap, Self::Error>>> {
+ fn poll_trailers(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Option<Result<HeaderMap, Self::Error>>> {
match self.kind {
- Kind::H2 { /*recv: ref mut h2,*/ .. } => {
- unimplemented!("h2.poll_trailers");
- //h2.poll_trailers().map_err(crate::Error::new_h2)
+ Kind::H2 { recv: ref mut h2, .. } => match ready!(h2.poll_trailers(cx)) {
+ Some(Ok(t)) => Poll::Ready(Some(Ok(t))),
+ Some(Err(e)) => Poll::Ready(Some(Err(crate::Error::new_h2(e)))),
+ None => Poll::Ready(None),
},
_ => Poll::Ready(None),
}
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -32,18 +32,19 @@ type Http1Dispatcher<T, B, R> = proto::dispatch::Dispatcher<
>;
type ConnEither<T, B> = Either<
Http1Dispatcher<T, B, proto::h1::ClientTransaction>,
- proto::h2::Client<T, B>,
+ proto::h2::ClientTask<B>,
>;
-/// Returns a `Handshake` future over some IO.
+/// Returns a handshake future over some IO.
///
/// This is a shortcut for `Builder::new().handshake(io)`.
-pub fn handshake<T>(io: T) -> Handshake<T, crate::Body>
+pub async fn handshake<T>(io: T) -> crate::Result<(SendRequest<crate::Body>, Connection<T, crate::Body>)>
where
T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
{
Builder::new()
.handshake(io)
+ .await
}
/// The sender side of an established connection.
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -68,7 +69,7 @@ where
/// A builder to configure an HTTP connection.
///
-/// After setting options, the builder is used to create a `Handshake` future.
+/// After setting options, the builder is used to create a handshake future.
#[derive(Clone, Debug)]
pub struct Builder {
pub(super) exec: Exec,
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -80,16 +81,6 @@ pub struct Builder {
h2_builder: h2::client::Builder,
}
-/// A future setting up HTTP over an IO object.
-///
-/// If successful, yields a `(SendRequest, Connection)` pair.
-#[must_use = "futures do nothing unless polled"]
-pub struct Handshake<T, B> {
- builder: Builder,
- io: Option<T>,
- _marker: PhantomData<fn(B)>,
-}
-
/// A future returned by `SendRequest::send_request`.
///
/// Yields a `Response` if successful.
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -334,7 +325,8 @@ impl<B> Clone for Http2SendRequest<B> {
impl<T, B> Connection<T, B>
where
T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
- B: Payload + 'static,
+ B: Payload + Unpin + 'static,
+ B::Data: Unpin,
{
/// Return the inner IO object, and additional information.
///
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -365,29 +357,20 @@ where
/// Use [`poll_fn`](https://docs.rs/futures/0.1.25/futures/future/fn.poll_fn.html)
/// and [`try_ready!`](https://docs.rs/futures/0.1.25/futures/macro.try_ready.html)
/// to work with this function; or use the `without_shutdown` wrapper.
- pub fn poll_without_shutdown(&mut self, cx: &mut task::Context<'_>) -> Poll<crate::Result<()>>
- where
- B: Unpin,
- {
+ pub fn poll_without_shutdown(&mut self, cx: &mut task::Context<'_>) -> Poll<crate::Result<()>> {
match self.inner.as_mut().expect("already upgraded") {
&mut Either::Left(ref mut h1) => {
h1.poll_without_shutdown(cx)
},
&mut Either::Right(ref mut h2) => {
- unimplemented!("h2 poll_without_shutdown");
- /*
- h2.poll().map(|x| x.map(|_| ()))
- */
+ Pin::new(h2).poll(cx).map_ok(|_| ())
}
}
}
/// Prevent shutdown of the underlying IO object at the end of service the request,
/// instead run `into_parts`. This is a convenience wrapper over `poll_without_shutdown`.
- pub fn without_shutdown(self) -> impl Future<Output=crate::Result<Parts<T>>>
- where
- B: Unpin,
- {
+ pub fn without_shutdown(self) -> impl Future<Output=crate::Result<Parts<T>>> {
let mut conn = Some(self);
future::poll_fn(move |cx| -> Poll<crate::Result<Parts<T>>> {
ready!(conn.as_mut().unwrap().poll_without_shutdown(cx))?;
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -400,6 +383,7 @@ impl<T, B> Future for Connection<T, B>
where
T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
B: Payload + Unpin + 'static,
+ B::Data: Unpin,
{
type Output = crate::Result<()>;
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -522,70 +506,46 @@ impl Builder {
}
/// Constructs a connection with the configured options and IO.
- #[inline]
- pub fn handshake<T, B>(&self, io: T) -> Handshake<T, B>
+ pub async fn handshake<T, B>(self, io: T) -> crate::Result<(SendRequest<B>, Connection<T, B>)>
where
T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
B: Payload + 'static,
+ B::Data: Unpin,
{
trace!("client handshake HTTP/{}", if self.http2 { 2 } else { 1 });
- Handshake {
- builder: self.clone(),
- io: Some(io),
- _marker: PhantomData,
- }
- }
-}
-
-// ===== impl Handshake
-
-impl<T, B> Future for Handshake<T, B>
-where
- T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
- B: Payload + 'static,
-{
- type Output = crate::Result<(SendRequest<B>, Connection<T, B>)>;
- fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
- let io = self.io.take().expect("polled more than once");
let (tx, rx) = dispatch::channel();
- let either = if !self.builder.http2 {
+ let either = if !self.http2 {
let mut conn = proto::Conn::new(io);
- if !self.builder.h1_writev {
+ if !self.h1_writev {
conn.set_write_strategy_flatten();
}
- if self.builder.h1_title_case_headers {
+ if self.h1_title_case_headers {
conn.set_title_case_headers();
}
- if let Some(sz) = self.builder.h1_read_buf_exact_size {
+ if let Some(sz) = self.h1_read_buf_exact_size {
conn.set_read_buf_exact_size(sz);
}
- if let Some(max) = self.builder.h1_max_buf_size {
+ if let Some(max) = self.h1_max_buf_size {
conn.set_max_buf_size(max);
}
let cd = proto::h1::dispatch::Client::new(rx);
let dispatch = proto::h1::Dispatcher::new(cd, conn);
Either::Left(dispatch)
} else {
- let h2 = proto::h2::Client::new(io, rx, &self.builder.h2_builder, self.builder.exec.clone());
+ let h2 = proto::h2::client::handshake(io, rx, &self.h2_builder, self.exec.clone())
+ .await?;
Either::Right(h2)
};
- Poll::Ready(Ok((
+ Ok((
SendRequest {
dispatch: tx,
},
Connection {
inner: Some(either),
},
- )))
- }
-}
-
-impl<T, B> fmt::Debug for Handshake<T, B> {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- f.debug_struct("Handshake")
- .finish()
+ ))
}
}
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -163,7 +163,7 @@ where C: Connect + Sync + 'static,
C::Transport: 'static,
C::Future: 'static,
B: Payload + Unpin + Send + 'static,
- B::Data: Send,
+ B::Data: Send + Unpin,
{
/// Send a `GET` request to the supplied `Uri`.
///
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -512,8 +512,10 @@ where C: Connect + Sync + 'static,
connecting
};
let is_h2 = is_ver_h2 || connected.alpn == Alpn::H2;
- Either::Left(conn_builder
+ Either::Left(Box::pin(conn_builder
.http2_only(is_h2)
+ // TODO: convert client::conn::Builder to be by-value?
+ .clone()
.handshake(io)
.and_then(move |(tx, conn)| {
trace!("handshake complete, spawning background dispatcher task");
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -541,7 +543,7 @@ where C: Connect + Sync + 'static,
PoolTx::Http1(tx)
},
})
- }))
+ })))
}))
})
}
diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs
--- a/src/proto/h2/client.rs
+++ b/src/proto/h2/client.rs
@@ -1,8 +1,11 @@
use bytes::IntoBuf;
-//use futures::{Async, Future, Poll, Stream};
+use futures_channel::{mpsc, oneshot};
+use futures_util::future::{self, FutureExt as _, Either};
+use futures_util::stream::StreamExt as _;
+use futures_util::try_future::TryFutureExt as _;
//use futures::future::{self, Either};
//use futures::sync::{mpsc, oneshot};
-use h2::client::{Builder, Handshake, SendRequest};
+use h2::client::{Builder, SendRequest};
use tokio_io::{AsyncRead, AsyncWrite};
use crate::headers::content_length_parse_all;
diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs
--- a/src/proto/h2/client.rs
+++ b/src/proto/h2/client.rs
@@ -14,192 +17,187 @@ use super::{PipeToSendStream, SendBuf};
use crate::{Body, Request, Response};
type ClientRx<B> = crate::client::dispatch::Receiver<Request<B>, Response<Body>>;
+
///// An mpsc channel is used to help notify the `Connection` task when *all*
///// other handles to it have been dropped, so that it can shutdown.
-//type ConnDropRef = mpsc::Sender<Never>;
+type ConnDropRef = mpsc::Sender<Never>;
///// A oneshot channel watches the `Connection` task, and when it completes,
///// the "dispatch" task will be notified and can shutdown sooner.
-//type ConnEof = oneshot::Receiver<Never>;
+type ConnEof = oneshot::Receiver<Never>;
-pub(crate) struct Client<T, B>
+pub(crate) async fn handshake<T, B>(
+ io: T,
+ req_rx: ClientRx<B>,
+ builder: &Builder,
+ exec: Exec,
+) -> crate::Result<ClientTask<B>>
where
+ T: AsyncRead + AsyncWrite + Send + Unpin + 'static,
B: Payload,
+ B::Data: Unpin,
{
- executor: Exec,
- rx: ClientRx<B>,
- state: State<T, SendBuf<B::Data>>,
-}
+ let (h2_tx, conn) = builder
+ .handshake::<_, SendBuf<B::Data>>(io)
+ .await
+ .map_err(crate::Error::new_h2)?;
+
+ // An mpsc channel is used entirely to detect when the
+ // 'Client' has been dropped. This is to get around a bug
+ // in h2 where dropping all SendRequests won't notify a
+ // parked Connection.
+ let (conn_drop_ref, rx) = mpsc::channel(1);
+ let (cancel_tx, conn_eof) = oneshot::channel();
+
+ let conn_drop_rx = rx.into_future()
+ .map(|(item, _rx)| {
+ match item {
+ Some(never) => match never {},
+ None => (),
+ }
+ });
+
+ let conn = conn.map_err(|e| debug!("connection error: {}", e));
+
+ let conn_task = async move {
+ match future::select(conn, conn_drop_rx).await {
+ Either::Left(_) => {
+ // ok or err, the `conn` has finished
+ }
+ Either::Right(((), conn)) => {
+ // mpsc has been dropped, hopefully polling
+ // the connection some more should start shutdown
+ // and then close
+ trace!("send_request dropped, starting conn shutdown");
+ drop(cancel_tx);
+ let _ = conn.await;
+ }
+ }
+ };
+
+ exec.execute(conn_task)?;
-enum State<T, B> where B: IntoBuf {
- Handshaking(Handshake<T, B>),
- //Ready(SendRequest<B>, ConnDropRef, ConnEof),
+ Ok(ClientTask {
+ conn_drop_ref,
+ conn_eof,
+ executor: exec,
+ h2_tx,
+ req_rx,
+ })
}
-impl<T, B> Client<T, B>
+pub(crate) struct ClientTask<B>
where
- T: AsyncRead + AsyncWrite + Send + 'static,
B: Payload,
{
- pub(crate) fn new(io: T, rx: ClientRx<B>, builder: &Builder, exec: Exec) -> Client<T, B> {
- unimplemented!("proto::h2::Client::new");
- /*
- let handshake = builder.handshake(io);
-
- Client {
- executor: exec,
- rx: rx,
- state: State::Handshaking(handshake),
- }
- */
- }
+ conn_drop_ref: ConnDropRef,
+ conn_eof: ConnEof,
+ executor: Exec,
+ h2_tx: SendRequest<SendBuf<B::Data>>,
+ req_rx: ClientRx<B>,
}
-impl<T, B> Future for Client<T, B>
+impl<B> Future for ClientTask<B>
where
- T: AsyncRead + AsyncWrite + Send + 'static,
- B: Payload + 'static,
+ B: Payload + Unpin + 'static,
+ B::Data: Unpin,
{
type Output = crate::Result<Dispatched>;
fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
- unimplemented!("impl Future for proto::h2::Client");
- /*
loop {
- let next = match self.state {
- State::Handshaking(ref mut h) => {
- let (request_tx, conn) = try_ready!(h.poll().map_err(crate::Error::new_h2));
- // An mpsc channel is used entirely to detect when the
- // 'Client' has been dropped. This is to get around a bug
- // in h2 where dropping all SendRequests won't notify a
- // parked Connection.
- let (tx, rx) = mpsc::channel(0);
- let (cancel_tx, cancel_rx) = oneshot::channel();
- let rx = rx.into_future()
- .map(|(msg, _)| match msg {
- Some(never) => match never {},
- None => (),
- })
- .map_err(|_| -> Never { unreachable!("mpsc cannot error") });
- let fut = conn
- .inspect(move |_| {
- drop(cancel_tx);
- trace!("connection complete")
- })
- .map_err(|e| debug!("connection error: {}", e))
- .select2(rx)
- .then(|res| match res {
- Ok(Either::A(((), _))) |
- Err(Either::A(((), _))) => {
- // conn has finished either way
- Either::A(future::ok(()))
- },
- Ok(Either::B(((), conn))) => {
- // mpsc has been dropped, hopefully polling
- // the connection some more should start shutdown
- // and then close
- trace!("send_request dropped, starting conn shutdown");
- Either::B(conn)
- }
- Err(Either::B((never, _))) => match never {},
- });
- self.executor.execute(fut)?;
- State::Ready(request_tx, tx, cancel_rx)
- },
- State::Ready(ref mut tx, ref conn_dropper, ref mut cancel_rx) => {
- match tx.poll_ready() {
- Ok(Async::Ready(())) => (),
- Ok(Async::NotReady) => return Ok(Async::NotReady),
+ match ready!(self.h2_tx.poll_ready(cx)) {
+ Ok(()) => (),
+ Err(err) => {
+ return if err.reason() == Some(::h2::Reason::NO_ERROR) {
+ trace!("connection gracefully shutdown");
+ Poll::Ready(Ok(Dispatched::Shutdown))
+ } else {
+ Poll::Ready(Err(crate::Error::new_h2(err)))
+ };
+ }
+ };
+
+ match Pin::new(&mut self.req_rx).poll_next(cx) {
+ Poll::Ready(Some((req, cb))) => {
+ // check that future hasn't been canceled already
+ if cb.is_canceled() {
+ trace!("request callback is canceled");
+ continue;
+ }
+ let (head, body) = req.into_parts();
+ let mut req = ::http::Request::from_parts(head, ());
+ super::strip_connection_headers(req.headers_mut(), true);
+ if let Some(len) = body.content_length() {
+ headers::set_content_length_if_missing(req.headers_mut(), len);
+ }
+ let eos = body.is_end_stream();
+ let (fut, body_tx) = match self.h2_tx.send_request(req, eos) {
+ Ok(ok) => ok,
Err(err) => {
- return if err.reason() == Some(::h2::Reason::NO_ERROR) {
- trace!("connection gracefully shutdown");
- Ok(Async::Ready(Dispatched::Shutdown))
- } else {
- Err(crate::Error::new_h2(err))
- };
+ debug!("client send request error: {}", err);
+ cb.send(Err((crate::Error::new_h2(err), None)));
+ continue;
}
- }
- match self.rx.poll() {
- Ok(Async::Ready(Some((req, cb)))) => {
- // check that future hasn't been canceled already
- if cb.is_canceled() {
- trace!("request callback is canceled");
- continue;
- }
- let (head, body) = req.into_parts();
- let mut req = ::http::Request::from_parts(head, ());
- super::strip_connection_headers(req.headers_mut(), true);
- if let Some(len) = body.content_length() {
- headers::set_content_length_if_missing(req.headers_mut(), len);
- }
- let eos = body.is_end_stream();
- let (fut, body_tx) = match tx.send_request(req, eos) {
- Ok(ok) => ok,
- Err(err) => {
- debug!("client send request error: {}", err);
- cb.send(Err((crate::Error::new_h2(err), None)));
- continue;
- }
- };
- if !eos {
- let mut pipe = PipeToSendStream::new(body, body_tx)
- .map_err(|e| debug!("client request body error: {}", e));
-
- // eagerly see if the body pipe is ready and
- // can thus skip allocating in the executor
- match pipe.poll() {
- Ok(Async::Ready(())) | Err(()) => (),
- Ok(Async::NotReady) => {
- let conn_drop_ref = conn_dropper.clone();
- let pipe = pipe.then(move |x| {
- drop(conn_drop_ref);
- x
- });
- self.executor.execute(pipe)?;
- }
+ };
+
+ if !eos {
+ let mut pipe = PipeToSendStream::new(body, body_tx)
+ .map(|res| {
+ if let Err(e) = res {
+ debug!("client request body error: {}", e);
}
+ });
+
+ // eagerly see if the body pipe is ready and
+ // can thus skip allocating in the executor
+ match Pin::new(&mut pipe).poll(cx) {
+ Poll::Ready(_) => (),
+ Poll::Pending => {
+ let conn_drop_ref = self.conn_drop_ref.clone();
+ let pipe = pipe.map(move |x| {
+ drop(conn_drop_ref);
+ x
+ });
+ self.executor.execute(pipe)?;
}
+ }
+ }
- let fut = fut
- .then(move |result| {
- match result {
- Ok(res) => {
- let content_length = content_length_parse_all(res.headers());
- let res = res.map(|stream|
- crate::Body::h2(stream, content_length));
- Ok(res)
- },
- Err(err) => {
- debug!("client response error: {}", err);
- Err((crate::Error::new_h2(err), None))
- }
- }
- });
- self.executor.execute(cb.send_when(fut))?;
- continue;
- },
-
- Ok(Async::NotReady) => {
- match cancel_rx.poll() {
- Ok(Async::Ready(never)) => match never {},
- Ok(Async::NotReady) => return Ok(Async::NotReady),
- Err(_conn_is_eof) => {
- trace!("connection task is closed, closing dispatch task");
- return Ok(Async::Ready(Dispatched::Shutdown));
+ let fut = fut
+ .map(move |result| {
+ match result {
+ Ok(res) => {
+ let content_length = content_length_parse_all(res.headers());
+ let res = res.map(|stream|
+ crate::Body::h2(stream, content_length));
+ Ok(res)
+ },
+ Err(err) => {
+ debug!("client response error: {}", err);
+ Err((crate::Error::new_h2(err), None))
}
}
- },
+ });
+ self.executor.execute(cb.send_when(fut))?;
+ continue;
+ },
+
+ Poll::Ready(None) => {
+ trace!("client::dispatch::Sender dropped");
+ return Poll::Ready(Ok(Dispatched::Shutdown));
+ }
- Ok(Async::Ready(None)) => {
- trace!("client::dispatch::Sender dropped");
- return Ok(Async::Ready(Dispatched::Shutdown));
- },
- Err(never) => match never {},
+ Poll::Pending => {
+ match ready!(Pin::new(&mut self.conn_eof).poll(cx)) {
+ Ok(never) => match never {},
+ Err(_conn_is_eof) => {
+ trace!("connection task is closed, closing dispatch task");
+ return Poll::Ready(Ok(Dispatched::Shutdown));
+ }
}
},
- };
- self.state = next;
+ }
}
- */
}
}
diff --git a/src/proto/h2/mod.rs b/src/proto/h2/mod.rs
--- a/src/proto/h2/mod.rs
+++ b/src/proto/h2/mod.rs
@@ -1,5 +1,4 @@
use bytes::Buf;
-//use futures::{Async, Future, Poll};
use h2::{SendStream};
use http::header::{
HeaderName, CONNECTION, PROXY_AUTHENTICATE, PROXY_AUTHORIZATION, TE, TRAILER,
diff --git a/src/proto/h2/mod.rs b/src/proto/h2/mod.rs
--- a/src/proto/h2/mod.rs
+++ b/src/proto/h2/mod.rs
@@ -8,11 +7,12 @@ use http::header::{
use http::HeaderMap;
use crate::body::Payload;
+use crate::common::{Future, Pin, Poll, task};
-mod client;
+pub(crate) mod client;
pub(crate) mod server;
-pub(crate) use self::client::Client;
+pub(crate) use self::client::ClientTask;
pub(crate) use self::server::Server;
fn strip_connection_headers(headers: &mut HeaderMap, is_request: bool) {
diff --git a/src/proto/h2/mod.rs b/src/proto/h2/mod.rs
--- a/src/proto/h2/mod.rs
+++ b/src/proto/h2/mod.rs
@@ -106,17 +106,13 @@ where
}
}
-/*
impl<S> Future for PipeToSendStream<S>
where
- S: Payload,
+ S: Payload + Unpin,
{
- type Item = ();
- type Error = crate::Error;
+ type Output = crate::Result<()>;
- fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
- unimplemented!("impl Future for PipeToSendStream");
- /*
+ fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
loop {
if !self.data_done {
// we don't have the next chunk of data yet, so just reserve 1 byte to make
diff --git a/src/proto/h2/mod.rs b/src/proto/h2/mod.rs
--- a/src/proto/h2/mod.rs
+++ b/src/proto/h2/mod.rs
@@ -126,23 +122,25 @@ where
if self.body_tx.capacity() == 0 {
loop {
- match try_ready!(self.body_tx.poll_capacity().map_err(crate::Error::new_body_write)) {
- Some(0) => {}
- Some(_) => break,
- None => return Err(crate::Error::new_canceled()),
+ match ready!(self.body_tx.poll_capacity(cx)) {
+
+ Some(Ok(0)) => {},
+ Some(Ok(_)) => break,
+ Some(Err(e)) => return Poll::Ready(Err(crate::Error::new_body_write(e))) ,
+ None => return Poll::Ready(Err(crate::Error::new_canceled())),
}
}
} else {
- if let Async::Ready(reason) =
- self.body_tx.poll_reset().map_err(crate::Error::new_body_write)?
+ if let Poll::Ready(reason) =
+ self.body_tx.poll_reset(cx).map_err(crate::Error::new_body_write)?
{
debug!("stream received RST_STREAM: {:?}", reason);
- return Err(crate::Error::new_body_write(::h2::Error::from(reason)));
+ return Poll::Ready(Err(crate::Error::new_body_write(::h2::Error::from(reason))));
}
}
- match try_ready!(self.stream.poll_data().map_err(|e| self.on_user_err(e))) {
- Some(chunk) => {
+ match ready!(Pin::new(&mut self.stream).poll_data(cx)) {
+ Some(Ok(chunk)) => {
let is_eos = self.stream.is_end_stream();
trace!(
"send body chunk: {} bytes, eos={}",
diff --git a/src/proto/h2/mod.rs b/src/proto/h2/mod.rs
--- a/src/proto/h2/mod.rs
+++ b/src/proto/h2/mod.rs
@@ -156,14 +154,15 @@ where
.map_err(crate::Error::new_body_write)?;
if is_eos {
- return Ok(Async::Ready(()));
+ return Poll::Ready(Ok(()));
}
}
+ Some(Err(e)) => return Poll::Ready(Err(self.on_user_err(e))),
None => {
self.body_tx.reserve_capacity(0);
let is_eos = self.stream.is_end_stream();
if is_eos {
- return self.send_eos_frame().map(Async::Ready);
+ return Poll::Ready(self.send_eos_frame());
} else {
self.data_done = true;
// loop again to poll_trailers
diff --git a/src/proto/h2/mod.rs b/src/proto/h2/mod.rs
--- a/src/proto/h2/mod.rs
+++ b/src/proto/h2/mod.rs
@@ -171,31 +170,30 @@ where
}
}
} else {
- if let Async::Ready(reason) =
- self.body_tx.poll_reset().map_err(|e| crate::Error::new_body_write(e))?
+ if let Poll::Ready(reason) =
+ self.body_tx.poll_reset(cx).map_err(|e| crate::Error::new_body_write(e))?
{
debug!("stream received RST_STREAM: {:?}", reason);
- return Err(crate::Error::new_body_write(::h2::Error::from(reason)));
+ return Poll::Ready(Err(crate::Error::new_body_write(::h2::Error::from(reason))));
}
- match try_ready!(self.stream.poll_trailers().map_err(|e| self.on_user_err(e))) {
- Some(trailers) => {
+ match ready!(Pin::new(&mut self.stream).poll_trailers(cx)) {
+ Some(Ok(trailers)) => {
self.body_tx
.send_trailers(trailers)
.map_err(crate::Error::new_body_write)?;
- return Ok(Async::Ready(()));
+ return Poll::Ready(Ok(()));
}
+ Some(Err(e)) => return Poll::Ready(Err(self.on_user_err(e))),
None => {
// There were no trailers, so send an empty DATA frame...
- return self.send_eos_frame().map(Async::Ready);
+ return Poll::Ready(self.send_eos_frame());
}
}
}
}
- */
}
}
-*/
struct SendBuf<B>(Option<B>);
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -1,5 +1,7 @@
use std::error::Error as StdError;
+use std::marker::Unpin;
+use futures_core::Stream;
use h2::Reason;
use h2::server::{Builder, Connection, Handshake, SendResponse};
use tokio_io::{AsyncRead, AsyncWrite};
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -49,27 +51,23 @@ where
impl<T, S, B, E> Server<T, S, B, E>
where
- T: AsyncRead + AsyncWrite,
+ T: AsyncRead + AsyncWrite + Unpin,
S: Service<ReqBody=Body, ResBody=B>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
B: Payload,
+ B::Data: Unpin,
E: H2Exec<S::Future, B>,
{
pub(crate) fn new(io: T, service: S, builder: &Builder, exec: E) -> Server<T, S, B, E> {
- unimplemented!("proto::h2::Server::new")
- /*
let handshake = builder.handshake(io);
Server {
exec,
state: State::Handshaking(handshake),
service,
}
- */
}
pub fn graceful_shutdown(&mut self) {
- unimplemented!("proto::h2::Server::graceful_shutdown")
- /*
trace!("graceful_shutdown");
match self.state {
State::Handshaking(..) => {
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -86,54 +84,53 @@ where
}
}
self.state = State::Closed;
- */
}
}
impl<T, S, B, E> Future for Server<T, S, B, E>
where
- T: AsyncRead + AsyncWrite,
+ T: AsyncRead + AsyncWrite + Unpin,
S: Service<ReqBody=Body, ResBody=B>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
B: Payload,
+ B::Data: Unpin,
E: H2Exec<S::Future, B>,
{
type Output = crate::Result<Dispatched>;
fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
- unimplemented!("h2 server future")
- /*
+ let me = &mut *self;
loop {
- let next = match self.state {
+ let next = match me.state {
State::Handshaking(ref mut h) => {
- let conn = try_ready!(h.poll().map_err(crate::Error::new_h2));
+ let conn = ready!(Pin::new(h).poll(cx).map_err(crate::Error::new_h2))?;
State::Serving(Serving {
conn,
closing: None,
})
},
State::Serving(ref mut srv) => {
- try_ready!(srv.poll_server(&mut self.service, &self.exec));
- return Ok(Async::Ready(Dispatched::Shutdown));
+ ready!(srv.poll_server(cx, &mut me.service, &mut me.exec))?;
+ return Poll::Ready(Ok(Dispatched::Shutdown));
}
State::Closed => {
// graceful_shutdown was called before handshaking finished,
// nothing to do here...
- return Ok(Async::Ready(Dispatched::Shutdown));
+ return Poll::Ready(Ok(Dispatched::Shutdown));
}
};
- self.state = next;
+ me.state = next;
}
- */
}
}
impl<T, B> Serving<T, B>
where
- T: AsyncRead + AsyncWrite,
+ T: AsyncRead + AsyncWrite + Unpin,
B: Payload,
+ B::Data: Unpin,
{
- fn poll_server<S, E>(&mut self, service: &mut S, exec: &E) -> Poll<crate::Result<()>>
+ fn poll_server<S, E>(&mut self, cx: &mut task::Context<'_>, service: &mut S, exec: &mut E) -> Poll<crate::Result<()>>
where
S: Service<
ReqBody=Body,
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -142,19 +139,18 @@ where
S::Error: Into<Box<dyn StdError + Send + Sync>>,
E: H2Exec<S::Future, B>,
{
- /*
if self.closing.is_none() {
loop {
// At first, polls the readiness of supplied service.
- match service.poll_ready() {
- Ok(Async::Ready(())) => (),
- Ok(Async::NotReady) => {
+ match service.poll_ready(cx) {
+ Poll::Ready(Ok(())) => (),
+ Poll::Pending => {
// use `poll_close` instead of `poll`, in order to avoid accepting a request.
- try_ready!(self.conn.poll_close().map_err(crate::Error::new_h2));
+ ready!(self.conn.poll_close(cx).map_err(crate::Error::new_h2))?;
trace!("incoming connection complete");
- return Ok(Async::Ready(()));
+ return Poll::Ready(Ok(()));
}
- Err(err) => {
+ Poll::Ready(Err(err)) => {
let err = crate::Error::new_user_service(err);
debug!("service closed: {}", err);
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -173,29 +169,33 @@ where
}
// When the service is ready, accepts an incoming request.
- if let Some((req, respond)) = try_ready!(self.conn.poll().map_err(crate::Error::new_h2)) {
- trace!("incoming request");
- let content_length = content_length_parse_all(req.headers());
- let req = req.map(|stream| {
- crate::Body::h2(stream, content_length)
- });
- let fut = H2Stream::new(service.call(req), respond);
- exec.execute_h2stream(fut)?;
- } else {
- // no more incoming streams...
- trace!("incoming connection complete");
- return Ok(Async::Ready(()))
+ match ready!(Pin::new(&mut self.conn).poll_next(cx)) {
+ Some(Ok((req, respond))) => {
+ trace!("incoming request");
+ let content_length = content_length_parse_all(req.headers());
+ let req = req.map(|stream| {
+ crate::Body::h2(stream, content_length)
+ });
+ let fut = H2Stream::new(service.call(req), respond);
+ exec.execute_h2stream(fut)?;
+ },
+ Some(Err(e)) => {
+ return Poll::Ready(Err(crate::Error::new_h2(e)));
+ },
+ None => {
+ // no more incoming streams...
+ trace!("incoming connection complete");
+ return Poll::Ready(Ok(()));
+ },
}
}
}
debug_assert!(self.closing.is_some(), "poll_server broke loop without closing");
- try_ready!(self.conn.poll_close().map_err(crate::Error::new_h2));
+ ready!(self.conn.poll_close(cx).map_err(crate::Error::new_h2))?;
- Err(self.closing.take().expect("polled after error"))
- */
- unimplemented!("h2 server poll_server")
+ Poll::Ready(Err(self.closing.take().expect("polled after error")))
}
}
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -230,38 +230,37 @@ where
}
}
-impl<F, B> Future for H2Stream<F, B>
+impl<F, B, E> H2Stream<F, B>
where
- //F: Future<Item=Response<B>>,
- //F::Error: Into<Box<dyn StdError + Send + Sync>>,
- B: Payload,
+ F: Future<Output = Result<Response<B>, E>>,
+ B: Payload + Unpin,
+ B::Data: Unpin,
+ E: Into<Box<dyn StdError + Send + Sync>>,
{
- type Output = ();
-
- fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
- unimplemented!("impl Future for H2Stream");
- /*
+ fn poll2(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<crate::Result<()>> {
+ // Safety: State::{Service, Body} futures are never moved
+ let me = unsafe { self.get_unchecked_mut() };
loop {
- let next = match self.state {
+ let next = match me.state {
H2StreamState::Service(ref mut h) => {
- let res = match h.poll() {
- Ok(Async::Ready(r)) => r,
- Ok(Async::NotReady) => {
- // Body is not yet ready, so we want to check if the client has sent a
+ let res = match unsafe { Pin::new_unchecked(h) }.poll(cx) {
+ Poll::Ready(Ok(r)) => r,
+ Poll::Pending => {
+ // Response is not yet ready, so we want to check if the client has sent a
// RST_STREAM frame which would cancel the current request.
- if let Async::Ready(reason) =
- self.reply.poll_reset().map_err(|e| crate::Error::new_h2(e))?
+ if let Poll::Ready(reason) =
+ me.reply.poll_reset(cx).map_err(|e| crate::Error::new_h2(e))?
{
debug!("stream received RST_STREAM: {:?}", reason);
- return Err(crate::Error::new_h2(reason.into()));
+ return Poll::Ready(Err(crate::Error::new_h2(reason.into())));
}
- return Ok(Async::NotReady);
+ return Poll::Pending;
}
- Err(e) => {
+ Poll::Ready(Err(e)) => {
let err = crate::Error::new_user_service(e);
warn!("http2 service errored: {}", err);
- self.reply.send_reset(err.h2_reason());
- return Err(err);
+ me.reply.send_reset(err.h2_reason());
+ return Poll::Ready(Err(err));
},
};
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -278,12 +277,12 @@ where
macro_rules! reply {
($eos:expr) => ({
- match self.reply.send_response(res, $eos) {
+ match me.reply.send_response(res, $eos) {
Ok(tx) => tx,
Err(e) => {
debug!("send response error: {}", e);
- self.reply.send_reset(Reason::INTERNAL_ERROR);
- return Err(crate::Error::new_h2(e));
+ me.reply.send_reset(Reason::INTERNAL_ERROR);
+ return Poll::Ready(Err(crate::Error::new_h2(e)));
}
}
})
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -300,7 +299,7 @@ where
body_tx
.send_data(buf, true)
.map_err(crate::Error::new_body_write)?;
- return Ok(Async::Ready(()));
+ return Poll::Ready(Ok(()));
}
if !body.is_end_stream() {
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -308,32 +307,32 @@ where
H2StreamState::Body(PipeToSendStream::new(body, body_tx))
} else {
reply!(true);
- return Ok(Async::Ready(()));
+ return Poll::Ready(Ok(()));
}
},
H2StreamState::Body(ref mut pipe) => {
- return pipe.poll();
+ return Pin::new(pipe).poll(cx);
}
};
- self.state = next;
+ me.state = next;
}
- */
}
}
-/*
-impl<F, B> Future for H2Stream<F, B>
+
+impl<F, B, E> Future for H2Stream<F, B>
where
- F: Future<Item=Response<B>>,
- F::Error: Into<Box<dyn StdError + Send + Sync>>,
- B: Payload,
+ F: Future<Output = Result<Response<B>, E>>,
+ B: Payload + Unpin,
+ B::Data: Unpin,
+ E: Into<Box<dyn StdError + Send + Sync>>,
{
- type Item = ();
- type Error = ();
+ type Output = ();
- fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
- self.poll2()
- .map_err(|e| debug!("stream error: {}", e))
+ fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
+ self.poll2(cx).map(|res| {
+ if let Err(e) = res {
+ debug!("stream error: {}", e);
+ }
+ })
}
}
-*/
-
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -358,6 +358,7 @@ impl<E> Http<E> {
S: Service<ReqBody=Body, ResBody=Bd>,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
Bd: Payload,
+ Bd::Data: Unpin,
I: AsyncRead + AsyncWrite + Unpin,
E: H2Exec<S::Future, Bd>,
{
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -479,6 +480,7 @@ where
S::Error: Into<Box<dyn StdError + Send + Sync>>,
I: AsyncRead + AsyncWrite + Unpin,
B: Payload + 'static,
+ B::Data: Unpin,
E: H2Exec<S::Future, B>,
{
/// Start a graceful shutdown process for this connection.
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -549,7 +551,7 @@ where
loop {
let polled = match *self.conn.as_mut().unwrap() {
Either::A(ref mut h1) => h1.poll_without_shutdown(cx),
- Either::B(ref mut h2) => unimplemented!("Connection::poll_without_shutdown h2"),//return h2.poll().map(|x| x.map(|_| ())),
+ Either::B(ref mut h2) => return Pin::new(h2).poll(cx).map_ok(|_| ()),
};
match ready!(polled) {
Ok(x) => return Poll::Ready(Ok(x)),
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -629,6 +631,7 @@ where
S::Error: Into<Box<dyn StdError + Send + Sync>>,
I: AsyncRead + AsyncWrite + Unpin + 'static,
B: Payload + 'static,
+ B::Data: Unpin,
E: H2Exec<S::Future, B>,
{
type Output = crate::Result<()>;
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -744,6 +747,7 @@ where
F: Future<Output=Result<S, FE>>,
S: Service<ReqBody=Body, ResBody=B>,
B: Payload,
+ B::Data: Unpin,
E: H2Exec<S::Future, B>,
{
type Output = Result<Connection<I, S, E>, FE>;
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -852,6 +856,7 @@ pub(crate) mod spawn_all {
where
I: AsyncRead + AsyncWrite + Unpin + Send + 'static,
S: Service<ReqBody=Body> + 'static,
+ <S::ResBody as Payload>::Data: Unpin,
E: H2Exec<S::Future, S::ResBody>,
{
type Future = UpgradeableConnection<I, S, E>;
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -895,6 +900,7 @@ pub(crate) mod spawn_all {
NE: Into<Box<dyn StdError + Send + Sync>>,
S: Service<ReqBody=Body, ResBody=B>,
B: Payload,
+ B::Data: Unpin,
E: H2Exec<S::Future, B>,
W: Watcher<I, S, E>,
{
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -960,6 +966,7 @@ mod upgrades {
S::Error: Into<Box<dyn StdError + Send + Sync>>,
I: AsyncRead + AsyncWrite + Unpin,
B: Payload + 'static,
+ B::Data: Unpin,
E: H2Exec<S::Future, B>,
{
/// Start a graceful shutdown process for this connection.
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -977,6 +984,7 @@ mod upgrades {
S::Error: Into<Box<dyn StdError + Send + Sync>>,
I: AsyncRead + AsyncWrite + Unpin + Send + 'static,
B: Payload + 'static,
+ B::Data: Unpin,
E: super::H2Exec<S::Future, B>,
{
type Output = crate::Result<()>;
diff --git a/src/server/mod.rs b/src/server/mod.rs
--- a/src/server/mod.rs
+++ b/src/server/mod.rs
@@ -154,6 +154,7 @@ where
S::Error: Into<Box<dyn StdError + Send + Sync>>,
S::Service: 'static,
B: Payload,
+ B::Data: Unpin,
E: H2Exec<<S::Service as Service>::Future, B>,
E: NewSvcExec<IO, S::Future, S::Service, E, GracefulWatcher>,
{
diff --git a/src/server/mod.rs b/src/server/mod.rs
--- a/src/server/mod.rs
+++ b/src/server/mod.rs
@@ -211,6 +212,7 @@ where
S::Error: Into<Box<dyn StdError + Send + Sync>>,
S::Service: 'static,
B: Payload,
+ B::Data: Unpin,
E: H2Exec<<S::Service as Service>::Future, B>,
E: NewSvcExec<IO, S::Future, S::Service, E, NoopWatcher>,
{
diff --git a/src/server/mod.rs b/src/server/mod.rs
--- a/src/server/mod.rs
+++ b/src/server/mod.rs
@@ -409,6 +411,7 @@ impl<I, E> Builder<I, E> {
S::Error: Into<Box<dyn StdError + Send + Sync>>,
S::Service: 'static,
B: Payload,
+ B::Data: Unpin,
E: NewSvcExec<IO, S::Future, S::Service, E, NoopWatcher>,
E: H2Exec<<S::Service as Service>::Future, B>,
{
diff --git a/src/server/shutdown.rs b/src/server/shutdown.rs
--- a/src/server/shutdown.rs
+++ b/src/server/shutdown.rs
@@ -47,6 +47,7 @@ where
S::Service: 'static,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
B: Payload,
+ B::Data: Unpin,
F: Future<Output=()>,
E: H2Exec<<S::Service as Service>::Future, B>,
E: NewSvcExec<IO, S::Future, S::Service, E, GracefulWatcher>,
diff --git a/src/server/shutdown.rs b/src/server/shutdown.rs
--- a/src/server/shutdown.rs
+++ b/src/server/shutdown.rs
@@ -98,6 +99,7 @@ impl<I, S, E> Watcher<I, S, E> for GracefulWatcher
where
I: AsyncRead + AsyncWrite + Unpin + Send + 'static,
S: Service<ReqBody=Body> + 'static,
+ <S::ResBody as Payload>::Data: Unpin,
E: H2Exec<S::Future, S::ResBody>,
{
type Future = Watching<UpgradeableConnection<I, S, E>, fn(Pin<&mut UpgradeableConnection<I, S, E>>)>;
diff --git a/src/server/shutdown.rs b/src/server/shutdown.rs
--- a/src/server/shutdown.rs
+++ b/src/server/shutdown.rs
@@ -116,6 +118,7 @@ where
S::Error: Into<Box<dyn StdError + Send + Sync>>,
I: AsyncRead + AsyncWrite + Unpin,
S::ResBody: Payload + 'static,
+ <S::ResBody as Payload>::Data: Unpin,
E: H2Exec<S::Future, S::ResBody>,
{
conn.graceful_shutdown()
|
Is there an issue/PR/branch that is tracking the update of h2 to `std::future`, is anyone working on it currently?
Related issue in the h2 repo: https://github.com/hyperium/h2/issues/385
The relevant work in the `h2` crate has merged! Next up: re-enabling the http2 stuff inside hyper. It should be a bit easier, mostly just translating old `Future` to new.
|
2019-08-19T22:27:17Z
| 1,906
|
Update h2 to std::future
As part of the upgrade to `std::future` (#1805), the HTTP2 support was left as `unimplemented!`. This needs to be re-enabled, but depends on the `h2` crate updating.
|
hyperium__hyper-1906
|
diff --git a/benches/end_to_end.rs b/benches/end_to_end.rs
--- a/benches/end_to_end.rs
+++ b/benches/end_to_end.rs
@@ -64,18 +64,14 @@ fn http1_parallel_x10_req_10mb(b: &mut test::Bencher) {
}
#[bench]
-#[ignore]
fn http2_get(b: &mut test::Bencher) {
- // FIXME: re-implement tests when `h2` upgrades to `async/await`
opts()
.http2()
.bench(b)
}
#[bench]
-#[ignore]
fn http2_post(b: &mut test::Bencher) {
- // FIXME: re-implement tests when `h2` upgrades to `async/await`
opts()
.http2()
.method(Method::POST)
diff --git a/benches/end_to_end.rs b/benches/end_to_end.rs
--- a/benches/end_to_end.rs
+++ b/benches/end_to_end.rs
@@ -84,9 +80,7 @@ fn http2_post(b: &mut test::Bencher) {
}
#[bench]
-#[ignore]
fn http2_req_100kb(b: &mut test::Bencher) {
- // FIXME: re-implement tests when `h2` upgrades to `async/await`
let body = &[b'x'; 1024 * 100];
opts()
.http2()
diff --git a/benches/end_to_end.rs b/benches/end_to_end.rs
--- a/benches/end_to_end.rs
+++ b/benches/end_to_end.rs
@@ -96,9 +90,7 @@ fn http2_req_100kb(b: &mut test::Bencher) {
}
#[bench]
-#[ignore]
fn http2_parallel_x10_empty(b: &mut test::Bencher) {
- // FIXME: re-implement tests when `h2` upgrades to `async/await`
opts()
.http2()
.parallel(10)
diff --git a/benches/end_to_end.rs b/benches/end_to_end.rs
--- a/benches/end_to_end.rs
+++ b/benches/end_to_end.rs
@@ -106,9 +98,7 @@ fn http2_parallel_x10_empty(b: &mut test::Bencher) {
}
#[bench]
-#[ignore]
fn http2_parallel_x10_req_10mb(b: &mut test::Bencher) {
- // FIXME: re-implement tests when `h2` upgrades to `async/await`
let body = &[b'x'; 1024 * 1024 * 10];
opts()
.http2()
|
hyperium/hyper
|
cc7d3058e84b63d26697026f44cdc153b208de9a
|
[
"1893"
] |
0.2
|
9d5299b655b7db86462fdb3929acf16ce5c2f74d
|
diff --git a/Cargo.toml b/Cargo.toml
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -36,15 +36,15 @@ log = "0.4"
net2 = { version = "0.2.32", optional = true }
pin-utils = "0.1.0-alpha.4"
time = "0.1"
-tokio = { git = "https://github.com/tokio-rs/tokio", optional = true, default-features = false, features = ["rt-full"] }
-tokio-buf = "0.1"
-tokio-executor = { git = "https://github.com/tokio-rs/tokio" }
-tokio-io = { git = "https://github.com/tokio-rs/tokio" }
-tokio-reactor = { git = "https://github.com/tokio-rs/tokio", optional = true }
-tokio-sync = { git = "https://github.com/tokio-rs/tokio" }
-tokio-tcp = { git = "https://github.com/tokio-rs/tokio", optional = true, features = ["async-traits"] }
-tokio-threadpool = { git = "https://github.com/tokio-rs/tokio", optional = true }
-tokio-timer = { git = "https://github.com/tokio-rs/tokio", optional = true }
+tokio = { version = "0.2.0-alpha.1", optional = true, default-features = false, features = ["rt-full"] }
+tokio-buf = "0.2.0-alpha.1"
+tokio-executor = "0.2.0-alpha.1"
+tokio-io = "0.2.0-alpha.1"
+tokio-reactor = { version = "0.2.0-alpha.1", optional = true }
+tokio-sync = "0.2.0-alpha.1"
+tokio-tcp = { version = "0.2.0-alpha.1", optional = true, features = ["async-traits"] }
+tokio-threadpool = { version = "0.2.0-alpha.1", optional = true }
+tokio-timer = { version = "0.3.0-alpha.1", optional = true }
want = { git = "https://github.com/seanmonstar/want", branch = "std-future" }
[dev-dependencies]
|
2019-08-09T01:55:54Z
| 1,894
|
Update to use tokio alpha from crates.io instead of from git
|
hyperium__hyper-1894
|
diff --git a/Cargo.toml b/Cargo.toml
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -55,8 +55,8 @@ spmc = "0.2"
serde = "1.0"
serde_derive = "1.0"
serde_json = "1.0"
-tokio-fs = { git = "https://github.com/tokio-rs/tokio" }
-tokio-test = { git = "https://github.com/tokio-rs/tokio" }
+tokio-fs = "0.2.0-alpha.1"
+tokio-test = "0.2.0-alpha.1"
url = "1.0"
diff --git a/Cargo.toml b/Cargo.toml
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -177,4 +177,3 @@ required-features = ["runtime"]
#name = "server"
#path = "tests/server.rs"
#required-features = ["runtime"]
-
|
hyperium/hyper
|
cc7d3058e84b63d26697026f44cdc153b208de9a
|
|
[
"1863"
] |
0.2
|
f93463b3d98d30f47653acde12bf3e42ba278f68
|
diff --git a/examples/echo.rs b/examples/echo.rs
--- a/examples/echo.rs
+++ b/examples/echo.rs
@@ -1,7 +1,5 @@
#![feature(async_await)]
-#![deny(warnings)]
-
-extern crate hyper;
+//#![deny(warnings)]
use hyper::{Body, Method, Request, Response, Server, StatusCode};
use hyper::service::{make_service_fn, service_fn};
diff --git a/src/mock.rs b/src/mock.rs
--- a/src/mock.rs
+++ b/src/mock.rs
@@ -523,3 +527,4 @@ impl Drop for MockedConnections {
}
}
}
+*/
|
Ah, I didn't realize others were testing hyper master for things. As part of #1836, master disabled running its tests in CI since they aren't updated yet.
I've seen the pattern of moving disabled things to a `tests_disabled` directory in other projects, I think doing that here would fix this?
|
2019-07-12T20:30:29Z
| 1,865
|
master branch fails to build with --all-targets
````
cargo check --all-targets
Checking hyper v0.13.0-a.0 (/tmp/hyper)
error[E0463]: can't find crate for `futures`
--> tests/client.rs:4:1
|
4 | extern crate futures;
| ^^^^^^^^^^^^^^^^^^^^^ can't find crate
error: aborting due to previous error
For more information about this error, try `rustc --explain E0463`.
error: Could not compile `hyper`.
warning: build failed, waiting for other jobs to finish...
error[E0463]: can't find crate for `tokio_mockstream`
--> src/common/io/rewind.rs:128:5
|
128 | extern crate tokio_mockstream;
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ can't find crate
error: aborting due to previous error
For more information about this error, try `rustc --explain E0463`.
error: Could not compile `hyper`.
warning: build failed, waiting for other jobs to finish...
error[E0425]: cannot find function `run` in module `rt`
--> examples/proxy.rs:43:9
|
43 | rt::run(server);
| ^^^ not found in `rt`
error: unused import: `Future`
--> examples/proxy.rs:7:23
|
7 | use hyper::rt::{self, Future};
| ^^^^^^
|
note: lint level defined here
--> examples/proxy.rs:1:9
|
1 | #![deny(warnings)]
| ^^^^^^^^
= note: #[deny(unused_imports)] implied by #[deny(warnings)]
error[E0277]: the trait bound `[closure@examples/proxy.rs:21:23: 34:6 client_main:_, out_addr_clone:_]: hyper::service::MakeService<&'a hyper::server::conn::AddrStream>` is not satisfied
--> examples/proxy.rs:37:10
|
37 | .serve(new_service)
| ^^^^^ the trait `hyper::service::MakeService<&'a hyper::server::conn::AddrStream>` is not implemented for `[closure@examples/proxy.rs:21:23: 34:6 client_main:_, out_addr_clone:_]`
|
= note: required because of the requirements on the impl of `hyper::service::MakeServiceRef<hyper::server::conn::AddrStream>` for `[closure@examples/proxy.rs:21:23: 34:6 client_main:_, out_addr_clone:_]`
error[E0599]: no method named `map_err` found for type `hyper::Server<hyper::server::conn::AddrIncoming, [closure@examples/proxy.rs:21:23: 34:6 client_main:_, out_addr_clone:_]>` in the current scope
--> examples/proxy.rs:38:10
|
38 | .map_err(|e| eprintln!("server error: {}", e));
| ^^^^^^^
error: aborting due to 4 previous errors
Some errors have detailed explanations: E0277, E0425, E0599.
For more information about an error, try `rustc --explain E0277`.
error: Could not compile `hyper`.
To learn more, run the command again with --verbose.
````
This broke clippy CI https://github.com/rust-lang/rust-clippy/pull/4270
|
hyperium__hyper-1865
|
diff --git a/.travis.yml b/.travis.yml
--- a/.travis.yml
+++ b/.travis.yml
@@ -25,7 +25,7 @@ matrix:
# fi
script:
- - cargo build $FEATURES
+ - cargo build $FEATURES --all-targets
# Disable tests temporarily
# - 'if [ "$BUILD_ONLY" != "1" ]; then cargo test $FEATURES -- --test-threads=1; fi'
# - 'if [ $TRAVIS_RUST_VERSION = nightly ]; then for f in ./benches/*.rs; do cargo test --bench $(basename $f .rs) $FEATURES; done; fi'
diff --git a/Cargo.toml b/Cargo.toml
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -114,66 +114,66 @@ name = "params"
path = "examples/params.rs"
required-features = ["runtime"]
-[[example]]
-name = "proxy"
-path = "examples/proxy.rs"
-required-features = ["runtime"]
-
-[[example]]
-name = "send_file"
-path = "examples/send_file.rs"
-required-features = ["runtime"]
-
-[[example]]
-name = "single_threaded"
-path = "examples/single_threaded.rs"
-required-features = ["runtime"]
-
-[[example]]
-name = "state"
-path = "examples/state.rs"
-required-features = ["runtime"]
-
-[[example]]
-name = "upgrades"
-path = "examples/upgrades.rs"
-required-features = ["runtime"]
-
-
-[[example]]
-name = "web_api"
-path = "examples/web_api.rs"
-required-features = ["runtime"]
-
-
-[[bench]]
-name = "end_to_end"
-path = "benches/end_to_end.rs"
-required-features = ["runtime"]
-
-[[bench]]
-name = "pipeline"
-path = "benches/pipeline.rs"
-required-features = ["runtime"]
-
-[[bench]]
-name = "server"
-path = "benches/server.rs"
-required-features = ["runtime"]
-
-
-[[test]]
-name = "client"
-path = "tests/client.rs"
-required-features = ["runtime"]
-
-[[test]]
-name = "integration"
-path = "tests/integration.rs"
-required-features = ["runtime"]
-
-[[test]]
-name = "server"
-path = "tests/server.rs"
-required-features = ["runtime"]
+#[[example]]
+#name = "proxy"
+#path = "examples/proxy.rs"
+#required-features = ["runtime"]
+
+#[[example]]
+#name = "send_file"
+#path = "examples/send_file.rs"
+#required-features = ["runtime"]
+
+#[[example]]
+#name = "single_threaded"
+#path = "examples/single_threaded.rs"
+#required-features = ["runtime"]
+
+#[[example]]
+#name = "state"
+#path = "examples/state.rs"
+#required-features = ["runtime"]
+
+#[[example]]
+#name = "upgrades"
+#path = "examples/upgrades.rs"
+#required-features = ["runtime"]
+
+
+#[[example]]
+#name = "web_api"
+#path = "examples/web_api.rs"
+#required-features = ["runtime"]
+
+
+#[[bench]]
+#name = "end_to_end"
+#path = "benches/end_to_end.rs"
+#required-features = ["runtime"]
+
+#[[bench]]
+#name = "pipeline"
+#path = "benches/pipeline.rs"
+#required-features = ["runtime"]
+
+#[[bench]]
+#name = "server"
+#path = "benches/server.rs"
+#required-features = ["runtime"]
+
+
+#[[test]]
+#name = "client"
+#path = "tests/client.rs"
+#required-features = ["runtime"]
+
+#[[test]]
+#name = "integration"
+#path = "tests/integration.rs"
+#required-features = ["runtime"]
+
+#[[test]]
+#name = "server"
+#path = "tests/server.rs"
+#required-features = ["runtime"]
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -549,10 +549,20 @@ impl Sink for Sender {
}
*/
-#[test]
-fn test_body_stream_concat() {
- let body = Body::from("hello world");
-
- let total = body.concat2().wait().unwrap();
- assert_eq!(total.as_ref(), b"hello world");
+#[cfg(test)]
+mod tests {
+ // FIXME: re-implement tests with `async/await`, this import should
+ // trigger a warning to remind us
+ use crate::Error;
+
+ /*
+ use super::*;
+ #[test]
+ fn test_body_stream_concat() {
+ let body = Body::from("hello world");
+
+ let total = body.concat2().wait().unwrap();
+ assert_eq!(total.as_ref(), b"hello world");
+ }
+ */
}
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -569,6 +569,10 @@ impl ConnectingTcp {
#[cfg(test)]
mod tests {
+ // FIXME: re-implement tests with `async/await`, this import should
+ // trigger a warning to remind us
+ use crate::Error;
+ /*
use std::io;
use futures::Future;
use super::{Connect, Destination, HttpConnector};
diff --git a/src/client/connect/http.rs b/src/client/connect/http.rs
--- a/src/client/connect/http.rs
+++ b/src/client/connect/http.rs
@@ -755,5 +759,6 @@ mod tests {
(reachable, duration)
}
}
+ */
}
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -250,6 +250,10 @@ impl<T, U> Callback<T, U> {
#[cfg(test)]
mod tests {
+ // FIXME: re-implement tests with `async/await`, this import should
+ // trigger a warning to remind us
+ use crate::Error;
+ /*
extern crate pretty_env_logger;
#[cfg(feature = "nightly")]
extern crate test;
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -367,4 +371,5 @@ mod tests {
rx.taker.cancel();
})
}
+ */
}
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -774,6 +774,11 @@ impl<T> WeakOpt<T> {
#[cfg(test)]
mod tests {
+ // FIXME: re-implement tests with `async/await`, this import should
+ // trigger a warning to remind us
+ use crate::Error;
+
+ /*
use std::sync::Arc;
use std::time::Duration;
use futures::{Async, Future};
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -998,4 +1003,5 @@ mod tests {
assert!(!pool.locked().idle.contains_key(&key));
}
+ */
}
diff --git a/src/client/tests.rs b/src/client/tests.rs
--- a/src/client/tests.rs
+++ b/src/client/tests.rs
@@ -1,3 +1,7 @@
+// FIXME: re-implement tests with `async/await`, this import should
+// trigger a warning to remind us
+use super::Client;
+/*
#![cfg(feature = "runtime")]
extern crate pretty_env_logger;
diff --git a/src/client/tests.rs b/src/client/tests.rs
--- a/src/client/tests.rs
+++ b/src/client/tests.rs
@@ -267,3 +271,4 @@ fn bench_http1_get_10b(b: &mut test::Bencher) {
rt.block_on(res1.join(srv1)).expect("res1");
});
}
+*/
diff --git a/src/common/drain.rs b/src/common/drain.rs
--- a/src/common/drain.rs
+++ b/src/common/drain.rs
@@ -121,6 +121,11 @@ where
#[cfg(test)]
mod tests {
+ // FIXME: re-implement tests with `async/await`, this import should
+ // trigger a warning to remind us
+ use crate::Error;
+
+ /*
use futures::{future, Async, Future, Poll};
use super::*;
diff --git a/src/common/drain.rs b/src/common/drain.rs
--- a/src/common/drain.rs
+++ b/src/common/drain.rs
@@ -235,5 +240,6 @@ mod tests {
Ok::<_, ()>(())
}).wait().unwrap();
}
+ */
}
diff --git a/src/common/io/rewind.rs b/src/common/io/rewind.rs
--- a/src/common/io/rewind.rs
+++ b/src/common/io/rewind.rs
@@ -124,6 +124,10 @@ where
#[cfg(test)]
mod tests {
+ // FIXME: re-implement tests with `async/await`, this import should
+ // trigger a warning to remind us
+ use super::Rewind;
+ /*
use super::*;
extern crate tokio_mockstream;
use self::tokio_mockstream::MockStream;
diff --git a/src/common/io/rewind.rs b/src/common/io/rewind.rs
--- a/src/common/io/rewind.rs
+++ b/src/common/io/rewind.rs
@@ -212,4 +216,5 @@ mod tests {
stream.read(&mut o2[cnt..]).unwrap();
assert_eq!(&o2, &bs);
}
+ */
}
diff --git a/src/mock.rs b/src/mock.rs
--- a/src/mock.rs
+++ b/src/mock.rs
@@ -1,3 +1,7 @@
+// FIXME: re-implement tests with `async/await`, this import should
+// trigger a warning to remind us
+use crate::Error;
+/*
#[cfg(feature = "runtime")]
use std::collections::HashMap;
use std::cmp;
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -884,7 +884,7 @@ impl State {
#[cfg(test)]
//TODO: rewrite these using dispatch
mod tests {
-
+ /*
#[cfg(feature = "nightly")]
#[bench]
fn bench_read_head_short(b: &mut ::test::Bencher) {
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -918,6 +918,7 @@ mod tests {
conn.state.reading = Reading::Init;
});
}
+ */
/*
use futures::{Async, Future, Stream, Sink};
use futures::future;
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -319,6 +319,10 @@ impl StdError for IncompleteBody {
#[cfg(test)]
mod tests {
+ // FIXME: re-implement tests with `async/await`, this import should
+ // trigger a warning to remind us
+ use crate::Error;
+ /*
use std::io;
use std::io::Write;
use super::Decoder;
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -535,5 +539,5 @@ mod tests {
let content = "foobar";
all_async_cases(content, content, Decoder::eof());
}
-
+ */
}
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -598,6 +598,10 @@ where
#[cfg(test)]
mod tests {
+ // FIXME: re-implement tests with `async/await`, this import should
+ // trigger a warning to remind us
+ use crate::Error;
+ /*
extern crate pretty_env_logger;
use super::*;
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -656,4 +660,5 @@ mod tests {
Ok::<(), ()>(())
}).wait().unwrap();
}
+ */
}
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -650,6 +650,10 @@ impl<T: Buf> Buf for BufDeque<T> {
#[cfg(test)]
mod tests {
+ // FIXME: re-implement tests with `async/await`, this import should
+ // trigger a warning to remind us
+ use crate::Error;
+ /*
use super::*;
use std::io::Read;
use crate::mock::AsyncIo;
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -657,8 +661,7 @@ mod tests {
#[cfg(feature = "nightly")]
use test::Bencher;
- #[cfg(test)]
- impl<T: Read> MemRead for crate::mock::AsyncIo<T> {
+ impl<T: Read> MemRead for AsyncIo<T> {
fn read_mem(&mut self, len: usize) -> Poll<Bytes, io::Error> {
let mut v = vec![0; len];
let n = try_nb!(self.read(v.as_mut_slice()));
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -900,4 +903,5 @@ mod tests {
write_buf.headers.bytes.clear();
})
}
+ */
}
|
hyperium/hyper
|
cc7d3058e84b63d26697026f44cdc153b208de9a
|
[
"1777"
] |
0.1
|
79ae89e066f5fbfc1ce3612299671a14f7b35230
|
diff --git a/.travis.yml b/.travis.yml
--- a/.travis.yml
+++ b/.travis.yml
@@ -13,7 +13,7 @@ matrix:
- rust: stable
env: FEATURES="--no-default-features"
# Minimum Supported Rust Version
- - rust: 1.27.0
+ - rust: 1.31.0
env: FEATURES="--no-default-features --features runtime" BUILD_ONLY="1"
before_script:
diff --git a/Cargo.toml b/Cargo.toml
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -10,6 +10,7 @@ license = "MIT"
authors = ["Sean McArthur <sean@seanmonstar.com>"]
keywords = ["http", "hyper", "hyperium"]
categories = ["network-programming", "web-programming::http-client", "web-programming::http-server"]
+edition = "2018"
publish = false
diff --git a/benches/end_to_end.rs b/benches/end_to_end.rs
--- a/benches/end_to_end.rs
+++ b/benches/end_to_end.rs
@@ -236,7 +236,7 @@ fn spawn_hello(rt: &mut Runtime, opts: &Opts) -> SocketAddr {
let body = opts.response_body;
let srv = Server::bind(&addr)
- .http2_only(opts.http2);
+ .http2_only(opts.http2)
.http2_initial_stream_window_size_(opts.http2_stream_window)
.http2_initial_connection_window_size_(opts.http2_conn_window)
.serve(move || {
diff --git a/build.rs b/build.rs
--- a/build.rs
+++ b/build.rs
@@ -4,9 +4,6 @@ use rustc_version::{version, Version};
fn main() {
let version = version().unwrap();
- if version >= Version::parse("1.30.0").unwrap() {
- println!("cargo:rustc-cfg=error_source");
- }
if version >= Version::parse("1.34.0").unwrap() {
println!("cargo:rustc-cfg=try_from");
}
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -9,12 +9,12 @@ use tokio_buf::SizeHint;
use h2;
use http::HeaderMap;
-use common::Never;
+use crate::common::Never;
use super::internal::{FullDataArg, FullDataRet};
use super::{Chunk, Payload};
-use upgrade::OnUpgrade;
+use crate::upgrade::OnUpgrade;
-type BodySender = mpsc::Sender<Result<Chunk, ::Error>>;
+type BodySender = mpsc::Sender<Result<Chunk, crate::Error>>;
/// A stream of `Chunk`s, used when receiving bodies.
///
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -34,7 +34,7 @@ enum Kind {
Chan {
content_length: Option<u64>,
abort_rx: oneshot::Receiver<()>,
- rx: mpsc::Receiver<Result<Chunk, ::Error>>,
+ rx: mpsc::Receiver<Result<Chunk, crate::Error>>,
},
H2 {
content_length: Option<u64>,
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -200,7 +200,7 @@ impl Body {
}))
}
- fn poll_eof(&mut self) -> Poll<Option<Chunk>, ::Error> {
+ fn poll_eof(&mut self) -> Poll<Option<Chunk>, crate::Error> {
match self.take_delayed_eof() {
Some(DelayEof::NotEof(mut delay)) => {
match self.poll_inner() {
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -238,7 +238,7 @@ impl Body {
}
}
- fn poll_inner(&mut self) -> Poll<Option<Chunk>, ::Error> {
+ fn poll_inner(&mut self) -> Poll<Option<Chunk>, crate::Error> {
match self.kind {
Kind::Once(ref mut val) => Ok(Async::Ready(val.take())),
Kind::Chan {
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -247,7 +247,7 @@ impl Body {
ref mut abort_rx,
} => {
if let Ok(Async::Ready(())) = abort_rx.poll() {
- return Err(::Error::new_body_write("body write aborted"));
+ return Err(crate::Error::new_body_write("body write aborted"));
}
match rx.poll().expect("mpsc cannot error") {
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -267,16 +267,16 @@ impl Body {
recv: ref mut h2, ..
} => h2
.poll()
- .map(|async| {
- async.map(|opt| {
+ .map(|r#async| {
+ r#async.map(|opt| {
opt.map(|bytes| {
let _ = h2.release_capacity().release_capacity(bytes.len());
Chunk::from(bytes)
})
})
})
- .map_err(::Error::new_body),
- Kind::Wrapped(ref mut s) => s.poll().map_err(::Error::new_body),
+ .map_err(crate::Error::new_body),
+ Kind::Wrapped(ref mut s) => s.poll().map_err(crate::Error::new_body),
}
}
}
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -291,7 +291,7 @@ impl Default for Body {
impl Payload for Body {
type Data = Chunk;
- type Error = ::Error;
+ type Error = crate::Error;
fn poll_data(&mut self) -> Poll<Option<Self::Data>, Self::Error> {
self.poll_eof()
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -301,7 +301,7 @@ impl Payload for Body {
match self.kind {
Kind::H2 {
recv: ref mut h2, ..
- } => h2.poll_trailers().map_err(::Error::new_h2),
+ } => h2.poll_trailers().map_err(crate::Error::new_h2),
_ => Ok(Async::Ready(None)),
}
}
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -336,7 +336,7 @@ impl Payload for Body {
impl ::http_body::Body for Body {
type Data = Chunk;
- type Error = ::Error;
+ type Error = crate::Error;
fn poll_data(&mut self) -> Poll<Option<Self::Data>, Self::Error> {
<Self as Payload>::poll_data(self)
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -366,7 +366,7 @@ impl ::http_body::Body for Body {
impl Stream for Body {
type Item = Chunk;
- type Error = ::Error;
+ type Error = crate::Error;
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
self.poll_data()
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -395,13 +395,13 @@ impl fmt::Debug for Body {
impl Sender {
/// Check to see if this `Sender` can send more data.
- pub fn poll_ready(&mut self) -> Poll<(), ::Error> {
+ pub fn poll_ready(&mut self) -> Poll<(), crate::Error> {
match self.abort_tx.poll_cancel() {
- Ok(Async::Ready(())) | Err(_) => return Err(::Error::new_closed()),
+ Ok(Async::Ready(())) | Err(_) => return Err(crate::Error::new_closed()),
Ok(Async::NotReady) => (),
}
- self.tx.poll_ready().map_err(|_| ::Error::new_closed())
+ self.tx.poll_ready().map_err(|_| crate::Error::new_closed())
}
/// Sends data on this channel.
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -422,14 +422,14 @@ impl Sender {
let _ = self.abort_tx.send(());
}
- pub(crate) fn send_error(&mut self, err: ::Error) {
+ pub(crate) fn send_error(&mut self, err: crate::Error) {
let _ = self.tx.try_send(Err(err));
}
}
impl Sink for Sender {
type SinkItem = Chunk;
- type SinkError = ::Error;
+ type SinkError = crate::Error;
fn poll_complete(&mut self) -> Poll<(), Self::SinkError> {
Ok(Async::Ready(()))
diff --git a/src/body/body.rs b/src/body/body.rs
--- a/src/body/body.rs
+++ b/src/body/body.rs
@@ -438,7 +438,7 @@ impl Sink for Sender {
fn start_send(&mut self, msg: Chunk) -> StartSend<Self::SinkItem, Self::SinkError> {
match self.poll_ready()? {
Async::Ready(_) => {
- self.send_data(msg).map_err(|_| ::Error::new_closed())?;
+ self.send_data(msg).map_err(|_| crate::Error::new_closed())?;
Ok(AsyncSink::Ready)
}
Async::NotReady => Ok(AsyncSink::NotReady(msg)),
diff --git a/src/body/payload.rs b/src/body/payload.rs
--- a/src/body/payload.rs
+++ b/src/body/payload.rs
@@ -65,7 +65,7 @@ pub trait Payload: Send + 'static {
// The only thing a user *could* do is reference the method, but DON'T
// DO THAT! :)
#[doc(hidden)]
- fn __hyper_full_data(&mut self, FullDataArg) -> FullDataRet<Self::Data> {
+ fn __hyper_full_data(&mut self, _: FullDataArg) -> FullDataRet<Self::Data> {
FullDataRet(None)
}
}
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -18,12 +18,12 @@ use futures::future::{self, Either, Executor};
use h2;
use tokio_io::{AsyncRead, AsyncWrite};
-use body::Payload;
-use common::Exec;
-use upgrade::Upgraded;
-use proto;
+use crate::body::Payload;
+use crate::common::Exec;
+use crate::upgrade::Upgraded;
+use crate::proto;
use super::dispatch;
-use {Body, Request, Response};
+use crate::{Body, Request, Response};
type Http1Dispatcher<T, B, R> = proto::dispatch::Dispatcher<
proto::dispatch::Client<B>,
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -39,7 +39,7 @@ type ConnEither<T, B> = Either<
/// Returns a `Handshake` future over some IO.
///
/// This is a shortcut for `Builder::new().handshake(io)`.
-pub fn handshake<T>(io: T) -> Handshake<T, ::Body>
+pub fn handshake<T>(io: T) -> Handshake<T, crate::Body>
where
T: AsyncRead + AsyncWrite + Send + 'static,
{
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -98,7 +98,7 @@ pub struct Handshake<T, B> {
pub struct ResponseFuture {
// for now, a Box is used to hide away the internal `B`
// that can be returned if canceled
- inner: Box<dyn Future<Item=Response<Body>, Error=::Error> + Send>,
+ inner: Box<dyn Future<Item=Response<Body>, Error=crate::Error> + Send>,
}
/// Deconstructed parts of a `Connection`.
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -145,7 +145,7 @@ impl<B> SendRequest<B>
/// Polls to determine whether this sender can be used yet for a request.
///
/// If the associated connection is closed, this returns an Error.
- pub fn poll_ready(&mut self) -> Poll<(), ::Error> {
+ pub fn poll_ready(&mut self) -> Poll<(), crate::Error> {
self.dispatch.poll_ready()
}
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -235,7 +235,7 @@ where
},
Err(_req) => {
debug!("connection was not ready");
- let err = ::Error::new_canceled().with("connection was not ready");
+ let err = crate::Error::new_canceled().with("connection was not ready");
Either::B(future::err(err))
}
};
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -245,7 +245,7 @@ where
}
}
- pub(crate) fn send_request_retryable(&mut self, req: Request<B>) -> impl Future<Item = Response<Body>, Error = (::Error, Option<Request<B>>)>
+ pub(crate) fn send_request_retryable(&mut self, req: Request<B>) -> impl Future<Item = Response<Body>, Error = (crate::Error, Option<Request<B>>)>
where
B: Send,
{
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -262,7 +262,7 @@ where
},
Err(req) => {
debug!("connection was not ready");
- let err = ::Error::new_canceled().with("connection was not ready");
+ let err = crate::Error::new_canceled().with("connection was not ready");
Either::B(future::err((err, Some(req))))
}
}
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -305,7 +305,7 @@ impl<B> Http2SendRequest<B>
where
B: Payload + 'static,
{
- pub(super) fn send_request_retryable(&mut self, req: Request<B>) -> impl Future<Item=Response<Body>, Error=(::Error, Option<Request<B>>)>
+ pub(super) fn send_request_retryable(&mut self, req: Request<B>) -> impl Future<Item=Response<Body>, Error=(crate::Error, Option<Request<B>>)>
where
B: Send,
{
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -322,7 +322,7 @@ where
},
Err(req) => {
debug!("connection was not ready");
- let err = ::Error::new_canceled().with("connection was not ready");
+ let err = crate::Error::new_canceled().with("connection was not ready");
Either::B(future::err((err, Some(req))))
}
}
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -380,7 +380,7 @@ where
/// Use [`poll_fn`](https://docs.rs/futures/0.1.25/futures/future/fn.poll_fn.html)
/// and [`try_ready!`](https://docs.rs/futures/0.1.25/futures/macro.try_ready.html)
/// to work with this function; or use the `without_shutdown` wrapper.
- pub fn poll_without_shutdown(&mut self) -> Poll<(), ::Error> {
+ pub fn poll_without_shutdown(&mut self) -> Poll<(), crate::Error> {
match self.inner.as_mut().expect("already upgraded") {
&mut Either::A(ref mut h1) => {
h1.poll_without_shutdown()
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -393,9 +393,9 @@ where
/// Prevent shutdown of the underlying IO object at the end of service the request,
/// instead run `into_parts`. This is a convenience wrapper over `poll_without_shutdown`.
- pub fn without_shutdown(self) -> impl Future<Item=Parts<T>, Error=::Error> {
+ pub fn without_shutdown(self) -> impl Future<Item=Parts<T>, Error=crate::Error> {
let mut conn = Some(self);
- ::futures::future::poll_fn(move || -> ::Result<_> {
+ ::futures::future::poll_fn(move || -> crate::Result<_> {
try_ready!(conn.as_mut().unwrap().poll_without_shutdown());
Ok(conn.take().unwrap().into_parts().into())
})
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -408,7 +408,7 @@ where
B: Payload + 'static,
{
type Item = ();
- type Error = ::Error;
+ type Error = crate::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
match try_ready!(self.inner.poll()) {
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -552,7 +552,7 @@ where
B: Payload + 'static,
{
type Item = (SendRequest<B>, Connection<T, B>);
- type Error = ::Error;
+ type Error = crate::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
let io = self.io.take().expect("polled more than once");
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -601,7 +601,7 @@ impl<T, B> fmt::Debug for Handshake<T, B> {
impl Future for ResponseFuture {
type Item = Response<Body>;
- type Error = ::Error;
+ type Error = crate::Error;
#[inline]
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -620,7 +620,7 @@ impl fmt::Debug for ResponseFuture {
impl<B> Future for WhenReady<B> {
type Item = SendRequest<B>;
- type Error = ::Error;
+ type Error = crate::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
let mut tx = self.tx.take().expect("polled after complete");
diff --git a/src/client/connect/mod.rs b/src/client/connect/mod.rs
--- a/src/client/connect/mod.rs
+++ b/src/client/connect/mod.rs
@@ -11,7 +11,7 @@ use std::{fmt, mem};
use bytes::{BufMut, Bytes, BytesMut};
use futures::Future;
-use http::{uri, Response, Uri};
+use ::http::{uri, Response, Uri};
use tokio_io::{AsyncRead, AsyncWrite};
#[cfg(feature = "runtime")] pub mod dns;
diff --git a/src/client/connect/mod.rs b/src/client/connect/mod.rs
--- a/src/client/connect/mod.rs
+++ b/src/client/connect/mod.rs
@@ -68,9 +68,9 @@ impl Destination {
///
/// Returns an error if the uri contains no authority or
/// no scheme.
- pub fn try_from_uri(uri: Uri) -> ::Result<Self> {
- uri.authority_part().ok_or(::error::Parse::Uri)?;
- uri.scheme_part().ok_or(::error::Parse::Uri)?;
+ pub fn try_from_uri(uri: Uri) -> crate::Result<Self> {
+ uri.authority_part().ok_or(crate::error::Parse::Uri)?;
+ uri.scheme_part().ok_or(crate::error::Parse::Uri)?;
Ok(Destination { uri })
}
diff --git a/src/client/connect/mod.rs b/src/client/connect/mod.rs
--- a/src/client/connect/mod.rs
+++ b/src/client/connect/mod.rs
@@ -116,8 +116,8 @@ impl Destination {
/// # Error
///
/// Returns an error if the string is not a valid scheme.
- pub fn set_scheme(&mut self, scheme: &str) -> ::Result<()> {
- let scheme = scheme.parse().map_err(::error::Parse::from)?;
+ pub fn set_scheme(&mut self, scheme: &str) -> crate::Result<()> {
+ let scheme = scheme.parse().map_err(crate::error::Parse::from)?;
self.update_uri(move |parts| {
parts.scheme = Some(scheme);
})
diff --git a/src/client/connect/mod.rs b/src/client/connect/mod.rs
--- a/src/client/connect/mod.rs
+++ b/src/client/connect/mod.rs
@@ -143,19 +143,19 @@ impl Destination {
/// # Error
///
/// Returns an error if the string is not a valid hostname.
- pub fn set_host(&mut self, host: &str) -> ::Result<()> {
+ pub fn set_host(&mut self, host: &str) -> crate::Result<()> {
// Prevent any userinfo setting, it's bad!
if host.contains('@') {
- return Err(::error::Parse::Uri.into());
+ return Err(crate::error::Parse::Uri.into());
}
let auth = if let Some(port) = self.port() {
let bytes = Bytes::from(format!("{}:{}", host, port));
uri::Authority::from_shared(bytes)
- .map_err(::error::Parse::from)?
+ .map_err(crate::error::Parse::from)?
} else {
- let auth = host.parse::<uri::Authority>().map_err(::error::Parse::from)?;
+ let auth = host.parse::<uri::Authority>().map_err(crate::error::Parse::from)?;
if auth.port_part().is_some() { // std::uri::Authority::Uri
- return Err(::error::Parse::Uri.into());
+ return Err(crate::error::Parse::Uri.into());
}
auth
};
diff --git a/src/client/connect/mod.rs b/src/client/connect/mod.rs
--- a/src/client/connect/mod.rs
+++ b/src/client/connect/mod.rs
@@ -218,7 +218,7 @@ impl Destination {
.expect("valid uri should be valid with port");
}
- fn update_uri<F>(&mut self, f: F) -> ::Result<()>
+ fn update_uri<F>(&mut self, f: F) -> crate::Result<()>
where
F: FnOnce(&mut uri::Parts)
{
diff --git a/src/client/connect/mod.rs b/src/client/connect/mod.rs
--- a/src/client/connect/mod.rs
+++ b/src/client/connect/mod.rs
@@ -236,7 +236,7 @@ impl Destination {
},
Err(err) => {
self.uri = old_uri;
- Err(::error::Parse::from(err).into())
+ Err(crate::error::Parse::from(err).into())
},
}
}
diff --git a/src/client/connect/mod.rs b/src/client/connect/mod.rs
--- a/src/client/connect/mod.rs
+++ b/src/client/connect/mod.rs
@@ -254,7 +254,7 @@ impl Destination {
#[cfg(try_from)]
impl TryFrom<Uri> for Destination {
- type Error = ::error::Error;
+ type Error = crate::error::Error;
fn try_from(uri: Uri) -> Result<Self, Self::Error> {
Destination::try_from_uri(uri)
diff --git a/src/client/connect/mod.rs b/src/client/connect/mod.rs
--- a/src/client/connect/mod.rs
+++ b/src/client/connect/mod.rs
@@ -325,7 +325,7 @@ impl Connected {
// ===== impl Extra =====
impl Extra {
- pub(super) fn set(&self, res: &mut Response<::Body>) {
+ pub(super) fn set(&self, res: &mut Response<crate::Body>) {
self.0.set(res);
}
}
diff --git a/src/client/connect/mod.rs b/src/client/connect/mod.rs
--- a/src/client/connect/mod.rs
+++ b/src/client/connect/mod.rs
@@ -345,7 +345,7 @@ impl fmt::Debug for Extra {
trait ExtraInner: Send + Sync {
fn clone_box(&self) -> Box<dyn ExtraInner>;
- fn set(&self, res: &mut Response<::Body>);
+ fn set(&self, res: &mut Response<crate::Body>);
}
// This indirection allows the `Connected` to have a type-erased "extra" value,
diff --git a/src/client/connect/mod.rs b/src/client/connect/mod.rs
--- a/src/client/connect/mod.rs
+++ b/src/client/connect/mod.rs
@@ -362,7 +362,7 @@ where
Box::new(self.clone())
}
- fn set(&self, res: &mut Response<::Body>) {
+ fn set(&self, res: &mut Response<crate::Body>) {
res.extensions_mut().insert(self.0.clone());
}
}
diff --git a/src/client/connect/mod.rs b/src/client/connect/mod.rs
--- a/src/client/connect/mod.rs
+++ b/src/client/connect/mod.rs
@@ -383,7 +383,7 @@ where
Box::new(self.clone())
}
- fn set(&self, res: &mut Response<::Body>) {
+ fn set(&self, res: &mut Response<crate::Body>) {
self.0.set(res);
res.extensions_mut().insert(self.1.clone());
}
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -2,10 +2,10 @@ use futures::{future, Async, Future, Poll, Stream};
use futures::sync::{mpsc, oneshot};
use want;
-use common::Never;
+use crate::common::Never;
-pub type RetryPromise<T, U> = oneshot::Receiver<Result<U, (::Error, Option<T>)>>;
-pub type Promise<T> = oneshot::Receiver<Result<T, ::Error>>;
+pub type RetryPromise<T, U> = oneshot::Receiver<Result<U, (crate::Error, Option<T>)>>;
+pub type Promise<T> = oneshot::Receiver<Result<T, crate::Error>>;
pub fn channel<T, U>() -> (Sender<T, U>, Receiver<T, U>) {
let (tx, rx) = mpsc::unbounded();
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -51,9 +51,9 @@ pub struct UnboundedSender<T, U> {
}
impl<T, U> Sender<T, U> {
- pub fn poll_ready(&mut self) -> Poll<(), ::Error> {
+ pub fn poll_ready(&mut self) -> Poll<(), crate::Error> {
self.giver.poll_want()
- .map_err(|_| ::Error::new_closed())
+ .map_err(|_| crate::Error::new_closed())
}
pub fn is_ready(&self) -> bool {
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -167,14 +167,14 @@ struct Envelope<T, U>(Option<(T, Callback<T, U>)>);
impl<T, U> Drop for Envelope<T, U> {
fn drop(&mut self) {
if let Some((val, cb)) = self.0.take() {
- let _ = cb.send(Err((::Error::new_canceled().with("connection closed"), Some(val))));
+ let _ = cb.send(Err((crate::Error::new_canceled().with("connection closed"), Some(val))));
}
}
}
pub enum Callback<T, U> {
- Retry(oneshot::Sender<Result<U, (::Error, Option<T>)>>),
- NoRetry(oneshot::Sender<Result<U, ::Error>>),
+ Retry(oneshot::Sender<Result<U, (crate::Error, Option<T>)>>),
+ NoRetry(oneshot::Sender<Result<U, crate::Error>>),
}
impl<T, U> Callback<T, U> {
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -192,7 +192,7 @@ impl<T, U> Callback<T, U> {
}
}
- pub(crate) fn send(self, val: Result<U, (::Error, Option<T>)>) {
+ pub(crate) fn send(self, val: Result<U, (crate::Error, Option<T>)>) {
match self {
Callback::Retry(tx) => {
let _ = tx.send(val);
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -205,7 +205,7 @@ impl<T, U> Callback<T, U> {
pub(crate) fn send_when(
self,
- mut when: impl Future<Item=U, Error=(::Error, Option<T>)>,
+ mut when: impl Future<Item=U, Error=(crate::Error, Option<T>)>,
) -> impl Future<Item=(), Error=()> {
let mut cb = Some(self);
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -89,8 +89,8 @@ use http::{Method, Request, Response, Uri, Version};
use http::header::{HeaderValue, HOST};
use http::uri::Scheme;
-use body::{Body, Payload};
-use common::{lazy as hyper_lazy, Lazy};
+use crate::body::{Body, Payload};
+use crate::common::{lazy as hyper_lazy, Lazy};
use self::connect::{Alpn, Connect, Connected, Destination};
use self::pool::{Key as PoolKey, Pool, Poolable, Pooled, Reservation};
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -237,7 +237,7 @@ where C: Connect + Sync + 'static,
Version::HTTP_11 => (),
Version::HTTP_10 => if is_http_connect {
warn!("CONNECT is not allowed for HTTP/1.0");
- return ResponseFuture::new(Box::new(future::err(::Error::new_user_unsupported_request_method())));
+ return ResponseFuture::new(Box::new(future::err(crate::Error::new_user_unsupported_request_method())));
},
other_h2 @ Version::HTTP_2 => if self.config.ver != Ver::Http2 {
return ResponseFuture::error_version(other_h2);
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -257,7 +257,7 @@ where C: Connect + Sync + 'static,
ResponseFuture::new(Box::new(self.retryably_send_request(req, pool_key)))
}
- fn retryably_send_request(&self, req: Request<B>, pool_key: PoolKey) -> impl Future<Item=Response<Body>, Error=::Error> {
+ fn retryably_send_request(&self, req: Request<B>, pool_key: PoolKey) -> impl Future<Item=Response<Body>, Error=crate::Error> {
let client = self.clone();
let uri = req.uri().clone();
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -320,7 +320,7 @@ where C: Connect + Sync + 'static,
};
} else if req.method() == &Method::CONNECT {
debug!("client does not support CONNECT requests over HTTP2");
- return Either::A(future::err(ClientError::Normal(::Error::new_user_unsupported_request_method())));
+ return Either::A(future::err(ClientError::Normal(crate::Error::new_user_unsupported_request_method())));
}
let fut = pooled.send_request_retryable(req)
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -478,7 +478,7 @@ where C: Connect + Sync + 'static,
}
fn connect_to(&self, uri: Uri, pool_key: PoolKey)
- -> impl Lazy<Item=Pooled<PoolClient<B>>, Error=::Error>
+ -> impl Lazy<Item=Pooled<PoolClient<B>>, Error=crate::Error>
{
let executor = self.conn_builder.exec.clone();
let pool = self.pool.clone();
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -498,12 +498,12 @@ where C: Connect + Sync + 'static,
let connecting = match pool.connecting(&pool_key, ver) {
Some(lock) => lock,
None => {
- let canceled = ::Error::new_canceled().with("HTTP/2 connection in progress");
+ let canceled = crate::Error::new_canceled().with("HTTP/2 connection in progress");
return Either::B(future::err(canceled));
}
};
Either::A(connector.connect(dst)
- .map_err(::Error::new_connect)
+ .map_err(crate::Error::new_connect)
.and_then(move |(io, connected)| {
// If ALPN is h2 and we aren't http2_only already,
// then we need to convert our pool checkout into
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -517,7 +517,7 @@ where C: Connect + Sync + 'static,
None => {
// Another connection has already upgraded,
// the pool checkout should finish up for us.
- let canceled = ::Error::new_canceled().with("ALPN upgraded to HTTP/2");
+ let canceled = crate::Error::new_canceled().with("ALPN upgraded to HTTP/2");
return Either::B(future::err(canceled));
}
}
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -583,11 +583,11 @@ impl<C, B> fmt::Debug for Client<C, B> {
/// This is returned by `Client::request` (and `Client::get`).
#[must_use = "futures do nothing unless polled"]
pub struct ResponseFuture {
- inner: Box<dyn Future<Item=Response<Body>, Error=::Error> + Send>,
+ inner: Box<dyn Future<Item=Response<Body>, Error=crate::Error> + Send>,
}
impl ResponseFuture {
- fn new(fut: Box<dyn Future<Item=Response<Body>, Error=::Error> + Send>) -> Self {
+ fn new(fut: Box<dyn Future<Item=Response<Body>, Error=crate::Error> + Send>) -> Self {
Self {
inner: fut,
}
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -595,7 +595,7 @@ impl ResponseFuture {
fn error_version(ver: Version) -> Self {
warn!("Request has unsupported version \"{:?}\"", ver);
- ResponseFuture::new(Box::new(future::err(::Error::new_user_unsupported_version())))
+ ResponseFuture::new(Box::new(future::err(crate::Error::new_user_unsupported_version())))
}
}
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -607,7 +607,7 @@ impl fmt::Debug for ResponseFuture {
impl Future for ResponseFuture {
type Item = Response<Body>;
- type Error = ::Error;
+ type Error = crate::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
self.inner.poll()
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -627,7 +627,7 @@ enum PoolTx<B> {
}
impl<B> PoolClient<B> {
- fn poll_ready(&mut self) -> Poll<(), ::Error> {
+ fn poll_ready(&mut self) -> Poll<(), crate::Error> {
match self.tx {
PoolTx::Http1(ref mut tx) => tx.poll_ready(),
PoolTx::Http2(_) => Ok(Async::Ready(())),
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -661,7 +661,7 @@ impl<B> PoolClient<B> {
}
impl<B: Payload + 'static> PoolClient<B> {
- fn send_request_retryable(&mut self, req: Request<B>) -> impl Future<Item = Response<Body>, Error = (::Error, Option<Request<B>>)>
+ fn send_request_retryable(&mut self, req: Request<B>) -> impl Future<Item = Response<Body>, Error = (crate::Error, Option<Request<B>>)>
where
B: Send,
{
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -713,17 +713,17 @@ where
// FIXME: allow() required due to `impl Trait` leaking types to this lint
#[allow(missing_debug_implementations)]
enum ClientError<B> {
- Normal(::Error),
+ Normal(crate::Error),
Canceled {
connection_reused: bool,
req: Request<B>,
- reason: ::Error,
+ reason: crate::Error,
}
}
impl<B> ClientError<B> {
fn map_with_reused(conn_reused: bool)
- -> impl Fn((::Error, Option<Request<B>>)) -> Self
+ -> impl Fn((crate::Error, Option<Request<B>>)) -> Self
{
move |(err, orig_req)| {
if let Some(req) = orig_req {
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -797,7 +797,7 @@ fn authority_form(uri: &mut Uri) {
};
}
-fn extract_domain(uri: &mut Uri, is_http_connect: bool) -> ::Result<String> {
+fn extract_domain(uri: &mut Uri, is_http_connect: bool) -> crate::Result<String> {
let uri_clone = uri.clone();
match (uri_clone.scheme_part(), uri_clone.authority_part()) {
(Some(scheme), Some(auth)) => {
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -819,7 +819,7 @@ fn extract_domain(uri: &mut Uri, is_http_connect: bool) -> ::Result<String> {
},
_ => {
debug!("Client requires absolute-form URIs, received: {:?}", uri);
- Err(::Error::new_user_absolute_uri_required())
+ Err(crate::Error::new_user_absolute_uri_required())
}
}
}
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -9,7 +9,7 @@ use futures::sync::oneshot;
#[cfg(feature = "runtime")]
use tokio_timer::Interval;
-use common::Exec;
+use crate::common::Exec;
use super::Ver;
// FIXME: allow() required due to `impl Trait` leaking types to this lint
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -75,7 +75,7 @@ struct PoolInner<T> {
// A oneshot channel is used to allow the interval to be notified when
// the Pool completely drops. That way, the interval can cancel immediately.
#[cfg(feature = "runtime")]
- idle_interval_ref: Option<oneshot::Sender<::common::Never>>,
+ idle_interval_ref: Option<oneshot::Sender<crate::common::Never>>,
#[cfg(feature = "runtime")]
exec: Exec,
timeout: Option<Duration>,
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -569,7 +569,7 @@ pub(super) struct Checkout<T> {
}
impl<T: Poolable> Checkout<T> {
- fn poll_waiter(&mut self) -> Poll<Option<Pooled<T>>, ::Error> {
+ fn poll_waiter(&mut self) -> Poll<Option<Pooled<T>>, crate::Error> {
static CANCELED: &str = "pool checkout failed";
if let Some(mut rx) = self.waiter.take() {
match rx.poll() {
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -577,14 +577,14 @@ impl<T: Poolable> Checkout<T> {
if value.is_open() {
Ok(Async::Ready(Some(self.pool.reuse(&self.key, value))))
} else {
- Err(::Error::new_canceled().with(CANCELED))
+ Err(crate::Error::new_canceled().with(CANCELED))
}
},
Ok(Async::NotReady) => {
self.waiter = Some(rx);
Ok(Async::NotReady)
},
- Err(_canceled) => Err(::Error::new_canceled().with(CANCELED)),
+ Err(_canceled) => Err(crate::Error::new_canceled().with(CANCELED)),
}
} else {
Ok(Async::Ready(None))
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -644,7 +644,7 @@ impl<T: Poolable> Checkout<T> {
impl<T: Poolable> Future for Checkout<T> {
type Item = Pooled<T>;
- type Error = ::Error;
+ type Error = crate::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
if let Some(pooled) = try_ready!(self.poll_waiter()) {
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -654,7 +654,7 @@ impl<T: Poolable> Future for Checkout<T> {
if let Some(pooled) = self.checkout() {
Ok(Async::Ready(pooled))
} else if !self.pool.is_enabled() {
- Err(::Error::new_canceled().with("pool is disabled"))
+ Err(crate::Error::new_canceled().with("pool is disabled"))
} else {
Ok(Async::NotReady)
}
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -723,7 +723,7 @@ struct IdleInterval<T> {
// This allows the IdleInterval to be notified as soon as the entire
// Pool is fully dropped, and shutdown. This channel is never sent on,
// but Err(Canceled) will be received when the Pool is dropped.
- pool_drop_notifier: oneshot::Receiver<::common::Never>,
+ pool_drop_notifier: oneshot::Receiver<crate::common::Never>,
}
#[cfg(feature = "runtime")]
diff --git a/src/common/exec.rs b/src/common/exec.rs
--- a/src/common/exec.rs
+++ b/src/common/exec.rs
@@ -3,17 +3,17 @@ use std::sync::Arc;
use futures::future::{Executor, Future};
-use body::Payload;
-use proto::h2::server::H2Stream;
-use server::conn::spawn_all::{NewSvcTask, Watcher};
-use service::Service;
+use crate::body::Payload;
+use crate::proto::h2::server::H2Stream;
+use crate::server::conn::spawn_all::{NewSvcTask, Watcher};
+use crate::service::Service;
pub trait H2Exec<F, B: Payload>: Clone {
- fn execute_h2stream(&self, fut: H2Stream<F, B>) -> ::Result<()>;
+ fn execute_h2stream(&self, fut: H2Stream<F, B>) -> crate::Result<()>;
}
pub trait NewSvcExec<I, N, S: Service, E, W: Watcher<I, S, E>>: Clone {
- fn execute_new_svc(&self, fut: NewSvcTask<I, N, S, E, W>) -> ::Result<()>;
+ fn execute_new_svc(&self, fut: NewSvcTask<I, N, S, E, W>) -> crate::Result<()>;
}
// Either the user provides an executor for background tasks, or we use
diff --git a/src/common/exec.rs b/src/common/exec.rs
--- a/src/common/exec.rs
+++ b/src/common/exec.rs
@@ -27,7 +27,7 @@ pub enum Exec {
// ===== impl Exec =====
impl Exec {
- pub(crate) fn execute<F>(&self, fut: F) -> ::Result<()>
+ pub(crate) fn execute<F>(&self, fut: F) -> crate::Result<()>
where
F: Future<Item=(), Error=()> + Send + 'static,
{
diff --git a/src/common/exec.rs b/src/common/exec.rs
--- a/src/common/exec.rs
+++ b/src/common/exec.rs
@@ -62,7 +62,7 @@ impl Exec {
.spawn(Box::new(fut))
.map_err(|err| {
warn!("executor error: {:?}", err);
- ::Error::new_execute(TokioSpawnError)
+ crate::Error::new_execute(TokioSpawnError)
})
}
#[cfg(not(feature = "runtime"))]
diff --git a/src/common/exec.rs b/src/common/exec.rs
--- a/src/common/exec.rs
+++ b/src/common/exec.rs
@@ -75,7 +75,7 @@ impl Exec {
e.execute(Box::new(fut))
.map_err(|err| {
warn!("executor error: {:?}", err.kind());
- ::Error::new_execute("custom executor failed")
+ crate::Error::new_execute("custom executor failed")
})
},
}
diff --git a/src/common/exec.rs b/src/common/exec.rs
--- a/src/common/exec.rs
+++ b/src/common/exec.rs
@@ -95,7 +95,7 @@ where
H2Stream<F, B>: Future<Item=(), Error=()> + Send + 'static,
B: Payload,
{
- fn execute_h2stream(&self, fut: H2Stream<F, B>) -> ::Result<()> {
+ fn execute_h2stream(&self, fut: H2Stream<F, B>) -> crate::Result<()> {
self.execute(fut)
}
}
diff --git a/src/common/exec.rs b/src/common/exec.rs
--- a/src/common/exec.rs
+++ b/src/common/exec.rs
@@ -106,7 +106,7 @@ where
S: Service,
W: Watcher<I, S, E>,
{
- fn execute_new_svc(&self, fut: NewSvcTask<I, N, S, E, W>) -> ::Result<()> {
+ fn execute_new_svc(&self, fut: NewSvcTask<I, N, S, E, W>) -> crate::Result<()> {
self.execute(fut)
}
}
diff --git a/src/common/exec.rs b/src/common/exec.rs
--- a/src/common/exec.rs
+++ b/src/common/exec.rs
@@ -119,11 +119,11 @@ where
H2Stream<F, B>: Future<Item=(), Error=()>,
B: Payload,
{
- fn execute_h2stream(&self, fut: H2Stream<F, B>) -> ::Result<()> {
+ fn execute_h2stream(&self, fut: H2Stream<F, B>) -> crate::Result<()> {
self.execute(fut)
.map_err(|err| {
warn!("executor error: {:?}", err.kind());
- ::Error::new_execute("custom executor failed")
+ crate::Error::new_execute("custom executor failed")
})
}
}
diff --git a/src/common/exec.rs b/src/common/exec.rs
--- a/src/common/exec.rs
+++ b/src/common/exec.rs
@@ -135,11 +135,11 @@ where
S: Service,
W: Watcher<I, S, E>,
{
- fn execute_new_svc(&self, fut: NewSvcTask<I, N, S, E, W>) -> ::Result<()> {
+ fn execute_new_svc(&self, fut: NewSvcTask<I, N, S, E, W>) -> crate::Result<()> {
self.execute(fut)
.map_err(|err| {
warn!("executor error: {:?}", err.kind());
- ::Error::new_execute("custom executor failed")
+ crate::Error::new_execute("custom executor failed")
})
}
}
diff --git a/src/error.rs b/src/error.rs
--- a/src/error.rs
+++ b/src/error.rs
@@ -133,14 +133,8 @@ impl Error {
self.inner.kind == Kind::IncompleteMessage
}
- #[doc(hidden)]
- #[cfg_attr(error_source, deprecated(note = "use Error::source instead"))]
- pub fn cause2(&self) -> Option<&(dyn StdError + 'static + Sync + Send)> {
- self.inner.cause.as_ref().map(|e| &**e)
- }
-
/// Consumes the error, returning its cause.
- pub fn into_cause(self) -> Option<Box<dyn StdError + Sync + Send>> {
+ pub fn into_cause(self) -> Option<Box<dyn StdError + Send + Sync>> {
self.inner.cause
}
diff --git a/src/error.rs b/src/error.rs
--- a/src/error.rs
+++ b/src/error.rs
@@ -162,28 +156,6 @@ impl Error {
&self.inner.kind
}
- #[cfg(not(error_source))]
- pub(crate) fn h2_reason(&self) -> h2::Reason {
- // Since we don't have access to `Error::source`, we can only
- // look so far...
- let mut cause = self.cause2();
- while let Some(err) = cause {
- if let Some(h2_err) = err.downcast_ref::<h2::Error>() {
- return h2_err
- .reason()
- .unwrap_or(h2::Reason::INTERNAL_ERROR);
- }
-
- cause = err
- .downcast_ref::<Error>()
- .and_then(Error::cause2);
- }
-
- // else
- h2::Reason::INTERNAL_ERROR
- }
-
- #[cfg(error_source)]
pub(crate) fn h2_reason(&self) -> h2::Reason {
// Find an h2::Reason somewhere in the cause stack, if it exists,
// otherwise assume an INTERNAL_ERROR.
diff --git a/src/error.rs b/src/error.rs
--- a/src/error.rs
+++ b/src/error.rs
@@ -370,16 +342,6 @@ impl StdError for Error {
}
}
- #[cfg(not(error_source))]
- fn cause(&self) -> Option<&StdError> {
- self
- .inner
- .cause
- .as_ref()
- .map(|cause| &**cause as &StdError)
- }
-
- #[cfg(error_source)]
fn source(&self) -> Option<&(dyn StdError + 'static)> {
self
.inner
diff --git a/src/lib.rs b/src/lib.rs
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -53,10 +53,10 @@ pub use http::{
Version,
};
-pub use client::Client;
-pub use error::{Result, Error};
-pub use body::{Body, Chunk};
-pub use server::Server;
+pub use crate::client::Client;
+pub use crate::error::{Result, Error};
+pub use crate::body::{Body, Chunk};
+pub use crate::server::Server;
#[macro_use]
mod common;
diff --git a/src/mock.rs b/src/mock.rs
--- a/src/mock.rs
+++ b/src/mock.rs
@@ -13,7 +13,7 @@ use futures::task::{self, Task};
use tokio_io::{AsyncRead, AsyncWrite};
#[cfg(feature = "runtime")]
-use ::client::connect::{Connect, Connected, Destination};
+use crate::client::connect::{Connect, Connected, Destination};
#[derive(Debug)]
pub struct MockCursor {
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -8,9 +8,9 @@ use http::{HeaderMap, Method, Version};
use http::header::{HeaderValue, CONNECTION};
use tokio_io::{AsyncRead, AsyncWrite};
-use ::Chunk;
-use proto::{BodyLength, DecodedLength, MessageHead};
-use headers::connection_keep_alive;
+use crate::Chunk;
+use crate::proto::{BodyLength, DecodedLength, MessageHead};
+use crate::headers::connection_keep_alive;
use super::io::{Buffered};
use super::{EncodedBuf, Encode, Encoder, /*Decode,*/ Decoder, Http1Transaction, ParseContext};
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -84,7 +84,7 @@ where I: AsyncRead + AsyncWrite,
self.io.into_inner()
}
- pub fn pending_upgrade(&mut self) -> Option<::upgrade::Pending> {
+ pub fn pending_upgrade(&mut self) -> Option<crate::upgrade::Pending> {
self.state.upgrade.take()
}
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -129,7 +129,7 @@ where I: AsyncRead + AsyncWrite,
read_buf.len() >= 24 && read_buf[..24] == *H2_PREFACE
}
- pub fn read_head(&mut self) -> Poll<Option<(MessageHead<T::Incoming>, DecodedLength, bool)>, ::Error> {
+ pub fn read_head(&mut self) -> Poll<Option<(MessageHead<T::Incoming>, DecodedLength, bool)>, crate::Error> {
debug_assert!(self.can_read_head());
trace!("Conn::read_head");
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -168,7 +168,7 @@ where I: AsyncRead + AsyncWrite,
Ok(Async::Ready(Some((msg.head, msg.decode, msg.wants_upgrade))))
}
- fn on_read_head_error<Z>(&mut self, e: ::Error) -> Poll<Option<Z>, ::Error> {
+ fn on_read_head_error<Z>(&mut self, e: crate::Error) -> Poll<Option<Z>, crate::Error> {
// If we are currently waiting on a message, then an empty
// message should be reported as an error. If not, it is just
// the connection closing gracefully.
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -233,7 +233,7 @@ where I: AsyncRead + AsyncWrite,
ret
}
- pub fn read_keep_alive(&mut self) -> Poll<(), ::Error> {
+ pub fn read_keep_alive(&mut self) -> Poll<(), crate::Error> {
debug_assert!(!self.can_read_head() && !self.can_read_body());
if self.is_mid_message() {
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -254,22 +254,22 @@ where I: AsyncRead + AsyncWrite,
//
// This should only be called for Clients wanting to enter the idle
// state.
- fn require_empty_read(&mut self) -> Poll<(), ::Error> {
+ fn require_empty_read(&mut self) -> Poll<(), crate::Error> {
debug_assert!(!self.can_read_head() && !self.can_read_body());
debug_assert!(!self.is_mid_message());
debug_assert!(T::is_client());
if !self.io.read_buf().is_empty() {
debug!("received an unexpected {} bytes", self.io.read_buf().len());
- return Err(::Error::new_unexpected_message());
+ return Err(crate::Error::new_unexpected_message());
}
- let num_read = try_ready!(self.force_io_read().map_err(::Error::new_io));
+ let num_read = try_ready!(self.force_io_read().map_err(crate::Error::new_io));
if num_read == 0 {
let ret = if self.should_error_on_eof() {
trace!("found unexpected EOF on busy connection: {:?}", self.state);
- Err(::Error::new_incomplete())
+ Err(crate::Error::new_incomplete())
} else {
trace!("found EOF on idle connection, closing");
Ok(Async::Ready(()))
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -281,10 +281,10 @@ where I: AsyncRead + AsyncWrite,
}
debug!("received unexpected {} bytes on an idle connection", num_read);
- Err(::Error::new_unexpected_message())
+ Err(crate::Error::new_unexpected_message())
}
- fn mid_message_detect_eof(&mut self) -> Poll<(), ::Error> {
+ fn mid_message_detect_eof(&mut self) -> Poll<(), crate::Error> {
debug_assert!(!self.can_read_head() && !self.can_read_body());
debug_assert!(self.is_mid_message());
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -292,12 +292,12 @@ where I: AsyncRead + AsyncWrite,
return Ok(Async::NotReady);
}
- let num_read = try_ready!(self.force_io_read().map_err(::Error::new_io));
+ let num_read = try_ready!(self.force_io_read().map_err(crate::Error::new_io));
if num_read == 0 {
trace!("found unexpected EOF on busy connection: {:?}", self.state);
self.state.close_read();
- Err(::Error::new_incomplete())
+ Err(crate::Error::new_incomplete())
} else {
Ok(Async::Ready(()))
}
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -563,12 +563,12 @@ where I: AsyncRead + AsyncWrite,
//
// - Client: there is nothing we can do
// - Server: if Response hasn't been written yet, we can send a 4xx response
- fn on_parse_error(&mut self, err: ::Error) -> ::Result<()> {
+ fn on_parse_error(&mut self, err: crate::Error) -> crate::Result<()> {
match self.state.writing {
Writing::Init => {
if self.has_h2_prefix() {
- return Err(::Error::new_version_h2())
+ return Err(crate::Error::new_version_h2())
}
if let Some(msg) = T::on_error(&err) {
// Drop the cached headers so as to not trigger a debug
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -623,7 +623,7 @@ where I: AsyncRead + AsyncWrite,
}
}
- pub fn take_error(&mut self) -> ::Result<()> {
+ pub fn take_error(&mut self) -> crate::Result<()> {
if let Some(err) = self.state.error.take() {
Err(err)
} else {
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -631,7 +631,7 @@ where I: AsyncRead + AsyncWrite,
}
}
- pub(super) fn on_upgrade(&mut self) -> ::upgrade::OnUpgrade {
+ pub(super) fn on_upgrade(&mut self) -> crate::upgrade::OnUpgrade {
trace!("{}: prepare possible HTTP upgrade", T::LOG);
self.state.prepare_upgrade()
}
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -658,7 +658,7 @@ struct State {
cached_headers: Option<HeaderMap>,
/// If an error occurs when there wasn't a direct way to return it
/// back to the user, this is set.
- error: Option<::Error>,
+ error: Option<crate::Error>,
/// Current keep-alive status.
keep_alive: KA,
/// If mid-message, the HTTP Method that started it.
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -675,7 +675,7 @@ struct State {
/// State of allowed writes
writing: Writing,
/// An expected pending HTTP upgrade.
- upgrade: Option<::upgrade::Pending>,
+ upgrade: Option<crate::upgrade::Pending>,
/// Either HTTP/1.0 or 1.1 connection
version: Version,
}
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -868,9 +868,9 @@ impl State {
}
}
- fn prepare_upgrade(&mut self) -> ::upgrade::OnUpgrade {
+ fn prepare_upgrade(&mut self) -> crate::upgrade::OnUpgrade {
debug_assert!(self.upgrade.is_none());
- let (tx, rx) = ::upgrade::pending();
+ let (tx, rx) = crate::upgrade::pending();
self.upgrade = Some(tx);
rx
}
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -5,17 +5,17 @@ use futures::{Async, Future, Poll, Stream};
use http::{Request, Response, StatusCode};
use tokio_io::{AsyncRead, AsyncWrite};
-use body::{Body, Payload};
-use body::internal::FullDataArg;
-use common::{Never, YieldNow};
-use proto::{BodyLength, DecodedLength, Conn, Dispatched, MessageHead, RequestHead, RequestLine, ResponseHead};
+use crate::body::{Body, Payload};
+use crate::body::internal::FullDataArg;
+use crate::common::{Never, YieldNow};
+use crate::proto::{BodyLength, DecodedLength, Conn, Dispatched, MessageHead, RequestHead, RequestLine, ResponseHead};
use super::Http1Transaction;
-use service::Service;
+use crate::service::Service;
pub(crate) struct Dispatcher<D, Bs: Payload, I, T> {
conn: Conn<I, Bs::Data, T>,
dispatch: D,
- body_tx: Option<::body::Sender>,
+ body_tx: Option<crate::body::Sender>,
body_rx: Option<Bs>,
is_closing: bool,
/// If the poll loop reaches its max spin count, it will yield by notifying
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -30,7 +30,7 @@ pub(crate) trait Dispatch {
type PollError;
type RecvItem;
fn poll_msg(&mut self) -> Poll<Option<(Self::PollItem, Self::PollBody)>, Self::PollError>;
- fn recv_msg(&mut self, msg: ::Result<(Self::RecvItem, Body)>) -> ::Result<()>;
+ fn recv_msg(&mut self, msg: crate::Result<(Self::RecvItem, Body)>) -> crate::Result<()>;
fn poll_ready(&mut self) -> Poll<(), ()>;
fn should_poll(&self) -> bool;
}
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -41,11 +41,11 @@ pub struct Server<S: Service> {
}
pub struct Client<B> {
- callback: Option<::client::dispatch::Callback<Request<B>, Response<Body>>>,
+ callback: Option<crate::client::dispatch::Callback<Request<B>, Response<Body>>>,
rx: ClientRx<B>,
}
-type ClientRx<B> = ::client::dispatch::Receiver<Request<B>, Response<Body>>;
+type ClientRx<B> = crate::client::dispatch::Receiver<Request<B>, Response<Body>>;
impl<D, Bs, I, T> Dispatcher<D, Bs, I, T>
where
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -80,7 +80,7 @@ where
///
/// This is useful for old-style HTTP upgrades, but ignores
/// newer-style upgrade API.
- pub fn poll_without_shutdown(&mut self) -> Poll<(), ::Error> {
+ pub fn poll_without_shutdown(&mut self) -> Poll<(), crate::Error> {
self.poll_catch(false)
.map(|x| {
x.map(|ds| if let Dispatched::Upgrade(pending) = ds {
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -89,7 +89,7 @@ where
})
}
- fn poll_catch(&mut self, should_shutdown: bool) -> Poll<Dispatched, ::Error> {
+ fn poll_catch(&mut self, should_shutdown: bool) -> Poll<Dispatched, crate::Error> {
self.poll_inner(should_shutdown).or_else(|e| {
// An error means we're shutting down either way.
// We just try to give the error to the user,
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -100,7 +100,7 @@ where
})
}
- fn poll_inner(&mut self, should_shutdown: bool) -> Poll<Dispatched, ::Error> {
+ fn poll_inner(&mut self, should_shutdown: bool) -> Poll<Dispatched, crate::Error> {
T::update_date();
try_ready!(self.poll_loop());
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -110,7 +110,7 @@ where
self.conn.take_error()?;
return Ok(Async::Ready(Dispatched::Upgrade(pending)));
} else if should_shutdown {
- try_ready!(self.conn.shutdown().map_err(::Error::new_shutdown));
+ try_ready!(self.conn.shutdown().map_err(crate::Error::new_shutdown));
}
self.conn.take_error()?;
Ok(Async::Ready(Dispatched::Shutdown))
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -119,7 +119,7 @@ where
}
}
- fn poll_loop(&mut self) -> Poll<(), ::Error> {
+ fn poll_loop(&mut self) -> Poll<(), crate::Error> {
// Limit the looping on this connection, in case it is ready far too
// often, so that other futures don't starve.
//
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -155,7 +155,7 @@ where
}
}
- fn poll_read(&mut self) -> Poll<(), ::Error> {
+ fn poll_read(&mut self) -> Poll<(), crate::Error> {
loop {
if self.is_closing {
return Ok(Async::Ready(()));
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -199,7 +199,7 @@ where
return Ok(Async::NotReady);
}
Err(e) => {
- body.send_error(::Error::new_body(e));
+ body.send_error(crate::Error::new_body(e));
}
}
} else {
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -211,7 +211,7 @@ where
}
}
- fn poll_read_head(&mut self) -> Poll<(), ::Error> {
+ fn poll_read_head(&mut self) -> Poll<(), crate::Error> {
// can dispatch receive, or does it still care about, an incoming message?
match self.dispatch.poll_ready() {
Ok(Async::Ready(())) => (),
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -255,12 +255,12 @@ where
}
}
- fn poll_write(&mut self) -> Poll<(), ::Error> {
+ fn poll_write(&mut self) -> Poll<(), crate::Error> {
loop {
if self.is_closing {
return Ok(Async::Ready(()));
} else if self.body_rx.is_none() && self.conn.can_write_head() && self.dispatch.should_poll() {
- if let Some((head, mut body)) = try_ready!(self.dispatch.poll_msg().map_err(::Error::new_user_service)) {
+ if let Some((head, mut body)) = try_ready!(self.dispatch.poll_msg().map_err(crate::Error::new_user_service)) {
// Check if the body knows its full data immediately.
//
// If so, we can skip a bit of bookkeeping that streaming
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -294,7 +294,7 @@ where
);
continue;
}
- match body.poll_data().map_err(::Error::new_user_body)? {
+ match body.poll_data().map_err(crate::Error::new_user_body)? {
Async::Ready(Some(chunk)) => {
let eos = body.is_end_stream();
if eos {
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -327,10 +327,10 @@ where
}
}
- fn poll_flush(&mut self) -> Poll<(), ::Error> {
+ fn poll_flush(&mut self) -> Poll<(), crate::Error> {
self.conn.flush().map_err(|err| {
debug!("error writing: {}", err);
- ::Error::new_body_write(err)
+ crate::Error::new_body_write(err)
})
}
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -367,7 +367,7 @@ where
Bs: Payload,
{
type Item = Dispatched;
- type Error = ::Error;
+ type Error = crate::Error;
#[inline]
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -421,7 +421,7 @@ where
}
}
- fn recv_msg(&mut self, msg: ::Result<(Self::RecvItem, Body)>) -> ::Result<()> {
+ fn recv_msg(&mut self, msg: crate::Result<(Self::RecvItem, Body)>) -> crate::Result<()> {
let (msg, body) = msg?;
let mut req = Request::new(body);
*req.method_mut() = msg.subject.0;
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -501,7 +501,7 @@ where
}
}
- fn recv_msg(&mut self, msg: ::Result<(Self::RecvItem, Body)>) -> ::Result<()> {
+ fn recv_msg(&mut self, msg: crate::Result<(Self::RecvItem, Body)>) -> crate::Result<()> {
match msg {
Ok((msg, body)) => {
if let Some(cb) = self.callback.take() {
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -515,7 +515,7 @@ where
// Getting here is likely a bug! An error should have happened
// in Conn::require_empty_read() before ever parsing a
// full message!
- Err(::Error::new_unexpected_message())
+ Err(crate::Error::new_unexpected_message())
}
},
Err(err) => {
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -526,7 +526,7 @@ where
trace!("canceling queued request with connection error: {}", err);
// in this case, the message was never even started, so it's safe to tell
// the user that the request was completely canceled
- let _ = cb.send(Err((::Error::new_canceled().with(err), Some(req))));
+ let _ = cb.send(Err((crate::Error::new_canceled().with(err), Some(req))));
Ok(())
} else {
Err(err)
diff --git a/src/proto/h1/encode.rs b/src/proto/h1/encode.rs
--- a/src/proto/h1/encode.rs
+++ b/src/proto/h1/encode.rs
@@ -4,7 +4,7 @@ use bytes::{Buf, IntoBuf};
use bytes::buf::{Chain, Take};
use iovec::IoVec;
-use common::StaticBuf;
+use crate::common::StaticBuf;
use super::io::WriteBuf;
/// Encoders to handle different Transfer-Encodings.
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -136,7 +136,7 @@ where
}
pub(super) fn parse<S>(&mut self, ctx: ParseContext)
- -> Poll<ParsedMessage<S::Incoming>, ::Error>
+ -> Poll<ParsedMessage<S::Incoming>, crate::Error>
where
S: Http1Transaction,
{
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -153,14 +153,14 @@ where
let max = self.read_buf_strategy.max();
if self.read_buf.len() >= max {
debug!("max_buf_size ({}) reached, closing", max);
- return Err(::Error::new_too_large());
+ return Err(crate::Error::new_too_large());
}
},
}
- match try_ready!(self.read_from_io().map_err(::Error::new_io)) {
+ match try_ready!(self.read_from_io().map_err(crate::Error::new_io)) {
0 => {
trace!("parse eof");
- return Err(::Error::new_incomplete());
+ return Err(crate::Error::new_incomplete());
}
_ => {},
}
diff --git a/src/proto/h1/mod.rs b/src/proto/h1/mod.rs
--- a/src/proto/h1/mod.rs
+++ b/src/proto/h1/mod.rs
@@ -1,7 +1,7 @@
use bytes::BytesMut;
use http::{HeaderMap, Method};
-use proto::{MessageHead, BodyLength, DecodedLength};
+use crate::proto::{MessageHead, BodyLength, DecodedLength};
pub(crate) use self::conn::Conn;
pub(crate) use self::dispatch::Dispatcher;
diff --git a/src/proto/h1/mod.rs b/src/proto/h1/mod.rs
--- a/src/proto/h1/mod.rs
+++ b/src/proto/h1/mod.rs
@@ -27,9 +27,9 @@ pub(crate) trait Http1Transaction {
type Outgoing: Default;
const LOG: &'static str;
fn parse(bytes: &mut BytesMut, ctx: ParseContext) -> ParseResult<Self::Incoming>;
- fn encode(enc: Encode<Self::Outgoing>, dst: &mut Vec<u8>) -> ::Result<Encoder>;
+ fn encode(enc: Encode<Self::Outgoing>, dst: &mut Vec<u8>) -> crate::Result<Encoder>;
- fn on_error(err: &::Error) -> Option<MessageHead<Self::Outgoing>>;
+ fn on_error(err: &crate::Error) -> Option<MessageHead<Self::Outgoing>>;
fn is_client() -> bool {
!Self::is_server()
diff --git a/src/proto/h1/mod.rs b/src/proto/h1/mod.rs
--- a/src/proto/h1/mod.rs
+++ b/src/proto/h1/mod.rs
@@ -51,7 +51,7 @@ pub(crate) trait Http1Transaction {
}
/// Result newtype for Http1Transaction::parse.
-pub(crate) type ParseResult<T> = Result<Option<ParsedMessage<T>>, ::error::Parse>;
+pub(crate) type ParseResult<T> = Result<Option<ParsedMessage<T>>, crate::error::Parse>;
#[derive(Debug)]
pub(crate) struct ParsedMessage<T> {
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -10,10 +10,10 @@ use http::header::{self, Entry, HeaderName, HeaderValue};
use http::{HeaderMap, Method, StatusCode, Version};
use httparse;
-use error::Parse;
-use headers;
-use proto::{BodyLength, DecodedLength, MessageHead, RequestLine, RequestHead};
-use proto::h1::{Encode, Encoder, Http1Transaction, ParseResult, ParseContext, ParsedMessage, date};
+use crate::error::Parse;
+use crate::headers;
+use crate::proto::{BodyLength, DecodedLength, MessageHead, RequestLine, RequestHead};
+use crate::proto::h1::{Encode, Encoder, Http1Transaction, ParseResult, ParseContext, ParsedMessage, date};
const MAX_HEADERS: usize = 100;
const AVERAGE_HEADER_SIZE: usize = 30; // totally scientific
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -239,7 +239,7 @@ impl Http1Transaction for Server {
}))
}
- fn encode(mut msg: Encode<Self::Outgoing>, mut dst: &mut Vec<u8>) -> ::Result<Encoder> {
+ fn encode(mut msg: Encode<Self::Outgoing>, mut dst: &mut Vec<u8>) -> crate::Result<Encoder> {
trace!(
"Server::encode status={:?}, body={:?}, req_method={:?}",
msg.head.subject,
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -266,7 +266,7 @@ impl Http1Transaction for Server {
*msg.head = MessageHead::default();
msg.head.subject = StatusCode::INTERNAL_SERVER_ERROR;
msg.body = None;
- (Err(::Error::new_user_unsupported_status_code()), true)
+ (Err(crate::Error::new_user_unsupported_status_code()), true)
} else {
(Ok(()), !msg.keep_alive)
};
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -309,7 +309,7 @@ impl Http1Transaction for Server {
if wrote_len {
warn!("unexpected content-length found, canceling");
rewind(dst);
- return Err(::Error::new_user_header());
+ return Err(crate::Error::new_user_header());
}
match msg.body {
Some(BodyLength::Known(known_len)) => {
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -369,7 +369,7 @@ impl Http1Transaction for Server {
if fold.0 != len {
warn!("multiple Content-Length values found: [{}, {}]", fold.0, len);
rewind(dst);
- return Err(::Error::new_user_header());
+ return Err(crate::Error::new_user_header());
}
folded = Some(fold);
} else {
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -378,7 +378,7 @@ impl Http1Transaction for Server {
} else {
warn!("illegal Content-Length value: {:?}", value);
rewind(dst);
- return Err(::Error::new_user_header());
+ return Err(crate::Error::new_user_header());
}
}
if let Some((len, value)) = folded {
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -418,7 +418,7 @@ impl Http1Transaction for Server {
if wrote_len {
warn!("unexpected transfer-encoding found, canceling");
rewind(dst);
- return Err(::Error::new_user_header());
+ return Err(crate::Error::new_user_header());
}
// check that we actually can send a chunked body...
if msg.head.version == Version::HTTP_10 || !Server::can_chunked(msg.req_method, msg.head.subject) {
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -531,8 +531,8 @@ impl Http1Transaction for Server {
ret.map(|()| encoder.set_last(is_last))
}
- fn on_error(err: &::Error) -> Option<MessageHead<Self::Outgoing>> {
- use ::error::Kind;
+ fn on_error(err: &crate::Error) -> Option<MessageHead<Self::Outgoing>> {
+ use crate::error::Kind;
let status = match *err.kind() {
Kind::Parse(Parse::Method) |
Kind::Parse(Parse::Header) |
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -666,7 +666,7 @@ impl Http1Transaction for Client {
}
}
- fn encode(msg: Encode<Self::Outgoing>, dst: &mut Vec<u8>) -> ::Result<Encoder> {
+ fn encode(msg: Encode<Self::Outgoing>, dst: &mut Vec<u8>) -> crate::Result<Encoder> {
trace!("Client::encode method={:?}, body={:?}", msg.head.subject.0, msg.body);
*msg.req_method = Some(msg.head.subject.0.clone());
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -704,7 +704,7 @@ impl Http1Transaction for Client {
Ok(body)
}
- fn on_error(_err: &::Error) -> Option<MessageHead<Self::Outgoing>> {
+ fn on_error(_err: &crate::Error) -> Option<MessageHead<Self::Outgoing>> {
// we can't tell the server about any errors it creates
None
}
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -937,7 +937,7 @@ fn record_header_indices(
bytes: &[u8],
headers: &[httparse::Header],
indices: &mut [HeaderIndices]
-) -> Result<(), ::error::Parse> {
+) -> Result<(), crate::error::Parse> {
let bytes_ptr = bytes.as_ptr() as usize;
// FIXME: This should be a single plain `for` loop.
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -966,7 +966,7 @@ fn record_header_indices(
{
if header.name.len() >= (1 << 16) {
debug!("header name larger than 64kb: {:?}", header.name);
- return Err(::error::Parse::TooLarge);
+ return Err(crate::error::Parse::TooLarge);
}
let name_start = header.name.as_ptr() as usize - bytes_ptr;
let name_end = name_start + header.name.len();
diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs
--- a/src/proto/h2/client.rs
+++ b/src/proto/h2/client.rs
@@ -5,15 +5,15 @@ use futures::sync::{mpsc, oneshot};
use h2::client::{Builder, Handshake, SendRequest};
use tokio_io::{AsyncRead, AsyncWrite};
-use headers::content_length_parse_all;
-use body::Payload;
-use ::common::{Exec, Never};
-use headers;
-use ::proto::Dispatched;
+use crate::headers::content_length_parse_all;
+use crate::body::Payload;
+use crate::common::{Exec, Never};
+use crate::headers;
+use crate::proto::Dispatched;
use super::{PipeToSendStream, SendBuf};
-use ::{Body, Request, Response};
+use crate::{Body, Request, Response};
-type ClientRx<B> = ::client::dispatch::Receiver<Request<B>, Response<Body>>;
+type ClientRx<B> = crate::client::dispatch::Receiver<Request<B>, Response<Body>>;
/// An mpsc channel is used to help notify the `Connection` task when *all*
/// other handles to it have been dropped, so that it can shutdown.
type ConnDropRef = mpsc::Sender<Never>;
diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs
--- a/src/proto/h2/client.rs
+++ b/src/proto/h2/client.rs
@@ -58,13 +58,13 @@ where
B: Payload + 'static,
{
type Item = Dispatched;
- type Error = ::Error;
+ type Error = crate::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
loop {
let next = match self.state {
State::Handshaking(ref mut h) => {
- let (request_tx, conn) = try_ready!(h.poll().map_err(::Error::new_h2));
+ let (request_tx, conn) = try_ready!(h.poll().map_err(crate::Error::new_h2));
// An mpsc channel is used entirely to detect when the
// 'Client' has been dropped. This is to get around a bug
// in h2 where dropping all SendRequests won't notify a
diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs
--- a/src/proto/h2/client.rs
+++ b/src/proto/h2/client.rs
@@ -111,7 +111,7 @@ where
trace!("connection gracefully shutdown");
Ok(Async::Ready(Dispatched::Shutdown))
} else {
- Err(::Error::new_h2(err))
+ Err(crate::Error::new_h2(err))
};
}
}
diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs
--- a/src/proto/h2/client.rs
+++ b/src/proto/h2/client.rs
@@ -133,7 +133,7 @@ where
Ok(ok) => ok,
Err(err) => {
debug!("client send request error: {}", err);
- cb.send(Err((::Error::new_h2(err), None)));
+ cb.send(Err((crate::Error::new_h2(err), None)));
continue;
}
};
diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs
--- a/src/proto/h2/client.rs
+++ b/src/proto/h2/client.rs
@@ -162,12 +162,12 @@ where
Ok(res) => {
let content_length = content_length_parse_all(res.headers());
let res = res.map(|stream|
- ::Body::h2(stream, content_length));
+ crate::Body::h2(stream, content_length));
Ok(res)
},
Err(err) => {
debug!("client response error: {}", err);
- Err((::Error::new_h2(err), None))
+ Err((crate::Error::new_h2(err), None))
}
}
});
diff --git a/src/proto/h2/mod.rs b/src/proto/h2/mod.rs
--- a/src/proto/h2/mod.rs
+++ b/src/proto/h2/mod.rs
@@ -7,7 +7,7 @@ use http::header::{
};
use http::HeaderMap;
-use body::Payload;
+use crate::body::Payload;
mod client;
pub(crate) mod server;
diff --git a/src/proto/h2/mod.rs b/src/proto/h2/mod.rs
--- a/src/proto/h2/mod.rs
+++ b/src/proto/h2/mod.rs
@@ -91,18 +91,18 @@ where
}
}
- fn on_user_err(&mut self, err: S::Error) -> ::Error {
- let err = ::Error::new_user_body(err);
+ fn on_user_err(&mut self, err: S::Error) -> crate::Error {
+ let err = crate::Error::new_user_body(err);
debug!("send body user stream error: {}", err);
self.body_tx.send_reset(err.h2_reason());
err
}
- fn send_eos_frame(&mut self) -> ::Result<()> {
+ fn send_eos_frame(&mut self) -> crate::Result<()> {
trace!("send body eos");
self.body_tx
.send_data(SendBuf(None), true)
- .map_err(::Error::new_body_write)
+ .map_err(crate::Error::new_body_write)
}
}
diff --git a/src/proto/h2/mod.rs b/src/proto/h2/mod.rs
--- a/src/proto/h2/mod.rs
+++ b/src/proto/h2/mod.rs
@@ -111,7 +111,7 @@ where
S: Payload,
{
type Item = ();
- type Error = ::Error;
+ type Error = crate::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
loop {
diff --git a/src/proto/h2/mod.rs b/src/proto/h2/mod.rs
--- a/src/proto/h2/mod.rs
+++ b/src/proto/h2/mod.rs
@@ -123,18 +123,18 @@ where
if self.body_tx.capacity() == 0 {
loop {
- match try_ready!(self.body_tx.poll_capacity().map_err(::Error::new_body_write)) {
+ match try_ready!(self.body_tx.poll_capacity().map_err(crate::Error::new_body_write)) {
Some(0) => {}
Some(_) => break,
- None => return Err(::Error::new_canceled()),
+ None => return Err(crate::Error::new_canceled()),
}
}
} else {
if let Async::Ready(reason) =
- self.body_tx.poll_reset().map_err(::Error::new_body_write)?
+ self.body_tx.poll_reset().map_err(crate::Error::new_body_write)?
{
debug!("stream received RST_STREAM: {:?}", reason);
- return Err(::Error::new_body_write(::h2::Error::from(reason)));
+ return Err(crate::Error::new_body_write(::h2::Error::from(reason)));
}
}
diff --git a/src/proto/h2/mod.rs b/src/proto/h2/mod.rs
--- a/src/proto/h2/mod.rs
+++ b/src/proto/h2/mod.rs
@@ -150,7 +150,7 @@ where
let buf = SendBuf(Some(chunk));
self.body_tx
.send_data(buf, is_eos)
- .map_err(::Error::new_body_write)?;
+ .map_err(crate::Error::new_body_write)?;
if is_eos {
return Ok(Async::Ready(()));
diff --git a/src/proto/h2/mod.rs b/src/proto/h2/mod.rs
--- a/src/proto/h2/mod.rs
+++ b/src/proto/h2/mod.rs
@@ -169,17 +169,17 @@ where
}
} else {
if let Async::Ready(reason) =
- self.body_tx.poll_reset().map_err(|e| ::Error::new_body_write(e))?
+ self.body_tx.poll_reset().map_err(|e| crate::Error::new_body_write(e))?
{
debug!("stream received RST_STREAM: {:?}", reason);
- return Err(::Error::new_body_write(::h2::Error::from(reason)));
+ return Err(crate::Error::new_body_write(::h2::Error::from(reason)));
}
match try_ready!(self.stream.poll_trailers().map_err(|e| self.on_user_err(e))) {
Some(trailers) => {
self.body_tx
.send_trailers(trailers)
- .map_err(::Error::new_body_write)?;
+ .map_err(crate::Error::new_body_write)?;
return Ok(Async::Ready(()));
}
None => {
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -5,16 +5,16 @@ use h2::Reason;
use h2::server::{Builder, Connection, Handshake, SendResponse};
use tokio_io::{AsyncRead, AsyncWrite};
-use ::headers::content_length_parse_all;
-use ::body::Payload;
-use body::internal::FullDataArg;
-use ::common::exec::H2Exec;
-use ::headers;
-use ::service::Service;
-use ::proto::Dispatched;
+use crate::headers::content_length_parse_all;
+use crate::body::Payload;
+use crate::body::internal::FullDataArg;
+use crate::common::exec::H2Exec;
+use crate::headers;
+use crate::service::Service;
+use crate::proto::Dispatched;
use super::{PipeToSendStream, SendBuf};
-use ::{Body, Response};
+use crate::{Body, Response};
pub(crate) struct Server<T, S, B, E>
where
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -40,7 +40,7 @@ where
B: Payload,
{
conn: Connection<T, SendBuf<B::Data>>,
- closing: Option<::Error>,
+ closing: Option<crate::Error>,
}
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -90,13 +90,13 @@ where
E: H2Exec<S::Future, B>,
{
type Item = Dispatched;
- type Error = ::Error;
+ type Error = crate::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
loop {
let next = match self.state {
State::Handshaking(ref mut h) => {
- let conn = try_ready!(h.poll().map_err(::Error::new_h2));
+ let conn = try_ready!(h.poll().map_err(crate::Error::new_h2));
State::Serving(Serving {
conn,
closing: None,
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -122,7 +122,7 @@ where
T: AsyncRead + AsyncWrite,
B: Payload,
{
- fn poll_server<S, E>(&mut self, service: &mut S, exec: &E) -> Poll<(), ::Error>
+ fn poll_server<S, E>(&mut self, service: &mut S, exec: &E) -> Poll<(), crate::Error>
where
S: Service<
ReqBody=Body,
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -138,12 +138,12 @@ where
Ok(Async::Ready(())) => (),
Ok(Async::NotReady) => {
// use `poll_close` instead of `poll`, in order to avoid accepting a request.
- try_ready!(self.conn.poll_close().map_err(::Error::new_h2));
+ try_ready!(self.conn.poll_close().map_err(crate::Error::new_h2));
trace!("incoming connection complete");
return Ok(Async::Ready(()));
}
Err(err) => {
- let err = ::Error::new_user_service(err);
+ let err = crate::Error::new_user_service(err);
debug!("service closed: {}", err);
let reason = err.h2_reason();
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -161,11 +161,11 @@ where
}
// When the service is ready, accepts an incoming request.
- if let Some((req, respond)) = try_ready!(self.conn.poll().map_err(::Error::new_h2)) {
+ if let Some((req, respond)) = try_ready!(self.conn.poll().map_err(crate::Error::new_h2)) {
trace!("incoming request");
let content_length = content_length_parse_all(req.headers());
let req = req.map(|stream| {
- ::Body::h2(stream, content_length)
+ crate::Body::h2(stream, content_length)
});
let fut = H2Stream::new(service.call(req), respond);
exec.execute_h2stream(fut)?;
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -179,7 +179,7 @@ where
debug_assert!(self.closing.is_some(), "poll_server broke loop without closing");
- try_ready!(self.conn.poll_close().map_err(::Error::new_h2));
+ try_ready!(self.conn.poll_close().map_err(crate::Error::new_h2));
Err(self.closing.take().expect("polled after error"))
}
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -215,7 +215,7 @@ where
}
}
- fn poll2(&mut self) -> Poll<(), ::Error> {
+ fn poll2(&mut self) -> Poll<(), crate::Error> {
loop {
let next = match self.state {
H2StreamState::Service(ref mut h) => {
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -225,15 +225,15 @@ where
// Body is not yet ready, so we want to check if the client has sent a
// RST_STREAM frame which would cancel the current request.
if let Async::Ready(reason) =
- self.reply.poll_reset().map_err(|e| ::Error::new_h2(e))?
+ self.reply.poll_reset().map_err(|e| crate::Error::new_h2(e))?
{
debug!("stream received RST_STREAM: {:?}", reason);
- return Err(::Error::new_h2(reason.into()));
+ return Err(crate::Error::new_h2(reason.into()));
}
return Ok(Async::NotReady);
}
Err(e) => {
- let err = ::Error::new_user_service(e);
+ let err = crate::Error::new_user_service(e);
warn!("http2 service errored: {}", err);
self.reply.send_reset(err.h2_reason());
return Err(err);
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -249,7 +249,7 @@ where
.headers_mut()
.entry(::http::header::DATE)
.expect("DATE is a valid HeaderName")
- .or_insert_with(::proto::h1::date::update_and_header_value);
+ .or_insert_with(crate::proto::h1::date::update_and_header_value);
macro_rules! reply {
($eos:expr) => ({
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -258,7 +258,7 @@ where
Err(e) => {
debug!("send response error: {}", e);
self.reply.send_reset(Reason::INTERNAL_ERROR);
- return Err(::Error::new_h2(e));
+ return Err(crate::Error::new_h2(e));
}
}
})
diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs
--- a/src/proto/h2/server.rs
+++ b/src/proto/h2/server.rs
@@ -274,7 +274,7 @@ where
let buf = SendBuf(Some(full));
body_tx
.send_data(buf, true)
- .map_err(::Error::new_body_write)?;
+ .map_err(crate::Error::new_body_write)?;
return Ok(Async::Ready(()));
}
diff --git a/src/proto/mod.rs b/src/proto/mod.rs
--- a/src/proto/mod.rs
+++ b/src/proto/mod.rs
@@ -40,7 +40,7 @@ pub(crate) enum Dispatched {
/// Dispatcher completely shutdown connection.
Shutdown,
/// Dispatcher has pending upgrade, and so did not shutdown.
- Upgrade(::upgrade::Pending),
+ Upgrade(crate::upgrade::Pending),
}
/// A separate module to encapsulate the invariants of the DecodedLength type.
diff --git a/src/proto/mod.rs b/src/proto/mod.rs
--- a/src/proto/mod.rs
+++ b/src/proto/mod.rs
@@ -83,12 +83,12 @@ mod body_length {
}
/// Checks the `u64` is within the maximum allowed for content-length.
- pub(crate) fn checked_new(len: u64) -> Result<Self, ::error::Parse> {
+ pub(crate) fn checked_new(len: u64) -> Result<Self, crate::error::Parse> {
if len <= MAX_LEN {
Ok(DecodedLength(len))
} else {
warn!("content-length bigger than maximum: {} > {}", len, MAX_LEN);
- Err(::error::Parse::TooLarge)
+ Err(crate::error::Parse::TooLarge)
}
}
}
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -22,13 +22,13 @@ use h2;
use tokio_io::{AsyncRead, AsyncWrite};
#[cfg(feature = "runtime")] use tokio_reactor::Handle;
-use body::{Body, Payload};
-use common::exec::{Exec, H2Exec, NewSvcExec};
-use common::io::Rewind;
-use error::{Kind, Parse};
-use proto;
-use service::{MakeServiceRef, Service};
-use upgrade::Upgraded;
+use crate::body::{Body, Payload};
+use crate::common::exec::{Exec, H2Exec, NewSvcExec};
+use crate::common::io::Rewind;
+use crate::error::{Kind, Parse};
+use crate::proto;
+use crate::service::{MakeServiceRef, Service};
+use crate::upgrade::Upgraded;
pub(super) use self::spawn_all::NoopWatcher;
use self::spawn_all::NewSvcTask;
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -413,7 +413,7 @@ impl<E> Http<E> {
/// `make_service` object provided, creating a new service per
/// connection.
#[cfg(feature = "runtime")]
- pub fn serve_addr<S, Bd>(&self, addr: &SocketAddr, make_service: S) -> ::Result<Serve<AddrIncoming, S, E>>
+ pub fn serve_addr<S, Bd>(&self, addr: &SocketAddr, make_service: S) -> crate::Result<Serve<AddrIncoming, S, E>>
where
S: MakeServiceRef<
AddrStream,
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -438,7 +438,7 @@ impl<E> Http<E> {
/// `make_service` object provided, creating a new service per
/// connection.
#[cfg(feature = "runtime")]
- pub fn serve_addr_handle<S, Bd>(&self, addr: &SocketAddr, handle: &Handle, make_service: S) -> ::Result<Serve<AddrIncoming, S, E>>
+ pub fn serve_addr_handle<S, Bd>(&self, addr: &SocketAddr, handle: &Handle, make_service: S) -> crate::Result<Serve<AddrIncoming, S, E>>
where
S: MakeServiceRef<
AddrStream,
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -547,7 +547,7 @@ where
/// Use [`poll_fn`](https://docs.rs/futures/0.1.25/futures/future/fn.poll_fn.html)
/// and [`try_ready!`](https://docs.rs/futures/0.1.25/futures/macro.try_ready.html)
/// to work with this function; or use the `without_shutdown` wrapper.
- pub fn poll_without_shutdown(&mut self) -> Poll<(), ::Error> {
+ pub fn poll_without_shutdown(&mut self) -> Poll<(), crate::Error> {
loop {
let polled = match *self.conn.as_mut().unwrap() {
Either::A(ref mut h1) => h1.poll_without_shutdown(),
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -570,9 +570,9 @@ where
/// Prevent shutdown of the underlying IO object at the end of service the request,
/// instead run `into_parts`. This is a convenience wrapper over `poll_without_shutdown`.
- pub fn without_shutdown(self) -> impl Future<Item=Parts<I,S>, Error=::Error> {
+ pub fn without_shutdown(self) -> impl Future<Item=Parts<I,S>, Error=crate::Error> {
let mut conn = Some(self);
- ::futures::future::poll_fn(move || -> ::Result<_> {
+ ::futures::future::poll_fn(move || -> crate::Result<_> {
try_ready!(conn.as_mut().unwrap().poll_without_shutdown());
Ok(conn.take().unwrap().into_parts().into())
})
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -629,7 +629,7 @@ where
E: H2Exec<S::Future, B>,
{
type Item = ();
- type Error = ::Error;
+ type Error = crate::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
loop {
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -701,7 +701,7 @@ where
E: H2Exec<<S::Service as Service>::Future, B>,
{
type Item = Connecting<I::Item, S::Future, E>;
- type Error = ::Error;
+ type Error = crate::Error;
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
match self.make_service.poll_ready_ref() {
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -709,11 +709,11 @@ where
Ok(Async::NotReady) => return Ok(Async::NotReady),
Err(e) => {
trace!("make_service closed");
- return Err(::Error::new_user_make_service(e));
+ return Err(crate::Error::new_user_make_service(e));
}
}
- if let Some(io) = try_ready!(self.incoming.poll().map_err(::Error::new_accept)) {
+ if let Some(io) = try_ready!(self.incoming.poll().map_err(crate::Error::new_accept)) {
let new_fut = self.make_service.make_service_ref(&io);
Ok(Async::Ready(Some(Connecting {
future: new_fut,
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -774,7 +774,7 @@ where
B: Payload,
E: H2Exec<<S::Service as Service>::Future, B>,
{
- pub(super) fn poll_watch<W>(&mut self, watcher: &W) -> Poll<(), ::Error>
+ pub(super) fn poll_watch<W>(&mut self, watcher: &W) -> Poll<(), crate::Error>
where
E: NewSvcExec<I::Item, S::Future, S::Service, E, W>,
W: Watcher<I::Item, S::Service, E>,
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -795,9 +795,9 @@ pub(crate) mod spawn_all {
use futures::{Future, Poll};
use tokio_io::{AsyncRead, AsyncWrite};
- use body::{Body, Payload};
- use common::exec::H2Exec;
- use service::Service;
+ use crate::body::{Body, Payload};
+ use crate::common::exec::H2Exec;
+ use crate::service::Service;
use super::{Connecting, UpgradeableConnection};
// Used by `SpawnAll` to optionally watch a `Connection` future.
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -809,7 +809,7 @@ pub(crate) mod spawn_all {
// connections, and signal that they start to shutdown when prompted, so
// it has a `GracefulWatcher` implementation to do that.
pub trait Watcher<I, S: Service, E>: Clone {
- type Future: Future<Item=(), Error=::Error>;
+ type Future: Future<Item=(), Error=crate::Error>;
fn watch(&self, conn: UpgradeableConnection<I, S, E>) -> Self::Future;
}
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -878,7 +878,7 @@ pub(crate) mod spawn_all {
let conn = try_ready!(connecting
.poll()
.map_err(|err| {
- let err = ::Error::new_user_make_service(err);
+ let err = crate::Error::new_user_make_service(err);
debug!("connecting error: {}", err);
}));
let connected = watcher.watch(conn.with_upgrades());
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -941,7 +941,7 @@ mod upgrades {
E: super::H2Exec<S::Future, B>,
{
type Item = ();
- type Error = ::Error;
+ type Error = crate::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
loop {
diff --git a/src/server/mod.rs b/src/server/mod.rs
--- a/src/server/mod.rs
+++ b/src/server/mod.rs
@@ -64,9 +64,9 @@ use futures::{Future, Stream, Poll};
use tokio_io::{AsyncRead, AsyncWrite};
#[cfg(feature = "runtime")] use tokio_reactor;
-use body::{Body, Payload};
-use common::exec::{Exec, H2Exec, NewSvcExec};
-use service::{MakeServiceRef, Service};
+use crate::body::{Body, Payload};
+use crate::common::exec::{Exec, H2Exec, NewSvcExec};
+use crate::service::{MakeServiceRef, Service};
// Renamed `Http` as `Http_` for now so that people upgrading don't see an
// error that `hyper::server::Http` is private...
use self::conn::{Http as Http_, NoopWatcher, SpawnAll};
diff --git a/src/server/mod.rs b/src/server/mod.rs
--- a/src/server/mod.rs
+++ b/src/server/mod.rs
@@ -119,13 +119,13 @@ impl Server<AddrIncoming, ()> {
}
/// Tries to bind to the provided address, and returns a [`Builder`](Builder).
- pub fn try_bind(addr: &SocketAddr) -> ::Result<Builder<AddrIncoming>> {
+ pub fn try_bind(addr: &SocketAddr) -> crate::Result<Builder<AddrIncoming>> {
AddrIncoming::new(addr, None)
.map(Server::builder)
}
/// Create a new instance from a `std::net::TcpListener` instance.
- pub fn from_tcp(listener: StdTcpListener) -> Result<Builder<AddrIncoming>, ::Error> {
+ pub fn from_tcp(listener: StdTcpListener) -> Result<Builder<AddrIncoming>, crate::Error> {
let handle = tokio_reactor::Handle::default();
AddrIncoming::from_std(listener, &handle)
.map(Server::builder)
diff --git a/src/server/mod.rs b/src/server/mod.rs
--- a/src/server/mod.rs
+++ b/src/server/mod.rs
@@ -212,7 +212,7 @@ where
E: NewSvcExec<I::Item, S::Future, S::Service, E, NoopWatcher>,
{
type Item = ();
- type Error = ::Error;
+ type Error = crate::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
self.spawn_all.poll_watch(&NoopWatcher)
diff --git a/src/server/shutdown.rs b/src/server/shutdown.rs
--- a/src/server/shutdown.rs
+++ b/src/server/shutdown.rs
@@ -3,10 +3,10 @@ use std::error::Error as StdError;
use futures::{Async, Future, Stream, Poll};
use tokio_io::{AsyncRead, AsyncWrite};
-use body::{Body, Payload};
-use common::drain::{self, Draining, Signal, Watch, Watching};
-use common::exec::{H2Exec, NewSvcExec};
-use service::{MakeServiceRef, Service};
+use crate::body::{Body, Payload};
+use crate::common::drain::{self, Draining, Signal, Watch, Watching};
+use crate::common::exec::{H2Exec, NewSvcExec};
+use crate::service::{MakeServiceRef, Service};
use super::conn::{SpawnAll, UpgradeableConnection, Watcher};
#[allow(missing_debug_implementations)]
diff --git a/src/server/shutdown.rs b/src/server/shutdown.rs
--- a/src/server/shutdown.rs
+++ b/src/server/shutdown.rs
@@ -51,7 +51,7 @@ where
E: NewSvcExec<I::Item, S::Future, S::Service, E, GracefulWatcher>,
{
type Item = ();
- type Error = ::Error;
+ type Error = crate::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
loop {
diff --git a/src/server/tcp.rs b/src/server/tcp.rs
--- a/src/server/tcp.rs
+++ b/src/server/tcp.rs
@@ -22,9 +22,9 @@ pub struct AddrIncoming {
}
impl AddrIncoming {
- pub(super) fn new(addr: &SocketAddr, handle: Option<&Handle>) -> ::Result<Self> {
+ pub(super) fn new(addr: &SocketAddr, handle: Option<&Handle>) -> crate::Result<Self> {
let std_listener = StdTcpListener::bind(addr)
- .map_err(::Error::new_listen)?;
+ .map_err(crate::Error::new_listen)?;
if let Some(handle) = handle {
AddrIncoming::from_std(std_listener, handle)
diff --git a/src/server/tcp.rs b/src/server/tcp.rs
--- a/src/server/tcp.rs
+++ b/src/server/tcp.rs
@@ -34,10 +34,10 @@ impl AddrIncoming {
}
}
- pub(super) fn from_std(std_listener: StdTcpListener, handle: &Handle) -> ::Result<Self> {
+ pub(super) fn from_std(std_listener: StdTcpListener, handle: &Handle) -> crate::Result<Self> {
let listener = TcpListener::from_std(std_listener, &handle)
- .map_err(::Error::new_listen)?;
- let addr = listener.local_addr().map_err(::Error::new_listen)?;
+ .map_err(crate::Error::new_listen)?;
+ let addr = listener.local_addr().map_err(crate::Error::new_listen)?;
Ok(AddrIncoming {
listener,
addr: addr,
diff --git a/src/server/tcp.rs b/src/server/tcp.rs
--- a/src/server/tcp.rs
+++ b/src/server/tcp.rs
@@ -49,7 +49,7 @@ impl AddrIncoming {
}
/// Creates a new `AddrIncoming` binding to provided socket address.
- pub fn bind(addr: &SocketAddr) -> ::Result<Self> {
+ pub fn bind(addr: &SocketAddr) -> crate::Result<Self> {
AddrIncoming::new(addr, None)
}
diff --git a/src/service/make_service.rs b/src/service/make_service.rs
--- a/src/service/make_service.rs
+++ b/src/service/make_service.rs
@@ -3,7 +3,7 @@ use std::fmt;
use futures::{Async, Future, IntoFuture, Poll};
-use body::Payload;
+use crate::body::Payload;
use super::Service;
/// An asynchronous constructor of `Service`s.
diff --git a/src/service/new_service.rs b/src/service/new_service.rs
--- a/src/service/new_service.rs
+++ b/src/service/new_service.rs
@@ -2,7 +2,7 @@ use std::error::Error as StdError;
use futures::{Async, Future, IntoFuture, Poll};
-use body::Payload;
+use crate::body::Payload;
use super::{MakeService, Service};
/// An asynchronous constructor of `Service`s.
diff --git a/src/service/service.rs b/src/service/service.rs
--- a/src/service/service.rs
+++ b/src/service/service.rs
@@ -4,9 +4,9 @@ use std::marker::PhantomData;
use futures::{future, Async, Future, IntoFuture, Poll};
-use body::Payload;
-use common::Never;
-use ::{Request, Response};
+use crate::body::Payload;
+use crate::common::Never;
+use crate::{Request, Response};
/// An asynchronous function from `Request` to `Response`.
pub trait Service {
diff --git a/src/service/service.rs b/src/service/service.rs
--- a/src/service/service.rs
+++ b/src/service/service.rs
@@ -179,16 +179,16 @@ fn _assert_fn_mut() {
let mut val = 0;
- let svc = service_fn(move |_req: Request<::Body>| {
+ let svc = service_fn(move |_req: Request<crate::Body>| {
val += 1;
- future::ok::<_, Never>(Response::new(::Body::empty()))
+ future::ok::<_, Never>(Response::new(crate::Body::empty()))
});
assert_service(&svc);
- let svc = service_fn_ok(move |_req: Request<::Body>| {
+ let svc = service_fn_ok(move |_req: Request<crate::Body>| {
val += 1;
- Response::new(::Body::empty())
+ Response::new(crate::Body::empty())
});
assert_service(&svc);
diff --git a/src/upgrade.rs b/src/upgrade.rs
--- a/src/upgrade.rs
+++ b/src/upgrade.rs
@@ -15,7 +15,7 @@ use futures::{Async, Future, Poll};
use futures::sync::oneshot;
use tokio_io::{AsyncRead, AsyncWrite};
-use common::io::Rewind;
+use crate::common::io::Rewind;
/// An upgraded HTTP connection.
///
diff --git a/src/upgrade.rs b/src/upgrade.rs
--- a/src/upgrade.rs
+++ b/src/upgrade.rs
@@ -33,7 +33,7 @@ pub struct Upgraded {
///
/// If no upgrade was available, or it doesn't succeed, yields an `Error`.
pub struct OnUpgrade {
- rx: Option<oneshot::Receiver<::Result<Upgraded>>>,
+ rx: Option<oneshot::Receiver<crate::Result<Upgraded>>>,
}
/// The deconstructed parts of an [`Upgraded`](Upgraded) type.
diff --git a/src/upgrade.rs b/src/upgrade.rs
--- a/src/upgrade.rs
+++ b/src/upgrade.rs
@@ -57,7 +57,7 @@ pub struct Parts<T> {
}
pub(crate) struct Pending {
- tx: oneshot::Sender<::Result<Upgraded>>
+ tx: oneshot::Sender<crate::Result<Upgraded>>
}
/// Error cause returned when an upgrade was expected but canceled
diff --git a/src/upgrade.rs b/src/upgrade.rs
--- a/src/upgrade.rs
+++ b/src/upgrade.rs
@@ -200,7 +200,7 @@ impl OnUpgrade {
impl Future for OnUpgrade {
type Item = Upgraded;
- type Error = ::Error;
+ type Error = crate::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
match self.rx {
diff --git a/src/upgrade.rs b/src/upgrade.rs
--- a/src/upgrade.rs
+++ b/src/upgrade.rs
@@ -209,10 +209,10 @@ impl Future for OnUpgrade {
Ok(Async::Ready(Ok(upgraded))) => Ok(Async::Ready(upgraded)),
Ok(Async::Ready(Err(err))) => Err(err),
Err(_oneshot_canceled) => Err(
- ::Error::new_canceled().with(UpgradeExpected(()))
+ crate::Error::new_canceled().with(UpgradeExpected(()))
),
},
- None => Err(::Error::new_user_no_upgrade()),
+ None => Err(crate::Error::new_user_no_upgrade()),
}
}
}
diff --git a/src/upgrade.rs b/src/upgrade.rs
--- a/src/upgrade.rs
+++ b/src/upgrade.rs
@@ -236,7 +236,7 @@ impl Pending {
/// upgrades are handled manually.
pub(crate) fn manual(self) {
trace!("pending upgrade handled manually");
- let _ = self.tx.send(Err(::Error::new_user_manual_upgrade()));
+ let _ = self.tx.send(Err(crate::Error::new_user_manual_upgrade()));
}
}
|
hyper has tried when possible to link new compiler requirements with breaking changes. While we can sometimes use config flags to only enable things on newer versions, I don't think there's a way to do that here, since this also requires putting `edition = "2018"` in the `Cargo.toml`...
@seanmonstar hyper does not really have to support Rust pre-1.31 since futures 1.0 has been stabilized and will trigger async ecosystem migration in July anyway.
I've placed this in the 0.13 milestone.
|
2019-07-09T21:51:16Z
| 1,847
|
Migration to Rust 2018
I think this can be done before `std::future` is stabilized.
Currently, hyper doesn't follow some Rust 2018 conventions:
* module rules (including `extern crate`)
* hidden lifetime parameter
* bare trait
|
hyperium__hyper-1847
|
diff --git a/src/body/chunk.rs b/src/body/chunk.rs
--- a/src/body/chunk.rs
+++ b/src/body/chunk.rs
@@ -176,7 +176,7 @@ mod tests {
let mut dst = Vec::with_capacity(128);
b.iter(|| {
- let chunk = ::Chunk::from(s);
+ let chunk = crate::Chunk::from(s);
dst.put(chunk);
::test::black_box(&dst);
dst.clear();
diff --git a/src/client/connect/mod.rs b/src/client/connect/mod.rs
--- a/src/client/connect/mod.rs
+++ b/src/client/connect/mod.rs
@@ -567,7 +567,7 @@ mod tests {
let c1 = Connected::new()
.extra(Ex1(41));
- let mut res1 = ::Response::new(::Body::empty());
+ let mut res1 = crate::Response::new(crate::Body::empty());
assert_eq!(res1.extensions().get::<Ex1>(), None);
diff --git a/src/client/connect/mod.rs b/src/client/connect/mod.rs
--- a/src/client/connect/mod.rs
+++ b/src/client/connect/mod.rs
@@ -590,7 +590,7 @@ mod tests {
.extra(Ex2("zoom"))
.extra(Ex3("pew pew"));
- let mut res1 = ::Response::new(::Body::empty());
+ let mut res1 = crate::Response::new(crate::Body::empty());
assert_eq!(res1.extensions().get::<Ex1>(), None);
assert_eq!(res1.extensions().get::<Ex2>(), None);
diff --git a/src/client/connect/mod.rs b/src/client/connect/mod.rs
--- a/src/client/connect/mod.rs
+++ b/src/client/connect/mod.rs
@@ -612,7 +612,7 @@ mod tests {
.extra(Ex2("hiccup"))
.extra(Ex1(99));
- let mut res2 = ::Response::new(::Body::empty());
+ let mut res2 = crate::Response::new(crate::Body::empty());
c2
.extra
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -266,7 +266,7 @@ mod tests {
.expect_err("promise should error");
match (err.0.kind(), err.1) {
- (&::error::Kind::Canceled, Some(_)) => (),
+ (&crate::error::Kind::Canceled, Some(_)) => (),
e => panic!("expected Error::Cancel(_), found {:?}", e),
}
diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs
--- a/src/client/dispatch.rs
+++ b/src/client/dispatch.rs
@@ -312,7 +312,7 @@ mod tests {
#[cfg(feature = "nightly")]
#[bench]
fn giver_queue_throughput(b: &mut test::Bencher) {
- use {Body, Request, Response};
+ use crate::{Body, Request, Response};
let (mut tx, mut rx) = super::channel::<Request<Body>, Response<Body>>();
b.iter(move || {
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -783,7 +783,7 @@ mod tests {
use std::time::Duration;
use futures::{Async, Future};
use futures::future;
- use common::Exec;
+ use crate::common::Exec;
use super::{Connecting, Key, Poolable, Pool, Reservation, WeakOpt};
/// Test unique reservations.
diff --git a/src/client/tests.rs b/src/client/tests.rs
--- a/src/client/tests.rs
+++ b/src/client/tests.rs
@@ -6,7 +6,7 @@ use futures::future::poll_fn;
use futures::sync::oneshot;
use tokio::runtime::current_thread::Runtime;
-use mock::MockConnector;
+use crate::mock::MockConnector;
use super::*;
#[test]
diff --git a/src/client/tests.rs b/src/client/tests.rs
--- a/src/client/tests.rs
+++ b/src/client/tests.rs
@@ -20,7 +20,7 @@ fn retryable_request() {
let sock2 = connector.mock("http://mock.local");
let client = Client::builder()
- .build::<_, ::Body>(connector);
+ .build::<_, crate::Body>(connector);
client.pool.no_timer();
diff --git a/src/client/tests.rs b/src/client/tests.rs
--- a/src/client/tests.rs
+++ b/src/client/tests.rs
@@ -67,7 +67,7 @@ fn conn_reset_after_write() {
let sock1 = connector.mock("http://mock.local");
let client = Client::builder()
- .build::<_, ::Body>(connector);
+ .build::<_, crate::Body>(connector);
client.pool.no_timer();
diff --git a/src/client/tests.rs b/src/client/tests.rs
--- a/src/client/tests.rs
+++ b/src/client/tests.rs
@@ -119,11 +119,11 @@ fn checkout_win_allows_connect_future_to_be_pooled() {
let sock2 = connector.mock_fut("http://mock.local", rx);
let client = Client::builder()
- .build::<_, ::Body>(connector);
+ .build::<_, crate::Body>(connector);
client.pool.no_timer();
- let uri = "http://mock.local/a".parse::<::Uri>().expect("uri parse");
+ let uri = "http://mock.local/a".parse::<crate::Uri>().expect("uri parse");
// First request just sets us up to have a connection able to be put
// back in the pool. *However*, it doesn't insert immediately. The
diff --git a/src/client/tests.rs b/src/client/tests.rs
--- a/src/client/tests.rs
+++ b/src/client/tests.rs
@@ -214,7 +214,7 @@ fn bench_http1_get_0b(b: &mut test::Bencher) {
let client = Client::builder()
- .build::<_, ::Body>(connector.clone());
+ .build::<_, crate::Body>(connector.clone());
client.pool.no_timer();
diff --git a/src/client/tests.rs b/src/client/tests.rs
--- a/src/client/tests.rs
+++ b/src/client/tests.rs
@@ -246,7 +246,7 @@ fn bench_http1_get_10b(b: &mut test::Bencher) {
let client = Client::builder()
- .build::<_, ::Body>(connector.clone());
+ .build::<_, crate::Body>(connector.clone());
client.pool.no_timer();
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -888,9 +888,9 @@ mod tests {
let len = s.len();
b.bytes = len as u64;
- let mut io = ::mock::AsyncIo::new_buf(Vec::new(), 0);
+ let mut io = crate::mock::AsyncIo::new_buf(Vec::new(), 0);
io.panic();
- let mut conn = Conn::<_, ::Chunk, ::proto::h1::ServerTransaction>::new(io);
+ let mut conn = Conn::<_, crate::Chunk, crate::proto::h1::ServerTransaction>::new(io);
*conn.io.read_buf_mut() = ::bytes::BytesMut::from(&s[..]);
conn.state.cached_headers = Some(HeaderMap::with_capacity(2));
diff --git a/src/proto/h1/decode.rs b/src/proto/h1/decode.rs
--- a/src/proto/h1/decode.rs
+++ b/src/proto/h1/decode.rs
@@ -323,7 +323,7 @@ mod tests {
use super::super::io::MemRead;
use futures::{Async, Poll};
use bytes::{BytesMut, Bytes};
- use mock::AsyncIo;
+ use crate::mock::AsyncIo;
impl<'a> MemRead for &'a [u8] {
fn read_mem(&mut self, len: usize) -> Poll<Bytes, io::Error> {
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -559,8 +559,8 @@ mod tests {
extern crate pretty_env_logger;
use super::*;
- use mock::AsyncIo;
- use proto::h1::ClientTransaction;
+ use crate::mock::AsyncIo;
+ use crate::proto::h1::ClientTransaction;
#[test]
fn client_read_bytes_before_writing_request() {
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -569,8 +569,8 @@ mod tests {
// Block at 0 for now, but we will release this response before
// the request is ready to write later...
let io = AsyncIo::new_buf(b"HTTP/1.1 200 OK\r\n\r\n".to_vec(), 0);
- let (mut tx, rx) = ::client::dispatch::channel();
- let conn = Conn::<_, ::Chunk, ClientTransaction>::new(io);
+ let (mut tx, rx) = crate::client::dispatch::channel();
+ let conn = Conn::<_, crate::Chunk, ClientTransaction>::new(io);
let mut dispatcher = Dispatcher::new(Client::new(rx), conn);
// First poll is needed to allow tx to send...
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -578,7 +578,7 @@ mod tests {
// Unblock our IO, which has a response before we've sent request!
dispatcher.conn.io_mut().block_in(100);
- let res_rx = tx.try_send(::Request::new(::Body::empty())).unwrap();
+ let res_rx = tx.try_send(crate::Request::new(crate::Body::empty())).unwrap();
let a1 = dispatcher.poll().expect("error should be sent on channel");
assert!(a1.is_ready(), "dispatcher should be closed");
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -587,7 +587,7 @@ mod tests {
.expect_err("callback response");
match (err.0.kind(), err.1) {
- (&::error::Kind::Canceled, Some(_)) => (),
+ (&crate::error::Kind::Canceled, Some(_)) => (),
other => panic!("expected Canceled, got {:?}", other),
}
Ok::<(), ()>(())
diff --git a/src/proto/h1/dispatch.rs b/src/proto/h1/dispatch.rs
--- a/src/proto/h1/dispatch.rs
+++ b/src/proto/h1/dispatch.rs
@@ -599,16 +599,16 @@ mod tests {
let _ = pretty_env_logger::try_init();
::futures::lazy(|| {
let io = AsyncIo::new_buf(vec![], 0);
- let (mut tx, rx) = ::client::dispatch::channel();
- let conn = Conn::<_, ::Chunk, ClientTransaction>::new(io);
+ let (mut tx, rx) = crate::client::dispatch::channel();
+ let conn = Conn::<_, crate::Chunk, ClientTransaction>::new(io);
let mut dispatcher = Dispatcher::new(Client::new(rx), conn);
// First poll is needed to allow tx to send...
assert!(dispatcher.poll().expect("nothing is ready").is_not_ready());
- let body = ::Body::wrap_stream(::futures::stream::once(Ok::<_, ::Error>("")));
+ let body = crate::Body::wrap_stream(::futures::stream::once(Ok::<_, crate::Error>("")));
- let _res_rx = tx.try_send(::Request::new(body)).unwrap();
+ let _res_rx = tx.try_send(crate::Request::new(body)).unwrap();
dispatcher.poll().expect("empty body shouldn't panic");
Ok::<(), ()>(())
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -651,13 +651,13 @@ impl<T: Buf> Buf for BufDeque<T> {
mod tests {
use super::*;
use std::io::Read;
- use mock::AsyncIo;
+ use crate::mock::AsyncIo;
#[cfg(feature = "nightly")]
use test::Bencher;
#[cfg(test)]
- impl<T: Read> MemRead for ::mock::AsyncIo<T> {
+ impl<T: Read> MemRead for crate::mock::AsyncIo<T> {
fn read_mem(&mut self, len: usize) -> Poll<Bytes, io::Error> {
let mut v = vec![0; len];
let n = try_nb!(self.read(v.as_mut_slice()));
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -689,7 +689,7 @@ mod tests {
cached_headers: &mut None,
req_method: &mut None,
};
- assert!(buffered.parse::<::proto::h1::ClientTransaction>(ctx).unwrap().is_not_ready());
+ assert!(buffered.parse::<crate::proto::h1::ClientTransaction>(ctx).unwrap().is_not_ready());
assert!(buffered.io.blocked());
}
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -890,10 +890,10 @@ mod tests {
let s = "Hello, World!";
b.bytes = s.len() as u64;
- let mut write_buf = WriteBuf::<::Chunk>::new();
+ let mut write_buf = WriteBuf::<crate::Chunk>::new();
write_buf.set_strategy(WriteStrategy::Flatten);
b.iter(|| {
- let chunk = ::Chunk::from(s);
+ let chunk = crate::Chunk::from(s);
write_buf.buffer(chunk);
::test::black_box(&write_buf);
write_buf.headers.bytes.clear();
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1071,12 +1071,12 @@ mod tests {
req_method: &mut method,
}).unwrap().unwrap();
assert_eq!(raw.len(), 0);
- assert_eq!(msg.head.subject.0, ::Method::GET);
+ assert_eq!(msg.head.subject.0, crate::Method::GET);
assert_eq!(msg.head.subject.1, "/echo");
- assert_eq!(msg.head.version, ::Version::HTTP_11);
+ assert_eq!(msg.head.version, crate::Version::HTTP_11);
assert_eq!(msg.head.headers.len(), 1);
assert_eq!(msg.head.headers["Host"], "hyper.rs");
- assert_eq!(method, Some(::Method::GET));
+ assert_eq!(method, Some(crate::Method::GET));
}
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1087,12 +1087,12 @@ mod tests {
let mut raw = BytesMut::from(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n".to_vec());
let ctx = ParseContext {
cached_headers: &mut None,
- req_method: &mut Some(::Method::GET),
+ req_method: &mut Some(crate::Method::GET),
};
let msg = Client::parse(&mut raw, ctx).unwrap().unwrap();
assert_eq!(raw.len(), 0);
- assert_eq!(msg.head.subject, ::StatusCode::OK);
- assert_eq!(msg.head.version, ::Version::HTTP_11);
+ assert_eq!(msg.head.subject, crate::StatusCode::OK);
+ assert_eq!(msg.head.version, crate::Version::HTTP_11);
assert_eq!(msg.head.headers.len(), 1);
assert_eq!(msg.head.headers["Content-Length"], "0");
}
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1120,7 +1120,7 @@ mod tests {
.expect("parse complete")
}
- fn parse_err(s: &str, comment: &str) -> ::error::Parse {
+ fn parse_err(s: &str, comment: &str) -> crate::error::Parse {
let mut bytes = BytesMut::from(s);
Server::parse(&mut bytes, ParseContext {
cached_headers: &mut None,
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1266,7 +1266,7 @@ mod tests {
.expect("parse complete")
}
- fn parse_err(s: &str) -> ::error::Parse {
+ fn parse_err(s: &str) -> crate::error::Parse {
let mut bytes = BytesMut::from(s);
Client::parse(&mut bytes, ParseContext {
cached_headers: &mut None,
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1423,7 +1423,7 @@ mod tests {
#[test]
fn test_client_request_encode_title_case() {
use http::header::HeaderValue;
- use proto::BodyLength;
+ use crate::proto::BodyLength;
let mut head = MessageHead::default();
head.headers.insert("content-length", HeaderValue::from_static("10"));
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1553,7 +1553,7 @@ mod tests {
#[bench]
fn bench_server_encode_headers_preset(b: &mut Bencher) {
use http::header::HeaderValue;
- use proto::BodyLength;
+ use crate::proto::BodyLength;
let len = 108;
b.bytes = len as u64;
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1581,7 +1581,7 @@ mod tests {
#[cfg(feature = "nightly")]
#[bench]
fn bench_server_encode_no_headers(b: &mut Bencher) {
- use proto::BodyLength;
+ use crate::proto::BodyLength;
let len = 76;
b.bytes = len as u64;
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -5,9 +5,9 @@ pub extern crate tokio;
use std::sync::{Arc, Mutex, atomic::{AtomicUsize, Ordering}};
use std::time::Duration;
-use hyper::{Body, Client, Request, Response, Server, Version};
-use hyper::client::HttpConnector;
-use hyper::service::service_fn;
+use crate::hyper::{Body, Client, Request, Response, Server, Version};
+use crate::hyper::client::HttpConnector;
+use crate::hyper::service::service_fn;
pub use std::net::SocketAddr;
pub use self::futures::{future, Future, Stream};
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -207,12 +207,12 @@ macro_rules! __internal_headers_map {
macro_rules! __internal_headers_eq {
(@pat $name: expr, $pat:pat) => {
- ::std::sync::Arc::new(move |__hdrs: &::hyper::HeaderMap| {
+ ::std::sync::Arc::new(move |__hdrs: &crate::hyper::HeaderMap| {
match __hdrs.get($name) {
$pat => (),
other => panic!("headers[{}] was not {}: {:?}", stringify!($name), stringify!($pat), other),
}
- }) as ::std::sync::Arc<dyn Fn(&::hyper::HeaderMap) + Send + Sync>
+ }) as ::std::sync::Arc<dyn Fn(&crate::hyper::HeaderMap) + Send + Sync>
};
(@val $name: expr, NONE) => {
__internal_headers_eq!(@pat $name, None);
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -222,13 +222,13 @@ macro_rules! __internal_headers_eq {
};
(@val $name: expr, $val:expr) => ({
let __val = Option::from($val);
- ::std::sync::Arc::new(move |__hdrs: &::hyper::HeaderMap| {
+ ::std::sync::Arc::new(move |__hdrs: &crate::hyper::HeaderMap| {
if let Some(ref val) = __val {
assert_eq!(__hdrs.get($name).expect(stringify!($name)), val.to_string().as_str(), stringify!($name));
} else {
assert_eq!(__hdrs.get($name), None, stringify!($name));
}
- }) as ::std::sync::Arc<dyn Fn(&::hyper::HeaderMap) + Send + Sync>
+ }) as ::std::sync::Arc<dyn Fn(&crate::hyper::HeaderMap) + Send + Sync>
});
($headers:ident, { $($name:expr => $val:tt,)* }) => {
$(
diff --git a/tests/support/mod.rs b/tests/support/mod.rs
--- a/tests/support/mod.rs
+++ b/tests/support/mod.rs
@@ -378,7 +378,7 @@ pub fn __run_test(cfg: __TestConfig) {
.map_err(|never| -> hyper::Error { match never {} })
.flatten()
.map_err(|e| panic!("server connection error: {}", e));
- ::tokio::spawn(fut);
+ crate::tokio::spawn(fut);
Ok::<_, hyper::Error>(cnt)
})
.map(|_| ())
|
hyperium/hyper
|
79ae89e066f5fbfc1ce3612299671a14f7b35230
|
[
"1797"
] |
0.12
|
7fde9ba6b8a6cbc1880a518ee303aeb3e08f7847
|
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -476,14 +476,20 @@ impl Http1Transaction for Server {
},
None |
Some(BodyLength::Known(0)) => {
- extend(dst, b"content-length: 0\r\n");
+ if msg.head.subject != StatusCode::NOT_MODIFIED {
+ extend(dst, b"content-length: 0\r\n");
+ }
Encoder::length(0)
},
Some(BodyLength::Known(len)) => {
- extend(dst, b"content-length: ");
- let _ = ::itoa::write(&mut dst, len);
- extend(dst, b"\r\n");
- Encoder::length(len)
+ if msg.head.subject == StatusCode::NOT_MODIFIED {
+ Encoder::length(0)
+ } else {
+ extend(dst, b"content-length: ");
+ let _ = ::itoa::write(&mut dst, len);
+ extend(dst, b"\r\n");
+ Encoder::length(len)
+ }
},
};
}
|
This gets set here: https://github.com/hyperium/hyper/blob/8d70baca611869c1997571e8513717396b13328b/src/proto/h1/role.rs#L479
A check could be added there for cases when there should be no header added.
|
2019-04-25T01:33:34Z
| 1,801
|
Hyper should skip automatic Content-Length header for HTTP 304 responses
It's unnecessary, see [RFC 7232](https://tools.ietf.org/html/rfc7232#section-4.1).
Currently, for empty bodies with the Content-Length header unset, Hyper automatically sets the value to 0, which is incorrect for HTTP 304 responses. The correct behavior is either to omit Content-Length entirely (recommended by the specification) or to provide the Content-Length of the full response.
|
hyperium__hyper-1801
|
diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs
--- a/src/proto/h1/role.rs
+++ b/src/proto/h1/role.rs
@@ -1591,4 +1597,3 @@ mod tests {
})
}
}
-
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1687,6 +1687,51 @@ fn http2_service_poll_ready_error_sends_goaway() {
assert_eq!(h2_err.reason(), Some(h2::Reason::INADEQUATE_SECURITY));
}
+#[test]
+fn skips_content_length_for_304_responses() {
+ let server = serve();
+ server.reply()
+
+ .status(hyper::StatusCode::NOT_MODIFIED)
+ .body("foo");
+ let mut req = connect(server.addr());
+ req.write_all(b"\
+ GET / HTTP/1.1\r\n\
+ Host: example.domain\r\n\
+ Connection: close\r\n\
+ \r\n\
+ ").unwrap();
+
+ let mut response = String::new();
+ req.read_to_string(&mut response).unwrap();
+ assert!(!response.contains("content-length:"));
+}
+
+#[test]
+fn skips_content_length_and_body_for_304_responses() {
+ let server = serve();
+ server.reply()
+
+ .status(hyper::StatusCode::NOT_MODIFIED)
+ .body("foo");
+ let mut req = connect(server.addr());
+ req.write_all(b"\
+ GET / HTTP/1.1\r\n\
+ Host: example.domain\r\n\
+ Connection: close\r\n\
+ \r\n\
+ ").unwrap();
+
+ let mut response = String::new();
+ req.read_to_string(&mut response).unwrap();
+ assert!(!response.contains("content-length:"));
+ let mut lines = response.lines();
+ assert_eq!(lines.next(), Some("HTTP/1.1 304 Not Modified"));
+
+ let mut lines = lines.skip_while(|line| !line.is_empty());
+ assert_eq!(lines.next(), Some(""));
+ assert_eq!(lines.next(), None);
+}
// -------------------------------------------------
// the Server that is used to run all the tests with
// -------------------------------------------------
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -2058,4 +2103,3 @@ impl Drop for Dropped {
self.0.store(true, Ordering::SeqCst);
}
}
-
|
hyperium/hyper
|
a1609fbb332fccffbeb85d16f1cc0bf98c6ede21
|
[
"1740"
] |
0.12
|
ec7b93c982dfaf7b213170d967bab809dbe53d8a
|
diff --git a/src/client/connect/dns.rs b/src/client/connect/dns.rs
--- a/src/client/connect/dns.rs
+++ b/src/client/connect/dns.rs
@@ -7,11 +7,13 @@
//! - The [`Resolve`](Resolve) trait and related types to build a custom
//! resolver for use with the `HttpConnector`.
use std::{fmt, io, vec};
+use std::error::Error;
use std::net::{
IpAddr, Ipv4Addr, Ipv6Addr,
SocketAddr, ToSocketAddrs,
SocketAddrV4, SocketAddrV6,
};
+use std::str::FromStr;
use std::sync::Arc;
use futures::{Async, Future, Poll};
diff --git a/src/client/connect/dns.rs b/src/client/connect/dns.rs
--- a/src/client/connect/dns.rs
+++ b/src/client/connect/dns.rs
@@ -72,6 +74,32 @@ impl fmt::Debug for Name {
}
}
+impl FromStr for Name {
+ type Err = InvalidNameError;
+
+ fn from_str(host: &str) -> Result<Self, Self::Err> {
+ // Possibly add validation later
+ Ok(Name::new(host.to_owned()))
+ }
+}
+
+/// Error indicating a given string was not a valid domain name.
+#[derive(Debug)]
+pub struct InvalidNameError(());
+
+impl fmt::Display for InvalidNameError {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ self.description().fmt(f)
+ }
+}
+
+impl Error for InvalidNameError {
+ fn description(&self) -> &str {
+ "Not a valid domain name"
+ }
+}
+
+
impl GaiResolver {
/// Construct a new `GaiResolver`.
///
|
2019-01-10T10:02:14Z
| 1,741
|
Add FromStr impl for hyper::client::connect::dns::Name
To allow people to construct a `Name`, such as when unit testing a `Resolve` implementation. Providing a `FromStr` impl instead of just `new(String) -> Name` allows us to add in validation in the future if need be.
|
hyperium__hyper-1741
|
diff --git a/src/client/connect/dns.rs b/src/client/connect/dns.rs
--- a/src/client/connect/dns.rs
+++ b/src/client/connect/dns.rs
@@ -317,4 +345,10 @@ mod tests {
assert!(preferred.next().unwrap().is_ipv6());
assert!(fallback.next().unwrap().is_ipv4());
}
+
+ #[test]
+ fn test_name_from_str() {
+ let name = Name::from_str("test.example.com").expect("Should be a valid domain");
+ assert_eq!(name.as_str(), "test.example.com");
+ }
}
|
hyperium/hyper
|
a1609fbb332fccffbeb85d16f1cc0bf98c6ede21
|
|
[
"1708"
] |
0.12
|
a6fff13a392d3394cacb1215f83bd8ec87671566
|
diff --git a/benches/end_to_end.rs b/benches/end_to_end.rs
--- a/benches/end_to_end.rs
+++ b/benches/end_to_end.rs
@@ -96,6 +116,11 @@ impl Opts {
self
}
+ fn response_body(mut self, body: &'static [u8]) -> Self {
+ self.response_body = body;
+ self
+ }
+
fn parallel(mut self, cnt: u32) -> Self {
assert!(cnt > 0, "parallel count must be larger than 0");
self.parallel_cnt = cnt;
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -1,4 +1,5 @@
use std::cell::Cell;
+use std::cmp;
use std::collections::VecDeque;
use std::fmt;
use std::io;
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -60,9 +61,7 @@ where
io: io,
read_blocked: false,
read_buf: BytesMut::with_capacity(0),
- read_buf_strategy: ReadStrategy::Adaptive {
- max: DEFAULT_MAX_BUFFER_SIZE,
- },
+ read_buf_strategy: ReadStrategy::default(),
write_buf: WriteBuf::new(),
}
}
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -81,9 +80,7 @@ where
"The max_buf_size cannot be smaller than {}.",
MINIMUM_MAX_BUFFER_SIZE,
);
- self.read_buf_strategy = ReadStrategy::Adaptive {
- max,
- };
+ self.read_buf_strategy = ReadStrategy::with_max(max);
self.write_buf.max_buf_size = max;
}
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -149,18 +146,11 @@ where
debug!("parsed {} headers", msg.head.headers.len());
return Ok(Async::Ready(msg))
},
- None => match self.read_buf_strategy {
- ReadStrategy::Adaptive { max } => {
- if self.read_buf.len() >= max {
- debug!("max_buf_size ({}) reached, closing", max);
- return Err(::Error::new_too_large());
- }
- },
- ReadStrategy::Exact(exact) => {
- if self.read_buf.len() >= exact {
- debug!("exact buf size ({}) filled, closing", exact);
- return Err(::Error::new_too_large());
- }
+ None => {
+ let max = self.read_buf_strategy.max();
+ if self.read_buf.len() >= max {
+ debug!("max_buf_size ({}) reached, closing", max);
+ return Err(::Error::new_too_large());
}
},
}
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -177,22 +167,15 @@ where
pub fn read_from_io(&mut self) -> Poll<usize, io::Error> {
use bytes::BufMut;
self.read_blocked = false;
- match self.read_buf_strategy {
- ReadStrategy::Adaptive { .. } => {
- if self.read_buf.remaining_mut() < INIT_BUFFER_SIZE {
- self.read_buf.reserve(INIT_BUFFER_SIZE);
- }
- },
- ReadStrategy::Exact(exact) => {
- if self.read_buf.capacity() < exact {
- self.read_buf.reserve(exact);
- }
- },
+ let next = self.read_buf_strategy.next();
+ if self.read_buf.remaining_mut() < next {
+ self.read_buf.reserve(next);
}
self.io.read_buf(&mut self.read_buf).map(|ok| {
match ok {
Async::Ready(n) => {
debug!("read {} bytes", n);
+ self.read_buf_strategy.record(n);
Async::Ready(n)
},
Async::NotReady => {
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -285,11 +268,82 @@ where
#[derive(Clone, Copy, Debug)]
enum ReadStrategy {
Adaptive {
+ decrease_now: bool,
+ next: usize,
max: usize
},
Exact(usize),
}
+impl ReadStrategy {
+ fn with_max(max: usize) -> ReadStrategy {
+ ReadStrategy::Adaptive {
+ decrease_now: false,
+ next: INIT_BUFFER_SIZE,
+ max,
+ }
+ }
+
+ fn next(&self) -> usize {
+ match *self {
+ ReadStrategy::Adaptive { next, .. } => next,
+ ReadStrategy::Exact(exact) => exact,
+ }
+ }
+
+ fn max(&self) -> usize {
+ match *self {
+ ReadStrategy::Adaptive { max, .. } => max,
+ ReadStrategy::Exact(exact) => exact,
+ }
+ }
+
+ fn record(&mut self, bytes_read: usize) {
+ match *self {
+ ReadStrategy::Adaptive { ref mut decrease_now, ref mut next, max, .. } => {
+ if bytes_read >= *next {
+ *next = cmp::min(incr_power_of_two(*next), max);
+ *decrease_now = false;
+ } else {
+ let decr_to = prev_power_of_two(*next);
+ if bytes_read < decr_to {
+ if *decrease_now {
+ *next = cmp::max(decr_to, INIT_BUFFER_SIZE);
+ *decrease_now = false;
+ } else {
+ // Decreasing is a two "record" process.
+ *decrease_now = true;
+ }
+ } else {
+ // A read within the current range should cancel
+ // a potential decrease, since we just saw proof
+ // that we still need this size.
+ *decrease_now = false;
+ }
+ }
+ },
+ _ => (),
+ }
+ }
+}
+
+fn incr_power_of_two(n: usize) -> usize {
+ n.saturating_mul(2)
+}
+
+fn prev_power_of_two(n: usize) -> usize {
+ // Only way this shift can underflow is if n is less than 4.
+ // (Which would means `usize::MAX >> 64` and underflowed!)
+ debug_assert!(n >= 4);
+ (::std::usize::MAX >> (n.leading_zeros() + 2)) + 1
+}
+
+impl Default for ReadStrategy {
+ fn default() -> ReadStrategy {
+ ReadStrategy::with_max(DEFAULT_MAX_BUFFER_SIZE)
+ }
+}
+
#[derive(Clone)]
pub struct Cursor<T> {
bytes: T,
|
2018-11-28T01:02:13Z
| 1,725
|
Adaptive Read Buffer
Add an adaptive read buffer strategy for `proto::h1::io::Buffered`, such that the size of the read buffer changes depending on the number of bytes read from the transport on each `read` operation. This concept is similar to Netty's [AdaptiveRecvByteBufAllocator](https://github.com/netty/netty/blob/4.1/transport/src/main/java/io/netty/channel/AdaptiveRecvByteBufAllocator.java).
- After a `read` on the transport, the number of bytes read should be compared with the current guess size.
- If the number of bytes is significantly less, the guess should be lowered.
- If the number of bytes is significantly more, the guess should be raised.
- It should likely make use of "steps". Something like starting at 1024 and doubling each step is probably a good start.
- If the bytes read stays within the current step, no adjustment is needed.
|
hyperium__hyper-1725
|
diff --git a/benches/end_to_end.rs b/benches/end_to_end.rs
--- a/benches/end_to_end.rs
+++ b/benches/end_to_end.rs
@@ -29,6 +29,26 @@ fn http1_post(b: &mut test::Bencher) {
.bench(b)
}
+#[bench]
+fn http1_body_100kb(b: &mut test::Bencher) {
+ let body = &[b'x'; 1024 * 100];
+ opts()
+ .method(Method::POST)
+ .request_body(body)
+ .response_body(body)
+ .bench(b)
+}
+
+#[bench]
+fn http1_body_10mb(b: &mut test::Bencher) {
+ let body = &[b'x'; 1024 * 1024 * 10];
+ opts()
+ .method(Method::POST)
+ .request_body(body)
+ .response_body(body)
+ .bench(b)
+}
+
#[bench]
fn http1_get_parallel(b: &mut test::Bencher) {
opts()
diff --git a/benches/end_to_end.rs b/benches/end_to_end.rs
--- a/benches/end_to_end.rs
+++ b/benches/end_to_end.rs
@@ -105,6 +130,9 @@ impl Opts {
fn bench(self, b: &mut test::Bencher) {
let _ = pretty_env_logger::try_init();
let mut rt = Runtime::new().unwrap();
+
+ b.bytes = self.response_body.len() as u64 + self.request_body.map(|b| b.len()).unwrap_or(0) as u64;
+
let addr = spawn_hello(&mut rt, self.response_body);
let connector = HttpConnector::new(1);
diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs
--- a/src/proto/h1/io.rs
+++ b/src/proto/h1/io.rs
@@ -637,6 +691,97 @@ mod tests {
assert!(buffered.io.blocked());
}
+ #[test]
+ fn read_strategy_adaptive_increments() {
+ let mut strategy = ReadStrategy::default();
+ assert_eq!(strategy.next(), 8192);
+
+ // Grows if record == next
+ strategy.record(8192);
+ assert_eq!(strategy.next(), 16384);
+
+ strategy.record(16384);
+ assert_eq!(strategy.next(), 32768);
+
+ // Enormous records still increment at same rate
+ strategy.record(::std::usize::MAX);
+ assert_eq!(strategy.next(), 65536);
+
+ let max = strategy.max();
+ while strategy.next() < max {
+ strategy.record(max);
+ }
+
+ assert_eq!(strategy.next(), max, "never goes over max");
+ strategy.record(max + 1);
+ assert_eq!(strategy.next(), max, "never goes over max");
+ }
+
+ #[test]
+ fn read_strategy_adaptive_decrements() {
+ let mut strategy = ReadStrategy::default();
+ strategy.record(8192);
+ assert_eq!(strategy.next(), 16384);
+
+ strategy.record(1);
+ assert_eq!(strategy.next(), 16384, "first smaller record doesn't decrement yet");
+ strategy.record(8192);
+ assert_eq!(strategy.next(), 16384, "record was with range");
+
+ strategy.record(1);
+ assert_eq!(strategy.next(), 16384, "in-range record should make this the 'first' again");
+
+ strategy.record(1);
+ assert_eq!(strategy.next(), 8192, "second smaller record decrements");
+
+ strategy.record(1);
+ assert_eq!(strategy.next(), 8192, "first doesn't decrement");
+ strategy.record(1);
+ assert_eq!(strategy.next(), 8192, "doesn't decrement under minimum");
+ }
+
+ #[test]
+ fn read_strategy_adaptive_stays_the_same() {
+ let mut strategy = ReadStrategy::default();
+ strategy.record(8192);
+ assert_eq!(strategy.next(), 16384);
+
+ strategy.record(8193);
+ assert_eq!(strategy.next(), 16384, "first smaller record doesn't decrement yet");
+
+ strategy.record(8193);
+ assert_eq!(strategy.next(), 16384, "with current step does not decrement");
+ }
+
+ #[test]
+ fn read_strategy_adaptive_max_fuzz() {
+ fn fuzz(max: usize) {
+ let mut strategy = ReadStrategy::with_max(max);
+ while strategy.next() < max {
+ strategy.record(::std::usize::MAX);
+ }
+ let mut next = strategy.next();
+ while next > 8192 {
+ strategy.record(1);
+ strategy.record(1);
+ next = strategy.next();
+ assert!(
+ next.is_power_of_two(),
+ "decrement should be powers of two: {} (max = {})",
+ next,
+ max,
+ );
+ }
+ }
+
+ let mut max = 8192;
+ while max < ::std::usize::MAX {
+ fuzz(max);
+ max = (max / 2).saturating_mul(3);
+ }
+ fuzz(::std::usize::MAX);
+ }
+
#[test]
#[should_panic]
fn write_buf_requires_non_empty_bufs() {
|
hyperium/hyper
|
a1609fbb332fccffbeb85d16f1cc0bf98c6ede21
|
|
[
"1716"
] |
0.12
|
c35bdca8fab3facfc69a2d85c2fe4eb46887e716
|
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -38,6 +38,7 @@ where I: AsyncRead + AsyncWrite,
Conn {
io: Buffered::new(io),
state: State {
+ allow_half_close: true,
cached_headers: None,
error: None,
keep_alive: KA::Busy,
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -75,6 +76,10 @@ where I: AsyncRead + AsyncWrite,
self.state.title_case_headers = true;
}
+ pub(crate) fn set_disable_half_close(&mut self) {
+ self.state.allow_half_close = false;
+ }
+
pub fn into_inner(self) -> (I, Bytes) {
self.io.into_inner()
}
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -228,10 +233,11 @@ where I: AsyncRead + AsyncWrite,
trace!("read_keep_alive; is_mid_message={}", self.is_mid_message());
- if !self.is_mid_message() {
- self.require_empty_read().map_err(::Error::new_io)?;
+ if self.is_mid_message() {
+ self.mid_message_detect_eof().map_err(::Error::new_io)
+ } else {
+ self.require_empty_read().map_err(::Error::new_io)
}
- Ok(())
}
fn is_mid_message(&self) -> bool {
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -252,7 +258,7 @@ where I: AsyncRead + AsyncWrite,
// This should only be called for Clients wanting to enter the idle
// state.
fn require_empty_read(&mut self) -> io::Result<()> {
- assert!(!self.can_read_head() && !self.can_read_body());
+ debug_assert!(!self.can_read_head() && !self.can_read_body());
if !self.io.read_buf().is_empty() {
debug!("received an unexpected {} bytes", self.io.read_buf().len());
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -279,11 +285,21 @@ where I: AsyncRead + AsyncWrite,
}
}
+ fn mid_message_detect_eof(&mut self) -> io::Result<()> {
+ debug_assert!(!self.can_read_head() && !self.can_read_body());
+
+ if self.state.allow_half_close || !self.io.read_buf().is_empty() {
+ Ok(())
+ } else {
+ self.try_io_read().map(|_| ())
+ }
+ }
+
fn try_io_read(&mut self) -> Poll<usize, io::Error> {
match self.io.read_from_io() {
Ok(Async::Ready(0)) => {
trace!("try_io_read; found EOF on connection: {:?}", self.state);
- let must_error = self.should_error_on_eof();
+ let must_error = !self.state.is_idle();
let ret = if must_error {
let desc = if self.is_mid_message() {
"unexpected EOF waiting for response"
diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs
--- a/src/proto/h1/conn.rs
+++ b/src/proto/h1/conn.rs
@@ -655,6 +671,7 @@ impl<I, B: Buf, T> fmt::Debug for Conn<I, B, T> {
}
struct State {
+ allow_half_close: bool,
/// Re-usable HeaderMap to reduce allocating new ones.
cached_headers: Option<HeaderMap>,
/// If an error occurs when there wasn't a direct way to return it
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -45,6 +45,7 @@ pub(super) use self::upgrades::UpgradeableConnection;
#[derive(Clone, Debug)]
pub struct Http<E = Exec> {
exec: E,
+ h1_half_close: bool,
h1_writev: bool,
mode: ConnectionMode,
keep_alive: bool,
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -163,6 +164,7 @@ impl Http {
pub fn new() -> Http {
Http {
exec: Exec::Default,
+ h1_half_close: true,
h1_writev: true,
mode: ConnectionMode::Fallback,
keep_alive: true,
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -195,6 +197,20 @@ impl<E> Http<E> {
self
}
+ /// Set whether HTTP/1 connections should support half-closures.
+ ///
+ /// Clients can chose to shutdown their write-side while waiting
+ /// for the server to respond. Setting this to `false` will
+ /// automatically close any connection immediately if `read`
+ /// detects an EOF.
+ ///
+ /// Default is `true`.
+ #[inline]
+ pub fn http1_half_close(&mut self, val: bool) -> &mut Self {
+ self.h1_half_close = val;
+ self
+ }
+
/// Set whether HTTP/1 connections should try to use vectored writes,
/// or always flatten into a single buffer.
///
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -261,6 +277,7 @@ impl<E> Http<E> {
pub fn with_executor<E2>(self, exec: E2) -> Http<E2> {
Http {
exec,
+ h1_half_close: self.h1_half_close,
h1_writev: self.h1_writev,
mode: self.mode,
keep_alive: self.keep_alive,
diff --git a/src/server/conn.rs b/src/server/conn.rs
--- a/src/server/conn.rs
+++ b/src/server/conn.rs
@@ -319,6 +336,9 @@ impl<E> Http<E> {
if !self.keep_alive {
conn.disable_keep_alive();
}
+ if !self.h1_half_close {
+ conn.set_disable_half_close();
+ }
if !self.h1_writev {
conn.set_write_strategy_flatten();
}
diff --git a/src/server/mod.rs b/src/server/mod.rs
--- a/src/server/mod.rs
+++ b/src/server/mod.rs
@@ -247,6 +247,20 @@ impl<I, E> Builder<I, E> {
self
}
+
+ /// Set whether HTTP/1 connections should support half-closures.
+ ///
+ /// Clients can chose to shutdown their write-side while waiting
+ /// for the server to respond. Setting this to `false` will
+ /// automatically close any connection immediately if `read`
+ /// detects an EOF.
+ ///
+ /// Default is `true`.
+ pub fn http1_half_close(mut self, val: bool) -> Self {
+ self.protocol.http1_half_close(val);
+ self
+ }
+
/// Sets whether HTTP/1 is required.
///
/// Default is `false`.
|
I haven't really investigated, could the fix for #1717 have fixed this too?
I will try and report back tomorrow.
I tried. It's not fixed. The way to reproduce it:
1. Run the broker.
2. Run the worker.
...
3. Restart the worker. The broker should report "Dropping Next()" but does not.
next curl request fails.
I think the bug is on the server, not the client: the broker is the server and it does not drop the future on disconnect from the client (worker)
When I run it with trace log-level, the messages after I cancel the worker are:
```
2018-11-26 22:57:43 TRACE [tokio_reactor] event Readable | Writable | Hup Token(4194305)
2018-11-26 22:57:43 TRACE [tokio_reactor] loop process - 1 events, 0.000s
```
... and nothing else. So looks like nobody is polling the connection while the response future is waiting on the oneshot::Receiver for the Body.
The connection isn't waiting on a read, due to this line: https://github.com/hyperium/hyper/blob/master/src/proto/h1/conn.rs#L231
It's basically to apply TCP backpressure, to not start reading a second message while waiting to respond to the first. If there were a way to register for `HUP` on `AsyncRead`, that could help achieve it without possibly reading a next message.
Hm, will look how it is implemented on the h2 side and try to come with a patch.
mio supports HUP registration via UnixReady, but that's not piped through Tokio. Adding `TcpStream::poll_hup` would be *great* though, even if it would only work on Unix. The server I work on processes long-running requests, and we have request cancellation detection but it won't work via HTTP/1 without that.
In h2, it's always reading, since it needs to in order to handle HTTP2 bookkeeping, since frames can come in related to any stream (or for the connection).
We could try adding a `read` check on the transport even if `is_mid_message`, and just not error if some bytes are found. It should work if a client isn't using HTTP/1.1 pipelining, but if it is, it'd be quite hard to detect an EOF read if a pipelined request is sitting in the buffer. Perhaps that's better than nothing (and the real world doesn't really use HTTP/1.1 pipelining anyways).
Just for test, I disabled the check for `is_mid_message` but it still don't drop the response future. Here is the trace (with some more trace logs added):
```
2018-11-26 23:31:57 TRACE [tokio_reactor] event Readable | Writable | Hup Token(8388609)
2018-11-26 23:31:57 TRACE [tokio_reactor] loop process - 1 events, 0.000s
2018-11-26 23:31:57 TRACE [hyper::proto::h1::dispatch] poll_catch
2018-11-26 23:31:57 TRACE [hyper::proto::h1::dispatch] poll_inner
2018-11-26 23:31:57 TRACE [hyper::proto::h1::dispatch] poll_read
2018-11-26 23:31:57 TRACE [hyper::proto::h1::conn] read_keep_alive; is_mid_read=false
2018-11-26 23:31:57 TRACE [hyper::proto::h1::io] io.read_buf -> Ready(0)
2018-11-26 23:31:57 DEBUG [hyper::proto::h1::io] read 0 bytes
2018-11-26 23:31:57 TRACE [hyper::proto::h1::conn] try_io_read; found EOF on connection: State { reading: KeepAlive, writing: Init, keep_alive: Busy, error: None }
2018-11-26 23:31:57 TRACE [hyper::proto::h1::conn] State::close_read()
2018-11-26 23:31:57 TRACE [hyper::proto::h1::dispatch] poll_write
2018-11-26 23:31:57 TRACE [hyper::proto::h1::dispatch] poll_flush
2018-11-26 23:31:57 TRACE [hyper::proto::h1::conn] flushed({role=server}): State { reading: Closed, writing: Init, keep_alive: Disabled, error: None }
2018-11-26 23:31:57 TRACE [hyper::proto::h1::dispatch] poll_inner.is_done?
2018-11-26 23:31:57 TRACE [hyper::proto::h1::conn] conn.is_read_closed true
2018-11-26 23:31:57 TRACE [hyper::proto::h1::conn] conn.is_write_closed false
2018-11-26 23:31:57 TRACE [hyper::proto::h1::dispatch] poll_inner.is_done -> false
2018-11-26 23:31:57 TRACE [tokio_reactor] event Readable Token(4194303)
2018-11-26 23:31:57 TRACE [tokio_reactor] loop process - 1 events, 0.000s
2018-11-26 23:31:57 TRACE [tokio_reactor] loop process - 0 events, 0.000s
```
Ah sure, if it were to close the connection immediately, then connections that half-close after writing the request would never be able to read the response...
Yes, you are right.
A possible solution is to offer some sort of "support half-closed" config option (default would need to be true to not break existing behavior), and if false, then that keep-alive read can forcibly close future down.
|
2018-11-27T00:24:56Z
| 1,723
|
Response future not dropped on disconnect
In certain cases I see Response future is not dropped when the connection closes. I have a minimal reproduction here: https://github.com/luben/repro-hyper
The reproduction is simplified work queue - workers wait on `GET /next`, clients submit work to `POST /`, worker submit response to `POST /response`. What happens is that if a worker restarts, it is left registered but disconnected and it leads to next client request to hang. I added a small debugging class that tells me when the futures are dropped. It confirms that the response future from /next is not dropped on worker disconnect.
Changing the workers to use HTTP/2 instead of HTTP/1.1 (un-comment https://github.com/luben/repro-hyper/blob/master/src/bin/worker.rs#L16) solves the issue.
|
hyperium__hyper-1723
|
diff --git a/tests/server.rs b/tests/server.rs
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -991,6 +991,71 @@ fn nonempty_parse_eof_returns_error() {
rt.block_on(fut).expect_err("partial parse eof is error");
}
+#[test]
+fn socket_half_closed() {
+ let _ = pretty_env_logger::try_init();
+ let mut rt = Runtime::new().unwrap();
+ let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
+ let addr = listener.local_addr().unwrap();
+
+ thread::spawn(move || {
+ let mut tcp = connect(&addr);
+ tcp.write_all(b"GET / HTTP/1.1\r\n\r\n").unwrap();
+ tcp.shutdown(::std::net::Shutdown::Write).expect("SHDN_WR");
+
+ let mut buf = [0; 256];
+ tcp.read(&mut buf).unwrap();
+ let expected = "HTTP/1.1 200 OK\r\n";
+ assert_eq!(s(&buf[..expected.len()]), expected);
+ });
+
+ let fut = listener.incoming()
+ .into_future()
+ .map_err(|_| unreachable!())
+ .and_then(|(item, _incoming)| {
+ let socket = item.unwrap();
+ Http::new()
+ .serve_connection(socket, service_fn(|_| {
+ Delay::new(Duration::from_millis(500))
+ .map(|_| {
+ Response::new(Body::empty())
+ })
+ }))
+ });
+
+ rt.block_on(fut).unwrap();
+}
+
+#[test]
+fn disconnect_after_reading_request_before_responding() {
+ let _ = pretty_env_logger::try_init();
+ let mut rt = Runtime::new().unwrap();
+ let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
+ let addr = listener.local_addr().unwrap();
+
+ thread::spawn(move || {
+ let mut tcp = connect(&addr);
+ tcp.write_all(b"GET / HTTP/1.1\r\n\r\n").unwrap();
+ });
+
+ let fut = listener.incoming()
+ .into_future()
+ .map_err(|_| unreachable!())
+ .and_then(|(item, _incoming)| {
+ let socket = item.unwrap();
+ Http::new()
+ .http1_half_close(false)
+ .serve_connection(socket, service_fn(|_| {
+ Delay::new(Duration::from_secs(2))
+ .map(|_| -> Response<Body> {
+ panic!("response future should have been dropped");
+ })
+ }))
+ });
+
+ rt.block_on(fut).expect_err("socket disconnected");
+}
+
#[test]
fn returning_1xx_response_is_error() {
let mut rt = Runtime::new().unwrap();
|
hyperium/hyper
|
a1609fbb332fccffbeb85d16f1cc0bf98c6ede21
|
[
"1485"
] |
0.12
|
bf188b28fe1f2c1c368824926a77df78f0d00a13
|
diff --git a/src/client/conn.rs b/src/client/conn.rs
--- a/src/client/conn.rs
+++ b/src/client/conn.rs
@@ -466,6 +466,7 @@ impl Builder {
T: AsyncRead + AsyncWrite + Send + 'static,
B: Payload + 'static,
{
+ trace!("client handshake HTTP/{}", if self.http2 { 2 } else { 1 });
Handshake {
builder: self.clone(),
io: Some(io),
diff --git a/src/client/connect/mod.rs b/src/client/connect/mod.rs
--- a/src/client/connect/mod.rs
+++ b/src/client/connect/mod.rs
@@ -36,7 +36,6 @@ pub trait Connect: Send + Sync {
/// A set of properties to describe where and how to try to connect.
#[derive(Clone, Debug)]
pub struct Destination {
- //pub(super) alpn: Alpn,
pub(super) uri: Uri,
}
diff --git a/src/client/connect/mod.rs b/src/client/connect/mod.rs
--- a/src/client/connect/mod.rs
+++ b/src/client/connect/mod.rs
@@ -46,21 +45,18 @@ pub struct Destination {
/// was used, or if connected to an HTTP proxy.
#[derive(Debug)]
pub struct Connected {
- //alpn: Alpn,
+ pub(super) alpn: Alpn,
pub(super) is_proxied: bool,
pub(super) extra: Option<Extra>,
}
pub(super) struct Extra(Box<ExtraInner>);
-/*TODO: when HTTP1 Upgrades to H2 are added, this will be needed
-#[derive(Debug)]
+#[derive(Clone, Copy, Debug, PartialEq)]
pub(super) enum Alpn {
- Http1,
- //H2,
- //Http1OrH2
+ H2,
+ None,
}
-*/
impl Destination {
/// Get the protocol scheme.
diff --git a/src/client/connect/mod.rs b/src/client/connect/mod.rs
--- a/src/client/connect/mod.rs
+++ b/src/client/connect/mod.rs
@@ -246,7 +242,7 @@ impl Connected {
/// Create new `Connected` type with empty metadata.
pub fn new() -> Connected {
Connected {
- //alpn: Alpn::Http1,
+ alpn: Alpn::None,
is_proxied: false,
extra: None,
}
diff --git a/src/client/connect/mod.rs b/src/client/connect/mod.rs
--- a/src/client/connect/mod.rs
+++ b/src/client/connect/mod.rs
@@ -274,19 +270,18 @@ impl Connected {
self
}
- /*
/// Set that the connected transport negotiated HTTP/2 as it's
/// next protocol.
- pub fn h2(mut self) -> Connected {
+ pub fn negotiated_h2(mut self) -> Connected {
self.alpn = Alpn::H2;
self
}
- */
// Don't public expose that `Connected` is `Clone`, unsure if we want to
// keep that contract...
pub(super) fn clone(&self) -> Connected {
Connected {
+ alpn: self.alpn.clone(),
is_proxied: self.is_proxied,
extra: self.extra.clone(),
}
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -86,12 +86,12 @@ use futures::{Async, Future, Poll};
use futures::future::{self, Either, Executor};
use futures::sync::oneshot;
use http::{Method, Request, Response, Uri, Version};
-use http::header::{Entry, HeaderValue, HOST};
+use http::header::{HeaderValue, HOST};
use http::uri::Scheme;
use body::{Body, Payload};
use common::{Exec, lazy as hyper_lazy, Lazy};
-use self::connect::{Connect, Connected, Destination};
+use self::connect::{Alpn, Connect, Connected, Destination};
use self::pool::{Key as PoolKey, Pool, Poolable, Pooled, Reservation};
#[cfg(feature = "runtime")] pub use self::connect::HttpConnector;
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -192,23 +192,19 @@ where C: Connect + Sync + 'static,
/// Send a constructed Request using this Client.
pub fn request(&self, mut req: Request<B>) -> ResponseFuture {
- let is_http_11 = self.ver == Ver::Http1 && match req.version() {
- Version::HTTP_11 => true,
- Version::HTTP_10 => false,
- other => {
+ let is_http_connect = req.method() == &Method::CONNECT;
+ match req.version() {
+ Version::HTTP_11 => (),
+ Version::HTTP_10 => if is_http_connect {
+ debug!("CONNECT is not allowed for HTTP/1.0");
+ return ResponseFuture::new(Box::new(future::err(::Error::new_user_unsupported_request_method())));
+ },
+ other => if self.ver != Ver::Http2 {
error!("Request has unsupported version \"{:?}\"", other);
return ResponseFuture::new(Box::new(future::err(::Error::new_user_unsupported_version())));
}
};
- let is_http_connect = req.method() == &Method::CONNECT;
-
- if !is_http_11 && is_http_connect {
- debug!("client does not support CONNECT requests for {:?}", req.version());
- return ResponseFuture::new(Box::new(future::err(::Error::new_user_unsupported_request_method())));
- }
-
-
let uri = req.uri().clone();
let domain = match (uri.scheme_part(), uri.authority_part()) {
(Some(scheme), Some(auth)) => {
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -233,21 +229,7 @@ where C: Connect + Sync + 'static,
}
};
- if self.set_host && self.ver == Ver::Http1 {
- if let Entry::Vacant(entry) = req.headers_mut().entry(HOST).expect("HOST is always valid header name") {
- let hostname = uri.host().expect("authority implies host");
- let host = if let Some(port) = uri.port() {
- let s = format!("{}:{}", hostname, port);
- HeaderValue::from_str(&s)
- } else {
- HeaderValue::from_str(hostname)
- }.expect("uri host is valid header value");
- entry.insert(host);
- }
- }
-
-
- let pool_key = (Arc::new(domain.to_string()), self.ver);
+ let pool_key = Arc::new(domain.to_string());
ResponseFuture::new(Box::new(self.retryably_send_request(req, pool_key)))
}
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -283,11 +265,28 @@ where C: Connect + Sync + 'static,
fn send_request(&self, mut req: Request<B>, pool_key: PoolKey) -> impl Future<Item=Response<Body>, Error=ClientError<B>> {
let conn = self.connection_for(req.uri().clone(), pool_key);
- let ver = self.ver;
+ let set_host = self.set_host;
let executor = self.executor.clone();
conn.and_then(move |mut pooled| {
- if ver == Ver::Http1 {
- // CONNECT always sends origin-form, so check it first...
+ if pooled.is_http1() {
+ if set_host {
+ let uri = req.uri().clone();
+ req
+ .headers_mut()
+ .entry(HOST)
+ .expect("HOST is always valid header name")
+ .or_insert_with(|| {
+ let hostname = uri.host().expect("authority implies host");
+ if let Some(port) = uri.port() {
+ let s = format!("{}:{}", hostname, port);
+ HeaderValue::from_str(&s)
+ } else {
+ HeaderValue::from_str(hostname)
+ }.expect("uri host is valid header value")
+ });
+ }
+
+ // CONNECT always sends authority-form, so check it first...
if req.method() == &Method::CONNECT {
authority_form(req.uri_mut());
} else if pooled.conn_info.is_proxied {
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -295,11 +294,9 @@ where C: Connect + Sync + 'static,
} else {
origin_form(req.uri_mut());
};
- } else {
- debug_assert!(
- req.method() != &Method::CONNECT,
- "Client should have returned Error for HTTP2 CONNECT"
- );
+ } else if req.method() == &Method::CONNECT {
+ debug!("client does not support CONNECT requests over HTTP2");
+ return Either::A(future::err(ClientError::Normal(::Error::new_user_unsupported_request_method())));
}
let fut = pooled.send_request_retryable(req)
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -322,10 +319,10 @@ where C: Connect + Sync + 'static,
// To counteract this, we must check if our senders 'want' channel
// has been closed after having tried to send. If so, error out...
if pooled.is_closed() {
- return Either::A(fut);
+ return Either::B(Either::A(fut));
}
- Either::B(fut
+ Either::B(Either::B(fut
.and_then(move |mut res| {
// If pooled is HTTP/2, we can toss this reference immediately.
//
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -337,7 +334,7 @@ where C: Connect + Sync + 'static,
// for a new request to start.
//
// It won't be ready if there is a body to stream.
- if ver == Ver::Http2 || !pooled.is_pool_enabled() || pooled.is_ready() {
+ if pooled.is_http2() || !pooled.is_pool_enabled() || pooled.is_ready() {
drop(pooled);
} else if !res.body().is_end_stream() {
let (delayed_tx, delayed_rx) = oneshot::channel();
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -370,7 +367,7 @@ where C: Connect + Sync + 'static,
}
}
Ok(res)
- }))
+ })))
})
}
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -463,8 +460,9 @@ where C: Connect + Sync + 'static,
let pool = self.pool.clone();
let h1_writev = self.h1_writev;
let h1_title_case_headers = self.h1_title_case_headers;
+ let ver = self.ver;
+ let is_ver_h2 = self.ver == Ver::Http2;
let connector = self.connector.clone();
- let ver = pool_key.1;
let dst = Destination {
uri,
};
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -474,7 +472,7 @@ where C: Connect + Sync + 'static,
// If the pool_key is for HTTP/2, and there is already a
// connection being estabalished, then this can't take a
// second lock. The "connect_to" future is Canceled.
- let connecting = match pool.connecting(&pool_key) {
+ let connecting = match pool.connecting(&pool_key, ver) {
Some(lock) => lock,
None => {
let canceled = ::Error::new_canceled(Some("HTTP/2 connection in progress"));
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -484,11 +482,31 @@ where C: Connect + Sync + 'static,
Either::A(connector.connect(dst)
.map_err(::Error::new_connect)
.and_then(move |(io, connected)| {
- conn::Builder::new()
+ // If ALPN is h2 and we aren't http2_only already,
+ // then we need to convert our pool checkout into
+ // a single HTTP2 one.
+ let connecting = if connected.alpn == Alpn::H2 && !is_ver_h2 {
+ match connecting.alpn_h2(&pool) {
+ Some(lock) => {
+ trace!("ALPN negotiated h2, updating pool");
+ lock
+ },
+ None => {
+ // Another connection has already upgraded,
+ // the pool checkout should finish up for us.
+ let canceled = ::Error::new_canceled(Some("ALPN upgraded to HTTP/2"));
+ return Either::B(future::err(canceled));
+ }
+ }
+ } else {
+ connecting
+ };
+ let is_h2 = is_ver_h2 || connected.alpn == Alpn::H2;
+ Either::A(conn::Builder::new()
.exec(executor.clone())
.h1_writev(h1_writev)
.h1_title_case_headers(h1_title_case_headers)
- .http2_only(pool_key.1 == Ver::Http2)
+ .http2_only(is_h2)
.handshake(io)
.and_then(move |(tx, conn)| {
let bg = executor.execute(conn.map_err(|e| {
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -509,12 +527,13 @@ where C: Connect + Sync + 'static,
.map(move |tx| {
pool.pooled(connecting, PoolClient {
conn_info: connected,
- tx: match ver {
- Ver::Http1 => PoolTx::Http1(tx),
- Ver::Http2 => PoolTx::Http2(tx.into_http2()),
+ tx: if is_h2 {
+ PoolTx::Http2(tx.into_http2())
+ } else {
+ PoolTx::Http1(tx)
},
})
- })
+ }))
}))
})
}
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -591,6 +610,17 @@ impl<B> PoolClient<B> {
}
}
+ fn is_http1(&self) -> bool {
+ !self.is_http2()
+ }
+
+ fn is_http2(&self) -> bool {
+ match self.tx {
+ PoolTx::Http1(_) => false,
+ PoolTx::Http2(_) => true,
+ }
+ }
+
fn is_ready(&self) -> bool {
match self.tx {
PoolTx::Http1(ref tx) => tx.is_ready(),
diff --git a/src/client/mod.rs b/src/client/mod.rs
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -650,6 +680,10 @@ where
}
}
}
+
+ fn can_share(&self) -> bool {
+ self.is_http2()
+ }
}
// FIXME: allow() required due to `impl Trait` leaking types to this lint
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -30,6 +30,7 @@ pub(super) trait Poolable: Send + Sized + 'static {
///
/// Allows for HTTP/2 to return a shared reservation.
fn reserve(self) -> Reservation<Self>;
+ fn can_share(&self) -> bool;
}
/// When checking out a pooled connection, it might be that the connection
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -50,7 +51,7 @@ pub(super) enum Reservation<T> {
}
/// Simple type alias in case the key type needs to be adjusted.
-pub(super) type Key = (Arc<String>, Ver);
+pub(super) type Key = Arc<String>;
struct PoolInner<T> {
// A flag that a connection is being estabilished, and the connection
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -151,8 +152,8 @@ impl<T: Poolable> Pool<T> {
/// Ensure that there is only ever 1 connecting task for HTTP/2
/// connections. This does nothing for HTTP/1.
- pub(super) fn connecting(&self, key: &Key) -> Option<Connecting<T>> {
- if key.1 == Ver::Http2 {
+ pub(super) fn connecting(&self, key: &Key, ver: Ver) -> Option<Connecting<T>> {
+ if ver == Ver::Http2 {
if let Some(ref enabled) = self.inner {
let mut inner = enabled.lock().unwrap();
return if inner.connecting.insert(key.clone()) {
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -162,7 +163,7 @@ impl<T: Poolable> Pool<T> {
};
Some(connecting)
} else {
- trace!("HTTP/2 connecting already in progress for {:?}", key.0);
+ trace!("HTTP/2 connecting already in progress for {:?}", key);
None
};
}
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -243,11 +244,6 @@ impl<T: Poolable> Pool<T> {
let (value, pool_ref) = if let Some(ref enabled) = self.inner {
match value.reserve() {
Reservation::Shared(to_insert, to_return) => {
- debug_assert_eq!(
- connecting.key.1,
- Ver::Http2,
- "shared reservation without Http2"
- );
let mut inner = enabled.lock().unwrap();
inner.put(connecting.key.clone(), to_insert, enabled);
// Do this here instead of Drop for Connecting because we
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -294,7 +290,7 @@ impl<T: Poolable> Pool<T> {
// unique or shared. So, the hack is to just assume Ver::Http2 means
// shared... :(
let mut pool_ref = WeakOpt::none();
- if key.1 == Ver::Http1 {
+ if !value.can_share() {
if let Some(ref enabled) = self.inner {
pool_ref = WeakOpt::downgrade(enabled);
}
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -377,7 +373,7 @@ impl<'a, T: Poolable + 'a> IdlePopper<'a, T> {
impl<T: Poolable> PoolInner<T> {
fn put(&mut self, key: Key, value: T, __pool_ref: &Arc<Mutex<PoolInner<T>>>) {
- if key.1 == Ver::Http2 && self.idle.contains_key(&key) {
+ if value.can_share() && self.idle.contains_key(&key) {
trace!("put; existing idle HTTP/2 connection for {:?}", key);
return;
}
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -601,7 +597,7 @@ impl<T: Poolable> Drop for Pooled<T> {
if let Ok(mut inner) = pool.lock() {
inner.put(self.key.clone(), value, &pool);
}
- } else if self.key.1 == Ver::Http1 {
+ } else if !value.can_share() {
trace!("pool dropped, dropping pooled ({:?})", self.key);
}
// Ver::Http2 is already in the Pool (or dead), so we wouldn't
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -705,16 +701,22 @@ pub(super) struct Connecting<T: Poolable> {
pool: WeakOpt<Mutex<PoolInner<T>>>,
}
+impl<T: Poolable> Connecting<T> {
+ pub(super) fn alpn_h2(self, pool: &Pool<T>) -> Option<Self> {
+ debug_assert!(
+ self.pool.0.is_none(),
+ "Connecting::alpn_h2 but already Http2"
+ );
+
+ pool.connecting(&self.key, Ver::Http2)
+ }
+}
+
impl<T: Poolable> Drop for Connecting<T> {
fn drop(&mut self) {
if let Some(pool) = self.pool.upgrade() {
// No need to panic on drop, that could abort!
if let Ok(mut inner) = pool.lock() {
- debug_assert_eq!(
- self.key.1,
- Ver::Http2,
- "Connecting constructed without Http2"
- );
inner.connected(&self.key);
}
}
diff --git a/src/mock.rs b/src/mock.rs
--- a/src/mock.rs
+++ b/src/mock.rs
@@ -358,11 +358,12 @@ impl Read for Duplex {
impl Write for Duplex {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
let mut inner = self.inner.lock().unwrap();
+ let ret = inner.write.write(buf);
if let Some(task) = inner.handle_read_task.take() {
trace!("waking DuplexHandle read");
task.notify();
}
- inner.write.write(buf)
+ ret
}
fn flush(&mut self) -> io::Result<()> {
diff --git a/src/mock.rs b/src/mock.rs
--- a/src/mock.rs
+++ b/src/mock.rs
@@ -404,8 +405,7 @@ impl DuplexHandle {
inner.handle_read_task = Some(task::current());
return Ok(Async::NotReady);
}
- inner.write.inner.vec.truncate(0);
- Ok(Async::Ready(inner.write.inner.len()))
+ inner.write.read(buf).map(Async::Ready)
}
pub fn write(&self, bytes: &[u8]) -> Poll<usize, io::Error> {
diff --git a/src/mock.rs b/src/mock.rs
--- a/src/mock.rs
+++ b/src/mock.rs
@@ -456,6 +456,13 @@ impl MockConnector {
}
pub fn mock_fut<F>(&mut self, key: &str, fut: F) -> DuplexHandle
+ where
+ F: Future + Send + 'static,
+ {
+ self.mock_opts(key, Connected::new(), fut)
+ }
+
+ pub fn mock_opts<F>(&mut self, key: &str, connected: Connected, fut: F) -> DuplexHandle
where
F: Future + Send + 'static,
{
diff --git a/src/mock.rs b/src/mock.rs
--- a/src/mock.rs
+++ b/src/mock.rs
@@ -465,7 +472,7 @@ impl MockConnector {
let fut = Box::new(fut.then(move |_| {
trace!("MockConnector mocked fut ready");
- Ok((duplex, Connected::new()))
+ Ok((duplex, connected))
}));
self.mocks.lock().unwrap().entry(key)
.or_insert(Vec::new())
|
Talking through this a bit on IRC, here's some clarification. A user likely has various ways they want to declare when to use HTTP/1 vs HTTP/2. A major way to do is with TLS' ALPN mechanism, but that seems best handled in the `Connect` implementation, since hyper has delegated all transport negotiation and establishment to that trait.
Some variants a user may need:
1. HTTP/1 only
2. HTTP/2 only
3. HTTP/1 or HTTP/2 over ALPN
4. HTTP/1 or HTTP/2, with HTTP/1 upgrades to 2
A key part of ALPN is that if a client chooses to use it, it must advertise the protocols it can speak, such as `http1,h2`. If the server responds back with a protocol identifier, then once the TLS handshake is completed, the connection **MUST** use that negotiated protocol. So, it ALPN agreed on `h2`, hyper **must** use HTTP/2 once the connection is returned from `Connect`.
This means the connector needs some way of telling the `hyper::Client` if a certain protocol is required. We can make use of the `Connected` type to include that information.
----
There is also a question around how a user configures what variant they need. There *is* the `Destination` type, which could gain additional getters so that connectors can know whether to use ALPN. However, a potential downside there is that it seems likely that a user would need to configure their connector **anyways**, and so why should they need to repeat that configuration on the `hyper::Client`.
It might be sufficient to *not* have extra configuration on the `Client`. Of the scenarios above, the only one that seems to require the `hyper::Client` to know about is No. 4 (HTTP/1 upgrade to HTTP/2).
I was initially thinking that the best route would be something like a `Destination::negotiate_h2` method that would tell the connector to use ALPN, but I think it might actually be best to just have the connectors always use ALPN and just tell the `Connected` what protocol was selected. native-tls and openssl don't currently support configuration of ALPN on a per-handshake basis, and while we could add that it's kind of a weird thing to do.
This would require configuration of both the TLS connector and the client if you want to use only HTTP/1 but that seems fine since I think that'd be a pretty rare use case?
Here's what I think a reasonable route forward is:
Public API changes:
```rust
pub struct Protocol(...);
impl Protocol {
const HTTP1_1: Protocol = Protocol(...);
const HTTP2: Protocol = Protocol(...);
}
impl Connected {
pub fn negotiated_protocol(self, protocol: Protocol) -> Self { ... }
}
```
If a connector indicates that it has negotiated a protocol, Hyper has to respect that. If HTTP/1.1 is negotiated but the client was configured with http2_only, it'll have to return an error.
The pool implementation will need to change, since it currently depends on knowing up-front which protocol will be used. The constraint that only a single HTTP/2 connection attempt can exist at any time since we don't know if the connection will be using HTTP/2. It seems like a reasonable approach is that the pool's key type is just the authority, and the value is an enum of either a single HTTP/2 connection, or a set of idle HTTP/1.1 connections. The HTTP/2 mode is preferred, so if a new connection comes in that negotiated HTTP/2, it'll replace any existing HTTP/1.1 connections.
Does this all seem plausible? I can start working on the implementation if so.
The caveats you mention sound about right.
An alternative to exposing a new type would be to just add `negotiated_http1(self)` and `negotiated_http2(self)` on `Connected`. It's not necessarily better, it just allows for the API to exist without a new `Protocol` type.
Does it make sense at all for the `Client` to tell a connector that a user configured `http2_only` or something (another option I've been thinking about is if to implicitly guess the preferred protocol by looking at `Request::version()`)? I suppose if it *does* make sense eventually, it can always be added to `Destination`.
Yeah, negotiated_http1 and negotiated_http2 seem reasonable to me as well.
I'm not sure it'll be all that useful for the connector to know that http2_only was set. It seems pretty weird to have a server that supports h2 but prioritizes http/1.1 over it.
Yea, that'd be odd. There could conceptually be an `http1_only` option, and in that case, if ALPN passed both `http1` and `h2`, and the server supported both, we'd likely have connections that could never work...
Perhaps that concern can just be postponed, and people can configure their HTTPS connector how to do ALPN in the first place.
Yeah I think you'll need to explicitly configure ALPN for hyper-openssl at least for it to be used at all initially.
The changes required in the connection pool are pretty significant, so I want to write them out and make sure it seems reasonable before making them.
There are a couple of constraints we want to enforce:
1. We don't want to allow multiple live HTTP/2 connections to a single host.
2. We want to allow concurrent creation of new HTTP/1 connections to a single host.
The pool currently just tracks the set of pending HTTP/2 connections to guarantee constraint 1, and you ask the pool for permission before making a new connection. This becomes more complicated now since we don't know up-front what protocol we're going to be using until we get part way through the connection process.
There are a couple of options:
1. Defer the pending connection check until ALPN tells us what protocol we're using.
2. Serialize the first connection to a host of any protocol.
Imagine we're spawning off N requests at the same time to a new host. Option 1 is non-ideal in the HTTP/2 case since you'll perform N TCP and TLS handshakes, then throw away N-1 connections and perform a single HTTP/2 handshake. Option 2 is non-ideal in the HTTP/1 case since you'll have N-1 requests queue up behind a single request that performs the first TCP and TLS handshake, realizes the server speaks HTTP/1, and then the other N-1 requests can create their own connections.
On the whole, option 2 seems preferable.
This does mean we now need to track what protocol a host speaks. In particular, there's a distinction between a host we've never connected to and a host that has all of its HTTP/1 connections currently checked out. For simplicity, I'd like to just track that information "forever", even after idle connections have been cleared out.
"We don't want to allow multiple live HTTP/2 connections to a single host."
If single HTTP2 connection is congested, it might be reasonable to open the second connection. Not always, but sometimes it could be a required policy. I think it should be client's decision, not done by hyper.
Sure, there could be an option to open a second connection if the first has no remaining request slots.
I have seen a few other HTTP2 client libraries which easily allow opening multiple h2 connections, but I'm wary about doing that (at least, about making it "easy" to accidentally do so). Note the [spec pleads that clients shouldn't do that](https://httpwg.org/specs/rfc7540.html#rfc.section.9.1):
> Clients SHOULD NOT open more than one HTTP/2 connection to a given host and port pair [...]
Of course, since the connection pool is used by a `Client`, a user could by pass it in hyper by just making a new `Client`...
"Of course, since the connection pool is used by a Client, a user could by pass it in hyper by just making a new Client..."
There is no a reason for a client to be naughty without a good reason :) And the only good reason to have the second connection is congestion, high latency or other factor indicating low performance. This information should be exposed to a client to make a decision, and a client should be able to instruct hyper whether to allow the second, third and so on connection... PS: I also assume that the second connection might easily target alternative IP address, in case DNS is resolved to multiple addresses.
Let me just write this down here to remind me to read the issue if there's going to be any changes:
I'd really appreciate if the new `Connect` refactoring wouldn't open more than one connection. I'm using hyper in my Apple push notification client [a2](https://github.com/pimeys/a2), and if I'd open too many connections to Apple's service, [they'd consider it a denial-of-service attack](https://developer.apple.com/library/archive/documentation/NetworkingInternet/Conceptual/RemoteNotificationsPG/CommunicatingwithAPNs.html#//apple_ref/doc/uid/TP40008194-CH11-SW1) and just ban our servers completely. That would be quite unfortunate.
Looking over what Golang's client does, it opts for your option 1, which is to just open a bunch of connections **if** there isn't an existing HTTP2 connection. If after ALPN, it is HTTP2, then the extras are all closed up and only the first connection is kept.
That seems simpler to me, and less likely to cause problems for people who were successfully using the client for HTTP/1 already. For @pimeys concern, there's a couple things we could do to prevent that:
1. If the `Client` is configured for `http2_only`, then ALPN is never needed, and all connections are treated as HTTP2 (so no mass connect flood).
2. If a `Client` isn't configured that way, we could consider looking to see if `Request::version() == Version::HTTP_2`, and if so, specifically treat just that request as HTTP2-only.
I also think the HTTP/1-and-HTTP/2-and-HTTP1Upgrade option can be removed from consideration. It turns out to be very seldom implemented, and browsers don't bother, since there exist some servers that naively hate upon seeing any `Upgrade` header, so it can't be sent proactively just to advertise.
That sounds reasonable to me.
|
2018-10-27T02:00:20Z
| 1,686
|
Add ALPN/HTTP2 support to Connect
The `Connect` trait allows to provide information to the connector in the `Destination` type. It should be augmented to allow requesting ALPN for HTTP2, and the returned `Connected` type should include a way for the connector to inform the `Client` whether HTTP2 was negotiated.
|
hyperium__hyper-1686
|
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -190,7 +191,7 @@ impl<T: Poolable> Pool<T> {
#[cfg(feature = "runtime")]
#[cfg(test)]
pub(super) fn h1_key(&self, s: &str) -> Key {
- (Arc::new(s.to_string()), Ver::Http1)
+ Arc::new(s.to_string())
}
#[cfg(feature = "runtime")]
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -804,7 +806,7 @@ mod tests {
use futures::{Async, Future};
use futures::future;
use common::Exec;
- use super::{Connecting, Key, Poolable, Pool, Reservation, Ver, WeakOpt};
+ use super::{Connecting, Key, Poolable, Pool, Reservation, WeakOpt};
/// Test unique reservations.
#[derive(Debug, PartialEq, Eq)]
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -818,6 +820,10 @@ mod tests {
fn reserve(self) -> Reservation<Self> {
Reservation::Unique(self)
}
+
+ fn can_share(&self) -> bool {
+ false
+ }
}
fn c<T: Poolable>(key: Key) -> Connecting<T> {
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -845,7 +851,7 @@ mod tests {
#[test]
fn test_pool_checkout_smoke() {
let pool = pool_no_timer();
- let key = (Arc::new("foo".to_string()), Ver::Http1);
+ let key = Arc::new("foo".to_string());
let pooled = pool.pooled(c(key.clone()), Uniq(41));
drop(pooled);
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -860,7 +866,7 @@ mod tests {
fn test_pool_checkout_returns_none_if_expired() {
future::lazy(|| {
let pool = pool_no_timer();
- let key = (Arc::new("foo".to_string()), Ver::Http1);
+ let key = Arc::new("foo".to_string());
let pooled = pool.pooled(c(key.clone()), Uniq(41));
drop(pooled);
::std::thread::sleep(pool.locked().timeout.unwrap());
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -873,7 +879,7 @@ mod tests {
fn test_pool_checkout_removes_expired() {
future::lazy(|| {
let pool = pool_no_timer();
- let key = (Arc::new("foo".to_string()), Ver::Http1);
+ let key = Arc::new("foo".to_string());
pool.pooled(c(key.clone()), Uniq(41));
pool.pooled(c(key.clone()), Uniq(5));
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -894,7 +900,7 @@ mod tests {
fn test_pool_max_idle_per_host() {
future::lazy(|| {
let pool = pool_max_idle_no_timer(2);
- let key = (Arc::new("foo".to_string()), Ver::Http1);
+ let key = Arc::new("foo".to_string());
pool.pooled(c(key.clone()), Uniq(41));
pool.pooled(c(key.clone()), Uniq(5));
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -920,7 +926,7 @@ mod tests {
&Exec::Default,
);
- let key = (Arc::new("foo".to_string()), Ver::Http1);
+ let key = Arc::new("foo".to_string());
// Since pool.pooled() will be calling spawn on drop, need to be sure
// those drops are called while `rt` is the current executor. To do so,
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -945,7 +951,7 @@ mod tests {
#[test]
fn test_pool_checkout_task_unparked() {
let pool = pool_no_timer();
- let key = (Arc::new("foo".to_string()), Ver::Http1);
+ let key = Arc::new("foo".to_string());
let pooled = pool.pooled(c(key.clone()), Uniq(41));
let checkout = pool.checkout(key).join(future::lazy(move || {
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -964,7 +970,7 @@ mod tests {
fn test_pool_checkout_drop_cleans_up_waiters() {
future::lazy(|| {
let pool = pool_no_timer::<Uniq<i32>>();
- let key = (Arc::new("localhost:12345".to_string()), Ver::Http1);
+ let key = Arc::new("localhost:12345".to_string());
let mut checkout1 = pool.checkout(key.clone());
let mut checkout2 = pool.checkout(key.clone());
diff --git a/src/client/pool.rs b/src/client/pool.rs
--- a/src/client/pool.rs
+++ b/src/client/pool.rs
@@ -1000,12 +1006,16 @@ mod tests {
fn reserve(self) -> Reservation<Self> {
Reservation::Unique(self)
}
+
+ fn can_share(&self) -> bool {
+ false
+ }
}
#[test]
fn pooled_drop_if_closed_doesnt_reinsert() {
let pool = pool_no_timer();
- let key = (Arc::new("localhost:12345".to_string()), Ver::Http1);
+ let key = Arc::new("localhost:12345".to_string());
pool.pooled(c(key.clone()), CanClose {
val: 57,
closed: true,
diff --git a/src/client/tests.rs b/src/client/tests.rs
--- a/src/client/tests.rs
+++ b/src/client/tests.rs
@@ -207,4 +207,3 @@ fn checkout_win_allows_connect_future_to_be_pooled() {
}
}
-
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -18,7 +18,7 @@ use hyper::{Body, Client, Method, Request, StatusCode};
use futures::{Future, Stream};
use futures::sync::oneshot;
use tokio::runtime::current_thread::Runtime;
-use tokio::net::tcp::{ConnectFuture, TcpStream};
+use tokio::net::tcp::{ConnectFuture, TcpListener as TkTcpListener, TcpStream};
fn s(buf: &[u8]) -> &str {
::std::str::from_utf8(buf).expect("from_utf8")
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1349,12 +1349,66 @@ mod dispatch_impl {
assert_eq!(vec, b"bar=foo");
}
+ #[test]
+ fn alpn_h2() {
+ use hyper::Response;
+ use hyper::server::conn::Http;
+ use hyper::service::service_fn_ok;
+
+ let _ = pretty_env_logger::try_init();
+ let mut rt = Runtime::new().unwrap();
+ let listener = TkTcpListener::bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
+ let addr = listener.local_addr().unwrap();
+ let mut connector = DebugConnector::new();
+ connector.alpn_h2 = true;
+ let connects = connector.connects.clone();
+
+ let client = Client::builder()
+ .build::<_, ::hyper::Body>(connector);
+
+ let srv = listener.incoming()
+ .into_future()
+ .map_err(|_| unreachable!())
+ .and_then(|(item, _incoming)| {
+ let socket = item.unwrap();
+ Http::new()
+ .http2_only(true)
+ .serve_connection(socket, service_fn_ok(|req| {
+ assert_eq!(req.headers().get("host"), None);
+ Response::new(Body::empty())
+ }))
+ })
+ .map_err(|e| panic!("server error: {}", e));
+ rt.spawn(srv);
+
+
+ assert_eq!(connects.load(Ordering::SeqCst), 0);
+
+ let url = format!("http://{}/a", addr).parse::<::hyper::Uri>().unwrap();
+ let res1 = client.get(url.clone());
+ let res2 = client.get(url.clone());
+ let res3 = client.get(url.clone());
+ rt.block_on(res1.join(res2).join(res3)).unwrap();
+
+ // Since the client doesn't know it can ALPN at first, it will have
+ // started 3 connections. But, the server above will only handle 1,
+ // so the unwrapped responses futures show it still worked.
+ assert_eq!(connects.load(Ordering::SeqCst), 3);
+
+ let res4 = client.get(url.clone());
+ rt.block_on(res4).unwrap();
+
+ assert_eq!(connects.load(Ordering::SeqCst), 3, "after ALPN, no more connects");
+ drop(client);
+ }
+
struct DebugConnector {
http: HttpConnector,
closes: mpsc::Sender<()>,
connects: Arc<AtomicUsize>,
is_proxy: bool,
+ alpn_h2: bool,
}
impl DebugConnector {
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1370,6 +1424,7 @@ mod dispatch_impl {
closes: closes,
connects: Arc::new(AtomicUsize::new(0)),
is_proxy: false,
+ alpn_h2: false,
}
}
diff --git a/tests/client.rs b/tests/client.rs
--- a/tests/client.rs
+++ b/tests/client.rs
@@ -1388,7 +1443,11 @@ mod dispatch_impl {
self.connects.fetch_add(1, Ordering::SeqCst);
let closes = self.closes.clone();
let is_proxy = self.is_proxy;
- Box::new(self.http.connect(dst).map(move |(s, c)| {
+ let is_alpn_h2 = self.alpn_h2;
+ Box::new(self.http.connect(dst).map(move |(s, mut c)| {
+ if is_alpn_h2 {
+ c = c.negotiated_h2();
+ }
(DebugStream(s, closes), c.proxy(is_proxy))
}))
}
|
hyperium/hyper
|
a1609fbb332fccffbeb85d16f1cc0bf98c6ede21
|
[
"1676"
] |
0.12
|
6fe532da4cbbd7395589e7774de79d3728184395
|
diff --git a/Cargo.toml b/Cargo.toml
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -35,6 +35,7 @@ tokio-io = "0.1"
tokio-reactor = { version = "0.1", optional = true }
tokio-tcp = { version = "0.1", optional = true }
tokio-timer = { version = "0.2", optional = true }
+tokio-threadpool = { version = "0.1", optional = true }
want = "0.0.6"
[dev-dependencies]
diff --git a/src/client/connect/dns.rs b/src/client/connect/dns.rs
--- a/src/client/connect/dns.rs
+++ b/src/client/connect/dns.rs
@@ -13,6 +13,9 @@ use futures_cpupool::{Builder as CpuPoolBuilder};
use self::sealed::GaiTask;
+#[cfg(feature = "runtime")]
+pub use self::blocking::{TokioThreadpoolGaiFuture, TokioThreadpoolGaiResolver};
+
/// Resolve a hostname to a set of IP addresses.
pub trait Resolve {
/// The set of IP addresses to try to connect to.
|
cc @sfackler
|
2018-10-20T17:56:38Z
| 1,678
|
Consider a Resolver that uses tokio-threadpool::blocking instead of futures-cpupool
Using `tokio_threadpool::blocking` can reduce some overhead, and make resolving use an existing threadpool instead of needing a separate dedicated one. It can't be the default, since it only works if the executor is `tokio_threadpool`, whereas the `futures_cpupool` resolver works with any executor, but it'd be nice (and easy) to offer it.
|
hyperium__hyper-1678
|
diff --git a/Cargo.toml b/Cargo.toml
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -62,6 +63,7 @@ runtime = [
"tokio-reactor",
"tokio-tcp",
"tokio-timer",
+ "tokio-threadpool",
]
nightly = []
__internal_flaky_tests = []
diff --git a/src/client/connect/dns.rs b/src/client/connect/dns.rs
--- a/src/client/connect/dns.rs
+++ b/src/client/connect/dns.rs
@@ -239,6 +242,50 @@ pub(super) mod sealed {
}
}
+#[cfg(feature = "runtime")]
+mod blocking {
+ use futures::{Async, Future, Poll};
+ use std::io;
+ use std::net::ToSocketAddrs;
+ use tokio_threadpool;
+
+ use super::{Name, IpAddrs, GaiAddrs, Resolve};
+
+ /// A resolver using `getaddrinfo` calls via the `tokio_threadpool::blocking` API.
+ ///
+ /// Unlike the `GaiResolver` this will not spawn dedicated threads, but only works when running on the
+ /// multi-threaded Tokio runtime.
+ #[derive(Clone)]
+ pub struct TokioThreadpoolGaiResolver(());
+
+ pub struct TokioThreadpoolGaiFuture {
+ name: Name,
+ }
+
+ impl Resolve for TokioThreadpoolGaiResolver {
+ type Addrs = GaiAddrs;
+ type Future = TokioThreadpoolGaiFuture;
+
+ fn resolve(&self, name: Name) -> TokioThreadpoolGaiFuture {
+ TokioThreadpoolGaiFuture { name }
+ }
+ }
+
+ impl Future for TokioThreadpoolGaiFuture {
+ type Item = GaiAddrs;
+ type Error = io::Error;
+
+ fn poll(&mut self) -> Poll<GaiAddrs, io::Error> {
+ match tokio_threadpool::blocking(|| (self.name.as_str(), 0).to_socket_addrs()) {
+ Ok(Async::Ready(Ok(iter))) => Ok(Async::Ready(GaiAddrs { inner: IpAddrs { iter } })),
+ Ok(Async::Ready(Err(e))) => Err(e),
+ Ok(Async::NotReady) => Ok(Async::NotReady),
+ Err(e) => Err(io::Error::new(io::ErrorKind::Other, e)),
+ }
+ }
+ }
+}
+
#[cfg(test)]
mod tests {
use std::net::{Ipv4Addr, Ipv6Addr};
diff --git a/src/lib.rs b/src/lib.rs
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -31,6 +31,7 @@ extern crate time;
#[cfg(feature = "runtime")] extern crate tokio_reactor;
#[cfg(feature = "runtime")] extern crate tokio_tcp;
#[cfg(feature = "runtime")] extern crate tokio_timer;
+#[cfg(feature = "runtime")] extern crate tokio_threadpool;
extern crate want;
#[cfg(all(test, feature = "nightly"))]
|
hyperium/hyper
|
a1609fbb332fccffbeb85d16f1cc0bf98c6ede21
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.