Module trace
Middleware that adds high-level tracing to a `Service`.
§Example
Adding tracing to your service can be as simple as:
```rust
use rama_http::{Body, Request, Response};
use rama_core::service::service_fn;
use rama_core::{Context, Layer, Service};
use rama_http::layer::trace::TraceLayer;
use std::convert::Infallible;

async fn handle(request: Request) -> Result<Response, Infallible> {
    Ok(Response::new(Body::from("foo")))
}

// Setup tracing
tracing_subscriber::fmt::init();

let mut service = TraceLayer::new_for_http().layer(service_fn(handle));

let request = Request::new(Body::from("foo"));

let response = service
    .serve(Context::default(), request)
    .await?;
```
If you run this application with `RUST_LOG=rama=trace cargo run` you should see logs like:

```text
Mar 05 20:50:28.523 DEBUG request{method=GET path="/foo"}: rama_http::layer::trace::on_request: started processing request
Mar 05 20:50:28.524 DEBUG request{method=GET path="/foo"}: rama_http::layer::trace::on_response: finished processing request latency=1 ms status=200
```
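If you prefer to set the filter in code rather than via the `RUST_LOG` environment variable, one option is a minimal sketch like the following (assuming the `tracing-subscriber` crate with its `env-filter` feature enabled):

```rust
use tracing_subscriber::EnvFilter;

// Hard-codes the same filter as `RUST_LOG=rama=trace`; requires the
// `tracing-subscriber` crate with its `env-filter` feature enabled.
tracing_subscriber::fmt()
    .with_env_filter(EnvFilter::new("rama=trace"))
    .init();
```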
§Customization
`Trace` comes with good defaults but also supports customizing many aspects of the output.

The default behaviour supports some customization:
```rust
use rama_http::{Body, Request, Response, HeaderMap, StatusCode};
use rama_core::service::service_fn;
use rama_core::{Context, Service, Layer};
use tracing::Level;
use rama_http::layer::trace::{
    TraceLayer, DefaultMakeSpan, DefaultOnRequest, DefaultOnResponse,
};
use rama_utils::latency::LatencyUnit;
use std::time::Duration;
use std::convert::Infallible;

let service = (
    TraceLayer::new_for_http()
        .make_span_with(
            DefaultMakeSpan::new().include_headers(true)
        )
        .on_request(
            DefaultOnRequest::new().level(Level::INFO)
        )
        .on_response(
            DefaultOnResponse::new()
                .level(Level::INFO)
                .latency_unit(LatencyUnit::Micros)
        ),
    // and so on for `on_eos`, `on_body_chunk`, and `on_failure`
).layer(service_fn(handle));
```
However, for maximum control you can provide callbacks:
```rust
use rama_http::{Body, Request, Response, HeaderMap, StatusCode};
use rama_core::service::service_fn;
use rama_core::{Context, Service, Layer};
use rama_http::layer::{classify::ServerErrorsFailureClass, trace::TraceLayer};
use std::time::Duration;
use tracing::Span;
use std::convert::Infallible;
use bytes::Bytes;

let service = (
    TraceLayer::new_for_http()
        .make_span_with(|request: &Request| {
            tracing::debug_span!("http-request")
        })
        .on_request(|request: &Request, _span: &Span| {
            tracing::debug!("started {} {}", request.method(), request.uri().path())
        })
        .on_response(|response: &Response, latency: Duration, _span: &Span| {
            tracing::debug!("response generated in {:?}", latency)
        })
        .on_body_chunk(|chunk: &Bytes, latency: Duration, _span: &Span| {
            tracing::debug!("sending {} bytes", chunk.len())
        })
        .on_eos(|trailers: Option<&HeaderMap>, stream_duration: Duration, _span: &Span| {
            tracing::debug!("stream closed after {:?}", stream_duration)
        })
        .on_failure(|error: ServerErrorsFailureClass, latency: Duration, _span: &Span| {
            tracing::debug!("something went wrong")
        })
).layer(service_fn(handle));
```
§Disabling something
Setting the behaviour to `()` will disable that particular step:
```rust
use rama_http::{Body, Request, Response, StatusCode};
use rama_core::service::service_fn;
use rama_core::{Context, Service, Layer};
use rama_http::layer::{classify::ServerErrorsFailureClass, trace::TraceLayer};
use std::time::Duration;
use tracing::Span;

let service = (
    // This configuration will only emit events on failures
    TraceLayer::new_for_http()
        .on_request(())
        .on_response(())
        .on_body_chunk(())
        .on_eos(())
        .on_failure(|error: ServerErrorsFailureClass, latency: Duration, _span: &Span| {
            tracing::debug!("something went wrong")
        })
).layer(service_fn(handle));
```
§When the callbacks are called
§on_request
The `on_request` callback is called when the request arrives at the middleware in `Service::serve`, just prior to passing the request to the inner service.
§on_response
The `on_response` callback is called when the inner service's response future completes with `Ok(response)`, regardless of whether the response is classified as a success or a failure.

For example, if you're using `ServerErrorsAsFailures` as your classifier and the inner service responds with `500 Internal Server Error`, then the `on_response` callback is still called. `on_failure` would also be called in this case since the response was classified as a failure.
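To make that interplay concrete, here is a minimal sketch in which the inner service always responds with a 500, so both callbacks fire; `failing_handle` is a hypothetical handler introduced only for this illustration:

```rust
use rama_http::{Body, Request, Response, StatusCode};
use rama_core::service::service_fn;
use rama_core::Layer;
use rama_http::layer::{classify::ServerErrorsFailureClass, trace::TraceLayer};
use std::convert::Infallible;
use std::time::Duration;
use tracing::Span;

// Hypothetical handler that always responds with a 500.
async fn failing_handle(_request: Request) -> Result<Response, Infallible> {
    let mut response = Response::new(Body::from("oops"));
    *response.status_mut() = StatusCode::INTERNAL_SERVER_ERROR;
    Ok(response)
}

let service = (
    TraceLayer::new_for_http()
        // Called first: the response future completed with `Ok(response)`.
        .on_response(|response: &Response, _latency: Duration, _span: &Span| {
            tracing::debug!("response generated with status {}", response.status())
        })
        // Called as well: the 500 status is classified as a failure.
        .on_failure(|_class: ServerErrorsFailureClass, _latency: Duration, _span: &Span| {
            tracing::warn!("request classified as a failure")
        })
).layer(service_fn(failing_handle));
```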
§on_body_chunk
The `on_body_chunk` callback is called when the response body produces a new chunk, that is when `http_body::Body::poll_frame` returns `Poll::Ready(Some(Ok(chunk)))`.

`on_body_chunk` is called even if the chunk is empty.
§on_eos
The `on_eos` callback is called when a streaming response body ends, that is when `http_body::Body::poll_frame` returns `Poll::Ready(None)`.

`on_eos` is called even if the trailers produced are `None`.
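As a sketch of what can be done with the trailers argument, the callback below distinguishes the two outcomes (`handle` is the handler from the first example):

```rust
use rama_http::HeaderMap;
use rama_core::service::service_fn;
use rama_core::Layer;
use rama_http::layer::trace::TraceLayer;
use std::time::Duration;
use tracing::Span;

let service = (
    TraceLayer::new_for_http()
        .on_eos(|trailers: Option<&HeaderMap>, stream_duration: Duration, _span: &Span| {
            // `trailers` is `None` when the stream ended without producing trailers.
            match trailers {
                Some(trailers) => {
                    tracing::debug!(?trailers, "stream closed after {:?} with trailers", stream_duration)
                }
                None => tracing::debug!("stream closed after {:?} without trailers", stream_duration),
            }
        })
).layer(service_fn(handle));
```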
§on_failure
The `on_failure` callback is called when:

- The inner `Service`'s response future resolves to an error.
- A response is classified as a failure.
- `http_body::Body::poll_frame` returns an error.
- An end-of-stream is classified as a failure.
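A minimal sketch of the first case, with a handler whose future resolves to an error; `unreliable_handle` and its error message are made up for illustration, and `rama_core::error::BoxError` is assumed here as the boxed error type:

```rust
use rama_http::{Body, Request, Response};
use rama_core::service::service_fn;
use rama_core::Layer;
use rama_http::layer::{classify::ServerErrorsFailureClass, trace::TraceLayer};
use std::time::Duration;
use tracing::Span;

// Hypothetical handler whose future resolves to an error instead of a response.
async fn unreliable_handle(_request: Request) -> Result<Response, rama_core::error::BoxError> {
    Err("connection reset by peer".into())
}

let service = (
    TraceLayer::new_for_http()
        .on_failure(|_class: ServerErrorsFailureClass, latency: Duration, _span: &Span| {
            // Fires because the response future resolved to `Err`,
            // even though no response was ever produced.
            tracing::error!("request failed after {:?}", latency)
        })
).layer(service_fn(unreliable_handle));
```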
§Recording fields on the span
All callbacks receive a reference to the tracing `Span`, corresponding to this request, produced by the closure passed to `TraceLayer::make_span_with`. It can be used to record field values that weren't known when the span was created.
```rust
use rama_http::{Body, Request, Response, HeaderMap, StatusCode};
use rama_core::service::service_fn;
use rama_core::Layer;
use rama_http::layer::trace::TraceLayer;
use tracing::Span;
use std::time::Duration;
use std::convert::Infallible;

let service = (
    TraceLayer::new_for_http()
        .make_span_with(|request: &Request| {
            tracing::debug_span!(
                "http-request",
                status_code = tracing::field::Empty,
            )
        })
        .on_response(|response: &Response, _latency: Duration, span: &Span| {
            span.record("status_code", &tracing::field::display(response.status()));
            tracing::debug!("response generated")
        }),
).layer(service_fn(handle));
```
§Providing classifiers
Tracing requires determining if a response is a success or failure. `MakeClassifier` is used to create a classifier for the incoming request. See the docs for `MakeClassifier` and `ClassifyResponse` for more details on classification.

A `MakeClassifier` can be provided when creating a `TraceLayer`:
```rust
use rama_http::{Body, Request, Response};
use rama_core::service::service_fn;
use rama_core::Layer;
use rama_http::layer::{
    trace::TraceLayer,
    classify::{
        MakeClassifier, ClassifyResponse, ClassifiedResponse, NeverClassifyEos,
        SharedClassifier,
    },
};
use std::convert::Infallible;

// Our `MakeClassifier` that always creates `MyClassifier` classifiers.
#[derive(Copy, Clone)]
struct MyMakeClassify;

impl MakeClassifier for MyMakeClassify {
    type Classifier = MyClassifier;
    type FailureClass = &'static str;
    type ClassifyEos = NeverClassifyEos<&'static str>;

    fn make_classifier<B>(&self, req: &Request<B>) -> Self::Classifier {
        MyClassifier
    }
}

// A classifier that classifies failures as `"something went wrong..."`.
#[derive(Copy, Clone)]
struct MyClassifier;

impl ClassifyResponse for MyClassifier {
    type FailureClass = &'static str;
    type ClassifyEos = NeverClassifyEos<&'static str>;

    fn classify_response<B>(
        self,
        res: &Response<B>
    ) -> ClassifiedResponse<Self::FailureClass, Self::ClassifyEos> {
        // Classify based on the status code.
        if res.status().is_server_error() {
            ClassifiedResponse::Ready(Err("something went wrong..."))
        } else {
            ClassifiedResponse::Ready(Ok(()))
        }
    }

    fn classify_error<E>(self, error: &E) -> Self::FailureClass
    where
        E: std::fmt::Display,
    {
        "something went wrong..."
    }
}

let service = (
    // Create a trace layer that uses our classifier.
    TraceLayer::new(MyMakeClassify),
).layer(service_fn(handle));

// Since `MyClassifier` is `Clone` we can also use `SharedClassifier`
// to avoid having to define a separate `MakeClassifier`.
let service = TraceLayer::new(SharedClassifier::new(MyClassifier)).layer(service_fn(handle));
```
`TraceLayer` comes with convenience methods for using common classifiers:

- `TraceLayer::new_for_http` classifies based on the status code. It doesn't consider streaming responses.
- `TraceLayer::new_for_grpc` classifies based on the gRPC protocol and supports streaming responses.
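For example, both constructors can wrap the same handler (`handle` is the one from the first example):

```rust
use rama_core::service::service_fn;
use rama_core::Layer;
use rama_http::layer::trace::TraceLayer;

// Status-code based classification for plain HTTP services.
let http_service = TraceLayer::new_for_http().layer(service_fn(handle));

// gRPC-aware classification, which also supports streaming responses.
let grpc_service = TraceLayer::new_for_grpc().layer(service_fn(handle));
```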
Structs§

- The default `OnBodyChunk` implementation used by `Trace`.
- The default `OnResponse` implementation used by `Trace`.
- Response body for `Trace`.
Traits§

- Trait used to tell `Trace` what to do when a body chunk has been sent.
- Trait used to tell `Trace` what to do when a stream closes.
- Trait used to tell `Trace` what to do when a request fails.
- Trait used to tell `Trace` what to do when a request is received.
- Trait used to tell `Trace` what to do when a response has been produced.
Type Aliases§

- `MakeClassifier` for gRPC requests.
- `MakeClassifier` for HTTP requests.