Merge branch 'main' into next-v2

This commit is contained in:
Alex Auvolat 2025-06-13 14:01:39 +02:00
commit dc1a4ffd76
20 changed files with 354 additions and 34 deletions

12
Cargo.lock generated
View File

@ -1263,6 +1263,7 @@ dependencies = [
"timeago",
"tokio",
"tracing",
"tracing-journald",
"tracing-subscriber",
"utoipa",
]
@ -4563,6 +4564,17 @@ dependencies = [
"tracing",
]
[[package]]
name = "tracing-journald"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fc0b4143302cf1022dac868d521e36e8b27691f72c84b3311750d5188ebba657"
dependencies = [
"libc",
"tracing-core",
"tracing-subscriber",
]
[[package]]
name = "tracing-log"
version = "0.2.0"

View File

@ -83,6 +83,7 @@ pretty_env_logger = "0.5"
structopt = { version = "0.3", default-features = false }
syslog-tracing = "0.3"
tracing = "0.1"
tracing-journald = "0.3.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
heed = { version = "0.11", default-features = false, features = ["lmdb"] }

View File

@ -3120,9 +3120,15 @@
"blocksPurged",
"objectsDeleted",
"uploadsDeleted",
"versionsDeleted"
"versionsDeleted",
"blockRefsPurged"
],
"properties": {
"blockRefsPurged": {
"type": "integer",
"format": "int64",
"minimum": 0
},
"blocksPurged": {
"type": "integer",
"format": "int64",
@ -3574,9 +3580,15 @@
"blocksPurged",
"objectsDeleted",
"uploadsDeleted",
"versionsDeleted"
"versionsDeleted",
"blockRefsPurged"
],
"properties": {
"blockRefsPurged": {
"type": "integer",
"format": "int64",
"minimum": 0
},
"blocksPurged": {
"type": "integer",
"format": "int64",

View File

@ -8,18 +8,18 @@ have published Ansible roles. We list them and compare them below.
## Comparison of Ansible roles
| Feature | [ansible-role-garage](#zorun-ansible-role-garage) | [garage-docker-ansible-deploy](#moan0s-garage-docker-ansible-deploy) |
|------------------------------------|---------------------------------------------|---------------------------------------------------------------|
| **Runtime** | Systemd | Docker |
| **Target OS** | Any Linux | Any Linux |
| **Architecture** | amd64, arm64, i686 | amd64, arm64 |
| **Additional software** | None | Traefik |
| **Automatic node connection** | ❌ | ✅ |
| **Layout management** | ❌ | ✅ |
| **Manage buckets & keys** | ❌ | ✅ (basic) |
| **Allow custom Garage config** | ✅ | ❌ |
| **Facilitate Garage upgrades** | ✅ | ❌ |
| **Multiple instances on one host** | ✅ | ✅ |
| Feature | [ansible-role-garage](#zorun-ansible-role-garage) | [garage-docker-ansible-deploy](#moan0s-garage-docker-ansible-deploy) | [eddster ansible-role-garage](#eddster-ansible-role-garage) |
|------------------------------------|---------------------------------------------|---------------------------------------------------------------|---------------------------------|
| **Runtime** | Systemd | Docker | Systemd |
| **Target OS** | Any Linux | Any Linux | Any Linux |
| **Architecture** | amd64, arm64, i686 | amd64, arm64 | arm64, arm, 386, amd64 |
| **Additional software**            | None                                        | Traefik                                                       | Nginx and Keepalived (optional) |
| **Automatic node connection** | ❌ | ✅ | ✅ |
| **Layout management** | ❌ | ✅ | ✅ |
| **Manage buckets & keys** | ❌ | ✅ (basic) | ✅ |
| **Allow custom Garage config** | ✅ | ❌ | ❌ |
| **Facilitate Garage upgrades** | ✅ | ❌ | ✅ |
| **Multiple instances on one host** | ✅ | ✅ | ❌ |
## zorun/ansible-role-garage
@ -49,3 +49,15 @@ structured DNS names, etc).
As a result, this role makes it easier to start with Garage on Ansible,
but is less flexible.
## eddster2309/ansible-role-garage
[Source code](https://github.com/eddster2309/ansible-role-garage), [Ansible galaxy](https://galaxy.ansible.com/ui/standalone/roles/eddster2309/garage/)
This role is an opinionated but customisable role using the official Garage
static binaries and only requires Systemd. As such it should work on any
Linux based host. It includes all the necessary configuration to
automatically set up a clustered Garage deployment. Most Garage
configuration options are exposed through Ansible variables so while you
can't provide a custom config you can get very close. It can optionally
install an HA Nginx deployment with Keepalived.

View File

@ -182,11 +182,12 @@ ID Hostname Address Tag Zone Capacit
## Creating a cluster layout
Creating a cluster layout for a Garage deployment means informing Garage
of the disk space available on each node of the cluster
as well as the zone (e.g. datacenter) each machine is located in.
of the disk space available on each node of the cluster, `-c`,
as well as the name of the zone (e.g. datacenter), `-z`, each machine is located in.
For our test deployment, we are using only one node. The way in which we configure
it does not matter, you can simply write:
For our test deployment, we have only one node with a zone named `dc1` and a
capacity of `1G`, though the capacity is ignored for a single node deployment
and can be changed later when adding new nodes.
```bash
garage layout assign -z dc1 -c 1G <node_id>

View File

@ -94,30 +94,30 @@ The following gives details about each available configuration option.
[Environment variables](#env_variables).
Top-level configuration options:
Top-level configuration options, in alphabetical order:
[`allow_punycode`](#allow_punycode),
[`allow_world_readable_secrets`](#allow_world_readable_secrets),
[`block_ram_buffer_max`](#block_ram_buffer_max),
[`block_size`](#block_size),
[`bootstrap_peers`](#bootstrap_peers),
[`compression_level`](#compression_level),
[`consistency_mode`](#consistency_mode),
[`data_dir`](#data_dir),
[`data_fsync`](#data_fsync),
[`db_engine`](#db_engine),
[`disable_scrub`](#disable_scrub),
[`use_local_tz`](#use_local_tz),
[`lmdb_map_size`](#lmdb_map_size),
[`metadata_auto_snapshot_interval`](#metadata_auto_snapshot_interval),
[`metadata_dir`](#metadata_dir),
[`metadata_fsync`](#metadata_fsync),
[`metadata_snapshots_dir`](#metadata_snapshots_dir),
[`replication_factor`](#replication_factor),
[`consistency_mode`](#consistency_mode),
[`rpc_bind_addr`](#rpc_bind_addr),
[`rpc_bind_outgoing`](#rpc_bind_outgoing),
[`rpc_public_addr`](#rpc_public_addr),
[`rpc_public_addr_subnet`](#rpc_public_addr_subnet),
[`rpc_secret`/`rpc_secret_file`](#rpc_secret).
[`allow_punycode`](#allow_punycode).
[`rpc_secret`/`rpc_secret_file`](#rpc_secret),
[`use_local_tz`](#use_local_tz).
The `[consul_discovery]` section:
[`api`](#consul_api),
@ -162,6 +162,10 @@ variable, it does not exist in the configuration file:
Garage daemon send its logs to `syslog` (using the libc `syslog` function)
instead of printing to stderr.
- `GARAGE_LOG_TO_JOURNALD` (since `v2.0.0`): set this to `1` or `true` to make the
Garage daemon send its logs to `journald` (using the native protocol of `systemd-journald`)
instead of printing to stderr.
The following environment variables can be used to override the corresponding
values in the configuration file:
@ -173,7 +177,7 @@ values in the configuration file:
### Top-level configuration options
#### `replication_factor` {#replication_factor}
#### `replication_factor` (since `v1.0.0`) {#replication_factor}
The replication factor can be any positive integer smaller or equal the node count in your cluster.
The chosen replication factor has a big impact on the cluster's failure tolerancy and performance characteristics.
@ -221,7 +225,7 @@ is in progress. In theory, no data should be lost as rebalancing is a
routine operation for Garage, although we cannot guarantee you that everything
will go right in such an extreme scenario.
#### `consistency_mode` {#consistency_mode}
#### `consistency_mode` (since `v1.0.0`) {#consistency_mode}
The consistency mode setting determines the read and write behaviour of your cluster.

View File

@ -23,7 +23,6 @@ Feel free to open a PR to suggest fixes this table. Minio is missing because the
- 2022-05-25 - Many Ceph S3 endpoints are not documented but implemented. Following a notification from the Ceph community, we added them.
## High-level features
| Feature | Garage | [Openstack Swift](https://docs.openstack.org/swift/latest/s3_compat.html) | [Ceph Object Gateway](https://docs.ceph.com/en/latest/radosgw/s3/) | [Riak CS](https://docs.riak.com/riak/cs/2.1.1/references/apis/storage/s3/index.html) | [OpenIO](https://docs.openio.io/latest/source/arch-design/s3_compliancy.html) |
@ -34,6 +33,7 @@ Feel free to open a PR to suggest fixes this table. Minio is missing because the
| [URL vhost-style](https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html#virtual-hosted-style-access) URL (eg. `bucket.host.tld/key`) | ✅ Implemented | ❌| ✅| ✅ | ✅ |
| [Presigned URLs](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ShareObjectPreSignedURL.html) | ✅ Implemented | ❌| ✅ | ✅ | ✅(❓) |
| [SSE-C encryption](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html) | ✅ Implemented | ❓ | ✅ | ❌ | ✅ |
| [Bucket versioning](https://docs.aws.amazon.com/AmazonS3/latest/userguide/Versioning.html) | ❌ Missing | ✅ | ✅ | ❌ | ✅ |
*Note:* OpenIO does not say if it supports presigned URLs. Because it is part
of signature v4 and they claim they support it without additional precisions,

View File

@ -74,6 +74,7 @@ let
"metrics"
"telemetry-otlp"
"syslog"
"journald"
]));
featuresStr = lib.concatStringsSep "," rootFeatures;

View File

@ -1309,4 +1309,5 @@ pub struct LocalPurgeBlocksResponse {
pub objects_deleted: u64,
pub uploads_deleted: u64,
pub versions_deleted: u64,
pub block_refs_purged: u64,
}

View File

@ -151,6 +151,7 @@ impl RequestHandler for LocalPurgeBlocksRequest {
let mut obj_dels = 0;
let mut mpu_dels = 0;
let mut ver_dels = 0;
let mut br_dels = 0;
for hash in self.0.iter() {
let hash = hex::decode(hash).ok_or_bad_request("invalid hash")?;
@ -176,11 +177,18 @@ impl RequestHandler for LocalPurgeBlocksRequest {
ver_dels += 1;
}
}
if !br.deleted.get() {
let mut br = br;
br.deleted.set();
garage.block_ref_table.insert(&br).await?;
br_dels += 1;
}
}
}
Ok(LocalPurgeBlocksResponse {
blocks_purged: self.0.len() as u64,
block_refs_purged: br_dels,
versions_deleted: ver_dels,
objects_deleted: obj_dels,
uploads_deleted: mpu_dels,

View File

@ -223,6 +223,7 @@ impl ApiHandler for S3ApiServer {
Endpoint::DeleteBucket {} => handle_delete_bucket(ctx).await,
Endpoint::GetBucketLocation {} => handle_get_bucket_location(ctx),
Endpoint::GetBucketVersioning {} => handle_get_bucket_versioning(),
Endpoint::GetBucketAcl {} => handle_get_bucket_acl(ctx),
Endpoint::ListObjects {
delimiter,
encoding_type,

View File

@ -5,7 +5,7 @@ use hyper::{Request, Response, StatusCode};
use garage_model::bucket_alias_table::*;
use garage_model::bucket_table::Bucket;
use garage_model::garage::Garage;
use garage_model::key_table::Key;
use garage_model::key_table::{Key, KeyParams};
use garage_model::permission::BucketKeyPerm;
use garage_table::util::*;
use garage_util::crdt::*;
@ -44,6 +44,55 @@ pub fn handle_get_bucket_versioning() -> Result<Response<ResBody>, Error> {
.body(string_body(xml))?)
}
/// Handle a GetBucketAcl S3 API call.
///
/// Garage has no real ACL support; this endpoint synthesizes an
/// `AccessControlPolicy` XML document from the permissions the requesting
/// API key holds on the bucket (owner → FULL_CONTROL, otherwise READ /
/// READ_ACP and/or WRITE as applicable).
pub fn handle_get_bucket_acl(ctx: ReqCtx) -> Result<Response<ResBody>, Error> {
	let ReqCtx {
		bucket_id, api_key, ..
	} = ctx;

	let key_p = api_key.params().ok_or_internal_error(
		"Key should not be in deleted state at this point (in handle_get_bucket_acl)",
	)?;

	let perms = api_key.bucket_permissions(&bucket_id);

	// All grants refer to the requesting key itself; only the permission
	// string differs, so build them through a small helper closure.
	let grant_for = |permission: &str| s3_xml::Grant {
		grantee: create_grantee(&key_p, &api_key),
		permission: s3_xml::Value(permission.to_string()),
	};

	let mut grants: Vec<s3_xml::Grant> = Vec::new();
	if perms.allow_owner {
		// Owner implies everything; report a single FULL_CONTROL grant.
		grants.push(grant_for("FULL_CONTROL"));
	} else {
		if perms.allow_read {
			grants.push(grant_for("READ"));
			grants.push(grant_for("READ_ACP"));
		}
		if perms.allow_write {
			grants.push(grant_for("WRITE"));
		}
	}

	let access_control_policy = s3_xml::AccessControlPolicy {
		xmlns: (),
		owner: None,
		acl: s3_xml::AccessControlList { entries: grants },
	};

	let xml = s3_xml::to_xml_with_header(&access_control_policy)?;
	trace!("xml: {}", xml);

	Ok(Response::builder()
		.header("Content-Type", "application/xml")
		.body(string_body(xml))?)
}
pub async fn handle_list_buckets(
garage: &Garage,
api_key: &Key,
@ -306,6 +355,15 @@ fn parse_create_bucket_xml(xml_bytes: &[u8]) -> Option<Option<String>> {
Some(ret)
}
/// Build the `<Grantee>` XML element identifying the given API key
/// as a canonical user (display name = key name, ID = key id).
fn create_grantee(key_params: &KeyParams, api_key: &Key) -> s3_xml::Grantee {
	let display_name = s3_xml::Value(key_params.name.get().to_string());
	let id = s3_xml::Value(api_key.key_id.to_string());
	s3_xml::Grantee {
		xmlns_xsi: (),
		typ: "CanonicalUser".to_string(),
		display_name: Some(display_name),
		id: Some(id),
	}
}
#[cfg(test)]
mod tests {
use super::*;

View File

@ -26,7 +26,7 @@ use garage_api_common::signature::checksum::*;
use crate::api_server::{ReqBody, ResBody};
use crate::encryption::{EncryptionParams, OekDerivationInfo};
use crate::error::*;
use crate::get::{full_object_byte_stream, PreconditionHeaders};
use crate::get::{check_version_not_deleted, full_object_byte_stream, PreconditionHeaders};
use crate::multipart;
use crate::put::{extract_metadata_headers, save_stream, ChecksumMode, SaveStreamResult};
use crate::website::X_AMZ_WEBSITE_REDIRECT_LOCATION;
@ -268,6 +268,7 @@ async fn handle_copy_metaonly(
.get(&source_version.uuid, &EmptyKey)
.await?;
let source_version = source_version.ok_or(Error::NoSuchKey)?;
check_version_not_deleted(&source_version)?;
// Write an "uploading" marker in Object table
// This holds a reference to the object in the Version table
@ -468,6 +469,7 @@ pub async fn handle_upload_part_copy(
.get(&source_object_version.uuid, &EmptyKey)
.await?
.ok_or(Error::NoSuchKey)?;
check_version_not_deleted(&source_version)?;
// We want to reuse blocks from the source version as much as possible.
// However, we still need to get the data from these blocks

View File

@ -19,12 +19,13 @@ use garage_net::stream::ByteStream;
use garage_rpc::rpc_helper::OrderTag;
use garage_table::EmptyKey;
use garage_util::data::*;
use garage_util::error::OkOrMessage;
use garage_util::error::{Error as UtilError, OkOrMessage};
use garage_model::garage::Garage;
use garage_model::s3::object_table::*;
use garage_model::s3::version_table::*;
use garage_api_common::common_error::CommonError;
use garage_api_common::helpers::*;
use garage_api_common::signature::checksum::{add_checksum_response_headers, X_AMZ_CHECKSUM_MODE};
@ -219,6 +220,7 @@ pub async fn handle_head_without_ctx(
.get(&object_version.uuid, &EmptyKey)
.await?
.ok_or(Error::NoSuchKey)?;
check_version_not_deleted(&version)?;
let (part_offset, part_end) =
calculate_part_bounds(&version, pn).ok_or(Error::InvalidPart)?;
@ -373,6 +375,21 @@ pub async fn handle_get_without_ctx(
}
}
/// Ensure the given `Version` has not been marked deleted.
///
/// The version may have been deleted between the moment the object table
/// was consulted and now, meaning the object was deleted or overridden.
/// Rather than answering that the key does not exist, we return a
/// transient internal error so the client knows to retry the request.
pub(crate) fn check_version_not_deleted(version: &Version) -> Result<(), Error> {
	if !version.deleted.get() {
		return Ok(());
	}
	Err(CommonError::InternalError(UtilError::Message(
		"conflict/inconsistency between object and version state, version is deleted"
			.to_string(),
	))
	.into())
}
async fn handle_get_full(
garage: Arc<Garage>,
version: &ObjectVersion,
@ -439,6 +456,7 @@ pub fn full_object_byte_stream(
.ok_or_message("channel closed")?;
let version = version_fut.await.unwrap()?.ok_or(Error::NoSuchKey)?;
check_version_not_deleted(&version)?;
for (i, (_, vb)) in version.blocks.items().iter().enumerate().skip(1) {
let stream_block_i = encryption
.get_block(&garage, &vb.hash, Some(order_stream.order(i as u64)))
@ -454,6 +472,14 @@ pub fn full_object_byte_stream(
{
Ok(()) => (),
Err(e) => {
// TODO i think this is a bad idea, we should log
// an error and stop there. If the error happens to
// be exactly the size of what hasn't been streamed
// yet, the client will see the request as a
// success
// instead truncating the output notify the client
// something happened with their download, so that
// they can retry it
let _ = tx.send(error_stream_item(e)).await;
}
}
@ -505,7 +531,7 @@ async fn handle_get_range(
.get(&version.uuid, &EmptyKey)
.await?
.ok_or(Error::NoSuchKey)?;
check_version_not_deleted(&version)?;
let body =
body_from_blocks_range(garage, encryption, version.blocks.items(), begin, end);
Ok(resp_builder.body(body)?)
@ -556,6 +582,8 @@ async fn handle_get_part(
.await?
.ok_or(Error::NoSuchKey)?;
check_version_not_deleted(&version)?;
let (begin, end) =
calculate_part_bounds(&version, part_number).ok_or(Error::InvalidPart)?;

View File

@ -13,6 +13,10 @@ pub fn xmlns_tag<S: Serializer>(_v: &(), s: S) -> Result<S::Ok, S::Error> {
s.serialize_str("http://s3.amazonaws.com/doc/2006-03-01/")
}
/// Serde serializer for the `xmlns:xsi` pseudo-field: always emits the
/// XML Schema instance namespace URI, ignoring the unit value.
pub fn xmlns_xsi_tag<S: Serializer>(_v: &(), s: S) -> Result<S::Ok, S::Error> {
	s.serialize_str("http://www.w3.org/2001/XMLSchema-instance")
}
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
pub struct Value(#[serde(rename = "$value")] pub String);
@ -325,6 +329,42 @@ pub struct PostObject {
pub etag: Value,
}
/// `<Grantee>` element of an S3 ACL: identifies who a permission is
/// granted to.
#[derive(Debug, Serialize, PartialEq, Eq)]
pub struct Grantee {
	/// Serialized as the fixed XML Schema instance namespace URI
	/// (see `xmlns_xsi_tag`), required for the `xsi:type` attribute below.
	#[serde(rename = "xmlns:xsi", serialize_with = "xmlns_xsi_tag")]
	pub xmlns_xsi: (),
	/// Grantee type, e.g. `CanonicalUser`.
	#[serde(rename = "xsi:type")]
	pub typ: String,
	/// Human-readable name of the grantee; omitted from the XML when `None`.
	#[serde(rename = "DisplayName")]
	pub display_name: Option<Value>,
	/// Grantee identifier; omitted from the XML when `None`.
	#[serde(rename = "ID")]
	pub id: Option<Value>,
}
/// `<Grant>` entry of an S3 ACL: a (grantee, permission) pair.
#[derive(Debug, Serialize, PartialEq, Eq)]
pub struct Grant {
	#[serde(rename = "Grantee")]
	pub grantee: Grantee,
	/// Permission string, e.g. `FULL_CONTROL`, `READ`, `WRITE`.
	#[serde(rename = "Permission")]
	pub permission: Value,
}
/// `<AccessControlList>` element: the list of grants of an ACL,
/// each serialized as a `<Grant>` child element.
#[derive(Debug, Serialize, PartialEq, Eq)]
pub struct AccessControlList {
	#[serde(rename = "Grant")]
	pub entries: Vec<Grant>,
}
/// Top-level `<AccessControlPolicy>` document returned by GetBucketAcl.
#[derive(Debug, Serialize, PartialEq, Eq)]
pub struct AccessControlPolicy {
	/// Serialized as the S3 XML namespace attribute (see `xmlns_tag`).
	#[serde(serialize_with = "xmlns_tag")]
	pub xmlns: (),
	/// Bucket owner; omitted from the XML when `None`.
	#[serde(rename = "Owner")]
	pub owner: Option<Owner>,
	#[serde(rename = "AccessControlList")]
	pub acl: AccessControlList,
}
#[cfg(test)]
mod tests {
use super::*;
@ -433,6 +473,43 @@ mod tests {
Ok(())
}
#[test]
fn get_bucket_acl_result() -> Result<(), ApiError> {
	// A policy with a single FULL_CONTROL grant for one canonical user
	// must serialize to the expected AccessControlPolicy XML document.
	let policy = AccessControlPolicy {
		xmlns: (),
		owner: None,
		acl: AccessControlList {
			entries: vec![Grant {
				grantee: Grantee {
					xmlns_xsi: (),
					typ: "CanonicalUser".to_string(),
					display_name: Some(Value("owner_name".to_string())),
					id: Some(Value("qsdfjklm".to_string())),
				},
				permission: Value("FULL_CONTROL".to_string()),
			}],
		},
	};
	assert_eq!(
		to_xml_with_header(&policy)?,
		"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\
<AccessControlPolicy xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\
<AccessControlList>\
<Grant>\
<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\">\
<DisplayName>owner_name</DisplayName>\
<ID>qsdfjklm</ID>\
</Grantee>\
<Permission>FULL_CONTROL</Permission>\
</Grant>\
</AccessControlList>\
</AccessControlPolicy>"
	);
	Ok(())
}
#[test]
fn delete_result() -> Result<(), ApiError> {
let delete_result = DeleteResult {

View File

@ -59,6 +59,7 @@ opentelemetry.workspace = true
opentelemetry-prometheus = { workspace = true, optional = true }
opentelemetry-otlp = { workspace = true, optional = true }
syslog-tracing = { workspace = true, optional = true }
tracing-journald = { workspace = true, optional = true }
[dev-dependencies]
garage_api_common.workspace = true
@ -103,6 +104,8 @@ metrics = [ "garage_api_admin/metrics", "opentelemetry-prometheus" ]
telemetry-otlp = [ "opentelemetry-otlp" ]
# Logging to syslog
syslog = [ "syslog-tracing" ]
# Logging to journald
journald = [ "tracing-journald" ]
# NOTE: bundled-libs and system-libs should be treat as mutually exclusive;
# exactly one of them should be enabled.

View File

@ -160,8 +160,8 @@ impl Cli {
.await?;
println!(
"Purged {} blocks: deleted {} versions, {} objects, {} multipart uploads",
res.blocks_purged, res.versions_deleted, res.objects_deleted, res.uploads_deleted,
"Purged {} blocks: deleted {} block refs, {} versions, {} objects, {} multipart uploads",
res.blocks_purged, res.block_refs_purged, res.versions_deleted, res.objects_deleted, res.uploads_deleted,
);
Ok(())

View File

@ -215,6 +215,43 @@ fn init_logging(opt: &Opt) {
}
}
if std::env::var("GARAGE_LOG_TO_JOURNALD")
.map(|x| x == "1" || x == "true")
.unwrap_or(false)
{
#[cfg(feature = "journald")]
{
use tracing_journald::{Priority, PriorityMappings};
use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::util::SubscriberInitExt;
let registry = tracing_subscriber::registry()
.with(tracing_subscriber::fmt::layer().with_writer(std::io::sink))
.with(env_filter);
match tracing_journald::layer() {
Ok(layer) => {
registry
.with(layer.with_priority_mappings(PriorityMappings {
info: Priority::Informational,
debug: Priority::Debug,
..PriorityMappings::new()
}))
.init();
}
Err(e) => {
eprintln!("Couldn't connect to journald: {}.", e);
std::process::exit(1);
}
}
return;
}
#[cfg(not(feature = "journald"))]
{
eprintln!("Journald support is not enabled in this build.");
std::process::exit(1);
}
}
tracing_subscriber::fmt()
.with_writer(std::io::stderr)
.with_env_filter(env_filter)

View File

@ -1057,3 +1057,45 @@ async fn test_website_puny() {
);
}
}
#[tokio::test]
async fn test_website_object_not_found() {
	// Bucket with website access enabled but no objects: any GET through
	// the web endpoint must yield an HTML 404 error page.
	const BCKT_NAME: &str = "not-found";
	let ctx = common::context();
	let _bucket = ctx.create_bucket(BCKT_NAME);
	let client = Client::builder(TokioExecutor::new()).build_http();

	// Build a GET request against the web port, selecting the bucket via
	// the Host header; `suffix` is appended to the bucket name.
	let req = |suffix| {
		Request::builder()
			.method("GET")
			.uri(format!("http://127.0.0.1:{}/", ctx.garage.web_port))
			.header("Host", format!("{}{}", BCKT_NAME, suffix))
			.body(Body::new(Bytes::new()))
			.unwrap()
	};

	// Enable website serving on the bucket through the garage CLI.
	ctx.garage
		.command()
		.args(["bucket", "website", "--allow", BCKT_NAME])
		.quiet()
		.expect_success_status("Could not allow website on bucket");

	let resp = client.request(req("")).await.unwrap();
	assert_eq!(resp.status(), StatusCode::NOT_FOUND);
	// the errors we return by default are *not* XML
	assert_eq!(
		resp.headers().get(http::header::CONTENT_TYPE).unwrap(),
		"text/html; charset=utf-8"
	);

	// The HTML error body should mention that the object was not found.
	let result = String::from_utf8(
		resp.into_body()
			.collect()
			.await
			.unwrap()
			.to_bytes()
			.to_vec(),
	)
	.unwrap();
	assert!(result.contains("not found"));
}

View File

@ -469,10 +469,30 @@ fn error_to_res(e: Error) -> Response<BoxBody<Error>> {
// was a HEAD request or we couldn't get the error document)
// We do NOT enter this code path when returning the bucket's
// error document (this is handled in serve_file)
let body = string_body(format!("{}\n", e));
let mut http_error = Response::new(body);
let mut body_str = format!(
r"<title>{http_code} {code_text}</title>
<h1>{http_code} {code_text}</h1>",
http_code = e.http_status_code().as_u16(),
code_text = e.http_status_code().canonical_reason().unwrap_or("Unknown"),
);
if let Error::ApiError(ref err) = e {
body_str.push_str(&format!(
r"
<ul>
<li>Code: {s3_code}</li>
<li>Message: {s3_message}.</li>
</ul>",
s3_code = err.aws_code(),
s3_message = err,
));
}
let mut http_error = Response::new(string_body(body_str));
*http_error.status_mut() = e.http_status_code();
e.add_headers(http_error.headers_mut());
http_error.headers_mut().insert(
http::header::CONTENT_TYPE,
"text/html; charset=utf-8".parse().unwrap(),
);
http_error
}