diff --git a/Cargo.lock b/Cargo.lock index f8246f9c..7a5c1612 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1263,6 +1263,7 @@ dependencies = [ "timeago", "tokio", "tracing", + "tracing-journald", "tracing-subscriber", "utoipa", ] @@ -4563,6 +4564,17 @@ dependencies = [ "tracing", ] +[[package]] +name = "tracing-journald" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc0b4143302cf1022dac868d521e36e8b27691f72c84b3311750d5188ebba657" +dependencies = [ + "libc", + "tracing-core", + "tracing-subscriber", +] + [[package]] name = "tracing-log" version = "0.2.0" diff --git a/Cargo.toml b/Cargo.toml index fdf86a0f..f31629e0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -83,6 +83,7 @@ pretty_env_logger = "0.5" structopt = { version = "0.3", default-features = false } syslog-tracing = "0.3" tracing = "0.1" +tracing-journald = "0.3.1" tracing-subscriber = { version = "0.3", features = ["env-filter"] } heed = { version = "0.11", default-features = false, features = ["lmdb"] } diff --git a/doc/api/garage-admin-v2.json b/doc/api/garage-admin-v2.json index 210b5559..09651200 100644 --- a/doc/api/garage-admin-v2.json +++ b/doc/api/garage-admin-v2.json @@ -3120,9 +3120,15 @@ "blocksPurged", "objectsDeleted", "uploadsDeleted", - "versionsDeleted" + "versionsDeleted", + "blockRefsPurged" ], "properties": { + "blockRefsPurged": { + "type": "integer", + "format": "int64", + "minimum": 0 + }, "blocksPurged": { "type": "integer", "format": "int64", @@ -3574,9 +3580,15 @@ "blocksPurged", "objectsDeleted", "uploadsDeleted", - "versionsDeleted" + "versionsDeleted", + "blockRefsPurged" ], "properties": { + "blockRefsPurged": { + "type": "integer", + "format": "int64", + "minimum": 0 + }, "blocksPurged": { "type": "integer", "format": "int64", diff --git a/doc/book/cookbook/ansible.md b/doc/book/cookbook/ansible.md index 6d624c9c..8b0d2969 100644 --- a/doc/book/cookbook/ansible.md +++ b/doc/book/cookbook/ansible.md @@ -8,18 +8,18 @@ have 
published Ansible roles. We list them and compare them below. ## Comparison of Ansible roles -| Feature | [ansible-role-garage](#zorun-ansible-role-garage) | [garage-docker-ansible-deploy](#moan0s-garage-docker-ansible-deploy) | -|------------------------------------|---------------------------------------------|---------------------------------------------------------------| -| **Runtime** | Systemd | Docker | -| **Target OS** | Any Linux | Any Linux | -| **Architecture** | amd64, arm64, i686 | amd64, arm64 | -| **Additional software** | None | Traefik | -| **Automatic node connection** | ❌ | ✅ | -| **Layout management** | ❌ | ✅ | -| **Manage buckets & keys** | ❌ | ✅ (basic) | -| **Allow custom Garage config** | ✅ | ❌ | -| **Facilitate Garage upgrades** | ✅ | ❌ | -| **Multiple instances on one host** | ✅ | ✅ | +| Feature | [ansible-role-garage](#zorun-ansible-role-garage) | [garage-docker-ansible-deploy](#moan0s-garage-docker-ansible-deploy) | [eddster ansible-role-garage](#eddster-ansible-role-garage) | +|------------------------------------|---------------------------------------------|---------------------------------------------------------------|---------------------------------| +| **Runtime** | Systemd | Docker | Systemd | +| **Target OS** | Any Linux | Any Linux | Any Linux | +| **Architecture** | amd64, arm64, i686 | amd64, arm64 | arm64, arm, 386, amd64 | +| **Additional software** | None | Traefik | Ngnix and Keepalived (optional) | +| **Automatic node connection** | ❌ | ✅ | ✅ | +| **Layout management** | ❌ | ✅ | ✅ | +| **Manage buckets & keys** | ❌ | ✅ (basic) | ✅ | +| **Allow custom Garage config** | ✅ | ❌ | ❌ | +| **Facilitate Garage upgrades** | ✅ | ❌ | ✅ | +| **Multiple instances on one host** | ✅ | ✅ | ❌ | ## zorun/ansible-role-garage @@ -49,3 +49,15 @@ structured DNS names, etc). As a result, this role makes it easier to start with Garage on Ansible, but is less flexible. 
+ +## eddster2309/ansible-role-garage + +[Source code](https://github.com/eddster2309/ansible-role-garage), [Ansible galaxy](https://galaxy.ansible.com/ui/standalone/roles/eddster2309/garage/) + +This role is an opinionated but customisable role using the official Garage +static binaries and only requires Systemd. As such it should work on any +Linux based host. It includes all the necessary configuration to +automatically set up a clustered Garage deployment. Most Garage +configuration options are exposed through Ansible variables so while you +can't provide a custom config you can get very close. It can optionally +install an HA nginx deployment with Keepalived. diff --git a/doc/book/quick-start/_index.md b/doc/book/quick-start/_index.md index b86b0f91..4d525343 100644 --- a/doc/book/quick-start/_index.md +++ b/doc/book/quick-start/_index.md @@ -182,11 +182,12 @@ ID Hostname Address Tag Zone Capacit ## Creating a cluster layout Creating a cluster layout for a Garage deployment means informing Garage -of the disk space available on each node of the cluster -as well as the zone (e.g. datacenter) each machine is located in. +of the disk space available on each node of the cluster, `-c`, +as well as the name of the zone (e.g. datacenter), `-z`, each machine is located in. -For our test deployment, we are using only one node. The way in which we configure -it does not matter, you can simply write: +For our test deployment, we have only one node with zone named `dc1` and a +capacity of `1G`, though the capacity is ignored for a single node deployment +and can be changed later when adding new nodes. ```bash garage layout assign -z dc1 -c 1G diff --git a/doc/book/reference-manual/configuration.md b/doc/book/reference-manual/configuration.md index b84162ee..389426b2 100644 --- a/doc/book/reference-manual/configuration.md +++ b/doc/book/reference-manual/configuration.md @@ -94,30 +94,30 @@ The following gives details about each available configuration option. 
[Environment variables](#env_variables). -Top-level configuration options: +Top-level configuration options, in alphabetical order: +[`allow_punycode`](#allow_punycode), [`allow_world_readable_secrets`](#allow_world_readable_secrets), [`block_ram_buffer_max`](#block_ram_buffer_max), [`block_size`](#block_size), [`bootstrap_peers`](#bootstrap_peers), [`compression_level`](#compression_level), +[`consistency_mode`](#consistency_mode), [`data_dir`](#data_dir), [`data_fsync`](#data_fsync), [`db_engine`](#db_engine), [`disable_scrub`](#disable_scrub), -[`use_local_tz`](#use_local_tz), [`lmdb_map_size`](#lmdb_map_size), [`metadata_auto_snapshot_interval`](#metadata_auto_snapshot_interval), [`metadata_dir`](#metadata_dir), [`metadata_fsync`](#metadata_fsync), [`metadata_snapshots_dir`](#metadata_snapshots_dir), [`replication_factor`](#replication_factor), -[`consistency_mode`](#consistency_mode), [`rpc_bind_addr`](#rpc_bind_addr), [`rpc_bind_outgoing`](#rpc_bind_outgoing), [`rpc_public_addr`](#rpc_public_addr), [`rpc_public_addr_subnet`](#rpc_public_addr_subnet) -[`rpc_secret`/`rpc_secret_file`](#rpc_secret). -[`allow_punycode`](#allow_punycode). +[`rpc_secret`/`rpc_secret_file`](#rpc_secret), +[`use_local_tz`](#use_local_tz). The `[consul_discovery]` section: [`api`](#consul_api), @@ -162,6 +162,10 @@ variable, it does not exist in the configuration file: Garage daemon send its logs to `syslog` (using the libc `syslog` function) instead of printing to stderr. +- `GARAGE_LOG_TO_JOURNALD` (since `v2.0.0`): set this to `1` or `true` to make the + Garage daemon send its logs to `journald` (using the native protocol of `systemd-journald`) + instead of printing to stderr. 
+ The following environment variables can be used to override the corresponding values in the configuration file: @@ -173,7 +177,7 @@ values in the configuration file: ### Top-level configuration options -#### `replication_factor` {#replication_factor} +#### `replication_factor` (since `v1.0.0`) {#replication_factor} The replication factor can be any positive integer smaller or equal the node count in your cluster. The chosen replication factor has a big impact on the cluster's failure tolerancy and performance characteristics. @@ -221,7 +225,7 @@ is in progress. In theory, no data should be lost as rebalancing is a routine operation for Garage, although we cannot guarantee you that everything will go right in such an extreme scenario. -#### `consistency_mode` {#consistency_mode} +#### `consistency_mode` (since `v1.0.0`) {#consistency_mode} The consistency mode setting determines the read and write behaviour of your cluster. diff --git a/doc/book/reference-manual/s3-compatibility.md b/doc/book/reference-manual/s3-compatibility.md index d2c47f3e..edf8de0d 100644 --- a/doc/book/reference-manual/s3-compatibility.md +++ b/doc/book/reference-manual/s3-compatibility.md @@ -23,7 +23,6 @@ Feel free to open a PR to suggest fixes this table. Minio is missing because the - 2022-05-25 - Many Ceph S3 endpoints are not documented but implemented. Following a notification from the Ceph community, we added them. - ## High-level features | Feature | Garage | [Openstack Swift](https://docs.openstack.org/swift/latest/s3_compat.html) | [Ceph Object Gateway](https://docs.ceph.com/en/latest/radosgw/s3/) | [Riak CS](https://docs.riak.com/riak/cs/2.1.1/references/apis/storage/s3/index.html) | [OpenIO](https://docs.openio.io/latest/source/arch-design/s3_compliancy.html) | @@ -34,6 +33,7 @@ Feel free to open a PR to suggest fixes this table. 
Minio is missing because the | [URL vhost-style](https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html#virtual-hosted-style-access) URL (eg. `bucket.host.tld/key`) | ✅ Implemented | ❌| ✅| ✅ | ✅ | | [Presigned URLs](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ShareObjectPreSignedURL.html) | ✅ Implemented | ❌| ✅ | ✅ | ✅(❓) | | [SSE-C encryption](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html) | ✅ Implemented | ❓ | ✅ | ❌ | ✅ | +| [Bucket versioning](https://docs.aws.amazon.com/AmazonS3/latest/userguide/Versioning.html) | ❌ Missing | ✅ | ✅ | ❌ | ✅ | *Note:* OpenIO does not says if it supports presigned URLs. Because it is part of signature v4 and they claim they support it without additional precisions, diff --git a/nix/compile.nix b/nix/compile.nix index 8cd88d01..bbadaa37 100644 --- a/nix/compile.nix +++ b/nix/compile.nix @@ -74,6 +74,7 @@ let "metrics" "telemetry-otlp" "syslog" + "journald" ])); featuresStr = lib.concatStringsSep "," rootFeatures; diff --git a/src/api/admin/api.rs b/src/api/admin/api.rs index faf7150d..cfeb9e2e 100644 --- a/src/api/admin/api.rs +++ b/src/api/admin/api.rs @@ -1309,4 +1309,5 @@ pub struct LocalPurgeBlocksResponse { pub objects_deleted: u64, pub uploads_deleted: u64, pub versions_deleted: u64, + pub block_refs_purged: u64, } diff --git a/src/api/admin/block.rs b/src/api/admin/block.rs index 4b8edc63..586f8554 100644 --- a/src/api/admin/block.rs +++ b/src/api/admin/block.rs @@ -151,6 +151,7 @@ impl RequestHandler for LocalPurgeBlocksRequest { let mut obj_dels = 0; let mut mpu_dels = 0; let mut ver_dels = 0; + let mut br_dels = 0; for hash in self.0.iter() { let hash = hex::decode(hash).ok_or_bad_request("invalid hash")?; @@ -176,11 +177,18 @@ impl RequestHandler for LocalPurgeBlocksRequest { ver_dels += 1; } } + if !br.deleted.get() { + let mut br = br; + br.deleted.set(); + garage.block_ref_table.insert(&br).await?; + br_dels += 1; + } } } 
Ok(LocalPurgeBlocksResponse { blocks_purged: self.0.len() as u64, + block_refs_purged: br_dels, versions_deleted: ver_dels, objects_deleted: obj_dels, uploads_deleted: mpu_dels, diff --git a/src/api/s3/api_server.rs b/src/api/s3/api_server.rs index 1c967d58..cc961dde 100644 --- a/src/api/s3/api_server.rs +++ b/src/api/s3/api_server.rs @@ -223,6 +223,7 @@ impl ApiHandler for S3ApiServer { Endpoint::DeleteBucket {} => handle_delete_bucket(ctx).await, Endpoint::GetBucketLocation {} => handle_get_bucket_location(ctx), Endpoint::GetBucketVersioning {} => handle_get_bucket_versioning(), + Endpoint::GetBucketAcl {} => handle_get_bucket_acl(ctx), Endpoint::ListObjects { delimiter, encoding_type, diff --git a/src/api/s3/bucket.rs b/src/api/s3/bucket.rs index 1bdcf32d..217d74f0 100644 --- a/src/api/s3/bucket.rs +++ b/src/api/s3/bucket.rs @@ -5,7 +5,7 @@ use hyper::{Request, Response, StatusCode}; use garage_model::bucket_alias_table::*; use garage_model::bucket_table::Bucket; use garage_model::garage::Garage; -use garage_model::key_table::Key; +use garage_model::key_table::{Key, KeyParams}; use garage_model::permission::BucketKeyPerm; use garage_table::util::*; use garage_util::crdt::*; @@ -44,6 +44,55 @@ pub fn handle_get_bucket_versioning() -> Result, Error> { .body(string_body(xml))?) } +pub fn handle_get_bucket_acl(ctx: ReqCtx) -> Result, Error> { + let ReqCtx { + bucket_id, api_key, .. 
+ } = ctx; + let key_p = api_key.params().ok_or_internal_error( + "Key should not be in deleted state at this point (in handle_get_bucket_acl)", + )?; + + let mut grants: Vec = vec![]; + let kp = api_key.bucket_permissions(&bucket_id); + + if kp.allow_owner { + grants.push(s3_xml::Grant { + grantee: create_grantee(&key_p, &api_key), + permission: s3_xml::Value("FULL_CONTROL".to_string()), + }); + } else { + if kp.allow_read { + grants.push(s3_xml::Grant { + grantee: create_grantee(&key_p, &api_key), + permission: s3_xml::Value("READ".to_string()), + }); + grants.push(s3_xml::Grant { + grantee: create_grantee(&key_p, &api_key), + permission: s3_xml::Value("READ_ACP".to_string()), + }); + } + if kp.allow_write { + grants.push(s3_xml::Grant { + grantee: create_grantee(&key_p, &api_key), + permission: s3_xml::Value("WRITE".to_string()), + }); + } + } + + let access_control_policy = s3_xml::AccessControlPolicy { + xmlns: (), + owner: None, + acl: s3_xml::AccessControlList { entries: grants }, + }; + + let xml = s3_xml::to_xml_with_header(&access_control_policy)?; + trace!("xml: {}", xml); + + Ok(Response::builder() + .header("Content-Type", "application/xml") + .body(string_body(xml))?) 
+} + pub async fn handle_list_buckets( garage: &Garage, api_key: &Key, @@ -306,6 +355,15 @@ fn parse_create_bucket_xml(xml_bytes: &[u8]) -> Option> { Some(ret) } +fn create_grantee(key_params: &KeyParams, api_key: &Key) -> s3_xml::Grantee { + s3_xml::Grantee { + xmlns_xsi: (), + typ: "CanonicalUser".to_string(), + display_name: Some(s3_xml::Value(key_params.name.get().to_string())), + id: Some(s3_xml::Value(api_key.key_id.to_string())), + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/src/api/s3/copy.rs b/src/api/s3/copy.rs index b40f3161..baec1fef 100644 --- a/src/api/s3/copy.rs +++ b/src/api/s3/copy.rs @@ -26,7 +26,7 @@ use garage_api_common::signature::checksum::*; use crate::api_server::{ReqBody, ResBody}; use crate::encryption::{EncryptionParams, OekDerivationInfo}; use crate::error::*; -use crate::get::{full_object_byte_stream, PreconditionHeaders}; +use crate::get::{check_version_not_deleted, full_object_byte_stream, PreconditionHeaders}; use crate::multipart; use crate::put::{extract_metadata_headers, save_stream, ChecksumMode, SaveStreamResult}; use crate::website::X_AMZ_WEBSITE_REDIRECT_LOCATION; @@ -268,6 +268,7 @@ async fn handle_copy_metaonly( .get(&source_version.uuid, &EmptyKey) .await?; let source_version = source_version.ok_or(Error::NoSuchKey)?; + check_version_not_deleted(&source_version)?; // Write an "uploading" marker in Object table // This holds a reference to the object in the Version table @@ -468,6 +469,7 @@ pub async fn handle_upload_part_copy( .get(&source_object_version.uuid, &EmptyKey) .await? .ok_or(Error::NoSuchKey)?; + check_version_not_deleted(&source_version)?; // We want to reuse blocks from the source version as much as possible. 
// However, we still need to get the data from these blocks diff --git a/src/api/s3/get.rs b/src/api/s3/get.rs index 723e6775..77d8a41a 100644 --- a/src/api/s3/get.rs +++ b/src/api/s3/get.rs @@ -19,12 +19,13 @@ use garage_net::stream::ByteStream; use garage_rpc::rpc_helper::OrderTag; use garage_table::EmptyKey; use garage_util::data::*; -use garage_util::error::OkOrMessage; +use garage_util::error::{Error as UtilError, OkOrMessage}; use garage_model::garage::Garage; use garage_model::s3::object_table::*; use garage_model::s3::version_table::*; +use garage_api_common::common_error::CommonError; use garage_api_common::helpers::*; use garage_api_common::signature::checksum::{add_checksum_response_headers, X_AMZ_CHECKSUM_MODE}; @@ -219,6 +220,7 @@ pub async fn handle_head_without_ctx( .get(&object_version.uuid, &EmptyKey) .await? .ok_or(Error::NoSuchKey)?; + check_version_not_deleted(&version)?; let (part_offset, part_end) = calculate_part_bounds(&version, pn).ok_or(Error::InvalidPart)?; @@ -373,6 +375,21 @@ pub async fn handle_get_without_ctx( } } +pub(crate) fn check_version_not_deleted(version: &Version) -> Result<(), Error> { + if version.deleted.get() { + // the version was deleted between when the object_table was consulted + // and now, this could mean the object was deleted, or overriden. + // Rather than say the key doesn't exist, return a transient error + // to signal the client to try again. 
+ return Err(CommonError::InternalError(UtilError::Message( + "conflict/inconsistency between object and version state, version is deleted" + .to_string(), + )) + .into()); + } + Ok(()) +} + async fn handle_get_full( garage: Arc, version: &ObjectVersion, @@ -439,6 +456,7 @@ pub fn full_object_byte_stream( .ok_or_message("channel closed")?; let version = version_fut.await.unwrap()?.ok_or(Error::NoSuchKey)?; + check_version_not_deleted(&version)?; for (i, (_, vb)) in version.blocks.items().iter().enumerate().skip(1) { let stream_block_i = encryption .get_block(&garage, &vb.hash, Some(order_stream.order(i as u64))) @@ -454,6 +472,14 @@ pub fn full_object_byte_stream( { Ok(()) => (), Err(e) => { + // TODO i think this is a bad idea, we should log + // an error and stop there. If the error happens to + // be exactly the size of what hasn't been streamed + // yet, the client will see the request as a + // success + // instead truncating the output notify the client + // something happened with their download, so that + // they can retry it let _ = tx.send(error_stream_item(e)).await; } } @@ -505,7 +531,7 @@ async fn handle_get_range( .get(&version.uuid, &EmptyKey) .await? .ok_or(Error::NoSuchKey)?; - + check_version_not_deleted(&version)?; let body = body_from_blocks_range(garage, encryption, version.blocks.items(), begin, end); Ok(resp_builder.body(body)?) @@ -556,6 +582,8 @@ async fn handle_get_part( .await? 
.ok_or(Error::NoSuchKey)?; + check_version_not_deleted(&version)?; + let (begin, end) = calculate_part_bounds(&version, part_number).ok_or(Error::InvalidPart)?; diff --git a/src/api/s3/xml.rs b/src/api/s3/xml.rs index 6c1d8f88..bfe95fff 100644 --- a/src/api/s3/xml.rs +++ b/src/api/s3/xml.rs @@ -13,6 +13,10 @@ pub fn xmlns_tag(_v: &(), s: S) -> Result { s.serialize_str("http://s3.amazonaws.com/doc/2006-03-01/") } +pub fn xmlns_xsi_tag(_v: &(), s: S) -> Result { + s.serialize_str("http://www.w3.org/2001/XMLSchema-instance") +} + #[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] pub struct Value(#[serde(rename = "$value")] pub String); @@ -325,6 +329,42 @@ pub struct PostObject { pub etag: Value, } +#[derive(Debug, Serialize, PartialEq, Eq)] +pub struct Grantee { + #[serde(rename = "xmlns:xsi", serialize_with = "xmlns_xsi_tag")] + pub xmlns_xsi: (), + #[serde(rename = "xsi:type")] + pub typ: String, + #[serde(rename = "DisplayName")] + pub display_name: Option, + #[serde(rename = "ID")] + pub id: Option, +} + +#[derive(Debug, Serialize, PartialEq, Eq)] +pub struct Grant { + #[serde(rename = "Grantee")] + pub grantee: Grantee, + #[serde(rename = "Permission")] + pub permission: Value, +} + +#[derive(Debug, Serialize, PartialEq, Eq)] +pub struct AccessControlList { + #[serde(rename = "Grant")] + pub entries: Vec, +} + +#[derive(Debug, Serialize, PartialEq, Eq)] +pub struct AccessControlPolicy { + #[serde(serialize_with = "xmlns_tag")] + pub xmlns: (), + #[serde(rename = "Owner")] + pub owner: Option, + #[serde(rename = "AccessControlList")] + pub acl: AccessControlList, +} + #[cfg(test)] mod tests { use super::*; @@ -433,6 +473,43 @@ mod tests { Ok(()) } + #[test] + fn get_bucket_acl_result() -> Result<(), ApiError> { + let grant = Grant { + grantee: Grantee { + xmlns_xsi: (), + typ: "CanonicalUser".to_string(), + display_name: Some(Value("owner_name".to_string())), + id: Some(Value("qsdfjklm".to_string())), + }, + permission: 
Value("FULL_CONTROL".to_string()), + }; + + let get_bucket_acl = AccessControlPolicy { + xmlns: (), + owner: None, + acl: AccessControlList { + entries: vec![grant], + }, + }; + assert_eq!( + to_xml_with_header(&get_bucket_acl)?, + "\ +\ + \ + \ + \ + owner_name\ + qsdfjklm\ + \ + FULL_CONTROL\ + \ + \ +" + ); + Ok(()) + } + #[test] fn delete_result() -> Result<(), ApiError> { let delete_result = DeleteResult { diff --git a/src/garage/Cargo.toml b/src/garage/Cargo.toml index 5bcb2cd2..9519ca51 100644 --- a/src/garage/Cargo.toml +++ b/src/garage/Cargo.toml @@ -59,6 +59,7 @@ opentelemetry.workspace = true opentelemetry-prometheus = { workspace = true, optional = true } opentelemetry-otlp = { workspace = true, optional = true } syslog-tracing = { workspace = true, optional = true } +tracing-journald = { workspace = true, optional = true } [dev-dependencies] garage_api_common.workspace = true @@ -103,6 +104,8 @@ metrics = [ "garage_api_admin/metrics", "opentelemetry-prometheus" ] telemetry-otlp = [ "opentelemetry-otlp" ] # Logging to syslog syslog = [ "syslog-tracing" ] +# Logging to journald +journald = [ "tracing-journald" ] # NOTE: bundled-libs and system-libs should be treat as mutually exclusive; # exactly one of them should be enabled. 
diff --git a/src/garage/cli/remote/block.rs b/src/garage/cli/remote/block.rs index f70decd7..c0e9d98a 100644 --- a/src/garage/cli/remote/block.rs +++ b/src/garage/cli/remote/block.rs @@ -160,8 +160,8 @@ impl Cli { .await?; println!( - "Purged {} blocks: deleted {} versions, {} objects, {} multipart uploads", - res.blocks_purged, res.versions_deleted, res.objects_deleted, res.uploads_deleted, + "Purged {} blocks: deleted {} block refs, {} versions, {} objects, {} multipart uploads", + res.blocks_purged, res.block_refs_purged, res.versions_deleted, res.objects_deleted, res.uploads_deleted, ); Ok(()) diff --git a/src/garage/main.rs b/src/garage/main.rs index a72b860c..72af19a9 100644 --- a/src/garage/main.rs +++ b/src/garage/main.rs @@ -215,6 +215,43 @@ fn init_logging(opt: &Opt) { } } + if std::env::var("GARAGE_LOG_TO_JOURNALD") + .map(|x| x == "1" || x == "true") + .unwrap_or(false) + { + #[cfg(feature = "journald")] + { + use tracing_journald::{Priority, PriorityMappings}; + use tracing_subscriber::layer::SubscriberExt; + use tracing_subscriber::util::SubscriberInitExt; + + let registry = tracing_subscriber::registry() + .with(tracing_subscriber::fmt::layer().with_writer(std::io::sink)) + .with(env_filter); + match tracing_journald::layer() { + Ok(layer) => { + registry + .with(layer.with_priority_mappings(PriorityMappings { + info: Priority::Informational, + debug: Priority::Debug, + ..PriorityMappings::new() + })) + .init(); + } + Err(e) => { + eprintln!("Couldn't connect to journald: {}.", e); + std::process::exit(1); + } + } + return; + } + #[cfg(not(feature = "journald"))] + { + eprintln!("Journald support is not enabled in this build."); + std::process::exit(1); + } + } + tracing_subscriber::fmt() .with_writer(std::io::stderr) .with_env_filter(env_filter) diff --git a/src/garage/tests/s3/website.rs b/src/garage/tests/s3/website.rs index b4820864..eb17e6d5 100644 --- a/src/garage/tests/s3/website.rs +++ b/src/garage/tests/s3/website.rs @@ -1057,3 +1057,45 @@ 
async fn test_website_puny() { ); } } + +#[tokio::test] +async fn test_website_object_not_found() { + const BCKT_NAME: &str = "not-found"; + let ctx = common::context(); + let _bucket = ctx.create_bucket(BCKT_NAME); + + let client = Client::builder(TokioExecutor::new()).build_http(); + + let req = |suffix| { + Request::builder() + .method("GET") + .uri(format!("http://127.0.0.1:{}/", ctx.garage.web_port)) + .header("Host", format!("{}{}", BCKT_NAME, suffix)) + .body(Body::new(Bytes::new())) + .unwrap() + }; + + ctx.garage + .command() + .args(["bucket", "website", "--allow", BCKT_NAME]) + .quiet() + .expect_success_status("Could not allow website on bucket"); + + let resp = client.request(req("")).await.unwrap(); + assert_eq!(resp.status(), StatusCode::NOT_FOUND); + // the error we return by default are *not* xml + assert_eq!( + resp.headers().get(http::header::CONTENT_TYPE).unwrap(), + "text/html; charset=utf-8" + ); + let result = String::from_utf8( + resp.into_body() + .collect() + .await + .unwrap() + .to_bytes() + .to_vec(), + ) + .unwrap(); + assert!(result.contains("not found")); +} diff --git a/src/web/web_server.rs b/src/web/web_server.rs index fc9c0492..4703acf3 100644 --- a/src/web/web_server.rs +++ b/src/web/web_server.rs @@ -469,10 +469,30 @@ fn error_to_res(e: Error) -> Response> { // was a HEAD request or we couldn't get the error document) // We do NOT enter this code path when returning the bucket's // error document (this is handled in serve_file) - let body = string_body(format!("{}\n", e)); - let mut http_error = Response::new(body); + let mut body_str = format!( + r"{http_code} {code_text} +

{http_code} {code_text}

", + http_code = e.http_status_code().as_u16(), + code_text = e.http_status_code().canonical_reason().unwrap_or("Unknown"), + ); + if let Error::ApiError(ref err) = e { + body_str.push_str(&format!( + r" +
    +
  • Code: {s3_code}
  • +
  • Message: {s3_message}.
  • +
", + s3_code = err.aws_code(), + s3_message = err, + )); + } + let mut http_error = Response::new(string_body(body_str)); *http_error.status_mut() = e.http_status_code(); e.add_headers(http_error.headers_mut()); + http_error.headers_mut().insert( + http::header::CONTENT_TYPE, + "text/html; charset=utf-8".parse().unwrap(), + ); http_error }