refactor: add database crates (#1407)

Closes #1277
Ethan Donowitz 2023-01-10 16:06:03 -05:00 committed by GitHub
parent d11787965c
commit b5b7e57f93
122 changed files with 2431 additions and 1492 deletions


@ -43,12 +43,18 @@ commands:
flake8 syncserver/src/tokenserver
flake8 tools/integration_tests
flake8 tools/tokenserver
rust-clippy:
rust-clippy-mysql:
steps:
- run:
name: Rust Clippy
name: Rust Clippy MySQL
command: |
cargo clippy --workspace --all-targets --all-features -- -D warnings
cargo clippy --workspace --all-targets --no-default-features --features=syncstorage-db/mysql -- -D warnings
rust-clippy-spanner:
steps:
- run:
name: Rust Clippy Spanner
command: |
cargo clippy --workspace --all-targets --no-default-features --features=syncstorage-db/spanner -- -D warnings
cargo-build:
steps:
- run:
@ -105,7 +111,7 @@ commands:
-f docker-compose.mysql.yaml
-f docker-compose.e2e.mysql.yaml
up
--exit-code-from e2e-tests
--exit-code-from mysql-e2e-tests
--abort-on-container-exit
environment:
SYNCSTORAGE_RS_IMAGE: app:build
@ -129,7 +135,7 @@ commands:
-f docker-compose.spanner.yaml
-f docker-compose.e2e.spanner.yaml
up
--exit-code-from e2e-tests
--exit-code-from spanner-e2e-tests
--abort-on-container-exit
environment:
SYNCSTORAGE_RS_IMAGE: app:build
@ -164,13 +170,15 @@ jobs:
auth:
username: $DOCKER_USER
password: $DOCKER_PASS
resource_class: large
steps:
- checkout
- display-rust
- setup-rust-check
- setup-gcp-grpc
- rust-check
- rust-clippy
- rust-clippy-spanner
- rust-clippy-mysql
- setup-python
- python-check
@ -197,16 +205,6 @@ jobs:
MYSQL_DATABASE: syncstorage
resource_class: large
steps:
- setup_remote_docker:
docker_layer_caching: true
- run:
name: Login to Dockerhub
command: |
if [ "${DOCKER_USER}" == "" ] || [ "${DOCKER_PASS}" == "" ]; then
echo "Skipping Login to DockerHub, credentials unavailable"
else
echo "${DOCKER_PASS}" | docker login -u="${DOCKER_USER}" --password-stdin
fi
- checkout
- display-rust
- setup-python
@ -221,9 +219,20 @@ jobs:
- run-tests
- run-tokenserver-scripts-tests
#- save-sccache-cache
build-mysql-image:
docker:
- image: cimg/rust:1.60.0
auth:
username: $DOCKER_USER
password: $DOCKER_PASS
resource_class: large
steps:
- setup_remote_docker:
docker_layer_caching: true
- checkout
- run:
name: Build Docker image
command: docker build -t app:build .
name: Build MySQL Docker image
command: docker build -t app:build --build-arg DATABASE_BACKEND=mysql .
no_output_timeout: 30m
# save the built docker container into CircleCI's cache. This is
# required since Workflows do not have the same remote docker instance.
@ -234,13 +243,43 @@ jobs:
docker save -o /home/circleci/cache/docker.tar "app:build"
- run:
name: Save docker-compose config
command: cp docker-compose*.yaml /home/circleci/cache
command: cp docker-compose*mysql.yaml /home/circleci/cache
- save_cache:
key: v1-{{ .Branch }}-{{ .Environment.CIRCLE_SHA1 }}-{{ epoch }}
key: mysql-{{ .Branch }}-{{ .Environment.CIRCLE_SHA1 }}-{{ epoch }}
paths:
- /home/circleci/cache
e2e-tests:
build-spanner-image:
docker:
- image: cimg/rust:1.60.0
auth:
username: $DOCKER_USER
password: $DOCKER_PASS
resource_class: large
steps:
- setup_remote_docker:
docker_layer_caching: true
- checkout
- run:
name: Build Spanner Docker image
command: docker build -t app:build --build-arg DATABASE_BACKEND=spanner .
no_output_timeout: 30m
# save the built docker container into CircleCI's cache. This is
# required since Workflows do not have the same remote docker instance.
- run:
name: docker save app:build
command: |
mkdir -p /home/circleci/cache
docker save -o /home/circleci/cache/docker.tar "app:build"
- run:
name: Save docker-compose config
command: cp docker-compose*spanner.yaml /home/circleci/cache
- save_cache:
key: spanner-{{ .Branch }}-{{ .Environment.CIRCLE_SHA1 }}-{{ epoch }}
paths:
- /home/circleci/cache
mysql-e2e-tests:
docker:
- image: docker/compose:1.24.0
auth:
@ -249,7 +288,7 @@ jobs:
steps:
- setup_remote_docker
- restore_cache:
key: v1-{{ .Branch }}-{{ .Environment.CIRCLE_SHA1 }}
key: mysql-{{ .Branch }}-{{ .Environment.CIRCLE_SHA1 }}
- run:
name: Restore Docker image cache
command: docker load -i /home/circleci/cache/docker.tar
@ -257,6 +296,23 @@ jobs:
name: Restore docker-compose config
command: cp /home/circleci/cache/docker-compose*.yaml .
- run-e2e-mysql-tests
spanner-e2e-tests:
docker:
- image: docker/compose:1.24.0
auth:
username: $DOCKER_USER
password: $DOCKER_PASS
steps:
- setup_remote_docker
- restore_cache:
key: spanner-{{ .Branch }}-{{ .Environment.CIRCLE_SHA1 }}
- run:
name: Restore Docker image cache
command: docker load -i /home/circleci/cache/docker.tar
- run:
name: Restore docker-compose config
command: cp /home/circleci/cache/docker-compose*.yaml .
- run-e2e-spanner-tests
deploy:
@ -268,7 +324,7 @@ jobs:
steps:
- setup_remote_docker
- restore_cache:
key: v1-{{ .Branch }}-{{ .Environment.CIRCLE_SHA1 }}
key: spanner-{{ .Branch }}-{{ .Environment.CIRCLE_SHA1 }}
- run:
name: Restore Docker image cache
command: docker load -i /home/circleci/cache/docker.tar
@ -346,21 +402,41 @@ workflows:
filters:
tags:
only: /.*/
- e2e-tests:
- build-mysql-image:
requires:
- build-and-test
filters:
tags:
only: /.*/
- build-spanner-image:
requires:
- build-and-test
filters:
tags:
only: /.*/
- mysql-e2e-tests:
requires:
- build-mysql-image
filters:
tags:
only: /.*/
- spanner-e2e-tests:
requires:
- build-spanner-image
filters:
tags:
only: /.*/
- deploy:
requires:
- e2e-tests
- mysql-e2e-tests
- spanner-e2e-tests
filters:
tags:
only: /.*/
- deploy-python-utils:
requires:
- e2e-tests
- mysql-e2e-tests
- spanner-e2e-tests
filters:
tags:
only: /.*/

Cargo.lock generated

@ -429,17 +429,6 @@ version = "0.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd"
[[package]]
name = "bb8"
version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "374bba43fc924d90393ee7768e6f75d223a98307a488fe5bc34b66c3e96932a6"
dependencies = [
"async-trait",
"futures 0.3.19",
"tokio",
]
[[package]]
name = "bindgen"
version = "0.57.0"
@ -3006,41 +2995,27 @@ dependencies = [
"actix-cors",
"actix-http",
"actix-rt",
"actix-service",
"actix-web",
"async-trait",
"backtrace",
"base64 0.13.0",
"bb8",
"bytes 1.1.0",
"cadence",
"chrono",
"deadpool",
"diesel",
"diesel_logger",
"diesel_migrations",
"docopt",
"dyn-clone",
"env_logger",
"futures 0.3.19",
"google-cloud-rust-raw",
"grpcio",
"hawk",
"hex",
"hmac",
"hostname",
"http",
"lazy_static",
"log",
"mime",
"mockito",
"num_cpus",
"protobuf",
"pyo3",
"rand 0.8.5",
"regex",
"reqwest",
"scheduled-thread-pool",
"sentry",
"sentry-backtrace",
"serde 1.0.135",
@ -3057,15 +3032,15 @@ dependencies = [
"syncserver-common",
"syncserver-db-common",
"syncserver-settings",
"syncstorage-db",
"syncstorage-settings",
"thiserror",
"time 0.3.9",
"tokenserver-common",
"tokenserver-db",
"tokenserver-settings",
"tokio",
"url 2.2.2",
"urlencoding",
"uuid",
"validator",
"validator_derive",
"woothee",
@ -3075,30 +3050,29 @@ dependencies = [
name = "syncserver-common"
version = "0.13.1"
dependencies = [
"actix-web",
"cadence",
"futures 0.3.19",
"hkdf",
"serde 1.0.135",
"serde_json",
"sha2",
"slog",
"slog-scope",
]
[[package]]
name = "syncserver-db-common"
version = "0.13.1"
dependencies = [
"async-trait",
"backtrace",
"chrono",
"deadpool",
"diesel",
"diesel_migrations",
"futures 0.3.19",
"grpcio",
"hostname",
"http",
"lazy_static",
"serde 1.0.135",
"serde_json",
"syncserver-common",
"thiserror",
"url 2.2.2",
]
[[package]]
@ -3115,6 +3089,71 @@ dependencies = [
"url 2.2.2",
]
[[package]]
name = "syncstorage-db"
version = "0.12.3"
dependencies = [
"async-trait",
"cadence",
"env_logger",
"futures 0.3.19",
"hostname",
"lazy_static",
"log",
"rand 0.8.5",
"slog-scope",
"syncserver-common",
"syncserver-db-common",
"syncserver-settings",
"syncstorage-db-common",
"syncstorage-mysql",
"syncstorage-settings",
"syncstorage-spanner",
"tokio",
]
[[package]]
name = "syncstorage-db-common"
version = "0.12.3"
dependencies = [
"async-trait",
"backtrace",
"chrono",
"diesel",
"diesel_migrations",
"futures 0.3.19",
"http",
"lazy_static",
"serde 1.0.135",
"serde_json",
"syncserver-common",
"syncserver-db-common",
"thiserror",
]
[[package]]
name = "syncstorage-mysql"
version = "0.12.3"
dependencies = [
"async-trait",
"backtrace",
"base64 0.13.0",
"diesel",
"diesel_logger",
"diesel_migrations",
"env_logger",
"futures 0.3.19",
"http",
"slog-scope",
"syncserver-common",
"syncserver-db-common",
"syncserver-settings",
"syncstorage-db-common",
"syncstorage-settings",
"thiserror",
"url 2.2.2",
]
[[package]]
name = "syncstorage-settings"
version = "0.13.1"
@ -3125,6 +3164,32 @@ dependencies = [
"time 0.3.9",
]
[[package]]
name = "syncstorage-spanner"
version = "0.12.3"
dependencies = [
"async-trait",
"backtrace",
"cadence",
"deadpool",
"env_logger",
"futures 0.3.19",
"google-cloud-rust-raw",
"grpcio",
"http",
"log",
"protobuf",
"slog-scope",
"syncserver-common",
"syncserver-db-common",
"syncstorage-db-common",
"syncstorage-settings",
"thiserror",
"tokio",
"url 2.2.2",
"uuid",
]
[[package]]
name = "synstructure"
version = "0.12.6"
@ -3305,10 +3370,34 @@ dependencies = [
"serde 1.0.135",
"serde_json",
"syncserver-common",
"syncserver-db-common",
"thiserror",
]
[[package]]
name = "tokenserver-db"
version = "0.12.3"
dependencies = [
"async-trait",
"backtrace",
"diesel",
"diesel_logger",
"diesel_migrations",
"env_logger",
"futures 0.3.19",
"http",
"serde 1.0.135",
"serde_derive",
"serde_json",
"slog-scope",
"syncserver-common",
"syncserver-db-common",
"syncserver-settings",
"thiserror",
"tokenserver-common",
"tokenserver-settings",
"tokio",
]
[[package]]
name = "tokenserver-settings"
version = "0.13.1"


@ -1,11 +1,16 @@
[workspace]
resolver = "2"
members = [
"syncserver-settings",
"syncserver-common",
"syncserver-db-common",
"syncserver-settings",
"syncstorage-db",
"syncstorage-db-common",
"syncstorage-mysql",
"syncstorage-settings",
"syncstorage-spanner",
"tokenserver-common",
"tokenserver-db",
"tokenserver-settings",
"syncserver",
]
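The split into per-backend crates is wired together through cargo features on the new syncstorage-db facade crate (hence the --features=syncstorage-db/mysql and --features=syncstorage-db/spanner flags used throughout CI, the Makefile, and the Dockerfile in this commit). A minimal sketch of how such a facade could re-export one pool per feature; the exact re-export paths are an assumption, although MysqlDbPool, SpannerDbPool, and DbPoolImpl all appear elsewhere in this diff:

// syncstorage-db/src/lib.rs, hypothetical sketch of the feature gating.
// Exactly one backend feature is expected to be enabled per build.
#[cfg(feature = "mysql")]
pub use syncstorage_mysql::MysqlDbPool as DbPoolImpl;
#[cfg(feature = "spanner")]
pub use syncstorage_spanner::SpannerDbPool as DbPoolImpl;

// Callers such as syncserver stay backend-agnostic:
// let db_pool = DbPoolImpl::new(&settings.syncstorage, &metrics, blocking_threadpool)?;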


@ -1,8 +1,35 @@
FROM rust:1.66-buster as builder
FROM lukemathwalker/cargo-chef:0.1.50-rust-1.66-buster as chef
WORKDIR /app
ADD . /app
ENV PATH=$PATH:/root/.cargo/bin
# temp removed --no-install-recommends due to CI docker build issue
FROM chef AS planner
COPY . .
RUN cargo chef prepare --recipe-path recipe.json
FROM chef AS cacher
ARG DATABASE_BACKEND=spanner
COPY --from=planner /app/mysql_pubkey.asc mysql_pubkey.asc
# cmake is required to build grpcio-sys for Spanner builds
RUN \
echo "deb https://repo.mysql.com/apt/debian/ buster mysql-8.0" >> /etc/apt/sources.list && \
# mysql_pubkey.asc from:
# https://dev.mysql.com/doc/refman/8.0/en/checking-gpg-signature.html
# related:
# https://dev.mysql.com/doc/mysql-apt-repo-quick-guide/en/#repo-qg-apt-repo-manual-setup
apt-key adv --import mysql_pubkey.asc && \
apt-get -q update && \
apt-get -q install -y --no-install-recommends libmysqlclient-dev cmake
COPY --from=planner /app/recipe.json recipe.json
RUN cargo chef cook --release --no-default-features --features=syncstorage-db/$DATABASE_BACKEND --recipe-path recipe.json
FROM chef as builder
ARG DATABASE_BACKEND=spanner
COPY . /app
COPY --from=cacher /app/target /app/target
COPY --from=cacher $CARGO_HOME /app/$CARGO_HOME
RUN \
echo "deb https://repo.mysql.com/apt/debian/ buster mysql-8.0" >> /etc/apt/sources.list && \
# mysql_pubkey.asc from:
@ -15,11 +42,13 @@ RUN \
pip3 install -r requirements.txt && \
rm -rf /var/lib/apt/lists/*
ENV PATH=$PATH:/root/.cargo/bin
RUN \
cargo --version && \
rustc --version && \
cargo install --path ./syncserver --locked --root /app && \
cargo install --path ./syncserver --locked --root /app --bin purge_ttl
cargo install --path ./syncserver --no-default-features --features=syncstorage-db/$DATABASE_BACKEND --locked --root /app && \
if [ "$DATABASE_BACKEND" = "spanner" ] ; then cargo install --path ./syncstorage-spanner --locked --root /app --bin purge_ttl ; fi
FROM debian:buster-slim
WORKDIR /app
@ -56,7 +85,7 @@ COPY --from=builder /app/tools/integration_tests /app/tools/integration_tests
COPY --from=builder /app/tools/tokenserver/process_account_events.py /app/tools/tokenserver/process_account_events.py
COPY --from=builder /app/tools/tokenserver/requirements.txt /app/tools/tokenserver/requirements.txt
COPY --from=builder /app/scripts/prepare-spanner.sh /app/scripts/prepare-spanner.sh
COPY --from=builder /app/syncserver/src/db/spanner/schema.ddl /app/schema.ddl
COPY --from=builder /app/syncstorage-spanner/src/schema.ddl /app/schema.ddl
RUN chmod +x /app/scripts/prepare-spanner.sh
RUN pip3 install -r /app/tools/integration_tests/requirements.txt


@ -10,9 +10,15 @@ PATH_TO_SYNC_SPANNER_KEYS = `pwd`/service-account.json
# https://github.com/mozilla-services/server-syncstorage
PATH_TO_GRPC_CERT = ../server-syncstorage/local/lib/python2.7/site-packages/grpc/_cython/_credentials/roots.pem
clippy:
SRC_ROOT = $(shell pwd)
clippy_mysql:
# Matches what's run in circleci
cargo clippy --workspace --all-targets --all-features -- -D warnings
cargo clippy --workspace --all-targets --no-default-features --features=syncstorage-db/mysql -- -D warnings
clippy_spanner:
# Matches what's run in circleci
cargo clippy --workspace --all-targets --no-default-features --features=syncstorage-db/spanner -- -D warnings
clean:
cargo clean
@ -40,14 +46,28 @@ python:
python3 -m venv venv
venv/bin/python -m pip install -r requirements.txt
run: python
PATH="./venv/bin:$(PATH)" RUST_LOG=debug RUST_BACKTRACE=full cargo run -- --config config/local.toml
run_mysql: python
PATH="./venv/bin:$(PATH)" \
# See https://github.com/PyO3/pyo3/issues/1741 for discussion re: why we need to set the
# below env var
PYTHONPATH=$(SRC_ROOT)/venv/lib/python3.9/site-packages \
RUST_LOG=debug \
RUST_BACKTRACE=full \
cargo run --no-default-features --features=syncstorage-db/mysql -- --config config/local.toml
run_spanner:
GOOGLE_APPLICATION_CREDENTIALS=$(PATH_TO_SYNC_SPANNER_KEYS) GRPC_DEFAULT_SSL_ROOTS_FILE_PATH=$(PATH_TO_GRPC_CERT) make run
run_spanner: python
GOOGLE_APPLICATION_CREDENTIALS=$(PATH_TO_SYNC_SPANNER_KEYS) \
GRPC_DEFAULT_SSL_ROOTS_FILE_PATH=$(PATH_TO_GRPC_CERT) \
# See https://github.com/PyO3/pyo3/issues/1741 for discussion re: why we need to set the
# below env var
PYTHONPATH=$(SRC_ROOT)/venv/lib/python3.9/site-packages \
PATH="./venv/bin:$(PATH)" \
RUST_LOG=debug \
RUST_BACKTRACE=full \
cargo run --no-default-features --features=syncstorage-db/spanner -- --config config/local.toml
test:
SYNC_SYNCSTORAGE__DATABASE_URL=mysql://sample_user:sample_password@localhost/syncstorage_rs \
SYNC_TOKENSERVER__DATABASE_URL=mysql://sample_user:sample_password@localhost/tokenserver_rs \
RUST_TEST_THREADS=1 \
cargo test
cargo test --workspace


@ -1,22 +1,21 @@
# Example MySQL DSN:
database_url = "mysql://sample_user:sample_password@localhost/syncstorage_rs"
# Example Spanner DSN:
# database_url="spanner://projects/SAMPLE_GCP_PROJECT/instances/SAMPLE_SPANNER_INSTANCE/databases/SAMPLE_SPANNER_DB"
"limits.max_total_records"=1666 # See issues #298/#333
master_secret = "INSERT_SECRET_KEY_HERE"
# removing this line will default to moz_json formatted logs (which is preferred for production envs)
human_logs = 1
# Example Syncstorage settings:
# Example MySQL DSN:
syncstorage.database_url = "mysql://sample_user:sample_password@localhost/syncstorage_rs"
# Example Spanner DSN:
# database_url="spanner://projects/SAMPLE_GCP_PROJECT/instances/SAMPLE_SPANNER_INSTANCE/databases/SAMPLE_SPANNER_DB"
# enable quota limits
enable_quota = 0
syncstorage.enable_quota = 0
# set the quota limit to 2GB.
# max_quota_limit = 200000000
syncstorage.enabled = true
syncstorage.limits.max_total_records = 1666 # See issues #298/#333
# Example Tokenserver settings:
disable_syncstorage = false
tokenserver.database_url = "mysql://sample_user:sample_password@localhost/tokenserver_rs"
tokenserver.enabled = true
tokenserver.fxa_email_domain = "api-accounts.stage.mozaws.net"


@ -14,7 +14,7 @@ services:
sleep 15;
/app/bin/syncserver;
"
e2e-tests:
mysql-e2e-tests:
depends_on:
- mock-fxa-server
- syncserver


@ -14,7 +14,7 @@ services:
sleep 15;
/app/bin/syncserver;
"
e2e-tests:
spanner-e2e-tests:
depends_on:
- mock-fxa-server
- syncserver


@ -4,5 +4,16 @@ version = "0.13.1"
edition = "2021"
[dependencies]
actix-web = "3"
cadence = "0.26"
futures = "0.3"
hkdf = "0.11"
sha2 = "0.9"
serde = "1.0"
serde_json = { version = "1.0", features = ["arbitrary_precision"] }
slog = { version = "2.5", features = [
"max_level_info",
"release_max_level_info",
"dynamic-keys",
] }
slog-scope = "4.3"


@ -1,6 +1,19 @@
#[macro_use]
extern crate slog_scope;
use std::{
fmt,
sync::atomic::{AtomicU64, Ordering},
};
use actix_web::{error::BlockingError, web};
use hkdf::Hkdf;
use sha2::Sha256;
mod metrics;
pub use metrics::{metrics_from_opts, MetricError, Metrics};
// Header statics must be lower-case letters, numbers, and symbols per the RFC spec; this reduces the chance of error.
pub static X_LAST_MODIFIED: &str = "x-last-modified";
pub static X_WEAVE_TIMESTAMP: &str = "x-weave-timestamp";
@ -56,3 +69,43 @@ pub trait InternalError {
/// Constructs an internal error with the given error message.
fn internal_error(message: String) -> Self;
}
/// A threadpool on which callers can spawn non-CPU-bound tasks that block their thread (this is
/// mostly useful for running I/O tasks). `BlockingThreadpool` intentionally does not implement
/// `Clone`: `Arc`s are not used internally, so a `BlockingThreadpool` should be instantiated once
/// and shared by passing around `Arc<BlockingThreadpool>`s.
#[derive(Debug, Default)]
pub struct BlockingThreadpool {
spawned_tasks: AtomicU64,
}
impl BlockingThreadpool {
/// Runs a function as a task on the blocking threadpool.
///
/// WARNING: Spawning a blocking task through means other than calling this method will
/// result in inaccurate threadpool metrics being reported. If you want to spawn a task on
/// the blocking threadpool, you **must** use this function.
pub async fn spawn<F, T, E>(&self, f: F) -> Result<T, E>
where
F: FnOnce() -> Result<T, E> + Send + 'static,
T: Send + 'static,
E: fmt::Debug + Send + InternalError + 'static,
{
self.spawned_tasks.fetch_add(1, Ordering::Relaxed);
let result = web::block(f).await.map_err(|e| match e {
BlockingError::Error(e) => e,
BlockingError::Canceled => {
E::internal_error("Blocking threadpool operation canceled".to_owned())
}
});
self.spawned_tasks.fetch_sub(1, Ordering::Relaxed);
result
}
pub fn active_threads(&self) -> u64 {
self.spawned_tasks.load(Ordering::Relaxed)
}
}
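A short usage sketch for the threadpool now exported from syncserver-common, assuming the caller holds it in an Arc as the doc comment above prescribes; the MyError type is only an illustrative stand-in for any error implementing InternalError:

use std::sync::Arc;
use syncserver_common::{BlockingThreadpool, InternalError};

#[derive(Debug)]
struct MyError(String);

impl InternalError for MyError {
    fn internal_error(message: String) -> Self {
        MyError(message)
    }
}

async fn blocking_work(pool: Arc<BlockingThreadpool>) -> Result<u64, MyError> {
    // The closure runs on the blocking threadpool; spawned_tasks is incremented
    // for its duration, so active_threads() reflects in-flight work.
    pool.spawn(move || Ok(42)).await
}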


@ -2,18 +2,12 @@ use std::collections::HashMap;
use std::net::UdpSocket;
use std::time::Instant;
use actix_web::{dev::Payload, web::Data, FromRequest, HttpRequest};
use cadence::{
BufferedUdpMetricSink, Counted, Metric, NopMetricSink, QueuingMetricSink, StatsdClient, Timed,
};
use futures::future;
use futures::future::Ready;
use slog::{Key, Record, KV};
use crate::error::ApiError;
use crate::server::ServerState;
use crate::tokenserver;
use crate::web::tags::Taggable;
pub use cadence::MetricError;
#[derive(Debug, Clone)]
pub struct MetricTimer {
@ -58,55 +52,6 @@ impl Drop for Metrics {
}
}
impl FromRequest for Metrics {
type Config = ();
type Error = ();
type Future = Ready<Result<Self, Self::Error>>;
fn from_request(req: &HttpRequest, _: &mut Payload) -> Self::Future {
let client = {
let syncstorage_metrics = req
.app_data::<Data<ServerState>>()
.map(|state| state.metrics.clone());
let tokenserver_metrics = req
.app_data::<Data<tokenserver::ServerState>>()
.map(|state| state.metrics.clone());
syncstorage_metrics.or(tokenserver_metrics)
};
if client.is_none() {
warn!("⚠️ metric error: No App State");
}
future::ok(Metrics {
client: client.as_deref().cloned(),
tags: req.get_tags(),
timer: None,
})
}
}
impl From<&StatsdClient> for Metrics {
fn from(client: &StatsdClient) -> Self {
Metrics {
client: Some(client.clone()),
tags: HashMap::default(),
timer: None,
}
}
}
impl From<&ServerState> for Metrics {
fn from(state: &ServerState) -> Self {
Metrics {
client: Some(*state.metrics.clone()),
tags: HashMap::default(),
timer: None,
}
}
}
impl Metrics {
pub fn sink() -> StatsdClient {
StatsdClient::builder("", NopMetricSink).build()
@ -191,7 +136,7 @@ pub fn metrics_from_opts(
label: &str,
host: Option<&str>,
port: u16,
) -> Result<StatsdClient, ApiError> {
) -> Result<StatsdClient, MetricError> {
let builder = if let Some(statsd_host) = host {
let socket = UdpSocket::bind("0.0.0.0:0")?;
socket.set_nonblocking(true)?;
@ -210,6 +155,16 @@ pub fn metrics_from_opts(
.build())
}
impl From<&StatsdClient> for Metrics {
fn from(client: &StatsdClient) -> Self {
Metrics {
client: Some(client.clone()),
tags: HashMap::default(),
timer: None,
}
}
}
/// A newtype used solely to allow us to implement KV on HashMap.
struct MetricTags(HashMap<String, String>);


@ -4,25 +4,14 @@ version = "0.13.1"
edition = "2021"
[dependencies]
async-trait = "0.1.40"
backtrace = "0.3.61"
chrono = "0.4"
# Pin to 0.5 for now, to keep it under tokio 0.2 (issue977).
# Fix for #803 (deadpool#92) points to our fork for now
#deadpool = "0.5" # pin to 0.5
deadpool = { git = "https://github.com/mozilla-services/deadpool", branch = "deadpool-v0.5.2-issue92" }
diesel = { version = "1.4", features = ["mysql", "r2d2"] }
diesel_migrations = { version = "1.4.0", features = ["mysql"] }
# Some versions of OpenSSL 1.1.1 conflict with grpcio's built-in boringssl which can cause
# syncstorage to fail to either compile or start. In those cases, try
# `cargo build --features grpcio/openssl ...`
grpcio = { version = "0.9" }
hostname = "0.3.1"
http = "0.2.6"
futures = { version = "0.3", features = ["compat"] }
lazy_static = "1.4.0"
serde = "1.0"
serde_json = { version = "1.0", features = ["arbitrary_precision"] }
http = "0.2.6"
syncserver-common = { path = "../syncserver-common" }
thiserror = "1.0.26"
url = "2.1"


@ -2,153 +2,58 @@ use std::fmt;
use backtrace::Backtrace;
use http::StatusCode;
use syncserver_common::{from_error, impl_fmt_display, InternalError};
use syncserver_common::{from_error, impl_fmt_display};
use thiserror::Error;
/// Error specific to any MySQL database backend. These errors are not related to the syncstorage
/// or tokenserver application logic; rather, they are lower-level errors arising from diesel.
#[derive(Debug)]
pub struct DbError {
kind: DbErrorKind,
pub struct MysqlError {
kind: MysqlErrorKind,
pub status: StatusCode,
pub backtrace: Backtrace,
}
#[derive(Debug, Error)]
pub enum DbErrorKind {
enum MysqlErrorKind {
#[error("A database error occurred: {}", _0)]
DieselQuery(#[from] diesel::result::Error),
#[error("An error occurred while establishing a db connection: {}", _0)]
DieselConnection(#[from] diesel::result::ConnectionError),
#[error("A database error occurred: {}", _0)]
SpannerGrpc(#[from] grpcio::Error),
#[error("Spanner data load too large: {}", _0)]
SpannerTooLarge(String),
#[error("A database pool error occurred: {}", _0)]
Pool(diesel::r2d2::PoolError),
#[error("Error migrating the database: {}", _0)]
Migration(diesel_migrations::RunMigrationsError),
#[error("Specified collection does not exist")]
CollectionNotFound,
#[error("Specified bso does not exist")]
BsoNotFound,
#[error("Specified batch does not exist")]
BatchNotFound,
#[error("An attempt at a conflicting write")]
Conflict,
#[error("Database integrity error: {}", _0)]
Integrity(String),
#[error("Invalid database URL: {}", _0)]
InvalidUrl(String),
#[error("Unexpected error: {}", _0)]
Internal(String),
#[error("User over quota")]
Quota,
#[error("Connection expired")]
Expired,
}
impl DbError {
pub fn internal(msg: &str) -> Self {
DbErrorKind::Internal(msg.to_owned()).into()
}
pub fn is_sentry_event(&self) -> bool {
!matches!(&self.kind, DbErrorKind::Conflict)
}
pub fn metric_label(&self) -> Option<String> {
match &self.kind {
DbErrorKind::Conflict => Some("storage.conflict".to_owned()),
_ => None,
}
}
pub fn is_collection_not_found(&self) -> bool {
matches!(self.kind, DbErrorKind::CollectionNotFound)
}
pub fn is_conflict(&self) -> bool {
matches!(self.kind, DbErrorKind::Conflict)
}
pub fn is_quota(&self) -> bool {
matches!(self.kind, DbErrorKind::Quota)
}
pub fn is_bso_not_found(&self) -> bool {
matches!(self.kind, DbErrorKind::BsoNotFound)
}
pub fn is_batch_not_found(&self) -> bool {
matches!(self.kind, DbErrorKind::BatchNotFound)
}
}
impl From<DbErrorKind> for DbError {
fn from(kind: DbErrorKind) -> Self {
let status = match kind {
DbErrorKind::CollectionNotFound | DbErrorKind::BsoNotFound => StatusCode::NOT_FOUND,
// Matching the Python code here (a 400 vs 404)
DbErrorKind::BatchNotFound | DbErrorKind::SpannerTooLarge(_) => StatusCode::BAD_REQUEST,
// NOTE: the protocol specification states that we should return a
// "409 Conflict" response here, but clients currently do not
// handle these responses very well:
// * desktop bug: https://bugzilla.mozilla.org/show_bug.cgi?id=959034
// * android bug: https://bugzilla.mozilla.org/show_bug.cgi?id=959032
DbErrorKind::Conflict => StatusCode::SERVICE_UNAVAILABLE,
DbErrorKind::Quota => StatusCode::FORBIDDEN,
_ => StatusCode::INTERNAL_SERVER_ERROR,
};
impl From<MysqlErrorKind> for MysqlError {
fn from(kind: MysqlErrorKind) -> Self {
Self {
kind,
status,
status: StatusCode::INTERNAL_SERVER_ERROR,
backtrace: Backtrace::new(),
}
}
}
impl_fmt_display!(DbError, DbErrorKind);
impl_fmt_display!(MysqlError, MysqlErrorKind);
from_error!(diesel::result::Error, DbError, DbErrorKind::DieselQuery);
from_error!(
diesel::result::Error,
MysqlError,
MysqlErrorKind::DieselQuery
);
from_error!(
diesel::result::ConnectionError,
DbError,
DbErrorKind::DieselConnection
MysqlError,
MysqlErrorKind::DieselConnection
);
from_error!(grpcio::Error, DbError, |inner: grpcio::Error| {
// Convert ABORTED (typically due to a transaction abort) into 503s
match inner {
grpcio::Error::RpcFailure(ref status) | grpcio::Error::RpcFinished(Some(ref status))
if status.code() == grpcio::RpcStatusCode::ABORTED =>
{
DbErrorKind::Conflict
}
_ => DbErrorKind::SpannerGrpc(inner),
}
});
from_error!(diesel::r2d2::PoolError, DbError, DbErrorKind::Pool);
from_error!(diesel::r2d2::PoolError, MysqlError, MysqlErrorKind::Pool);
from_error!(
diesel_migrations::RunMigrationsError,
DbError,
DbErrorKind::Migration
MysqlError,
MysqlErrorKind::Migration
);
impl InternalError for DbError {
fn internal_error(message: String) -> Self {
DbErrorKind::Internal(message).into()
}
}
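The From conversions defined above are what let backend code propagate diesel failures with the ? operator; a tiny hypothetical helper illustrating that flow (the function itself is not part of the crate):

use diesel::{mysql::MysqlConnection, Connection};

// Any diesel::result::Error raised here becomes a MysqlError through the
// From impl generated by from_error! above.
fn ping(conn: &MysqlConnection) -> Result<(), MysqlError> {
    conn.execute("SELECT 1")?;
    Ok(())
}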


@ -1,78 +1,18 @@
pub mod error;
pub mod params;
pub mod results;
pub mod util;
pub mod test;
use std::fmt::Debug;
use async_trait::async_trait;
use futures::future::{self, LocalBoxFuture, TryFutureExt};
use lazy_static::lazy_static;
use serde::Deserialize;
use futures::future::LocalBoxFuture;
use error::DbError;
use util::SyncTimestamp;
lazy_static! {
/// For efficiency, it's possible to use fixed pre-determined IDs for
/// common collection names. This is the canonical list of such
/// names. Non-standard collections will be allocated IDs starting
/// from the highest ID in this collection.
pub static ref STD_COLLS: Vec<(i32, &'static str)> = {
vec![
(1, "clients"),
(2, "crypto"),
(3, "forms"),
(4, "history"),
(5, "keys"),
(6, "meta"),
(7, "bookmarks"),
(8, "prefs"),
(9, "tabs"),
(10, "passwords"),
(11, "addons"),
(12, "addresses"),
(13, "creditcards"),
]
};
}
/// Rough guesstimate of the maximum reasonable life span of a batch
pub const BATCH_LIFETIME: i64 = 2 * 60 * 60 * 1000; // 2 hours, in milliseconds
/// The ttl to use for rows that are never supposed to expire (in seconds)
pub const DEFAULT_BSO_TTL: u32 = 2_100_000_000;
/// Non-standard collections will be allocated IDs beginning with this value
pub const FIRST_CUSTOM_COLLECTION_ID: i32 = 101;
pub type DbFuture<'a, T> = LocalBoxFuture<'a, Result<T, DbError>>;
#[async_trait]
pub trait DbPool: Sync + Send + Debug + GetPoolState {
async fn get(&self) -> Result<Box<dyn Db<'_>>, DbError>;
fn validate_batch_id(&self, params: params::ValidateBatchId) -> Result<(), DbError>;
fn box_clone(&self) -> Box<dyn DbPool>;
}
impl Clone for Box<dyn DbPool> {
fn clone(&self) -> Box<dyn DbPool> {
self.box_clone()
}
}
pub type DbFuture<'a, T, E> = LocalBoxFuture<'a, Result<T, E>>;
/// A trait to be implemented by database pool data structures. It provides an interface to
/// derive the current state of the pool, as represented by the `PoolState` struct.
pub trait GetPoolState {
fn state(&self) -> PoolState;
}
impl GetPoolState for Box<dyn DbPool> {
fn state(&self) -> PoolState {
(**self).state()
}
}
#[derive(Debug, Default)]
/// A mockable r2d2::State
pub struct PoolState {
@ -97,212 +37,18 @@ impl From<deadpool::Status> for PoolState {
}
}
pub trait Db<'a>: Debug + 'a {
fn lock_for_read(&self, params: params::LockCollection) -> DbFuture<'_, ()>;
fn lock_for_write(&self, params: params::LockCollection) -> DbFuture<'_, ()>;
fn begin(&self, for_write: bool) -> DbFuture<'_, ()>;
fn commit(&self) -> DbFuture<'_, ()>;
fn rollback(&self) -> DbFuture<'_, ()>;
fn get_collection_timestamps(
&self,
params: params::GetCollectionTimestamps,
) -> DbFuture<'_, results::GetCollectionTimestamps>;
fn get_collection_timestamp(
&self,
params: params::GetCollectionTimestamp,
) -> DbFuture<'_, results::GetCollectionTimestamp>;
fn get_collection_counts(
&self,
params: params::GetCollectionCounts,
) -> DbFuture<'_, results::GetCollectionCounts>;
fn get_collection_usage(
&self,
params: params::GetCollectionUsage,
) -> DbFuture<'_, results::GetCollectionUsage>;
fn get_storage_timestamp(
&self,
params: params::GetStorageTimestamp,
) -> DbFuture<'_, results::GetStorageTimestamp>;
fn get_storage_usage(
&self,
params: params::GetStorageUsage,
) -> DbFuture<'_, results::GetStorageUsage>;
fn get_quota_usage(
&self,
params: params::GetQuotaUsage,
) -> DbFuture<'_, results::GetQuotaUsage>;
fn delete_storage(&self, params: params::DeleteStorage)
-> DbFuture<'_, results::DeleteStorage>;
fn delete_collection(
&self,
params: params::DeleteCollection,
) -> DbFuture<'_, results::DeleteCollection>;
fn delete_bsos(&self, params: params::DeleteBsos) -> DbFuture<'_, results::DeleteBsos>;
fn get_bsos(&self, params: params::GetBsos) -> DbFuture<'_, results::GetBsos>;
fn get_bso_ids(&self, params: params::GetBsos) -> DbFuture<'_, results::GetBsoIds>;
fn post_bsos(&self, params: params::PostBsos) -> DbFuture<'_, results::PostBsos>;
fn delete_bso(&self, params: params::DeleteBso) -> DbFuture<'_, results::DeleteBso>;
fn get_bso(&self, params: params::GetBso) -> DbFuture<'_, Option<results::GetBso>>;
fn get_bso_timestamp(
&self,
params: params::GetBsoTimestamp,
) -> DbFuture<'_, results::GetBsoTimestamp>;
fn put_bso(&self, params: params::PutBso) -> DbFuture<'_, results::PutBso>;
fn create_batch(&self, params: params::CreateBatch) -> DbFuture<'_, results::CreateBatch>;
fn validate_batch(&self, params: params::ValidateBatch)
-> DbFuture<'_, results::ValidateBatch>;
fn append_to_batch(
&self,
params: params::AppendToBatch,
) -> DbFuture<'_, results::AppendToBatch>;
fn get_batch(&self, params: params::GetBatch) -> DbFuture<'_, Option<results::GetBatch>>;
fn commit_batch(&self, params: params::CommitBatch) -> DbFuture<'_, results::CommitBatch>;
fn box_clone(&self) -> Box<dyn Db<'a>>;
fn check(&self) -> DbFuture<'_, results::Check>;
fn get_connection_info(&self) -> results::ConnectionInfo;
/// Retrieve the timestamp for an item/collection
///
/// Modeled on the Python `get_resource_timestamp` function.
fn extract_resource(
&self,
user_id: UserIdentifier,
collection: Option<String>,
bso: Option<String>,
) -> DbFuture<'_, SyncTimestamp> {
// If there's no collection, we return the overall storage timestamp
let collection = match collection {
Some(collection) => collection,
None => return Box::pin(self.get_storage_timestamp(user_id)),
};
// If there's no bso, return the collection
let bso = match bso {
Some(bso) => bso,
None => {
return Box::pin(
self.get_collection_timestamp(params::GetCollectionTimestamp {
user_id,
collection,
})
.or_else(|e| {
if e.is_collection_not_found() {
future::ok(SyncTimestamp::from_seconds(0f64))
} else {
future::err(e)
}
}),
)
}
};
Box::pin(
self.get_bso_timestamp(params::GetBsoTimestamp {
user_id,
collection,
id: bso,
})
.or_else(|e| {
if e.is_collection_not_found() {
future::ok(SyncTimestamp::from_seconds(0f64))
} else {
future::err(e)
}
}),
)
}
/// Internal methods used by the db tests
fn get_collection_id(&self, name: String) -> DbFuture<'_, i32>;
fn create_collection(&self, name: String) -> DbFuture<'_, i32>;
fn update_collection(&self, params: params::UpdateCollection) -> DbFuture<'_, SyncTimestamp>;
fn timestamp(&self) -> SyncTimestamp;
fn set_timestamp(&self, timestamp: SyncTimestamp);
fn delete_batch(&self, params: params::DeleteBatch) -> DbFuture<'_, ()>;
fn clear_coll_cache(&self) -> DbFuture<'_, ()>;
fn set_quota(&mut self, enabled: bool, limit: usize, enforce: bool);
}
impl<'a> Clone for Box<dyn Db<'a>> {
fn clone(&self) -> Box<dyn Db<'a>> {
self.box_clone()
}
}
#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Copy)]
#[serde(rename_all = "lowercase")]
pub enum Sorting {
None,
Newest,
Oldest,
Index,
}
impl Default for Sorting {
fn default() -> Self {
Sorting::None
}
}
#[derive(Clone, Debug, Default, Eq, Hash, PartialEq)]
pub struct UserIdentifier {
/// For MySQL database backends as the primary key
pub legacy_id: u64,
/// For NoSQL database backends that require randomly distributed primary keys
pub fxa_uid: String,
pub fxa_kid: String,
}
impl UserIdentifier {
/// Create a new legacy id user identifier
pub fn new_legacy(user_id: u64) -> Self {
Self {
legacy_id: user_id,
..Default::default()
}
}
}
impl From<u32> for UserIdentifier {
fn from(val: u32) -> Self {
Self {
legacy_id: val.into(),
..Default::default()
}
}
}
#[macro_export]
macro_rules! sync_db_method {
($name:ident, $sync_name:ident, $type:ident) => {
sync_db_method!($name, $sync_name, $type, results::$type);
};
($name:ident, $sync_name:ident, $type:ident, $result:ty) => {
fn $name(&self, params: params::$type) -> DbFuture<'_, $result, DbError> {
let db = self.clone();
Box::pin(
self.blocking_threadpool
.spawn(move || db.$sync_name(params)),
)
}
};
}
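A hypothetical invocation of the macro, assuming a backend Db implementation with a synchronous counterpart method and a blocking_threadpool field (the surrounding impl and method names are illustrative, not lines from this commit):

impl<'a> Db<'a> for MysqlDb {
    // Expands to a wrapper that clones the handle and runs
    // get_collection_timestamps_sync on the shared BlockingThreadpool.
    sync_db_method!(
        get_collection_timestamps,
        get_collection_timestamps_sync,
        GetCollectionTimestamps
    );

    // ... remaining trait methods ...
}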


@ -0,0 +1,14 @@
use diesel::{
mysql::MysqlConnection,
r2d2::{CustomizeConnection, Error as PoolError},
Connection,
};
#[derive(Debug)]
pub struct TestTransactionCustomizer;
impl CustomizeConnection<MysqlConnection, PoolError> for TestTransactionCustomizer {
fn on_acquire(&self, conn: &mut MysqlConnection) -> Result<(), PoolError> {
conn.begin_test_transaction().map_err(PoolError::QueryError)
}
}
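A sketch of how the customizer would typically be attached to an r2d2 pool in test builds, so each checked-out connection stays inside a transaction that is never committed; the helper below is illustrative, not part of this commit:

use diesel::{
    mysql::MysqlConnection,
    r2d2::{ConnectionManager, Pool},
};

fn test_pool(database_url: &str) -> Pool<ConnectionManager<MysqlConnection>> {
    let manager = ConnectionManager::<MysqlConnection>::new(database_url);
    Pool::builder()
        // begin_test_transaction() runs on every acquire, keeping test runs
        // isolated from each other and leaving no rows behind.
        .connection_customizer(Box::new(TestTransactionCustomizer))
        .build(manager)
        .expect("failed to build test pool")
}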


@ -13,9 +13,9 @@ use syncstorage_settings::Settings as SyncstorageSettings;
use tokenserver_settings::Settings as TokenserverSettings;
use url::Url;
pub static PREFIX: &str = "sync";
static PREFIX: &str = "sync";
#[derive(Clone, Deserialize)]
#[derive(Clone, Debug, Deserialize)]
#[serde(default)]
pub struct Settings {
pub port: u16,
@ -134,6 +134,7 @@ impl Settings {
}
}
#[cfg(debug_assertions)]
pub fn test_settings() -> Self {
let mut settings =
Self::with_env_and_config_file(None).expect("Could not get Settings in test_settings");


@ -15,44 +15,21 @@ actix-http = "2"
actix-web = "3"
actix-rt = "1" # Pin to 1.0, due to dependencies on Tokio
actix-cors = "0.5"
actix-service = "1.0.6"
async-trait = "0.1.40"
backtrace = "0.3.61"
base64 = "0.13"
bb8 = "0.4.1" # pin to 0.4 due to dependencies on Tokio
bytes = "1.0"
cadence = "0.26"
chrono = "0.4"
# Pin to 0.5 for now, to keep it under tokio 0.2 (issue977).
# Fix for #803 (deadpool#92) points to our fork for now
#deadpool = "0.5" # pin to 0.5
deadpool = { git = "https://github.com/mozilla-services/deadpool", branch = "deadpool-v0.5.2-issue92" }
diesel = { version = "1.4", features = ["mysql", "r2d2"] }
diesel_logger = "0.1.1"
diesel_migrations = { version = "1.4.0", features = ["mysql"] }
docopt = "1.1.0"
dyn-clone = "1.0.4"
env_logger = "0.9"
futures = { version = "0.3", features = ["compat"] }
google-cloud-rust-raw = "0.11.0"
# Some versions of OpenSSL 1.1.1 conflict with grpcio's built-in boringssl which can cause
# syncserver to fail to either compile or start. In those cases, try
# `cargo build --features grpcio/openssl ...`
grpcio = { version = "0.9" }
hostname = "0.3.1"
lazy_static = "1.4.0"
hawk = "3.2"
hex = "0.4.3"
hostname = "0.3.1"
hmac = "0.11"
http = "0.2.5"
log = { version = "0.4", features = [
"max_level_debug",
"release_max_level_info",
] }
mime = "0.3"
num_cpus = "1"
# must match what's used by googleapis-raw
protobuf = "2.20.0"
pyo3 = { version = "0.14", features = ["auto-initialize"] }
rand = "0.8"
regex = "1.4"
@ -65,7 +42,6 @@ sentry-backtrace = "0.19"
serde = "1.0"
serde_derive = "1.0"
serde_json = { version = "1.0", features = ["arbitrary_precision"] }
scheduled-thread-pool = "0.2"
sha2 = "0.9"
slog = { version = "2.5", features = [
"max_level_info",
@ -78,19 +54,19 @@ slog-mozlog-json = "0.1"
slog-scope = "4.3"
slog-stdlog = "4.1"
slog-term = "2.6"
syncserver-settings = { path = "../syncserver-settings" }
syncserver-db-common = { path = "../syncserver-db-common" }
syncserver-common = { path = "../syncserver-common" }
syncserver-db-common = { path = "../syncserver-db-common" }
syncserver-settings = { path = "../syncserver-settings" }
syncstorage-db = { path = "../syncstorage-db" }
syncstorage-settings = { path = "../syncstorage-settings" }
time = "^0.3"
thiserror = "1.0.26"
tokenserver-common = { path = "../tokenserver-common" }
tokenserver-db = { path = "../tokenserver-db" }
tokenserver-settings = { path = "../tokenserver-settings" }
# pinning to 0.2.4 due to high number of dependencies (actix, bb8, deadpool, etc.)
tokio = { version = "0.2.4", features = ["macros", "sync"] }
url = "2.1"
urlencoding = "2.1"
uuid = { version = "0.8.2", features = ["serde", "v4"] }
validator = "0.14"
validator_derive = "0.14"
woothee = "0.11"
@ -99,7 +75,5 @@ woothee = "0.11"
mockito = "0.30.0"
[features]
default = ["syncstorage-db/mysql"]
no_auth = []
[[bin]]
name = "purge_ttl"


@ -1,12 +0,0 @@
#[macro_use]
mod batch;
mod diesel_ext;
pub mod models;
pub mod pool;
mod schema;
#[cfg(test)]
mod test;
pub use self::pool::MysqlDbPool;
#[cfg(test)]
pub use self::test::TestTransactionCustomizer;


@ -1,6 +0,0 @@
// mod bb8;
mod deadpool;
mod session;
pub use self::deadpool::{Conn, SpannerSessionManager};
pub use self::session::SpannerSession;


@ -1,19 +0,0 @@
use std::time::SystemTime;
#[macro_use]
mod macros;
mod batch;
pub mod manager;
pub mod models;
pub mod pool;
mod support;
pub use self::pool::SpannerDbPool;
pub fn now() -> i64 {
SystemTime::now()
.duration_since(SystemTime::UNIX_EPOCH)
.unwrap_or_default()
.as_secs() as i64
}


@ -21,8 +21,9 @@ use serde::{
Serialize,
};
use syncserver_common::{from_error, impl_fmt_display, ReportableError};
use syncserver_db_common::error::DbError;
use syncserver_common::{from_error, impl_fmt_display, MetricError, ReportableError};
use syncstorage_db::{DbError, DbErrorIntrospect};
use thiserror::Error;
use crate::web::error::{HawkError, ValidationError};
@ -57,7 +58,7 @@ pub const RETRY_AFTER: u8 = 10;
#[derive(Debug)]
pub struct ApiError {
kind: ApiErrorKind,
pub(crate) backtrace: Backtrace,
pub(crate) backtrace: Box<Backtrace>,
status: StatusCode,
}
@ -87,8 +88,8 @@ pub enum ApiErrorKind {
impl ApiErrorKind {
pub fn metric_label(&self) -> Option<String> {
match self {
ApiErrorKind::Db(err) => err.metric_label(),
ApiErrorKind::Hawk(err) => err.metric_label(),
ApiErrorKind::Db(err) => err.metric_label(),
ApiErrorKind::Validation(err) => err.metric_label(),
_ => None,
}
@ -96,6 +97,15 @@ impl ApiErrorKind {
}
impl ApiError {
pub fn is_sentry_event(&self) -> bool {
// Should we report this error to sentry?
self.status.is_server_error()
&& match &self.kind {
ApiErrorKind::Db(dbe) => dbe.is_sentry_event(),
_ => self.kind.metric_label().is_none(),
}
}
fn weave_error_code(&self) -> WeaveError {
match &self.kind {
ApiErrorKind::Validation(ver) => ver.weave_error_code(),
@ -148,8 +158,8 @@ impl From<ApiError> for HttpResponse {
}
}
impl From<cadence::MetricError> for ApiError {
fn from(inner: cadence::MetricError) -> Self {
impl From<MetricError> for ApiError {
fn from(inner: MetricError) -> Self {
ApiErrorKind::Internal(inner.to_string()).into()
}
}
@ -173,7 +183,7 @@ impl From<ApiErrorKind> for ApiError {
Self {
kind,
backtrace: Backtrace::new(),
backtrace: Box::new(Backtrace::new()),
status,
}
}


@ -1,10 +1,6 @@
#![warn(rust_2018_idioms)]
#![allow(clippy::try_err)]
#[macro_use]
extern crate diesel;
#[macro_use]
extern crate diesel_migrations;
#[macro_use]
extern crate slog_scope;
#[macro_use]
@ -12,7 +8,6 @@ extern crate validator_derive;
#[macro_use]
pub mod error;
pub mod db;
pub mod logging;
pub mod server;
pub mod tokenserver;


@ -1,33 +1,27 @@
//! Main application server
use std::{
env, fmt,
sync::{
atomic::{AtomicU64, Ordering},
Arc,
},
time::Duration,
};
use std::{env, sync::Arc, time::Duration};
use actix_cors::Cors;
use actix_web::{
dev,
error::BlockingError,
dev::{self, Payload},
http::StatusCode,
http::{header::LOCATION, Method},
middleware::errhandlers::ErrorHandlers,
web, App, HttpRequest, HttpResponse, HttpServer,
web::{self, Data},
App, FromRequest, HttpRequest, HttpResponse, HttpServer,
};
use cadence::{Gauged, StatsdClient};
use syncserver_common::InternalError;
use syncserver_db_common::{error::DbError, DbPool, GetPoolState, PoolState};
use futures::future::{self, Ready};
use syncserver_common::{BlockingThreadpool, Metrics};
use syncserver_db_common::{GetPoolState, PoolState};
use syncserver_settings::Settings;
use syncstorage_db::{DbError, DbPool, DbPoolImpl};
use syncstorage_settings::{Deadman, ServerLimits};
use tokio::{sync::RwLock, time};
use crate::db::pool_from_settings;
use crate::error::ApiError;
use crate::server::metrics::Metrics;
use crate::server::tags::Taggable;
use crate::tokenserver;
use crate::web::{handlers, middleware};
@ -38,7 +32,7 @@ pub const SYNC_DOCS_URL: &str =
const MYSQL_UID_REGEX: &str = r"[0-9]{1,10}";
const SYNC_VERSION_PATH: &str = "1.5";
pub mod metrics;
pub mod tags;
#[cfg(test)]
mod test;
pub mod user_agent;
@ -46,7 +40,7 @@ pub mod user_agent;
/// This is the global HTTP state object that will be made available to all
/// HTTP API calls.
pub struct ServerState {
pub db_pool: Box<dyn DbPool>,
pub db_pool: Box<dyn DbPool<Error = DbError>>,
/// Server-enforced limits for request payloads.
pub limits: Arc<ServerLimits>,
@ -249,7 +243,7 @@ macro_rules! build_app_without_syncstorage {
impl Server {
pub async fn with_settings(settings: Settings) -> Result<dev::Server, ApiError> {
let settings_copy = settings.clone();
let metrics = metrics::metrics_from_opts(
let metrics = syncserver_common::metrics_from_opts(
&settings.syncstorage.statsd_label,
settings.statsd_host.as_deref(),
settings.statsd_port,
@ -258,12 +252,11 @@ impl Server {
let port = settings.port;
let deadman = Arc::new(RwLock::new(Deadman::from(&settings.syncstorage)));
let blocking_threadpool = Arc::new(BlockingThreadpool::default());
let db_pool = pool_from_settings(
let db_pool = DbPoolImpl::new(
&settings.syncstorage,
&Metrics::from(&metrics),
blocking_threadpool.clone(),
)
.await?;
)?;
let limits = Arc::new(settings.syncstorage.limits);
let limits_json =
serde_json::to_string(&*limits).expect("ServerLimits failed to serialize");
@ -273,12 +266,12 @@ impl Server {
let tokenserver_state = if settings.tokenserver.enabled {
let state = tokenserver::ServerState::from_settings(
&settings.tokenserver,
metrics::metrics_from_opts(
syncserver_common::metrics_from_opts(
&settings.tokenserver.statsd_label,
settings.statsd_host.as_deref(),
settings.statsd_port,
)?,
blocking_threadpool.clone(),
blocking_threadpool,
)?;
Some(state)
@ -290,7 +283,7 @@ impl Server {
Duration::from_secs(10),
metrics.clone(),
db_pool.clone(),
blocking_threadpool.clone(),
blocking_threadpool,
)?;
None
@ -298,7 +291,7 @@ impl Server {
let mut server = HttpServer::new(move || {
let syncstorage_state = ServerState {
db_pool: db_pool.clone(),
db_pool: Box::new(db_pool.clone()),
limits: Arc::clone(&limits),
limits_json: limits_json.clone(),
metrics: Box::new(metrics.clone()),
@ -337,7 +330,7 @@ impl Server {
let blocking_threadpool = Arc::new(BlockingThreadpool::default());
let tokenserver_state = tokenserver::ServerState::from_settings(
&settings.tokenserver,
metrics::metrics_from_opts(
syncserver_common::metrics_from_opts(
&settings.tokenserver.statsd_label,
settings.statsd_host.as_deref(),
settings.statsd_port,
@ -405,6 +398,37 @@ fn build_cors(settings: &Settings) -> Cors {
cors
}
pub struct MetricsWrapper(pub Metrics);
impl FromRequest for MetricsWrapper {
type Config = ();
type Error = ();
type Future = Ready<Result<Self, Self::Error>>;
fn from_request(req: &HttpRequest, _: &mut Payload) -> Self::Future {
let client = {
let syncstorage_metrics = req
.app_data::<Data<ServerState>>()
.map(|state| state.metrics.clone());
let tokenserver_metrics = req
.app_data::<Data<tokenserver::ServerState>>()
.map(|state| state.metrics.clone());
syncstorage_metrics.or(tokenserver_metrics)
};
if client.is_none() {
warn!("⚠️ metric error: No App State");
}
future::ok(MetricsWrapper(Metrics {
client: client.as_deref().cloned(),
tags: req.get_tags(),
timer: None,
}))
}
}
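Since MetricsWrapper implements FromRequest, handlers can take it as an extractor in place of the old Metrics extractor removed from metrics.rs above; a minimal hypothetical handler, assuming the crate's Metrics::incr helper (the route body and metric name are illustrative):

async fn status(MetricsWrapper(metrics): MetricsWrapper) -> HttpResponse {
    // Counts a hit against whichever app state (syncstorage or tokenserver)
    // supplied the StatsdClient.
    metrics.incr("status.hit");
    HttpResponse::Ok().finish()
}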
/// Emit database pool and threadpool metrics periodically
fn spawn_metric_periodic_reporter<T: GetPoolState + Send + 'static>(
interval: Duration,
@ -453,43 +477,3 @@ fn spawn_metric_periodic_reporter<T: GetPoolState + Send + 'static>(
Ok(())
}
/// A threadpool on which callers can spawn non-CPU-bound tasks that block their thread (this is
/// mostly useful for running I/O tasks). `BlockingThreadpool` intentionally does not implement
/// `Clone`: `Arc`s are not used internally, so a `BlockingThreadpool` should be instantiated once
/// and shared by passing around `Arc<BlockingThreadpool>`s.
#[derive(Debug, Default)]
pub struct BlockingThreadpool {
spawned_tasks: AtomicU64,
}
impl BlockingThreadpool {
/// Runs a function as a task on the blocking threadpool.
///
/// WARNING: Spawning a blocking task through means other than calling this method will
/// result in inaccurate threadpool metrics being reported. If you want to spawn a task on
/// the blocking threadpool, you **must** use this function.
pub async fn spawn<F, T, E>(&self, f: F) -> Result<T, E>
where
F: FnOnce() -> Result<T, E> + Send + 'static,
T: Send + 'static,
E: fmt::Debug + Send + InternalError + 'static,
{
self.spawned_tasks.fetch_add(1, Ordering::Relaxed);
let result = web::block(f).await.map_err(|e| match e {
BlockingError::Error(e) => e,
BlockingError::Canceled => {
E::internal_error("Blocking threadpool operation canceled".to_owned())
}
});
self.spawned_tasks.fetch_sub(1, Ordering::Relaxed);
result
}
fn active_threads(&self) -> u64 {
self.spawned_tasks.load(Ordering::Relaxed)
}
}


@ -1,4 +1,4 @@
use std::collections::{BTreeMap, HashMap};
use std::collections::HashMap;
use actix_web::HttpMessage;
@ -78,14 +78,3 @@ struct Tags(HashMap<String, String>);
// "Extras" are pieces of metadata with high cardinality to be included in Sentry errors.
#[derive(Default)]
struct Extras(HashMap<String, String>);
impl From<Tags> for BTreeMap<String, String> {
fn from(tags: Tags) -> BTreeMap<String, String> {
let mut result = BTreeMap::new();
for (k, v) in tags.0 {
result.insert(k.clone(), v.clone());
}
result
}
}


@ -16,17 +16,16 @@ use serde::de::DeserializeOwned;
use serde_json::json;
use sha2::Sha256;
use syncserver_common::{self, X_LAST_MODIFIED};
use syncserver_db_common::{
use syncserver_settings::{Secrets, Settings};
use syncstorage_db::{
params,
results::{DeleteBso, GetBso, PostBsos, PutBso},
util::SyncTimestamp,
DbPoolImpl, SyncTimestamp,
};
use syncserver_settings::{Secrets, Settings};
use syncstorage_settings::ServerLimits;
use super::*;
use crate::build_app;
use crate::db::pool_from_settings;
use crate::tokenserver;
use crate::web::{auth::HawkPayload, extractors::BsoBody};
@ -69,13 +68,14 @@ async fn get_test_state(settings: &Settings) -> ServerState {
let blocking_threadpool = Arc::new(BlockingThreadpool::default());
ServerState {
db_pool: pool_from_settings(
db_pool: Box::new(
DbPoolImpl::new(
&settings.syncstorage,
&Metrics::from(&metrics),
blocking_threadpool.clone(),
blocking_threadpool,
)
.await
.expect("Could not get db_pool in get_test_state"),
),
limits: Arc::clone(&SERVER_LIMITS),
limits_json: serde_json::to_string(&**SERVER_LIMITS).unwrap(),
metrics: Box::new(metrics),


@ -1,7 +1,7 @@
use async_trait::async_trait;
use reqwest::{Client as ReqwestClient, StatusCode};
use serde::{de::Deserializer, Deserialize, Serialize};
use tokenserver_common::error::{ErrorLocation, TokenType, TokenserverError};
use tokenserver_common::{ErrorLocation, TokenType, TokenserverError};
use tokenserver_settings::Settings;
use super::VerifyToken;


@ -10,7 +10,7 @@ use pyo3::{
types::IntoPyDict,
};
use serde::{Deserialize, Serialize};
use tokenserver_common::error::TokenserverError;
use tokenserver_common::TokenserverError;
/// Represents the origin of the token used by Sync clients to access their data.
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]


@ -5,12 +5,12 @@ use pyo3::{
};
use serde::{Deserialize, Serialize};
use serde_json;
use tokenserver_common::error::TokenserverError;
use syncserver_common::BlockingThreadpool;
use tokenserver_common::TokenserverError;
use tokenserver_settings::{Jwk, Settings};
use tokio::time;
use super::VerifyToken;
use crate::server::BlockingThreadpool;
use std::{sync::Arc, time::Duration};


@ -1,5 +0,0 @@
pub mod mock;
pub mod models;
pub mod params;
pub mod pool;
pub mod results;


@ -21,17 +21,11 @@ use regex::Regex;
use serde::Deserialize;
use sha2::Sha256;
use syncserver_settings::Secrets;
use tokenserver_common::{
error::{ErrorLocation, TokenserverError},
NodeType,
};
use tokenserver_common::{ErrorLocation, NodeType, TokenserverError};
use tokenserver_db::{params, results, Db, DbPool};
use super::{
db::{models::Db, params, pool::DbPool, results},
LogItemsMutator, ServerState, TokenserverMetrics,
};
use crate::server::metrics::Metrics;
use crate::web::tags::Taggable;
use super::{LogItemsMutator, ServerState, TokenserverMetrics};
use crate::server::{tags::Taggable, MetricsWrapper};
lazy_static! {
static ref CLIENT_STATE_REGEX: Regex = Regex::new("^[a-zA-Z0-9._-]{1,32}$").unwrap();
@ -218,7 +212,7 @@ impl FromRequest for TokenserverRequest {
hash_device_id(&hashed_fxa_uid, device_id, fxa_metrics_hash_secret)
};
let db = <Box<dyn Db>>::extract(&req).await?;
let DbWrapper(db) = DbWrapper::extract(&req).await?;
let service_id = {
let path = req.match_info();
@ -312,7 +306,10 @@ struct QueryParams {
pub duration: Option<String>,
}
impl FromRequest for Box<dyn Db> {
/// A local "newtype" that wraps `Box<dyn Db>` so we can implement `FromRequest`.
pub struct DbWrapper(pub Box<dyn Db>);
impl FromRequest for DbWrapper {
type Config = ();
type Error = TokenserverError;
type Future = LocalBoxFuture<'static, Result<Self, Self::Error>>;
@ -321,10 +318,12 @@ impl FromRequest for Box<dyn Db> {
let req = req.clone();
Box::pin(async move {
<Box<dyn DbPool>>::extract(&req)
DbPoolWrapper::extract(&req)
.await?
.0
.get()
.await
.map(Self)
.map_err(|e| TokenserverError {
context: format!("Couldn't acquire a database connection: {}", e),
..TokenserverError::internal_error()
@ -333,7 +332,9 @@ impl FromRequest for Box<dyn Db> {
}
}
impl FromRequest for Box<dyn DbPool> {
struct DbPoolWrapper(Box<dyn DbPool>);
impl FromRequest for DbPoolWrapper {
type Config = ();
type Error = TokenserverError;
type Future = LocalBoxFuture<'static, Result<Self, Self::Error>>;
@ -344,7 +345,7 @@ impl FromRequest for Box<dyn DbPool> {
Box::pin(async move {
let state = get_server_state(&req)?.as_ref();
Ok(state.db_pool.clone())
Ok(Self(state.db_pool.clone()))
})
}
}
@ -648,8 +649,12 @@ impl FromRequest for TokenserverMetrics {
fn from_request(req: &HttpRequest, _payload: &mut Payload) -> Self::Future {
let req = req.clone();
// `Result::unwrap` is safe to use here, since Metrics::extract can never fail
Box::pin(async move { Ok(TokenserverMetrics(Metrics::extract(&req).await.unwrap())) })
// `Result::unwrap` is safe to use here, since MetricsWrapper::extract can never fail
Box::pin(async move {
Ok(TokenserverMetrics(
MetricsWrapper::extract(&req).await.unwrap().0,
))
})
}
}
@ -706,12 +711,11 @@ mod tests {
use serde_json;
use syncserver_settings::Settings as GlobalSettings;
use syncstorage_settings::ServerLimits;
use tokenserver_db::mock::MockDbPool as MockTokenserverPool;
use tokenserver_settings::Settings as TokenserverSettings;
use crate::server::metrics;
use crate::tokenserver::{
auth::{browserid, oauth, MockVerifier},
db::mock::MockDbPool as MockTokenserverPool,
ServerState,
};
@ -1339,7 +1343,7 @@ mod tests {
node_capacity_release_rate: None,
node_type: NodeType::default(),
metrics: Box::new(
metrics::metrics_from_opts(
syncserver_common::metrics_from_opts(
&tokenserver_settings.statsd_label,
syncserver_settings.statsd_host.as_deref(),
syncserver_settings.statsd_port,


@ -6,15 +6,15 @@ use std::{
use actix_web::{http::StatusCode, Error, HttpResponse};
use serde::Serialize;
use serde_json::Value;
use tokenserver_common::{error::TokenserverError, NodeType};
use tokenserver_common::{NodeType, TokenserverError};
use tokenserver_db::{
params::{GetNodeId, PostUser, PutUser, ReplaceUsers},
Db,
};
use super::{
auth::{MakeTokenPlaintext, Tokenlib, TokenserverOrigin},
db::{
models::Db,
params::{GetNodeId, PostUser, PutUser, ReplaceUsers},
},
extractors::TokenserverRequest,
extractors::{DbWrapper, TokenserverRequest},
TokenserverMetrics,
};
@ -32,7 +32,7 @@ pub struct TokenserverResult {
pub async fn get_tokenserver_result(
req: TokenserverRequest,
db: Box<dyn Db>,
DbWrapper(db): DbWrapper,
TokenserverMetrics(mut metrics): TokenserverMetrics,
) -> Result<HttpResponse, TokenserverError> {
let updates = update_user(&req, db).await?;
@ -242,7 +242,7 @@ async fn update_user(
}
}
pub async fn heartbeat(db: Box<dyn Db>) -> Result<HttpResponse, Error> {
pub async fn heartbeat(DbWrapper(db): DbWrapper) -> Result<HttpResponse, Error> {
let mut checklist = HashMap::new();
checklist.insert(
"version".to_owned(),


@ -1,5 +1,4 @@
pub mod auth;
pub mod db;
pub mod extractors;
pub mod handlers;
pub mod logging;
@ -10,18 +9,16 @@ use serde::{
ser::{SerializeMap, Serializer},
Serialize,
};
use syncserver_common::{BlockingThreadpool, Metrics};
use tokenserver_common::NodeType;
use tokenserver_db::{params, DbPool, TokenserverPool};
use tokenserver_settings::Settings;
use crate::{
error::ApiError,
server::{metrics::Metrics, user_agent, BlockingThreadpool},
error::{ApiError, ApiErrorKind},
server::user_agent,
};
use auth::{browserid, oauth, VerifyToken};
use db::{
params,
pool::{DbPool, TokenserverPool},
};
use std::{collections::HashMap, convert::TryFrom, fmt, sync::Arc};
@ -86,7 +83,7 @@ impl ServerState {
token_duration: settings.token_duration,
}
})
.map_err(Into::into)
.map_err(|_| ApiErrorKind::Internal("Failed to create Tokenserver pool".to_owned()).into())
}
}


@ -27,23 +27,23 @@ use serde::{
Deserialize, Serialize,
};
use serde_json::Value;
use syncserver_common::X_WEAVE_RECORDS;
use syncserver_db_common::{
use syncserver_common::{Metrics, X_WEAVE_RECORDS};
use syncstorage_db::{
params::{self, PostCollectionBso},
util::SyncTimestamp,
DbPool, Sorting, UserIdentifier,
DbError, DbPool, Sorting, SyncTimestamp, UserIdentifier,
};
use validator::{Validate, ValidationError};
use crate::db::transaction::DbTransactionPool;
use crate::error::{ApiError, ApiErrorKind};
use crate::label;
use crate::server::{metrics, ServerState, BSO_ID_REGEX, COLLECTION_ID_REGEX};
use crate::server::{
tags::Taggable, MetricsWrapper, ServerState, BSO_ID_REGEX, COLLECTION_ID_REGEX,
};
use crate::tokenserver::auth::TokenserverOrigin;
use crate::web::{
auth::HawkPayload,
error::{HawkErrorKind, ValidationErrorKind},
tags::Taggable,
transaction::DbTransactionPool,
DOCKER_FLOW_ENDPOINTS,
};
const BATCH_MAX_IDS: usize = 100;
@ -408,7 +408,6 @@ impl FromRequest for BsoBody {
)
.into());
}
let state = match req.app_data::<Data<ServerState>>() {
Some(s) => s,
None => {
@ -637,7 +636,7 @@ impl FromRequest for CollectionParam {
pub struct MetaRequest {
pub user_id: UserIdentifier,
pub tokenserver_origin: TokenserverOrigin,
pub metrics: metrics::Metrics,
pub metrics: Metrics,
}
impl FromRequest for MetaRequest {
@ -655,7 +654,7 @@ impl FromRequest for MetaRequest {
Ok(MetaRequest {
tokenserver_origin: user_id.tokenserver_origin,
user_id: user_id.into(),
metrics: metrics::Metrics::extract(&req).await?,
metrics: MetricsWrapper::extract(&req).await?.0,
})
}
.boxed_local()
@ -678,7 +677,7 @@ pub struct CollectionRequest {
pub tokenserver_origin: TokenserverOrigin,
pub query: BsoQueryParams,
pub reply: ReplyFormat,
pub metrics: metrics::Metrics,
pub metrics: Metrics,
}
impl FromRequest for CollectionRequest {
@ -719,7 +718,7 @@ impl FromRequest for CollectionRequest {
user_id: user_id.into(),
query,
reply,
metrics: metrics::Metrics::extract(&req).await?,
metrics: MetricsWrapper::extract(&req).await?.0,
})
}
.boxed_local()
@ -738,7 +737,7 @@ pub struct CollectionPostRequest {
pub query: BsoQueryParams,
pub bsos: BsoBodies,
pub batch: Option<BatchRequest>,
pub metrics: metrics::Metrics,
pub metrics: Metrics,
pub quota_enabled: bool,
}
@ -817,7 +816,7 @@ impl FromRequest for CollectionPostRequest {
query,
bsos,
batch: batch.opt,
metrics: metrics::Metrics::extract(&req).await?,
metrics: MetricsWrapper::extract(&req).await?.0,
quota_enabled: state.quota_enabled,
})
})
@ -834,7 +833,7 @@ pub struct BsoRequest {
pub tokenserver_origin: TokenserverOrigin,
pub query: BsoQueryParams,
pub bso: String,
pub metrics: metrics::Metrics,
pub metrics: Metrics,
}
impl FromRequest for BsoRequest {
@ -860,7 +859,7 @@ impl FromRequest for BsoRequest {
user_id: user_id.into(),
query,
bso: bso.bso,
metrics: metrics::Metrics::extract(&req).await?,
metrics: MetricsWrapper::extract(&req).await?.0,
})
})
}
@ -876,7 +875,7 @@ pub struct BsoPutRequest {
pub query: BsoQueryParams,
pub bso: String,
pub body: BsoBody,
pub metrics: metrics::Metrics,
pub metrics: Metrics,
}
impl FromRequest for BsoPutRequest {
@ -889,7 +888,7 @@ impl FromRequest for BsoPutRequest {
let mut payload = payload.take();
async move {
let metrics = metrics::Metrics::extract(&req).await?;
let metrics = MetricsWrapper::extract(&req).await?.0;
let (user_id, collection, query, bso, body) =
<(
HawkIdentifier,
@ -938,7 +937,7 @@ pub struct QuotaInfo {
#[derive(Clone, Debug)]
pub struct HeartbeatRequest {
pub headers: HeaderMap,
pub db_pool: Box<dyn DbPool>,
pub db_pool: Box<dyn DbPool<Error = DbError>>,
pub quota: QuotaInfo,
}
@ -1755,13 +1754,12 @@ mod tests {
use serde_json::{self, json};
use sha2::Sha256;
use syncserver_common;
use syncserver_db_common::Db;
use syncserver_settings::Settings as GlobalSettings;
use syncstorage_settings::{Deadman, ServerLimits, Settings as SyncstorageSettings};
use tokio::sync::RwLock;
use crate::db::mock::{MockDb, MockDbPool};
use crate::server::{metrics, ServerState};
use crate::server::ServerState;
use syncstorage_db::mock::{MockDb, MockDbPool};
use crate::web::auth::HawkPayload;
@ -1779,8 +1777,8 @@ mod tests {
const INVALID_BSO_NAME: &str =
"abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz";
fn make_db() -> Box<dyn Db<'static>> {
Box::new(MockDb::new())
fn make_db() -> MockDb {
MockDb::new()
}
fn make_state() -> ServerState {
@ -1792,7 +1790,7 @@ mod tests {
limits_json: serde_json::to_string(&**SERVER_LIMITS).unwrap(),
port: 8000,
metrics: Box::new(
metrics::metrics_from_opts(
syncserver_common::metrics_from_opts(
&syncstorage_settings.statsd_label,
syncserver_settings.statsd_host.as_deref(),
syncserver_settings.statsd_port,

View File

@ -6,22 +6,23 @@ use actix_web::{dev::HttpResponseBuilder, http::StatusCode, web::Data, HttpReque
use serde::Serialize;
use serde_json::{json, Value};
use syncserver_common::{X_LAST_MODIFIED, X_WEAVE_NEXT_OFFSET, X_WEAVE_RECORDS};
use syncserver_db_common::{
error::{DbError, DbErrorKind},
use syncstorage_db::{
params,
results::{CreateBatch, Paginated},
Db,
Db, DbError, DbErrorIntrospect,
};
use time;
use crate::{
db::transaction::DbTransactionPool,
error::{ApiError, ApiErrorKind},
server::ServerState,
web::extractors::{
web::{
extractors::{
BsoPutRequest, BsoRequest, CollectionPostRequest, CollectionRequest, EmitApiMetric,
HeartbeatRequest, MetaRequest, ReplyFormat, TestErrorRequest,
},
transaction::DbTransactionPool,
},
};
pub const ONE_KB: f64 = 1024.0;
@ -189,7 +190,7 @@ pub async fn get_collection(
async fn finish_get_collection<T>(
coll: &CollectionRequest,
db: Box<dyn Db<'_> + '_>,
db: Box<dyn Db<Error = DbError>>,
result: Result<Paginated<T>, DbError>,
) -> Result<HttpResponse, DbError>
where
@ -280,15 +281,16 @@ pub async fn post_collection(
// the entire, accumulated batch if the `commit` flag is set.
pub async fn post_collection_batch(
coll: CollectionPostRequest,
db: Box<dyn Db<'_> + '_>,
db: Box<dyn Db<Error = DbError>>,
) -> Result<HttpResponse, ApiError> {
coll.emit_api_metric("request.post_collection_batch");
trace!("Batch: Post collection batch");
// Bail early if we have nonsensical arguments
// TODO: issue932 may make these multi-level transforms easier
let breq = coll.batch.clone().ok_or_else(|| -> ApiError {
ApiErrorKind::Db(DbErrorKind::BatchNotFound.into()).into()
})?;
let breq = coll
.batch
.clone()
.ok_or_else(|| -> ApiError { ApiErrorKind::Db(DbError::batch_not_found()).into() })?;
let new_batch = if let Some(id) = breq.id.clone() {
trace!("Batch: Validating {}", &id);
@ -319,8 +321,7 @@ pub async fn post_collection_batch(
},
}
} else {
let err: DbError = DbErrorKind::BatchNotFound.into();
return Err(ApiError::from(err));
return Err(ApiErrorKind::Db(DbError::batch_not_found()).into());
}
} else {
trace!("Batch: Creating new batch");
@ -405,8 +406,7 @@ pub async fn post_collection_batch(
})
.await?
} else {
let err: DbError = DbErrorKind::BatchNotFound.into();
return Err(ApiError::from(err));
return Err(ApiErrorKind::Db(DbError::batch_not_found()).into());
};
// Then, write the BSOs contained in the commit request into the BSO table.
@ -594,7 +594,7 @@ pub async fn lbheartbeat(req: HttpRequest) -> Result<HttpResponse, ApiError> {
let db_state = if cfg!(test) {
use actix_web::http::header::HeaderValue;
use std::str::FromStr;
use syncserver_db_common::PoolState;
use syncstorage_db::PoolState;
let test_pool = PoolState {
connections: u32::from_str(

View File

@ -13,9 +13,10 @@ use actix_web::{
dev::{Service, ServiceRequest, ServiceResponse},
web::Data,
};
use syncserver_common::Metrics;
use crate::error::{ApiError, ApiErrorKind};
use crate::server::{metrics::Metrics, ServerState};
use crate::server::ServerState;
use crate::tokenserver::auth::TokenserverOrigin;
pub fn emit_http_status_with_tokenserver_origin(

View File

@ -10,7 +10,7 @@ use lazy_static::lazy_static;
use regex::Regex;
use crate::error::{ApiError, ApiErrorKind};
use crate::server::metrics::Metrics;
use crate::server::MetricsWrapper;
lazy_static! {
// e.g. "Firefox-iOS-Sync/18.0b1 (iPhone; iPhone OS 13.2.2) (Fennec (synctesting))"
@ -43,7 +43,10 @@ pub fn reject_user_agent(
Some(header) if header.to_str().map_or(false, should_reject) => Box::pin(async move {
trace!("Rejecting User-Agent: {:?}", header);
let (req, payload) = request.into_parts();
Metrics::extract(&req).await?.incr("error.rejectua");
MetricsWrapper::extract(&req)
.await?
.0
.incr("error.rejectua");
let sreq = ServiceRequest::from_parts(req, payload).map_err(|_| {
ApiError::from(ApiErrorKind::Internal(
"failed to reconstruct ServiceRequest from its parts".to_owned(),

View File

@ -11,12 +11,11 @@ use actix_web::{
use sentry::protocol::Event;
use sentry_backtrace::parse_stacktrace;
use serde_json::value::Value;
use syncserver_common::ReportableError;
use tokenserver_common::error::TokenserverError;
use syncserver_common::{Metrics, ReportableError};
use tokenserver_common::TokenserverError;
use crate::error::ApiError;
use crate::server::{metrics::Metrics, user_agent};
use crate::web::tags::Taggable;
use crate::server::{tags::Taggable, user_agent, MetricsWrapper};
pub fn report(
tags: HashMap<String, String>,
@ -75,7 +74,7 @@ pub fn report_error(
}
}
Some(e) => {
let metrics = Metrics::extract(sresp.request()).await.unwrap();
let metrics = MetricsWrapper::extract(sresp.request()).await.unwrap().0;
if let Some(apie) = e.as_error::<ApiError>() {
process_error(apie, metrics, tags, extras);

View File

@ -7,7 +7,7 @@ use actix_web::{
};
use syncserver_common::{X_LAST_MODIFIED, X_WEAVE_TIMESTAMP};
use syncserver_db_common::util::SyncTimestamp;
use syncstorage_db::SyncTimestamp;
use crate::error::{ApiError, ApiErrorKind};
use crate::web::DOCKER_FLOW_ENDPOINTS;

View File

@ -4,7 +4,7 @@ pub mod error;
pub mod extractors;
pub mod handlers;
pub mod middleware;
pub mod tags;
mod transaction;
// Known DockerFlow commands for Ops callbacks
pub const DOCKER_FLOW_ENDPOINTS: [&str; 4] = [

View File

@ -9,20 +9,18 @@ use actix_web::{FromRequest, HttpRequest, HttpResponse};
use futures::future::LocalBoxFuture;
use futures::FutureExt;
use syncserver_common::X_LAST_MODIFIED;
use syncserver_db_common::{params, Db, DbPool, UserIdentifier};
use syncstorage_db::{params, results::ConnectionInfo, Db, DbError, DbPool, UserIdentifier};
use crate::db::results::ConnectionInfo;
use crate::error::{ApiError, ApiErrorKind};
use crate::server::metrics::Metrics;
use crate::server::ServerState;
use crate::server::tags::Taggable;
use crate::server::{MetricsWrapper, ServerState};
use crate::web::extractors::{
BsoParam, CollectionParam, HawkIdentifier, PreConditionHeader, PreConditionHeaderOpt,
};
use crate::web::tags::Taggable;
#[derive(Clone)]
pub struct DbTransactionPool {
pool: Box<dyn DbPool>,
pool: Box<dyn DbPool<Error = DbError>>,
is_read: bool,
user_id: UserIdentifier,
collection: Option<String>,
@ -51,10 +49,10 @@ impl DbTransactionPool {
&'a self,
request: HttpRequest,
action: A,
) -> Result<(R, Box<dyn Db<'a>>), ApiError>
) -> Result<(R, Box<dyn Db<Error = DbError>>), ApiError>
where
A: FnOnce(Box<dyn Db<'a>>) -> F,
F: Future<Output = Result<R, ApiError>> + 'a,
A: FnOnce(Box<dyn Db<Error = DbError>>) -> F,
F: Future<Output = Result<R, ApiError>>,
{
// Get connection from pool
let db = self.pool.get().await?;
@ -88,7 +86,7 @@ impl DbTransactionPool {
}
}
pub fn get_pool(&self) -> Result<Box<dyn DbPool>, Error> {
pub fn get_pool(&self) -> Result<Box<dyn DbPool<Error = DbError>>, Error> {
Ok(self.pool.clone())
}
@ -99,7 +97,7 @@ impl DbTransactionPool {
action: A,
) -> Result<R, ApiError>
where
A: FnOnce(Box<dyn Db<'a>>) -> F,
A: FnOnce(Box<dyn Db<Error = DbError>>) -> F,
F: Future<Output = Result<R, ApiError>> + 'a,
{
let (resp, db) = self.transaction_internal(request, action).await?;
@ -117,11 +115,11 @@ impl DbTransactionPool {
action: A,
) -> Result<HttpResponse, ApiError>
where
A: FnOnce(Box<dyn Db<'a>>) -> F,
A: FnOnce(Box<dyn Db<Error = DbError>>) -> F,
F: Future<Output = Result<HttpResponse, ApiError>> + 'a,
{
let mreq = request.clone();
let check_precondition = move |db: Box<dyn Db<'a>>| {
let check_precondition = move |db: Box<dyn Db<Error = DbError>>| {
async move {
// set the extra information for all requests so we capture default err handlers.
set_extra(&mreq, db.get_connection_info());
@ -233,9 +231,10 @@ impl FromRequest for DbTransactionPool {
Err(e) => {
// Semi-example to show how to use metrics inside of middleware.
// `Result::unwrap` is safe to use here, since Metrics::extract can never fail
Metrics::extract(&req)
MetricsWrapper::extract(&req)
.await
.unwrap()
.0
.incr("sync.error.collectionParam");
warn!("⚠️ CollectionParam err: {:?}", e);
return Err(e);

View File

@ -0,0 +1,19 @@
[package]
name = "syncstorage-db-common"
version = "0.12.3"
edition = "2021"
[dependencies]
async-trait = "0.1.40"
backtrace = "0.3.61"
chrono = "0.4"
diesel = { version = "1.4", features = ["mysql", "r2d2"] }
diesel_migrations = { version = "1.4.0", features = ["mysql"] }
futures = { version = "0.3", features = ["compat"] }
http = "0.2.6"
lazy_static = "1.4.0"
serde = "1.0"
serde_json = { version = "1.0", features = ["arbitrary_precision"] }
syncserver-common = { path = "../syncserver-common" }
syncserver-db-common = { path = "../syncserver-db-common" }
thiserror = "1.0.26"

View File

@ -0,0 +1,138 @@
use std::fmt;
use backtrace::Backtrace;
use http::StatusCode;
use syncserver_common::{impl_fmt_display, ReportableError};
use thiserror::Error;
/// Errors common to all supported syncstorage database backends. These errors can be thought of
/// as being related more to the syncstorage application logic as opposed to a particular
/// database backend.
#[derive(Debug)]
pub struct SyncstorageDbError {
kind: SyncstorageDbErrorKind,
pub status: StatusCode,
pub backtrace: Backtrace,
}
#[derive(Debug, Error)]
enum SyncstorageDbErrorKind {
#[error("Specified collection does not exist")]
CollectionNotFound,
#[error("Specified bso does not exist")]
BsoNotFound,
#[error("Specified batch does not exist")]
BatchNotFound,
#[error("An attempt at a conflicting write")]
Conflict,
#[error("Unexpected error: {}", _0)]
Internal(String),
#[error("User over quota")]
Quota,
}
impl SyncstorageDbError {
pub fn batch_not_found() -> Self {
SyncstorageDbErrorKind::BatchNotFound.into()
}
pub fn bso_not_found() -> Self {
SyncstorageDbErrorKind::BsoNotFound.into()
}
pub fn collection_not_found() -> Self {
SyncstorageDbErrorKind::CollectionNotFound.into()
}
pub fn conflict() -> Self {
SyncstorageDbErrorKind::Conflict.into()
}
pub fn internal(msg: String) -> Self {
SyncstorageDbErrorKind::Internal(msg).into()
}
pub fn quota() -> Self {
SyncstorageDbErrorKind::Quota.into()
}
}
pub trait DbErrorIntrospect {
fn is_collection_not_found(&self) -> bool;
fn is_conflict(&self) -> bool;
fn is_quota(&self) -> bool;
fn is_bso_not_found(&self) -> bool;
fn is_batch_not_found(&self) -> bool;
}
impl DbErrorIntrospect for SyncstorageDbError {
fn is_collection_not_found(&self) -> bool {
matches!(self.kind, SyncstorageDbErrorKind::CollectionNotFound)
}
fn is_conflict(&self) -> bool {
matches!(self.kind, SyncstorageDbErrorKind::Conflict)
}
fn is_quota(&self) -> bool {
matches!(self.kind, SyncstorageDbErrorKind::Quota)
}
fn is_bso_not_found(&self) -> bool {
matches!(self.kind, SyncstorageDbErrorKind::BsoNotFound)
}
fn is_batch_not_found(&self) -> bool {
matches!(self.kind, SyncstorageDbErrorKind::BatchNotFound)
}
}
impl ReportableError for SyncstorageDbError {
fn is_sentry_event(&self) -> bool {
!matches!(&self.kind, SyncstorageDbErrorKind::Conflict)
}
fn metric_label(&self) -> Option<String> {
match &self.kind {
SyncstorageDbErrorKind::Conflict => Some("storage.conflict".to_owned()),
_ => None,
}
}
fn error_backtrace(&self) -> String {
format!("{:#?}", self.backtrace)
}
}
impl From<SyncstorageDbErrorKind> for SyncstorageDbError {
fn from(kind: SyncstorageDbErrorKind) -> Self {
let status = match kind {
SyncstorageDbErrorKind::CollectionNotFound | SyncstorageDbErrorKind::BsoNotFound => {
StatusCode::NOT_FOUND
}
// Matching the Python code here (a 400 vs 404)
SyncstorageDbErrorKind::BatchNotFound => StatusCode::BAD_REQUEST,
// NOTE: the protocol specification states that we should return a
// "409 Conflict" response here, but clients currently do not
// handle these responses very well:
// * desktop bug: https://bugzilla.mozilla.org/show_bug.cgi?id=959034
// * android bug: https://bugzilla.mozilla.org/show_bug.cgi?id=959032
SyncstorageDbErrorKind::Conflict => StatusCode::SERVICE_UNAVAILABLE,
SyncstorageDbErrorKind::Quota => StatusCode::FORBIDDEN,
_ => StatusCode::INTERNAL_SERVER_ERROR,
};
Self {
kind,
status,
backtrace: Backtrace::new(),
}
}
}
impl_fmt_display!(SyncstorageDbError, SyncstorageDbErrorKind);
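
A minimal usage sketch of the error type above, assuming only the constructors and the DbErrorIntrospect impl shown in this file:

use http::StatusCode;
use syncstorage_db_common::error::{DbErrorIntrospect, SyncstorageDbError};

fn demo() {
    let err = SyncstorageDbError::conflict();
    // Callers branch on the error through the introspection trait rather than
    // matching on the private kind enum.
    assert!(err.is_conflict());
    // The From<SyncstorageDbErrorKind> impl above maps Conflict to 503.
    assert_eq!(err.status, StatusCode::SERVICE_UNAVAILABLE);
}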

View File

@ -0,0 +1,304 @@
pub mod error;
pub mod params;
pub mod results;
pub mod util;
use std::fmt::Debug;
use async_trait::async_trait;
use futures::{future, TryFutureExt};
use lazy_static::lazy_static;
use serde::Deserialize;
use syncserver_db_common::{DbFuture, GetPoolState};
use error::DbErrorIntrospect;
use util::SyncTimestamp;
lazy_static! {
/// For efficiency, it's possible to use fixed pre-determined IDs for
/// common collection names. This is the canonical list of such
/// names. Non-standard collections will be allocated IDs starting
/// from the highest ID in this collection.
pub static ref STD_COLLS: Vec<(i32, &'static str)> = {
vec![
(1, "clients"),
(2, "crypto"),
(3, "forms"),
(4, "history"),
(5, "keys"),
(6, "meta"),
(7, "bookmarks"),
(8, "prefs"),
(9, "tabs"),
(10, "passwords"),
(11, "addons"),
(12, "addresses"),
(13, "creditcards"),
]
};
}
/// Rough guesstimate of the maximum reasonable life span of a batch
pub const BATCH_LIFETIME: i64 = 2 * 60 * 60 * 1000; // 2 hours, in milliseconds
/// The ttl to use for rows that are never supposed to expire (in seconds)
pub const DEFAULT_BSO_TTL: u32 = 2_100_000_000;
/// Non-standard collections will be allocated IDs beginning with this value
pub const FIRST_CUSTOM_COLLECTION_ID: i32 = 101;
#[async_trait]
pub trait DbPool: Sync + Send + Debug + GetPoolState {
type Error;
async fn get(&self) -> Result<Box<dyn Db<Error = Self::Error>>, Self::Error>;
fn validate_batch_id(&self, params: params::ValidateBatchId) -> Result<(), Self::Error>;
fn box_clone(&self) -> Box<dyn DbPool<Error = Self::Error>>;
}
impl<E> Clone for Box<dyn DbPool<Error = E>> {
fn clone(&self) -> Box<dyn DbPool<Error = E>> {
self.box_clone()
}
}
pub trait Db: Debug {
type Error: DbErrorIntrospect + 'static;
fn lock_for_read(&self, params: params::LockCollection) -> DbFuture<'_, (), Self::Error>;
fn lock_for_write(&self, params: params::LockCollection) -> DbFuture<'_, (), Self::Error>;
fn begin(&self, for_write: bool) -> DbFuture<'_, (), Self::Error>;
fn commit(&self) -> DbFuture<'_, (), Self::Error>;
fn rollback(&self) -> DbFuture<'_, (), Self::Error>;
fn get_collection_timestamps(
&self,
params: params::GetCollectionTimestamps,
) -> DbFuture<'_, results::GetCollectionTimestamps, Self::Error>;
fn get_collection_timestamp(
&self,
params: params::GetCollectionTimestamp,
) -> DbFuture<'_, results::GetCollectionTimestamp, Self::Error>;
fn get_collection_counts(
&self,
params: params::GetCollectionCounts,
) -> DbFuture<'_, results::GetCollectionCounts, Self::Error>;
fn get_collection_usage(
&self,
params: params::GetCollectionUsage,
) -> DbFuture<'_, results::GetCollectionUsage, Self::Error>;
fn get_storage_timestamp(
&self,
params: params::GetStorageTimestamp,
) -> DbFuture<'_, results::GetStorageTimestamp, Self::Error>;
fn get_storage_usage(
&self,
params: params::GetStorageUsage,
) -> DbFuture<'_, results::GetStorageUsage, Self::Error>;
fn get_quota_usage(
&self,
params: params::GetQuotaUsage,
) -> DbFuture<'_, results::GetQuotaUsage, Self::Error>;
fn delete_storage(
&self,
params: params::DeleteStorage,
) -> DbFuture<'_, results::DeleteStorage, Self::Error>;
fn delete_collection(
&self,
params: params::DeleteCollection,
) -> DbFuture<'_, results::DeleteCollection, Self::Error>;
fn delete_bsos(
&self,
params: params::DeleteBsos,
) -> DbFuture<'_, results::DeleteBsos, Self::Error>;
fn get_bsos(&self, params: params::GetBsos) -> DbFuture<'_, results::GetBsos, Self::Error>;
fn get_bso_ids(&self, params: params::GetBsos)
-> DbFuture<'_, results::GetBsoIds, Self::Error>;
fn post_bsos(&self, params: params::PostBsos) -> DbFuture<'_, results::PostBsos, Self::Error>;
fn delete_bso(
&self,
params: params::DeleteBso,
) -> DbFuture<'_, results::DeleteBso, Self::Error>;
fn get_bso(&self, params: params::GetBso)
-> DbFuture<'_, Option<results::GetBso>, Self::Error>;
fn get_bso_timestamp(
&self,
params: params::GetBsoTimestamp,
) -> DbFuture<'_, results::GetBsoTimestamp, Self::Error>;
fn put_bso(&self, params: params::PutBso) -> DbFuture<'_, results::PutBso, Self::Error>;
fn create_batch(
&self,
params: params::CreateBatch,
) -> DbFuture<'_, results::CreateBatch, Self::Error>;
fn validate_batch(
&self,
params: params::ValidateBatch,
) -> DbFuture<'_, results::ValidateBatch, Self::Error>;
fn append_to_batch(
&self,
params: params::AppendToBatch,
) -> DbFuture<'_, results::AppendToBatch, Self::Error>;
fn get_batch(
&self,
params: params::GetBatch,
) -> DbFuture<'_, Option<results::GetBatch>, Self::Error>;
fn commit_batch(
&self,
params: params::CommitBatch,
) -> DbFuture<'_, results::CommitBatch, Self::Error>;
fn box_clone(&self) -> Box<dyn Db<Error = Self::Error>>;
fn check(&self) -> DbFuture<'_, results::Check, Self::Error>;
fn get_connection_info(&self) -> results::ConnectionInfo;
/// Retrieve the timestamp for an item/collection
///
/// Modeled on the Python `get_resource_timestamp` function.
fn extract_resource(
&self,
user_id: UserIdentifier,
collection: Option<String>,
bso: Option<String>,
) -> DbFuture<'_, SyncTimestamp, Self::Error> {
// If there's no collection, we return the overall storage timestamp
let collection = match collection {
Some(collection) => collection,
None => return Box::pin(self.get_storage_timestamp(user_id)),
};
// If there's no bso, return the collection
let bso = match bso {
Some(bso) => bso,
None => {
return Box::pin(
self.get_collection_timestamp(params::GetCollectionTimestamp {
user_id,
collection,
})
.or_else(|e| {
if e.is_collection_not_found() {
future::ok(SyncTimestamp::from_seconds(0f64))
} else {
future::err(e)
}
}),
)
}
};
Box::pin(
self.get_bso_timestamp(params::GetBsoTimestamp {
user_id,
collection,
id: bso,
})
.or_else(|e| {
if e.is_collection_not_found() {
future::ok(SyncTimestamp::from_seconds(0f64))
} else {
future::err(e)
}
}),
)
}
/// Internal methods used by the db tests
fn get_collection_id(&self, name: String) -> DbFuture<'_, i32, Self::Error>;
fn create_collection(&self, name: String) -> DbFuture<'_, i32, Self::Error>;
fn update_collection(
&self,
params: params::UpdateCollection,
) -> DbFuture<'_, SyncTimestamp, Self::Error>;
fn timestamp(&self) -> SyncTimestamp;
fn set_timestamp(&self, timestamp: SyncTimestamp);
fn delete_batch(&self, params: params::DeleteBatch) -> DbFuture<'_, (), Self::Error>;
fn clear_coll_cache(&self) -> DbFuture<'_, (), Self::Error>;
fn set_quota(&mut self, enabled: bool, limit: usize, enforce: bool);
}
impl<E> Clone for Box<dyn Db<Error = E>>
where
E: DbErrorIntrospect + 'static,
{
fn clone(&self) -> Box<dyn Db<Error = E>> {
self.box_clone()
}
}
#[derive(Debug, Deserialize, Clone, PartialEq, Eq, Copy)]
#[serde(rename_all = "lowercase")]
pub enum Sorting {
None,
Newest,
Oldest,
Index,
}
impl Default for Sorting {
fn default() -> Self {
Sorting::None
}
}
#[derive(Clone, Debug, Default, Eq, Hash, PartialEq)]
pub struct UserIdentifier {
/// For MySQL database backends as the primary key
pub legacy_id: u64,
/// For NoSQL database backends that require randomly distributed primary keys
pub fxa_uid: String,
pub fxa_kid: String,
}
impl UserIdentifier {
/// Create a new legacy id user identifier
pub fn new_legacy(user_id: u64) -> Self {
Self {
legacy_id: user_id,
..Default::default()
}
}
}
impl From<u32> for UserIdentifier {
fn from(val: u32) -> Self {
Self {
legacy_id: val.into(),
..Default::default()
}
}
}
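
A small sketch of the two construction paths for UserIdentifier shown above (the values are illustrative):

use syncstorage_db_common::UserIdentifier;

fn demo() {
    let legacy = UserIdentifier::new_legacy(42);
    let converted: UserIdentifier = 42u32.into();
    // Both fill only the MySQL-style numeric id; fxa_uid and fxa_kid stay at
    // their Default (empty) values.
    assert_eq!(legacy, converted);
}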

View File

@ -12,7 +12,7 @@ use diesel::{
};
use serde::{ser, Deserialize, Deserializer, Serialize, Serializer};
use super::error::{DbError, DbErrorKind};
use super::error::SyncstorageDbError;
/// Get the time since the UNIX epoch in milliseconds
fn ms_since_epoch() -> i64 {
@ -53,15 +53,17 @@ impl SyncTimestamp {
}
/// Create a `SyncTimestamp` from an i64
pub fn from_i64(val: i64) -> Result<Self, DbError> {
pub fn from_i64(val: i64) -> Result<Self, SyncstorageDbError> {
if val < 0 {
return Err(DbErrorKind::Integrity("Invalid modified i64 (< 0)".to_owned()).into());
return Err(SyncstorageDbError::internal(
"Invalid modified i64 (< 0)".to_owned(),
));
}
Ok(SyncTimestamp::from_milliseconds(val as u64))
}
/// Exposed separately for db tests
pub fn _from_i64(val: i64) -> Result<Self, DbError> {
pub fn _from_i64(val: i64) -> Result<Self, SyncstorageDbError> {
SyncTimestamp::from_i64(val)
}
@ -78,17 +80,19 @@ impl SyncTimestamp {
/// Create a `SyncTimestamp` from an RFC 3339 and ISO 8601 date and time
/// string such as 1996-12-19T16:39:57-08:00
pub fn from_rfc3339(val: &str) -> Result<Self, DbError> {
pub fn from_rfc3339(val: &str) -> Result<Self, SyncstorageDbError> {
let dt = DateTime::parse_from_rfc3339(val)
.map_err(|e| DbErrorKind::Integrity(format!("Invalid TIMESTAMP {}", e)))?;
.map_err(|e| SyncstorageDbError::internal(format!("Invalid TIMESTAMP {}", e)))?;
Self::from_datetime(dt)
}
/// Create a `SyncTimestamp` from a chrono DateTime
fn from_datetime(val: DateTime<FixedOffset>) -> Result<Self, DbError> {
fn from_datetime(val: DateTime<FixedOffset>) -> Result<Self, SyncstorageDbError> {
let millis = val.timestamp_millis();
if millis < 0 {
return Err(DbErrorKind::Integrity("Invalid DateTime (< 0)".to_owned()).into());
return Err(SyncstorageDbError::internal(
"Invalid DateTime (< 0)".to_owned(),
));
}
Ok(SyncTimestamp::from_milliseconds(millis as u64))
}
@ -105,7 +109,7 @@ impl SyncTimestamp {
/// Return the timestamp as an RFC 3339 and ISO 8601 date and time string such as
/// 1996-12-19T16:39:57-08:00
pub fn as_rfc3339(self) -> Result<String, DbError> {
pub fn as_rfc3339(self) -> Result<String, SyncstorageDbError> {
to_rfc3339(self.as_i64())
}
}
@ -167,10 +171,10 @@ where
/// Render a timestamp (as an i64 milliseconds since epoch) as an RFC 3339 and ISO 8601
/// date and time string such as 1996-12-19T16:39:57-08:00
pub fn to_rfc3339(val: i64) -> Result<String, DbError> {
pub fn to_rfc3339(val: i64) -> Result<String, SyncstorageDbError> {
let secs = val / 1000;
let nsecs = ((val % 1000) * 1_000_000).try_into().map_err(|e| {
DbError::internal(&format!("Invalid timestamp (nanoseconds) {}: {}", val, e))
SyncstorageDbError::internal(format!("Invalid timestamp (nanoseconds) {}: {}", val, e))
})?;
Ok(Utc
.timestamp(secs, nsecs)

31 syncstorage-db/Cargo.toml Normal file
View File

@ -0,0 +1,31 @@
[package]
name = "syncstorage-db"
version = "0.12.3"
edition = "2021"
[dependencies]
async-trait = "0.1.40"
cadence = "0.26"
env_logger = "0.9"
futures = { version = "0.3", features = ["compat"] }
hostname = "0.3.1"
lazy_static = "1.4.0"
log = { version = "0.4", features = [
"max_level_debug",
"release_max_level_info",
] }
rand = "0.8"
slog-scope = "4.3"
syncserver-common = { path = "../syncserver-common" }
syncserver-db-common = { path = "../syncserver-db-common" }
syncserver-settings = { path = "../syncserver-settings" }
syncstorage-db-common = { path = "../syncstorage-db-common" }
syncstorage-mysql = { path = "../syncstorage-mysql", optional = true }
syncstorage-settings = { path = "../syncstorage-settings" }
syncstorage-spanner = { path = "../syncstorage-spanner", optional = true }
# pinning to 0.2.4 due to high number of dependencies (actix, bb8, deadpool, etc.)
tokio = { version = "0.2.4", features = ["macros", "sync"] }
[features]
mysql = ['syncstorage-mysql']
spanner = ['syncstorage-spanner']

77 syncstorage-db/src/lib.rs Normal file
View File

@ -0,0 +1,77 @@
//! Generic db abstraction.
#[cfg(test)]
#[macro_use]
extern crate slog_scope;
pub mod mock;
#[cfg(test)]
mod tests;
use std::time::Duration;
use cadence::{Gauged, StatsdClient};
use tokio::{self, time};
#[cfg(feature = "mysql")]
pub type DbPoolImpl = syncstorage_mysql::MysqlDbPool;
#[cfg(feature = "mysql")]
pub use syncstorage_mysql::DbError;
#[cfg(feature = "mysql")]
pub type DbImpl = syncstorage_mysql::MysqlDb;
#[cfg(feature = "spanner")]
pub type DbPoolImpl = syncstorage_spanner::SpannerDbPool;
#[cfg(feature = "spanner")]
pub use syncstorage_spanner::DbError;
#[cfg(feature = "spanner")]
pub type DbImpl = syncstorage_spanner::SpannerDb;
pub use syncserver_db_common::{GetPoolState, PoolState};
pub use syncstorage_db_common::error::DbErrorIntrospect;
pub use syncstorage_db_common::{
params, results,
util::{to_rfc3339, SyncTimestamp},
Db, DbPool, Sorting, UserIdentifier,
};
#[cfg(all(feature = "mysql", feature = "spanner"))]
compile_error!("only one of the \"mysql\" and \"spanner\" features can be enabled at a time");
#[cfg(not(any(feature = "mysql", feature = "spanner")))]
compile_error!("exactly one of the \"mysql\" and \"spanner\" features must be enabled");
/// Emit DbPool metrics periodically
pub fn spawn_pool_periodic_reporter<T: GetPoolState + Send + 'static>(
interval: Duration,
metrics: StatsdClient,
pool: T,
) -> Result<(), DbError> {
let hostname = hostname::get()
.expect("Couldn't get hostname")
.into_string()
.expect("Couldn't get hostname");
tokio::spawn(async move {
loop {
let PoolState {
connections,
idle_connections,
} = pool.state();
metrics
.gauge_with_tags(
"storage.pool.connections.active",
(connections - idle_connections) as u64,
)
.with_tag("hostname", &hostname)
.send();
metrics
.gauge_with_tags("storage.pool.connections.idle", idle_connections as u64)
.with_tag("hostname", &hostname)
.send();
time::delay_for(interval).await;
}
});
Ok(())
}
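
A hedged sketch of how the feature-gated aliases above are consumed; it assumes, as the test support code later in this commit does, that the concrete pool types implement DbPool<Error = DbError>:

use syncstorage_db::{Db, DbError, DbPool, DbPoolImpl};

// Built with exactly one of the `mysql`/`spanner` features enabled, callers
// only ever see the object-safe `Db` trait, so the rest of the application is
// identical for both backends.
async fn checkout(pool: &DbPoolImpl) -> Result<Box<dyn Db<Error = DbError>>, DbError> {
    pool.get().await
}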

View File

@ -2,10 +2,12 @@
#![allow(clippy::new_without_default)]
use async_trait::async_trait;
use futures::future;
use syncserver_db_common::{
error::DbError, params, results, util::SyncTimestamp, Db, DbFuture, DbPool, GetPoolState,
PoolState,
};
use syncserver_db_common::{GetPoolState, PoolState};
use syncstorage_db_common::{params, results, util::SyncTimestamp, Db, DbPool};
use crate::DbError;
type DbFuture<'a, T> = syncserver_db_common::DbFuture<'a, T, DbError>;
#[derive(Clone, Debug)]
pub struct MockDbPool;
@ -18,15 +20,17 @@ impl MockDbPool {
#[async_trait]
impl DbPool for MockDbPool {
async fn get<'a>(&'a self) -> Result<Box<dyn Db<'a>>, DbError> {
Ok(Box::new(MockDb::new()) as Box<dyn Db<'a>>)
type Error = DbError;
async fn get(&self) -> Result<Box<dyn Db<Error = DbError>>, Self::Error> {
Ok(Box::new(MockDb::new()))
}
fn validate_batch_id(&self, _: params::ValidateBatchId) -> Result<(), DbError> {
Ok(())
}
fn box_clone(&self) -> Box<dyn DbPool> {
fn box_clone(&self) -> Box<dyn DbPool<Error = DbError>> {
Box::new(self.clone())
}
}
@ -58,7 +62,9 @@ macro_rules! mock_db_method {
};
}
impl<'a> Db<'a> for MockDb {
impl Db for MockDb {
type Error = DbError;
fn commit(&self) -> DbFuture<'_, ()> {
Box::pin(future::ok(()))
}
@ -71,7 +77,7 @@ impl<'a> Db<'a> for MockDb {
Box::pin(future::ok(()))
}
fn box_clone(&self) -> Box<dyn Db<'a>> {
fn box_clone(&self) -> Box<dyn Db<Error = DbError>> {
Box::new(self.clone())
}

View File

@ -1,8 +1,11 @@
use log::debug;
use syncserver_db_common::{params, results, util::SyncTimestamp, BATCH_LIFETIME};
use syncserver_settings::Settings;
use syncstorage_db_common::{
error::DbErrorIntrospect, params, results, util::SyncTimestamp, BATCH_LIFETIME,
};
use super::support::{db_pool, gbso, hid, pbso, postbso, test_db, Result};
use super::support::{db_pool, gbso, hid, pbso, postbso, test_db};
use crate::DbError;
fn cb(user_id: u32, coll: &str, bsos: Vec<params::PostCollectionBso>) -> params::CreateBatch {
params::CreateBatch {
@ -43,9 +46,9 @@ fn gb(user_id: u32, coll: &str, id: String) -> params::GetBatch {
}
#[tokio::test]
async fn create_delete() -> Result<()> {
async fn create_delete() -> Result<(), DbError> {
let pool = db_pool(None).await?;
let db = test_db(pool.as_ref()).await?;
let db = test_db(pool).await?;
let uid = 1;
let coll = "clients";
@ -66,9 +69,9 @@ async fn create_delete() -> Result<()> {
}
#[tokio::test]
async fn expiry() -> Result<()> {
async fn expiry() -> Result<(), DbError> {
let pool = db_pool(None).await?;
let db = test_db(pool.as_ref()).await?;
let db = test_db(pool).await?;
let uid = 1;
let coll = "clients";
@ -90,9 +93,9 @@ async fn expiry() -> Result<()> {
}
#[tokio::test]
async fn update() -> Result<()> {
async fn update() -> Result<(), DbError> {
let pool = db_pool(None).await?;
let db = test_db(pool.as_ref()).await?;
let db = test_db(pool).await?;
let uid = 1;
let coll = "clients";
@ -114,9 +117,9 @@ async fn update() -> Result<()> {
}
#[tokio::test]
async fn append_commit() -> Result<()> {
async fn append_commit() -> Result<(), DbError> {
let pool = db_pool(None).await?;
let db = test_db(pool.as_ref()).await?;
let db = test_db(pool).await?;
let uid = 1;
let coll = "clients";
@ -157,7 +160,7 @@ async fn append_commit() -> Result<()> {
}
#[tokio::test]
async fn quota_test_create_batch() -> Result<()> {
async fn quota_test_create_batch() -> Result<(), DbError> {
let mut settings = Settings::test_settings().syncstorage;
if !settings.enable_quota {
@ -169,7 +172,7 @@ async fn quota_test_create_batch() -> Result<()> {
settings.limits.max_quota_limit = limit;
let pool = db_pool(Some(settings.clone())).await?;
let db = test_db(pool.as_ref()).await?;
let db = test_db(pool).await?;
let uid = 1;
let coll = "clients";
@ -199,7 +202,7 @@ async fn quota_test_create_batch() -> Result<()> {
}
#[tokio::test]
async fn quota_test_append_batch() -> Result<()> {
async fn quota_test_append_batch() -> Result<(), DbError> {
let mut settings = Settings::test_settings().syncstorage;
if !settings.enable_quota {
@ -211,7 +214,7 @@ async fn quota_test_append_batch() -> Result<()> {
settings.limits.max_quota_limit = limit;
let pool = db_pool(Some(settings.clone())).await?;
let db = test_db(pool.as_ref()).await?;
let db = test_db(pool).await?;
let uid = 1;
let coll = "clients";
@ -244,10 +247,10 @@ async fn quota_test_append_batch() -> Result<()> {
}
#[tokio::test]
async fn test_append_async_w_null() -> Result<()> {
async fn test_append_async_w_null() -> Result<(), DbError> {
let settings = Settings::test_settings().syncstorage;
let pool = db_pool(Some(settings)).await?;
let db = test_db(pool.as_ref()).await?;
let db = test_db(pool).await?;
// Remember: TTL is seconds to live, not an expiry date
let ttl_0 = 86_400;
let ttl_1 = 86_400;

View File

@ -3,10 +3,13 @@ use std::collections::HashMap;
use lazy_static::lazy_static;
use rand::{distributions::Alphanumeric, thread_rng, Rng};
use syncserver_db_common::{params, util::SyncTimestamp, Sorting, UserIdentifier, DEFAULT_BSO_TTL};
use syncserver_settings::Settings;
use syncstorage_db_common::{
error::DbErrorIntrospect, params, util::SyncTimestamp, Sorting, UserIdentifier, DEFAULT_BSO_TTL,
};
use super::support::{db_pool, dbso, dbsos, gbso, gbsos, hid, pbso, postbso, test_db, Result};
use super::support::{db_pool, dbso, dbsos, gbso, gbsos, hid, pbso, postbso, test_db};
use crate::DbError;
// distant future (year 2099) timestamp for tests
const MAX_TIMESTAMP: u64 = 4_070_937_600_000;
@ -16,9 +19,9 @@ lazy_static! {
}
#[tokio::test]
async fn bso_successfully_updates_single_values() -> Result<()> {
async fn bso_successfully_updates_single_values() -> Result<(), DbError> {
let pool = db_pool(None).await?;
let db = test_db(pool.as_ref()).await?;
let db = test_db(pool).await?;
let uid = *UID;
let coll = "clients";
@ -57,9 +60,9 @@ async fn bso_successfully_updates_single_values() -> Result<()> {
}
#[tokio::test]
async fn bso_modified_not_changed_on_ttl_touch() -> Result<()> {
async fn bso_modified_not_changed_on_ttl_touch() -> Result<(), DbError> {
let pool = db_pool(None).await?;
let db = test_db(pool.as_ref()).await?;
let db = test_db(pool).await?;
let uid = *UID;
let coll = "clients";
@ -80,9 +83,9 @@ async fn bso_modified_not_changed_on_ttl_touch() -> Result<()> {
}
#[tokio::test]
async fn put_bso_updates() -> Result<()> {
async fn put_bso_updates() -> Result<(), DbError> {
let pool = db_pool(None).await?;
let db = test_db(pool.as_ref()).await?;
let db = test_db(pool).await?;
let uid = *UID;
let coll = "clients";
@ -103,9 +106,9 @@ async fn put_bso_updates() -> Result<()> {
}
#[tokio::test]
async fn get_bsos_limit_offset() -> Result<()> {
async fn get_bsos_limit_offset() -> Result<(), DbError> {
let pool = db_pool(None).await?;
let db = test_db(pool.as_ref()).await?;
let db = test_db(pool).await?;
let uid = *UID;
let coll = "clients";
@ -224,9 +227,9 @@ async fn get_bsos_limit_offset() -> Result<()> {
}
#[tokio::test]
async fn get_bsos_newer() -> Result<()> {
async fn get_bsos_newer() -> Result<(), DbError> {
let pool = db_pool(None).await?;
let db = test_db(pool.as_ref()).await?;
let db = test_db(pool).await?;
let uid = *UID;
let coll = "clients";
@ -309,9 +312,9 @@ async fn get_bsos_newer() -> Result<()> {
}
#[tokio::test]
async fn get_bsos_sort() -> Result<()> {
async fn get_bsos_sort() -> Result<(), DbError> {
let pool = db_pool(None).await?;
let db = test_db(pool.as_ref()).await?;
let db = test_db(pool).await?;
let uid = *UID;
let coll = "clients";
@ -382,9 +385,9 @@ async fn get_bsos_sort() -> Result<()> {
}
#[tokio::test]
async fn delete_bsos_in_correct_collection() -> Result<()> {
async fn delete_bsos_in_correct_collection() -> Result<(), DbError> {
let pool = db_pool(None).await?;
let db = test_db(pool.as_ref()).await?;
let db = test_db(pool).await?;
let uid = *UID;
let payload = "data";
@ -399,9 +402,9 @@ async fn delete_bsos_in_correct_collection() -> Result<()> {
}
#[tokio::test]
async fn get_storage_timestamp() -> Result<()> {
async fn get_storage_timestamp() -> Result<(), DbError> {
let pool = db_pool(None).await?;
let db = test_db(pool.as_ref()).await?;
let db = test_db(pool).await?;
let uid = *UID;
db.create_collection("NewCollection1".to_owned()).await?;
@ -422,17 +425,17 @@ async fn get_storage_timestamp() -> Result<()> {
}
#[tokio::test]
async fn get_collection_id() -> Result<()> {
async fn get_collection_id() -> Result<(), DbError> {
let pool = db_pool(None).await?;
let db = test_db(pool.as_ref()).await?;
let db = test_db(pool).await?;
db.get_collection_id("bookmarks".to_owned()).await?;
Ok(())
}
#[tokio::test]
async fn create_collection() -> Result<()> {
async fn create_collection() -> Result<(), DbError> {
let pool = db_pool(None).await?;
let db = test_db(pool.as_ref()).await?;
let db = test_db(pool).await?;
let name = "NewCollection";
let cid = db.create_collection(name.to_owned()).await?;
@ -443,9 +446,9 @@ async fn create_collection() -> Result<()> {
}
#[tokio::test]
async fn update_collection() -> Result<()> {
async fn update_collection() -> Result<(), DbError> {
let pool = db_pool(None).await?;
let db = test_db(pool.as_ref()).await?;
let db = test_db(pool).await?;
let collection = "test".to_owned();
let cid = db.create_collection(collection.clone()).await?;
@ -459,9 +462,9 @@ async fn update_collection() -> Result<()> {
}
#[tokio::test]
async fn delete_collection() -> Result<()> {
async fn delete_collection() -> Result<(), DbError> {
let pool = db_pool(None).await?;
let db = test_db(pool.as_ref()).await?;
let db = test_db(pool).await?;
let uid = *UID;
let coll = "NewCollection";
@ -495,9 +498,9 @@ async fn delete_collection() -> Result<()> {
}
#[tokio::test]
async fn delete_collection_tombstone() -> Result<()> {
async fn delete_collection_tombstone() -> Result<(), DbError> {
let pool = db_pool(None).await?;
let db = test_db(pool.as_ref()).await?;
let db = test_db(pool).await?;
let uid = *UID;
let coll = "test";
@ -555,9 +558,9 @@ async fn delete_collection_tombstone() -> Result<()> {
}
#[tokio::test]
async fn get_collection_timestamps() -> Result<()> {
async fn get_collection_timestamps() -> Result<(), DbError> {
let pool = db_pool(None).await?;
let db = test_db(pool.as_ref()).await?;
let db = test_db(pool).await?;
let uid = *UID;
let coll = "test".to_owned();
@ -583,9 +586,9 @@ async fn get_collection_timestamps() -> Result<()> {
}
#[tokio::test]
async fn get_collection_timestamps_tombstone() -> Result<()> {
async fn get_collection_timestamps_tombstone() -> Result<(), DbError> {
let pool = db_pool(None).await?;
let db = test_db(pool.as_ref()).await?;
let db = test_db(pool).await?;
let uid = *UID;
let coll = "test".to_owned();
@ -608,9 +611,9 @@ async fn get_collection_timestamps_tombstone() -> Result<()> {
}
#[tokio::test]
async fn get_collection_usage() -> Result<()> {
async fn get_collection_usage() -> Result<(), DbError> {
let pool = db_pool(None).await?;
let db = test_db(pool.as_ref()).await?;
let db = test_db(pool).await?;
let uid = 5;
let mut expected = HashMap::new();
@ -660,7 +663,7 @@ async fn get_collection_usage() -> Result<()> {
}
#[tokio::test]
async fn test_quota() -> Result<()> {
async fn test_quota() -> Result<(), DbError> {
let settings = Settings::test_settings();
if !settings.syncstorage.enable_quota {
@ -669,7 +672,7 @@ async fn test_quota() -> Result<()> {
}
let pool = db_pool(None).await?;
let mut db = test_db(pool.as_ref()).await?;
let mut db = test_db(pool).await?;
let uid = 5;
let coll = "bookmarks";
@ -702,9 +705,9 @@ async fn test_quota() -> Result<()> {
}
#[tokio::test]
async fn get_collection_counts() -> Result<()> {
async fn get_collection_counts() -> Result<(), DbError> {
let pool = db_pool(None).await?;
let db = test_db(pool.as_ref()).await?;
let db = test_db(pool).await?;
let uid = 4;
let mut expected = HashMap::new();
@ -725,9 +728,9 @@ async fn get_collection_counts() -> Result<()> {
}
#[tokio::test]
async fn put_bso() -> Result<()> {
async fn put_bso() -> Result<(), DbError> {
let pool = db_pool(None).await?;
let db = test_db(pool.as_ref()).await?;
let db = test_db(pool).await?;
let uid = *UID;
let coll = "NewCollection";
@ -765,9 +768,9 @@ async fn put_bso() -> Result<()> {
}
#[tokio::test]
async fn post_bsos() -> Result<()> {
async fn post_bsos() -> Result<(), DbError> {
let pool = db_pool(None).await?;
let db = test_db(pool.as_ref()).await?;
let db = test_db(pool).await?;
let uid = *UID;
let coll = "NewCollection";
@ -836,9 +839,9 @@ async fn post_bsos() -> Result<()> {
}
#[tokio::test]
async fn get_bso() -> Result<()> {
async fn get_bso() -> Result<(), DbError> {
let pool = db_pool(None).await?;
let db = test_db(pool.as_ref()).await?;
let db = test_db(pool).await?;
let uid = *UID;
let coll = "clients";
@ -857,9 +860,9 @@ async fn get_bso() -> Result<()> {
}
#[tokio::test]
async fn get_bsos() -> Result<()> {
async fn get_bsos() -> Result<(), DbError> {
let pool = db_pool(None).await?;
let db = test_db(pool.as_ref()).await?;
let db = test_db(pool).await?;
let uid = 2;
let coll = "clients";
@ -928,9 +931,9 @@ async fn get_bsos() -> Result<()> {
}
#[tokio::test]
async fn get_bso_timestamp() -> Result<()> {
async fn get_bso_timestamp() -> Result<(), DbError> {
let pool = db_pool(None).await?;
let db = test_db(pool.as_ref()).await?;
let db = test_db(pool).await?;
let uid = *UID;
let coll = "clients";
@ -949,9 +952,9 @@ async fn get_bso_timestamp() -> Result<()> {
}
#[tokio::test]
async fn delete_bso() -> Result<()> {
async fn delete_bso() -> Result<(), DbError> {
let pool = db_pool(None).await?;
let db = test_db(pool.as_ref()).await?;
let db = test_db(pool).await?;
let uid = *UID;
let coll = "clients";
@ -965,9 +968,9 @@ async fn delete_bso() -> Result<()> {
}
#[tokio::test]
async fn delete_bsos() -> Result<()> {
async fn delete_bsos() -> Result<(), DbError> {
let pool = db_pool(None).await?;
let db = test_db(pool.as_ref()).await?;
let db = test_db(pool).await?;
let uid = *UID;
let coll = "clients";
@ -1000,31 +1003,31 @@ async fn delete_bsos() -> Result<()> {
/*
#[tokio::test]
async fn usage_stats() -> Result<()> {
async fn usage_stats() -> Result<(), DbError> {
let pool = db_pool(None).await?;
let db = test_db(pool.as_ref()).await?;
let db = test_db(pool).await?;
Ok(())
}
#[tokio::test]
async fn purge_expired() -> Result<()> {
async fn purge_expired() -> Result<(), DbError> {
let pool = db_pool(None).await?;
let db = test_db(pool.as_ref()).await?;
let db = test_db(pool).await?;
Ok(())
}
#[tokio::test]
async fn optimize() -> Result<()> {
async fn optimize() -> Result<(), DbError> {
let pool = db_pool(None).await?;
let db = test_db(pool.as_ref()).await?;
let db = test_db(pool).await?;
Ok(())
}
*/
#[tokio::test]
async fn delete_storage() -> Result<()> {
async fn delete_storage() -> Result<(), DbError> {
let pool = db_pool(None).await?;
let db = test_db(pool.as_ref()).await?;
let db = test_db(pool).await?;
let uid = *UID;
let bid = "test";
@ -1048,9 +1051,9 @@ async fn delete_storage() -> Result<()> {
}
#[tokio::test]
async fn collection_cache() -> Result<()> {
async fn collection_cache() -> Result<(), DbError> {
let pool = db_pool(None).await?;
let db = test_db(pool.as_ref()).await?;
let db = test_db(pool).await?;
let uid = *UID;
let coll = "test";
@ -1069,9 +1072,9 @@ async fn collection_cache() -> Result<()> {
}
#[tokio::test]
async fn lock_for_read() -> Result<()> {
async fn lock_for_read() -> Result<(), DbError> {
let pool = db_pool(None).await?;
let db = test_db(pool.as_ref()).await?;
let db = test_db(pool).await?;
let uid = *UID;
let coll = "clients";
@ -1087,9 +1090,9 @@ async fn lock_for_read() -> Result<()> {
}
#[tokio::test]
async fn lock_for_write() -> Result<()> {
async fn lock_for_write() -> Result<(), DbError> {
let pool = db_pool(None).await?;
let db = test_db(pool.as_ref()).await?;
let db = test_db(pool).await?;
let uid = *UID;
let coll = "clients";
@ -1105,9 +1108,9 @@ async fn lock_for_write() -> Result<()> {
}
#[tokio::test]
async fn heartbeat() -> Result<()> {
async fn heartbeat() -> Result<(), DbError> {
let pool = db_pool(None).await?;
let db = test_db(pool.as_ref()).await?;
let db = test_db(pool).await?;
assert!(db.check().await?);
Ok(())

View File

@ -1,17 +1,14 @@
use std::{str::FromStr, sync::Arc};
use syncserver_db_common::{params, util::SyncTimestamp, Db, Sorting, UserIdentifier};
use syncserver_common::{BlockingThreadpool, Metrics};
use syncserver_settings::Settings as SyncserverSettings;
use syncstorage_db_common::{params, util::SyncTimestamp, Db, DbPool, Sorting, UserIdentifier};
use syncstorage_settings::Settings as SyncstorageSettings;
use crate::db::DbPool;
use crate::error::ApiResult;
use crate::{db::pool_from_settings, db::BlockingThreadpool, error::ApiError, server::metrics};
pub type Result<T> = std::result::Result<T, ApiError>;
use crate::{DbError, DbPoolImpl};
#[cfg(test)]
pub async fn db_pool(settings: Option<SyncstorageSettings>) -> Result<Box<dyn DbPool>> {
pub async fn db_pool(settings: Option<SyncstorageSettings>) -> Result<DbPoolImpl, DbError> {
let _ = env_logger::try_init();
// The default for SYNC_SYNCSTORAGE__DATABASE_USE_TEST_TRANSACTIONS is
// false, but we want the mysql default to be true, so let's check
@ -25,13 +22,12 @@ pub async fn db_pool(settings: Option<SyncstorageSettings>) -> Result<Box<dyn Db
let mut settings = settings.unwrap_or_else(|| SyncserverSettings::test_settings().syncstorage);
settings.database_use_test_transactions = use_test_transactions;
let metrics = metrics::Metrics::noop();
let pool =
pool_from_settings(&settings, &metrics, Arc::new(BlockingThreadpool::default())).await?;
let metrics = Metrics::noop();
let pool = DbPoolImpl::new(&settings, &metrics, Arc::new(BlockingThreadpool::default()))?;
Ok(pool)
}
pub async fn test_db(pool: &dyn DbPool) -> ApiResult<Box<dyn Db<'_>>> {
pub async fn test_db(pool: DbPoolImpl) -> Result<Box<dyn Db<Error = DbError>>, DbError> {
let db = pool.get().await?;
// Spanner won't have a timestamp until lock_for_xxx are called: fill one
// in for it

View File

@ -0,0 +1,25 @@
[package]
name = "syncstorage-mysql"
version = "0.12.3"
edition = "2021"
[dependencies]
async-trait = "0.1.40"
backtrace = "0.3.61"
base64 = "0.13"
diesel = { version = "1.4", features = ["mysql", "r2d2"] }
diesel_logger = "0.1.1"
diesel_migrations = { version = "1.4.0", features = ["mysql"] }
futures = { version = "0.3", features = ["compat"] }
http = "0.2.5"
slog-scope = "4.3"
syncserver-common = { path = "../syncserver-common" }
syncserver-db-common = { path = "../syncserver-db-common" }
syncstorage-db-common = { path = "../syncstorage-db-common" }
syncstorage-settings = { path = "../syncstorage-settings" }
thiserror = "1.0.26"
url = "2.1"
[dev-dependencies]
syncserver-settings = { path = "../syncserver-settings" }
env_logger = "0.9"

View File

@ -9,19 +9,18 @@ use diesel::{
sql_types::{BigInt, Integer},
ExpressionMethods, OptionalExtension, QueryDsl, RunQueryDsl,
};
use syncserver_db_common::{
error::{DbError, DbErrorKind},
params, results, UserIdentifier, BATCH_LIFETIME,
};
use syncstorage_db_common::{params, results, UserIdentifier, BATCH_LIFETIME};
use super::{
models::{MysqlDb, Result},
error::DbError,
models::MysqlDb,
schema::{batch_upload_items, batch_uploads},
DbResult,
};
const MAXTTL: i32 = 2_100_000_000;
pub fn create(db: &MysqlDb, params: params::CreateBatch) -> Result<results::CreateBatch> {
pub fn create(db: &MysqlDb, params: params::CreateBatch) -> DbResult<results::CreateBatch> {
let user_id = params.user_id.legacy_id as i64;
let collection_id = db.get_collection_id(&params.collection)?;
// Careful, there's some weirdness here!
@ -47,7 +46,7 @@ pub fn create(db: &MysqlDb, params: params::CreateBatch) -> Result<results::Crea
.map_err(|e| -> DbError {
match e {
// The user tried to create two batches with the same timestamp
DieselError::DatabaseError(UniqueViolation, _) => DbErrorKind::Conflict.into(),
DieselError::DatabaseError(UniqueViolation, _) => DbError::conflict(),
_ => e.into(),
}
})?;
@ -59,7 +58,7 @@ pub fn create(db: &MysqlDb, params: params::CreateBatch) -> Result<results::Crea
})
}
pub fn validate(db: &MysqlDb, params: params::ValidateBatch) -> Result<bool> {
pub fn validate(db: &MysqlDb, params: params::ValidateBatch) -> DbResult<bool> {
let batch_id = decode_id(&params.id)?;
// Avoid hitting the db for batches that are obviously too old. Recall
// that the batchid is a millisecond timestamp.
@ -79,7 +78,7 @@ pub fn validate(db: &MysqlDb, params: params::ValidateBatch) -> Result<bool> {
Ok(exists.is_some())
}
pub fn append(db: &MysqlDb, params: params::AppendToBatch) -> Result<()> {
pub fn append(db: &MysqlDb, params: params::AppendToBatch) -> DbResult<()> {
let exists = validate(
db,
params::ValidateBatch {
@ -90,7 +89,7 @@ pub fn append(db: &MysqlDb, params: params::AppendToBatch) -> Result<()> {
)?;
if !exists {
Err(DbErrorKind::BatchNotFound)?
return Err(DbError::batch_not_found());
}
let batch_id = decode_id(&params.batch.id)?;
@ -99,7 +98,7 @@ pub fn append(db: &MysqlDb, params: params::AppendToBatch) -> Result<()> {
Ok(())
}
pub fn get(db: &MysqlDb, params: params::GetBatch) -> Result<Option<results::GetBatch>> {
pub fn get(db: &MysqlDb, params: params::GetBatch) -> DbResult<Option<results::GetBatch>> {
let is_valid = validate(
db,
params::ValidateBatch {
@ -116,7 +115,7 @@ pub fn get(db: &MysqlDb, params: params::GetBatch) -> Result<Option<results::Get
Ok(batch)
}
pub fn delete(db: &MysqlDb, params: params::DeleteBatch) -> Result<()> {
pub fn delete(db: &MysqlDb, params: params::DeleteBatch) -> DbResult<()> {
let batch_id = decode_id(&params.id)?;
let user_id = params.user_id.legacy_id as i64;
let collection_id = db.get_collection_id(&params.collection)?;
@ -133,7 +132,7 @@ pub fn delete(db: &MysqlDb, params: params::DeleteBatch) -> Result<()> {
}
/// Commits a batch to the bsos table, deleting the batch when successful
pub fn commit(db: &MysqlDb, params: params::CommitBatch) -> Result<results::CommitBatch> {
pub fn commit(db: &MysqlDb, params: params::CommitBatch) -> DbResult<results::CommitBatch> {
let batch_id = decode_id(&params.batch.id)?;
let user_id = params.user_id.legacy_id as i64;
let collection_id = db.get_collection_id(&params.collection)?;
@ -169,7 +168,7 @@ pub fn do_append(
user_id: UserIdentifier,
_collection_id: i32,
bsos: Vec<params::PostCollectionBso>,
) -> Result<()> {
) -> DbResult<()> {
fn exist_idx(user_id: u64, batch_id: i64, bso_id: &str) -> String {
// Construct something that matches the key for batch_upload_items
format!(
@ -253,7 +252,7 @@ pub fn do_append(
Ok(())
}
pub fn validate_batch_id(id: &str) -> Result<()> {
pub fn validate_batch_id(id: &str) -> DbResult<()> {
decode_id(id).map(|_| ())
}
@ -261,18 +260,17 @@ fn encode_id(id: i64) -> String {
base64::encode(id.to_string())
}
fn decode_id(id: &str) -> Result<i64> {
fn decode_id(id: &str) -> DbResult<i64> {
let bytes = base64::decode(id).unwrap_or_else(|_| id.as_bytes().to_vec());
let decoded = std::str::from_utf8(&bytes).unwrap_or(id);
decoded
.parse::<i64>()
.map_err(|e| DbError::internal(&format!("Invalid batch_id: {}", e)))
.map_err(|e| DbError::internal(format!("Invalid batch_id: {}", e)))
}
#[macro_export]
macro_rules! batch_db_method {
($name:ident, $batch_name:ident, $type:ident) => {
pub fn $name(&self, params: params::$type) -> Result<results::$type> {
pub fn $name(&self, params: params::$type) -> DbResult<results::$type> {
batch::$batch_name(self, params)
}
};

View File

@ -0,0 +1,144 @@
use std::fmt;
use backtrace::Backtrace;
use http::StatusCode;
use syncserver_common::{from_error, impl_fmt_display, InternalError, ReportableError};
use syncserver_db_common::error::MysqlError;
use syncstorage_db_common::error::{DbErrorIntrospect, SyncstorageDbError};
use thiserror::Error;
/// An error type that represents any MySQL-related errors that may occur while processing a
/// syncstorage request. These errors may be application-specific or lower-level errors that arise
/// from the database backend.
#[derive(Debug)]
pub struct DbError {
kind: DbErrorKind,
pub status: StatusCode,
pub backtrace: Box<Backtrace>,
}
impl DbError {
pub fn batch_not_found() -> Self {
DbErrorKind::Common(SyncstorageDbError::batch_not_found()).into()
}
pub fn bso_not_found() -> Self {
DbErrorKind::Common(SyncstorageDbError::bso_not_found()).into()
}
pub fn collection_not_found() -> Self {
DbErrorKind::Common(SyncstorageDbError::collection_not_found()).into()
}
pub fn conflict() -> Self {
DbErrorKind::Common(SyncstorageDbError::conflict()).into()
}
pub fn internal(msg: String) -> Self {
DbErrorKind::Common(SyncstorageDbError::internal(msg)).into()
}
pub fn quota() -> Self {
DbErrorKind::Common(SyncstorageDbError::quota()).into()
}
}
#[derive(Debug, Error)]
enum DbErrorKind {
#[error("{}", _0)]
Common(SyncstorageDbError),
#[error("{}", _0)]
Mysql(MysqlError),
}
impl From<DbErrorKind> for DbError {
fn from(kind: DbErrorKind) -> Self {
match &kind {
DbErrorKind::Common(dbe) => Self {
status: dbe.status,
backtrace: Box::new(dbe.backtrace.clone()),
kind,
},
_ => Self {
kind,
status: StatusCode::INTERNAL_SERVER_ERROR,
backtrace: Box::new(Backtrace::new()),
},
}
}
}
impl DbErrorIntrospect for DbError {
fn is_batch_not_found(&self) -> bool {
matches!(&self.kind, DbErrorKind::Common(e) if e.is_batch_not_found())
}
fn is_bso_not_found(&self) -> bool {
matches!(&self.kind, DbErrorKind::Common(e) if e.is_bso_not_found())
}
fn is_collection_not_found(&self) -> bool {
matches!(&self.kind, DbErrorKind::Common(e) if e.is_collection_not_found())
}
fn is_conflict(&self) -> bool {
matches!(&self.kind, DbErrorKind::Common(e) if e.is_conflict())
}
fn is_quota(&self) -> bool {
matches!(&self.kind, DbErrorKind::Common(e) if e.is_quota())
}
}
impl ReportableError for DbError {
fn is_sentry_event(&self) -> bool {
matches!(&self.kind, DbErrorKind::Common(e) if e.is_sentry_event())
}
fn metric_label(&self) -> Option<String> {
if let DbErrorKind::Common(e) = &self.kind {
e.metric_label()
} else {
None
}
}
fn error_backtrace(&self) -> String {
format!("{:#?}", self.backtrace)
}
}
impl InternalError for DbError {
fn internal_error(message: String) -> Self {
DbErrorKind::Common(SyncstorageDbError::internal(message)).into()
}
}
impl_fmt_display!(DbError, DbErrorKind);
from_error!(SyncstorageDbError, DbError, DbErrorKind::Common);
from_error!(
diesel::result::Error,
DbError,
|error: diesel::result::Error| DbError::from(DbErrorKind::Mysql(MysqlError::from(error)))
);
from_error!(
diesel::result::ConnectionError,
DbError,
|error: diesel::result::ConnectionError| DbError::from(DbErrorKind::Mysql(MysqlError::from(
error
)))
);
from_error!(
diesel::r2d2::PoolError,
DbError,
|error: diesel::r2d2::PoolError| DbError::from(DbErrorKind::Mysql(MysqlError::from(error)))
);
from_error!(
diesel_migrations::RunMigrationsError,
DbError,
|error: diesel_migrations::RunMigrationsError| DbError::from(DbErrorKind::Mysql(
MysqlError::from(error)
))
);
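
A small sketch of the intended flow for this error type, written as if it sat inside this module (where DbError and DbErrorIntrospect are already in scope): application-level failures come from the shared constructors, diesel failures convert through the from_error! impls above, and callers branch via the introspection trait.

fn require_batch(found: bool) -> Result<(), DbError> {
    if !found {
        // Application-level failure built from the shared constructor; a failing
        // diesel call would instead convert into DbError automatically via `?`.
        return Err(DbError::batch_not_found());
    }
    Ok(())
}

fn demo() {
    let err = require_batch(false).unwrap_err();
    // Introspection and the 400 status both come from the wrapped common error.
    assert!(err.is_batch_not_found());
}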

View File

@ -0,0 +1,22 @@
#[macro_use]
extern crate diesel;
#[macro_use]
extern crate diesel_migrations;
#[macro_use]
extern crate slog_scope;
#[macro_use]
mod batch;
mod diesel_ext;
mod error;
mod models;
mod pool;
mod schema;
#[cfg(test)]
mod test;
pub use error::DbError;
pub use models::MysqlDb;
pub use pool::MysqlDbPool;
pub(crate) type DbResult<T> = Result<T, error::DbError>;

View File

@ -13,44 +13,43 @@ use diesel::{
sql_types::{BigInt, Integer, Nullable, Text},
Connection, ExpressionMethods, GroupByDsl, OptionalExtension, QueryDsl, RunQueryDsl,
};
#[cfg(test)]
#[cfg(debug_assertions)]
use diesel_logger::LoggingConnection;
use syncserver_db_common::{
error::{DbError, DbErrorKind},
params, results,
util::SyncTimestamp,
Db, DbFuture, Sorting, UserIdentifier, DEFAULT_BSO_TTL,
use syncserver_common::{BlockingThreadpool, Metrics};
use syncserver_db_common::{sync_db_method, DbFuture};
use syncstorage_db_common::{
error::DbErrorIntrospect, params, results, util::SyncTimestamp, Db, Sorting, UserIdentifier,
DEFAULT_BSO_TTL,
};
use syncstorage_settings::{Quota, DEFAULT_MAX_TOTAL_RECORDS};
use super::{
batch,
diesel_ext::LockInShareModeDsl,
error::DbError,
pool::CollectionCache,
schema::{bso, collections, user_collections},
DbResult,
};
use crate::db::BlockingThreadpool;
use crate::server::metrics::Metrics;
pub type Result<T> = std::result::Result<T, DbError>;
type Conn = PooledConnection<ConnectionManager<MysqlConnection>>;
// this is the max number of records we will return.
pub static DEFAULT_LIMIT: u32 = DEFAULT_MAX_TOTAL_RECORDS;
static DEFAULT_LIMIT: u32 = DEFAULT_MAX_TOTAL_RECORDS;
pub const TOMBSTONE: i32 = 0;
const TOMBSTONE: i32 = 0;
/// SQL Variable remapping
/// These names are the legacy values mapped to the new names.
pub const COLLECTION_ID: &str = "collection";
pub const USER_ID: &str = "userid";
pub const MODIFIED: &str = "modified";
pub const EXPIRY: &str = "ttl";
pub const LAST_MODIFIED: &str = "last_modified";
pub const COUNT: &str = "count";
pub const TOTAL_BYTES: &str = "total_bytes";
const COLLECTION_ID: &str = "collection";
const USER_ID: &str = "userid";
const MODIFIED: &str = "modified";
const EXPIRY: &str = "ttl";
const LAST_MODIFIED: &str = "last_modified";
const COUNT: &str = "count";
const TOTAL_BYTES: &str = "total_bytes";
#[derive(Debug)]
pub enum CollectionLock {
enum CollectionLock {
Read,
Write,
}
@ -71,8 +70,8 @@ struct MysqlDbSession {
#[derive(Clone, Debug)]
pub struct MysqlDb {
/// Synchronous Diesel calls are executed in tokio::task::spawn_blocking to satisfy
/// the Db trait's asynchronous interface.
/// Synchronous Diesel calls are executed in web::block to satisfy the Db trait's asynchronous
/// interface.
///
/// Arc<MysqlDbInner> provides a Clone impl utilized for safely moving to
/// the thread pool but does not provide Send as the underlying db
@ -94,9 +93,9 @@ pub struct MysqlDb {
unsafe impl Send for MysqlDb {}
pub struct MysqlDbInner {
#[cfg(not(test))]
#[cfg(not(debug_assertions))]
pub(super) conn: Conn,
#[cfg(test)]
#[cfg(debug_assertions)]
pub(super) conn: LoggingConnection<Conn>, // display SQL when RUST_LOG="diesel_logger=trace"
session: RefCell<MysqlDbSession>,
@ -117,7 +116,7 @@ impl Deref for MysqlDb {
}
impl MysqlDb {
pub fn new(
pub(super) fn new(
conn: Conn,
coll_cache: Arc<CollectionCache>,
metrics: &Metrics,
@ -125,9 +124,9 @@ impl MysqlDb {
blocking_threadpool: Arc<BlockingThreadpool>,
) -> Self {
let inner = MysqlDbInner {
#[cfg(not(test))]
#[cfg(not(debug_assertions))]
conn,
#[cfg(test)]
#[cfg(debug_assertions)]
conn: LoggingConnection::new(conn),
session: RefCell::new(Default::default()),
};
@ -149,7 +148,7 @@ impl MysqlDb {
/// In theory it would be possible to use serializable transactions rather
/// than explicit locking, but our ops team have expressed concerns about
/// the efficiency of that approach at scale.
pub fn lock_for_read_sync(&self, params: params::LockCollection) -> Result<()> {
fn lock_for_read_sync(&self, params: params::LockCollection) -> DbResult<()> {
let user_id = params.user_id.legacy_id as i64;
let collection_id = self.get_collection_id(&params.collection).or_else(|e| {
if e.is_collection_not_found() {
@ -196,7 +195,7 @@ impl MysqlDb {
Ok(())
}
pub fn lock_for_write_sync(&self, params: params::LockCollection) -> Result<()> {
fn lock_for_write_sync(&self, params: params::LockCollection) -> DbResult<()> {
let user_id = params.user_id.legacy_id as i64;
let collection_id = self.get_or_create_collection_id(&params.collection)?;
if let Some(CollectionLock::Read) = self
@ -205,7 +204,9 @@ impl MysqlDb {
.coll_locks
.get(&(user_id as u32, collection_id))
{
Err(DbError::internal("Can't escalate read-lock to write-lock"))?
return Err(DbError::internal(
"Can't escalate read-lock to write-lock".to_owned(),
));
}
// Lock the db
@ -221,7 +222,7 @@ impl MysqlDb {
let modified = SyncTimestamp::from_i64(modified)?;
// Forbid the write if it would not properly incr the timestamp
if modified >= self.timestamp() {
Err(DbErrorKind::Conflict)?
return Err(DbError::conflict());
}
self.session
.borrow_mut()
@ -235,7 +236,7 @@ impl MysqlDb {
Ok(())
}
pub(super) fn begin(&self, for_write: bool) -> Result<()> {
pub(super) fn begin(&self, for_write: bool) -> DbResult<()> {
self.conn
.transaction_manager()
.begin_transaction(&self.conn)?;
@ -246,11 +247,11 @@ impl MysqlDb {
Ok(())
}
pub async fn begin_async(&self, for_write: bool) -> Result<()> {
async fn begin_async(&self, for_write: bool) -> DbResult<()> {
self.begin(for_write)
}
pub fn commit_sync(&self) -> Result<()> {
fn commit_sync(&self) -> DbResult<()> {
if self.session.borrow().in_transaction {
self.conn
.transaction_manager()
@ -259,7 +260,7 @@ impl MysqlDb {
Ok(())
}
pub fn rollback_sync(&self) -> Result<()> {
fn rollback_sync(&self) -> DbResult<()> {
if self.session.borrow().in_transaction {
self.conn
.transaction_manager()
@ -268,7 +269,7 @@ impl MysqlDb {
Ok(())
}
fn erect_tombstone(&self, user_id: i32) -> Result<()> {
fn erect_tombstone(&self, user_id: i32) -> DbResult<()> {
sql_query(format!(
r#"INSERT INTO user_collections ({user_id}, {collection_id}, {modified})
VALUES (?, ?, ?)
@ -285,7 +286,7 @@ impl MysqlDb {
Ok(())
}
pub fn delete_storage_sync(&self, user_id: UserIdentifier) -> Result<()> {
fn delete_storage_sync(&self, user_id: UserIdentifier) -> DbResult<()> {
let user_id = user_id.legacy_id as i64;
// Delete user data.
delete(bso::table)
@ -301,10 +302,7 @@ impl MysqlDb {
// Deleting the collection should result in:
// - collection does not appear in /info/collections
// - X-Last-Modified timestamp at the storage level changing
pub fn delete_collection_sync(
&self,
params: params::DeleteCollection,
) -> Result<SyncTimestamp> {
fn delete_collection_sync(&self, params: params::DeleteCollection) -> DbResult<SyncTimestamp> {
let user_id = params.user_id.legacy_id as i64;
let collection_id = self.get_collection_id(&params.collection)?;
let mut count = delete(bso::table)
@ -316,14 +314,14 @@ impl MysqlDb {
.filter(user_collections::collection_id.eq(&collection_id))
.execute(&self.conn)?;
if count == 0 {
Err(DbErrorKind::CollectionNotFound)?
return Err(DbError::collection_not_found());
} else {
self.erect_tombstone(user_id as i32)?;
}
self.get_storage_timestamp_sync(params.user_id)
}
pub(super) fn get_or_create_collection_id(&self, name: &str) -> Result<i32> {
pub(super) fn get_or_create_collection_id(&self, name: &str) -> DbResult<i32> {
if let Some(id) = self.coll_cache.get_id(name)? {
return Ok(id);
}
@ -346,7 +344,7 @@ impl MysqlDb {
Ok(id)
}
pub(super) fn get_collection_id(&self, name: &str) -> Result<i32> {
pub(super) fn get_collection_id(&self, name: &str) -> DbResult<i32> {
if let Some(id) = self.coll_cache.get_id(name)? {
return Ok(id);
}
@ -359,7 +357,7 @@ impl MysqlDb {
.bind::<Text, _>(name)
.get_result::<IdResult>(&self.conn)
.optional()?
.ok_or(DbErrorKind::CollectionNotFound)?
.ok_or_else(DbError::collection_not_found)?
.id;
if !self.session.borrow().in_write_transaction {
self.coll_cache.put(id, name.to_owned())?;
@ -367,7 +365,7 @@ impl MysqlDb {
Ok(id)
}
fn _get_collection_name(&self, id: i32) -> Result<String> {
fn _get_collection_name(&self, id: i32) -> DbResult<String> {
let name = if let Some(name) = self.coll_cache.get_name(id)? {
name
} else {
@ -379,13 +377,13 @@ impl MysqlDb {
.bind::<Integer, _>(&id)
.get_result::<NameResult>(&self.conn)
.optional()?
.ok_or(DbErrorKind::CollectionNotFound)?
.ok_or_else(DbError::collection_not_found)?
.name
};
Ok(name)
}
pub fn put_bso_sync(&self, bso: params::PutBso) -> Result<results::PutBso> {
fn put_bso_sync(&self, bso: params::PutBso) -> DbResult<results::PutBso> {
/*
if bso.payload.is_none() && bso.sortindex.is_none() && bso.ttl.is_none() {
// XXX: go returns an error here (ErrNothingToDo), and is treated
@ -408,7 +406,7 @@ impl MysqlDb {
tags.insert("collection".to_owned(), bso.collection.clone());
self.metrics.incr_with_tags("storage.quota.at_limit", tags);
if self.quota.enforced {
return Err(DbErrorKind::Quota.into());
return Err(DbError::quota());
} else {
warn!("Quota at limit for user's collection ({} bytes)", usage.total_bytes; "collection"=>bso.collection.clone());
}
@ -476,7 +474,7 @@ impl MysqlDb {
})
}
pub fn get_bsos_sync(&self, params: params::GetBsos) -> Result<results::GetBsos> {
fn get_bsos_sync(&self, params: params::GetBsos) -> DbResult<results::GetBsos> {
let user_id = params.user_id.legacy_id as i64;
let collection_id = self.get_collection_id(&params.collection)?;
let now = self.timestamp().as_i64();
@ -566,7 +564,7 @@ impl MysqlDb {
})
}
pub fn get_bso_ids_sync(&self, params: params::GetBsos) -> Result<results::GetBsoIds> {
fn get_bso_ids_sync(&self, params: params::GetBsos) -> DbResult<results::GetBsoIds> {
let user_id = params.user_id.legacy_id as i64;
let collection_id = self.get_collection_id(&params.collection)?;
let mut query = bso::table
@ -629,7 +627,7 @@ impl MysqlDb {
})
}
pub fn get_bso_sync(&self, params: params::GetBso) -> Result<Option<results::GetBso>> {
fn get_bso_sync(&self, params: params::GetBso) -> DbResult<Option<results::GetBso>> {
let user_id = params.user_id.legacy_id as i64;
let collection_id = self.get_collection_id(&params.collection)?;
Ok(bso::table
@ -648,7 +646,7 @@ impl MysqlDb {
.optional()?)
}
pub fn delete_bso_sync(&self, params: params::DeleteBso) -> Result<results::DeleteBso> {
fn delete_bso_sync(&self, params: params::DeleteBso) -> DbResult<results::DeleteBso> {
let user_id = params.user_id.legacy_id;
let collection_id = self.get_collection_id(&params.collection)?;
let affected_rows = delete(bso::table)
@ -658,12 +656,12 @@ impl MysqlDb {
.filter(bso::expiry.gt(&self.timestamp().as_i64()))
.execute(&self.conn)?;
if affected_rows == 0 {
Err(DbErrorKind::BsoNotFound)?
return Err(DbError::bso_not_found());
}
self.update_collection(user_id as u32, collection_id)
}
pub fn delete_bsos_sync(&self, params: params::DeleteBsos) -> Result<results::DeleteBsos> {
fn delete_bsos_sync(&self, params: params::DeleteBsos) -> DbResult<results::DeleteBsos> {
let user_id = params.user_id.legacy_id as i64;
let collection_id = self.get_collection_id(&params.collection)?;
delete(bso::table)
@ -674,7 +672,7 @@ impl MysqlDb {
self.update_collection(user_id as u32, collection_id)
}
pub fn post_bsos_sync(&self, input: params::PostBsos) -> Result<results::PostBsos> {
fn post_bsos_sync(&self, input: params::PostBsos) -> DbResult<results::PostBsos> {
let collection_id = self.get_or_create_collection_id(&input.collection)?;
let mut result = results::PostBsos {
modified: self.timestamp(),
@ -707,20 +705,20 @@ impl MysqlDb {
Ok(result)
}
pub fn get_storage_timestamp_sync(&self, user_id: UserIdentifier) -> Result<SyncTimestamp> {
fn get_storage_timestamp_sync(&self, user_id: UserIdentifier) -> DbResult<SyncTimestamp> {
let user_id = user_id.legacy_id as i64;
let modified = user_collections::table
.select(max(user_collections::modified))
.filter(user_collections::user_id.eq(user_id))
.first::<Option<i64>>(&self.conn)?
.unwrap_or_default();
SyncTimestamp::from_i64(modified)
SyncTimestamp::from_i64(modified).map_err(Into::into)
}
pub fn get_collection_timestamp_sync(
fn get_collection_timestamp_sync(
&self,
params: params::GetCollectionTimestamp,
) -> Result<SyncTimestamp> {
) -> DbResult<SyncTimestamp> {
let user_id = params.user_id.legacy_id as u32;
let collection_id = self.get_collection_id(&params.collection)?;
if let Some(modified) = self
@ -737,10 +735,10 @@ impl MysqlDb {
.filter(user_collections::collection_id.eq(collection_id))
.first(&self.conn)
.optional()?
.ok_or_else(|| DbErrorKind::CollectionNotFound.into())
.ok_or_else(DbError::collection_not_found)
}
pub fn get_bso_timestamp_sync(&self, params: params::GetBsoTimestamp) -> Result<SyncTimestamp> {
fn get_bso_timestamp_sync(&self, params: params::GetBsoTimestamp) -> DbResult<SyncTimestamp> {
let user_id = params.user_id.legacy_id as i64;
let collection_id = self.get_collection_id(&params.collection)?;
let modified = bso::table
@ -751,13 +749,13 @@ impl MysqlDb {
.first::<i64>(&self.conn)
.optional()?
.unwrap_or_default();
SyncTimestamp::from_i64(modified)
SyncTimestamp::from_i64(modified).map_err(Into::into)
}
pub fn get_collection_timestamps_sync(
fn get_collection_timestamps_sync(
&self,
user_id: UserIdentifier,
) -> Result<results::GetCollectionTimestamps> {
) -> DbResult<results::GetCollectionTimestamps> {
let modifieds = sql_query(format!(
"SELECT {collection_id}, {modified}
FROM user_collections
@ -771,26 +769,29 @@ impl MysqlDb {
.bind::<Integer, _>(TOMBSTONE)
.load::<UserCollectionsResult>(&self.conn)?
.into_iter()
.map(|cr| SyncTimestamp::from_i64(cr.last_modified).map(|ts| (cr.collection, ts)))
.collect::<Result<HashMap<_, _>>>()?;
.map(|cr| {
SyncTimestamp::from_i64(cr.last_modified)
.map(|ts| (cr.collection, ts))
.map_err(Into::into)
})
.collect::<DbResult<HashMap<_, _>>>()?;
self.map_collection_names(modifieds)
}
fn check_sync(&self) -> Result<results::Check> {
fn check_sync(&self) -> DbResult<results::Check> {
// has the database been up for more than 0 seconds?
let result = sql_query("SHOW STATUS LIKE \"Uptime\"").execute(&self.conn)?;
Ok(result as u64 > 0)
}
fn map_collection_names<T>(&self, by_id: HashMap<i32, T>) -> Result<HashMap<String, T>> {
fn map_collection_names<T>(&self, by_id: HashMap<i32, T>) -> DbResult<HashMap<String, T>> {
let mut names = self.load_collection_names(by_id.keys())?;
by_id
.into_iter()
.map(|(id, value)| {
names
.remove(&id)
.map(|name| (name, value))
.ok_or_else(|| DbError::internal("load_collection_names unknown collection id"))
names.remove(&id).map(|name| (name, value)).ok_or_else(|| {
DbError::internal("load_collection_names unknown collection id".to_owned())
})
})
.collect()
}
@ -798,7 +799,7 @@ impl MysqlDb {
fn load_collection_names<'a>(
&self,
collection_ids: impl Iterator<Item = &'a i32>,
) -> Result<HashMap<i32, String>> {
) -> DbResult<HashMap<i32, String>> {
let mut names = HashMap::new();
let mut uncached = Vec::new();
for &id in collection_ids {
@ -830,7 +831,7 @@ impl MysqlDb {
&self,
user_id: u32,
collection_id: i32,
) -> Result<SyncTimestamp> {
) -> DbResult<SyncTimestamp> {
let quota = if self.quota.enabled {
self.calc_quota_usage_sync(user_id, collection_id)?
} else {
@ -869,10 +870,10 @@ impl MysqlDb {
}
// Perform a lighter weight "read only" storage size check
pub fn get_storage_usage_sync(
fn get_storage_usage_sync(
&self,
user_id: UserIdentifier,
) -> Result<results::GetStorageUsage> {
) -> DbResult<results::GetStorageUsage> {
let uid = user_id.legacy_id as i64;
let total_bytes = bso::table
.select(sql::<Nullable<BigInt>>("SUM(LENGTH(payload))"))
@ -883,10 +884,10 @@ impl MysqlDb {
}
// Perform a lighter weight "read only" quota storage check
pub fn get_quota_usage_sync(
fn get_quota_usage_sync(
&self,
params: params::GetQuotaUsage,
) -> Result<results::GetQuotaUsage> {
) -> DbResult<results::GetQuotaUsage> {
let uid = params.user_id.legacy_id as i64;
let (total_bytes, count): (i64, i32) = user_collections::table
.select((
@ -905,11 +906,11 @@ impl MysqlDb {
}
// perform a heavier weight quota calculation
pub fn calc_quota_usage_sync(
fn calc_quota_usage_sync(
&self,
user_id: u32,
collection_id: i32,
) -> Result<results::GetQuotaUsage> {
) -> DbResult<results::GetQuotaUsage> {
let (total_bytes, count): (i64, i32) = bso::table
.select((
sql::<BigInt>(r#"COALESCE(SUM(LENGTH(COALESCE(payload, ""))),0)"#),
@ -927,10 +928,10 @@ impl MysqlDb {
})
}
pub fn get_collection_usage_sync(
fn get_collection_usage_sync(
&self,
user_id: UserIdentifier,
) -> Result<results::GetCollectionUsage> {
) -> DbResult<results::GetCollectionUsage> {
let counts = bso::table
.select((bso::collection_id, sql::<BigInt>("SUM(LENGTH(payload))")))
.filter(bso::user_id.eq(user_id.legacy_id as i64))
@ -942,10 +943,10 @@ impl MysqlDb {
self.map_collection_names(counts)
}
pub fn get_collection_counts_sync(
fn get_collection_counts_sync(
&self,
user_id: UserIdentifier,
) -> Result<results::GetCollectionCounts> {
) -> DbResult<results::GetCollectionCounts> {
let counts = bso::table
.select((
bso::collection_id,
@ -969,51 +970,34 @@ impl MysqlDb {
batch_db_method!(commit_batch_sync, commit, CommitBatch);
batch_db_method!(delete_batch_sync, delete, DeleteBatch);
pub fn get_batch_sync(&self, params: params::GetBatch) -> Result<Option<results::GetBatch>> {
fn get_batch_sync(&self, params: params::GetBatch) -> DbResult<Option<results::GetBatch>> {
batch::get(self, params)
}
pub fn timestamp(&self) -> SyncTimestamp {
pub(super) fn timestamp(&self) -> SyncTimestamp {
self.session.borrow().timestamp
}
}
#[macro_export]
macro_rules! sync_db_method {
($name:ident, $sync_name:ident, $type:ident) => {
sync_db_method!($name, $sync_name, $type, results::$type);
};
($name:ident, $sync_name:ident, $type:ident, $result:ty) => {
fn $name(&self, params: params::$type) -> DbFuture<'_, $result> {
let db = self.clone();
Box::pin(
self.blocking_threadpool
.spawn(move || db.$sync_name(params)),
)
}
};
}
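The `sync_db_method!` macro is now imported from syncserver-db-common rather than defined here, but the shape it generates appears unchanged: clone the cheap `Arc`-backed handle, run the blocking `*_sync` call on the blocking threadpool, and return the result as a boxed future. A self-contained sketch of that shape with stand-in types (the inline-executing `BlockingThreadpool` below is not the real syncserver-common type, and `block_on` from the futures crate is only used to drive the demo):

use std::future::Future;
use std::pin::Pin;
use std::sync::Arc;

// Boxed-future alias in the spirit of syncserver_db_common::DbFuture.
type DbFuture<'a, T, E> = Pin<Box<dyn Future<Output = Result<T, E>> + 'a>>;

// Stand-in for syncserver_common::BlockingThreadpool: the real one dispatches to
// dedicated blocking threads; this one runs the closure inline and wraps the
// result in an already-resolved future.
struct BlockingThreadpool;

impl BlockingThreadpool {
    fn spawn<T, E, F>(&self, f: F) -> impl Future<Output = Result<T, E>>
    where
        F: FnOnce() -> Result<T, E>,
    {
        std::future::ready(f())
    }
}

#[derive(Debug)]
struct DbError;

#[derive(Clone)]
struct MysqlDb {
    blocking_threadpool: Arc<BlockingThreadpool>,
}

impl MysqlDb {
    // A synchronous method in the style of check_sync/commit_sync.
    fn check_sync(&self) -> Result<bool, DbError> {
        Ok(true)
    }

    // The async wrapper shape that sync_db_method! is assumed to generate.
    fn check(&self) -> DbFuture<'_, bool, DbError> {
        let db = self.clone();
        Box::pin(self.blocking_threadpool.spawn(move || db.check_sync()))
    }
}

fn main() {
    let db = MysqlDb {
        blocking_threadpool: Arc::new(BlockingThreadpool),
    };
    let up = futures::executor::block_on(db.check()).expect("check failed");
    println!("database up: {}", up);
}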
impl<'a> Db<'a> for MysqlDb {
fn commit(&self) -> DbFuture<'_, ()> {
impl Db for MysqlDb {
type Error = DbError;
fn commit(&self) -> DbFuture<'_, (), Self::Error> {
let db = self.clone();
Box::pin(self.blocking_threadpool.spawn(move || db.commit_sync()))
}
fn rollback(&self) -> DbFuture<'_, ()> {
fn rollback(&self) -> DbFuture<'_, (), Self::Error> {
let db = self.clone();
Box::pin(self.blocking_threadpool.spawn(move || db.rollback_sync()))
}
fn begin(&self, for_write: bool) -> DbFuture<'_, ()> {
fn begin(&self, for_write: bool) -> DbFuture<'_, (), Self::Error> {
let db = self.clone();
Box::pin(async move { db.begin_async(for_write).map_err(Into::into).await })
}
fn box_clone(&self) -> Box<dyn Db<'a>> {
Box::new(self.clone())
}
fn check(&self) -> DbFuture<'_, results::Check> {
fn check(&self) -> DbFuture<'_, results::Check, Self::Error> {
let db = self.clone();
Box::pin(self.blocking_threadpool.spawn(move || db.check_sync()))
}
@ -1073,7 +1057,7 @@ impl<'a> Db<'a> for MysqlDb {
);
sync_db_method!(commit_batch, commit_batch_sync, CommitBatch);
fn get_collection_id(&self, name: String) -> DbFuture<'_, i32> {
fn get_collection_id(&self, name: String) -> DbFuture<'_, i32, Self::Error> {
let db = self.clone();
Box::pin(
self.blocking_threadpool
@ -1085,7 +1069,7 @@ impl<'a> Db<'a> for MysqlDb {
results::ConnectionInfo::default()
}
fn create_collection(&self, name: String) -> DbFuture<'_, i32> {
fn create_collection(&self, name: String) -> DbFuture<'_, i32, Self::Error> {
let db = self.clone();
Box::pin(
self.blocking_threadpool
@ -1093,7 +1077,10 @@ impl<'a> Db<'a> for MysqlDb {
)
}
fn update_collection(&self, param: params::UpdateCollection) -> DbFuture<'_, SyncTimestamp> {
fn update_collection(
&self,
param: params::UpdateCollection,
) -> DbFuture<'_, SyncTimestamp, Self::Error> {
let db = self.clone();
Box::pin(self.blocking_threadpool.spawn(move || {
db.update_collection(param.user_id.legacy_id as u32, param.collection_id)
@ -1110,7 +1097,7 @@ impl<'a> Db<'a> for MysqlDb {
sync_db_method!(delete_batch, delete_batch_sync, DeleteBatch);
fn clear_coll_cache(&self) -> DbFuture<'_, ()> {
fn clear_coll_cache(&self) -> DbFuture<'_, (), Self::Error> {
let db = self.clone();
Box::pin(self.blocking_threadpool.spawn(move || {
db.coll_cache.clear();
@ -1125,6 +1112,10 @@ impl<'a> Db<'a> for MysqlDb {
enforced,
}
}
fn box_clone(&self) -> Box<dyn Db<Error = Self::Error>> {
Box::new(self.clone())
}
}
#[derive(Debug, QueryableByName)]

View File

@ -12,16 +12,16 @@ use diesel::{
r2d2::{ConnectionManager, Pool},
Connection,
};
#[cfg(test)]
#[cfg(debug_assertions)]
use diesel_logger::LoggingConnection;
use syncserver_db_common::{error::DbError, Db, DbPool, GetPoolState, PoolState, STD_COLLS};
use syncserver_common::{BlockingThreadpool, Metrics};
#[cfg(debug_assertions)]
use syncserver_db_common::test::TestTransactionCustomizer;
use syncserver_db_common::{GetPoolState, PoolState};
use syncstorage_db_common::{Db, DbPool, STD_COLLS};
use syncstorage_settings::{Quota, Settings};
use super::models::{MysqlDb, Result};
#[cfg(test)]
use super::test::TestTransactionCustomizer;
use crate::db::BlockingThreadpool;
use crate::server::metrics::Metrics;
use super::{error::DbError, models::MysqlDb, DbResult};
embed_migrations!();
@ -29,13 +29,13 @@ embed_migrations!();
///
/// Mysql DDL statements implicitly commit which could disrupt MysqlPool's
/// begin_test_transaction during tests. So this runs on its own separate conn.
pub fn run_embedded_migrations(database_url: &str) -> Result<()> {
fn run_embedded_migrations(database_url: &str) -> DbResult<()> {
let conn = MysqlConnection::establish(database_url)?;
#[cfg(test)]
#[cfg(debug_assertions)]
// XXX: this doesn't show the DDL statements
// https://github.com/shssoichiro/diesel-logger/issues/1
embedded_migrations::run(&LoggingConnection::new(conn))?;
#[cfg(not(test))]
#[cfg(not(debug_assertions))]
embedded_migrations::run(&conn)?;
Ok(())
}
@ -61,7 +61,7 @@ impl MysqlDbPool {
settings: &Settings,
metrics: &Metrics,
blocking_threadpool: Arc<BlockingThreadpool>,
) -> Result<Self> {
) -> DbResult<Self> {
run_embedded_migrations(&settings.database_url)?;
Self::new_without_migrations(settings, metrics, blocking_threadpool)
}
@ -70,7 +70,7 @@ impl MysqlDbPool {
settings: &Settings,
metrics: &Metrics,
blocking_threadpool: Arc<BlockingThreadpool>,
) -> Result<Self> {
) -> DbResult<Self> {
let manager = ConnectionManager::<MysqlConnection>::new(settings.database_url.clone());
let builder = Pool::builder()
.max_size(settings.database_pool_max_size)
@ -79,7 +79,7 @@ impl MysqlDbPool {
))
.min_idle(settings.database_pool_min_idle);
#[cfg(test)]
#[cfg(debug_assertions)]
let builder = if settings.database_use_test_transactions {
builder.connection_customizer(Box::new(TestTransactionCustomizer))
} else {
@ -99,7 +99,7 @@ impl MysqlDbPool {
})
}
pub fn get_sync(&self) -> Result<MysqlDb> {
pub fn get_sync(&self) -> DbResult<MysqlDb> {
Ok(MysqlDb::new(
self.pool.get()?,
Arc::clone(&self.coll_cache),
@ -112,31 +112,25 @@ impl MysqlDbPool {
#[async_trait]
impl DbPool for MysqlDbPool {
async fn get<'a>(&'a self) -> Result<Box<dyn Db<'a>>> {
let pool = self.clone();
let db = self
.blocking_threadpool
.spawn(move || pool.get_sync())
.await?;
type Error = DbError;
Ok(Box::new(db) as Box<dyn Db<'a>>)
async fn get<'a>(&'a self) -> DbResult<Box<dyn Db<Error = Self::Error>>> {
let pool = self.clone();
self.blocking_threadpool
.spawn(move || pool.get_sync())
.await
.map(|db| Box::new(db) as Box<dyn Db<Error = Self::Error>>)
}
fn validate_batch_id(&self, id: String) -> Result<()> {
fn validate_batch_id(&self, id: String) -> DbResult<()> {
super::batch::validate_batch_id(&id)
}
fn box_clone(&self) -> Box<dyn DbPool> {
fn box_clone(&self) -> Box<dyn DbPool<Error = Self::Error>> {
Box::new(self.clone())
}
}
impl GetPoolState for MysqlDbPool {
fn state(&self) -> PoolState {
self.pool.state().into()
}
}
impl fmt::Debug for MysqlDbPool {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.debug_struct("MysqlDbPool")
@ -145,42 +139,48 @@ impl fmt::Debug for MysqlDbPool {
}
}
impl GetPoolState for MysqlDbPool {
fn state(&self) -> PoolState {
self.pool.state().into()
}
}
#[derive(Debug)]
pub struct CollectionCache {
pub(super) struct CollectionCache {
pub by_name: RwLock<HashMap<String, i32>>,
pub by_id: RwLock<HashMap<i32, String>>,
}
impl CollectionCache {
pub fn put(&self, id: i32, name: String) -> Result<()> {
pub fn put(&self, id: i32, name: String) -> DbResult<()> {
// XXX: should this emit a metric?
// XXX: should probably either lock both simultaneously during
// writes or use an RwLock alternative
self.by_name
.write()
.map_err(|_| DbError::internal("by_name write"))?
.map_err(|_| DbError::internal("by_name write".to_owned()))?
.insert(name.clone(), id);
self.by_id
.write()
.map_err(|_| DbError::internal("by_id write"))?
.map_err(|_| DbError::internal("by_id write".to_owned()))?
.insert(id, name);
Ok(())
}
pub fn get_id(&self, name: &str) -> Result<Option<i32>> {
pub fn get_id(&self, name: &str) -> DbResult<Option<i32>> {
Ok(self
.by_name
.read()
.map_err(|_| DbError::internal("by_name read"))?
.map_err(|_| DbError::internal("by_name read".to_owned()))?
.get(name)
.cloned())
}
pub fn get_name(&self, id: i32) -> Result<Option<String>> {
pub fn get_name(&self, id: i32) -> DbResult<Option<String>> {
Ok(self
.by_id
.read()
.map_err(|_| DbError::internal("by_id read"))?
.map_err(|_| DbError::internal("by_id read".to_owned()))?
.get(&id)
.cloned())
}

View File

@ -1,49 +1,32 @@
use std::{collections::HashMap, result::Result as StdResult, sync::Arc};
use std::{collections::HashMap, sync::Arc};
use diesel::{
// expression_methods::TextExpressionMethods, // See note below about `not_like` becoming swedish
mysql::MysqlConnection,
r2d2::{CustomizeConnection, Error as PoolError},
Connection,
ExpressionMethods,
QueryDsl,
RunQueryDsl,
};
use syncserver_common::{BlockingThreadpool, Metrics};
use syncserver_settings::Settings as SyncserverSettings;
use syncstorage_settings::Settings as SyncstorageSettings;
use url::Url;
use crate::db::mysql::{
models::{MysqlDb, Result},
pool::MysqlDbPool,
schema::collections,
};
use crate::db::BlockingThreadpool;
use crate::server::metrics;
use crate::{models::MysqlDb, pool::MysqlDbPool, schema::collections, DbResult};
#[derive(Debug)]
pub struct TestTransactionCustomizer;
impl CustomizeConnection<MysqlConnection, PoolError> for TestTransactionCustomizer {
fn on_acquire(&self, conn: &mut MysqlConnection) -> StdResult<(), PoolError> {
conn.begin_test_transaction().map_err(PoolError::QueryError)
}
}
pub fn db(settings: &SyncstorageSettings) -> Result<MysqlDb> {
pub fn db(settings: &SyncstorageSettings) -> DbResult<MysqlDb> {
let _ = env_logger::try_init();
// inherit SYNC_SYNCSTORAGE__DATABASE_URL from the env
let pool = MysqlDbPool::new(
settings,
&metrics::Metrics::noop(),
&Metrics::noop(),
Arc::new(BlockingThreadpool::default()),
)?;
pool.get_sync()
}
#[test]
fn static_collection_id() -> Result<()> {
fn static_collection_id() -> DbResult<()> {
let settings = SyncserverSettings::test_settings().syncstorage;
if Url::parse(&settings.database_url).unwrap().scheme() != "mysql" {
// Skip this test if we're not using mysql

View File

@ -75,6 +75,7 @@ pub struct Settings {
pub database_pool_connection_lifespan: Option<u32>,
/// Max time a connection should sit idle before being dropped.
pub database_pool_connection_max_idle: Option<u32>,
#[cfg(debug_assertions)]
pub database_use_test_transactions: bool,
/// Server-enforced limits for request payloads.
@ -105,6 +106,7 @@ impl Default for Settings {
database_pool_connection_lifespan: None,
database_pool_connection_max_idle: None,
database_pool_connection_timeout: Some(30),
#[cfg(debug_assertions)]
database_use_test_transactions: false,
limits: ServerLimits::default(),
statsd_label: "syncstorage".to_string(),

View File

@ -0,0 +1,41 @@
[package]
name = "syncstorage-spanner"
version = "0.12.3"
edition = "2021"
[dependencies]
async-trait = "0.1.40"
backtrace = "0.3.61"
cadence = "0.26"
# Pin to 0.5 for now, to keep it under tokio 0.2 (issue977).
# Fix for #803 (deadpool#92) points to our fork for now
#deadpool = "0.5" # pin to 0.5
deadpool = { git = "https://github.com/mozilla-services/deadpool", branch = "deadpool-v0.5.2-issue92" }
env_logger = "0.9"
futures = { version = "0.3", features = ["compat"] }
google-cloud-rust-raw = "0.11.0"
# Some versions of OpenSSL 1.1.1 conflict with grpcio's built-in boringssl which can cause
# syncserver to fail to either compile or start. In those cases, try
# `cargo build --features grpcio/openssl ...`
grpcio = { version = "0.9" }
http = "0.2.5"
log = { version = "0.4", features = [
"max_level_debug",
"release_max_level_info",
] }
# must match what's used by googleapis-raw
protobuf = "2.20.0"
slog-scope = "4.3"
syncserver-common = { path = "../syncserver-common" }
syncserver-db-common = { path = "../syncserver-db-common" }
syncstorage-db-common = { path = "../syncstorage-db-common" }
syncstorage-settings = { path = "../syncstorage-settings" }
thiserror = "1.0.26"
# pinning to 0.2.4 due to high number of dependencies (actix, bb8, deadpool, etc.)
tokio = { version = "0.2.4", features = ["macros", "sync"] }
url = "2.1"
uuid = { version = "0.8.2", features = ["serde", "v4"] }
[[bin]]
name = "purge_ttl"
path = "src/bin/purge_ttl.rs"

View File

@ -8,24 +8,24 @@ use protobuf::{
well_known_types::{ListValue, Value},
RepeatedField,
};
use syncserver_db_common::{
error::{DbError, DbErrorKind},
params, results,
util::to_rfc3339,
UserIdentifier, BATCH_LIFETIME, DEFAULT_BSO_TTL,
use syncstorage_db_common::{
params, results, util::to_rfc3339, UserIdentifier, BATCH_LIFETIME, DEFAULT_BSO_TTL,
};
use uuid::Uuid;
use super::models::{Result, SpannerDb, PRETOUCH_TS};
use crate::error::DbError;
use super::models::{SpannerDb, PRETOUCH_TS};
use super::support::{as_type, null_value, struct_type_field, IntoSpannerValue};
use super::DbResult;
pub async fn create_async(
db: &SpannerDb,
params: params::CreateBatch,
) -> Result<results::CreateBatch> {
) -> DbResult<results::CreateBatch> {
let batch_id = Uuid::new_v4().to_simple().to_string();
let collection_id = db.get_collection_id_async(&params.collection).await?;
let timestamp = db.timestamp()?.as_i64();
let timestamp = db.checked_timestamp()?.as_i64();
// Ensure a parent record exists in user_collections before writing to batches
// (INTERLEAVE IN PARENT user_collections)
@ -66,13 +66,13 @@ pub async fn create_async(
Ok(new_batch)
}
pub async fn validate_async(db: &SpannerDb, params: params::ValidateBatch) -> Result<bool> {
pub async fn validate_async(db: &SpannerDb, params: params::ValidateBatch) -> DbResult<bool> {
let exists = get_async(db, params.into()).await?;
Ok(exists.is_some())
}
// Append a collection to a pending batch (`create_batch` creates a new batch)
pub async fn append_async(db: &SpannerDb, params: params::AppendToBatch) -> Result<()> {
pub async fn append_async(db: &SpannerDb, params: params::AppendToBatch) -> DbResult<()> {
let mut metrics = db.metrics.clone();
metrics.start_timer("storage.spanner.append_items_to_batch", None);
let collection_id = db.get_collection_id_async(&params.collection).await?;
@ -98,7 +98,7 @@ pub async fn append_async(db: &SpannerDb, params: params::AppendToBatch) -> Resu
if !exists {
// NOTE: the db tests expect this, but it doesn't seem necessary w/ the
// handler validating the batch before appends
Err(DbErrorKind::BatchNotFound)?
return Err(DbError::batch_not_found());
}
do_append_async(
@ -116,7 +116,7 @@ pub async fn append_async(db: &SpannerDb, params: params::AppendToBatch) -> Resu
pub async fn get_async(
db: &SpannerDb,
params: params::GetBatch,
) -> Result<Option<results::GetBatch>> {
) -> DbResult<Option<results::GetBatch>> {
let collection_id = db.get_collection_id_async(&params.collection).await?;
let (sqlparams, sqlparam_types) = params! {
"fxa_uid" => params.user_id.fxa_uid.clone(),
@ -143,7 +143,7 @@ pub async fn get_async(
Ok(batch)
}
pub async fn delete_async(db: &SpannerDb, params: params::DeleteBatch) -> Result<()> {
pub async fn delete_async(db: &SpannerDb, params: params::DeleteBatch) -> DbResult<()> {
let collection_id = db.get_collection_id_async(&params.collection).await?;
let (sqlparams, sqlparam_types) = params! {
"fxa_uid" => params.user_id.fxa_uid.clone(),
@ -170,7 +170,7 @@ pub async fn delete_async(db: &SpannerDb, params: params::DeleteBatch) -> Result
pub async fn commit_async(
db: &SpannerDb,
params: params::CommitBatch,
) -> Result<results::CommitBatch> {
) -> DbResult<results::CommitBatch> {
let mut metrics = db.metrics.clone();
metrics.start_timer("storage.spanner.apply_batch", None);
let collection_id = db.get_collection_id_async(&params.collection).await?;
@ -249,7 +249,7 @@ pub async fn do_append_async(
batch: results::CreateBatch,
bsos: Vec<params::PostCollectionBso>,
collection: &str,
) -> Result<()> {
) -> DbResult<()> {
// Pass an array of struct objects as @values (for UNNEST), e.g.:
// [("<fxa_uid>", "<fxa_kid>", 101, "ba1", "bso1", NULL, "payload1", NULL),
// ("<fxa_uid>", "<fxa_kid>", 101, "ba1", "bso2", NULL, "payload2", NULL)]
@ -528,7 +528,7 @@ async fn pretouch_collection_async(
db: &SpannerDb,
user_id: &UserIdentifier,
collection_id: i32,
) -> Result<()> {
) -> DbResult<()> {
let (mut sqlparams, mut sqlparam_types) = params! {
"fxa_uid" => user_id.fxa_uid.clone(),
"fxa_kid" => user_id.fxa_kid.clone(),
@ -569,8 +569,8 @@ async fn pretouch_collection_async(
Ok(())
}
pub fn validate_batch_id(id: &str) -> Result<()> {
pub fn validate_batch_id(id: &str) -> DbResult<()> {
Uuid::from_str(id)
.map(|_| ())
.map_err(|e| DbError::internal(&format!("Invalid batch_id: {}", e)))
.map_err(|e| DbError::internal(format!("Invalid batch_id: {}", e)))
}
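For reference, the validation contract here is simple: a Spanner batch id is just the simple (non-hyphenated) string form of a UUIDv4, so anything `Uuid::from_str` rejects is an invalid batch. A small illustrative sketch, with a plain `String` error standing in for the real `DbError` and assuming the `uuid` crate (with the `v4` feature) from the Cargo.toml above:

use std::str::FromStr;

use uuid::Uuid;

// Same check as validate_batch_id above, minus the crate-specific error type.
fn validate_batch_id(id: &str) -> Result<(), String> {
    Uuid::from_str(id)
        .map(|_| ())
        .map_err(|e| format!("Invalid batch_id: {}", e))
}

fn main() {
    // create_async builds ids the same way: Uuid::new_v4().to_simple().to_string()
    let good = Uuid::new_v4().to_simple().to_string();
    assert!(validate_batch_id(&good).is_ok());
    assert!(validate_batch_id("not-a-batch-id").is_err());
    println!("batch id validation behaves as expected");
}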

View File

@ -0,0 +1,151 @@
use std::fmt;
use backtrace::Backtrace;
use http::StatusCode;
use syncserver_common::{from_error, impl_fmt_display, InternalError, ReportableError};
use syncstorage_db_common::error::{DbErrorIntrospect, SyncstorageDbError};
use thiserror::Error;
/// An error type that represents any Spanner-related error that may occur while processing a
/// syncstorage request. These errors may be application-specific or lower-level errors that arise
/// from the database backend.
#[derive(Debug)]
pub struct DbError {
kind: DbErrorKind,
pub status: StatusCode,
pub backtrace: Box<Backtrace>,
}
impl DbError {
pub fn batch_not_found() -> Self {
DbErrorKind::Common(SyncstorageDbError::batch_not_found()).into()
}
pub fn bso_not_found() -> Self {
DbErrorKind::Common(SyncstorageDbError::bso_not_found()).into()
}
pub fn collection_not_found() -> Self {
DbErrorKind::Common(SyncstorageDbError::collection_not_found()).into()
}
pub fn conflict() -> Self {
DbErrorKind::Common(SyncstorageDbError::conflict()).into()
}
pub fn expired() -> Self {
DbErrorKind::Expired.into()
}
pub fn integrity(msg: String) -> Self {
DbErrorKind::Integrity(msg).into()
}
pub fn internal(msg: String) -> Self {
DbErrorKind::Common(SyncstorageDbError::internal(msg)).into()
}
pub fn quota() -> Self {
DbErrorKind::Common(SyncstorageDbError::quota()).into()
}
pub fn too_large(msg: String) -> Self {
DbErrorKind::TooLarge(msg).into()
}
}
#[derive(Debug, Error)]
enum DbErrorKind {
#[error("{}", _0)]
Common(SyncstorageDbError),
#[error("Connection expired")]
Expired,
#[error("A database error occurred: {}", _0)]
Grpc(#[from] grpcio::Error),
#[error("Database integrity error: {}", _0)]
Integrity(String),
#[error("Spanner data load too large: {}", _0)]
TooLarge(String),
}
impl From<DbErrorKind> for DbError {
fn from(kind: DbErrorKind) -> Self {
let status = match &kind {
DbErrorKind::Common(e) => e.status,
// Matching the Python code here (a 400 vs 404)
DbErrorKind::TooLarge(_) => StatusCode::BAD_REQUEST,
_ => StatusCode::INTERNAL_SERVER_ERROR,
};
Self {
kind,
status,
backtrace: Box::new(Backtrace::new()),
}
}
}
impl DbErrorIntrospect for DbError {
fn is_batch_not_found(&self) -> bool {
matches!(&self.kind, DbErrorKind::Common(e) if e.is_batch_not_found())
}
fn is_bso_not_found(&self) -> bool {
matches!(&self.kind, DbErrorKind::Common(e) if e.is_bso_not_found())
}
fn is_collection_not_found(&self) -> bool {
matches!(&self.kind, DbErrorKind::Common(e) if e.is_collection_not_found())
}
fn is_conflict(&self) -> bool {
matches!(&self.kind, DbErrorKind::Common(e) if e.is_conflict())
}
fn is_quota(&self) -> bool {
matches!(&self.kind, DbErrorKind::Common(e) if e.is_quota())
}
}
impl ReportableError for DbError {
fn is_sentry_event(&self) -> bool {
matches!(&self.kind, DbErrorKind::Common(e) if e.is_sentry_event())
}
fn metric_label(&self) -> Option<String> {
if let DbErrorKind::Common(e) = &self.kind {
e.metric_label()
} else {
None
}
}
fn error_backtrace(&self) -> String {
format!("{:#?}", self.backtrace)
}
}
impl InternalError for DbError {
fn internal_error(message: String) -> Self {
DbErrorKind::Common(SyncstorageDbError::internal(message)).into()
}
}
impl_fmt_display!(DbError, DbErrorKind);
from_error!(grpcio::Error, DbError, |inner: grpcio::Error| {
// Convert ABORTED (typically due to a transaction abort) into 503s
match inner {
grpcio::Error::RpcFailure(ref status) | grpcio::Error::RpcFinished(Some(ref status))
if status.code() == grpcio::RpcStatusCode::ABORTED =>
{
DbErrorKind::Common(SyncstorageDbError::conflict())
}
_ => DbErrorKind::Grpc(inner),
}
});
from_error!(SyncstorageDbError, DbError, DbErrorKind::Common);
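The status mapping in `From<DbErrorKind> for DbError` is worth calling out: common errors already carry their own status, an oversized load is treated as a client error (400, matching the Python server), and everything else becomes a 500; gRPC ABORTED errors are first remapped into common conflict errors by the `from_error!` above (which the comment notes surface as 503s), so they never hit the catch-all arm. A reduced, self-contained model of that mapping, using bare u16 codes as stand-ins for `http::StatusCode` and simplified stand-in types:

// Stand-ins only; the real DbErrorKind/DbError are defined earlier in this file.
#[derive(Debug)]
enum DbErrorKind {
    Common(u16), // SyncstorageDbError already knows its own status
    Expired,
    Integrity(String),
    TooLarge(String),
}

#[derive(Debug)]
struct DbError {
    kind: DbErrorKind,
    status: u16,
}

impl From<DbErrorKind> for DbError {
    fn from(kind: DbErrorKind) -> Self {
        let status = match &kind {
            DbErrorKind::Common(status) => *status,
            // An oversized load is the client's fault.
            DbErrorKind::TooLarge(_) => 400,
            _ => 500,
        };
        DbError { kind, status }
    }
}

fn main() {
    assert_eq!(DbError::from(DbErrorKind::TooLarge("9 MB".to_owned())).status, 400);
    assert_eq!(DbError::from(DbErrorKind::Expired).status, 500);
    assert_eq!(DbError::from(DbErrorKind::Integrity("bad row".to_owned())).status, 500);
    // A common error keeps whatever status it was built with (503 for a conflict,
    // per the ABORTED comment above).
    assert_eq!(DbError::from(DbErrorKind::Common(503)).status, 503);
    println!("status mapping verified");
}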

View File

@ -0,0 +1,27 @@
use std::time::SystemTime;
#[macro_use]
extern crate slog_scope;
#[macro_use]
mod macros;
mod batch;
mod error;
mod manager;
mod models;
mod pool;
mod support;
pub use error::DbError;
pub use models::SpannerDb;
pub use pool::SpannerDbPool;
type DbResult<T> = Result<T, error::DbError>;
fn now() -> i64 {
SystemTime::now()
.duration_since(SystemTime::UNIX_EPOCH)
.unwrap_or_default()
.as_secs() as i64
}

View File

@ -19,7 +19,7 @@ macro_rules! params {
#[test]
fn test_params_macro() {
use crate::db::spanner::support::IntoSpannerValue;
use super::support::IntoSpannerValue;
use google_cloud_rust_raw::spanner::v1::type_pb::{Type, TypeCode};
use protobuf::{
well_known_types::{ListValue, Value},

View File

@ -10,7 +10,7 @@ use crate::{
error::{DbError, DbErrorKind},
PoolState,
},
server::metrics::Metrics,
server::Metrics,
settings::Settings,
};
@ -19,7 +19,7 @@ use super::session::{create_spanner_session, recycle_spanner_session, SpannerSes
#[allow(dead_code)]
pub type Conn<'a> = PooledConnection<'a, SpannerSessionManager<SpannerSession>>;
pub struct SpannerSessionManager {
pub(super) struct SpannerSessionManager {
database_name: String,
/// The gRPC environment
env: Arc<Environment>,

View File

@ -3,17 +3,15 @@ use std::{fmt, sync::Arc};
use async_trait::async_trait;
use deadpool::managed::{Manager, RecycleError, RecycleResult};
use grpcio::{EnvBuilder, Environment};
use syncserver_db_common::error::{DbError, DbErrorKind};
use syncserver_common::{BlockingThreadpool, Metrics};
use syncstorage_settings::Settings;
use crate::db::BlockingThreadpool;
use crate::server::metrics::Metrics;
use super::session::{create_spanner_session, recycle_spanner_session, SpannerSession};
use crate::error::DbError;
pub type Conn = deadpool::managed::Object<SpannerSession, DbError>;
pub(crate) type Conn = deadpool::managed::Object<SpannerSession, DbError>;
pub struct SpannerSessionManager {
pub(crate) struct SpannerSessionManager {
database_name: String,
/// The gRPC environment
env: Arc<Environment>,
@ -42,7 +40,9 @@ impl SpannerSessionManager {
) -> Result<Self, DbError> {
let database_name = settings
.spanner_database_name()
.ok_or_else(|| DbErrorKind::InvalidUrl(settings.database_url.to_owned()))?
.ok_or_else(|| {
DbError::internal(format!("invalid database url: {}", settings.database_url))
})?
.to_owned();
let env = Arc::new(EnvBuilder::new().build());

View File

@ -0,0 +1,6 @@
// mod bb8;
mod deadpool;
mod session;
pub(super) use self::deadpool::{Conn, SpannerSessionManager};
pub(super) use self::session::SpannerSession;

View File

@ -1,13 +1,13 @@
use std::sync::Arc;
use google_cloud_rust_raw::spanner::v1::{
spanner::{CreateSessionRequest, GetSessionRequest, Session},
spanner_grpc::SpannerClient,
};
use grpcio::{CallOption, ChannelBuilder, ChannelCredentials, Environment, MetadataBuilder};
use std::sync::Arc;
use syncserver_db_common::error::{DbError, DbErrorKind};
use syncserver_common::{BlockingThreadpool, Metrics};
use crate::db::{spanner::now, BlockingThreadpool};
use crate::server::metrics::Metrics;
use crate::error::DbError;
const SPANNER_ADDRESS: &str = "spanner.googleapis.com:443";
@ -21,11 +21,11 @@ pub struct SpannerSession {
pub session: Session,
/// The underlying client (Connection/Channel) for interacting with spanner
pub client: SpannerClient,
pub(in crate::db::spanner) use_test_transactions: bool,
pub(crate) use_test_transactions: bool,
/// A second based UTC for SpannerSession creation.
/// Session has a similar `create_time` value that is managed by protobuf,
/// but some clock skew issues are possible.
pub(in crate::db::spanner) create_time: i64,
pub(crate) create_time: i64,
/// Whether we are using the Spanner emulator
pub using_spanner_emulator: bool,
}
@ -71,7 +71,7 @@ pub async fn create_spanner_session(
session,
client,
use_test_transactions,
create_time: now(),
create_time: crate::now(),
using_spanner_emulator,
})
}
@ -84,7 +84,7 @@ pub async fn recycle_spanner_session(
max_lifetime: Option<u32>,
max_idle: Option<u32>,
) -> Result<(), DbError> {
let now = now();
let now = crate::now();
let mut req = GetSessionRequest::new();
req.set_name(conn.session.get_name().to_owned());
/*
@ -128,7 +128,7 @@ pub async fn recycle_spanner_session(
if age > max_life as i64 {
metrics.incr("db.connection.max_life");
dbg!("### aging out", this_session.get_name());
return Err(DbErrorKind::Expired.into());
return Err(DbError::expired());
}
}
// check how long that this has been idle...
@ -145,7 +145,7 @@ pub async fn recycle_spanner_session(
if idle > max_idle as i64 {
metrics.incr("db.connection.max_idle");
dbg!("### idling out", this_session.get_name());
return Err(DbErrorKind::Expired.into());
return Err(DbError::expired());
}
// and update the connection's reference session info
conn.session = this_session;

View File

@ -1,32 +1,20 @@
use std::{collections::HashMap, fmt, sync::Arc, time::Duration};
use async_trait::async_trait;
use bb8::ErrorSink;
use syncserver_db_common::{error::DbError, Db, DbPool, GetPoolState, PoolState, STD_COLLS};
use syncserver_common::{BlockingThreadpool, Metrics};
use syncserver_db_common::{GetPoolState, PoolState};
use syncstorage_db_common::{Db, DbPool, STD_COLLS};
use syncstorage_settings::{Quota, Settings};
use tokio::sync::RwLock;
use crate::db::BlockingThreadpool;
use crate::server::metrics::Metrics;
pub use super::manager::Conn;
pub(super) use super::manager::Conn;
use super::{
error::DbError,
manager::{SpannerSession, SpannerSessionManager},
models::Result,
models::SpannerDb,
DbResult,
};
embed_migrations!();
/// Run the diesel embedded migrations
///
/// Mysql DDL statements implicitly commit which could disrupt MysqlPool's
/// begin_test_transaction during tests. So this runs on its own separate conn.
//pub fn run_embedded_migrations(settings: &Settings) -> Result<()> {
// let conn = MysqlConnection::establish(&settings.database_url)?;
// Ok(embedded_migrations::run(&conn)?)
//}
#[derive(Clone)]
pub struct SpannerDbPool {
/// Pool of db connections
@ -40,20 +28,20 @@ pub struct SpannerDbPool {
impl SpannerDbPool {
/// Creates a new pool of Spanner db connections.
pub async fn new(
pub fn new(
settings: &Settings,
metrics: &Metrics,
blocking_threadpool: Arc<BlockingThreadpool>,
) -> Result<Self> {
) -> DbResult<Self> {
//run_embedded_migrations(settings)?;
Self::new_without_migrations(settings, metrics, blocking_threadpool).await
Self::new_without_migrations(settings, metrics, blocking_threadpool)
}
pub async fn new_without_migrations(
pub fn new_without_migrations(
settings: &Settings,
metrics: &Metrics,
blocking_threadpool: Arc<BlockingThreadpool>,
) -> Result<Self> {
) -> DbResult<Self> {
let max_size = settings.database_pool_max_size as usize;
let wait = settings
.database_pool_connection_timeout
@ -78,11 +66,11 @@ impl SpannerDbPool {
})
}
pub async fn get_async(&self) -> Result<SpannerDb> {
pub async fn get_async(&self) -> DbResult<SpannerDb> {
let conn = self.pool.get().await.map_err(|e| match e {
deadpool::managed::PoolError::Backend(dbe) => dbe,
deadpool::managed::PoolError::Timeout(timeout_type) => {
DbError::internal(&format!("deadpool Timeout: {:?}", timeout_type))
DbError::internal(format!("deadpool Timeout: {:?}", timeout_type))
}
})?;
Ok(SpannerDb::new(
@ -96,21 +84,23 @@ impl SpannerDbPool {
#[async_trait]
impl DbPool for SpannerDbPool {
async fn get<'a>(&'a self) -> Result<Box<dyn Db<'a>>> {
type Error = DbError;
async fn get(&self) -> DbResult<Box<dyn Db<Error = Self::Error>>> {
let mut metrics = self.metrics.clone();
metrics.start_timer("storage.spanner.get_pool", None);
self.get_async()
.await
.map(|db| Box::new(db) as Box<dyn Db<'a>>)
.map(|db| Box::new(db) as Box<dyn Db<Error = Self::Error>>)
.map_err(Into::into)
}
fn validate_batch_id(&self, id: String) -> Result<()> {
super::batch::validate_batch_id(&id)
fn validate_batch_id(&self, id: String) -> DbResult<()> {
super::batch::validate_batch_id(&id).map_err(Into::into)
}
fn box_clone(&self) -> Box<dyn DbPool> {
fn box_clone(&self) -> Box<dyn DbPool<Error = Self::Error>> {
Box::new(self.clone())
}
}
@ -130,7 +120,7 @@ impl fmt::Debug for SpannerDbPool {
}
#[derive(Debug)]
pub struct CollectionCache {
pub(super) struct CollectionCache {
pub by_name: RwLock<HashMap<String, i32>>,
pub by_id: RwLock<HashMap<i32, String>>,
}
@ -194,19 +184,3 @@ impl Default for CollectionCache {
}
}
}
/// Logs internal bb8 errors
#[derive(Debug, Clone, Copy)]
pub struct LoggingErrorSink;
impl<E: std::error::Error> ErrorSink<E> for LoggingErrorSink {
fn sink(&self, e: E) {
error!("bb8 Error: {}", e);
let event = sentry::event_from_error(&e);
sentry::capture_event(event);
}
fn boxed_clone(&self) -> Box<dyn ErrorSink<E>> {
Box::new(*self)
}
}

View File

@ -1,7 +1,6 @@
use std::{
collections::{HashMap, VecDeque},
mem,
result::Result as StdResult,
};
use futures::stream::{StreamExt, StreamFuture};
@ -15,15 +14,11 @@ use protobuf::{
well_known_types::{ListValue, NullValue, Struct, Value},
RepeatedField,
};
use syncserver_db_common::{
error::{DbError, DbErrorKind},
params, results,
util::to_rfc3339,
util::SyncTimestamp,
UserIdentifier, DEFAULT_BSO_TTL,
use syncstorage_db_common::{
params, results, util::to_rfc3339, util::SyncTimestamp, UserIdentifier, DEFAULT_BSO_TTL,
};
use super::{models::Result, pool::Conn};
use super::{error::DbError, pool::Conn, DbResult};
pub trait IntoSpannerValue {
const TYPE_CODE: TypeCode;
@ -169,7 +164,7 @@ impl ExecuteSqlRequestBuilder {
}
/// Execute a SQL read statement but return a non-blocking streaming result
pub fn execute_async(self, conn: &Conn) -> Result<StreamedResultSetAsync> {
pub fn execute_async(self, conn: &Conn) -> DbResult<StreamedResultSetAsync> {
let stream = conn
.client
.execute_streaming_sql(&self.prepare_request(conn))?;
@ -177,7 +172,7 @@ impl ExecuteSqlRequestBuilder {
}
/// Execute a DML statement, returning the exact count of modified rows
pub async fn execute_dml_async(self, conn: &Conn) -> Result<i64> {
pub async fn execute_dml_async(self, conn: &Conn) -> DbResult<i64> {
let rs = conn
.client
.execute_sql_async(&self.prepare_request(conn))?
@ -230,20 +225,24 @@ impl StreamedResultSetAsync {
}
}
pub async fn one(&mut self) -> Result<Vec<Value>> {
pub async fn one(&mut self) -> DbResult<Vec<Value>> {
if let Some(result) = self.one_or_none().await? {
Ok(result)
} else {
Err(DbError::internal("No rows matched the given query."))?
Err(DbError::internal(
"No rows matched the given query.".to_owned(),
))
}
}
pub async fn one_or_none(&mut self) -> Result<Option<Vec<Value>>> {
pub async fn one_or_none(&mut self) -> DbResult<Option<Vec<Value>>> {
let result = self.next_async().await;
if result.is_none() {
Ok(None)
} else if self.next_async().await.is_some() {
Err(DbError::internal("Expected one result; got more."))?
Err(DbError::internal(
"Expected one result; got more.".to_owned(),
))
} else {
result.transpose()
}
@ -252,7 +251,7 @@ impl StreamedResultSetAsync {
/// Pull and process the next values from the Stream
///
/// Returns false when the stream is finished
async fn consume_next(&mut self) -> Result<bool> {
async fn consume_next(&mut self) -> DbResult<bool> {
let (result, stream) = self
.stream
.take()
@ -286,9 +285,9 @@ impl StreamedResultSetAsync {
let fields = self.fields();
let current_row_i = self.current_row.len();
if fields.len() <= current_row_i {
Err(DbErrorKind::Integrity(
return Err(DbError::integrity(
"Invalid PartialResultSet fields".to_owned(),
))?;
));
}
let field = &fields[current_row_i];
values[0] = merge_by_type(pending_chunk, &values[0], field.get_field_type())?;
@ -314,7 +313,7 @@ impl StreamedResultSetAsync {
// We could implement Stream::poll_next instead of this, but
// this is easier for now and we can refactor into the trait later
pub async fn next_async(&mut self) -> Option<Result<Vec<Value>>> {
pub async fn next_async(&mut self) -> Option<DbResult<Vec<Value>>> {
while self.rows.is_empty() {
match self.consume_next().await {
Ok(true) => (),
@ -329,7 +328,7 @@ impl StreamedResultSetAsync {
}
}
fn merge_by_type(lhs: Value, rhs: &Value, field_type: &Type) -> Result<Value> {
fn merge_by_type(lhs: Value, rhs: &Value, field_type: &Type) -> DbResult<Value> {
// We only support merging basic string types as that's all we currently use.
// The python client also supports: float64, array, struct. The go client
// only additionally supports array (claiming structs are only returned as
@ -344,25 +343,28 @@ fn merge_by_type(lhs: Value, rhs: &Value, field_type: &Type) -> Result<Value> {
}
}
fn unsupported_merge(field_type: &Type) -> Result<Value> {
Err(DbError::internal(&format!(
fn unsupported_merge(field_type: &Type) -> DbResult<Value> {
Err(DbError::internal(format!(
"merge not supported, type: {:?}",
field_type
)))
}
fn merge_string(mut lhs: Value, rhs: &Value) -> Result<Value> {
fn merge_string(mut lhs: Value, rhs: &Value) -> DbResult<Value> {
if !lhs.has_string_value() || !rhs.has_string_value() {
Err(DbError::internal("merge_string has no string value"))?
return Err(DbError::internal(
"merge_string has no string value".to_owned(),
));
}
let mut merged = lhs.take_string_value();
merged.push_str(rhs.get_string_value());
Ok(merged.into_spanner_value())
}
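Chunk merging exists because Spanner's streaming API may split a single long cell (typically a large payload) across consecutive PartialResultSets, so the pending chunk from one response has to be glued onto the first value of the next. A trivial illustration with plain strings (the real merge_string operates on protobuf `Value`s):

// Plain-string model of merge_string above.
fn merge_string(mut lhs: String, rhs: &str) -> String {
    lhs.push_str(rhs);
    lhs
}

fn main() {
    let pending_chunk = "first half of a large pay".to_owned();
    let next_chunk = "load value";
    assert_eq!(
        merge_string(pending_chunk, next_chunk),
        "first half of a large payload value"
    );
}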
pub fn bso_from_row(mut row: Vec<Value>) -> Result<results::GetBso> {
pub fn bso_from_row(mut row: Vec<Value>) -> DbResult<results::GetBso> {
let modified_string = &row[3].get_string_value();
let modified = SyncTimestamp::from_rfc3339(modified_string)?;
let modified = SyncTimestamp::from_rfc3339(modified_string)
.map_err(|e| DbError::integrity(e.to_string()))?;
Ok(results::GetBso {
id: row[0].take_string_value(),
sortindex: if row[1].has_null_value() {
@ -372,12 +374,14 @@ pub fn bso_from_row(mut row: Vec<Value>) -> Result<results::GetBso> {
row[1]
.get_string_value()
.parse::<i32>()
.map_err(|e| DbErrorKind::Integrity(e.to_string()))?,
.map_err(|e| DbError::integrity(e.to_string()))?,
)
},
payload: row[2].take_string_value(),
modified,
expiry: SyncTimestamp::from_rfc3339(row[4].get_string_value())?.as_i64(),
expiry: SyncTimestamp::from_rfc3339(row[4].get_string_value())
.map_err(|e| DbError::integrity(e.to_string()))?
.as_i64(),
})
}
@ -386,7 +390,7 @@ pub fn bso_to_insert_row(
collection_id: i32,
bso: params::PostCollectionBso,
now: SyncTimestamp,
) -> Result<ListValue> {
) -> DbResult<ListValue> {
let sortindex = bso
.sortindex
.map(|sortindex| sortindex.into_spanner_value())
@ -413,7 +417,7 @@ pub fn bso_to_update_row(
collection_id: i32,
bso: params::PostCollectionBso,
now: SyncTimestamp,
) -> Result<(Vec<&'static str>, ListValue)> {
) -> DbResult<(Vec<&'static str>, ListValue)> {
let mut columns = vec!["fxa_uid", "fxa_kid", "collection_id", "bso_id"];
let mut values = vec![
user_id.fxa_uid.clone().into_spanner_value(),
@ -454,10 +458,10 @@ pub struct MapAndThenIterator<I, F> {
impl<A, B, E, I, F> Iterator for MapAndThenIterator<I, F>
where
F: FnMut(A) -> StdResult<B, E>,
I: Iterator<Item = StdResult<A, E>>,
F: FnMut(A) -> Result<B, E>,
I: Iterator<Item = Result<A, E>>,
{
type Item = StdResult<B, E>;
type Item = Result<B, E>;
fn next(&mut self) -> Option<Self::Item> {
self.iter.next().map(|result| result.and_then(&mut self.f))
@ -466,13 +470,13 @@ where
pub trait MapAndThenTrait {
/// Return an iterator adaptor that applies the provided closure to every
/// Result::Ok value. Result::Err values are unchanged.
/// DbResult::Ok value. DbResult::Err values are unchanged.
///
/// The closure can be used for control flow based on result values
fn map_and_then<F, A, B, E>(self, func: F) -> MapAndThenIterator<Self, F>
where
Self: Sized + Iterator<Item = StdResult<A, E>>,
F: FnMut(A) -> StdResult<B, E>,
Self: Sized + Iterator<Item = Result<A, E>>,
F: FnMut(A) -> Result<B, E>,
{
MapAndThenIterator {
iter: self,
@ -481,4 +485,4 @@ pub trait MapAndThenTrait {
}
}
impl<I, T, E> MapAndThenTrait for I where I: Sized + Iterator<Item = StdResult<T, E>> {}
impl<I, T, E> MapAndThenTrait for I where I: Sized + Iterator<Item = Result<T, E>> {}
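Since the adaptor's doc comment is easy to gloss over, here is a self-contained replica of `map_and_then` plus a usage example: the closure runs on every `Ok` item and may itself fail, while existing `Err` items pass through untouched (unlike a plain `.map`, which would have to unwrap and rewrap the `Result` by hand). The replica mirrors the definitions above so the snippet compiles on its own; the sample data is, of course, hypothetical.

struct MapAndThenIterator<I, F> {
    iter: I,
    f: F,
}

impl<A, B, E, I, F> Iterator for MapAndThenIterator<I, F>
where
    F: FnMut(A) -> Result<B, E>,
    I: Iterator<Item = Result<A, E>>,
{
    type Item = Result<B, E>;

    fn next(&mut self) -> Option<Self::Item> {
        self.iter.next().map(|result| result.and_then(&mut self.f))
    }
}

trait MapAndThenTrait: Sized {
    fn map_and_then<F, A, B, E>(self, func: F) -> MapAndThenIterator<Self, F>
    where
        Self: Iterator<Item = Result<A, E>>,
        F: FnMut(A) -> Result<B, E>,
    {
        MapAndThenIterator { iter: self, f: func }
    }
}

impl<I, T, E> MapAndThenTrait for I where I: Sized + Iterator<Item = Result<T, E>> {}

fn main() {
    let rows: Vec<Result<&str, String>> =
        vec![Ok("42"), Err("bad row".to_owned()), Ok("oops")];
    let parsed: Vec<Result<i32, String>> = rows
        .into_iter()
        .map_and_then(|s| s.parse::<i32>().map_err(|e| e.to_string()))
        .collect();
    assert_eq!(parsed[0], Ok(42));      // Ok values are transformed
    assert!(parsed[1].is_err());        // existing errors pass through unchanged
    assert!(parsed[2].is_err());        // the closure's own error surfaces per item
}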

View File

@ -9,5 +9,4 @@ backtrace = "0.3.61"
serde = "1.0"
serde_json = { version = "1.0", features = ["arbitrary_precision"] }
syncserver-common = { path = "../syncserver-common" }
syncserver-db-common = { path = "../syncserver-db-common" }
thiserror = "1.0.26"

View File

@ -7,8 +7,9 @@ use serde::{
Serialize,
};
use syncserver_common::{InternalError, ReportableError};
use syncserver_db_common::error::DbError;
/// An error type that represents application-specific Tokenserver errors. This error is not
/// used to represent database-related errors; database-related errors have their own type.
#[derive(Clone, Debug)]
pub struct TokenserverError {
pub status: &'static str,
@ -249,25 +250,6 @@ impl Serialize for TokenserverError {
}
}
impl From<DbError> for TokenserverError {
fn from(db_error: DbError) -> Self {
TokenserverError {
description: db_error.to_string(),
context: db_error.to_string(),
backtrace: Box::new(db_error.backtrace),
http_status: if db_error.status.is_server_error() {
// Use the status code from the DbError if it already suggests an internal error;
// it might be more specific than `StatusCode::SERVICE_UNAVAILABLE`
db_error.status
} else {
StatusCode::SERVICE_UNAVAILABLE
},
// An unhandled DbError in the Tokenserver code is an internal error
..TokenserverError::internal_error()
}
}
}
impl From<TokenserverError> for HttpResponse {
fn from(inner: TokenserverError) -> Self {
ResponseError::error_response(&inner)

View File

@ -1,7 +1,9 @@
pub mod error;
mod error;
use serde::{Deserialize, Serialize};
pub use error::{ErrorLocation, TokenType, TokenserverError};
#[derive(Clone, Copy, Debug, Deserialize, Eq, PartialEq, Serialize)]
pub enum NodeType {
#[serde(rename = "mysql")]

tokenserver-db/Cargo.toml
View File

@ -0,0 +1,28 @@
[package]
name = "tokenserver-db"
version = "0.12.3"
edition = "2021"
[dependencies]
async-trait = "0.1.40"
backtrace = "0.3.61"
diesel = { version = "1.4", features = ["mysql", "r2d2"] }
diesel_logger = "0.1.1"
diesel_migrations = { version = "1.4.0", features = ["mysql"] }
futures = { version = "0.3", features = ["compat"] }
http = "0.2.5"
serde = "1.0"
serde_derive = "1.0"
serde_json = { version = "1.0", features = ["arbitrary_precision"] }
slog-scope = "4.3"
syncserver-common = { path = "../syncserver-common" }
syncserver-db-common = { path = "../syncserver-db-common" }
thiserror = "1.0.26"
tokenserver-common = { path = "../tokenserver-common" }
tokenserver-settings = { path = "../tokenserver-settings" }
# pinning to 0.2.4 due to high number of dependencies (actix, bb8, deadpool, etc.)
tokio = { version = "0.2.4", features = ["macros", "sync"] }
[dev-dependencies]
syncserver-settings = { path = "../syncserver-settings" }
env_logger = "0.9"

Some files were not shown because too many files have changed in this diff.