diff --git a/.circleci/config.yml b/.circleci/config.yml index f13e3629..52422526 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -10,7 +10,7 @@ commands: steps: - run: name: Display Rust Version - command: + command: rustc --version setup-rust-check: steps: @@ -43,12 +43,18 @@ commands: flake8 syncserver/src/tokenserver flake8 tools/integration_tests flake8 tools/tokenserver - rust-clippy: + rust-clippy-mysql: steps: - run: - name: Rust Clippy + name: Rust Clippy MySQL command: | - cargo clippy --workspace --all-targets --all-features -- -D warnings + cargo clippy --workspace --all-targets --no-default-features --features=syncstorage-db/mysql -- -D warnings + rust-clippy-spanner: + steps: + - run: + name: Rust Clippy Spanner + command: | + cargo clippy --workspace --all-targets --no-default-features --features=syncstorage-db/spanner -- -D warnings cargo-build: steps: - run: @@ -69,7 +75,7 @@ commands: - run: name: Create Tokenserver database command: | - mysql -u root -ppassword -h 127.0.0.1 -e 'CREATE DATABASE tokenserver;' + mysql -u root -ppassword -h 127.0.0.1 -e 'CREATE DATABASE tokenserver;' mysql -u root -ppassword -h 127.0.0.1 -e "GRANT ALL ON tokenserver.* to 'test'@'%';" write-version: @@ -105,7 +111,7 @@ commands: -f docker-compose.mysql.yaml -f docker-compose.e2e.mysql.yaml up - --exit-code-from e2e-tests + --exit-code-from mysql-e2e-tests --abort-on-container-exit environment: SYNCSTORAGE_RS_IMAGE: app:build @@ -129,7 +135,7 @@ commands: -f docker-compose.spanner.yaml -f docker-compose.e2e.spanner.yaml up - --exit-code-from e2e-tests + --exit-code-from spanner-e2e-tests --abort-on-container-exit environment: SYNCSTORAGE_RS_IMAGE: app:build @@ -164,13 +170,15 @@ jobs: auth: username: $DOCKER_USER password: $DOCKER_PASS + resource_class: large steps: - checkout - display-rust - setup-rust-check - setup-gcp-grpc - rust-check - - rust-clippy + - rust-clippy-spanner + - rust-clippy-mysql - setup-python - python-check @@ -197,16 +205,6 @@ jobs: MYSQL_DATABASE: syncstorage resource_class: large steps: - - setup_remote_docker: - docker_layer_caching: true - - run: - name: Login to Dockerhub - command: | - if [ "${DOCKER_USER}" == "" ] || [ "${DOCKER_PASS}" == "" ]; then - echo "Skipping Login to DockerHub, credentials unavailable" - else - echo "${DOCKER_PASS}" | docker login -u="${DOCKER_USER}" --password-stdin - fi - checkout - display-rust - setup-python @@ -221,9 +219,20 @@ jobs: - run-tests - run-tokenserver-scripts-tests #- save-sccache-cache + build-mysql-image: + docker: + - image: cimg/rust:1.60.0 + auth: + username: $DOCKER_USER + password: $DOCKER_PASS + resource_class: large + steps: + - setup_remote_docker: + docker_layer_caching: true + - checkout - run: - name: Build Docker image - command: docker build -t app:build . + name: Build MySQL Docker image + command: docker build -t app:build --build-arg DATABASE_BACKEND=mysql . no_output_timeout: 30m # save the built docker container into CircleCI's cache. This is # required since Workflows do not have the same remote docker instance. 
@@ -234,13 +243,43 @@ jobs: docker save -o /home/circleci/cache/docker.tar "app:build" - run: name: Save docker-compose config - command: cp docker-compose*.yaml /home/circleci/cache + command: cp docker-compose*mysql.yaml /home/circleci/cache - save_cache: - key: v1-{{ .Branch }}-{{ .Environment.CIRCLE_SHA1 }}-{{ epoch }} + key: mysql-{{ .Branch }}-{{ .Environment.CIRCLE_SHA1 }}-{{ epoch }} paths: - /home/circleci/cache - e2e-tests: + build-spanner-image: + docker: + - image: cimg/rust:1.60.0 + auth: + username: $DOCKER_USER + password: $DOCKER_PASS + resource_class: large + steps: + - setup_remote_docker: + docker_layer_caching: true + - checkout + - run: + name: Build Spanner Docker image + command: docker build -t app:build --build-arg DATABASE_BACKEND=spanner . + no_output_timeout: 30m + # save the built docker container into CircleCI's cache. This is + # required since Workflows do not have the same remote docker instance. + - run: + name: docker save app:build + command: | + mkdir -p /home/circleci/cache + docker save -o /home/circleci/cache/docker.tar "app:build" + - run: + name: Save docker-compose config + command: cp docker-compose*spanner.yaml /home/circleci/cache + - save_cache: + key: spanner-{{ .Branch }}-{{ .Environment.CIRCLE_SHA1 }}-{{ epoch }} + paths: + - /home/circleci/cache + + mysql-e2e-tests: docker: - image: docker/compose:1.24.0 auth: @@ -249,7 +288,7 @@ jobs: steps: - setup_remote_docker - restore_cache: - key: v1-{{ .Branch }}-{{ .Environment.CIRCLE_SHA1 }} + key: mysql-{{ .Branch }}-{{ .Environment.CIRCLE_SHA1 }} - run: name: Restore Docker image cache command: docker load -i /home/circleci/cache/docker.tar @@ -257,6 +296,23 @@ jobs: name: Restore docker-compose config command: cp /home/circleci/cache/docker-compose*.yaml . - run-e2e-mysql-tests + + spanner-e2e-tests: + docker: + - image: docker/compose:1.24.0 + auth: + username: $DOCKER_USER + password: $DOCKER_PASS + steps: + - setup_remote_docker + - restore_cache: + key: spanner-{{ .Branch }}-{{ .Environment.CIRCLE_SHA1 }} + - run: + name: Restore Docker image cache + command: docker load -i /home/circleci/cache/docker.tar + - run: + name: Restore docker-compose config + command: cp /home/circleci/cache/docker-compose*.yaml . 
- run-e2e-spanner-tests deploy: @@ -268,7 +324,7 @@ jobs: steps: - setup_remote_docker - restore_cache: - key: v1-{{ .Branch }}-{{ .Environment.CIRCLE_SHA1 }} + key: spanner-{{ .Branch }}-{{ .Environment.CIRCLE_SHA1 }} - run: name: Restore Docker image cache command: docker load -i /home/circleci/cache/docker.tar @@ -346,21 +402,41 @@ workflows: filters: tags: only: /.*/ - - e2e-tests: + - build-mysql-image: requires: - build-and-test filters: tags: only: /.*/ + - build-spanner-image: + requires: + - build-and-test + filters: + tags: + only: /.*/ + - mysql-e2e-tests: + requires: + - build-mysql-image + filters: + tags: + only: /.*/ + - spanner-e2e-tests: + requires: + - build-spanner-image + filters: + tags: + only: /.*/ - deploy: requires: - - e2e-tests + - mysql-e2e-tests + - spanner-e2e-tests filters: tags: only: /.*/ - deploy-python-utils: requires: - - e2e-tests + - mysql-e2e-tests + - spanner-e2e-tests filters: tags: only: /.*/ diff --git a/Cargo.lock b/Cargo.lock index ed940b41..2581e189 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -429,17 +429,6 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" -[[package]] -name = "bb8" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "374bba43fc924d90393ee7768e6f75d223a98307a488fe5bc34b66c3e96932a6" -dependencies = [ - "async-trait", - "futures 0.3.19", - "tokio", -] - [[package]] name = "bindgen" version = "0.57.0" @@ -3006,41 +2995,27 @@ dependencies = [ "actix-cors", "actix-http", "actix-rt", - "actix-service", "actix-web", "async-trait", "backtrace", "base64 0.13.0", - "bb8", - "bytes 1.1.0", "cadence", "chrono", - "deadpool", - "diesel", - "diesel_logger", - "diesel_migrations", "docopt", "dyn-clone", "env_logger", "futures 0.3.19", - "google-cloud-rust-raw", - "grpcio", "hawk", "hex", "hmac", "hostname", - "http", "lazy_static", - "log", "mime", "mockito", - "num_cpus", - "protobuf", "pyo3", "rand 0.8.5", "regex", "reqwest", - "scheduled-thread-pool", "sentry", "sentry-backtrace", "serde 1.0.135", @@ -3057,15 +3032,15 @@ dependencies = [ "syncserver-common", "syncserver-db-common", "syncserver-settings", + "syncstorage-db", "syncstorage-settings", "thiserror", "time 0.3.9", "tokenserver-common", + "tokenserver-db", "tokenserver-settings", "tokio", - "url 2.2.2", "urlencoding", - "uuid", "validator", "validator_derive", "woothee", @@ -3075,30 +3050,29 @@ dependencies = [ name = "syncserver-common" version = "0.13.1" dependencies = [ + "actix-web", + "cadence", + "futures 0.3.19", "hkdf", + "serde 1.0.135", + "serde_json", "sha2", + "slog", + "slog-scope", ] [[package]] name = "syncserver-db-common" version = "0.13.1" dependencies = [ - "async-trait", "backtrace", - "chrono", "deadpool", "diesel", "diesel_migrations", "futures 0.3.19", - "grpcio", - "hostname", "http", - "lazy_static", - "serde 1.0.135", - "serde_json", "syncserver-common", "thiserror", - "url 2.2.2", ] [[package]] @@ -3115,6 +3089,71 @@ dependencies = [ "url 2.2.2", ] +[[package]] +name = "syncstorage-db" +version = "0.12.3" +dependencies = [ + "async-trait", + "cadence", + "env_logger", + "futures 0.3.19", + "hostname", + "lazy_static", + "log", + "rand 0.8.5", + "slog-scope", + "syncserver-common", + "syncserver-db-common", + "syncserver-settings", + "syncstorage-db-common", + "syncstorage-mysql", + "syncstorage-settings", + "syncstorage-spanner", + "tokio", +] + +[[package]] +name = "syncstorage-db-common" 
+version = "0.12.3" +dependencies = [ + "async-trait", + "backtrace", + "chrono", + "diesel", + "diesel_migrations", + "futures 0.3.19", + "http", + "lazy_static", + "serde 1.0.135", + "serde_json", + "syncserver-common", + "syncserver-db-common", + "thiserror", +] + +[[package]] +name = "syncstorage-mysql" +version = "0.12.3" +dependencies = [ + "async-trait", + "backtrace", + "base64 0.13.0", + "diesel", + "diesel_logger", + "diesel_migrations", + "env_logger", + "futures 0.3.19", + "http", + "slog-scope", + "syncserver-common", + "syncserver-db-common", + "syncserver-settings", + "syncstorage-db-common", + "syncstorage-settings", + "thiserror", + "url 2.2.2", +] + [[package]] name = "syncstorage-settings" version = "0.13.1" @@ -3125,6 +3164,32 @@ dependencies = [ "time 0.3.9", ] +[[package]] +name = "syncstorage-spanner" +version = "0.12.3" +dependencies = [ + "async-trait", + "backtrace", + "cadence", + "deadpool", + "env_logger", + "futures 0.3.19", + "google-cloud-rust-raw", + "grpcio", + "http", + "log", + "protobuf", + "slog-scope", + "syncserver-common", + "syncserver-db-common", + "syncstorage-db-common", + "syncstorage-settings", + "thiserror", + "tokio", + "url 2.2.2", + "uuid", +] + [[package]] name = "synstructure" version = "0.12.6" @@ -3305,10 +3370,34 @@ dependencies = [ "serde 1.0.135", "serde_json", "syncserver-common", - "syncserver-db-common", "thiserror", ] +[[package]] +name = "tokenserver-db" +version = "0.12.3" +dependencies = [ + "async-trait", + "backtrace", + "diesel", + "diesel_logger", + "diesel_migrations", + "env_logger", + "futures 0.3.19", + "http", + "serde 1.0.135", + "serde_derive", + "serde_json", + "slog-scope", + "syncserver-common", + "syncserver-db-common", + "syncserver-settings", + "thiserror", + "tokenserver-common", + "tokenserver-settings", + "tokio", +] + [[package]] name = "tokenserver-settings" version = "0.13.1" diff --git a/Cargo.toml b/Cargo.toml index 56eced59..818c6490 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,11 +1,16 @@ [workspace] resolver = "2" members = [ - "syncserver-settings", "syncserver-common", "syncserver-db-common", + "syncserver-settings", + "syncstorage-db", + "syncstorage-db-common", + "syncstorage-mysql", "syncstorage-settings", + "syncstorage-spanner", "tokenserver-common", + "tokenserver-db", "tokenserver-settings", "syncserver", ] diff --git a/Dockerfile b/Dockerfile index b61e5d5c..4102c7e5 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,8 +1,35 @@ -FROM rust:1.66-buster as builder +FROM lukemathwalker/cargo-chef:0.1.50-rust-1.66-buster as chef WORKDIR /app -ADD . /app -ENV PATH=$PATH:/root/.cargo/bin -# temp removed --no-install-recommends due to CI docker build issue + +FROM chef AS planner +COPY . . 
+RUN cargo chef prepare --recipe-path recipe.json + +FROM chef AS cacher +ARG DATABASE_BACKEND=spanner +COPY --from=planner /app/mysql_pubkey.asc mysql_pubkey.asc + +# cmake is required to build grpcio-sys for Spanner builds +RUN \ + echo "deb https://repo.mysql.com/apt/debian/ buster mysql-8.0" >> /etc/apt/sources.list && \ + # mysql_pubkey.asc from: + # https://dev.mysql.com/doc/refman/8.0/en/checking-gpg-signature.html + # related: + # https://dev.mysql.com/doc/mysql-apt-repo-quick-guide/en/#repo-qg-apt-repo-manual-setup + apt-key adv --import mysql_pubkey.asc && \ + apt-get -q update && \ + apt-get -q install -y --no-install-recommends libmysqlclient-dev cmake + +COPY --from=planner /app/recipe.json recipe.json +RUN cargo chef cook --release --no-default-features --features=syncstorage-db/$DATABASE_BACKEND --recipe-path recipe.json + +FROM chef as builder +ARG DATABASE_BACKEND=spanner + +COPY . /app +COPY --from=cacher /app/target /app/target +COPY --from=cacher $CARGO_HOME /app/$CARGO_HOME + RUN \ echo "deb https://repo.mysql.com/apt/debian/ buster mysql-8.0" >> /etc/apt/sources.list && \ # mysql_pubkey.asc from: @@ -15,11 +42,13 @@ RUN \ pip3 install -r requirements.txt && \ rm -rf /var/lib/apt/lists/* +ENV PATH=$PATH:/root/.cargo/bin + RUN \ cargo --version && \ rustc --version && \ - cargo install --path ./syncserver --locked --root /app && \ - cargo install --path ./syncserver --locked --root /app --bin purge_ttl + cargo install --path ./syncserver --no-default-features --features=syncstorage-db/$DATABASE_BACKEND --locked --root /app && \ + if [ "$DATABASE_BACKEND" = "spanner" ] ; then cargo install --path ./syncstorage-spanner --locked --root /app --bin purge_ttl ; fi FROM debian:buster-slim WORKDIR /app @@ -56,7 +85,7 @@ COPY --from=builder /app/tools/integration_tests /app/tools/integration_tests COPY --from=builder /app/tools/tokenserver/process_account_events.py /app/tools/tokenserver/process_account_events.py COPY --from=builder /app/tools/tokenserver/requirements.txt /app/tools/tokenserver/requirements.txt COPY --from=builder /app/scripts/prepare-spanner.sh /app/scripts/prepare-spanner.sh -COPY --from=builder /app/syncserver/src/db/spanner/schema.ddl /app/schema.ddl +COPY --from=builder /app/syncstorage-spanner/src/schema.ddl /app/schema.ddl RUN chmod +x /app/scripts/prepare-spanner.sh RUN pip3 install -r /app/tools/integration_tests/requirements.txt diff --git a/Makefile b/Makefile index 39629617..dada9059 100644 --- a/Makefile +++ b/Makefile @@ -10,9 +10,15 @@ PATH_TO_SYNC_SPANNER_KEYS = `pwd`/service-account.json # https://github.com/mozilla-services/server-syncstorage PATH_TO_GRPC_CERT = ../server-syncstorage/local/lib/python2.7/site-packages/grpc/_cython/_credentials/roots.pem -clippy: +SRC_ROOT = $(shell pwd) + +clippy_mysql: # Matches what's run in circleci - cargo clippy --workspace --all-targets --all-features -- -D warnings + cargo clippy --workspace --all-targets --no-default-features --features=syncstorage-db/mysql -- -D warnings + +clippy_spanner: + # Matches what's run in circleci + cargo clippy --workspace --all-targets --no-default-features --features=syncstorage-db/spanner -- -D warnings clean: cargo clean @@ -40,14 +46,28 @@ python: python3 -m venv venv venv/bin/python -m pip install -r requirements.txt -run: python - PATH="./venv/bin:$(PATH)" RUST_LOG=debug RUST_BACKTRACE=full cargo run -- --config config/local.toml +run_mysql: python + PATH="./venv/bin:$(PATH)" \ + # See https://github.com/PyO3/pyo3/issues/1741 for discussion re: why we need to set the + 
# below env var + PYTHONPATH=$(SRC_ROOT)/venv/lib/python3.9/site-packages \ + RUST_LOG=debug \ + RUST_BACKTRACE=full \ + cargo run --no-default-features --features=syncstorage-db/mysql -- --config config/local.toml -run_spanner: - GOOGLE_APPLICATION_CREDENTIALS=$(PATH_TO_SYNC_SPANNER_KEYS) GRPC_DEFAULT_SSL_ROOTS_FILE_PATH=$(PATH_TO_GRPC_CERT) make run +run_spanner: python + GOOGLE_APPLICATION_CREDENTIALS=$(PATH_TO_SYNC_SPANNER_KEYS) \ + GRPC_DEFAULT_SSL_ROOTS_FILE_PATH=$(PATH_TO_GRPC_CERT) \ + # See https://github.com/PyO3/pyo3/issues/1741 for discussion re: why we need to set the + # below env var + PYTHONPATH=$(SRC_ROOT)/venv/lib/python3.9/site-packages \ + PATH="./venv/bin:$(PATH)" \ + RUST_LOG=debug \ + RUST_BACKTRACE=full \ + cargo run --no-default-features --features=syncstorage-db/spanner -- --config config/local.toml test: SYNC_SYNCSTORAGE__DATABASE_URL=mysql://sample_user:sample_password@localhost/syncstorage_rs \ SYNC_TOKENSERVER__DATABASE_URL=mysql://sample_user:sample_password@localhost/tokenserver_rs \ RUST_TEST_THREADS=1 \ - cargo test + cargo test --workspace diff --git a/config/local.example.toml b/config/local.example.toml index 7e2a687c..f845b5c9 100644 --- a/config/local.example.toml +++ b/config/local.example.toml @@ -1,22 +1,21 @@ -# Example MySQL DSN: -database_url = "mysql://sample_user:sample_password@localhost/syncstorage_rs" - -# Example Spanner DSN: -# database_url="spanner://projects/SAMPLE_GCP_PROJECT/instances/SAMPLE_SPANNER_INSTANCE/databases/SAMPLE_SPANNER_DB" - -"limits.max_total_records"=1666 # See issues #298/#333 master_secret = "INSERT_SECRET_KEY_HERE" # removing this line will default to moz_json formatted logs (which is preferred for production envs) human_logs = 1 +# Example Syncstorage settings: +# Example MySQL DSN: +syncstorage.database_url = "mysql://sample_user:sample_password@localhost/syncstorage_rs" +# Example Spanner DSN: +# database_url="spanner://projects/SAMPLE_GCP_PROJECT/instances/SAMPLE_SPANNER_INSTANCE/databases/SAMPLE_SPANNER_DB" # enable quota limits -enable_quota = 0 +syncstorage.enable_quota = 0 # set the quota limit to 2GB. 
# max_quota_limit = 200000000 +syncstorage.enabled = true +syncstorage.limits.max_total_records = 1666 # See issues #298/#333 # Example Tokenserver settings: -disable_syncstorage = false tokenserver.database_url = "mysql://sample_user:sample_password@localhost/tokenserver_rs" tokenserver.enabled = true tokenserver.fxa_email_domain = "api-accounts.stage.mozaws.net" diff --git a/docker-compose.e2e.mysql.yaml b/docker-compose.e2e.mysql.yaml index bb5d6e99..d2d42195 100644 --- a/docker-compose.e2e.mysql.yaml +++ b/docker-compose.e2e.mysql.yaml @@ -14,7 +14,7 @@ services: sleep 15; /app/bin/syncserver; " - e2e-tests: + mysql-e2e-tests: depends_on: - mock-fxa-server - syncserver diff --git a/docker-compose.e2e.spanner.yaml b/docker-compose.e2e.spanner.yaml index b431276b..e0a25a8b 100644 --- a/docker-compose.e2e.spanner.yaml +++ b/docker-compose.e2e.spanner.yaml @@ -14,7 +14,7 @@ services: sleep 15; /app/bin/syncserver; " - e2e-tests: + spanner-e2e-tests: depends_on: - mock-fxa-server - syncserver diff --git a/syncserver-common/Cargo.toml b/syncserver-common/Cargo.toml index a8a0dea1..24f77379 100644 --- a/syncserver-common/Cargo.toml +++ b/syncserver-common/Cargo.toml @@ -4,5 +4,16 @@ version = "0.13.1" edition = "2021" [dependencies] +actix-web = "3" +cadence = "0.26" +futures = "0.3" hkdf = "0.11" sha2 = "0.9" +serde = "1.0" +serde_json = { version = "1.0", features = ["arbitrary_precision"] } +slog = { version = "2.5", features = [ + "max_level_info", + "release_max_level_info", + "dynamic-keys", +] } +slog-scope = "4.3" diff --git a/syncserver-common/src/lib.rs b/syncserver-common/src/lib.rs index 6d836ec9..a0e6080f 100644 --- a/syncserver-common/src/lib.rs +++ b/syncserver-common/src/lib.rs @@ -1,6 +1,19 @@ +#[macro_use] +extern crate slog_scope; + +use std::{ + fmt, + sync::atomic::{AtomicU64, Ordering}, +}; + +use actix_web::{error::BlockingError, web}; use hkdf::Hkdf; use sha2::Sha256; +mod metrics; + +pub use metrics::{metrics_from_opts, MetricError, Metrics}; + // header statics must be lower case, numbers and symbols per the RFC spec. This reduces chance of error. pub static X_LAST_MODIFIED: &str = "x-last-modified"; pub static X_WEAVE_TIMESTAMP: &str = "x-weave-timestamp"; @@ -56,3 +69,43 @@ pub trait InternalError { /// Constructs an internal error with the given error message. fn internal_error(message: String) -> Self; } + +/// A threadpool on which callers can spawn non-CPU-bound tasks that block their thread (this is +/// mostly useful for running I/O tasks). `BlockingThreadpool` intentionally does not implement +/// `Clone`: `Arc`s are not used internally, so a `BlockingThreadpool` should be instantiated once +/// and shared by passing around `Arc`s. +#[derive(Debug, Default)] +pub struct BlockingThreadpool { + spawned_tasks: AtomicU64, +} + +impl BlockingThreadpool { + /// Runs a function as a task on the blocking threadpool. + /// + /// WARNING: Spawning a blocking task through means other than calling this method will + /// result in inaccurate threadpool metrics being reported. If you want to spawn a task on + /// the blocking threadpool, you **must** use this function. 
+ pub async fn spawn(&self, f: F) -> Result + where + F: FnOnce() -> Result + Send + 'static, + T: Send + 'static, + E: fmt::Debug + Send + InternalError + 'static, + { + self.spawned_tasks.fetch_add(1, Ordering::Relaxed); + + let result = web::block(f).await.map_err(|e| match e { + BlockingError::Error(e) => e, + BlockingError::Canceled => { + E::internal_error("Blocking threadpool operation canceled".to_owned()) + } + }); + + self.spawned_tasks.fetch_sub(1, Ordering::Relaxed); + + result + } + + pub fn active_threads(&self) -> u64 { + self.spawned_tasks.load(Ordering::Relaxed) + } +} diff --git a/syncserver/src/server/metrics.rs b/syncserver-common/src/metrics.rs similarity index 80% rename from syncserver/src/server/metrics.rs rename to syncserver-common/src/metrics.rs index 4b29f555..bee5bb78 100644 --- a/syncserver/src/server/metrics.rs +++ b/syncserver-common/src/metrics.rs @@ -2,18 +2,12 @@ use std::collections::HashMap; use std::net::UdpSocket; use std::time::Instant; -use actix_web::{dev::Payload, web::Data, FromRequest, HttpRequest}; use cadence::{ BufferedUdpMetricSink, Counted, Metric, NopMetricSink, QueuingMetricSink, StatsdClient, Timed, }; -use futures::future; -use futures::future::Ready; use slog::{Key, Record, KV}; -use crate::error::ApiError; -use crate::server::ServerState; -use crate::tokenserver; -use crate::web::tags::Taggable; +pub use cadence::MetricError; #[derive(Debug, Clone)] pub struct MetricTimer { @@ -58,55 +52,6 @@ impl Drop for Metrics { } } -impl FromRequest for Metrics { - type Config = (); - type Error = (); - type Future = Ready>; - - fn from_request(req: &HttpRequest, _: &mut Payload) -> Self::Future { - let client = { - let syncstorage_metrics = req - .app_data::>() - .map(|state| state.metrics.clone()); - let tokenserver_metrics = req - .app_data::>() - .map(|state| state.metrics.clone()); - - syncstorage_metrics.or(tokenserver_metrics) - }; - - if client.is_none() { - warn!("⚠️ metric error: No App State"); - } - - future::ok(Metrics { - client: client.as_deref().cloned(), - tags: req.get_tags(), - timer: None, - }) - } -} - -impl From<&StatsdClient> for Metrics { - fn from(client: &StatsdClient) -> Self { - Metrics { - client: Some(client.clone()), - tags: HashMap::default(), - timer: None, - } - } -} - -impl From<&ServerState> for Metrics { - fn from(state: &ServerState) -> Self { - Metrics { - client: Some(*state.metrics.clone()), - tags: HashMap::default(), - timer: None, - } - } -} - impl Metrics { pub fn sink() -> StatsdClient { StatsdClient::builder("", NopMetricSink).build() @@ -191,7 +136,7 @@ pub fn metrics_from_opts( label: &str, host: Option<&str>, port: u16, -) -> Result { +) -> Result { let builder = if let Some(statsd_host) = host { let socket = UdpSocket::bind("0.0.0.0:0")?; socket.set_nonblocking(true)?; @@ -210,6 +155,16 @@ pub fn metrics_from_opts( .build()) } +impl From<&StatsdClient> for Metrics { + fn from(client: &StatsdClient) -> Self { + Metrics { + client: Some(client.clone()), + tags: HashMap::default(), + timer: None, + } + } +} + /// A newtype used solely to allow us to implement KV on HashMap. struct MetricTags(HashMap); diff --git a/syncserver-db-common/Cargo.toml b/syncserver-db-common/Cargo.toml index 99e422db..3eaf6309 100644 --- a/syncserver-db-common/Cargo.toml +++ b/syncserver-db-common/Cargo.toml @@ -4,25 +4,14 @@ version = "0.13.1" edition = "2021" [dependencies] -async-trait = "0.1.40" backtrace = "0.3.61" -chrono = "0.4" # Pin to 0.5 for now, to keep it under tokio 0.2 (issue977). 
# Fix for #803 (deadpool#92) points to our fork for now #deadpool = "0.5" # pin to 0.5 deadpool = { git = "https://github.com/mozilla-services/deadpool", branch = "deadpool-v0.5.2-issue92" } diesel = { version = "1.4", features = ["mysql", "r2d2"] } diesel_migrations = { version = "1.4.0", features = ["mysql"] } -# Some versions of OpenSSL 1.1.1 conflict with grpcio's built-in boringssl which can cause -# syncstorage to either fail to either compile, or start. In those cases, try -# `cargo build --features grpcio/openssl ...` -grpcio = { version = "0.9" } -hostname = "0.3.1" -http = "0.2.6" futures = { version = "0.3", features = ["compat"] } -lazy_static = "1.4.0" -serde = "1.0" -serde_json = { version = "1.0", features = ["arbitrary_precision"] } +http = "0.2.6" syncserver-common = { path = "../syncserver-common" } thiserror = "1.0.26" -url = "2.1" diff --git a/syncserver-db-common/src/error.rs b/syncserver-db-common/src/error.rs index 1a2d7a66..e49e89fd 100644 --- a/syncserver-db-common/src/error.rs +++ b/syncserver-db-common/src/error.rs @@ -2,153 +2,58 @@ use std::fmt; use backtrace::Backtrace; use http::StatusCode; -use syncserver_common::{from_error, impl_fmt_display, InternalError}; +use syncserver_common::{from_error, impl_fmt_display}; use thiserror::Error; +/// Error specific to any MySQL database backend. These errors are not related to the syncstorage +/// or tokenserver application logic; rather, they are lower-level errors arising from diesel. #[derive(Debug)] -pub struct DbError { - kind: DbErrorKind, +pub struct MysqlError { + kind: MysqlErrorKind, pub status: StatusCode, pub backtrace: Backtrace, } #[derive(Debug, Error)] -pub enum DbErrorKind { +enum MysqlErrorKind { #[error("A database error occurred: {}", _0)] DieselQuery(#[from] diesel::result::Error), #[error("An error occurred while establishing a db connection: {}", _0)] DieselConnection(#[from] diesel::result::ConnectionError), - #[error("A database error occurred: {}", _0)] - SpannerGrpc(#[from] grpcio::Error), - - #[error("Spanner data load too large: {}", _0)] - SpannerTooLarge(String), - #[error("A database pool error occurred: {}", _0)] Pool(diesel::r2d2::PoolError), #[error("Error migrating the database: {}", _0)] Migration(diesel_migrations::RunMigrationsError), - - #[error("Specified collection does not exist")] - CollectionNotFound, - - #[error("Specified bso does not exist")] - BsoNotFound, - - #[error("Specified batch does not exist")] - BatchNotFound, - - #[error("An attempt at a conflicting write")] - Conflict, - - #[error("Database integrity error: {}", _0)] - Integrity(String), - - #[error("Invalid database URL: {}", _0)] - InvalidUrl(String), - - #[error("Unexpected error: {}", _0)] - Internal(String), - - #[error("User over quota")] - Quota, - - #[error("Connection expired")] - Expired, } -impl DbError { - pub fn internal(msg: &str) -> Self { - DbErrorKind::Internal(msg.to_owned()).into() - } - - pub fn is_sentry_event(&self) -> bool { - !matches!(&self.kind, DbErrorKind::Conflict) - } - - pub fn metric_label(&self) -> Option { - match &self.kind { - DbErrorKind::Conflict => Some("storage.conflict".to_owned()), - _ => None, - } - } - - pub fn is_collection_not_found(&self) -> bool { - matches!(self.kind, DbErrorKind::CollectionNotFound) - } - - pub fn is_conflict(&self) -> bool { - matches!(self.kind, DbErrorKind::Conflict) - } - - pub fn is_quota(&self) -> bool { - matches!(self.kind, DbErrorKind::Quota) - } - - pub fn is_bso_not_found(&self) -> bool { - matches!(self.kind, 
DbErrorKind::BsoNotFound) - } - - pub fn is_batch_not_found(&self) -> bool { - matches!(self.kind, DbErrorKind::BatchNotFound) - } -} - -impl From for DbError { - fn from(kind: DbErrorKind) -> Self { - let status = match kind { - DbErrorKind::CollectionNotFound | DbErrorKind::BsoNotFound => StatusCode::NOT_FOUND, - // Matching the Python code here (a 400 vs 404) - DbErrorKind::BatchNotFound | DbErrorKind::SpannerTooLarge(_) => StatusCode::BAD_REQUEST, - // NOTE: the protocol specification states that we should return a - // "409 Conflict" response here, but clients currently do not - // handle these respones very well: - // * desktop bug: https://bugzilla.mozilla.org/show_bug.cgi?id=959034 - // * android bug: https://bugzilla.mozilla.org/show_bug.cgi?id=959032 - DbErrorKind::Conflict => StatusCode::SERVICE_UNAVAILABLE, - DbErrorKind::Quota => StatusCode::FORBIDDEN, - _ => StatusCode::INTERNAL_SERVER_ERROR, - }; - +impl From for MysqlError { + fn from(kind: MysqlErrorKind) -> Self { Self { kind, - status, + status: StatusCode::INTERNAL_SERVER_ERROR, backtrace: Backtrace::new(), } } } -impl_fmt_display!(DbError, DbErrorKind); +impl_fmt_display!(MysqlError, MysqlErrorKind); -from_error!(diesel::result::Error, DbError, DbErrorKind::DieselQuery); +from_error!( + diesel::result::Error, + MysqlError, + MysqlErrorKind::DieselQuery +); from_error!( diesel::result::ConnectionError, - DbError, - DbErrorKind::DieselConnection + MysqlError, + MysqlErrorKind::DieselConnection ); -from_error!(grpcio::Error, DbError, |inner: grpcio::Error| { - // Convert ABORTED (typically due to a transaction abort) into 503s - match inner { - grpcio::Error::RpcFailure(ref status) | grpcio::Error::RpcFinished(Some(ref status)) - if status.code() == grpcio::RpcStatusCode::ABORTED => - { - DbErrorKind::Conflict - } - _ => DbErrorKind::SpannerGrpc(inner), - } -}); -from_error!(diesel::r2d2::PoolError, DbError, DbErrorKind::Pool); +from_error!(diesel::r2d2::PoolError, MysqlError, MysqlErrorKind::Pool); from_error!( diesel_migrations::RunMigrationsError, - DbError, - DbErrorKind::Migration + MysqlError, + MysqlErrorKind::Migration ); - -impl InternalError for DbError { - fn internal_error(message: String) -> Self { - DbErrorKind::Internal(message).into() - } -} diff --git a/syncserver-db-common/src/lib.rs b/syncserver-db-common/src/lib.rs index a850777d..5e227376 100644 --- a/syncserver-db-common/src/lib.rs +++ b/syncserver-db-common/src/lib.rs @@ -1,78 +1,18 @@ pub mod error; -pub mod params; -pub mod results; -pub mod util; +pub mod test; use std::fmt::Debug; -use async_trait::async_trait; -use futures::future::{self, LocalBoxFuture, TryFutureExt}; -use lazy_static::lazy_static; -use serde::Deserialize; +use futures::future::LocalBoxFuture; -use error::DbError; -use util::SyncTimestamp; - -lazy_static! { - /// For efficiency, it's possible to use fixed pre-determined IDs for - /// common collection names. This is the canonical list of such - /// names. Non-standard collections will be allocated IDs starting - /// from the highest ID in this collection. 
- pub static ref STD_COLLS: Vec<(i32, &'static str)> = { - vec![ - (1, "clients"), - (2, "crypto"), - (3, "forms"), - (4, "history"), - (5, "keys"), - (6, "meta"), - (7, "bookmarks"), - (8, "prefs"), - (9, "tabs"), - (10, "passwords"), - (11, "addons"), - (12, "addresses"), - (13, "creditcards"), - ] - }; -} - -/// Rough guesstimate of the maximum reasonable life span of a batch -pub const BATCH_LIFETIME: i64 = 2 * 60 * 60 * 1000; // 2 hours, in milliseconds - -/// The ttl to use for rows that are never supposed to expire (in seconds) -pub const DEFAULT_BSO_TTL: u32 = 2_100_000_000; - -/// Non-standard collections will be allocated IDs beginning with this value -pub const FIRST_CUSTOM_COLLECTION_ID: i32 = 101; - -pub type DbFuture<'a, T> = LocalBoxFuture<'a, Result>; - -#[async_trait] -pub trait DbPool: Sync + Send + Debug + GetPoolState { - async fn get(&self) -> Result>, DbError>; - - fn validate_batch_id(&self, params: params::ValidateBatchId) -> Result<(), DbError>; - - fn box_clone(&self) -> Box; -} - -impl Clone for Box { - fn clone(&self) -> Box { - self.box_clone() - } -} +pub type DbFuture<'a, T, E> = LocalBoxFuture<'a, Result>; +/// A trait to be implemented by database pool data structures. It provides an interface to +/// derive the current state of the pool, as represented by the `PoolState` struct. pub trait GetPoolState { fn state(&self) -> PoolState; } -impl GetPoolState for Box { - fn state(&self) -> PoolState { - (**self).state() - } -} - #[derive(Debug, Default)] /// A mockable r2d2::State pub struct PoolState { @@ -97,212 +37,18 @@ impl From for PoolState { } } -pub trait Db<'a>: Debug + 'a { - fn lock_for_read(&self, params: params::LockCollection) -> DbFuture<'_, ()>; - - fn lock_for_write(&self, params: params::LockCollection) -> DbFuture<'_, ()>; - - fn begin(&self, for_write: bool) -> DbFuture<'_, ()>; - - fn commit(&self) -> DbFuture<'_, ()>; - - fn rollback(&self) -> DbFuture<'_, ()>; - - fn get_collection_timestamps( - &self, - params: params::GetCollectionTimestamps, - ) -> DbFuture<'_, results::GetCollectionTimestamps>; - - fn get_collection_timestamp( - &self, - params: params::GetCollectionTimestamp, - ) -> DbFuture<'_, results::GetCollectionTimestamp>; - - fn get_collection_counts( - &self, - params: params::GetCollectionCounts, - ) -> DbFuture<'_, results::GetCollectionCounts>; - - fn get_collection_usage( - &self, - params: params::GetCollectionUsage, - ) -> DbFuture<'_, results::GetCollectionUsage>; - - fn get_storage_timestamp( - &self, - params: params::GetStorageTimestamp, - ) -> DbFuture<'_, results::GetStorageTimestamp>; - - fn get_storage_usage( - &self, - params: params::GetStorageUsage, - ) -> DbFuture<'_, results::GetStorageUsage>; - - fn get_quota_usage( - &self, - params: params::GetQuotaUsage, - ) -> DbFuture<'_, results::GetQuotaUsage>; - - fn delete_storage(&self, params: params::DeleteStorage) - -> DbFuture<'_, results::DeleteStorage>; - - fn delete_collection( - &self, - params: params::DeleteCollection, - ) -> DbFuture<'_, results::DeleteCollection>; - - fn delete_bsos(&self, params: params::DeleteBsos) -> DbFuture<'_, results::DeleteBsos>; - - fn get_bsos(&self, params: params::GetBsos) -> DbFuture<'_, results::GetBsos>; - - fn get_bso_ids(&self, params: params::GetBsos) -> DbFuture<'_, results::GetBsoIds>; - - fn post_bsos(&self, params: params::PostBsos) -> DbFuture<'_, results::PostBsos>; - - fn delete_bso(&self, params: params::DeleteBso) -> DbFuture<'_, results::DeleteBso>; - - fn get_bso(&self, params: params::GetBso) -> 
DbFuture<'_, Option>; - - fn get_bso_timestamp( - &self, - params: params::GetBsoTimestamp, - ) -> DbFuture<'_, results::GetBsoTimestamp>; - - fn put_bso(&self, params: params::PutBso) -> DbFuture<'_, results::PutBso>; - - fn create_batch(&self, params: params::CreateBatch) -> DbFuture<'_, results::CreateBatch>; - - fn validate_batch(&self, params: params::ValidateBatch) - -> DbFuture<'_, results::ValidateBatch>; - - fn append_to_batch( - &self, - params: params::AppendToBatch, - ) -> DbFuture<'_, results::AppendToBatch>; - - fn get_batch(&self, params: params::GetBatch) -> DbFuture<'_, Option>; - - fn commit_batch(&self, params: params::CommitBatch) -> DbFuture<'_, results::CommitBatch>; - - fn box_clone(&self) -> Box>; - - fn check(&self) -> DbFuture<'_, results::Check>; - - fn get_connection_info(&self) -> results::ConnectionInfo; - - /// Retrieve the timestamp for an item/collection - /// - /// Modeled on the Python `get_resource_timestamp` function. - fn extract_resource( - &self, - user_id: UserIdentifier, - collection: Option, - bso: Option, - ) -> DbFuture<'_, SyncTimestamp> { - // If there's no collection, we return the overall storage timestamp - let collection = match collection { - Some(collection) => collection, - None => return Box::pin(self.get_storage_timestamp(user_id)), - }; - // If there's no bso, return the collection - let bso = match bso { - Some(bso) => bso, - None => { - return Box::pin( - self.get_collection_timestamp(params::GetCollectionTimestamp { - user_id, - collection, - }) - .or_else(|e| { - if e.is_collection_not_found() { - future::ok(SyncTimestamp::from_seconds(0f64)) - } else { - future::err(e) - } - }), - ) - } - }; - Box::pin( - self.get_bso_timestamp(params::GetBsoTimestamp { - user_id, - collection, - id: bso, - }) - .or_else(|e| { - if e.is_collection_not_found() { - future::ok(SyncTimestamp::from_seconds(0f64)) - } else { - future::err(e) - } - }), - ) - } - - /// Internal methods used by the db tests - - fn get_collection_id(&self, name: String) -> DbFuture<'_, i32>; - - fn create_collection(&self, name: String) -> DbFuture<'_, i32>; - - fn update_collection(&self, params: params::UpdateCollection) -> DbFuture<'_, SyncTimestamp>; - - fn timestamp(&self) -> SyncTimestamp; - - fn set_timestamp(&self, timestamp: SyncTimestamp); - - fn delete_batch(&self, params: params::DeleteBatch) -> DbFuture<'_, ()>; - - fn clear_coll_cache(&self) -> DbFuture<'_, ()>; - - fn set_quota(&mut self, enabled: bool, limit: usize, enforce: bool); -} - -impl<'a> Clone for Box> { - fn clone(&self) -> Box> { - self.box_clone() - } -} - -#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Copy)] -#[serde(rename_all = "lowercase")] -pub enum Sorting { - None, - Newest, - Oldest, - Index, -} - -impl Default for Sorting { - fn default() -> Self { - Sorting::None - } -} - -#[derive(Clone, Debug, Default, Eq, Hash, PartialEq)] -pub struct UserIdentifier { - /// For MySQL database backends as the primary key - pub legacy_id: u64, - /// For NoSQL database backends that require randomly distributed primary keys - pub fxa_uid: String, - pub fxa_kid: String, -} - -impl UserIdentifier { - /// Create a new legacy id user identifier - pub fn new_legacy(user_id: u64) -> Self { - Self { - legacy_id: user_id, - ..Default::default() +#[macro_export] +macro_rules! 
sync_db_method { + ($name:ident, $sync_name:ident, $type:ident) => { + sync_db_method!($name, $sync_name, $type, results::$type); + }; + ($name:ident, $sync_name:ident, $type:ident, $result:ty) => { + fn $name(&self, params: params::$type) -> DbFuture<'_, $result, DbError> { + let db = self.clone(); + Box::pin( + self.blocking_threadpool + .spawn(move || db.$sync_name(params)), + ) } - } -} - -impl From for UserIdentifier { - fn from(val: u32) -> Self { - Self { - legacy_id: val.into(), - ..Default::default() - } - } + }; } diff --git a/syncserver-db-common/src/test.rs b/syncserver-db-common/src/test.rs new file mode 100644 index 00000000..351888f3 --- /dev/null +++ b/syncserver-db-common/src/test.rs @@ -0,0 +1,14 @@ +use diesel::{ + mysql::MysqlConnection, + r2d2::{CustomizeConnection, Error as PoolError}, + Connection, +}; + +#[derive(Debug)] +pub struct TestTransactionCustomizer; + +impl CustomizeConnection for TestTransactionCustomizer { + fn on_acquire(&self, conn: &mut MysqlConnection) -> Result<(), PoolError> { + conn.begin_test_transaction().map_err(PoolError::QueryError) + } +} diff --git a/syncserver-settings/src/lib.rs b/syncserver-settings/src/lib.rs index 9eb93331..f4fa887f 100644 --- a/syncserver-settings/src/lib.rs +++ b/syncserver-settings/src/lib.rs @@ -13,9 +13,9 @@ use syncstorage_settings::Settings as SyncstorageSettings; use tokenserver_settings::Settings as TokenserverSettings; use url::Url; -pub static PREFIX: &str = "sync"; +static PREFIX: &str = "sync"; -#[derive(Clone, Deserialize)] +#[derive(Clone, Debug, Deserialize)] #[serde(default)] pub struct Settings { pub port: u16, @@ -134,6 +134,7 @@ impl Settings { } } + #[cfg(debug_assertions)] pub fn test_settings() -> Self { let mut settings = Self::with_env_and_config_file(None).expect("Could not get Settings in test_settings"); diff --git a/syncserver/Cargo.toml b/syncserver/Cargo.toml index 759fe464..2b7018ca 100644 --- a/syncserver/Cargo.toml +++ b/syncserver/Cargo.toml @@ -13,46 +13,23 @@ default-run = "syncserver" [dependencies] actix-http = "2" actix-web = "3" -actix-rt = "1" # Pin to 1.0, due to dependencies on Tokio +actix-rt = "1" # Pin to 1.0, due to dependencies on Tokio actix-cors = "0.5" -actix-service = "1.0.6" async-trait = "0.1.40" backtrace = "0.3.61" base64 = "0.13" -bb8 = "0.4.1" # pin to 0.4 due to dependencies on Tokio -bytes = "1.0" cadence = "0.26" chrono = "0.4" -# Pin to 0.5 for now, to keep it under tokio 0.2 (issue977). -# Fix for #803 (deadpool#92) points to our fork for now -#deadpool = "0.5" # pin to 0.5 -deadpool = { git = "https://github.com/mozilla-services/deadpool", branch = "deadpool-v0.5.2-issue92" } -diesel = { version = "1.4", features = ["mysql", "r2d2"] } -diesel_logger = "0.1.1" -diesel_migrations = { version = "1.4.0", features = ["mysql"] } docopt = "1.1.0" dyn-clone = "1.0.4" env_logger = "0.9" futures = { version = "0.3", features = ["compat"] } -google-cloud-rust-raw = "0.11.0" -# Some versions of OpenSSL 1.1.1 conflict with grpcio's built-in boringssl which can cause -# syncserver to either fail to either compile, or start. 
In those cases, try -# `cargo build --features grpcio/openssl ...` -grpcio = { version = "0.9" } +hostname = "0.3.1" lazy_static = "1.4.0" hawk = "3.2" hex = "0.4.3" -hostname = "0.3.1" hmac = "0.11" -http = "0.2.5" -log = { version = "0.4", features = [ - "max_level_debug", - "release_max_level_info", -] } mime = "0.3" -num_cpus = "1" -# must match what's used by googleapis-raw -protobuf = "2.20.0" pyo3 = { version = "0.14", features = ["auto-initialize"] } rand = "0.8" regex = "1.4" @@ -65,7 +42,6 @@ sentry-backtrace = "0.19" serde = "1.0" serde_derive = "1.0" serde_json = { version = "1.0", features = ["arbitrary_precision"] } -scheduled-thread-pool = "0.2" sha2 = "0.9" slog = { version = "2.5", features = [ "max_level_info", @@ -78,19 +54,19 @@ slog-mozlog-json = "0.1" slog-scope = "4.3" slog-stdlog = "4.1" slog-term = "2.6" -syncserver-settings = { path = "../syncserver-settings" } -syncserver-db-common = { path = "../syncserver-db-common" } syncserver-common = { path = "../syncserver-common" } +syncserver-db-common = { path = "../syncserver-db-common" } +syncserver-settings = { path = "../syncserver-settings" } +syncstorage-db = { path = "../syncstorage-db" } syncstorage-settings = { path = "../syncstorage-settings" } time = "^0.3" thiserror = "1.0.26" tokenserver-common = { path = "../tokenserver-common" } +tokenserver-db = { path = "../tokenserver-db" } tokenserver-settings = { path = "../tokenserver-settings" } # pinning to 0.2.4 due to high number of dependencies (actix, bb8, deadpool, etc.) tokio = { version = "0.2.4", features = ["macros", "sync"] } -url = "2.1" urlencoding = "2.1" -uuid = { version = "0.8.2", features = ["serde", "v4"] } validator = "0.14" validator_derive = "0.14" woothee = "0.11" @@ -99,7 +75,5 @@ woothee = "0.11" mockito = "0.30.0" [features] +default = ["syncstorage-db/mysql"] no_auth = [] - -[[bin]] -name = "purge_ttl" diff --git a/syncserver/src/db/mysql/mod.rs b/syncserver/src/db/mysql/mod.rs deleted file mode 100644 index 82ea3a9b..00000000 --- a/syncserver/src/db/mysql/mod.rs +++ /dev/null @@ -1,12 +0,0 @@ -#[macro_use] -mod batch; -mod diesel_ext; -pub mod models; -pub mod pool; -mod schema; -#[cfg(test)] -mod test; - -pub use self::pool::MysqlDbPool; -#[cfg(test)] -pub use self::test::TestTransactionCustomizer; diff --git a/syncserver/src/db/spanner/manager/mod.rs b/syncserver/src/db/spanner/manager/mod.rs deleted file mode 100644 index b1ee0932..00000000 --- a/syncserver/src/db/spanner/manager/mod.rs +++ /dev/null @@ -1,6 +0,0 @@ -// mod bb8; -mod deadpool; -mod session; - -pub use self::deadpool::{Conn, SpannerSessionManager}; -pub use self::session::SpannerSession; diff --git a/syncserver/src/db/spanner/mod.rs b/syncserver/src/db/spanner/mod.rs deleted file mode 100644 index a5962eb3..00000000 --- a/syncserver/src/db/spanner/mod.rs +++ /dev/null @@ -1,19 +0,0 @@ -use std::time::SystemTime; - -#[macro_use] -mod macros; - -mod batch; -pub mod manager; -pub mod models; -pub mod pool; -mod support; - -pub use self::pool::SpannerDbPool; - -pub fn now() -> i64 { - SystemTime::now() - .duration_since(SystemTime::UNIX_EPOCH) - .unwrap_or_default() - .as_secs() as i64 -} diff --git a/syncserver/src/error.rs b/syncserver/src/error.rs index c81ea604..933b4de0 100644 --- a/syncserver/src/error.rs +++ b/syncserver/src/error.rs @@ -21,8 +21,9 @@ use serde::{ Serialize, }; -use syncserver_common::{from_error, impl_fmt_display, ReportableError}; -use syncserver_db_common::error::DbError; +use syncserver_common::{from_error, impl_fmt_display, MetricError, 
ReportableError}; +use syncstorage_db::{DbError, DbErrorIntrospect}; + use thiserror::Error; use crate::web::error::{HawkError, ValidationError}; @@ -57,7 +58,7 @@ pub const RETRY_AFTER: u8 = 10; #[derive(Debug)] pub struct ApiError { kind: ApiErrorKind, - pub(crate) backtrace: Backtrace, + pub(crate) backtrace: Box, status: StatusCode, } @@ -87,8 +88,8 @@ pub enum ApiErrorKind { impl ApiErrorKind { pub fn metric_label(&self) -> Option { match self { - ApiErrorKind::Db(err) => err.metric_label(), ApiErrorKind::Hawk(err) => err.metric_label(), + ApiErrorKind::Db(err) => err.metric_label(), ApiErrorKind::Validation(err) => err.metric_label(), _ => None, } @@ -96,6 +97,15 @@ impl ApiErrorKind { } impl ApiError { + pub fn is_sentry_event(&self) -> bool { + // Should we report this error to sentry? + self.status.is_server_error() + && match &self.kind { + ApiErrorKind::Db(dbe) => dbe.is_sentry_event(), + _ => self.kind.metric_label().is_none(), + } + } + fn weave_error_code(&self) -> WeaveError { match &self.kind { ApiErrorKind::Validation(ver) => ver.weave_error_code(), @@ -148,8 +158,8 @@ impl From for HttpResponse { } } -impl From for ApiError { - fn from(inner: cadence::MetricError) -> Self { +impl From for ApiError { + fn from(inner: MetricError) -> Self { ApiErrorKind::Internal(inner.to_string()).into() } } @@ -173,7 +183,7 @@ impl From for ApiError { Self { kind, - backtrace: Backtrace::new(), + backtrace: Box::new(Backtrace::new()), status, } } diff --git a/syncserver/src/lib.rs b/syncserver/src/lib.rs index 90284b15..ac5b979a 100644 --- a/syncserver/src/lib.rs +++ b/syncserver/src/lib.rs @@ -1,10 +1,6 @@ #![warn(rust_2018_idioms)] #![allow(clippy::try_err)] -#[macro_use] -extern crate diesel; -#[macro_use] -extern crate diesel_migrations; #[macro_use] extern crate slog_scope; #[macro_use] @@ -12,7 +8,6 @@ extern crate validator_derive; #[macro_use] pub mod error; -pub mod db; pub mod logging; pub mod server; pub mod tokenserver; diff --git a/syncserver/src/server/mod.rs b/syncserver/src/server/mod.rs index 9f7a3f80..fc377224 100644 --- a/syncserver/src/server/mod.rs +++ b/syncserver/src/server/mod.rs @@ -1,33 +1,27 @@ //! 
Main application server -use std::{ - env, fmt, - sync::{ - atomic::{AtomicU64, Ordering}, - Arc, - }, - time::Duration, -}; +use std::{env, sync::Arc, time::Duration}; use actix_cors::Cors; use actix_web::{ - dev, - error::BlockingError, + dev::{self, Payload}, http::StatusCode, http::{header::LOCATION, Method}, middleware::errhandlers::ErrorHandlers, - web, App, HttpRequest, HttpResponse, HttpServer, + web::{self, Data}, + App, FromRequest, HttpRequest, HttpResponse, HttpServer, }; use cadence::{Gauged, StatsdClient}; -use syncserver_common::InternalError; -use syncserver_db_common::{error::DbError, DbPool, GetPoolState, PoolState}; +use futures::future::{self, Ready}; +use syncserver_common::{BlockingThreadpool, Metrics}; +use syncserver_db_common::{GetPoolState, PoolState}; use syncserver_settings::Settings; +use syncstorage_db::{DbError, DbPool, DbPoolImpl}; use syncstorage_settings::{Deadman, ServerLimits}; use tokio::{sync::RwLock, time}; -use crate::db::pool_from_settings; use crate::error::ApiError; -use crate::server::metrics::Metrics; +use crate::server::tags::Taggable; use crate::tokenserver; use crate::web::{handlers, middleware}; @@ -38,7 +32,7 @@ pub const SYNC_DOCS_URL: &str = const MYSQL_UID_REGEX: &str = r"[0-9]{1,10}"; const SYNC_VERSION_PATH: &str = "1.5"; -pub mod metrics; +pub mod tags; #[cfg(test)] mod test; pub mod user_agent; @@ -46,7 +40,7 @@ pub mod user_agent; /// This is the global HTTP state object that will be made available to all /// HTTP API calls. pub struct ServerState { - pub db_pool: Box, + pub db_pool: Box>, /// Server-enforced limits for request payloads. pub limits: Arc, @@ -249,7 +243,7 @@ macro_rules! build_app_without_syncstorage { impl Server { pub async fn with_settings(settings: Settings) -> Result { let settings_copy = settings.clone(); - let metrics = metrics::metrics_from_opts( + let metrics = syncserver_common::metrics_from_opts( &settings.syncstorage.statsd_label, settings.statsd_host.as_deref(), settings.statsd_port, @@ -258,12 +252,11 @@ impl Server { let port = settings.port; let deadman = Arc::new(RwLock::new(Deadman::from(&settings.syncstorage))); let blocking_threadpool = Arc::new(BlockingThreadpool::default()); - let db_pool = pool_from_settings( + let db_pool = DbPoolImpl::new( &settings.syncstorage, &Metrics::from(&metrics), blocking_threadpool.clone(), - ) - .await?; + )?; let limits = Arc::new(settings.syncstorage.limits); let limits_json = serde_json::to_string(&*limits).expect("ServerLimits failed to serialize"); @@ -273,12 +266,12 @@ impl Server { let tokenserver_state = if settings.tokenserver.enabled { let state = tokenserver::ServerState::from_settings( &settings.tokenserver, - metrics::metrics_from_opts( + syncserver_common::metrics_from_opts( &settings.tokenserver.statsd_label, settings.statsd_host.as_deref(), settings.statsd_port, )?, - blocking_threadpool.clone(), + blocking_threadpool, )?; Some(state) @@ -290,7 +283,7 @@ impl Server { Duration::from_secs(10), metrics.clone(), db_pool.clone(), - blocking_threadpool.clone(), + blocking_threadpool, )?; None @@ -298,7 +291,7 @@ impl Server { let mut server = HttpServer::new(move || { let syncstorage_state = ServerState { - db_pool: db_pool.clone(), + db_pool: Box::new(db_pool.clone()), limits: Arc::clone(&limits), limits_json: limits_json.clone(), metrics: Box::new(metrics.clone()), @@ -337,7 +330,7 @@ impl Server { let blocking_threadpool = Arc::new(BlockingThreadpool::default()); let tokenserver_state = tokenserver::ServerState::from_settings( &settings.tokenserver, - 
metrics::metrics_from_opts( + syncserver_common::metrics_from_opts( &settings.tokenserver.statsd_label, settings.statsd_host.as_deref(), settings.statsd_port, @@ -405,6 +398,37 @@ fn build_cors(settings: &Settings) -> Cors { cors } +pub struct MetricsWrapper(pub Metrics); + +impl FromRequest for MetricsWrapper { + type Config = (); + type Error = (); + type Future = Ready>; + + fn from_request(req: &HttpRequest, _: &mut Payload) -> Self::Future { + let client = { + let syncstorage_metrics = req + .app_data::>() + .map(|state| state.metrics.clone()); + let tokenserver_metrics = req + .app_data::>() + .map(|state| state.metrics.clone()); + + syncstorage_metrics.or(tokenserver_metrics) + }; + + if client.is_none() { + warn!("⚠️ metric error: No App State"); + } + + future::ok(MetricsWrapper(Metrics { + client: client.as_deref().cloned(), + tags: req.get_tags(), + timer: None, + })) + } +} + /// Emit database pool and threadpool metrics periodically fn spawn_metric_periodic_reporter( interval: Duration, @@ -453,43 +477,3 @@ fn spawn_metric_periodic_reporter( Ok(()) } - -/// A threadpool on which callers can spawn non-CPU-bound tasks that block their thread (this is -/// mostly useful for running I/O tasks). `BlockingThreadpool` intentionally does not implement -/// `Clone`: `Arc`s are not used internally, so a `BlockingThreadpool` should be instantiated once -/// and shared by passing around `Arc`s. -#[derive(Debug, Default)] -pub struct BlockingThreadpool { - spawned_tasks: AtomicU64, -} - -impl BlockingThreadpool { - /// Runs a function as a task on the blocking threadpool. - /// - /// WARNING: Spawning a blocking task through means other than calling this method will - /// result in inaccurate threadpool metrics being reported. If you want to spawn a task on - /// the blocking threadpool, you **must** use this function. - pub async fn spawn(&self, f: F) -> Result - where - F: FnOnce() -> Result + Send + 'static, - T: Send + 'static, - E: fmt::Debug + Send + InternalError + 'static, - { - self.spawned_tasks.fetch_add(1, Ordering::Relaxed); - - let result = web::block(f).await.map_err(|e| match e { - BlockingError::Error(e) => e, - BlockingError::Canceled => { - E::internal_error("Blocking threadpool operation canceled".to_owned()) - } - }); - - self.spawned_tasks.fetch_sub(1, Ordering::Relaxed); - - result - } - - fn active_threads(&self) -> u64 { - self.spawned_tasks.load(Ordering::Relaxed) - } -} diff --git a/syncserver/src/web/tags.rs b/syncserver/src/server/tags.rs similarity index 89% rename from syncserver/src/web/tags.rs rename to syncserver/src/server/tags.rs index db6f6ab2..933644c9 100644 --- a/syncserver/src/web/tags.rs +++ b/syncserver/src/server/tags.rs @@ -1,4 +1,4 @@ -use std::collections::{BTreeMap, HashMap}; +use std::collections::HashMap; use actix_web::HttpMessage; @@ -78,14 +78,3 @@ struct Tags(HashMap); // "Extras" are pieces of metadata with high cardinality to be included in Sentry errors. 
#[derive(Default)] struct Extras(HashMap); - -impl From for BTreeMap { - fn from(tags: Tags) -> BTreeMap { - let mut result = BTreeMap::new(); - - for (k, v) in tags.0 { - result.insert(k.clone(), v.clone()); - } - result - } -} diff --git a/syncserver/src/server/test.rs b/syncserver/src/server/test.rs index 6378e586..76bfd2a9 100644 --- a/syncserver/src/server/test.rs +++ b/syncserver/src/server/test.rs @@ -16,17 +16,16 @@ use serde::de::DeserializeOwned; use serde_json::json; use sha2::Sha256; use syncserver_common::{self, X_LAST_MODIFIED}; -use syncserver_db_common::{ +use syncserver_settings::{Secrets, Settings}; +use syncstorage_db::{ params, results::{DeleteBso, GetBso, PostBsos, PutBso}, - util::SyncTimestamp, + DbPoolImpl, SyncTimestamp, }; -use syncserver_settings::{Secrets, Settings}; use syncstorage_settings::ServerLimits; use super::*; use crate::build_app; -use crate::db::pool_from_settings; use crate::tokenserver; use crate::web::{auth::HawkPayload, extractors::BsoBody}; @@ -69,13 +68,14 @@ async fn get_test_state(settings: &Settings) -> ServerState { let blocking_threadpool = Arc::new(BlockingThreadpool::default()); ServerState { - db_pool: pool_from_settings( - &settings.syncstorage, - &Metrics::from(&metrics), - blocking_threadpool.clone(), - ) - .await - .expect("Could not get db_pool in get_test_state"), + db_pool: Box::new( + DbPoolImpl::new( + &settings.syncstorage, + &Metrics::from(&metrics), + blocking_threadpool, + ) + .expect("Could not get db_pool in get_test_state"), + ), limits: Arc::clone(&SERVER_LIMITS), limits_json: serde_json::to_string(&**SERVER_LIMITS).unwrap(), metrics: Box::new(metrics), diff --git a/syncserver/src/tokenserver/auth/browserid.rs b/syncserver/src/tokenserver/auth/browserid.rs index 8be9bb0f..620b532f 100644 --- a/syncserver/src/tokenserver/auth/browserid.rs +++ b/syncserver/src/tokenserver/auth/browserid.rs @@ -1,7 +1,7 @@ use async_trait::async_trait; use reqwest::{Client as ReqwestClient, StatusCode}; use serde::{de::Deserializer, Deserialize, Serialize}; -use tokenserver_common::error::{ErrorLocation, TokenType, TokenserverError}; +use tokenserver_common::{ErrorLocation, TokenType, TokenserverError}; use tokenserver_settings::Settings; use super::VerifyToken; diff --git a/syncserver/src/tokenserver/auth/mod.rs b/syncserver/src/tokenserver/auth/mod.rs index 1a5e4d54..d119c44d 100644 --- a/syncserver/src/tokenserver/auth/mod.rs +++ b/syncserver/src/tokenserver/auth/mod.rs @@ -10,7 +10,7 @@ use pyo3::{ types::IntoPyDict, }; use serde::{Deserialize, Serialize}; -use tokenserver_common::error::TokenserverError; +use tokenserver_common::TokenserverError; /// Represents the origin of the token used by Sync clients to access their data. 
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)] diff --git a/syncserver/src/tokenserver/auth/oauth.rs b/syncserver/src/tokenserver/auth/oauth.rs index 9853498b..02366484 100644 --- a/syncserver/src/tokenserver/auth/oauth.rs +++ b/syncserver/src/tokenserver/auth/oauth.rs @@ -5,12 +5,12 @@ use pyo3::{ }; use serde::{Deserialize, Serialize}; use serde_json; -use tokenserver_common::error::TokenserverError; +use syncserver_common::BlockingThreadpool; +use tokenserver_common::TokenserverError; use tokenserver_settings::{Jwk, Settings}; use tokio::time; use super::VerifyToken; -use crate::server::BlockingThreadpool; use std::{sync::Arc, time::Duration}; diff --git a/syncserver/src/tokenserver/db/mod.rs b/syncserver/src/tokenserver/db/mod.rs deleted file mode 100644 index c068100c..00000000 --- a/syncserver/src/tokenserver/db/mod.rs +++ /dev/null @@ -1,5 +0,0 @@ -pub mod mock; -pub mod models; -pub mod params; -pub mod pool; -pub mod results; diff --git a/syncserver/src/tokenserver/extractors.rs b/syncserver/src/tokenserver/extractors.rs index 6118da19..a4907f98 100644 --- a/syncserver/src/tokenserver/extractors.rs +++ b/syncserver/src/tokenserver/extractors.rs @@ -21,17 +21,11 @@ use regex::Regex; use serde::Deserialize; use sha2::Sha256; use syncserver_settings::Secrets; -use tokenserver_common::{ - error::{ErrorLocation, TokenserverError}, - NodeType, -}; +use tokenserver_common::{ErrorLocation, NodeType, TokenserverError}; +use tokenserver_db::{params, results, Db, DbPool}; -use super::{ - db::{models::Db, params, pool::DbPool, results}, - LogItemsMutator, ServerState, TokenserverMetrics, -}; -use crate::server::metrics::Metrics; -use crate::web::tags::Taggable; +use super::{LogItemsMutator, ServerState, TokenserverMetrics}; +use crate::server::{tags::Taggable, MetricsWrapper}; lazy_static! { static ref CLIENT_STATE_REGEX: Regex = Regex::new("^[a-zA-Z0-9._-]{1,32}$").unwrap(); @@ -218,7 +212,7 @@ impl FromRequest for TokenserverRequest { hash_device_id(&hashed_fxa_uid, device_id, fxa_metrics_hash_secret) }; - let db = >::extract(&req).await?; + let DbWrapper(db) = DbWrapper::extract(&req).await?; let service_id = { let path = req.match_info(); @@ -312,7 +306,10 @@ struct QueryParams { pub duration: Option, } -impl FromRequest for Box { +/// A local "newtype" that wraps `Box` so we can implement `FromRequest`. +pub struct DbWrapper(pub Box); + +impl FromRequest for DbWrapper { type Config = (); type Error = TokenserverError; type Future = LocalBoxFuture<'static, Result>; @@ -321,10 +318,12 @@ impl FromRequest for Box { let req = req.clone(); Box::pin(async move { - >::extract(&req) + DbPoolWrapper::extract(&req) .await? 
+ .0 .get() .await + .map(Self) .map_err(|e| TokenserverError { context: format!("Couldn't acquire a database connection: {}", e), ..TokenserverError::internal_error() @@ -333,7 +332,9 @@ impl FromRequest for Box { } } -impl FromRequest for Box { +struct DbPoolWrapper(Box); + +impl FromRequest for DbPoolWrapper { type Config = (); type Error = TokenserverError; type Future = LocalBoxFuture<'static, Result>; @@ -344,7 +345,7 @@ impl FromRequest for Box { Box::pin(async move { let state = get_server_state(&req)?.as_ref(); - Ok(state.db_pool.clone()) + Ok(Self(state.db_pool.clone())) }) } } @@ -648,8 +649,12 @@ impl FromRequest for TokenserverMetrics { fn from_request(req: &HttpRequest, _payload: &mut Payload) -> Self::Future { let req = req.clone(); - // `Result::unwrap` is safe to use here, since Metrics::extract can never fail - Box::pin(async move { Ok(TokenserverMetrics(Metrics::extract(&req).await.unwrap())) }) + // `Result::unwrap` is safe to use here, since MetricsWrapper::extract can never fail + Box::pin(async move { + Ok(TokenserverMetrics( + MetricsWrapper::extract(&req).await.unwrap().0, + )) + }) } } @@ -706,12 +711,11 @@ mod tests { use serde_json; use syncserver_settings::Settings as GlobalSettings; use syncstorage_settings::ServerLimits; + use tokenserver_db::mock::MockDbPool as MockTokenserverPool; use tokenserver_settings::Settings as TokenserverSettings; - use crate::server::metrics; use crate::tokenserver::{ auth::{browserid, oauth, MockVerifier}, - db::mock::MockDbPool as MockTokenserverPool, ServerState, }; @@ -1339,7 +1343,7 @@ mod tests { node_capacity_release_rate: None, node_type: NodeType::default(), metrics: Box::new( - metrics::metrics_from_opts( + syncserver_common::metrics_from_opts( &tokenserver_settings.statsd_label, syncserver_settings.statsd_host.as_deref(), syncserver_settings.statsd_port, diff --git a/syncserver/src/tokenserver/handlers.rs b/syncserver/src/tokenserver/handlers.rs index 2e8c529a..257daf78 100644 --- a/syncserver/src/tokenserver/handlers.rs +++ b/syncserver/src/tokenserver/handlers.rs @@ -6,15 +6,15 @@ use std::{ use actix_web::{http::StatusCode, Error, HttpResponse}; use serde::Serialize; use serde_json::Value; -use tokenserver_common::{error::TokenserverError, NodeType}; +use tokenserver_common::{NodeType, TokenserverError}; +use tokenserver_db::{ + params::{GetNodeId, PostUser, PutUser, ReplaceUsers}, + Db, +}; use super::{ auth::{MakeTokenPlaintext, Tokenlib, TokenserverOrigin}, - db::{ - models::Db, - params::{GetNodeId, PostUser, PutUser, ReplaceUsers}, - }, - extractors::TokenserverRequest, + extractors::{DbWrapper, TokenserverRequest}, TokenserverMetrics, }; @@ -32,7 +32,7 @@ pub struct TokenserverResult { pub async fn get_tokenserver_result( req: TokenserverRequest, - db: Box, + DbWrapper(db): DbWrapper, TokenserverMetrics(mut metrics): TokenserverMetrics, ) -> Result { let updates = update_user(&req, db).await?; @@ -242,7 +242,7 @@ async fn update_user( } } -pub async fn heartbeat(db: Box) -> Result { +pub async fn heartbeat(DbWrapper(db): DbWrapper) -> Result { let mut checklist = HashMap::new(); checklist.insert( "version".to_owned(), diff --git a/syncserver/src/tokenserver/mod.rs b/syncserver/src/tokenserver/mod.rs index 52cb27fd..b501c7d0 100644 --- a/syncserver/src/tokenserver/mod.rs +++ b/syncserver/src/tokenserver/mod.rs @@ -1,5 +1,4 @@ pub mod auth; -pub mod db; pub mod extractors; pub mod handlers; pub mod logging; @@ -10,18 +9,16 @@ use serde::{ ser::{SerializeMap, Serializer}, Serialize, }; +use 
syncserver_common::{BlockingThreadpool, Metrics}; use tokenserver_common::NodeType; +use tokenserver_db::{params, DbPool, TokenserverPool}; use tokenserver_settings::Settings; use crate::{ - error::ApiError, - server::{metrics::Metrics, user_agent, BlockingThreadpool}, + error::{ApiError, ApiErrorKind}, + server::user_agent, }; use auth::{browserid, oauth, VerifyToken}; -use db::{ - params, - pool::{DbPool, TokenserverPool}, -}; use std::{collections::HashMap, convert::TryFrom, fmt, sync::Arc}; @@ -86,7 +83,7 @@ impl ServerState { token_duration: settings.token_duration, } }) - .map_err(Into::into) + .map_err(|_| ApiErrorKind::Internal("Failed to create Tokenserver pool".to_owned()).into()) } } diff --git a/syncserver/src/web/extractors.rs b/syncserver/src/web/extractors.rs index 5bb2d2c7..5e7be712 100644 --- a/syncserver/src/web/extractors.rs +++ b/syncserver/src/web/extractors.rs @@ -27,23 +27,23 @@ use serde::{ Deserialize, Serialize, }; use serde_json::Value; -use syncserver_common::X_WEAVE_RECORDS; -use syncserver_db_common::{ +use syncserver_common::{Metrics, X_WEAVE_RECORDS}; +use syncstorage_db::{ params::{self, PostCollectionBso}, - util::SyncTimestamp, - DbPool, Sorting, UserIdentifier, + DbError, DbPool, Sorting, SyncTimestamp, UserIdentifier, }; use validator::{Validate, ValidationError}; -use crate::db::transaction::DbTransactionPool; use crate::error::{ApiError, ApiErrorKind}; use crate::label; -use crate::server::{metrics, ServerState, BSO_ID_REGEX, COLLECTION_ID_REGEX}; +use crate::server::{ + tags::Taggable, MetricsWrapper, ServerState, BSO_ID_REGEX, COLLECTION_ID_REGEX, +}; use crate::tokenserver::auth::TokenserverOrigin; use crate::web::{ auth::HawkPayload, error::{HawkErrorKind, ValidationErrorKind}, - tags::Taggable, + transaction::DbTransactionPool, DOCKER_FLOW_ENDPOINTS, }; const BATCH_MAX_IDS: usize = 100; @@ -408,7 +408,6 @@ impl FromRequest for BsoBody { ) .into()); } - let state = match req.app_data::>() { Some(s) => s, None => { @@ -637,7 +636,7 @@ impl FromRequest for CollectionParam { pub struct MetaRequest { pub user_id: UserIdentifier, pub tokenserver_origin: TokenserverOrigin, - pub metrics: metrics::Metrics, + pub metrics: Metrics, } impl FromRequest for MetaRequest { @@ -655,7 +654,7 @@ impl FromRequest for MetaRequest { Ok(MetaRequest { tokenserver_origin: user_id.tokenserver_origin, user_id: user_id.into(), - metrics: metrics::Metrics::extract(&req).await?, + metrics: MetricsWrapper::extract(&req).await?.0, }) } .boxed_local() @@ -678,7 +677,7 @@ pub struct CollectionRequest { pub tokenserver_origin: TokenserverOrigin, pub query: BsoQueryParams, pub reply: ReplyFormat, - pub metrics: metrics::Metrics, + pub metrics: Metrics, } impl FromRequest for CollectionRequest { @@ -719,7 +718,7 @@ impl FromRequest for CollectionRequest { user_id: user_id.into(), query, reply, - metrics: metrics::Metrics::extract(&req).await?, + metrics: MetricsWrapper::extract(&req).await?.0, }) } .boxed_local() @@ -738,7 +737,7 @@ pub struct CollectionPostRequest { pub query: BsoQueryParams, pub bsos: BsoBodies, pub batch: Option, - pub metrics: metrics::Metrics, + pub metrics: Metrics, pub quota_enabled: bool, } @@ -817,7 +816,7 @@ impl FromRequest for CollectionPostRequest { query, bsos, batch: batch.opt, - metrics: metrics::Metrics::extract(&req).await?, + metrics: MetricsWrapper::extract(&req).await?.0, quota_enabled: state.quota_enabled, }) }) @@ -834,7 +833,7 @@ pub struct BsoRequest { pub tokenserver_origin: TokenserverOrigin, pub query: BsoQueryParams, pub bso: String, - 
pub metrics: metrics::Metrics, + pub metrics: Metrics, } impl FromRequest for BsoRequest { @@ -860,7 +859,7 @@ impl FromRequest for BsoRequest { user_id: user_id.into(), query, bso: bso.bso, - metrics: metrics::Metrics::extract(&req).await?, + metrics: MetricsWrapper::extract(&req).await?.0, }) }) } @@ -876,7 +875,7 @@ pub struct BsoPutRequest { pub query: BsoQueryParams, pub bso: String, pub body: BsoBody, - pub metrics: metrics::Metrics, + pub metrics: Metrics, } impl FromRequest for BsoPutRequest { @@ -889,7 +888,7 @@ impl FromRequest for BsoPutRequest { let mut payload = payload.take(); async move { - let metrics = metrics::Metrics::extract(&req).await?; + let metrics = MetricsWrapper::extract(&req).await?.0; let (user_id, collection, query, bso, body) = <( HawkIdentifier, @@ -938,7 +937,7 @@ pub struct QuotaInfo { #[derive(Clone, Debug)] pub struct HeartbeatRequest { pub headers: HeaderMap, - pub db_pool: Box, + pub db_pool: Box>, pub quota: QuotaInfo, } @@ -1755,13 +1754,12 @@ mod tests { use serde_json::{self, json}; use sha2::Sha256; use syncserver_common; - use syncserver_db_common::Db; use syncserver_settings::Settings as GlobalSettings; use syncstorage_settings::{Deadman, ServerLimits, Settings as SyncstorageSettings}; use tokio::sync::RwLock; - use crate::db::mock::{MockDb, MockDbPool}; - use crate::server::{metrics, ServerState}; + use crate::server::ServerState; + use syncstorage_db::mock::{MockDb, MockDbPool}; use crate::web::auth::HawkPayload; @@ -1779,8 +1777,8 @@ mod tests { const INVALID_BSO_NAME: &str = "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz"; - fn make_db() -> Box> { - Box::new(MockDb::new()) + fn make_db() -> MockDb { + MockDb::new() } fn make_state() -> ServerState { @@ -1792,7 +1790,7 @@ mod tests { limits_json: serde_json::to_string(&**SERVER_LIMITS).unwrap(), port: 8000, metrics: Box::new( - metrics::metrics_from_opts( + syncserver_common::metrics_from_opts( &syncstorage_settings.statsd_label, syncserver_settings.statsd_host.as_deref(), syncserver_settings.statsd_port, diff --git a/syncserver/src/web/handlers.rs b/syncserver/src/web/handlers.rs index df5dbc63..7af9432e 100644 --- a/syncserver/src/web/handlers.rs +++ b/syncserver/src/web/handlers.rs @@ -6,21 +6,22 @@ use actix_web::{dev::HttpResponseBuilder, http::StatusCode, web::Data, HttpReque use serde::Serialize; use serde_json::{json, Value}; use syncserver_common::{X_LAST_MODIFIED, X_WEAVE_NEXT_OFFSET, X_WEAVE_RECORDS}; -use syncserver_db_common::{ - error::{DbError, DbErrorKind}, +use syncstorage_db::{ params, results::{CreateBatch, Paginated}, - Db, + Db, DbError, DbErrorIntrospect, }; use time; use crate::{ - db::transaction::DbTransactionPool, error::{ApiError, ApiErrorKind}, server::ServerState, - web::extractors::{ - BsoPutRequest, BsoRequest, CollectionPostRequest, CollectionRequest, EmitApiMetric, - HeartbeatRequest, MetaRequest, ReplyFormat, TestErrorRequest, + web::{ + extractors::{ + BsoPutRequest, BsoRequest, CollectionPostRequest, CollectionRequest, EmitApiMetric, + HeartbeatRequest, MetaRequest, ReplyFormat, TestErrorRequest, + }, + transaction::DbTransactionPool, }, }; @@ -189,7 +190,7 @@ pub async fn get_collection( async fn finish_get_collection( coll: &CollectionRequest, - db: Box + '_>, + db: Box>, result: Result, DbError>, ) -> Result where @@ -280,15 +281,16 @@ pub async fn post_collection( // the entire, accumulated if the `commit` flag is set. 
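// Editor's sketch (not part of the patch): the hunks above and below introduce thin local
// newtypes (`DbWrapper`, `DbPoolWrapper`, `MetricsWrapper`) because `FromRequest` is a foreign
// trait and the db/metrics types now live in separate crates, so the orphan rule forbids
// `impl FromRequest for Box<dyn Db>` or `for Metrics` directly; call sites then unwrap with
// `.0` or by destructuring. A self-contained miniature of that shape (the `Metrics` stand-in
// and the handler are hypothetical, only the wrapper pattern mirrors the patch):
use actix_web::{dev::Payload, Error, FromRequest, HttpRequest};
use futures::future::LocalBoxFuture;

// Stand-in for syncserver_common::Metrics, just for this sketch.
#[derive(Clone, Default)]
struct Metrics;
impl Metrics {
    fn incr(&self, _name: &str) {}
}

pub struct MetricsWrapper(pub Metrics);

impl FromRequest for MetricsWrapper {
    type Config = ();
    type Error = Error;
    type Future = LocalBoxFuture<'static, Result<Self, Self::Error>>;

    fn from_request(_req: &HttpRequest, _payload: &mut Payload) -> Self::Future {
        // The real implementation reads the shared StatsdClient out of the request's
        // application data; the stand-in just constructs a default value.
        Box::pin(async move { Ok(Self(Metrics::default())) })
    }
}

// Matches the `MetricsWrapper::extract(&req).await?.0` call sites in the hunks:
async fn example(req: HttpRequest) -> Result<(), Error> {
    let MetricsWrapper(metrics) = MetricsWrapper::extract(&req).await?;
    metrics.incr("example.count");
    Ok(())
}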
pub async fn post_collection_batch( coll: CollectionPostRequest, - db: Box + '_>, + db: Box>, ) -> Result { coll.emit_api_metric("request.post_collection_batch"); trace!("Batch: Post collection batch"); // Bail early if we have nonsensical arguments // TODO: issue932 may make these multi-level transforms easier - let breq = coll.batch.clone().ok_or_else(|| -> ApiError { - ApiErrorKind::Db(DbErrorKind::BatchNotFound.into()).into() - })?; + let breq = coll + .batch + .clone() + .ok_or_else(|| -> ApiError { ApiErrorKind::Db(DbError::batch_not_found()).into() })?; let new_batch = if let Some(id) = breq.id.clone() { trace!("Batch: Validating {}", &id); @@ -319,8 +321,7 @@ pub async fn post_collection_batch( }, } } else { - let err: DbError = DbErrorKind::BatchNotFound.into(); - return Err(ApiError::from(err)); + return Err(ApiErrorKind::Db(DbError::batch_not_found()).into()); } } else { trace!("Batch: Creating new batch"); @@ -405,8 +406,7 @@ pub async fn post_collection_batch( }) .await? } else { - let err: DbError = DbErrorKind::BatchNotFound.into(); - return Err(ApiError::from(err)); + return Err(ApiErrorKind::Db(DbError::batch_not_found()).into()); }; // Then, write the BSOs contained in the commit request into the BSO table. @@ -594,7 +594,7 @@ pub async fn lbheartbeat(req: HttpRequest) -> Result { let db_state = if cfg!(test) { use actix_web::http::header::HeaderValue; use std::str::FromStr; - use syncserver_db_common::PoolState; + use syncstorage_db::PoolState; let test_pool = PoolState { connections: u32::from_str( diff --git a/syncserver/src/web/middleware/mod.rs b/syncserver/src/web/middleware/mod.rs index 78e3f4b6..ed3c28f3 100644 --- a/syncserver/src/web/middleware/mod.rs +++ b/syncserver/src/web/middleware/mod.rs @@ -13,9 +13,10 @@ use actix_web::{ dev::{Service, ServiceRequest, ServiceResponse}, web::Data, }; +use syncserver_common::Metrics; use crate::error::{ApiError, ApiErrorKind}; -use crate::server::{metrics::Metrics, ServerState}; +use crate::server::ServerState; use crate::tokenserver::auth::TokenserverOrigin; pub fn emit_http_status_with_tokenserver_origin( diff --git a/syncserver/src/web/middleware/rejectua.rs b/syncserver/src/web/middleware/rejectua.rs index 29aaf056..d27115d0 100644 --- a/syncserver/src/web/middleware/rejectua.rs +++ b/syncserver/src/web/middleware/rejectua.rs @@ -10,7 +10,7 @@ use lazy_static::lazy_static; use regex::Regex; use crate::error::{ApiError, ApiErrorKind}; -use crate::server::metrics::Metrics; +use crate::server::MetricsWrapper; lazy_static! { // e.g. "Firefox-iOS-Sync/18.0b1 (iPhone; iPhone OS 13.2.2) (Fennec (synctesting))" @@ -43,7 +43,10 @@ pub fn reject_user_agent( Some(header) if header.to_str().map_or(false, should_reject) => Box::pin(async move { trace!("Rejecting User-Agent: {:?}", header); let (req, payload) = request.into_parts(); - Metrics::extract(&req).await?.incr("error.rejectua"); + MetricsWrapper::extract(&req) + .await? 
+ .0 + .incr("error.rejectua"); let sreq = ServiceRequest::from_parts(req, payload).map_err(|_| { ApiError::from(ApiErrorKind::Internal( "failed to reconstruct ServiceRequest from its parts".to_owned(), diff --git a/syncserver/src/web/middleware/sentry.rs b/syncserver/src/web/middleware/sentry.rs index a9a58308..6fff74a1 100644 --- a/syncserver/src/web/middleware/sentry.rs +++ b/syncserver/src/web/middleware/sentry.rs @@ -11,12 +11,11 @@ use actix_web::{ use sentry::protocol::Event; use sentry_backtrace::parse_stacktrace; use serde_json::value::Value; -use syncserver_common::ReportableError; -use tokenserver_common::error::TokenserverError; +use syncserver_common::{Metrics, ReportableError}; +use tokenserver_common::TokenserverError; use crate::error::ApiError; -use crate::server::{metrics::Metrics, user_agent}; -use crate::web::tags::Taggable; +use crate::server::{tags::Taggable, user_agent, MetricsWrapper}; pub fn report( tags: HashMap, @@ -75,7 +74,7 @@ pub fn report_error( } } Some(e) => { - let metrics = Metrics::extract(sresp.request()).await.unwrap(); + let metrics = MetricsWrapper::extract(sresp.request()).await.unwrap().0; if let Some(apie) = e.as_error::() { process_error(apie, metrics, tags, extras); diff --git a/syncserver/src/web/middleware/weave.rs b/syncserver/src/web/middleware/weave.rs index 69e00374..b4652b11 100644 --- a/syncserver/src/web/middleware/weave.rs +++ b/syncserver/src/web/middleware/weave.rs @@ -7,7 +7,7 @@ use actix_web::{ }; use syncserver_common::{X_LAST_MODIFIED, X_WEAVE_TIMESTAMP}; -use syncserver_db_common::util::SyncTimestamp; +use syncstorage_db::SyncTimestamp; use crate::error::{ApiError, ApiErrorKind}; use crate::web::DOCKER_FLOW_ENDPOINTS; diff --git a/syncserver/src/web/mod.rs b/syncserver/src/web/mod.rs index f759eee5..671c8289 100644 --- a/syncserver/src/web/mod.rs +++ b/syncserver/src/web/mod.rs @@ -4,7 +4,7 @@ pub mod error; pub mod extractors; pub mod handlers; pub mod middleware; -pub mod tags; +mod transaction; // Known DockerFlow commands for Ops callbacks pub const DOCKER_FLOW_ENDPOINTS: [&str; 4] = [ diff --git a/syncserver/src/db/transaction.rs b/syncserver/src/web/transaction.rs similarity index 92% rename from syncserver/src/db/transaction.rs rename to syncserver/src/web/transaction.rs index 68c82c8d..dfbfb7b3 100644 --- a/syncserver/src/db/transaction.rs +++ b/syncserver/src/web/transaction.rs @@ -9,20 +9,18 @@ use actix_web::{FromRequest, HttpRequest, HttpResponse}; use futures::future::LocalBoxFuture; use futures::FutureExt; use syncserver_common::X_LAST_MODIFIED; -use syncserver_db_common::{params, Db, DbPool, UserIdentifier}; +use syncstorage_db::{params, results::ConnectionInfo, Db, DbError, DbPool, UserIdentifier}; -use crate::db::results::ConnectionInfo; use crate::error::{ApiError, ApiErrorKind}; -use crate::server::metrics::Metrics; -use crate::server::ServerState; +use crate::server::tags::Taggable; +use crate::server::{MetricsWrapper, ServerState}; use crate::web::extractors::{ BsoParam, CollectionParam, HawkIdentifier, PreConditionHeader, PreConditionHeaderOpt, }; -use crate::web::tags::Taggable; #[derive(Clone)] pub struct DbTransactionPool { - pool: Box, + pool: Box>, is_read: bool, user_id: UserIdentifier, collection: Option, @@ -51,10 +49,10 @@ impl DbTransactionPool { &'a self, request: HttpRequest, action: A, - ) -> Result<(R, Box>), ApiError> + ) -> Result<(R, Box>), ApiError> where - A: FnOnce(Box>) -> F, - F: Future> + 'a, + A: FnOnce(Box>) -> F, + F: Future>, { // Get connection from pool let db = 
self.pool.get().await?; @@ -88,7 +86,7 @@ impl DbTransactionPool { } } - pub fn get_pool(&self) -> Result, Error> { + pub fn get_pool(&self) -> Result>, Error> { Ok(self.pool.clone()) } @@ -99,7 +97,7 @@ impl DbTransactionPool { action: A, ) -> Result where - A: FnOnce(Box>) -> F, + A: FnOnce(Box>) -> F, F: Future> + 'a, { let (resp, db) = self.transaction_internal(request, action).await?; @@ -117,11 +115,11 @@ impl DbTransactionPool { action: A, ) -> Result where - A: FnOnce(Box>) -> F, + A: FnOnce(Box>) -> F, F: Future> + 'a, { let mreq = request.clone(); - let check_precondition = move |db: Box>| { + let check_precondition = move |db: Box>| { async move { // set the extra information for all requests so we capture default err handlers. set_extra(&mreq, db.get_connection_info()); @@ -233,9 +231,10 @@ impl FromRequest for DbTransactionPool { Err(e) => { // Semi-example to show how to use metrics inside of middleware. // `Result::unwrap` is safe to use here, since Metrics::extract can never fail - Metrics::extract(&req) + MetricsWrapper::extract(&req) .await .unwrap() + .0 .incr("sync.error.collectionParam"); warn!("⚠️ CollectionParam err: {:?}", e); return Err(e); diff --git a/syncstorage-db-common/Cargo.toml b/syncstorage-db-common/Cargo.toml new file mode 100644 index 00000000..86989d79 --- /dev/null +++ b/syncstorage-db-common/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "syncstorage-db-common" +version = "0.12.3" +edition = "2021" + +[dependencies] +async-trait = "0.1.40" +backtrace = "0.3.61" +chrono = "0.4" +diesel = { version = "1.4", features = ["mysql", "r2d2"] } +diesel_migrations = { version = "1.4.0", features = ["mysql"] } +futures = { version = "0.3", features = ["compat"] } +http = "0.2.6" +lazy_static = "1.4.0" +serde = "1.0" +serde_json = { version = "1.0", features = ["arbitrary_precision"] } +syncserver-common = { path = "../syncserver-common" } +syncserver-db-common = { path = "../syncserver-db-common" } +thiserror = "1.0.26" diff --git a/syncstorage-db-common/src/error.rs b/syncstorage-db-common/src/error.rs new file mode 100644 index 00000000..aa1716a5 --- /dev/null +++ b/syncstorage-db-common/src/error.rs @@ -0,0 +1,138 @@ +use std::fmt; + +use backtrace::Backtrace; +use http::StatusCode; +use syncserver_common::{impl_fmt_display, ReportableError}; +use thiserror::Error; + +/// Errors common to all supported syncstorage database backends. These errors can be thought of +/// as being related more to the syncstorage application logic as opposed to a particular +/// database backend. 
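// Editor's aside (not part of the patch): a short usage sketch of the error API defined just
// below. Callers construct errors through the helper constructors and branch through the
// `DbErrorIntrospect` predicates instead of matching on a backend-private kind enum; the
// function names here are illustrative only.
use syncstorage_db_common::{
    error::{DbErrorIntrospect, SyncstorageDbError},
    util::SyncTimestamp,
};

fn missing_batch() -> Result<(), SyncstorageDbError> {
    // Replaces the old `DbErrorKind::BatchNotFound.into()` style of construction.
    Err(SyncstorageDbError::batch_not_found())
}

// Mirrors the `extract_resource` fallback further down: a missing collection is reported as
// timestamp 0 rather than surfaced as an error.
fn timestamp_or_epoch(
    result: Result<SyncTimestamp, SyncstorageDbError>,
) -> Result<SyncTimestamp, SyncstorageDbError> {
    match result {
        Err(ref e) if e.is_collection_not_found() => Ok(SyncTimestamp::from_seconds(0f64)),
        other => other,
    }
}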
+#[derive(Debug)] +pub struct SyncstorageDbError { + kind: SyncstorageDbErrorKind, + pub status: StatusCode, + pub backtrace: Backtrace, +} + +#[derive(Debug, Error)] +enum SyncstorageDbErrorKind { + #[error("Specified collection does not exist")] + CollectionNotFound, + + #[error("Specified bso does not exist")] + BsoNotFound, + + #[error("Specified batch does not exist")] + BatchNotFound, + + #[error("An attempt at a conflicting write")] + Conflict, + + #[error("Unexpected error: {}", _0)] + Internal(String), + + #[error("User over quota")] + Quota, +} + +impl SyncstorageDbError { + pub fn batch_not_found() -> Self { + SyncstorageDbErrorKind::BatchNotFound.into() + } + + pub fn bso_not_found() -> Self { + SyncstorageDbErrorKind::BsoNotFound.into() + } + + pub fn collection_not_found() -> Self { + SyncstorageDbErrorKind::CollectionNotFound.into() + } + + pub fn conflict() -> Self { + SyncstorageDbErrorKind::Conflict.into() + } + + pub fn internal(msg: String) -> Self { + SyncstorageDbErrorKind::Internal(msg).into() + } + + pub fn quota() -> Self { + SyncstorageDbErrorKind::Quota.into() + } +} + +pub trait DbErrorIntrospect { + fn is_collection_not_found(&self) -> bool; + fn is_conflict(&self) -> bool; + fn is_quota(&self) -> bool; + fn is_bso_not_found(&self) -> bool; + fn is_batch_not_found(&self) -> bool; +} + +impl DbErrorIntrospect for SyncstorageDbError { + fn is_collection_not_found(&self) -> bool { + matches!(self.kind, SyncstorageDbErrorKind::CollectionNotFound) + } + + fn is_conflict(&self) -> bool { + matches!(self.kind, SyncstorageDbErrorKind::Conflict) + } + + fn is_quota(&self) -> bool { + matches!(self.kind, SyncstorageDbErrorKind::Quota) + } + + fn is_bso_not_found(&self) -> bool { + matches!(self.kind, SyncstorageDbErrorKind::BsoNotFound) + } + + fn is_batch_not_found(&self) -> bool { + matches!(self.kind, SyncstorageDbErrorKind::BatchNotFound) + } +} + +impl ReportableError for SyncstorageDbError { + fn is_sentry_event(&self) -> bool { + !matches!(&self.kind, SyncstorageDbErrorKind::Conflict) + } + + fn metric_label(&self) -> Option { + match &self.kind { + SyncstorageDbErrorKind::Conflict => Some("storage.conflict".to_owned()), + _ => None, + } + } + + fn error_backtrace(&self) -> String { + format!("{:#?}", self.backtrace) + } +} + +impl From for SyncstorageDbError { + fn from(kind: SyncstorageDbErrorKind) -> Self { + let status = match kind { + SyncstorageDbErrorKind::CollectionNotFound | SyncstorageDbErrorKind::BsoNotFound => { + StatusCode::NOT_FOUND + } + // Matching the Python code here (a 400 vs 404) + SyncstorageDbErrorKind::BatchNotFound => StatusCode::BAD_REQUEST, + // NOTE: the protocol specification states that we should return a + // "409 Conflict" response here, but clients currently do not + // handle these respones very well: + // * desktop bug: https://bugzilla.mozilla.org/show_bug.cgi?id=959034 + // * android bug: https://bugzilla.mozilla.org/show_bug.cgi?id=959032 + SyncstorageDbErrorKind::Conflict => StatusCode::SERVICE_UNAVAILABLE, + SyncstorageDbErrorKind::Quota => StatusCode::FORBIDDEN, + _ => StatusCode::INTERNAL_SERVER_ERROR, + }; + + Self { + kind, + status, + backtrace: Backtrace::new(), + } + } +} + +impl_fmt_display!(SyncstorageDbError, SyncstorageDbErrorKind); diff --git a/syncstorage-db-common/src/lib.rs b/syncstorage-db-common/src/lib.rs new file mode 100644 index 00000000..c1613614 --- /dev/null +++ b/syncstorage-db-common/src/lib.rs @@ -0,0 +1,304 @@ +pub mod error; +pub mod params; +pub mod results; +pub mod util; + +use 
std::fmt::Debug; + +use async_trait::async_trait; +use futures::{future, TryFutureExt}; +use lazy_static::lazy_static; +use serde::Deserialize; +use syncserver_db_common::{DbFuture, GetPoolState}; + +use error::DbErrorIntrospect; +use util::SyncTimestamp; + +lazy_static! { + /// For efficiency, it's possible to use fixed pre-determined IDs for + /// common collection names. This is the canonical list of such + /// names. Non-standard collections will be allocated IDs starting + /// from the highest ID in this collection. + pub static ref STD_COLLS: Vec<(i32, &'static str)> = { + vec![ + (1, "clients"), + (2, "crypto"), + (3, "forms"), + (4, "history"), + (5, "keys"), + (6, "meta"), + (7, "bookmarks"), + (8, "prefs"), + (9, "tabs"), + (10, "passwords"), + (11, "addons"), + (12, "addresses"), + (13, "creditcards"), + ] + }; +} + +/// Rough guesstimate of the maximum reasonable life span of a batch +pub const BATCH_LIFETIME: i64 = 2 * 60 * 60 * 1000; // 2 hours, in milliseconds + +/// The ttl to use for rows that are never supposed to expire (in seconds) +pub const DEFAULT_BSO_TTL: u32 = 2_100_000_000; + +/// Non-standard collections will be allocated IDs beginning with this value +pub const FIRST_CUSTOM_COLLECTION_ID: i32 = 101; + +#[async_trait] +pub trait DbPool: Sync + Send + Debug + GetPoolState { + type Error; + + async fn get(&self) -> Result>, Self::Error>; + + fn validate_batch_id(&self, params: params::ValidateBatchId) -> Result<(), Self::Error>; + + fn box_clone(&self) -> Box>; +} + +impl Clone for Box> { + fn clone(&self) -> Box> { + self.box_clone() + } +} + +pub trait Db: Debug { + type Error: DbErrorIntrospect + 'static; + + fn lock_for_read(&self, params: params::LockCollection) -> DbFuture<'_, (), Self::Error>; + + fn lock_for_write(&self, params: params::LockCollection) -> DbFuture<'_, (), Self::Error>; + + fn begin(&self, for_write: bool) -> DbFuture<'_, (), Self::Error>; + + fn commit(&self) -> DbFuture<'_, (), Self::Error>; + + fn rollback(&self) -> DbFuture<'_, (), Self::Error>; + + fn get_collection_timestamps( + &self, + params: params::GetCollectionTimestamps, + ) -> DbFuture<'_, results::GetCollectionTimestamps, Self::Error>; + + fn get_collection_timestamp( + &self, + params: params::GetCollectionTimestamp, + ) -> DbFuture<'_, results::GetCollectionTimestamp, Self::Error>; + + fn get_collection_counts( + &self, + params: params::GetCollectionCounts, + ) -> DbFuture<'_, results::GetCollectionCounts, Self::Error>; + + fn get_collection_usage( + &self, + params: params::GetCollectionUsage, + ) -> DbFuture<'_, results::GetCollectionUsage, Self::Error>; + + fn get_storage_timestamp( + &self, + params: params::GetStorageTimestamp, + ) -> DbFuture<'_, results::GetStorageTimestamp, Self::Error>; + + fn get_storage_usage( + &self, + params: params::GetStorageUsage, + ) -> DbFuture<'_, results::GetStorageUsage, Self::Error>; + + fn get_quota_usage( + &self, + params: params::GetQuotaUsage, + ) -> DbFuture<'_, results::GetQuotaUsage, Self::Error>; + + fn delete_storage( + &self, + params: params::DeleteStorage, + ) -> DbFuture<'_, results::DeleteStorage, Self::Error>; + + fn delete_collection( + &self, + params: params::DeleteCollection, + ) -> DbFuture<'_, results::DeleteCollection, Self::Error>; + + fn delete_bsos( + &self, + params: params::DeleteBsos, + ) -> DbFuture<'_, results::DeleteBsos, Self::Error>; + + fn get_bsos(&self, params: params::GetBsos) -> DbFuture<'_, results::GetBsos, Self::Error>; + + fn get_bso_ids(&self, params: params::GetBsos) + -> DbFuture<'_, 
results::GetBsoIds, Self::Error>; + + fn post_bsos(&self, params: params::PostBsos) -> DbFuture<'_, results::PostBsos, Self::Error>; + + fn delete_bso( + &self, + params: params::DeleteBso, + ) -> DbFuture<'_, results::DeleteBso, Self::Error>; + + fn get_bso(&self, params: params::GetBso) + -> DbFuture<'_, Option, Self::Error>; + + fn get_bso_timestamp( + &self, + params: params::GetBsoTimestamp, + ) -> DbFuture<'_, results::GetBsoTimestamp, Self::Error>; + + fn put_bso(&self, params: params::PutBso) -> DbFuture<'_, results::PutBso, Self::Error>; + + fn create_batch( + &self, + params: params::CreateBatch, + ) -> DbFuture<'_, results::CreateBatch, Self::Error>; + + fn validate_batch( + &self, + params: params::ValidateBatch, + ) -> DbFuture<'_, results::ValidateBatch, Self::Error>; + + fn append_to_batch( + &self, + params: params::AppendToBatch, + ) -> DbFuture<'_, results::AppendToBatch, Self::Error>; + + fn get_batch( + &self, + params: params::GetBatch, + ) -> DbFuture<'_, Option, Self::Error>; + + fn commit_batch( + &self, + params: params::CommitBatch, + ) -> DbFuture<'_, results::CommitBatch, Self::Error>; + + fn box_clone(&self) -> Box>; + + fn check(&self) -> DbFuture<'_, results::Check, Self::Error>; + + fn get_connection_info(&self) -> results::ConnectionInfo; + + /// Retrieve the timestamp for an item/collection + /// + /// Modeled on the Python `get_resource_timestamp` function. + fn extract_resource( + &self, + user_id: UserIdentifier, + collection: Option, + bso: Option, + ) -> DbFuture<'_, SyncTimestamp, Self::Error> { + // If there's no collection, we return the overall storage timestamp + let collection = match collection { + Some(collection) => collection, + None => return Box::pin(self.get_storage_timestamp(user_id)), + }; + // If there's no bso, return the collection + let bso = match bso { + Some(bso) => bso, + None => { + return Box::pin( + self.get_collection_timestamp(params::GetCollectionTimestamp { + user_id, + collection, + }) + .or_else(|e| { + if e.is_collection_not_found() { + future::ok(SyncTimestamp::from_seconds(0f64)) + } else { + future::err(e) + } + }), + ) + } + }; + Box::pin( + self.get_bso_timestamp(params::GetBsoTimestamp { + user_id, + collection, + id: bso, + }) + .or_else(|e| { + if e.is_collection_not_found() { + future::ok(SyncTimestamp::from_seconds(0f64)) + } else { + future::err(e) + } + }), + ) + } + + /// Internal methods used by the db tests + + fn get_collection_id(&self, name: String) -> DbFuture<'_, i32, Self::Error>; + + fn create_collection(&self, name: String) -> DbFuture<'_, i32, Self::Error>; + + fn update_collection( + &self, + params: params::UpdateCollection, + ) -> DbFuture<'_, SyncTimestamp, Self::Error>; + + fn timestamp(&self) -> SyncTimestamp; + + fn set_timestamp(&self, timestamp: SyncTimestamp); + + fn delete_batch(&self, params: params::DeleteBatch) -> DbFuture<'_, (), Self::Error>; + + fn clear_coll_cache(&self) -> DbFuture<'_, (), Self::Error>; + + fn set_quota(&mut self, enabled: bool, limit: usize, enforce: bool); +} + +impl Clone for Box> +where + E: DbErrorIntrospect + 'static, +{ + fn clone(&self) -> Box> { + self.box_clone() + } +} + +#[derive(Debug, Deserialize, Clone, PartialEq, Eq, Copy)] +#[serde(rename_all = "lowercase")] +pub enum Sorting { + None, + Newest, + Oldest, + Index, +} + +impl Default for Sorting { + fn default() -> Self { + Sorting::None + } +} + +#[derive(Clone, Debug, Default, Eq, Hash, PartialEq)] +pub struct UserIdentifier { + /// For MySQL database backends as the primary key + pub 
legacy_id: u64, + /// For NoSQL database backends that require randomly distributed primary keys + pub fxa_uid: String, + pub fxa_kid: String, +} + +impl UserIdentifier { + /// Create a new legacy id user identifier + pub fn new_legacy(user_id: u64) -> Self { + Self { + legacy_id: user_id, + ..Default::default() + } + } +} + +impl From for UserIdentifier { + fn from(val: u32) -> Self { + Self { + legacy_id: val.into(), + ..Default::default() + } + } +} diff --git a/syncserver-db-common/src/params.rs b/syncstorage-db-common/src/params.rs similarity index 100% rename from syncserver-db-common/src/params.rs rename to syncstorage-db-common/src/params.rs diff --git a/syncserver-db-common/src/results.rs b/syncstorage-db-common/src/results.rs similarity index 100% rename from syncserver-db-common/src/results.rs rename to syncstorage-db-common/src/results.rs diff --git a/syncserver-db-common/src/util.rs b/syncstorage-db-common/src/util.rs similarity index 85% rename from syncserver-db-common/src/util.rs rename to syncstorage-db-common/src/util.rs index 92da71be..1cee78d1 100644 --- a/syncserver-db-common/src/util.rs +++ b/syncstorage-db-common/src/util.rs @@ -12,7 +12,7 @@ use diesel::{ }; use serde::{ser, Deserialize, Deserializer, Serialize, Serializer}; -use super::error::{DbError, DbErrorKind}; +use super::error::SyncstorageDbError; /// Get the time since the UNIX epoch in milliseconds fn ms_since_epoch() -> i64 { @@ -53,15 +53,17 @@ impl SyncTimestamp { } /// Create a `SyncTimestamp` from an i64 - pub fn from_i64(val: i64) -> Result { + pub fn from_i64(val: i64) -> Result { if val < 0 { - return Err(DbErrorKind::Integrity("Invalid modified i64 (< 0)".to_owned()).into()); + return Err(SyncstorageDbError::internal( + "Invalid modified i64 (< 0)".to_owned(), + )); } Ok(SyncTimestamp::from_milliseconds(val as u64)) } /// Exposed separately for db tests - pub fn _from_i64(val: i64) -> Result { + pub fn _from_i64(val: i64) -> Result { SyncTimestamp::from_i64(val) } @@ -78,17 +80,19 @@ impl SyncTimestamp { /// Create a `SyncTimestamp` from an RFC 3339 and ISO 8601 date and time /// string such as 1996-12-19T16:39:57-08:00 - pub fn from_rfc3339(val: &str) -> Result { + pub fn from_rfc3339(val: &str) -> Result { let dt = DateTime::parse_from_rfc3339(val) - .map_err(|e| DbErrorKind::Integrity(format!("Invalid TIMESTAMP {}", e)))?; + .map_err(|e| SyncstorageDbError::internal(format!("Invalid TIMESTAMP {}", e)))?; Self::from_datetime(dt) } /// Create a `SyncTimestamp` from a chrono DateTime - fn from_datetime(val: DateTime) -> Result { + fn from_datetime(val: DateTime) -> Result { let millis = val.timestamp_millis(); if millis < 0 { - return Err(DbErrorKind::Integrity("Invalid DateTime (< 0)".to_owned()).into()); + return Err(SyncstorageDbError::internal( + "Invalid DateTime (< 0)".to_owned(), + )); } Ok(SyncTimestamp::from_milliseconds(millis as u64)) } @@ -105,7 +109,7 @@ impl SyncTimestamp { /// Return the timestamp as an RFC 3339 and ISO 8601 date and time string such as /// 1996-12-19T16:39:57-08:00 - pub fn as_rfc3339(self) -> Result { + pub fn as_rfc3339(self) -> Result { to_rfc3339(self.as_i64()) } } @@ -167,10 +171,10 @@ where /// Render a timestamp (as an i64 milliseconds since epoch) as an RFC 3339 and ISO 8601 /// date and time string such as 1996-12-19T16:39:57-08:00 -pub fn to_rfc3339(val: i64) -> Result { +pub fn to_rfc3339(val: i64) -> Result { let secs = val / 1000; let nsecs = ((val % 1000) * 1_000_000).try_into().map_err(|e| { - DbError::internal(&format!("Invalid timestamp 
(nanoseconds) {}: {}", val, e)) + SyncstorageDbError::internal(format!("Invalid timestamp (nanoseconds) {}: {}", val, e)) })?; Ok(Utc .timestamp(secs, nsecs) diff --git a/syncstorage-db/Cargo.toml b/syncstorage-db/Cargo.toml new file mode 100644 index 00000000..2c26ad36 --- /dev/null +++ b/syncstorage-db/Cargo.toml @@ -0,0 +1,31 @@ +[package] +name = "syncstorage-db" +version = "0.12.3" +edition = "2021" + +[dependencies] +async-trait = "0.1.40" +cadence = "0.26" +env_logger = "0.9" +futures = { version = "0.3", features = ["compat"] } +hostname = "0.3.1" +lazy_static = "1.4.0" +log = { version = "0.4", features = [ + "max_level_debug", + "release_max_level_info", +] } +rand = "0.8" +slog-scope = "4.3" +syncserver-common = { path = "../syncserver-common" } +syncserver-db-common = { path = "../syncserver-db-common" } +syncserver-settings = { path = "../syncserver-settings" } +syncstorage-db-common = { path = "../syncstorage-db-common" } +syncstorage-mysql = { path = "../syncstorage-mysql", optional = true } +syncstorage-settings = { path = "../syncstorage-settings" } +syncstorage-spanner = { path = "../syncstorage-spanner", optional = true } +# pinning to 0.2.4 due to high number of dependencies (actix, bb8, deadpool, etc.) +tokio = { version = "0.2.4", features = ["macros", "sync"] } + +[features] +mysql = ['syncstorage-mysql'] +spanner = ['syncstorage-spanner'] diff --git a/syncstorage-db/src/lib.rs b/syncstorage-db/src/lib.rs new file mode 100644 index 00000000..f7007d1c --- /dev/null +++ b/syncstorage-db/src/lib.rs @@ -0,0 +1,77 @@ +//! Generic db abstration. + +#[cfg(test)] +#[macro_use] +extern crate slog_scope; + +pub mod mock; +#[cfg(test)] +mod tests; + +use std::time::Duration; + +use cadence::{Gauged, StatsdClient}; +use tokio::{self, time}; + +#[cfg(feature = "mysql")] +pub type DbPoolImpl = syncstorage_mysql::MysqlDbPool; +#[cfg(feature = "mysql")] +pub use syncstorage_mysql::DbError; +#[cfg(feature = "mysql")] +pub type DbImpl = syncstorage_mysql::MysqlDb; + +#[cfg(feature = "spanner")] +pub type DbPoolImpl = syncstorage_spanner::SpannerDbPool; +#[cfg(feature = "spanner")] +pub use syncstorage_spanner::DbError; +#[cfg(feature = "spanner")] +pub type DbImpl = syncstorage_spanner::SpannerDb; + +pub use syncserver_db_common::{GetPoolState, PoolState}; +pub use syncstorage_db_common::error::DbErrorIntrospect; + +pub use syncstorage_db_common::{ + params, results, + util::{to_rfc3339, SyncTimestamp}, + Db, DbPool, Sorting, UserIdentifier, +}; + +#[cfg(all(feature = "mysql", feature = "spanner"))] +compile_error!("only one of the \"mysql\" and \"spanner\" features can be enabled at a time"); + +#[cfg(not(any(feature = "mysql", feature = "spanner")))] +compile_error!("exactly one of the \"mysql\" and \"spanner\" features must be enabled"); + +/// Emit DbPool metrics periodically +pub fn spawn_pool_periodic_reporter( + interval: Duration, + metrics: StatsdClient, + pool: T, +) -> Result<(), DbError> { + let hostname = hostname::get() + .expect("Couldn't get hostname") + .into_string() + .expect("Couldn't get hostname"); + tokio::spawn(async move { + loop { + let PoolState { + connections, + idle_connections, + } = pool.state(); + metrics + .gauge_with_tags( + "storage.pool.connections.active", + (connections - idle_connections) as u64, + ) + .with_tag("hostname", &hostname) + .send(); + metrics + .gauge_with_tags("storage.pool.connections.idle", idle_connections as u64) + .with_tag("hostname", &hostname) + .send(); + time::delay_for(interval).await; + } + }); + + Ok(()) +} diff --git 
a/syncserver/src/db/mock.rs b/syncstorage-db/src/mock.rs similarity index 86% rename from syncserver/src/db/mock.rs rename to syncstorage-db/src/mock.rs index af13bfb7..2f5575b6 100644 --- a/syncserver/src/db/mock.rs +++ b/syncstorage-db/src/mock.rs @@ -2,10 +2,12 @@ #![allow(clippy::new_without_default)] use async_trait::async_trait; use futures::future; -use syncserver_db_common::{ - error::DbError, params, results, util::SyncTimestamp, Db, DbFuture, DbPool, GetPoolState, - PoolState, -}; +use syncserver_db_common::{GetPoolState, PoolState}; +use syncstorage_db_common::{params, results, util::SyncTimestamp, Db, DbPool}; + +use crate::DbError; + +type DbFuture<'a, T> = syncserver_db_common::DbFuture<'a, T, DbError>; #[derive(Clone, Debug)] pub struct MockDbPool; @@ -18,15 +20,17 @@ impl MockDbPool { #[async_trait] impl DbPool for MockDbPool { - async fn get<'a>(&'a self) -> Result>, DbError> { - Ok(Box::new(MockDb::new()) as Box>) + type Error = DbError; + + async fn get(&self) -> Result>, Self::Error> { + Ok(Box::new(MockDb::new())) } fn validate_batch_id(&self, _: params::ValidateBatchId) -> Result<(), DbError> { Ok(()) } - fn box_clone(&self) -> Box { + fn box_clone(&self) -> Box> { Box::new(self.clone()) } } @@ -58,7 +62,9 @@ macro_rules! mock_db_method { }; } -impl<'a> Db<'a> for MockDb { +impl Db for MockDb { + type Error = DbError; + fn commit(&self) -> DbFuture<'_, ()> { Box::pin(future::ok(())) } @@ -71,7 +77,7 @@ impl<'a> Db<'a> for MockDb { Box::pin(future::ok(())) } - fn box_clone(&self) -> Box> { + fn box_clone(&self) -> Box> { Box::new(self.clone()) } diff --git a/syncserver/src/db/tests/batch.rs b/syncstorage-db/src/tests/batch.rs similarity index 90% rename from syncserver/src/db/tests/batch.rs rename to syncstorage-db/src/tests/batch.rs index 8efa1613..ecc972fd 100644 --- a/syncserver/src/db/tests/batch.rs +++ b/syncstorage-db/src/tests/batch.rs @@ -1,8 +1,11 @@ use log::debug; -use syncserver_db_common::{params, results, util::SyncTimestamp, BATCH_LIFETIME}; use syncserver_settings::Settings; +use syncstorage_db_common::{ + error::DbErrorIntrospect, params, results, util::SyncTimestamp, BATCH_LIFETIME, +}; -use super::support::{db_pool, gbso, hid, pbso, postbso, test_db, Result}; +use super::support::{db_pool, gbso, hid, pbso, postbso, test_db}; +use crate::DbError; fn cb(user_id: u32, coll: &str, bsos: Vec) -> params::CreateBatch { params::CreateBatch { @@ -43,9 +46,9 @@ fn gb(user_id: u32, coll: &str, id: String) -> params::GetBatch { } #[tokio::test] -async fn create_delete() -> Result<()> { +async fn create_delete() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = 1; let coll = "clients"; @@ -66,9 +69,9 @@ async fn create_delete() -> Result<()> { } #[tokio::test] -async fn expiry() -> Result<()> { +async fn expiry() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = 1; let coll = "clients"; @@ -90,9 +93,9 @@ async fn expiry() -> Result<()> { } #[tokio::test] -async fn update() -> Result<()> { +async fn update() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = 1; let coll = "clients"; @@ -114,9 +117,9 @@ async fn update() -> Result<()> { } #[tokio::test] -async fn append_commit() -> Result<()> { +async fn append_commit() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = 
test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = 1; let coll = "clients"; @@ -157,7 +160,7 @@ async fn append_commit() -> Result<()> { } #[tokio::test] -async fn quota_test_create_batch() -> Result<()> { +async fn quota_test_create_batch() -> Result<(), DbError> { let mut settings = Settings::test_settings().syncstorage; if !settings.enable_quota { @@ -169,7 +172,7 @@ async fn quota_test_create_batch() -> Result<()> { settings.limits.max_quota_limit = limit; let pool = db_pool(Some(settings.clone())).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = 1; let coll = "clients"; @@ -199,7 +202,7 @@ async fn quota_test_create_batch() -> Result<()> { } #[tokio::test] -async fn quota_test_append_batch() -> Result<()> { +async fn quota_test_append_batch() -> Result<(), DbError> { let mut settings = Settings::test_settings().syncstorage; if !settings.enable_quota { @@ -211,7 +214,7 @@ async fn quota_test_append_batch() -> Result<()> { settings.limits.max_quota_limit = limit; let pool = db_pool(Some(settings.clone())).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = 1; let coll = "clients"; @@ -244,10 +247,10 @@ async fn quota_test_append_batch() -> Result<()> { } #[tokio::test] -async fn test_append_async_w_null() -> Result<()> { +async fn test_append_async_w_null() -> Result<(), DbError> { let settings = Settings::test_settings().syncstorage; let pool = db_pool(Some(settings)).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; // Remember: TTL is seconds to live, not an expiry date let ttl_0 = 86_400; let ttl_1 = 86_400; diff --git a/syncserver/src/db/tests/db.rs b/syncstorage-db/src/tests/db.rs similarity index 89% rename from syncserver/src/db/tests/db.rs rename to syncstorage-db/src/tests/db.rs index 28c8b515..e1a00a84 100644 --- a/syncserver/src/db/tests/db.rs +++ b/syncstorage-db/src/tests/db.rs @@ -3,10 +3,13 @@ use std::collections::HashMap; use lazy_static::lazy_static; use rand::{distributions::Alphanumeric, thread_rng, Rng}; -use syncserver_db_common::{params, util::SyncTimestamp, Sorting, UserIdentifier, DEFAULT_BSO_TTL}; use syncserver_settings::Settings; +use syncstorage_db_common::{ + error::DbErrorIntrospect, params, util::SyncTimestamp, Sorting, UserIdentifier, DEFAULT_BSO_TTL, +}; -use super::support::{db_pool, dbso, dbsos, gbso, gbsos, hid, pbso, postbso, test_db, Result}; +use super::support::{db_pool, dbso, dbsos, gbso, gbsos, hid, pbso, postbso, test_db}; +use crate::DbError; // distant future (year 2099) timestamp for tests const MAX_TIMESTAMP: u64 = 4_070_937_600_000; @@ -16,9 +19,9 @@ lazy_static! 
{ } #[tokio::test] -async fn bso_successfully_updates_single_values() -> Result<()> { +async fn bso_successfully_updates_single_values() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = *UID; let coll = "clients"; @@ -57,9 +60,9 @@ async fn bso_successfully_updates_single_values() -> Result<()> { } #[tokio::test] -async fn bso_modified_not_changed_on_ttl_touch() -> Result<()> { +async fn bso_modified_not_changed_on_ttl_touch() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = *UID; let coll = "clients"; @@ -80,9 +83,9 @@ async fn bso_modified_not_changed_on_ttl_touch() -> Result<()> { } #[tokio::test] -async fn put_bso_updates() -> Result<()> { +async fn put_bso_updates() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = *UID; let coll = "clients"; @@ -103,9 +106,9 @@ async fn put_bso_updates() -> Result<()> { } #[tokio::test] -async fn get_bsos_limit_offset() -> Result<()> { +async fn get_bsos_limit_offset() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = *UID; let coll = "clients"; @@ -224,9 +227,9 @@ async fn get_bsos_limit_offset() -> Result<()> { } #[tokio::test] -async fn get_bsos_newer() -> Result<()> { +async fn get_bsos_newer() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = *UID; let coll = "clients"; @@ -309,9 +312,9 @@ async fn get_bsos_newer() -> Result<()> { } #[tokio::test] -async fn get_bsos_sort() -> Result<()> { +async fn get_bsos_sort() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = *UID; let coll = "clients"; @@ -382,9 +385,9 @@ async fn get_bsos_sort() -> Result<()> { } #[tokio::test] -async fn delete_bsos_in_correct_collection() -> Result<()> { +async fn delete_bsos_in_correct_collection() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = *UID; let payload = "data"; @@ -399,9 +402,9 @@ async fn delete_bsos_in_correct_collection() -> Result<()> { } #[tokio::test] -async fn get_storage_timestamp() -> Result<()> { +async fn get_storage_timestamp() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = *UID; db.create_collection("NewCollection1".to_owned()).await?; @@ -422,17 +425,17 @@ async fn get_storage_timestamp() -> Result<()> { } #[tokio::test] -async fn get_collection_id() -> Result<()> { +async fn get_collection_id() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; db.get_collection_id("bookmarks".to_owned()).await?; Ok(()) } #[tokio::test] -async fn create_collection() -> Result<()> { +async fn create_collection() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let name = "NewCollection"; let cid = db.create_collection(name.to_owned()).await?; @@ -443,9 +446,9 @@ async fn create_collection() -> Result<()> { } #[tokio::test] -async fn update_collection() -> 
Result<()> { +async fn update_collection() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let collection = "test".to_owned(); let cid = db.create_collection(collection.clone()).await?; @@ -459,9 +462,9 @@ async fn update_collection() -> Result<()> { } #[tokio::test] -async fn delete_collection() -> Result<()> { +async fn delete_collection() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = *UID; let coll = "NewCollection"; @@ -495,9 +498,9 @@ async fn delete_collection() -> Result<()> { } #[tokio::test] -async fn delete_collection_tombstone() -> Result<()> { +async fn delete_collection_tombstone() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = *UID; let coll = "test"; @@ -555,9 +558,9 @@ async fn delete_collection_tombstone() -> Result<()> { } #[tokio::test] -async fn get_collection_timestamps() -> Result<()> { +async fn get_collection_timestamps() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = *UID; let coll = "test".to_owned(); @@ -583,9 +586,9 @@ async fn get_collection_timestamps() -> Result<()> { } #[tokio::test] -async fn get_collection_timestamps_tombstone() -> Result<()> { +async fn get_collection_timestamps_tombstone() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = *UID; let coll = "test".to_owned(); @@ -608,9 +611,9 @@ async fn get_collection_timestamps_tombstone() -> Result<()> { } #[tokio::test] -async fn get_collection_usage() -> Result<()> { +async fn get_collection_usage() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = 5; let mut expected = HashMap::new(); @@ -660,7 +663,7 @@ async fn get_collection_usage() -> Result<()> { } #[tokio::test] -async fn test_quota() -> Result<()> { +async fn test_quota() -> Result<(), DbError> { let settings = Settings::test_settings(); if !settings.syncstorage.enable_quota { @@ -669,7 +672,7 @@ async fn test_quota() -> Result<()> { } let pool = db_pool(None).await?; - let mut db = test_db(pool.as_ref()).await?; + let mut db = test_db(pool).await?; let uid = 5; let coll = "bookmarks"; @@ -702,9 +705,9 @@ async fn test_quota() -> Result<()> { } #[tokio::test] -async fn get_collection_counts() -> Result<()> { +async fn get_collection_counts() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = 4; let mut expected = HashMap::new(); @@ -725,9 +728,9 @@ async fn get_collection_counts() -> Result<()> { } #[tokio::test] -async fn put_bso() -> Result<()> { +async fn put_bso() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = *UID; let coll = "NewCollection"; @@ -765,9 +768,9 @@ async fn put_bso() -> Result<()> { } #[tokio::test] -async fn post_bsos() -> Result<()> { +async fn post_bsos() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = *UID; let coll = "NewCollection"; @@ -836,9 +839,9 @@ async fn post_bsos() -> Result<()> { } 
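// Editor's aside (not part of the patch): the shape these test hunks converge on, shown once
// in isolation. `db_pool()` now returns the concrete `DbPoolImpl` directly (no `ApiResult` /
// `pool.as_ref()` indirection), `test_db()` takes the pool by value, and the error type in each
// test signature is the backend `DbError` re-exported by `syncstorage-db`, so every test runs
// against whichever backend the enabled `mysql` or `spanner` cargo feature selects.
use super::support::{db_pool, test_db};
use crate::DbError;

#[tokio::test]
async fn example_roundtrip() -> Result<(), DbError> {
    let pool = db_pool(None).await?;
    let db = test_db(pool).await?;
    // `db` is a boxed `Db` for the feature-selected backend behind the `DbPoolImpl` alias.
    assert!(db.check().await?);
    Ok(())
}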
#[tokio::test] -async fn get_bso() -> Result<()> { +async fn get_bso() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = *UID; let coll = "clients"; @@ -857,9 +860,9 @@ async fn get_bso() -> Result<()> { } #[tokio::test] -async fn get_bsos() -> Result<()> { +async fn get_bsos() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = 2; let coll = "clients"; @@ -928,9 +931,9 @@ async fn get_bsos() -> Result<()> { } #[tokio::test] -async fn get_bso_timestamp() -> Result<()> { +async fn get_bso_timestamp() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = *UID; let coll = "clients"; @@ -949,9 +952,9 @@ async fn get_bso_timestamp() -> Result<()> { } #[tokio::test] -async fn delete_bso() -> Result<()> { +async fn delete_bso() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = *UID; let coll = "clients"; @@ -965,9 +968,9 @@ async fn delete_bso() -> Result<()> { } #[tokio::test] -async fn delete_bsos() -> Result<()> { +async fn delete_bsos() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = *UID; let coll = "clients"; @@ -1000,31 +1003,31 @@ async fn delete_bsos() -> Result<()> { /* #[tokio::test] -async fn usage_stats() -> Result<()> { +async fn usage_stats() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; Ok(()) } #[tokio::test] -async fn purge_expired() -> Result<()> { +async fn purge_expired() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; Ok(()) } #[tokio::test] -async fn optimize() -> Result<()> { +async fn optimize() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; Ok(()) } */ #[tokio::test] -async fn delete_storage() -> Result<()> { +async fn delete_storage() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = *UID; let bid = "test"; @@ -1048,9 +1051,9 @@ async fn delete_storage() -> Result<()> { } #[tokio::test] -async fn collection_cache() -> Result<()> { +async fn collection_cache() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = *UID; let coll = "test"; @@ -1069,9 +1072,9 @@ async fn collection_cache() -> Result<()> { } #[tokio::test] -async fn lock_for_read() -> Result<()> { +async fn lock_for_read() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = *UID; let coll = "clients"; @@ -1087,9 +1090,9 @@ async fn lock_for_read() -> Result<()> { } #[tokio::test] -async fn lock_for_write() -> Result<()> { +async fn lock_for_write() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = *UID; let coll = "clients"; @@ -1105,9 +1108,9 @@ async fn lock_for_write() -> Result<()> { } #[tokio::test] -async fn heartbeat() -> Result<()> { 
+async fn heartbeat() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; assert!(db.check().await?); Ok(()) diff --git a/syncserver/src/db/tests/mod.rs b/syncstorage-db/src/tests/mod.rs similarity index 100% rename from syncserver/src/db/tests/mod.rs rename to syncstorage-db/src/tests/mod.rs diff --git a/syncserver/src/db/tests/support.rs b/syncstorage-db/src/tests/support.rs similarity index 86% rename from syncserver/src/db/tests/support.rs rename to syncstorage-db/src/tests/support.rs index 9590d7fa..a952799e 100644 --- a/syncserver/src/db/tests/support.rs +++ b/syncstorage-db/src/tests/support.rs @@ -1,17 +1,14 @@ use std::{str::FromStr, sync::Arc}; -use syncserver_db_common::{params, util::SyncTimestamp, Db, Sorting, UserIdentifier}; +use syncserver_common::{BlockingThreadpool, Metrics}; use syncserver_settings::Settings as SyncserverSettings; +use syncstorage_db_common::{params, util::SyncTimestamp, Db, DbPool, Sorting, UserIdentifier}; use syncstorage_settings::Settings as SyncstorageSettings; -use crate::db::DbPool; -use crate::error::ApiResult; -use crate::{db::pool_from_settings, db::BlockingThreadpool, error::ApiError, server::metrics}; - -pub type Result = std::result::Result; +use crate::{DbError, DbPoolImpl}; #[cfg(test)] -pub async fn db_pool(settings: Option) -> Result> { +pub async fn db_pool(settings: Option) -> Result { let _ = env_logger::try_init(); // The default for SYNC_SYNCSTORAGE__DATABASE_USE_TEST_TRANSACTIONS is // false, but we want the mysql default to be true, so let's check @@ -25,13 +22,12 @@ pub async fn db_pool(settings: Option) -> Result ApiResult>> { +pub async fn test_db(pool: DbPoolImpl) -> Result>, DbError> { let db = pool.get().await?; // Spanner won't have a timestamp until lock_for_xxx are called: fill one // in for it diff --git a/syncstorage-mysql/Cargo.toml b/syncstorage-mysql/Cargo.toml new file mode 100644 index 00000000..7334322b --- /dev/null +++ b/syncstorage-mysql/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "syncstorage-mysql" +version = "0.12.3" +edition = "2021" + +[dependencies] +async-trait = "0.1.40" +backtrace = "0.3.61" +base64 = "0.13" +diesel = { version = "1.4", features = ["mysql", "r2d2"] } +diesel_logger = "0.1.1" +diesel_migrations = { version = "1.4.0", features = ["mysql"] } +futures = { version = "0.3", features = ["compat"] } +http = "0.2.5" +slog-scope = "4.3" +syncserver-common = { path = "../syncserver-common" } +syncserver-db-common = { path = "../syncserver-db-common" } +syncstorage-db-common = { path = "../syncstorage-db-common" } +syncstorage-settings = { path = "../syncstorage-settings" } +thiserror = "1.0.26" +url = "2.1" + +[dev-dependencies] +syncserver-settings = { path = "../syncserver-settings" } +env_logger = "0.9" diff --git a/migrations/2018-08-28-010336_init/down.sql b/syncstorage-mysql/migrations/2018-08-28-010336_init/down.sql similarity index 100% rename from migrations/2018-08-28-010336_init/down.sql rename to syncstorage-mysql/migrations/2018-08-28-010336_init/down.sql diff --git a/migrations/2018-08-28-010336_init/up.sql b/syncstorage-mysql/migrations/2018-08-28-010336_init/up.sql similarity index 100% rename from migrations/2018-08-28-010336_init/up.sql rename to syncstorage-mysql/migrations/2018-08-28-010336_init/up.sql diff --git a/migrations/2019-09-11-164500/down.sql b/syncstorage-mysql/migrations/2019-09-11-164500/down.sql similarity index 100% rename from migrations/2019-09-11-164500/down.sql rename to 
syncstorage-mysql/migrations/2019-09-11-164500/down.sql diff --git a/migrations/2019-09-11-164500/up.sql b/syncstorage-mysql/migrations/2019-09-11-164500/up.sql similarity index 100% rename from migrations/2019-09-11-164500/up.sql rename to syncstorage-mysql/migrations/2019-09-11-164500/up.sql diff --git a/migrations/2019-09-25-174347_min_collection_id/down.sql b/syncstorage-mysql/migrations/2019-09-25-174347_min_collection_id/down.sql similarity index 100% rename from migrations/2019-09-25-174347_min_collection_id/down.sql rename to syncstorage-mysql/migrations/2019-09-25-174347_min_collection_id/down.sql diff --git a/migrations/2019-09-25-174347_min_collection_id/up.sql b/syncstorage-mysql/migrations/2019-09-25-174347_min_collection_id/up.sql similarity index 100% rename from migrations/2019-09-25-174347_min_collection_id/up.sql rename to syncstorage-mysql/migrations/2019-09-25-174347_min_collection_id/up.sql diff --git a/migrations/2020-04-03-102015_change_userid/down.sql b/syncstorage-mysql/migrations/2020-04-03-102015_change_userid/down.sql similarity index 100% rename from migrations/2020-04-03-102015_change_userid/down.sql rename to syncstorage-mysql/migrations/2020-04-03-102015_change_userid/down.sql diff --git a/migrations/2020-04-03-102015_change_userid/up.sql b/syncstorage-mysql/migrations/2020-04-03-102015_change_userid/up.sql similarity index 100% rename from migrations/2020-04-03-102015_change_userid/up.sql rename to syncstorage-mysql/migrations/2020-04-03-102015_change_userid/up.sql diff --git a/migrations/2020-06-12-231034_new_batch/down.sql b/syncstorage-mysql/migrations/2020-06-12-231034_new_batch/down.sql similarity index 100% rename from migrations/2020-06-12-231034_new_batch/down.sql rename to syncstorage-mysql/migrations/2020-06-12-231034_new_batch/down.sql diff --git a/migrations/2020-06-12-231034_new_batch/up.sql b/syncstorage-mysql/migrations/2020-06-12-231034_new_batch/up.sql similarity index 100% rename from migrations/2020-06-12-231034_new_batch/up.sql rename to syncstorage-mysql/migrations/2020-06-12-231034_new_batch/up.sql diff --git a/migrations/2020-08-24-091401_add_quota/down.sql b/syncstorage-mysql/migrations/2020-08-24-091401_add_quota/down.sql similarity index 100% rename from migrations/2020-08-24-091401_add_quota/down.sql rename to syncstorage-mysql/migrations/2020-08-24-091401_add_quota/down.sql diff --git a/migrations/2020-08-24-091401_add_quota/up.sql b/syncstorage-mysql/migrations/2020-08-24-091401_add_quota/up.sql similarity index 100% rename from migrations/2020-08-24-091401_add_quota/up.sql rename to syncstorage-mysql/migrations/2020-08-24-091401_add_quota/up.sql diff --git a/syncserver/src/db/mysql/batch.rs b/syncstorage-mysql/src/batch.rs similarity index 90% rename from syncserver/src/db/mysql/batch.rs rename to syncstorage-mysql/src/batch.rs index 2bd9501d..bde50c3e 100644 --- a/syncserver/src/db/mysql/batch.rs +++ b/syncstorage-mysql/src/batch.rs @@ -9,19 +9,18 @@ use diesel::{ sql_types::{BigInt, Integer}, ExpressionMethods, OptionalExtension, QueryDsl, RunQueryDsl, }; -use syncserver_db_common::{ - error::{DbError, DbErrorKind}, - params, results, UserIdentifier, BATCH_LIFETIME, -}; +use syncstorage_db_common::{params, results, UserIdentifier, BATCH_LIFETIME}; use super::{ - models::{MysqlDb, Result}, + error::DbError, + models::MysqlDb, schema::{batch_upload_items, batch_uploads}, + DbResult, }; const MAXTTL: i32 = 2_100_000_000; -pub fn create(db: &MysqlDb, params: params::CreateBatch) -> Result { +pub fn create(db: &MysqlDb, params: 
params::CreateBatch) -> DbResult { let user_id = params.user_id.legacy_id as i64; let collection_id = db.get_collection_id(¶ms.collection)?; // Careful, there's some weirdness here! @@ -47,7 +46,7 @@ pub fn create(db: &MysqlDb, params: params::CreateBatch) -> Result DbError { match e { // The user tried to create two batches with the same timestamp - DieselError::DatabaseError(UniqueViolation, _) => DbErrorKind::Conflict.into(), + DieselError::DatabaseError(UniqueViolation, _) => DbError::conflict(), _ => e.into(), } })?; @@ -59,7 +58,7 @@ pub fn create(db: &MysqlDb, params: params::CreateBatch) -> Result Result { +pub fn validate(db: &MysqlDb, params: params::ValidateBatch) -> DbResult { let batch_id = decode_id(¶ms.id)?; // Avoid hitting the db for batches that are obviously too old. Recall // that the batchid is a millisecond timestamp. @@ -79,7 +78,7 @@ pub fn validate(db: &MysqlDb, params: params::ValidateBatch) -> Result { Ok(exists.is_some()) } -pub fn append(db: &MysqlDb, params: params::AppendToBatch) -> Result<()> { +pub fn append(db: &MysqlDb, params: params::AppendToBatch) -> DbResult<()> { let exists = validate( db, params::ValidateBatch { @@ -90,7 +89,7 @@ pub fn append(db: &MysqlDb, params: params::AppendToBatch) -> Result<()> { )?; if !exists { - Err(DbErrorKind::BatchNotFound)? + return Err(DbError::batch_not_found()); } let batch_id = decode_id(¶ms.batch.id)?; @@ -99,7 +98,7 @@ pub fn append(db: &MysqlDb, params: params::AppendToBatch) -> Result<()> { Ok(()) } -pub fn get(db: &MysqlDb, params: params::GetBatch) -> Result> { +pub fn get(db: &MysqlDb, params: params::GetBatch) -> DbResult> { let is_valid = validate( db, params::ValidateBatch { @@ -116,7 +115,7 @@ pub fn get(db: &MysqlDb, params: params::GetBatch) -> Result Result<()> { +pub fn delete(db: &MysqlDb, params: params::DeleteBatch) -> DbResult<()> { let batch_id = decode_id(¶ms.id)?; let user_id = params.user_id.legacy_id as i64; let collection_id = db.get_collection_id(¶ms.collection)?; @@ -133,7 +132,7 @@ pub fn delete(db: &MysqlDb, params: params::DeleteBatch) -> Result<()> { } /// Commits a batch to the bsos table, deleting the batch when succesful -pub fn commit(db: &MysqlDb, params: params::CommitBatch) -> Result { +pub fn commit(db: &MysqlDb, params: params::CommitBatch) -> DbResult { let batch_id = decode_id(¶ms.batch.id)?; let user_id = params.user_id.legacy_id as i64; let collection_id = db.get_collection_id(¶ms.collection)?; @@ -169,7 +168,7 @@ pub fn do_append( user_id: UserIdentifier, _collection_id: i32, bsos: Vec, -) -> Result<()> { +) -> DbResult<()> { fn exist_idx(user_id: u64, batch_id: i64, bso_id: &str) -> String { // Construct something that matches the key for batch_upload_items format!( @@ -253,7 +252,7 @@ pub fn do_append( Ok(()) } -pub fn validate_batch_id(id: &str) -> Result<()> { +pub fn validate_batch_id(id: &str) -> DbResult<()> { decode_id(id).map(|_| ()) } @@ -261,18 +260,17 @@ fn encode_id(id: i64) -> String { base64::encode(id.to_string()) } -fn decode_id(id: &str) -> Result { +fn decode_id(id: &str) -> DbResult { let bytes = base64::decode(id).unwrap_or_else(|_| id.as_bytes().to_vec()); let decoded = std::str::from_utf8(&bytes).unwrap_or(id); decoded .parse::() - .map_err(|e| DbError::internal(&format!("Invalid batch_id: {}", e))) + .map_err(|e| DbError::internal(format!("Invalid batch_id: {}", e))) } -#[macro_export] macro_rules! 
batch_db_method { ($name:ident, $batch_name:ident, $type:ident) => { - pub fn $name(&self, params: params::$type) -> Result { + pub fn $name(&self, params: params::$type) -> DbResult { batch::$batch_name(self, params) } }; diff --git a/syncserver/src/db/mysql/batch_commit.sql b/syncstorage-mysql/src/batch_commit.sql similarity index 100% rename from syncserver/src/db/mysql/batch_commit.sql rename to syncstorage-mysql/src/batch_commit.sql diff --git a/syncserver/src/db/mysql/diesel_ext.rs b/syncstorage-mysql/src/diesel_ext.rs similarity index 100% rename from syncserver/src/db/mysql/diesel_ext.rs rename to syncstorage-mysql/src/diesel_ext.rs diff --git a/syncstorage-mysql/src/error.rs b/syncstorage-mysql/src/error.rs new file mode 100644 index 00000000..0c4c8b6a --- /dev/null +++ b/syncstorage-mysql/src/error.rs @@ -0,0 +1,144 @@ +use std::fmt; + +use backtrace::Backtrace; +use http::StatusCode; +use syncserver_common::{from_error, impl_fmt_display, InternalError, ReportableError}; +use syncserver_db_common::error::MysqlError; +use syncstorage_db_common::error::{DbErrorIntrospect, SyncstorageDbError}; +use thiserror::Error; + +/// An error type that represents any MySQL-related errors that may occur while processing a +/// syncstorage request. These errors may be application-specific or lower-level errors that arise +/// from the database backend. +#[derive(Debug)] +pub struct DbError { + kind: DbErrorKind, + pub status: StatusCode, + pub backtrace: Box, +} + +impl DbError { + pub fn batch_not_found() -> Self { + DbErrorKind::Common(SyncstorageDbError::batch_not_found()).into() + } + + pub fn bso_not_found() -> Self { + DbErrorKind::Common(SyncstorageDbError::bso_not_found()).into() + } + + pub fn collection_not_found() -> Self { + DbErrorKind::Common(SyncstorageDbError::collection_not_found()).into() + } + + pub fn conflict() -> Self { + DbErrorKind::Common(SyncstorageDbError::conflict()).into() + } + + pub fn internal(msg: String) -> Self { + DbErrorKind::Common(SyncstorageDbError::internal(msg)).into() + } + + pub fn quota() -> Self { + DbErrorKind::Common(SyncstorageDbError::quota()).into() + } +} + +#[derive(Debug, Error)] +enum DbErrorKind { + #[error("{}", _0)] + Common(SyncstorageDbError), + + #[error("{}", _0)] + Mysql(MysqlError), +} + +impl From for DbError { + fn from(kind: DbErrorKind) -> Self { + match &kind { + DbErrorKind::Common(dbe) => Self { + status: dbe.status, + backtrace: Box::new(dbe.backtrace.clone()), + kind, + }, + _ => Self { + kind, + status: StatusCode::INTERNAL_SERVER_ERROR, + backtrace: Box::new(Backtrace::new()), + }, + } + } +} + +impl DbErrorIntrospect for DbError { + fn is_batch_not_found(&self) -> bool { + matches!(&self.kind, DbErrorKind::Common(e) if e.is_batch_not_found()) + } + + fn is_bso_not_found(&self) -> bool { + matches!(&self.kind, DbErrorKind::Common(e) if e.is_bso_not_found()) + } + + fn is_collection_not_found(&self) -> bool { + matches!(&self.kind, DbErrorKind::Common(e) if e.is_collection_not_found()) + } + + fn is_conflict(&self) -> bool { + matches!(&self.kind, DbErrorKind::Common(e) if e.is_conflict()) + } + + fn is_quota(&self) -> bool { + matches!(&self.kind, DbErrorKind::Common(e) if e.is_quota()) + } +} + +impl ReportableError for DbError { + fn is_sentry_event(&self) -> bool { + matches!(&self.kind, DbErrorKind::Common(e) if e.is_sentry_event()) + } + + fn metric_label(&self) -> Option { + if let DbErrorKind::Common(e) = &self.kind { + e.metric_label() + } else { + None + } + } + + fn error_backtrace(&self) -> String { + 
format!("{:#?}", self.backtrace) + } +} + +impl InternalError for DbError { + fn internal_error(message: String) -> Self { + DbErrorKind::Common(SyncstorageDbError::internal(message)).into() + } +} + +impl_fmt_display!(DbError, DbErrorKind); + +from_error!(SyncstorageDbError, DbError, DbErrorKind::Common); +from_error!( + diesel::result::Error, + DbError, + |error: diesel::result::Error| DbError::from(DbErrorKind::Mysql(MysqlError::from(error))) +); +from_error!( + diesel::result::ConnectionError, + DbError, + |error: diesel::result::ConnectionError| DbError::from(DbErrorKind::Mysql(MysqlError::from( + error + ))) +); +from_error!( + diesel::r2d2::PoolError, + DbError, + |error: diesel::r2d2::PoolError| DbError::from(DbErrorKind::Mysql(MysqlError::from(error))) +); +from_error!( + diesel_migrations::RunMigrationsError, + DbError, + |error: diesel_migrations::RunMigrationsError| DbError::from(DbErrorKind::Mysql( + MysqlError::from(error) + )) +); diff --git a/syncstorage-mysql/src/lib.rs b/syncstorage-mysql/src/lib.rs new file mode 100644 index 00000000..4a933900 --- /dev/null +++ b/syncstorage-mysql/src/lib.rs @@ -0,0 +1,22 @@ +#[macro_use] +extern crate diesel; +#[macro_use] +extern crate diesel_migrations; +#[macro_use] +extern crate slog_scope; + +#[macro_use] +mod batch; +mod diesel_ext; +mod error; +mod models; +mod pool; +mod schema; +#[cfg(test)] +mod test; + +pub use error::DbError; +pub use models::MysqlDb; +pub use pool::MysqlDbPool; + +pub(crate) type DbResult = Result; diff --git a/syncserver/src/db/mysql/models.rs b/syncstorage-mysql/src/models.rs similarity index 87% rename from syncserver/src/db/mysql/models.rs rename to syncstorage-mysql/src/models.rs index 1b61ec02..a5cb255c 100644 --- a/syncserver/src/db/mysql/models.rs +++ b/syncstorage-mysql/src/models.rs @@ -13,44 +13,43 @@ use diesel::{ sql_types::{BigInt, Integer, Nullable, Text}, Connection, ExpressionMethods, GroupByDsl, OptionalExtension, QueryDsl, RunQueryDsl, }; -#[cfg(test)] +#[cfg(debug_assertions)] use diesel_logger::LoggingConnection; -use syncserver_db_common::{ - error::{DbError, DbErrorKind}, - params, results, - util::SyncTimestamp, - Db, DbFuture, Sorting, UserIdentifier, DEFAULT_BSO_TTL, +use syncserver_common::{BlockingThreadpool, Metrics}; +use syncserver_db_common::{sync_db_method, DbFuture}; +use syncstorage_db_common::{ + error::DbErrorIntrospect, params, results, util::SyncTimestamp, Db, Sorting, UserIdentifier, + DEFAULT_BSO_TTL, }; use syncstorage_settings::{Quota, DEFAULT_MAX_TOTAL_RECORDS}; use super::{ batch, diesel_ext::LockInShareModeDsl, + error::DbError, pool::CollectionCache, schema::{bso, collections, user_collections}, + DbResult, }; -use crate::db::BlockingThreadpool; -use crate::server::metrics::Metrics; -pub type Result = std::result::Result; type Conn = PooledConnection>; // this is the max number of records we will return. -pub static DEFAULT_LIMIT: u32 = DEFAULT_MAX_TOTAL_RECORDS; +static DEFAULT_LIMIT: u32 = DEFAULT_MAX_TOTAL_RECORDS; -pub const TOMBSTONE: i32 = 0; +const TOMBSTONE: i32 = 0; /// SQL Variable remapping /// These names are the legacy values mapped to the new names. 
-pub const COLLECTION_ID: &str = "collection"; -pub const USER_ID: &str = "userid"; -pub const MODIFIED: &str = "modified"; -pub const EXPIRY: &str = "ttl"; -pub const LAST_MODIFIED: &str = "last_modified"; -pub const COUNT: &str = "count"; -pub const TOTAL_BYTES: &str = "total_bytes"; +const COLLECTION_ID: &str = "collection"; +const USER_ID: &str = "userid"; +const MODIFIED: &str = "modified"; +const EXPIRY: &str = "ttl"; +const LAST_MODIFIED: &str = "last_modified"; +const COUNT: &str = "count"; +const TOTAL_BYTES: &str = "total_bytes"; #[derive(Debug)] -pub enum CollectionLock { +enum CollectionLock { Read, Write, } @@ -71,8 +70,8 @@ struct MysqlDbSession { #[derive(Clone, Debug)] pub struct MysqlDb { - /// Synchronous Diesel calls are executed in tokio::task::spawn_blocking to satisfy - /// the Db trait's asynchronous interface. + /// Synchronous Diesel calls are executed in web::block to satisfy the Db trait's asynchronous + /// interface. /// /// Arc provides a Clone impl utilized for safely moving to /// the thread pool but does not provide Send as the underlying db @@ -94,9 +93,9 @@ pub struct MysqlDb { unsafe impl Send for MysqlDb {} pub struct MysqlDbInner { - #[cfg(not(test))] + #[cfg(not(debug_assertions))] pub(super) conn: Conn, - #[cfg(test)] + #[cfg(debug_assertions)] pub(super) conn: LoggingConnection, // display SQL when RUST_LOG="diesel_logger=trace" session: RefCell, @@ -117,7 +116,7 @@ impl Deref for MysqlDb { } impl MysqlDb { - pub fn new( + pub(super) fn new( conn: Conn, coll_cache: Arc, metrics: &Metrics, @@ -125,9 +124,9 @@ impl MysqlDb { blocking_threadpool: Arc, ) -> Self { let inner = MysqlDbInner { - #[cfg(not(test))] + #[cfg(not(debug_assertions))] conn, - #[cfg(test)] + #[cfg(debug_assertions)] conn: LoggingConnection::new(conn), session: RefCell::new(Default::default()), }; @@ -149,7 +148,7 @@ impl MysqlDb { /// In theory it would be possible to use serializable transactions rather /// than explicit locking, but our ops team have expressed concerns about /// the efficiency of that approach at scale. - pub fn lock_for_read_sync(&self, params: params::LockCollection) -> Result<()> { + fn lock_for_read_sync(&self, params: params::LockCollection) -> DbResult<()> { let user_id = params.user_id.legacy_id as i64; let collection_id = self.get_collection_id(¶ms.collection).or_else(|e| { if e.is_collection_not_found() { @@ -196,7 +195,7 @@ impl MysqlDb { Ok(()) } - pub fn lock_for_write_sync(&self, params: params::LockCollection) -> Result<()> { + fn lock_for_write_sync(&self, params: params::LockCollection) -> DbResult<()> { let user_id = params.user_id.legacy_id as i64; let collection_id = self.get_or_create_collection_id(¶ms.collection)?; if let Some(CollectionLock::Read) = self @@ -205,7 +204,9 @@ impl MysqlDb { .coll_locks .get(&(user_id as u32, collection_id)) { - Err(DbError::internal("Can't escalate read-lock to write-lock"))? + return Err(DbError::internal( + "Can't escalate read-lock to write-lock".to_owned(), + )); } // Lock the db @@ -221,7 +222,7 @@ impl MysqlDb { let modified = SyncTimestamp::from_i64(modified)?; // Forbid the write if it would not properly incr the timestamp if modified >= self.timestamp() { - Err(DbErrorKind::Conflict)? 
+ return Err(DbError::conflict()); } self.session .borrow_mut() @@ -235,7 +236,7 @@ impl MysqlDb { Ok(()) } - pub(super) fn begin(&self, for_write: bool) -> Result<()> { + pub(super) fn begin(&self, for_write: bool) -> DbResult<()> { self.conn .transaction_manager() .begin_transaction(&self.conn)?; @@ -246,11 +247,11 @@ impl MysqlDb { Ok(()) } - pub async fn begin_async(&self, for_write: bool) -> Result<()> { + async fn begin_async(&self, for_write: bool) -> DbResult<()> { self.begin(for_write) } - pub fn commit_sync(&self) -> Result<()> { + fn commit_sync(&self) -> DbResult<()> { if self.session.borrow().in_transaction { self.conn .transaction_manager() @@ -259,7 +260,7 @@ impl MysqlDb { Ok(()) } - pub fn rollback_sync(&self) -> Result<()> { + fn rollback_sync(&self) -> DbResult<()> { if self.session.borrow().in_transaction { self.conn .transaction_manager() @@ -268,7 +269,7 @@ impl MysqlDb { Ok(()) } - fn erect_tombstone(&self, user_id: i32) -> Result<()> { + fn erect_tombstone(&self, user_id: i32) -> DbResult<()> { sql_query(format!( r#"INSERT INTO user_collections ({user_id}, {collection_id}, {modified}) VALUES (?, ?, ?) @@ -285,7 +286,7 @@ impl MysqlDb { Ok(()) } - pub fn delete_storage_sync(&self, user_id: UserIdentifier) -> Result<()> { + fn delete_storage_sync(&self, user_id: UserIdentifier) -> DbResult<()> { let user_id = user_id.legacy_id as i64; // Delete user data. delete(bso::table) @@ -301,10 +302,7 @@ impl MysqlDb { // Deleting the collection should result in: // - collection does not appear in /info/collections // - X-Last-Modified timestamp at the storage level changing - pub fn delete_collection_sync( - &self, - params: params::DeleteCollection, - ) -> Result { + fn delete_collection_sync(&self, params: params::DeleteCollection) -> DbResult { let user_id = params.user_id.legacy_id as i64; let collection_id = self.get_collection_id(¶ms.collection)?; let mut count = delete(bso::table) @@ -316,14 +314,14 @@ impl MysqlDb { .filter(user_collections::collection_id.eq(&collection_id)) .execute(&self.conn)?; if count == 0 { - Err(DbErrorKind::CollectionNotFound)? + return Err(DbError::collection_not_found()); } else { self.erect_tombstone(user_id as i32)?; } self.get_storage_timestamp_sync(params.user_id) } - pub(super) fn get_or_create_collection_id(&self, name: &str) -> Result { + pub(super) fn get_or_create_collection_id(&self, name: &str) -> DbResult { if let Some(id) = self.coll_cache.get_id(name)? { return Ok(id); } @@ -346,7 +344,7 @@ impl MysqlDb { Ok(id) } - pub(super) fn get_collection_id(&self, name: &str) -> Result { + pub(super) fn get_collection_id(&self, name: &str) -> DbResult { if let Some(id) = self.coll_cache.get_id(name)? { return Ok(id); } @@ -359,7 +357,7 @@ impl MysqlDb { .bind::(name) .get_result::(&self.conn) .optional()? - .ok_or(DbErrorKind::CollectionNotFound)? + .ok_or_else(DbError::collection_not_found)? .id; if !self.session.borrow().in_write_transaction { self.coll_cache.put(id, name.to_owned())?; @@ -367,7 +365,7 @@ impl MysqlDb { Ok(id) } - fn _get_collection_name(&self, id: i32) -> Result { + fn _get_collection_name(&self, id: i32) -> DbResult { let name = if let Some(name) = self.coll_cache.get_name(id)? { name } else { @@ -379,13 +377,13 @@ impl MysqlDb { .bind::(&id) .get_result::(&self.conn) .optional()? - .ok_or(DbErrorKind::CollectionNotFound)? + .ok_or_else(DbError::collection_not_found)? 
.name }; Ok(name) } - pub fn put_bso_sync(&self, bso: params::PutBso) -> Result { + fn put_bso_sync(&self, bso: params::PutBso) -> DbResult { /* if bso.payload.is_none() && bso.sortindex.is_none() && bso.ttl.is_none() { // XXX: go returns an error here (ErrNothingToDo), and is treated @@ -408,7 +406,7 @@ impl MysqlDb { tags.insert("collection".to_owned(), bso.collection.clone()); self.metrics.incr_with_tags("storage.quota.at_limit", tags); if self.quota.enforced { - return Err(DbErrorKind::Quota.into()); + return Err(DbError::quota()); } else { warn!("Quota at limit for user's collection ({} bytes)", usage.total_bytes; "collection"=>bso.collection.clone()); } @@ -476,7 +474,7 @@ impl MysqlDb { }) } - pub fn get_bsos_sync(&self, params: params::GetBsos) -> Result { + fn get_bsos_sync(&self, params: params::GetBsos) -> DbResult { let user_id = params.user_id.legacy_id as i64; let collection_id = self.get_collection_id(¶ms.collection)?; let now = self.timestamp().as_i64(); @@ -566,7 +564,7 @@ impl MysqlDb { }) } - pub fn get_bso_ids_sync(&self, params: params::GetBsos) -> Result { + fn get_bso_ids_sync(&self, params: params::GetBsos) -> DbResult { let user_id = params.user_id.legacy_id as i64; let collection_id = self.get_collection_id(¶ms.collection)?; let mut query = bso::table @@ -629,7 +627,7 @@ impl MysqlDb { }) } - pub fn get_bso_sync(&self, params: params::GetBso) -> Result> { + fn get_bso_sync(&self, params: params::GetBso) -> DbResult> { let user_id = params.user_id.legacy_id as i64; let collection_id = self.get_collection_id(¶ms.collection)?; Ok(bso::table @@ -648,7 +646,7 @@ impl MysqlDb { .optional()?) } - pub fn delete_bso_sync(&self, params: params::DeleteBso) -> Result { + fn delete_bso_sync(&self, params: params::DeleteBso) -> DbResult { let user_id = params.user_id.legacy_id; let collection_id = self.get_collection_id(¶ms.collection)?; let affected_rows = delete(bso::table) @@ -658,12 +656,12 @@ impl MysqlDb { .filter(bso::expiry.gt(&self.timestamp().as_i64())) .execute(&self.conn)?; if affected_rows == 0 { - Err(DbErrorKind::BsoNotFound)? + return Err(DbError::bso_not_found()); } self.update_collection(user_id as u32, collection_id) } - pub fn delete_bsos_sync(&self, params: params::DeleteBsos) -> Result { + fn delete_bsos_sync(&self, params: params::DeleteBsos) -> DbResult { let user_id = params.user_id.legacy_id as i64; let collection_id = self.get_collection_id(¶ms.collection)?; delete(bso::table) @@ -674,7 +672,7 @@ impl MysqlDb { self.update_collection(user_id as u32, collection_id) } - pub fn post_bsos_sync(&self, input: params::PostBsos) -> Result { + fn post_bsos_sync(&self, input: params::PostBsos) -> DbResult { let collection_id = self.get_or_create_collection_id(&input.collection)?; let mut result = results::PostBsos { modified: self.timestamp(), @@ -707,20 +705,20 @@ impl MysqlDb { Ok(result) } - pub fn get_storage_timestamp_sync(&self, user_id: UserIdentifier) -> Result { + fn get_storage_timestamp_sync(&self, user_id: UserIdentifier) -> DbResult { let user_id = user_id.legacy_id as i64; let modified = user_collections::table .select(max(user_collections::modified)) .filter(user_collections::user_id.eq(user_id)) .first::>(&self.conn)? 
.unwrap_or_default(); - SyncTimestamp::from_i64(modified) + SyncTimestamp::from_i64(modified).map_err(Into::into) } - pub fn get_collection_timestamp_sync( + fn get_collection_timestamp_sync( &self, params: params::GetCollectionTimestamp, - ) -> Result { + ) -> DbResult { let user_id = params.user_id.legacy_id as u32; let collection_id = self.get_collection_id(¶ms.collection)?; if let Some(modified) = self @@ -737,10 +735,10 @@ impl MysqlDb { .filter(user_collections::collection_id.eq(collection_id)) .first(&self.conn) .optional()? - .ok_or_else(|| DbErrorKind::CollectionNotFound.into()) + .ok_or_else(DbError::collection_not_found) } - pub fn get_bso_timestamp_sync(&self, params: params::GetBsoTimestamp) -> Result { + fn get_bso_timestamp_sync(&self, params: params::GetBsoTimestamp) -> DbResult { let user_id = params.user_id.legacy_id as i64; let collection_id = self.get_collection_id(¶ms.collection)?; let modified = bso::table @@ -751,13 +749,13 @@ impl MysqlDb { .first::(&self.conn) .optional()? .unwrap_or_default(); - SyncTimestamp::from_i64(modified) + SyncTimestamp::from_i64(modified).map_err(Into::into) } - pub fn get_collection_timestamps_sync( + fn get_collection_timestamps_sync( &self, user_id: UserIdentifier, - ) -> Result { + ) -> DbResult { let modifieds = sql_query(format!( "SELECT {collection_id}, {modified} FROM user_collections @@ -771,26 +769,29 @@ impl MysqlDb { .bind::(TOMBSTONE) .load::(&self.conn)? .into_iter() - .map(|cr| SyncTimestamp::from_i64(cr.last_modified).map(|ts| (cr.collection, ts))) - .collect::>>()?; + .map(|cr| { + SyncTimestamp::from_i64(cr.last_modified) + .map(|ts| (cr.collection, ts)) + .map_err(Into::into) + }) + .collect::>>()?; self.map_collection_names(modifieds) } - fn check_sync(&self) -> Result { + fn check_sync(&self) -> DbResult { // has the database been up for more than 0 seconds? let result = sql_query("SHOW STATUS LIKE \"Uptime\"").execute(&self.conn)?; Ok(result as u64 > 0) } - fn map_collection_names(&self, by_id: HashMap) -> Result> { + fn map_collection_names(&self, by_id: HashMap) -> DbResult> { let mut names = self.load_collection_names(by_id.keys())?; by_id .into_iter() .map(|(id, value)| { - names - .remove(&id) - .map(|name| (name, value)) - .ok_or_else(|| DbError::internal("load_collection_names unknown collection id")) + names.remove(&id).map(|name| (name, value)).ok_or_else(|| { + DbError::internal("load_collection_names unknown collection id".to_owned()) + }) }) .collect() } @@ -798,7 +799,7 @@ impl MysqlDb { fn load_collection_names<'a>( &self, collection_ids: impl Iterator, - ) -> Result> { + ) -> DbResult> { let mut names = HashMap::new(); let mut uncached = Vec::new(); for &id in collection_ids { @@ -830,7 +831,7 @@ impl MysqlDb { &self, user_id: u32, collection_id: i32, - ) -> Result { + ) -> DbResult { let quota = if self.quota.enabled { self.calc_quota_usage_sync(user_id, collection_id)? 
} else { @@ -869,10 +870,10 @@ impl MysqlDb { } // Perform a lighter weight "read only" storage size check - pub fn get_storage_usage_sync( + fn get_storage_usage_sync( &self, user_id: UserIdentifier, - ) -> Result { + ) -> DbResult { let uid = user_id.legacy_id as i64; let total_bytes = bso::table .select(sql::>("SUM(LENGTH(payload))")) @@ -883,10 +884,10 @@ impl MysqlDb { } // Perform a lighter weight "read only" quota storage check - pub fn get_quota_usage_sync( + fn get_quota_usage_sync( &self, params: params::GetQuotaUsage, - ) -> Result { + ) -> DbResult { let uid = params.user_id.legacy_id as i64; let (total_bytes, count): (i64, i32) = user_collections::table .select(( @@ -905,11 +906,11 @@ impl MysqlDb { } // perform a heavier weight quota calculation - pub fn calc_quota_usage_sync( + fn calc_quota_usage_sync( &self, user_id: u32, collection_id: i32, - ) -> Result { + ) -> DbResult { let (total_bytes, count): (i64, i32) = bso::table .select(( sql::(r#"COALESCE(SUM(LENGTH(COALESCE(payload, ""))),0)"#), @@ -927,10 +928,10 @@ impl MysqlDb { }) } - pub fn get_collection_usage_sync( + fn get_collection_usage_sync( &self, user_id: UserIdentifier, - ) -> Result { + ) -> DbResult { let counts = bso::table .select((bso::collection_id, sql::("SUM(LENGTH(payload))"))) .filter(bso::user_id.eq(user_id.legacy_id as i64)) @@ -942,10 +943,10 @@ impl MysqlDb { self.map_collection_names(counts) } - pub fn get_collection_counts_sync( + fn get_collection_counts_sync( &self, user_id: UserIdentifier, - ) -> Result { + ) -> DbResult { let counts = bso::table .select(( bso::collection_id, @@ -969,51 +970,34 @@ impl MysqlDb { batch_db_method!(commit_batch_sync, commit, CommitBatch); batch_db_method!(delete_batch_sync, delete, DeleteBatch); - pub fn get_batch_sync(&self, params: params::GetBatch) -> Result> { + fn get_batch_sync(&self, params: params::GetBatch) -> DbResult> { batch::get(self, params) } - pub fn timestamp(&self) -> SyncTimestamp { + pub(super) fn timestamp(&self) -> SyncTimestamp { self.session.borrow().timestamp } } -#[macro_export] -macro_rules! 
sync_db_method { - ($name:ident, $sync_name:ident, $type:ident) => { - sync_db_method!($name, $sync_name, $type, results::$type); - }; - ($name:ident, $sync_name:ident, $type:ident, $result:ty) => { - fn $name(&self, params: params::$type) -> DbFuture<'_, $result> { - let db = self.clone(); - Box::pin( - self.blocking_threadpool - .spawn(move || db.$sync_name(params)), - ) - } - }; -} -impl<'a> Db<'a> for MysqlDb { - fn commit(&self) -> DbFuture<'_, ()> { +impl Db for MysqlDb { + type Error = DbError; + + fn commit(&self) -> DbFuture<'_, (), Self::Error> { let db = self.clone(); Box::pin(self.blocking_threadpool.spawn(move || db.commit_sync())) } - fn rollback(&self) -> DbFuture<'_, ()> { + fn rollback(&self) -> DbFuture<'_, (), Self::Error> { let db = self.clone(); Box::pin(self.blocking_threadpool.spawn(move || db.rollback_sync())) } - fn begin(&self, for_write: bool) -> DbFuture<'_, ()> { + fn begin(&self, for_write: bool) -> DbFuture<'_, (), Self::Error> { let db = self.clone(); Box::pin(async move { db.begin_async(for_write).map_err(Into::into).await }) } - fn box_clone(&self) -> Box> { - Box::new(self.clone()) - } - - fn check(&self) -> DbFuture<'_, results::Check> { + fn check(&self) -> DbFuture<'_, results::Check, Self::Error> { let db = self.clone(); Box::pin(self.blocking_threadpool.spawn(move || db.check_sync())) } @@ -1073,7 +1057,7 @@ impl<'a> Db<'a> for MysqlDb { ); sync_db_method!(commit_batch, commit_batch_sync, CommitBatch); - fn get_collection_id(&self, name: String) -> DbFuture<'_, i32> { + fn get_collection_id(&self, name: String) -> DbFuture<'_, i32, Self::Error> { let db = self.clone(); Box::pin( self.blocking_threadpool @@ -1085,7 +1069,7 @@ impl<'a> Db<'a> for MysqlDb { results::ConnectionInfo::default() } - fn create_collection(&self, name: String) -> DbFuture<'_, i32> { + fn create_collection(&self, name: String) -> DbFuture<'_, i32, Self::Error> { let db = self.clone(); Box::pin( self.blocking_threadpool @@ -1093,7 +1077,10 @@ impl<'a> Db<'a> for MysqlDb { ) } - fn update_collection(&self, param: params::UpdateCollection) -> DbFuture<'_, SyncTimestamp> { + fn update_collection( + &self, + param: params::UpdateCollection, + ) -> DbFuture<'_, SyncTimestamp, Self::Error> { let db = self.clone(); Box::pin(self.blocking_threadpool.spawn(move || { db.update_collection(param.user_id.legacy_id as u32, param.collection_id) @@ -1110,7 +1097,7 @@ impl<'a> Db<'a> for MysqlDb { sync_db_method!(delete_batch, delete_batch_sync, DeleteBatch); - fn clear_coll_cache(&self) -> DbFuture<'_, ()> { + fn clear_coll_cache(&self) -> DbFuture<'_, (), Self::Error> { let db = self.clone(); Box::pin(self.blocking_threadpool.spawn(move || { db.coll_cache.clear(); @@ -1125,6 +1112,10 @@ impl<'a> Db<'a> for MysqlDb { enforced, } } + + fn box_clone(&self) -> Box> { + Box::new(self.clone()) + } } #[derive(Debug, QueryableByName)] diff --git a/syncserver/src/db/mysql/pool.rs b/syncstorage-mysql/src/pool.rs similarity index 76% rename from syncserver/src/db/mysql/pool.rs rename to syncstorage-mysql/src/pool.rs index a90034db..f2bf0d14 100644 --- a/syncserver/src/db/mysql/pool.rs +++ b/syncstorage-mysql/src/pool.rs @@ -12,16 +12,16 @@ use diesel::{ r2d2::{ConnectionManager, Pool}, Connection, }; -#[cfg(test)] +#[cfg(debug_assertions)] use diesel_logger::LoggingConnection; -use syncserver_db_common::{error::DbError, Db, DbPool, GetPoolState, PoolState, STD_COLLS}; +use syncserver_common::{BlockingThreadpool, Metrics}; +#[cfg(debug_assertions)] +use 
syncserver_db_common::test::TestTransactionCustomizer; +use syncserver_db_common::{GetPoolState, PoolState}; +use syncstorage_db_common::{Db, DbPool, STD_COLLS}; use syncstorage_settings::{Quota, Settings}; -use super::models::{MysqlDb, Result}; -#[cfg(test)] -use super::test::TestTransactionCustomizer; -use crate::db::BlockingThreadpool; -use crate::server::metrics::Metrics; +use super::{error::DbError, models::MysqlDb, DbResult}; embed_migrations!(); @@ -29,13 +29,13 @@ embed_migrations!(); /// /// Mysql DDL statements implicitly commit which could disrupt MysqlPool's /// begin_test_transaction during tests. So this runs on its own separate conn. -pub fn run_embedded_migrations(database_url: &str) -> Result<()> { +fn run_embedded_migrations(database_url: &str) -> DbResult<()> { let conn = MysqlConnection::establish(database_url)?; - #[cfg(test)] + #[cfg(debug_assertions)] // XXX: this doesn't show the DDL statements // https://github.com/shssoichiro/diesel-logger/issues/1 embedded_migrations::run(&LoggingConnection::new(conn))?; - #[cfg(not(test))] + #[cfg(not(debug_assertions))] embedded_migrations::run(&conn)?; Ok(()) } @@ -61,7 +61,7 @@ impl MysqlDbPool { settings: &Settings, metrics: &Metrics, blocking_threadpool: Arc, - ) -> Result { + ) -> DbResult { run_embedded_migrations(&settings.database_url)?; Self::new_without_migrations(settings, metrics, blocking_threadpool) } @@ -70,7 +70,7 @@ impl MysqlDbPool { settings: &Settings, metrics: &Metrics, blocking_threadpool: Arc, - ) -> Result { + ) -> DbResult { let manager = ConnectionManager::::new(settings.database_url.clone()); let builder = Pool::builder() .max_size(settings.database_pool_max_size) @@ -79,7 +79,7 @@ impl MysqlDbPool { )) .min_idle(settings.database_pool_min_idle); - #[cfg(test)] + #[cfg(debug_assertions)] let builder = if settings.database_use_test_transactions { builder.connection_customizer(Box::new(TestTransactionCustomizer)) } else { @@ -99,7 +99,7 @@ impl MysqlDbPool { }) } - pub fn get_sync(&self) -> Result { + pub fn get_sync(&self) -> DbResult { Ok(MysqlDb::new( self.pool.get()?, Arc::clone(&self.coll_cache), @@ -112,31 +112,25 @@ impl MysqlDbPool { #[async_trait] impl DbPool for MysqlDbPool { - async fn get<'a>(&'a self) -> Result>> { - let pool = self.clone(); - let db = self - .blocking_threadpool - .spawn(move || pool.get_sync()) - .await?; + type Error = DbError; - Ok(Box::new(db) as Box>) + async fn get<'a>(&'a self) -> DbResult>> { + let pool = self.clone(); + self.blocking_threadpool + .spawn(move || pool.get_sync()) + .await + .map(|db| Box::new(db) as Box>) } - fn validate_batch_id(&self, id: String) -> Result<()> { + fn validate_batch_id(&self, id: String) -> DbResult<()> { super::batch::validate_batch_id(&id) } - fn box_clone(&self) -> Box { + fn box_clone(&self) -> Box> { Box::new(self.clone()) } } -impl GetPoolState for MysqlDbPool { - fn state(&self) -> PoolState { - self.pool.state().into() - } -} - impl fmt::Debug for MysqlDbPool { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt.debug_struct("MysqlDbPool") @@ -145,42 +139,48 @@ impl fmt::Debug for MysqlDbPool { } } +impl GetPoolState for MysqlDbPool { + fn state(&self) -> PoolState { + self.pool.state().into() + } +} + #[derive(Debug)] -pub struct CollectionCache { +pub(super) struct CollectionCache { pub by_name: RwLock>, pub by_id: RwLock>, } impl CollectionCache { - pub fn put(&self, id: i32, name: String) -> Result<()> { + pub fn put(&self, id: i32, name: String) -> DbResult<()> { // XXX: should this emit a metric? 
// XXX: should probably either lock both simultaneously during // writes or use an RwLock alternative self.by_name .write() - .map_err(|_| DbError::internal("by_name write"))? + .map_err(|_| DbError::internal("by_name write".to_owned()))? .insert(name.clone(), id); self.by_id .write() - .map_err(|_| DbError::internal("by_id write"))? + .map_err(|_| DbError::internal("by_id write".to_owned()))? .insert(id, name); Ok(()) } - pub fn get_id(&self, name: &str) -> Result> { + pub fn get_id(&self, name: &str) -> DbResult> { Ok(self .by_name .read() - .map_err(|_| DbError::internal("by_name read"))? + .map_err(|_| DbError::internal("by_name read".to_owned()))? .get(name) .cloned()) } - pub fn get_name(&self, id: i32) -> Result> { + pub fn get_name(&self, id: i32) -> DbResult> { Ok(self .by_id .read() - .map_err(|_| DbError::internal("by_id read"))? + .map_err(|_| DbError::internal("by_id read".to_owned()))? .get(&id) .cloned()) } diff --git a/syncserver/src/db/mysql/schema.rs b/syncstorage-mysql/src/schema.rs similarity index 100% rename from syncserver/src/db/mysql/schema.rs rename to syncstorage-mysql/src/schema.rs diff --git a/syncserver/src/db/mysql/test.rs b/syncstorage-mysql/src/test.rs similarity index 74% rename from syncserver/src/db/mysql/test.rs rename to syncstorage-mysql/src/test.rs index fdeaf1ad..55b98ea1 100644 --- a/syncserver/src/db/mysql/test.rs +++ b/syncstorage-mysql/src/test.rs @@ -1,49 +1,32 @@ -use std::{collections::HashMap, result::Result as StdResult, sync::Arc}; +use std::{collections::HashMap, sync::Arc}; use diesel::{ // expression_methods::TextExpressionMethods, // See note below about `not_like` becoming swedish - mysql::MysqlConnection, - r2d2::{CustomizeConnection, Error as PoolError}, - Connection, ExpressionMethods, QueryDsl, RunQueryDsl, }; +use syncserver_common::{BlockingThreadpool, Metrics}; use syncserver_settings::Settings as SyncserverSettings; use syncstorage_settings::Settings as SyncstorageSettings; use url::Url; -use crate::db::mysql::{ - models::{MysqlDb, Result}, - pool::MysqlDbPool, - schema::collections, -}; -use crate::db::BlockingThreadpool; -use crate::server::metrics; +use crate::{models::MysqlDb, pool::MysqlDbPool, schema::collections, DbResult}; -#[derive(Debug)] -pub struct TestTransactionCustomizer; - -impl CustomizeConnection for TestTransactionCustomizer { - fn on_acquire(&self, conn: &mut MysqlConnection) -> StdResult<(), PoolError> { - conn.begin_test_transaction().map_err(PoolError::QueryError) - } -} - -pub fn db(settings: &SyncstorageSettings) -> Result { +pub fn db(settings: &SyncstorageSettings) -> DbResult { let _ = env_logger::try_init(); // inherit SYNC_SYNCSTORAGE__DATABASE_URL from the env let pool = MysqlDbPool::new( settings, - &metrics::Metrics::noop(), + &Metrics::noop(), Arc::new(BlockingThreadpool::default()), )?; pool.get_sync() } #[test] -fn static_collection_id() -> Result<()> { +fn static_collection_id() -> DbResult<()> { let settings = SyncserverSettings::test_settings().syncstorage; if Url::parse(&settings.database_url).unwrap().scheme() != "mysql" { // Skip this test if we're not using mysql diff --git a/syncstorage-settings/src/lib.rs b/syncstorage-settings/src/lib.rs index 6f03ec53..3011e5c8 100644 --- a/syncstorage-settings/src/lib.rs +++ b/syncstorage-settings/src/lib.rs @@ -75,6 +75,7 @@ pub struct Settings { pub database_pool_connection_lifespan: Option, /// Max time a connection should sit idle before being dropped. 
pub database_pool_connection_max_idle: Option, + #[cfg(debug_assertions)] pub database_use_test_transactions: bool, /// Server-enforced limits for request payloads. @@ -105,6 +106,7 @@ impl Default for Settings { database_pool_connection_lifespan: None, database_pool_connection_max_idle: None, database_pool_connection_timeout: Some(30), + #[cfg(debug_assertions)] database_use_test_transactions: false, limits: ServerLimits::default(), statsd_label: "syncstorage".to_string(), diff --git a/syncstorage-spanner/Cargo.toml b/syncstorage-spanner/Cargo.toml new file mode 100644 index 00000000..5182bec5 --- /dev/null +++ b/syncstorage-spanner/Cargo.toml @@ -0,0 +1,41 @@ +[package] +name = "syncstorage-spanner" +version = "0.12.3" +edition = "2021" + +[dependencies] +async-trait = "0.1.40" +backtrace = "0.3.61" +cadence = "0.26" +# Pin to 0.5 for now, to keep it under tokio 0.2 (issue977). +# Fix for #803 (deadpool#92) points to our fork for now +#deadpool = "0.5" # pin to 0.5 +deadpool = { git = "https://github.com/mozilla-services/deadpool", branch = "deadpool-v0.5.2-issue92" } +env_logger = "0.9" +futures = { version = "0.3", features = ["compat"] } +google-cloud-rust-raw = "0.11.0" +# Some versions of OpenSSL 1.1.1 conflict with grpcio's built-in boringssl which can cause +# syncserver to either fail to either compile, or start. In those cases, try +# `cargo build --features grpcio/openssl ...` +grpcio = { version = "0.9" } +http = "0.2.5" +log = { version = "0.4", features = [ + "max_level_debug", + "release_max_level_info", +] } +# must match what's used by googleapis-raw +protobuf = "2.20.0" +slog-scope = "4.3" +syncserver-common = { path = "../syncserver-common" } +syncserver-db-common = { path = "../syncserver-db-common" } +syncstorage-db-common = { path = "../syncstorage-db-common" } +syncstorage-settings = { path = "../syncstorage-settings" } +thiserror = "1.0.26" +# pinning to 0.2.4 due to high number of dependencies (actix, bb8, deadpool, etc.) 
+tokio = { version = "0.2.4", features = ["macros", "sync"] } +url = "2.1" +uuid = { version = "0.8.2", features = ["serde", "v4"] } + +[[bin]] +name = "purge_ttl" +path = "src/bin/purge_ttl.rs" diff --git a/syncserver/src/db/spanner/BATCH_COMMIT.txt b/syncstorage-spanner/src/BATCH_COMMIT.txt similarity index 100% rename from syncserver/src/db/spanner/BATCH_COMMIT.txt rename to syncstorage-spanner/src/BATCH_COMMIT.txt diff --git a/syncserver/src/db/spanner/batch.rs b/syncstorage-spanner/src/batch.rs similarity index 96% rename from syncserver/src/db/spanner/batch.rs rename to syncstorage-spanner/src/batch.rs index 74acfd02..bc69acdc 100644 --- a/syncserver/src/db/spanner/batch.rs +++ b/syncstorage-spanner/src/batch.rs @@ -8,24 +8,24 @@ use protobuf::{ well_known_types::{ListValue, Value}, RepeatedField, }; -use syncserver_db_common::{ - error::{DbError, DbErrorKind}, - params, results, - util::to_rfc3339, - UserIdentifier, BATCH_LIFETIME, DEFAULT_BSO_TTL, +use syncstorage_db_common::{ + params, results, util::to_rfc3339, UserIdentifier, BATCH_LIFETIME, DEFAULT_BSO_TTL, }; use uuid::Uuid; -use super::models::{Result, SpannerDb, PRETOUCH_TS}; +use crate::error::DbError; + +use super::models::{SpannerDb, PRETOUCH_TS}; use super::support::{as_type, null_value, struct_type_field, IntoSpannerValue}; +use super::DbResult; pub async fn create_async( db: &SpannerDb, params: params::CreateBatch, -) -> Result { +) -> DbResult { let batch_id = Uuid::new_v4().to_simple().to_string(); let collection_id = db.get_collection_id_async(¶ms.collection).await?; - let timestamp = db.timestamp()?.as_i64(); + let timestamp = db.checked_timestamp()?.as_i64(); // Ensure a parent record exists in user_collections before writing to batches // (INTERLEAVE IN PARENT user_collections) @@ -66,13 +66,13 @@ pub async fn create_async( Ok(new_batch) } -pub async fn validate_async(db: &SpannerDb, params: params::ValidateBatch) -> Result { +pub async fn validate_async(db: &SpannerDb, params: params::ValidateBatch) -> DbResult { let exists = get_async(db, params.into()).await?; Ok(exists.is_some()) } // Append a collection to a pending batch (`create_batch` creates a new batch) -pub async fn append_async(db: &SpannerDb, params: params::AppendToBatch) -> Result<()> { +pub async fn append_async(db: &SpannerDb, params: params::AppendToBatch) -> DbResult<()> { let mut metrics = db.metrics.clone(); metrics.start_timer("storage.spanner.append_items_to_batch", None); let collection_id = db.get_collection_id_async(¶ms.collection).await?; @@ -98,7 +98,7 @@ pub async fn append_async(db: &SpannerDb, params: params::AppendToBatch) -> Resu if !exists { // NOTE: db tests expects this but it doesn't seem necessary w/ the // handler validating the batch before appends - Err(DbErrorKind::BatchNotFound)? + return Err(DbError::batch_not_found()); } do_append_async( @@ -116,7 +116,7 @@ pub async fn append_async(db: &SpannerDb, params: params::AppendToBatch) -> Resu pub async fn get_async( db: &SpannerDb, params: params::GetBatch, -) -> Result> { +) -> DbResult> { let collection_id = db.get_collection_id_async(¶ms.collection).await?; let (sqlparams, sqlparam_types) = params! 
{ "fxa_uid" => params.user_id.fxa_uid.clone(), @@ -143,7 +143,7 @@ pub async fn get_async( Ok(batch) } -pub async fn delete_async(db: &SpannerDb, params: params::DeleteBatch) -> Result<()> { +pub async fn delete_async(db: &SpannerDb, params: params::DeleteBatch) -> DbResult<()> { let collection_id = db.get_collection_id_async(¶ms.collection).await?; let (sqlparams, sqlparam_types) = params! { "fxa_uid" => params.user_id.fxa_uid.clone(), @@ -170,7 +170,7 @@ pub async fn delete_async(db: &SpannerDb, params: params::DeleteBatch) -> Result pub async fn commit_async( db: &SpannerDb, params: params::CommitBatch, -) -> Result { +) -> DbResult { let mut metrics = db.metrics.clone(); metrics.start_timer("storage.spanner.apply_batch", None); let collection_id = db.get_collection_id_async(¶ms.collection).await?; @@ -249,7 +249,7 @@ pub async fn do_append_async( batch: results::CreateBatch, bsos: Vec, collection: &str, -) -> Result<()> { +) -> DbResult<()> { // Pass an array of struct objects as @values (for UNNEST), e.g.: // [("", "", 101, "ba1", "bso1", NULL, "payload1", NULL), // ("", "", 101, "ba1", "bso2", NULL, "payload2", NULL)] @@ -528,7 +528,7 @@ async fn pretouch_collection_async( db: &SpannerDb, user_id: &UserIdentifier, collection_id: i32, -) -> Result<()> { +) -> DbResult<()> { let (mut sqlparams, mut sqlparam_types) = params! { "fxa_uid" => user_id.fxa_uid.clone(), "fxa_kid" => user_id.fxa_kid.clone(), @@ -569,8 +569,8 @@ async fn pretouch_collection_async( Ok(()) } -pub fn validate_batch_id(id: &str) -> Result<()> { +pub fn validate_batch_id(id: &str) -> DbResult<()> { Uuid::from_str(id) .map(|_| ()) - .map_err(|e| DbError::internal(&format!("Invalid batch_id: {}", e))) + .map_err(|e| DbError::internal(format!("Invalid batch_id: {}", e))) } diff --git a/syncserver/src/db/spanner/batch_commit_insert.sql b/syncstorage-spanner/src/batch_commit_insert.sql similarity index 100% rename from syncserver/src/db/spanner/batch_commit_insert.sql rename to syncstorage-spanner/src/batch_commit_insert.sql diff --git a/syncserver/src/db/spanner/batch_commit_update.sql b/syncstorage-spanner/src/batch_commit_update.sql similarity index 100% rename from syncserver/src/db/spanner/batch_commit_update.sql rename to syncstorage-spanner/src/batch_commit_update.sql diff --git a/syncserver/src/db/spanner/batch_index.sql b/syncstorage-spanner/src/batch_index.sql similarity index 100% rename from syncserver/src/db/spanner/batch_index.sql rename to syncstorage-spanner/src/batch_index.sql diff --git a/syncserver/src/bin/purge_ttl.rs b/syncstorage-spanner/src/bin/purge_ttl.rs similarity index 100% rename from syncserver/src/bin/purge_ttl.rs rename to syncstorage-spanner/src/bin/purge_ttl.rs diff --git a/syncstorage-spanner/src/error.rs b/syncstorage-spanner/src/error.rs new file mode 100644 index 00000000..34a951de --- /dev/null +++ b/syncstorage-spanner/src/error.rs @@ -0,0 +1,151 @@ +use std::fmt; + +use backtrace::Backtrace; +use http::StatusCode; +use syncserver_common::{from_error, impl_fmt_display, InternalError, ReportableError}; +use syncstorage_db_common::error::{DbErrorIntrospect, SyncstorageDbError}; +use thiserror::Error; + +/// An error type that represents any Spanner-related errors that may occur while processing a +/// syncstorage request. These errors may be application-specific or lower-level errors that arise +/// from the database backend. 
+#[derive(Debug)] +pub struct DbError { + kind: DbErrorKind, + pub status: StatusCode, + pub backtrace: Box, +} + +impl DbError { + pub fn batch_not_found() -> Self { + DbErrorKind::Common(SyncstorageDbError::batch_not_found()).into() + } + + pub fn bso_not_found() -> Self { + DbErrorKind::Common(SyncstorageDbError::bso_not_found()).into() + } + + pub fn collection_not_found() -> Self { + DbErrorKind::Common(SyncstorageDbError::collection_not_found()).into() + } + + pub fn conflict() -> Self { + DbErrorKind::Common(SyncstorageDbError::conflict()).into() + } + + pub fn expired() -> Self { + DbErrorKind::Expired.into() + } + + pub fn integrity(msg: String) -> Self { + DbErrorKind::Integrity(msg).into() + } + + pub fn internal(msg: String) -> Self { + DbErrorKind::Common(SyncstorageDbError::internal(msg)).into() + } + + pub fn quota() -> Self { + DbErrorKind::Common(SyncstorageDbError::quota()).into() + } + + pub fn too_large(msg: String) -> Self { + DbErrorKind::TooLarge(msg).into() + } +} + +#[derive(Debug, Error)] +enum DbErrorKind { + #[error("{}", _0)] + Common(SyncstorageDbError), + + #[error("Connection expired")] + Expired, + + #[error("A database error occurred: {}", _0)] + Grpc(#[from] grpcio::Error), + + #[error("Database integrity error: {}", _0)] + Integrity(String), + + #[error("Spanner data load too large: {}", _0)] + TooLarge(String), +} + +impl From for DbError { + fn from(kind: DbErrorKind) -> Self { + let status = match &kind { + DbErrorKind::Common(e) => e.status, + // Matching the Python code here (a 400 vs 404) + DbErrorKind::TooLarge(_) => StatusCode::BAD_REQUEST, + _ => StatusCode::INTERNAL_SERVER_ERROR, + }; + + Self { + kind, + status, + backtrace: Box::new(Backtrace::new()), + } + } +} + +impl DbErrorIntrospect for DbError { + fn is_batch_not_found(&self) -> bool { + matches!(&self.kind, DbErrorKind::Common(e) if e.is_batch_not_found()) + } + + fn is_bso_not_found(&self) -> bool { + matches!(&self.kind, DbErrorKind::Common(e) if e.is_bso_not_found()) + } + + fn is_collection_not_found(&self) -> bool { + matches!(&self.kind, DbErrorKind::Common(e) if e.is_collection_not_found()) + } + + fn is_conflict(&self) -> bool { + matches!(&self.kind, DbErrorKind::Common(e) if e.is_conflict()) + } + + fn is_quota(&self) -> bool { + matches!(&self.kind, DbErrorKind::Common(e) if e.is_quota()) + } +} + +impl ReportableError for DbError { + fn is_sentry_event(&self) -> bool { + matches!(&self.kind, DbErrorKind::Common(e) if e.is_sentry_event()) + } + + fn metric_label(&self) -> Option { + if let DbErrorKind::Common(e) = &self.kind { + e.metric_label() + } else { + None + } + } + + fn error_backtrace(&self) -> String { + format!("{:#?}", self.backtrace) + } +} + +impl InternalError for DbError { + fn internal_error(message: String) -> Self { + DbErrorKind::Common(SyncstorageDbError::internal(message)).into() + } +} + +impl_fmt_display!(DbError, DbErrorKind); + +from_error!(grpcio::Error, DbError, |inner: grpcio::Error| { + // Convert ABORTED (typically due to a transaction abort) into 503s + match inner { + grpcio::Error::RpcFailure(ref status) | grpcio::Error::RpcFinished(Some(ref status)) + if status.code() == grpcio::RpcStatusCode::ABORTED => + { + DbErrorKind::Common(SyncstorageDbError::conflict()) + } + _ => DbErrorKind::Grpc(inner), + } +}); +from_error!(SyncstorageDbError, DbError, DbErrorKind::Common); diff --git a/syncserver/src/db/spanner/insert_standard_collections.sql b/syncstorage-spanner/src/insert_standard_collections.sql similarity index 100% rename from 
syncserver/src/db/spanner/insert_standard_collections.sql rename to syncstorage-spanner/src/insert_standard_collections.sql diff --git a/syncstorage-spanner/src/lib.rs b/syncstorage-spanner/src/lib.rs new file mode 100644 index 00000000..560c3f64 --- /dev/null +++ b/syncstorage-spanner/src/lib.rs @@ -0,0 +1,27 @@ +use std::time::SystemTime; + +#[macro_use] +extern crate slog_scope; + +#[macro_use] +mod macros; + +mod batch; +mod error; +mod manager; +mod models; +mod pool; +mod support; + +pub use error::DbError; +pub use models::SpannerDb; +pub use pool::SpannerDbPool; + +type DbResult = Result; + +fn now() -> i64 { + SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .unwrap_or_default() + .as_secs() as i64 +} diff --git a/syncserver/src/db/spanner/macros.rs b/syncstorage-spanner/src/macros.rs similarity index 98% rename from syncserver/src/db/spanner/macros.rs rename to syncstorage-spanner/src/macros.rs index 651265a3..6785c4e6 100644 --- a/syncserver/src/db/spanner/macros.rs +++ b/syncstorage-spanner/src/macros.rs @@ -19,7 +19,7 @@ macro_rules! params { #[test] fn test_params_macro() { - use crate::db::spanner::support::IntoSpannerValue; + use super::support::IntoSpannerValue; use google_cloud_rust_raw::spanner::v1::type_pb::{Type, TypeCode}; use protobuf::{ well_known_types::{ListValue, Value}, diff --git a/syncserver/src/db/spanner/manager/bb8.rs b/syncstorage-spanner/src/manager/bb8.rs similarity index 96% rename from syncserver/src/db/spanner/manager/bb8.rs rename to syncstorage-spanner/src/manager/bb8.rs index 7ad6d7f8..cb551e21 100644 --- a/syncserver/src/db/spanner/manager/bb8.rs +++ b/syncstorage-spanner/src/manager/bb8.rs @@ -10,7 +10,7 @@ use crate::{ error::{DbError, DbErrorKind}, PoolState, }, - server::metrics::Metrics, + server::Metrics, settings::Settings, }; @@ -19,7 +19,7 @@ use super::session::{create_spanner_session, recycle_spanner_session, SpannerSes #[allow(dead_code)] pub type Conn<'a> = PooledConnection<'a, SpannerSessionManager>; -pub struct SpannerSessionManager { +pub(super) struct SpannerSessionManager { database_name: String, /// The gRPC environment env: Arc, diff --git a/syncserver/src/db/spanner/manager/deadpool.rs b/syncstorage-spanner/src/manager/deadpool.rs similarity index 88% rename from syncserver/src/db/spanner/manager/deadpool.rs rename to syncstorage-spanner/src/manager/deadpool.rs index 4433d655..ededa3fa 100644 --- a/syncserver/src/db/spanner/manager/deadpool.rs +++ b/syncstorage-spanner/src/manager/deadpool.rs @@ -3,17 +3,15 @@ use std::{fmt, sync::Arc}; use async_trait::async_trait; use deadpool::managed::{Manager, RecycleError, RecycleResult}; use grpcio::{EnvBuilder, Environment}; -use syncserver_db_common::error::{DbError, DbErrorKind}; +use syncserver_common::{BlockingThreadpool, Metrics}; use syncstorage_settings::Settings; -use crate::db::BlockingThreadpool; -use crate::server::metrics::Metrics; - use super::session::{create_spanner_session, recycle_spanner_session, SpannerSession}; +use crate::error::DbError; -pub type Conn = deadpool::managed::Object; +pub(crate) type Conn = deadpool::managed::Object; -pub struct SpannerSessionManager { +pub(crate) struct SpannerSessionManager { database_name: String, /// The gRPC environment env: Arc, @@ -42,7 +40,9 @@ impl SpannerSessionManager { ) -> Result { let database_name = settings .spanner_database_name() - .ok_or_else(|| DbErrorKind::InvalidUrl(settings.database_url.to_owned()))? 
+ .ok_or_else(|| { + DbError::internal(format!("invalid database url: {}", settings.database_url)) + })? .to_owned(); let env = Arc::new(EnvBuilder::new().build()); diff --git a/syncstorage-spanner/src/manager/mod.rs b/syncstorage-spanner/src/manager/mod.rs new file mode 100644 index 00000000..b1e20c53 --- /dev/null +++ b/syncstorage-spanner/src/manager/mod.rs @@ -0,0 +1,6 @@ +// mod bb8; +mod deadpool; +mod session; + +pub(super) use self::deadpool::{Conn, SpannerSessionManager}; +pub(super) use self::session::SpannerSession; diff --git a/syncserver/src/db/spanner/manager/session.rs b/syncstorage-spanner/src/manager/session.rs similarity index 94% rename from syncserver/src/db/spanner/manager/session.rs rename to syncstorage-spanner/src/manager/session.rs index b577c9de..68a123a6 100644 --- a/syncserver/src/db/spanner/manager/session.rs +++ b/syncstorage-spanner/src/manager/session.rs @@ -1,13 +1,13 @@ +use std::sync::Arc; + use google_cloud_rust_raw::spanner::v1::{ spanner::{CreateSessionRequest, GetSessionRequest, Session}, spanner_grpc::SpannerClient, }; use grpcio::{CallOption, ChannelBuilder, ChannelCredentials, Environment, MetadataBuilder}; -use std::sync::Arc; -use syncserver_db_common::error::{DbError, DbErrorKind}; +use syncserver_common::{BlockingThreadpool, Metrics}; -use crate::db::{spanner::now, BlockingThreadpool}; -use crate::server::metrics::Metrics; +use crate::error::DbError; const SPANNER_ADDRESS: &str = "spanner.googleapis.com:443"; @@ -21,11 +21,11 @@ pub struct SpannerSession { pub session: Session, /// The underlying client (Connection/Channel) for interacting with spanner pub client: SpannerClient, - pub(in crate::db::spanner) use_test_transactions: bool, + pub(crate) use_test_transactions: bool, /// A second based UTC for SpannerSession creation. /// Session has a similar `create_time` value that is managed by protobuf, /// but some clock skew issues are possible. - pub(in crate::db::spanner) create_time: i64, + pub(crate) create_time: i64, /// Whether we are using the Spanner emulator pub using_spanner_emulator: bool, } @@ -71,7 +71,7 @@ pub async fn create_spanner_session( session, client, use_test_transactions, - create_time: now(), + create_time: crate::now(), using_spanner_emulator, }) } @@ -84,7 +84,7 @@ pub async fn recycle_spanner_session( max_lifetime: Option, max_idle: Option, ) -> Result<(), DbError> { - let now = now(); + let now = crate::now(); let mut req = GetSessionRequest::new(); req.set_name(conn.session.get_name().to_owned()); /* @@ -128,7 +128,7 @@ pub async fn recycle_spanner_session( if age > max_life as i64 { metrics.incr("db.connection.max_life"); dbg!("### aging out", this_session.get_name()); - return Err(DbErrorKind::Expired.into()); + return Err(DbError::expired()); } } // check how long that this has been idle... 
@@ -145,7 +145,7 @@ pub async fn recycle_spanner_session( if idle > max_idle as i64 { metrics.incr("db.connection.max_idle"); dbg!("### idling out", this_session.get_name()); - return Err(DbErrorKind::Expired.into()); + return Err(DbError::expired()); } // and update the connection's reference session info conn.session = this_session; diff --git a/syncserver/src/db/spanner/models.rs b/syncstorage-spanner/src/models.rs similarity index 87% rename from syncserver/src/db/spanner/models.rs rename to syncstorage-spanner/src/models.rs index 74b111fc..fd7c73a8 100644 --- a/syncserver/src/db/spanner/models.rs +++ b/syncstorage-spanner/src/models.rs @@ -22,37 +22,33 @@ use protobuf::{ well_known_types::{ListValue, Value}, Message, RepeatedField, }; -use syncserver_common::MAX_SPANNER_LOAD_SIZE; -use syncserver_db_common::{ - error::{DbError, DbErrorKind}, - params, results, - util::SyncTimestamp, - Db, DbFuture, Sorting, UserIdentifier, DEFAULT_BSO_TTL, FIRST_CUSTOM_COLLECTION_ID, +use syncserver_common::{Metrics, MAX_SPANNER_LOAD_SIZE}; +use syncserver_db_common::DbFuture; +use syncstorage_db_common::{ + error::DbErrorIntrospect, params, results, util::SyncTimestamp, Db, Sorting, UserIdentifier, + DEFAULT_BSO_TTL, FIRST_CUSTOM_COLLECTION_ID, }; use syncstorage_settings::Quota; -use crate::{db::spanner::now, server::metrics::Metrics}; - use super::{ batch, + error::DbError, pool::{CollectionCache, Conn}, support::{ as_type, bso_from_row, bso_to_insert_row, bso_to_update_row, ExecuteSqlRequestBuilder, IntoSpannerValue, StreamedResultSetAsync, }, + DbResult, }; #[derive(Debug, Eq, PartialEq)] -pub enum CollectionLock { +enum CollectionLock { Read, Write, } -pub type Result = std::result::Result; - -pub const TOMBSTONE: i32 = 0; - -pub const PRETOUCH_TS: &str = "0001-01-01T00:00:00.00Z"; +const TOMBSTONE: i32 = 0; +pub(super) const PRETOUCH_TS: &str = "0001-01-01T00:00:00.00Z"; /// Per session Db metadata #[derive(Debug, Default)] @@ -106,7 +102,7 @@ impl Deref for SpannerDb { } impl SpannerDb { - pub fn new( + pub(super) fn new( conn: Conn, coll_cache: Arc, metrics: &Metrics, @@ -128,7 +124,7 @@ impl SpannerDb { self.coll_cache.get_name(id).await } - pub(super) async fn get_collection_id_async(&self, name: &str) -> Result { + pub(super) async fn get_collection_id_async(&self, name: &str) -> DbResult { if let Some(id) = self.coll_cache.get_id(name).await { return Ok(id); } @@ -144,23 +140,25 @@ impl SpannerDb { .execute_async(&self.conn)? .one_or_none() .await? - .ok_or(DbErrorKind::CollectionNotFound)?; + .ok_or_else(DbError::collection_not_found)?; let id = result[0] .get_string_value() .parse::() - .map_err(|e| DbErrorKind::Integrity(e.to_string()))?; + .map_err(|e| DbError::integrity(e.to_string()))?; if !self.in_write_transaction() { self.coll_cache.put(id, name.to_owned()).await; } Ok(id) } - pub(super) async fn create_collection_async(&self, name: &str) -> Result { + pub(super) async fn create_collection_async(&self, name: &str) -> DbResult { // This should always run within a r/w transaction, so that: "If a // transaction successfully commits, then no other writer modified the // data that was read in the transaction after it was read." if !cfg!(test) && !self.in_write_transaction() { - Err(DbError::internal("Can't escalate read-lock to write-lock"))? 
+ return Err(DbError::internal( + "Can't escalate read-lock to write-lock".to_owned(), + )); } let result = self .sql( @@ -173,7 +171,7 @@ impl SpannerDb { let max = result[0] .get_string_value() .parse::() - .map_err(|e| DbErrorKind::Integrity(e.to_string()))?; + .map_err(|e| DbError::integrity(e.to_string()))?; let id = FIRST_CUSTOM_COLLECTION_ID.max(max + 1); let (sqlparams, sqlparam_types) = params! { "name" => name.to_string(), @@ -191,14 +189,14 @@ impl SpannerDb { Ok(id) } - async fn get_or_create_collection_id_async(&self, name: &str) -> Result { + async fn get_or_create_collection_id_async(&self, name: &str) -> DbResult { match self.get_collection_id_async(name).await { Err(err) if err.is_collection_not_found() => self.create_collection_async(name).await, result => result, } } - pub async fn lock_for_read_async(&self, params: params::LockCollection) -> Result<()> { + async fn lock_for_read_async(&self, params: params::LockCollection) -> DbResult<()> { // Begin a transaction self.begin_async(false).await?; @@ -235,7 +233,7 @@ impl SpannerDb { Ok(()) } - pub async fn lock_for_write_async(&self, params: params::LockCollection) -> Result<()> { + async fn lock_for_write_async(&self, params: params::LockCollection) -> DbResult<()> { // Begin a transaction self.begin_async(true).await?; let collection_id = self @@ -248,7 +246,9 @@ impl SpannerDb { .coll_locks .get(&(params.user_id.clone(), collection_id)) { - Err(DbError::internal("Can't escalate read-lock to write-lock"))? + return Err(DbError::internal( + "Can't escalate read-lock to write-lock".to_owned(), + )); } let (sqlparams, mut sqlparam_types) = params! { "fxa_uid" => params.user_id.fxa_uid.clone(), @@ -274,12 +274,12 @@ impl SpannerDb { .await?; let timestamp = if let Some(result) = result { - let modified = SyncTimestamp::from_rfc3339(result[1].get_string_value())?; - let now = SyncTimestamp::from_rfc3339(result[0].get_string_value())?; + let modified = sync_timestamp_from_rfc3339(result[1].get_string_value())?; + let now = sync_timestamp_from_rfc3339(result[0].get_string_value())?; // Forbid the write if it would not properly incr the modified // timestamp if modified >= now { - Err(DbErrorKind::Conflict)? + return Err(DbError::conflict()); } self.session .borrow_mut() @@ -292,7 +292,7 @@ impl SpannerDb { .execute_async(&self.conn)? .one() .await?; - SyncTimestamp::from_rfc3339(result[0].get_string_value())? + sync_timestamp_from_rfc3339(result[0].get_string_value())? }; self.set_timestamp(timestamp); @@ -308,7 +308,7 @@ impl SpannerDb { self.session.borrow_mut().timestamp = Some(timestamp); } - pub(super) fn begin(&self, for_write: bool) -> Result<()> { + pub(super) fn begin(&self, for_write: bool) -> DbResult<()> { let spanner = &self.conn; let mut options = TransactionOptions::new(); if for_write { @@ -328,7 +328,7 @@ impl SpannerDb { Ok(()) } - pub(super) async fn begin_async(&self, for_write: bool) -> Result<()> { + pub(super) async fn begin_async(&self, for_write: bool) -> DbResult<()> { let spanner = &self.conn; let mut options = TransactionOptions::new(); if for_write { @@ -349,7 +349,7 @@ impl SpannerDb { } /// Return the current transaction metadata (TransactionSelector) if one is active. - fn get_transaction(&self) -> Result> { + fn get_transaction(&self) -> DbResult> { if self.session.borrow().transaction.is_none() { self.begin(true)?; } @@ -358,7 +358,7 @@ impl SpannerDb { } /// Return the current transaction metadata (TransactionSelector) if one is active. 
- async fn get_transaction_async(&self) -> Result> { + async fn get_transaction_async(&self) -> DbResult> { if self.session.borrow().transaction.is_none() { self.begin_async(true).await?; } @@ -366,7 +366,7 @@ impl SpannerDb { Ok(self.session.borrow().transaction.clone()) } - fn sql_request(&self, sql: &str) -> Result { + fn sql_request(&self, sql: &str) -> DbResult { let mut sqlr = ExecuteSqlRequest::new(); sqlr.set_sql(sql.to_owned()); if let Some(transaction) = self.get_transaction()? { @@ -375,16 +375,17 @@ impl SpannerDb { sqlr.seqno = session .execute_sql_count .try_into() - .map_err(|_| DbError::internal("seqno overflow"))?; + .map_err(|_| DbError::internal("seqno overflow".to_owned()))?; session.execute_sql_count += 1; } Ok(sqlr) } - pub(super) fn sql(&self, sql: &str) -> Result { + pub(super) fn sql(&self, sql: &str) -> DbResult { Ok(ExecuteSqlRequestBuilder::new(self.sql_request(sql)?)) } + #[allow(unused)] pub(super) fn insert(&self, table: &str, columns: &[&str], values: Vec) { let mut mutation = Mutation::new(); mutation.set_insert(self.mutation_write(table, columns, values)); @@ -395,6 +396,7 @@ impl SpannerDb { .push(mutation); } + #[allow(unused)] pub(super) fn update(&self, table: &str, columns: &[&str], values: Vec) { let mut mutation = Mutation::new(); mutation.set_update(self.mutation_write(table, columns, values)); @@ -435,7 +437,7 @@ impl SpannerDb { self.session.borrow().in_write_transaction } - pub fn commit(&self) -> Result<()> { + pub fn commit_sync(&self) -> DbResult<()> { if !self.in_write_transaction() { // read-only return Ok(()); @@ -458,11 +460,11 @@ impl SpannerDb { spanner.client.commit(&req)?; Ok(()) } else { - Err(DbError::internal("No transaction to commit"))? + Err(DbError::internal("No transaction to commit".to_owned())) } } - pub async fn commit_async(&self) -> Result<()> { + async fn commit_async(&self) -> DbResult<()> { if !self.in_write_transaction() { // read-only return Ok(()); @@ -485,11 +487,11 @@ impl SpannerDb { spanner.client.commit_async(&req)?.await?; Ok(()) } else { - Err(DbError::internal("No transaction to commit"))? + Err(DbError::internal("No transaction to commit".to_owned())) } } - pub fn rollback(&self) -> Result<()> { + pub fn rollback_sync(&self) -> DbResult<()> { if !self.in_write_transaction() { // read-only return Ok(()); @@ -503,11 +505,11 @@ impl SpannerDb { spanner.client.rollback(&req)?; Ok(()) } else { - Err(DbError::internal("No transaction to rollback"))? + Err(DbError::internal("No transaction to rollback".to_owned())) } } - pub async fn rollback_async(&self) -> Result<()> { + async fn rollback_async(&self) -> DbResult<()> { if !self.in_write_transaction() { // read-only return Ok(()); @@ -521,14 +523,14 @@ impl SpannerDb { spanner.client.rollback_async(&req)?.await?; Ok(()) } else { - Err(DbError::internal("No transaction to rollback"))? + Err(DbError::internal("No transaction to rollback".to_owned())) } } - pub async fn get_collection_timestamp_async( + async fn get_collection_timestamp_async( &self, params: params::GetCollectionTimestamp, - ) -> Result { + ) -> DbResult { let collection_id = self.get_collection_id_async(¶ms.collection).await?; if let Some(modified) = self .session @@ -560,15 +562,15 @@ impl SpannerDb { .execute_async(&self.conn)? .one_or_none() .await? 
- .ok_or(DbErrorKind::CollectionNotFound)?; - let modified = SyncTimestamp::from_rfc3339(result[0].get_string_value())?; + .ok_or_else(DbError::collection_not_found)?; + let modified = sync_timestamp_from_rfc3339(result[0].get_string_value())?; Ok(modified) } - pub async fn get_collection_timestamps_async( + async fn get_collection_timestamps_async( &self, user_id: params::GetCollectionTimestamps, - ) -> Result { + ) -> DbResult { let (sqlparams, mut sqlparam_types) = params! { "fxa_uid" => user_id.fxa_uid, "fxa_kid" => user_id.fxa_kid, @@ -594,14 +596,17 @@ impl SpannerDb { let collection_id = row[0] .get_string_value() .parse::() - .map_err(|e| DbErrorKind::Integrity(e.to_string()))?; - let modified = SyncTimestamp::from_rfc3339(row[1].get_string_value())?; + .map_err(|e| DbError::integrity(e.to_string()))?; + let modified = sync_timestamp_from_rfc3339(row[1].get_string_value())?; results.insert(collection_id, modified); } self.map_collection_names(results).await } - async fn map_collection_names(&self, by_id: HashMap) -> Result> { + async fn map_collection_names( + &self, + by_id: HashMap, + ) -> DbResult> { let mut names = self.load_collection_names(by_id.keys()).await?; by_id .into_iter() @@ -609,7 +614,7 @@ impl SpannerDb { names .remove(&id) .map(|name| (name, value)) - .ok_or_else(|| DbError::internal("load_collection_names get")) + .ok_or_else(|| DbError::internal("load_collection_names get".to_owned())) }) .collect() } @@ -617,7 +622,7 @@ impl SpannerDb { async fn load_collection_names( &self, collection_ids: impl Iterator, - ) -> Result> { + ) -> DbResult> { let (mut names, uncached) = self .coll_cache .get_names(&collection_ids.cloned().collect::>()) @@ -646,7 +651,7 @@ impl SpannerDb { let id = row[0] .get_string_value() .parse::() - .map_err(|e| DbErrorKind::Integrity(e.to_string()))?; + .map_err(|e| DbError::integrity(e.to_string()))?; let name = row[1].take_string_value(); names.insert(id, name.clone()); if !self.in_write_transaction() { @@ -658,10 +663,10 @@ impl SpannerDb { Ok(names) } - pub async fn get_collection_counts_async( + async fn get_collection_counts_async( &self, user_id: params::GetCollectionCounts, - ) -> Result { + ) -> DbResult { let (sqlparams, sqlparam_types) = params! { "fxa_uid" => user_id.fxa_uid, "fxa_kid" => user_id.fxa_kid, @@ -684,20 +689,20 @@ impl SpannerDb { let collection_id = row[0] .get_string_value() .parse::() - .map_err(|e| DbErrorKind::Integrity(e.to_string()))?; + .map_err(|e| DbError::integrity(e.to_string()))?; let count = row[1] .get_string_value() .parse::() - .map_err(|e| DbErrorKind::Integrity(e.to_string()))?; + .map_err(|e| DbError::integrity(e.to_string()))?; counts.insert(collection_id, count); } self.map_collection_names(counts).await } - pub async fn get_collection_usage_async( + async fn get_collection_usage_async( &self, user_id: params::GetCollectionUsage, - ) -> Result { + ) -> DbResult { let (sqlparams, sqlparam_types) = params! 
{ "fxa_uid" => user_id.fxa_uid, "fxa_kid" => user_id.fxa_kid @@ -720,20 +725,20 @@ impl SpannerDb { let collection_id = row[0] .get_string_value() .parse::() - .map_err(|e| DbErrorKind::Integrity(e.to_string()))?; + .map_err(|e| DbError::integrity(e.to_string()))?; let usage = row[1] .get_string_value() .parse::() - .map_err(|e| DbErrorKind::Integrity(e.to_string()))?; + .map_err(|e| DbError::integrity(e.to_string()))?; usages.insert(collection_id, usage); } self.map_collection_names(usages).await } - pub async fn get_storage_timestamp( + async fn get_storage_timestamp( &self, user_id: params::GetStorageTimestamp, - ) -> Result { + ) -> DbResult { let (sqlparams, mut sqlparam_types) = params! { "fxa_uid" => user_id.fxa_uid, "fxa_kid" => user_id.fxa_kid, @@ -754,16 +759,17 @@ impl SpannerDb { .one() .await?; if row[0].has_null_value() { - SyncTimestamp::from_i64(0) + SyncTimestamp::from_i64(0).map_err(|e| DbError::integrity(e.to_string())) } else { - SyncTimestamp::from_rfc3339(row[0].get_string_value()) + sync_timestamp_from_rfc3339(row[0].get_string_value()) } + .map_err(Into::into) } - pub async fn get_storage_usage_async( + async fn get_storage_usage_async( &self, user_id: params::GetStorageUsage, - ) -> Result { + ) -> DbResult { let (sqlparams, sqlparam_types) = params! { "fxa_uid" => user_id.fxa_uid, "fxa_kid" => user_id.fxa_kid @@ -786,17 +792,17 @@ impl SpannerDb { let usage = result[0] .get_string_value() .parse::() - .map_err(|e| DbErrorKind::Integrity(e.to_string()))?; + .map_err(|e| DbError::integrity(e.to_string()))?; Ok(usage as u64) } else { Ok(0) } } - pub async fn get_quota_usage_async( + async fn get_quota_usage_async( &self, params: params::GetQuotaUsage, - ) -> Result { + ) -> DbResult { if !self.quota.enabled { return Ok(results::GetQuotaUsage::default()); } @@ -822,31 +828,31 @@ impl SpannerDb { result[0] .get_string_value() .parse::() - .map_err(|e| DbErrorKind::Integrity(e.to_string()))? + .map_err(|e| DbError::integrity(e.to_string()))? } else { 0 }; let count = result[1] .get_string_value() .parse::() - .map_err(|e| DbErrorKind::Integrity(e.to_string()))?; + .map_err(|e| DbError::integrity(e.to_string()))?; Ok(results::GetQuotaUsage { total_bytes, count }) } else { Ok(results::GetQuotaUsage::default()) } } - pub async fn update_user_collection_quotas( + pub(super) async fn update_user_collection_quotas( &self, user: &UserIdentifier, collection_id: i32, - ) -> Result { + ) -> DbResult { // This will also update the counts in user_collections, since `update_collection_sync` // is called very early to ensure the record exists, and return the timestamp. // This will also write the tombstone if there are no records and we're explicitly // specifying a TOMBSTONE collection_id. // This function should be called after any write operation. - let timestamp = self.timestamp()?; + let timestamp = self.checked_timestamp()?; let (mut sqlparams, mut sqltypes) = params! 
{ "fxa_uid" => user.fxa_uid.clone(), "fxa_kid" => user.fxa_kid.clone(), @@ -903,12 +909,9 @@ impl SpannerDb { ); sqltypes.insert( "total_bytes".to_owned(), - crate::db::spanner::support::as_type(TypeCode::INT64), - ); - sqltypes.insert( - "count".to_owned(), - crate::db::spanner::support::as_type(TypeCode::INT64), + super::support::as_type(TypeCode::INT64), ); + sqltypes.insert("count".to_owned(), super::support::as_type(TypeCode::INT64)); "UPDATE user_collections SET modified = @modified, count = @count, @@ -965,13 +968,13 @@ impl SpannerDb { Ok(timestamp) } - async fn erect_tombstone(&self, user_id: &UserIdentifier) -> Result { + async fn erect_tombstone(&self, user_id: &UserIdentifier) -> DbResult { // Delete the old tombstone (if it exists) let (params, mut param_types) = params! { "fxa_uid" => user_id.fxa_uid.clone(), "fxa_kid" => user_id.fxa_kid.clone(), "collection_id" => TOMBSTONE, - "modified" => self.timestamp()?.as_rfc3339()? + "modified" => self.checked_timestamp()?.as_rfc3339()? }; param_types.insert("modified".to_owned(), as_type(TypeCode::TIMESTAMP)); self.sql( @@ -988,10 +991,10 @@ impl SpannerDb { .await?; // Return timestamp, because sometimes there's a delay between writing and // reading the database. - self.timestamp() + self.checked_timestamp() } - pub async fn delete_storage_async(&self, user_id: params::DeleteStorage) -> Result<()> { + async fn delete_storage_async(&self, user_id: params::DeleteStorage) -> DbResult<()> { // Also deletes child bsos/batch rows (INTERLEAVE IN PARENT // user_collections ON DELETE CASCADE) let (sqlparams, sqlparam_types) = params! { @@ -1010,17 +1013,17 @@ impl SpannerDb { Ok(()) } - pub fn timestamp(&self) -> Result { + pub fn checked_timestamp(&self) -> DbResult { self.session .borrow() .timestamp - .ok_or_else(|| DbError::internal("CURRENT_TIMESTAMP() not read yet")) + .ok_or_else(|| DbError::internal("CURRENT_TIMESTAMP() not read yet".to_owned())) } - pub async fn delete_collection_async( + async fn delete_collection_async( &self, params: params::DeleteCollection, - ) -> Result { + ) -> DbResult { // Also deletes child bsos/batch rows (INTERLEAVE IN PARENT // user_collections ON DELETE CASCADE) let collection_id = self.get_collection_id_async(¶ms.collection).await?; @@ -1059,7 +1062,7 @@ impl SpannerDb { user_id: &UserIdentifier, collection_id: i32, collection: &str, - ) -> Result { + ) -> DbResult { // NOTE: Spanner supports upserts via its InsertOrUpdate mutation but // lacks a SQL equivalent. This call could be 1 InsertOrUpdate instead // of 2 queries but would require put/post_bsos to also use mutations. @@ -1069,7 +1072,7 @@ impl SpannerDb { // Mutations don't run in the same order as ExecuteSql calls, they are // buffered on the client side and only issued to Spanner in the final // transaction Commit. - let timestamp = self.timestamp()?; + let timestamp = self.checked_timestamp()?; if !cfg!(test) && self.session.borrow().updated_collection { // No need to touch it again (except during tests where we // currently reuse Dbs for multiple requests) @@ -1130,7 +1133,7 @@ impl SpannerDb { Ok(timestamp) } - pub async fn delete_bso_async(&self, params: params::DeleteBso) -> Result { + async fn delete_bso_async(&self, params: params::DeleteBso) -> DbResult { let collection_id = self.get_collection_id_async(¶ms.collection).await?; let user_id = params.user_id.clone(); let (sqlparams, sqlparam_types) = params! 
{ @@ -1152,7 +1155,7 @@ impl SpannerDb { .execute_dml_async(&self.conn) .await?; if affected_rows == 0 { - Err(DbErrorKind::BsoNotFound)? + Err(DbError::bso_not_found()) } else { self.metrics.incr("storage.spanner.delete_bso"); Ok(self @@ -1161,10 +1164,7 @@ impl SpannerDb { } } - pub async fn delete_bsos_async( - &self, - params: params::DeleteBsos, - ) -> Result { + async fn delete_bsos_async(&self, params: params::DeleteBsos) -> DbResult { let user_id = params.user_id.clone(); let collection_id = self.get_collection_id_async(¶ms.collection).await?; @@ -1197,7 +1197,7 @@ impl SpannerDb { &self, query_str: &str, params: params::GetBsos, - ) -> Result { + ) -> DbResult { let mut query = query_str.to_owned(); let (mut sqlparams, mut sqlparam_types) = params! { "fxa_uid" => params.user_id.fxa_uid, @@ -1353,7 +1353,7 @@ impl SpannerDb { */ } - pub async fn get_bsos_async(&self, params: params::GetBsos) -> Result { + async fn get_bsos_async(&self, params: params::GetBsos) -> DbResult { let query = "\ SELECT bso_id, sortindex, payload, modified, expiry FROM bsos @@ -1393,7 +1393,7 @@ impl SpannerDb { }) } - pub async fn get_bso_ids_async(&self, params: params::GetBsos) -> Result { + async fn get_bso_ids_async(&self, params: params::GetBsos) -> DbResult { let limit = params.limit.map(i64::from).unwrap_or(-1); let params::Offset { offset, timestamp } = params.offset.clone().unwrap_or_default(); let sort = params.sort; @@ -1412,7 +1412,7 @@ impl SpannerDb { while let Some(row) = stream.next_async().await { let mut row = row?; ids.push(row[0].take_string_value()); - modifieds.push(SyncTimestamp::from_rfc3339(row[1].get_string_value())?.as_i64()); + modifieds.push(sync_timestamp_from_rfc3339(row[1].get_string_value())?.as_i64()); } // NOTE: when bsos.len() == 0, server-syncstorage (the Python impl) // makes an additional call to get_collection_timestamp to potentially @@ -1435,7 +1435,7 @@ impl SpannerDb { }) } - pub async fn get_bso_async(&self, params: params::GetBso) -> Result> { + async fn get_bso_async(&self, params: params::GetBso) -> DbResult> { let collection_id = self.get_collection_id_async(¶ms.collection).await?; let (sqlparams, sqlparam_types) = params! { "fxa_uid" => params.user_id.fxa_uid, @@ -1461,10 +1461,10 @@ impl SpannerDb { .transpose() } - pub async fn get_bso_timestamp_async( + async fn get_bso_timestamp_async( &self, params: params::GetBsoTimestamp, - ) -> Result { + ) -> DbResult { let collection_id = self.get_collection_id_async(¶ms.collection).await?; let (sqlparams, sqlparam_types) = params! 
{ "fxa_uid" => params.user_id.fxa_uid, @@ -1489,13 +1489,15 @@ impl SpannerDb { .one_or_none() .await?; if let Some(result) = result { - SyncTimestamp::from_rfc3339(result[0].get_string_value()) + sync_timestamp_from_rfc3339(result[0].get_string_value()) } else { - SyncTimestamp::from_i64(0) + SyncTimestamp::from_i64(0).map_err(|e| DbError::integrity(e.to_string())) } + .map_err(Into::into) } - pub async fn put_bso_async(&self, params: params::PutBso) -> Result { + #[allow(unused)] + async fn put_bso_async(&self, params: params::PutBso) -> DbResult { let bsos = vec![params::PostCollectionBso { id: params.id, sortindex: params.sortindex, @@ -1515,7 +1517,7 @@ impl SpannerDb { Ok(result.modified) } - pub async fn post_bsos_async(&self, params: params::PostBsos) -> Result { + async fn post_bsos_async(&self, params: params::PostBsos) -> DbResult { let user_id = params.user_id; let collection_id = self .get_or_create_collection_id_async(¶ms.collection) @@ -1581,11 +1583,10 @@ impl SpannerDb { "⚠️Attempted to load too much data into Spanner: {:?} bytes", load_size ); - return Err(DbErrorKind::SpannerTooLarge(format!( + return Err(DbError::too_large(format!( "Committed data too large: {}", load_size - )) - .into()); + ))); } if !inserts.is_empty() { @@ -1621,7 +1622,7 @@ impl SpannerDb { Ok(result) } - async fn check_async(&self) -> Result { + async fn check_async(&self) -> DbResult { // TODO: is there a better check than just fetching UTC? self.sql("SELECT CURRENT_TIMESTAMP()")? .execute_async(&self.conn)? @@ -1635,15 +1636,15 @@ impl SpannerDb { let mut tags = HashMap::default(); tags.insert("collection".to_owned(), collection.to_owned()); self.metrics.incr_with_tags("storage.quota.at_limit", tags); - DbErrorKind::Quota.into() + DbError::quota() } - pub async fn check_quota( + pub(super) async fn check_quota( &self, user_id: &UserIdentifier, collection: &str, collection_id: i32, - ) -> Result> { + ) -> DbResult> { // duplicate quota trap in test func below. if !self.quota.enabled { return Ok(None); @@ -1667,8 +1668,9 @@ impl SpannerDb { // NOTE: Currently this put_bso_async_test impl. is only used during db tests, // see above for the non-tests version - pub async fn put_bso_async_test(&self, bso: params::PutBso) -> Result { - use syncserver_db_common::util::to_rfc3339; + #[allow(unused)] + async fn put_bso_async_test(&self, bso: params::PutBso) -> DbResult { + use syncstorage_db_common::util::to_rfc3339; let collection_id = self .get_or_create_collection_id_async(&bso.collection) .await?; @@ -1685,7 +1687,7 @@ impl SpannerDb { // prewarm the collections table by ensuring that the row is added if not present. self.update_collection_async(&bso.user_id, collection_id, &bso.collection) .await?; - let timestamp = self.timestamp()?; + let timestamp = self.checked_timestamp()?; let result = self .sql( @@ -1841,12 +1843,13 @@ impl SpannerDb { // NOTE: Currently this post_bso_async_test impl. 
is only used during db tests, // see above for the non-tests version - pub async fn post_bsos_async_test(&self, input: params::PostBsos) -> Result { + #[allow(unused)] + async fn post_bsos_async_test(&self, input: params::PostBsos) -> DbResult { let collection_id = self .get_or_create_collection_id_async(&input.collection) .await?; let mut result = results::PostBsos { - modified: self.timestamp()?, + modified: self.checked_timestamp()?, success: Default::default(), failed: input.failed, }; @@ -1870,28 +1873,30 @@ impl SpannerDb { } } -impl<'a> Db<'a> for SpannerDb { - fn commit(&self) -> DbFuture<'_, ()> { +impl Db for SpannerDb { + type Error = DbError; + + fn commit(&self) -> DbFuture<'_, (), Self::Error> { let db = self.clone(); Box::pin(async move { db.commit_async().map_err(Into::into).await }) } - fn rollback(&self) -> DbFuture<'_, ()> { + fn rollback(&self) -> DbFuture<'_, (), Self::Error> { let db = self.clone(); Box::pin(async move { db.rollback_async().map_err(Into::into).await }) } - fn lock_for_read(&self, param: params::LockCollection) -> DbFuture<'_, ()> { + fn lock_for_read(&self, param: params::LockCollection) -> DbFuture<'_, (), Self::Error> { let db = self.clone(); Box::pin(async move { db.lock_for_read_async(param).map_err(Into::into).await }) } - fn lock_for_write(&self, param: params::LockCollection) -> DbFuture<'_, ()> { + fn lock_for_write(&self, param: params::LockCollection) -> DbFuture<'_, (), Self::Error> { let db = self.clone(); Box::pin(async move { db.lock_for_write_async(param).map_err(Into::into).await }) } - fn begin(&self, for_write: bool) -> DbFuture<'_, ()> { + fn begin(&self, for_write: bool) -> DbFuture<'_, (), Self::Error> { let db = self.clone(); Box::pin(async move { db.begin_async(for_write).map_err(Into::into).await }) } @@ -1899,7 +1904,7 @@ impl<'a> Db<'a> for SpannerDb { fn get_collection_timestamp( &self, param: params::GetCollectionTimestamp, - ) -> DbFuture<'_, results::GetCollectionTimestamp> { + ) -> DbFuture<'_, results::GetCollectionTimestamp, Self::Error> { let db = self.clone(); Box::pin(async move { db.get_collection_timestamp_async(param) @@ -1911,7 +1916,7 @@ impl<'a> Db<'a> for SpannerDb { fn get_storage_timestamp( &self, param: params::GetStorageTimestamp, - ) -> DbFuture<'_, results::GetStorageTimestamp> { + ) -> DbFuture<'_, results::GetStorageTimestamp, Self::Error> { let db = self.clone(); Box::pin(async move { db.get_storage_timestamp(param).map_err(Into::into).await }) } @@ -1919,16 +1924,12 @@ impl<'a> Db<'a> for SpannerDb { fn delete_collection( &self, param: params::DeleteCollection, - ) -> DbFuture<'_, results::DeleteCollection> { + ) -> DbFuture<'_, results::DeleteCollection, Self::Error> { let db = self.clone(); Box::pin(async move { db.delete_collection_async(param).map_err(Into::into).await }) } - fn box_clone(&self) -> Box> { - Box::new(self.clone()) - } - - fn check(&self) -> DbFuture<'_, results::Check> { + fn check(&self) -> DbFuture<'_, results::Check, Self::Error> { let db = self.clone(); Box::pin(async move { db.check_async().map_err(Into::into).await }) } @@ -1936,7 +1937,7 @@ impl<'a> Db<'a> for SpannerDb { fn get_collection_timestamps( &self, user_id: params::GetCollectionTimestamps, - ) -> DbFuture<'_, results::GetCollectionTimestamps> { + ) -> DbFuture<'_, results::GetCollectionTimestamps, Self::Error> { let db = self.clone(); Box::pin(async move { db.get_collection_timestamps_async(user_id) @@ -1948,7 +1949,7 @@ impl<'a> Db<'a> for SpannerDb { fn get_collection_counts( &self, user_id: 
params::GetCollectionCounts, - ) -> DbFuture<'_, results::GetCollectionCounts> { + ) -> DbFuture<'_, results::GetCollectionCounts, Self::Error> { let db = self.clone(); Box::pin(async move { db.get_collection_counts_async(user_id) @@ -1960,7 +1961,7 @@ impl<'a> Db<'a> for SpannerDb { fn get_collection_usage( &self, user_id: params::GetCollectionUsage, - ) -> DbFuture<'_, results::GetCollectionUsage> { + ) -> DbFuture<'_, results::GetCollectionUsage, Self::Error> { let db = self.clone(); Box::pin(async move { db.get_collection_usage_async(user_id) @@ -1972,7 +1973,7 @@ impl<'a> Db<'a> for SpannerDb { fn get_storage_usage( &self, param: params::GetStorageUsage, - ) -> DbFuture<'_, results::GetStorageUsage> { + ) -> DbFuture<'_, results::GetStorageUsage, Self::Error> { let db = self.clone(); Box::pin(async move { db.get_storage_usage_async(param).map_err(Into::into).await }) } @@ -1980,37 +1981,49 @@ impl<'a> Db<'a> for SpannerDb { fn get_quota_usage( &self, param: params::GetQuotaUsage, - ) -> DbFuture<'_, results::GetQuotaUsage> { + ) -> DbFuture<'_, results::GetQuotaUsage, Self::Error> { let db = self.clone(); Box::pin(async move { db.get_quota_usage_async(param).map_err(Into::into).await }) } - fn delete_storage(&self, param: params::DeleteStorage) -> DbFuture<'_, results::DeleteStorage> { + fn delete_storage( + &self, + param: params::DeleteStorage, + ) -> DbFuture<'_, results::DeleteStorage, Self::Error> { let db = self.clone(); Box::pin(async move { db.delete_storage_async(param).map_err(Into::into).await }) } - fn delete_bso(&self, param: params::DeleteBso) -> DbFuture<'_, results::DeleteBso> { + fn delete_bso( + &self, + param: params::DeleteBso, + ) -> DbFuture<'_, results::DeleteBso, Self::Error> { let db = self.clone(); Box::pin(async move { db.delete_bso_async(param).map_err(Into::into).await }) } - fn delete_bsos(&self, param: params::DeleteBsos) -> DbFuture<'_, results::DeleteBsos> { + fn delete_bsos( + &self, + param: params::DeleteBsos, + ) -> DbFuture<'_, results::DeleteBsos, Self::Error> { let db = self.clone(); Box::pin(async move { db.delete_bsos_async(param).map_err(Into::into).await }) } - fn get_bsos(&self, param: params::GetBsos) -> DbFuture<'_, results::GetBsos> { + fn get_bsos(&self, param: params::GetBsos) -> DbFuture<'_, results::GetBsos, Self::Error> { let db = self.clone(); Box::pin(async move { db.get_bsos_async(param).map_err(Into::into).await }) } - fn get_bso_ids(&self, param: params::GetBsoIds) -> DbFuture<'_, results::GetBsoIds> { + fn get_bso_ids( + &self, + param: params::GetBsoIds, + ) -> DbFuture<'_, results::GetBsoIds, Self::Error> { let db = self.clone(); Box::pin(async move { db.get_bso_ids_async(param).map_err(Into::into).await }) } - fn get_bso(&self, param: params::GetBso) -> DbFuture<'_, Option> { + fn get_bso(&self, param: params::GetBso) -> DbFuture<'_, Option, Self::Error> { let db = self.clone(); Box::pin(async move { db.get_bso_async(param).map_err(Into::into).await }) } @@ -2018,41 +2031,47 @@ impl<'a> Db<'a> for SpannerDb { fn get_bso_timestamp( &self, param: params::GetBsoTimestamp, - ) -> DbFuture<'_, results::GetBsoTimestamp> { + ) -> DbFuture<'_, results::GetBsoTimestamp, Self::Error> { let db = self.clone(); Box::pin(async move { db.get_bso_timestamp_async(param).map_err(Into::into).await }) } #[cfg(not(test))] - fn put_bso(&self, param: params::PutBso) -> DbFuture<'_, results::PutBso> { + fn put_bso(&self, param: params::PutBso) -> DbFuture<'_, results::PutBso, Self::Error> { let db = self.clone(); Box::pin(async move { 
db.put_bso_async(param).map_err(Into::into).await }) } #[cfg(test)] - fn put_bso(&self, param: params::PutBso) -> DbFuture<'_, results::PutBso> { + fn put_bso(&self, param: params::PutBso) -> DbFuture<'_, results::PutBso, Self::Error> { let db = self.clone(); Box::pin(async move { db.put_bso_async_test(param).map_err(Into::into).await }) } #[cfg(not(test))] - fn post_bsos(&self, param: params::PostBsos) -> DbFuture<'_, results::PostBsos> { + fn post_bsos(&self, param: params::PostBsos) -> DbFuture<'_, results::PostBsos, Self::Error> { let db = self.clone(); Box::pin(async move { db.post_bsos_async(param).map_err(Into::into).await }) } #[cfg(test)] - fn post_bsos(&self, param: params::PostBsos) -> DbFuture<'_, results::PostBsos> { + fn post_bsos(&self, param: params::PostBsos) -> DbFuture<'_, results::PostBsos, Self::Error> { let db = self.clone(); Box::pin(async move { db.post_bsos_async_test(param).map_err(Into::into).await }) } - fn create_batch(&self, param: params::CreateBatch) -> DbFuture<'_, results::CreateBatch> { + fn create_batch( + &self, + param: params::CreateBatch, + ) -> DbFuture<'_, results::CreateBatch, Self::Error> { let db = self.clone(); Box::pin(async move { batch::create_async(&db, param).map_err(Into::into).await }) } - fn validate_batch(&self, param: params::ValidateBatch) -> DbFuture<'_, results::ValidateBatch> { + fn validate_batch( + &self, + param: params::ValidateBatch, + ) -> DbFuture<'_, results::ValidateBatch, Self::Error> { let db = self.clone(); Box::pin(async move { batch::validate_async(&db, param).map_err(Into::into).await }) } @@ -2060,29 +2079,35 @@ impl<'a> Db<'a> for SpannerDb { fn append_to_batch( &self, param: params::AppendToBatch, - ) -> DbFuture<'_, results::AppendToBatch> { + ) -> DbFuture<'_, results::AppendToBatch, Self::Error> { let db = self.clone(); Box::pin(async move { batch::append_async(&db, param).map_err(Into::into).await }) } - fn get_batch(&self, param: params::GetBatch) -> DbFuture<'_, Option> { + fn get_batch( + &self, + param: params::GetBatch, + ) -> DbFuture<'_, Option, Self::Error> { let db = self.clone(); Box::pin(async move { batch::get_async(&db, param).map_err(Into::into).await }) } - fn commit_batch(&self, param: params::CommitBatch) -> DbFuture<'_, results::CommitBatch> { + fn commit_batch( + &self, + param: params::CommitBatch, + ) -> DbFuture<'_, results::CommitBatch, Self::Error> { let db = self.clone(); Box::pin(async move { batch::commit_async(&db, param).map_err(Into::into).await }) } - fn get_collection_id(&self, name: String) -> DbFuture<'_, i32> { + fn get_collection_id(&self, name: String) -> DbFuture<'_, i32, Self::Error> { let db = self.clone(); Box::pin(async move { db.get_collection_id_async(&name).map_err(Into::into).await }) } fn get_connection_info(&self) -> results::ConnectionInfo { let session = self.conn.session.clone(); - let now = now(); + let now = super::now(); results::ConnectionInfo { spanner_age: session .create_time @@ -2098,12 +2123,15 @@ impl<'a> Db<'a> for SpannerDb { } } - fn create_collection(&self, name: String) -> DbFuture<'_, i32> { + fn create_collection(&self, name: String) -> DbFuture<'_, i32, Self::Error> { let db = self.clone(); Box::pin(async move { db.create_collection_async(&name).map_err(Into::into).await }) } - fn update_collection(&self, param: params::UpdateCollection) -> DbFuture<'_, SyncTimestamp> { + fn update_collection( + &self, + param: params::UpdateCollection, + ) -> DbFuture<'_, SyncTimestamp, Self::Error> { let db = self.clone(); Box::pin(async move { 
db.update_collection_async(¶m.user_id, param.collection_id, ¶m.collection) @@ -2113,7 +2141,7 @@ impl<'a> Db<'a> for SpannerDb { } fn timestamp(&self) -> SyncTimestamp { - self.timestamp() + self.checked_timestamp() .expect("set_timestamp() not called yet for SpannerDb") } @@ -2121,12 +2149,15 @@ impl<'a> Db<'a> for SpannerDb { SpannerDb::set_timestamp(self, timestamp) } - fn delete_batch(&self, param: params::DeleteBatch) -> DbFuture<'_, results::DeleteBatch> { + fn delete_batch( + &self, + param: params::DeleteBatch, + ) -> DbFuture<'_, results::DeleteBatch, Self::Error> { let db = self.clone(); Box::pin(async move { batch::delete_async(&db, param).map_err(Into::into).await }) } - fn clear_coll_cache(&self) -> DbFuture<'_, ()> { + fn clear_coll_cache(&self) -> DbFuture<'_, (), Self::Error> { let db = self.clone(); Box::pin(async move { db.coll_cache.clear().await; @@ -2141,4 +2172,12 @@ impl<'a> Db<'a> for SpannerDb { enforced, }; } + + fn box_clone(&self) -> Box> { + Box::new(self.clone()) + } +} + +fn sync_timestamp_from_rfc3339(val: &str) -> Result { + SyncTimestamp::from_rfc3339(val).map_err(|e| DbError::integrity(e.to_string())) } diff --git a/syncserver/src/db/spanner/pool.rs b/syncstorage-spanner/src/pool.rs similarity index 75% rename from syncserver/src/db/spanner/pool.rs rename to syncstorage-spanner/src/pool.rs index f1d474d6..35de15dd 100644 --- a/syncserver/src/db/spanner/pool.rs +++ b/syncstorage-spanner/src/pool.rs @@ -1,32 +1,20 @@ use std::{collections::HashMap, fmt, sync::Arc, time::Duration}; use async_trait::async_trait; -use bb8::ErrorSink; -use syncserver_db_common::{error::DbError, Db, DbPool, GetPoolState, PoolState, STD_COLLS}; +use syncserver_common::{BlockingThreadpool, Metrics}; +use syncserver_db_common::{GetPoolState, PoolState}; +use syncstorage_db_common::{Db, DbPool, STD_COLLS}; use syncstorage_settings::{Quota, Settings}; use tokio::sync::RwLock; -use crate::db::BlockingThreadpool; -use crate::server::metrics::Metrics; - -pub use super::manager::Conn; +pub(super) use super::manager::Conn; use super::{ + error::DbError, manager::{SpannerSession, SpannerSessionManager}, - models::Result, models::SpannerDb, + DbResult, }; -embed_migrations!(); - -/// Run the diesel embedded migrations -/// -/// Mysql DDL statements implicitly commit which could disrupt MysqlPool's -/// begin_test_transaction during tests. So this runs on its own separate conn. -//pub fn run_embedded_migrations(settings: &Settings) -> Result<()> { -// let conn = MysqlConnection::establish(&settings.database_url)?; -// Ok(embedded_migrations::run(&conn)?) -//} - #[derive(Clone)] pub struct SpannerDbPool { /// Pool of db connections @@ -40,20 +28,20 @@ pub struct SpannerDbPool { impl SpannerDbPool { /// Creates a new pool of Spanner db connections. 
- pub async fn new( + pub fn new( settings: &Settings, metrics: &Metrics, blocking_threadpool: Arc, - ) -> Result { + ) -> DbResult { //run_embedded_migrations(settings)?; - Self::new_without_migrations(settings, metrics, blocking_threadpool).await + Self::new_without_migrations(settings, metrics, blocking_threadpool) } - pub async fn new_without_migrations( + pub fn new_without_migrations( settings: &Settings, metrics: &Metrics, blocking_threadpool: Arc, - ) -> Result { + ) -> DbResult { let max_size = settings.database_pool_max_size as usize; let wait = settings .database_pool_connection_timeout @@ -78,11 +66,11 @@ impl SpannerDbPool { }) } - pub async fn get_async(&self) -> Result { + pub async fn get_async(&self) -> DbResult { let conn = self.pool.get().await.map_err(|e| match e { deadpool::managed::PoolError::Backend(dbe) => dbe, deadpool::managed::PoolError::Timeout(timeout_type) => { - DbError::internal(&format!("deadpool Timeout: {:?}", timeout_type)) + DbError::internal(format!("deadpool Timeout: {:?}", timeout_type)) } })?; Ok(SpannerDb::new( @@ -96,21 +84,23 @@ impl SpannerDbPool { #[async_trait] impl DbPool for SpannerDbPool { - async fn get<'a>(&'a self) -> Result>> { + type Error = DbError; + + async fn get(&self) -> DbResult>> { let mut metrics = self.metrics.clone(); metrics.start_timer("storage.spanner.get_pool", None); self.get_async() .await - .map(|db| Box::new(db) as Box>) + .map(|db| Box::new(db) as Box>) .map_err(Into::into) } - fn validate_batch_id(&self, id: String) -> Result<()> { - super::batch::validate_batch_id(&id) + fn validate_batch_id(&self, id: String) -> DbResult<()> { + super::batch::validate_batch_id(&id).map_err(Into::into) } - fn box_clone(&self) -> Box { + fn box_clone(&self) -> Box> { Box::new(self.clone()) } } @@ -130,7 +120,7 @@ impl fmt::Debug for SpannerDbPool { } #[derive(Debug)] -pub struct CollectionCache { +pub(super) struct CollectionCache { pub by_name: RwLock>, pub by_id: RwLock>, } @@ -194,19 +184,3 @@ impl Default for CollectionCache { } } } - -/// Logs internal bb8 errors -#[derive(Debug, Clone, Copy)] -pub struct LoggingErrorSink; - -impl ErrorSink for LoggingErrorSink { - fn sink(&self, e: E) { - error!("bb8 Error: {}", e); - let event = sentry::event_from_error(&e); - sentry::capture_event(event); - } - - fn boxed_clone(&self) -> Box> { - Box::new(*self) - } -} diff --git a/syncserver/src/db/spanner/schema.ddl b/syncstorage-spanner/src/schema.ddl similarity index 100% rename from syncserver/src/db/spanner/schema.ddl rename to syncstorage-spanner/src/schema.ddl diff --git a/syncserver/src/db/spanner/support.rs b/syncstorage-spanner/src/support.rs similarity index 87% rename from syncserver/src/db/spanner/support.rs rename to syncstorage-spanner/src/support.rs index 5abeddcc..3fac678f 100644 --- a/syncserver/src/db/spanner/support.rs +++ b/syncstorage-spanner/src/support.rs @@ -1,7 +1,6 @@ use std::{ collections::{HashMap, VecDeque}, mem, - result::Result as StdResult, }; use futures::stream::{StreamExt, StreamFuture}; @@ -15,15 +14,11 @@ use protobuf::{ well_known_types::{ListValue, NullValue, Struct, Value}, RepeatedField, }; -use syncserver_db_common::{ - error::{DbError, DbErrorKind}, - params, results, - util::to_rfc3339, - util::SyncTimestamp, - UserIdentifier, DEFAULT_BSO_TTL, +use syncstorage_db_common::{ + params, results, util::to_rfc3339, util::SyncTimestamp, UserIdentifier, DEFAULT_BSO_TTL, }; -use super::{models::Result, pool::Conn}; +use super::{error::DbError, pool::Conn, DbResult}; pub trait IntoSpannerValue { const 
TYPE_CODE: TypeCode; @@ -169,7 +164,7 @@ impl ExecuteSqlRequestBuilder { } /// Execute a SQL read statement but return a non-blocking streaming result - pub fn execute_async(self, conn: &Conn) -> Result { + pub fn execute_async(self, conn: &Conn) -> DbResult { let stream = conn .client .execute_streaming_sql(&self.prepare_request(conn))?; @@ -177,7 +172,7 @@ impl ExecuteSqlRequestBuilder { } /// Execute a DML statement, returning the exact count of modified rows - pub async fn execute_dml_async(self, conn: &Conn) -> Result { + pub async fn execute_dml_async(self, conn: &Conn) -> DbResult { let rs = conn .client .execute_sql_async(&self.prepare_request(conn))? @@ -230,20 +225,24 @@ impl StreamedResultSetAsync { } } - pub async fn one(&mut self) -> Result> { + pub async fn one(&mut self) -> DbResult> { if let Some(result) = self.one_or_none().await? { Ok(result) } else { - Err(DbError::internal("No rows matched the given query."))? + Err(DbError::internal( + "No rows matched the given query.".to_owned(), + )) } } - pub async fn one_or_none(&mut self) -> Result>> { + pub async fn one_or_none(&mut self) -> DbResult>> { let result = self.next_async().await; if result.is_none() { Ok(None) } else if self.next_async().await.is_some() { - Err(DbError::internal("Expected one result; got more."))? + Err(DbError::internal( + "Expected one result; got more.".to_owned(), + )) } else { result.transpose() } @@ -252,7 +251,7 @@ impl StreamedResultSetAsync { /// Pull and process the next values from the Stream /// /// Returns false when the stream is finished - async fn consume_next(&mut self) -> Result { + async fn consume_next(&mut self) -> DbResult { let (result, stream) = self .stream .take() @@ -286,9 +285,9 @@ impl StreamedResultSetAsync { let fields = self.fields(); let current_row_i = self.current_row.len(); if fields.len() <= current_row_i { - Err(DbErrorKind::Integrity( + return Err(DbError::integrity( "Invalid PartialResultSet fields".to_owned(), - ))?; + )); } let field = &fields[current_row_i]; values[0] = merge_by_type(pending_chunk, &values[0], field.get_field_type())?; @@ -314,7 +313,7 @@ impl StreamedResultSetAsync { // We could implement Stream::poll_next instead of this, but // this is easier for now and we can refactor into the trait later - pub async fn next_async(&mut self) -> Option>> { + pub async fn next_async(&mut self) -> Option>> { while self.rows.is_empty() { match self.consume_next().await { Ok(true) => (), @@ -329,7 +328,7 @@ impl StreamedResultSetAsync { } } -fn merge_by_type(lhs: Value, rhs: &Value, field_type: &Type) -> Result { +fn merge_by_type(lhs: Value, rhs: &Value, field_type: &Type) -> DbResult { // We only support merging basic string types as that's all we currently use. // The python client also supports: float64, array, struct. The go client // only additionally supports array (claiming structs are only returned as @@ -344,25 +343,28 @@ fn merge_by_type(lhs: Value, rhs: &Value, field_type: &Type) -> Result { } } -fn unsupported_merge(field_type: &Type) -> Result { - Err(DbError::internal(&format!( +fn unsupported_merge(field_type: &Type) -> DbResult { + Err(DbError::internal(format!( "merge not supported, type: {:?}", field_type ))) } -fn merge_string(mut lhs: Value, rhs: &Value) -> Result { +fn merge_string(mut lhs: Value, rhs: &Value) -> DbResult { if !lhs.has_string_value() || !rhs.has_string_value() { - Err(DbError::internal("merge_string has no string value"))? 
+ return Err(DbError::internal( + "merge_string has no string value".to_owned(), + )); } let mut merged = lhs.take_string_value(); merged.push_str(rhs.get_string_value()); Ok(merged.into_spanner_value()) } -pub fn bso_from_row(mut row: Vec) -> Result { +pub fn bso_from_row(mut row: Vec) -> DbResult { let modified_string = &row[3].get_string_value(); - let modified = SyncTimestamp::from_rfc3339(modified_string)?; + let modified = SyncTimestamp::from_rfc3339(modified_string) + .map_err(|e| DbError::integrity(e.to_string()))?; Ok(results::GetBso { id: row[0].take_string_value(), sortindex: if row[1].has_null_value() { @@ -372,12 +374,14 @@ pub fn bso_from_row(mut row: Vec) -> Result { row[1] .get_string_value() .parse::() - .map_err(|e| DbErrorKind::Integrity(e.to_string()))?, + .map_err(|e| DbError::integrity(e.to_string()))?, ) }, payload: row[2].take_string_value(), modified, - expiry: SyncTimestamp::from_rfc3339(row[4].get_string_value())?.as_i64(), + expiry: SyncTimestamp::from_rfc3339(row[4].get_string_value()) + .map_err(|e| DbError::integrity(e.to_string()))? + .as_i64(), }) } @@ -386,7 +390,7 @@ pub fn bso_to_insert_row( collection_id: i32, bso: params::PostCollectionBso, now: SyncTimestamp, -) -> Result { +) -> DbResult { let sortindex = bso .sortindex .map(|sortindex| sortindex.into_spanner_value()) @@ -413,7 +417,7 @@ pub fn bso_to_update_row( collection_id: i32, bso: params::PostCollectionBso, now: SyncTimestamp, -) -> Result<(Vec<&'static str>, ListValue)> { +) -> DbResult<(Vec<&'static str>, ListValue)> { let mut columns = vec!["fxa_uid", "fxa_kid", "collection_id", "bso_id"]; let mut values = vec![ user_id.fxa_uid.clone().into_spanner_value(), @@ -454,10 +458,10 @@ pub struct MapAndThenIterator { impl Iterator for MapAndThenIterator where - F: FnMut(A) -> StdResult, - I: Iterator>, + F: FnMut(A) -> Result, + I: Iterator>, { - type Item = StdResult; + type Item = Result; fn next(&mut self) -> Option { self.iter.next().map(|result| result.and_then(&mut self.f)) @@ -466,13 +470,13 @@ where pub trait MapAndThenTrait { /// Return an iterator adaptor that applies the provided closure to every - /// Result::Ok value. Result::Err values are unchanged. + /// DbResult::Ok value. DbResult::Err values are unchanged. /// /// The closure can be used for control flow based on result values fn map_and_then(self, func: F) -> MapAndThenIterator where - Self: Sized + Iterator>, - F: FnMut(A) -> StdResult, + Self: Sized + Iterator>, + F: FnMut(A) -> Result, { MapAndThenIterator { iter: self, @@ -481,4 +485,4 @@ pub trait MapAndThenTrait { } } -impl MapAndThenTrait for I where I: Sized + Iterator> {} +impl MapAndThenTrait for I where I: Sized + Iterator> {} diff --git a/tokenserver-common/Cargo.toml b/tokenserver-common/Cargo.toml index 1847cf11..3f296c92 100644 --- a/tokenserver-common/Cargo.toml +++ b/tokenserver-common/Cargo.toml @@ -9,5 +9,4 @@ backtrace = "0.3.61" serde = "1.0" serde_json = { version = "1.0", features = ["arbitrary_precision"] } syncserver-common = { path = "../syncserver-common" } -syncserver-db-common = { path = "../syncserver-db-common" } thiserror = "1.0.26" diff --git a/tokenserver-common/src/error.rs b/tokenserver-common/src/error.rs index cab5ce00..1f7dcf31 100644 --- a/tokenserver-common/src/error.rs +++ b/tokenserver-common/src/error.rs @@ -7,8 +7,9 @@ use serde::{ Serialize, }; use syncserver_common::{InternalError, ReportableError}; -use syncserver_db_common::error::DbError; +/// An error type that represents application-specific errors to Tokenserver. 
This error is not +/// used to represent database-related errors; database-related errors have their own type. #[derive(Clone, Debug)] pub struct TokenserverError { pub status: &'static str, @@ -249,25 +250,6 @@ impl Serialize for TokenserverError { } } -impl From for TokenserverError { - fn from(db_error: DbError) -> Self { - TokenserverError { - description: db_error.to_string(), - context: db_error.to_string(), - backtrace: Box::new(db_error.backtrace), - http_status: if db_error.status.is_server_error() { - // Use the status code from the DbError if it already suggests an internal error; - // it might be more specific than `StatusCode::SERVICE_UNAVAILABLE` - db_error.status - } else { - StatusCode::SERVICE_UNAVAILABLE - }, - // An unhandled DbError in the Tokenserver code is an internal error - ..TokenserverError::internal_error() - } - } -} - impl From for HttpResponse { fn from(inner: TokenserverError) -> Self { ResponseError::error_response(&inner) diff --git a/tokenserver-common/src/lib.rs b/tokenserver-common/src/lib.rs index ab76d983..c03ea329 100644 --- a/tokenserver-common/src/lib.rs +++ b/tokenserver-common/src/lib.rs @@ -1,7 +1,9 @@ -pub mod error; +mod error; use serde::{Deserialize, Serialize}; +pub use error::{ErrorLocation, TokenType, TokenserverError}; + #[derive(Clone, Copy, Debug, Deserialize, Eq, PartialEq, Serialize)] pub enum NodeType { #[serde(rename = "mysql")] diff --git a/tokenserver-db/Cargo.toml b/tokenserver-db/Cargo.toml new file mode 100644 index 00000000..cdfb24df --- /dev/null +++ b/tokenserver-db/Cargo.toml @@ -0,0 +1,28 @@ +[package] +name = "tokenserver-db" +version = "0.12.3" +edition = "2021" + +[dependencies] +async-trait = "0.1.40" +backtrace = "0.3.61" +diesel = { version = "1.4", features = ["mysql", "r2d2"] } +diesel_logger = "0.1.1" +diesel_migrations = { version = "1.4.0", features = ["mysql"] } +futures = { version = "0.3", features = ["compat"] } +http = "0.2.5" +serde = "1.0" +serde_derive = "1.0" +serde_json = { version = "1.0", features = ["arbitrary_precision"] } +slog-scope = "4.3" +syncserver-common = { path = "../syncserver-common" } +syncserver-db-common = { path = "../syncserver-db-common" } +thiserror = "1.0.26" +tokenserver-common = { path = "../tokenserver-common" } +tokenserver-settings = { path = "../tokenserver-settings" } +# pinning to 0.2.4 due to high number of dependencies (actix, bb8, deadpool, etc.) 
+tokio = { version = "0.2.4", features = ["macros", "sync"] } + +[dev-dependencies] +syncserver-settings = { path = "../syncserver-settings" } +env_logger = "0.9" diff --git a/syncserver/src/tokenserver/migrations/2021-07-16-001122_init/down.sql b/tokenserver-db/migrations/2021-07-16-001122_init/down.sql similarity index 100% rename from syncserver/src/tokenserver/migrations/2021-07-16-001122_init/down.sql rename to tokenserver-db/migrations/2021-07-16-001122_init/down.sql diff --git a/syncserver/src/tokenserver/migrations/2021-07-16-001122_init/up.sql b/tokenserver-db/migrations/2021-07-16-001122_init/up.sql similarity index 100% rename from syncserver/src/tokenserver/migrations/2021-07-16-001122_init/up.sql rename to tokenserver-db/migrations/2021-07-16-001122_init/up.sql diff --git a/syncserver/src/tokenserver/migrations/2021-08-03-234845_populate_services/down.sql b/tokenserver-db/migrations/2021-08-03-234845_populate_services/down.sql similarity index 100% rename from syncserver/src/tokenserver/migrations/2021-08-03-234845_populate_services/down.sql rename to tokenserver-db/migrations/2021-08-03-234845_populate_services/down.sql diff --git a/syncserver/src/tokenserver/migrations/2021-08-03-234845_populate_services/up.sql b/tokenserver-db/migrations/2021-08-03-234845_populate_services/up.sql similarity index 100% rename from syncserver/src/tokenserver/migrations/2021-08-03-234845_populate_services/up.sql rename to tokenserver-db/migrations/2021-08-03-234845_populate_services/up.sql diff --git a/syncserver/src/tokenserver/migrations/2021-09-30-142643_remove_foreign_key_constraints/down.sql b/tokenserver-db/migrations/2021-09-30-142643_remove_foreign_key_constraints/down.sql similarity index 100% rename from syncserver/src/tokenserver/migrations/2021-09-30-142643_remove_foreign_key_constraints/down.sql rename to tokenserver-db/migrations/2021-09-30-142643_remove_foreign_key_constraints/down.sql diff --git a/syncserver/src/tokenserver/migrations/2021-09-30-142643_remove_foreign_key_constraints/up.sql b/tokenserver-db/migrations/2021-09-30-142643_remove_foreign_key_constraints/up.sql similarity index 100% rename from syncserver/src/tokenserver/migrations/2021-09-30-142643_remove_foreign_key_constraints/up.sql rename to tokenserver-db/migrations/2021-09-30-142643_remove_foreign_key_constraints/up.sql diff --git a/syncserver/src/tokenserver/migrations/2021-09-30-142654_remove_node_defaults/down.sql b/tokenserver-db/migrations/2021-09-30-142654_remove_node_defaults/down.sql similarity index 100% rename from syncserver/src/tokenserver/migrations/2021-09-30-142654_remove_node_defaults/down.sql rename to tokenserver-db/migrations/2021-09-30-142654_remove_node_defaults/down.sql diff --git a/syncserver/src/tokenserver/migrations/2021-09-30-142654_remove_node_defaults/up.sql b/tokenserver-db/migrations/2021-09-30-142654_remove_node_defaults/up.sql similarity index 100% rename from syncserver/src/tokenserver/migrations/2021-09-30-142654_remove_node_defaults/up.sql rename to tokenserver-db/migrations/2021-09-30-142654_remove_node_defaults/up.sql diff --git a/syncserver/src/tokenserver/migrations/2021-09-30-142746_add_indexes/down.sql b/tokenserver-db/migrations/2021-09-30-142746_add_indexes/down.sql similarity index 100% rename from syncserver/src/tokenserver/migrations/2021-09-30-142746_add_indexes/down.sql rename to tokenserver-db/migrations/2021-09-30-142746_add_indexes/down.sql diff --git a/syncserver/src/tokenserver/migrations/2021-09-30-142746_add_indexes/up.sql 
b/tokenserver-db/migrations/2021-09-30-142746_add_indexes/up.sql similarity index 100% rename from syncserver/src/tokenserver/migrations/2021-09-30-142746_add_indexes/up.sql rename to tokenserver-db/migrations/2021-09-30-142746_add_indexes/up.sql diff --git a/syncserver/src/tokenserver/migrations/2021-09-30-144043_remove_nodes_service_key/down.sql b/tokenserver-db/migrations/2021-09-30-144043_remove_nodes_service_key/down.sql similarity index 100% rename from syncserver/src/tokenserver/migrations/2021-09-30-144043_remove_nodes_service_key/down.sql rename to tokenserver-db/migrations/2021-09-30-144043_remove_nodes_service_key/down.sql diff --git a/syncserver/src/tokenserver/migrations/2021-09-30-144043_remove_nodes_service_key/up.sql b/tokenserver-db/migrations/2021-09-30-144043_remove_nodes_service_key/up.sql similarity index 100% rename from syncserver/src/tokenserver/migrations/2021-09-30-144043_remove_nodes_service_key/up.sql rename to tokenserver-db/migrations/2021-09-30-144043_remove_nodes_service_key/up.sql diff --git a/syncserver/src/tokenserver/migrations/2021-09-30-144225_remove_users_nodeid_key/down.sql b/tokenserver-db/migrations/2021-09-30-144225_remove_users_nodeid_key/down.sql similarity index 100% rename from syncserver/src/tokenserver/migrations/2021-09-30-144225_remove_users_nodeid_key/down.sql rename to tokenserver-db/migrations/2021-09-30-144225_remove_users_nodeid_key/down.sql diff --git a/syncserver/src/tokenserver/migrations/2021-09-30-144225_remove_users_nodeid_key/up.sql b/tokenserver-db/migrations/2021-09-30-144225_remove_users_nodeid_key/up.sql similarity index 100% rename from syncserver/src/tokenserver/migrations/2021-09-30-144225_remove_users_nodeid_key/up.sql rename to tokenserver-db/migrations/2021-09-30-144225_remove_users_nodeid_key/up.sql diff --git a/syncserver/src/tokenserver/migrations/2021-12-22-160451_remove_services/down.sql b/tokenserver-db/migrations/2021-12-22-160451_remove_services/down.sql similarity index 100% rename from syncserver/src/tokenserver/migrations/2021-12-22-160451_remove_services/down.sql rename to tokenserver-db/migrations/2021-12-22-160451_remove_services/down.sql diff --git a/syncserver/src/tokenserver/migrations/2021-12-22-160451_remove_services/up.sql b/tokenserver-db/migrations/2021-12-22-160451_remove_services/up.sql similarity index 100% rename from syncserver/src/tokenserver/migrations/2021-12-22-160451_remove_services/up.sql rename to tokenserver-db/migrations/2021-12-22-160451_remove_services/up.sql diff --git a/tokenserver-db/src/error.rs b/tokenserver-db/src/error.rs new file mode 100644 index 00000000..a7e30a12 --- /dev/null +++ b/tokenserver-db/src/error.rs @@ -0,0 +1,104 @@ +use std::fmt; + +use backtrace::Backtrace; +use http::StatusCode; +use syncserver_common::{from_error, impl_fmt_display, InternalError}; +use syncserver_db_common::error::MysqlError; +use thiserror::Error; +use tokenserver_common::TokenserverError; + +pub(crate) type DbFuture<'a, T> = syncserver_db_common::DbFuture<'a, T, DbError>; +pub(crate) type DbResult = Result; + +/// An error type that represents any database-related errors that may occur while processing a +/// tokenserver request. 
diff --git a/tokenserver-db/src/error.rs b/tokenserver-db/src/error.rs
new file mode 100644
index 00000000..a7e30a12
--- /dev/null
+++ b/tokenserver-db/src/error.rs
@@ -0,0 +1,104 @@
+use std::fmt;
+
+use backtrace::Backtrace;
+use http::StatusCode;
+use syncserver_common::{from_error, impl_fmt_display, InternalError};
+use syncserver_db_common::error::MysqlError;
+use thiserror::Error;
+use tokenserver_common::TokenserverError;
+
+pub(crate) type DbFuture<'a, T> = syncserver_db_common::DbFuture<'a, T, DbError>;
+pub(crate) type DbResult<T> = Result<T, DbError>;
+
+/// An error type that represents any database-related errors that may occur while processing a
+/// tokenserver request.
+#[derive(Debug)]
+pub struct DbError {
+    kind: DbErrorKind,
+    pub status: StatusCode,
+    pub backtrace: Box<Backtrace>,
+}
+
+impl DbError {
+    pub(crate) fn internal(msg: String) -> Self {
+        DbErrorKind::Internal(msg).into()
+    }
+}
+
+#[derive(Debug, Error)]
+enum DbErrorKind {
+    #[error("{}", _0)]
+    Mysql(MysqlError),
+
+    #[error("Unexpected error: {}", _0)]
+    Internal(String),
+}
+
+impl From<DbErrorKind> for DbError {
+    fn from(kind: DbErrorKind) -> Self {
+        match kind {
+            DbErrorKind::Mysql(ref mysql_error) => Self {
+                status: mysql_error.status,
+                backtrace: Box::new(mysql_error.backtrace.clone()),
+                kind,
+            },
+            DbErrorKind::Internal(_) => Self {
+                kind,
+                status: StatusCode::INTERNAL_SERVER_ERROR,
+                backtrace: Box::new(Backtrace::new()),
+            },
+        }
+    }
+}
+
+impl From<DbError> for TokenserverError {
+    fn from(db_error: DbError) -> Self {
+        TokenserverError {
+            description: db_error.to_string(),
+            context: db_error.to_string(),
+            backtrace: db_error.backtrace,
+            http_status: if db_error.status.is_server_error() {
+                // Use the status code from the DbError if it already suggests an internal error;
+                // it might be more specific than `StatusCode::SERVICE_UNAVAILABLE`
+                db_error.status
+            } else {
+                StatusCode::SERVICE_UNAVAILABLE
+            },
+            // An unhandled DbError in the Tokenserver code is an internal error
+            ..TokenserverError::internal_error()
+        }
+    }
+}
+
+impl InternalError for DbError {
+    fn internal_error(message: String) -> Self {
+        DbErrorKind::Internal(message).into()
+    }
+}
+
+impl_fmt_display!(DbError, DbErrorKind);
+
+from_error!(
+    diesel::result::Error,
+    DbError,
+    |error: diesel::result::Error| DbError::from(DbErrorKind::Mysql(MysqlError::from(error)))
+);
+from_error!(
+    diesel::result::ConnectionError,
+    DbError,
+    |error: diesel::result::ConnectionError| DbError::from(DbErrorKind::Mysql(MysqlError::from(
+        error
+    )))
+);
+from_error!(
+    diesel::r2d2::PoolError,
+    DbError,
+    |error: diesel::r2d2::PoolError| DbError::from(DbErrorKind::Mysql(MysqlError::from(error)))
+);
+from_error!(
+    diesel_migrations::RunMigrationsError,
+    DbError,
+    |error: diesel_migrations::RunMigrationsError| DbError::from(DbErrorKind::Mysql(
+        MysqlError::from(error)
+    ))
+);
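Note: error.rs layers its conversions so that a Diesel-level failure can be propagated with ? and only crosses the crate boundary as a TokenserverError, with the HTTP status clamped to a 5xx along the way. A self-contained sketch of the same shape, using simplified stand-in types rather than the real MysqlError/DbError/TokenserverError:

    use std::fmt;

    // Stand-ins for the real error layers; fields are reduced to the minimum.
    #[derive(Debug)]
    struct MysqlError(String);

    #[derive(Debug)]
    enum DbErrorKind {
        Mysql(MysqlError),
        Internal(String),
    }

    #[derive(Debug)]
    struct DbError {
        kind: DbErrorKind,
        status: u16,
    }

    #[derive(Debug)]
    struct TokenserverError {
        context: String,
        http_status: u16,
    }

    impl fmt::Display for DbError {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            match &self.kind {
                DbErrorKind::Mysql(e) => write!(f, "{}", e.0),
                DbErrorKind::Internal(msg) => write!(f, "Unexpected error: {}", msg),
            }
        }
    }

    // Each layer only knows how to wrap the layer directly below it, mirroring
    // the from_error!/From impls above.
    impl From<MysqlError> for DbError {
        fn from(e: MysqlError) -> Self {
            DbError { kind: DbErrorKind::Mysql(e), status: 500 }
        }
    }

    impl From<DbError> for TokenserverError {
        fn from(e: DbError) -> Self {
            TokenserverError {
                context: e.to_string(),
                // Keep a more specific 5xx from the lower layer, otherwise 503.
                http_status: if e.status >= 500 { e.status } else { 503 },
            }
        }
    }

    fn query() -> Result<u32, DbError> {
        // Pretend the driver failed; `.into()` wraps it as a DbError.
        Err(MysqlError("lost connection".into()).into())
    }

    fn handle_request() -> Result<u32, TokenserverError> {
        Ok(query()?) // DbError -> TokenserverError happens implicitly via `?`
    }

    fn main() {
        println!("{:?}", handle_request());
    }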
diff --git a/tokenserver-db/src/lib.rs b/tokenserver-db/src/lib.rs
new file mode 100644
index 00000000..1b9f86c6
--- /dev/null
+++ b/tokenserver-db/src/lib.rs
@@ -0,0 +1,13 @@
+extern crate diesel;
+#[macro_use]
+extern crate diesel_migrations;
+
+mod error;
+pub mod mock;
+mod models;
+pub mod params;
+mod pool;
+pub mod results;
+
+pub use models::{Db, TokenserverDb};
+pub use pool::{DbPool, TokenserverPool};
diff --git a/syncserver/src/tokenserver/db/mock.rs b/tokenserver-db/src/mock.rs
similarity index 97%
rename from syncserver/src/tokenserver/db/mock.rs
rename to tokenserver-db/src/mock.rs
index 871e6a4e..29041091 100644
--- a/syncserver/src/tokenserver/db/mock.rs
+++ b/tokenserver-db/src/mock.rs
@@ -2,9 +2,10 @@
 use async_trait::async_trait;
 use futures::future;
-use syncserver_db_common::{error::DbError, GetPoolState, PoolState};
+use syncserver_db_common::{GetPoolState, PoolState};
 
-use super::models::{Db, DbFuture};
+use super::error::{DbError, DbFuture};
+use super::models::Db;
 use super::params;
 use super::pool::DbPool;
 use super::results;
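Note: lib.rs flattens the crate's public surface with pub use, and mock stays a pub module, presumably so that dependent crates' tests can stub out the database. A hypothetical caller-side sketch, assuming syncserver lists tokenserver-db as a path dependency; only names visible in the hunks above are referenced, and describe_pool is illustrative:

    use tokenserver_db::{params, results, Db, DbPool, TokenserverDb, TokenserverPool};

    // Code that previously reached into crate::tokenserver::db::pool::* can now
    // stay generic over the re-exported pool trait.
    fn describe_pool<P: DbPool>(_pool: &P) -> &'static str {
        "tokenserver db pool"
    }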
diff --git a/syncserver/src/tokenserver/db/models.rs b/tokenserver-db/src/models.rs
similarity index 97%
rename from syncserver/src/tokenserver/db/models.rs
rename to tokenserver-db/src/models.rs
index a94be926..f1cc51de 100644
--- a/syncserver/src/tokenserver/db/models.rs
+++ b/tokenserver-db/src/models.rs
@@ -1,4 +1,3 @@
-use actix_web::http::StatusCode;
 use diesel::{
     mysql::MysqlConnection,
     r2d2::{ConnectionManager, PooledConnection},
@@ -7,25 +6,24 @@
 };
 #[cfg(test)]
 use diesel_logger::LoggingConnection;
-use futures::future::LocalBoxFuture;
-use syncserver_db_common::error::DbError;
+use http::StatusCode;
+use syncserver_common::{BlockingThreadpool, Metrics};
+use syncserver_db_common::{sync_db_method, DbFuture};
 
 use std::{
-    result,
     sync::Arc,
     time::{SystemTime, UNIX_EPOCH},
 };
 
-use super::{params, results};
-use crate::server::{metrics::Metrics, BlockingThreadpool};
-use crate::sync_db_method;
+use super::{
+    error::{DbError, DbResult},
+    params, results,
+};
 
 /// The maximum possible generation number. Used as a tombstone to mark users that have been
 /// "retired" from the db.
 const MAX_GENERATION: i64 = i64::MAX;
 
-pub type DbFuture<'a, T> = LocalBoxFuture<'a, Result<T, DbError>>;
-pub type DbResult<T> = result::Result<T, DbError>;
 type Conn = PooledConnection<ConnectionManager<MysqlConnection>>;
 
 #[derive(Clone)]
@@ -37,7 +35,7 @@ pub struct TokenserverDb {
     /// the thread pool but does not provide Send as the underlying db
     /// conn. structs are !Sync (Arc requires both for Send). See the Send impl
     /// below.
-    pub(super) inner: Arc<DbInner>,
+    inner: Arc<DbInner>,
     metrics: Metrics,
     service_id: Option<i32>,
     spanner_node_id: Option<i32>,
@@ -49,7 +47,7 @@ pub struct TokenserverDb {
 /// queued to the thread pool via Futures, naturally serialized.
 unsafe impl Send for TokenserverDb {}
 
-pub struct DbInner {
+struct DbInner {
     #[cfg(not(test))]
     pub(super) conn: Conn,
     #[cfg(test)]
@@ -255,7 +253,7 @@
             .get_result::(&self.inner.conn)
             .map_err(|e| {
                 let mut db_error =
-                    DbError::internal(&format!("unable to get Spanner node: {}", e));
+                    DbError::internal(format!("unable to get Spanner node: {}", e));
                 db_error.status = StatusCode::SERVICE_UNAVAILABLE;
                 db_error
             })
@@ -289,7 +287,7 @@
             }
         }
 
-        let mut db_error = DbError::internal("unable to get a node");
+        let mut db_error = DbError::internal("unable to get a node".to_owned());
        db_error.status = StatusCode::SERVICE_UNAVAILABLE;
         Err(db_error)
     }
@@ -463,7 +461,7 @@
             // The most up-to-date user doesn't have a node and is retired. This is an internal
             // service error for compatibility reasons (the legacy Tokenserver returned an
             // internal service error in this situation).
-            (_, None) => Err(DbError::internal("Tokenserver user retired")),
+            (_, None) => Err(DbError::internal("Tokenserver user retired".to_owned())),
         }
     }
 }
@@ -682,7 +680,7 @@ impl Db for TokenserverDb {
     #[cfg(test)]
     sync_db_method!(get_user, get_user_sync, GetUser);
 
-    fn check(&self) -> DbFuture<'_, results::Check> {
+    fn check(&self) -> DbFuture<'_, results::Check, DbError> {
         let db = self.clone();
         Box::pin(self.blocking_threadpool.spawn(move || db.check_sync()))
     }
@@ -718,63 +716,82 @@
 }
 
 pub trait Db {
-    fn replace_user(&self, params: params::ReplaceUser) -> DbFuture<'_, results::ReplaceUser>;
+    fn replace_user(
+        &self,
+        params: params::ReplaceUser,
+    ) -> DbFuture<'_, results::ReplaceUser, DbError>;
 
-    fn replace_users(&self, params: params::ReplaceUsers) -> DbFuture<'_, results::ReplaceUsers>;
+    fn replace_users(
+        &self,
+        params: params::ReplaceUsers,
+    ) -> DbFuture<'_, results::ReplaceUsers, DbError>;
 
-    fn post_user(&self, params: params::PostUser) -> DbFuture<'_, results::PostUser>;
+    fn post_user(&self, params: params::PostUser) -> DbFuture<'_, results::PostUser, DbError>;
 
-    fn put_user(&self, params: params::PutUser) -> DbFuture<'_, results::PutUser>;
+    fn put_user(&self, params: params::PutUser) -> DbFuture<'_, results::PutUser, DbError>;
 
-    fn check(&self) -> DbFuture<'_, results::Check>;
+    fn check(&self) -> DbFuture<'_, results::Check, DbError>;
 
-    fn get_node_id(&self, params: params::GetNodeId) -> DbFuture<'_, results::GetNodeId>;
+    fn get_node_id(&self, params: params::GetNodeId) -> DbFuture<'_, results::GetNodeId, DbError>;
 
-    fn get_best_node(&self, params: params::GetBestNode) -> DbFuture<'_, results::GetBestNode>;
+    fn get_best_node(
+        &self,
+        params: params::GetBestNode,
+    ) -> DbFuture<'_, results::GetBestNode, DbError>;
 
     fn add_user_to_node(
         &self,
         params: params::AddUserToNode,
-    ) -> DbFuture<'_, results::AddUserToNode>;
+    ) -> DbFuture<'_, results::AddUserToNode, DbError>;
 
-    fn get_users(&self, params: params::GetUsers) -> DbFuture<'_, results::GetUsers>;
+    fn get_users(&self, params: params::GetUsers) -> DbFuture<'_, results::GetUsers, DbError>;
 
     fn get_or_create_user(
         &self,
         params: params::GetOrCreateUser,
-    ) -> DbFuture<'_, results::GetOrCreateUser>;
+    ) -> DbFuture<'_, results::GetOrCreateUser, DbError>;
 
-    fn get_service_id(&self, params: params::GetServiceId) -> DbFuture<'_, results::GetServiceId>;
+    fn get_service_id(
+        &self,
+        params: params::GetServiceId,
+    ) -> DbFuture<'_, results::GetServiceId, DbError>;
 
     #[cfg(test)]
     fn set_user_created_at(
         &self,
         params: params::SetUserCreatedAt,
-    ) -> DbFuture<'_, results::SetUserCreatedAt>;
+    ) -> DbFuture<'_, results::SetUserCreatedAt, DbError>;
 
     #[cfg(test)]
     fn set_user_replaced_at(
         &self,
         params: params::SetUserReplacedAt,
-    ) -> DbFuture<'_, results::SetUserReplacedAt>;
+    ) -> DbFuture<'_, results::SetUserReplacedAt, DbError>;
 
     #[cfg(test)]
-    fn get_user(&self, params: params::GetUser) -> DbFuture<'_, results::GetUser>;
+    fn get_user(&self, params: params::GetUser) -> DbFuture<'_, results::GetUser, DbError>;
 
     #[cfg(test)]
-    fn post_node(&self, params: params::PostNode) -> DbFuture<'_, results::PostNode>;
+    fn post_node(&self, params: params::PostNode) -> DbFuture<'_, results::PostNode, DbError>;
 
     #[cfg(test)]
-    fn get_node(&self, params: params::GetNode) -> DbFuture<'_, results::GetNode>;
+    fn get_node(&self, params: params::GetNode) -> DbFuture<'_, results::GetNode, DbError>;
 
     #[cfg(test)]
-    fn unassign_node(&self, params: params::UnassignNode) -> DbFuture<'_, results::UnassignNode>;
+    fn unassign_node(
+        &self,
+        params: params::UnassignNode,
+    ) -> DbFuture<'_, results::UnassignNode, DbError>;
 
     #[cfg(test)]
-    fn remove_node(&self, params: params::RemoveNode) -> DbFuture<'_, results::RemoveNode>;
+    fn remove_node(&self, params: params::RemoveNode)
+        -> DbFuture<'_, results::RemoveNode, DbError>;
 
     #[cfg(test)]
-    fn post_service(&self, params: params::PostService) -> DbFuture<'_, results::PostService>;
+    fn post_service(
+        &self,
+        params: params::PostService,
+    ) -> DbFuture<'_, results::PostService, DbError>;
 }
 
 #[cfg(test)]
@@ -786,7 +803,7 @@ mod tests {
     use syncserver_settings::Settings;
 
-    use crate::tokenserver::db::pool::{DbPool, TokenserverPool};
+    use crate::pool::{DbPool, TokenserverPool};
 
     #[tokio::test]
     async fn test_update_generation() -> DbResult<()> {
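Note: with the crate-local DbFuture/DbResult aliases removed, models.rs now returns the shared syncserver_db_common::DbFuture with the error type named at each call site, and the async methods stay thin wrappers around their *_sync bodies. A self-contained sketch of both ideas; the real code hands the closure to a BlockingThreadpool, which is elided here:

    use std::future::Future;
    use std::pin::Pin;

    // Local stand-in for syncserver_db_common::DbFuture<'a, T, E>: a boxed
    // future whose error type is now an explicit parameter.
    type DbFuture<'a, T, E> = Pin<Box<dyn Future<Output = Result<T, E>> + 'a>>;

    #[derive(Debug)]
    struct DbError(String);

    #[derive(Debug)]
    struct Check(bool);

    #[derive(Clone)]
    struct Db;

    impl Db {
        // Synchronous worker, analogous to check_sync().
        fn check_sync(&self) -> Result<Check, DbError> {
            Ok(Check(true))
        }

        // Same shape as the diff's check(): clone the handle and box a future
        // that runs the synchronous body (threadpool dispatch elided).
        fn check(&self) -> DbFuture<'_, Check, DbError> {
            let db = self.clone();
            Box::pin(async move { db.check_sync() })
        }
    }

    fn main() {
        let db = Db;
        let _fut = db.check(); // poll with an executor of your choice
    }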
diff --git a/syncserver/src/tokenserver/db/params.rs b/tokenserver-db/src/params.rs
similarity index 100%
rename from syncserver/src/tokenserver/db/params.rs
rename to tokenserver-db/src/params.rs
diff --git a/syncserver/src/tokenserver/db/pool.rs b/tokenserver-db/src/pool.rs
similarity index 90%
rename from syncserver/src/tokenserver/db/pool.rs
rename to tokenserver-db/src/pool.rs
index cf8f2c3f..dd100abb 100644
--- a/syncserver/src/tokenserver/db/pool.rs
+++ b/tokenserver-db/src/pool.rs
@@ -4,25 +4,27 @@ use async_trait::async_trait;
 use diesel::{
     mysql::MysqlConnection,
     r2d2::{ConnectionManager, Pool},
+    Connection,
 };
 use diesel_logger::LoggingConnection;
-use syncserver_db_common::{error::DbError, GetPoolState, PoolState};
+use syncserver_common::{BlockingThreadpool, Metrics};
+#[cfg(debug_assertions)]
+use syncserver_db_common::test::TestTransactionCustomizer;
+use syncserver_db_common::{GetPoolState, PoolState};
 use tokenserver_settings::Settings;
 
-use super::models::{Db, DbResult, TokenserverDb};
-use crate::diesel::Connection;
-use crate::server::{metrics::Metrics, BlockingThreadpool};
+use super::{
+    error::{DbError, DbResult},
+    models::{Db, TokenserverDb},
+};
 
-#[cfg(test)]
-use crate::db::mysql::TestTransactionCustomizer;
-
-embed_migrations!("src/tokenserver/migrations");
+embed_migrations!();
 
 /// Run the diesel embedded migrations
 ///
 /// Mysql DDL statements implicitly commit which could disrupt MysqlPool's
 /// begin_test_transaction during tests. So this runs on its own separate conn.
-pub fn run_embedded_migrations(database_url: &str) -> DbResult<()> {
+fn run_embedded_migrations(database_url: &str) -> DbResult<()> {
     let conn = MysqlConnection::establish(database_url)?;
 
     embedded_migrations::run(&LoggingConnection::new(conn))?;
@@ -60,7 +62,7 @@ impl TokenserverPool {
             ))
             .min_idle(settings.database_pool_min_idle);
 
-        #[cfg(test)]
+        #[cfg(debug_assertions)]
         let builder = if _use_test_transactions {
             builder.connection_customizer(Box::new(TestTransactionCustomizer))
         } else {
diff --git a/syncserver/src/tokenserver/db/results.rs b/tokenserver-db/src/results.rs
similarity index 100%
rename from syncserver/src/tokenserver/db/results.rs
rename to tokenserver-db/src/results.rs
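Note: the gate on the test-transaction customizer changes from #[cfg(test)] to #[cfg(debug_assertions)] because cfg(test) only holds while a crate compiles its own test harness; now that the pool lives in its own crate, syncserver's tests would otherwise get a tokenserver-db built without that code path. cfg(debug_assertions) keeps it available to dependents in dev and test profiles while still compiling it out of release builds. A minimal illustration with placeholder functions:

    // Visible only when *this* crate's own tests are compiled
    // (`cargo test -p this-crate`); invisible to dependent crates.
    #[cfg(test)]
    #[allow(dead_code)]
    fn only_in_this_crates_own_tests() {}

    // Visible in every debug build, including when a dependent crate runs its
    // tests against this one, but compiled out of --release artifacts.
    #[cfg(debug_assertions)]
    #[allow(dead_code)]
    fn available_to_dependents_in_debug() {}

    fn main() {
        #[cfg(debug_assertions)]
        available_to_dependents_in_debug();
    }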