diff --git a/.cargo/audit.toml b/.cargo/audit.toml index 08df02c4..b70afb22 100644 --- a/.cargo/audit.toml +++ b/.cargo/audit.toml @@ -7,4 +7,5 @@ ignore = [ "RUSTSEC-2020-0071", "RUSTSEC-2021-0124", "RUSTSEC-2023-0034", # Bound by actix-http 2.2, Reqwest 0.10 + "RUSTSEC-2023-0052", # Bound by reqwest, various tls libs ] diff --git a/.circleci/config.yml b/.circleci/config.yml index be668990..fc3ca8f2 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -10,7 +10,7 @@ commands: steps: - run: name: Display Rust Version - command: + command: rustc --version setup-rust-check: steps: @@ -43,22 +43,23 @@ commands: flake8 syncserver/src/tokenserver flake8 tools/integration_tests flake8 tools/tokenserver - rust-clippy: + rust-clippy-mysql: steps: - run: - name: Rust Clippy + name: Rust Clippy MySQL command: | - cargo clippy --workspace --all-targets --all-features -- -D warnings + cargo clippy --workspace --all-targets --no-default-features --features=syncstorage-db/mysql -- -D warnings + rust-clippy-spanner: + steps: + - run: + name: Rust Clippy Spanner + command: | + cargo clippy --workspace --all-targets --no-default-features --features=syncstorage-db/spanner -- -D warnings cargo-build: steps: - run: name: cargo build command: cargo build - setup-gcp-grpc: - steps: - - run: - name: Install grpcio dependencies - command: sudo apt-get update && sudo apt-get install -y cmake golang-go setup-mysql: steps: - run: @@ -69,7 +70,7 @@ commands: - run: name: Create Tokenserver database command: | - mysql -u root -ppassword -h 127.0.0.1 -e 'CREATE DATABASE tokenserver;' + mysql -u root -ppassword -h 127.0.0.1 -e 'CREATE DATABASE tokenserver;' mysql -u root -ppassword -h 127.0.0.1 -e "GRANT ALL ON tokenserver.* to 'test'@'%';" write-version: @@ -105,7 +106,7 @@ commands: -f docker-compose.mysql.yaml -f docker-compose.e2e.mysql.yaml up - --exit-code-from e2e-tests + --exit-code-from mysql-e2e-tests --abort-on-container-exit environment: SYNCSTORAGE_RS_IMAGE: app:build @@ 
-129,7 +130,7 @@ commands: -f docker-compose.spanner.yaml -f docker-compose.e2e.spanner.yaml up - --exit-code-from e2e-tests + --exit-code-from spanner-e2e-tests --abort-on-container-exit environment: SYNCSTORAGE_RS_IMAGE: app:build @@ -164,13 +165,14 @@ jobs: auth: username: $DOCKER_USER password: $DOCKER_PASS + resource_class: large steps: - checkout - display-rust - setup-rust-check - - setup-gcp-grpc - rust-check - - rust-clippy + - rust-clippy-spanner + - rust-clippy-mysql - setup-python - python-check @@ -197,20 +199,9 @@ jobs: MYSQL_DATABASE: syncstorage resource_class: large steps: - - setup_remote_docker: - docker_layer_caching: true - - run: - name: Login to Dockerhub - command: | - if [ "${DOCKER_USER}" == "" ] || [ "${DOCKER_PASS}" == "" ]; then - echo "Skipping Login to DockerHub, credentials unavailable" - else - echo "${DOCKER_PASS}" | docker login -u="${DOCKER_USER}" --password-stdin - fi - checkout - display-rust - setup-python - - setup-gcp-grpc - setup-mysql - create-tokenserver-database # XXX: currently the time needed to setup-sccache negates its savings @@ -221,9 +212,21 @@ jobs: - run-tests - run-tokenserver-scripts-tests #- save-sccache-cache + build-mysql-image: + docker: + - image: cimg/rust:1.60.0 + auth: + username: $DOCKER_USER + password: $DOCKER_PASS + resource_class: large + steps: + - setup_remote_docker: + docker_layer_caching: true + - checkout + - write-version - run: - name: Build Docker image - command: docker build -t app:build . + name: Build MySQL Docker image + command: docker build -t app:build --build-arg DATABASE_BACKEND=mysql . no_output_timeout: 30m # save the built docker container into CircleCI's cache. This is # required since Workflows do not have the same remote docker instance. 
@@ -234,13 +237,44 @@ jobs: docker save -o /home/circleci/cache/docker.tar "app:build" - run: name: Save docker-compose config - command: cp docker-compose*.yaml /home/circleci/cache + command: cp docker-compose*mysql.yaml /home/circleci/cache - save_cache: - key: v1-{{ .Branch }}-{{ .Environment.CIRCLE_SHA1 }}-{{ epoch }} + key: mysql-{{ .Branch }}-{{ .Environment.CIRCLE_SHA1 }}-{{ epoch }} paths: - /home/circleci/cache - e2e-tests: + build-spanner-image: + docker: + - image: cimg/rust:1.60.0 + auth: + username: $DOCKER_USER + password: $DOCKER_PASS + resource_class: large + steps: + - setup_remote_docker: + docker_layer_caching: true + - checkout + - write-version + - run: + name: Build Spanner Docker image + command: docker build -t app:build --build-arg DATABASE_BACKEND=spanner . + no_output_timeout: 30m + # save the built docker container into CircleCI's cache. This is + # required since Workflows do not have the same remote docker instance. + - run: + name: docker save app:build + command: | + mkdir -p /home/circleci/cache + docker save -o /home/circleci/cache/docker.tar "app:build" + - run: + name: Save docker-compose config + command: cp docker-compose*spanner.yaml /home/circleci/cache + - save_cache: + key: spanner-{{ .Branch }}-{{ .Environment.CIRCLE_SHA1 }}-{{ epoch }} + paths: + - /home/circleci/cache + + mysql-e2e-tests: docker: - image: docker/compose:1.24.0 auth: @@ -249,7 +283,7 @@ jobs: steps: - setup_remote_docker - restore_cache: - key: v1-{{ .Branch }}-{{ .Environment.CIRCLE_SHA1 }} + key: mysql-{{ .Branch }}-{{ .Environment.CIRCLE_SHA1 }} - run: name: Restore Docker image cache command: docker load -i /home/circleci/cache/docker.tar @@ -257,6 +291,23 @@ jobs: name: Restore docker-compose config command: cp /home/circleci/cache/docker-compose*.yaml . 
- run-e2e-mysql-tests + + spanner-e2e-tests: + docker: + - image: docker/compose:1.24.0 + auth: + username: $DOCKER_USER + password: $DOCKER_PASS + steps: + - setup_remote_docker + - restore_cache: + key: spanner-{{ .Branch }}-{{ .Environment.CIRCLE_SHA1 }} + - run: + name: Restore Docker image cache + command: docker load -i /home/circleci/cache/docker.tar + - run: + name: Restore docker-compose config + command: cp /home/circleci/cache/docker-compose*.yaml . - run-e2e-spanner-tests deploy: @@ -268,7 +319,7 @@ jobs: steps: - setup_remote_docker - restore_cache: - key: v1-{{ .Branch }}-{{ .Environment.CIRCLE_SHA1 }} + key: spanner-{{ .Branch }}-{{ .Environment.CIRCLE_SHA1 }} - run: name: Restore Docker image cache command: docker load -i /home/circleci/cache/docker.tar @@ -312,7 +363,7 @@ jobs: command: | export UTILS_DOCKERHUB_REPO=mozilla/sync-spanner-py-utils if [ "${CIRCLE_BRANCH}" == "master" ]; then - DOCKER_TAG="${CIRCLE_SHA1}" + DOCKER_TAG="latest" fi if echo "${CIRCLE_BRANCH}" | grep '^feature\..*' > /dev/null; then @@ -346,21 +397,44 @@ workflows: filters: tags: only: /.*/ - - e2e-tests: + - build-mysql-image: requires: - build-and-test filters: tags: only: /.*/ + - build-spanner-image: + requires: + - build-and-test + filters: + tags: + only: /.*/ + - mysql-e2e-tests: + requires: + - build-mysql-image + filters: + tags: + only: /.*/ + - spanner-e2e-tests: + requires: + - build-spanner-image + filters: + tags: + only: /.*/ - deploy: requires: - - e2e-tests + - mysql-e2e-tests + - spanner-e2e-tests filters: tags: only: /.*/ + branches: + only: master + # touch: 1676417203 - deploy-python-utils: requires: - - e2e-tests + - mysql-e2e-tests + - spanner-e2e-tests filters: tags: only: /.*/ diff --git a/Cargo.lock b/Cargo.lock index 09a5318b..3a8e5372 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13,7 +13,7 @@ dependencies = [ "futures-core", "futures-sink", "log", - "pin-project 0.4.29", + "pin-project 0.4.30", "tokio", "tokio-util", ] @@ -63,7 +63,7 @@ 
dependencies = [ "actix-service", "actix-threadpool", "actix-utils", - "base64 0.13.0", + "base64 0.13.1", "bitflags 1.3.2", "brotli", "bytes 0.5.6", @@ -86,11 +86,11 @@ dependencies = [ "lazy_static", "log", "mime", - "percent-encoding 2.1.0", - "pin-project 1.0.10", + "percent-encoding 2.3.0", + "pin-project 1.1.3", "rand 0.7.3", "regex", - "serde 1.0.135", + "serde 1.0.188", "serde_json", "serde_urlencoded", "sha-1", @@ -105,7 +105,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4ca8ce00b267af8ccebbd647de0d61e0674b6e61185cc7a592ff88772bed655" dependencies = [ "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -118,7 +118,7 @@ dependencies = [ "http", "log", "regex", - "serde 1.0.135", + "serde 1.0.188", ] [[package]] @@ -163,7 +163,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0052435d581b5be835d11f4eb3bce417c8af18d87ddf8ace99f8e67e595882bb" dependencies = [ "futures-util", - "pin-project 0.4.29", + "pin-project 0.4.30", ] [[package]] @@ -191,7 +191,7 @@ dependencies = [ "lazy_static", "log", "num_cpus", - "parking_lot", + "parking_lot 0.11.2", "threadpool", ] @@ -223,7 +223,7 @@ dependencies = [ "futures-sink", "futures-util", "log", - "pin-project 0.4.29", + "pin-project 0.4.30", "slab", ] @@ -255,15 +255,15 @@ dependencies = [ "fxhash", "log", "mime", - "pin-project 1.0.10", + "pin-project 1.1.3", "regex", - "serde 1.0.135", + "serde 1.0.188", "serde_json", "serde_urlencoded", "socket2 0.3.19", "time 0.2.27", "tinyvec", - "url 2.2.2", + "url 2.4.1", ] [[package]] @@ -274,14 +274,14 @@ checksum = "ad26f77093333e0e7c6ffe54ebe3582d908a104e448723eec6d43d08b07143fb" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "addr2line" -version = "0.17.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9ecd88a8c8378ca913a680cd98f0f13ac67383d35993f86c90a70e3f137816b" +checksum = 
"8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" dependencies = [ "gimli", ] @@ -294,39 +294,63 @@ checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "aho-corasick" -version = "0.7.18" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f" +checksum = "ea5d730647d4fadd988536d06fecce94b7b4f2a7efdae548f1cf4b63205518ab" dependencies = [ "memchr", ] [[package]] name = "alloc-no-stdlib" -version = "2.0.3" +version = "2.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35ef4730490ad1c4eae5c4325b2a95f521d023e5c885853ff7aca0a6a1631db3" +checksum = "cc7bb162ec39d46ab1ca8c77bf72e890535becd1751bb45f64c597edb4c8c6b3" [[package]] name = "alloc-stdlib" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "697ed7edc0f1711de49ce108c541623a0af97c6c60b2f6e2b65229847ac843c2" +checksum = "94fb8275041c72129eb51b7d0322c29b8387a0386127718b096429201a5d6ece" dependencies = [ "alloc-no-stdlib", ] [[package]] -name = "anyhow" -version = "1.0.53" +name = "android-tzdata" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94a45b455c14666b85fc40a019e8ab9eb75e3a124e05494f5397122bc9eb06e0" +checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" + +[[package]] +name = "android_system_properties" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", +] + +[[package]] +name = "ansi_term" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" +dependencies = [ + "winapi 0.3.9", +] + +[[package]] +name = "anyhow" +version = 
"1.0.75" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" [[package]] name = "arc-swap" -version = "1.5.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5d78ce20460b82d3fa150275ed9d55e21064fc7951177baacf86a145c4a4b1f" +checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6" [[package]] name = "arrayvec" @@ -336,23 +360,23 @@ checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" [[package]] name = "assert-json-diff" -version = "2.0.1" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50f1c3703dd33532d7f0ca049168930e9099ecac238e23cf932f3a69c42f06da" +checksum = "47e4f2b81832e72834d7518d8487a0396a28cc408186a2e8854c0f98011faf12" dependencies = [ - "serde 1.0.135", + "serde 1.0.188", "serde_json", ] [[package]] name = "async-trait" -version = "0.1.53" +version = "0.1.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed6aa3524a2dfcf9fe180c51eae2b58738348d819517ceadf95789c51fff7600" +checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.37", ] [[package]] @@ -361,16 +385,16 @@ version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" dependencies = [ - "hermit-abi", + "hermit-abi 0.1.19", "libc", "winapi 0.3.9", ] [[package]] name = "autocfg" -version = "1.0.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "awc" @@ -382,40 +406,40 @@ dependencies = [ "actix-http", "actix-rt", "actix-service", - "base64 
0.13.0", + "base64 0.13.1", "bytes 0.5.6", "cfg-if 1.0.0", "derive_more", "futures-core", "log", "mime", - "percent-encoding 2.1.0", + "percent-encoding 2.3.0", "rand 0.7.3", - "serde 1.0.135", + "serde 1.0.188", "serde_json", "serde_urlencoded", ] [[package]] name = "backtrace" -version = "0.3.65" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11a17d453482a265fd5f8479f2a3f405566e6ca627837aaddb85af8b1ab8ef61" +checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" dependencies = [ "addr2line", "cc", "cfg-if 1.0.0", "libc", - "miniz_oxide 0.5.1", + "miniz_oxide", "object", "rustc-demangle", ] [[package]] name = "base-x" -version = "0.2.8" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4521f3e3d031370679b3b140beb36dfe4801b09ac77e30c61941f97df3ef28b" +checksum = "4cbbc9d0964165b47557570cce6c952866c2678457aca742aafc9fb771d30270" [[package]] name = "base64" @@ -425,38 +449,37 @@ checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" [[package]] name = "base64" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" +checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] -name = "bb8" -version = "0.4.2" +name = "base64" +version = "0.21.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "374bba43fc924d90393ee7768e6f75d223a98307a488fe5bc34b66c3e96932a6" -dependencies = [ - "async-trait", - "futures 0.3.19", - "tokio", -] +checksum = "9ba43ea6f343b788c8764558649e08df62f86c6ef251fdaeb1ffd010a9ae50a2" [[package]] name = "bindgen" -version = "0.57.0" +version = "0.59.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd4865004a46a0aafb2a0a5eb19d3c9fc46ee5f063a6cfc605c69ac9ecf5263d" +checksum = 
"2bd2a9a458e8f4304c52c43ebb0cfbd520289f8379a52e329a38afda99bf8eb8" dependencies = [ "bitflags 1.3.2", "cexpr", "clang-sys", + "clap", + "env_logger", "lazy_static", "lazycell", + "log", "peeking_take_while", "proc-macro2", "quote", "regex", "rustc-hash", "shlex", + "which", ] [[package]] @@ -491,18 +514,18 @@ dependencies = [ [[package]] name = "boringssl-src" -version = "0.3.0+688fc5c" +version = "0.5.2+6195bf8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f901accdf830d2ea2f4e27f923a5e1125cd8b1a39ab578b9db1a42d578a6922b" +checksum = "7ab565ccc5e276ea82a2013dd08bf2c999866b06daf1d4f30fee419c4aaec6d5" dependencies = [ "cmake", ] [[package]] name = "brotli" -version = "3.3.3" +version = "3.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f838e47a451d5a8fa552371f80024dd6ace9b7acdf25c4c3d0f9bc6816fb1c39" +checksum = "a1a0b1dbcc8ae29329621f8d4f0d835787c1c38bb1401979b49d13b0b305ff68" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -511,9 +534,9 @@ dependencies = [ [[package]] name = "brotli-decompressor" -version = "2.3.2" +version = "2.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59ad2d4653bf5ca36ae797b1f4bb4dbddb60ce49ca4aed8a2ce4829f60425b80" +checksum = "4b6561fd3f895a11e8f72af2cb7d22e08366bebc2b6b57f7744c4bda27034744" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -521,9 +544,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.9.1" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a45a46ab1f2412e53d3a0ade76ffad2025804294569aae387231a0cd6e0899" +checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec" [[package]] name = "byteorder" @@ -539,41 +562,44 @@ checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" [[package]] name = "bytes" -version = "1.1.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8" +checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" [[package]] name = "bytestring" -version = "1.0.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90706ba19e97b90786e19dc0d5e2abd80008d99d4c0c5d1ad0b5e72cec7c494d" +checksum = "238e4886760d98c4f899360c834fa93e62cf7f721ac3c2da375cbdf4b8679aae" dependencies = [ - "bytes 1.1.0", + "bytes 1.5.0", ] [[package]] name = "cadence" -version = "0.26.0" +version = "0.29.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7685b737fff763407351ce3a0d18c980a68e154b36f2d0b0fafebbac47de032" +checksum = "f39286bc075b023101dccdb79456a1334221c768b8faede0c2aff7ed29a9482d" dependencies = [ "crossbeam-channel", ] [[package]] name = "cc" -version = "1.0.72" +version = "1.0.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22a9137b95ea06864e018375b72adfb7db6e6f68cfc8df5a04d00288050485ee" +checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" +dependencies = [ + "libc", +] [[package]] name = "cexpr" -version = "0.4.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4aedb84272dbe89af497cf81375129abda4fc0a9e7c5d317498c15cc30c0d27" +checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" dependencies = [ - "nom", + "nom 7.1.3", ] [[package]] @@ -590,23 +616,24 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.19" +version = "0.4.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" +checksum = "7f2c685bad3eb3d45a01354cedb7d5faa66194d1d58ba6e267a8de788f79db38" dependencies = [ - "libc", - "num-integer", - "num-traits 0.2.14", - "serde 1.0.135", - "time 0.1.43", - "winapi 0.3.9", 
+ "android-tzdata", + "iana-time-zone", + "js-sys", + "num-traits 0.2.16", + "serde 1.0.188", + "wasm-bindgen", + "windows-targets", ] [[package]] name = "clang-sys" -version = "1.3.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa66045b9cb23c2e9c1520732030608b02ee07e5cfaa5a521ec15ded7fa24c90" +checksum = "c688fc74432808e3eb684cae8830a86be1d66a2bd58e1f248ed0960a590baf6f" dependencies = [ "glob", "libc", @@ -614,23 +641,38 @@ dependencies = [ ] [[package]] -name = "cmake" -version = "0.1.45" +name = "clap" +version = "2.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb6210b637171dfba4cda12e579ac6dc73f5165ad56133e5d72ef3131f320855" +checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" +dependencies = [ + "ansi_term", + "atty", + "bitflags 1.3.2", + "strsim 0.8.0", + "textwrap", + "unicode-width", + "vec_map", +] + +[[package]] +name = "cmake" +version = "0.1.50" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a31c789563b815f77f4250caee12365734369f942439b7defd71e18a48197130" dependencies = [ "cc", ] [[package]] name = "colored" -version = "2.0.0" +version = "2.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3616f750b84d8f0de8a58bda93e08e2a81ad3f523089b05f1dffecab48c6cbd" +checksum = "2674ec482fbc38012cf31e6c42ba0177b431a0cb6f15fe40efa5aab1bda516f6" dependencies = [ - "atty", + "is-terminal", "lazy_static", - "winapi 0.3.9", + "windows-sys", ] [[package]] @@ -640,8 +682,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "19b076e143e1d9538dde65da30f8481c2a6c44040edb8e02b9bf1351edb92ce3" dependencies = [ "lazy_static", - "nom", - "serde 1.0.135", + "nom 5.1.3", + "serde 1.0.188", ] [[package]] @@ -651,9 +693,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b1b9d958c2b1368a663f05538fc1b5975adce1e19f435acceae987aceeeb369" 
dependencies = [ "lazy_static", - "nom", + "nom 5.1.3", "rust-ini", - "serde 1.0.135", + "serde 1.0.188", "serde-hjson", "serde_json", "toml", @@ -678,7 +720,7 @@ version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "03a5d7b21829bc7b4bf4754a978a241ae54ea55a40f92bb20216e54096f4b951" dependencies = [ - "percent-encoding 2.1.0", + "percent-encoding 2.3.0", "time 0.2.27", "version_check", ] @@ -691,9 +733,9 @@ checksum = "a2df960f5d869b2dd8532793fde43eb5427cceb126c929747a26823ab0eeb536" [[package]] name = "core-foundation" -version = "0.9.2" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6888e10551bb93e424d8df1d07f1a8b4fceb0001a3a4b048bfc47554946f47b3" +checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" dependencies = [ "core-foundation-sys", "libc", @@ -701,36 +743,36 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" +checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" [[package]] name = "cpufeatures" -version = "0.2.1" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95059428f66df56b63431fdb4e1947ed2190586af5c5a8a8b71122bdf5a7f469" +checksum = "a17b76ff3a4162b0b27f354a0c87015ddad39d35f9c0c36607a3bdd175dde1f1" dependencies = [ "libc", ] [[package]] name = "crc32fast" -version = "1.3.1" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2209c310e29876f7f0b2721e7e26b84aff178aa3da5d091f9bfbf47669e60e3" +checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" dependencies = [ "cfg-if 1.0.0", ] [[package]] name = "crossbeam-channel" -version = "0.5.2" +version = "0.5.8" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e54ea8bc3fb1ee042f5aace6e3c6e025d3874866da222930f70ce62aceba0bfa" +checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" dependencies = [ "cfg-if 1.0.0", - "crossbeam-utils 0.8.6", + "crossbeam-utils 0.8.16", ] [[package]] @@ -757,12 +799,11 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.6" +version = "0.8.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfcae03edb34f947e64acdb1c33ec169824e20657e9ecb61cef6c8c74dcb8120" +checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" dependencies = [ "cfg-if 1.0.0", - "lazy_static", ] [[package]] @@ -777,24 +818,24 @@ dependencies = [ [[package]] name = "curl" -version = "0.4.42" +version = "0.4.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7de97b894edd5b5bcceef8b78d7da9b75b1d2f2f9a910569d0bde3dd31d84939" +checksum = "509bd11746c7ac09ebd19f0b17782eae80aadee26237658a6b4808afb5c11a22" dependencies = [ "curl-sys", "libc", "openssl-probe", "openssl-sys", "schannel", - "socket2 0.4.3", + "socket2 0.4.9", "winapi 0.3.9", ] [[package]] name = "curl-sys" -version = "0.4.52+curl-7.81.0" +version = "0.4.66+curl-8.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14b8c2d1023ea5fded5b7b892e4b8e95f70038a421126a056761a84246a28971" +checksum = "70c44a72e830f0e40ad90dda8a6ab6ed6314d39776599a58a2e5e37fbc6db5b9" dependencies = [ "cc", "libc", @@ -802,7 +843,7 @@ dependencies = [ "openssl-sys", "pkg-config", "vcpkg", - "winapi 0.3.9", + "windows-sys", ] [[package]] @@ -814,20 +855,26 @@ dependencies = [ "config 0.10.1", "crossbeam-queue", "num_cpus", - "serde 1.0.135", + "serde 1.0.188", "tokio", ] [[package]] name = "debugid" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f91cf5a8c2f2097e2a32627123508635d47ce10563d999ec1a95addf08b502ba" +checksum = "d6ee87af31d84ef885378aebca32be3d682b0e0dc119d5b4860a2c5bb5046730" dependencies = [ - "serde 1.0.135", + "serde 1.0.188", "uuid", ] +[[package]] +name = "deranged" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2696e8a945f658fd14dc3b87242e6b80cd0f36ff04ea560fa39082368847946" + [[package]] name = "derive_more" version = "0.99.17" @@ -838,7 +885,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.0", - "syn", + "syn 1.0.109", ] [[package]] @@ -862,7 +909,7 @@ checksum = "45f5098f628d02a7a0f68ddba586fb61e80edec3bdc1be3b921f4ceec60858d3" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -935,48 +982,48 @@ checksum = "7f3f119846c823f9eafcf953a8f6ffb6ed69bf6240883261a7f13b634579a51f" dependencies = [ "lazy_static", "regex", - "serde 1.0.135", - "strsim", + "serde 1.0.188", + "strsim 0.10.0", ] [[package]] name = "dyn-clone" -version = "1.0.5" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21e50f3adc76d6a43f5ed73b698a87d0760ca74617f60f7c3b879003536fdd28" +checksum = "23d2f3407d9a573d666de4b5bdf10569d73ca9478087346697dcbae6244bfbcd" [[package]] name = "either" -version = "1.6.1" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" +checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" [[package]] name = "encoding_rs" -version = "0.8.30" +version = "0.8.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7896dc8abb250ffdda33912550faa54c88ec8b998dec0b2c55ab224921ce11df" +checksum = "7268b386296a025e474d5140678f75d6de9493ae55a5d709eeb9dd08149945e1" dependencies = [ "cfg-if 1.0.0", ] [[package]] name = "enum-as-inner" -version = "0.3.3" +version = "0.3.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c5f0096a91d210159eceb2ff5e1c4da18388a170e1e3ce948aac9c8fdbbf595" +checksum = "570d109b813e904becc80d8d5da38376818a143348413f7149f1340fe04754d4" dependencies = [ "heck", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "env_logger" -version = "0.9.0" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b2cf0344971ee6c64c31be0d530793fba457d322dfec2810c453d0ef228f9c3" +checksum = "a12e6657c4c97ebab115a42dcee77225f7f482cdd841cf7088c657a42e9e00e7" dependencies = [ "atty", "humantime", @@ -987,22 +1034,22 @@ dependencies = [ [[package]] name = "erased-serde" -version = "0.3.18" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56047058e1ab118075ca22f9ecd737bcc961aa3566a3019cb71388afa280bd8a" +checksum = "6c138974f9d5e7fe373eb04df7cae98833802ae4b11c24ac7039a21d5af4b26c" dependencies = [ - "serde 1.0.135", + "serde 1.0.188", ] [[package]] name = "errno" -version = "0.2.8" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f639046355ee4f37944e44f60642c6f3a7efa3cf6b78c78a0d989a8ce6c396a1" +checksum = "136526188508e25c6fef639d7927dfb3e0e3084488bf202267829cf7fc23dbdd" dependencies = [ "errno-dragonfly", "libc", - "winapi 0.3.9", + "windows-sys", ] [[package]] @@ -1033,29 +1080,24 @@ checksum = "aa4da3c766cd7a0db8242e326e9e4e081edd567072893ed320008189715366a4" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", "synstructure", ] [[package]] name = "fastrand" -version = "1.7.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3fcf0cee53519c866c09b5de1f6c56ff9d647101f81c1964fa632e148896cdf" -dependencies = [ - "instant", -] +checksum = "6999dc1837253364c2ebb0704ba97994bd874e8f195d665c50b7548f6ea92764" [[package]] name = "flate2" -version = "1.0.22" +version = "1.0.27" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e6988e897c1c9c485f43b47a529cef42fde0547f9d8d41a7062518f1d8fc53f" +checksum = "c6c98ee8095e9d1dcbf2fcc6d95acccb90d1c81db1e44725c6a984b1dbdfb010" dependencies = [ - "cfg-if 1.0.0", "crc32fast", - "libc", - "miniz_oxide 0.4.4", + "miniz_oxide", ] [[package]] @@ -1081,12 +1123,11 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "form_urlencoded" -version = "1.0.1" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fc25a87fa4fd2094bffb06925852034d90a17f0d1e05197d4956d3555752191" +checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" dependencies = [ - "matches", - "percent-encoding 2.1.0", + "percent-encoding 2.3.0", ] [[package]] @@ -1113,9 +1154,9 @@ checksum = "3a471a38ef8ed83cd6e40aa59c1ffe17db6855c18e3604d9c4ed8c08ebc28678" [[package]] name = "futures" -version = "0.3.19" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28560757fe2bb34e79f907794bb6b22ae8b0e5c669b638a1132f2592b19035b4" +checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40" dependencies = [ "futures-channel", "futures-core", @@ -1128,9 +1169,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.19" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba3dda0b6588335f360afc675d0564c17a77a2bda81ca178a4b6081bd86c7f0b" +checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" dependencies = [ "futures-core", "futures-sink", @@ -1138,15 +1179,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.19" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0c8ff0461b82559810cdccfde3215c3f373807f5e5232b71479bff7bb2583d7" +checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" 
[[package]] name = "futures-executor" -version = "0.3.19" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29d6d2ff5bb10fb95c85b8ce46538a2e5f5e7fdc755623a7d4529ab8a4ed9d2a" +checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0" dependencies = [ "futures-core", "futures-task", @@ -1155,38 +1196,38 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.19" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1f9d34af5a1aac6fb380f735fe510746c38067c5bf16c7fd250280503c971b2" +checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" [[package]] name = "futures-macro" -version = "0.3.19" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6dbd947adfffb0efc70599b3ddcf7b5597bb5fa9e245eb99f62b3a5f7bb8bd3c" +checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.37", ] [[package]] name = "futures-sink" -version = "0.3.19" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3055baccb68d74ff6480350f8d6eb8fcfa3aa11bdc1a1ae3afdd0514617d508" +checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" [[package]] name = "futures-task" -version = "0.3.19" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ee7c6485c30167ce4dfb83ac568a849fe53274c831081476ee13e0dce1aad72" +checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" [[package]] name = "futures-util" -version = "0.3.19" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b5cf40b47a271f77a8b1bec03ca09044d99d2372c0de244e66430761127164" +checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" dependencies = [ "futures 0.1.31", "futures-channel", @@ 
-1196,7 +1237,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project-lite 0.2.8", + "pin-project-lite 0.2.13", "pin-utils", "slab", ] @@ -1212,9 +1253,9 @@ dependencies = [ [[package]] name = "generic-array" -version = "0.14.5" +version = "0.14.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd48d33ec7f05fbfa152300fdad764757cbded343c1aa1cff2fbaf4134851803" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", "version_check", @@ -1233,57 +1274,58 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.4" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "418d37c8b1d42553c93648be529cb70f920d3baf8ef469b74b9638df426e0b4c" +checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" dependencies = [ "cfg-if 1.0.0", "libc", - "wasi 0.10.2+wasi-snapshot-preview1", + "wasi 0.11.0+wasi-snapshot-preview1", ] [[package]] name = "gimli" -version = "0.26.1" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78cc372d058dcf6d5ecd98510e7fbc9e5aec4d21de70f65fea8fecebcd881bd4" +checksum = "6fb8d784f27acf97159b40fc4db5ecd8aa23b9ad5ef69cdd136d3bc80665f0c0" [[package]] name = "glob" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" +checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "google-cloud-rust-raw" -version = "0.11.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0f0936883f3207fa424f69fc218956a5778de6fb847ea3c491f1dc47a39fb26" +checksum = "1887de8efd052e35bf75e4ed4bc78de35b69447a4b6d9f2e7ede52579512f318" dependencies = [ - "futures 0.3.19", + "futures 0.3.28", "grpcio", "protobuf", ] [[package]] name = "grpcio" -version = 
"0.9.1" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24d99e00eed7e0a04ee2705112e7cfdbe1a3cc771147f22f016a8cd2d002187b" +checksum = "609832ca501baeb662dc81932fda9ed83f5d058f4b899a807ba222ce696f430a" dependencies = [ - "futures 0.3.19", + "futures-executor", + "futures-util", "grpcio-sys", "libc", "log", - "parking_lot", + "parking_lot 0.12.1", "protobuf", ] [[package]] name = "grpcio-sys" -version = "0.9.1+1.38.0" +version = "0.12.1+1.46.5-patched" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9447d1a926beeef466606cc45717f80897998b548e7dc622873d453e1ecb4be4" +checksum = "cf625d1803b6f44203f0428ddace847fb4994def5c803fc8a7a2f18fb3daec62" dependencies = [ "bindgen", "boringssl-src", @@ -1317,9 +1359,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.11.2" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" [[package]] name = "hawk" @@ -1333,17 +1375,14 @@ dependencies = [ "once_cell", "ring", "thiserror", - "url 2.2.2", + "url 2.4.1", ] [[package]] name = "heck" -version = "0.3.3" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c" -dependencies = [ - "unicode-segmentation", -] +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" [[package]] name = "hermit-abi" @@ -1354,6 +1393,12 @@ dependencies = [ "libc", ] +[[package]] +name = "hermit-abi" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d77f7ec81a6d05a3abb01ab6eb7590f6083d08449fe5a1c8b1e620283546ccb7" + [[package]] name = "hex" version = "0.4.3" @@ -1380,6 +1425,15 @@ dependencies = [ "digest", ] +[[package]] +name = "home" +version = "0.5.5" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5444c27eef6923071f7ebcc33e3444508466a76f7a2b93da00ed6e19f30c1ddb" +dependencies = [ + "windows-sys", +] + [[package]] name = "hostname" version = "0.3.1" @@ -1393,13 +1447,13 @@ dependencies = [ [[package]] name = "http" -version = "0.2.6" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31f4c6746584866f0feabcc69893c5b51beef3831656a968ed7ae254cdc4fd03" +checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" dependencies = [ - "bytes 1.1.0", + "bytes 1.5.0", "fnv", - "itoa 1.0.1", + "itoa 1.0.9", ] [[package]] @@ -1414,9 +1468,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.5.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acd94fdbe1d4ff688b67b04eee2e17bd50995534a61539e45adfefb45e5e5503" +checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" [[package]] name = "httpdate" @@ -1446,7 +1500,7 @@ dependencies = [ "httparse", "httpdate", "itoa 0.4.8", - "pin-project 1.0.10", + "pin-project 1.1.3", "socket2 0.3.19", "tokio", "tower-service", @@ -1483,6 +1537,29 @@ dependencies = [ "tokio-tls", ] +[[package]] +name = "iana-time-zone" +version = "0.1.57" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2fad5b825842d2b38bd206f3e81d6957625fd7f0a361e345c30e01a0ae2dd613" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "wasm-bindgen", + "windows", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" +dependencies = [ + "cc", +] + [[package]] name = "idna" version = "0.1.5" @@ -1505,6 +1582,16 @@ dependencies = [ "unicode-normalization", ] +[[package]] +name = "idna" +version = "0.4.0" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" +dependencies = [ + "unicode-bidi", + "unicode-normalization", +] + [[package]] name = "if_chain" version = "1.0.2" @@ -1527,9 +1614,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.8.0" +version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "282a6247722caba404c065016bbfa522806e51714c34f5dfc3e4a3a46fcb4223" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ "autocfg", "hashbrown", @@ -1554,7 +1641,7 @@ dependencies = [ "proc-macro-hack", "proc-macro2", "quote", - "syn", + "syn 1.0.109", "unindent", ] @@ -1567,16 +1654,6 @@ dependencies = [ "cfg-if 1.0.0", ] -[[package]] -name = "io-lifetimes" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfa919a82ea574332e2de6e74b4c36e74d41982b335080fa59d4ef31be20fdf3" -dependencies = [ - "libc", - "windows-sys 0.45.0", -] - [[package]] name = "iovec" version = "0.1.4" @@ -1600,9 +1677,20 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.3.1" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68f2d64f2edebec4ce84ad108148e67e1064789bee435edc5b60ad398714a3a9" +checksum = "28b29a3cd74f0f4598934efe3aeba42bae0eb4680554128851ebbecb02af14e6" + +[[package]] +name = "is-terminal" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" +dependencies = [ + "hermit-abi 0.3.3", + "rustix", + "windows-sys", +] [[package]] name = "itoa" @@ -1612,15 +1700,15 @@ checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" [[package]] name = "itoa" -version = "1.0.1" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1aab8fc367588b89dcee83ab0fd66b72b50b72fa1904d7095045ace2b0c81c35" +checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" [[package]] name = "js-sys" -version = "0.3.56" +version = "0.3.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a38fc24e30fd564ce974c02bf1d337caddff65be6cc4735a1f7eab22a7440f04" +checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" dependencies = [ "wasm-bindgen", ] @@ -1668,15 +1756,15 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.139" +version = "0.2.148" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "201de327520df007757c1f0adce6e827fe8562fbc28bfd9c15571c66ca1f5f79" +checksum = "9cdc71e17332e86d2e1d38c1f99edcb6288ee11b815fb1a4b049eaa2114d369b" [[package]] name = "libloading" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efbc0f03f9a775e9f6aed295c6a1ba2253c5757a9e03d55c6caa46a681abcddd" +checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f" dependencies = [ "cfg-if 1.0.0", "winapi 0.3.9", @@ -1684,9 +1772,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.3" +version = "1.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de5435b8549c16d423ed0c03dbaafe57cf6c3344744f1242520d59c9d8ecec66" +checksum = "d97137b25e321a73eef1418d1d5d2eda4d77e12813f8e6dead84bc52c5870a7b" dependencies = [ "cc", "libc", @@ -1696,33 +1784,31 @@ dependencies = [ [[package]] name = "linked-hash-map" -version = "0.5.4" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" +checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "linux-raw-sys" -version = "0.1.4" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4" +checksum = "1a9bad9f94746442c783ca431b22403b519cd7fbeed0533fdd6328b2f2212128" [[package]] name = "lock_api" -version = "0.4.5" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712a4d093c9976e24e7dbca41db895dabcbac38eb5f4045393d17a95bdfb1109" +checksum = "c1cc9717a20b1bb222f333e6a92fd32f7d8a18ddc5a3191a11af45dcbf4dcd16" dependencies = [ + "autocfg", "scopeguard", ] [[package]] name = "log" -version = "0.4.14" +version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" -dependencies = [ - "cfg-if 1.0.0", -] +checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" [[package]] name = "lru-cache" @@ -1741,9 +1827,9 @@ checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" [[package]] name = "matches" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f" +checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" [[package]] name = "maybe-uninit" @@ -1753,9 +1839,9 @@ checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" [[package]] name = "memchr" -version = "2.4.1" +version = "2.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" +checksum = "8f232d6ef707e1956a43342693d2a31e72989554d58299d7a88738cc95b0d35c" [[package]] name = "migrations_internals" @@ -1775,40 +1861,36 @@ dependencies = [ "migrations_internals", "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "mime" -version = "0.3.16" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "mime_guess" -version = "2.0.3" +version = "2.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2684d4c2e97d99848d30b324b00c8fcc7e5c897b7cbb5819b09e7c90e8baf212" +checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef" dependencies = [ "mime", "unicase", ] [[package]] -name = "miniz_oxide" -version = "0.4.4" +name = "minimal-lexical" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b" -dependencies = [ - "adler", - "autocfg", -] +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.5.1" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2b29bd4bc3f33391105ebee3589c19197c4271e3e5a9ec9bfe8127eeff8f082" +checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" dependencies = [ "adler", ] @@ -1875,9 +1957,9 @@ dependencies = [ [[package]] name = "mysqlclient-sys" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e9637d93448044078aaafea7419aed69d301b4a12bcc4aa0ae856eb169bef85" +checksum = "f61b381528ba293005c42a409dd73d034508e273bf90481f17ec2e964a6e969b" dependencies = [ "pkg-config", "vcpkg", @@ -1885,9 +1967,9 @@ dependencies = [ [[package]] name = "native-tls" -version = "0.2.8" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48ba9f7719b5a0f42f338907614285fb5fd70e53858141f69898a1fb7203b24d" +checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" dependencies = [ "lazy_static", "libc", @@ -1903,9 +1985,9 @@ dependencies = [ [[package]] name = "net2" -version = "0.2.37" 
+version = "0.2.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "391630d12b68002ae1e25e8f974306474966550ad82dac6886fb8910c19568ae" +checksum = "b13b648036a2339d06de780866fbdfda0dde886de7b3af2ddeba8b14f4ee34ac" dependencies = [ "cfg-if 0.1.10", "libc", @@ -1914,9 +1996,9 @@ dependencies = [ [[package]] name = "nom" -version = "5.1.2" +version = "5.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffb4262d26ed83a1c0a33a38fe2bb15797329c85770da05e6b828ddb782627af" +checksum = "08959a387a676302eebf4ddbcbc611da04285579f76f88ee0506c63b1a61dd4b" dependencies = [ "lexical-core", "memchr", @@ -1924,13 +2006,13 @@ dependencies = [ ] [[package]] -name = "num-integer" -version = "0.1.44" +name = "nom" +version = "7.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" dependencies = [ - "autocfg", - "num-traits 0.2.14", + "memchr", + "minimal-lexical", ] [[package]] @@ -1939,51 +2021,51 @@ version = "0.1.43" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "92e5113e9fd4cc14ded8e499429f396a20f98c772a47cc8622a736e1ec843c31" dependencies = [ - "num-traits 0.2.14", + "num-traits 0.2.16", ] [[package]] name = "num-traits" -version = "0.2.14" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" +checksum = "f30b0abd723be7e2ffca1272140fac1a2f084c77ec3e123c192b66af1ee9e6c2" dependencies = [ "autocfg", ] [[package]] name = "num_cpus" -version = "1.13.1" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1" +checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" dependencies = [ - 
"hermit-abi", + "hermit-abi 0.3.3", "libc", ] [[package]] name = "num_threads" -version = "0.1.2" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71a1eb3a36534514077c1e079ada2fb170ef30c47d203aa6916138cf882ecd52" +checksum = "2819ce041d2ee131036f4fc9d6ae7ae125a3a40e97ba64d04fe799ad9dabbb44" dependencies = [ "libc", ] [[package]] name = "object" -version = "0.28.3" +version = "0.32.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40bec70ba014595f99f7aa110b84331ffe1ee9aece7fe6f387cc7e3ecda4d456" +checksum = "9cf5f9dd3933bd50a9e1f149ec995f39ae2c496d31fd772c1fd45ebc27e902b0" dependencies = [ "memchr", ] [[package]] name = "once_cell" -version = "1.9.0" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da32515d9f6e6e489d7bc9d84c71b060db7247dc035bbe44eac88cf87486d8d5" +checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" [[package]] name = "opaque-debug" @@ -2008,13 +2090,13 @@ dependencies = [ [[package]] name = "openssl-macros" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b501e44f11665960c7e7fcf062c7d96a14ade4aa98116c004b2e37b5be7d736c" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.37", ] [[package]] @@ -2043,23 +2125,46 @@ checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" dependencies = [ "instant", "lock_api", - "parking_lot_core", + "parking_lot_core 0.8.6", +] + +[[package]] +name = "parking_lot" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" +dependencies = [ + "lock_api", + "parking_lot_core 0.9.8", ] [[package]] name = "parking_lot_core" -version = "0.8.5" +version = "0.8.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d76e8e1493bcac0d2766c42737f34458f1c8c50c0d23bcb24ea953affb273216" +checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc" dependencies = [ "cfg-if 1.0.0", "instant", "libc", - "redox_syscall", + "redox_syscall 0.2.16", "smallvec", "winapi 0.3.9", ] +[[package]] +name = "parking_lot_core" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447" +dependencies = [ + "cfg-if 1.0.0", + "libc", + "redox_syscall 0.3.5", + "smallvec", + "windows-targets", +] + [[package]] name = "paste" version = "0.1.18" @@ -2093,48 +2198,48 @@ checksum = "31010dd2e1ac33d5b46a5b413495239882813e0369f8ed8a5e266f173602f831" [[package]] name = "percent-encoding" -version = "2.1.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" +checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" [[package]] name = "pin-project" -version = "0.4.29" +version = "0.4.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9615c18d31137579e9ff063499264ddc1278e7b1982757ebc111028c4d1dc909" +checksum = "3ef0f924a5ee7ea9cbcea77529dba45f8a9ba9f622419fe3386ca581a3ae9d5a" dependencies = [ - "pin-project-internal 0.4.29", + "pin-project-internal 0.4.30", ] [[package]] name = "pin-project" -version = "1.0.10" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58ad3879ad3baf4e44784bc6a718a8698867bb991f8ce24d1bcbe2cfb4c3a75e" +checksum = "fda4ed1c6c173e3fc7a83629421152e01d7b1f9b7f65fb301e490e8cfc656422" dependencies = [ - "pin-project-internal 1.0.10", + "pin-project-internal 1.1.3", ] [[package]] name = "pin-project-internal" -version = "0.4.29" +version = "0.4.30" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "044964427019eed9d49d9d5bbce6047ef18f37100ea400912a9fa4a3523ab12a" +checksum = "851c8d0ce9bebe43790dedfc86614c23494ac9f423dd618d3a61fc693eafe61e" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] name = "pin-project-internal" -version = "1.0.10" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "744b6f092ba29c3650faf274db506afd39944f48420f6c86b17cfe0ee1cb36bb" +checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.37", ] [[package]] @@ -2145,9 +2250,9 @@ checksum = "257b64915a082f7811703966789728173279bdebb956b143dbcd23f6f970a777" [[package]] name = "pin-project-lite" -version = "0.2.8" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e280fbe77cc62c91527259e9442153f4688736748d24660126286329742b4c6c" +checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" [[package]] name = "pin-utils" @@ -2157,15 +2262,15 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkg-config" -version = "0.3.24" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58893f751c9b0412871a09abd62ecd2a00298c6c83befa223ef98c52aef40cbe" +checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" [[package]] name = "ppv-lite86" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872" +checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "proc-macro-error" @@ -2176,7 +2281,7 @@ dependencies = [ "proc-macro-error-attr", "proc-macro2", "quote", - "syn", + "syn 1.0.109", "version_check", ] @@ -2193,24 +2298,24 @@ dependencies = 
[ [[package]] name = "proc-macro-hack" -version = "0.5.19" +version = "0.5.20+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" +checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" [[package]] name = "proc-macro2" -version = "1.0.36" +version = "1.0.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7342d5883fbccae1cc37a2353b09c87c9b0f3afd73f5fb9bba687a1f733b029" +checksum = "3d433d9f1a3e8c1263d9456598b16fec66f4acc9a74dacffd35c7bb09b3a1328" dependencies = [ - "unicode-xid", + "unicode-ident", ] [[package]] name = "protobuf" -version = "2.25.2" +version = "2.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47c327e191621a2158159df97cdbc2e7074bb4e940275e35abf38eb3d2595754" +checksum = "106dd99e98437432fed6519dedecfade6a06a73bb7b2a1e019fdd2bee5778d94" [[package]] name = "pyo3" @@ -2221,7 +2326,7 @@ dependencies = [ "cfg-if 1.0.0", "indoc", "libc", - "parking_lot", + "parking_lot 0.11.2", "paste", "pyo3-build-config", "pyo3-macros", @@ -2245,7 +2350,7 @@ checksum = "fc0bc5215d704824dfddddc03f93cb572e1155c68b6761c37005e1c288808ea8" dependencies = [ "pyo3-macros-backend", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -2257,7 +2362,7 @@ dependencies = [ "proc-macro2", "pyo3-build-config", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -2268,21 +2373,21 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" -version = "1.0.15" +version = "1.0.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "864d3e96a899863136fc6e99f3d7cae289dafe43bf2c5ac19b70df7210c0a145" +checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" dependencies = [ "proc-macro2", ] [[package]] name = "r2d2" -version = "0.8.9" +version = "0.8.10" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "545c5bc2b880973c9c10e4067418407a0ccaa3091781d1671d46eb35107cb26f" +checksum = "51de85fb3fb6524929c8a2eb85e6b6d363de4e8c48f9e2c2eac4944abc181c93" dependencies = [ "log", - "parking_lot", + "parking_lot 0.12.1", "scheduled-thread-pool", ] @@ -2307,7 +2412,7 @@ checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", "rand_chacha 0.3.1", - "rand_core 0.6.3", + "rand_core 0.6.4", ] [[package]] @@ -2327,7 +2432,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core 0.6.3", + "rand_core 0.6.4", ] [[package]] @@ -2341,11 +2446,11 @@ dependencies = [ [[package]] name = "rand_core" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.4", + "getrandom 0.2.10", ] [[package]] @@ -2368,28 +2473,50 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.2.10" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8383f39639269cde97d255a32bdb68c047337295414940c68bdd30c2e13203ff" +checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" +dependencies = [ + "bitflags 1.3.2", +] + +[[package]] +name = "redox_syscall" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" dependencies = [ "bitflags 1.3.2", ] [[package]] name = "redox_users" -version = "0.4.0" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"528532f3d801c87aec9def2add9ca802fe569e44a544afe633765267840abe64" +checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ - "getrandom 0.2.4", - "redox_syscall", + "getrandom 0.2.10", + "redox_syscall 0.2.16", + "thiserror", ] [[package]] name = "regex" -version = "1.5.5" +version = "1.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a11647b6b25ff05a515cb92c365cec08801e83423a235b51e231e1808747286" +checksum = "697061221ea1b4a94a624f67d0ae2bfe4e22b8a17b6a192afb11046542cc8c47" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata", + "regex-syntax", +] + +[[package]] +name = "regex-automata" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2f401f4955220693b56f8ec66ee9c78abffd8d1c4f23dc41a23839eb88f0795" dependencies = [ "aho-corasick", "memchr", @@ -2398,9 +2525,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.25" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b" +checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" [[package]] name = "reqwest" @@ -2408,7 +2535,7 @@ version = "0.10.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0718f81a8e14c4dbb3b34cf23dc6aaf9ab8a0dfec160c534b3dbca1aaa21f47c" dependencies = [ - "base64 0.13.0", + "base64 0.13.1", "bytes 0.5.6", "encoding_rs", "futures-core", @@ -2425,16 +2552,16 @@ dependencies = [ "mime", "mime_guess", "native-tls", - "percent-encoding 2.1.0", - "pin-project-lite 0.2.8", + "percent-encoding 2.3.0", + "pin-project-lite 0.2.13", "rustls", - "serde 1.0.135", + "serde 1.0.188", "serde_json", "serde_urlencoded", "tokio", "tokio-rustls", "tokio-tls", - "url 2.2.2", + "url 2.4.1", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", @@ -2475,9 +2602,9 @@ checksum = 
"3e52c148ef37f8c375d49d5a73aa70713125b7f19095948a923f80afdeb22ec2" [[package]] name = "rustc-demangle" -version = "0.1.21" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" +checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" [[package]] name = "rustc-hash" @@ -2500,21 +2627,20 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.4", + "semver 1.0.18", ] [[package]] name = "rustix" -version = "0.36.9" +version = "0.38.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd5c6ff11fecd55b40746d1995a02f2eb375bf8c00d192d521ee09f42bef37bc" +checksum = "747c788e9ce8e92b12cd485c49ddf90723550b654b32508f979b71a7b1ecda4f" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.4.0", "errno", - "io-lifetimes", "libc", "linux-raw-sys", - "windows-sys 0.45.0", + "windows-sys", ] [[package]] @@ -2532,15 +2658,15 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.6" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2cc38e8fa666e2de3c4aba7edeb5ffc5246c1c2ed0e3d17e560aeeba736b23f" +checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" [[package]] name = "ryu" -version = "1.0.9" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73b4b750c782965c211b42f022f59af1fbceabdd026623714f104152f1ec149f" +checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" [[package]] name = "same-file" @@ -2553,28 +2679,27 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.19" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" +checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" dependencies = [ - "lazy_static", - "winapi 0.3.9", + "windows-sys", ] [[package]] name = "scheduled-thread-pool" -version = "0.2.5" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc6f74fd1204073fa02d5d5d68bec8021be4c38690b61264b2fdb48083d0e7d7" +checksum = "3cbc66816425a074528352f5789333ecff06ca41b36b0b0efdfbb29edc391a19" dependencies = [ - "parking_lot", + "parking_lot 0.12.1", ] [[package]] name = "scopeguard" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "sct" @@ -2588,9 +2713,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.5.0" +version = "2.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d09d3c15d814eda1d6a836f2f2b56a6abc1446c8a34351cb3180d3db92ffe4ce" +checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" dependencies = [ "bitflags 1.3.2", "core-foundation", @@ -2601,9 +2726,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.5.0" +version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e90dd10c41c6bfc633da6e0c659bd25d31e0791e5974ac42970267d59eba87f7" +checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" dependencies = [ "core-foundation-sys", "libc", @@ -2620,9 +2745,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.4" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "568a8e6258aa33c13358f81fd834adb854c6f7c9468520910a9b1e8fac068012" +checksum = 
"b0293b4b29daaf487284529cc2f5675b8e57c61f70167ba415a463651fd6a918" [[package]] name = "semver-parser" @@ -2715,10 +2840,10 @@ checksum = "87b41bac48a3586249431fa9efb88cd1414c3455117eb57c02f5bda9634e158d" dependencies = [ "chrono", "debugid", - "serde 1.0.135", + "serde 1.0.188", "serde_json", "thiserror", - "url 2.2.2", + "url 2.4.1", "uuid", ] @@ -2730,9 +2855,9 @@ checksum = "9dad3f759919b92c3068c696c15c3d17238234498bbdcc80f2c469606f948ac8" [[package]] name = "serde" -version = "1.0.135" +version = "1.0.188" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cf9235533494ea2ddcdb794665461814781c53f19d87b76e571a1c35acbad2b" +checksum = "cf9e0fcba69a370eed61bcf2b728575f726b50b55cba78064753d708ddc7549e" dependencies = [ "serde_derive", ] @@ -2751,24 +2876,24 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.135" +version = "1.0.188" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8dcde03d87d4c973c04be249e7d8f0b35db1c848c487bd43032808e59dd8328d" +checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.37", ] [[package]] name = "serde_json" -version = "1.0.78" +version = "1.0.107" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d23c1ba4cf0efd44be32017709280b32d1cea5c3f1275c3b6d9e8bc54f758085" +checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65" dependencies = [ - "itoa 1.0.1", + "itoa 1.0.9", "ryu", - "serde 1.0.135", + "serde 1.0.188", ] [[package]] @@ -2778,9 +2903,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" dependencies = [ "form_urlencoded", - "itoa 1.0.1", + "itoa 1.0.9", "ryu", - "serde 1.0.135", + "serde 1.0.188", ] [[package]] @@ -2826,15 +2951,15 @@ dependencies = [ [[package]] name = "shlex" -version = "0.1.1" +version = "1.2.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fdf1b9db47230893d76faad238fd6097fd6d6a9245cd7a4d90dbd639536bbd2" +checksum = "a7cee0529a6d40f580e7a5e6c495c8fbfe21b7b52795ed4bb5e62cdf92bc6380" [[package]] name = "signal-hook-registry" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" +checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" dependencies = [ "libc", ] @@ -2851,9 +2976,12 @@ dependencies = [ [[package]] name = "slab" -version = "0.4.5" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9def91fd1e018fe007022791f865d0ccc9b3a0d5001e01aabb8b40e46000afb5" +checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" +dependencies = [ + "autocfg", +] [[package]] name = "slog" @@ -2866,9 +2994,9 @@ dependencies = [ [[package]] name = "slog-async" -version = "2.7.0" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "766c59b252e62a34651412870ff55d8c4e6d04df19b43eecb2703e417b097ffe" +checksum = "72c8038f898a2c79507940990f05386455b3a317d8f18d4caea7cbc3d5096b84" dependencies = [ "crossbeam-channel", "slog", @@ -2898,7 +3026,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f400f1c5db96f1f52065e8931ca0c524cceb029f7537c9e6d5424488ca137ca0" dependencies = [ "chrono", - "serde 1.0.135", + "serde 1.0.188", "serde_json", "slog", ] @@ -2935,14 +3063,14 @@ dependencies = [ "slog", "term", "thread_local", - "time 0.3.9", + "time 0.3.28", ] [[package]] name = "smallvec" -version = "1.8.0" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2dd574626839106c320a323308629dcb1acfc96e32a8cba364ddc61ac23ee83" +checksum = "942b4a808e05215192e39f4ab80813e599068285906cc91aa64f923db842bd5a" [[package]] name = "socket2" @@ 
-2957,9 +3085,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.4.3" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f82496b90c36d70af5fcd482edaa2e0bd16fade569de1330405fecbbdac736b" +checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" dependencies = [ "libc", "winapi 0.3.9", @@ -3008,9 +3136,9 @@ checksum = "c87a60a40fccc84bef0652345bbbbbe20a605bf5d0ce81719fc476f5c03b50ef" dependencies = [ "proc-macro2", "quote", - "serde 1.0.135", + "serde 1.0.188", "serde_derive", - "syn", + "syn 1.0.109", ] [[package]] @@ -3022,11 +3150,11 @@ dependencies = [ "base-x", "proc-macro2", "quote", - "serde 1.0.135", + "serde 1.0.188", "serde_derive", "serde_json", "sha1", - "syn", + "syn 1.0.109", ] [[package]] @@ -3035,6 +3163,12 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" +[[package]] +name = "strsim" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" + [[package]] name = "strsim" version = "0.10.0" @@ -3049,13 +3183,24 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "syn" -version = "1.0.86" +version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a65b3f4ffa0092e9887669db0eae07941f023991ab58ea44da8fe8e2d511c6b" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ "proc-macro2", "quote", - "unicode-xid", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7303ef2c05cd654186cb250d29049a24840ca25d2747c25c0381c8d9e2f582e8" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", ] [[package]] @@ -3065,44 +3210,28 @@ 
dependencies = [ "actix-cors", "actix-http", "actix-rt", - "actix-service", "actix-web", "async-trait", "backtrace", - "base64 0.13.0", - "bb8", - "bytes 1.1.0", + "base64 0.21.4", "cadence", "chrono", - "deadpool", - "diesel", - "diesel_logger", - "diesel_migrations", "docopt", "dyn-clone", "env_logger", - "futures 0.3.19", - "google-cloud-rust-raw", - "grpcio", + "futures 0.3.28", "hawk", "hex", "hmac", "hostname", - "http", "lazy_static", - "log", "mime", - "mockito", - "num_cpus", - "protobuf", - "pyo3", "rand 0.8.5", "regex", "reqwest", - "scheduled-thread-pool", "sentry", "sentry-backtrace", - "serde 1.0.135", + "serde 1.0.188", "serde_derive", "serde_json", "sha2", @@ -3116,15 +3245,16 @@ dependencies = [ "syncserver-common", "syncserver-db-common", "syncserver-settings", + "syncstorage-db", "syncstorage-settings", "thiserror", - "time 0.3.9", + "time 0.3.28", + "tokenserver-auth", "tokenserver-common", + "tokenserver-db", "tokenserver-settings", "tokio", - "url 2.2.2", "urlencoding", - "uuid", "validator", "validator_derive", "woothee", @@ -3134,30 +3264,29 @@ dependencies = [ name = "syncserver-common" version = "0.13.7" dependencies = [ + "actix-web", + "cadence", + "futures 0.3.28", "hkdf", + "serde 1.0.188", + "serde_json", "sha2", + "slog", + "slog-scope", ] [[package]] name = "syncserver-db-common" version = "0.13.7" dependencies = [ - "async-trait", "backtrace", - "chrono", "deadpool", "diesel", "diesel_migrations", - "futures 0.3.19", - "grpcio", - "hostname", + "futures 0.3.28", "http", - "lazy_static", - "serde 1.0.135", - "serde_json", "syncserver-common", "thiserror", - "url 2.2.2", ] [[package]] @@ -3166,12 +3295,77 @@ version = "0.13.7" dependencies = [ "config 0.11.0", "num_cpus", - "serde 1.0.135", + "serde 1.0.188", "slog-scope", "syncserver-common", "syncstorage-settings", "tokenserver-settings", - "url 2.2.2", + "url 2.4.1", +] + +[[package]] +name = "syncstorage-db" +version = "0.13.7" +dependencies = [ + "async-trait", + "cadence", + 
"env_logger", + "futures 0.3.28", + "hostname", + "lazy_static", + "log", + "rand 0.8.5", + "slog-scope", + "syncserver-common", + "syncserver-db-common", + "syncserver-settings", + "syncstorage-db-common", + "syncstorage-mysql", + "syncstorage-settings", + "syncstorage-spanner", + "tokio", +] + +[[package]] +name = "syncstorage-db-common" +version = "0.13.7" +dependencies = [ + "async-trait", + "backtrace", + "chrono", + "diesel", + "diesel_migrations", + "futures 0.3.28", + "http", + "lazy_static", + "serde 1.0.188", + "serde_json", + "syncserver-common", + "syncserver-db-common", + "thiserror", +] + +[[package]] +name = "syncstorage-mysql" +version = "0.13.7" +dependencies = [ + "async-trait", + "backtrace", + "base64 0.21.4", + "diesel", + "diesel_logger", + "diesel_migrations", + "env_logger", + "futures 0.3.28", + "http", + "slog-scope", + "syncserver-common", + "syncserver-db-common", + "syncserver-settings", + "syncstorage-db-common", + "syncstorage-settings", + "thiserror", + "url 2.4.1", ] [[package]] @@ -3179,9 +3373,35 @@ name = "syncstorage-settings" version = "0.13.7" dependencies = [ "rand 0.8.5", - "serde 1.0.135", + "serde 1.0.188", "syncserver-common", - "time 0.3.9", + "time 0.3.28", +] + +[[package]] +name = "syncstorage-spanner" +version = "0.13.7" +dependencies = [ + "async-trait", + "backtrace", + "cadence", + "deadpool", + "env_logger", + "futures 0.3.28", + "google-cloud-rust-raw", + "grpcio", + "http", + "log", + "protobuf", + "slog-scope", + "syncserver-common", + "syncserver-db-common", + "syncstorage-db-common", + "syncstorage-settings", + "thiserror", + "tokio", + "url 2.4.1", + "uuid", ] [[package]] @@ -3192,7 +3412,7 @@ checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", "unicode-xid", ] @@ -3204,15 +3424,15 @@ checksum = "f764005d11ee5f36500a149ace24e00e3da98b0158b3e2d53a7495660d3f4d60" [[package]] name = "tempfile" -version = "3.4.0" 
+version = "3.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af18f7ae1acd354b992402e9ec5864359d693cd8a79dcbef59f76891701c1e95" +checksum = "cb94d2f3cc536af71caac6b6fcebf65860b347e7ce0cc9ebe8f70d3e521054ef" dependencies = [ "cfg-if 1.0.0", "fastrand", - "redox_syscall", + "redox_syscall 0.3.5", "rustix", - "windows-sys 0.42.0", + "windows-sys", ] [[package]] @@ -3228,39 +3448,49 @@ dependencies = [ [[package]] name = "termcolor" -version = "1.1.2" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dfed899f0eb03f32ee8c6a0aabdb8a7949659e3466561fc0adf54e26d88c5f4" +checksum = "6093bad37da69aab9d123a8091e4be0aa4a03e4d601ec641c327398315f62b64" dependencies = [ "winapi-util", ] [[package]] -name = "thiserror" -version = "1.0.30" +name = "textwrap" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "854babe52e4df1653706b98fcfc05843010039b406875930a70e4d9644e5c417" +checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" +dependencies = [ + "unicode-width", +] + +[[package]] +name = "thiserror" +version = "1.0.48" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d6d7a740b8a666a7e828dd00da9c0dc290dff53154ea77ac109281de90589b7" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.30" +version = "1.0.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa32fd3f627f367fe16f893e2597ae3c05020f8bba2666a4e6ea73d377e5714b" +checksum = "49922ecae66cc8a249b77e68d1d0623c1b2c514f0060c27cdc68bd62a1219d35" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.37", ] [[package]] name = "thread_local" -version = "1.1.4" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180" +checksum = 
"3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" dependencies = [ + "cfg-if 1.0.0", "once_cell", ] @@ -3273,16 +3503,6 @@ dependencies = [ "num_cpus", ] -[[package]] -name = "time" -version = "0.1.43" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" -dependencies = [ - "libc", - "winapi 0.3.9", -] - [[package]] name = "time" version = "0.2.27" @@ -3300,16 +3520,25 @@ dependencies = [ [[package]] name = "time" -version = "0.3.9" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2702e08a7a860f005826c6815dcac101b19b5eb330c27fe4a5928fec1d20ddd" +checksum = "17f6bb557fd245c28e6411aa56b6403c689ad95061f50e4be16c274e70a17e48" dependencies = [ - "itoa 1.0.1", + "deranged", + "itoa 1.0.9", "libc", "num_threads", - "time-macros 0.2.4", + "serde 1.0.188", + "time-core", + "time-macros 0.2.14", ] +[[package]] +name = "time-core" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" + [[package]] name = "time-macros" version = "0.1.1" @@ -3322,9 +3551,12 @@ dependencies = [ [[package]] name = "time-macros" -version = "0.2.4" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42657b1a6f4d817cda8e7a0ace261fe0cc946cf3a80314390b22cc61ae080792" +checksum = "1a942f44339478ef67935ab2bbaec2fb0322496cf3cbe84b261e06ac3814c572" +dependencies = [ + "time-core", +] [[package]] name = "time-macros-impl" @@ -3336,23 +3568,41 @@ dependencies = [ "proc-macro2", "quote", "standback", - "syn", + "syn 1.0.109", ] [[package]] name = "tinyvec" -version = "1.5.1" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c1c1d5a42b6245520c249549ec267180beaffcc0615401ac8e31853d4b6d8d2" +checksum = 
"87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" dependencies = [ "tinyvec_macros", ] [[package]] name = "tinyvec_macros" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" + +[[package]] +name = "tokenserver-auth" +version = "0.13.7" +dependencies = [ + "async-trait", + "dyn-clone", + "futures 0.3.28", + "mockito", + "pyo3", + "reqwest", + "serde 1.0.188", + "serde_json", + "syncserver-common", + "tokenserver-common", + "tokenserver-settings", + "tokio", +] [[package]] name = "tokenserver-common" @@ -3360,18 +3610,42 @@ version = "0.13.7" dependencies = [ "actix-web", "backtrace", - "serde 1.0.135", + "serde 1.0.188", "serde_json", "syncserver-common", - "syncserver-db-common", "thiserror", ] +[[package]] +name = "tokenserver-db" +version = "0.13.7" +dependencies = [ + "async-trait", + "backtrace", + "diesel", + "diesel_logger", + "diesel_migrations", + "env_logger", + "futures 0.3.28", + "http", + "serde 1.0.188", + "serde_derive", + "serde_json", + "slog-scope", + "syncserver-common", + "syncserver-db-common", + "syncserver-settings", + "thiserror", + "tokenserver-common", + "tokenserver-settings", + "tokio", +] + [[package]] name = "tokenserver-settings" version = "0.13.7" dependencies = [ - "serde 1.0.135", + "serde 1.0.188", "tokenserver-common", ] @@ -3406,7 +3680,7 @@ checksum = "e44da00bfc73a25f814cd8d7e57a68a5c31b74b3152a0a1d1f590c97ed06265a" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.109", ] [[package]] @@ -3447,38 +3721,38 @@ dependencies = [ [[package]] name = "toml" -version = "0.5.8" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" +checksum = 
"f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" dependencies = [ - "serde 1.0.135", + "serde 1.0.188", ] [[package]] name = "tower-service" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" +checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" [[package]] name = "tracing" -version = "0.1.29" +version = "0.1.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "375a639232caf30edfc78e8d89b2d4c375515393e7af7e16f01cd96917fb2105" +checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" dependencies = [ "cfg-if 1.0.0", "log", - "pin-project-lite 0.2.8", + "pin-project-lite 0.2.13", "tracing-core", ] [[package]] name = "tracing-core" -version = "0.1.21" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f4ed65637b8390770814083d20756f87bfa2c21bf2f110babdc5438351746e4" +checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" dependencies = [ - "lazy_static", + "once_cell", ] [[package]] @@ -3487,7 +3761,7 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" dependencies = [ - "pin-project 1.0.10", + "pin-project 1.1.3", "tracing", ] @@ -3500,7 +3774,7 @@ dependencies = [ "async-trait", "cfg-if 1.0.0", "enum-as-inner", - "futures 0.3.19", + "futures 0.3.28", "idna 0.2.3", "lazy_static", "log", @@ -3508,7 +3782,7 @@ dependencies = [ "smallvec", "thiserror", "tokio", - "url 2.2.2", + "url 2.4.1", ] [[package]] @@ -3518,7 +3792,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "710f593b371175db53a26d0b38ed2978fafb9e9e8d3868b1acd753ea18df0ceb" dependencies = [ "cfg-if 0.1.10", - "futures 0.3.19", + "futures 0.3.28", "ipconfig", "lazy_static", "log", 
@@ -3532,15 +3806,15 @@ dependencies = [ [[package]] name = "try-lock" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" +checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" [[package]] name = "typenum" -version = "1.15.0" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" +checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" [[package]] name = "uname" @@ -3553,45 +3827,51 @@ dependencies = [ [[package]] name = "unicase" -version = "2.6.0" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" +checksum = "f7d2d4dafb69621809a81864c9c1b864479e1235c0dd4e199924b9742439ed89" dependencies = [ "version_check", ] [[package]] name = "unicode-bidi" -version = "0.3.7" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a01404663e3db436ed2746d9fefef640d868edae3cceb81c3b8d5732fda678f" +checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" + +[[package]] +name = "unicode-ident" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" [[package]] name = "unicode-normalization" -version = "0.1.19" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54590932941a9e9266f0832deed84ebe1bf2e4c9e4a3554d393d18f5e854bf9" +checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" dependencies = [ "tinyvec", ] [[package]] -name = "unicode-segmentation" -version = "1.8.0" +name = "unicode-width" +version = "0.1.11" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8895849a949e7845e06bd6dc1aa51731a103c42707010a5b591c0038fb73385b" +checksum = "e51733f11c9c4f72aa0c160008246859e340b00807569a0da0e7a1079b27ba85" [[package]] name = "unicode-xid" -version = "0.2.2" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" +checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" [[package]] name = "unindent" -version = "0.1.7" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f14ee04d9415b52b3aeab06258a3f07093182b88ba0f9b8d203f211a7a7d41c7" +checksum = "e1766d682d402817b5ac4490b3c3002d91dfa0d22812f341609f97b08757359c" [[package]] name = "untrusted" @@ -3612,22 +3892,21 @@ dependencies = [ [[package]] name = "url" -version = "2.2.2" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a507c383b2d33b5fc35d1861e77e6b383d158b2da5e14fe51b83dfedf6fd578c" +checksum = "143b538f18257fac9cad154828a57c6bf5157e1aa604d4816b5995bf6de87ae5" dependencies = [ "form_urlencoded", - "idna 0.2.3", - "matches", - "percent-encoding 2.1.0", - "serde 1.0.135", + "idna 0.4.0", + "percent-encoding 2.3.0", + "serde 1.0.188", ] [[package]] name = "urlencoding" -version = "2.1.0" +version = "2.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68b90931029ab9b034b300b797048cf23723400aa757e8a2bfb9d748102f9821" +checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da" [[package]] name = "uuid" @@ -3635,8 +3914,8 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" dependencies = [ - "getrandom 0.2.4", - "serde 1.0.135", + "getrandom 0.2.10", + "serde 1.0.188", ] [[package]] @@ -3648,10 +3927,10 @@ dependencies = [ "idna 
0.2.3", "lazy_static", "regex", - "serde 1.0.135", + "serde 1.0.188", "serde_derive", "serde_json", - "url 2.2.2", + "url 2.4.1", "validator_types", ] @@ -3667,7 +3946,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn", + "syn 1.0.109", "validator_types", ] @@ -3678,7 +3957,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ded9d97e1d42327632f5f3bae6403c04886e2de3036261ef42deebd931a6a291" dependencies = [ "proc-macro2", - "syn", + "syn 1.0.109", ] [[package]] @@ -3687,6 +3966,12 @@ version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" +[[package]] +name = "vec_map" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" + [[package]] name = "version_check" version = "0.9.4" @@ -3695,22 +3980,20 @@ checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "walkdir" -version = "2.3.2" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56" +checksum = "d71d857dc86794ca4c280d616f7da00d2dbfd8cd788846559a6813e6aa4b54ee" dependencies = [ "same-file", - "winapi 0.3.9", "winapi-util", ] [[package]] name = "want" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" dependencies = [ - "log", "try-lock", ] @@ -3722,42 +4005,42 @@ checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" [[package]] name = "wasi" -version = "0.10.2+wasi-snapshot-preview1" +version = "0.11.0+wasi-snapshot-preview1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.79" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25f1af7423d8588a3d840681122e72e6a24ddbcb3f0ec385cac0d12d24256c06" +checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" dependencies = [ "cfg-if 1.0.0", - "serde 1.0.135", + "serde 1.0.188", "serde_json", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.79" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b21c0df030f5a177f3cba22e9bc4322695ec43e7257d865302900290bcdedca" +checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" dependencies = [ "bumpalo", - "lazy_static", "log", + "once_cell", "proc-macro2", "quote", - "syn", + "syn 2.0.37", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.29" +version = "0.4.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2eb6ec270a31b1d3c7e266b999739109abce8b6c87e4b31fcfcd788b65267395" +checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -3767,9 +4050,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.79" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f4203d69e40a52ee523b2529a773d5ffc1dc0071801c87b3d270b471b80ed01" +checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -3777,28 +4060,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.79" +version = "0.2.87" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa8a30d46208db204854cadbb5d4baf5fcf8071ba5bf48190c3e59937962ebc" +checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.37", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.79" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d958d035c4438e28c70e4321a2911302f10135ce78a9c7834c0cab4123d06a2" +checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" [[package]] name = "web-sys" -version = "0.3.56" +version = "0.3.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c060b319f29dd25724f09a2ba1418f142f539b2be99fbf4d2d5a8f7330afb8eb" +checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" dependencies = [ "js-sys", "wasm-bindgen", @@ -3823,6 +4106,18 @@ dependencies = [ "webpki", ] +[[package]] +name = "which" +version = "4.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" +dependencies = [ + "either", + "home", + "once_cell", + "rustix", +] + [[package]] name = "widestring" version = "0.4.3" @@ -3859,9 +4154,9 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" +checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596" dependencies = [ "winapi 0.3.9", ] @@ -3873,34 +4168,28 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] -name = "windows-sys" -version = "0.42.0" +name = 
"windows" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" +checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", + "windows-targets", ] [[package]] name = "windows-sys" -version = "0.45.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ "windows-targets", ] [[package]] name = "windows-targets" -version = "0.42.1" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e2522491fbfcd58cc84d47aeb2958948c4b8982e9a2d8a2a35bbaed431390e7" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" dependencies = [ "windows_aarch64_gnullvm", "windows_aarch64_msvc", @@ -3913,45 +4202,45 @@ dependencies = [ [[package]] name = "windows_aarch64_gnullvm" -version = "0.42.1" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c9864e83243fdec7fc9c5444389dcbbfd258f745e7853198f365e3c4968a608" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_msvc" -version = "0.42.1" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c8b1b673ffc16c47a9ff48570a9d85e25d265735c503681332589af6253c6c7" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_i686_gnu" -version = "0.42.1" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"de3887528ad530ba7bdbb1faa8275ec7a1155a45ffa57c37993960277145d640" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_msvc" -version = "0.42.1" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf4d1122317eddd6ff351aa852118a2418ad4214e6613a50e0191f7004372605" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_x86_64_gnu" -version = "0.42.1" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1040f221285e17ebccbc2591ffdc2d44ee1f9186324dd3e84e99ac68d699c45" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnullvm" -version = "0.42.1" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "628bfdf232daa22b0d64fdb62b09fcc36bb01f05a3939e20ab73aaf9470d0463" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_msvc" -version = "0.42.1" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "447660ad36a13288b1db4d4248e857b510e8c3a225c822ba4fb748c0aafecffd" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "winreg" diff --git a/Cargo.toml b/Cargo.toml index 56eced59..fd15d1bc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,16 +1,66 @@ [workspace] resolver = "2" members = [ - "syncserver-settings", "syncserver-common", "syncserver-db-common", + "syncserver-settings", + "syncstorage-db", + "syncstorage-db-common", + "syncstorage-mysql", "syncstorage-settings", + "syncstorage-spanner", + "tokenserver-auth", "tokenserver-common", + "tokenserver-db", "tokenserver-settings", "syncserver", ] default-members = ["syncserver"] +[workspace.package] +version = "0.13.7" +authors = [ + "Ben Bangert ", + "Phil Jenvey ", + 
"Mozilla Services Engineering ", +] +edition = "2021" +license = "MPL-2.0" + +[workspace.dependencies] +base64 = "0.21" +cadence = "0.29" +backtrace = "0.3" +chrono = "0.4" +docopt = "1.1" +env_logger = "0.9" +futures = { version = "0.3", features = ["compat"] } +hex = "0.4" +http = "0.2" +lazy_static = "1.4" +protobuf = "=2.25.2" # pin to 2.25.2 to prevent side updating +rand = "0.8" +regex = "1.4" +sentry = { version = "0.19", features = [ + "with_curl_transport", +] } # pin to 0.19 until on-prem sentry server is updated +sentry-backtrace = "0.19" +serde = "1.0" +serde_derive = "1.0" +serde_json = { version = "1.0", features = ["arbitrary_precision"] } +sha2 = "0.9" +slog = { version = "2.5", features = [ + "max_level_info", + "release_max_level_info", + "dynamic-keys", +] } +slog-async = "2.5" +slog-envlogger = "2.2.0" +slog-mozlog-json = "0.1" +slog-scope = "4.3" +slog-stdlog = "4.1" +slog-term = "2.6" + [profile.release] # Enables line numbers in Sentry reporting debug = 1 diff --git a/Dockerfile b/Dockerfile index 38f608b2..eb5b2ebe 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,9 +1,36 @@ # NOTE: Ensure builder's Rust version matches CI's in .circleci/config.yml -FROM rust:1.72-buster as builder +FROM lukemathwalker/cargo-chef:0.1.62-rust-1.72-buster as chef WORKDIR /app -ADD . /app -ENV PATH=$PATH:/root/.cargo/bin -# temp removed --no-install-recommends due to CI docker build issue + +FROM chef AS planner +COPY . . 
+RUN cargo chef prepare --recipe-path recipe.json + +FROM chef AS cacher +ARG DATABASE_BACKEND=spanner +COPY --from=planner /app/mysql_pubkey.asc mysql_pubkey.asc + +# cmake is required to build grpcio-sys for Spanner builds +RUN \ + echo "deb https://repo.mysql.com/apt/debian/ buster mysql-8.0" >> /etc/apt/sources.list && \ + # mysql_pubkey.asc from: + # https://dev.mysql.com/doc/refman/8.0/en/checking-gpg-signature.html + # related: + # https://dev.mysql.com/doc/mysql-apt-repo-quick-guide/en/#repo-qg-apt-repo-manual-setup + apt-key adv --import mysql_pubkey.asc && \ + apt-get -q update && \ + apt-get -q install -y --no-install-recommends libmysqlclient-dev cmake + +COPY --from=planner /app/recipe.json recipe.json +RUN cargo chef cook --release --no-default-features --features=syncstorage-db/$DATABASE_BACKEND --recipe-path recipe.json + +FROM chef as builder +ARG DATABASE_BACKEND=spanner + +COPY . /app +COPY --from=cacher /app/target /app/target +COPY --from=cacher $CARGO_HOME /app/$CARGO_HOME + RUN \ echo "deb https://repo.mysql.com/apt/debian/ buster mysql-8.0" >> /etc/apt/sources.list && \ # mysql_pubkey.asc from: @@ -16,11 +43,13 @@ RUN \ pip3 install -r requirements.txt && \ rm -rf /var/lib/apt/lists/* +ENV PATH=$PATH:/root/.cargo/bin + RUN \ cargo --version && \ rustc --version && \ - cargo install --path ./syncserver --locked --root /app && \ - cargo install --path ./syncserver --locked --root /app --bin purge_ttl + cargo install --path ./syncserver --no-default-features --features=syncstorage-db/$DATABASE_BACKEND --locked --root /app && \ + if [ "$DATABASE_BACKEND" = "spanner" ] ; then cargo install --path ./syncstorage-spanner --locked --root /app --bin purge_ttl ; fi FROM debian:buster-slim WORKDIR /app @@ -56,7 +85,7 @@ COPY --from=builder /app/tools/spanner /app/tools/spanner COPY --from=builder /app/tools/integration_tests /app/tools/integration_tests COPY --from=builder /app/tools/tokenserver /app/tools/tokenserver COPY --from=builder 
/app/scripts/prepare-spanner.sh /app/scripts/prepare-spanner.sh -COPY --from=builder /app/syncserver/src/db/spanner/schema.ddl /app/schema.ddl +COPY --from=builder /app/syncstorage-spanner/src/schema.ddl /app/schema.ddl RUN chmod +x /app/scripts/prepare-spanner.sh RUN pip3 install -r /app/tools/integration_tests/requirements.txt diff --git a/Makefile b/Makefile index 39629617..a0540f6b 100644 --- a/Makefile +++ b/Makefile @@ -10,9 +10,16 @@ PATH_TO_SYNC_SPANNER_KEYS = `pwd`/service-account.json # https://github.com/mozilla-services/server-syncstorage PATH_TO_GRPC_CERT = ../server-syncstorage/local/lib/python2.7/site-packages/grpc/_cython/_credentials/roots.pem -clippy: +SRC_ROOT = $(shell pwd) +PYTHON_SITE_PACKGES = $(shell $(SRC_ROOT)/venv/bin/python -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())") + +clippy_mysql: # Matches what's run in circleci - cargo clippy --workspace --all-targets --all-features -- -D warnings + cargo clippy --workspace --all-targets --no-default-features --features=syncstorage-db/mysql -- -D warnings + +clippy_spanner: + # Matches what's run in circleci + cargo clippy --workspace --all-targets --no-default-features --features=syncstorage-db/spanner -- -D warnings clean: cargo clean @@ -40,14 +47,28 @@ python: python3 -m venv venv venv/bin/python -m pip install -r requirements.txt -run: python - PATH="./venv/bin:$(PATH)" RUST_LOG=debug RUST_BACKTRACE=full cargo run -- --config config/local.toml +run_mysql: python + PATH="./venv/bin:$(PATH)" \ + # See https://github.com/PyO3/pyo3/issues/1741 for discussion re: why we need to set the + # below env var + PYTHONPATH=$(PYTHON_SITE_PACKGES) \ + RUST_LOG=debug \ + RUST_BACKTRACE=full \ + cargo run --no-default-features --features=syncstorage-db/mysql -- --config config/local.toml -run_spanner: - GOOGLE_APPLICATION_CREDENTIALS=$(PATH_TO_SYNC_SPANNER_KEYS) GRPC_DEFAULT_SSL_ROOTS_FILE_PATH=$(PATH_TO_GRPC_CERT) make run +run_spanner: python + 
GOOGLE_APPLICATION_CREDENTIALS=$(PATH_TO_SYNC_SPANNER_KEYS) \ + GRPC_DEFAULT_SSL_ROOTS_FILE_PATH=$(PATH_TO_GRPC_CERT) \ + # See https://github.com/PyO3/pyo3/issues/1741 for discussion re: why we need to set the + # below env var + PYTHONPATH=$(PYTHON_SITE_PACKGES) \ + PATH="./venv/bin:$(PATH)" \ + RUST_LOG=debug \ + RUST_BACKTRACE=full \ + cargo run --no-default-features --features=syncstorage-db/spanner -- --config config/local.toml test: SYNC_SYNCSTORAGE__DATABASE_URL=mysql://sample_user:sample_password@localhost/syncstorage_rs \ SYNC_TOKENSERVER__DATABASE_URL=mysql://sample_user:sample_password@localhost/tokenserver_rs \ RUST_TEST_THREADS=1 \ - cargo test + cargo test --workspace diff --git a/README.md b/README.md index d9f63b0d..3abcff0b 100644 --- a/README.md +++ b/README.md @@ -36,6 +36,7 @@ Mozilla Sync Storage built with [Rust](https://rust-lang.org). - make - pkg-config - [Rust stable](https://rustup.rs) +- python 3.9+ - MySQL 5.7 (or compatible) * libmysqlclient (`brew install mysql` on macOS, `apt install libmysqlclient-dev` on Ubuntu, `apt install libmariadb-dev-compat` on Debian) @@ -48,7 +49,11 @@ are missing `libcurl4-openssl-dev`. 1. Follow the instructions below to use either MySQL or Spanner as your DB. 2. Now `cp config/local.example.toml config/local.toml`. Open `config/local.toml` and make sure you have the desired settings configured. For a complete list of available configuration options, check out [docs/config.md](docs/config.md). -3. `make run` starts the server in debug mode, using your new `local.toml` file for config options. Or, simply `cargo run` with your own config options provided as env vars. +3. To start a local server in debug mode, run either: + - `make run_mysql` if using MySQL or, + - `make run_spanner` if using spanner. + + The above starts the server in debug mode, using your new `local.toml` file for config options. Or, simply `cargo run` with your own config options provided as env vars. 4. 
Visit `http://localhost:8000/__heartbeat__` to make sure the server is running. ### MySQL @@ -57,13 +62,19 @@ Durable sync needs only a valid mysql DSN in order to set up connections to a My `mysql://_user_:_password_@_host_/_database_` -To setup a fresh MySQL DB and user: (`mysql -u root`): +To setup a fresh MySQL DB and user: + +- First make sure that you have a MySQL server running, to do that run: `mysqld` +- Then, run the following to launch a mysql shell `mysql -u root` +- Finally, run each of the following SQL statements ```sql CREATE USER "sample_user"@"localhost" IDENTIFIED BY "sample_password"; CREATE DATABASE syncstorage_rs; +CREATE DATABASE tokenserver_rs; GRANT ALL PRIVILEGES on syncstorage_rs.* to sample_user@localhost; +GRANT ALL PRIVILEGES on tokenserver_rs.* to sample_user@localhost; ``` ### Spanner diff --git a/config/local.example.toml b/config/local.example.toml index 7e2a687c..f845b5c9 100644 --- a/config/local.example.toml +++ b/config/local.example.toml @@ -1,22 +1,21 @@ -# Example MySQL DSN: -database_url = "mysql://sample_user:sample_password@localhost/syncstorage_rs" - -# Example Spanner DSN: -# database_url="spanner://projects/SAMPLE_GCP_PROJECT/instances/SAMPLE_SPANNER_INSTANCE/databases/SAMPLE_SPANNER_DB" - -"limits.max_total_records"=1666 # See issues #298/#333 master_secret = "INSERT_SECRET_KEY_HERE" # removing this line will default to moz_json formatted logs (which is preferred for production envs) human_logs = 1 +# Example Syncstorage settings: +# Example MySQL DSN: +syncstorage.database_url = "mysql://sample_user:sample_password@localhost/syncstorage_rs" +# Example Spanner DSN: +# database_url="spanner://projects/SAMPLE_GCP_PROJECT/instances/SAMPLE_SPANNER_INSTANCE/databases/SAMPLE_SPANNER_DB" # enable quota limits -enable_quota = 0 +syncstorage.enable_quota = 0 # set the quota limit to 2GB. 
# max_quota_limit = 200000000 +syncstorage.enabled = true +syncstorage.limits.max_total_records = 1666 # See issues #298/#333 # Example Tokenserver settings: -disable_syncstorage = false tokenserver.database_url = "mysql://sample_user:sample_password@localhost/tokenserver_rs" tokenserver.enabled = true tokenserver.fxa_email_domain = "api-accounts.stage.mozaws.net" diff --git a/docker-compose.e2e.mysql.yaml b/docker-compose.e2e.mysql.yaml index bb5d6e99..d2d42195 100644 --- a/docker-compose.e2e.mysql.yaml +++ b/docker-compose.e2e.mysql.yaml @@ -14,7 +14,7 @@ services: sleep 15; /app/bin/syncserver; " - e2e-tests: + mysql-e2e-tests: depends_on: - mock-fxa-server - syncserver diff --git a/docker-compose.e2e.spanner.yaml b/docker-compose.e2e.spanner.yaml index b431276b..e0a25a8b 100644 --- a/docker-compose.e2e.spanner.yaml +++ b/docker-compose.e2e.spanner.yaml @@ -14,7 +14,7 @@ services: sleep 15; /app/bin/syncserver; " - e2e-tests: + spanner-e2e-tests: depends_on: - mock-fxa-server - syncserver diff --git a/syncserver-common/Cargo.toml b/syncserver-common/Cargo.toml index 2303e3fd..5ae7fb57 100644 --- a/syncserver-common/Cargo.toml +++ b/syncserver-common/Cargo.toml @@ -1,8 +1,18 @@ [package] name = "syncserver-common" -version = "0.13.7" -edition = "2021" +version.workspace=true +license.workspace=true +authors.workspace=true +edition.workspace=true [dependencies] +cadence.workspace=true +futures.workspace=true +sha2.workspace=true +serde.workspace=true +serde_json.workspace=true +slog.workspace=true +slog-scope.workspace=true + +actix-web = "3" hkdf = "0.11" -sha2 = "0.9" diff --git a/syncserver-common/src/lib.rs b/syncserver-common/src/lib.rs index 6d836ec9..1bbc9e9b 100644 --- a/syncserver-common/src/lib.rs +++ b/syncserver-common/src/lib.rs @@ -1,6 +1,19 @@ +#[macro_use] +extern crate slog_scope; + +mod metrics; + +use std::{ + fmt, + sync::atomic::{AtomicU64, Ordering}, +}; + +use actix_web::{error::BlockingError, web}; use hkdf::Hkdf; use sha2::Sha256; +pub use 
metrics::{metrics_from_opts, MetricError, Metrics}; + // header statics must be lower case, numbers and symbols per the RFC spec. This reduces chance of error. pub static X_LAST_MODIFIED: &str = "x-last-modified"; pub static X_WEAVE_TIMESTAMP: &str = "x-weave-timestamp"; @@ -56,3 +69,43 @@ pub trait InternalError { /// Constructs an internal error with the given error message. fn internal_error(message: String) -> Self; } + +/// A threadpool on which callers can spawn non-CPU-bound tasks that block their thread (this is +/// mostly useful for running I/O tasks). `BlockingThreadpool` intentionally does not implement +/// `Clone`: `Arc`s are not used internally, so a `BlockingThreadpool` should be instantiated once +/// and shared by passing around `Arc`s. +#[derive(Debug, Default)] +pub struct BlockingThreadpool { + spawned_tasks: AtomicU64, +} + +impl BlockingThreadpool { + /// Runs a function as a task on the blocking threadpool. + /// + /// WARNING: Spawning a blocking task through means other than calling this method will + /// result in inaccurate threadpool metrics being reported. If you want to spawn a task on + /// the blocking threadpool, you **must** use this function. 
+ pub async fn spawn(&self, f: F) -> Result + where + F: FnOnce() -> Result + Send + 'static, + T: Send + 'static, + E: fmt::Debug + Send + InternalError + 'static, + { + self.spawned_tasks.fetch_add(1, Ordering::Relaxed); + + let result = web::block(f).await.map_err(|e| match e { + BlockingError::Error(e) => e, + BlockingError::Canceled => { + E::internal_error("Blocking threadpool operation canceled".to_owned()) + } + }); + + self.spawned_tasks.fetch_sub(1, Ordering::Relaxed); + + result + } + + pub fn active_threads(&self) -> u64 { + self.spawned_tasks.load(Ordering::Relaxed) + } +} diff --git a/syncserver/src/server/metrics.rs b/syncserver-common/src/metrics.rs similarity index 76% rename from syncserver/src/server/metrics.rs rename to syncserver-common/src/metrics.rs index 4b29f555..9dfb5bd5 100644 --- a/syncserver/src/server/metrics.rs +++ b/syncserver-common/src/metrics.rs @@ -1,19 +1,14 @@ use std::collections::HashMap; use std::net::UdpSocket; +use std::sync::Arc; use std::time::Instant; -use actix_web::{dev::Payload, web::Data, FromRequest, HttpRequest}; use cadence::{ BufferedUdpMetricSink, Counted, Metric, NopMetricSink, QueuingMetricSink, StatsdClient, Timed, }; -use futures::future; -use futures::future::Ready; use slog::{Key, Record, KV}; -use crate::error::ApiError; -use crate::server::ServerState; -use crate::tokenserver; -use crate::web::tags::Taggable; +pub use cadence::MetricError; #[derive(Debug, Clone)] pub struct MetricTimer { @@ -24,7 +19,7 @@ pub struct MetricTimer { #[derive(Debug, Default, Clone)] pub struct Metrics { - pub client: Option, + pub client: Option>, pub tags: HashMap, pub timer: Option, } @@ -58,55 +53,6 @@ impl Drop for Metrics { } } -impl FromRequest for Metrics { - type Config = (); - type Error = (); - type Future = Ready>; - - fn from_request(req: &HttpRequest, _: &mut Payload) -> Self::Future { - let client = { - let syncstorage_metrics = req - .app_data::>() - .map(|state| state.metrics.clone()); - let 
tokenserver_metrics = req - .app_data::>() - .map(|state| state.metrics.clone()); - - syncstorage_metrics.or(tokenserver_metrics) - }; - - if client.is_none() { - warn!("⚠️ metric error: No App State"); - } - - future::ok(Metrics { - client: client.as_deref().cloned(), - tags: req.get_tags(), - timer: None, - }) - } -} - -impl From<&StatsdClient> for Metrics { - fn from(client: &StatsdClient) -> Self { - Metrics { - client: Some(client.clone()), - tags: HashMap::default(), - timer: None, - } - } -} - -impl From<&ServerState> for Metrics { - fn from(state: &ServerState) -> Self { - Metrics { - client: Some(*state.metrics.clone()), - tags: HashMap::default(), - timer: None, - } - } -} - impl Metrics { pub fn sink() -> StatsdClient { StatsdClient::builder("", NopMetricSink).build() @@ -114,7 +60,7 @@ impl Metrics { pub fn noop() -> Self { Self { - client: Some(Self::sink()), + client: Some(Arc::new(Self::sink())), timer: None, tags: HashMap::default(), } @@ -191,7 +137,7 @@ pub fn metrics_from_opts( label: &str, host: Option<&str>, port: u16, -) -> Result { +) -> Result, MetricError> { let builder = if let Some(statsd_host) = host { let socket = UdpSocket::bind("0.0.0.0:0")?; socket.set_nonblocking(true)?; @@ -203,11 +149,23 @@ pub fn metrics_from_opts( } else { StatsdClient::builder(label, NopMetricSink) }; - Ok(builder - .with_error_handler(|err| { - warn!("⚠️ Metric send error: {:?}", err); - }) - .build()) + Ok(Arc::new( + builder + .with_error_handler(|err| { + warn!("⚠️ Metric send error: {:?}", err); + }) + .build(), + )) +} + +impl From<&Arc> for Metrics { + fn from(client: &Arc) -> Self { + Metrics { + client: Some(client.clone()), + tags: HashMap::default(), + timer: None, + } + } } /// A newtype used solely to allow us to implement KV on HashMap. 
diff --git a/syncserver-db-common/Cargo.toml b/syncserver-db-common/Cargo.toml index 3ee7425b..6b04ca41 100644 --- a/syncserver-db-common/Cargo.toml +++ b/syncserver-db-common/Cargo.toml @@ -1,28 +1,20 @@ [package] name = "syncserver-db-common" -version = "0.13.7" -edition = "2021" +version.workspace=true +license.workspace=true +authors.workspace=true +edition.workspace=true [dependencies] -async-trait = "0.1.40" -backtrace = "0.3.61" -chrono = "0.4" +backtrace.workspace=true +futures.workspace=true +http.workspace=true + # Pin to 0.5 for now, to keep it under tokio 0.2 (issue977). # Fix for #803 (deadpool#92) points to our fork for now #deadpool = "0.5" # pin to 0.5 deadpool = { git = "https://github.com/mozilla-services/deadpool", branch = "deadpool-v0.5.2-issue92" } diesel = { version = "1.4", features = ["mysql", "r2d2"] } diesel_migrations = { version = "1.4.0", features = ["mysql"] } -# Some versions of OpenSSL 1.1.1 conflict with grpcio's built-in boringssl which can cause -# syncstorage to either fail to either compile, or start. In those cases, try -# `cargo build --features grpcio/openssl ...` -grpcio = { version = "0.9" } -hostname = "0.3.1" -http = "0.2.6" -futures = { version = "0.3", features = ["compat"] } -lazy_static = "1.4.0" -serde = "1.0" -serde_json = { version = "1.0", features = ["arbitrary_precision"] } syncserver-common = { path = "../syncserver-common" } thiserror = "1.0.26" -url = "2.1" diff --git a/syncserver-db-common/src/error.rs b/syncserver-db-common/src/error.rs index 1a2d7a66..e49e89fd 100644 --- a/syncserver-db-common/src/error.rs +++ b/syncserver-db-common/src/error.rs @@ -2,153 +2,58 @@ use std::fmt; use backtrace::Backtrace; use http::StatusCode; -use syncserver_common::{from_error, impl_fmt_display, InternalError}; +use syncserver_common::{from_error, impl_fmt_display}; use thiserror::Error; +/// Error specific to any MySQL database backend. 
These errors are not related to the syncstorage +/// or tokenserver application logic; rather, they are lower-level errors arising from diesel. #[derive(Debug)] -pub struct DbError { - kind: DbErrorKind, +pub struct MysqlError { + kind: MysqlErrorKind, pub status: StatusCode, pub backtrace: Backtrace, } #[derive(Debug, Error)] -pub enum DbErrorKind { +enum MysqlErrorKind { #[error("A database error occurred: {}", _0)] DieselQuery(#[from] diesel::result::Error), #[error("An error occurred while establishing a db connection: {}", _0)] DieselConnection(#[from] diesel::result::ConnectionError), - #[error("A database error occurred: {}", _0)] - SpannerGrpc(#[from] grpcio::Error), - - #[error("Spanner data load too large: {}", _0)] - SpannerTooLarge(String), - #[error("A database pool error occurred: {}", _0)] Pool(diesel::r2d2::PoolError), #[error("Error migrating the database: {}", _0)] Migration(diesel_migrations::RunMigrationsError), - - #[error("Specified collection does not exist")] - CollectionNotFound, - - #[error("Specified bso does not exist")] - BsoNotFound, - - #[error("Specified batch does not exist")] - BatchNotFound, - - #[error("An attempt at a conflicting write")] - Conflict, - - #[error("Database integrity error: {}", _0)] - Integrity(String), - - #[error("Invalid database URL: {}", _0)] - InvalidUrl(String), - - #[error("Unexpected error: {}", _0)] - Internal(String), - - #[error("User over quota")] - Quota, - - #[error("Connection expired")] - Expired, } -impl DbError { - pub fn internal(msg: &str) -> Self { - DbErrorKind::Internal(msg.to_owned()).into() - } - - pub fn is_sentry_event(&self) -> bool { - !matches!(&self.kind, DbErrorKind::Conflict) - } - - pub fn metric_label(&self) -> Option { - match &self.kind { - DbErrorKind::Conflict => Some("storage.conflict".to_owned()), - _ => None, - } - } - - pub fn is_collection_not_found(&self) -> bool { - matches!(self.kind, DbErrorKind::CollectionNotFound) - } - - pub fn is_conflict(&self) -> bool { - 
matches!(self.kind, DbErrorKind::Conflict) - } - - pub fn is_quota(&self) -> bool { - matches!(self.kind, DbErrorKind::Quota) - } - - pub fn is_bso_not_found(&self) -> bool { - matches!(self.kind, DbErrorKind::BsoNotFound) - } - - pub fn is_batch_not_found(&self) -> bool { - matches!(self.kind, DbErrorKind::BatchNotFound) - } -} - -impl From for DbError { - fn from(kind: DbErrorKind) -> Self { - let status = match kind { - DbErrorKind::CollectionNotFound | DbErrorKind::BsoNotFound => StatusCode::NOT_FOUND, - // Matching the Python code here (a 400 vs 404) - DbErrorKind::BatchNotFound | DbErrorKind::SpannerTooLarge(_) => StatusCode::BAD_REQUEST, - // NOTE: the protocol specification states that we should return a - // "409 Conflict" response here, but clients currently do not - // handle these respones very well: - // * desktop bug: https://bugzilla.mozilla.org/show_bug.cgi?id=959034 - // * android bug: https://bugzilla.mozilla.org/show_bug.cgi?id=959032 - DbErrorKind::Conflict => StatusCode::SERVICE_UNAVAILABLE, - DbErrorKind::Quota => StatusCode::FORBIDDEN, - _ => StatusCode::INTERNAL_SERVER_ERROR, - }; - +impl From for MysqlError { + fn from(kind: MysqlErrorKind) -> Self { Self { kind, - status, + status: StatusCode::INTERNAL_SERVER_ERROR, backtrace: Backtrace::new(), } } } -impl_fmt_display!(DbError, DbErrorKind); +impl_fmt_display!(MysqlError, MysqlErrorKind); -from_error!(diesel::result::Error, DbError, DbErrorKind::DieselQuery); +from_error!( + diesel::result::Error, + MysqlError, + MysqlErrorKind::DieselQuery +); from_error!( diesel::result::ConnectionError, - DbError, - DbErrorKind::DieselConnection + MysqlError, + MysqlErrorKind::DieselConnection ); -from_error!(grpcio::Error, DbError, |inner: grpcio::Error| { - // Convert ABORTED (typically due to a transaction abort) into 503s - match inner { - grpcio::Error::RpcFailure(ref status) | grpcio::Error::RpcFinished(Some(ref status)) - if status.code() == grpcio::RpcStatusCode::ABORTED => - { - 
DbErrorKind::Conflict - } - _ => DbErrorKind::SpannerGrpc(inner), - } -}); -from_error!(diesel::r2d2::PoolError, DbError, DbErrorKind::Pool); +from_error!(diesel::r2d2::PoolError, MysqlError, MysqlErrorKind::Pool); from_error!( diesel_migrations::RunMigrationsError, - DbError, - DbErrorKind::Migration + MysqlError, + MysqlErrorKind::Migration ); - -impl InternalError for DbError { - fn internal_error(message: String) -> Self { - DbErrorKind::Internal(message).into() - } -} diff --git a/syncserver-db-common/src/lib.rs b/syncserver-db-common/src/lib.rs index a850777d..5e227376 100644 --- a/syncserver-db-common/src/lib.rs +++ b/syncserver-db-common/src/lib.rs @@ -1,78 +1,18 @@ pub mod error; -pub mod params; -pub mod results; -pub mod util; +pub mod test; use std::fmt::Debug; -use async_trait::async_trait; -use futures::future::{self, LocalBoxFuture, TryFutureExt}; -use lazy_static::lazy_static; -use serde::Deserialize; +use futures::future::LocalBoxFuture; -use error::DbError; -use util::SyncTimestamp; - -lazy_static! { - /// For efficiency, it's possible to use fixed pre-determined IDs for - /// common collection names. This is the canonical list of such - /// names. Non-standard collections will be allocated IDs starting - /// from the highest ID in this collection. 
- pub static ref STD_COLLS: Vec<(i32, &'static str)> = { - vec![ - (1, "clients"), - (2, "crypto"), - (3, "forms"), - (4, "history"), - (5, "keys"), - (6, "meta"), - (7, "bookmarks"), - (8, "prefs"), - (9, "tabs"), - (10, "passwords"), - (11, "addons"), - (12, "addresses"), - (13, "creditcards"), - ] - }; -} - -/// Rough guesstimate of the maximum reasonable life span of a batch -pub const BATCH_LIFETIME: i64 = 2 * 60 * 60 * 1000; // 2 hours, in milliseconds - -/// The ttl to use for rows that are never supposed to expire (in seconds) -pub const DEFAULT_BSO_TTL: u32 = 2_100_000_000; - -/// Non-standard collections will be allocated IDs beginning with this value -pub const FIRST_CUSTOM_COLLECTION_ID: i32 = 101; - -pub type DbFuture<'a, T> = LocalBoxFuture<'a, Result>; - -#[async_trait] -pub trait DbPool: Sync + Send + Debug + GetPoolState { - async fn get(&self) -> Result>, DbError>; - - fn validate_batch_id(&self, params: params::ValidateBatchId) -> Result<(), DbError>; - - fn box_clone(&self) -> Box; -} - -impl Clone for Box { - fn clone(&self) -> Box { - self.box_clone() - } -} +pub type DbFuture<'a, T, E> = LocalBoxFuture<'a, Result>; +/// A trait to be implemented by database pool data structures. It provides an interface to +/// derive the current state of the pool, as represented by the `PoolState` struct. 
pub trait GetPoolState { fn state(&self) -> PoolState; } -impl GetPoolState for Box { - fn state(&self) -> PoolState { - (**self).state() - } -} - #[derive(Debug, Default)] /// A mockable r2d2::State pub struct PoolState { @@ -97,212 +37,18 @@ impl From for PoolState { } } -pub trait Db<'a>: Debug + 'a { - fn lock_for_read(&self, params: params::LockCollection) -> DbFuture<'_, ()>; - - fn lock_for_write(&self, params: params::LockCollection) -> DbFuture<'_, ()>; - - fn begin(&self, for_write: bool) -> DbFuture<'_, ()>; - - fn commit(&self) -> DbFuture<'_, ()>; - - fn rollback(&self) -> DbFuture<'_, ()>; - - fn get_collection_timestamps( - &self, - params: params::GetCollectionTimestamps, - ) -> DbFuture<'_, results::GetCollectionTimestamps>; - - fn get_collection_timestamp( - &self, - params: params::GetCollectionTimestamp, - ) -> DbFuture<'_, results::GetCollectionTimestamp>; - - fn get_collection_counts( - &self, - params: params::GetCollectionCounts, - ) -> DbFuture<'_, results::GetCollectionCounts>; - - fn get_collection_usage( - &self, - params: params::GetCollectionUsage, - ) -> DbFuture<'_, results::GetCollectionUsage>; - - fn get_storage_timestamp( - &self, - params: params::GetStorageTimestamp, - ) -> DbFuture<'_, results::GetStorageTimestamp>; - - fn get_storage_usage( - &self, - params: params::GetStorageUsage, - ) -> DbFuture<'_, results::GetStorageUsage>; - - fn get_quota_usage( - &self, - params: params::GetQuotaUsage, - ) -> DbFuture<'_, results::GetQuotaUsage>; - - fn delete_storage(&self, params: params::DeleteStorage) - -> DbFuture<'_, results::DeleteStorage>; - - fn delete_collection( - &self, - params: params::DeleteCollection, - ) -> DbFuture<'_, results::DeleteCollection>; - - fn delete_bsos(&self, params: params::DeleteBsos) -> DbFuture<'_, results::DeleteBsos>; - - fn get_bsos(&self, params: params::GetBsos) -> DbFuture<'_, results::GetBsos>; - - fn get_bso_ids(&self, params: params::GetBsos) -> DbFuture<'_, results::GetBsoIds>; - - fn 
post_bsos(&self, params: params::PostBsos) -> DbFuture<'_, results::PostBsos>; - - fn delete_bso(&self, params: params::DeleteBso) -> DbFuture<'_, results::DeleteBso>; - - fn get_bso(&self, params: params::GetBso) -> DbFuture<'_, Option>; - - fn get_bso_timestamp( - &self, - params: params::GetBsoTimestamp, - ) -> DbFuture<'_, results::GetBsoTimestamp>; - - fn put_bso(&self, params: params::PutBso) -> DbFuture<'_, results::PutBso>; - - fn create_batch(&self, params: params::CreateBatch) -> DbFuture<'_, results::CreateBatch>; - - fn validate_batch(&self, params: params::ValidateBatch) - -> DbFuture<'_, results::ValidateBatch>; - - fn append_to_batch( - &self, - params: params::AppendToBatch, - ) -> DbFuture<'_, results::AppendToBatch>; - - fn get_batch(&self, params: params::GetBatch) -> DbFuture<'_, Option>; - - fn commit_batch(&self, params: params::CommitBatch) -> DbFuture<'_, results::CommitBatch>; - - fn box_clone(&self) -> Box>; - - fn check(&self) -> DbFuture<'_, results::Check>; - - fn get_connection_info(&self) -> results::ConnectionInfo; - - /// Retrieve the timestamp for an item/collection - /// - /// Modeled on the Python `get_resource_timestamp` function. 
- fn extract_resource( - &self, - user_id: UserIdentifier, - collection: Option, - bso: Option, - ) -> DbFuture<'_, SyncTimestamp> { - // If there's no collection, we return the overall storage timestamp - let collection = match collection { - Some(collection) => collection, - None => return Box::pin(self.get_storage_timestamp(user_id)), - }; - // If there's no bso, return the collection - let bso = match bso { - Some(bso) => bso, - None => { - return Box::pin( - self.get_collection_timestamp(params::GetCollectionTimestamp { - user_id, - collection, - }) - .or_else(|e| { - if e.is_collection_not_found() { - future::ok(SyncTimestamp::from_seconds(0f64)) - } else { - future::err(e) - } - }), - ) - } - }; - Box::pin( - self.get_bso_timestamp(params::GetBsoTimestamp { - user_id, - collection, - id: bso, - }) - .or_else(|e| { - if e.is_collection_not_found() { - future::ok(SyncTimestamp::from_seconds(0f64)) - } else { - future::err(e) - } - }), - ) - } - - /// Internal methods used by the db tests - - fn get_collection_id(&self, name: String) -> DbFuture<'_, i32>; - - fn create_collection(&self, name: String) -> DbFuture<'_, i32>; - - fn update_collection(&self, params: params::UpdateCollection) -> DbFuture<'_, SyncTimestamp>; - - fn timestamp(&self) -> SyncTimestamp; - - fn set_timestamp(&self, timestamp: SyncTimestamp); - - fn delete_batch(&self, params: params::DeleteBatch) -> DbFuture<'_, ()>; - - fn clear_coll_cache(&self) -> DbFuture<'_, ()>; - - fn set_quota(&mut self, enabled: bool, limit: usize, enforce: bool); -} - -impl<'a> Clone for Box> { - fn clone(&self) -> Box> { - self.box_clone() - } -} - -#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Copy)] -#[serde(rename_all = "lowercase")] -pub enum Sorting { - None, - Newest, - Oldest, - Index, -} - -impl Default for Sorting { - fn default() -> Self { - Sorting::None - } -} - -#[derive(Clone, Debug, Default, Eq, Hash, PartialEq)] -pub struct UserIdentifier { - /// For MySQL database backends as the primary 
key - pub legacy_id: u64, - /// For NoSQL database backends that require randomly distributed primary keys - pub fxa_uid: String, - pub fxa_kid: String, -} - -impl UserIdentifier { - /// Create a new legacy id user identifier - pub fn new_legacy(user_id: u64) -> Self { - Self { - legacy_id: user_id, - ..Default::default() +#[macro_export] +macro_rules! sync_db_method { + ($name:ident, $sync_name:ident, $type:ident) => { + sync_db_method!($name, $sync_name, $type, results::$type); + }; + ($name:ident, $sync_name:ident, $type:ident, $result:ty) => { + fn $name(&self, params: params::$type) -> DbFuture<'_, $result, DbError> { + let db = self.clone(); + Box::pin( + self.blocking_threadpool + .spawn(move || db.$sync_name(params)), + ) } - } -} - -impl From for UserIdentifier { - fn from(val: u32) -> Self { - Self { - legacy_id: val.into(), - ..Default::default() - } - } + }; } diff --git a/syncserver-db-common/src/test.rs b/syncserver-db-common/src/test.rs new file mode 100644 index 00000000..351888f3 --- /dev/null +++ b/syncserver-db-common/src/test.rs @@ -0,0 +1,14 @@ +use diesel::{ + mysql::MysqlConnection, + r2d2::{CustomizeConnection, Error as PoolError}, + Connection, +}; + +#[derive(Debug)] +pub struct TestTransactionCustomizer; + +impl CustomizeConnection for TestTransactionCustomizer { + fn on_acquire(&self, conn: &mut MysqlConnection) -> Result<(), PoolError> { + conn.begin_test_transaction().map_err(PoolError::QueryError) + } +} diff --git a/syncserver-settings/Cargo.toml b/syncserver-settings/Cargo.toml index 839e7bc6..45f12767 100644 --- a/syncserver-settings/Cargo.toml +++ b/syncserver-settings/Cargo.toml @@ -1,13 +1,16 @@ [package] name = "syncserver-settings" -version = "0.13.7" -edition = "2021" +version.workspace=true +license.workspace=true +authors.workspace=true +edition.workspace=true [dependencies] +serde.workspace=true +slog-scope.workspace=true + config = "0.11" num_cpus = "1" -serde = "1.0" -slog-scope = "4.3" syncserver-common = { path = 
"../syncserver-common" } syncstorage-settings = { path = "../syncstorage-settings" } tokenserver-settings = { path = "../tokenserver-settings" } diff --git a/syncserver-settings/src/lib.rs b/syncserver-settings/src/lib.rs index 9eb93331..f4fa887f 100644 --- a/syncserver-settings/src/lib.rs +++ b/syncserver-settings/src/lib.rs @@ -13,9 +13,9 @@ use syncstorage_settings::Settings as SyncstorageSettings; use tokenserver_settings::Settings as TokenserverSettings; use url::Url; -pub static PREFIX: &str = "sync"; +static PREFIX: &str = "sync"; -#[derive(Clone, Deserialize)] +#[derive(Clone, Debug, Deserialize)] #[serde(default)] pub struct Settings { pub port: u16, @@ -134,6 +134,7 @@ impl Settings { } } + #[cfg(debug_assertions)] pub fn test_settings() -> Self { let mut settings = Self::with_env_and_config_file(None).expect("Could not get Settings in test_settings"); diff --git a/syncserver/Cargo.toml b/syncserver/Cargo.toml index de269bc5..7a16c945 100644 --- a/syncserver/Cargo.toml +++ b/syncserver/Cargo.toml @@ -1,105 +1,70 @@ [package] name = "syncserver" -version = "0.13.7" -license = "MPL-2.0" -authors = [ - "Ben Bangert ", - "Phil Jenvey ", - "Mozilla Services Engineering ", -] -edition = "2018" default-run = "syncserver" +version.workspace=true +license.workspace=true +authors.workspace=true +edition.workspace=true [dependencies] +backtrace.workspace=true +base64.workspace=true +cadence.workspace=true +chrono.workspace=true +docopt.workspace=true +env_logger.workspace=true +futures.workspace=true +hex.workspace=true +lazy_static.workspace=true +rand.workspace=true +regex.workspace=true +sentry-backtrace.workspace=true +serde.workspace=true +serde_derive.workspace=true +serde_json.workspace=true +sha2.workspace=true +slog.workspace=true +slog-async.workspace=true +slog-envlogger.workspace=true +slog-mozlog-json.workspace=true +slog-scope.workspace=true +slog-stdlog.workspace=true +slog-term.workspace=true + actix-http = "2" actix-web = "3" -actix-rt = "1" # Pin 
to 1.0, due to dependencies on Tokio +actix-rt = "1" # Pin to 1.0, due to dependencies on Tokio actix-cors = "0.5" -actix-service = "1.0.6" async-trait = "0.1.40" -backtrace = "0.3.61" -base64 = "0.13" -bb8 = "0.4.1" # pin to 0.4 due to dependencies on Tokio -bytes = "1.0" -cadence = "0.26" -chrono = "0.4" -# Pin to 0.5 for now, to keep it under tokio 0.2 (issue977). -# Fix for #803 (deadpool#92) points to our fork for now -#deadpool = "0.5" # pin to 0.5 -deadpool = { git = "https://github.com/mozilla-services/deadpool", branch = "deadpool-v0.5.2-issue92" } -diesel = { version = "1.4", features = ["mysql", "r2d2"] } -diesel_logger = "0.1.1" -diesel_migrations = { version = "1.4.0", features = ["mysql"] } -docopt = "1.1.0" dyn-clone = "1.0.4" -env_logger = "0.9" -futures = { version = "0.3", features = ["compat"] } -google-cloud-rust-raw = "0.11.0" -# Some versions of OpenSSL 1.1.1 conflict with grpcio's built-in boringssl which can cause -# syncserver to either fail to either compile, or start. 
In those cases, try -# `cargo build --features grpcio/openssl ...` -grpcio = { version = "0.9" } -lazy_static = "1.4.0" -hawk = "3.2" -hex = "0.4.3" hostname = "0.3.1" +hawk = "3.2" hmac = "0.11" -http = "0.2.5" -log = { version = "0.4", features = [ - "max_level_debug", - "release_max_level_info", -] } mime = "0.3" -num_cpus = "1" -# must match what's used by googleapis-raw -protobuf = "2.20.0" -pyo3 = { version = "0.14", features = ["auto-initialize"] } -rand = "0.8" -regex = "1.4" reqwest = { version = "0.10.10", features = ["json", "rustls-tls"] } # pin to 0.19: https://github.com/getsentry/sentry-rust/issues/277 sentry = { version = "0.19", features = [ "with_curl_transport", ] } # pin to 0.19 until on-prem sentry server is updated -sentry-backtrace = "0.19" -serde = "1.0" -serde_derive = "1.0" -serde_json = { version = "1.0", features = ["arbitrary_precision"] } -scheduled-thread-pool = "0.2" -sha2 = "0.9" -slog = { version = "2.5", features = [ - "max_level_info", - "release_max_level_info", - "dynamic-keys", -] } -slog-async = "2.5" -slog-envlogger = "2.2.0" -slog-mozlog-json = "0.1" -slog-scope = "4.3" -slog-stdlog = "4.1" -slog-term = "2.6" -syncserver-settings = { path = "../syncserver-settings" } -syncserver-db-common = { path = "../syncserver-db-common" } syncserver-common = { path = "../syncserver-common" } +syncserver-db-common = { path = "../syncserver-db-common" } +syncserver-settings = { path = "../syncserver-settings" } +syncstorage-db = { path = "../syncstorage-db" } syncstorage-settings = { path = "../syncstorage-settings" } time = "^0.3" thiserror = "1.0.26" +tokenserver-auth = { path = "../tokenserver-auth" } tokenserver-common = { path = "../tokenserver-common" } +tokenserver-db = { path = "../tokenserver-db" } tokenserver-settings = { path = "../tokenserver-settings" } # pinning to 0.2.4 due to high number of dependencies (actix, bb8, deadpool, etc.) 
tokio = { version = "0.2.4", features = ["macros", "sync"] } -url = "2.1" urlencoding = "2.1" -uuid = { version = "0.8.2", features = ["serde", "v4"] } validator = "0.14" validator_derive = "0.14" woothee = "0.11" -[dev-dependencies] -mockito = "0.30.0" - [features] +default = ["syncstorage-db/mysql"] no_auth = [] - -[[bin]] -name = "purge_ttl" +spanner = ["syncstorage-db/spanner"] diff --git a/syncserver/src/db/mysql/mod.rs b/syncserver/src/db/mysql/mod.rs deleted file mode 100644 index 82ea3a9b..00000000 --- a/syncserver/src/db/mysql/mod.rs +++ /dev/null @@ -1,12 +0,0 @@ -#[macro_use] -mod batch; -mod diesel_ext; -pub mod models; -pub mod pool; -mod schema; -#[cfg(test)] -mod test; - -pub use self::pool::MysqlDbPool; -#[cfg(test)] -pub use self::test::TestTransactionCustomizer; diff --git a/syncserver/src/db/spanner/manager/mod.rs b/syncserver/src/db/spanner/manager/mod.rs deleted file mode 100644 index b1ee0932..00000000 --- a/syncserver/src/db/spanner/manager/mod.rs +++ /dev/null @@ -1,6 +0,0 @@ -// mod bb8; -mod deadpool; -mod session; - -pub use self::deadpool::{Conn, SpannerSessionManager}; -pub use self::session::SpannerSession; diff --git a/syncserver/src/db/spanner/mod.rs b/syncserver/src/db/spanner/mod.rs deleted file mode 100644 index a5962eb3..00000000 --- a/syncserver/src/db/spanner/mod.rs +++ /dev/null @@ -1,19 +0,0 @@ -use std::time::SystemTime; - -#[macro_use] -mod macros; - -mod batch; -pub mod manager; -pub mod models; -pub mod pool; -mod support; - -pub use self::pool::SpannerDbPool; - -pub fn now() -> i64 { - SystemTime::now() - .duration_since(SystemTime::UNIX_EPOCH) - .unwrap_or_default() - .as_secs() as i64 -} diff --git a/syncserver/src/error.rs b/syncserver/src/error.rs index c81ea604..933b4de0 100644 --- a/syncserver/src/error.rs +++ b/syncserver/src/error.rs @@ -21,8 +21,9 @@ use serde::{ Serialize, }; -use syncserver_common::{from_error, impl_fmt_display, ReportableError}; -use syncserver_db_common::error::DbError; +use 
syncserver_common::{from_error, impl_fmt_display, MetricError, ReportableError}; +use syncstorage_db::{DbError, DbErrorIntrospect}; + use thiserror::Error; use crate::web::error::{HawkError, ValidationError}; @@ -57,7 +58,7 @@ pub const RETRY_AFTER: u8 = 10; #[derive(Debug)] pub struct ApiError { kind: ApiErrorKind, - pub(crate) backtrace: Backtrace, + pub(crate) backtrace: Box, status: StatusCode, } @@ -87,8 +88,8 @@ pub enum ApiErrorKind { impl ApiErrorKind { pub fn metric_label(&self) -> Option { match self { - ApiErrorKind::Db(err) => err.metric_label(), ApiErrorKind::Hawk(err) => err.metric_label(), + ApiErrorKind::Db(err) => err.metric_label(), ApiErrorKind::Validation(err) => err.metric_label(), _ => None, } @@ -96,6 +97,15 @@ impl ApiErrorKind { } impl ApiError { + pub fn is_sentry_event(&self) -> bool { + // Should we report this error to sentry? + self.status.is_server_error() + && match &self.kind { + ApiErrorKind::Db(dbe) => dbe.is_sentry_event(), + _ => self.kind.metric_label().is_none(), + } + } + fn weave_error_code(&self) -> WeaveError { match &self.kind { ApiErrorKind::Validation(ver) => ver.weave_error_code(), @@ -148,8 +158,8 @@ impl From for HttpResponse { } } -impl From for ApiError { - fn from(inner: cadence::MetricError) -> Self { +impl From for ApiError { + fn from(inner: MetricError) -> Self { ApiErrorKind::Internal(inner.to_string()).into() } } @@ -173,7 +183,7 @@ impl From for ApiError { Self { kind, - backtrace: Backtrace::new(), + backtrace: Box::new(Backtrace::new()), status, } } diff --git a/syncserver/src/lib.rs b/syncserver/src/lib.rs index 90284b15..ac5b979a 100644 --- a/syncserver/src/lib.rs +++ b/syncserver/src/lib.rs @@ -1,10 +1,6 @@ #![warn(rust_2018_idioms)] #![allow(clippy::try_err)] -#[macro_use] -extern crate diesel; -#[macro_use] -extern crate diesel_migrations; #[macro_use] extern crate slog_scope; #[macro_use] @@ -12,7 +8,6 @@ extern crate validator_derive; #[macro_use] pub mod error; -pub mod db; pub mod logging; pub 
mod server; pub mod tokenserver; diff --git a/syncserver/src/server/mod.rs b/syncserver/src/server/mod.rs index 7a0d80fd..0e7572a2 100644 --- a/syncserver/src/server/mod.rs +++ b/syncserver/src/server/mod.rs @@ -1,33 +1,27 @@ //! Main application server -use std::{ - env, fmt, - sync::{ - atomic::{AtomicU64, Ordering}, - Arc, - }, - time::Duration, -}; +use std::{env, sync::Arc, time::Duration}; use actix_cors::Cors; use actix_web::{ - dev, - error::BlockingError, + dev::{self, Payload}, http::StatusCode, http::{header::LOCATION, Method}, middleware::errhandlers::ErrorHandlers, - web, App, HttpRequest, HttpResponse, HttpServer, + web::{self, Data}, + App, FromRequest, HttpRequest, HttpResponse, HttpServer, }; use cadence::{Gauged, StatsdClient}; -use syncserver_common::InternalError; -use syncserver_db_common::{error::DbError, DbPool, GetPoolState, PoolState}; +use futures::future::{self, Ready}; +use syncserver_common::{BlockingThreadpool, Metrics}; +use syncserver_db_common::{GetPoolState, PoolState}; use syncserver_settings::Settings; +use syncstorage_db::{DbError, DbPool, DbPoolImpl}; use syncstorage_settings::{Deadman, ServerLimits}; use tokio::{sync::RwLock, time}; -use crate::db::pool_from_settings; use crate::error::ApiError; -use crate::server::metrics::Metrics; +use crate::server::tags::Taggable; use crate::tokenserver; use crate::web::{handlers, middleware}; @@ -38,7 +32,7 @@ pub const SYNC_DOCS_URL: &str = const MYSQL_UID_REGEX: &str = r"[0-9]{1,10}"; const SYNC_VERSION_PATH: &str = "1.5"; -pub mod metrics; +pub mod tags; #[cfg(test)] mod test; pub mod user_agent; @@ -46,7 +40,7 @@ pub mod user_agent; /// This is the global HTTP state object that will be made available to all /// HTTP API calls. pub struct ServerState { - pub db_pool: Box, + pub db_pool: Box>, /// Server-enforced limits for request payloads. 
pub limits: Arc, @@ -55,7 +49,7 @@ pub struct ServerState { pub limits_json: String, /// Metric reporting - pub metrics: Box, + pub metrics: Arc, pub port: u16, @@ -93,10 +87,10 @@ macro_rules! build_app { // These will wrap all outbound responses with matching status codes. .wrap(ErrorHandlers::new().handler(StatusCode::NOT_FOUND, ApiError::render_404)) // These are our wrappers - .wrap(middleware::weave::WeaveTimestamp::new()) - .wrap(tokenserver::logging::LoggingWrapper::new()) - .wrap(middleware::sentry::SentryWrapper::default()) - .wrap(middleware::rejectua::RejectUA::default()) + .wrap_fn(middleware::weave::set_weave_timestamp) + .wrap_fn(tokenserver::logging::handle_request_log_line) + .wrap_fn(middleware::sentry::report_error) + .wrap_fn(middleware::rejectua::reject_user_agent) .wrap($cors) .wrap_fn(middleware::emit_http_status_with_tokenserver_origin) .service( @@ -198,9 +192,9 @@ macro_rules! build_app_without_syncstorage { // These will wrap all outbound responses with matching status codes. .wrap(ErrorHandlers::new().handler(StatusCode::NOT_FOUND, ApiError::render_404)) // These are our wrappers - .wrap(middleware::sentry::SentryWrapper::default()) - .wrap(tokenserver::logging::LoggingWrapper::new()) - .wrap(middleware::rejectua::RejectUA::default()) + .wrap_fn(middleware::sentry::report_error) + .wrap_fn(tokenserver::logging::handle_request_log_line) + .wrap_fn(middleware::rejectua::reject_user_agent) // Followed by the "official middleware" so they run first. // actix is getting increasingly tighter about CORS headers. Our server is // not a huge risk but does deliver XHR JSON content. @@ -249,7 +243,7 @@ macro_rules! 
build_app_without_syncstorage { impl Server { pub async fn with_settings(settings: Settings) -> Result { let settings_copy = settings.clone(); - let metrics = metrics::metrics_from_opts( + let metrics = syncserver_common::metrics_from_opts( &settings.syncstorage.statsd_label, settings.statsd_host.as_deref(), settings.statsd_port, @@ -258,12 +252,11 @@ impl Server { let port = settings.port; let deadman = Arc::new(RwLock::new(Deadman::from(&settings.syncstorage))); let blocking_threadpool = Arc::new(BlockingThreadpool::default()); - let db_pool = pool_from_settings( + let db_pool = DbPoolImpl::new( &settings.syncstorage, &Metrics::from(&metrics), blocking_threadpool.clone(), - ) - .await?; + )?; let limits = Arc::new(settings.syncstorage.limits); let limits_json = serde_json::to_string(&*limits).expect("ServerLimits failed to serialize"); @@ -273,12 +266,12 @@ impl Server { let tokenserver_state = if settings.tokenserver.enabled { let state = tokenserver::ServerState::from_settings( &settings.tokenserver, - metrics::metrics_from_opts( + syncserver_common::metrics_from_opts( &settings.tokenserver.statsd_label, settings.statsd_host.as_deref(), settings.statsd_port, )?, - blocking_threadpool.clone(), + blocking_threadpool, )?; Some(state) @@ -290,7 +283,7 @@ impl Server { Duration::from_secs(10), metrics.clone(), db_pool.clone(), - blocking_threadpool.clone(), + blocking_threadpool, )?; None @@ -298,10 +291,10 @@ impl Server { let mut server = HttpServer::new(move || { let syncstorage_state = ServerState { - db_pool: db_pool.clone(), + db_pool: Box::new(db_pool.clone()), limits: Arc::clone(&limits), limits_json: limits_json.clone(), - metrics: Box::new(metrics.clone()), + metrics: metrics.clone(), port, quota_enabled, deadman: Arc::clone(&deadman), @@ -337,7 +330,7 @@ impl Server { let blocking_threadpool = Arc::new(BlockingThreadpool::default()); let tokenserver_state = tokenserver::ServerState::from_settings( &settings.tokenserver, - metrics::metrics_from_opts( + 
syncserver_common::metrics_from_opts( &settings.tokenserver.statsd_label, settings.statsd_host.as_deref(), settings.statsd_port, @@ -347,7 +340,7 @@ impl Server { spawn_metric_periodic_reporter( Duration::from_secs(10), - *tokenserver_state.metrics.clone(), + tokenserver_state.metrics.clone(), tokenserver_state.db_pool.clone(), blocking_threadpool, )?; @@ -405,10 +398,41 @@ fn build_cors(settings: &Settings) -> Cors { cors } +pub struct MetricsWrapper(pub Metrics); + +impl FromRequest for MetricsWrapper { + type Config = (); + type Error = (); + type Future = Ready>; + + fn from_request(req: &HttpRequest, _: &mut Payload) -> Self::Future { + let client = { + let syncstorage_metrics = req + .app_data::>() + .map(|state| state.metrics.clone()); + let tokenserver_metrics = req + .app_data::>() + .map(|state| state.metrics.clone()); + + syncstorage_metrics.or(tokenserver_metrics) + }; + + if client.is_none() { + warn!("⚠️ metric error: No App State"); + } + + future::ok(MetricsWrapper(Metrics { + client, + tags: req.get_tags(), + timer: None, + })) + } +} + /// Emit database pool and threadpool metrics periodically fn spawn_metric_periodic_reporter( interval: Duration, - metrics: StatsdClient, + metrics: Arc, pool: T, blocking_threadpool: Arc, ) -> Result<(), DbError> { @@ -453,43 +477,3 @@ fn spawn_metric_periodic_reporter( Ok(()) } - -/// A threadpool on which callers can spawn non-CPU-bound tasks that block their thread (this is -/// mostly useful for running I/O tasks). `BlockingThreadpool` intentionally does not implement -/// `Clone`: `Arc`s are not used internally, so a `BlockingThreadpool` should be instantiated once -/// and shared by passing around `Arc`s. -#[derive(Debug, Default)] -pub struct BlockingThreadpool { - spawned_tasks: AtomicU64, -} - -impl BlockingThreadpool { - /// Runs a function as a task on the blocking threadpool. 
- /// - /// WARNING: Spawning a blocking task through means other than calling this method will - /// result in inaccurate threadpool metrics being reported. If you want to spawn a task on - /// the blocking threadpool, you **must** use this function. - pub async fn spawn(&self, f: F) -> Result - where - F: FnOnce() -> Result + Send + 'static, - T: Send + 'static, - E: fmt::Debug + Send + InternalError + 'static, - { - self.spawned_tasks.fetch_add(1, Ordering::Relaxed); - - let result = web::block(f).await.map_err(|e| match e { - BlockingError::Error(e) => e, - BlockingError::Canceled => { - E::internal_error("Blocking threadpool operation canceled".to_owned()) - } - }); - - self.spawned_tasks.fetch_sub(1, Ordering::Relaxed); - - result - } - - fn active_threads(&self) -> u64 { - self.spawned_tasks.load(Ordering::Relaxed) - } -} diff --git a/syncserver/src/web/tags.rs b/syncserver/src/server/tags.rs similarity index 89% rename from syncserver/src/web/tags.rs rename to syncserver/src/server/tags.rs index db6f6ab2..933644c9 100644 --- a/syncserver/src/web/tags.rs +++ b/syncserver/src/server/tags.rs @@ -1,4 +1,4 @@ -use std::collections::{BTreeMap, HashMap}; +use std::collections::HashMap; use actix_web::HttpMessage; @@ -78,14 +78,3 @@ struct Tags(HashMap); // "Extras" are pieces of metadata with high cardinality to be included in Sentry errors. 
#[derive(Default)] struct Extras(HashMap); - -impl From for BTreeMap { - fn from(tags: Tags) -> BTreeMap { - let mut result = BTreeMap::new(); - - for (k, v) in tags.0 { - result.insert(k.clone(), v.clone()); - } - result - } -} diff --git a/syncserver/src/server/test.rs b/syncserver/src/server/test.rs index 6378e586..fb49c884 100644 --- a/syncserver/src/server/test.rs +++ b/syncserver/src/server/test.rs @@ -7,6 +7,7 @@ use actix_web::{ test, web::Bytes, }; +use base64::{engine, Engine}; use chrono::offset::Utc; use hawk::{self, Credentials, Key, RequestBuilder}; use hmac::{Hmac, Mac, NewMac}; @@ -16,17 +17,16 @@ use serde::de::DeserializeOwned; use serde_json::json; use sha2::Sha256; use syncserver_common::{self, X_LAST_MODIFIED}; -use syncserver_db_common::{ +use syncserver_settings::{Secrets, Settings}; +use syncstorage_db::{ params, results::{DeleteBso, GetBso, PostBsos, PutBso}, - util::SyncTimestamp, + DbPoolImpl, SyncTimestamp, }; -use syncserver_settings::{Secrets, Settings}; use syncstorage_settings::ServerLimits; use super::*; use crate::build_app; -use crate::db::pool_from_settings; use crate::tokenserver; use crate::web::{auth::HawkPayload, extractors::BsoBody}; @@ -65,20 +65,21 @@ fn get_test_settings() -> Settings { } async fn get_test_state(settings: &Settings) -> ServerState { - let metrics = Metrics::sink(); + let metrics = Arc::new(Metrics::sink()); let blocking_threadpool = Arc::new(BlockingThreadpool::default()); ServerState { - db_pool: pool_from_settings( - &settings.syncstorage, - &Metrics::from(&metrics), - blocking_threadpool.clone(), - ) - .await - .expect("Could not get db_pool in get_test_state"), + db_pool: Box::new( + DbPoolImpl::new( + &settings.syncstorage, + &Metrics::from(&metrics), + blocking_threadpool, + ) + .expect("Could not get db_pool in get_test_state"), + ), limits: Arc::clone(&SERVER_LIMITS), limits_json: serde_json::to_string(&**SERVER_LIMITS).unwrap(), - metrics: Box::new(metrics), + metrics, port: settings.port, 
quota_enabled: settings.syncstorage.enable_quota, deadman: Arc::new(RwLock::new(Deadman::from(&settings.syncstorage))), @@ -165,14 +166,14 @@ fn create_hawk_header(method: &str, port: u16, path: &str) -> String { let mut id: Vec = vec![]; id.extend(payload.as_bytes()); id.extend_from_slice(&signature); - let id = base64::encode_config(&id, base64::URL_SAFE); + let id = engine::general_purpose::URL_SAFE.encode(&id); let token_secret = syncserver_common::hkdf_expand_32( format!("services.mozilla.com/tokenlib/v1/derive/{}", id).as_bytes(), Some(b"wibble"), &SECRETS.master_secret, ) .expect("hkdf_expand_32 failed in create_hawk_header"); - let token_secret = base64::encode_config(token_secret, base64::URL_SAFE); + let token_secret = engine::general_purpose::URL_SAFE.encode(token_secret); let request = RequestBuilder::new(method, host, port, path).request(); let credentials = Credentials { id, diff --git a/syncserver/src/tokenserver/db/mod.rs b/syncserver/src/tokenserver/db/mod.rs deleted file mode 100644 index c068100c..00000000 --- a/syncserver/src/tokenserver/db/mod.rs +++ /dev/null @@ -1,5 +0,0 @@ -pub mod mock; -pub mod models; -pub mod params; -pub mod pool; -pub mod results; diff --git a/syncserver/src/tokenserver/extractors.rs b/syncserver/src/tokenserver/extractors.rs index 6118da19..f1672f55 100644 --- a/syncserver/src/tokenserver/extractors.rs +++ b/syncserver/src/tokenserver/extractors.rs @@ -13,6 +13,7 @@ use actix_web::{ web::{Data, Query}, FromRequest, HttpRequest, }; +use base64::{engine, Engine}; use futures::future::LocalBoxFuture; use hex; use hmac::{Hmac, Mac, NewMac}; @@ -21,17 +22,11 @@ use regex::Regex; use serde::Deserialize; use sha2::Sha256; use syncserver_settings::Secrets; -use tokenserver_common::{ - error::{ErrorLocation, TokenserverError}, - NodeType, -}; +use tokenserver_common::{ErrorLocation, NodeType, TokenserverError}; +use tokenserver_db::{params, results, Db, DbPool}; -use super::{ - db::{models::Db, params, pool::DbPool, results}, 
- LogItemsMutator, ServerState, TokenserverMetrics, -}; -use crate::server::metrics::Metrics; -use crate::web::tags::Taggable; +use super::{LogItemsMutator, ServerState, TokenserverMetrics}; +use crate::server::{tags::Taggable, MetricsWrapper}; lazy_static! { static ref CLIENT_STATE_REGEX: Regex = Regex::new("^[a-zA-Z0-9._-]{1,32}$").unwrap(); @@ -218,7 +213,7 @@ impl FromRequest for TokenserverRequest { hash_device_id(&hashed_fxa_uid, device_id, fxa_metrics_hash_secret) }; - let db = >::extract(&req).await?; + let DbWrapper(db) = DbWrapper::extract(&req).await?; let service_id = { let path = req.match_info(); @@ -312,7 +307,10 @@ struct QueryParams { pub duration: Option, } -impl FromRequest for Box { +/// A local "newtype" that wraps `Box` so we can implement `FromRequest`. +pub struct DbWrapper(pub Box); + +impl FromRequest for DbWrapper { type Config = (); type Error = TokenserverError; type Future = LocalBoxFuture<'static, Result>; @@ -321,10 +319,12 @@ impl FromRequest for Box { let req = req.clone(); Box::pin(async move { - >::extract(&req) + DbPoolWrapper::extract(&req) .await? + .0 .get() .await + .map(Self) .map_err(|e| TokenserverError { context: format!("Couldn't acquire a database connection: {}", e), ..TokenserverError::internal_error() @@ -333,7 +333,9 @@ impl FromRequest for Box { } } -impl FromRequest for Box { +struct DbPoolWrapper(Box); + +impl FromRequest for DbPoolWrapper { type Config = (); type Error = TokenserverError; type Future = LocalBoxFuture<'static, Result>; @@ -344,7 +346,7 @@ impl FromRequest for Box { Box::pin(async move { let state = get_server_state(&req)?.as_ref(); - Ok(state.db_pool.clone()) + Ok(Self(state.db_pool.clone())) }) } } @@ -592,15 +594,15 @@ impl FromRequest for KeyId { // encoded as URL-safe base64 with the padding removed. We convert it to hex // because we store the client state as hex in the database. 
let client_state_hex = { - let bytes = - base64::decode_config(encoded_client_state, base64::URL_SAFE_NO_PAD) - .map_err(|e| TokenserverError { - context: format!( - "Failed to decode client state base64 in X-KeyID: {}", - e - ), - ..TokenserverError::invalid_credentials("Unauthorized".to_owned()) - })?; + let bytes = engine::general_purpose::URL_SAFE_NO_PAD + .decode(encoded_client_state) + .map_err(|e| TokenserverError { + context: format!( + "Failed to decode client state base64 in X-KeyID: {}", + e + ), + ..TokenserverError::invalid_credentials("Unauthorized".to_owned()) + })?; hex::encode(bytes) }; @@ -648,8 +650,12 @@ impl FromRequest for TokenserverMetrics { fn from_request(req: &HttpRequest, _payload: &mut Payload) -> Self::Future { let req = req.clone(); - // `Result::unwrap` is safe to use here, since Metrics::extract can never fail - Box::pin(async move { Ok(TokenserverMetrics(Metrics::extract(&req).await.unwrap())) }) + // `Result::unwrap` is safe to use here, since MetricsWrapper::extract can never fail + Box::pin(async move { + Ok(TokenserverMetrics( + MetricsWrapper::extract(&req).await.unwrap().0, + )) + }) } } @@ -706,14 +712,11 @@ mod tests { use serde_json; use syncserver_settings::Settings as GlobalSettings; use syncstorage_settings::ServerLimits; + use tokenserver_auth::{browserid, oauth, MockVerifier}; + use tokenserver_db::mock::MockDbPool as MockTokenserverPool; use tokenserver_settings::Settings as TokenserverSettings; - use crate::server::metrics; - use crate::tokenserver::{ - auth::{browserid, oauth, MockVerifier}, - db::mock::MockDbPool as MockTokenserverPool, - ServerState, - }; + use crate::tokenserver::ServerState; use std::sync::Arc; use std::time::{SystemTime, UNIX_EPOCH}; @@ -1338,14 +1341,12 @@ mod tests { db_pool: Box::new(MockTokenserverPool::new()), node_capacity_release_rate: None, node_type: NodeType::default(), - metrics: Box::new( - metrics::metrics_from_opts( - &tokenserver_settings.statsd_label, - 
syncserver_settings.statsd_host.as_deref(), - syncserver_settings.statsd_port, - ) - .unwrap(), - ), + metrics: syncserver_common::metrics_from_opts( + &tokenserver_settings.statsd_label, + syncserver_settings.statsd_host.as_deref(), + syncserver_settings.statsd_port, + ) + .unwrap(), token_duration: TOKEN_DURATION, } } diff --git a/syncserver/src/tokenserver/handlers.rs b/syncserver/src/tokenserver/handlers.rs index 855efa51..2c1610db 100644 --- a/syncserver/src/tokenserver/handlers.rs +++ b/syncserver/src/tokenserver/handlers.rs @@ -4,17 +4,18 @@ use std::{ }; use actix_web::{http::StatusCode, Error, HttpResponse}; +use base64::{engine, Engine}; use serde::Serialize; use serde_json::Value; -use tokenserver_common::{error::TokenserverError, NodeType}; +use tokenserver_auth::{MakeTokenPlaintext, Tokenlib, TokenserverOrigin}; +use tokenserver_common::{NodeType, TokenserverError}; +use tokenserver_db::{ + params::{GetNodeId, PostUser, PutUser, ReplaceUsers}, + Db, +}; use super::{ - auth::{MakeTokenPlaintext, Tokenlib, TokenserverOrigin}, - db::{ - models::Db, - params::{GetNodeId, PostUser, PutUser, ReplaceUsers}, - }, - extractors::TokenserverRequest, + extractors::{DbWrapper, TokenserverRequest}, TokenserverMetrics, }; @@ -32,7 +33,7 @@ pub struct TokenserverResult { pub async fn get_tokenserver_result( req: TokenserverRequest, - db: Box, + DbWrapper(db): DbWrapper, TokenserverMetrics(mut metrics): TokenserverMetrics, ) -> Result { let updates = update_user(&req, db).await?; @@ -83,7 +84,7 @@ fn get_token_plaintext( context: format!("Failed to decode the client state hex: {}", e), ..TokenserverError::internal_error() })?; - let client_state_b64 = base64::encode_config(&client_state, base64::URL_SAFE_NO_PAD); + let client_state_b64 = engine::general_purpose::URL_SAFE_NO_PAD.encode(client_state); format!( "{:013}-{:}", @@ -242,7 +243,7 @@ async fn update_user( } } -pub async fn heartbeat(db: Box) -> Result { +pub async fn heartbeat(DbWrapper(db): DbWrapper) -> 
Result { let mut checklist = HashMap::new(); checklist.insert( "version".to_owned(), diff --git a/syncserver/src/tokenserver/logging.rs b/syncserver/src/tokenserver/logging.rs index c7006c7d..577f0b55 100644 --- a/syncserver/src/tokenserver/logging.rs +++ b/syncserver/src/tokenserver/logging.rs @@ -1,74 +1,30 @@ -use std::task::Context; -use std::{cell::RefCell, rc::Rc}; - use actix_web::{ - dev::{Service, ServiceRequest, ServiceResponse, Transform}, - Error, HttpMessage, + dev::{Service, ServiceRequest, ServiceResponse}, + HttpMessage, }; -use futures::future::{self, LocalBoxFuture, TryFutureExt}; -use std::task::Poll; +use futures::future::Future; use super::LogItems; -#[derive(Default)] -pub struct LoggingWrapper; +pub fn handle_request_log_line( + request: ServiceRequest, + service: &mut impl Service< + Request = ServiceRequest, + Response = ServiceResponse, + Error = actix_web::Error, + >, +) -> impl Future> { + let items = LogItems::from(request.head()); + request.extensions_mut().insert(items); + let fut = service.call(request); -impl LoggingWrapper { - pub fn new() -> Self { - LoggingWrapper::default() - } -} - -impl Transform for LoggingWrapper -where - S: Service, Error = Error> + 'static, - S::Future: 'static, - B: 'static, -{ - type Request = ServiceRequest; - type Response = ServiceResponse; - type Error = Error; - type InitError = (); - type Transform = LoggingWrapperMiddleware; - type Future = LocalBoxFuture<'static, Result>; - - fn new_transform(&self, service: S) -> Self::Future { - Box::pin(future::ok(LoggingWrapperMiddleware { - service: Rc::new(RefCell::new(service)), - })) - } -} - -#[derive(Debug)] -pub struct LoggingWrapperMiddleware { - service: Rc>, -} - -impl Service for LoggingWrapperMiddleware -where - S: Service, Error = Error> + 'static, - S::Future: 'static, - B: 'static, -{ - type Request = ServiceRequest; - type Response = ServiceResponse; - type Error = Error; - type Future = LocalBoxFuture<'static, Result>; - - fn poll_ready(&mut 
self, cx: &mut Context<'_>) -> Poll> { - self.service.poll_ready(cx) - } - - fn call(&mut self, sreq: ServiceRequest) -> Self::Future { - let items = LogItems::from(sreq.head()); - sreq.extensions_mut().insert(items); - - Box::pin(self.service.call(sreq).and_then(move |sresp| { - if let Some(items) = sresp.request().extensions().get::() { - info!("{}", items); - } - - future::ok(sresp) - })) - } + Box::pin(async move { + let sresp = fut.await?; + + if let Some(items) = sresp.request().extensions().get::() { + info!("{}", items); + } + + Ok(sresp) + }) } diff --git a/syncserver/src/tokenserver/mod.rs b/syncserver/src/tokenserver/mod.rs index 52cb27fd..f204cec6 100644 --- a/syncserver/src/tokenserver/mod.rs +++ b/syncserver/src/tokenserver/mod.rs @@ -1,5 +1,3 @@ -pub mod auth; -pub mod db; pub mod extractors; pub mod handlers; pub mod logging; @@ -10,17 +8,15 @@ use serde::{ ser::{SerializeMap, Serializer}, Serialize, }; +use syncserver_common::{BlockingThreadpool, Metrics}; +use tokenserver_auth::{browserid, oauth, VerifyToken}; use tokenserver_common::NodeType; +use tokenserver_db::{params, DbPool, TokenserverPool}; use tokenserver_settings::Settings; use crate::{ - error::ApiError, - server::{metrics::Metrics, user_agent, BlockingThreadpool}, -}; -use auth::{browserid, oauth, VerifyToken}; -use db::{ - params, - pool::{DbPool, TokenserverPool}, + error::{ApiError, ApiErrorKind}, + server::user_agent, }; use std::{collections::HashMap, convert::TryFrom, fmt, sync::Arc}; @@ -34,14 +30,14 @@ pub struct ServerState { pub browserid_verifier: Box>, pub node_capacity_release_rate: Option, pub node_type: NodeType, - pub metrics: Box, + pub metrics: Arc, pub token_duration: u64, } impl ServerState { pub fn from_settings( settings: &Settings, - metrics: StatsdClient, + metrics: Arc, blocking_threadpool: Arc, ) -> Result { let oauth_verifier = Box::new( @@ -49,7 +45,7 @@ impl ServerState { .expect("failed to create Tokenserver OAuth verifier"), ); let browserid_verifier = 
Box::new( - browserid::RemoteVerifier::try_from(settings) + browserid::Verifier::try_from(settings) .expect("failed to create Tokenserver BrowserID verifier"), ); let use_test_transactions = false; @@ -82,11 +78,11 @@ impl ServerState { db_pool: Box::new(db_pool), node_capacity_release_rate: settings.node_capacity_release_rate, node_type: settings.node_type, - metrics: Box::new(metrics), + metrics, token_duration: settings.token_duration, } }) - .map_err(Into::into) + .map_err(|_| ApiErrorKind::Internal("Failed to create Tokenserver pool".to_owned()).into()) } } diff --git a/syncserver/src/web/auth.rs b/syncserver/src/web/auth.rs index 958698ad..09c3f920 100644 --- a/syncserver/src/web/auth.rs +++ b/syncserver/src/web/auth.rs @@ -8,6 +8,7 @@ use std::convert::TryInto; +use base64::{engine, Engine}; use chrono::offset::Utc; use hawk::{self, Header as HawkHeader, Key, RequestBuilder}; use hmac::{Hmac, Mac, NewMac}; @@ -16,6 +17,7 @@ use sha2::Sha256; use syncserver_common; use syncserver_settings::Secrets; use time::Duration; +use tokenserver_auth::TokenserverOrigin; use actix_web::dev::ConnectionInfo; use actix_web::http::Uri; @@ -26,7 +28,6 @@ use super::{ }; use crate::error::{ApiErrorKind, ApiResult}; use crate::label; -use crate::tokenserver::auth::TokenserverOrigin; /// A parsed and authenticated JSON payload /// extracted from the signed `id` property @@ -90,7 +91,7 @@ impl HawkPayload { &secrets.master_secret, ) .map_err(|e| ApiErrorKind::Internal(format!("HKDF Error: {:?}", e)))?; - let token_secret = base64::encode_config(token_secret, base64::URL_SAFE); + let token_secret = engine::general_purpose::URL_SAFE.encode(token_secret); let request = RequestBuilder::new(method, host, port, path).request(); @@ -125,7 +126,7 @@ impl HawkPayload { /// Decode the `id` property of a Hawk header /// and verify the payload part against the signature part. 
fn extract_and_validate(id: &str, secrets: &Secrets, expiry: u64) -> ApiResult { - let decoded_id = base64::decode_config(id, base64::URL_SAFE)?; + let decoded_id = engine::general_purpose::URL_SAFE.decode(id)?; if decoded_id.len() <= 32 { Err(HawkErrorKind::TruncatedId)?; } diff --git a/syncserver/src/web/extractors.rs b/syncserver/src/web/extractors.rs index 5bb2d2c7..632e0813 100644 --- a/syncserver/src/web/extractors.rs +++ b/syncserver/src/web/extractors.rs @@ -15,7 +15,6 @@ use actix_web::{ web::{Data, Json, Query}, Error, FromRequest, HttpMessage, HttpRequest, }; - use futures::future::{self, FutureExt, LocalBoxFuture, Ready, TryFutureExt}; use syncserver_settings::Secrets; @@ -27,23 +26,23 @@ use serde::{ Deserialize, Serialize, }; use serde_json::Value; -use syncserver_common::X_WEAVE_RECORDS; -use syncserver_db_common::{ +use syncserver_common::{Metrics, X_WEAVE_RECORDS}; +use syncstorage_db::{ params::{self, PostCollectionBso}, - util::SyncTimestamp, - DbPool, Sorting, UserIdentifier, + DbError, DbPool, Sorting, SyncTimestamp, UserIdentifier, }; +use tokenserver_auth::TokenserverOrigin; use validator::{Validate, ValidationError}; -use crate::db::transaction::DbTransactionPool; use crate::error::{ApiError, ApiErrorKind}; use crate::label; -use crate::server::{metrics, ServerState, BSO_ID_REGEX, COLLECTION_ID_REGEX}; -use crate::tokenserver::auth::TokenserverOrigin; +use crate::server::{ + tags::Taggable, MetricsWrapper, ServerState, BSO_ID_REGEX, COLLECTION_ID_REGEX, +}; use crate::web::{ auth::HawkPayload, error::{HawkErrorKind, ValidationErrorKind}, - tags::Taggable, + transaction::DbTransactionPool, DOCKER_FLOW_ENDPOINTS, }; const BATCH_MAX_IDS: usize = 100; @@ -408,7 +407,6 @@ impl FromRequest for BsoBody { ) .into()); } - let state = match req.app_data::>() { Some(s) => s, None => { @@ -637,7 +635,7 @@ impl FromRequest for CollectionParam { pub struct MetaRequest { pub user_id: UserIdentifier, pub tokenserver_origin: TokenserverOrigin, - pub metrics: 
metrics::Metrics, + pub metrics: Metrics, } impl FromRequest for MetaRequest { @@ -655,7 +653,7 @@ impl FromRequest for MetaRequest { Ok(MetaRequest { tokenserver_origin: user_id.tokenserver_origin, user_id: user_id.into(), - metrics: metrics::Metrics::extract(&req).await?, + metrics: MetricsWrapper::extract(&req).await?.0, }) } .boxed_local() @@ -678,7 +676,7 @@ pub struct CollectionRequest { pub tokenserver_origin: TokenserverOrigin, pub query: BsoQueryParams, pub reply: ReplyFormat, - pub metrics: metrics::Metrics, + pub metrics: Metrics, } impl FromRequest for CollectionRequest { @@ -719,7 +717,7 @@ impl FromRequest for CollectionRequest { user_id: user_id.into(), query, reply, - metrics: metrics::Metrics::extract(&req).await?, + metrics: MetricsWrapper::extract(&req).await?.0, }) } .boxed_local() @@ -738,7 +736,7 @@ pub struct CollectionPostRequest { pub query: BsoQueryParams, pub bsos: BsoBodies, pub batch: Option, - pub metrics: metrics::Metrics, + pub metrics: Metrics, pub quota_enabled: bool, } @@ -817,7 +815,7 @@ impl FromRequest for CollectionPostRequest { query, bsos, batch: batch.opt, - metrics: metrics::Metrics::extract(&req).await?, + metrics: MetricsWrapper::extract(&req).await?.0, quota_enabled: state.quota_enabled, }) }) @@ -834,7 +832,7 @@ pub struct BsoRequest { pub tokenserver_origin: TokenserverOrigin, pub query: BsoQueryParams, pub bso: String, - pub metrics: metrics::Metrics, + pub metrics: Metrics, } impl FromRequest for BsoRequest { @@ -860,7 +858,7 @@ impl FromRequest for BsoRequest { user_id: user_id.into(), query, bso: bso.bso, - metrics: metrics::Metrics::extract(&req).await?, + metrics: MetricsWrapper::extract(&req).await?.0, }) }) } @@ -876,7 +874,7 @@ pub struct BsoPutRequest { pub query: BsoQueryParams, pub bso: String, pub body: BsoBody, - pub metrics: metrics::Metrics, + pub metrics: Metrics, } impl FromRequest for BsoPutRequest { @@ -889,7 +887,7 @@ impl FromRequest for BsoPutRequest { let mut payload = payload.take(); async 
move { - let metrics = metrics::Metrics::extract(&req).await?; + let metrics = MetricsWrapper::extract(&req).await?.0; let (user_id, collection, query, bso, body) = <( HawkIdentifier, @@ -938,7 +936,7 @@ pub struct QuotaInfo { #[derive(Clone, Debug)] pub struct HeartbeatRequest { pub headers: HeaderMap, - pub db_pool: Box, + pub db_pool: Box>, pub quota: QuotaInfo, } @@ -1733,6 +1731,7 @@ impl_emit_api_metric!(BsoPutRequest); #[cfg(test)] mod tests { use actix_http::h1; + use base64::{engine, Engine}; use futures::executor::block_on; use super::*; @@ -1755,13 +1754,12 @@ mod tests { use serde_json::{self, json}; use sha2::Sha256; use syncserver_common; - use syncserver_db_common::Db; use syncserver_settings::Settings as GlobalSettings; use syncstorage_settings::{Deadman, ServerLimits, Settings as SyncstorageSettings}; use tokio::sync::RwLock; - use crate::db::mock::{MockDb, MockDbPool}; - use crate::server::{metrics, ServerState}; + use crate::server::ServerState; + use syncstorage_db::mock::{MockDb, MockDbPool}; use crate::web::auth::HawkPayload; @@ -1779,8 +1777,8 @@ mod tests { const INVALID_BSO_NAME: &str = "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz"; - fn make_db() -> Box> { - Box::new(MockDb::new()) + fn make_db() -> MockDb { + MockDb::new() } fn make_state() -> ServerState { @@ -1791,14 +1789,12 @@ mod tests { limits: Arc::clone(&SERVER_LIMITS), limits_json: serde_json::to_string(&**SERVER_LIMITS).unwrap(), port: 8000, - metrics: Box::new( - metrics::metrics_from_opts( - &syncstorage_settings.statsd_label, - syncserver_settings.statsd_host.as_deref(), - syncserver_settings.statsd_port, - ) - .unwrap(), - ), + metrics: syncserver_common::metrics_from_opts( + &syncstorage_settings.statsd_label, + syncserver_settings.statsd_host.as_deref(), + syncserver_settings.statsd_port, + ) + .unwrap(), quota_enabled: syncstorage_settings.enable_quota, deadman: Arc::new(RwLock::new(Deadman::default())), } @@ -1823,14 +1819,14 @@ mod 
tests { let payload_hash = hmac.finalize().into_bytes(); let mut id = payload.as_bytes().to_vec(); id.extend(payload_hash.to_vec()); - let id = base64::encode_config(&id, base64::URL_SAFE); + let id = engine::general_purpose::URL_SAFE.encode(&id); let token_secret = syncserver_common::hkdf_expand_32( format!("services.mozilla.com/tokenlib/v1/derive/{}", id).as_bytes(), Some(salt.as_bytes()), &SECRETS.master_secret, ) .unwrap(); - let token_secret = base64::encode_config(token_secret, base64::URL_SAFE); + let token_secret = engine::general_purpose::URL_SAFE.encode(token_secret); let credentials = Credentials { id, key: Key::new(token_secret.as_bytes(), hawk::DigestAlgorithm::Sha256).unwrap(), diff --git a/syncserver/src/web/handlers.rs b/syncserver/src/web/handlers.rs index 762f5bca..7af9432e 100644 --- a/syncserver/src/web/handlers.rs +++ b/syncserver/src/web/handlers.rs @@ -6,21 +6,22 @@ use actix_web::{dev::HttpResponseBuilder, http::StatusCode, web::Data, HttpReque use serde::Serialize; use serde_json::{json, Value}; use syncserver_common::{X_LAST_MODIFIED, X_WEAVE_NEXT_OFFSET, X_WEAVE_RECORDS}; -use syncserver_db_common::{ - error::{DbError, DbErrorKind}, +use syncstorage_db::{ params, results::{CreateBatch, Paginated}, - Db, + Db, DbError, DbErrorIntrospect, }; use time; use crate::{ - db::transaction::DbTransactionPool, error::{ApiError, ApiErrorKind}, server::ServerState, - web::extractors::{ - BsoPutRequest, BsoRequest, CollectionPostRequest, CollectionRequest, EmitApiMetric, - HeartbeatRequest, MetaRequest, ReplyFormat, TestErrorRequest, + web::{ + extractors::{ + BsoPutRequest, BsoRequest, CollectionPostRequest, CollectionRequest, EmitApiMetric, + HeartbeatRequest, MetaRequest, ReplyFormat, TestErrorRequest, + }, + transaction::DbTransactionPool, }, }; @@ -189,7 +190,7 @@ pub async fn get_collection( async fn finish_get_collection( coll: &CollectionRequest, - db: Box + '_>, + db: Box>, result: Result, DbError>, ) -> Result where @@ -280,15 +281,16 @@ pub 
async fn post_collection( // the entire, accumulated if the `commit` flag is set. pub async fn post_collection_batch( coll: CollectionPostRequest, - db: Box + '_>, + db: Box>, ) -> Result { coll.emit_api_metric("request.post_collection_batch"); trace!("Batch: Post collection batch"); // Bail early if we have nonsensical arguments // TODO: issue932 may make these multi-level transforms easier - let breq = coll.batch.clone().ok_or_else(|| -> ApiError { - ApiErrorKind::Db(DbErrorKind::BatchNotFound.into()).into() - })?; + let breq = coll + .batch + .clone() + .ok_or_else(|| -> ApiError { ApiErrorKind::Db(DbError::batch_not_found()).into() })?; let new_batch = if let Some(id) = breq.id.clone() { trace!("Batch: Validating {}", &id); @@ -313,14 +315,13 @@ pub async fn post_collection_batch( CreateBatch { id: id.clone(), size: if coll.quota_enabled { - Some(usage.total_bytes as usize) + Some(usage.total_bytes) } else { None }, } } else { - let err: DbError = DbErrorKind::BatchNotFound.into(); - return Err(ApiError::from(err)); + return Err(ApiErrorKind::Db(DbError::batch_not_found()).into()); } } else { trace!("Batch: Creating new batch"); @@ -405,8 +406,7 @@ pub async fn post_collection_batch( }) .await? } else { - let err: DbError = DbErrorKind::BatchNotFound.into(); - return Err(ApiError::from(err)); + return Err(ApiErrorKind::Db(DbError::batch_not_found()).into()); }; // Then, write the BSOs contained in the commit request into the BSO table. 
@@ -594,7 +594,7 @@ pub async fn lbheartbeat(req: HttpRequest) -> Result { let db_state = if cfg!(test) { use actix_web::http::header::HeaderValue; use std::str::FromStr; - use syncserver_db_common::PoolState; + use syncstorage_db::PoolState; let test_pool = PoolState { connections: u32::from_str( diff --git a/syncserver/src/web/middleware/mod.rs b/syncserver/src/web/middleware/mod.rs index 78e3f4b6..82072cbd 100644 --- a/syncserver/src/web/middleware/mod.rs +++ b/syncserver/src/web/middleware/mod.rs @@ -13,10 +13,11 @@ use actix_web::{ dev::{Service, ServiceRequest, ServiceResponse}, web::Data, }; +use syncserver_common::Metrics; +use tokenserver_auth::TokenserverOrigin; use crate::error::{ApiError, ApiErrorKind}; -use crate::server::{metrics::Metrics, ServerState}; -use crate::tokenserver::auth::TokenserverOrigin; +use crate::server::ServerState; pub fn emit_http_status_with_tokenserver_origin( req: ServiceRequest, @@ -37,7 +38,7 @@ pub fn emit_http_status_with_tokenserver_origin( .map(|state| state.metrics.clone()) .ok_or_else(|| ApiError::from(ApiErrorKind::NoServerState))?; - Metrics::from(&*statsd_client) + Metrics::from(&statsd_client) }; let mut tags = HashMap::default(); diff --git a/syncserver/src/web/middleware/rejectua.rs b/syncserver/src/web/middleware/rejectua.rs index 2e082ec7..d27115d0 100644 --- a/syncserver/src/web/middleware/rejectua.rs +++ b/syncserver/src/web/middleware/rejectua.rs @@ -1,17 +1,16 @@ #![allow(clippy::type_complexity)] -use std::task::{Context, Poll}; use actix_web::{ - dev::{Service, ServiceRequest, ServiceResponse, Transform}, + dev::{Service, ServiceRequest, ServiceResponse}, http::header::USER_AGENT, - Error, FromRequest, HttpResponse, + FromRequest, HttpResponse, }; -use futures::future::{self, LocalBoxFuture, Ready}; +use futures::future::LocalBoxFuture; use lazy_static::lazy_static; use regex::Regex; use crate::error::{ApiError, ApiErrorKind}; -use crate::server::metrics::Metrics; +use crate::server::MetricsWrapper; 
lazy_static! { // e.g. "Firefox-iOS-Sync/18.0b1 (iPhone; iPhone OS 13.2.2) (Fennec (synctesting))" @@ -32,67 +31,35 @@ $ .unwrap(); } -#[allow(clippy::upper_case_acronyms)] -#[derive(Debug, Default)] -pub struct RejectUA; - -impl Transform for RejectUA -where - S: Service, Error = Error>, - S::Future: 'static, - B: 'static, -{ - type Request = ServiceRequest; - type Response = ServiceResponse; - type Error = Error; - type InitError = (); - type Transform = RejectUAMiddleware; - type Future = Ready>; - - fn new_transform(&self, service: S) -> Self::Future { - future::ok(RejectUAMiddleware { service }) - } -} -#[allow(clippy::upper_case_acronyms)] -pub struct RejectUAMiddleware { - service: S, -} - -impl Service for RejectUAMiddleware -where - S: Service, Error = Error>, - S::Future: 'static, - B: 'static, -{ - type Request = ServiceRequest; - type Response = ServiceResponse; - type Error = Error; - type Future = LocalBoxFuture<'static, Result>; - - fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { - self.service.poll_ready(cx) - } - - fn call(&mut self, sreq: ServiceRequest) -> Self::Future { - match sreq.headers().get(USER_AGENT).cloned() { - Some(header) if header.to_str().map_or(false, should_reject) => Box::pin(async move { - trace!("Rejecting User-Agent: {:?}", header); - let (req, payload) = sreq.into_parts(); - Metrics::extract(&req).await?.incr("error.rejectua"); - let sreq = ServiceRequest::from_parts(req, payload).map_err(|_| { - ApiError::from(ApiErrorKind::Internal( - "failed to reconstruct ServiceRequest from its parts".to_owned(), - )) - })?; - - Ok(sreq.into_response( - HttpResponse::ServiceUnavailable() - .body("0".to_owned()) - .into_body(), +pub fn reject_user_agent( + request: ServiceRequest, + service: &mut (impl Service< + Request = ServiceRequest, + Response = ServiceResponse, + Error = actix_web::Error, + > + 'static), +) -> LocalBoxFuture<'static, Result> { + match request.headers().get(USER_AGENT).cloned() { + Some(header) if 
header.to_str().map_or(false, should_reject) => Box::pin(async move { + trace!("Rejecting User-Agent: {:?}", header); + let (req, payload) = request.into_parts(); + MetricsWrapper::extract(&req) + .await? + .0 + .incr("error.rejectua"); + let sreq = ServiceRequest::from_parts(req, payload).map_err(|_| { + ApiError::from(ApiErrorKind::Internal( + "failed to reconstruct ServiceRequest from its parts".to_owned(), )) - }), - _ => Box::pin(self.service.call(sreq)), - } + })?; + + Ok(sreq.into_response( + HttpResponse::ServiceUnavailable() + .body("0".to_owned()) + .into_body(), + )) + }), + _ => Box::pin(service.call(request)), } } diff --git a/syncserver/src/web/middleware/sentry.rs b/syncserver/src/web/middleware/sentry.rs index 7af3741c..6fff74a1 100644 --- a/syncserver/src/web/middleware/sentry.rs +++ b/syncserver/src/web/middleware/sentry.rs @@ -1,63 +1,21 @@ use std::collections::HashMap; use std::error::Error as StdError; -use std::task::{Context, Poll}; -use std::{cell::RefCell, rc::Rc}; +use std::future::Future; use actix_http::HttpMessage; use actix_web::{ - dev::{Service, ServiceRequest, ServiceResponse, Transform}, + dev::{Service, ServiceRequest, ServiceResponse}, http::header::USER_AGENT, - Error, FromRequest, + FromRequest, }; -use futures::future::{self, LocalBoxFuture}; use sentry::protocol::Event; use sentry_backtrace::parse_stacktrace; use serde_json::value::Value; -use syncserver_common::ReportableError; -use tokenserver_common::error::TokenserverError; +use syncserver_common::{Metrics, ReportableError}; +use tokenserver_common::TokenserverError; use crate::error::ApiError; -use crate::server::{metrics::Metrics, user_agent}; -use crate::web::tags::Taggable; - -pub struct SentryWrapper; - -impl SentryWrapper { - pub fn new() -> Self { - SentryWrapper::default() - } -} - -impl Default for SentryWrapper { - fn default() -> Self { - Self - } -} - -impl Transform for SentryWrapper -where - S: Service, Error = Error> + 'static, - S::Future: 'static, - B: 
'static, -{ - type Request = ServiceRequest; - type Response = ServiceResponse; - type Error = Error; - type InitError = (); - type Transform = SentryWrapperMiddleware; - type Future = LocalBoxFuture<'static, Result>; - - fn new_transform(&self, service: S) -> Self::Future { - Box::pin(future::ok(SentryWrapperMiddleware { - service: Rc::new(RefCell::new(service)), - })) - } -} - -#[derive(Debug)] -pub struct SentryWrapperMiddleware { - service: Rc>, -} +use crate::server::{tags::Taggable, user_agent, MetricsWrapper}; pub fn report( tags: HashMap, @@ -72,70 +30,61 @@ pub fn report( sentry::capture_event(event); } -impl Service for SentryWrapperMiddleware -where - S: Service, Error = Error> + 'static, - S::Future: 'static, - B: 'static, -{ - type Request = ServiceRequest; - type Response = ServiceResponse; - type Error = Error; - type Future = LocalBoxFuture<'static, Result>; +pub fn report_error( + request: ServiceRequest, + service: &mut impl Service< + Request = ServiceRequest, + Response = ServiceResponse, + Error = actix_web::Error, + >, +) -> impl Future> { + add_initial_tags(&request, request.head().method.to_string()); + add_initial_extras(&request, request.head().uri.to_string()); - fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { - self.service.poll_ready(cx) - } + let fut = service.call(request); - fn call(&mut self, sreq: ServiceRequest) -> Self::Future { - add_initial_tags(&sreq, sreq.head().method.to_string()); - add_initial_extras(&sreq, sreq.head().uri.to_string()); + Box::pin(async move { + let mut sresp = fut.await?; + let tags = sresp.request().get_tags(); + let extras = sresp.request().get_extras(); - let fut = self.service.call(sreq); - - Box::pin(async move { - let mut sresp = fut.await?; - let tags = sresp.request().get_tags(); - let extras = sresp.request().get_extras(); - - match sresp.response().error() { - None => { - // Middleware errors are eaten by current versions of Actix. Errors are now added - // to the extensions. 
Need to check both for any errors and report them. - if let Some(events) = sresp - .request() - .extensions_mut() - .remove::>>() - { - for event in events { - trace!("Sentry: found an error stored in request: {:?}", &event); - report(tags.clone(), extras.clone(), event); - } - } - if let Some(events) = sresp - .response_mut() - .extensions_mut() - .remove::>>() - { - for event in events { - trace!("Sentry: Found an error stored in response: {:?}", &event); - report(tags.clone(), extras.clone(), event); - } + match sresp.response().error() { + None => { + // Middleware errors are eaten by current versions of Actix. Errors are now added + // to the extensions. Need to check both for any errors and report them. + if let Some(events) = sresp + .request() + .extensions_mut() + .remove::>>() + { + for event in events { + trace!("Sentry: found an error stored in request: {:?}", &event); + report(tags.clone(), extras.clone(), event); } } - Some(e) => { - let metrics = Metrics::extract(sresp.request()).await.unwrap(); - - if let Some(apie) = e.as_error::() { - process_error(apie, metrics, tags, extras); - } else if let Some(tokenserver_error) = e.as_error::() { - process_error(tokenserver_error, metrics, tags, extras); + if let Some(events) = sresp + .response_mut() + .extensions_mut() + .remove::>>() + { + for event in events { + trace!("Sentry: Found an error stored in response: {:?}", &event); + report(tags.clone(), extras.clone(), event); } } } - Ok(sresp) - }) - } + Some(e) => { + let metrics = MetricsWrapper::extract(sresp.request()).await.unwrap().0; + + if let Some(apie) = e.as_error::() { + process_error(apie, metrics, tags, extras); + } else if let Some(tokenserver_error) = e.as_error::() { + process_error(tokenserver_error, metrics, tags, extras); + } + } + } + Ok(sresp) + }) } fn process_error( diff --git a/syncserver/src/web/middleware/weave.rs b/syncserver/src/web/middleware/weave.rs index 6f72e815..b4652b11 100644 --- a/syncserver/src/web/middleware/weave.rs 
+++ b/syncserver/src/web/middleware/weave.rs @@ -1,57 +1,44 @@ use std::fmt::Display; - -use std::task::{Context, Poll}; +use std::future::Future; use actix_web::{ - dev::{Service, ServiceRequest, ServiceResponse, Transform}, + dev::{Service, ServiceRequest, ServiceResponse}, http::header::{self, HeaderMap}, - Error, }; -use futures::future::{self, LocalBoxFuture}; use syncserver_common::{X_LAST_MODIFIED, X_WEAVE_TIMESTAMP}; -use syncserver_db_common::util::SyncTimestamp; +use syncstorage_db::SyncTimestamp; use crate::error::{ApiError, ApiErrorKind}; use crate::web::DOCKER_FLOW_ENDPOINTS; -pub struct WeaveTimestampMiddleware { - service: S, -} +/// Middleware to set the X-Weave-Timestamp header on all responses. +pub fn set_weave_timestamp( + request: ServiceRequest, + service: &mut impl Service< + Request = ServiceRequest, + Response = ServiceResponse, + Error = actix_web::Error, + >, +) -> impl Future> { + let request_path = request.uri().path().to_lowercase(); + let ts = SyncTimestamp::default().as_seconds(); + let fut = service.call(request); -impl Service for WeaveTimestampMiddleware -where - S: Service, Error = Error>, - S::Future: 'static, - B: 'static, -{ - type Request = ServiceRequest; - type Response = ServiceResponse; - type Error = Error; - type Future = LocalBoxFuture<'static, Result>; - - fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { - self.service.poll_ready(cx) - } - - fn call(&mut self, sreq: ServiceRequest) -> Self::Future { - if DOCKER_FLOW_ENDPOINTS.contains(&sreq.uri().path().to_lowercase().as_str()) { - return Box::pin(self.service.call(sreq)); + Box::pin(async move { + if DOCKER_FLOW_ENDPOINTS.contains(&request_path.as_str()) { + return fut.await; } - let ts = SyncTimestamp::default().as_seconds(); - let fut = self.service.call(sreq); - Box::pin(async move { - let mut resp = fut.await?; - set_weave_timestamp(resp.headers_mut(), ts)?; - Ok(resp) - }) - } + let mut resp = fut.await?; + 
insert_weave_timestamp_into_headers(resp.headers_mut(), ts)?; + Ok(resp) + }) } /// Set a X-Weave-Timestamp header on all responses (depending on the /// response's X-Last-Modified header) -fn set_weave_timestamp(headers: &mut HeaderMap, ts: f64) -> Result<(), ApiError> { +fn insert_weave_timestamp_into_headers(headers: &mut HeaderMap, ts: f64) -> Result<(), ApiError> { fn invalid_xlm(e: E) -> ApiError where E: Display, @@ -80,39 +67,6 @@ fn set_weave_timestamp(headers: &mut HeaderMap, ts: f64) -> Result<(), ApiError> Ok(()) } -/// Middleware to set the X-Weave-Timestamp header on all responses. -pub struct WeaveTimestamp; - -impl WeaveTimestamp { - pub fn new() -> Self { - WeaveTimestamp::default() - } -} - -impl Default for WeaveTimestamp { - fn default() -> Self { - Self - } -} - -impl Transform for WeaveTimestamp -where - S: Service, Error = Error>, - S::Future: 'static, - B: 'static, -{ - type Request = ServiceRequest; - type Response = ServiceResponse; - type Error = Error; - type InitError = (); - type Transform = WeaveTimestampMiddleware; - type Future = LocalBoxFuture<'static, Result>; - - fn new_transform(&self, service: S) -> Self::Future { - Box::pin(future::ok(WeaveTimestampMiddleware { service })) - } -} - #[cfg(test)] mod tests { use super::*; @@ -122,7 +76,11 @@ mod tests { #[test] fn test_no_modified_header() { let mut resp = HttpResponse::build(http::StatusCode::OK).finish(); - set_weave_timestamp(resp.headers_mut(), SyncTimestamp::default().as_seconds()).unwrap(); + insert_weave_timestamp_into_headers( + resp.headers_mut(), + SyncTimestamp::default().as_seconds(), + ) + .unwrap(); let weave_hdr = resp .headers() .get(X_WEAVE_TIMESTAMP) @@ -146,7 +104,7 @@ mod tests { let mut resp = HttpResponse::build(http::StatusCode::OK) .header(X_LAST_MODIFIED, hts.clone()) .finish(); - set_weave_timestamp(resp.headers_mut(), ts as f64).unwrap(); + insert_weave_timestamp_into_headers(resp.headers_mut(), ts as f64).unwrap(); let weave_hdr = resp .headers() 
.get(X_WEAVE_TIMESTAMP) @@ -166,7 +124,7 @@ mod tests { let mut resp = HttpResponse::build(http::StatusCode::OK) .header(X_LAST_MODIFIED, hts.clone()) .finish(); - set_weave_timestamp(resp.headers_mut(), ts as f64 / 1_000.0).unwrap(); + insert_weave_timestamp_into_headers(resp.headers_mut(), ts as f64 / 1_000.0).unwrap(); let weave_hdr = resp .headers() .get(X_WEAVE_TIMESTAMP) diff --git a/syncserver/src/web/mod.rs b/syncserver/src/web/mod.rs index f759eee5..671c8289 100644 --- a/syncserver/src/web/mod.rs +++ b/syncserver/src/web/mod.rs @@ -4,7 +4,7 @@ pub mod error; pub mod extractors; pub mod handlers; pub mod middleware; -pub mod tags; +mod transaction; // Known DockerFlow commands for Ops callbacks pub const DOCKER_FLOW_ENDPOINTS: [&str; 4] = [ diff --git a/syncserver/src/db/transaction.rs b/syncserver/src/web/transaction.rs similarity index 92% rename from syncserver/src/db/transaction.rs rename to syncserver/src/web/transaction.rs index 68c82c8d..dfbfb7b3 100644 --- a/syncserver/src/db/transaction.rs +++ b/syncserver/src/web/transaction.rs @@ -9,20 +9,18 @@ use actix_web::{FromRequest, HttpRequest, HttpResponse}; use futures::future::LocalBoxFuture; use futures::FutureExt; use syncserver_common::X_LAST_MODIFIED; -use syncserver_db_common::{params, Db, DbPool, UserIdentifier}; +use syncstorage_db::{params, results::ConnectionInfo, Db, DbError, DbPool, UserIdentifier}; -use crate::db::results::ConnectionInfo; use crate::error::{ApiError, ApiErrorKind}; -use crate::server::metrics::Metrics; -use crate::server::ServerState; +use crate::server::tags::Taggable; +use crate::server::{MetricsWrapper, ServerState}; use crate::web::extractors::{ BsoParam, CollectionParam, HawkIdentifier, PreConditionHeader, PreConditionHeaderOpt, }; -use crate::web::tags::Taggable; #[derive(Clone)] pub struct DbTransactionPool { - pool: Box, + pool: Box>, is_read: bool, user_id: UserIdentifier, collection: Option, @@ -51,10 +49,10 @@ impl DbTransactionPool { &'a self, request: 
HttpRequest, action: A, - ) -> Result<(R, Box>), ApiError> + ) -> Result<(R, Box>), ApiError> where - A: FnOnce(Box>) -> F, - F: Future> + 'a, + A: FnOnce(Box>) -> F, + F: Future>, { // Get connection from pool let db = self.pool.get().await?; @@ -88,7 +86,7 @@ impl DbTransactionPool { } } - pub fn get_pool(&self) -> Result, Error> { + pub fn get_pool(&self) -> Result>, Error> { Ok(self.pool.clone()) } @@ -99,7 +97,7 @@ impl DbTransactionPool { action: A, ) -> Result where - A: FnOnce(Box>) -> F, + A: FnOnce(Box>) -> F, F: Future> + 'a, { let (resp, db) = self.transaction_internal(request, action).await?; @@ -117,11 +115,11 @@ impl DbTransactionPool { action: A, ) -> Result where - A: FnOnce(Box>) -> F, + A: FnOnce(Box>) -> F, F: Future> + 'a, { let mreq = request.clone(); - let check_precondition = move |db: Box>| { + let check_precondition = move |db: Box>| { async move { // set the extra information for all requests so we capture default err handlers. set_extra(&mreq, db.get_connection_info()); @@ -233,9 +231,10 @@ impl FromRequest for DbTransactionPool { Err(e) => { // Semi-example to show how to use metrics inside of middleware. 
// `Result::unwrap` is safe to use here, since Metrics::extract can never fail - Metrics::extract(&req) + MetricsWrapper::extract(&req) .await .unwrap() + .0 .incr("sync.error.collectionParam"); warn!("⚠️ CollectionParam err: {:?}", e); return Err(e); diff --git a/syncstorage-db-common/Cargo.toml b/syncstorage-db-common/Cargo.toml new file mode 100644 index 00000000..68655bcf --- /dev/null +++ b/syncstorage-db-common/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "syncstorage-db-common" +version.workspace=true +license.workspace=true +authors.workspace=true +edition.workspace=true + +[dependencies] +backtrace.workspace=true +chrono.workspace=true +futures.workspace=true +lazy_static.workspace=true +http.workspace=true +serde.workspace=true +serde_json.workspace=true + +async-trait = "0.1.40" +diesel = { version = "1.4", features = ["mysql", "r2d2"] } +diesel_migrations = { version = "1.4.0", features = ["mysql"] } +syncserver-common = { path = "../syncserver-common" } +syncserver-db-common = { path = "../syncserver-db-common" } +thiserror = "1.0.26" diff --git a/syncstorage-db-common/src/error.rs b/syncstorage-db-common/src/error.rs new file mode 100644 index 00000000..aa1716a5 --- /dev/null +++ b/syncstorage-db-common/src/error.rs @@ -0,0 +1,138 @@ +use std::fmt; + +use backtrace::Backtrace; +use http::StatusCode; +use syncserver_common::{impl_fmt_display, ReportableError}; +use thiserror::Error; + +/// Errors common to all supported syncstorage database backends. These errors can be thought of +/// as being related more to the syncstorage application logic as opposed to a particular +/// database backend. 
+#[derive(Debug)] +pub struct SyncstorageDbError { + kind: SyncstorageDbErrorKind, + pub status: StatusCode, + pub backtrace: Backtrace, +} + +#[derive(Debug, Error)] +enum SyncstorageDbErrorKind { + #[error("Specified collection does not exist")] + CollectionNotFound, + + #[error("Specified bso does not exist")] + BsoNotFound, + + #[error("Specified batch does not exist")] + BatchNotFound, + + #[error("An attempt at a conflicting write")] + Conflict, + + #[error("Unexpected error: {}", _0)] + Internal(String), + + #[error("User over quota")] + Quota, +} + +impl SyncstorageDbError { + pub fn batch_not_found() -> Self { + SyncstorageDbErrorKind::BatchNotFound.into() + } + + pub fn bso_not_found() -> Self { + SyncstorageDbErrorKind::BsoNotFound.into() + } + + pub fn collection_not_found() -> Self { + SyncstorageDbErrorKind::CollectionNotFound.into() + } + + pub fn conflict() -> Self { + SyncstorageDbErrorKind::Conflict.into() + } + + pub fn internal(msg: String) -> Self { + SyncstorageDbErrorKind::Internal(msg).into() + } + + pub fn quota() -> Self { + SyncstorageDbErrorKind::Quota.into() + } +} + +pub trait DbErrorIntrospect { + fn is_collection_not_found(&self) -> bool; + fn is_conflict(&self) -> bool; + fn is_quota(&self) -> bool; + fn is_bso_not_found(&self) -> bool; + fn is_batch_not_found(&self) -> bool; +} + +impl DbErrorIntrospect for SyncstorageDbError { + fn is_collection_not_found(&self) -> bool { + matches!(self.kind, SyncstorageDbErrorKind::CollectionNotFound) + } + + fn is_conflict(&self) -> bool { + matches!(self.kind, SyncstorageDbErrorKind::Conflict) + } + + fn is_quota(&self) -> bool { + matches!(self.kind, SyncstorageDbErrorKind::Quota) + } + + fn is_bso_not_found(&self) -> bool { + matches!(self.kind, SyncstorageDbErrorKind::BsoNotFound) + } + + fn is_batch_not_found(&self) -> bool { + matches!(self.kind, SyncstorageDbErrorKind::BatchNotFound) + } +} + +impl ReportableError for SyncstorageDbError { + fn is_sentry_event(&self) -> bool { + 
!matches!(&self.kind, SyncstorageDbErrorKind::Conflict) + } + + fn metric_label(&self) -> Option { + match &self.kind { + SyncstorageDbErrorKind::Conflict => Some("storage.conflict".to_owned()), + _ => None, + } + } + + fn error_backtrace(&self) -> String { + format!("{:#?}", self.backtrace) + } +} + +impl From for SyncstorageDbError { + fn from(kind: SyncstorageDbErrorKind) -> Self { + let status = match kind { + SyncstorageDbErrorKind::CollectionNotFound | SyncstorageDbErrorKind::BsoNotFound => { + StatusCode::NOT_FOUND + } + // Matching the Python code here (a 400 vs 404) + SyncstorageDbErrorKind::BatchNotFound => StatusCode::BAD_REQUEST, + // NOTE: the protocol specification states that we should return a + // "409 Conflict" response here, but clients currently do not + // handle these responses very well: + // * desktop bug: https://bugzilla.mozilla.org/show_bug.cgi?id=959034 + // * android bug: https://bugzilla.mozilla.org/show_bug.cgi?id=959032 + SyncstorageDbErrorKind::Conflict => StatusCode::SERVICE_UNAVAILABLE, + SyncstorageDbErrorKind::Quota => StatusCode::FORBIDDEN, + _ => StatusCode::INTERNAL_SERVER_ERROR, + }; + + Self { + kind, + status, + backtrace: Backtrace::new(), + } + } +} + +impl_fmt_display!(SyncstorageDbError, SyncstorageDbErrorKind); diff --git a/syncstorage-db-common/src/lib.rs b/syncstorage-db-common/src/lib.rs new file mode 100644 index 00000000..8cf3e684 --- /dev/null +++ b/syncstorage-db-common/src/lib.rs @@ -0,0 +1,299 @@ +pub mod error; +pub mod params; +pub mod results; +pub mod util; + +use std::fmt::Debug; + +use async_trait::async_trait; +use futures::{future, TryFutureExt}; +use lazy_static::lazy_static; +use serde::Deserialize; +use syncserver_db_common::{DbFuture, GetPoolState}; + +use error::DbErrorIntrospect; +use util::SyncTimestamp; + +lazy_static! { + /// For efficiency, it's possible to use fixed pre-determined IDs for + /// common collection names. This is the canonical list of such
Non-standard collections will be allocated IDs starting + /// from the highest ID in this collection. + pub static ref STD_COLLS: Vec<(i32, &'static str)> = { + vec![ + (1, "clients"), + (2, "crypto"), + (3, "forms"), + (4, "history"), + (5, "keys"), + (6, "meta"), + (7, "bookmarks"), + (8, "prefs"), + (9, "tabs"), + (10, "passwords"), + (11, "addons"), + (12, "addresses"), + (13, "creditcards"), + ] + }; +} + +/// Rough guesstimate of the maximum reasonable life span of a batch +pub const BATCH_LIFETIME: i64 = 2 * 60 * 60 * 1000; // 2 hours, in milliseconds + +/// The ttl to use for rows that are never supposed to expire (in seconds) +pub const DEFAULT_BSO_TTL: u32 = 2_100_000_000; + +/// Non-standard collections will be allocated IDs beginning with this value +pub const FIRST_CUSTOM_COLLECTION_ID: i32 = 101; + +#[async_trait] +pub trait DbPool: Sync + Send + Debug + GetPoolState { + type Error; + + async fn get(&self) -> Result>, Self::Error>; + + fn validate_batch_id(&self, params: params::ValidateBatchId) -> Result<(), Self::Error>; + + fn box_clone(&self) -> Box>; +} + +impl Clone for Box> { + fn clone(&self) -> Box> { + self.box_clone() + } +} + +pub trait Db: Debug { + type Error: DbErrorIntrospect + 'static; + + fn lock_for_read(&self, params: params::LockCollection) -> DbFuture<'_, (), Self::Error>; + + fn lock_for_write(&self, params: params::LockCollection) -> DbFuture<'_, (), Self::Error>; + + fn begin(&self, for_write: bool) -> DbFuture<'_, (), Self::Error>; + + fn commit(&self) -> DbFuture<'_, (), Self::Error>; + + fn rollback(&self) -> DbFuture<'_, (), Self::Error>; + + fn get_collection_timestamps( + &self, + params: params::GetCollectionTimestamps, + ) -> DbFuture<'_, results::GetCollectionTimestamps, Self::Error>; + + fn get_collection_timestamp( + &self, + params: params::GetCollectionTimestamp, + ) -> DbFuture<'_, results::GetCollectionTimestamp, Self::Error>; + + fn get_collection_counts( + &self, + params: params::GetCollectionCounts, + ) -> 
DbFuture<'_, results::GetCollectionCounts, Self::Error>; + + fn get_collection_usage( + &self, + params: params::GetCollectionUsage, + ) -> DbFuture<'_, results::GetCollectionUsage, Self::Error>; + + fn get_storage_timestamp( + &self, + params: params::GetStorageTimestamp, + ) -> DbFuture<'_, results::GetStorageTimestamp, Self::Error>; + + fn get_storage_usage( + &self, + params: params::GetStorageUsage, + ) -> DbFuture<'_, results::GetStorageUsage, Self::Error>; + + fn get_quota_usage( + &self, + params: params::GetQuotaUsage, + ) -> DbFuture<'_, results::GetQuotaUsage, Self::Error>; + + fn delete_storage( + &self, + params: params::DeleteStorage, + ) -> DbFuture<'_, results::DeleteStorage, Self::Error>; + + fn delete_collection( + &self, + params: params::DeleteCollection, + ) -> DbFuture<'_, results::DeleteCollection, Self::Error>; + + fn delete_bsos( + &self, + params: params::DeleteBsos, + ) -> DbFuture<'_, results::DeleteBsos, Self::Error>; + + fn get_bsos(&self, params: params::GetBsos) -> DbFuture<'_, results::GetBsos, Self::Error>; + + fn get_bso_ids(&self, params: params::GetBsos) + -> DbFuture<'_, results::GetBsoIds, Self::Error>; + + fn post_bsos(&self, params: params::PostBsos) -> DbFuture<'_, results::PostBsos, Self::Error>; + + fn delete_bso( + &self, + params: params::DeleteBso, + ) -> DbFuture<'_, results::DeleteBso, Self::Error>; + + fn get_bso(&self, params: params::GetBso) + -> DbFuture<'_, Option, Self::Error>; + + fn get_bso_timestamp( + &self, + params: params::GetBsoTimestamp, + ) -> DbFuture<'_, results::GetBsoTimestamp, Self::Error>; + + fn put_bso(&self, params: params::PutBso) -> DbFuture<'_, results::PutBso, Self::Error>; + + fn create_batch( + &self, + params: params::CreateBatch, + ) -> DbFuture<'_, results::CreateBatch, Self::Error>; + + fn validate_batch( + &self, + params: params::ValidateBatch, + ) -> DbFuture<'_, results::ValidateBatch, Self::Error>; + + fn append_to_batch( + &self, + params: params::AppendToBatch, + ) -> 
DbFuture<'_, results::AppendToBatch, Self::Error>; + + fn get_batch( + &self, + params: params::GetBatch, + ) -> DbFuture<'_, Option, Self::Error>; + + fn commit_batch( + &self, + params: params::CommitBatch, + ) -> DbFuture<'_, results::CommitBatch, Self::Error>; + + fn box_clone(&self) -> Box>; + + fn check(&self) -> DbFuture<'_, results::Check, Self::Error>; + + fn get_connection_info(&self) -> results::ConnectionInfo; + + /// Retrieve the timestamp for an item/collection + /// + /// Modeled on the Python `get_resource_timestamp` function. + fn extract_resource( + &self, + user_id: UserIdentifier, + collection: Option, + bso: Option, + ) -> DbFuture<'_, SyncTimestamp, Self::Error> { + // If there's no collection, we return the overall storage timestamp + let collection = match collection { + Some(collection) => collection, + None => return Box::pin(self.get_storage_timestamp(user_id)), + }; + // If there's no bso, return the collection + let bso = match bso { + Some(bso) => bso, + None => { + return Box::pin( + self.get_collection_timestamp(params::GetCollectionTimestamp { + user_id, + collection, + }) + .or_else(|e| { + if e.is_collection_not_found() { + future::ok(SyncTimestamp::from_seconds(0f64)) + } else { + future::err(e) + } + }), + ) + } + }; + Box::pin( + self.get_bso_timestamp(params::GetBsoTimestamp { + user_id, + collection, + id: bso, + }) + .or_else(|e| { + if e.is_collection_not_found() { + future::ok(SyncTimestamp::from_seconds(0f64)) + } else { + future::err(e) + } + }), + ) + } + + /// Internal methods used by the db tests + + fn get_collection_id(&self, name: String) -> DbFuture<'_, i32, Self::Error>; + + fn create_collection(&self, name: String) -> DbFuture<'_, i32, Self::Error>; + + fn update_collection( + &self, + params: params::UpdateCollection, + ) -> DbFuture<'_, SyncTimestamp, Self::Error>; + + fn timestamp(&self) -> SyncTimestamp; + + fn set_timestamp(&self, timestamp: SyncTimestamp); + + fn delete_batch(&self, params: 
params::DeleteBatch) -> DbFuture<'_, (), Self::Error>; + + fn clear_coll_cache(&self) -> DbFuture<'_, (), Self::Error>; + + fn set_quota(&mut self, enabled: bool, limit: usize, enforce: bool); +} + +impl Clone for Box> +where + E: DbErrorIntrospect + 'static, +{ + fn clone(&self) -> Box> { + self.box_clone() + } +} + +#[derive(Debug, Default, Deserialize, Clone, PartialEq, Eq, Copy)] +#[serde(rename_all = "lowercase")] +pub enum Sorting { + #[default] + None, + Newest, + Oldest, + Index, +} + +#[derive(Clone, Debug, Default, Eq, Hash, PartialEq)] +pub struct UserIdentifier { + /// For MySQL database backends as the primary key + pub legacy_id: u64, + /// For NoSQL database backends that require randomly distributed primary keys + pub fxa_uid: String, + pub fxa_kid: String, +} + +impl UserIdentifier { + /// Create a new legacy id user identifier + pub fn new_legacy(user_id: u64) -> Self { + Self { + legacy_id: user_id, + ..Default::default() + } + } +} + +impl From for UserIdentifier { + fn from(val: u32) -> Self { + Self { + legacy_id: val.into(), + ..Default::default() + } + } +} diff --git a/syncserver-db-common/src/params.rs b/syncstorage-db-common/src/params.rs similarity index 100% rename from syncserver-db-common/src/params.rs rename to syncstorage-db-common/src/params.rs diff --git a/syncserver-db-common/src/results.rs b/syncstorage-db-common/src/results.rs similarity index 100% rename from syncserver-db-common/src/results.rs rename to syncstorage-db-common/src/results.rs diff --git a/syncserver-db-common/src/util.rs b/syncstorage-db-common/src/util.rs similarity index 81% rename from syncserver-db-common/src/util.rs rename to syncstorage-db-common/src/util.rs index 92da71be..97350eee 100644 --- a/syncserver-db-common/src/util.rs +++ b/syncstorage-db-common/src/util.rs @@ -12,7 +12,7 @@ use diesel::{ }; use serde::{ser, Deserialize, Deserializer, Serialize, Serializer}; -use super::error::{DbError, DbErrorKind}; +use super::error::SyncstorageDbError; /// Get 
the time since the UNIX epoch in milliseconds fn ms_since_epoch() -> i64 { @@ -53,15 +53,17 @@ impl SyncTimestamp { } /// Create a `SyncTimestamp` from an i64 - pub fn from_i64(val: i64) -> Result { + pub fn from_i64(val: i64) -> Result { if val < 0 { - return Err(DbErrorKind::Integrity("Invalid modified i64 (< 0)".to_owned()).into()); + return Err(SyncstorageDbError::internal( + "Invalid modified i64 (< 0)".to_owned(), + )); } Ok(SyncTimestamp::from_milliseconds(val as u64)) } /// Exposed separately for db tests - pub fn _from_i64(val: i64) -> Result { + pub fn _from_i64(val: i64) -> Result { SyncTimestamp::from_i64(val) } @@ -78,17 +80,19 @@ impl SyncTimestamp { /// Create a `SyncTimestamp` from an RFC 3339 and ISO 8601 date and time /// string such as 1996-12-19T16:39:57-08:00 - pub fn from_rfc3339(val: &str) -> Result { + pub fn from_rfc3339(val: &str) -> Result { let dt = DateTime::parse_from_rfc3339(val) - .map_err(|e| DbErrorKind::Integrity(format!("Invalid TIMESTAMP {}", e)))?; + .map_err(|e| SyncstorageDbError::internal(format!("Invalid TIMESTAMP {}", e)))?; Self::from_datetime(dt) } /// Create a `SyncTimestamp` from a chrono DateTime - fn from_datetime(val: DateTime) -> Result { + fn from_datetime(val: DateTime) -> Result { let millis = val.timestamp_millis(); if millis < 0 { - return Err(DbErrorKind::Integrity("Invalid DateTime (< 0)".to_owned()).into()); + return Err(SyncstorageDbError::internal( + "Invalid DateTime (< 0)".to_owned(), + )); } Ok(SyncTimestamp::from_milliseconds(millis as u64)) } @@ -105,7 +109,7 @@ impl SyncTimestamp { /// Return the timestamp as an RFC 3339 and ISO 8601 date and time string such as /// 1996-12-19T16:39:57-08:00 - pub fn as_rfc3339(self) -> Result { + pub fn as_rfc3339(self) -> Result { to_rfc3339(self.as_i64()) } } @@ -167,12 +171,17 @@ where /// Render a timestamp (as an i64 milliseconds since epoch) as an RFC 3339 and ISO 8601 /// date and time string such as 1996-12-19T16:39:57-08:00 -pub fn to_rfc3339(val: i64) -> 
Result { +pub fn to_rfc3339(val: i64) -> Result { let secs = val / 1000; let nsecs = ((val % 1000) * 1_000_000).try_into().map_err(|e| { - DbError::internal(&format!("Invalid timestamp (nanoseconds) {}: {}", val, e)) + SyncstorageDbError::internal(format!("Invalid timestamp (nanoseconds) {}: {}", val, e)) })?; - Ok(Utc - .timestamp(secs, nsecs) - .to_rfc3339_opts(SecondsFormat::Nanos, true)) + let ts = Utc.timestamp_opt(secs, nsecs); + if let Some(dt) = ts.single() { + return Ok(dt.to_rfc3339_opts(SecondsFormat::Nanos, true)); + }; + Err(SyncstorageDbError::internal(format!( + "Invalid or ambiguous timestamp {}: {:?}", + val, ts + ))) } diff --git a/syncstorage-db/Cargo.toml b/syncstorage-db/Cargo.toml new file mode 100644 index 00000000..949cb8e4 --- /dev/null +++ b/syncstorage-db/Cargo.toml @@ -0,0 +1,34 @@ +[package] +name = "syncstorage-db" +version.workspace=true +license.workspace=true +authors.workspace=true +edition.workspace=true + +[dependencies] +cadence.workspace=true +env_logger.workspace=true +futures.workspace=true +lazy_static.workspace=true +rand.workspace=true +slog-scope.workspace=true + +async-trait = "0.1.40" +hostname = "0.3.1" +log = { version = "0.4", features = [ + "max_level_debug", + "release_max_level_info", +] } +syncserver-common = { path = "../syncserver-common" } +syncserver-db-common = { path = "../syncserver-db-common" } +syncserver-settings = { path = "../syncserver-settings" } +syncstorage-db-common = { path = "../syncstorage-db-common" } +syncstorage-mysql = { path = "../syncstorage-mysql", optional = true } +syncstorage-settings = { path = "../syncstorage-settings" } +syncstorage-spanner = { path = "../syncstorage-spanner", optional = true } +# pinning to 0.2.4 due to high number of dependencies (actix, bb8, deadpool, etc.) 
+tokio = { version = "0.2.4", features = ["macros", "sync"] } + +[features] +mysql = ['syncstorage-mysql'] +spanner = ['syncstorage-spanner'] diff --git a/syncstorage-db/src/lib.rs b/syncstorage-db/src/lib.rs new file mode 100644 index 00000000..f7007d1c --- /dev/null +++ b/syncstorage-db/src/lib.rs @@ -0,0 +1,77 @@ +//! Generic db abstraction. + +#[cfg(test)] +#[macro_use] +extern crate slog_scope; + +pub mod mock; +#[cfg(test)] +mod tests; + +use std::time::Duration; + +use cadence::{Gauged, StatsdClient}; +use tokio::{self, time}; + +#[cfg(feature = "mysql")] +pub type DbPoolImpl = syncstorage_mysql::MysqlDbPool; +#[cfg(feature = "mysql")] +pub use syncstorage_mysql::DbError; +#[cfg(feature = "mysql")] +pub type DbImpl = syncstorage_mysql::MysqlDb; + +#[cfg(feature = "spanner")] +pub type DbPoolImpl = syncstorage_spanner::SpannerDbPool; +#[cfg(feature = "spanner")] +pub use syncstorage_spanner::DbError; +#[cfg(feature = "spanner")] +pub type DbImpl = syncstorage_spanner::SpannerDb; + +pub use syncserver_db_common::{GetPoolState, PoolState}; +pub use syncstorage_db_common::error::DbErrorIntrospect; + +pub use syncstorage_db_common::{ + params, results, + util::{to_rfc3339, SyncTimestamp}, + Db, DbPool, Sorting, UserIdentifier, +}; + +#[cfg(all(feature = "mysql", feature = "spanner"))] +compile_error!("only one of the \"mysql\" and \"spanner\" features can be enabled at a time"); + +#[cfg(not(any(feature = "mysql", feature = "spanner")))] +compile_error!("exactly one of the \"mysql\" and \"spanner\" features must be enabled"); + +/// Emit DbPool metrics periodically +pub fn spawn_pool_periodic_reporter( + interval: Duration, + metrics: StatsdClient, + pool: T, +) -> Result<(), DbError> { + let hostname = hostname::get() + .expect("Couldn't get hostname") + .into_string() + .expect("Couldn't get hostname"); + tokio::spawn(async move { + loop { + let PoolState { + connections, + idle_connections, + } = pool.state(); + metrics + .gauge_with_tags( + 
"storage.pool.connections.active", + (connections - idle_connections) as u64, + ) + .with_tag("hostname", &hostname) + .send(); + metrics + .gauge_with_tags("storage.pool.connections.idle", idle_connections as u64) + .with_tag("hostname", &hostname) + .send(); + time::delay_for(interval).await; + } + }); + + Ok(()) +} diff --git a/syncserver/src/db/mock.rs b/syncstorage-db/src/mock.rs similarity index 86% rename from syncserver/src/db/mock.rs rename to syncstorage-db/src/mock.rs index af13bfb7..2f5575b6 100644 --- a/syncserver/src/db/mock.rs +++ b/syncstorage-db/src/mock.rs @@ -2,10 +2,12 @@ #![allow(clippy::new_without_default)] use async_trait::async_trait; use futures::future; -use syncserver_db_common::{ - error::DbError, params, results, util::SyncTimestamp, Db, DbFuture, DbPool, GetPoolState, - PoolState, -}; +use syncserver_db_common::{GetPoolState, PoolState}; +use syncstorage_db_common::{params, results, util::SyncTimestamp, Db, DbPool}; + +use crate::DbError; + +type DbFuture<'a, T> = syncserver_db_common::DbFuture<'a, T, DbError>; #[derive(Clone, Debug)] pub struct MockDbPool; @@ -18,15 +20,17 @@ impl MockDbPool { #[async_trait] impl DbPool for MockDbPool { - async fn get<'a>(&'a self) -> Result>, DbError> { - Ok(Box::new(MockDb::new()) as Box>) + type Error = DbError; + + async fn get(&self) -> Result>, Self::Error> { + Ok(Box::new(MockDb::new())) } fn validate_batch_id(&self, _: params::ValidateBatchId) -> Result<(), DbError> { Ok(()) } - fn box_clone(&self) -> Box { + fn box_clone(&self) -> Box> { Box::new(self.clone()) } } @@ -58,7 +62,9 @@ macro_rules! 
mock_db_method { }; } -impl<'a> Db<'a> for MockDb { +impl Db for MockDb { + type Error = DbError; + fn commit(&self) -> DbFuture<'_, ()> { Box::pin(future::ok(())) } @@ -71,7 +77,7 @@ impl<'a> Db<'a> for MockDb { Box::pin(future::ok(())) } - fn box_clone(&self) -> Box> { + fn box_clone(&self) -> Box> { Box::new(self.clone()) } diff --git a/syncserver/src/db/tests/batch.rs b/syncstorage-db/src/tests/batch.rs similarity index 90% rename from syncserver/src/db/tests/batch.rs rename to syncstorage-db/src/tests/batch.rs index 8efa1613..ecc972fd 100644 --- a/syncserver/src/db/tests/batch.rs +++ b/syncstorage-db/src/tests/batch.rs @@ -1,8 +1,11 @@ use log::debug; -use syncserver_db_common::{params, results, util::SyncTimestamp, BATCH_LIFETIME}; use syncserver_settings::Settings; +use syncstorage_db_common::{ + error::DbErrorIntrospect, params, results, util::SyncTimestamp, BATCH_LIFETIME, +}; -use super::support::{db_pool, gbso, hid, pbso, postbso, test_db, Result}; +use super::support::{db_pool, gbso, hid, pbso, postbso, test_db}; +use crate::DbError; fn cb(user_id: u32, coll: &str, bsos: Vec) -> params::CreateBatch { params::CreateBatch { @@ -43,9 +46,9 @@ fn gb(user_id: u32, coll: &str, id: String) -> params::GetBatch { } #[tokio::test] -async fn create_delete() -> Result<()> { +async fn create_delete() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = 1; let coll = "clients"; @@ -66,9 +69,9 @@ async fn create_delete() -> Result<()> { } #[tokio::test] -async fn expiry() -> Result<()> { +async fn expiry() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = 1; let coll = "clients"; @@ -90,9 +93,9 @@ async fn expiry() -> Result<()> { } #[tokio::test] -async fn update() -> Result<()> { +async fn update() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = 
test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = 1; let coll = "clients"; @@ -114,9 +117,9 @@ async fn update() -> Result<()> { } #[tokio::test] -async fn append_commit() -> Result<()> { +async fn append_commit() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = 1; let coll = "clients"; @@ -157,7 +160,7 @@ async fn append_commit() -> Result<()> { } #[tokio::test] -async fn quota_test_create_batch() -> Result<()> { +async fn quota_test_create_batch() -> Result<(), DbError> { let mut settings = Settings::test_settings().syncstorage; if !settings.enable_quota { @@ -169,7 +172,7 @@ async fn quota_test_create_batch() -> Result<()> { settings.limits.max_quota_limit = limit; let pool = db_pool(Some(settings.clone())).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = 1; let coll = "clients"; @@ -199,7 +202,7 @@ async fn quota_test_create_batch() -> Result<()> { } #[tokio::test] -async fn quota_test_append_batch() -> Result<()> { +async fn quota_test_append_batch() -> Result<(), DbError> { let mut settings = Settings::test_settings().syncstorage; if !settings.enable_quota { @@ -211,7 +214,7 @@ async fn quota_test_append_batch() -> Result<()> { settings.limits.max_quota_limit = limit; let pool = db_pool(Some(settings.clone())).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = 1; let coll = "clients"; @@ -244,10 +247,10 @@ async fn quota_test_append_batch() -> Result<()> { } #[tokio::test] -async fn test_append_async_w_null() -> Result<()> { +async fn test_append_async_w_null() -> Result<(), DbError> { let settings = Settings::test_settings().syncstorage; let pool = db_pool(Some(settings)).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; // Remember: TTL is seconds to live, not an expiry date let ttl_0 = 86_400; let ttl_1 = 86_400; diff --git 
a/syncserver/src/db/tests/db.rs b/syncstorage-db/src/tests/db.rs similarity index 89% rename from syncserver/src/db/tests/db.rs rename to syncstorage-db/src/tests/db.rs index 346501e1..e1a00a84 100644 --- a/syncserver/src/db/tests/db.rs +++ b/syncstorage-db/src/tests/db.rs @@ -3,10 +3,13 @@ use std::collections::HashMap; use lazy_static::lazy_static; use rand::{distributions::Alphanumeric, thread_rng, Rng}; -use syncserver_db_common::{params, util::SyncTimestamp, Sorting, UserIdentifier, DEFAULT_BSO_TTL}; use syncserver_settings::Settings; +use syncstorage_db_common::{ + error::DbErrorIntrospect, params, util::SyncTimestamp, Sorting, UserIdentifier, DEFAULT_BSO_TTL, +}; -use super::support::{db_pool, dbso, dbsos, gbso, gbsos, hid, pbso, postbso, test_db, Result}; +use super::support::{db_pool, dbso, dbsos, gbso, gbsos, hid, pbso, postbso, test_db}; +use crate::DbError; // distant future (year 2099) timestamp for tests const MAX_TIMESTAMP: u64 = 4_070_937_600_000; @@ -16,9 +19,9 @@ lazy_static! 
{ } #[tokio::test] -async fn bso_successfully_updates_single_values() -> Result<()> { +async fn bso_successfully_updates_single_values() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = *UID; let coll = "clients"; @@ -57,9 +60,9 @@ async fn bso_successfully_updates_single_values() -> Result<()> { } #[tokio::test] -async fn bso_modified_not_changed_on_ttl_touch() -> Result<()> { +async fn bso_modified_not_changed_on_ttl_touch() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = *UID; let coll = "clients"; @@ -80,9 +83,9 @@ async fn bso_modified_not_changed_on_ttl_touch() -> Result<()> { } #[tokio::test] -async fn put_bso_updates() -> Result<()> { +async fn put_bso_updates() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = *UID; let coll = "clients"; @@ -103,9 +106,9 @@ async fn put_bso_updates() -> Result<()> { } #[tokio::test] -async fn get_bsos_limit_offset() -> Result<()> { +async fn get_bsos_limit_offset() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = *UID; let coll = "clients"; @@ -224,9 +227,9 @@ async fn get_bsos_limit_offset() -> Result<()> { } #[tokio::test] -async fn get_bsos_newer() -> Result<()> { +async fn get_bsos_newer() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = *UID; let coll = "clients"; @@ -309,9 +312,9 @@ async fn get_bsos_newer() -> Result<()> { } #[tokio::test] -async fn get_bsos_sort() -> Result<()> { +async fn get_bsos_sort() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = *UID; 
let coll = "clients"; @@ -382,9 +385,9 @@ async fn get_bsos_sort() -> Result<()> { } #[tokio::test] -async fn delete_bsos_in_correct_collection() -> Result<()> { +async fn delete_bsos_in_correct_collection() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = *UID; let payload = "data"; @@ -399,9 +402,9 @@ async fn delete_bsos_in_correct_collection() -> Result<()> { } #[tokio::test] -async fn get_storage_timestamp() -> Result<()> { +async fn get_storage_timestamp() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = *UID; db.create_collection("NewCollection1".to_owned()).await?; @@ -422,17 +425,17 @@ async fn get_storage_timestamp() -> Result<()> { } #[tokio::test] -async fn get_collection_id() -> Result<()> { +async fn get_collection_id() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; db.get_collection_id("bookmarks".to_owned()).await?; Ok(()) } #[tokio::test] -async fn create_collection() -> Result<()> { +async fn create_collection() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let name = "NewCollection"; let cid = db.create_collection(name.to_owned()).await?; @@ -443,9 +446,9 @@ async fn create_collection() -> Result<()> { } #[tokio::test] -async fn update_collection() -> Result<()> { +async fn update_collection() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let collection = "test".to_owned(); let cid = db.create_collection(collection.clone()).await?; @@ -459,9 +462,9 @@ async fn update_collection() -> Result<()> { } #[tokio::test] -async fn delete_collection() -> Result<()> { +async fn delete_collection() -> Result<(), 
DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = *UID; let coll = "NewCollection"; @@ -495,9 +498,9 @@ async fn delete_collection() -> Result<()> { } #[tokio::test] -async fn delete_collection_tombstone() -> Result<()> { +async fn delete_collection_tombstone() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = *UID; let coll = "test"; @@ -555,9 +558,9 @@ async fn delete_collection_tombstone() -> Result<()> { } #[tokio::test] -async fn get_collection_timestamps() -> Result<()> { +async fn get_collection_timestamps() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = *UID; let coll = "test".to_owned(); @@ -583,9 +586,9 @@ async fn get_collection_timestamps() -> Result<()> { } #[tokio::test] -async fn get_collection_timestamps_tombstone() -> Result<()> { +async fn get_collection_timestamps_tombstone() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = *UID; let coll = "test".to_owned(); @@ -608,9 +611,9 @@ async fn get_collection_timestamps_tombstone() -> Result<()> { } #[tokio::test] -async fn get_collection_usage() -> Result<()> { +async fn get_collection_usage() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = 5; let mut expected = HashMap::new(); @@ -625,7 +628,7 @@ async fn get_collection_usage() -> Result<()> { db.put_bso(pbso( uid, coll, - &format!("b{}", i as i32), + &format!("b{}", i), Some(&String::from_utf8_lossy(&payload)), None, None, @@ -660,7 +663,7 @@ async fn get_collection_usage() -> Result<()> { } #[tokio::test] -async fn test_quota() -> Result<()> { +async fn test_quota() -> Result<(), DbError> { let 
settings = Settings::test_settings(); if !settings.syncstorage.enable_quota { @@ -669,7 +672,7 @@ async fn test_quota() -> Result<()> { } let pool = db_pool(None).await?; - let mut db = test_db(pool.as_ref()).await?; + let mut db = test_db(pool).await?; let uid = 5; let coll = "bookmarks"; @@ -702,9 +705,9 @@ async fn test_quota() -> Result<()> { } #[tokio::test] -async fn get_collection_counts() -> Result<()> { +async fn get_collection_counts() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = 4; let mut expected = HashMap::new(); @@ -725,9 +728,9 @@ async fn get_collection_counts() -> Result<()> { } #[tokio::test] -async fn put_bso() -> Result<()> { +async fn put_bso() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = *UID; let coll = "NewCollection"; @@ -765,9 +768,9 @@ async fn put_bso() -> Result<()> { } #[tokio::test] -async fn post_bsos() -> Result<()> { +async fn post_bsos() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = *UID; let coll = "NewCollection"; @@ -836,9 +839,9 @@ async fn post_bsos() -> Result<()> { } #[tokio::test] -async fn get_bso() -> Result<()> { +async fn get_bso() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = *UID; let coll = "clients"; @@ -857,9 +860,9 @@ async fn get_bso() -> Result<()> { } #[tokio::test] -async fn get_bsos() -> Result<()> { +async fn get_bsos() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = 2; let coll = "clients"; @@ -928,9 +931,9 @@ async fn get_bsos() -> Result<()> { } #[tokio::test] -async fn get_bso_timestamp() -> Result<()> { +async fn 
get_bso_timestamp() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = *UID; let coll = "clients"; @@ -949,9 +952,9 @@ async fn get_bso_timestamp() -> Result<()> { } #[tokio::test] -async fn delete_bso() -> Result<()> { +async fn delete_bso() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = *UID; let coll = "clients"; @@ -965,9 +968,9 @@ async fn delete_bso() -> Result<()> { } #[tokio::test] -async fn delete_bsos() -> Result<()> { +async fn delete_bsos() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = *UID; let coll = "clients"; @@ -1000,31 +1003,31 @@ async fn delete_bsos() -> Result<()> { /* #[tokio::test] -async fn usage_stats() -> Result<()> { +async fn usage_stats() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; Ok(()) } #[tokio::test] -async fn purge_expired() -> Result<()> { +async fn purge_expired() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; Ok(()) } #[tokio::test] -async fn optimize() -> Result<()> { +async fn optimize() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; Ok(()) } */ #[tokio::test] -async fn delete_storage() -> Result<()> { +async fn delete_storage() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = *UID; let bid = "test"; @@ -1048,9 +1051,9 @@ async fn delete_storage() -> Result<()> { } #[tokio::test] -async fn collection_cache() -> Result<()> { +async fn collection_cache() -> Result<(), DbError> { let pool = 
db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = *UID; let coll = "test"; @@ -1069,9 +1072,9 @@ async fn collection_cache() -> Result<()> { } #[tokio::test] -async fn lock_for_read() -> Result<()> { +async fn lock_for_read() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = *UID; let coll = "clients"; @@ -1087,9 +1090,9 @@ async fn lock_for_read() -> Result<()> { } #[tokio::test] -async fn lock_for_write() -> Result<()> { +async fn lock_for_write() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; let uid = *UID; let coll = "clients"; @@ -1105,9 +1108,9 @@ async fn lock_for_write() -> Result<()> { } #[tokio::test] -async fn heartbeat() -> Result<()> { +async fn heartbeat() -> Result<(), DbError> { let pool = db_pool(None).await?; - let db = test_db(pool.as_ref()).await?; + let db = test_db(pool).await?; assert!(db.check().await?); Ok(()) diff --git a/syncserver/src/db/tests/mod.rs b/syncstorage-db/src/tests/mod.rs similarity index 100% rename from syncserver/src/db/tests/mod.rs rename to syncstorage-db/src/tests/mod.rs diff --git a/syncserver/src/db/tests/support.rs b/syncstorage-db/src/tests/support.rs similarity index 86% rename from syncserver/src/db/tests/support.rs rename to syncstorage-db/src/tests/support.rs index 9590d7fa..a952799e 100644 --- a/syncserver/src/db/tests/support.rs +++ b/syncstorage-db/src/tests/support.rs @@ -1,17 +1,14 @@ use std::{str::FromStr, sync::Arc}; -use syncserver_db_common::{params, util::SyncTimestamp, Db, Sorting, UserIdentifier}; +use syncserver_common::{BlockingThreadpool, Metrics}; use syncserver_settings::Settings as SyncserverSettings; +use syncstorage_db_common::{params, util::SyncTimestamp, Db, DbPool, Sorting, UserIdentifier}; use syncstorage_settings::Settings as SyncstorageSettings; -use 
crate::db::DbPool; -use crate::error::ApiResult; -use crate::{db::pool_from_settings, db::BlockingThreadpool, error::ApiError, server::metrics}; - -pub type Result = std::result::Result; +use crate::{DbError, DbPoolImpl}; #[cfg(test)] -pub async fn db_pool(settings: Option) -> Result> { +pub async fn db_pool(settings: Option) -> Result { let _ = env_logger::try_init(); // The default for SYNC_SYNCSTORAGE__DATABASE_USE_TEST_TRANSACTIONS is // false, but we want the mysql default to be true, so let's check @@ -25,13 +22,12 @@ pub async fn db_pool(settings: Option) -> Result ApiResult>> { +pub async fn test_db(pool: DbPoolImpl) -> Result>, DbError> { let db = pool.get().await?; // Spanner won't have a timestamp until lock_for_xxx are called: fill one // in for it diff --git a/syncstorage-mysql/Cargo.toml b/syncstorage-mysql/Cargo.toml new file mode 100644 index 00000000..80f372c8 --- /dev/null +++ b/syncstorage-mysql/Cargo.toml @@ -0,0 +1,28 @@ +[package] +name = "syncstorage-mysql" +version.workspace=true +license.workspace=true +authors.workspace=true +edition.workspace=true + +[dependencies] +backtrace.workspace=true +base64.workspace=true +futures.workspace=true +http.workspace=true +slog-scope.workspace=true + +async-trait = "0.1.40" +diesel = { version = "1.4", features = ["mysql", "r2d2"] } +diesel_logger = "0.1.1" +diesel_migrations = { version = "1.4.0", features = ["mysql"] } +syncserver-common = { path = "../syncserver-common" } +syncserver-db-common = { path = "../syncserver-db-common" } +syncstorage-db-common = { path = "../syncstorage-db-common" } +syncstorage-settings = { path = "../syncstorage-settings" } +thiserror = "1.0.26" +url = "2.1" + +[dev-dependencies] +env_logger.workspace=true +syncserver-settings = { path = "../syncserver-settings" } diff --git a/migrations/2018-08-28-010336_init/down.sql b/syncstorage-mysql/migrations/2018-08-28-010336_init/down.sql similarity index 100% rename from migrations/2018-08-28-010336_init/down.sql rename to 
syncstorage-mysql/migrations/2018-08-28-010336_init/down.sql diff --git a/migrations/2018-08-28-010336_init/up.sql b/syncstorage-mysql/migrations/2018-08-28-010336_init/up.sql similarity index 100% rename from migrations/2018-08-28-010336_init/up.sql rename to syncstorage-mysql/migrations/2018-08-28-010336_init/up.sql diff --git a/migrations/2019-09-11-164500/down.sql b/syncstorage-mysql/migrations/2019-09-11-164500/down.sql similarity index 100% rename from migrations/2019-09-11-164500/down.sql rename to syncstorage-mysql/migrations/2019-09-11-164500/down.sql diff --git a/migrations/2019-09-11-164500/up.sql b/syncstorage-mysql/migrations/2019-09-11-164500/up.sql similarity index 100% rename from migrations/2019-09-11-164500/up.sql rename to syncstorage-mysql/migrations/2019-09-11-164500/up.sql diff --git a/migrations/2019-09-25-174347_min_collection_id/down.sql b/syncstorage-mysql/migrations/2019-09-25-174347_min_collection_id/down.sql similarity index 100% rename from migrations/2019-09-25-174347_min_collection_id/down.sql rename to syncstorage-mysql/migrations/2019-09-25-174347_min_collection_id/down.sql diff --git a/migrations/2019-09-25-174347_min_collection_id/up.sql b/syncstorage-mysql/migrations/2019-09-25-174347_min_collection_id/up.sql similarity index 100% rename from migrations/2019-09-25-174347_min_collection_id/up.sql rename to syncstorage-mysql/migrations/2019-09-25-174347_min_collection_id/up.sql diff --git a/migrations/2020-04-03-102015_change_userid/down.sql b/syncstorage-mysql/migrations/2020-04-03-102015_change_userid/down.sql similarity index 100% rename from migrations/2020-04-03-102015_change_userid/down.sql rename to syncstorage-mysql/migrations/2020-04-03-102015_change_userid/down.sql diff --git a/migrations/2020-04-03-102015_change_userid/up.sql b/syncstorage-mysql/migrations/2020-04-03-102015_change_userid/up.sql similarity index 100% rename from migrations/2020-04-03-102015_change_userid/up.sql rename to 
syncstorage-mysql/migrations/2020-04-03-102015_change_userid/up.sql diff --git a/migrations/2020-06-12-231034_new_batch/down.sql b/syncstorage-mysql/migrations/2020-06-12-231034_new_batch/down.sql similarity index 100% rename from migrations/2020-06-12-231034_new_batch/down.sql rename to syncstorage-mysql/migrations/2020-06-12-231034_new_batch/down.sql diff --git a/migrations/2020-06-12-231034_new_batch/up.sql b/syncstorage-mysql/migrations/2020-06-12-231034_new_batch/up.sql similarity index 100% rename from migrations/2020-06-12-231034_new_batch/up.sql rename to syncstorage-mysql/migrations/2020-06-12-231034_new_batch/up.sql diff --git a/migrations/2020-08-24-091401_add_quota/down.sql b/syncstorage-mysql/migrations/2020-08-24-091401_add_quota/down.sql similarity index 100% rename from migrations/2020-08-24-091401_add_quota/down.sql rename to syncstorage-mysql/migrations/2020-08-24-091401_add_quota/down.sql diff --git a/migrations/2020-08-24-091401_add_quota/up.sql b/syncstorage-mysql/migrations/2020-08-24-091401_add_quota/up.sql similarity index 100% rename from migrations/2020-08-24-091401_add_quota/up.sql rename to syncstorage-mysql/migrations/2020-08-24-091401_add_quota/up.sql diff --git a/syncserver/src/db/mysql/batch.rs b/syncstorage-mysql/src/batch.rs similarity index 87% rename from syncserver/src/db/mysql/batch.rs rename to syncstorage-mysql/src/batch.rs index 4616c9b7..1e487440 100644 --- a/syncserver/src/db/mysql/batch.rs +++ b/syncstorage-mysql/src/batch.rs @@ -1,3 +1,4 @@ +use base64::Engine; use std::collections::HashSet; use diesel::{ @@ -9,19 +10,18 @@ use diesel::{ sql_types::{BigInt, Integer}, ExpressionMethods, OptionalExtension, QueryDsl, RunQueryDsl, }; -use syncserver_db_common::{ - error::{DbError, DbErrorKind}, - params, results, UserIdentifier, BATCH_LIFETIME, -}; +use syncstorage_db_common::{params, results, UserIdentifier, BATCH_LIFETIME}; use super::{ - models::{MysqlDb, Result}, + error::DbError, + models::MysqlDb, 
schema::{batch_upload_items, batch_uploads}, + DbResult, }; const MAXTTL: i32 = 2_100_000_000; -pub fn create(db: &MysqlDb, params: params::CreateBatch) -> Result { +pub fn create(db: &MysqlDb, params: params::CreateBatch) -> DbResult { let user_id = params.user_id.legacy_id as i64; let collection_id = db.get_collection_id(¶ms.collection)?; // Careful, there's some weirdness here! @@ -47,7 +47,7 @@ pub fn create(db: &MysqlDb, params: params::CreateBatch) -> Result DbError { match e { // The user tried to create two batches with the same timestamp - DieselError::DatabaseError(UniqueViolation, _) => DbErrorKind::Conflict.into(), + DieselError::DatabaseError(UniqueViolation, _) => DbError::conflict(), _ => e.into(), } })?; @@ -59,7 +59,7 @@ pub fn create(db: &MysqlDb, params: params::CreateBatch) -> Result Result { +pub fn validate(db: &MysqlDb, params: params::ValidateBatch) -> DbResult { let batch_id = decode_id(¶ms.id)?; // Avoid hitting the db for batches that are obviously too old. Recall // that the batchid is a millisecond timestamp. @@ -79,7 +79,7 @@ pub fn validate(db: &MysqlDb, params: params::ValidateBatch) -> Result { Ok(exists.is_some()) } -pub fn append(db: &MysqlDb, params: params::AppendToBatch) -> Result<()> { +pub fn append(db: &MysqlDb, params: params::AppendToBatch) -> DbResult<()> { let exists = validate( db, params::ValidateBatch { @@ -90,7 +90,7 @@ pub fn append(db: &MysqlDb, params: params::AppendToBatch) -> Result<()> { )?; if !exists { - Err(DbErrorKind::BatchNotFound)? 
+ return Err(DbError::batch_not_found()); } let batch_id = decode_id(¶ms.batch.id)?; @@ -99,7 +99,7 @@ pub fn append(db: &MysqlDb, params: params::AppendToBatch) -> Result<()> { Ok(()) } -pub fn get(db: &MysqlDb, params: params::GetBatch) -> Result> { +pub fn get(db: &MysqlDb, params: params::GetBatch) -> DbResult> { let is_valid = validate( db, params::ValidateBatch { @@ -116,7 +116,7 @@ pub fn get(db: &MysqlDb, params: params::GetBatch) -> Result Result<()> { +pub fn delete(db: &MysqlDb, params: params::DeleteBatch) -> DbResult<()> { let batch_id = decode_id(¶ms.id)?; let user_id = params.user_id.legacy_id as i64; let collection_id = db.get_collection_id(¶ms.collection)?; @@ -133,19 +133,19 @@ pub fn delete(db: &MysqlDb, params: params::DeleteBatch) -> Result<()> { } /// Commits a batch to the bsos table, deleting the batch when succesful -pub fn commit(db: &MysqlDb, params: params::CommitBatch) -> Result { +pub fn commit(db: &MysqlDb, params: params::CommitBatch) -> DbResult { let batch_id = decode_id(¶ms.batch.id)?; let user_id = params.user_id.legacy_id as i64; let collection_id = db.get_collection_id(¶ms.collection)?; let timestamp = db.timestamp(); sql_query(include_str!("batch_commit.sql")) - .bind::(user_id as i64) + .bind::(user_id) .bind::(&collection_id) .bind::(&db.timestamp().as_i64()) .bind::(&db.timestamp().as_i64()) .bind::((MAXTTL as i64) * 1000) // XXX: .bind::(&batch_id) - .bind::(user_id as i64) + .bind::(user_id) .bind::(&db.timestamp().as_i64()) .bind::(&db.timestamp().as_i64()) .execute(&db.conn)?; @@ -169,7 +169,7 @@ pub fn do_append( user_id: UserIdentifier, _collection_id: i32, bsos: Vec, -) -> Result<()> { +) -> DbResult<()> { fn exist_idx(user_id: u64, batch_id: i64, bso_id: &str) -> String { // Construct something that matches the key for batch_upload_items format!( @@ -253,26 +253,27 @@ pub fn do_append( Ok(()) } -pub fn validate_batch_id(id: &str) -> Result<()> { +pub fn validate_batch_id(id: &str) -> DbResult<()> { 
decode_id(id).map(|_| ()) } fn encode_id(id: i64) -> String { - base64::encode(&id.to_string()) + base64::engine::general_purpose::STANDARD.encode(id.to_string()) } -fn decode_id(id: &str) -> Result { - let bytes = base64::decode(id).unwrap_or_else(|_| id.as_bytes().to_vec()); +fn decode_id(id: &str) -> DbResult { + let bytes = base64::engine::general_purpose::STANDARD + .decode(id) + .unwrap_or_else(|_| id.as_bytes().to_vec()); let decoded = std::str::from_utf8(&bytes).unwrap_or(id); decoded .parse::() - .map_err(|e| DbError::internal(&format!("Invalid batch_id: {}", e))) + .map_err(|e| DbError::internal(format!("Invalid batch_id: {}", e))) } -#[macro_export] macro_rules! batch_db_method { ($name:ident, $batch_name:ident, $type:ident) => { - pub fn $name(&self, params: params::$type) -> Result { + pub fn $name(&self, params: params::$type) -> DbResult { batch::$batch_name(self, params) } }; diff --git a/syncserver/src/db/mysql/batch_commit.sql b/syncstorage-mysql/src/batch_commit.sql similarity index 100% rename from syncserver/src/db/mysql/batch_commit.sql rename to syncstorage-mysql/src/batch_commit.sql diff --git a/syncserver/src/db/mysql/diesel_ext.rs b/syncstorage-mysql/src/diesel_ext.rs similarity index 100% rename from syncserver/src/db/mysql/diesel_ext.rs rename to syncstorage-mysql/src/diesel_ext.rs diff --git a/syncstorage-mysql/src/error.rs b/syncstorage-mysql/src/error.rs new file mode 100644 index 00000000..0c4c8b6a --- /dev/null +++ b/syncstorage-mysql/src/error.rs @@ -0,0 +1,144 @@ +use std::fmt; + +use backtrace::Backtrace; +use http::StatusCode; +use syncserver_common::{from_error, impl_fmt_display, InternalError, ReportableError}; +use syncserver_db_common::error::MysqlError; +use syncstorage_db_common::error::{DbErrorIntrospect, SyncstorageDbError}; +use thiserror::Error; + +/// An error type that represents any MySQL-related errors that may occur while processing a +/// syncstorage request. 
These errors may be application-specific or lower-level errors that arise +/// from the database backend. +#[derive(Debug)] +pub struct DbError { + kind: DbErrorKind, + pub status: StatusCode, + pub backtrace: Box, +} + +impl DbError { + pub fn batch_not_found() -> Self { + DbErrorKind::Common(SyncstorageDbError::batch_not_found()).into() + } + + pub fn bso_not_found() -> Self { + DbErrorKind::Common(SyncstorageDbError::bso_not_found()).into() + } + + pub fn collection_not_found() -> Self { + DbErrorKind::Common(SyncstorageDbError::collection_not_found()).into() + } + + pub fn conflict() -> Self { + DbErrorKind::Common(SyncstorageDbError::conflict()).into() + } + + pub fn internal(msg: String) -> Self { + DbErrorKind::Common(SyncstorageDbError::internal(msg)).into() + } + + pub fn quota() -> Self { + DbErrorKind::Common(SyncstorageDbError::quota()).into() + } +} + +#[derive(Debug, Error)] +enum DbErrorKind { + #[error("{}", _0)] + Common(SyncstorageDbError), + + #[error("{}", _0)] + Mysql(MysqlError), +} + +impl From for DbError { + fn from(kind: DbErrorKind) -> Self { + match &kind { + DbErrorKind::Common(dbe) => Self { + status: dbe.status, + backtrace: Box::new(dbe.backtrace.clone()), + kind, + }, + _ => Self { + kind, + status: StatusCode::INTERNAL_SERVER_ERROR, + backtrace: Box::new(Backtrace::new()), + }, + } + } +} + +impl DbErrorIntrospect for DbError { + fn is_batch_not_found(&self) -> bool { + matches!(&self.kind, DbErrorKind::Common(e) if e.is_batch_not_found()) + } + + fn is_bso_not_found(&self) -> bool { + matches!(&self.kind, DbErrorKind::Common(e) if e.is_bso_not_found()) + } + + fn is_collection_not_found(&self) -> bool { + matches!(&self.kind, DbErrorKind::Common(e) if e.is_collection_not_found()) + } + + fn is_conflict(&self) -> bool { + matches!(&self.kind, DbErrorKind::Common(e) if e.is_conflict()) + } + + fn is_quota(&self) -> bool { + matches!(&self.kind, DbErrorKind::Common(e) if e.is_quota()) + } +} + +impl ReportableError for DbError { + fn 
is_sentry_event(&self) -> bool { + matches!(&self.kind, DbErrorKind::Common(e) if e.is_sentry_event()) + } + + fn metric_label(&self) -> Option { + if let DbErrorKind::Common(e) = &self.kind { + e.metric_label() + } else { + None + } + } + + fn error_backtrace(&self) -> String { + format!("{:#?}", self.backtrace) + } +} + +impl InternalError for DbError { + fn internal_error(message: String) -> Self { + DbErrorKind::Common(SyncstorageDbError::internal(message)).into() + } +} + +impl_fmt_display!(DbError, DbErrorKind); + +from_error!(SyncstorageDbError, DbError, DbErrorKind::Common); +from_error!( + diesel::result::Error, + DbError, + |error: diesel::result::Error| DbError::from(DbErrorKind::Mysql(MysqlError::from(error))) +); +from_error!( + diesel::result::ConnectionError, + DbError, + |error: diesel::result::ConnectionError| DbError::from(DbErrorKind::Mysql(MysqlError::from( + error + ))) +); +from_error!( + diesel::r2d2::PoolError, + DbError, + |error: diesel::r2d2::PoolError| DbError::from(DbErrorKind::Mysql(MysqlError::from(error))) +); +from_error!( + diesel_migrations::RunMigrationsError, + DbError, + |error: diesel_migrations::RunMigrationsError| DbError::from(DbErrorKind::Mysql( + MysqlError::from(error) + )) +); diff --git a/syncstorage-mysql/src/lib.rs b/syncstorage-mysql/src/lib.rs new file mode 100644 index 00000000..4a933900 --- /dev/null +++ b/syncstorage-mysql/src/lib.rs @@ -0,0 +1,22 @@ +#[macro_use] +extern crate diesel; +#[macro_use] +extern crate diesel_migrations; +#[macro_use] +extern crate slog_scope; + +#[macro_use] +mod batch; +mod diesel_ext; +mod error; +mod models; +mod pool; +mod schema; +#[cfg(test)] +mod test; + +pub use error::DbError; +pub use models::MysqlDb; +pub use pool::MysqlDbPool; + +pub(crate) type DbResult = Result; diff --git a/syncserver/src/db/mysql/models.rs b/syncstorage-mysql/src/models.rs similarity index 86% rename from syncserver/src/db/mysql/models.rs rename to syncstorage-mysql/src/models.rs index 
8b7fb82f..a5cb255c 100644 --- a/syncserver/src/db/mysql/models.rs +++ b/syncstorage-mysql/src/models.rs @@ -13,44 +13,43 @@ use diesel::{ sql_types::{BigInt, Integer, Nullable, Text}, Connection, ExpressionMethods, GroupByDsl, OptionalExtension, QueryDsl, RunQueryDsl, }; -#[cfg(test)] +#[cfg(debug_assertions)] use diesel_logger::LoggingConnection; -use syncserver_db_common::{ - error::{DbError, DbErrorKind}, - params, results, - util::SyncTimestamp, - Db, DbFuture, Sorting, UserIdentifier, DEFAULT_BSO_TTL, +use syncserver_common::{BlockingThreadpool, Metrics}; +use syncserver_db_common::{sync_db_method, DbFuture}; +use syncstorage_db_common::{ + error::DbErrorIntrospect, params, results, util::SyncTimestamp, Db, Sorting, UserIdentifier, + DEFAULT_BSO_TTL, }; use syncstorage_settings::{Quota, DEFAULT_MAX_TOTAL_RECORDS}; use super::{ batch, diesel_ext::LockInShareModeDsl, + error::DbError, pool::CollectionCache, schema::{bso, collections, user_collections}, + DbResult, }; -use crate::db::BlockingThreadpool; -use crate::server::metrics::Metrics; -pub type Result = std::result::Result; type Conn = PooledConnection>; // this is the max number of records we will return. -pub static DEFAULT_LIMIT: u32 = DEFAULT_MAX_TOTAL_RECORDS; +static DEFAULT_LIMIT: u32 = DEFAULT_MAX_TOTAL_RECORDS; -pub const TOMBSTONE: i32 = 0; +const TOMBSTONE: i32 = 0; /// SQL Variable remapping /// These names are the legacy values mapped to the new names. 
-pub const COLLECTION_ID: &str = "collection"; -pub const USER_ID: &str = "userid"; -pub const MODIFIED: &str = "modified"; -pub const EXPIRY: &str = "ttl"; -pub const LAST_MODIFIED: &str = "last_modified"; -pub const COUNT: &str = "count"; -pub const TOTAL_BYTES: &str = "total_bytes"; +const COLLECTION_ID: &str = "collection"; +const USER_ID: &str = "userid"; +const MODIFIED: &str = "modified"; +const EXPIRY: &str = "ttl"; +const LAST_MODIFIED: &str = "last_modified"; +const COUNT: &str = "count"; +const TOTAL_BYTES: &str = "total_bytes"; #[derive(Debug)] -pub enum CollectionLock { +enum CollectionLock { Read, Write, } @@ -71,8 +70,8 @@ struct MysqlDbSession { #[derive(Clone, Debug)] pub struct MysqlDb { - /// Synchronous Diesel calls are executed in tokio::task::spawn_blocking to satisfy - /// the Db trait's asynchronous interface. + /// Synchronous Diesel calls are executed in web::block to satisfy the Db trait's asynchronous + /// interface. /// /// Arc provides a Clone impl utilized for safely moving to /// the thread pool but does not provide Send as the underlying db @@ -94,9 +93,9 @@ pub struct MysqlDb { unsafe impl Send for MysqlDb {} pub struct MysqlDbInner { - #[cfg(not(test))] + #[cfg(not(debug_assertions))] pub(super) conn: Conn, - #[cfg(test)] + #[cfg(debug_assertions)] pub(super) conn: LoggingConnection, // display SQL when RUST_LOG="diesel_logger=trace" session: RefCell, @@ -117,7 +116,7 @@ impl Deref for MysqlDb { } impl MysqlDb { - pub fn new( + pub(super) fn new( conn: Conn, coll_cache: Arc, metrics: &Metrics, @@ -125,9 +124,9 @@ impl MysqlDb { blocking_threadpool: Arc, ) -> Self { let inner = MysqlDbInner { - #[cfg(not(test))] + #[cfg(not(debug_assertions))] conn, - #[cfg(test)] + #[cfg(debug_assertions)] conn: LoggingConnection::new(conn), session: RefCell::new(Default::default()), }; @@ -149,7 +148,7 @@ impl MysqlDb { /// In theory it would be possible to use serializable transactions rather /// than explicit locking, but our ops team have 
expressed concerns about /// the efficiency of that approach at scale. - pub fn lock_for_read_sync(&self, params: params::LockCollection) -> Result<()> { + fn lock_for_read_sync(&self, params: params::LockCollection) -> DbResult<()> { let user_id = params.user_id.legacy_id as i64; let collection_id = self.get_collection_id(¶ms.collection).or_else(|e| { if e.is_collection_not_found() { @@ -196,7 +195,7 @@ impl MysqlDb { Ok(()) } - pub fn lock_for_write_sync(&self, params: params::LockCollection) -> Result<()> { + fn lock_for_write_sync(&self, params: params::LockCollection) -> DbResult<()> { let user_id = params.user_id.legacy_id as i64; let collection_id = self.get_or_create_collection_id(¶ms.collection)?; if let Some(CollectionLock::Read) = self @@ -205,7 +204,9 @@ impl MysqlDb { .coll_locks .get(&(user_id as u32, collection_id)) { - Err(DbError::internal("Can't escalate read-lock to write-lock"))? + return Err(DbError::internal( + "Can't escalate read-lock to write-lock".to_owned(), + )); } // Lock the db @@ -221,7 +222,7 @@ impl MysqlDb { let modified = SyncTimestamp::from_i64(modified)?; // Forbid the write if it would not properly incr the timestamp if modified >= self.timestamp() { - Err(DbErrorKind::Conflict)? 
+ return Err(DbError::conflict()); } self.session .borrow_mut() @@ -235,7 +236,7 @@ impl MysqlDb { Ok(()) } - pub(super) fn begin(&self, for_write: bool) -> Result<()> { + pub(super) fn begin(&self, for_write: bool) -> DbResult<()> { self.conn .transaction_manager() .begin_transaction(&self.conn)?; @@ -246,11 +247,11 @@ impl MysqlDb { Ok(()) } - pub async fn begin_async(&self, for_write: bool) -> Result<()> { + async fn begin_async(&self, for_write: bool) -> DbResult<()> { self.begin(for_write) } - pub fn commit_sync(&self) -> Result<()> { + fn commit_sync(&self) -> DbResult<()> { if self.session.borrow().in_transaction { self.conn .transaction_manager() @@ -259,7 +260,7 @@ impl MysqlDb { Ok(()) } - pub fn rollback_sync(&self) -> Result<()> { + fn rollback_sync(&self) -> DbResult<()> { if self.session.borrow().in_transaction { self.conn .transaction_manager() @@ -268,7 +269,7 @@ impl MysqlDb { Ok(()) } - fn erect_tombstone(&self, user_id: i32) -> Result<()> { + fn erect_tombstone(&self, user_id: i32) -> DbResult<()> { sql_query(format!( r#"INSERT INTO user_collections ({user_id}, {collection_id}, {modified}) VALUES (?, ?, ?) @@ -285,7 +286,7 @@ impl MysqlDb { Ok(()) } - pub fn delete_storage_sync(&self, user_id: UserIdentifier) -> Result<()> { + fn delete_storage_sync(&self, user_id: UserIdentifier) -> DbResult<()> { let user_id = user_id.legacy_id as i64; // Delete user data. 
delete(bso::table) @@ -301,10 +302,7 @@ impl MysqlDb { // Deleting the collection should result in: // - collection does not appear in /info/collections // - X-Last-Modified timestamp at the storage level changing - pub fn delete_collection_sync( - &self, - params: params::DeleteCollection, - ) -> Result { + fn delete_collection_sync(&self, params: params::DeleteCollection) -> DbResult { let user_id = params.user_id.legacy_id as i64; let collection_id = self.get_collection_id(¶ms.collection)?; let mut count = delete(bso::table) @@ -316,14 +314,14 @@ impl MysqlDb { .filter(user_collections::collection_id.eq(&collection_id)) .execute(&self.conn)?; if count == 0 { - Err(DbErrorKind::CollectionNotFound)? + return Err(DbError::collection_not_found()); } else { self.erect_tombstone(user_id as i32)?; } self.get_storage_timestamp_sync(params.user_id) } - pub(super) fn get_or_create_collection_id(&self, name: &str) -> Result { + pub(super) fn get_or_create_collection_id(&self, name: &str) -> DbResult { if let Some(id) = self.coll_cache.get_id(name)? { return Ok(id); } @@ -346,7 +344,7 @@ impl MysqlDb { Ok(id) } - pub(super) fn get_collection_id(&self, name: &str) -> Result { + pub(super) fn get_collection_id(&self, name: &str) -> DbResult { if let Some(id) = self.coll_cache.get_id(name)? { return Ok(id); } @@ -359,7 +357,7 @@ impl MysqlDb { .bind::(name) .get_result::(&self.conn) .optional()? - .ok_or(DbErrorKind::CollectionNotFound)? + .ok_or_else(DbError::collection_not_found)? .id; if !self.session.borrow().in_write_transaction { self.coll_cache.put(id, name.to_owned())?; @@ -367,7 +365,7 @@ impl MysqlDb { Ok(id) } - fn _get_collection_name(&self, id: i32) -> Result { + fn _get_collection_name(&self, id: i32) -> DbResult { let name = if let Some(name) = self.coll_cache.get_name(id)? { name } else { @@ -379,13 +377,13 @@ impl MysqlDb { .bind::(&id) .get_result::(&self.conn) .optional()? - .ok_or(DbErrorKind::CollectionNotFound)? 
+ .ok_or_else(DbError::collection_not_found)? .name }; Ok(name) } - pub fn put_bso_sync(&self, bso: params::PutBso) -> Result { + fn put_bso_sync(&self, bso: params::PutBso) -> DbResult { /* if bso.payload.is_none() && bso.sortindex.is_none() && bso.ttl.is_none() { // XXX: go returns an error here (ErrNothingToDo), and is treated @@ -403,12 +401,12 @@ impl MysqlDb { collection: bso.collection.clone(), collection_id, })?; - if usage.total_bytes >= self.quota.size as usize { + if usage.total_bytes >= self.quota.size { let mut tags = HashMap::default(); tags.insert("collection".to_owned(), bso.collection.clone()); self.metrics.incr_with_tags("storage.quota.at_limit", tags); if self.quota.enforced { - return Err(DbErrorKind::Quota.into()); + return Err(DbError::quota()); } else { warn!("Quota at limit for user's collection ({} bytes)", usage.total_bytes; "collection"=>bso.collection.clone()); } @@ -476,7 +474,7 @@ impl MysqlDb { }) } - pub fn get_bsos_sync(&self, params: params::GetBsos) -> Result { + fn get_bsos_sync(&self, params: params::GetBsos) -> DbResult { let user_id = params.user_id.legacy_id as i64; let collection_id = self.get_collection_id(¶ms.collection)?; let now = self.timestamp().as_i64(); @@ -489,7 +487,7 @@ impl MysqlDb { bso::expiry, )) .filter(bso::user_id.eq(user_id)) - .filter(bso::collection_id.eq(collection_id as i32)) // XXX: + .filter(bso::collection_id.eq(collection_id)) .filter(bso::expiry.gt(now)) .into_boxed(); @@ -566,13 +564,13 @@ impl MysqlDb { }) } - pub fn get_bso_ids_sync(&self, params: params::GetBsos) -> Result { + fn get_bso_ids_sync(&self, params: params::GetBsos) -> DbResult { let user_id = params.user_id.legacy_id as i64; let collection_id = self.get_collection_id(¶ms.collection)?; let mut query = bso::table .select(bso::id) .filter(bso::user_id.eq(user_id)) - .filter(bso::collection_id.eq(collection_id as i32)) // XXX: + .filter(bso::collection_id.eq(collection_id)) .filter(bso::expiry.gt(self.timestamp().as_i64())) 
.into_boxed(); @@ -629,7 +627,7 @@ impl MysqlDb { }) } - pub fn get_bso_sync(&self, params: params::GetBso) -> Result> { + fn get_bso_sync(&self, params: params::GetBso) -> DbResult> { let user_id = params.user_id.legacy_id as i64; let collection_id = self.get_collection_id(¶ms.collection)?; Ok(bso::table @@ -648,7 +646,7 @@ impl MysqlDb { .optional()?) } - pub fn delete_bso_sync(&self, params: params::DeleteBso) -> Result { + fn delete_bso_sync(&self, params: params::DeleteBso) -> DbResult { let user_id = params.user_id.legacy_id; let collection_id = self.get_collection_id(¶ms.collection)?; let affected_rows = delete(bso::table) @@ -658,12 +656,12 @@ impl MysqlDb { .filter(bso::expiry.gt(&self.timestamp().as_i64())) .execute(&self.conn)?; if affected_rows == 0 { - Err(DbErrorKind::BsoNotFound)? + return Err(DbError::bso_not_found()); } self.update_collection(user_id as u32, collection_id) } - pub fn delete_bsos_sync(&self, params: params::DeleteBsos) -> Result { + fn delete_bsos_sync(&self, params: params::DeleteBsos) -> DbResult { let user_id = params.user_id.legacy_id as i64; let collection_id = self.get_collection_id(¶ms.collection)?; delete(bso::table) @@ -674,7 +672,7 @@ impl MysqlDb { self.update_collection(user_id as u32, collection_id) } - pub fn post_bsos_sync(&self, input: params::PostBsos) -> Result { + fn post_bsos_sync(&self, input: params::PostBsos) -> DbResult { let collection_id = self.get_or_create_collection_id(&input.collection)?; let mut result = results::PostBsos { modified: self.timestamp(), @@ -707,20 +705,20 @@ impl MysqlDb { Ok(result) } - pub fn get_storage_timestamp_sync(&self, user_id: UserIdentifier) -> Result { + fn get_storage_timestamp_sync(&self, user_id: UserIdentifier) -> DbResult { let user_id = user_id.legacy_id as i64; let modified = user_collections::table .select(max(user_collections::modified)) .filter(user_collections::user_id.eq(user_id)) .first::>(&self.conn)? 
.unwrap_or_default(); - SyncTimestamp::from_i64(modified) + SyncTimestamp::from_i64(modified).map_err(Into::into) } - pub fn get_collection_timestamp_sync( + fn get_collection_timestamp_sync( &self, params: params::GetCollectionTimestamp, - ) -> Result { + ) -> DbResult { let user_id = params.user_id.legacy_id as u32; let collection_id = self.get_collection_id(¶ms.collection)?; if let Some(modified) = self @@ -737,10 +735,10 @@ impl MysqlDb { .filter(user_collections::collection_id.eq(collection_id)) .first(&self.conn) .optional()? - .ok_or_else(|| DbErrorKind::CollectionNotFound.into()) + .ok_or_else(DbError::collection_not_found) } - pub fn get_bso_timestamp_sync(&self, params: params::GetBsoTimestamp) -> Result { + fn get_bso_timestamp_sync(&self, params: params::GetBsoTimestamp) -> DbResult { let user_id = params.user_id.legacy_id as i64; let collection_id = self.get_collection_id(¶ms.collection)?; let modified = bso::table @@ -751,13 +749,13 @@ impl MysqlDb { .first::(&self.conn) .optional()? .unwrap_or_default(); - SyncTimestamp::from_i64(modified) + SyncTimestamp::from_i64(modified).map_err(Into::into) } - pub fn get_collection_timestamps_sync( + fn get_collection_timestamps_sync( &self, user_id: UserIdentifier, - ) -> Result { + ) -> DbResult { let modifieds = sql_query(format!( "SELECT {collection_id}, {modified} FROM user_collections @@ -771,26 +769,29 @@ impl MysqlDb { .bind::(TOMBSTONE) .load::(&self.conn)? .into_iter() - .map(|cr| SyncTimestamp::from_i64(cr.last_modified).map(|ts| (cr.collection, ts))) - .collect::>>()?; + .map(|cr| { + SyncTimestamp::from_i64(cr.last_modified) + .map(|ts| (cr.collection, ts)) + .map_err(Into::into) + }) + .collect::>>()?; self.map_collection_names(modifieds) } - fn check_sync(&self) -> Result { + fn check_sync(&self) -> DbResult { // has the database been up for more than 0 seconds? 
let result = sql_query("SHOW STATUS LIKE \"Uptime\"").execute(&self.conn)?; Ok(result as u64 > 0) } - fn map_collection_names(&self, by_id: HashMap) -> Result> { + fn map_collection_names(&self, by_id: HashMap) -> DbResult> { let mut names = self.load_collection_names(by_id.keys())?; by_id .into_iter() .map(|(id, value)| { - names - .remove(&id) - .map(|name| (name, value)) - .ok_or_else(|| DbError::internal("load_collection_names unknown collection id")) + names.remove(&id).map(|name| (name, value)).ok_or_else(|| { + DbError::internal("load_collection_names unknown collection id".to_owned()) + }) }) .collect() } @@ -798,7 +799,7 @@ impl MysqlDb { fn load_collection_names<'a>( &self, collection_ids: impl Iterator, - ) -> Result> { + ) -> DbResult> { let mut names = HashMap::new(); let mut uncached = Vec::new(); for &id in collection_ids { @@ -830,7 +831,7 @@ impl MysqlDb { &self, user_id: u32, collection_id: i32, - ) -> Result { + ) -> DbResult { let quota = if self.quota.enabled { self.calc_quota_usage_sync(user_id, collection_id)? 
} else { @@ -869,10 +870,10 @@ impl MysqlDb { } // Perform a lighter weight "read only" storage size check - pub fn get_storage_usage_sync( + fn get_storage_usage_sync( &self, user_id: UserIdentifier, - ) -> Result { + ) -> DbResult { let uid = user_id.legacy_id as i64; let total_bytes = bso::table .select(sql::>("SUM(LENGTH(payload))")) @@ -883,10 +884,10 @@ impl MysqlDb { } // Perform a lighter weight "read only" quota storage check - pub fn get_quota_usage_sync( + fn get_quota_usage_sync( &self, params: params::GetQuotaUsage, - ) -> Result { + ) -> DbResult { let uid = params.user_id.legacy_id as i64; let (total_bytes, count): (i64, i32) = user_collections::table .select(( @@ -905,11 +906,11 @@ impl MysqlDb { } // perform a heavier weight quota calculation - pub fn calc_quota_usage_sync( + fn calc_quota_usage_sync( &self, user_id: u32, collection_id: i32, - ) -> Result { + ) -> DbResult { let (total_bytes, count): (i64, i32) = bso::table .select(( sql::(r#"COALESCE(SUM(LENGTH(COALESCE(payload, ""))),0)"#), @@ -927,10 +928,10 @@ impl MysqlDb { }) } - pub fn get_collection_usage_sync( + fn get_collection_usage_sync( &self, user_id: UserIdentifier, - ) -> Result { + ) -> DbResult { let counts = bso::table .select((bso::collection_id, sql::("SUM(LENGTH(payload))"))) .filter(bso::user_id.eq(user_id.legacy_id as i64)) @@ -942,10 +943,10 @@ impl MysqlDb { self.map_collection_names(counts) } - pub fn get_collection_counts_sync( + fn get_collection_counts_sync( &self, user_id: UserIdentifier, - ) -> Result { + ) -> DbResult { let counts = bso::table .select(( bso::collection_id, @@ -969,51 +970,34 @@ impl MysqlDb { batch_db_method!(commit_batch_sync, commit, CommitBatch); batch_db_method!(delete_batch_sync, delete, DeleteBatch); - pub fn get_batch_sync(&self, params: params::GetBatch) -> Result> { + fn get_batch_sync(&self, params: params::GetBatch) -> DbResult> { batch::get(self, params) } - pub fn timestamp(&self) -> SyncTimestamp { + pub(super) fn timestamp(&self) -> 
SyncTimestamp { self.session.borrow().timestamp } } -#[macro_export] -macro_rules! sync_db_method { - ($name:ident, $sync_name:ident, $type:ident) => { - sync_db_method!($name, $sync_name, $type, results::$type); - }; - ($name:ident, $sync_name:ident, $type:ident, $result:ty) => { - fn $name(&self, params: params::$type) -> DbFuture<'_, $result> { - let db = self.clone(); - Box::pin( - self.blocking_threadpool - .spawn(move || db.$sync_name(params)), - ) - } - }; -} -impl<'a> Db<'a> for MysqlDb { - fn commit(&self) -> DbFuture<'_, ()> { +impl Db for MysqlDb { + type Error = DbError; + + fn commit(&self) -> DbFuture<'_, (), Self::Error> { let db = self.clone(); Box::pin(self.blocking_threadpool.spawn(move || db.commit_sync())) } - fn rollback(&self) -> DbFuture<'_, ()> { + fn rollback(&self) -> DbFuture<'_, (), Self::Error> { let db = self.clone(); Box::pin(self.blocking_threadpool.spawn(move || db.rollback_sync())) } - fn begin(&self, for_write: bool) -> DbFuture<'_, ()> { + fn begin(&self, for_write: bool) -> DbFuture<'_, (), Self::Error> { let db = self.clone(); Box::pin(async move { db.begin_async(for_write).map_err(Into::into).await }) } - fn box_clone(&self) -> Box> { - Box::new(self.clone()) - } - - fn check(&self) -> DbFuture<'_, results::Check> { + fn check(&self) -> DbFuture<'_, results::Check, Self::Error> { let db = self.clone(); Box::pin(self.blocking_threadpool.spawn(move || db.check_sync())) } @@ -1073,7 +1057,7 @@ impl<'a> Db<'a> for MysqlDb { ); sync_db_method!(commit_batch, commit_batch_sync, CommitBatch); - fn get_collection_id(&self, name: String) -> DbFuture<'_, i32> { + fn get_collection_id(&self, name: String) -> DbFuture<'_, i32, Self::Error> { let db = self.clone(); Box::pin( self.blocking_threadpool @@ -1085,7 +1069,7 @@ impl<'a> Db<'a> for MysqlDb { results::ConnectionInfo::default() } - fn create_collection(&self, name: String) -> DbFuture<'_, i32> { + fn create_collection(&self, name: String) -> DbFuture<'_, i32, Self::Error> { let db = 
self.clone(); Box::pin( self.blocking_threadpool @@ -1093,7 +1077,10 @@ impl<'a> Db<'a> for MysqlDb { ) } - fn update_collection(&self, param: params::UpdateCollection) -> DbFuture<'_, SyncTimestamp> { + fn update_collection( + &self, + param: params::UpdateCollection, + ) -> DbFuture<'_, SyncTimestamp, Self::Error> { let db = self.clone(); Box::pin(self.blocking_threadpool.spawn(move || { db.update_collection(param.user_id.legacy_id as u32, param.collection_id) @@ -1110,7 +1097,7 @@ impl<'a> Db<'a> for MysqlDb { sync_db_method!(delete_batch, delete_batch_sync, DeleteBatch); - fn clear_coll_cache(&self) -> DbFuture<'_, ()> { + fn clear_coll_cache(&self) -> DbFuture<'_, (), Self::Error> { let db = self.clone(); Box::pin(self.blocking_threadpool.spawn(move || { db.coll_cache.clear(); @@ -1125,6 +1112,10 @@ impl<'a> Db<'a> for MysqlDb { enforced, } } + + fn box_clone(&self) -> Box> { + Box::new(self.clone()) + } } #[derive(Debug, QueryableByName)] diff --git a/syncserver/src/db/mysql/pool.rs b/syncstorage-mysql/src/pool.rs similarity index 76% rename from syncserver/src/db/mysql/pool.rs rename to syncstorage-mysql/src/pool.rs index a90034db..f2bf0d14 100644 --- a/syncserver/src/db/mysql/pool.rs +++ b/syncstorage-mysql/src/pool.rs @@ -12,16 +12,16 @@ use diesel::{ r2d2::{ConnectionManager, Pool}, Connection, }; -#[cfg(test)] +#[cfg(debug_assertions)] use diesel_logger::LoggingConnection; -use syncserver_db_common::{error::DbError, Db, DbPool, GetPoolState, PoolState, STD_COLLS}; +use syncserver_common::{BlockingThreadpool, Metrics}; +#[cfg(debug_assertions)] +use syncserver_db_common::test::TestTransactionCustomizer; +use syncserver_db_common::{GetPoolState, PoolState}; +use syncstorage_db_common::{Db, DbPool, STD_COLLS}; use syncstorage_settings::{Quota, Settings}; -use super::models::{MysqlDb, Result}; -#[cfg(test)] -use super::test::TestTransactionCustomizer; -use crate::db::BlockingThreadpool; -use crate::server::metrics::Metrics; +use super::{error::DbError, 
models::MysqlDb, DbResult}; embed_migrations!(); @@ -29,13 +29,13 @@ embed_migrations!(); /// /// Mysql DDL statements implicitly commit which could disrupt MysqlPool's /// begin_test_transaction during tests. So this runs on its own separate conn. -pub fn run_embedded_migrations(database_url: &str) -> Result<()> { +fn run_embedded_migrations(database_url: &str) -> DbResult<()> { let conn = MysqlConnection::establish(database_url)?; - #[cfg(test)] + #[cfg(debug_assertions)] // XXX: this doesn't show the DDL statements // https://github.com/shssoichiro/diesel-logger/issues/1 embedded_migrations::run(&LoggingConnection::new(conn))?; - #[cfg(not(test))] + #[cfg(not(debug_assertions))] embedded_migrations::run(&conn)?; Ok(()) } @@ -61,7 +61,7 @@ impl MysqlDbPool { settings: &Settings, metrics: &Metrics, blocking_threadpool: Arc, - ) -> Result { + ) -> DbResult { run_embedded_migrations(&settings.database_url)?; Self::new_without_migrations(settings, metrics, blocking_threadpool) } @@ -70,7 +70,7 @@ impl MysqlDbPool { settings: &Settings, metrics: &Metrics, blocking_threadpool: Arc, - ) -> Result { + ) -> DbResult { let manager = ConnectionManager::::new(settings.database_url.clone()); let builder = Pool::builder() .max_size(settings.database_pool_max_size) @@ -79,7 +79,7 @@ impl MysqlDbPool { )) .min_idle(settings.database_pool_min_idle); - #[cfg(test)] + #[cfg(debug_assertions)] let builder = if settings.database_use_test_transactions { builder.connection_customizer(Box::new(TestTransactionCustomizer)) } else { @@ -99,7 +99,7 @@ impl MysqlDbPool { }) } - pub fn get_sync(&self) -> Result { + pub fn get_sync(&self) -> DbResult { Ok(MysqlDb::new( self.pool.get()?, Arc::clone(&self.coll_cache), @@ -112,31 +112,25 @@ impl MysqlDbPool { #[async_trait] impl DbPool for MysqlDbPool { - async fn get<'a>(&'a self) -> Result>> { - let pool = self.clone(); - let db = self - .blocking_threadpool - .spawn(move || pool.get_sync()) - .await?; + type Error = DbError; - Ok(Box::new(db) 
as Box>) + async fn get<'a>(&'a self) -> DbResult>> { + let pool = self.clone(); + self.blocking_threadpool + .spawn(move || pool.get_sync()) + .await + .map(|db| Box::new(db) as Box>) } - fn validate_batch_id(&self, id: String) -> Result<()> { + fn validate_batch_id(&self, id: String) -> DbResult<()> { super::batch::validate_batch_id(&id) } - fn box_clone(&self) -> Box { + fn box_clone(&self) -> Box> { Box::new(self.clone()) } } -impl GetPoolState for MysqlDbPool { - fn state(&self) -> PoolState { - self.pool.state().into() - } -} - impl fmt::Debug for MysqlDbPool { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt.debug_struct("MysqlDbPool") @@ -145,42 +139,48 @@ impl fmt::Debug for MysqlDbPool { } } +impl GetPoolState for MysqlDbPool { + fn state(&self) -> PoolState { + self.pool.state().into() + } +} + #[derive(Debug)] -pub struct CollectionCache { +pub(super) struct CollectionCache { pub by_name: RwLock>, pub by_id: RwLock>, } impl CollectionCache { - pub fn put(&self, id: i32, name: String) -> Result<()> { + pub fn put(&self, id: i32, name: String) -> DbResult<()> { // XXX: should this emit a metric? // XXX: should probably either lock both simultaneously during // writes or use an RwLock alternative self.by_name .write() - .map_err(|_| DbError::internal("by_name write"))? + .map_err(|_| DbError::internal("by_name write".to_owned()))? .insert(name.clone(), id); self.by_id .write() - .map_err(|_| DbError::internal("by_id write"))? + .map_err(|_| DbError::internal("by_id write".to_owned()))? .insert(id, name); Ok(()) } - pub fn get_id(&self, name: &str) -> Result> { + pub fn get_id(&self, name: &str) -> DbResult> { Ok(self .by_name .read() - .map_err(|_| DbError::internal("by_name read"))? + .map_err(|_| DbError::internal("by_name read".to_owned()))? .get(name) .cloned()) } - pub fn get_name(&self, id: i32) -> Result> { + pub fn get_name(&self, id: i32) -> DbResult> { Ok(self .by_id .read() - .map_err(|_| DbError::internal("by_id read"))? 
+ .map_err(|_| DbError::internal("by_id read".to_owned()))? .get(&id) .cloned()) } diff --git a/syncserver/src/db/mysql/schema.rs b/syncstorage-mysql/src/schema.rs similarity index 100% rename from syncserver/src/db/mysql/schema.rs rename to syncstorage-mysql/src/schema.rs diff --git a/syncserver/src/db/mysql/test.rs b/syncstorage-mysql/src/test.rs similarity index 74% rename from syncserver/src/db/mysql/test.rs rename to syncstorage-mysql/src/test.rs index fdeaf1ad..55b98ea1 100644 --- a/syncserver/src/db/mysql/test.rs +++ b/syncstorage-mysql/src/test.rs @@ -1,49 +1,32 @@ -use std::{collections::HashMap, result::Result as StdResult, sync::Arc}; +use std::{collections::HashMap, sync::Arc}; use diesel::{ // expression_methods::TextExpressionMethods, // See note below about `not_like` becoming swedish - mysql::MysqlConnection, - r2d2::{CustomizeConnection, Error as PoolError}, - Connection, ExpressionMethods, QueryDsl, RunQueryDsl, }; +use syncserver_common::{BlockingThreadpool, Metrics}; use syncserver_settings::Settings as SyncserverSettings; use syncstorage_settings::Settings as SyncstorageSettings; use url::Url; -use crate::db::mysql::{ - models::{MysqlDb, Result}, - pool::MysqlDbPool, - schema::collections, -}; -use crate::db::BlockingThreadpool; -use crate::server::metrics; +use crate::{models::MysqlDb, pool::MysqlDbPool, schema::collections, DbResult}; -#[derive(Debug)] -pub struct TestTransactionCustomizer; - -impl CustomizeConnection for TestTransactionCustomizer { - fn on_acquire(&self, conn: &mut MysqlConnection) -> StdResult<(), PoolError> { - conn.begin_test_transaction().map_err(PoolError::QueryError) - } -} - -pub fn db(settings: &SyncstorageSettings) -> Result { +pub fn db(settings: &SyncstorageSettings) -> DbResult { let _ = env_logger::try_init(); // inherit SYNC_SYNCSTORAGE__DATABASE_URL from the env let pool = MysqlDbPool::new( settings, - &metrics::Metrics::noop(), + &Metrics::noop(), Arc::new(BlockingThreadpool::default()), )?; pool.get_sync() } 
#[test] -fn static_collection_id() -> Result<()> { +fn static_collection_id() -> DbResult<()> { let settings = SyncserverSettings::test_settings().syncstorage; if Url::parse(&settings.database_url).unwrap().scheme() != "mysql" { // Skip this test if we're not using mysql diff --git a/syncstorage-settings/Cargo.toml b/syncstorage-settings/Cargo.toml index bca756fa..7b789c5c 100644 --- a/syncstorage-settings/Cargo.toml +++ b/syncstorage-settings/Cargo.toml @@ -1,10 +1,13 @@ [package] name = "syncstorage-settings" -version = "0.13.7" -edition = "2021" +version.workspace=true +license.workspace=true +authors.workspace=true +edition.workspace=true [dependencies] -rand = "0.8" -serde = "1.0" +rand.workspace=true +serde.workspace=true + syncserver-common = { path = "../syncserver-common" } time = "^0.3" diff --git a/syncstorage-settings/src/lib.rs b/syncstorage-settings/src/lib.rs index 6f03ec53..3011e5c8 100644 --- a/syncstorage-settings/src/lib.rs +++ b/syncstorage-settings/src/lib.rs @@ -75,6 +75,7 @@ pub struct Settings { pub database_pool_connection_lifespan: Option, /// Max time a connection should sit idle before being dropped. pub database_pool_connection_max_idle: Option, + #[cfg(debug_assertions)] pub database_use_test_transactions: bool, /// Server-enforced limits for request payloads. 
@@ -105,6 +106,7 @@ impl Default for Settings { database_pool_connection_lifespan: None, database_pool_connection_max_idle: None, database_pool_connection_timeout: Some(30), + #[cfg(debug_assertions)] database_use_test_transactions: false, limits: ServerLimits::default(), statsd_label: "syncstorage".to_string(), diff --git a/syncstorage-spanner/Cargo.toml b/syncstorage-spanner/Cargo.toml new file mode 100644 index 00000000..d8538b2c --- /dev/null +++ b/syncstorage-spanner/Cargo.toml @@ -0,0 +1,42 @@ +[package] +name = "syncstorage-spanner" +version.workspace=true +license.workspace=true +authors.workspace=true +edition.workspace=true + +[dependencies] +backtrace.workspace=true +cadence.workspace=true +env_logger.workspace=true +futures.workspace=true +http.workspace=true +slog-scope.workspace=true + +async-trait = "0.1.40" +# Pin to 0.5 for now, to keep it under tokio 0.2 (issue977). +# Fix for #803 (deadpool#92) points to our fork for now +#deadpool = "0.5" # pin to 0.5 +deadpool = { git = "https://github.com/mozilla-services/deadpool", branch = "deadpool-v0.5.2-issue92" } +google-cloud-rust-raw = "0.14.0" +# Some versions of OpenSSL 1.1.1 conflict with grpcio's built-in boringssl which can cause +# syncserver to either fail to either compile, or start. In those cases, try +# `cargo build --features grpcio/openssl ...` +grpcio = { version = "0.12.0" } +log = { version = "0.4", features = [ + "max_level_debug", + "release_max_level_info", +] } +protobuf = {version="2.25.2"} # must match what's used by googleapis-raw +syncserver-common = { path = "../syncserver-common" } +syncserver-db-common = { path = "../syncserver-db-common" } +syncstorage-db-common = { path = "../syncstorage-db-common" } +syncstorage-settings = { path = "../syncstorage-settings" } +thiserror = "1.0.26" +tokio = { version = "0.2.4", features = ["macros", "sync"] } # pinning to 0.2.4 due to high number of dependencies (actix, bb8, deadpool, etc.) 
+url = "2.1" +uuid = { version = "0.8.2", features = ["serde", "v4"] } + +[[bin]] +name = "purge_ttl" +path = "src/bin/purge_ttl.rs" diff --git a/syncserver/src/db/spanner/BATCH_COMMIT.txt b/syncstorage-spanner/src/BATCH_COMMIT.txt similarity index 100% rename from syncserver/src/db/spanner/BATCH_COMMIT.txt rename to syncstorage-spanner/src/BATCH_COMMIT.txt diff --git a/syncserver/src/db/spanner/batch.rs b/syncstorage-spanner/src/batch.rs similarity index 96% rename from syncserver/src/db/spanner/batch.rs rename to syncstorage-spanner/src/batch.rs index 74acfd02..bc69acdc 100644 --- a/syncserver/src/db/spanner/batch.rs +++ b/syncstorage-spanner/src/batch.rs @@ -8,24 +8,24 @@ use protobuf::{ well_known_types::{ListValue, Value}, RepeatedField, }; -use syncserver_db_common::{ - error::{DbError, DbErrorKind}, - params, results, - util::to_rfc3339, - UserIdentifier, BATCH_LIFETIME, DEFAULT_BSO_TTL, +use syncstorage_db_common::{ + params, results, util::to_rfc3339, UserIdentifier, BATCH_LIFETIME, DEFAULT_BSO_TTL, }; use uuid::Uuid; -use super::models::{Result, SpannerDb, PRETOUCH_TS}; +use crate::error::DbError; + +use super::models::{SpannerDb, PRETOUCH_TS}; use super::support::{as_type, null_value, struct_type_field, IntoSpannerValue}; +use super::DbResult; pub async fn create_async( db: &SpannerDb, params: params::CreateBatch, -) -> Result { +) -> DbResult { let batch_id = Uuid::new_v4().to_simple().to_string(); let collection_id = db.get_collection_id_async(¶ms.collection).await?; - let timestamp = db.timestamp()?.as_i64(); + let timestamp = db.checked_timestamp()?.as_i64(); // Ensure a parent record exists in user_collections before writing to batches // (INTERLEAVE IN PARENT user_collections) @@ -66,13 +66,13 @@ pub async fn create_async( Ok(new_batch) } -pub async fn validate_async(db: &SpannerDb, params: params::ValidateBatch) -> Result { +pub async fn validate_async(db: &SpannerDb, params: params::ValidateBatch) -> DbResult { let exists = get_async(db, 
params.into()).await?; Ok(exists.is_some()) } // Append a collection to a pending batch (`create_batch` creates a new batch) -pub async fn append_async(db: &SpannerDb, params: params::AppendToBatch) -> Result<()> { +pub async fn append_async(db: &SpannerDb, params: params::AppendToBatch) -> DbResult<()> { let mut metrics = db.metrics.clone(); metrics.start_timer("storage.spanner.append_items_to_batch", None); let collection_id = db.get_collection_id_async(¶ms.collection).await?; @@ -98,7 +98,7 @@ pub async fn append_async(db: &SpannerDb, params: params::AppendToBatch) -> Resu if !exists { // NOTE: db tests expects this but it doesn't seem necessary w/ the // handler validating the batch before appends - Err(DbErrorKind::BatchNotFound)? + return Err(DbError::batch_not_found()); } do_append_async( @@ -116,7 +116,7 @@ pub async fn append_async(db: &SpannerDb, params: params::AppendToBatch) -> Resu pub async fn get_async( db: &SpannerDb, params: params::GetBatch, -) -> Result> { +) -> DbResult> { let collection_id = db.get_collection_id_async(¶ms.collection).await?; let (sqlparams, sqlparam_types) = params! { "fxa_uid" => params.user_id.fxa_uid.clone(), @@ -143,7 +143,7 @@ pub async fn get_async( Ok(batch) } -pub async fn delete_async(db: &SpannerDb, params: params::DeleteBatch) -> Result<()> { +pub async fn delete_async(db: &SpannerDb, params: params::DeleteBatch) -> DbResult<()> { let collection_id = db.get_collection_id_async(¶ms.collection).await?; let (sqlparams, sqlparam_types) = params! 
{ "fxa_uid" => params.user_id.fxa_uid.clone(), @@ -170,7 +170,7 @@ pub async fn delete_async(db: &SpannerDb, params: params::DeleteBatch) -> Result pub async fn commit_async( db: &SpannerDb, params: params::CommitBatch, -) -> Result { +) -> DbResult { let mut metrics = db.metrics.clone(); metrics.start_timer("storage.spanner.apply_batch", None); let collection_id = db.get_collection_id_async(¶ms.collection).await?; @@ -249,7 +249,7 @@ pub async fn do_append_async( batch: results::CreateBatch, bsos: Vec, collection: &str, -) -> Result<()> { +) -> DbResult<()> { // Pass an array of struct objects as @values (for UNNEST), e.g.: // [("", "", 101, "ba1", "bso1", NULL, "payload1", NULL), // ("", "", 101, "ba1", "bso2", NULL, "payload2", NULL)] @@ -528,7 +528,7 @@ async fn pretouch_collection_async( db: &SpannerDb, user_id: &UserIdentifier, collection_id: i32, -) -> Result<()> { +) -> DbResult<()> { let (mut sqlparams, mut sqlparam_types) = params! { "fxa_uid" => user_id.fxa_uid.clone(), "fxa_kid" => user_id.fxa_kid.clone(), @@ -569,8 +569,8 @@ async fn pretouch_collection_async( Ok(()) } -pub fn validate_batch_id(id: &str) -> Result<()> { +pub fn validate_batch_id(id: &str) -> DbResult<()> { Uuid::from_str(id) .map(|_| ()) - .map_err(|e| DbError::internal(&format!("Invalid batch_id: {}", e))) + .map_err(|e| DbError::internal(format!("Invalid batch_id: {}", e))) } diff --git a/syncserver/src/db/spanner/batch_commit_insert.sql b/syncstorage-spanner/src/batch_commit_insert.sql similarity index 100% rename from syncserver/src/db/spanner/batch_commit_insert.sql rename to syncstorage-spanner/src/batch_commit_insert.sql diff --git a/syncserver/src/db/spanner/batch_commit_update.sql b/syncstorage-spanner/src/batch_commit_update.sql similarity index 100% rename from syncserver/src/db/spanner/batch_commit_update.sql rename to syncstorage-spanner/src/batch_commit_update.sql diff --git a/syncserver/src/db/spanner/batch_index.sql b/syncstorage-spanner/src/batch_index.sql similarity 
index 100% rename from syncserver/src/db/spanner/batch_index.sql rename to syncstorage-spanner/src/batch_index.sql diff --git a/syncserver/src/bin/purge_ttl.rs b/syncstorage-spanner/src/bin/purge_ttl.rs similarity index 96% rename from syncserver/src/bin/purge_ttl.rs rename to syncstorage-spanner/src/bin/purge_ttl.rs index fa672dee..03a914c7 100644 --- a/syncserver/src/bin/purge_ttl.rs +++ b/syncstorage-spanner/src/bin/purge_ttl.rs @@ -29,7 +29,7 @@ const SLEEP_ENV_VAR: &str = "PURGE_TTL_RETRY_SLEEP_MILLIS"; // Default value = 0 use protobuf::well_known_types::Value; pub struct MetricTimer { - pub client: StatsdClient, + pub client: Arc, pub label: String, pub start: Instant, } @@ -48,7 +48,7 @@ impl Drop for MetricTimer { } } -pub fn start_timer(client: &StatsdClient, label: &str) -> MetricTimer { +pub fn start_timer(client: &Arc, label: &str) -> MetricTimer { trace!("⌚ Starting timer... {:?}", label); MetricTimer { start: Instant::now(), @@ -57,7 +57,7 @@ pub fn start_timer(client: &StatsdClient, label: &str) -> MetricTimer { } } -pub fn statsd_from_env() -> Result> { +pub fn statsd_from_env() -> Result, Box> { let statsd_host = env::var("STATSD_HOST").unwrap_or_else(|_| "127.0.0.1".to_string()); let statsd_port = match env::var("STATSD_PORT") { Ok(port) => port.parse::()?, @@ -71,11 +71,13 @@ pub fn statsd_from_env() -> Result> { let sink = QueuingMetricSink::from(udp_sink); let builder = StatsdClient::builder("syncstorage", sink); - Ok(builder - .with_error_handler(|err| { - warn!("Metric send error: {:?}", err); - }) - .build()) + Ok(Arc::new( + builder + .with_error_handler(|err| { + warn!("Metric send error: {:?}", err); + }) + .build(), + )) } pub enum RequestType { @@ -303,7 +305,8 @@ fn main() -> Result<(), Box> { let chan = ChannelBuilder::new(env) .max_send_message_len(100 << 20) .max_receive_message_len(100 << 20) - .secure_connect(SPANNER_ADDRESS, creds); + .set_credentials(creds) + .connect(SPANNER_ADDRESS); let client = SpannerClient::new(chan); // 
Create a session diff --git a/syncstorage-spanner/src/error.rs b/syncstorage-spanner/src/error.rs new file mode 100644 index 00000000..34a951de --- /dev/null +++ b/syncstorage-spanner/src/error.rs @@ -0,0 +1,151 @@ +use std::fmt; + +use backtrace::Backtrace; +use http::StatusCode; +use syncserver_common::{from_error, impl_fmt_display, InternalError, ReportableError}; +use syncstorage_db_common::error::{DbErrorIntrospect, SyncstorageDbError}; +use thiserror::Error; + +/// An error type that represents any Spanner-related errors that may occur while processing a +/// syncstorage request. These errors may be application-specific or lower-level errors that arise +/// from the database backend. +#[derive(Debug)] +pub struct DbError { + kind: DbErrorKind, + pub status: StatusCode, + pub backtrace: Box, +} + +impl DbError { + pub fn batch_not_found() -> Self { + DbErrorKind::Common(SyncstorageDbError::batch_not_found()).into() + } + + pub fn bso_not_found() -> Self { + DbErrorKind::Common(SyncstorageDbError::bso_not_found()).into() + } + + pub fn collection_not_found() -> Self { + DbErrorKind::Common(SyncstorageDbError::collection_not_found()).into() + } + + pub fn conflict() -> Self { + DbErrorKind::Common(SyncstorageDbError::conflict()).into() + } + + pub fn expired() -> Self { + DbErrorKind::Expired.into() + } + + pub fn integrity(msg: String) -> Self { + DbErrorKind::Integrity(msg).into() + } + + pub fn internal(msg: String) -> Self { + DbErrorKind::Common(SyncstorageDbError::internal(msg)).into() + } + + pub fn quota() -> Self { + DbErrorKind::Common(SyncstorageDbError::quota()).into() + } + + pub fn too_large(msg: String) -> Self { + DbErrorKind::TooLarge(msg).into() + } +} + +#[derive(Debug, Error)] +enum DbErrorKind { + #[error("{}", _0)] + Common(SyncstorageDbError), + + #[error("Connection expired")] + Expired, + + #[error("A database error occurred: {}", _0)] + Grpc(#[from] grpcio::Error), + + #[error("Database integrity error: {}", _0)] + Integrity(String), + 
+ #[error("Spanner data load too large: {}", _0)] + TooLarge(String), +} + +impl From for DbError { + fn from(kind: DbErrorKind) -> Self { + let status = match &kind { + DbErrorKind::Common(e) => e.status, + // Matching the Python code here (a 400 vs 404) + DbErrorKind::TooLarge(_) => StatusCode::BAD_REQUEST, + _ => StatusCode::INTERNAL_SERVER_ERROR, + }; + + Self { + kind, + status, + backtrace: Box::new(Backtrace::new()), + } + } +} + +impl DbErrorIntrospect for DbError { + fn is_batch_not_found(&self) -> bool { + matches!(&self.kind, DbErrorKind::Common(e) if e.is_batch_not_found()) + } + + fn is_bso_not_found(&self) -> bool { + matches!(&self.kind, DbErrorKind::Common(e) if e.is_bso_not_found()) + } + + fn is_collection_not_found(&self) -> bool { + matches!(&self.kind, DbErrorKind::Common(e) if e.is_collection_not_found()) + } + + fn is_conflict(&self) -> bool { + matches!(&self.kind, DbErrorKind::Common(e) if e.is_conflict()) + } + + fn is_quota(&self) -> bool { + matches!(&self.kind, DbErrorKind::Common(e) if e.is_quota()) + } +} + +impl ReportableError for DbError { + fn is_sentry_event(&self) -> bool { + matches!(&self.kind, DbErrorKind::Common(e) if e.is_sentry_event()) + } + + fn metric_label(&self) -> Option { + if let DbErrorKind::Common(e) = &self.kind { + e.metric_label() + } else { + None + } + } + + fn error_backtrace(&self) -> String { + format!("{:#?}", self.backtrace) + } +} + +impl InternalError for DbError { + fn internal_error(message: String) -> Self { + DbErrorKind::Common(SyncstorageDbError::internal(message)).into() + } +} + +impl_fmt_display!(DbError, DbErrorKind); + +from_error!(grpcio::Error, DbError, |inner: grpcio::Error| { + // Convert ABORTED (typically due to a transaction abort) into 503s + match inner { + grpcio::Error::RpcFailure(ref status) | grpcio::Error::RpcFinished(Some(ref status)) + if status.code() == grpcio::RpcStatusCode::ABORTED => + { + DbErrorKind::Common(SyncstorageDbError::conflict()) + } + _ => 
DbErrorKind::Grpc(inner), + } +}); +from_error!(SyncstorageDbError, DbError, DbErrorKind::Common); diff --git a/syncserver/src/db/spanner/insert_standard_collections.sql b/syncstorage-spanner/src/insert_standard_collections.sql similarity index 100% rename from syncserver/src/db/spanner/insert_standard_collections.sql rename to syncstorage-spanner/src/insert_standard_collections.sql diff --git a/syncstorage-spanner/src/lib.rs b/syncstorage-spanner/src/lib.rs new file mode 100644 index 00000000..560c3f64 --- /dev/null +++ b/syncstorage-spanner/src/lib.rs @@ -0,0 +1,27 @@ +use std::time::SystemTime; + +#[macro_use] +extern crate slog_scope; + +#[macro_use] +mod macros; + +mod batch; +mod error; +mod manager; +mod models; +mod pool; +mod support; + +pub use error::DbError; +pub use models::SpannerDb; +pub use pool::SpannerDbPool; + +type DbResult = Result; + +fn now() -> i64 { + SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .unwrap_or_default() + .as_secs() as i64 +} diff --git a/syncserver/src/db/spanner/macros.rs b/syncstorage-spanner/src/macros.rs similarity index 98% rename from syncserver/src/db/spanner/macros.rs rename to syncstorage-spanner/src/macros.rs index 651265a3..6785c4e6 100644 --- a/syncserver/src/db/spanner/macros.rs +++ b/syncstorage-spanner/src/macros.rs @@ -19,7 +19,7 @@ macro_rules! 
params { #[test] fn test_params_macro() { - use crate::db::spanner::support::IntoSpannerValue; + use super::support::IntoSpannerValue; use google_cloud_rust_raw::spanner::v1::type_pb::{Type, TypeCode}; use protobuf::{ well_known_types::{ListValue, Value}, diff --git a/syncserver/src/db/spanner/manager/bb8.rs b/syncstorage-spanner/src/manager/bb8.rs similarity index 96% rename from syncserver/src/db/spanner/manager/bb8.rs rename to syncstorage-spanner/src/manager/bb8.rs index 7ad6d7f8..cb551e21 100644 --- a/syncserver/src/db/spanner/manager/bb8.rs +++ b/syncstorage-spanner/src/manager/bb8.rs @@ -10,7 +10,7 @@ use crate::{ error::{DbError, DbErrorKind}, PoolState, }, - server::metrics::Metrics, + server::Metrics, settings::Settings, }; @@ -19,7 +19,7 @@ use super::session::{create_spanner_session, recycle_spanner_session, SpannerSes #[allow(dead_code)] pub type Conn<'a> = PooledConnection<'a, SpannerSessionManager>; -pub struct SpannerSessionManager { +pub(super) struct SpannerSessionManager { database_name: String, /// The gRPC environment env: Arc, diff --git a/syncserver/src/db/spanner/manager/deadpool.rs b/syncstorage-spanner/src/manager/deadpool.rs similarity index 88% rename from syncserver/src/db/spanner/manager/deadpool.rs rename to syncstorage-spanner/src/manager/deadpool.rs index 4433d655..ededa3fa 100644 --- a/syncserver/src/db/spanner/manager/deadpool.rs +++ b/syncstorage-spanner/src/manager/deadpool.rs @@ -3,17 +3,15 @@ use std::{fmt, sync::Arc}; use async_trait::async_trait; use deadpool::managed::{Manager, RecycleError, RecycleResult}; use grpcio::{EnvBuilder, Environment}; -use syncserver_db_common::error::{DbError, DbErrorKind}; +use syncserver_common::{BlockingThreadpool, Metrics}; use syncstorage_settings::Settings; -use crate::db::BlockingThreadpool; -use crate::server::metrics::Metrics; - use super::session::{create_spanner_session, recycle_spanner_session, SpannerSession}; +use crate::error::DbError; -pub type Conn = deadpool::managed::Object; 
+pub(crate) type Conn = deadpool::managed::Object; -pub struct SpannerSessionManager { +pub(crate) struct SpannerSessionManager { database_name: String, /// The gRPC environment env: Arc, @@ -42,7 +40,9 @@ impl SpannerSessionManager { ) -> Result { let database_name = settings .spanner_database_name() - .ok_or_else(|| DbErrorKind::InvalidUrl(settings.database_url.to_owned()))? + .ok_or_else(|| { + DbError::internal(format!("invalid database url: {}", settings.database_url)) + })? .to_owned(); let env = Arc::new(EnvBuilder::new().build()); diff --git a/syncstorage-spanner/src/manager/mod.rs b/syncstorage-spanner/src/manager/mod.rs new file mode 100644 index 00000000..b1e20c53 --- /dev/null +++ b/syncstorage-spanner/src/manager/mod.rs @@ -0,0 +1,6 @@ +// mod bb8; +mod deadpool; +mod session; + +pub(super) use self::deadpool::{Conn, SpannerSessionManager}; +pub(super) use self::session::SpannerSession; diff --git a/syncserver/src/db/spanner/manager/session.rs b/syncstorage-spanner/src/manager/session.rs similarity index 93% rename from syncserver/src/db/spanner/manager/session.rs rename to syncstorage-spanner/src/manager/session.rs index b577c9de..d6902479 100644 --- a/syncserver/src/db/spanner/manager/session.rs +++ b/syncstorage-spanner/src/manager/session.rs @@ -1,13 +1,13 @@ +use std::sync::Arc; + use google_cloud_rust_raw::spanner::v1::{ spanner::{CreateSessionRequest, GetSessionRequest, Session}, spanner_grpc::SpannerClient, }; use grpcio::{CallOption, ChannelBuilder, ChannelCredentials, Environment, MetadataBuilder}; -use std::sync::Arc; -use syncserver_db_common::error::{DbError, DbErrorKind}; +use syncserver_common::{BlockingThreadpool, Metrics}; -use crate::db::{spanner::now, BlockingThreadpool}; -use crate::server::metrics::Metrics; +use crate::error::DbError; const SPANNER_ADDRESS: &str = "spanner.googleapis.com:443"; @@ -21,11 +21,11 @@ pub struct SpannerSession { pub session: Session, /// The underlying client (Connection/Channel) for interacting with 
spanner pub client: SpannerClient, - pub(in crate::db::spanner) use_test_transactions: bool, + pub(crate) use_test_transactions: bool, /// A second based UTC for SpannerSession creation. /// Session has a similar `create_time` value that is managed by protobuf, /// but some clock skew issues are possible. - pub(in crate::db::spanner) create_time: i64, + pub(crate) create_time: i64, /// Whether we are using the Spanner emulator pub using_spanner_emulator: bool, } @@ -58,7 +58,8 @@ pub async fn create_spanner_session( Ok(ChannelBuilder::new(env) .max_send_message_len(100 << 20) .max_receive_message_len(100 << 20) - .secure_connect(SPANNER_ADDRESS, creds)) + .set_credentials(creds) + .connect(SPANNER_ADDRESS)) } }) .await?; @@ -71,7 +72,7 @@ pub async fn create_spanner_session( session, client, use_test_transactions, - create_time: now(), + create_time: crate::now(), using_spanner_emulator, }) } @@ -84,7 +85,7 @@ pub async fn recycle_spanner_session( max_lifetime: Option, max_idle: Option, ) -> Result<(), DbError> { - let now = now(); + let now = crate::now(); let mut req = GetSessionRequest::new(); req.set_name(conn.session.get_name().to_owned()); /* @@ -128,7 +129,7 @@ pub async fn recycle_spanner_session( if age > max_life as i64 { metrics.incr("db.connection.max_life"); dbg!("### aging out", this_session.get_name()); - return Err(DbErrorKind::Expired.into()); + return Err(DbError::expired()); } } // check how long that this has been idle... 
@@ -145,7 +146,7 @@ pub async fn recycle_spanner_session( if idle > max_idle as i64 { metrics.incr("db.connection.max_idle"); dbg!("### idling out", this_session.get_name()); - return Err(DbErrorKind::Expired.into()); + return Err(DbError::expired()); } // and update the connection's reference session info conn.session = this_session; diff --git a/syncserver/src/db/spanner/models.rs b/syncstorage-spanner/src/models.rs similarity index 87% rename from syncserver/src/db/spanner/models.rs rename to syncstorage-spanner/src/models.rs index 50eef358..fd7c73a8 100644 --- a/syncserver/src/db/spanner/models.rs +++ b/syncstorage-spanner/src/models.rs @@ -22,37 +22,33 @@ use protobuf::{ well_known_types::{ListValue, Value}, Message, RepeatedField, }; -use syncserver_common::MAX_SPANNER_LOAD_SIZE; -use syncserver_db_common::{ - error::{DbError, DbErrorKind}, - params, results, - util::SyncTimestamp, - Db, DbFuture, Sorting, UserIdentifier, DEFAULT_BSO_TTL, FIRST_CUSTOM_COLLECTION_ID, +use syncserver_common::{Metrics, MAX_SPANNER_LOAD_SIZE}; +use syncserver_db_common::DbFuture; +use syncstorage_db_common::{ + error::DbErrorIntrospect, params, results, util::SyncTimestamp, Db, Sorting, UserIdentifier, + DEFAULT_BSO_TTL, FIRST_CUSTOM_COLLECTION_ID, }; use syncstorage_settings::Quota; -use crate::{db::spanner::now, server::metrics::Metrics}; - use super::{ batch, + error::DbError, pool::{CollectionCache, Conn}, support::{ as_type, bso_from_row, bso_to_insert_row, bso_to_update_row, ExecuteSqlRequestBuilder, IntoSpannerValue, StreamedResultSetAsync, }, + DbResult, }; #[derive(Debug, Eq, PartialEq)] -pub enum CollectionLock { +enum CollectionLock { Read, Write, } -pub type Result = std::result::Result; - -pub const TOMBSTONE: i32 = 0; - -pub const PRETOUCH_TS: &str = "0001-01-01T00:00:00.00Z"; +const TOMBSTONE: i32 = 0; +pub(super) const PRETOUCH_TS: &str = "0001-01-01T00:00:00.00Z"; /// Per session Db metadata #[derive(Debug, Default)] @@ -106,7 +102,7 @@ impl Deref for SpannerDb { 
} impl SpannerDb { - pub fn new( + pub(super) fn new( conn: Conn, coll_cache: Arc, metrics: &Metrics, @@ -128,7 +124,7 @@ impl SpannerDb { self.coll_cache.get_name(id).await } - pub(super) async fn get_collection_id_async(&self, name: &str) -> Result { + pub(super) async fn get_collection_id_async(&self, name: &str) -> DbResult { if let Some(id) = self.coll_cache.get_id(name).await { return Ok(id); } @@ -144,23 +140,25 @@ impl SpannerDb { .execute_async(&self.conn)? .one_or_none() .await? - .ok_or(DbErrorKind::CollectionNotFound)?; + .ok_or_else(DbError::collection_not_found)?; let id = result[0] .get_string_value() .parse::() - .map_err(|e| DbErrorKind::Integrity(e.to_string()))?; + .map_err(|e| DbError::integrity(e.to_string()))?; if !self.in_write_transaction() { self.coll_cache.put(id, name.to_owned()).await; } Ok(id) } - pub(super) async fn create_collection_async(&self, name: &str) -> Result { + pub(super) async fn create_collection_async(&self, name: &str) -> DbResult { // This should always run within a r/w transaction, so that: "If a // transaction successfully commits, then no other writer modified the // data that was read in the transaction after it was read." if !cfg!(test) && !self.in_write_transaction() { - Err(DbError::internal("Can't escalate read-lock to write-lock"))? + return Err(DbError::internal( + "Can't escalate read-lock to write-lock".to_owned(), + )); } let result = self .sql( @@ -173,7 +171,7 @@ impl SpannerDb { let max = result[0] .get_string_value() .parse::() - .map_err(|e| DbErrorKind::Integrity(e.to_string()))?; + .map_err(|e| DbError::integrity(e.to_string()))?; let id = FIRST_CUSTOM_COLLECTION_ID.max(max + 1); let (sqlparams, sqlparam_types) = params! 
{ "name" => name.to_string(), @@ -191,14 +189,14 @@ impl SpannerDb { Ok(id) } - async fn get_or_create_collection_id_async(&self, name: &str) -> Result { + async fn get_or_create_collection_id_async(&self, name: &str) -> DbResult { match self.get_collection_id_async(name).await { Err(err) if err.is_collection_not_found() => self.create_collection_async(name).await, result => result, } } - pub async fn lock_for_read_async(&self, params: params::LockCollection) -> Result<()> { + async fn lock_for_read_async(&self, params: params::LockCollection) -> DbResult<()> { // Begin a transaction self.begin_async(false).await?; @@ -235,7 +233,7 @@ impl SpannerDb { Ok(()) } - pub async fn lock_for_write_async(&self, params: params::LockCollection) -> Result<()> { + async fn lock_for_write_async(&self, params: params::LockCollection) -> DbResult<()> { // Begin a transaction self.begin_async(true).await?; let collection_id = self @@ -248,7 +246,9 @@ impl SpannerDb { .coll_locks .get(&(params.user_id.clone(), collection_id)) { - Err(DbError::internal("Can't escalate read-lock to write-lock"))? + return Err(DbError::internal( + "Can't escalate read-lock to write-lock".to_owned(), + )); } let (sqlparams, mut sqlparam_types) = params! { "fxa_uid" => params.user_id.fxa_uid.clone(), @@ -274,12 +274,12 @@ impl SpannerDb { .await?; let timestamp = if let Some(result) = result { - let modified = SyncTimestamp::from_rfc3339(result[1].get_string_value())?; - let now = SyncTimestamp::from_rfc3339(result[0].get_string_value())?; + let modified = sync_timestamp_from_rfc3339(result[1].get_string_value())?; + let now = sync_timestamp_from_rfc3339(result[0].get_string_value())?; // Forbid the write if it would not properly incr the modified // timestamp if modified >= now { - Err(DbErrorKind::Conflict)? + return Err(DbError::conflict()); } self.session .borrow_mut() @@ -292,7 +292,7 @@ impl SpannerDb { .execute_async(&self.conn)? 
.one() .await?; - SyncTimestamp::from_rfc3339(result[0].get_string_value())? + sync_timestamp_from_rfc3339(result[0].get_string_value())? }; self.set_timestamp(timestamp); @@ -308,7 +308,7 @@ impl SpannerDb { self.session.borrow_mut().timestamp = Some(timestamp); } - pub(super) fn begin(&self, for_write: bool) -> Result<()> { + pub(super) fn begin(&self, for_write: bool) -> DbResult<()> { let spanner = &self.conn; let mut options = TransactionOptions::new(); if for_write { @@ -328,7 +328,7 @@ impl SpannerDb { Ok(()) } - pub(super) async fn begin_async(&self, for_write: bool) -> Result<()> { + pub(super) async fn begin_async(&self, for_write: bool) -> DbResult<()> { let spanner = &self.conn; let mut options = TransactionOptions::new(); if for_write { @@ -349,7 +349,7 @@ impl SpannerDb { } /// Return the current transaction metadata (TransactionSelector) if one is active. - fn get_transaction(&self) -> Result> { + fn get_transaction(&self) -> DbResult> { if self.session.borrow().transaction.is_none() { self.begin(true)?; } @@ -358,7 +358,7 @@ impl SpannerDb { } /// Return the current transaction metadata (TransactionSelector) if one is active. - async fn get_transaction_async(&self) -> Result> { + async fn get_transaction_async(&self) -> DbResult> { if self.session.borrow().transaction.is_none() { self.begin_async(true).await?; } @@ -366,7 +366,7 @@ impl SpannerDb { Ok(self.session.borrow().transaction.clone()) } - fn sql_request(&self, sql: &str) -> Result { + fn sql_request(&self, sql: &str) -> DbResult { let mut sqlr = ExecuteSqlRequest::new(); sqlr.set_sql(sql.to_owned()); if let Some(transaction) = self.get_transaction()? 
{ @@ -375,16 +375,17 @@ impl SpannerDb { sqlr.seqno = session .execute_sql_count .try_into() - .map_err(|_| DbError::internal("seqno overflow"))?; + .map_err(|_| DbError::internal("seqno overflow".to_owned()))?; session.execute_sql_count += 1; } Ok(sqlr) } - pub(super) fn sql(&self, sql: &str) -> Result { + pub(super) fn sql(&self, sql: &str) -> DbResult { Ok(ExecuteSqlRequestBuilder::new(self.sql_request(sql)?)) } + #[allow(unused)] pub(super) fn insert(&self, table: &str, columns: &[&str], values: Vec) { let mut mutation = Mutation::new(); mutation.set_insert(self.mutation_write(table, columns, values)); @@ -395,6 +396,7 @@ impl SpannerDb { .push(mutation); } + #[allow(unused)] pub(super) fn update(&self, table: &str, columns: &[&str], values: Vec) { let mut mutation = Mutation::new(); mutation.set_update(self.mutation_write(table, columns, values)); @@ -435,7 +437,7 @@ impl SpannerDb { self.session.borrow().in_write_transaction } - pub fn commit(&self) -> Result<()> { + pub fn commit_sync(&self) -> DbResult<()> { if !self.in_write_transaction() { // read-only return Ok(()); @@ -458,11 +460,11 @@ impl SpannerDb { spanner.client.commit(&req)?; Ok(()) } else { - Err(DbError::internal("No transaction to commit"))? + Err(DbError::internal("No transaction to commit".to_owned())) } } - pub async fn commit_async(&self) -> Result<()> { + async fn commit_async(&self) -> DbResult<()> { if !self.in_write_transaction() { // read-only return Ok(()); @@ -485,11 +487,11 @@ impl SpannerDb { spanner.client.commit_async(&req)?.await?; Ok(()) } else { - Err(DbError::internal("No transaction to commit"))? + Err(DbError::internal("No transaction to commit".to_owned())) } } - pub fn rollback(&self) -> Result<()> { + pub fn rollback_sync(&self) -> DbResult<()> { if !self.in_write_transaction() { // read-only return Ok(()); @@ -503,11 +505,11 @@ impl SpannerDb { spanner.client.rollback(&req)?; Ok(()) } else { - Err(DbError::internal("No transaction to rollback"))? 
+ Err(DbError::internal("No transaction to rollback".to_owned())) } } - pub async fn rollback_async(&self) -> Result<()> { + async fn rollback_async(&self) -> DbResult<()> { if !self.in_write_transaction() { // read-only return Ok(()); @@ -521,14 +523,14 @@ impl SpannerDb { spanner.client.rollback_async(&req)?.await?; Ok(()) } else { - Err(DbError::internal("No transaction to rollback"))? + Err(DbError::internal("No transaction to rollback".to_owned())) } } - pub async fn get_collection_timestamp_async( + async fn get_collection_timestamp_async( &self, params: params::GetCollectionTimestamp, - ) -> Result { + ) -> DbResult { let collection_id = self.get_collection_id_async(¶ms.collection).await?; if let Some(modified) = self .session @@ -560,15 +562,15 @@ impl SpannerDb { .execute_async(&self.conn)? .one_or_none() .await? - .ok_or(DbErrorKind::CollectionNotFound)?; - let modified = SyncTimestamp::from_rfc3339(result[0].get_string_value())?; + .ok_or_else(DbError::collection_not_found)?; + let modified = sync_timestamp_from_rfc3339(result[0].get_string_value())?; Ok(modified) } - pub async fn get_collection_timestamps_async( + async fn get_collection_timestamps_async( &self, user_id: params::GetCollectionTimestamps, - ) -> Result { + ) -> DbResult { let (sqlparams, mut sqlparam_types) = params! 
{ "fxa_uid" => user_id.fxa_uid, "fxa_kid" => user_id.fxa_kid, @@ -594,14 +596,17 @@ impl SpannerDb { let collection_id = row[0] .get_string_value() .parse::() - .map_err(|e| DbErrorKind::Integrity(e.to_string()))?; - let modified = SyncTimestamp::from_rfc3339(row[1].get_string_value())?; + .map_err(|e| DbError::integrity(e.to_string()))?; + let modified = sync_timestamp_from_rfc3339(row[1].get_string_value())?; results.insert(collection_id, modified); } self.map_collection_names(results).await } - async fn map_collection_names(&self, by_id: HashMap) -> Result> { + async fn map_collection_names( + &self, + by_id: HashMap, + ) -> DbResult> { let mut names = self.load_collection_names(by_id.keys()).await?; by_id .into_iter() @@ -609,7 +614,7 @@ impl SpannerDb { names .remove(&id) .map(|name| (name, value)) - .ok_or_else(|| DbError::internal("load_collection_names get")) + .ok_or_else(|| DbError::internal("load_collection_names get".to_owned())) }) .collect() } @@ -617,7 +622,7 @@ impl SpannerDb { async fn load_collection_names( &self, collection_ids: impl Iterator, - ) -> Result> { + ) -> DbResult> { let (mut names, uncached) = self .coll_cache .get_names(&collection_ids.cloned().collect::>()) @@ -646,7 +651,7 @@ impl SpannerDb { let id = row[0] .get_string_value() .parse::() - .map_err(|e| DbErrorKind::Integrity(e.to_string()))?; + .map_err(|e| DbError::integrity(e.to_string()))?; let name = row[1].take_string_value(); names.insert(id, name.clone()); if !self.in_write_transaction() { @@ -658,10 +663,10 @@ impl SpannerDb { Ok(names) } - pub async fn get_collection_counts_async( + async fn get_collection_counts_async( &self, user_id: params::GetCollectionCounts, - ) -> Result { + ) -> DbResult { let (sqlparams, sqlparam_types) = params! 
{ "fxa_uid" => user_id.fxa_uid, "fxa_kid" => user_id.fxa_kid, @@ -684,20 +689,20 @@ impl SpannerDb { let collection_id = row[0] .get_string_value() .parse::() - .map_err(|e| DbErrorKind::Integrity(e.to_string()))?; + .map_err(|e| DbError::integrity(e.to_string()))?; let count = row[1] .get_string_value() .parse::() - .map_err(|e| DbErrorKind::Integrity(e.to_string()))?; + .map_err(|e| DbError::integrity(e.to_string()))?; counts.insert(collection_id, count); } self.map_collection_names(counts).await } - pub async fn get_collection_usage_async( + async fn get_collection_usage_async( &self, user_id: params::GetCollectionUsage, - ) -> Result { + ) -> DbResult { let (sqlparams, sqlparam_types) = params! { "fxa_uid" => user_id.fxa_uid, "fxa_kid" => user_id.fxa_kid @@ -720,20 +725,20 @@ impl SpannerDb { let collection_id = row[0] .get_string_value() .parse::() - .map_err(|e| DbErrorKind::Integrity(e.to_string()))?; + .map_err(|e| DbError::integrity(e.to_string()))?; let usage = row[1] .get_string_value() .parse::() - .map_err(|e| DbErrorKind::Integrity(e.to_string()))?; + .map_err(|e| DbError::integrity(e.to_string()))?; usages.insert(collection_id, usage); } self.map_collection_names(usages).await } - pub async fn get_storage_timestamp( + async fn get_storage_timestamp( &self, user_id: params::GetStorageTimestamp, - ) -> Result { + ) -> DbResult { let (sqlparams, mut sqlparam_types) = params! 
{ "fxa_uid" => user_id.fxa_uid, "fxa_kid" => user_id.fxa_kid, @@ -754,16 +759,17 @@ impl SpannerDb { .one() .await?; if row[0].has_null_value() { - SyncTimestamp::from_i64(0) + SyncTimestamp::from_i64(0).map_err(|e| DbError::integrity(e.to_string())) } else { - SyncTimestamp::from_rfc3339(row[0].get_string_value()) + sync_timestamp_from_rfc3339(row[0].get_string_value()) } + .map_err(Into::into) } - pub async fn get_storage_usage_async( + async fn get_storage_usage_async( &self, user_id: params::GetStorageUsage, - ) -> Result { + ) -> DbResult { let (sqlparams, sqlparam_types) = params! { "fxa_uid" => user_id.fxa_uid, "fxa_kid" => user_id.fxa_kid @@ -786,17 +792,17 @@ impl SpannerDb { let usage = result[0] .get_string_value() .parse::() - .map_err(|e| DbErrorKind::Integrity(e.to_string()))?; + .map_err(|e| DbError::integrity(e.to_string()))?; Ok(usage as u64) } else { Ok(0) } } - pub async fn get_quota_usage_async( + async fn get_quota_usage_async( &self, params: params::GetQuotaUsage, - ) -> Result { + ) -> DbResult { if !self.quota.enabled { return Ok(results::GetQuotaUsage::default()); } @@ -822,31 +828,31 @@ impl SpannerDb { result[0] .get_string_value() .parse::() - .map_err(|e| DbErrorKind::Integrity(e.to_string()))? + .map_err(|e| DbError::integrity(e.to_string()))? } else { 0 }; let count = result[1] .get_string_value() .parse::() - .map_err(|e| DbErrorKind::Integrity(e.to_string()))?; + .map_err(|e| DbError::integrity(e.to_string()))?; Ok(results::GetQuotaUsage { total_bytes, count }) } else { Ok(results::GetQuotaUsage::default()) } } - pub async fn update_user_collection_quotas( + pub(super) async fn update_user_collection_quotas( &self, user: &UserIdentifier, collection_id: i32, - ) -> Result { + ) -> DbResult { // This will also update the counts in user_collections, since `update_collection_sync` // is called very early to ensure the record exists, and return the timestamp. 
// This will also write the tombstone if there are no records and we're explicitly // specifying a TOMBSTONE collection_id. // This function should be called after any write operation. - let timestamp = self.timestamp()?; + let timestamp = self.checked_timestamp()?; let (mut sqlparams, mut sqltypes) = params! { "fxa_uid" => user.fxa_uid.clone(), "fxa_kid" => user.fxa_kid.clone(), @@ -903,12 +909,9 @@ impl SpannerDb { ); sqltypes.insert( "total_bytes".to_owned(), - crate::db::spanner::support::as_type(TypeCode::INT64), - ); - sqltypes.insert( - "count".to_owned(), - crate::db::spanner::support::as_type(TypeCode::INT64), + super::support::as_type(TypeCode::INT64), ); + sqltypes.insert("count".to_owned(), super::support::as_type(TypeCode::INT64)); "UPDATE user_collections SET modified = @modified, count = @count, @@ -965,13 +968,13 @@ impl SpannerDb { Ok(timestamp) } - async fn erect_tombstone(&self, user_id: &UserIdentifier) -> Result { + async fn erect_tombstone(&self, user_id: &UserIdentifier) -> DbResult { // Delete the old tombstone (if it exists) let (params, mut param_types) = params! { "fxa_uid" => user_id.fxa_uid.clone(), "fxa_kid" => user_id.fxa_kid.clone(), "collection_id" => TOMBSTONE, - "modified" => self.timestamp()?.as_rfc3339()? + "modified" => self.checked_timestamp()?.as_rfc3339()? }; param_types.insert("modified".to_owned(), as_type(TypeCode::TIMESTAMP)); self.sql( @@ -988,10 +991,10 @@ impl SpannerDb { .await?; // Return timestamp, because sometimes there's a delay between writing and // reading the database. - self.timestamp() + self.checked_timestamp() } - pub async fn delete_storage_async(&self, user_id: params::DeleteStorage) -> Result<()> { + async fn delete_storage_async(&self, user_id: params::DeleteStorage) -> DbResult<()> { // Also deletes child bsos/batch rows (INTERLEAVE IN PARENT // user_collections ON DELETE CASCADE) let (sqlparams, sqlparam_types) = params! 
{ @@ -1010,17 +1013,17 @@ impl SpannerDb { Ok(()) } - pub fn timestamp(&self) -> Result { + pub fn checked_timestamp(&self) -> DbResult { self.session .borrow() .timestamp - .ok_or_else(|| DbError::internal("CURRENT_TIMESTAMP() not read yet")) + .ok_or_else(|| DbError::internal("CURRENT_TIMESTAMP() not read yet".to_owned())) } - pub async fn delete_collection_async( + async fn delete_collection_async( &self, params: params::DeleteCollection, - ) -> Result { + ) -> DbResult { // Also deletes child bsos/batch rows (INTERLEAVE IN PARENT // user_collections ON DELETE CASCADE) let collection_id = self.get_collection_id_async(¶ms.collection).await?; @@ -1059,7 +1062,7 @@ impl SpannerDb { user_id: &UserIdentifier, collection_id: i32, collection: &str, - ) -> Result { + ) -> DbResult { // NOTE: Spanner supports upserts via its InsertOrUpdate mutation but // lacks a SQL equivalent. This call could be 1 InsertOrUpdate instead // of 2 queries but would require put/post_bsos to also use mutations. @@ -1069,7 +1072,7 @@ impl SpannerDb { // Mutations don't run in the same order as ExecuteSql calls, they are // buffered on the client side and only issued to Spanner in the final // transaction Commit. - let timestamp = self.timestamp()?; + let timestamp = self.checked_timestamp()?; if !cfg!(test) && self.session.borrow().updated_collection { // No need to touch it again (except during tests where we // currently reuse Dbs for multiple requests) @@ -1130,7 +1133,7 @@ impl SpannerDb { Ok(timestamp) } - pub async fn delete_bso_async(&self, params: params::DeleteBso) -> Result { + async fn delete_bso_async(&self, params: params::DeleteBso) -> DbResult { let collection_id = self.get_collection_id_async(¶ms.collection).await?; let user_id = params.user_id.clone(); let (sqlparams, sqlparam_types) = params! { @@ -1152,7 +1155,7 @@ impl SpannerDb { .execute_dml_async(&self.conn) .await?; if affected_rows == 0 { - Err(DbErrorKind::BsoNotFound)? 
+ Err(DbError::bso_not_found()) } else { self.metrics.incr("storage.spanner.delete_bso"); Ok(self @@ -1161,10 +1164,7 @@ impl SpannerDb { } } - pub async fn delete_bsos_async( - &self, - params: params::DeleteBsos, - ) -> Result { + async fn delete_bsos_async(&self, params: params::DeleteBsos) -> DbResult { let user_id = params.user_id.clone(); let collection_id = self.get_collection_id_async(¶ms.collection).await?; @@ -1197,7 +1197,7 @@ impl SpannerDb { &self, query_str: &str, params: params::GetBsos, - ) -> Result { + ) -> DbResult { let mut query = query_str.to_owned(); let (mut sqlparams, mut sqlparam_types) = params! { "fxa_uid" => params.user_id.fxa_uid, @@ -1353,7 +1353,7 @@ impl SpannerDb { */ } - pub async fn get_bsos_async(&self, params: params::GetBsos) -> Result { + async fn get_bsos_async(&self, params: params::GetBsos) -> DbResult { let query = "\ SELECT bso_id, sortindex, payload, modified, expiry FROM bsos @@ -1393,7 +1393,7 @@ impl SpannerDb { }) } - pub async fn get_bso_ids_async(&self, params: params::GetBsos) -> Result { + async fn get_bso_ids_async(&self, params: params::GetBsos) -> DbResult { let limit = params.limit.map(i64::from).unwrap_or(-1); let params::Offset { offset, timestamp } = params.offset.clone().unwrap_or_default(); let sort = params.sort; @@ -1412,7 +1412,7 @@ impl SpannerDb { while let Some(row) = stream.next_async().await { let mut row = row?; ids.push(row[0].take_string_value()); - modifieds.push(SyncTimestamp::from_rfc3339(row[1].get_string_value())?.as_i64()); + modifieds.push(sync_timestamp_from_rfc3339(row[1].get_string_value())?.as_i64()); } // NOTE: when bsos.len() == 0, server-syncstorage (the Python impl) // makes an additional call to get_collection_timestamp to potentially @@ -1435,7 +1435,7 @@ impl SpannerDb { }) } - pub async fn get_bso_async(&self, params: params::GetBso) -> Result> { + async fn get_bso_async(&self, params: params::GetBso) -> DbResult> { let collection_id = 
self.get_collection_id_async(¶ms.collection).await?; let (sqlparams, sqlparam_types) = params! { "fxa_uid" => params.user_id.fxa_uid, @@ -1461,10 +1461,10 @@ impl SpannerDb { .transpose() } - pub async fn get_bso_timestamp_async( + async fn get_bso_timestamp_async( &self, params: params::GetBsoTimestamp, - ) -> Result { + ) -> DbResult { let collection_id = self.get_collection_id_async(¶ms.collection).await?; let (sqlparams, sqlparam_types) = params! { "fxa_uid" => params.user_id.fxa_uid, @@ -1489,13 +1489,15 @@ impl SpannerDb { .one_or_none() .await?; if let Some(result) = result { - SyncTimestamp::from_rfc3339(result[0].get_string_value()) + sync_timestamp_from_rfc3339(result[0].get_string_value()) } else { - SyncTimestamp::from_i64(0) + SyncTimestamp::from_i64(0).map_err(|e| DbError::integrity(e.to_string())) } + .map_err(Into::into) } - pub async fn put_bso_async(&self, params: params::PutBso) -> Result { + #[allow(unused)] + async fn put_bso_async(&self, params: params::PutBso) -> DbResult { let bsos = vec![params::PostCollectionBso { id: params.id, sortindex: params.sortindex, @@ -1515,7 +1517,7 @@ impl SpannerDb { Ok(result.modified) } - pub async fn post_bsos_async(&self, params: params::PostBsos) -> Result { + async fn post_bsos_async(&self, params: params::PostBsos) -> DbResult { let user_id = params.user_id; let collection_id = self .get_or_create_collection_id_async(¶ms.collection) @@ -1581,11 +1583,10 @@ impl SpannerDb { "⚠️Attempted to load too much data into Spanner: {:?} bytes", load_size ); - return Err(DbErrorKind::SpannerTooLarge(format!( + return Err(DbError::too_large(format!( "Committed data too large: {}", load_size - )) - .into()); + ))); } if !inserts.is_empty() { @@ -1621,7 +1622,7 @@ impl SpannerDb { Ok(result) } - async fn check_async(&self) -> Result { + async fn check_async(&self) -> DbResult { // TODO: is there a better check than just fetching UTC? self.sql("SELECT CURRENT_TIMESTAMP()")? .execute_async(&self.conn)? 
@@ -1635,15 +1636,15 @@ impl SpannerDb { let mut tags = HashMap::default(); tags.insert("collection".to_owned(), collection.to_owned()); self.metrics.incr_with_tags("storage.quota.at_limit", tags); - DbErrorKind::Quota.into() + DbError::quota() } - pub async fn check_quota( + pub(super) async fn check_quota( &self, user_id: &UserIdentifier, collection: &str, collection_id: i32, - ) -> Result> { + ) -> DbResult> { // duplicate quota trap in test func below. if !self.quota.enabled { return Ok(None); @@ -1662,13 +1663,14 @@ impl SpannerDb { warn!("Quota at limit for user's collection: ({} bytes)", usage.total_bytes; "collection"=>collection); } } - Ok(Some(usage.total_bytes as usize)) + Ok(Some(usage.total_bytes)) } // NOTE: Currently this put_bso_async_test impl. is only used during db tests, // see above for the non-tests version - pub async fn put_bso_async_test(&self, bso: params::PutBso) -> Result { - use syncserver_db_common::util::to_rfc3339; + #[allow(unused)] + async fn put_bso_async_test(&self, bso: params::PutBso) -> DbResult { + use syncstorage_db_common::util::to_rfc3339; let collection_id = self .get_or_create_collection_id_async(&bso.collection) .await?; @@ -1685,7 +1687,7 @@ impl SpannerDb { // prewarm the collections table by ensuring that the row is added if not present. self.update_collection_async(&bso.user_id, collection_id, &bso.collection) .await?; - let timestamp = self.timestamp()?; + let timestamp = self.checked_timestamp()?; let result = self .sql( @@ -1841,12 +1843,13 @@ impl SpannerDb { // NOTE: Currently this post_bso_async_test impl. 
is only used during db tests, // see above for the non-tests version - pub async fn post_bsos_async_test(&self, input: params::PostBsos) -> Result { + #[allow(unused)] + async fn post_bsos_async_test(&self, input: params::PostBsos) -> DbResult { let collection_id = self .get_or_create_collection_id_async(&input.collection) .await?; let mut result = results::PostBsos { - modified: self.timestamp()?, + modified: self.checked_timestamp()?, success: Default::default(), failed: input.failed, }; @@ -1870,28 +1873,30 @@ impl SpannerDb { } } -impl<'a> Db<'a> for SpannerDb { - fn commit(&self) -> DbFuture<'_, ()> { +impl Db for SpannerDb { + type Error = DbError; + + fn commit(&self) -> DbFuture<'_, (), Self::Error> { let db = self.clone(); Box::pin(async move { db.commit_async().map_err(Into::into).await }) } - fn rollback(&self) -> DbFuture<'_, ()> { + fn rollback(&self) -> DbFuture<'_, (), Self::Error> { let db = self.clone(); Box::pin(async move { db.rollback_async().map_err(Into::into).await }) } - fn lock_for_read(&self, param: params::LockCollection) -> DbFuture<'_, ()> { + fn lock_for_read(&self, param: params::LockCollection) -> DbFuture<'_, (), Self::Error> { let db = self.clone(); Box::pin(async move { db.lock_for_read_async(param).map_err(Into::into).await }) } - fn lock_for_write(&self, param: params::LockCollection) -> DbFuture<'_, ()> { + fn lock_for_write(&self, param: params::LockCollection) -> DbFuture<'_, (), Self::Error> { let db = self.clone(); Box::pin(async move { db.lock_for_write_async(param).map_err(Into::into).await }) } - fn begin(&self, for_write: bool) -> DbFuture<'_, ()> { + fn begin(&self, for_write: bool) -> DbFuture<'_, (), Self::Error> { let db = self.clone(); Box::pin(async move { db.begin_async(for_write).map_err(Into::into).await }) } @@ -1899,7 +1904,7 @@ impl<'a> Db<'a> for SpannerDb { fn get_collection_timestamp( &self, param: params::GetCollectionTimestamp, - ) -> DbFuture<'_, results::GetCollectionTimestamp> { + ) -> DbFuture<'_, 
results::GetCollectionTimestamp, Self::Error> { let db = self.clone(); Box::pin(async move { db.get_collection_timestamp_async(param) @@ -1911,7 +1916,7 @@ impl<'a> Db<'a> for SpannerDb { fn get_storage_timestamp( &self, param: params::GetStorageTimestamp, - ) -> DbFuture<'_, results::GetStorageTimestamp> { + ) -> DbFuture<'_, results::GetStorageTimestamp, Self::Error> { let db = self.clone(); Box::pin(async move { db.get_storage_timestamp(param).map_err(Into::into).await }) } @@ -1919,16 +1924,12 @@ impl<'a> Db<'a> for SpannerDb { fn delete_collection( &self, param: params::DeleteCollection, - ) -> DbFuture<'_, results::DeleteCollection> { + ) -> DbFuture<'_, results::DeleteCollection, Self::Error> { let db = self.clone(); Box::pin(async move { db.delete_collection_async(param).map_err(Into::into).await }) } - fn box_clone(&self) -> Box> { - Box::new(self.clone()) - } - - fn check(&self) -> DbFuture<'_, results::Check> { + fn check(&self) -> DbFuture<'_, results::Check, Self::Error> { let db = self.clone(); Box::pin(async move { db.check_async().map_err(Into::into).await }) } @@ -1936,7 +1937,7 @@ impl<'a> Db<'a> for SpannerDb { fn get_collection_timestamps( &self, user_id: params::GetCollectionTimestamps, - ) -> DbFuture<'_, results::GetCollectionTimestamps> { + ) -> DbFuture<'_, results::GetCollectionTimestamps, Self::Error> { let db = self.clone(); Box::pin(async move { db.get_collection_timestamps_async(user_id) @@ -1948,7 +1949,7 @@ impl<'a> Db<'a> for SpannerDb { fn get_collection_counts( &self, user_id: params::GetCollectionCounts, - ) -> DbFuture<'_, results::GetCollectionCounts> { + ) -> DbFuture<'_, results::GetCollectionCounts, Self::Error> { let db = self.clone(); Box::pin(async move { db.get_collection_counts_async(user_id) @@ -1960,7 +1961,7 @@ impl<'a> Db<'a> for SpannerDb { fn get_collection_usage( &self, user_id: params::GetCollectionUsage, - ) -> DbFuture<'_, results::GetCollectionUsage> { + ) -> DbFuture<'_, results::GetCollectionUsage, 
Self::Error> { let db = self.clone(); Box::pin(async move { db.get_collection_usage_async(user_id) @@ -1972,7 +1973,7 @@ impl<'a> Db<'a> for SpannerDb { fn get_storage_usage( &self, param: params::GetStorageUsage, - ) -> DbFuture<'_, results::GetStorageUsage> { + ) -> DbFuture<'_, results::GetStorageUsage, Self::Error> { let db = self.clone(); Box::pin(async move { db.get_storage_usage_async(param).map_err(Into::into).await }) } @@ -1980,37 +1981,49 @@ impl<'a> Db<'a> for SpannerDb { fn get_quota_usage( &self, param: params::GetQuotaUsage, - ) -> DbFuture<'_, results::GetQuotaUsage> { + ) -> DbFuture<'_, results::GetQuotaUsage, Self::Error> { let db = self.clone(); Box::pin(async move { db.get_quota_usage_async(param).map_err(Into::into).await }) } - fn delete_storage(&self, param: params::DeleteStorage) -> DbFuture<'_, results::DeleteStorage> { + fn delete_storage( + &self, + param: params::DeleteStorage, + ) -> DbFuture<'_, results::DeleteStorage, Self::Error> { let db = self.clone(); Box::pin(async move { db.delete_storage_async(param).map_err(Into::into).await }) } - fn delete_bso(&self, param: params::DeleteBso) -> DbFuture<'_, results::DeleteBso> { + fn delete_bso( + &self, + param: params::DeleteBso, + ) -> DbFuture<'_, results::DeleteBso, Self::Error> { let db = self.clone(); Box::pin(async move { db.delete_bso_async(param).map_err(Into::into).await }) } - fn delete_bsos(&self, param: params::DeleteBsos) -> DbFuture<'_, results::DeleteBsos> { + fn delete_bsos( + &self, + param: params::DeleteBsos, + ) -> DbFuture<'_, results::DeleteBsos, Self::Error> { let db = self.clone(); Box::pin(async move { db.delete_bsos_async(param).map_err(Into::into).await }) } - fn get_bsos(&self, param: params::GetBsos) -> DbFuture<'_, results::GetBsos> { + fn get_bsos(&self, param: params::GetBsos) -> DbFuture<'_, results::GetBsos, Self::Error> { let db = self.clone(); Box::pin(async move { db.get_bsos_async(param).map_err(Into::into).await }) } - fn get_bso_ids(&self, param: 
params::GetBsoIds) -> DbFuture<'_, results::GetBsoIds> { + fn get_bso_ids( + &self, + param: params::GetBsoIds, + ) -> DbFuture<'_, results::GetBsoIds, Self::Error> { let db = self.clone(); Box::pin(async move { db.get_bso_ids_async(param).map_err(Into::into).await }) } - fn get_bso(&self, param: params::GetBso) -> DbFuture<'_, Option> { + fn get_bso(&self, param: params::GetBso) -> DbFuture<'_, Option, Self::Error> { let db = self.clone(); Box::pin(async move { db.get_bso_async(param).map_err(Into::into).await }) } @@ -2018,41 +2031,47 @@ impl<'a> Db<'a> for SpannerDb { fn get_bso_timestamp( &self, param: params::GetBsoTimestamp, - ) -> DbFuture<'_, results::GetBsoTimestamp> { + ) -> DbFuture<'_, results::GetBsoTimestamp, Self::Error> { let db = self.clone(); Box::pin(async move { db.get_bso_timestamp_async(param).map_err(Into::into).await }) } #[cfg(not(test))] - fn put_bso(&self, param: params::PutBso) -> DbFuture<'_, results::PutBso> { + fn put_bso(&self, param: params::PutBso) -> DbFuture<'_, results::PutBso, Self::Error> { let db = self.clone(); Box::pin(async move { db.put_bso_async(param).map_err(Into::into).await }) } #[cfg(test)] - fn put_bso(&self, param: params::PutBso) -> DbFuture<'_, results::PutBso> { + fn put_bso(&self, param: params::PutBso) -> DbFuture<'_, results::PutBso, Self::Error> { let db = self.clone(); Box::pin(async move { db.put_bso_async_test(param).map_err(Into::into).await }) } #[cfg(not(test))] - fn post_bsos(&self, param: params::PostBsos) -> DbFuture<'_, results::PostBsos> { + fn post_bsos(&self, param: params::PostBsos) -> DbFuture<'_, results::PostBsos, Self::Error> { let db = self.clone(); Box::pin(async move { db.post_bsos_async(param).map_err(Into::into).await }) } #[cfg(test)] - fn post_bsos(&self, param: params::PostBsos) -> DbFuture<'_, results::PostBsos> { + fn post_bsos(&self, param: params::PostBsos) -> DbFuture<'_, results::PostBsos, Self::Error> { let db = self.clone(); Box::pin(async move { 
db.post_bsos_async_test(param).map_err(Into::into).await }) } - fn create_batch(&self, param: params::CreateBatch) -> DbFuture<'_, results::CreateBatch> { + fn create_batch( + &self, + param: params::CreateBatch, + ) -> DbFuture<'_, results::CreateBatch, Self::Error> { let db = self.clone(); Box::pin(async move { batch::create_async(&db, param).map_err(Into::into).await }) } - fn validate_batch(&self, param: params::ValidateBatch) -> DbFuture<'_, results::ValidateBatch> { + fn validate_batch( + &self, + param: params::ValidateBatch, + ) -> DbFuture<'_, results::ValidateBatch, Self::Error> { let db = self.clone(); Box::pin(async move { batch::validate_async(&db, param).map_err(Into::into).await }) } @@ -2060,29 +2079,35 @@ impl<'a> Db<'a> for SpannerDb { fn append_to_batch( &self, param: params::AppendToBatch, - ) -> DbFuture<'_, results::AppendToBatch> { + ) -> DbFuture<'_, results::AppendToBatch, Self::Error> { let db = self.clone(); Box::pin(async move { batch::append_async(&db, param).map_err(Into::into).await }) } - fn get_batch(&self, param: params::GetBatch) -> DbFuture<'_, Option> { + fn get_batch( + &self, + param: params::GetBatch, + ) -> DbFuture<'_, Option, Self::Error> { let db = self.clone(); Box::pin(async move { batch::get_async(&db, param).map_err(Into::into).await }) } - fn commit_batch(&self, param: params::CommitBatch) -> DbFuture<'_, results::CommitBatch> { + fn commit_batch( + &self, + param: params::CommitBatch, + ) -> DbFuture<'_, results::CommitBatch, Self::Error> { let db = self.clone(); Box::pin(async move { batch::commit_async(&db, param).map_err(Into::into).await }) } - fn get_collection_id(&self, name: String) -> DbFuture<'_, i32> { + fn get_collection_id(&self, name: String) -> DbFuture<'_, i32, Self::Error> { let db = self.clone(); Box::pin(async move { db.get_collection_id_async(&name).map_err(Into::into).await }) } fn get_connection_info(&self) -> results::ConnectionInfo { let session = self.conn.session.clone(); - let now = now(); 
+ let now = super::now(); results::ConnectionInfo { spanner_age: session .create_time @@ -2098,12 +2123,15 @@ impl<'a> Db<'a> for SpannerDb { } } - fn create_collection(&self, name: String) -> DbFuture<'_, i32> { + fn create_collection(&self, name: String) -> DbFuture<'_, i32, Self::Error> { let db = self.clone(); Box::pin(async move { db.create_collection_async(&name).map_err(Into::into).await }) } - fn update_collection(&self, param: params::UpdateCollection) -> DbFuture<'_, SyncTimestamp> { + fn update_collection( + &self, + param: params::UpdateCollection, + ) -> DbFuture<'_, SyncTimestamp, Self::Error> { let db = self.clone(); Box::pin(async move { db.update_collection_async(¶m.user_id, param.collection_id, ¶m.collection) @@ -2113,7 +2141,7 @@ impl<'a> Db<'a> for SpannerDb { } fn timestamp(&self) -> SyncTimestamp { - self.timestamp() + self.checked_timestamp() .expect("set_timestamp() not called yet for SpannerDb") } @@ -2121,12 +2149,15 @@ impl<'a> Db<'a> for SpannerDb { SpannerDb::set_timestamp(self, timestamp) } - fn delete_batch(&self, param: params::DeleteBatch) -> DbFuture<'_, results::DeleteBatch> { + fn delete_batch( + &self, + param: params::DeleteBatch, + ) -> DbFuture<'_, results::DeleteBatch, Self::Error> { let db = self.clone(); Box::pin(async move { batch::delete_async(&db, param).map_err(Into::into).await }) } - fn clear_coll_cache(&self) -> DbFuture<'_, ()> { + fn clear_coll_cache(&self) -> DbFuture<'_, (), Self::Error> { let db = self.clone(); Box::pin(async move { db.coll_cache.clear().await; @@ -2141,4 +2172,12 @@ impl<'a> Db<'a> for SpannerDb { enforced, }; } + + fn box_clone(&self) -> Box> { + Box::new(self.clone()) + } +} + +fn sync_timestamp_from_rfc3339(val: &str) -> Result { + SyncTimestamp::from_rfc3339(val).map_err(|e| DbError::integrity(e.to_string())) } diff --git a/syncserver/src/db/spanner/pool.rs b/syncstorage-spanner/src/pool.rs similarity index 75% rename from syncserver/src/db/spanner/pool.rs rename to 
syncstorage-spanner/src/pool.rs index f1d474d6..35de15dd 100644 --- a/syncserver/src/db/spanner/pool.rs +++ b/syncstorage-spanner/src/pool.rs @@ -1,32 +1,20 @@ use std::{collections::HashMap, fmt, sync::Arc, time::Duration}; use async_trait::async_trait; -use bb8::ErrorSink; -use syncserver_db_common::{error::DbError, Db, DbPool, GetPoolState, PoolState, STD_COLLS}; +use syncserver_common::{BlockingThreadpool, Metrics}; +use syncserver_db_common::{GetPoolState, PoolState}; +use syncstorage_db_common::{Db, DbPool, STD_COLLS}; use syncstorage_settings::{Quota, Settings}; use tokio::sync::RwLock; -use crate::db::BlockingThreadpool; -use crate::server::metrics::Metrics; - -pub use super::manager::Conn; +pub(super) use super::manager::Conn; use super::{ + error::DbError, manager::{SpannerSession, SpannerSessionManager}, - models::Result, models::SpannerDb, + DbResult, }; -embed_migrations!(); - -/// Run the diesel embedded migrations -/// -/// Mysql DDL statements implicitly commit which could disrupt MysqlPool's -/// begin_test_transaction during tests. So this runs on its own separate conn. -//pub fn run_embedded_migrations(settings: &Settings) -> Result<()> { -// let conn = MysqlConnection::establish(&settings.database_url)?; -// Ok(embedded_migrations::run(&conn)?) -//} - #[derive(Clone)] pub struct SpannerDbPool { /// Pool of db connections @@ -40,20 +28,20 @@ pub struct SpannerDbPool { impl SpannerDbPool { /// Creates a new pool of Spanner db connections. 
- pub async fn new( + pub fn new( settings: &Settings, metrics: &Metrics, blocking_threadpool: Arc, - ) -> Result { + ) -> DbResult { //run_embedded_migrations(settings)?; - Self::new_without_migrations(settings, metrics, blocking_threadpool).await + Self::new_without_migrations(settings, metrics, blocking_threadpool) } - pub async fn new_without_migrations( + pub fn new_without_migrations( settings: &Settings, metrics: &Metrics, blocking_threadpool: Arc, - ) -> Result { + ) -> DbResult { let max_size = settings.database_pool_max_size as usize; let wait = settings .database_pool_connection_timeout @@ -78,11 +66,11 @@ impl SpannerDbPool { }) } - pub async fn get_async(&self) -> Result { + pub async fn get_async(&self) -> DbResult { let conn = self.pool.get().await.map_err(|e| match e { deadpool::managed::PoolError::Backend(dbe) => dbe, deadpool::managed::PoolError::Timeout(timeout_type) => { - DbError::internal(&format!("deadpool Timeout: {:?}", timeout_type)) + DbError::internal(format!("deadpool Timeout: {:?}", timeout_type)) } })?; Ok(SpannerDb::new( @@ -96,21 +84,23 @@ impl SpannerDbPool { #[async_trait] impl DbPool for SpannerDbPool { - async fn get<'a>(&'a self) -> Result>> { + type Error = DbError; + + async fn get(&self) -> DbResult>> { let mut metrics = self.metrics.clone(); metrics.start_timer("storage.spanner.get_pool", None); self.get_async() .await - .map(|db| Box::new(db) as Box>) + .map(|db| Box::new(db) as Box>) .map_err(Into::into) } - fn validate_batch_id(&self, id: String) -> Result<()> { - super::batch::validate_batch_id(&id) + fn validate_batch_id(&self, id: String) -> DbResult<()> { + super::batch::validate_batch_id(&id).map_err(Into::into) } - fn box_clone(&self) -> Box { + fn box_clone(&self) -> Box> { Box::new(self.clone()) } } @@ -130,7 +120,7 @@ impl fmt::Debug for SpannerDbPool { } #[derive(Debug)] -pub struct CollectionCache { +pub(super) struct CollectionCache { pub by_name: RwLock>, pub by_id: RwLock>, } @@ -194,19 +184,3 @@ impl 
Default for CollectionCache { } } } - -/// Logs internal bb8 errors -#[derive(Debug, Clone, Copy)] -pub struct LoggingErrorSink; - -impl ErrorSink for LoggingErrorSink { - fn sink(&self, e: E) { - error!("bb8 Error: {}", e); - let event = sentry::event_from_error(&e); - sentry::capture_event(event); - } - - fn boxed_clone(&self) -> Box> { - Box::new(*self) - } -} diff --git a/syncserver/src/db/spanner/schema.ddl b/syncstorage-spanner/src/schema.ddl similarity index 100% rename from syncserver/src/db/spanner/schema.ddl rename to syncstorage-spanner/src/schema.ddl diff --git a/syncserver/src/db/spanner/support.rs b/syncstorage-spanner/src/support.rs similarity index 87% rename from syncserver/src/db/spanner/support.rs rename to syncstorage-spanner/src/support.rs index 5abeddcc..3fac678f 100644 --- a/syncserver/src/db/spanner/support.rs +++ b/syncstorage-spanner/src/support.rs @@ -1,7 +1,6 @@ use std::{ collections::{HashMap, VecDeque}, mem, - result::Result as StdResult, }; use futures::stream::{StreamExt, StreamFuture}; @@ -15,15 +14,11 @@ use protobuf::{ well_known_types::{ListValue, NullValue, Struct, Value}, RepeatedField, }; -use syncserver_db_common::{ - error::{DbError, DbErrorKind}, - params, results, - util::to_rfc3339, - util::SyncTimestamp, - UserIdentifier, DEFAULT_BSO_TTL, +use syncstorage_db_common::{ + params, results, util::to_rfc3339, util::SyncTimestamp, UserIdentifier, DEFAULT_BSO_TTL, }; -use super::{models::Result, pool::Conn}; +use super::{error::DbError, pool::Conn, DbResult}; pub trait IntoSpannerValue { const TYPE_CODE: TypeCode; @@ -169,7 +164,7 @@ impl ExecuteSqlRequestBuilder { } /// Execute a SQL read statement but return a non-blocking streaming result - pub fn execute_async(self, conn: &Conn) -> Result { + pub fn execute_async(self, conn: &Conn) -> DbResult { let stream = conn .client .execute_streaming_sql(&self.prepare_request(conn))?; @@ -177,7 +172,7 @@ impl ExecuteSqlRequestBuilder { } /// Execute a DML statement, returning the 
exact count of modified rows - pub async fn execute_dml_async(self, conn: &Conn) -> Result { + pub async fn execute_dml_async(self, conn: &Conn) -> DbResult { let rs = conn .client .execute_sql_async(&self.prepare_request(conn))? @@ -230,20 +225,24 @@ impl StreamedResultSetAsync { } } - pub async fn one(&mut self) -> Result> { + pub async fn one(&mut self) -> DbResult> { if let Some(result) = self.one_or_none().await? { Ok(result) } else { - Err(DbError::internal("No rows matched the given query."))? + Err(DbError::internal( + "No rows matched the given query.".to_owned(), + )) } } - pub async fn one_or_none(&mut self) -> Result>> { + pub async fn one_or_none(&mut self) -> DbResult>> { let result = self.next_async().await; if result.is_none() { Ok(None) } else if self.next_async().await.is_some() { - Err(DbError::internal("Expected one result; got more."))? + Err(DbError::internal( + "Expected one result; got more.".to_owned(), + )) } else { result.transpose() } @@ -252,7 +251,7 @@ impl StreamedResultSetAsync { /// Pull and process the next values from the Stream /// /// Returns false when the stream is finished - async fn consume_next(&mut self) -> Result { + async fn consume_next(&mut self) -> DbResult { let (result, stream) = self .stream .take() @@ -286,9 +285,9 @@ impl StreamedResultSetAsync { let fields = self.fields(); let current_row_i = self.current_row.len(); if fields.len() <= current_row_i { - Err(DbErrorKind::Integrity( + return Err(DbError::integrity( "Invalid PartialResultSet fields".to_owned(), - ))?; + )); } let field = &fields[current_row_i]; values[0] = merge_by_type(pending_chunk, &values[0], field.get_field_type())?; @@ -314,7 +313,7 @@ impl StreamedResultSetAsync { // We could implement Stream::poll_next instead of this, but // this is easier for now and we can refactor into the trait later - pub async fn next_async(&mut self) -> Option>> { + pub async fn next_async(&mut self) -> Option>> { while self.rows.is_empty() { match 
self.consume_next().await { Ok(true) => (), @@ -329,7 +328,7 @@ impl StreamedResultSetAsync { } } -fn merge_by_type(lhs: Value, rhs: &Value, field_type: &Type) -> Result { +fn merge_by_type(lhs: Value, rhs: &Value, field_type: &Type) -> DbResult { // We only support merging basic string types as that's all we currently use. // The python client also supports: float64, array, struct. The go client // only additionally supports array (claiming structs are only returned as @@ -344,25 +343,28 @@ fn merge_by_type(lhs: Value, rhs: &Value, field_type: &Type) -> Result { } } -fn unsupported_merge(field_type: &Type) -> Result { - Err(DbError::internal(&format!( +fn unsupported_merge(field_type: &Type) -> DbResult { + Err(DbError::internal(format!( "merge not supported, type: {:?}", field_type ))) } -fn merge_string(mut lhs: Value, rhs: &Value) -> Result { +fn merge_string(mut lhs: Value, rhs: &Value) -> DbResult { if !lhs.has_string_value() || !rhs.has_string_value() { - Err(DbError::internal("merge_string has no string value"))? 
+ return Err(DbError::internal( + "merge_string has no string value".to_owned(), + )); } let mut merged = lhs.take_string_value(); merged.push_str(rhs.get_string_value()); Ok(merged.into_spanner_value()) } -pub fn bso_from_row(mut row: Vec) -> Result { +pub fn bso_from_row(mut row: Vec) -> DbResult { let modified_string = &row[3].get_string_value(); - let modified = SyncTimestamp::from_rfc3339(modified_string)?; + let modified = SyncTimestamp::from_rfc3339(modified_string) + .map_err(|e| DbError::integrity(e.to_string()))?; Ok(results::GetBso { id: row[0].take_string_value(), sortindex: if row[1].has_null_value() { @@ -372,12 +374,14 @@ pub fn bso_from_row(mut row: Vec) -> Result { row[1] .get_string_value() .parse::() - .map_err(|e| DbErrorKind::Integrity(e.to_string()))?, + .map_err(|e| DbError::integrity(e.to_string()))?, ) }, payload: row[2].take_string_value(), modified, - expiry: SyncTimestamp::from_rfc3339(row[4].get_string_value())?.as_i64(), + expiry: SyncTimestamp::from_rfc3339(row[4].get_string_value()) + .map_err(|e| DbError::integrity(e.to_string()))? 
+ .as_i64(), }) } @@ -386,7 +390,7 @@ pub fn bso_to_insert_row( collection_id: i32, bso: params::PostCollectionBso, now: SyncTimestamp, -) -> Result { +) -> DbResult { let sortindex = bso .sortindex .map(|sortindex| sortindex.into_spanner_value()) @@ -413,7 +417,7 @@ pub fn bso_to_update_row( collection_id: i32, bso: params::PostCollectionBso, now: SyncTimestamp, -) -> Result<(Vec<&'static str>, ListValue)> { +) -> DbResult<(Vec<&'static str>, ListValue)> { let mut columns = vec!["fxa_uid", "fxa_kid", "collection_id", "bso_id"]; let mut values = vec![ user_id.fxa_uid.clone().into_spanner_value(), @@ -454,10 +458,10 @@ pub struct MapAndThenIterator { impl Iterator for MapAndThenIterator where - F: FnMut(A) -> StdResult, - I: Iterator>, + F: FnMut(A) -> Result, + I: Iterator>, { - type Item = StdResult; + type Item = Result; fn next(&mut self) -> Option { self.iter.next().map(|result| result.and_then(&mut self.f)) @@ -466,13 +470,13 @@ where pub trait MapAndThenTrait { /// Return an iterator adaptor that applies the provided closure to every - /// Result::Ok value. Result::Err values are unchanged. + /// DbResult::Ok value. DbResult::Err values are unchanged. 
/// /// The closure can be used for control flow based on result values fn map_and_then(self, func: F) -> MapAndThenIterator where - Self: Sized + Iterator>, - F: FnMut(A) -> StdResult, + Self: Sized + Iterator>, + F: FnMut(A) -> Result, { MapAndThenIterator { iter: self, @@ -481,4 +485,4 @@ pub trait MapAndThenTrait { } } -impl MapAndThenTrait for I where I: Sized + Iterator> {} +impl MapAndThenTrait for I where I: Sized + Iterator> {} diff --git a/tokenserver-auth/Cargo.toml b/tokenserver-auth/Cargo.toml new file mode 100644 index 00000000..fa68fbc3 --- /dev/null +++ b/tokenserver-auth/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "tokenserver-auth" +version.workspace=true +license.workspace=true +authors.workspace=true +edition.workspace=true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +futures.workspace=true +serde.workspace=true +serde_json.workspace=true + +async-trait = "0.1.40" +dyn-clone = "1.0.4" +pyo3 = { version = "0.14", features = ["auto-initialize"] } +reqwest = { version = "0.10.10", features = ["json", "rustls-tls"] } +syncserver-common = { path = "../syncserver-common" } +tokenserver-common = { path = "../tokenserver-common" } +tokenserver-settings = { path = "../tokenserver-settings" } +# pinning to 0.2.4 due to high number of dependencies (actix, bb8, deadpool, etc.) 
+tokio = { version = "0.2.4", features = ["blocking"] } + +[dev-dependencies] +mockito = "0.30.0" diff --git a/syncserver/src/tokenserver/auth/browserid.rs b/tokenserver-auth/src/browserid.rs similarity index 98% rename from syncserver/src/tokenserver/auth/browserid.rs rename to tokenserver-auth/src/browserid.rs index 8be9bb0f..2757cf42 100644 --- a/syncserver/src/tokenserver/auth/browserid.rs +++ b/tokenserver-auth/src/browserid.rs @@ -1,7 +1,7 @@ use async_trait::async_trait; use reqwest::{Client as ReqwestClient, StatusCode}; use serde::{de::Deserializer, Deserialize, Serialize}; -use tokenserver_common::error::{ErrorLocation, TokenType, TokenserverError}; +use tokenserver_common::{ErrorLocation, TokenType, TokenserverError}; use tokenserver_settings::Settings; use super::VerifyToken; @@ -19,7 +19,7 @@ pub struct VerifyOutput { /// The verifier used to verify BrowserID assertions. #[derive(Clone)] -pub struct RemoteVerifier { +pub struct Verifier { audience: String, issuer: String, fxa_verifier_url: String, @@ -28,7 +28,7 @@ pub struct RemoteVerifier { request_client: ReqwestClient, } -impl TryFrom<&Settings> for RemoteVerifier { +impl TryFrom<&Settings> for Verifier { type Error = &'static str; fn try_from(settings: &Settings) -> Result { @@ -47,7 +47,7 @@ impl TryFrom<&Settings> for RemoteVerifier { } #[async_trait] -impl VerifyToken for RemoteVerifier { +impl VerifyToken for Verifier { type Output = VerifyOutput; /// Verifies a BrowserID assertion. 
Returns `VerifyOutput` for valid assertions and a @@ -288,7 +288,7 @@ mod tests { use mockito::{self, Mock}; use serde_json::json; - #[actix_rt::test] + #[tokio::test] async fn test_browserid_verifier_success() { let body = json!({ "status": "okay", @@ -305,7 +305,7 @@ mod tests { .with_header("content-type", "application/json") .with_body(body.to_string()) .create(); - let verifier = RemoteVerifier::try_from(&Settings { + let verifier = Verifier::try_from(&Settings { fxa_browserid_audience: "https://test.com".to_owned(), fxa_browserid_issuer: "accounts.firefox.com".to_owned(), fxa_browserid_server_url: format!("{}/v2", mockito::server_url()), @@ -326,11 +326,11 @@ mod tests { assert_eq!(expected_result, result); } - #[actix_rt::test] + #[tokio::test] async fn test_browserid_verifier_failure_cases() { const AUDIENCE: &str = "https://test.com"; - let verifier = RemoteVerifier::try_from(&Settings { + let verifier = Verifier::try_from(&Settings { fxa_browserid_audience: AUDIENCE.to_owned(), fxa_browserid_server_url: format!("{}/v2", mockito::server_url()), ..Default::default() @@ -446,7 +446,7 @@ mod tests { } } - #[actix_rt::test] + #[tokio::test] async fn test_browserid_verifier_rejects_unissuers() { const AUDIENCE: &str = "https://test.com"; const ISSUER: &str = "accounts.firefox.com"; @@ -470,7 +470,7 @@ mod tests { token_type: TokenType::BrowserId, ..TokenserverError::invalid_credentials("Unauthorized".to_owned()) }; - let verifier = RemoteVerifier::try_from(&Settings { + let verifier = Verifier::try_from(&Settings { fxa_browserid_audience: AUDIENCE.to_owned(), fxa_browserid_issuer: ISSUER.to_owned(), fxa_browserid_server_url: format!("{}/v2", mockito::server_url()), diff --git a/syncserver/src/tokenserver/auth/mod.rs b/tokenserver-auth/src/lib.rs similarity index 95% rename from syncserver/src/tokenserver/auth/mod.rs rename to tokenserver-auth/src/lib.rs index 1a5e4d54..9cc2fce8 100644 --- a/syncserver/src/tokenserver/auth/mod.rs +++ 
b/tokenserver-auth/src/lib.rs @@ -10,24 +10,19 @@ use pyo3::{ types::IntoPyDict, }; use serde::{Deserialize, Serialize}; -use tokenserver_common::error::TokenserverError; +use tokenserver_common::TokenserverError; /// Represents the origin of the token used by Sync clients to access their data. -#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)] +#[derive(Clone, Copy, Default, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)] #[serde(rename_all = "lowercase")] pub enum TokenserverOrigin { /// The Python Tokenserver. + #[default] Python, /// The Rust Tokenserver. Rust, } -impl Default for TokenserverOrigin { - fn default() -> Self { - TokenserverOrigin::Python - } -} - impl fmt::Display for TokenserverOrigin { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { diff --git a/syncserver/src/tokenserver/auth/oauth.rs b/tokenserver-auth/src/oauth.rs similarity index 98% rename from syncserver/src/tokenserver/auth/oauth.rs rename to tokenserver-auth/src/oauth.rs index 9853498b..fb61eb5f 100644 --- a/syncserver/src/tokenserver/auth/oauth.rs +++ b/tokenserver-auth/src/oauth.rs @@ -5,12 +5,12 @@ use pyo3::{ }; use serde::{Deserialize, Serialize}; use serde_json; -use tokenserver_common::error::TokenserverError; +use syncserver_common::BlockingThreadpool; +use tokenserver_common::TokenserverError; use tokenserver_settings::{Jwk, Settings}; use tokio::time; use super::VerifyToken; -use crate::server::BlockingThreadpool; use std::{sync::Arc, time::Duration}; @@ -156,7 +156,7 @@ impl VerifyToken for Verifier { .spawn(move || verify_inner(&verifier)); // The PyFxA OAuth client does not offer a way to set a request timeout, so we set one here - // by timing out the future if the verification process blocks its thread for longer + // by timing out the future if the verification process blocks this thread for longer // than the specified number of seconds. 
time::timeout(Duration::from_secs(self.timeout), fut) .await diff --git a/syncserver/src/tokenserver/auth/verify.py b/tokenserver-auth/src/verify.py similarity index 100% rename from syncserver/src/tokenserver/auth/verify.py rename to tokenserver-auth/src/verify.py diff --git a/tokenserver-common/Cargo.toml b/tokenserver-common/Cargo.toml index f0e8f0ec..e521c91d 100644 --- a/tokenserver-common/Cargo.toml +++ b/tokenserver-common/Cargo.toml @@ -1,13 +1,15 @@ [package] name = "tokenserver-common" -version = "0.13.7" -edition = "2021" +version.workspace=true +license.workspace=true +authors.workspace=true +edition.workspace=true [dependencies] +backtrace.workspace=true +serde.workspace=true +serde_json.workspace=true + actix-web = "3" -backtrace = "0.3.61" -serde = "1.0" -serde_json = { version = "1.0", features = ["arbitrary_precision"] } syncserver-common = { path = "../syncserver-common" } -syncserver-db-common = { path = "../syncserver-db-common" } thiserror = "1.0.26" diff --git a/tokenserver-common/src/error.rs b/tokenserver-common/src/error.rs index cab5ce00..1f7dcf31 100644 --- a/tokenserver-common/src/error.rs +++ b/tokenserver-common/src/error.rs @@ -7,8 +7,9 @@ use serde::{ Serialize, }; use syncserver_common::{InternalError, ReportableError}; -use syncserver_db_common::error::DbError; +/// An error type that represents application-specific errors to Tokenserver. This error is not +/// used to represent database-related errors; database-related errors have their own type. 
#[derive(Clone, Debug)] pub struct TokenserverError { pub status: &'static str, @@ -249,25 +250,6 @@ impl Serialize for TokenserverError { } } -impl From for TokenserverError { - fn from(db_error: DbError) -> Self { - TokenserverError { - description: db_error.to_string(), - context: db_error.to_string(), - backtrace: Box::new(db_error.backtrace), - http_status: if db_error.status.is_server_error() { - // Use the status code from the DbError if it already suggests an internal error; - // it might be more specific than `StatusCode::SERVICE_UNAVAILABLE` - db_error.status - } else { - StatusCode::SERVICE_UNAVAILABLE - }, - // An unhandled DbError in the Tokenserver code is an internal error - ..TokenserverError::internal_error() - } - } -} - impl From for HttpResponse { fn from(inner: TokenserverError) -> Self { ResponseError::error_response(&inner) diff --git a/tokenserver-common/src/lib.rs b/tokenserver-common/src/lib.rs index ab76d983..c03ea329 100644 --- a/tokenserver-common/src/lib.rs +++ b/tokenserver-common/src/lib.rs @@ -1,7 +1,9 @@ -pub mod error; +mod error; use serde::{Deserialize, Serialize}; +pub use error::{ErrorLocation, TokenType, TokenserverError}; + #[derive(Clone, Copy, Debug, Deserialize, Eq, PartialEq, Serialize)] pub enum NodeType { #[serde(rename = "mysql")] diff --git a/tokenserver-db/Cargo.toml b/tokenserver-db/Cargo.toml new file mode 100644 index 00000000..2f88730f --- /dev/null +++ b/tokenserver-db/Cargo.toml @@ -0,0 +1,32 @@ +[package] +name = "tokenserver-db" +version.workspace=true +license.workspace=true +authors.workspace=true +edition.workspace=true + +[dependencies] +backtrace.workspace=true +futures.workspace=true +http.workspace=true +serde.workspace=true +serde_derive.workspace=true +serde_json.workspace=true +slog-scope.workspace=true + +async-trait = "0.1.40" +diesel = { version = "1.4", features = ["mysql", "r2d2"] } +diesel_logger = "0.1.1" +diesel_migrations = { version = "1.4.0", features = ["mysql"] } +syncserver-common = { 
path = "../syncserver-common" } +syncserver-db-common = { path = "../syncserver-db-common" } +thiserror = "1.0.26" +tokenserver-common = { path = "../tokenserver-common" } +tokenserver-settings = { path = "../tokenserver-settings" } +# pinning to 0.2.4 due to high number of dependencies (actix, bb8, deadpool, etc.) +tokio = { version = "0.2.4", features = ["macros", "sync"] } + +[dev-dependencies] +env_logger.workspace=true + +syncserver-settings = { path = "../syncserver-settings" } diff --git a/syncserver/src/tokenserver/migrations/2021-07-16-001122_init/down.sql b/tokenserver-db/migrations/2021-07-16-001122_init/down.sql similarity index 100% rename from syncserver/src/tokenserver/migrations/2021-07-16-001122_init/down.sql rename to tokenserver-db/migrations/2021-07-16-001122_init/down.sql diff --git a/syncserver/src/tokenserver/migrations/2021-07-16-001122_init/up.sql b/tokenserver-db/migrations/2021-07-16-001122_init/up.sql similarity index 100% rename from syncserver/src/tokenserver/migrations/2021-07-16-001122_init/up.sql rename to tokenserver-db/migrations/2021-07-16-001122_init/up.sql diff --git a/syncserver/src/tokenserver/migrations/2021-08-03-234845_populate_services/down.sql b/tokenserver-db/migrations/2021-08-03-234845_populate_services/down.sql similarity index 100% rename from syncserver/src/tokenserver/migrations/2021-08-03-234845_populate_services/down.sql rename to tokenserver-db/migrations/2021-08-03-234845_populate_services/down.sql diff --git a/syncserver/src/tokenserver/migrations/2021-08-03-234845_populate_services/up.sql b/tokenserver-db/migrations/2021-08-03-234845_populate_services/up.sql similarity index 100% rename from syncserver/src/tokenserver/migrations/2021-08-03-234845_populate_services/up.sql rename to tokenserver-db/migrations/2021-08-03-234845_populate_services/up.sql diff --git a/syncserver/src/tokenserver/migrations/2021-09-30-142643_remove_foreign_key_constraints/down.sql 
b/tokenserver-db/migrations/2021-09-30-142643_remove_foreign_key_constraints/down.sql similarity index 100% rename from syncserver/src/tokenserver/migrations/2021-09-30-142643_remove_foreign_key_constraints/down.sql rename to tokenserver-db/migrations/2021-09-30-142643_remove_foreign_key_constraints/down.sql diff --git a/syncserver/src/tokenserver/migrations/2021-09-30-142643_remove_foreign_key_constraints/up.sql b/tokenserver-db/migrations/2021-09-30-142643_remove_foreign_key_constraints/up.sql similarity index 100% rename from syncserver/src/tokenserver/migrations/2021-09-30-142643_remove_foreign_key_constraints/up.sql rename to tokenserver-db/migrations/2021-09-30-142643_remove_foreign_key_constraints/up.sql diff --git a/syncserver/src/tokenserver/migrations/2021-09-30-142654_remove_node_defaults/down.sql b/tokenserver-db/migrations/2021-09-30-142654_remove_node_defaults/down.sql similarity index 100% rename from syncserver/src/tokenserver/migrations/2021-09-30-142654_remove_node_defaults/down.sql rename to tokenserver-db/migrations/2021-09-30-142654_remove_node_defaults/down.sql diff --git a/syncserver/src/tokenserver/migrations/2021-09-30-142654_remove_node_defaults/up.sql b/tokenserver-db/migrations/2021-09-30-142654_remove_node_defaults/up.sql similarity index 100% rename from syncserver/src/tokenserver/migrations/2021-09-30-142654_remove_node_defaults/up.sql rename to tokenserver-db/migrations/2021-09-30-142654_remove_node_defaults/up.sql diff --git a/syncserver/src/tokenserver/migrations/2021-09-30-142746_add_indexes/down.sql b/tokenserver-db/migrations/2021-09-30-142746_add_indexes/down.sql similarity index 100% rename from syncserver/src/tokenserver/migrations/2021-09-30-142746_add_indexes/down.sql rename to tokenserver-db/migrations/2021-09-30-142746_add_indexes/down.sql diff --git a/syncserver/src/tokenserver/migrations/2021-09-30-142746_add_indexes/up.sql b/tokenserver-db/migrations/2021-09-30-142746_add_indexes/up.sql similarity index 100% rename 
from syncserver/src/tokenserver/migrations/2021-09-30-142746_add_indexes/up.sql rename to tokenserver-db/migrations/2021-09-30-142746_add_indexes/up.sql diff --git a/syncserver/src/tokenserver/migrations/2021-09-30-144043_remove_nodes_service_key/down.sql b/tokenserver-db/migrations/2021-09-30-144043_remove_nodes_service_key/down.sql similarity index 100% rename from syncserver/src/tokenserver/migrations/2021-09-30-144043_remove_nodes_service_key/down.sql rename to tokenserver-db/migrations/2021-09-30-144043_remove_nodes_service_key/down.sql diff --git a/syncserver/src/tokenserver/migrations/2021-09-30-144043_remove_nodes_service_key/up.sql b/tokenserver-db/migrations/2021-09-30-144043_remove_nodes_service_key/up.sql similarity index 100% rename from syncserver/src/tokenserver/migrations/2021-09-30-144043_remove_nodes_service_key/up.sql rename to tokenserver-db/migrations/2021-09-30-144043_remove_nodes_service_key/up.sql diff --git a/syncserver/src/tokenserver/migrations/2021-09-30-144225_remove_users_nodeid_key/down.sql b/tokenserver-db/migrations/2021-09-30-144225_remove_users_nodeid_key/down.sql similarity index 100% rename from syncserver/src/tokenserver/migrations/2021-09-30-144225_remove_users_nodeid_key/down.sql rename to tokenserver-db/migrations/2021-09-30-144225_remove_users_nodeid_key/down.sql diff --git a/syncserver/src/tokenserver/migrations/2021-09-30-144225_remove_users_nodeid_key/up.sql b/tokenserver-db/migrations/2021-09-30-144225_remove_users_nodeid_key/up.sql similarity index 100% rename from syncserver/src/tokenserver/migrations/2021-09-30-144225_remove_users_nodeid_key/up.sql rename to tokenserver-db/migrations/2021-09-30-144225_remove_users_nodeid_key/up.sql diff --git a/syncserver/src/tokenserver/migrations/2021-12-22-160451_remove_services/down.sql b/tokenserver-db/migrations/2021-12-22-160451_remove_services/down.sql similarity index 100% rename from syncserver/src/tokenserver/migrations/2021-12-22-160451_remove_services/down.sql rename to 
tokenserver-db/migrations/2021-12-22-160451_remove_services/down.sql diff --git a/syncserver/src/tokenserver/migrations/2021-12-22-160451_remove_services/up.sql b/tokenserver-db/migrations/2021-12-22-160451_remove_services/up.sql similarity index 100% rename from syncserver/src/tokenserver/migrations/2021-12-22-160451_remove_services/up.sql rename to tokenserver-db/migrations/2021-12-22-160451_remove_services/up.sql diff --git a/tokenserver-db/src/error.rs b/tokenserver-db/src/error.rs new file mode 100644 index 00000000..a7e30a12 --- /dev/null +++ b/tokenserver-db/src/error.rs @@ -0,0 +1,104 @@ +use std::fmt; + +use backtrace::Backtrace; +use http::StatusCode; +use syncserver_common::{from_error, impl_fmt_display, InternalError}; +use syncserver_db_common::error::MysqlError; +use thiserror::Error; +use tokenserver_common::TokenserverError; + +pub(crate) type DbFuture<'a, T> = syncserver_db_common::DbFuture<'a, T, DbError>; +pub(crate) type DbResult = Result; + +/// An error type that represents any database-related errors that may occur while processing a +/// tokenserver request. 
+#[derive(Debug)] +pub struct DbError { + kind: DbErrorKind, + pub status: StatusCode, + pub backtrace: Box, +} + +impl DbError { + pub(crate) fn internal(msg: String) -> Self { + DbErrorKind::Internal(msg).into() + } +} + +#[derive(Debug, Error)] +enum DbErrorKind { + #[error("{}", _0)] + Mysql(MysqlError), + + #[error("Unexpected error: {}", _0)] + Internal(String), +} + +impl From for DbError { + fn from(kind: DbErrorKind) -> Self { + match kind { + DbErrorKind::Mysql(ref mysql_error) => Self { + status: mysql_error.status, + backtrace: Box::new(mysql_error.backtrace.clone()), + kind, + }, + DbErrorKind::Internal(_) => Self { + kind, + status: StatusCode::INTERNAL_SERVER_ERROR, + backtrace: Box::new(Backtrace::new()), + }, + } + } +} + +impl From for TokenserverError { + fn from(db_error: DbError) -> Self { + TokenserverError { + description: db_error.to_string(), + context: db_error.to_string(), + backtrace: db_error.backtrace, + http_status: if db_error.status.is_server_error() { + // Use the status code from the DbError if it already suggests an internal error; + // it might be more specific than `StatusCode::SERVICE_UNAVAILABLE` + db_error.status + } else { + StatusCode::SERVICE_UNAVAILABLE + }, + // An unhandled DbError in the Tokenserver code is an internal error + ..TokenserverError::internal_error() + } + } +} + +impl InternalError for DbError { + fn internal_error(message: String) -> Self { + DbErrorKind::Internal(message).into() + } +} + +impl_fmt_display!(DbError, DbErrorKind); + +from_error!( + diesel::result::Error, + DbError, + |error: diesel::result::Error| DbError::from(DbErrorKind::Mysql(MysqlError::from(error))) +); +from_error!( + diesel::result::ConnectionError, + DbError, + |error: diesel::result::ConnectionError| DbError::from(DbErrorKind::Mysql(MysqlError::from( + error + ))) +); +from_error!( + diesel::r2d2::PoolError, + DbError, + |error: diesel::r2d2::PoolError| DbError::from(DbErrorKind::Mysql(MysqlError::from(error))) +); 
+from_error!( + diesel_migrations::RunMigrationsError, + DbError, + |error: diesel_migrations::RunMigrationsError| DbError::from(DbErrorKind::Mysql( + MysqlError::from(error) + )) +); diff --git a/tokenserver-db/src/lib.rs b/tokenserver-db/src/lib.rs new file mode 100644 index 00000000..1b9f86c6 --- /dev/null +++ b/tokenserver-db/src/lib.rs @@ -0,0 +1,13 @@ +extern crate diesel; +#[macro_use] +extern crate diesel_migrations; + +mod error; +pub mod mock; +mod models; +pub mod params; +mod pool; +pub mod results; + +pub use models::{Db, TokenserverDb}; +pub use pool::{DbPool, TokenserverPool}; diff --git a/syncserver/src/tokenserver/db/mock.rs b/tokenserver-db/src/mock.rs similarity index 97% rename from syncserver/src/tokenserver/db/mock.rs rename to tokenserver-db/src/mock.rs index 871e6a4e..29041091 100644 --- a/syncserver/src/tokenserver/db/mock.rs +++ b/tokenserver-db/src/mock.rs @@ -2,9 +2,10 @@ use async_trait::async_trait; use futures::future; -use syncserver_db_common::{error::DbError, GetPoolState, PoolState}; +use syncserver_db_common::{GetPoolState, PoolState}; -use super::models::{Db, DbFuture}; +use super::error::{DbError, DbFuture}; +use super::models::Db; use super::params; use super::pool::DbPool; use super::results; diff --git a/syncserver/src/tokenserver/db/models.rs b/tokenserver-db/src/models.rs similarity index 97% rename from syncserver/src/tokenserver/db/models.rs rename to tokenserver-db/src/models.rs index a94be926..f1cc51de 100644 --- a/syncserver/src/tokenserver/db/models.rs +++ b/tokenserver-db/src/models.rs @@ -1,4 +1,3 @@ -use actix_web::http::StatusCode; use diesel::{ mysql::MysqlConnection, r2d2::{ConnectionManager, PooledConnection}, @@ -7,25 +6,24 @@ use diesel::{ }; #[cfg(test)] use diesel_logger::LoggingConnection; -use futures::future::LocalBoxFuture; -use syncserver_db_common::error::DbError; +use http::StatusCode; +use syncserver_common::{BlockingThreadpool, Metrics}; +use syncserver_db_common::{sync_db_method, DbFuture}; use 
std::{ - result, sync::Arc, time::{SystemTime, UNIX_EPOCH}, }; -use super::{params, results}; -use crate::server::{metrics::Metrics, BlockingThreadpool}; -use crate::sync_db_method; +use super::{ + error::{DbError, DbResult}, + params, results, +}; /// The maximum possible generation number. Used as a tombstone to mark users that have been /// "retired" from the db. const MAX_GENERATION: i64 = i64::MAX; -pub type DbFuture<'a, T> = LocalBoxFuture<'a, Result>; -pub type DbResult = result::Result; type Conn = PooledConnection>; #[derive(Clone)] @@ -37,7 +35,7 @@ pub struct TokenserverDb { /// the thread pool but does not provide Send as the underlying db /// conn. structs are !Sync (Arc requires both for Send). See the Send impl /// below. - pub(super) inner: Arc, + inner: Arc, metrics: Metrics, service_id: Option, spanner_node_id: Option, @@ -49,7 +47,7 @@ pub struct TokenserverDb { /// queued to the thread pool via Futures, naturally serialized. unsafe impl Send for TokenserverDb {} -pub struct DbInner { +struct DbInner { #[cfg(not(test))] pub(super) conn: Conn, #[cfg(test)] @@ -255,7 +253,7 @@ impl TokenserverDb { .get_result::(&self.inner.conn) .map_err(|e| { let mut db_error = - DbError::internal(&format!("unable to get Spanner node: {}", e)); + DbError::internal(format!("unable to get Spanner node: {}", e)); db_error.status = StatusCode::SERVICE_UNAVAILABLE; db_error }) @@ -289,7 +287,7 @@ impl TokenserverDb { } } - let mut db_error = DbError::internal("unable to get a node"); + let mut db_error = DbError::internal("unable to get a node".to_owned()); db_error.status = StatusCode::SERVICE_UNAVAILABLE; Err(db_error) } @@ -463,7 +461,7 @@ impl TokenserverDb { // The most up-to-date user doesn't have a node and is retired. This is an internal // service error for compatibility reasons (the legacy Tokenserver returned an // internal service error in this situation). 
- (_, None) => Err(DbError::internal("Tokenserver user retired")), + (_, None) => Err(DbError::internal("Tokenserver user retired".to_owned())), } } } @@ -682,7 +680,7 @@ impl Db for TokenserverDb { #[cfg(test)] sync_db_method!(get_user, get_user_sync, GetUser); - fn check(&self) -> DbFuture<'_, results::Check> { + fn check(&self) -> DbFuture<'_, results::Check, DbError> { let db = self.clone(); Box::pin(self.blocking_threadpool.spawn(move || db.check_sync())) } @@ -718,63 +716,82 @@ impl Db for TokenserverDb { } pub trait Db { - fn replace_user(&self, params: params::ReplaceUser) -> DbFuture<'_, results::ReplaceUser>; + fn replace_user( + &self, + params: params::ReplaceUser, + ) -> DbFuture<'_, results::ReplaceUser, DbError>; - fn replace_users(&self, params: params::ReplaceUsers) -> DbFuture<'_, results::ReplaceUsers>; + fn replace_users( + &self, + params: params::ReplaceUsers, + ) -> DbFuture<'_, results::ReplaceUsers, DbError>; - fn post_user(&self, params: params::PostUser) -> DbFuture<'_, results::PostUser>; + fn post_user(&self, params: params::PostUser) -> DbFuture<'_, results::PostUser, DbError>; - fn put_user(&self, params: params::PutUser) -> DbFuture<'_, results::PutUser>; + fn put_user(&self, params: params::PutUser) -> DbFuture<'_, results::PutUser, DbError>; - fn check(&self) -> DbFuture<'_, results::Check>; + fn check(&self) -> DbFuture<'_, results::Check, DbError>; - fn get_node_id(&self, params: params::GetNodeId) -> DbFuture<'_, results::GetNodeId>; + fn get_node_id(&self, params: params::GetNodeId) -> DbFuture<'_, results::GetNodeId, DbError>; - fn get_best_node(&self, params: params::GetBestNode) -> DbFuture<'_, results::GetBestNode>; + fn get_best_node( + &self, + params: params::GetBestNode, + ) -> DbFuture<'_, results::GetBestNode, DbError>; fn add_user_to_node( &self, params: params::AddUserToNode, - ) -> DbFuture<'_, results::AddUserToNode>; + ) -> DbFuture<'_, results::AddUserToNode, DbError>; - fn get_users(&self, params: 
params::GetUsers) -> DbFuture<'_, results::GetUsers>; + fn get_users(&self, params: params::GetUsers) -> DbFuture<'_, results::GetUsers, DbError>; fn get_or_create_user( &self, params: params::GetOrCreateUser, - ) -> DbFuture<'_, results::GetOrCreateUser>; + ) -> DbFuture<'_, results::GetOrCreateUser, DbError>; - fn get_service_id(&self, params: params::GetServiceId) -> DbFuture<'_, results::GetServiceId>; + fn get_service_id( + &self, + params: params::GetServiceId, + ) -> DbFuture<'_, results::GetServiceId, DbError>; #[cfg(test)] fn set_user_created_at( &self, params: params::SetUserCreatedAt, - ) -> DbFuture<'_, results::SetUserCreatedAt>; + ) -> DbFuture<'_, results::SetUserCreatedAt, DbError>; #[cfg(test)] fn set_user_replaced_at( &self, params: params::SetUserReplacedAt, - ) -> DbFuture<'_, results::SetUserReplacedAt>; + ) -> DbFuture<'_, results::SetUserReplacedAt, DbError>; #[cfg(test)] - fn get_user(&self, params: params::GetUser) -> DbFuture<'_, results::GetUser>; + fn get_user(&self, params: params::GetUser) -> DbFuture<'_, results::GetUser, DbError>; #[cfg(test)] - fn post_node(&self, params: params::PostNode) -> DbFuture<'_, results::PostNode>; + fn post_node(&self, params: params::PostNode) -> DbFuture<'_, results::PostNode, DbError>; #[cfg(test)] - fn get_node(&self, params: params::GetNode) -> DbFuture<'_, results::GetNode>; + fn get_node(&self, params: params::GetNode) -> DbFuture<'_, results::GetNode, DbError>; #[cfg(test)] - fn unassign_node(&self, params: params::UnassignNode) -> DbFuture<'_, results::UnassignNode>; + fn unassign_node( + &self, + params: params::UnassignNode, + ) -> DbFuture<'_, results::UnassignNode, DbError>; #[cfg(test)] - fn remove_node(&self, params: params::RemoveNode) -> DbFuture<'_, results::RemoveNode>; + fn remove_node(&self, params: params::RemoveNode) + -> DbFuture<'_, results::RemoveNode, DbError>; #[cfg(test)] - fn post_service(&self, params: params::PostService) -> DbFuture<'_, results::PostService>; + fn 
post_service( + &self, + params: params::PostService, + ) -> DbFuture<'_, results::PostService, DbError>; } #[cfg(test)] @@ -786,7 +803,7 @@ mod tests { use syncserver_settings::Settings; - use crate::tokenserver::db::pool::{DbPool, TokenserverPool}; + use crate::pool::{DbPool, TokenserverPool}; #[tokio::test] async fn test_update_generation() -> DbResult<()> { diff --git a/syncserver/src/tokenserver/db/params.rs b/tokenserver-db/src/params.rs similarity index 100% rename from syncserver/src/tokenserver/db/params.rs rename to tokenserver-db/src/params.rs diff --git a/syncserver/src/tokenserver/db/pool.rs b/tokenserver-db/src/pool.rs similarity index 90% rename from syncserver/src/tokenserver/db/pool.rs rename to tokenserver-db/src/pool.rs index cf8f2c3f..dd100abb 100644 --- a/syncserver/src/tokenserver/db/pool.rs +++ b/tokenserver-db/src/pool.rs @@ -4,25 +4,27 @@ use async_trait::async_trait; use diesel::{ mysql::MysqlConnection, r2d2::{ConnectionManager, Pool}, + Connection, }; use diesel_logger::LoggingConnection; -use syncserver_db_common::{error::DbError, GetPoolState, PoolState}; +use syncserver_common::{BlockingThreadpool, Metrics}; +#[cfg(debug_assertions)] +use syncserver_db_common::test::TestTransactionCustomizer; +use syncserver_db_common::{GetPoolState, PoolState}; use tokenserver_settings::Settings; -use super::models::{Db, DbResult, TokenserverDb}; -use crate::diesel::Connection; -use crate::server::{metrics::Metrics, BlockingThreadpool}; +use super::{ + error::{DbError, DbResult}, + models::{Db, TokenserverDb}, +}; -#[cfg(test)] -use crate::db::mysql::TestTransactionCustomizer; - -embed_migrations!("src/tokenserver/migrations"); +embed_migrations!(); /// Run the diesel embedded migrations /// /// Mysql DDL statements implicitly commit which could disrupt MysqlPool's /// begin_test_transaction during tests. So this runs on its own separate conn. 
-pub fn run_embedded_migrations(database_url: &str) -> DbResult<()> { +fn run_embedded_migrations(database_url: &str) -> DbResult<()> { let conn = MysqlConnection::establish(database_url)?; embedded_migrations::run(&LoggingConnection::new(conn))?; @@ -60,7 +62,7 @@ impl TokenserverPool { )) .min_idle(settings.database_pool_min_idle); - #[cfg(test)] + #[cfg(debug_assertions)] let builder = if _use_test_transactions { builder.connection_customizer(Box::new(TestTransactionCustomizer)) } else { diff --git a/syncserver/src/tokenserver/db/results.rs b/tokenserver-db/src/results.rs similarity index 100% rename from syncserver/src/tokenserver/db/results.rs rename to tokenserver-db/src/results.rs diff --git a/tokenserver-settings/Cargo.toml b/tokenserver-settings/Cargo.toml index 591c932f..52b3eb33 100644 --- a/tokenserver-settings/Cargo.toml +++ b/tokenserver-settings/Cargo.toml @@ -1,8 +1,11 @@ [package] name = "tokenserver-settings" -version = "0.13.7" -edition = "2021" +version.workspace=true +license.workspace=true +authors.workspace=true +edition.workspace=true [dependencies] -serde = "1.0" +serde.workspace=true + tokenserver-common = { path = "../tokenserver-common" }