Merge branch 'master' into release/0.21

This commit is contained in:
Philip Jenvey 2025-09-23 23:51:34 -07:00 committed by GitHub
commit a29c8a7d48
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
26 changed files with 1135 additions and 428 deletions

Cargo.lock (generated)
View File

@ -846,6 +846,7 @@ dependencies = [
"mysql_common",
"scoped-futures",
"tokio",
"tokio-postgres",
]
[[package]]
@ -1021,6 +1022,12 @@ dependencies = [
"windows-sys 0.60.2",
]
[[package]]
name = "fallible-iterator"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7"
[[package]]
name = "findshlibs"
version = "0.10.2"
@ -1839,6 +1846,17 @@ dependencies = [
"windows-targets 0.53.3",
]
[[package]]
name = "libredox"
version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "391290121bad3d37fbddad76d8f5d1c1c314cfc646d143d7e07a3086ddff0ce3"
dependencies = [
"bitflags 2.9.3",
"libc",
"redox_syscall",
]
[[package]]
name = "libz-sys"
version = "1.1.22"
@ -1917,6 +1935,16 @@ version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154"
[[package]]
name = "md-5"
version = "0.10.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d89e7ee0cfbedfc4da3340218492196241d89eefb6dab27de5df917a6d2e78cf"
dependencies = [
"cfg-if",
"digest",
]
[[package]]
name = "memchr"
version = "2.7.5"
@ -2245,6 +2273,24 @@ version = "2.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220"
[[package]]
name = "phf"
version = "0.11.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1fd6780a80ae0c52cc120a26a1a42c1ae51b247a253e4e06113d23d2c2edd078"
dependencies = [
"phf_shared",
]
[[package]]
name = "phf_shared"
version = "0.11.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "67eabc2ef2a60eb7faa00097bd1ffdb5bd28e62bf39990626a582201b7a754e5"
dependencies = [
"siphasher",
]
[[package]]
name = "pin-project-lite"
version = "0.2.16"
@ -2291,6 +2337,35 @@ dependencies = [
"portable-atomic",
]
[[package]]
name = "postgres-protocol"
version = "0.6.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "76ff0abab4a9b844b93ef7b81f1efc0a366062aaef2cd702c76256b5dc075c54"
dependencies = [
"base64",
"byteorder",
"bytes",
"fallible-iterator",
"hmac",
"md-5",
"memchr",
"rand 0.9.2",
"sha2",
"stringprep",
]
[[package]]
name = "postgres-types"
version = "0.2.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "613283563cd90e1dfc3518d548caee47e0e725455ed619881f5cf21f36de4b48"
dependencies = [
"bytes",
"fallible-iterator",
"postgres-protocol",
]
[[package]]
name = "potential_utf"
version = "0.1.3"
@ -3024,6 +3099,12 @@ version = "2.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bbbb5d9659141646ae647b42fe094daf6c6192d1620870b449d9557f748b2daa"
[[package]]
name = "siphasher"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d"
[[package]]
name = "slab"
version = "0.4.11"
@ -3152,6 +3233,17 @@ version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f"
[[package]]
name = "stringprep"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7b4df3d392d81bd458a8a621b8bffbd2302a12ffe288a9d931670948749463b1"
dependencies = [
"unicode-bidi",
"unicode-normalization",
"unicode-properties",
]
[[package]]
name = "strsim"
version = "0.8.0"
@ -3591,22 +3683,56 @@ name = "tokenserver-db"
version = "0.21.1"
dependencies = [
"async-trait",
"backtrace",
"deadpool",
"diesel",
"diesel-async",
"diesel_logger",
"diesel_migrations",
"env_logger 0.11.8",
"futures 0.3.31",
"http 1.3.1",
"syncserver-common",
"syncserver-db-common",
"syncserver-settings",
"tokenserver-common",
"tokenserver-db-common",
"tokenserver-db-postgres",
"tokenserver-settings",
"tokio",
"url",
]
[[package]]
name = "tokenserver-db-common"
version = "0.20.1"
dependencies = [
"async-trait",
"backtrace",
"deadpool",
"diesel",
"diesel_migrations",
"http 1.3.1",
"serde 1.0.219",
"slog-scope",
"syncserver-common",
"syncserver-db-common",
"syncserver-settings",
"thiserror 1.0.69",
"tokenserver-common",
]
[[package]]
name = "tokenserver-db-postgres"
version = "0.20.1"
dependencies = [
"async-trait",
"deadpool",
"diesel",
"diesel-async",
"diesel_logger",
"diesel_migrations",
"syncserver-common",
"syncserver-db-common",
"tokenserver-common",
"tokenserver-db-common",
"tokenserver-settings",
"tokio",
]
@ -3658,6 +3784,32 @@ dependencies = [
"syn",
]
[[package]]
name = "tokio-postgres"
version = "0.7.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6c95d533c83082bb6490e0189acaa0bbeef9084e60471b696ca6988cd0541fb0"
dependencies = [
"async-trait",
"byteorder",
"bytes",
"fallible-iterator",
"futures-channel",
"futures-util",
"log",
"parking_lot",
"percent-encoding",
"phf",
"pin-project-lite",
"postgres-protocol",
"postgres-types",
"rand 0.9.2",
"socket2 0.5.10",
"tokio",
"tokio-util",
"whoami",
]
[[package]]
name = "tokio-rustls"
version = "0.26.2"
@ -3843,12 +3995,33 @@ dependencies = [
"libc",
]
[[package]]
name = "unicode-bidi"
version = "0.3.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c1cb5db39152898a79168971543b1cb5020dff7fe43c8dc468b0885f5e29df5"
[[package]]
name = "unicode-ident"
version = "1.0.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512"
[[package]]
name = "unicode-normalization"
version = "0.1.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956"
dependencies = [
"tinyvec",
]
[[package]]
name = "unicode-properties"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e70f2a8b45122e719eb623c01822704c4e0907e7e426a05927e1a1cfff5b75d0"
[[package]]
name = "unicode-width"
version = "0.1.14"
@ -4002,6 +4175,12 @@ dependencies = [
"wit-bindgen",
]
[[package]]
name = "wasite"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b"
[[package]]
name = "wasm-bindgen"
version = "0.2.100"
@ -4114,6 +4293,17 @@ dependencies = [
"rustix",
]
[[package]]
name = "whoami"
version = "1.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5d4a4db5077702ca3015d3d02d74974948aba2ad9e12ab7df718ee64ccd7e97d"
dependencies = [
"libredox",
"wasite",
"web-sys",
]
[[package]]
name = "winapi"
version = "0.3.9"

View File

@ -15,6 +15,7 @@ members = [
"tokenserver-settings",
"syncserver",
"tokenserver-db-postgres",
"tokenserver-db-common",
]
default-members = ["syncserver"]
@ -31,7 +32,7 @@ license = "MPL-2.0"
[workspace.dependencies]
actix-web = { version = "4", default-features = false, features = ["compat", "http2", "macros"] }
async-trait = "0.1.88"
docopt = "1.1"
base64 = "0.22"

View File

@ -21,6 +21,7 @@ INTEGRATION_TEST_DIR_TOKENSERVER := $(TOOLS_DIR)/integration_tests/tokenserver
SPANNER_DIR := $(TOOLS_DIR)/spanner
TOKENSERVER_UTIL_DIR := $(TOOLS_DIR)/tokenserver
LOAD_TEST_DIR := $(TOOLS_DIR)/tokenserver/loadtests
RUST_LOG ?= debug
# In order to be consumed by the ETE Test Metric Pipeline, files need to follow a strict naming
# convention: {job_number}__{utc_epoch_datetime}__{workflow}__{test_suite}__results{-index}.xml
@ -44,7 +45,7 @@ SYNC_SYNCSTORAGE__DATABASE_URL ?= mysql://sample_user:sample_password@localhost/
SYNC_TOKENSERVER__DATABASE_URL ?= mysql://sample_user:sample_password@localhost/tokenserver_rs
SRC_ROOT = $(shell pwd)
PYTHON_SITE_PACKGES = $(shell poetry run python -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())")
PYTHON_SITE_PACKAGES = $(shell poetry run python -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())")
clippy_mysql:
# Matches what's run in circleci
@ -104,8 +105,8 @@ docker_run_spanner_e2e_tests:
run_mysql: $(INSTALL_STAMP)
# See https://github.com/PyO3/pyo3/issues/1741 for discussion re: why we need to set the
# below env var
PYTHONPATH=$(PYTHON_SITE_PACKGES) \
RUST_LOG=debug \
PYTHONPATH=$(PYTHON_SITE_PACKAGES) \
RUST_LOG=$(RUST_LOG) \
RUST_BACKTRACE=full \
cargo run --no-default-features --features=syncstorage-db/mysql --features=py_verifier -- --config config/local.toml
@ -114,8 +115,8 @@ run_spanner: $(INSTALL_STAMP)
GRPC_DEFAULT_SSL_ROOTS_FILE_PATH=$(PATH_TO_GRPC_CERT) \
# See https://github.com/PyO3/pyo3/issues/1741 for discussion re: why we need to set the
# below env var
PYTHONPATH=$(PYTHON_SITE_PACKGES) \
RUST_LOG=debug \
PYTHONPATH=$(PYTHON_SITE_PACKAGES) \
RUST_LOG=$(RUST_LOG) \
RUST_BACKTRACE=full \
cargo run --no-default-features --features=syncstorage-db/spanner --features=py_verifier -- --config config/local.toml
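With `RUST_LOG ?= debug`, the log level still defaults to `debug` but can now be overridden per invocation, e.g. `make run_mysql RUST_LOG=info`.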

View File

@ -28,7 +28,7 @@ Mozilla Sync Storage built with [Rust](https://rust-lang.org).
## System Requirements
- cmake
- cmake (>= 3.5 and < 3.30)
- gcc
- [golang](https://golang.org/doc/install)
- libcurl4-openssl-dev
@ -77,6 +77,8 @@ GRANT ALL PRIVILEGES on syncstorage_rs.* to sample_user@localhost;
GRANT ALL PRIVILEGES on tokenserver_rs.* to sample_user@localhost;
```
Note that if you are running MySQL with Docker and encounter a socket connection error, change the MySQL DSN host from `localhost` to `127.0.0.1` to force a TCP connection.
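For example, `mysql://sample_user:sample_password@localhost/syncstorage_rs` becomes `mysql://sample_user:sample_password@127.0.0.1/syncstorage_rs`.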
### Spanner
#### Authenticating via OAuth
@ -199,30 +201,9 @@ This requires access to [Google Cloud Rust (raw)](https://crates.io/crates/googl
This will walk you through the steps to connect this project to your local copy of Firefox.
1. Follow the steps outlined above for running this project using [MySQL](https://github.com/mozilla-services/syncstorage-rs#mysql).
2. Setup a local copy of [syncserver](https://github.com/mozilla-services/syncserver), with a few special changes to [syncserver.ini](https://github.com/mozilla-services/syncserver/blob/master/syncserver.ini); make sure that you're using the following values (in addition to all of the other defaults):
```ini
[server:main]
port = 5000
[syncserver]
public_url = http://localhost:5000/
# This value needs to match your "master_secret" for syncstorage-rs!
secret = INSERT_SECRET_KEY_HERE
[tokenserver]
node_url = http://localhost:8000
sqluri = pymysql://sample_user:sample_password@127.0.0.1/syncstorage_rs
[endpoints]
sync-1.5 = "http://localhost:8000/1.5/1"
```
3. In Firefox, go to `about:config`. Change `identity.sync.tokenserver.uri` to `http://localhost:5000/1.0/sync/1.5`.
4. Restart Firefox. Now, try syncing. You should see new BSOs in your local MySQL instance.
1. Follow the steps outlined above for running this project using MySQL or Spanner.
2. In Firefox, go to `about:config`. Change `identity.sync.tokenserver.uri` to `http://localhost:8000/1.0/sync/1.5`.
3. Restart Firefox. Now, try syncing. You should see new BSOs in your MySQL or Spanner instance.
## Logging

View File

@ -7,7 +7,7 @@ human_logs = 1
# Example MySQL DSN:
syncstorage.database_url = "mysql://sample_user:sample_password@localhost/syncstorage_rs"
# Example Spanner DSN:
# database_url="spanner://projects/SAMPLE_GCP_PROJECT/instances/SAMPLE_SPANNER_INSTANCE/databases/SAMPLE_SPANNER_DB"
# syncstorage.database_url="spanner://projects/SAMPLE_GCP_PROJECT/instances/SAMPLE_SPANNER_INSTANCE/databases/SAMPLE_SPANNER_DB"
# enable quota limits
syncstorage.enable_quota = 0
# set the quota limit to 2GB.

View File

@ -45,10 +45,12 @@ async fn main() -> Result<(), Box<dyn Error>> {
// Note: set "debug: true," to diagnose sentry issues
transport: Some(Arc::new(curl_transport_factory)),
release: sentry::release_name!(),
environment: Some(settings.environment.clone().into()),
..sentry::ClientOptions::default()
});
opts.integrations.retain(|i| i.name() != "debug-images");
opts.default_integrations = false;
let _sentry = sentry::init(opts);
// Setup and run the server

View File

@ -15,7 +15,7 @@ use syncserver_common::{BlockingThreadpool, Metrics};
use tokenserver_auth::JWTVerifierImpl;
use tokenserver_auth::{oauth, VerifyToken};
use tokenserver_common::NodeType;
use tokenserver_db::{DbPool, TokenserverPool};
use tokenserver_db::{pool_from_settings, DbPool};
use tokenserver_settings::Settings;
use crate::{error::ApiError, server::user_agent};
@ -72,14 +72,13 @@ impl ServerState {
);
let use_test_transactions = false;
let db_pool =
TokenserverPool::new(settings, &Metrics::from(&metrics), use_test_transactions)
.expect("Failed to create Tokenserver pool");
let db_pool = pool_from_settings(settings, &Metrics::from(&metrics), use_test_transactions)
.expect("Failed to create Tokenserver pool");
Ok(ServerState {
fxa_email_domain: settings.fxa_email_domain.clone(),
fxa_metrics_hash_secret: settings.fxa_metrics_hash_secret.clone(),
oauth_verifier,
db_pool: Box::new(db_pool),
db_pool,
node_capacity_release_rate: settings.node_capacity_release_rate,
node_type: settings.node_type,
metrics,

View File

@ -0,0 +1,21 @@
[package]
name = "tokenserver-db-common"
version.workspace = true
authors.workspace = true
edition.workspace = true
license.workspace = true
[dependencies]
async-trait.workspace = true
backtrace.workspace = true
deadpool.workspace = true
diesel.workspace = true
diesel_migrations.workspace = true
http.workspace = true
serde.workspace = true
slog-scope.workspace = true
thiserror.workspace = true
syncserver-common = { path = "../syncserver-common" }
syncserver-db-common = { path = "../syncserver-db-common" }
tokenserver-common = { path = "../tokenserver-common" }

View File

@ -7,8 +7,6 @@ use syncserver_db_common::error::SqlError;
use thiserror::Error;
use tokenserver_common::TokenserverError;
pub(crate) type DbResult<T> = Result<T, DbError>;
/// An error type that represents any database-related errors that may occur while processing a
/// tokenserver request.
#[derive(Debug)]
@ -19,11 +17,11 @@ pub struct DbError {
}
impl DbError {
pub(crate) fn internal(msg: String) -> Self {
pub fn internal(msg: String) -> Self {
DbErrorKind::Internal(msg).into()
}
pub(crate) fn pool_timeout(timeout_type: deadpool::managed::TimeoutType) -> Self {
pub fn pool_timeout(timeout_type: deadpool::managed::TimeoutType) -> Self {
DbErrorKind::PoolTimeout(timeout_type).into()
}
}
@ -68,9 +66,9 @@ enum DbErrorKind {
impl From<DbErrorKind> for DbError {
fn from(kind: DbErrorKind) -> Self {
match kind {
DbErrorKind::Sql(ref mysql_error) => Self {
status: mysql_error.status,
backtrace: Box::new(mysql_error.backtrace.clone()),
DbErrorKind::Sql(ref sqle) => Self {
status: sqle.status,
backtrace: Box::new(sqle.backtrace.clone()),
kind,
},
_ => Self {

View File

@ -0,0 +1,313 @@
#[macro_use]
extern crate slog_scope;
mod error;
pub mod params;
pub mod results;
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use async_trait::async_trait;
use syncserver_common::Metrics;
use syncserver_db_common::{GetPoolState, PoolState};
pub use crate::error::DbError;
pub type DbResult<T> = Result<T, DbError>;
/// The maximum possible generation number. Used as a tombstone to mark users that have been
/// "retired" from the db.
pub const MAX_GENERATION: i64 = i64::MAX;
#[async_trait(?Send)]
pub trait DbPool: Sync + Send + GetPoolState {
async fn init(&mut self) -> DbResult<()>;
async fn get(&self) -> DbResult<Box<dyn Db>>;
fn box_clone(&self) -> Box<dyn DbPool>;
}
impl GetPoolState for Box<dyn DbPool> {
fn state(&self) -> PoolState {
(**self).state()
}
}
impl Clone for Box<dyn DbPool> {
fn clone(&self) -> Box<dyn DbPool> {
self.box_clone()
}
}
#[async_trait(?Send)]
pub trait Db {
/// Return the Db instance timeout duration.
fn timeout(&self) -> Option<Duration> {
None
}
/// Mark the user with the given uid and service ID as being replaced.
async fn replace_user(&mut self, params: params::ReplaceUser)
-> DbResult<results::ReplaceUser>;
/// Mark users matching the given email and service ID as replaced.
async fn replace_users(
&mut self,
params: params::ReplaceUsers,
) -> DbResult<results::ReplaceUsers>;
/// Post complete user object and get last insert ID.
async fn post_user(&mut self, params: params::PostUser) -> DbResult<results::PostUser>;
/// Update a user, matching on service_id, email, generation, and the keys_changed_at timestamp.
async fn put_user(&mut self, params: params::PutUser) -> DbResult<results::PutUser>;
/// Check that the database is up, returning its health as a boolean.
async fn check(&mut self) -> DbResult<results::Check>;
/// Get Node ID based on service_id and node string.
async fn get_node_id(&mut self, params: params::GetNodeId) -> DbResult<results::GetNodeId>;
/// Get the ID and string identifier of the best node to assign: the least-loaded
/// available node, taking the node capacity release rate into account.
async fn get_best_node(
&mut self,
params: params::GetBestNode,
) -> DbResult<results::GetBestNode>;
/// Add a user to a specific node, based on service and node string.
async fn add_user_to_node(
&mut self,
params: params::AddUserToNode,
) -> DbResult<results::AddUserToNode>;
/// Get a vector of users for the given service and FxA email.
async fn get_users(&mut self, params: params::GetUsers) -> DbResult<results::GetUsers>;
/// Get the service ID for the given service string identifier.
async fn get_service_id(
&mut self,
params: params::GetServiceId,
) -> DbResult<results::GetServiceId>;
/// Return the Db instance Metrics.
fn metrics(&self) -> &Metrics;
/// Gets the user with the given email and service ID.
/// If one doesn't exist, allocates a new user.
async fn get_or_create_user(
&mut self,
params: params::GetOrCreateUser,
) -> DbResult<results::GetOrCreateUser> {
let mut raw_users = self
.get_users(params::GetUsers {
service_id: params.service_id,
email: params.email.clone(),
})
.await?;
if raw_users.is_empty() {
// There are no users in the database with the given email and service ID, so
// allocate a new one.
let allocate_user_result = self
.allocate_user(params.clone() as params::AllocateUser)
.await?;
Ok(results::GetOrCreateUser {
uid: allocate_user_result.uid,
email: params.email,
client_state: params.client_state,
generation: params.generation,
node: allocate_user_result.node,
keys_changed_at: params.keys_changed_at,
created_at: allocate_user_result.created_at,
replaced_at: None,
first_seen_at: allocate_user_result.created_at,
old_client_states: vec![],
})
} else {
raw_users.sort_by_key(|raw_user| (raw_user.generation, raw_user.created_at));
raw_users.reverse();
// The user with the greatest `generation` and `created_at` is the current user
let raw_user = raw_users[0].clone();
// Collect any old client states that differ from the current client state
let old_client_states: Vec<String> = {
raw_users[1..]
.iter()
.map(|user| user.client_state.clone())
.filter(|client_state| client_state != &raw_user.client_state)
.collect()
};
// Make sure every old row is marked as replaced. They might not be, due to races in row
// creation.
for old_user in &raw_users[1..] {
if old_user.replaced_at.is_none() {
let params = params::ReplaceUser {
uid: old_user.uid,
service_id: params.service_id,
replaced_at: raw_user.created_at,
};
self.replace_user(params).await?;
}
}
let first_seen_at = raw_users[raw_users.len() - 1].created_at;
match (raw_user.replaced_at, raw_user.node) {
// If the most up-to-date user is marked as replaced or does not have a node
// assignment, allocate a new user. Note that, if the current user is marked
// as replaced, we do not want to create a new user with the account metadata
// in the parameters to this method. Rather, we want to create a duplicate of
// the replaced user assigned to a new node. This distinction is important
// because the account metadata in the parameters to this method may not match
// that currently stored on the most up-to-date user and may be invalid.
(Some(_), _) | (_, None) if raw_user.generation < MAX_GENERATION => {
let allocate_user_result = {
self.allocate_user(params::AllocateUser {
service_id: params.service_id,
email: params.email.clone(),
generation: raw_user.generation,
client_state: raw_user.client_state.clone(),
keys_changed_at: raw_user.keys_changed_at,
capacity_release_rate: params.capacity_release_rate,
})
.await?
};
Ok(results::GetOrCreateUser {
uid: allocate_user_result.uid,
email: params.email,
client_state: raw_user.client_state,
generation: raw_user.generation,
node: allocate_user_result.node,
keys_changed_at: raw_user.keys_changed_at,
created_at: allocate_user_result.created_at,
replaced_at: None,
first_seen_at,
old_client_states,
})
}
// The most up-to-date user has a node. Note that this user may be retired or
// replaced.
(_, Some(node)) => Ok(results::GetOrCreateUser {
uid: raw_user.uid,
email: params.email,
client_state: raw_user.client_state,
generation: raw_user.generation,
node,
keys_changed_at: raw_user.keys_changed_at,
created_at: raw_user.created_at,
replaced_at: None,
first_seen_at,
old_client_states,
}),
// The most up-to-date user doesn't have a node and is retired. This is an internal
// service error for compatibility reasons (the legacy Tokenserver returned an
// internal service error in this situation).
(_, None) => {
let uid = raw_user.uid;
warn!("Tokenserver user retired"; "uid" => &uid);
Err(DbError::internal("Tokenserver user retired".to_owned()))
}
}
}
}
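// Illustrative walkthrough of the branches above: suppose get_users returns two
// rows for the same email — A (generation 7, created_at 200, replaced_at set)
// and B (generation 5, created_at 100, replaced_at None). Sorting makes A the
// current user; B is marked replaced at A's created_at, B's client_state is
// collected into old_client_states if it differs from A's, first_seen_at is
// B's created_at, and because A is itself replaced, a fresh node is allocated
// carrying A's stored metadata rather than the caller's parameters.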
/// Creates a new user and assigns them to a node.
async fn allocate_user(
&mut self,
params: params::AllocateUser,
) -> DbResult<results::AllocateUser> {
let mut metrics = self.metrics().clone();
metrics.start_timer("storage.allocate_user", None);
// Get the least-loaded node
let node = self
.get_best_node(params::GetBestNode {
service_id: params.service_id,
capacity_release_rate: params.capacity_release_rate,
})
.await?;
// Decrement `available` and increment `current_load` on the node assigned to the user.
self.add_user_to_node(params::AddUserToNode {
service_id: params.service_id,
node: node.node.clone(),
})
.await?;
let created_at = {
let start = SystemTime::now();
start.duration_since(UNIX_EPOCH).unwrap().as_millis() as i64
};
let uid = self
.post_user(params::PostUser {
service_id: params.service_id,
email: params.email.clone(),
generation: params.generation,
client_state: params.client_state.clone(),
created_at,
node_id: node.id,
keys_changed_at: params.keys_changed_at,
})
.await?
.id;
Ok(results::AllocateUser {
uid,
node: node.node,
created_at,
})
}
// Internal methods used by the db tests
#[cfg(debug_assertions)]
async fn set_user_created_at(
&mut self,
params: params::SetUserCreatedAt,
) -> DbResult<results::SetUserCreatedAt>;
/// Update a user's replaced_at attribute by uid.
#[cfg(debug_assertions)]
async fn set_user_replaced_at(
&mut self,
params: params::SetUserReplacedAt,
) -> DbResult<results::SetUserReplacedAt>;
/// Get full user object based on passed user ID.
#[cfg(debug_assertions)]
async fn get_user(&mut self, params: params::GetUser) -> DbResult<results::GetUser>;
/// Create a complete node record and return its insert ID.
#[cfg(debug_assertions)]
async fn post_node(&mut self, params: params::PostNode) -> DbResult<results::PostNode>;
/// Get the complete node entry for the given ID.
#[cfg(debug_assertions)]
async fn get_node(&mut self, params: params::GetNode) -> DbResult<results::GetNode>;
/// Unassign the node with the given ID from `users`.
#[cfg(debug_assertions)]
async fn unassign_node(
&mut self,
params: params::UnassignNode,
) -> DbResult<results::UnassignNode>;
/// Remove a node by its node ID.
#[cfg(debug_assertions)]
async fn remove_node(&mut self, params: params::RemoveNode) -> DbResult<results::RemoveNode>;
#[cfg(debug_assertions)]
/// Creates new service and returns new service_id.
async fn post_service(&mut self, params: params::PostService)
-> DbResult<results::PostService>;
#[cfg(debug_assertions)]
fn set_spanner_node_id(&mut self, params: params::SpannerNodeId);
}
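A note on the `box_clone` method and the `Clone for Box<dyn DbPool>` impl near the top of this file: `Clone` cannot be a supertrait of an object-safe trait (its `clone` returns `Self`), so the boxed-clone indirection is the conventional workaround. A minimal, self-contained sketch of the same pattern, with illustrative names that are not from this codebase:
```rust
trait Animal {
    fn name(&self) -> String;
    // Each implementor clones itself behind a fresh Box.
    fn box_clone(&self) -> Box<dyn Animal>;
}

// Makes Box<dyn Animal> itself cloneable by deferring to box_clone.
impl Clone for Box<dyn Animal> {
    fn clone(&self) -> Box<dyn Animal> {
        self.box_clone()
    }
}

#[derive(Clone)]
struct Dog(String);

impl Animal for Dog {
    fn name(&self) -> String {
        self.0.clone()
    }
    fn box_clone(&self) -> Box<dyn Animal> {
        Box::new(self.clone())
    }
}

fn main() {
    let a: Box<dyn Animal> = Box::new(Dog("rex".into()));
    let b = a.clone(); // dispatches through box_clone
    assert_eq!(a.name(), b.name());
}
```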

View File

@ -96,30 +96,33 @@ pub struct GetServiceId {
pub service: String,
}
#[cfg(test)]
#[cfg(debug_assertions)]
pub struct SetUserCreatedAt {
pub uid: i64,
pub created_at: i64,
}
#[cfg(test)]
#[cfg(debug_assertions)]
pub struct SetUserReplacedAt {
pub uid: i64,
pub replaced_at: i64,
}
#[cfg(test)]
#[cfg(debug_assertions)]
#[derive(Default)]
pub struct GetUser {
pub id: i64,
}
#[cfg(test)]
#[cfg(debug_assertions)]
pub struct UnassignNode {
pub node_id: i64,
}
#[cfg(test)]
#[cfg(debug_assertions)]
pub struct RemoveNode {
pub node_id: i64,
}
#[cfg(debug_assertions)]
pub type SpannerNodeId = Option<i32>;

View File

@ -82,7 +82,7 @@ pub struct GetServiceId {
pub id: i32,
}
#[cfg(test)]
#[cfg(debug_assertions)]
#[derive(Debug, Default, Eq, PartialEq, QueryableByName)]
pub struct GetUser {
#[diesel(sql_type = Integer)]
@ -103,10 +103,10 @@ pub struct GetUser {
pub keys_changed_at: Option<i64>,
}
#[cfg(test)]
#[cfg(debug_assertions)]
pub type PostNode = LastInsertId;
#[cfg(test)]
#[cfg(debug_assertions)]
#[derive(Default, QueryableByName)]
pub struct GetNode {
#[diesel(sql_type = Bigint)]
@ -128,23 +128,23 @@ pub struct GetNode {
pub backoff: i32,
}
#[cfg(test)]
#[cfg(debug_assertions)]
#[derive(Default, QueryableByName)]
pub struct PostService {
#[diesel(sql_type = Integer)]
pub id: i32,
}
#[cfg(test)]
#[cfg(debug_assertions)]
pub type SetUserCreatedAt = ();
#[cfg(test)]
#[cfg(debug_assertions)]
pub type SetUserReplacedAt = ();
pub type Check = bool;
#[cfg(test)]
#[cfg(debug_assertions)]
pub type UnassignNode = ();
#[cfg(test)]
#[cfg(debug_assertions)]
pub type RemoveNode = ();

View File

@ -6,4 +6,16 @@ edition.workspace = true
license.workspace = true
[dependencies]
diesel = { workspace = true, features = ["postgres", "r2d2"] }
async-trait.workspace = true
deadpool.workspace = true
diesel = { workspace = true, features = ["postgres", "r2d2"] }
diesel-async = { workspace = true, features = ["postgres"] }
diesel_logger.workspace = true
diesel_migrations = { workspace = true, features = ["postgres"] }
tokio = { workspace = true, features = ["macros", "sync"] }
syncserver-common = { path = "../syncserver-common" }
syncserver-db-common = { path = "../syncserver-db-common" }
tokenserver-common = { path = "../tokenserver-common" }
tokenserver-db-common = { path = "../tokenserver-db-common" }
tokenserver-settings = { path = "../tokenserver-settings" }

View File

@ -2,7 +2,7 @@
CREATE TABLE services (
id SERIAL PRIMARY KEY,
service VARCHAR(30) UNIQUE,
pattern VARCHAR(128),
pattern VARCHAR(128)
);
CREATE TABLE nodes (
@ -34,4 +34,4 @@ CREATE INDEX lookup_idx ON users (email, service, created_at);
CREATE INDEX replaced_at_idx ON users (service, replaced_at);
CREATE INDEX node_idx ON users (nodeid);
CREATE INDEX node_idx ON users (nodeid);
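Note that pool initialization later looks up a `sync-1.5` row in this `services` table (see `init_service_id` further down), so a fresh Postgres database needs that service row seeded before the Tokenserver can resolve a service ID.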

View File

@ -1 +1,13 @@
#![allow(non_local_definitions)]
extern crate diesel;
extern crate diesel_migrations;
mod models;
mod orm_models;
mod pool;
mod schema;
pub use models::TokenserverPgDb;
pub use orm_models::{Node, Service, User};
pub use pool::TokenserverPgPool;
pub use tokenserver_db_common::{params, results, Db, DbPool};

View File

@ -0,0 +1,172 @@
use std::time::Duration;
use super::pool::Conn;
use async_trait::async_trait;
use syncserver_common::Metrics;
use tokenserver_db_common::{params, results, Db, DbError};
#[allow(dead_code)]
pub struct TokenserverPgDb {
conn: Conn,
metrics: Metrics,
service_id: Option<i32>,
spanner_node_id: Option<i32>,
pub timeout: Option<Duration>,
}
impl TokenserverPgDb {
pub fn new(
conn: Conn,
metrics: &Metrics,
service_id: Option<i32>,
spanner_node_id: Option<i32>,
timeout: Option<Duration>,
) -> Self {
Self {
conn,
metrics: metrics.clone(),
service_id,
spanner_node_id,
timeout,
}
}
}
#[async_trait(?Send)]
impl Db for TokenserverPgDb {
fn timeout(&self) -> Option<Duration> {
self.timeout
}
fn metrics(&self) -> &Metrics {
&self.metrics
}
async fn check(&mut self) -> Result<results::Check, DbError> {
TokenserverPgDb::check(self).await
}
// Services Methods
async fn get_service_id(
&mut self,
params: params::GetServiceId,
) -> Result<results::GetServiceId, DbError> {
TokenserverPgDb::get_service_id(self, params).await
}
#[cfg(debug_assertions)]
async fn post_service(
&mut self,
params: params::PostService,
) -> Result<results::PostService, DbError> {
TokenserverPgDb::post_service(self, params).await
}
// Nodes Methods
#[cfg(debug_assertions)]
async fn get_node(&mut self, params: params::GetNode) -> Result<results::GetNode, DbError> {
TokenserverPgDb::get_node(self, params).await
}
async fn get_node_id(
&mut self,
params: params::GetNodeId,
) -> Result<results::GetNodeId, DbError> {
TokenserverPgDb::get_node_id(self, params).await
}
async fn get_best_node(
&mut self,
params: params::GetBestNode,
) -> Result<results::GetBestNode, DbError> {
TokenserverPgDb::get_best_node(self, params).await
}
#[cfg(debug_assertions)]
async fn post_node(&mut self, params: params::PostNode) -> Result<results::PostNode, DbError> {
TokenserverPgDb::post_node(self, params).await
}
async fn add_user_to_node(
&mut self,
params: params::AddUserToNode,
) -> Result<results::AddUserToNode, DbError> {
TokenserverPgDb::add_user_to_node(self, params).await
}
#[cfg(debug_assertions)]
async fn remove_node(
&mut self,
params: params::RemoveNode,
) -> Result<results::RemoveNode, DbError> {
TokenserverPgDb::remove_node(self, params).await
}
// Users Methods
#[cfg(debug_assertions)]
async fn get_user(&mut self, params: params::GetUser) -> Result<results::GetUser, DbError> {
TokenserverPgDb::get_user(self, params).await
}
async fn get_or_create_user(
&mut self,
params: params::GetOrCreateUser,
) -> Result<results::GetOrCreateUser, DbError> {
TokenserverPgDb::get_or_create_user(self, params).await
}
async fn get_users(&mut self, params: params::GetUsers) -> Result<results::GetUsers, DbError> {
TokenserverPgDb::get_users(self, params).await
}
async fn post_user(&mut self, params: params::PostUser) -> Result<results::PostUser, DbError> {
TokenserverPgDb::post_user(self, params).await
}
async fn put_user(&mut self, params: params::PutUser) -> Result<results::PutUser, DbError> {
TokenserverPgDb::put_user(self, params).await
}
async fn replace_user(
&mut self,
params: params::ReplaceUser,
) -> Result<results::ReplaceUser, DbError> {
TokenserverPgDb::replace_user(self, params).await
}
async fn replace_users(
&mut self,
params: params::ReplaceUsers,
) -> Result<results::ReplaceUsers, DbError> {
TokenserverPgDb::replace_users(self, params).await
}
#[cfg(debug_assertions)]
async fn unassign_node(
&mut self,
params: params::UnassignNode,
) -> Result<results::UnassignNode, DbError> {
TokenserverPgDb::unassign_node(self, params).await
}
#[cfg(debug_assertions)]
async fn set_user_created_at(
&mut self,
params: params::SetUserCreatedAt,
) -> Result<results::SetUserCreatedAt, DbError> {
TokenserverPgDb::set_user_created_at(self, params).await
}
#[cfg(debug_assertions)]
async fn set_user_replaced_at(
&mut self,
params: params::SetUserReplacedAt,
) -> Result<results::SetUserReplacedAt, DbError> {
TokenserverPgDb::set_user_replaced_at(self, params).await
}
#[cfg(debug_assertions)]
fn set_spanner_node_id(&mut self, params: params::SpannerNodeId) {
self.spanner_node_id = params;
}
}
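Structurally this mirrors the MySQL `TokenserverDb`: every trait method forwards to a same-named inherent `TokenserverPgDb` method. The inherent Postgres query implementations are not part of this change yet — consistent with the pool's `get()` below still being a `todo!` — so the forwarding calls are scaffolding for work to come.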

View File

@ -0,0 +1,35 @@
use crate::schema::{nodes, services, users};
use diesel::{Identifiable, Insertable, Queryable};
#[derive(Queryable, Debug, Identifiable, Insertable)]
pub struct Service {
pub id: i32,
pub service: Option<String>,
pub pattern: Option<String>,
}
#[derive(Queryable, Debug, Identifiable, Insertable)]
#[diesel(primary_key(uid))]
pub struct User {
pub uid: i64,
pub service: i32,
pub email: String,
pub generation: i64,
pub client_state: String,
pub created_at: i64,
pub replaced_at: Option<i64>,
pub nodeid: i64,
pub keys_changed_at: Option<i64>,
}
#[derive(Queryable, Debug, Identifiable, Insertable)]
pub struct Node {
pub id: i64,
pub service: i32,
pub node: String,
pub available: i32,
pub current_load: i32,
pub capacity: i32,
pub downed: i32,
pub backoff: i32,
}

View File

@ -0,0 +1,182 @@
use std::time::Duration;
use async_trait::async_trait;
use deadpool::managed::PoolError;
use diesel::Connection;
use diesel_async::{
async_connection_wrapper::AsyncConnectionWrapper,
pooled_connection::{
deadpool::{Object, Pool},
AsyncDieselConnectionManager,
},
AsyncPgConnection,
};
use diesel_logger::LoggingConnection;
use diesel_migrations::{embed_migrations, EmbeddedMigrations, MigrationHarness};
use syncserver_common::Metrics;
#[cfg(debug_assertions)]
use syncserver_db_common::test::test_transaction_hook;
use syncserver_db_common::{GetPoolState, PoolState};
use tokenserver_db_common::{params, Db, DbError, DbPool, DbResult};
use super::models::TokenserverPgDb;
use tokenserver_settings::Settings;
use tokio::task::spawn_blocking;
/// The `embed_migrations!` macro reads migrations at compile time.
/// This creates a constant that references a list of migrations.
/// See https://docs.rs/diesel_migrations/2.2.0/diesel_migrations/macro.embed_migrations.html
pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!();
/// Connection type alias for a pooled `AsyncPgConnection`, so callers depend only on `Conn`.
pub(crate) type Conn = Object<AsyncPgConnection>;
#[allow(dead_code)]
fn run_embedded_migrations(database_url: &str) -> DbResult<()> {
let conn = AsyncConnectionWrapper::<AsyncPgConnection>::establish(database_url)?;
LoggingConnection::new(conn).run_pending_migrations(MIGRATIONS)?;
Ok(())
}
#[allow(dead_code)]
#[derive(Clone)]
pub struct TokenserverPgPool {
/// Pool of db connections.
inner: Pool<AsyncPgConnection>,
/// Metrics module from syncserver-common.
metrics: Metrics,
/// This field is public so the service ID can be set after the pool is created.
pub service_id: Option<i32>,
/// Optional associated Spanner node ID.
spanner_node_id: Option<i32>,
/// Optional pool timeout duration, configured as integer seconds and stored as a `Duration`.
pub timeout: Option<Duration>,
/// Config setting flag to determine if migrations should run.
run_migrations: bool,
/// URL of the associated Postgres database.
database_url: String,
}
#[allow(dead_code)]
impl TokenserverPgPool {
pub fn new(
settings: &Settings,
metrics: &Metrics,
_use_test_transactions: bool,
) -> DbResult<Self> {
let manager =
AsyncDieselConnectionManager::<AsyncPgConnection>::new(&settings.database_url);
let wait = settings
.database_pool_connection_timeout
.map(|seconds| Duration::from_secs(seconds as u64));
let timeouts = deadpool::managed::Timeouts {
wait,
..Default::default()
};
let config = deadpool::managed::PoolConfig {
max_size: settings.database_pool_max_size as usize,
timeouts,
..Default::default()
};
let builder = Pool::builder(manager)
.config(config)
.runtime(deadpool::Runtime::Tokio1);
#[cfg(debug_assertions)]
let builder = if _use_test_transactions {
builder.post_create(deadpool::managed::Hook::async_fn(|conn, _| {
Box::pin(async { test_transaction_hook(conn).await })
}))
} else {
builder
};
let pool = builder.build().map_err(|e| {
DbError::internal(format!("Couldn't build Tokenserver Postgres Db Pool: {e}"))
})?;
let timeout = settings
.database_request_timeout
.map(|v| Duration::from_secs(v as u64));
Ok(Self {
inner: pool,
metrics: metrics.clone(),
spanner_node_id: settings.spanner_node_id,
service_id: None,
timeout,
run_migrations: settings.run_migrations,
database_url: settings.database_url.clone(),
})
}
async fn get_tokenserver_db(&self) -> Result<TokenserverPgDb, DbError> {
let conn = self.inner.get().await.map_err(|e| match e {
PoolError::Backend(backend_err) => match backend_err {
diesel_async::pooled_connection::PoolError::ConnectionError(conn_err) => {
conn_err.into()
}
diesel_async::pooled_connection::PoolError::QueryError(query_err) => {
query_err.into()
}
},
PoolError::Timeout(timeout_type) => DbError::pool_timeout(timeout_type),
_ => DbError::internal(format!("Deadpool PoolError: {e}")),
})?;
Ok(TokenserverPgDb::new(
conn,
&self.metrics,
self.service_id,
self.spanner_node_id,
self.timeout,
))
}
/// Acquire the common "sync-1.5" service_id and cache it.
async fn init_service_id(&mut self) -> Result<(), tokenserver_common::TokenserverError> {
let service_id = self
.get()
.await?
.get_service_id(params::GetServiceId {
service: "sync-1.5".to_owned(),
})
.await?;
self.service_id = Some(service_id.id);
Ok(())
}
}
#[async_trait(?Send)]
impl DbPool for TokenserverPgPool {
async fn init(&mut self) -> Result<(), DbError> {
if self.run_migrations {
let database_url = self.database_url.clone();
spawn_blocking(move || run_embedded_migrations(&database_url))
.await
.map_err(|e| DbError::internal(format!("Couldn't spawn migrations: {e}")))??;
}
// As long as the "sync-1.5" service record is in the database, this query should not fail
// unless there is a network failure or some other unpredictable event.
let _ = self.init_service_id().await;
Ok(())
}
async fn get(&self) -> Result<Box<dyn Db>, DbError> {
let mut metrics = self.metrics.clone();
metrics.start_timer("storage.get_pool", None);
todo!("implement get once Db trait implemented for TokenserverDb")
// Ok(Box::new(self.get_tokenserver_db().await?) as Box<dyn Db>)
}
fn box_clone(&self) -> Box<dyn DbPool> {
Box::new(self.clone())
}
}
impl GetPoolState for TokenserverPgPool {
fn state(&self) -> PoolState {
self.inner.status().into()
}
}

View File

@ -6,14 +6,9 @@ authors.workspace = true
edition.workspace = true
[dependencies]
backtrace.workspace = true
futures.workspace = true
async-trait.workspace = true
http.workspace = true
serde.workspace = true
slog-scope.workspace = true
thiserror.workspace = true
async-trait = "0.1.88"
deadpool = { workspace = true }
diesel = { workspace = true }
diesel-async = { workspace = true }
@ -22,10 +17,16 @@ diesel_migrations = { workspace = true, features = ["mysql"] }
syncserver-common = { path = "../syncserver-common" }
syncserver-db-common = { path = "../syncserver-db-common" }
tokenserver-common = { path = "../tokenserver-common" }
tokenserver-db-common = { path = "../tokenserver-db-common" }
tokenserver-db-postgres = { path = "../tokenserver-db-postgres", optional = true }
tokenserver-settings = { path = "../tokenserver-settings" }
tokio = { workspace = true, features = ["macros", "sync"] }
url = "2.1"
[dev-dependencies]
env_logger.workspace = true
syncserver-settings = { path = "../syncserver-settings" }
[features]
postgres = ['tokenserver-db-postgres']

View File

@ -1,15 +1,41 @@
#![allow(non_local_definitions)]
extern crate diesel;
extern crate diesel_migrations;
#[macro_use]
extern crate slog_scope;
mod error;
pub mod mock;
mod models;
pub mod params;
mod pool;
pub mod results;
pub use models::{Db, TokenserverDb};
pub use pool::{DbPool, TokenserverPool};
use url::Url;
pub use models::TokenserverDb;
pub use pool::TokenserverPool;
use syncserver_common::Metrics;
pub use tokenserver_db_common::{params, results, Db, DbError, DbPool};
use tokenserver_settings::Settings;
pub fn pool_from_settings(
settings: &Settings,
metrics: &Metrics,
use_test_transactions: bool,
) -> Result<Box<dyn DbPool>, DbError> {
let url = Url::parse(&settings.database_url)
.map_err(|e| DbError::internal(format!("Invalid SYNC_TOKENSERVER__DATABASE_URL: {e}")))?;
Ok(match url.scheme() {
"mysql" => Box::new(crate::pool::TokenserverPool::new(
settings,
metrics,
use_test_transactions,
)?),
#[cfg(feature = "postgres")]
"postgres" => Box::new(tokenserver_db_postgres::TokenserverPgPool::new(
settings,
metrics,
use_test_transactions,
)?),
invalid_scheme => {
return Err(DbError::internal(format!(
"Invalid SYNC_TOKENSERVER__DATABASE_URL scheme: {invalid_scheme}://"
)))
}
})
}
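Callers are insulated from the backend choice: the same call works for a `mysql://` DSN, and for `postgres://` when the crate is built with the `postgres` feature. A rough usage sketch, assuming a populated `Settings` and that `init()` is awaited before first use; the `connect` helper here is hypothetical, not code from this change:
```rust
use syncserver_common::Metrics;
use tokenserver_db::{pool_from_settings, DbError, DbPool};
use tokenserver_settings::Settings;

async fn connect(settings: &Settings) -> Result<(), DbError> {
    // "mysql://..." selects TokenserverPool; "postgres://..." selects
    // TokenserverPgPool (requires building with --features postgres).
    let mut pool = pool_from_settings(settings, &Metrics::noop(), false)?;
    pool.init().await?; // run migrations, cache the "sync-1.5" service id
    let _db = pool.get().await?; // yields a Box<dyn Db>
    Ok(())
}
```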

View File

@ -1,13 +1,11 @@
#![allow(clippy::new_without_default)]
use async_trait::async_trait;
use syncserver_db_common::{GetPoolState, PoolState};
use std::sync::LazyLock;
use super::error::DbError;
use super::models::Db;
use super::params;
use super::pool::DbPool;
use super::results;
use async_trait::async_trait;
use syncserver_common::Metrics;
use syncserver_db_common::{GetPoolState, PoolState};
use tokenserver_db_common::{params, results, Db, DbError, DbPool};
#[derive(Clone, Debug)]
pub struct MockDbPool;
@ -115,7 +113,12 @@ impl Db for MockDb {
Ok(results::GetServiceId::default())
}
#[cfg(test)]
fn metrics(&self) -> &Metrics {
static METRICS: LazyLock<Metrics> = LazyLock::new(Metrics::noop);
&METRICS
}
#[cfg(debug_assertions)]
async fn set_user_created_at(
&mut self,
_params: params::SetUserCreatedAt,
@ -123,7 +126,7 @@ impl Db for MockDb {
Ok(())
}
#[cfg(test)]
#[cfg(debug_assertions)]
async fn set_user_replaced_at(
&mut self,
_params: params::SetUserReplacedAt,
@ -131,22 +134,22 @@ impl Db for MockDb {
Ok(())
}
#[cfg(test)]
#[cfg(debug_assertions)]
async fn get_user(&mut self, _params: params::GetUser) -> Result<results::GetUser, DbError> {
Ok(results::GetUser::default())
}
#[cfg(test)]
#[cfg(debug_assertions)]
async fn post_node(&mut self, _params: params::PostNode) -> Result<results::PostNode, DbError> {
Ok(results::PostNode::default())
}
#[cfg(test)]
#[cfg(debug_assertions)]
async fn get_node(&mut self, _params: params::GetNode) -> Result<results::GetNode, DbError> {
Ok(results::GetNode::default())
}
#[cfg(test)]
#[cfg(debug_assertions)]
async fn unassign_node(
&mut self,
_params: params::UnassignNode,
@ -154,7 +157,7 @@ impl Db for MockDb {
Ok(())
}
#[cfg(test)]
#[cfg(debug_assertions)]
async fn remove_node(
&mut self,
_params: params::RemoveNode,
@ -162,11 +165,14 @@ impl Db for MockDb {
Ok(())
}
#[cfg(test)]
#[cfg(debug_assertions)]
async fn post_service(
&mut self,
_params: params::PostService,
) -> Result<results::PostService, DbError> {
Ok(results::PostService::default())
}
#[cfg(debug_assertions)]
fn set_spanner_node_id(&mut self, _params: params::SpannerNodeId) {}
}

View File

@ -1,3 +1,7 @@
use std::time::Duration;
#[cfg(debug_assertions)]
use std::time::{SystemTime, UNIX_EPOCH};
use async_trait::async_trait;
use diesel::{
sql_types::{Bigint, Float, Integer, Nullable, Text},
@ -6,19 +10,9 @@ use diesel::{
use diesel_async::RunQueryDsl;
use http::StatusCode;
use syncserver_common::Metrics;
use tokenserver_db_common::{params, results, Db, DbError, DbResult};
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use super::{
error::{DbError, DbResult},
params,
pool::Conn,
results,
};
/// The maximum possible generation number. Used as a tombstone to mark users that have been
/// "retired" from the db.
const MAX_GENERATION: i64 = i64::MAX;
use super::pool::Conn;
pub struct TokenserverDb {
conn: Conn,
@ -337,177 +331,6 @@ impl TokenserverDb {
.map_err(Into::into)
}
/// Gets the user with the given email and service ID, or if one doesn't exist, allocates a new
/// user.
async fn get_or_create_user(
&mut self,
params: params::GetOrCreateUser,
) -> DbResult<results::GetOrCreateUser> {
let mut raw_users = self
.get_users(params::GetUsers {
service_id: params.service_id,
email: params.email.clone(),
})
.await?;
if raw_users.is_empty() {
// There are no users in the database with the given email and service ID, so
// allocate a new one.
let allocate_user_result = self
.allocate_user(params.clone() as params::AllocateUser)
.await?;
Ok(results::GetOrCreateUser {
uid: allocate_user_result.uid,
email: params.email,
client_state: params.client_state,
generation: params.generation,
node: allocate_user_result.node,
keys_changed_at: params.keys_changed_at,
created_at: allocate_user_result.created_at,
replaced_at: None,
first_seen_at: allocate_user_result.created_at,
old_client_states: vec![],
})
} else {
raw_users.sort_by_key(|raw_user| (raw_user.generation, raw_user.created_at));
raw_users.reverse();
// The user with the greatest `generation` and `created_at` is the current user
let raw_user = raw_users[0].clone();
// Collect any old client states that differ from the current client state
let old_client_states: Vec<String> = {
raw_users[1..]
.iter()
.map(|user| user.client_state.clone())
.filter(|client_state| client_state != &raw_user.client_state)
.collect()
};
// Make sure every old row is marked as replaced. They might not be, due to races in row
// creation.
for old_user in &raw_users[1..] {
if old_user.replaced_at.is_none() {
let params = params::ReplaceUser {
uid: old_user.uid,
service_id: params.service_id,
replaced_at: raw_user.created_at,
};
self.replace_user(params).await?;
}
}
let first_seen_at = raw_users[raw_users.len() - 1].created_at;
match (raw_user.replaced_at, raw_user.node) {
// If the most up-to-date user is marked as replaced or does not have a node
// assignment, allocate a new user. Note that, if the current user is marked
// as replaced, we do not want to create a new user with the account metadata
// in the parameters to this method. Rather, we want to create a duplicate of
// the replaced user assigned to a new node. This distinction is important
// because the account metadata in the parameters to this method may not match
// that currently stored on the most up-to-date user and may be invalid.
(Some(_), _) | (_, None) if raw_user.generation < MAX_GENERATION => {
let allocate_user_result = {
self.allocate_user(params::AllocateUser {
service_id: params.service_id,
email: params.email.clone(),
generation: raw_user.generation,
client_state: raw_user.client_state.clone(),
keys_changed_at: raw_user.keys_changed_at,
capacity_release_rate: params.capacity_release_rate,
})
.await?
};
Ok(results::GetOrCreateUser {
uid: allocate_user_result.uid,
email: params.email,
client_state: raw_user.client_state,
generation: raw_user.generation,
node: allocate_user_result.node,
keys_changed_at: raw_user.keys_changed_at,
created_at: allocate_user_result.created_at,
replaced_at: None,
first_seen_at,
old_client_states,
})
}
// The most up-to-date user has a node. Note that this user may be retired or
// replaced.
(_, Some(node)) => Ok(results::GetOrCreateUser {
uid: raw_user.uid,
email: params.email,
client_state: raw_user.client_state,
generation: raw_user.generation,
node,
keys_changed_at: raw_user.keys_changed_at,
created_at: raw_user.created_at,
replaced_at: None,
first_seen_at,
old_client_states,
}),
// The most up-to-date user doesn't have a node and is retired. This is an internal
// service error for compatibility reasons (the legacy Tokenserver returned an
// internal service error in this situation).
(_, None) => {
let uid = raw_user.uid;
warn!("Tokenserver user retired"; "uid" => &uid);
Err(DbError::internal("Tokenserver user retired".to_owned()))
}
}
}
}
/// Creates a new user and assigns them to a node.
async fn allocate_user(
&mut self,
params: params::AllocateUser,
) -> DbResult<results::AllocateUser> {
let mut metrics = self.metrics.clone();
metrics.start_timer("storage.allocate_user", None);
// Get the least-loaded node
let node = self
.get_best_node(params::GetBestNode {
service_id: params.service_id,
capacity_release_rate: params.capacity_release_rate,
})
.await?;
// Decrement `available` and increment `current_load` on the node assigned to the user.
self.add_user_to_node(params::AddUserToNode {
service_id: params.service_id,
node: node.node.clone(),
})
.await?;
let created_at = {
let start = SystemTime::now();
start.duration_since(UNIX_EPOCH).unwrap().as_millis() as i64
};
let uid = self
.post_user(params::PostUser {
service_id: params.service_id,
email: params.email.clone(),
generation: params.generation,
client_state: params.client_state.clone(),
created_at,
node_id: node.id,
keys_changed_at: params.keys_changed_at,
})
.await?
.id;
Ok(results::AllocateUser {
uid,
node: node.node,
created_at,
})
}
pub async fn get_service_id(
&mut self,
params: params::GetServiceId,
@ -529,7 +352,7 @@ impl TokenserverDb {
}
}
#[cfg(test)]
#[cfg(debug_assertions)]
async fn set_user_created_at(
&mut self,
params: params::SetUserCreatedAt,
@ -548,7 +371,7 @@ impl TokenserverDb {
.map_err(Into::into)
}
#[cfg(test)]
#[cfg(debug_assertions)]
async fn set_user_replaced_at(
&mut self,
params: params::SetUserReplacedAt,
@ -567,7 +390,7 @@ impl TokenserverDb {
.map_err(Into::into)
}
#[cfg(test)]
#[cfg(debug_assertions)]
async fn get_user(&mut self, params: params::GetUser) -> DbResult<results::GetUser> {
const QUERY: &str = r#"
SELECT service, email, generation, client_state, replaced_at, nodeid, keys_changed_at
@ -582,7 +405,7 @@ impl TokenserverDb {
.map_err(Into::into)
}
#[cfg(test)]
#[cfg(debug_assertions)]
async fn post_node(&mut self, params: params::PostNode) -> DbResult<results::PostNode> {
const QUERY: &str = r#"
INSERT INTO nodes (service, node, available, current_load, capacity, downed, backoff)
@ -605,7 +428,7 @@ impl TokenserverDb {
.map_err(Into::into)
}
#[cfg(test)]
#[cfg(debug_assertions)]
async fn get_node(&mut self, params: params::GetNode) -> DbResult<results::GetNode> {
const QUERY: &str = r#"
SELECT *
@ -620,7 +443,7 @@ impl TokenserverDb {
.map_err(Into::into)
}
#[cfg(test)]
#[cfg(debug_assertions)]
async fn unassign_node(
&mut self,
params: params::UnassignNode,
@ -645,7 +468,7 @@ impl TokenserverDb {
.map_err(Into::into)
}
#[cfg(test)]
#[cfg(debug_assertions)]
async fn remove_node(&mut self, params: params::RemoveNode) -> DbResult<results::RemoveNode> {
const QUERY: &str = "DELETE FROM nodes WHERE id = ?";
@ -657,7 +480,7 @@ impl TokenserverDb {
.map_err(Into::into)
}
#[cfg(test)]
#[cfg(debug_assertions)]
async fn post_service(
&mut self,
params: params::PostService,
@ -681,6 +504,11 @@ impl TokenserverDb {
})
.map_err(Into::into)
}
#[cfg(debug_assertions)]
fn set_spanner_node_id(&mut self, params: params::SpannerNodeId) {
self.spanner_node_id = params;
}
}
#[async_trait(?Send)]
@ -732,13 +560,6 @@ impl Db for TokenserverDb {
TokenserverDb::get_users(self, params).await
}
async fn get_or_create_user(
&mut self,
params: params::GetOrCreateUser,
) -> Result<results::GetOrCreateUser, DbError> {
TokenserverDb::get_or_create_user(self, params).await
}
async fn get_service_id(
&mut self,
params: params::GetServiceId,
@ -746,11 +567,15 @@ impl Db for TokenserverDb {
TokenserverDb::get_service_id(self, params).await
}
fn metrics(&self) -> &Metrics {
&self.metrics
}
fn timeout(&self) -> Option<Duration> {
self.timeout
}
#[cfg(test)]
#[cfg(debug_assertions)]
async fn get_user(&mut self, params: params::GetUser) -> Result<results::GetUser, DbError> {
TokenserverDb::get_user(self, params).await
}
@ -759,7 +584,7 @@ impl Db for TokenserverDb {
TokenserverDb::check(self).await
}
#[cfg(test)]
#[cfg(debug_assertions)]
async fn set_user_created_at(
&mut self,
params: params::SetUserCreatedAt,
@ -767,7 +592,7 @@ impl Db for TokenserverDb {
TokenserverDb::set_user_created_at(self, params).await
}
#[cfg(test)]
#[cfg(debug_assertions)]
async fn set_user_replaced_at(
&mut self,
params: params::SetUserReplacedAt,
@ -775,17 +600,17 @@ impl Db for TokenserverDb {
TokenserverDb::set_user_replaced_at(self, params).await
}
#[cfg(test)]
#[cfg(debug_assertions)]
async fn post_node(&mut self, params: params::PostNode) -> Result<results::PostNode, DbError> {
TokenserverDb::post_node(self, params).await
}
#[cfg(test)]
#[cfg(debug_assertions)]
async fn get_node(&mut self, params: params::GetNode) -> Result<results::GetNode, DbError> {
TokenserverDb::get_node(self, params).await
}
#[cfg(test)]
#[cfg(debug_assertions)]
async fn unassign_node(
&mut self,
params: params::UnassignNode,
@ -793,7 +618,7 @@ impl Db for TokenserverDb {
TokenserverDb::unassign_node(self, params).await
}
#[cfg(test)]
#[cfg(debug_assertions)]
async fn remove_node(
&mut self,
params: params::RemoveNode,
@ -801,102 +626,18 @@ impl Db for TokenserverDb {
TokenserverDb::remove_node(self, params).await
}
#[cfg(test)]
#[cfg(debug_assertions)]
async fn post_service(
&mut self,
params: params::PostService,
) -> Result<results::PostService, DbError> {
TokenserverDb::post_service(self, params).await
}
}
#[async_trait(?Send)]
pub trait Db {
fn timeout(&self) -> Option<Duration> {
None
#[cfg(debug_assertions)]
fn set_spanner_node_id(&mut self, params: params::SpannerNodeId) {
TokenserverDb::set_spanner_node_id(self, params)
}
async fn replace_user(
&mut self,
params: params::ReplaceUser,
) -> Result<results::ReplaceUser, DbError>;
async fn replace_users(
&mut self,
params: params::ReplaceUsers,
) -> Result<results::ReplaceUsers, DbError>;
async fn post_user(&mut self, params: params::PostUser) -> Result<results::PostUser, DbError>;
async fn put_user(&mut self, params: params::PutUser) -> Result<results::PutUser, DbError>;
async fn check(&mut self) -> Result<results::Check, DbError>;
async fn get_node_id(
&mut self,
params: params::GetNodeId,
) -> Result<results::GetNodeId, DbError>;
async fn get_best_node(
&mut self,
params: params::GetBestNode,
) -> Result<results::GetBestNode, DbError>;
async fn add_user_to_node(
&mut self,
params: params::AddUserToNode,
) -> Result<results::AddUserToNode, DbError>;
async fn get_users(&mut self, params: params::GetUsers) -> Result<results::GetUsers, DbError>;
async fn get_or_create_user(
&mut self,
params: params::GetOrCreateUser,
) -> Result<results::GetOrCreateUser, DbError>;
async fn get_service_id(
&mut self,
params: params::GetServiceId,
) -> Result<results::GetServiceId, DbError>;
#[cfg(test)]
async fn set_user_created_at(
&mut self,
params: params::SetUserCreatedAt,
) -> Result<results::SetUserCreatedAt, DbError>;
#[cfg(test)]
async fn set_user_replaced_at(
&mut self,
params: params::SetUserReplacedAt,
) -> Result<results::SetUserReplacedAt, DbError>;
#[cfg(test)]
async fn get_user(&mut self, params: params::GetUser) -> Result<results::GetUser, DbError>;
#[cfg(test)]
async fn post_node(&mut self, params: params::PostNode) -> Result<results::PostNode, DbError>;
#[cfg(test)]
async fn get_node(&mut self, params: params::GetNode) -> Result<results::GetNode, DbError>;
#[cfg(test)]
async fn unassign_node(
&mut self,
params: params::UnassignNode,
) -> Result<results::UnassignNode, DbError>;
#[cfg(test)]
async fn remove_node(
&mut self,
params: params::RemoveNode,
) -> Result<results::RemoveNode, DbError>;
#[cfg(test)]
async fn post_service(
&mut self,
params: params::PostService,
) -> Result<results::PostService, DbError>;
}
#[cfg(test)]
@ -907,8 +648,9 @@ mod tests {
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use syncserver_settings::Settings;
use tokenserver_db_common::{DbPool, MAX_GENERATION};
use crate::pool::{DbPool, TokenserverPool};
use crate::pool_from_settings;
#[tokio::test]
async fn test_update_generation() -> DbResult<()> {
@ -1358,7 +1100,7 @@ mod tests {
#[tokio::test]
async fn test_node_allocation() -> DbResult<()> {
let pool = db_pool().await?;
let mut db = pool.get_tokenserver_db().await?;
let mut db = pool.get().await?;
// Add a service
let service_id = db
@ -1405,7 +1147,7 @@ mod tests {
#[tokio::test]
async fn test_allocation_to_least_loaded_node() -> DbResult<()> {
let pool = db_pool().await?;
let mut db = pool.get_tokenserver_db().await?;
let mut db = pool.get().await?;
// Add a service
let service_id = db
@ -1470,7 +1212,7 @@ mod tests {
#[tokio::test]
async fn test_allocation_is_not_allowed_to_downed_nodes() -> DbResult<()> {
let pool = db_pool().await?;
let mut db = pool.get_tokenserver_db().await?;
let mut db = pool.get().await?;
// Add a service
let service_id = db
@ -1513,7 +1255,7 @@ mod tests {
#[tokio::test]
async fn test_allocation_is_not_allowed_to_backoff_nodes() -> DbResult<()> {
let pool = db_pool().await?;
let mut db = pool.get_tokenserver_db().await?;
let mut db = pool.get().await?;
// Add a service
let service_id = db
@ -1556,7 +1298,7 @@ mod tests {
#[tokio::test]
async fn test_node_reassignment_when_records_are_replaced() -> DbResult<()> {
let pool = db_pool().await?;
let mut db = pool.get_tokenserver_db().await?;
let mut db = pool.get().await?;
// Add a service
let service_id = db
@ -2126,7 +1868,7 @@ mod tests {
#[tokio::test]
async fn test_get_spanner_node() -> DbResult<()> {
let pool = db_pool().await?;
let mut db = pool.get_tokenserver_db().await?;
let mut db = pool.get().await?;
// Add a service
let service_id = db
@ -2174,7 +1916,7 @@ mod tests {
);
// Ensure the Spanner node is selected if the Spanner node ID is cached
db.spanner_node_id = Some(spanner_node_id as i32);
db.set_spanner_node_id(Some(spanner_node_id as i32));
assert_eq!(
db.get_best_node(params::GetBestNode {
@ -2197,14 +1939,14 @@ mod tests {
Ok(())
}
async fn db_pool() -> DbResult<TokenserverPool> {
async fn db_pool() -> DbResult<Box<dyn DbPool>> {
let _ = env_logger::try_init();
let mut settings = Settings::test_settings();
settings.tokenserver.run_migrations = true;
let use_test_transactions = true;
let mut pool = TokenserverPool::new(
let mut pool = pool_from_settings(
&settings.tokenserver,
&Metrics::noop(),
use_test_transactions,

View File

@ -17,14 +17,12 @@ use syncserver_common::Metrics;
#[cfg(debug_assertions)]
use syncserver_db_common::test::test_transaction_hook;
use syncserver_db_common::{GetPoolState, PoolState};
+use tokenserver_db_common::{params, Db, DbError, DbPool, DbResult};
use tokenserver_settings::Settings;
use tokio::task::spawn_blocking;
-use super::{
-    error::{DbError, DbResult},
-    models::{Db, TokenserverDb},
-    params,
-};
+use super::models::TokenserverDb;
pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!();
@ -171,29 +169,8 @@ impl DbPool for TokenserverPool {
}
}
-#[async_trait(?Send)]
-pub trait DbPool: Sync + Send + GetPoolState {
-    async fn init(&mut self) -> Result<(), DbError>;
-    async fn get(&self) -> Result<Box<dyn Db>, DbError>;
-    fn box_clone(&self) -> Box<dyn DbPool>;
-}
impl GetPoolState for TokenserverPool {
    fn state(&self) -> PoolState {
        self.inner.status().into()
    }
}
-impl GetPoolState for Box<dyn DbPool> {
-    fn state(&self) -> PoolState {
-        (**self).state()
-    }
-}
-impl Clone for Box<dyn DbPool> {
-    fn clone(&self) -> Box<dyn DbPool> {
-        self.box_clone()
-    }
-}
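The block removed here is the standard box_clone workaround for cloning trait objects; per the imports above, it now lives in tokenserver_db_common rather than being dropped. Clone::clone returns Self, so Clone cannot be implemented for dyn DbPool directly; instead the trait exposes box_clone, and Clone is implemented for the box. A minimal self-contained illustration of the pattern, with hypothetical Pool/MyPool names:

// Minimal sketch of the box_clone pattern (hypothetical names).
trait Pool {
    // Returns a boxed copy; object-safe because it never mentions Self.
    fn box_clone(&self) -> Box<dyn Pool>;
}

// Clone for the box dispatches to box_clone on the concrete type.
impl Clone for Box<dyn Pool> {
    fn clone(&self) -> Box<dyn Pool> {
        self.box_clone()
    }
}

#[derive(Clone)]
struct MyPool;

impl Pool for MyPool {
    fn box_clone(&self) -> Box<dyn Pool> {
        Box::new(self.clone())
    }
}

fn main() {
    let pool: Box<dyn Pool> = Box::new(MyPool);
    let _copy = pool.clone(); // dynamically dispatches to MyPool::box_clone
}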


@ -10,7 +10,7 @@ import logging
DEBUG_BUILD = "target/debug/syncserver"
RELEASE_BUILD = "/app/bin/syncserver"
# max number of attempts to check server heartbeat
-SYNC_SERVER_STARTUP_MAX_ATTEMPTS = 30
+SYNC_SERVER_STARTUP_MAX_ATTEMPTS = 35
JWK_CACHE_DISABLED = os.environ.get("JWK_CACHE_DISABLED")
logger = logging.getLogger("tokenserver.scripts.conftest")


@ -1,15 +1,15 @@
-# This file is automatically @generated by Poetry 2.1.3 and should not be changed by hand.
+# This file is automatically @generated by Poetry 2.2.0 and should not be changed by hand.
[[package]]
name = "authlib"
version = "1.6.1"
version = "1.6.4"
description = "The ultimate Python library in building OAuth and OpenID Connect servers and clients."
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
{file = "authlib-1.6.1-py2.py3-none-any.whl", hash = "sha256:e9d2031c34c6309373ab845afc24168fe9e93dc52d252631f52642f21f5ed06e"},
{file = "authlib-1.6.1.tar.gz", hash = "sha256:4dffdbb1460ba6ec8c17981a4c67af7d8af131231b5a36a88a1e8c80c111cdfd"},
{file = "authlib-1.6.4-py2.py3-none-any.whl", hash = "sha256:39313d2a2caac3ecf6d8f95fbebdfd30ae6ea6ae6a6db794d976405fdd9aa796"},
{file = "authlib-1.6.4.tar.gz", hash = "sha256:104b0442a43061dc8bc23b133d1d06a2b0a9c2e3e33f34c4338929e816287649"},
]
[package.dependencies]
@ -674,6 +674,9 @@ files = [
{file = "geventhttpclient-2.3.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:182f5158504ac426d591cfb1234de5180813292b49049e761f00bf70691aace5"},
{file = "geventhttpclient-2.3.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:59a2e7c136a3e6b60b87bf8b87e5f1fb25705d76ab7471018e25f8394c640dda"},
{file = "geventhttpclient-2.3.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5fde955b634a593e70eae9b4560b74badc8b2b1e3dd5b12a047de53f52a3964a"},
{file = "geventhttpclient-2.3.4-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:1c69c4ec9b618ca42008d6930077d72ee0c304e2272a39a046e775c25ca4ac44"},
{file = "geventhttpclient-2.3.4-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:aaa7aebf4fe0d33a3f9f8945061f5374557c9f7baa3c636bfe25ac352167be9c"},
{file = "geventhttpclient-2.3.4-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:08ea2e92a1a4f46d3eeff631fa3f04f4d12c78523dc9bffc3b05b3dd93233050"},
{file = "geventhttpclient-2.3.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:49f5e2051f7d06cb6476500a2ec1b9737aa3160258f0344b07b6d8e8cda3a0cb"},
{file = "geventhttpclient-2.3.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0599fd7ca84a8621f8d34c4e2b89babae633b34c303607c61500ebd3b8a7687a"},
{file = "geventhttpclient-2.3.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b4ac86f8d4ddd112bd63aa9f3c7b73c62d16b33fca414f809e8465bbed2580a3"},
@ -687,6 +690,9 @@ files = [
{file = "geventhttpclient-2.3.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:fb8f6a18f1b5e37724111abbd3edf25f8f00e43dc261b11b10686e17688d2405"},
{file = "geventhttpclient-2.3.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:dbb28455bb5d82ca3024f9eb7d65c8ff6707394b584519def497b5eb9e5b1222"},
{file = "geventhttpclient-2.3.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:96578fc4a5707b5535d1c25a89e72583e02aafe64d14f3b4d78f9c512c6d613c"},
{file = "geventhttpclient-2.3.4-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:19721357db976149ccf54ac279eab8139da8cdf7a11343fd02212891b6f39677"},
{file = "geventhttpclient-2.3.4-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ecf830cdcd1d4d28463c8e0c48f7f5fb06f3c952fff875da279385554d1d4d65"},
{file = "geventhttpclient-2.3.4-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:47dbf8a163a07f83b38b0f8a35b85e5d193d3af4522ab8a5bbecffff1a4cd462"},
{file = "geventhttpclient-2.3.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e39ad577b33a5be33b47bff7c2dda9b19ced4773d169d6555777cd8445c13c0"},
{file = "geventhttpclient-2.3.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:110d863baf7f0a369b6c22be547c5582e87eea70ddda41894715c870b2e82eb0"},
{file = "geventhttpclient-2.3.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:226d9fca98469bd770e3efd88326854296d1aa68016f285bd1a2fb6cd21e17ee"},
@ -700,6 +706,9 @@ files = [
{file = "geventhttpclient-2.3.4-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:9ac30c38d86d888b42bb2ab2738ab9881199609e9fa9a153eb0c66fc9188c6cb"},
{file = "geventhttpclient-2.3.4-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4b802000a4fad80fa57e895009671d6e8af56777e3adf0d8aee0807e96188fd9"},
{file = "geventhttpclient-2.3.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:461e4d9f4caee481788ec95ac64e0a4a087c1964ddbfae9b6f2dc51715ba706c"},
{file = "geventhttpclient-2.3.4-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:b7e41687c74e8fbe6a665458bbaea0c5a75342a95e2583738364a73bcbf1671b"},
{file = "geventhttpclient-2.3.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c3ea5da20f4023cf40207ce15f5f4028377ffffdba3adfb60b4c8f34925fce79"},
{file = "geventhttpclient-2.3.4-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:91f19a8a6899c27867dbdace9500f337d3e891a610708e86078915f1d779bf53"},
{file = "geventhttpclient-2.3.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41f2dcc0805551ea9d49f9392c3b9296505a89b9387417b148655d0d8251b36e"},
{file = "geventhttpclient-2.3.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:62f3a29bf242ecca6360d497304900683fd8f42cbf1de8d0546c871819251dad"},
{file = "geventhttpclient-2.3.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8714a3f2c093aeda3ffdb14c03571d349cb3ed1b8b461d9f321890659f4a5dbf"},
@ -713,6 +722,9 @@ files = [
{file = "geventhttpclient-2.3.4-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:be64c5583884c407fc748dedbcb083475d5b138afb23c6bc0836cbad228402cc"},
{file = "geventhttpclient-2.3.4-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:15b2567137734183efda18e4d6245b18772e648b6a25adea0eba8b3a8b0d17e8"},
{file = "geventhttpclient-2.3.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a4bca1151b8cd207eef6d5cb3c720c562b2aa7293cf113a68874e235cfa19c31"},
{file = "geventhttpclient-2.3.4-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:8a681433e2f3d4b326d8b36b3e05b787b2c6dd2a5660a4a12527622278bf02ed"},
{file = "geventhttpclient-2.3.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:736aa8e9609e4da40aeff0dbc02fea69021a034f4ed1e99bf93fc2ca83027b64"},
{file = "geventhttpclient-2.3.4-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:9d477ae1f5d42e1ee6abbe520a2e9c7f369781c3b8ca111d1f5283c1453bc825"},
{file = "geventhttpclient-2.3.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9b50d9daded5d36193d67e2fc30e59752262fcbbdc86e8222c7df6b93af0346a"},
{file = "geventhttpclient-2.3.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fe705e7656bc6982a463a4ed7f9b1db8c78c08323f1d45d0d1d77063efa0ce96"},
{file = "geventhttpclient-2.3.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:69668589359db4cbb9efa327dda5735d1e74145e6f0a9ffa50236d15cf904053"},
@ -723,9 +735,30 @@ files = [
{file = "geventhttpclient-2.3.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8d1d0db89c1c8f3282eac9a22fda2b4082e1ed62a2107f70e3f1de1872c7919f"},
{file = "geventhttpclient-2.3.4-cp313-cp313-win32.whl", hash = "sha256:4e492b9ab880f98f8a9cc143b96ea72e860946eae8ad5fb2837cede2a8f45154"},
{file = "geventhttpclient-2.3.4-cp313-cp313-win_amd64.whl", hash = "sha256:72575c5b502bf26ececccb905e4e028bb922f542946be701923e726acf305eb6"},
{file = "geventhttpclient-2.3.4-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:503db5dd0aa94d899c853b37e1853390c48c7035132f39a0bab44cbf95d29101"},
{file = "geventhttpclient-2.3.4-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:389d3f83316220cfa2010f41401c140215a58ddba548222e7122b2161e25e391"},
{file = "geventhttpclient-2.3.4-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:20c65d404fa42c95f6682831465467dff317004e53602c01f01fbd5ba1e56628"},
{file = "geventhttpclient-2.3.4-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:2574ee47ff6f379e9ef124e2355b23060b81629f1866013aa975ba35df0ed60b"},
{file = "geventhttpclient-2.3.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:fecf1b735591fb21ea124a374c207104a491ad0d772709845a10d5faa07fa833"},
{file = "geventhttpclient-2.3.4-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:44e9ba810c28f9635e5c4c9cf98fc6470bad5a3620d8045d08693f7489493a3c"},
{file = "geventhttpclient-2.3.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:501d5c69adecd5eaee3c22302006f6c16aa114139640873b72732aa17dab9ee7"},
{file = "geventhttpclient-2.3.4-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:709f557138fb84ed32703d42da68f786459dab77ff2c23524538f2e26878d154"},
{file = "geventhttpclient-2.3.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:b8b86815a30e026c6677b89a5a21ba5fd7b69accf8f0e9b83bac123e4e9f3b31"},
{file = "geventhttpclient-2.3.4-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:4371b1b1afc072ad2b0ff5a8929d73ffd86d582908d3e9e8d7911dc027b1b3a6"},
{file = "geventhttpclient-2.3.4-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:6409fcda1f40d66eab48afc218b4c41e45a95c173738d10c50bc69c7de4261b9"},
{file = "geventhttpclient-2.3.4-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:142870c2efb6bd0a593dcd75b83defb58aeb72ceaec4c23186785790bd44a311"},
{file = "geventhttpclient-2.3.4-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:3a74f7b926badb3b1d47ea987779cb83523a406e89203070b58b20cf95d6f535"},
{file = "geventhttpclient-2.3.4-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2a8cde016e5ea6eb289c039b6af8dcef6c3ee77f5d753e57b48fe2555cdeacca"},
{file = "geventhttpclient-2.3.4-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:5aa16f2939a508667093b18e47919376f7db9a9acbe858343173c5a58e347869"},
{file = "geventhttpclient-2.3.4-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:ffe87eb7f1956357c2144a56814b5ffc927cbb8932f143a0351c78b93129ebbc"},
{file = "geventhttpclient-2.3.4-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:5ee758e37215da9519cea53105b2a078d8bc0a32603eef2a1f9ab551e3767dee"},
{file = "geventhttpclient-2.3.4-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:416cc70adb3d34759e782d2e120b4432752399b85ac9758932ecd12274a104c3"},
{file = "geventhttpclient-2.3.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:2fa223034774573218bb49e78eca7e92b8c82ccae9d840fdcf424ea95c2d1790"},
{file = "geventhttpclient-2.3.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9f707dbdaad78dafe6444ee0977cbbaefa16ad10ab290d75709170d124bac4c8"},
{file = "geventhttpclient-2.3.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5660dfd692bc2cbd3bd2d0a2ad2a58ec47f7778042369340bdea765dc10e5672"},
{file = "geventhttpclient-2.3.4-cp39-cp39-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:a85c0cdf16559c9cfa3e2145c16bfe5e1c3115d0cb3b143d41fb68412888171f"},
{file = "geventhttpclient-2.3.4-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:024b9e2e3203cc5e2c34cb5efd16ba0f2851e39c45abdc2966a8c30a935094fc"},
{file = "geventhttpclient-2.3.4-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:d693d1f63ae6a794074ec1f475e3e3f607c52242f3799479fc483207b5c02ff0"},
{file = "geventhttpclient-2.3.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c7a0c11afc1fe2c8338e5ccfd7ffdab063b84ace8b9656b5b3bc1614ee8a234"},
{file = "geventhttpclient-2.3.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:39746bcd874cb75aaf6d16cdddd287a29721e8b56c20dd8a4d4ecde1d3b92f14"},
{file = "geventhttpclient-2.3.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:73e7d2e3d2d67e25d9d0f2bf46768650a57306a0587bbcdbfe2f4eac504248d2"},
@ -2025,4 +2058,4 @@ testing = ["coverage[toml]", "zope.event", "zope.testing"]
[metadata]
lock-version = "2.1"
python-versions = ">=3.10,<3.12"
content-hash = "fca5361c160cab132ef2a4621b41d87033bd7584cedb4c48401eaf09ef3504f0"
content-hash = "24a0220e6c6710caadb18f6a8a06dad918ef2bf78e2d01938ba48413878e6c58"


@ -14,7 +14,7 @@ requires-python = ">=3.10,<3.12"
package-mode = false
[tool.poetry.dependencies]
authlib = "^1.6.0"
authlib = "^1.6.4"
cryptography = "^45.0.4"
locust = "^2.37.10"
pybrowserid = "^0.14.0"