test: add tokenserver util tests (#2195)
Some checks failed
Glean probe-scraper / glean-probe-scraper (push) Has been cancelled
Main Workflow - Lint, Build, Test / python-env (push) Has been cancelled
Main Workflow - Lint, Build, Test / rust-env (push) Has been cancelled
Main Workflow - Lint, Build, Test / python-checks (push) Has been cancelled
Main Workflow - Lint, Build, Test / rust-checks (push) Has been cancelled
Main Workflow - Lint, Build, Test / clippy (mysql) (push) Has been cancelled
Main Workflow - Lint, Build, Test / clippy (postgres) (push) Has been cancelled
Main Workflow - Lint, Build, Test / clippy (spanner) (push) Has been cancelled
Main Workflow - Lint, Build, Test / build-and-unit-test-postgres (push) Has been cancelled
Main Workflow - Lint, Build, Test / build-postgres-image (push) Has been cancelled
Main Workflow - Lint, Build, Test / postgres-e2e-tests (push) Has been cancelled
Main Workflow - Lint, Build, Test / build-and-unit-test-mysql (push) Has been cancelled
Main Workflow - Lint, Build, Test / build-mysql-image (push) Has been cancelled
Main Workflow - Lint, Build, Test / mysql-e2e-tests (push) Has been cancelled
Main Workflow - Lint, Build, Test / build-and-unit-test-spanner (push) Has been cancelled
Main Workflow - Lint, Build, Test / build-spanner-image (push) Has been cancelled
Main Workflow - Lint, Build, Test / spanner-e2e-tests (push) Has been cancelled
Build, Tag and Push Container Images to GAR / check (push) Has been cancelled
Build, Tag and Push Container Images to GAR / build-and-push-syncstorage-rs (push) Has been cancelled
Build, Tag and Push Container Images to GAR / build-and-push-syncserver-postgres (push) Has been cancelled
Build, Tag and Push Container Images to GAR / build-and-push-syncstorage-rs-spanner-python-utils (push) Has been cancelled
Build, Tag and Push Container Images to GAR / build-and-push-syncserver-postgres-python-utils (push) Has been cancelled
Build, Tag and Push Container Images to GAR / build-and-push-syncserver-mysql (push) Has been cancelled
Publish Sync docs to pages / build-mdbook (push) Has been cancelled
Publish Sync docs to pages / build-openapi (push) Has been cancelled
Publish Sync docs to pages / combine-and-prepare (push) Has been cancelled
Publish Sync docs to pages / deploy (push) Has been cancelled

test: add tokenserver util tests
This commit is contained in:
Taddes 2026-04-21 22:46:36 +03:00 committed by GitHub
parent e32579f251
commit b295c151ac
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
10 changed files with 62 additions and 18 deletions

View File

@ -1030,7 +1030,6 @@ jobs:
tags: app:build
build-args: |
SYNCSTORAGE_DATABASE_BACKEND=spanner
MYSQLCLIENT_PKG=libmysqlclient-dev
outputs: type=docker,dest=/tmp/spanner-image.tar
cache-from: type=gha
cache-to: type=gha,mode=max

View File

@ -119,6 +119,7 @@ RUN apt-get -q update && \
apt-get install -y --no-install-recommends gnupg ca-certificates wget && \
echo "deb https://repo.mysql.com/apt/debian/ bookworm mysql-8.0" >> /etc/apt/sources.list && \
# Fetch and install the MySQL public key
# Key ID A8D3785C from https://dev.mysql.com/doc/refman/8.0/en/checking-gpg-signature.html
gpg --batch --keyserver hkp://keyserver.ubuntu.com --recv-keys A8D3785C && \
gpg --batch --armor --export A8D3785C | tee /etc/apt/trusted.gpg.d/mysql.asc && \
apt-get -q update ; \
@ -128,7 +129,7 @@ RUN apt-get -q update && \
# The python3-cryptography debian package installs version 2.6.1, but
# we want to use the version specified in requirements.txt. To do this,
# we have to remove the python3-cryptography package here.
apt-get -q remove -y python3-cryptography 2>/dev/null || true && \
(apt-get -q remove -y python3-cryptography 2>/dev/null || true) && \
apt-get -q autoremove -y && \
rm -rf /var/lib/apt/lists/* && \
python3 --version

View File

@ -46,6 +46,7 @@ POSTGRES_INT_JUNIT_XML := $(TEST_RESULTS_DIR)/$(TEST_FILE_PREFIX)postgres_integr
POSTGRES_NO_JWK_INT_JUNIT_XML := $(TEST_RESULTS_DIR)/$(TEST_FILE_PREFIX)postgres_no_oauth_integration__results.xml
MYSQL_INT_JUNIT_XML := $(TEST_RESULTS_DIR)/$(TEST_FILE_PREFIX)mysql_integration__results.xml
MYSQL_NO_JWK_INT_JUNIT_XML := $(TEST_RESULTS_DIR)/$(TEST_FILE_PREFIX)mysql_no_oauth_integration__results.xml
TOKENSERVER_UTILS_JUNIT_XML := $(TEST_RESULTS_DIR)/$(TEST_FILE_PREFIX)tokenserver_utils__results.xml
LOCAL_INTEGRATION_JUNIT_XML := $(TEST_RESULTS_DIR)/$(TEST_FILE_PREFIX)local_integration__results.xml
SYNC_SYNCSTORAGE__DATABASE_URL ?= mysql://sample_user:sample_password@localhost/syncstorage_rs
@ -113,6 +114,7 @@ docker_run_mysql_e2e_tests:
--exit-code-from e2e-tests \
--abort-on-container-exit || exit_code=$$?
docker cp mysql-e2e-tests:/mysql_integration_results.xml ${MYSQL_INT_JUNIT_XML}
docker cp mysql-e2e-tests:/tokenserver_utils_results.xml ${TOKENSERVER_UTILS_JUNIT_XML}
docker compose \
-f docker/docker-compose.mysql.yaml \
-f docker/docker-compose.e2e.mysql.yaml \
@ -146,6 +148,7 @@ docker_run_postgres_e2e_tests:
--exit-code-from e2e-tests \
--abort-on-container-exit || exit_code=$$?
docker cp postgres-e2e-tests:/postgres_integration_results.xml ${POSTGRES_INT_JUNIT_XML}
docker cp postgres-e2e-tests:/tokenserver_utils_results.xml ${TOKENSERVER_UTILS_JUNIT_XML}
docker compose \
-f docker/docker-compose.postgres.yaml \
-f docker/docker-compose.e2e.postgres.yaml \
@ -179,6 +182,7 @@ docker_run_spanner_e2e_tests:
--exit-code-from e2e-tests \
--abort-on-container-exit || exit_code=$$?
docker cp spanner-e2e-tests:/spanner_integration_results.xml ${SPANNER_INT_JUNIT_XML}
docker cp spanner-e2e-tests:/tokenserver_utils_results.xml ${TOKENSERVER_UTILS_JUNIT_XML}
docker compose \
-f docker/docker-compose.spanner.yaml \
-f docker/docker-compose.e2e.spanner.yaml \

View File

@ -15,5 +15,5 @@ services:
- -c
- >-
PYTHONPATH=/app
pytest /app/tools/integration_tests/
pytest /app/tools/integration_tests/ /app/tools/tokenserver/
--junit-xml=/${RESULTS_FILENAME}

View File

@ -32,5 +32,5 @@ services:
- -c
- >-
PYTHONPATH=/app
pytest /app/tools/integration_tests/
pytest /app/tools/integration_tests/ /app/tools/tokenserver/
--junit-xml=/${RESULTS_FILENAME}

View File

@ -30,7 +30,7 @@ services:
entrypoint:
- /bin/sh
- -c
- >-
PYTHONPATH=/app
pytest /app/tools/integration_tests/
- >-
PYTHONPATH=/app
pytest /app/tools/integration_tests/ /app/tools/tokenserver/
--junit-xml=/${RESULTS_FILENAME}

View File

@ -31,5 +31,5 @@ services:
- -c
- >-
PYTHONPATH=/app
pytest /app/tools/integration_tests/
pytest /app/tools/integration_tests/ /app/tools/tokenserver/
--junit-xml=/${RESULTS_FILENAME}

View File

@ -180,7 +180,9 @@ where
""")
_GET_BEST_NODE = sqltext("""\
# MySQL: log(0) returns NULL, and NULLs sort first with ASC — zero-load
# nodes naturally win. Original query unchanged.
_GET_BEST_NODE_MYSQL = sqltext("""\
select
id, node
from
@ -196,6 +198,26 @@ order by
limit 1
""")
# PostgreSQL: ln() is the natural log equivalent of MySQL's log(). NULLIF
# converts zero to NULL to avoid InvalidArgumentForLogarithm. NULLS FIRST
# replicates MySQL's default NULL-first ASC sort order, ensuring zero-load
# nodes are always preferred.
_GET_BEST_NODE_POSTGRES = sqltext("""\
select
id, node
from
nodes
where
service = :service
and available > 0
and capacity > current_load
and downed = 0
and backoff = 0
order by
ln(NULLIF(current_load, 0)) / ln(capacity) NULLS FIRST
limit 1
""")
_RELEASE_NODE_CAPACITY = sqltext("""\
update
@ -616,11 +638,14 @@ class Database:
pattern=pattern,
**kwds,
)
res.close()
if self.db_mode == "postgresql":
return res.fetchone()[0]
row = res.fetchone()[0]
res.close()
return row
else:
return res.lastrowid
lastrowid = res.lastrowid
res.close()
return lastrowid
def add_node(self, node, capacity, **kwds):
"""Add definition for a new node."""
@ -653,8 +678,10 @@ class Database:
capacity=capacity,
available=available,
current_load=kwds.get("current_load", 0),
downed=kwds.get("downed", 0),
backoff=kwds.get("backoff", 0),
# Cast to int: optparse action="store_true" produces Python bools,
# which postgres rejects for INTEGER columns (MySQL coerces silently).
downed=int(kwds.get("downed", 0)),
backoff=int(kwds.get("backoff", 0)),
)
res.close()
@ -676,6 +703,11 @@ class Database:
query += """
where service = :service and node = :node
"""
# Cast boolean fields to int: Python bools are rejected by postgres
# INTEGER columns. MySQL coerces silently; postgres does not.
for field in ("downed", "backoff"):
if field in values:
values[field] = int(values[field])
values["service"] = self._get_service_id(SERVICE_NAME)
values["node"] = node
if kwds:
@ -747,8 +779,16 @@ class Database:
# capacity. This loop allows a maximum of five retries before
# bailing out.
for _ in range(5):
# Select the appropriate query variant — postgres requires
# explicit NULL handling for log(0) and NULL sort order that
# MySQL handles implicitly.
best_node_query = (
_GET_BEST_NODE_POSTGRES
if self.db_mode == "postgresql"
else _GET_BEST_NODE_MYSQL
)
res = self._execute_sql(
_GET_BEST_NODE, service=self._get_service_id(SERVICE_NAME)
best_node_query, service=self._get_service_id(SERVICE_NAME)
)
row = res.fetchone()
res.close()

View File

@ -45,7 +45,7 @@ class ProcessAccountEventsTestCase(unittest.TestCase):
testing.tearDown()
cursor = self.database._execute_sql("DELETE FROM users")
cursor.close
cursor.close()
cursor = self.database._execute_sql("DELETE FROM nodes")
cursor.close()

View File

@ -83,9 +83,9 @@ def main(args=None):
if opts.current_load is not None:
kwds["current_load"] = opts.current_load
if opts.backoff is not None:
kwds["backoff"] = opts.backoff
kwds["backoff"] = int(opts.backoff)
if opts.downed is not None:
kwds["downed"] = opts.downed
kwds["downed"] = int(opts.downed)
update_node(node_name, **kwds)
return 0