Skip to content
This repository has been archived by the owner on Apr 26, 2024. It is now read-only.

Remove backwards compatibility with RelationPaginationToken. #12138

Merged
merged 2 commits into from
Mar 4, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions changelog.d/12138.removal
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
Remove backwards compatibility with pagination tokens from the `/relations` and `/aggregations` endpoints generated from Synapse < v1.52.0.
55 changes: 14 additions & 41 deletions synapse/rest/client/relations.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,50 +27,15 @@
from synapse.http.servlet import RestServlet, parse_integer, parse_string
from synapse.http.site import SynapseRequest
from synapse.rest.client._base import client_patterns
from synapse.storage.relations import (
AggregationPaginationToken,
PaginationChunk,
RelationPaginationToken,
)
from synapse.types import JsonDict, RoomStreamToken, StreamToken
from synapse.storage.relations import AggregationPaginationToken, PaginationChunk
from synapse.types import JsonDict, StreamToken

if TYPE_CHECKING:
from synapse.server import HomeServer
from synapse.storage.databases.main import DataStore

logger = logging.getLogger(__name__)


async def _parse_token(
store: "DataStore", token: Optional[str]
) -> Optional[StreamToken]:
"""
For backwards compatibility support RelationPaginationToken, but new pagination
tokens are generated as full StreamTokens, to be compatible with /sync and /messages.
"""
if not token:
return None
# Luckily the format for StreamToken and RelationPaginationToken differ enough
# that they can easily be separated. An "_" appears in the serialization of
# RoomStreamToken (as part of StreamToken), but RelationPaginationToken uses
# "-" only for separators.
if "_" in token:
return await StreamToken.from_string(store, token)
else:
relation_token = RelationPaginationToken.from_string(token)
return StreamToken(
room_key=RoomStreamToken(relation_token.topological, relation_token.stream),
presence_key=0,
typing_key=0,
receipt_key=0,
account_data_key=0,
push_rules_key=0,
to_device_key=0,
device_list_key=0,
groups_key=0,
)


class RelationPaginationServlet(RestServlet):
"""API to paginate relations on an event by topological ordering, optionally
filtered by relation type and event type.
Expand Down Expand Up @@ -122,8 +87,12 @@ async def on_GET(
pagination_chunk = PaginationChunk(chunk=[])
else:
# Return the relations
from_token = await _parse_token(self.store, from_token_str)
to_token = await _parse_token(self.store, to_token_str)
from_token = None
if from_token_str:
from_token = await StreamToken.from_string(self.store, from_token_str)
to_token = None
if to_token_str:
to_token = await StreamToken.from_string(self.store, to_token_str)

pagination_chunk = await self.store.get_relations_for_event(
event_id=parent_id,
Expand Down Expand Up @@ -317,8 +286,12 @@ async def on_GET(
from_token_str = parse_string(request, "from")
to_token_str = parse_string(request, "to")

from_token = await _parse_token(self.store, from_token_str)
to_token = await _parse_token(self.store, to_token_str)
from_token = None
if from_token_str:
from_token = await StreamToken.from_string(self.store, from_token_str)
to_token = None
if to_token_str:
to_token = await StreamToken.from_string(self.store, to_token_str)

result = await self.store.get_relations_for_event(
event_id=parent_id,
Expand Down
31 changes: 0 additions & 31 deletions synapse/storage/relations.py
Original file line number Diff line number Diff line change
Expand Up @@ -54,37 +54,6 @@ async def to_dict(self, store: "DataStore") -> Dict[str, Any]:
return d


@attr.s(frozen=True, slots=True, auto_attribs=True)
class RelationPaginationToken:
"""Pagination token for relation pagination API.

As the results are in topological order, we can use the
`topological_ordering` and `stream_ordering` fields of the events at the
boundaries of the chunk as pagination tokens.

Attributes:
topological: The topological ordering of the boundary event
stream: The stream ordering of the boundary event.
"""

topological: int
stream: int

@staticmethod
def from_string(string: str) -> "RelationPaginationToken":
try:
t, s = string.split("-")
return RelationPaginationToken(int(t), int(s))
except ValueError:
raise SynapseError(400, "Invalid relation pagination token")

async def to_string(self, store: "DataStore") -> str:
return "%d-%d" % (self.topological, self.stream)

def as_tuple(self) -> Tuple[Any, ...]:
return attr.astuple(self)


@attr.s(frozen=True, slots=True, auto_attribs=True)
class AggregationPaginationToken:
"""Pagination token for relation aggregation pagination API.
Expand Down
73 changes: 1 addition & 72 deletions tests/rest/client/test_relations.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,8 +24,7 @@
from synapse.rest import admin
from synapse.rest.client import login, register, relations, room, sync
from synapse.server import HomeServer
from synapse.storage.relations import RelationPaginationToken
from synapse.types import JsonDict, StreamToken
from synapse.types import JsonDict
from synapse.util import Clock

from tests import unittest
Expand Down Expand Up @@ -281,15 +280,6 @@ def test_basic_paginate_relations(self) -> None:
channel.json_body["chunk"][0],
)

def _stream_token_to_relation_token(self, token: str) -> str:
"""Convert a StreamToken into a legacy token (RelationPaginationToken)."""
room_key = self.get_success(StreamToken.from_string(self.store, token)).room_key
return self.get_success(
RelationPaginationToken(
topological=room_key.topological, stream=room_key.stream
).to_string(self.store)
)

def test_repeated_paginate_relations(self) -> None:
"""Test that if we paginate using a limit and tokens then we get the
expected events.
Expand Down Expand Up @@ -330,34 +320,6 @@ def test_repeated_paginate_relations(self) -> None:
found_event_ids.reverse()
self.assertEqual(found_event_ids, expected_event_ids)

# Reset and try again, but convert the tokens to the legacy format.
prev_token = ""
found_event_ids = []
for _ in range(20):
from_token = ""
if prev_token:
from_token = "&from=" + self._stream_token_to_relation_token(prev_token)

channel = self.make_request(
"GET",
f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=1{from_token}",
access_token=self.user_token,
)
self.assertEqual(200, channel.code, channel.json_body)

found_event_ids.extend(e["event_id"] for e in channel.json_body["chunk"])
next_batch = channel.json_body.get("next_batch")

self.assertNotEqual(prev_token, next_batch)
prev_token = next_batch

if not prev_token:
break

# We paginated backwards, so reverse
found_event_ids.reverse()
self.assertEqual(found_event_ids, expected_event_ids)

def test_pagination_from_sync_and_messages(self) -> None:
"""Pagination tokens from /sync and /messages can be used to paginate /relations."""
channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "A")
Expand Down Expand Up @@ -543,39 +505,6 @@ def test_aggregation_pagination_within_group(self) -> None:
found_event_ids.reverse()
self.assertEqual(found_event_ids, expected_event_ids)

# Reset and try again, but convert the tokens to the legacy format.
prev_token = ""
found_event_ids = []
for _ in range(20):
from_token = ""
if prev_token:
from_token = "&from=" + self._stream_token_to_relation_token(prev_token)

channel = self.make_request(
"GET",
f"/_matrix/client/unstable/rooms/{self.room}"
f"/aggregations/{self.parent_id}/{RelationTypes.ANNOTATION}"
f"/m.reaction/{encoded_key}?limit=1{from_token}",
access_token=self.user_token,
)
self.assertEqual(200, channel.code, channel.json_body)

self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body)

found_event_ids.extend(e["event_id"] for e in channel.json_body["chunk"])

next_batch = channel.json_body.get("next_batch")

self.assertNotEqual(prev_token, next_batch)
prev_token = next_batch

if not prev_token:
break

# We paginated backwards, so reverse
found_event_ids.reverse()
self.assertEqual(found_event_ids, expected_event_ids)

def test_aggregation(self) -> None:
"""Test that annotations get correctly aggregated."""

Expand Down