aboutsummaryrefslogtreecommitdiff
path: root/syncapi/storage/storage_test.go
diff options
context:
space:
mode:
authorTill <2353100+S7evinK@users.noreply.github.com>2023-02-07 14:31:23 +0100
committerGitHub <noreply@github.com>2023-02-07 14:31:23 +0100
commiteb29a315507f0075c2c6a495ac59c64a7f45f9fc (patch)
tree3ed37d947dbacdf95c77dc27e4193c671e4968dc /syncapi/storage/storage_test.go
parentcf254ba0445e2509f77f41dbec69f632b126b847 (diff)
Optimize `/sync` and history visibility (#2961)
Should fix the following issues or make them a lot less severe when using Postgres: The main issue behind #2911: The client gives up after a certain time, causing a cascade of context errors, because the response couldn't be built up fast enough. This mostly happens on accounts with many rooms, due to the inefficient way we're getting recent events and current state. For #2777: The queries for getting the membership events for history visibility were being executed for each room (I think 185?), resulting in a whopping 2k queries for membership events. (Getting the state snapshot -> block nids -> actual wanted membership event) Both should now be better by: - Using a LATERAL join to get all recent events for all joined rooms in one go (TODO: maybe do the same for room summary and current state etc) - If we're lazy loading on initial syncs, we're now not getting the whole current state, just to drop the majority of it because we're lazy loading members - we add a filter to exclude membership events on the first call to `CurrentState`. - Using an optimized query to get the membership events needed to calculate history visibility --------- Co-authored-by: kegsay <kegan@matrix.org>
Diffstat (limited to 'syncapi/storage/storage_test.go')
-rw-r--r--syncapi/storage/storage_test.go73
1 files changed, 67 insertions, 6 deletions
diff --git a/syncapi/storage/storage_test.go b/syncapi/storage/storage_test.go
index e65367d8..05d498bc 100644
--- a/syncapi/storage/storage_test.go
+++ b/syncapi/storage/storage_test.go
@@ -156,12 +156,12 @@ func TestRecentEventsPDU(t *testing.T) {
tc := testCases[i]
t.Run(tc.Name, func(st *testing.T) {
var filter gomatrixserverlib.RoomEventFilter
- var gotEvents []types.StreamEvent
+ var gotEvents map[string]types.RecentEvents
var limited bool
filter.Limit = tc.Limit
WithSnapshot(t, db, func(snapshot storage.DatabaseTransaction) {
var err error
- gotEvents, limited, err = snapshot.RecentEvents(ctx, r.ID, types.Range{
+ gotEvents, err = snapshot.RecentEvents(ctx, []string{r.ID}, types.Range{
From: tc.From,
To: tc.To,
}, &filter, !tc.ReverseOrder, true)
@@ -169,15 +169,18 @@ func TestRecentEventsPDU(t *testing.T) {
st.Fatalf("failed to do sync: %s", err)
}
})
+ streamEvents := gotEvents[r.ID]
+ limited = streamEvents.Limited
if limited != tc.WantLimited {
st.Errorf("got limited=%v want %v", limited, tc.WantLimited)
}
- if len(gotEvents) != len(tc.WantEvents) {
+ if len(streamEvents.Events) != len(tc.WantEvents) {
st.Errorf("got %d events, want %d", len(gotEvents), len(tc.WantEvents))
}
- for j := range gotEvents {
- if !reflect.DeepEqual(gotEvents[j].JSON(), tc.WantEvents[j].JSON()) {
- st.Errorf("event %d got %s want %s", j, string(gotEvents[j].JSON()), string(tc.WantEvents[j].JSON()))
+
+ for j := range streamEvents.Events {
+ if !reflect.DeepEqual(streamEvents.Events[j].JSON(), tc.WantEvents[j].JSON()) {
+ st.Errorf("event %d got %s want %s", j, string(streamEvents.Events[j].JSON()), string(tc.WantEvents[j].JSON()))
}
}
})
@@ -923,3 +926,61 @@ func TestRoomSummary(t *testing.T) {
}
})
}
+
+func TestRecentEvents(t *testing.T) {
+ alice := test.NewUser(t)
+ room1 := test.NewRoom(t, alice)
+ room2 := test.NewRoom(t, alice)
+ roomIDs := []string{room1.ID, room2.ID}
+ rooms := map[string]*test.Room{
+ room1.ID: room1,
+ room2.ID: room2,
+ }
+
+ test.WithAllDatabases(t, func(t *testing.T, dbType test.DBType) {
+ filter := gomatrixserverlib.DefaultRoomEventFilter()
+ db, close, closeBase := MustCreateDatabase(t, dbType)
+ t.Cleanup(func() {
+ close()
+ closeBase()
+ })
+
+ MustWriteEvents(t, db, room1.Events())
+ MustWriteEvents(t, db, room2.Events())
+
+ transaction, err := db.NewDatabaseTransaction(ctx)
+ assert.NoError(t, err)
+ defer transaction.Rollback()
+
+ // get all recent events from 0 to 100 (we only created 5 events, so we should get 5 back)
+ roomEvs, err := transaction.RecentEvents(ctx, roomIDs, types.Range{From: 0, To: 100}, &filter, true, true)
+ assert.NoError(t, err)
+ assert.Equal(t, len(roomEvs), 2, "unexpected recent events response")
+ for _, recentEvents := range roomEvs {
+ assert.Equal(t, 5, len(recentEvents.Events), "unexpected recent events for room")
+ }
+
+ // update the filter to only return one event
+ filter.Limit = 1
+ roomEvs, err = transaction.RecentEvents(ctx, roomIDs, types.Range{From: 0, To: 100}, &filter, true, true)
+ assert.NoError(t, err)
+ assert.Equal(t, len(roomEvs), 2, "unexpected recent events response")
+ for roomID, recentEvents := range roomEvs {
+ origEvents := rooms[roomID].Events()
+ assert.Equal(t, true, recentEvents.Limited, "expected events to be limited")
+ assert.Equal(t, 1, len(recentEvents.Events), "unexpected recent events for room")
+ assert.Equal(t, origEvents[len(origEvents)-1].EventID(), recentEvents.Events[0].EventID())
+ }
+
+ // not chronologically ordered still returns the events in order (given ORDER BY id DESC)
+ roomEvs, err = transaction.RecentEvents(ctx, roomIDs, types.Range{From: 0, To: 100}, &filter, false, true)
+ assert.NoError(t, err)
+ assert.Equal(t, len(roomEvs), 2, "unexpected recent events response")
+ for roomID, recentEvents := range roomEvs {
+ origEvents := rooms[roomID].Events()
+ assert.Equal(t, true, recentEvents.Limited, "expected events to be limited")
+ assert.Equal(t, 1, len(recentEvents.Events), "unexpected recent events for room")
+ assert.Equal(t, origEvents[len(origEvents)-1].EventID(), recentEvents.Events[0].EventID())
+ }
+ })
+}