From 15ba4518e14ebda2d6189ed04f4f28d2749a5fd7 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Thu, 28 Jan 2021 01:45:40 -0600 Subject: [PATCH 01/81] Add tests for MSC2716 and backfilling history Work on MSC2716: https://github.com/matrix-org/matrix-doc/pull/2716 --- README.md | 12 +++++++ internal/b/blueprints.go | 9 ++--- tests/msc2716_test.go | 78 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 95 insertions(+), 4 deletions(-) create mode 100644 tests/msc2716_test.go diff --git a/README.md b/README.md index 70f30e67..82e10694 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,19 @@ +[![Complement Dev](https://img.shields.io/matrix/complement:matrix.org.svg?label=%23complement%3Amatrix.org&logo=matrix&server_fqdn=matrix.org)](https://matrix.to/#/#complement:matrix.org) + ### Complement Complement is a black box integration testing framework for Matrix homeservers. + +## adsf + +To get started developing, see https://github.com/matrix-org/complement/blob/master/ONBOARDING.md + +If you're looking to run Complement against a local dev instance of Synapse, see [`matrix-org/synapse` -> `scripts-dev/complement.sh`](https://github.com/matrix-org/synapse/blob/develop/scripts-dev/complement.sh) + +If you want to develop Complement tests while working on a local dev instance of Synapse, edit `scripts-dev/complement.sh` to point to your local Complement checkout. + + #### Running You need to have Go and Docker installed. Then: diff --git a/internal/b/blueprints.go b/internal/b/blueprints.go index ef8424b4..a2ce915b 100644 --- a/internal/b/blueprints.go +++ b/internal/b/blueprints.go @@ -69,10 +69,11 @@ type Room struct { } type Event struct { - Type string - Sender string - StateKey *string - Content map[string]interface{} + Type string + Sender string + StateKey *string + PrevEvents []string + Content map[string]interface{} // This field is ignored in blueprints as clients are unable to set it. 
Used with federation.Server Unsigned map[string]interface{} } diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go new file mode 100644 index 00000000..7705ab19 --- /dev/null +++ b/tests/msc2716_test.go @@ -0,0 +1,78 @@ +// +build msc2716 + +// This file contains tests for incrementally importing history to an existing room, +// a currently experimental feature defined by MSC2716, which you can read here: +// https://github.com/matrix-org/matrix-doc/pull/2716 + +package tests + +import ( + "testing" + + "github.com/matrix-org/complement/internal/b" + "github.com/matrix-org/complement/internal/must" + "github.com/tidwall/gjson" +) + +// Test that the m.room.create and m.room.member events for a room we created comes down /sync +func TestBackfillingHistory(t *testing.T) { + deployment := Deploy(t, "rooms_state", b.BlueprintAlice) + defer deployment.Destroy(t) + + userID := "@alice:hs1" + alice := deployment.Client(t, "hs1", userID) + roomID := alice.CreateRoom(t, struct{}{}) + + // eventA + eventA := alice.SendEventSynced(t, roomID, b.Event{ + Type: "m.room.message", + Content: map[string]interface{}{ + "msgtype": "m.text", + "body": "Message A", + }, + }) + // eventB + alice.SendEventSynced(t, roomID, b.Event{ + Type: "m.room.message", + Content: map[string]interface{}{ + "msgtype": "m.text", + "body": "Message B", + }, + }) + // eventC + alice.SendEventSynced(t, roomID, b.Event{ + Type: "m.room.message", + Content: map[string]interface{}{ + "msgtype": "m.text", + "body": "Message C", + }, + }) + + // event1 + alice.SendEventSynced(t, roomID, b.Event{ + Type: "m.room.message", + PrevEvents: []string{ + eventA, + }, + Content: map[string]interface{}{ + "msgtype": "m.text", + "body": "Message 1", + }, + }) + + t.Run("parallel", func(t *testing.T) { + // sytest: Room creation reports m.room.create to myself + t.Run("Room creation reports m.room.create to myself", func(t *testing.T) { + t.Parallel() + alice := deployment.Client(t, "hs1", userID) + alice.SyncUntilTimelineHas(t, roomID, func(ev gjson.Result) bool { + if ev.Get("type").Str != "m.room.create" { + return false + } + must.EqualStr(t, ev.Get("sender").Str, userID, "wrong sender") + must.EqualStr(t, ev.Get("content").Get("creator").Str, userID, "wrong content.creator") + return true + }) + }) + }) +} From 55c5a8c74bef8377ee9ff1e488813846d9670e25 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Thu, 28 Jan 2021 19:55:22 -0600 Subject: [PATCH 02/81] Allow SendEventSynced to pass over prev_event querystring parameters --- internal/client/client.go | 13 ++++++++++++- internal/docker/deployment.go | 7 ++++++- 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/internal/client/client.go b/internal/client/client.go index 28ef89d5..dcbf8ae3 100644 --- a/internal/client/client.go +++ b/internal/client/client.go @@ -93,7 +93,18 @@ func (c *CSAPI) SendEventSynced(t *testing.T, roomID string, e b.Event) string { if e.StateKey != nil { paths = []string{"_matrix", "client", "r0", "rooms", roomID, "state", e.Type, *e.StateKey} } - res := c.MustDo(t, "PUT", paths, e.Content) + + query := make(url.Values, len(e.PrevEvents)) + for _, prevEvent := range e.PrevEvents { + query.Add("prev_event", prevEvent) + } + + b, err := json.Marshal(e.Content) + if err != nil { + t.Fatalf("CSAPI.Do failed to marshal JSON body: %s", err) + } + + res := c.MustDoRaw(t, "PUT", paths, b, "application/json", query) body := ParseJSON(t, res) eventID := GetJSONFieldStr(t, body, "event_id") t.Logf("SendEventSynced waiting for event ID %s", eventID) diff --git 
a/internal/docker/deployment.go b/internal/docker/deployment.go
index 672a4d59..dd568db2 100644
--- a/internal/docker/deployment.go
+++ b/internal/docker/deployment.go
@@ -30,7 +30,12 @@ type HomeserverDeployment struct {
 // will print container logs before killing the container.
 func (d *Deployment) Destroy(t *testing.T) {
 	t.Helper()
-	d.Deployer.Destroy(d, t.Failed())
+	d.Deployer.Destroy(
+		d,
+		// TODO: Revert this back to `t.Failed()`.
+		// I did this so I can always see the homeserver logs regardless of outcome
+		true,
+	)
 }
 
 // Client returns a CSAPI client targeting the given hsName, using the access token for the given userID.

From 70036ac0189a385599309ac58c946d8dd631a8f5 Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Fri, 29 Jan 2021 01:17:59 -0600
Subject: [PATCH 03/81] Switch to fire-and-forget sending for historical
 events, which we don't expect to sync

---
 internal/client/client.go | 13 ++++++++---
 tests/msc2716_test.go     | 40 ++++++++++++++++++++++++++++++++++++++-
 2 files changed, 49 insertions(+), 4 deletions(-)

diff --git a/internal/client/client.go b/internal/client/client.go
index dcbf8ae3..e5edee37 100644
--- a/internal/client/client.go
+++ b/internal/client/client.go
@@ -84,9 +84,7 @@ func (c *CSAPI) JoinRoom(t *testing.T, roomIDOrAlias string, serverNames []strin
 	return GetJSONFieldStr(t, body, "room_id")
 }
 
-// SendEventSynced sends `e` into the room and waits for its event ID to come down /sync.
-// Returns the event ID of the sent event.
-func (c *CSAPI) SendEventSynced(t *testing.T, roomID string, e b.Event) string {
+func (c *CSAPI) SendEvent(t *testing.T, roomID string, e b.Event) string {
 	t.Helper()
 	c.txnID++
 	paths := []string{"_matrix", "client", "r0", "rooms", roomID, "send", e.Type, strconv.Itoa(c.txnID)}
@@ -107,6 +105,15 @@ func (c *CSAPI) SendEventSynced(t *testing.T, roomID string, e b.Event) string {
 	res := c.MustDoRaw(t, "PUT", paths, b, "application/json", query)
 	body := ParseJSON(t, res)
 	eventID := GetJSONFieldStr(t, body, "event_id")
+
+	return eventID
+}
+
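As a rough, standalone sketch of what this fire-and-forget helper puts on the wire, the snippet below assembles the same PUT URL from hypothetical inputs. The `prev_event` and `ts` querystring parameters are experimental extensions understood by the MSC2716-enabled Synapse this branch targets, not part of the stable client-server API, and the room/event IDs here are made up for illustration.

    package main

    import (
    	"fmt"
    	"net/url"
    	"strconv"
    )

    func main() {
    	// Hypothetical inputs: the room, a txn counter, the event to insert after,
    	// and the timestamp (in milliseconds) to stamp onto the inserted event.
    	roomID := "!example:hs1"
    	txnID := 1
    	prevEventID := "$abc123:hs1"
    	var originServerTS uint64 = 1611816000000

    	query := make(url.Values)
    	query.Add("prev_event", prevEventID)
    	query.Add("ts", strconv.FormatUint(originServerTS, 10))

    	u := url.URL{
    		Scheme:   "http",
    		Host:     "localhost:8008",
    		Path:     "/_matrix/client/r0/rooms/" + roomID + "/send/m.room.message/" + strconv.Itoa(txnID),
    		RawQuery: query.Encode(),
    	}
    	// The event content itself travels as the JSON body of the PUT request.
    	fmt.Println(u.String())
    }

+// SendEventSynced sends `e` into the room and waits for its event ID to come down /sync.
+// Returns the event ID of the sent event.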
+func (c *CSAPI) SendEventSynced(t *testing.T, roomID string, e b.Event) string { + t.Helper() + eventID := c.SendEvent(t, roomID, e) t.Logf("SendEventSynced waiting for event ID %s", eventID) c.SyncUntilTimelineHas(t, roomID, func(r gjson.Result) bool { return r.Get("event_id").Str == eventID diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go index 7705ab19..901c8c51 100644 --- a/tests/msc2716_test.go +++ b/tests/msc2716_test.go @@ -7,10 +7,12 @@ package tests import ( + "net/url" "testing" "github.com/matrix-org/complement/internal/b" "github.com/matrix-org/complement/internal/must" + "github.com/sirupsen/logrus" "github.com/tidwall/gjson" ) @@ -49,7 +51,7 @@ func TestBackfillingHistory(t *testing.T) { }) // event1 - alice.SendEventSynced(t, roomID, b.Event{ + event1 := alice.SendEvent(t, roomID, b.Event{ Type: "m.room.message", PrevEvents: []string{ eventA, @@ -60,11 +62,47 @@ func TestBackfillingHistory(t *testing.T) { }, }) + // event2 + event2 := alice.SendEvent(t, roomID, b.Event{ + Type: "m.room.message", + PrevEvents: []string{ + event1, + }, + Content: map[string]interface{}{ + "msgtype": "m.text", + "body": "Message 2", + }, + }) + + // event3 + alice.SendEvent(t, roomID, b.Event{ + Type: "m.room.message", + PrevEvents: []string{ + event2, + }, + Content: map[string]interface{}{ + "msgtype": "m.text", + "body": "Message 3", + }, + }) + + res := alice.MustDoRaw(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, nil, "application/json", url.Values{ + "dir": []string{"b"}, + "limit": []string{"100"}, + }) + + t.Logf("aweawfeefwaweafeafw") + logrus.WithFields(logrus.Fields{ + "res": res, + }).Error("messages res") + t.Run("parallel", func(t *testing.T) { // sytest: Room creation reports m.room.create to myself t.Run("Room creation reports m.room.create to myself", func(t *testing.T) { t.Parallel() + alice := deployment.Client(t, "hs1", userID) + alice.SyncUntilTimelineHas(t, roomID, func(ev gjson.Result) bool { if ev.Get("type").Str != "m.room.create" { return false From dabdf5aca91e2bd1ecfacb0b1b8ecbce3011fa72 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Fri, 29 Jan 2021 22:16:29 -0600 Subject: [PATCH 04/81] Override origin_server_ts --- internal/b/blueprints.go | 11 ++++++----- internal/client/client.go | 4 ++++ tests/msc2716_test.go | 17 ++++++++++++++++- 3 files changed, 26 insertions(+), 6 deletions(-) diff --git a/internal/b/blueprints.go b/internal/b/blueprints.go index a2ce915b..075963eb 100644 --- a/internal/b/blueprints.go +++ b/internal/b/blueprints.go @@ -69,11 +69,12 @@ type Room struct { } type Event struct { - Type string - Sender string - StateKey *string - PrevEvents []string - Content map[string]interface{} + Type string + Sender string + OriginServerTS uint64 + StateKey *string + PrevEvents []string + Content map[string]interface{} // This field is ignored in blueprints as clients are unable to set it. 
Used with federation.Server Unsigned map[string]interface{} } diff --git a/internal/client/client.go b/internal/client/client.go index e5edee37..fe220d47 100644 --- a/internal/client/client.go +++ b/internal/client/client.go @@ -97,6 +97,10 @@ func (c *CSAPI) SendEvent(t *testing.T, roomID string, e b.Event) string { query.Add("prev_event", prevEvent) } + if e.OriginServerTS != 0 { + query.Add("origin_server_ts", strconv.FormatUint(e.OriginServerTS, 10)) + } + b, err := json.Marshal(e.Content) if err != nil { t.Fatalf("CSAPI.Do failed to marshal JSON body: %s", err) diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go index 901c8c51..e876b93c 100644 --- a/tests/msc2716_test.go +++ b/tests/msc2716_test.go @@ -9,8 +9,10 @@ package tests import ( "net/url" "testing" + "time" "github.com/matrix-org/complement/internal/b" + "github.com/matrix-org/complement/internal/client" "github.com/matrix-org/complement/internal/must" "github.com/sirupsen/logrus" "github.com/tidwall/gjson" @@ -33,6 +35,13 @@ func TestBackfillingHistory(t *testing.T) { "body": "Message A", }, }) + + insertTime := time.Now() + insertOriginServerTs := uint64(insertTime.UnixNano() / 1000000) + + // wait 3ms to ensure that the timestamp changes enough intervals for each message we try to insert later + time.Sleep(3 * time.Millisecond) + // eventB alice.SendEventSynced(t, roomID, b.Event{ Type: "m.room.message", @@ -56,6 +65,7 @@ func TestBackfillingHistory(t *testing.T) { PrevEvents: []string{ eventA, }, + OriginServerTS: insertOriginServerTs, Content: map[string]interface{}{ "msgtype": "m.text", "body": "Message 1", @@ -68,6 +78,7 @@ func TestBackfillingHistory(t *testing.T) { PrevEvents: []string{ event1, }, + OriginServerTS: insertOriginServerTs + 1, Content: map[string]interface{}{ "msgtype": "m.text", "body": "Message 2", @@ -80,6 +91,7 @@ func TestBackfillingHistory(t *testing.T) { PrevEvents: []string{ event2, }, + OriginServerTS: insertOriginServerTs + 2, Content: map[string]interface{}{ "msgtype": "m.text", "body": "Message 3", @@ -92,8 +104,11 @@ func TestBackfillingHistory(t *testing.T) { }) t.Logf("aweawfeefwaweafeafw") + body := client.ParseJSON(t, res) logrus.WithFields(logrus.Fields{ - "res": res, + "insertOriginServerTs": insertOriginServerTs, + "res": res, + "body": string(body), }).Error("messages res") t.Run("parallel", func(t *testing.T) { From 6b77b292c945386a0a348adefa5018e665667323 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Tue, 2 Feb 2021 03:55:36 -0600 Subject: [PATCH 05/81] Grab context response to visualize DAG --- internal/client/client.go | 2 +- tests/msc2716_test.go | 17 +++++++++++++++++ 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/internal/client/client.go b/internal/client/client.go index fe220d47..a96e08af 100644 --- a/internal/client/client.go +++ b/internal/client/client.go @@ -98,7 +98,7 @@ func (c *CSAPI) SendEvent(t *testing.T, roomID string, e b.Event) string { } if e.OriginServerTS != 0 { - query.Add("origin_server_ts", strconv.FormatUint(e.OriginServerTS, 10)) + query.Add("ts", strconv.FormatUint(e.OriginServerTS, 10)) } b, err := json.Marshal(e.Content) diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go index e876b93c..bbbdd589 100644 --- a/tests/msc2716_test.go +++ b/tests/msc2716_test.go @@ -98,6 +98,15 @@ func TestBackfillingHistory(t *testing.T) { }, }) + // eventStar + eventStar := alice.SendEventSynced(t, roomID, b.Event{ + Type: "m.room.message", + Content: map[string]interface{}{ + "msgtype": "m.text", + "body": "Message *", + }, + }) + res 
:= alice.MustDoRaw(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, nil, "application/json", url.Values{ "dir": []string{"b"}, "limit": []string{"100"}, @@ -111,6 +120,14 @@ func TestBackfillingHistory(t *testing.T) { "body": string(body), }).Error("messages res") + contextRes := alice.MustDoRaw(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "context", eventStar}, nil, "application/json", url.Values{ + "limit": []string{"100"}, + }) + contextResBody := client.ParseJSON(t, contextRes) + logrus.WithFields(logrus.Fields{ + "contextResBody": string(contextResBody), + }).Error("context res") + t.Run("parallel", func(t *testing.T) { // sytest: Room creation reports m.room.create to myself t.Run("Room creation reports m.room.create to myself", func(t *testing.T) { From 2b2a53fdf29f7cb37a352d032c63be27ac21ff55 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 3 Feb 2021 02:46:25 -0600 Subject: [PATCH 06/81] Proper test formatting --- tests/msc2716_test.go | 71 ++++++++++++++++++++----------------------- 1 file changed, 33 insertions(+), 38 deletions(-) diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go index bbbdd589..30445885 100644 --- a/tests/msc2716_test.go +++ b/tests/msc2716_test.go @@ -7,18 +7,18 @@ package tests import ( + "fmt" "net/url" "testing" "time" "github.com/matrix-org/complement/internal/b" - "github.com/matrix-org/complement/internal/client" + "github.com/matrix-org/complement/internal/match" "github.com/matrix-org/complement/internal/must" - "github.com/sirupsen/logrus" "github.com/tidwall/gjson" ) -// Test that the m.room.create and m.room.member events for a room we created comes down /sync +// Test that the message events we insert between A and B come back in the correct order from /messages func TestBackfillingHistory(t *testing.T) { deployment := Deploy(t, "rooms_state", b.BlueprintAlice) defer deployment.Destroy(t) @@ -39,11 +39,11 @@ func TestBackfillingHistory(t *testing.T) { insertTime := time.Now() insertOriginServerTs := uint64(insertTime.UnixNano() / 1000000) - // wait 3ms to ensure that the timestamp changes enough intervals for each message we try to insert later + // wait 3ms to ensure that the timestamp changes enough for each of the 3 message we try to insert later time.Sleep(3 * time.Millisecond) // eventB - alice.SendEventSynced(t, roomID, b.Event{ + eventB := alice.SendEventSynced(t, roomID, b.Event{ Type: "m.room.message", Content: map[string]interface{}{ "msgtype": "m.text", @@ -51,7 +51,7 @@ func TestBackfillingHistory(t *testing.T) { }, }) // eventC - alice.SendEventSynced(t, roomID, b.Event{ + eventC := alice.SendEventSynced(t, roomID, b.Event{ Type: "m.room.message", Content: map[string]interface{}{ "msgtype": "m.text", @@ -86,7 +86,7 @@ func TestBackfillingHistory(t *testing.T) { }) // event3 - alice.SendEvent(t, roomID, b.Event{ + event3 := alice.SendEvent(t, roomID, b.Event{ Type: "m.room.message", PrevEvents: []string{ event2, @@ -107,41 +107,36 @@ func TestBackfillingHistory(t *testing.T) { }, }) - res := alice.MustDoRaw(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, nil, "application/json", url.Values{ - "dir": []string{"b"}, - "limit": []string{"100"}, - }) - - t.Logf("aweawfeefwaweafeafw") - body := client.ParseJSON(t, res) - logrus.WithFields(logrus.Fields{ - "insertOriginServerTs": insertOriginServerTs, - "res": res, - "body": string(body), - }).Error("messages res") - - contextRes := alice.MustDoRaw(t, "GET", []string{"_matrix", "client", "r0", "rooms", 
roomID, "context", eventStar}, nil, "application/json", url.Values{ - "limit": []string{"100"}, - }) - contextResBody := client.ParseJSON(t, contextRes) - logrus.WithFields(logrus.Fields{ - "contextResBody": string(contextResBody), - }).Error("context res") - t.Run("parallel", func(t *testing.T) { - // sytest: Room creation reports m.room.create to myself - t.Run("Room creation reports m.room.create to myself", func(t *testing.T) { + t.Run("Backfilled messages come back in correct order", func(t *testing.T) { t.Parallel() - alice := deployment.Client(t, "hs1", userID) + messagesRes := alice.MustDoRaw(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, nil, "application/json", url.Values{ + "dir": []string{"b"}, + "limit": []string{"100"}, + }) - alice.SyncUntilTimelineHas(t, roomID, func(ev gjson.Result) bool { - if ev.Get("type").Str != "m.room.create" { - return false - } - must.EqualStr(t, ev.Get("sender").Str, userID, "wrong sender") - must.EqualStr(t, ev.Get("content").Get("creator").Str, userID, "wrong content.creator") - return true + expectedMessageOrder := []string{ + eventStar, eventC, eventB, event3, event2, event1, eventA, + } + + must.MatchResponse(t, messagesRes, match.HTTPResponse{ + JSON: []match.JSON{ + match.JSONArrayEach("chunk", func(r gjson.Result) error { + // Find all events in order + if len(r.Get("content").Get("body").Str) > 0 { + // Pop the next message off the expected list + nextEventInOrder := expectedMessageOrder[0] + expectedMessageOrder = expectedMessageOrder[1:] + + if r.Get("event_id").Str != nextEventInOrder { + return fmt.Errorf("Next event found was %s but expected %s", r.Get("event_id").Str, nextEventInOrder) + } + } + + return nil + }), + }, }) }) }) From 975665d86abf8d6aa4f4a5d8ad1a98df61b3bce6 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 3 Feb 2021 03:06:09 -0600 Subject: [PATCH 07/81] Add m.historical to backfilled messages --- tests/msc2716_test.go | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go index 30445885..8d350690 100644 --- a/tests/msc2716_test.go +++ b/tests/msc2716_test.go @@ -67,8 +67,9 @@ func TestBackfillingHistory(t *testing.T) { }, OriginServerTS: insertOriginServerTs, Content: map[string]interface{}{ - "msgtype": "m.text", - "body": "Message 1", + "msgtype": "m.text", + "body": "Message 1", + "m.historical": true, }, }) @@ -80,8 +81,9 @@ func TestBackfillingHistory(t *testing.T) { }, OriginServerTS: insertOriginServerTs + 1, Content: map[string]interface{}{ - "msgtype": "m.text", - "body": "Message 2", + "msgtype": "m.text", + "body": "Message 2", + "m.historical": true, }, }) @@ -93,8 +95,9 @@ func TestBackfillingHistory(t *testing.T) { }, OriginServerTS: insertOriginServerTs + 2, Content: map[string]interface{}{ - "msgtype": "m.text", - "body": "Message 3", + "msgtype": "m.text", + "body": "Message 3", + "m.historical": true, }, }) From 110352f9ead5ad71f9c0dd53561381f221aea5b5 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 3 Feb 2021 03:38:12 -0600 Subject: [PATCH 08/81] Add tests for behavior around m.historical --- tests/msc2716_test.go | 178 ++++++++++++++++++++++++++++++------------ 1 file changed, 127 insertions(+), 51 deletions(-) diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go index 8d350690..df1538a4 100644 --- a/tests/msc2716_test.go +++ b/tests/msc2716_test.go @@ -13,6 +13,7 @@ import ( "time" "github.com/matrix-org/complement/internal/b" + 
"github.com/matrix-org/complement/internal/client" "github.com/matrix-org/complement/internal/match" "github.com/matrix-org/complement/internal/must" "github.com/tidwall/gjson" @@ -27,8 +28,119 @@ func TestBackfillingHistory(t *testing.T) { alice := deployment.Client(t, "hs1", userID) roomID := alice.CreateRoom(t, struct{}{}) + eventA, eventB, eventC, timeAfterEventA := createMessagesInRoom(t, alice, roomID) + + event1, event2, event3 := backfillMessagesAtTime(t, alice, roomID, eventA, timeAfterEventA) + + // eventStar + eventStar := alice.SendEventSynced(t, roomID, b.Event{ + Type: "m.room.message", + Content: map[string]interface{}{ + "msgtype": "m.text", + "body": "Message *", + }, + }) + + t.Run("parallel", func(t *testing.T) { + t.Run("Backfilled messages come back in correct order", func(t *testing.T) { + t.Parallel() + + messagesRes := alice.MustDoRaw(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, nil, "application/json", url.Values{ + "dir": []string{"b"}, + "limit": []string{"100"}, + }) + + expectedMessageOrder := []string{ + eventStar, eventC, eventB, event3, event2, event1, eventA, + } + + must.MatchResponse(t, messagesRes, match.HTTPResponse{ + JSON: []match.JSON{ + match.JSONArrayEach("chunk", func(r gjson.Result) error { + // Find all events in order + if len(r.Get("content").Get("body").Str) > 0 { + // Pop the next message off the expected list + nextEventInOrder := expectedMessageOrder[0] + expectedMessageOrder = expectedMessageOrder[1:] + + if r.Get("event_id").Str != nextEventInOrder { + return fmt.Errorf("Next event found was %s but expected %s", r.Get("event_id").Str, nextEventInOrder) + } + } + + return nil + }), + }, + }) + }) + + t.Run("Backfilled events with m.historical do not come down /sync", func(t *testing.T) { + t.Parallel() + + roomID := alice.CreateRoom(t, struct{}{}) + eventA, _, _, timeAfterEventA := createMessagesInRoom(t, alice, roomID) + insertOriginServerTs := uint64(timeAfterEventA.UnixNano() / 1000000) + + // If we see this message in the /sync, then something went wrong + event1 := alice.SendEvent(t, roomID, b.Event{ + Type: "m.room.message", + PrevEvents: []string{ + eventA, + }, + OriginServerTS: insertOriginServerTs, + Content: map[string]interface{}{ + "msgtype": "m.text", + "body": "Message 1", + "m.historical": true, + }, + }) + + // This is just a dummy event we search for after event1 + eventStar := alice.SendEvent(t, roomID, b.Event{ + Type: "m.room.message", + Content: map[string]interface{}{ + "msgtype": "m.text", + "body": "Message *", + }, + }) + + // Sync until we find the star message. 
If we're able to see the star message + // after event1 without seeing event1 in the mean-time, I think we're safe to + // assume it won't sync + alice.SyncUntil(t, "", "rooms.join."+client.GjsonEscape(roomID)+".timeline.events", func(r gjson.Result) bool { + if r.Get("event_id").Str == event1 { + t.Fatalf("We should not see the %s event in /sync response", event1) + } + + return r.Get("event_id").Str == eventStar + }) + }) + + t.Run("Backfilled events without m.historical come down /sync", func(t *testing.T) { + t.Parallel() + + roomID := alice.CreateRoom(t, struct{}{}) + eventA, _, _, timeAfterEventA := createMessagesInRoom(t, alice, roomID) + insertOriginServerTs := uint64(timeAfterEventA.UnixNano() / 1000000) + + alice.SendEventSynced(t, roomID, b.Event{ + Type: "m.room.message", + PrevEvents: []string{ + eventA, + }, + OriginServerTS: insertOriginServerTs, + Content: map[string]interface{}{ + "msgtype": "m.text", + "body": "Message 1", + }, + }) + }) + }) +} + +func createMessagesInRoom(t *testing.T, c *client.CSAPI, roomID string) (string, string, string, time.Time) { // eventA - eventA := alice.SendEventSynced(t, roomID, b.Event{ + eventA := c.SendEventSynced(t, roomID, b.Event{ Type: "m.room.message", Content: map[string]interface{}{ "msgtype": "m.text", @@ -36,14 +148,13 @@ func TestBackfillingHistory(t *testing.T) { }, }) - insertTime := time.Now() - insertOriginServerTs := uint64(insertTime.UnixNano() / 1000000) + timeAfterEventA := time.Now() // wait 3ms to ensure that the timestamp changes enough for each of the 3 message we try to insert later time.Sleep(3 * time.Millisecond) // eventB - eventB := alice.SendEventSynced(t, roomID, b.Event{ + eventB := c.SendEventSynced(t, roomID, b.Event{ Type: "m.room.message", Content: map[string]interface{}{ "msgtype": "m.text", @@ -51,7 +162,7 @@ func TestBackfillingHistory(t *testing.T) { }, }) // eventC - eventC := alice.SendEventSynced(t, roomID, b.Event{ + eventC := c.SendEventSynced(t, roomID, b.Event{ Type: "m.room.message", Content: map[string]interface{}{ "msgtype": "m.text", @@ -59,11 +170,17 @@ func TestBackfillingHistory(t *testing.T) { }, }) + return eventA, eventB, eventC, timeAfterEventA +} + +func backfillMessagesAtTime(t *testing.T, c *client.CSAPI, roomID string, insertAfterEvent string, insertTime time.Time) (string, string, string) { + insertOriginServerTs := uint64(insertTime.UnixNano() / 1000000) + // event1 - event1 := alice.SendEvent(t, roomID, b.Event{ + event1 := c.SendEvent(t, roomID, b.Event{ Type: "m.room.message", PrevEvents: []string{ - eventA, + insertAfterEvent, }, OriginServerTS: insertOriginServerTs, Content: map[string]interface{}{ @@ -74,7 +191,7 @@ func TestBackfillingHistory(t *testing.T) { }) // event2 - event2 := alice.SendEvent(t, roomID, b.Event{ + event2 := c.SendEvent(t, roomID, b.Event{ Type: "m.room.message", PrevEvents: []string{ event1, @@ -88,7 +205,7 @@ func TestBackfillingHistory(t *testing.T) { }) // event3 - event3 := alice.SendEvent(t, roomID, b.Event{ + event3 := c.SendEvent(t, roomID, b.Event{ Type: "m.room.message", PrevEvents: []string{ event2, @@ -101,46 +218,5 @@ func TestBackfillingHistory(t *testing.T) { }, }) - // eventStar - eventStar := alice.SendEventSynced(t, roomID, b.Event{ - Type: "m.room.message", - Content: map[string]interface{}{ - "msgtype": "m.text", - "body": "Message *", - }, - }) - - t.Run("parallel", func(t *testing.T) { - t.Run("Backfilled messages come back in correct order", func(t *testing.T) { - t.Parallel() - - messagesRes := alice.MustDoRaw(t, "GET", 
[]string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, nil, "application/json", url.Values{ - "dir": []string{"b"}, - "limit": []string{"100"}, - }) - - expectedMessageOrder := []string{ - eventStar, eventC, eventB, event3, event2, event1, eventA, - } - - must.MatchResponse(t, messagesRes, match.HTTPResponse{ - JSON: []match.JSON{ - match.JSONArrayEach("chunk", func(r gjson.Result) error { - // Find all events in order - if len(r.Get("content").Get("body").Str) > 0 { - // Pop the next message off the expected list - nextEventInOrder := expectedMessageOrder[0] - expectedMessageOrder = expectedMessageOrder[1:] - - if r.Get("event_id").Str != nextEventInOrder { - return fmt.Errorf("Next event found was %s but expected %s", r.Get("event_id").Str, nextEventInOrder) - } - } - - return nil - }), - }, - }) - }) - }) + return event1, event2, event3 } From 8bbb929c5a659b193429770c7a2e8d9ff5c662bb Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Fri, 5 Feb 2021 00:17:27 -0600 Subject: [PATCH 09/81] Update docs --- README.md | 6 +++--- tests/msc2716_test.go | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 82e10694..df149905 100644 --- a/README.md +++ b/README.md @@ -5,13 +5,13 @@ Complement is a black box integration testing framework for Matrix homeservers. -## adsf +#### Getting started To get started developing, see https://github.com/matrix-org/complement/blob/master/ONBOARDING.md -If you're looking to run Complement against a local dev instance of Synapse, see [`matrix-org/synapse` -> `scripts-dev/complement.sh`](https://github.com/matrix-org/synapse/blob/develop/scripts-dev/complement.sh) +If you're looking to run Complement against a local dev instance of Synapse, see [`matrix-org/synapse` -> `scripts-dev/complement.sh`](https://github.com/matrix-org/synapse/blob/develop/scripts-dev/complement.sh). -If you want to develop Complement tests while working on a local dev instance of Synapse, edit `scripts-dev/complement.sh` to point to your local Complement checkout. +If you want to develop Complement tests while working on a local dev instance of Synapse, edit [`scripts-dev/complement.sh`](https://github.com/matrix-org/synapse/blob/develop/scripts-dev/complement.sh) to point to your local Complement checkout (`cd ../complement`) instead of downloading from GitHub. #### Running diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go index df1538a4..20dd7eae 100644 --- a/tests/msc2716_test.go +++ b/tests/msc2716_test.go @@ -105,11 +105,11 @@ func TestBackfillingHistory(t *testing.T) { }) // Sync until we find the star message. 
If we're able to see the star message - // after event1 without seeing event1 in the mean-time, I think we're safe to + // that occurs after event1 without seeing event1 in the mean-time, I think we're safe to // assume it won't sync alice.SyncUntil(t, "", "rooms.join."+client.GjsonEscape(roomID)+".timeline.events", func(r gjson.Result) bool { if r.Get("event_id").Str == event1 { - t.Fatalf("We should not see the %s event in /sync response", event1) + t.Fatalf("We should not see the %s event in /sync response but it was present", event1) } return r.Get("event_id").Str == eventStar From 22292b50d4e066c80a4e2bc1ba4c222bb0796f71 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Fri, 5 Feb 2021 00:19:47 -0600 Subject: [PATCH 10/81] Remove docs already moved to #70 --- README.md | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/README.md b/README.md index df149905..70f30e67 100644 --- a/README.md +++ b/README.md @@ -1,19 +1,7 @@ -[![Complement Dev](https://img.shields.io/matrix/complement:matrix.org.svg?label=%23complement%3Amatrix.org&logo=matrix&server_fqdn=matrix.org)](https://matrix.to/#/#complement:matrix.org) - ### Complement Complement is a black box integration testing framework for Matrix homeservers. - -#### Getting started - -To get started developing, see https://github.com/matrix-org/complement/blob/master/ONBOARDING.md - -If you're looking to run Complement against a local dev instance of Synapse, see [`matrix-org/synapse` -> `scripts-dev/complement.sh`](https://github.com/matrix-org/synapse/blob/develop/scripts-dev/complement.sh). - -If you want to develop Complement tests while working on a local dev instance of Synapse, edit [`scripts-dev/complement.sh`](https://github.com/matrix-org/synapse/blob/develop/scripts-dev/complement.sh) to point to your local Complement checkout (`cd ../complement`) instead of downloading from GitHub. - - #### Running You need to have Go and Docker installed. 
Then: From 1675fecebc13c3d919200dd0a26d549d4102b755 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Tue, 9 Feb 2021 16:01:22 -0600 Subject: [PATCH 11/81] Enable msc2716 feature flag --- dockerfiles/synapse/homeserver.yaml | 2 ++ dockerfiles/synapse/workers-shared.yaml | 2 ++ 2 files changed, 4 insertions(+) diff --git a/dockerfiles/synapse/homeserver.yaml b/dockerfiles/synapse/homeserver.yaml index a2f6f309..64f28bea 100644 --- a/dockerfiles/synapse/homeserver.yaml +++ b/dockerfiles/synapse/homeserver.yaml @@ -97,3 +97,5 @@ federation_rr_transactions_per_room_per_second: 9999 experimental_features: # Enable knocking support msc2403_enabled: true + # Enable history backfilling support + msc2716_enabled: true diff --git a/dockerfiles/synapse/workers-shared.yaml b/dockerfiles/synapse/workers-shared.yaml index 12f88cff..a589707d 100644 --- a/dockerfiles/synapse/workers-shared.yaml +++ b/dockerfiles/synapse/workers-shared.yaml @@ -63,3 +63,5 @@ federation_rr_transactions_per_room_per_second: 9999 experimental_features: # Enable knocking support msc2403_enabled: true + # Enable history backfilling support + msc2716_enabled: true From 54061cb16aca023fe35bf5bd68d95a6eabd03cb1 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 10 Feb 2021 17:19:17 -0600 Subject: [PATCH 12/81] Move SendEvent to the only test file it's used in --- internal/client/client.go | 30 ++++-------------------------- tests/msc2716_test.go | 39 ++++++++++++++++++++++++++++++++++----- 2 files changed, 38 insertions(+), 31 deletions(-) diff --git a/internal/client/client.go b/internal/client/client.go index a96e08af..28ef89d5 100644 --- a/internal/client/client.go +++ b/internal/client/client.go @@ -84,40 +84,18 @@ func (c *CSAPI) JoinRoom(t *testing.T, roomIDOrAlias string, serverNames []strin return GetJSONFieldStr(t, body, "room_id") } -func (c *CSAPI) SendEvent(t *testing.T, roomID string, e b.Event) string { +// SendEventSynced sends `e` into the room and waits for its event ID to come down /sync. +// Returns the event ID of the sent event. +func (c *CSAPI) SendEventSynced(t *testing.T, roomID string, e b.Event) string { t.Helper() c.txnID++ paths := []string{"_matrix", "client", "r0", "rooms", roomID, "send", e.Type, strconv.Itoa(c.txnID)} if e.StateKey != nil { paths = []string{"_matrix", "client", "r0", "rooms", roomID, "state", e.Type, *e.StateKey} } - - query := make(url.Values, len(e.PrevEvents)) - for _, prevEvent := range e.PrevEvents { - query.Add("prev_event", prevEvent) - } - - if e.OriginServerTS != 0 { - query.Add("ts", strconv.FormatUint(e.OriginServerTS, 10)) - } - - b, err := json.Marshal(e.Content) - if err != nil { - t.Fatalf("CSAPI.Do failed to marshal JSON body: %s", err) - } - - res := c.MustDoRaw(t, "PUT", paths, b, "application/json", query) + res := c.MustDo(t, "PUT", paths, e.Content) body := ParseJSON(t, res) eventID := GetJSONFieldStr(t, body, "event_id") - - return eventID -} - -// SendEventSynced sends `e` into the room and waits for its event ID to come down /sync. -// Returns the event ID of the sent event. 
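An aside on the relocation happening in this commit: the helper that reappears in the test file below keeps its own counter under a "msc2716-txn" prefix, so the transaction IDs it mints can never collide with the bare integer transaction IDs that CSAPI's own counter issues. A self-contained sketch of that scheme, where nextTxnID is an illustrative name rather than a real helper in the codebase:

    package main

    import (
    	"fmt"
    	"strconv"
    )

    var txnID int
    var txnPrefix = "msc2716-txn"

    // nextTxnID mirrors how the test-local sendEvent builds transaction IDs:
    // a distinct prefix plus a package-level counter, disjoint from CSAPI's
    // unprefixed integer txnIDs.
    func nextTxnID() string {
    	txnID++
    	return txnPrefix + strconv.Itoa(txnID)
    }

    func main() {
    	fmt.Println(nextTxnID()) // msc2716-txn1
    	fmt.Println(nextTxnID()) // msc2716-txn2
    }
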
-func (c *CSAPI) SendEventSynced(t *testing.T, roomID string, e b.Event) string { - t.Helper() - eventID := c.SendEvent(t, roomID, e) t.Logf("SendEventSynced waiting for event ID %s", eventID) c.SyncUntilTimelineHas(t, roomID, func(r gjson.Result) bool { return r.Get("event_id").Str == eventID diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go index 20dd7eae..6f4c74eb 100644 --- a/tests/msc2716_test.go +++ b/tests/msc2716_test.go @@ -7,8 +7,10 @@ package tests import ( + "encoding/json" "fmt" "net/url" + "strconv" "testing" "time" @@ -82,7 +84,7 @@ func TestBackfillingHistory(t *testing.T) { insertOriginServerTs := uint64(timeAfterEventA.UnixNano() / 1000000) // If we see this message in the /sync, then something went wrong - event1 := alice.SendEvent(t, roomID, b.Event{ + event1 := sendEvent(t, alice, roomID, b.Event{ Type: "m.room.message", PrevEvents: []string{ eventA, @@ -96,7 +98,7 @@ func TestBackfillingHistory(t *testing.T) { }) // This is just a dummy event we search for after event1 - eventStar := alice.SendEvent(t, roomID, b.Event{ + eventStar := sendEvent(t, alice, roomID, b.Event{ Type: "m.room.message", Content: map[string]interface{}{ "msgtype": "m.text", @@ -138,6 +140,33 @@ func TestBackfillingHistory(t *testing.T) { }) } +var txnID int = 0 +var txnPrefix string = "msc2716-txn" + +func sendEvent(t *testing.T, c *client.CSAPI, roomID string, e b.Event) string { + txnID++ + + query := make(url.Values, len(e.PrevEvents)) + for _, prevEvent := range e.PrevEvents { + query.Add("prev_event", prevEvent) + } + + if e.OriginServerTS != 0 { + query.Add("ts", strconv.FormatUint(e.OriginServerTS, 10)) + } + + b, err := json.Marshal(e.Content) + if err != nil { + t.Fatalf("msc2716.sendEvent failed to marshal JSON body: %s", err) + } + + res := c.MustDoRaw(t, "PUT", []string{"_matrix", "client", "r0", "rooms", roomID, "send", e.Type, txnPrefix + strconv.Itoa(txnID)}, b, "application/json", query) + body := client.ParseJSON(t, res) + eventID := client.GetJSONFieldStr(t, body, "event_id") + + return eventID +} + func createMessagesInRoom(t *testing.T, c *client.CSAPI, roomID string) (string, string, string, time.Time) { // eventA eventA := c.SendEventSynced(t, roomID, b.Event{ @@ -177,7 +206,7 @@ func backfillMessagesAtTime(t *testing.T, c *client.CSAPI, roomID string, insert insertOriginServerTs := uint64(insertTime.UnixNano() / 1000000) // event1 - event1 := c.SendEvent(t, roomID, b.Event{ + event1 := sendEvent(t, c, roomID, b.Event{ Type: "m.room.message", PrevEvents: []string{ insertAfterEvent, @@ -191,7 +220,7 @@ func backfillMessagesAtTime(t *testing.T, c *client.CSAPI, roomID string, insert }) // event2 - event2 := c.SendEvent(t, roomID, b.Event{ + event2 := sendEvent(t, c, roomID, b.Event{ Type: "m.room.message", PrevEvents: []string{ event1, @@ -205,7 +234,7 @@ func backfillMessagesAtTime(t *testing.T, c *client.CSAPI, roomID string, insert }) // event3 - event3 := c.SendEvent(t, roomID, b.Event{ + event3 := sendEvent(t, c, roomID, b.Event{ Type: "m.room.message", PrevEvents: []string{ event2, From 397331257ce8621ee670879b24ac5cc43d608784 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Fri, 12 Feb 2021 03:48:30 -0600 Subject: [PATCH 13/81] Initial stab of defining AS in blueprint --- dockerfiles/synapse/as-registration.yaml | 10 +++++ dockerfiles/synapse/homeserver.yaml | 7 ++++ internal/b/blueprints.go | 9 +++++ internal/b/hs_with_application_service.go | 25 ++++++++++++ internal/docker/builder.go | 46 ++++++++++++++++++++--- internal/docker/deployment.go | 9 +++-- 6 
files changed, 97 insertions(+), 9 deletions(-) create mode 100644 dockerfiles/synapse/as-registration.yaml create mode 100644 internal/b/hs_with_application_service.go diff --git a/dockerfiles/synapse/as-registration.yaml b/dockerfiles/synapse/as-registration.yaml new file mode 100644 index 00000000..9026ba3c --- /dev/null +++ b/dockerfiles/synapse/as-registration.yaml @@ -0,0 +1,10 @@ +id: 24c97215fcec1025b19df73c574b5bf5b3d69f1648921461ac9beabaf11466b7 +hs_token: 27562ff25dd2eb69361ac1eb67e3a3cd38ab9509c1483234ec8dfec0f247c73e +as_token: f872531e387377686989e792c723e646f7823643e747a0521e94770a721f40fc +url: 'http://localhost:9000' +sender_localpart: the-bridge-user +rate_limited: false +namespaces: + users: [] + rooms: [] + aliases: [] diff --git a/dockerfiles/synapse/homeserver.yaml b/dockerfiles/synapse/homeserver.yaml index 64f28bea..6aa7f192 100644 --- a/dockerfiles/synapse/homeserver.yaml +++ b/dockerfiles/synapse/homeserver.yaml @@ -92,6 +92,13 @@ rc_joins: federation_rr_transactions_per_room_per_second: 9999 +## API Configuration ## + +# A list of application service config files to use +# +app_service_config_files: + AS_REGISTRATION_FILES + ## Experimental Features ## experimental_features: diff --git a/internal/b/blueprints.go b/internal/b/blueprints.go index 075963eb..3c5477b9 100644 --- a/internal/b/blueprints.go +++ b/internal/b/blueprints.go @@ -46,6 +46,8 @@ type Homeserver struct { Users []User // The list of rooms to create on this homeserver Rooms []Room + // The list of application services to create on the homeserver + ApplicationServices []ApplicationService } type User struct { @@ -68,6 +70,13 @@ type Room struct { Events []Event } +type ApplicationService struct { + ID string + URL string + SenderLocalpart string + RateLimited bool +} + type Event struct { Type string Sender string diff --git a/internal/b/hs_with_application_service.go b/internal/b/hs_with_application_service.go new file mode 100644 index 00000000..1b4abf42 --- /dev/null +++ b/internal/b/hs_with_application_service.go @@ -0,0 +1,25 @@ +package b + +// BlueprintHSWithApplicationService who has an application service to interact with +var BlueprintHSWithApplicationService = MustValidate(Blueprint{ + Name: "alice", + Homeservers: []Homeserver{ + { + Name: "hs1", + Users: []User{ + { + Localpart: "@alice", + DisplayName: "Alice", + }, + }, + ApplicationServices: []ApplicationService{ + { + ID: "my-as-id", + URL: "http://localhost:9000", + SenderLocalpart: "the-bridge-user", + RateLimited: false, + }, + }, + }, + }, +}) diff --git a/internal/docker/builder.go b/internal/docker/builder.go index a8b45ce6..082ec208 100644 --- a/internal/docker/builder.go +++ b/internal/docker/builder.go @@ -251,6 +251,7 @@ func (d *Builder) construct(bprint b.Blueprint) (errs []error) { runner := instruction.NewRunner(bprint.Name, d.debugLogging) results := make([]result, len(bprint.Homeservers)) for i, hs := range bprint.Homeservers { + res := d.constructHomeserver(bprint.Name, runner, hs, networkID) if res.err != nil { errs = append(errs, res.err) @@ -259,6 +260,20 @@ func (d *Builder) construct(bprint b.Blueprint) (errs []error) { printLogs(d.Docker, res.containerID, res.contextStr) } } + + // Create the application service files + for _, as := range hs.ApplicationServices { + yamlRegistrationContent := generateASRegistrationYaml(as) + err := d.Docker.CopyToContainer(context.Background(), res.containerID, "/conf/${as.ID}.yaml", strings.NewReader(yamlRegistrationContent), types.CopyToContainerOptions{ + 
AllowOverwriteDirWithFile: false,
+			})
+
+			if err != nil {
+				errs = append(errs, err)
+				d.log("Failed to add application service registration file %s in %s\n", as.ID, res.containerID)
+			}
+		}
+
 		// kill the container
 		defer func(r result) {
 			killErr := d.Docker.ContainerKill(context.Background(), r.containerID, "KILL")
@@ -300,7 +315,7 @@ func (d *Builder) construct(bprint b.Blueprint) (errs []error) {
 func (d *Builder) constructHomeserver(blueprintName string, runner *instruction.Runner, hs b.Homeserver, networkID string) result {
 	contextStr := fmt.Sprintf("%s.%s", blueprintName, hs.Name)
 	d.log("%s : constructing homeserver...\n", contextStr)
-	dep, err := d.deployBaseImage(blueprintName, hs.Name, contextStr, networkID)
+	dep, err := d.deployBaseImage(blueprintName, hs, contextStr, networkID)
 	if err != nil {
 		log.Printf("%s : failed to deployBaseImage: %s\n", contextStr, err)
 		containerID := ""
@@ -328,9 +343,9 @@
 }
 
 // deployBaseImage runs the base image and returns the baseURL, containerID or an error.
-func (d *Builder) deployBaseImage(blueprintName, hsName, contextStr, networkID string) (*HomeserverDeployment, error) {
+func (d *Builder) deployBaseImage(blueprintName, hs b.Homeserver, contextStr, networkID string) (*HomeserverDeployment, error) {
 	return deployImage(
-		d.Docker, d.BaseImage, d.CSAPIPort, fmt.Sprintf("complement_%s", contextStr), blueprintName, hsName, contextStr,
+		d.Docker, d.BaseImage, d.CSAPIPort, fmt.Sprintf("complement_%s", contextStr), blueprintName, hs, contextStr,
 		networkID, d.config.VersionCheckIterations,
 	)
 }
@@ -409,8 +424,21 @@ func getCaVolume(docker *client.Client, ctx context.Context) (map[string]struct{
 	return caVolume, caMount, nil
 }
 
+func generateASRegistrationYaml(as b.ApplicationService) string {
+	return fmt.Sprintf("id: %s\n", as.ID) +
+		"hs_token: 27562ff25dd2eb69361ac1eb67e3a3cd38ab9509c1483234ec8dfec0f247c73e\n" +
+		"as_token: f872531e387377686989e792c723e646f7823643e747a0521e94770a721f40fc\n" +
+		fmt.Sprintf("url: '%s'\n", as.URL) +
+		fmt.Sprintf("sender_localpart: %s\n", as.SenderLocalpart) +
+		fmt.Sprintf("rate_limited: %v\n", as.RateLimited) +
+		"namespaces:\n" +
+		"  users: []\n" +
+		"  rooms: []\n" +
+		"  aliases: []\n"
+}
+
 func deployImage(
-	docker *client.Client, imageID string, csPort int, containerName, blueprintName, hsName, contextStr, networkID string, versionCheckIterations int,
+	docker *client.Client, imageID string, csPort int, containerName, blueprintName, hs b.Homeserver, contextStr, networkID string, versionCheckIterations int,
 ) (*HomeserverDeployment, error) {
 	ctx := context.Background()
 	var extraHosts []string
 	var caVolume map[string]struct{}
 	var caMount []mount.Mount
 	var err error
 
+	hsName = hs.Name
+
 	if runtime.GOOS == "linux" {
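Rendered with the application service definition from BlueprintHSWithApplicationService, the generator above would produce a registration roughly like the one printed below. This is a self-contained sketch: the struct is a local stand-in for b.ApplicationService, and the hs_token/as_token values are the fixed placeholders used throughout this branch.

    package main

    import "fmt"

    // applicationService is a local stand-in for b.ApplicationService.
    type applicationService struct {
    	ID              string
    	URL             string
    	SenderLocalpart string
    	RateLimited     bool
    }

    func registrationYAML(as applicationService) string {
    	return fmt.Sprintf("id: %s\n", as.ID) +
    		"hs_token: 27562ff25dd2eb69361ac1eb67e3a3cd38ab9509c1483234ec8dfec0f247c73e\n" +
    		"as_token: f872531e387377686989e792c723e646f7823643e747a0521e94770a721f40fc\n" +
    		fmt.Sprintf("url: '%s'\n", as.URL) +
    		fmt.Sprintf("sender_localpart: %s\n", as.SenderLocalpart) +
    		fmt.Sprintf("rate_limited: %v\n", as.RateLimited) +
    		"namespaces:\n  users: []\n  rooms: []\n  aliases: []\n"
    }

    func main() {
    	// Values taken from the blueprint added in this patch.
    	fmt.Print(registrationYAML(applicationService{
    		ID:              "my-as-id",
    		URL:             "http://localhost:9000",
    		SenderLocalpart: "the-bridge-user",
    		RateLimited:     false,
    	}))
    }

Note that YAML forbids tabs for indentation, so the namespaces block must be indented with spaces for Synapse to parse the file.

 		// By default docker for linux does not expose this, so do it now.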
// When https://github.com/moby/moby/pull/40007 lands in Docker 20, we should @@ -434,7 +464,12 @@ func deployImage( body, err := docker.ContainerCreate(ctx, &container.Config{ Image: imageID, - Env: []string{"SERVER_NAME=" + hsName, "COMPLEMENT_CA=" + os.Getenv("COMPLEMENT_CA")}, + Env: []string{ + "SERVER_NAME=" + hsName, + "COMPLEMENT_CA=" + os.Getenv("COMPLEMENT_CA"), + // TODO + "AS_REGISTRATION_FILES=" + hs.ApplicationServices + }, //Cmd: d.ImageArgs, Labels: map[string]string{ complementLabel: contextStr, @@ -493,6 +528,7 @@ func deployImage( FedBaseURL: fedBaseURL, ContainerID: containerID, AccessTokens: tokensFromLabels(inspect.Config.Labels), + //ApplicationServices } if lastErr != nil { return d, fmt.Errorf("%s: failed to check server is up. %w", contextStr, lastErr) diff --git a/internal/docker/deployment.go b/internal/docker/deployment.go index dd568db2..40b98450 100644 --- a/internal/docker/deployment.go +++ b/internal/docker/deployment.go @@ -20,10 +20,11 @@ type Deployment struct { // HomeserverDeployment represents a running homeserver in a container. type HomeserverDeployment struct { - BaseURL string // e.g http://localhost:38646 - FedBaseURL string // e.g https://localhost:48373 - ContainerID string // e.g 10de45efba - AccessTokens map[string]string // e.g { "@alice:hs1": "myAcc3ssT0ken" } + BaseURL string // e.g http://localhost:38646 + FedBaseURL string // e.g https://localhost:48373 + ContainerID string // e.g 10de45efba + AccessTokens map[string]string // e.g { "@alice:hs1": "myAcc3ssT0ken" } + ApplicationServices map[string]map[string]string // e.g { "my-as-id": { "hs_token": "xxx", "as_token": "xxx" } } } // Destroy the entire deployment. Destroys all running containers. If `printServerLogs` is true, From 5a71989eca2dbc1100ddd3dbc99db612c9c902ee Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Fri, 12 Feb 2021 22:51:00 -0600 Subject: [PATCH 14/81] More poking at defining app services --- dockerfiles/synapse/start.sh | 12 ++++++++ internal/b/blueprints.go | 1 + internal/docker/builder.go | 58 +++++++++++++++++++++++++++-------- internal/docker/deployer.go | 4 ++- internal/docker/deployment.go | 10 +++--- tests/msc2716_test.go | 2 +- 6 files changed, 67 insertions(+), 20 deletions(-) diff --git a/dockerfiles/synapse/start.sh b/dockerfiles/synapse/start.sh index 6e35b4dd..543538e7 100755 --- a/dockerfiles/synapse/start.sh +++ b/dockerfiles/synapse/start.sh @@ -4,6 +4,18 @@ set -e sed -i "s/SERVER_NAME/${SERVER_NAME}/g" /conf/homeserver.yaml +for as_id in $AS_REGISTRATION_IDS +do + touch "/conf/${as_id}.yaml" + echo "id: ${as_id}\nhs_token: 123abc\nas_token: 123abc" > "/conf/${as_id}.yaml" + # Insert the registration file and the AS_REGISTRATION_FILES marker in order + # to add other application services in the next iteration of the loop + sed -i "s/AS_REGISTRATION_FILES/ - \/conf\/${as_id}.yaml\nAS_REGISTRATION_FILES/g" /conf/homeserver.yaml +done +# Remove the AS_REGISTRATION_FILES entry +sed -i "s/AS_REGISTRATION_FILES//g" /conf/homeserver.yaml + + # generate an ssl cert for the server, signed by our dummy CA openssl req -new -key /conf/server.tls.key -out /conf/server.tls.csr \ -subj "/CN=${SERVER_NAME}" diff --git a/internal/b/blueprints.go b/internal/b/blueprints.go index 3c5477b9..d1034c77 100644 --- a/internal/b/blueprints.go +++ b/internal/b/blueprints.go @@ -26,6 +26,7 @@ var KnownBlueprints = map[string]*Blueprint{ BlueprintAlice.Name: &BlueprintAlice, BlueprintFederationOneToOneRoom.Name: &BlueprintFederationOneToOneRoom, 
BlueprintFederationTwoLocalOneRemote.Name: &BlueprintFederationTwoLocalOneRemote,
+	BlueprintHSWithApplicationService.Name:    &BlueprintHSWithApplicationService,
 	BlueprintOneToOneRoom.Name:                &BlueprintOneToOneRoom,
 	BlueprintPerfManyMessages.Name:            &BlueprintPerfManyMessages,
 	BlueprintPerfManyRooms.Name:               &BlueprintPerfManyRooms,
diff --git a/internal/docker/builder.go b/internal/docker/builder.go
index 082ec208..a9aa1c2d 100644
--- a/internal/docker/builder.go
+++ b/internal/docker/builder.go
@@ -251,6 +251,7 @@ func (d *Builder) construct(bprint b.Blueprint) (errs []error) {
 	runner := instruction.NewRunner(bprint.Name, d.debugLogging)
 	results := make([]result, len(bprint.Homeservers))
 	for i, hs := range bprint.Homeservers {
+		log.Printf("nbtbtrtbrddbtrbtrd %s", idsFromApplicationServices(hs.ApplicationServices))
 		res := d.constructHomeserver(bprint.Name, runner, hs, networkID)
 		if res.err != nil {
 			errs = append(errs, res.err)
@@ -261,6 +262,7 @@ func (d *Builder) construct(bprint b.Blueprint) (errs []error) {
 		}
 	}
 
+	log.Printf("hhtrhtddhtrhdhrt %s", idsFromApplicationServices(hs.ApplicationServices))
 	// Create the application service files
 	for _, as := range hs.ApplicationServices {
 		yamlRegistrationContent := generateASRegistrationYaml(as)
@@ -291,6 +293,11 @@ func (d *Builder) construct(bprint b.Blueprint) (errs []error) {
 	}
 
 	labels := labelsForTokens(runner.AccessTokens(res.homeserver.Name))
+	asLabels := labelsForApplicationServices(res.homeserver.ApplicationServices)
+	for k, v := range asLabels {
+		labels[k] = v
+	}
+
 	// commit the container
 	commit, err := d.Docker.ContainerCommit(context.Background(), res.containerID, types.ContainerCommitOptions{
 		Author: "Complement",
@@ -343,9 +350,9 @@
 }
 
 // deployBaseImage runs the base image and returns the baseURL, containerID or an error.
-func (d *Builder) deployBaseImage(blueprintName, hs b.Homeserver, contextStr, networkID string) (*HomeserverDeployment, error) {
+func (d *Builder) deployBaseImage(blueprintName string, hs b.Homeserver, contextStr, networkID string) (*HomeserverDeployment, error) {
 	return deployImage(
-		d.Docker, d.BaseImage, d.CSAPIPort, fmt.Sprintf("complement_%s", contextStr), blueprintName, hs, contextStr,
+		d.Docker, d.BaseImage, d.CSAPIPort, fmt.Sprintf("complement_%s", contextStr), blueprintName, hs.Name, idsFromApplicationServices(hs.ApplicationServices), contextStr,
 		networkID, d.config.VersionCheckIterations,
 	)
 }
@@ -437,8 +444,17 @@ func generateASRegistrationYaml(as b.ApplicationService) string {
 		"  aliases: []\n"
 }
 
+func idsFromApplicationServices(asList []b.ApplicationService) []string {
+	ids := make([]string, len(asList))
+	for i, as := range asList {
+		ids[i] = as.ID
+	}
+
+	return ids
+}
+
 func deployImage(
-	docker *client.Client, imageID string, csPort int, containerName, blueprintName, hs b.Homeserver, contextStr, networkID string, versionCheckIterations int,
+	docker *client.Client, imageID string, csPort int, containerName, blueprintName, hsName string, hsApplicationServiceIDs []string, contextStr, networkID string, versionCheckIterations int,
 ) (*HomeserverDeployment, error) {
 	ctx := context.Background()
 	var extraHosts []string
 	var caVolume map[string]struct{}
 	var caMount []mount.Mount
 	var err error
 
-	hsName = hs.Name
-
 	if runtime.GOOS == "linux" {
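To make the container-side contract concrete, here is a minimal sketch of the environment this version of deployImage hands to the Synapse image (hostname and IDs are illustrative). start.sh splits AS_REGISTRATION_IDS on spaces and writes one /conf/<id>.yaml stub per entry, which is why the IDs must not contain spaces.

    package main

    import (
    	"fmt"
    	"strings"
    )

    func main() {
    	// Illustrative values; in the builder these come from the blueprint's
    	// homeserver name and from idsFromApplicationServices.
    	hsName := "hs1"
    	asIDs := []string{"my-as-id"}

    	env := []string{
    		"SERVER_NAME=" + hsName,
    		"AS_REGISTRATION_IDS=" + strings.Join(asIDs, " "),
    	}
    	fmt.Println(env)
    }

 		// By default docker for linux does not expose this, so do it now.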
// When https://github.com/moby/moby/pull/40007 lands in Docker 20, we should @@ -464,11 +478,10 @@ func deployImage( body, err := docker.ContainerCreate(ctx, &container.Config{ Image: imageID, - Env: []string{ + Env: []string{ "SERVER_NAME=" + hsName, "COMPLEMENT_CA=" + os.Getenv("COMPLEMENT_CA"), - // TODO - "AS_REGISTRATION_FILES=" + hs.ApplicationServices + "AS_REGISTRATION_IDS=" + strings.Join(hsApplicationServiceIDs, " "), }, //Cmd: d.ImageArgs, Labels: map[string]string{ @@ -524,11 +537,11 @@ func deployImage( break } d := &HomeserverDeployment{ - BaseURL: baseURL, - FedBaseURL: fedBaseURL, - ContainerID: containerID, - AccessTokens: tokensFromLabels(inspect.Config.Labels), - //ApplicationServices + BaseURL: baseURL, + FedBaseURL: fedBaseURL, + ContainerID: containerID, + AccessTokens: tokensFromLabels(inspect.Config.Labels), + ApplicationServices: applicationServiceIDsFromLabels(inspect.Config.Labels), } if lastErr != nil { return d, fmt.Errorf("%s: failed to check server is up. %w", contextStr, lastErr) @@ -602,6 +615,25 @@ func labelsForTokens(userIDToToken map[string]string) map[string]string { return labels } +func applicationServiceIDsFromLabels(labels map[string]string) []string { + asIDs := make([]string, 0) + for k, v := range labels { + if strings.HasPrefix(k, "access_token_") { + asIDs = append(asIDs, v) + } + } + return asIDs +} + +func labelsForApplicationServices(asList []b.ApplicationService) map[string]string { + labels := make(map[string]string) + // collect and store access tokens as labels 'access_token_$userid: $token' + for _, as := range asList { + labels["application_service_"+as.ID] = as.ID + } + return labels +} + func endpoints(p nat.PortMap, csPort, ssPort int) (baseURL, fedBaseURL string, err error) { csapiPort := fmt.Sprintf("%d/tcp", csPort) csapiPortInfo, ok := p[nat.Port(csapiPort)] diff --git a/internal/docker/deployer.go b/internal/docker/deployer.go index ca473135..3fa76575 100644 --- a/internal/docker/deployer.go +++ b/internal/docker/deployer.go @@ -81,10 +81,12 @@ func (d *Deployer) Deploy(ctx context.Context, blueprintName string) (*Deploymen d.Counter++ contextStr := img.Labels["complement_context"] hsName := img.Labels["complement_hs_name"] + asIDs := applicationServiceIDsFromLabels(img.Labels) + // TODO: Make CSAPI port configurable deployment, err := deployImage( d.Docker, img.ID, 8008, fmt.Sprintf("complement_%s_%s_%d", d.Namespace, contextStr, d.Counter), - blueprintName, hsName, contextStr, networkID, d.config.VersionCheckIterations) + blueprintName, hsName, asIDs, contextStr, networkID, d.config.VersionCheckIterations) if err != nil { if deployment != nil && deployment.ContainerID != "" { // print logs to help debug diff --git a/internal/docker/deployment.go b/internal/docker/deployment.go index 40b98450..ec1ecb1f 100644 --- a/internal/docker/deployment.go +++ b/internal/docker/deployment.go @@ -20,11 +20,11 @@ type Deployment struct { // HomeserverDeployment represents a running homeserver in a container. 
type HomeserverDeployment struct { - BaseURL string // e.g http://localhost:38646 - FedBaseURL string // e.g https://localhost:48373 - ContainerID string // e.g 10de45efba - AccessTokens map[string]string // e.g { "@alice:hs1": "myAcc3ssT0ken" } - ApplicationServices map[string]map[string]string // e.g { "my-as-id": { "hs_token": "xxx", "as_token": "xxx" } } + BaseURL string // e.g http://localhost:38646 + FedBaseURL string // e.g https://localhost:48373 + ContainerID string // e.g 10de45efba + AccessTokens map[string]string // e.g { "@alice:hs1": "myAcc3ssT0ken" } + ApplicationServices []string // e.g ["my-as-id"] } // Destroy the entire deployment. Destroys all running containers. If `printServerLogs` is true, diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go index 6f4c74eb..9d8a6fcf 100644 --- a/tests/msc2716_test.go +++ b/tests/msc2716_test.go @@ -23,7 +23,7 @@ import ( // Test that the message events we insert between A and B come back in the correct order from /messages func TestBackfillingHistory(t *testing.T) { - deployment := Deploy(t, "rooms_state", b.BlueprintAlice) + deployment := Deploy(t, "rooms_state", b.BlueprintHSWithApplicationService) defer deployment.Destroy(t) userID := "@alice:hs1" From c607f8dfeaf25fa79f8d3c52a9960d17cf797eb8 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Sun, 14 Feb 2021 18:27:31 -0600 Subject: [PATCH 15/81] More app service changes --- internal/docker/builder.go | 57 +++++++++++++++++------------------ internal/docker/deployer.go | 4 +-- internal/docker/deployment.go | 2 +- 3 files changed, 31 insertions(+), 32 deletions(-) diff --git a/internal/docker/builder.go b/internal/docker/builder.go index a9aa1c2d..b241c472 100644 --- a/internal/docker/builder.go +++ b/internal/docker/builder.go @@ -251,8 +251,6 @@ func (d *Builder) construct(bprint b.Blueprint) (errs []error) { runner := instruction.NewRunner(bprint.Name, d.debugLogging) results := make([]result, len(bprint.Homeservers)) for i, hs := range bprint.Homeservers { - log.Printf("nbtbtrtbrddbtrbtrd %s", idsFromApplicationServices(hs.ApplicationServices)) - res := d.constructHomeserver(bprint.Name, runner, hs, networkID) if res.err != nil { errs = append(errs, res.err) @@ -262,20 +260,6 @@ func (d *Builder) construct(bprint b.Blueprint) (errs []error) { } } - log.Printf("hhtrhtddhtrhdhrt %s", idsFromApplicationServices(hs.ApplicationServices)) - // Create the application service files - for _, as := range hs.ApplicationServices { - yamlRegistrationContent := generateASRegistrationYaml(as) - err := d.Docker.CopyToContainer(context.Background(), res.containerID, "/conf/${as.ID}.yaml", strings.NewReader(yamlRegistrationContent), types.CopyToContainerOptions{ - AllowOverwriteDirWithFile: false, - }) - - if err != nil { - errs = append(errs, err) - d.log("Failed to add application service registration file %s in %s\n", as.ID, res.containerID) - } - } - // kill the container defer func(r result) { killErr := d.Docker.ContainerKill(context.Background(), r.containerID, "KILL") @@ -293,6 +277,7 @@ func (d *Builder) construct(bprint b.Blueprint) (errs []error) { } labels := labelsForTokens(runner.AccessTokens(res.homeserver.Name)) + // Combine the labels for tokens and application services asLabels := labelsForApplicationServices(res.homeserver.ApplicationServices) for k, v := range asLabels { labels[k] = v @@ -351,8 +336,10 @@ func (d *Builder) constructHomeserver(blueprintName string, runner *instruction. 
// deployBaseImage runs the base image and returns the baseURL, containerID or an error. func (d *Builder) deployBaseImage(blueprintName string, hs b.Homeserver, contextStr, networkID string) (*HomeserverDeployment, error) { + asIDToRegistrationMap := asIDToRegistrationFromLabels(labelsForApplicationServices(hs.ApplicationServices)) + return deployImage( - d.Docker, d.BaseImage, d.CSAPIPort, fmt.Sprintf("complement_%s", contextStr), blueprintName, hs.Name, idsFromApplicationServices(hs.ApplicationServices), contextStr, + d.Docker, d.BaseImage, d.CSAPIPort, fmt.Sprintf("complement_%s", contextStr), blueprintName, hs.Name, asIDToRegistrationMap, contextStr, networkID, d.config.VersionCheckIterations, ) } @@ -454,7 +441,7 @@ func idsFromApplicationServices(asList []b.ApplicationService) []string { } func deployImage( - docker *client.Client, imageID string, csPort int, containerName, blueprintName, hsName string, hsApplicationServiceIDs []string, contextStr, networkID string, versionCheckIterations int, + docker *client.Client, imageID string, csPort int, containerName, blueprintName, hsName string, asIDToRegistrationMap map[string]string, contextStr, networkID string, versionCheckIterations int, ) (*HomeserverDeployment, error) { ctx := context.Background() var extraHosts []string @@ -476,13 +463,25 @@ func deployImage( } } + env := []string{ + "SERVER_NAME=" + hsName, + "COMPLEMENT_CA=" + os.Getenv("COMPLEMENT_CA"), + } + + i := 0 + var asIDs []string + for asID, registration := range asIDToRegistrationMap { + env = append(env, "AS_REGISTRATION_${i}="+registration) + asIDs = append(asIDs, asID) + + i++ + } + + env = append(env, "AS_REGISTRATION_IDS="+strings.Join(asIDs, " ")) + body, err := docker.ContainerCreate(ctx, &container.Config{ Image: imageID, - Env: []string{ - "SERVER_NAME=" + hsName, - "COMPLEMENT_CA=" + os.Getenv("COMPLEMENT_CA"), - "AS_REGISTRATION_IDS=" + strings.Join(hsApplicationServiceIDs, " "), - }, + Env: env, //Cmd: d.ImageArgs, Labels: map[string]string{ complementLabel: contextStr, @@ -541,7 +540,7 @@ func deployImage( FedBaseURL: fedBaseURL, ContainerID: containerID, AccessTokens: tokensFromLabels(inspect.Config.Labels), - ApplicationServices: applicationServiceIDsFromLabels(inspect.Config.Labels), + ApplicationServices: asIDToRegistrationFromLabels(inspect.Config.Labels), } if lastErr != nil { return d, fmt.Errorf("%s: failed to check server is up. 
%w", contextStr, lastErr) @@ -615,21 +614,21 @@ func labelsForTokens(userIDToToken map[string]string) map[string]string { return labels } -func applicationServiceIDsFromLabels(labels map[string]string) []string { - asIDs := make([]string, 0) +func asIDToRegistrationFromLabels(labels map[string]string) map[string]string { + asMap := make(map[string]string) for k, v := range labels { if strings.HasPrefix(k, "access_token_") { - asIDs = append(asIDs, v) + asMap[strings.TrimPrefix(k, "application_service_")] = v } } - return asIDs + return asMap } func labelsForApplicationServices(asList []b.ApplicationService) map[string]string { labels := make(map[string]string) // collect and store access tokens as labels 'access_token_$userid: $token' for _, as := range asList { - labels["application_service_"+as.ID] = as.ID + labels["application_service_"+as.ID] = generateASRegistrationYaml(as) } return labels } diff --git a/internal/docker/deployer.go b/internal/docker/deployer.go index 3fa76575..b9111848 100644 --- a/internal/docker/deployer.go +++ b/internal/docker/deployer.go @@ -81,12 +81,12 @@ func (d *Deployer) Deploy(ctx context.Context, blueprintName string) (*Deploymen d.Counter++ contextStr := img.Labels["complement_context"] hsName := img.Labels["complement_hs_name"] - asIDs := applicationServiceIDsFromLabels(img.Labels) + asIDToRegistrationMap := asIDToRegistrationFromLabels(img.Labels) // TODO: Make CSAPI port configurable deployment, err := deployImage( d.Docker, img.ID, 8008, fmt.Sprintf("complement_%s_%s_%d", d.Namespace, contextStr, d.Counter), - blueprintName, hsName, asIDs, contextStr, networkID, d.config.VersionCheckIterations) + blueprintName, hsName, asIDToRegistrationMap, contextStr, networkID, d.config.VersionCheckIterations) if err != nil { if deployment != nil && deployment.ContainerID != "" { // print logs to help debug diff --git a/internal/docker/deployment.go b/internal/docker/deployment.go index ec1ecb1f..5aa6c809 100644 --- a/internal/docker/deployment.go +++ b/internal/docker/deployment.go @@ -24,7 +24,7 @@ type HomeserverDeployment struct { FedBaseURL string // e.g https://localhost:48373 ContainerID string // e.g 10de45efba AccessTokens map[string]string // e.g { "@alice:hs1": "myAcc3ssT0ken" } - ApplicationServices []string // e.g ["my-as-id"] + ApplicationServices map[string]string // e.g { "my-as-id": "id: xxx\nas_token: xxx ..."} } } // Destroy the entire deployment. Destroys all running containers. 
If `printServerLogs` is true, From fe050a9ad19c5053ab8ad45a7b80489fdbe99fc8 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Tue, 16 Feb 2021 14:38:06 -0600 Subject: [PATCH 16/81] Passing registration by environment variable --- dockerfiles/synapse/start.sh | 7 +++++-- internal/b/hs_with_application_service.go | 2 +- internal/docker/builder.go | 21 +++++++++------------ 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/dockerfiles/synapse/start.sh b/dockerfiles/synapse/start.sh index 543538e7..d7b2c698 100755 --- a/dockerfiles/synapse/start.sh +++ b/dockerfiles/synapse/start.sh @@ -1,4 +1,4 @@ -#!/bin/sh +#!/bin/bash set -e @@ -6,8 +6,11 @@ sed -i "s/SERVER_NAME/${SERVER_NAME}/g" /conf/homeserver.yaml for as_id in $AS_REGISTRATION_IDS do + as_registration_varname=AS_REGISTRATION_${as_id} + touch "/conf/${as_id}.yaml" - echo "id: ${as_id}\nhs_token: 123abc\nas_token: 123abc" > "/conf/${as_id}.yaml" + echo "${!as_registration_varname}" > "/conf/${as_id}.yaml" + # Insert the registration file and the AS_REGISTRATION_FILES marker in order # to add other application services in the next iteration of the loop sed -i "s/AS_REGISTRATION_FILES/ - \/conf\/${as_id}.yaml\nAS_REGISTRATION_FILES/g" /conf/homeserver.yaml diff --git a/internal/b/hs_with_application_service.go b/internal/b/hs_with_application_service.go index 1b4abf42..ba9db923 100644 --- a/internal/b/hs_with_application_service.go +++ b/internal/b/hs_with_application_service.go @@ -14,7 +14,7 @@ var BlueprintHSWithApplicationService = MustValidate(Blueprint{ }, ApplicationServices: []ApplicationService{ { - ID: "my-as-id", + ID: "my_as_id", URL: "http://localhost:9000", SenderLocalpart: "the-bridge-user", RateLimited: false, diff --git a/internal/docker/builder.go b/internal/docker/builder.go index b241c472..f5834d91 100644 --- a/internal/docker/builder.go +++ b/internal/docker/builder.go @@ -419,16 +419,16 @@ func getCaVolume(docker *client.Client, ctx context.Context) (map[string]struct{ } func generateASRegistrationYaml(as b.ApplicationService) string { - return "id: ${as.ID}\n" + + return fmt.Sprintf("id: %s\n", as.ID) + "hs_token: 27562ff25dd2eb69361ac1eb67e3a3cd38ab9509c1483234ec8dfec0f247c73e\n" + "as_token: f872531e387377686989e792c723e646f7823643e747a0521e94770a721f40fc\n" + - "url: '${as.URL}'\n" + - "sender_localpart: ${as.SenderLocalpart}\n" + - "rate_limited: ${as.RateLimited}\n" + + fmt.Sprintf("url: '%s'\n", as.URL) + + fmt.Sprintf("sender_localpart: %s\n", as.SenderLocalpart) + + fmt.Sprintf("rate_limited: %v\n", as.RateLimited) + "namespaces:\n" + - "\tusers: []\n" + - "\trooms: []\n" + - "\taliases: []\n" + " users: []\n" + + " rooms: []\n" + + " aliases: []\n" } func idsFromApplicationServices(asList []b.ApplicationService) []string { @@ -468,13 +468,10 @@ func deployImage( "COMPLEMENT_CA=" + os.Getenv("COMPLEMENT_CA"), } - i := 0 var asIDs []string for asID, registration := range asIDToRegistrationMap { - env = append(env, "AS_REGISTRATION_${i}="+registration) + env = append(env, fmt.Sprintf("AS_REGISTRATION_%s=", asID)+registration) asIDs = append(asIDs, asID) - - i++ } env = append(env, "AS_REGISTRATION_IDS="+strings.Join(asIDs, " ")) @@ -617,7 +614,7 @@ func labelsForTokens(userIDToToken map[string]string) map[string]string { func asIDToRegistrationFromLabels(labels map[string]string) map[string]string { asMap := make(map[string]string) for k, v := range labels { - if strings.HasPrefix(k, "access_token_") { + if strings.HasPrefix(k, "application_service_") { asMap[strings.TrimPrefix(k, 
"application_service_")] = v } } From 762506725c1f2e195a9b82bcf5a60a0db1e6ff00 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Tue, 16 Feb 2021 22:12:14 -0600 Subject: [PATCH 17/81] WIP: Make AS tokens available to use as users --- internal/b/blueprints.go | 2 ++ internal/docker/builder.go | 19 ++++++++++++++++--- tests/msc2716_test.go | 4 +++- 3 files changed, 21 insertions(+), 4 deletions(-) diff --git a/internal/b/blueprints.go b/internal/b/blueprints.go index d1034c77..dd9dbe3e 100644 --- a/internal/b/blueprints.go +++ b/internal/b/blueprints.go @@ -73,6 +73,8 @@ type Room struct { type ApplicationService struct { ID string + HSToken string + ASToken string URL string SenderLocalpart string RateLimited bool diff --git a/internal/docker/builder.go b/internal/docker/builder.go index f5834d91..cbb61ad4 100644 --- a/internal/docker/builder.go +++ b/internal/docker/builder.go @@ -34,6 +34,7 @@ import ( client "github.com/docker/docker/client" "github.com/docker/docker/pkg/stdcopy" "github.com/docker/go-connections/nat" + "github.com/sirupsen/logrus" "github.com/matrix-org/complement/internal/b" "github.com/matrix-org/complement/internal/config" @@ -420,8 +421,8 @@ func getCaVolume(docker *client.Client, ctx context.Context) (map[string]struct{ func generateASRegistrationYaml(as b.ApplicationService) string { return fmt.Sprintf("id: %s\n", as.ID) + - "hs_token: 27562ff25dd2eb69361ac1eb67e3a3cd38ab9509c1483234ec8dfec0f247c73e\n" + - "as_token: f872531e387377686989e792c723e646f7823643e747a0521e94770a721f40fc\n" + + fmt.Sprintf("hs_token: %s\n", as.HSToken) + + fmt.Sprintf("as_token: %s\n", as.ASToken) + fmt.Sprintf("url: '%s'\n", as.URL) + fmt.Sprintf("sender_localpart: %s\n", as.SenderLocalpart) + fmt.Sprintf("rate_limited: %v\n", as.RateLimited) + @@ -532,6 +533,11 @@ func deployImage( lastErr = nil break } + + logrus.WithFields(logrus.Fields{ + "inspect.Config.Labels": inspect.Config.Labels, + }).Error("fwewfeaafewffffwewfe") + d := &HomeserverDeployment{ BaseURL: baseURL, FedBaseURL: fedBaseURL, @@ -623,9 +629,16 @@ func asIDToRegistrationFromLabels(labels map[string]string) map[string]string { func labelsForApplicationServices(asList []b.ApplicationService) map[string]string { labels := make(map[string]string) - // collect and store access tokens as labels 'access_token_$userid: $token' + // collect and store app service registrations as labels 'application_service_$as_id: $registration' + // collect and store app service access tokens as labels 'access_token_$sender_localpart: $as_token' for _, as := range asList { + // TODO: Genereate unique tokens each run + as.HSToken = "27562ff25dd2eb69361ac1eb67e3a3cd38ab9509c1483234ec8dfec0f247c73e" + as.ASToken = "f872531e387377686989e792c723e646f7823643e747a0521e94770a721f40fc" + labels["application_service_"+as.ID] = generateASRegistrationYaml(as) + + labels["access_token_@"+as.SenderLocalpart+":hs1"] = as.ASToken } return labels } diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go index 9d8a6fcf..ed09f1db 100644 --- a/tests/msc2716_test.go +++ b/tests/msc2716_test.go @@ -32,7 +32,9 @@ func TestBackfillingHistory(t *testing.T) { eventA, eventB, eventC, timeAfterEventA := createMessagesInRoom(t, alice, roomID) - event1, event2, event3 := backfillMessagesAtTime(t, alice, roomID, eventA, timeAfterEventA) + asUserID := "@the-bridge-user:hs1" + as := deployment.Client(t, "hs1", asUserID) + event1, event2, event3 := backfillMessagesAtTime(t, as, roomID, eventA, timeAfterEventA) // eventStar eventStar := alice.SendEventSynced(t, roomID, 
b.Event{ From 7d70586dd989aed95e16cb15b811fe7ee9804ec3 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 17 Feb 2021 19:04:48 -0600 Subject: [PATCH 18/81] Add hs domain to bridge MXID --- internal/docker/builder.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/internal/docker/builder.go b/internal/docker/builder.go index cbb61ad4..221ee227 100644 --- a/internal/docker/builder.go +++ b/internal/docker/builder.go @@ -279,7 +279,7 @@ func (d *Builder) construct(bprint b.Blueprint) (errs []error) { labels := labelsForTokens(runner.AccessTokens(res.homeserver.Name)) // Combine the labels for tokens and application services - asLabels := labelsForApplicationServices(res.homeserver.ApplicationServices) + asLabels := labelsForApplicationServices(res.homeserver) for k, v := range asLabels { labels[k] = v } @@ -337,7 +337,7 @@ func (d *Builder) constructHomeserver(blueprintName string, runner *instruction. // deployBaseImage runs the base image and returns the baseURL, containerID or an error. func (d *Builder) deployBaseImage(blueprintName string, hs b.Homeserver, contextStr, networkID string) (*HomeserverDeployment, error) { - asIDToRegistrationMap := asIDToRegistrationFromLabels(labelsForApplicationServices(hs.ApplicationServices)) + asIDToRegistrationMap := asIDToRegistrationFromLabels(labelsForApplicationServices(hs)) return deployImage( d.Docker, d.BaseImage, d.CSAPIPort, fmt.Sprintf("complement_%s", contextStr), blueprintName, hs.Name, asIDToRegistrationMap, contextStr, @@ -627,18 +627,18 @@ func asIDToRegistrationFromLabels(labels map[string]string) map[string]string { return asMap } -func labelsForApplicationServices(asList []b.ApplicationService) map[string]string { +func labelsForApplicationServices(hs b.Homeserver) map[string]string { labels := make(map[string]string) // collect and store app service registrations as labels 'application_service_$as_id: $registration' // collect and store app service access tokens as labels 'access_token_$sender_localpart: $as_token' - for _, as := range asList { - // TODO: Genereate unique tokens each run + for _, as := range hs.ApplicationServices { + // TODO: Generate unique tokens on each run as.HSToken = "27562ff25dd2eb69361ac1eb67e3a3cd38ab9509c1483234ec8dfec0f247c73e" as.ASToken = "f872531e387377686989e792c723e646f7823643e747a0521e94770a721f40fc" labels["application_service_"+as.ID] = generateASRegistrationYaml(as) - labels["access_token_@"+as.SenderLocalpart+":hs1"] = as.ASToken + labels["access_token_@"+as.SenderLocalpart+":"+hs.Name] = as.ASToken } return labels } From 9292cf8989089b88514cdab1f4a7d389a8470d67 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Sun, 21 Feb 2021 20:36:44 -0600 Subject: [PATCH 19/81] Copy AS registration to container instead of messy env variables --- dockerfiles/synapse/as-registration.yaml | 10 ---- dockerfiles/synapse/homeserver.yaml | 2 +- dockerfiles/synapse/start.sh | 7 +-- internal/docker/builder.go | 65 +++++++++++++++++++++++- tests/msc2716_test.go | 8 +-- 5 files changed, 70 insertions(+), 22 deletions(-) delete mode 100644 dockerfiles/synapse/as-registration.yaml diff --git a/dockerfiles/synapse/as-registration.yaml b/dockerfiles/synapse/as-registration.yaml deleted file mode 100644 index 9026ba3c..00000000 --- a/dockerfiles/synapse/as-registration.yaml +++ /dev/null @@ -1,10 +0,0 @@ -id: 24c97215fcec1025b19df73c574b5bf5b3d69f1648921461ac9beabaf11466b7 -hs_token: 27562ff25dd2eb69361ac1eb67e3a3cd38ab9509c1483234ec8dfec0f247c73e -as_token: 
f872531e387377686989e792c723e646f7823643e747a0521e94770a721f40fc -url: 'http://localhost:9000' -sender_localpart: the-bridge-user -rate_limited: false -namespaces: - users: [] - rooms: [] - aliases: [] diff --git a/dockerfiles/synapse/homeserver.yaml b/dockerfiles/synapse/homeserver.yaml index 6aa7f192..2375efff 100644 --- a/dockerfiles/synapse/homeserver.yaml +++ b/dockerfiles/synapse/homeserver.yaml @@ -97,7 +97,7 @@ federation_rr_transactions_per_room_per_second: 9999 # A list of application service config files to use # app_service_config_files: - AS_REGISTRATION_FILES +AS_REGISTRATION_FILES ## Experimental Features ## diff --git a/dockerfiles/synapse/start.sh b/dockerfiles/synapse/start.sh index d7b2c698..2db03eef 100755 --- a/dockerfiles/synapse/start.sh +++ b/dockerfiles/synapse/start.sh @@ -6,12 +6,7 @@ sed -i "s/SERVER_NAME/${SERVER_NAME}/g" /conf/homeserver.yaml for as_id in $AS_REGISTRATION_IDS do - as_registration_varname=AS_REGISTRATION_${as_id} - - touch "/conf/${as_id}.yaml" - echo "${!as_registration_varname}" > "/conf/${as_id}.yaml" - - # Insert the registration file and the AS_REGISTRATION_FILES marker in order + # Insert the path to the registration file and the AS_REGISTRATION_FILES marker in order # to add other application services in the next iteration of the loop sed -i "s/AS_REGISTRATION_FILES/ - \/conf\/${as_id}.yaml\nAS_REGISTRATION_FILES/g" /conf/homeserver.yaml done diff --git a/internal/docker/builder.go b/internal/docker/builder.go index 221ee227..7bd8c3b4 100644 --- a/internal/docker/builder.go +++ b/internal/docker/builder.go @@ -14,6 +14,8 @@ package docker import ( + "archive/tar" + "bytes" "context" "errors" "fmt" @@ -408,6 +410,7 @@ func getCaVolume(docker *client.Client, ctx context.Context) (map[string]struct{ return nil, nil, err } } + caMount = []mount.Mount{ { Type: mount.TypeBind, @@ -419,6 +422,33 @@ func getCaVolume(docker *client.Client, ctx context.Context) (map[string]struct{ return caVolume, caMount, nil } +func getAppServiceVolume(docker *client.Client, ctx context.Context) (map[string]struct{}, []mount.Mount, error) { + var asVolume map[string]struct{} + + // Our application service registration is placed in the current working dir. 
+ cwd, err := os.Getwd() + if err != nil { + return nil, nil, err + } + asDirHost := path.Join(cwd, "as") + if _, err := os.Stat(asDirHost); os.IsNotExist(err) { + err = os.Mkdir(asDirHost, 0770) + if err != nil { + return nil, nil, err + } + } + + asMount := []mount.Mount{ + { + Type: mount.TypeBind, + Source: asDirHost, + Target: "/as", + }, + } + + return asVolume, asMount, nil +} + func generateASRegistrationYaml(as b.ApplicationService) string { return fmt.Sprintf("id: %s\n", as.ID) + fmt.Sprintf("hs_token: %s\n", as.HSToken) + @@ -464,14 +494,19 @@ func deployImage( } } + // asVolume, asMount, err := getAppServiceVolume(docker, ctx) + // if err != nil { + // return nil, err + // } + env := []string{ "SERVER_NAME=" + hsName, "COMPLEMENT_CA=" + os.Getenv("COMPLEMENT_CA"), } var asIDs []string - for asID, registration := range asIDToRegistrationMap { - env = append(env, fmt.Sprintf("AS_REGISTRATION_%s=", asID)+registration) + for asID, _ := range asIDToRegistrationMap { + //env = append(env, fmt.Sprintf("AS_REGISTRATION_%s=", asID)+registration) asIDs = append(asIDs, asID) } @@ -502,7 +537,33 @@ func deployImage( if err != nil { return nil, err } + containerID := body.ID + + // Create the application service files + for asID, registration := range asIDToRegistrationMap { + var buf bytes.Buffer + tw := tar.NewWriter(&buf) + err = tw.WriteHeader(&tar.Header{ + Name: fmt.Sprintf("/conf/%s.yaml", asID), // filename + Mode: 0777, // permissions + Size: int64(len(registration)), // filesize + }) + if err != nil { + return nil, fmt.Errorf("docker copy: %v", err) + } + tw.Write([]byte(registration)) + tw.Close() + + err := docker.CopyToContainer(context.Background(), containerID, "/", &buf, types.CopyToContainerOptions{ + AllowOverwriteDirWithFile: false, + }) + + if err != nil { + return nil, err + } + } + err = docker.ContainerStart(ctx, containerID, types.ContainerStartOptions{}) if err != nil { return nil, err diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go index ed09f1db..1d83ed37 100644 --- a/tests/msc2716_test.go +++ b/tests/msc2716_test.go @@ -26,14 +26,16 @@ func TestBackfillingHistory(t *testing.T) { deployment := Deploy(t, "rooms_state", b.BlueprintHSWithApplicationService) defer deployment.Destroy(t) + asUserID := "@the-bridge-user:hs1" + as := deployment.Client(t, "hs1", asUserID) + roomID := as.CreateRoom(t, struct{}{}) + userID := "@alice:hs1" alice := deployment.Client(t, "hs1", userID) - roomID := alice.CreateRoom(t, struct{}{}) + alice.JoinRoom(t, roomID, nil) eventA, eventB, eventC, timeAfterEventA := createMessagesInRoom(t, alice, roomID) - asUserID := "@the-bridge-user:hs1" - as := deployment.Client(t, "hs1", asUserID) event1, event2, event3 := backfillMessagesAtTime(t, as, roomID, eventA, timeAfterEventA) // eventStar From 440045968f9188416a56b76573fc04db3b575364 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Sun, 21 Feb 2021 21:41:22 -0600 Subject: [PATCH 20/81] Look for appservice registrations in the /appservices directory --- dockerfiles/synapse/start.sh | 15 +++-- internal/docker/builder.go | 120 ++++++++++++++--------------------- 2 files changed, 57 insertions(+), 78 deletions(-) diff --git a/dockerfiles/synapse/start.sh b/dockerfiles/synapse/start.sh index 2db03eef..8af7c278 100755 --- a/dockerfiles/synapse/start.sh +++ b/dockerfiles/synapse/start.sh @@ -4,16 +4,19 @@ set -e sed -i "s/SERVER_NAME/${SERVER_NAME}/g" /conf/homeserver.yaml -for as_id in $AS_REGISTRATION_IDS -do - # Insert the path to the registration file and the AS_REGISTRATION_FILES 
marker in order - # to add other application services in the next iteration of the loop - sed -i "s/AS_REGISTRATION_FILES/ - \/conf\/${as_id}.yaml\nAS_REGISTRATION_FILES/g" /conf/homeserver.yaml +# Add the application service registration files to the homeserver.yaml config +for filename in /appservices/*.yaml; do + [ -f "$filename" ] || break + + as_id=$(basename "$filename" .yaml) + + # Insert the path to the registration file and the AS_REGISTRATION_FILES marker after + # so we can add the next application service in the next iteration of this for loop + sed -i "s/AS_REGISTRATION_FILES/ - \/appservices\/${as_id}.yaml\nAS_REGISTRATION_FILES/g" /conf/homeserver.yaml done # Remove the AS_REGISTRATION_FILES entry sed -i "s/AS_REGISTRATION_FILES//g" /conf/homeserver.yaml - # generate an ssl cert for the server, signed by our dummy CA openssl req -new -key /conf/server.tls.key -out /conf/server.tls.csr \ -subj "/CN=${SERVER_NAME}" diff --git a/internal/docker/builder.go b/internal/docker/builder.go index 7bd8c3b4..69299899 100644 --- a/internal/docker/builder.go +++ b/internal/docker/builder.go @@ -33,10 +33,10 @@ import ( "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/mount" "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/volume" client "github.com/docker/docker/client" "github.com/docker/docker/pkg/stdcopy" "github.com/docker/go-connections/nat" - "github.com/sirupsen/logrus" "github.com/matrix-org/complement/internal/b" "github.com/matrix-org/complement/internal/config" @@ -348,9 +348,9 @@ func (d *Builder) deployBaseImage(blueprintName string, hs b.Homeserver, context } // getCaVolume returns the correct mounts and volumes for providing a CA to homeserver containers. -func getCaVolume(docker *client.Client, ctx context.Context) (map[string]struct{}, []mount.Mount, error) { - var caVolume map[string]struct{} - var caMount []mount.Mount +func getCaVolume(docker *client.Client, ctx context.Context) (string, mount.Mount, error) { + var caVolume string + var caMount mount.Mount if os.Getenv("CI") == "true" { // When in CI, Complement itself is a container with the CA volume mounted at /ca. @@ -361,16 +361,16 @@ func getCaVolume(docker *client.Client, ctx context.Context) (map[string]struct{ // /proc/1/cpuset should be /docker/ cpuset, err := ioutil.ReadFile("/proc/1/cpuset") if err != nil { - return nil, nil, err + return caVolume, caMount, err } if !strings.Contains(string(cpuset), "docker") { - return nil, nil, errors.New("Could not identify container ID using /proc/1/cpuset") + return caVolume, caMount, errors.New("Could not identify container ID using /proc/1/cpuset") } cpusetList := strings.Split(strings.TrimSpace(string(cpuset)), "/") containerId := cpusetList[len(cpusetList)-1] container, err := docker.ContainerInspect(ctx, containerId) if err != nil { - return nil, nil, err + return caVolume, caMount, err } // Get the volume that matches the destination in our complement container var volumeName string @@ -383,17 +383,13 @@ func getCaVolume(docker *client.Client, ctx context.Context) (map[string]struct{ // We did not find a volume. This container might be created without a volume, // or CI=true is passed but we are not running in a container. // todo: log that we do not provide a CA volume mount? 
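Backing up to the start.sh loop above: it splices one `- /appservices/<id>.yaml` entry into homeserver.yaml per registration file by replacing the AS_REGISTRATION_FILES marker and immediately re-adding it, so the next iteration appends after the entry just written; a final sed strips the leftover marker. The same text transformation expressed in Go, purely for illustration (the IDs are made up):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	config := "app_service_config_files:\nAS_REGISTRATION_FILES"
	for _, asID := range []string{"my_as_id", "other_as_id"} {
		// Write the entry and re-insert the marker after it, mirroring
		// the sed replacement in start.sh.
		entry := fmt.Sprintf(" - /appservices/%s.yaml\nAS_REGISTRATION_FILES", asID)
		config = strings.Replace(config, "AS_REGISTRATION_FILES", entry, 1)
	}
	// Drop the marker once every registration has been added.
	config = strings.Replace(config, "AS_REGISTRATION_FILES", "", 1)
	fmt.Print(config)
}
```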
- return nil, nil, nil + return caVolume, caMount, nil } else { - caVolume = map[string]struct{}{ - "/ca": {}, - } - caMount = []mount.Mount{ - { - Type: mount.TypeVolume, - Source: volumeName, - Target: "/ca", - }, + caVolume = "/ca" + caMount = mount.Mount{ + Type: mount.TypeVolume, + Source: volumeName, + Target: "/ca", } } } else { @@ -401,52 +397,39 @@ func getCaVolume(docker *client.Client, ctx context.Context) (map[string]struct{ // We bind mount this directory to all homeserver containers. cwd, err := os.Getwd() if err != nil { - return nil, nil, err + return caVolume, caMount, err } caCertificateDirHost := path.Join(cwd, "ca") if _, err := os.Stat(caCertificateDirHost); os.IsNotExist(err) { err = os.Mkdir(caCertificateDirHost, 0770) if err != nil { - return nil, nil, err + return caVolume, caMount, err } } - caMount = []mount.Mount{ - { - Type: mount.TypeBind, - Source: path.Join(cwd, "ca"), - Target: "/ca", - }, + caMount = mount.Mount{ + Type: mount.TypeBind, + Source: path.Join(cwd, "ca"), + Target: "/ca", } } return caVolume, caMount, nil } -func getAppServiceVolume(docker *client.Client, ctx context.Context) (map[string]struct{}, []mount.Mount, error) { - var asVolume map[string]struct{} - - // Our application service registration is placed in the current working dir. - cwd, err := os.Getwd() - if err != nil { - return nil, nil, err - } - asDirHost := path.Join(cwd, "as") - if _, err := os.Stat(asDirHost); os.IsNotExist(err) { - err = os.Mkdir(asDirHost, 0770) - if err != nil { - return nil, nil, err - } - } +func getAppServiceVolume(docker *client.Client, ctx context.Context) (string, mount.Mount, error) { + asVolume, err := docker.VolumeCreate(context.Background(), volume.VolumesCreateBody{ + //Driver: "overlay2", + DriverOpts: map[string]string{}, + Name: "appservices", + }) - asMount := []mount.Mount{ - { - Type: mount.TypeBind, - Source: asDirHost, - Target: "/as", - }, + asMount := mount.Mount{ + Type: mount.TypeVolume, + Source: asVolume.Name, + Target: "/appservices", } - return asVolume, asMount, nil + return "/appservices", asMount, err } func generateASRegistrationYaml(as b.ApplicationService) string { @@ -476,8 +459,8 @@ func deployImage( ) (*HomeserverDeployment, error) { ctx := context.Background() var extraHosts []string - var caVolume map[string]struct{} - var caMount []mount.Mount + var volumes = make(map[string]struct{}) + var mounts []mount.Mount var err error if runtime.GOOS == "linux" { @@ -488,30 +471,27 @@ func deployImage( } if os.Getenv("COMPLEMENT_CA") == "true" { - caVolume, caMount, err = getCaVolume(docker, ctx) + caVolume, caMount, err := getCaVolume(docker, ctx) if err != nil { return nil, err } + + volumes[caVolume] = struct{}{} + mounts = append(mounts, caMount) } - // asVolume, asMount, err := getAppServiceVolume(docker, ctx) - // if err != nil { - // return nil, err - // } + asVolume, asMount, err := getAppServiceVolume(docker, ctx) + if err != nil { + return nil, err + } + volumes[asVolume] = struct{}{} + mounts = append(mounts, asMount) env := []string{ "SERVER_NAME=" + hsName, "COMPLEMENT_CA=" + os.Getenv("COMPLEMENT_CA"), } - var asIDs []string - for asID, _ := range asIDToRegistrationMap { - //env = append(env, fmt.Sprintf("AS_REGISTRATION_%s=", asID)+registration) - asIDs = append(asIDs, asID) - } - - env = append(env, "AS_REGISTRATION_IDS="+strings.Join(asIDs, " ")) - body, err := docker.ContainerCreate(ctx, &container.Config{ Image: imageID, Env: env, @@ -521,11 +501,11 @@ func deployImage( "complement_blueprint": blueprintName, 
"complement_hs_name": hsName, }, - Volumes: caVolume, + Volumes: volumes, }, &container.HostConfig{ PublishAllPorts: true, ExtraHosts: extraHosts, - Mounts: caMount, + Mounts: mounts, }, &network.NetworkingConfig{ EndpointsConfig: map[string]*network.EndpointSettings{ hsName: { @@ -545,12 +525,12 @@ func deployImage( var buf bytes.Buffer tw := tar.NewWriter(&buf) err = tw.WriteHeader(&tar.Header{ - Name: fmt.Sprintf("/conf/%s.yaml", asID), // filename - Mode: 0777, // permissions - Size: int64(len(registration)), // filesize + Name: fmt.Sprintf("/appservices/%s.yaml", asID), + Mode: 0777, + Size: int64(len(registration)), }) if err != nil { - return nil, fmt.Errorf("docker copy: %v", err) + return nil, fmt.Errorf("Failed to copy regstration to container: %v", err) } tw.Write([]byte(registration)) tw.Close() @@ -595,10 +575,6 @@ func deployImage( break } - logrus.WithFields(logrus.Fields{ - "inspect.Config.Labels": inspect.Config.Labels, - }).Error("fwewfeaafewffffwewfe") - d := &HomeserverDeployment{ BaseURL: baseURL, FedBaseURL: fedBaseURL, From 606eca3f60378c980baea88e7725fec99fc54c49 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Sun, 21 Feb 2021 23:17:18 -0600 Subject: [PATCH 21/81] Generate tokens on every run --- internal/b/blueprints.go | 28 ++++++++++++++++++++++++++++ internal/docker/builder.go | 5 +---- 2 files changed, 29 insertions(+), 4 deletions(-) diff --git a/internal/b/blueprints.go b/internal/b/blueprints.go index dd9dbe3e..d737888b 100644 --- a/internal/b/blueprints.go +++ b/internal/b/blueprints.go @@ -15,9 +15,13 @@ package b import ( + "crypto/rand" + "encoding/hex" "fmt" "strconv" "strings" + + "github.com/sirupsen/logrus" ) // KnownBlueprints lists static blueprints @@ -121,7 +125,31 @@ func Validate(bp Blueprint) (Blueprint, error) { return bp, err } } + + for i, as := range hs.ApplicationServices { + hsToken := make([]byte, 32) + _, err := rand.Read(hsToken) + if err != nil { + return bp, err + } + + asToken := make([]byte, 32) + _, err = rand.Read(asToken) + if err != nil { + return bp, err + } + + as.HSToken = hex.EncodeToString(hsToken) + as.ASToken = hex.EncodeToString(asToken) + + hs.ApplicationServices[i] = as + } } + + logrus.WithFields(logrus.Fields{ + "bp": bp.Homeservers[0].ApplicationServices, + }).Error("after modfiying bp") + return bp, nil } diff --git a/internal/docker/builder.go b/internal/docker/builder.go index 69299899..b5bb42cc 100644 --- a/internal/docker/builder.go +++ b/internal/docker/builder.go @@ -310,6 +310,7 @@ func (d *Builder) construct(bprint b.Blueprint) (errs []error) { func (d *Builder) constructHomeserver(blueprintName string, runner *instruction.Runner, hs b.Homeserver, networkID string) result { contextStr := fmt.Sprintf("%s.%s", blueprintName, hs.Name) d.log("%s : constructing homeserver...\n", contextStr) + dep, err := d.deployBaseImage(blueprintName, hs, contextStr, networkID) if err != nil { log.Printf("%s : failed to deployBaseImage: %s\n", contextStr, err) @@ -669,10 +670,6 @@ func labelsForApplicationServices(hs b.Homeserver) map[string]string { // collect and store app service registrations as labels 'application_service_$as_id: $registration' // collect and store app service access tokens as labels 'access_token_$sender_localpart: $as_token' for _, as := range hs.ApplicationServices { - // TODO: Generate unique tokens on each run - as.HSToken = "27562ff25dd2eb69361ac1eb67e3a3cd38ab9509c1483234ec8dfec0f247c73e" - as.ASToken = "f872531e387377686989e792c723e646f7823643e747a0521e94770a721f40fc" - 
labels["application_service_"+as.ID] = generateASRegistrationYaml(as) labels["access_token_@"+as.SenderLocalpart+":"+hs.Name] = as.ASToken From 2a8ccf15a5795bb7d3e199f7d8fc08fd63edd4a5 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Sun, 21 Feb 2021 23:21:58 -0600 Subject: [PATCH 22/81] Refactor to use normalize func --- internal/b/blueprints.go | 34 ++++++++++++++++++++-------------- 1 file changed, 20 insertions(+), 14 deletions(-) diff --git a/internal/b/blueprints.go b/internal/b/blueprints.go index d737888b..48223b0b 100644 --- a/internal/b/blueprints.go +++ b/internal/b/blueprints.go @@ -125,24 +125,11 @@ func Validate(bp Blueprint) (Blueprint, error) { return bp, err } } - for i, as := range hs.ApplicationServices { - hsToken := make([]byte, 32) - _, err := rand.Read(hsToken) - if err != nil { - return bp, err - } - - asToken := make([]byte, 32) - _, err = rand.Read(asToken) + hs.ApplicationServices[i], err = normalizeApplicationService(as) if err != nil { return bp, err } - - as.HSToken = hex.EncodeToString(hsToken) - as.ASToken = hex.EncodeToString(asToken) - - hs.ApplicationServices[i] = as } } @@ -194,6 +181,25 @@ func normaliseUser(u string, hsName string) (string, error) { return u, nil } +func normalizeApplicationService(as ApplicationService) (ApplicationService, error) { + hsToken := make([]byte, 32) + _, err := rand.Read(hsToken) + if err != nil { + return as, err + } + + asToken := make([]byte, 32) + _, err = rand.Read(asToken) + if err != nil { + return as, err + } + + as.HSToken = hex.EncodeToString(hsToken) + as.ASToken = hex.EncodeToString(asToken) + + return as, err +} + // Ptr returns a pointer to `in`, because Go doesn't allow you to inline this. func Ptr(in string) *string { return &in From 1dd26ef3d2561b6d70f0b10208c8a794c17de066 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 22 Feb 2021 00:13:18 -0600 Subject: [PATCH 23/81] Remove unused function --- internal/docker/builder.go | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/internal/docker/builder.go b/internal/docker/builder.go index b5bb42cc..8584612f 100644 --- a/internal/docker/builder.go +++ b/internal/docker/builder.go @@ -446,15 +446,6 @@ func generateASRegistrationYaml(as b.ApplicationService) string { " aliases: []\n" } -func idsFromApplicationServices(asList []b.ApplicationService) []string { - ids := make([]string, len(asList)) - for i, as := range asList { - ids[i] = as.ID - } - - return ids -} - func deployImage( docker *client.Client, imageID string, csPort int, containerName, blueprintName, hsName string, asIDToRegistrationMap map[string]string, contextStr, networkID string, versionCheckIterations int, ) (*HomeserverDeployment, error) { @@ -536,10 +527,9 @@ func deployImage( tw.Write([]byte(registration)) tw.Close() - err := docker.CopyToContainer(context.Background(), containerID, "/", &buf, types.CopyToContainerOptions{ + err = docker.CopyToContainer(context.Background(), containerID, "/", &buf, types.CopyToContainerOptions{ AllowOverwriteDirWithFile: false, }) - if err != nil { return nil, err } From 4f96f440fee133b4b44f226a5c8013f83d8637d3 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 22 Feb 2021 00:56:30 -0600 Subject: [PATCH 24/81] Fix lint --- internal/docker/builder.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/internal/docker/builder.go b/internal/docker/builder.go index 8584612f..07362811 100644 --- a/internal/docker/builder.go +++ b/internal/docker/builder.go @@ -463,7 +463,9 @@ func deployImage( } if 
os.Getenv("COMPLEMENT_CA") == "true" { - caVolume, caMount, err := getCaVolume(docker, ctx) + var caVolume string + var caMount mount.Mount + caVolume, caMount, err = getCaVolume(docker, ctx) if err != nil { return nil, err } From 7f8c5c1637a62c40768456b54175af6ae0a3e89a Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 22 Feb 2021 01:00:24 -0600 Subject: [PATCH 25/81] Revert some newline changes --- internal/docker/builder.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/internal/docker/builder.go b/internal/docker/builder.go index 07362811..619ad653 100644 --- a/internal/docker/builder.go +++ b/internal/docker/builder.go @@ -262,7 +262,6 @@ func (d *Builder) construct(bprint b.Blueprint) (errs []error) { printLogs(d.Docker, res.containerID, res.contextStr) } } - // kill the container defer func(r result) { killErr := d.Docker.ContainerKill(context.Background(), r.containerID, "KILL") @@ -310,7 +309,6 @@ func (d *Builder) construct(bprint b.Blueprint) (errs []error) { func (d *Builder) constructHomeserver(blueprintName string, runner *instruction.Runner, hs b.Homeserver, networkID string) result { contextStr := fmt.Sprintf("%s.%s", blueprintName, hs.Name) d.log("%s : constructing homeserver...\n", contextStr) - dep, err := d.deployBaseImage(blueprintName, hs, contextStr, networkID) if err != nil { log.Printf("%s : failed to deployBaseImage: %s\n", contextStr, err) From cf4d8e99ea074bd2540fa3e1843724d6306af8c2 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 22 Feb 2021 01:58:40 -0600 Subject: [PATCH 26/81] Add application service support to blueprints Split out from https://github.com/matrix-org/complement/pull/68 --- dockerfiles/synapse/homeserver.yaml | 7 + dockerfiles/synapse/start.sh | 15 +- internal/b/blueprints.go | 56 ++++++- internal/b/hs_with_application_service.go | 25 ++++ internal/docker/builder.go | 175 +++++++++++++++++----- internal/docker/deployer.go | 4 +- internal/docker/deployment.go | 16 +- 7 files changed, 250 insertions(+), 48 deletions(-) create mode 100644 internal/b/hs_with_application_service.go diff --git a/dockerfiles/synapse/homeserver.yaml b/dockerfiles/synapse/homeserver.yaml index a2f6f309..9fd6c97a 100644 --- a/dockerfiles/synapse/homeserver.yaml +++ b/dockerfiles/synapse/homeserver.yaml @@ -92,6 +92,13 @@ rc_joins: federation_rr_transactions_per_room_per_second: 9999 +## API Configuration ## + +# A list of application service config files to use +# +app_service_config_files: +AS_REGISTRATION_FILES + ## Experimental Features ## experimental_features: diff --git a/dockerfiles/synapse/start.sh b/dockerfiles/synapse/start.sh index 6e35b4dd..8af7c278 100755 --- a/dockerfiles/synapse/start.sh +++ b/dockerfiles/synapse/start.sh @@ -1,9 +1,22 @@ -#!/bin/sh +#!/bin/bash set -e sed -i "s/SERVER_NAME/${SERVER_NAME}/g" /conf/homeserver.yaml +# Add the application service registration files to the homeserver.yaml config +for filename in /appservices/*.yaml; do + [ -f "$filename" ] || break + + as_id=$(basename "$filename" .yaml) + + # Insert the path to the registration file and the AS_REGISTRATION_FILES marker after + # so we can add the next application service in the next iteration of this for loop + sed -i "s/AS_REGISTRATION_FILES/ - \/appservices\/${as_id}.yaml\nAS_REGISTRATION_FILES/g" /conf/homeserver.yaml +done +# Remove the AS_REGISTRATION_FILES entry +sed -i "s/AS_REGISTRATION_FILES//g" /conf/homeserver.yaml + # generate an ssl cert for the server, signed by our dummy CA openssl req -new -key /conf/server.tls.key -out /conf/server.tls.csr \ 
-subj "/CN=${SERVER_NAME}" diff --git a/internal/b/blueprints.go b/internal/b/blueprints.go index ef8424b4..48223b0b 100644 --- a/internal/b/blueprints.go +++ b/internal/b/blueprints.go @@ -15,9 +15,13 @@ package b import ( + "crypto/rand" + "encoding/hex" "fmt" "strconv" "strings" + + "github.com/sirupsen/logrus" ) // KnownBlueprints lists static blueprints @@ -26,6 +30,7 @@ var KnownBlueprints = map[string]*Blueprint{ BlueprintAlice.Name: &BlueprintAlice, BlueprintFederationOneToOneRoom.Name: &BlueprintFederationOneToOneRoom, BlueprintFederationTwoLocalOneRemote.Name: &BlueprintFederationTwoLocalOneRemote, + BlueprintHSWithApplicationService.Name: &BlueprintHSWithApplicationService, BlueprintOneToOneRoom.Name: &BlueprintOneToOneRoom, BlueprintPerfManyMessages.Name: &BlueprintPerfManyMessages, BlueprintPerfManyRooms.Name: &BlueprintPerfManyRooms, @@ -46,6 +51,8 @@ type Homeserver struct { Users []User // The list of rooms to create on this homeserver Rooms []Room + // The list of application services to create on the homeserver + ApplicationServices []ApplicationService } type User struct { @@ -68,11 +75,22 @@ type Room struct { Events []Event } +type ApplicationService struct { + ID string + HSToken string + ASToken string + URL string + SenderLocalpart string + RateLimited bool +} + type Event struct { - Type string - Sender string - StateKey *string - Content map[string]interface{} + Type string + Sender string + OriginServerTS uint64 + StateKey *string + PrevEvents []string + Content map[string]interface{} // This field is ignored in blueprints as clients are unable to set it. Used with federation.Server Unsigned map[string]interface{} } @@ -107,7 +125,18 @@ func Validate(bp Blueprint) (Blueprint, error) { return bp, err } } + for i, as := range hs.ApplicationServices { + hs.ApplicationServices[i], err = normalizeApplicationService(as) + if err != nil { + return bp, err + } + } } + + logrus.WithFields(logrus.Fields{ + "bp": bp.Homeservers[0].ApplicationServices, + }).Error("after modfiying bp") + return bp, nil } @@ -152,6 +181,25 @@ func normaliseUser(u string, hsName string) (string, error) { return u, nil } +func normalizeApplicationService(as ApplicationService) (ApplicationService, error) { + hsToken := make([]byte, 32) + _, err := rand.Read(hsToken) + if err != nil { + return as, err + } + + asToken := make([]byte, 32) + _, err = rand.Read(asToken) + if err != nil { + return as, err + } + + as.HSToken = hex.EncodeToString(hsToken) + as.ASToken = hex.EncodeToString(asToken) + + return as, err +} + // Ptr returns a pointer to `in`, because Go doesn't allow you to inline this. 
func Ptr(in string) *string { return &in diff --git a/internal/b/hs_with_application_service.go b/internal/b/hs_with_application_service.go new file mode 100644 index 00000000..ba9db923 --- /dev/null +++ b/internal/b/hs_with_application_service.go @@ -0,0 +1,25 @@ +package b + +// BlueprintHSWithApplicationService who has an application service to interact with +var BlueprintHSWithApplicationService = MustValidate(Blueprint{ + Name: "alice", + Homeservers: []Homeserver{ + { + Name: "hs1", + Users: []User{ + { + Localpart: "@alice", + DisplayName: "Alice", + }, + }, + ApplicationServices: []ApplicationService{ + { + ID: "my_as_id", + URL: "http://localhost:9000", + SenderLocalpart: "the-bridge-user", + RateLimited: false, + }, + }, + }, + }, +}) diff --git a/internal/docker/builder.go b/internal/docker/builder.go index a8b45ce6..619ad653 100644 --- a/internal/docker/builder.go +++ b/internal/docker/builder.go @@ -14,6 +14,8 @@ package docker import ( + "archive/tar" + "bytes" "context" "errors" "fmt" @@ -31,6 +33,7 @@ import ( "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/mount" "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/volume" client "github.com/docker/docker/client" "github.com/docker/docker/pkg/stdcopy" "github.com/docker/go-connections/nat" @@ -276,6 +279,12 @@ func (d *Builder) construct(bprint b.Blueprint) (errs []error) { } labels := labelsForTokens(runner.AccessTokens(res.homeserver.Name)) + // Combine the labels for tokens and application services + asLabels := labelsForApplicationServices(res.homeserver) + for k, v := range asLabels { + labels[k] = v + } + // commit the container commit, err := d.Docker.ContainerCommit(context.Background(), res.containerID, types.ContainerCommitOptions{ Author: "Complement", @@ -300,7 +309,7 @@ func (d *Builder) construct(bprint b.Blueprint) (errs []error) { func (d *Builder) constructHomeserver(blueprintName string, runner *instruction.Runner, hs b.Homeserver, networkID string) result { contextStr := fmt.Sprintf("%s.%s", blueprintName, hs.Name) d.log("%s : constructing homeserver...\n", contextStr) - dep, err := d.deployBaseImage(blueprintName, hs.Name, contextStr, networkID) + dep, err := d.deployBaseImage(blueprintName, hs, contextStr, networkID) if err != nil { log.Printf("%s : failed to deployBaseImage: %s\n", contextStr, err) containerID := "" @@ -328,17 +337,19 @@ func (d *Builder) constructHomeserver(blueprintName string, runner *instruction. } // deployBaseImage runs the base image and returns the baseURL, containerID or an error. -func (d *Builder) deployBaseImage(blueprintName, hsName, contextStr, networkID string) (*HomeserverDeployment, error) { +func (d *Builder) deployBaseImage(blueprintName string, hs b.Homeserver, contextStr, networkID string) (*HomeserverDeployment, error) { + asIDToRegistrationMap := asIDToRegistrationFromLabels(labelsForApplicationServices(hs)) + return deployImage( - d.Docker, d.BaseImage, d.CSAPIPort, fmt.Sprintf("complement_%s", contextStr), blueprintName, hsName, contextStr, + d.Docker, d.BaseImage, d.CSAPIPort, fmt.Sprintf("complement_%s", contextStr), blueprintName, hs.Name, asIDToRegistrationMap, contextStr, networkID, d.config.VersionCheckIterations, ) } // getCaVolume returns the correct mounts and volumes for providing a CA to homeserver containers. 
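From the caller's side, the observable effect of the validation pass above is that application services come back with freshly populated tokens. A rough usage sketch against the internal/b package (not a test from this series; internal packages are only importable within the repo):

```go
package tests // hypothetical placement inside the Complement repo

import (
	"fmt"

	"github.com/matrix-org/complement/internal/b"
)

func printTokenLengths() {
	bp := b.MustValidate(b.Blueprint{
		Name: "example",
		Homeservers: []b.Homeserver{{
			Name: "hs1",
			ApplicationServices: []b.ApplicationService{{
				ID:              "my_as_id",
				URL:             "http://localhost:9000",
				SenderLocalpart: "the-bridge-user",
			}},
		}},
	})
	// HSToken and ASToken were empty in the literal above; Validate filled
	// them in via normalizeApplicationService.
	as := bp.Homeservers[0].ApplicationServices[0]
	fmt.Println(len(as.HSToken), len(as.ASToken)) // 64 64
}
```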
-func getCaVolume(docker *client.Client, ctx context.Context) (map[string]struct{}, []mount.Mount, error) { - var caVolume map[string]struct{} - var caMount []mount.Mount +func getCaVolume(docker *client.Client, ctx context.Context) (string, mount.Mount, error) { + var caVolume string + var caMount mount.Mount if os.Getenv("CI") == "true" { // When in CI, Complement itself is a container with the CA volume mounted at /ca. @@ -349,16 +360,16 @@ func getCaVolume(docker *client.Client, ctx context.Context) (map[string]struct{ // /proc/1/cpuset should be /docker/ cpuset, err := ioutil.ReadFile("/proc/1/cpuset") if err != nil { - return nil, nil, err + return caVolume, caMount, err } if !strings.Contains(string(cpuset), "docker") { - return nil, nil, errors.New("Could not identify container ID using /proc/1/cpuset") + return caVolume, caMount, errors.New("Could not identify container ID using /proc/1/cpuset") } cpusetList := strings.Split(strings.TrimSpace(string(cpuset)), "/") containerId := cpusetList[len(cpusetList)-1] container, err := docker.ContainerInspect(ctx, containerId) if err != nil { - return nil, nil, err + return caVolume, caMount, err } // Get the volume that matches the destination in our complement container var volumeName string @@ -371,17 +382,13 @@ func getCaVolume(docker *client.Client, ctx context.Context) (map[string]struct{ // We did not find a volume. This container might be created without a volume, // or CI=true is passed but we are not running in a container. // todo: log that we do not provide a CA volume mount? - return nil, nil, nil + return caVolume, caMount, nil } else { - caVolume = map[string]struct{}{ - "/ca": {}, - } - caMount = []mount.Mount{ - { - Type: mount.TypeVolume, - Source: volumeName, - Target: "/ca", - }, + caVolume = "/ca" + caMount = mount.Mount{ + Type: mount.TypeVolume, + Source: volumeName, + Target: "/ca", } } } else { @@ -389,33 +396,61 @@ func getCaVolume(docker *client.Client, ctx context.Context) (map[string]struct{ // We bind mount this directory to all homeserver containers. 
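After this refactor, getCaVolume hands back exactly one of two mount shapes: a named-volume mount when running in CI (sharing the existing /ca volume) or, in the branch that follows below, a bind mount of the host's ./ca directory. Side by side, using the Docker SDK's mount package (the Source values here are placeholders):

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/mount"
)

func main() {
	// CI: attach the named volume discovered via ContainerInspect.
	fromVolume := mount.Mount{
		Type:   mount.TypeVolume,
		Source: "some-ca-volume-name", // placeholder volume name
		Target: "/ca",
	}
	// Local runs: bind-mount the ./ca directory from the host.
	fromHost := mount.Mount{
		Type:   mount.TypeBind,
		Source: "/home/me/complement/ca", // placeholder for path.Join(cwd, "ca")
		Target: "/ca",
	}
	fmt.Println(fromVolume.Type, fromHost.Type) // volume bind
}
```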
cwd, err := os.Getwd() if err != nil { - return nil, nil, err + return caVolume, caMount, err } caCertificateDirHost := path.Join(cwd, "ca") if _, err := os.Stat(caCertificateDirHost); os.IsNotExist(err) { err = os.Mkdir(caCertificateDirHost, 0770) if err != nil { - return nil, nil, err + return caVolume, caMount, err } } - caMount = []mount.Mount{ - { - Type: mount.TypeBind, - Source: path.Join(cwd, "ca"), - Target: "/ca", - }, + + caMount = mount.Mount{ + Type: mount.TypeBind, + Source: path.Join(cwd, "ca"), + Target: "/ca", } } return caVolume, caMount, nil } +func getAppServiceVolume(docker *client.Client, ctx context.Context) (string, mount.Mount, error) { + asVolume, err := docker.VolumeCreate(context.Background(), volume.VolumesCreateBody{ + //Driver: "overlay2", + DriverOpts: map[string]string{}, + Name: "appservices", + }) + + asMount := mount.Mount{ + Type: mount.TypeVolume, + Source: asVolume.Name, + Target: "/appservices", + } + + return "/appservices", asMount, err +} + +func generateASRegistrationYaml(as b.ApplicationService) string { + return fmt.Sprintf("id: %s\n", as.ID) + + fmt.Sprintf("hs_token: %s\n", as.HSToken) + + fmt.Sprintf("as_token: %s\n", as.ASToken) + + fmt.Sprintf("url: '%s'\n", as.URL) + + fmt.Sprintf("sender_localpart: %s\n", as.SenderLocalpart) + + fmt.Sprintf("rate_limited: %v\n", as.RateLimited) + + "namespaces:\n" + + " users: []\n" + + " rooms: []\n" + + " aliases: []\n" +} + func deployImage( - docker *client.Client, imageID string, csPort int, containerName, blueprintName, hsName, contextStr, networkID string, versionCheckIterations int, + docker *client.Client, imageID string, csPort int, containerName, blueprintName, hsName string, asIDToRegistrationMap map[string]string, contextStr, networkID string, versionCheckIterations int, ) (*HomeserverDeployment, error) { ctx := context.Background() var extraHosts []string - var caVolume map[string]struct{} - var caMount []mount.Mount + var volumes = make(map[string]struct{}) + var mounts []mount.Mount var err error if runtime.GOOS == "linux" { @@ -426,26 +461,43 @@ func deployImage( } if os.Getenv("COMPLEMENT_CA") == "true" { + var caVolume string + var caMount mount.Mount caVolume, caMount, err = getCaVolume(docker, ctx) if err != nil { return nil, err } + + volumes[caVolume] = struct{}{} + mounts = append(mounts, caMount) + } + + asVolume, asMount, err := getAppServiceVolume(docker, ctx) + if err != nil { + return nil, err + } + volumes[asVolume] = struct{}{} + mounts = append(mounts, asMount) + + env := []string{ + "SERVER_NAME=" + hsName, + "COMPLEMENT_CA=" + os.Getenv("COMPLEMENT_CA"), } body, err := docker.ContainerCreate(ctx, &container.Config{ Image: imageID, - Env: []string{"SERVER_NAME=" + hsName, "COMPLEMENT_CA=" + os.Getenv("COMPLEMENT_CA")}, + Env: env, //Cmd: d.ImageArgs, Labels: map[string]string{ complementLabel: contextStr, "complement_blueprint": blueprintName, "complement_hs_name": hsName, }, - Volumes: caVolume, + Volumes: volumes, }, &container.HostConfig{ PublishAllPorts: true, ExtraHosts: extraHosts, - Mounts: caMount, + Mounts: mounts, }, &network.NetworkingConfig{ EndpointsConfig: map[string]*network.EndpointSettings{ hsName: { @@ -457,7 +509,32 @@ func deployImage( if err != nil { return nil, err } + containerID := body.ID + + // Create the application service files + for asID, registration := range asIDToRegistrationMap { + var buf bytes.Buffer + tw := tar.NewWriter(&buf) + err = tw.WriteHeader(&tar.Header{ + Name: fmt.Sprintf("/appservices/%s.yaml", asID), + Mode: 0777, + Size: 
int64(len(registration)), + }) + if err != nil { + return nil, fmt.Errorf("Failed to copy regstration to container: %v", err) + } + tw.Write([]byte(registration)) + tw.Close() + + err = docker.CopyToContainer(context.Background(), containerID, "/", &buf, types.CopyToContainerOptions{ + AllowOverwriteDirWithFile: false, + }) + if err != nil { + return nil, err + } + } + err = docker.ContainerStart(ctx, containerID, types.ContainerStartOptions{}) if err != nil { return nil, err @@ -488,11 +565,13 @@ func deployImage( lastErr = nil break } + d := &HomeserverDeployment{ - BaseURL: baseURL, - FedBaseURL: fedBaseURL, - ContainerID: containerID, - AccessTokens: tokensFromLabels(inspect.Config.Labels), + BaseURL: baseURL, + FedBaseURL: fedBaseURL, + ContainerID: containerID, + AccessTokens: tokensFromLabels(inspect.Config.Labels), + ApplicationServices: asIDToRegistrationFromLabels(inspect.Config.Labels), } if lastErr != nil { return d, fmt.Errorf("%s: failed to check server is up. %w", contextStr, lastErr) @@ -566,6 +645,28 @@ func labelsForTokens(userIDToToken map[string]string) map[string]string { return labels } +func asIDToRegistrationFromLabels(labels map[string]string) map[string]string { + asMap := make(map[string]string) + for k, v := range labels { + if strings.HasPrefix(k, "application_service_") { + asMap[strings.TrimPrefix(k, "application_service_")] = v + } + } + return asMap +} + +func labelsForApplicationServices(hs b.Homeserver) map[string]string { + labels := make(map[string]string) + // collect and store app service registrations as labels 'application_service_$as_id: $registration' + // collect and store app service access tokens as labels 'access_token_$sender_localpart: $as_token' + for _, as := range hs.ApplicationServices { + labels["application_service_"+as.ID] = generateASRegistrationYaml(as) + + labels["access_token_@"+as.SenderLocalpart+":"+hs.Name] = as.ASToken + } + return labels +} + func endpoints(p nat.PortMap, csPort, ssPort int) (baseURL, fedBaseURL string, err error) { csapiPort := fmt.Sprintf("%d/tcp", csPort) csapiPortInfo, ok := p[nat.Port(csapiPort)] diff --git a/internal/docker/deployer.go b/internal/docker/deployer.go index ca473135..b9111848 100644 --- a/internal/docker/deployer.go +++ b/internal/docker/deployer.go @@ -81,10 +81,12 @@ func (d *Deployer) Deploy(ctx context.Context, blueprintName string) (*Deploymen d.Counter++ contextStr := img.Labels["complement_context"] hsName := img.Labels["complement_hs_name"] + asIDToRegistrationMap := asIDToRegistrationFromLabels(img.Labels) + // TODO: Make CSAPI port configurable deployment, err := deployImage( d.Docker, img.ID, 8008, fmt.Sprintf("complement_%s_%s_%d", d.Namespace, contextStr, d.Counter), - blueprintName, hsName, contextStr, networkID, d.config.VersionCheckIterations) + blueprintName, hsName, asIDToRegistrationMap, contextStr, networkID, d.config.VersionCheckIterations) if err != nil { if deployment != nil && deployment.ContainerID != "" { // print logs to help debug diff --git a/internal/docker/deployment.go b/internal/docker/deployment.go index 672a4d59..5aa6c809 100644 --- a/internal/docker/deployment.go +++ b/internal/docker/deployment.go @@ -20,17 +20,23 @@ type Deployment struct { // HomeserverDeployment represents a running homeserver in a container. 
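The copy step above exists because the Docker API's CopyToContainer only accepts tar streams, so even a single registration file has to be wrapped in an in-memory archive. Factored out, the helper might look like this (copyFileToContainer is a name introduced for this sketch, not a function in the repo):

```go
package docker // sketch only

import (
	"archive/tar"
	"bytes"
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	client "github.com/docker/docker/client"
)

// copyFileToContainer streams a one-entry tar archive to the daemon,
// writing `contents` to the absolute `path` inside the container.
func copyFileToContainer(ctx context.Context, docker *client.Client, containerID, path string, contents []byte) error {
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	if err := tw.WriteHeader(&tar.Header{
		Name: path,                 // e.g. "/appservices/my_as_id.yaml"
		Mode: 0777,
		Size: int64(len(contents)), // must match the bytes written below
	}); err != nil {
		return fmt.Errorf("copyFileToContainer: %v", err)
	}
	if _, err := tw.Write(contents); err != nil {
		return fmt.Errorf("copyFileToContainer: %v", err)
	}
	if err := tw.Close(); err != nil {
		return fmt.Errorf("copyFileToContainer: %v", err)
	}
	// Extract relative to "/", so the header Name acts as an absolute path.
	return docker.CopyToContainer(ctx, containerID, "/", &buf, types.CopyToContainerOptions{})
}
```

Note the ordering in deployImage: the copy happens after ContainerCreate but before ContainerStart, so the registration files are guaranteed to be in place by the time start.sh walks /appservices/*.yaml to template homeserver.yaml.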
type HomeserverDeployment struct { - BaseURL string // e.g http://localhost:38646 - FedBaseURL string // e.g https://localhost:48373 - ContainerID string // e.g 10de45efba - AccessTokens map[string]string // e.g { "@alice:hs1": "myAcc3ssT0ken" } + BaseURL string // e.g http://localhost:38646 + FedBaseURL string // e.g https://localhost:48373 + ContainerID string // e.g 10de45efba + AccessTokens map[string]string // e.g { "@alice:hs1": "myAcc3ssT0ken" } + ApplicationServices map[string]string // e.g { "my-as-id": "id: xxx\nas_token: xxx ..."} } } // Destroy the entire deployment. Destroys all running containers. If `printServerLogs` is true, // will print container logs before killing the container. func (d *Deployment) Destroy(t *testing.T) { t.Helper() - d.Deployer.Destroy(d, t.Failed()) + d.Deployer.Destroy( + d, + // TODO: Revert this back to `t.Failed()`. + // I did this so I can always see the homeserver logs regardless of outcome + true, + ) } // Client returns a CSAPI client targeting the given hsName, using the access token for the given userID. From 614c66ed284eefad366cd1a9da5a44f7b576aa5f Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 22 Feb 2021 02:10:34 -0600 Subject: [PATCH 27/81] Revert always showing logs --- internal/docker/deployment.go | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/internal/docker/deployment.go b/internal/docker/deployment.go index 5aa6c809..489b69e2 100644 --- a/internal/docker/deployment.go +++ b/internal/docker/deployment.go @@ -31,12 +31,7 @@ type HomeserverDeployment struct { // will print container logs before killing the container. func (d *Deployment) Destroy(t *testing.T) { t.Helper() - d.Deployer.Destroy( - d, - // TODO: Revert this back to `t.Failed()`. - // I did this so I can always see the homeserver logs regardless of outcome - true, - ) + d.Deployer.Destroy(d, t.Failed()) } // Client returns a CSAPI client targeting the given hsName, using the access token for the given userID.
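Reverting to t.Failed() restores the usual behavior: container logs are dumped only when a test actually failed. For context, the consuming pattern from the test side looks like this (a schematic, not a test from this series):

```go
package tests

import (
	"testing"

	"github.com/matrix-org/complement/internal/b"
)

func TestLogDumpOnFailure(t *testing.T) { // schematic test name
	deployment := Deploy(t, "rooms_state", b.BlueprintHSWithApplicationService)
	// Destroy passes t.Failed() through to the deployer, so container
	// logs are printed only when the test has failed by this point.
	defer deployment.Destroy(t)

	// ... exercise the homeserver; any t.Fatalf before Destroy runs
	// will cause the logs to be dumped ...
}
```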
From 12e6ec0b8919c8cbb0e950219147522c57f85a72 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 22 Feb 2021 02:12:13 -0600 Subject: [PATCH 29/81] Add comment doc --- internal/docker/builder.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/internal/docker/builder.go b/internal/docker/builder.go index 619ad653..3bec6e35 100644 --- a/internal/docker/builder.go +++ b/internal/docker/builder.go @@ -415,6 +415,8 @@ func getCaVolume(docker *client.Client, ctx context.Context) (string, mount.Moun return caVolume, caMount, nil } +// getAppServiceVolume returns the correct mounts and volumes for providing the `/appservice` directory to homeserver containers +// containing application service registration files to be used by the homeserver func getAppServiceVolume(docker *client.Client, ctx context.Context) (string, mount.Mount, error) { asVolume, err := docker.VolumeCreate(context.Background(), volume.VolumesCreateBody{ //Driver: "overlay2", From 1b19990ca31bfe1a6112fd94e43660ec02dcdc84 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 22 Feb 2021 15:54:47 -0600 Subject: [PATCH 30/81] Some nits and remove the volume paths - Seems like the `Volumes` syntax is to create an anonymous volume, https://stackoverflow.com/a/58916037/796832 - And lots of people not knowing what `Volumes` syntax is or what to do. Seems like Mounts is the thing to use - https://github.com/fsouza/go-dockerclient/issues/155 - https://stackoverflow.com/questions/55718603/golang-docker-library-mounting-host-directory-volumes - https://stackoverflow.com/questions/48470194/defining-a-mount-point-for-volumes-in-golang-docker-sdk --- build/scripts/find-lint.sh | 5 +++- internal/b/blueprints.go | 6 ---- internal/docker/builder.go | 58 ++++++++++++++++---------------------- 3 files changed, 28 insertions(+), 41 deletions(-) diff --git a/build/scripts/find-lint.sh b/build/scripts/find-lint.sh index 55f81ed9..a54b8bc2 100755 --- a/build/scripts/find-lint.sh +++ b/build/scripts/find-lint.sh @@ -19,7 +19,10 @@ if [ ${1:-""} = "fast" ] then args="--fast" fi -if [[ -v COMPLEMENT_LINT_CONCURRENCY ]]; then +if [ -z ${COMPLEMENT_LINT_CONCURRENCY+x} ]; then + # COMPLEMENT_LINT_CONCURRENCY was not set + : +else args="${args} --concurrency $COMPLEMENT_LINT_CONCURRENCY" fi diff --git a/internal/b/blueprints.go b/internal/b/blueprints.go index 48223b0b..94cff59d 100644 --- a/internal/b/blueprints.go +++ b/internal/b/blueprints.go @@ -20,8 +20,6 @@ import ( "fmt" "strconv" "strings" - - "github.com/sirupsen/logrus" ) // KnownBlueprints lists static blueprints @@ -133,10 +131,6 @@ func Validate(bp Blueprint) (Blueprint, error) { } } - logrus.WithFields(logrus.Fields{ - "bp": bp.Homeservers[0].ApplicationServices, - }).Error("after modfiying bp") - return bp, nil } diff --git a/internal/docker/builder.go b/internal/docker/builder.go index 3bec6e35..c57bd347 100644 --- a/internal/docker/builder.go +++ b/internal/docker/builder.go @@ -347,29 +347,27 @@ func (d *Builder) deployBaseImage(blueprintName string, hs b.Homeserver, context } // getCaVolume returns the correct mounts and volumes for providing a CA to homeserver containers. -func getCaVolume(docker *client.Client, ctx context.Context) (string, mount.Mount, error) { - var caVolume string - var caMount mount.Mount - +// Returns the +func getCaVolume(ctx context.Context, docker *client.Client) (caMount mount.Mount, err error) { if os.Getenv("CI") == "true" { // When in CI, Complement itself is a container with the CA volume mounted at /ca. 
// We need to mount this volume to all homeserver containers to synchronize the CA cert. // This is needed to establish trust among all containers. // Get volume mounted at /ca. First we get the container ID - // /proc/1/cpuset should be /docker/ + // /proc/1/cpuset should be /docker/ cpuset, err := ioutil.ReadFile("/proc/1/cpuset") if err != nil { - return caVolume, caMount, err + return caMount, err } if !strings.Contains(string(cpuset), "docker") { - return caVolume, caMount, errors.New("Could not identify container ID using /proc/1/cpuset") + return caMount, errors.New("Could not identify container ID using /proc/1/cpuset") } cpusetList := strings.Split(strings.TrimSpace(string(cpuset)), "/") - containerId := cpusetList[len(cpusetList)-1] - container, err := docker.ContainerInspect(ctx, containerId) + containerID := cpusetList[len(cpusetList)-1] + container, err := docker.ContainerInspect(ctx, containerID) if err != nil { - return caVolume, caMount, err + return caMount, err } // Get the volume that matches the destination in our complement container var volumeName string @@ -382,27 +380,26 @@ func getCaVolume(docker *client.Client, ctx context.Context) (string, mount.Moun // We did not find a volume. This container might be created without a volume, // or CI=true is passed but we are not running in a container. // todo: log that we do not provide a CA volume mount? - return caVolume, caMount, nil - } else { - caVolume = "/ca" - caMount = mount.Mount{ - Type: mount.TypeVolume, - Source: volumeName, - Target: "/ca", - } + return caMount, nil + } + + caMount = mount.Mount{ + Type: mount.TypeVolume, + Source: volumeName, + Target: "/ca", } } else { // When not in CI, our CA cert is placed in the current working dir. // We bind mount this directory to all homeserver containers. 
cwd, err := os.Getwd() if err != nil { - return caVolume, caMount, err + return caMount, err } caCertificateDirHost := path.Join(cwd, "ca") if _, err := os.Stat(caCertificateDirHost); os.IsNotExist(err) { err = os.Mkdir(caCertificateDirHost, 0770) if err != nil { - return caVolume, caMount, err + return caMount, err } } @@ -412,25 +409,23 @@ func getCaVolume(docker *client.Client, ctx context.Context) (string, mount.Moun Target: "/ca", } } - return caVolume, caMount, nil + return caMount, nil } // getAppServiceVolume returns the correct mounts and volumes for providing the `/appservice` directory to homeserver containers // containing application service registration files to be used by the homeserver -func getAppServiceVolume(docker *client.Client, ctx context.Context) (string, mount.Mount, error) { +func getAppServiceVolume(ctx context.Context, docker *client.Client) (asMount mount.Mount, err error) { asVolume, err := docker.VolumeCreate(context.Background(), volume.VolumesCreateBody{ - //Driver: "overlay2", - DriverOpts: map[string]string{}, - Name: "appservices", + Name: "appservices", }) - asMount := mount.Mount{ + asMount = mount.Mount{ Type: mount.TypeVolume, Source: asVolume.Name, Target: "/appservices", } - return "/appservices", asMount, err + return asMount, err } func generateASRegistrationYaml(as b.ApplicationService) string { @@ -451,7 +446,6 @@ func deployImage( ) (*HomeserverDeployment, error) { ctx := context.Background() var extraHosts []string - var volumes = make(map[string]struct{}) var mounts []mount.Mount var err error @@ -463,22 +457,19 @@ func deployImage( } if os.Getenv("COMPLEMENT_CA") == "true" { - var caVolume string var caMount mount.Mount - caVolume, caMount, err = getCaVolume(docker, ctx) + caMount, err = getCaVolume(ctx, docker) if err != nil { return nil, err } - volumes[caVolume] = struct{}{} mounts = append(mounts, caMount) } - asVolume, asMount, err := getAppServiceVolume(docker, ctx) + asMount, err := getAppServiceVolume(ctx, docker) if err != nil { return nil, err } - volumes[asVolume] = struct{}{} mounts = append(mounts, asMount) env := []string{ @@ -495,7 +486,6 @@ func deployImage( "complement_blueprint": blueprintName, "complement_hs_name": hsName, }, - Volumes: volumes, }, &container.HostConfig{ PublishAllPorts: true, ExtraHosts: extraHosts, From c1f07c280642401a304a8f4bedee59ad5d065c8e Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 22 Feb 2021 17:20:55 -0600 Subject: [PATCH 31/81] Address review and add comment docs --- internal/b/blueprints.go | 10 ++++------ internal/docker/builder.go | 19 ++++++++++++++----- 2 files changed, 18 insertions(+), 11 deletions(-) diff --git a/internal/b/blueprints.go b/internal/b/blueprints.go index 94cff59d..b010daae 100644 --- a/internal/b/blueprints.go +++ b/internal/b/blueprints.go @@ -83,12 +83,10 @@ type ApplicationService struct { } type Event struct { - Type string - Sender string - OriginServerTS uint64 - StateKey *string - PrevEvents []string - Content map[string]interface{} + Type string + Sender string + StateKey *string + Content map[string]interface{} // This field is ignored in blueprints as clients are unable to set it. 
Used with federation.Server Unsigned map[string]interface{} } diff --git a/internal/docker/builder.go b/internal/docker/builder.go index c57bd347..8890f176 100644 --- a/internal/docker/builder.go +++ b/internal/docker/builder.go @@ -39,6 +39,7 @@ import ( "github.com/docker/go-connections/nat" "github.com/matrix-org/complement/internal/b" + internalClient "github.com/matrix-org/complement/internal/client" "github.com/matrix-org/complement/internal/config" "github.com/matrix-org/complement/internal/instruction" ) @@ -346,8 +347,9 @@ func (d *Builder) deployBaseImage(blueprintName string, hs b.Homeserver, context ) } -// getCaVolume returns the correct mounts and volumes for providing a CA to homeserver containers. -// Returns the +// getCaVolume returns the correct volume mount for providing a CA to homeserver containers. +// If running CI, returns an error if it's unable to find a volume that has /ca +// Otherwise, returns an error if we're unable to find the /ca directory on the local host func getCaVolume(ctx context.Context, docker *client.Client) (caMount mount.Mount, err error) { if os.Getenv("CI") == "true" { // When in CI, Complement itself is a container with the CA volume mounted at /ca. @@ -412,12 +414,16 @@ func getCaVolume(ctx context.Context, docker *client.Client) (caMount mount.Moun return caMount, nil } -// getAppServiceVolume returns the correct mounts and volumes for providing the `/appservice` directory to homeserver containers -// containing application service registration files to be used by the homeserver +// getAppServiceVolume returns a volume mount for providing the `/appservice` directory to homeserver containers. +// This directory will contain application service registration config files. +// Returns an error if the volume failed to create func getAppServiceVolume(ctx context.Context, docker *client.Client) (asMount mount.Mount, err error) { asVolume, err := docker.VolumeCreate(context.Background(), volume.VolumesCreateBody{ Name: "appservices", }) + if err != nil { + return asMount, err + } asMount = mount.Mount{ Type: mount.TypeVolume, @@ -506,10 +512,12 @@ func deployImage( // Create the application service files for asID, registration := range asIDToRegistrationMap { + // Create a fake/virtual file in memory that we can copy to the container + // via https://stackoverflow.com/a/52131297/796832 var buf bytes.Buffer tw := tar.NewWriter(&buf) err = tw.WriteHeader(&tar.Header{ - Name: fmt.Sprintf("/appservices/%s.yaml", asID), + Name: fmt.Sprintf("/appservices/%s.yaml", internalClient.GjsonEscape(asID)), Mode: 0777, Size: int64(len(registration)), }) @@ -519,6 +527,7 @@ func deployImage( tw.Write([]byte(registration)) tw.Close() + // Put our new fake file in the container volume err = docker.CopyToContainer(context.Background(), containerID, "/", &buf, types.CopyToContainerOptions{ AllowOverwriteDirWithFile: false, }) From 2f78441329f5f60fa230762448f0ce4d259827de Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 22 Feb 2021 17:34:50 -0600 Subject: [PATCH 32/81] Refactor so our custom event type is contained within our MSC test file --- tests/msc2716_test.go | 27 ++++++++++++++++++++------- 1 file changed, 20 insertions(+), 7 deletions(-) diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go index 1d83ed37..4656170a 100644 --- a/tests/msc2716_test.go +++ b/tests/msc2716_test.go @@ -21,6 +21,15 @@ import ( "github.com/tidwall/gjson" ) +type event struct { + Type string + Sender string + OriginServerTS uint64 + StateKey *string + PrevEvents []string + 
Content map[string]interface{} +} + // Test that the message events we insert between A and B come back in the correct order from /messages func TestBackfillingHistory(t *testing.T) { deployment := Deploy(t, "rooms_state", b.BlueprintHSWithApplicationService) @@ -88,7 +97,7 @@ func TestBackfillingHistory(t *testing.T) { insertOriginServerTs := uint64(timeAfterEventA.UnixNano() / 1000000) // If we see this message in the /sync, then something went wrong - event1 := sendEvent(t, alice, roomID, b.Event{ + event1 := sendEvent(t, alice, roomID, event{ Type: "m.room.message", PrevEvents: []string{ eventA, @@ -102,7 +111,7 @@ func TestBackfillingHistory(t *testing.T) { }) // This is just a dummy event we search for after event1 - eventStar := sendEvent(t, alice, roomID, b.Event{ + eventStar := sendEvent(t, alice, roomID, event{ Type: "m.room.message", Content: map[string]interface{}{ "msgtype": "m.text", @@ -129,7 +138,7 @@ func TestBackfillingHistory(t *testing.T) { eventA, _, _, timeAfterEventA := createMessagesInRoom(t, alice, roomID) insertOriginServerTs := uint64(timeAfterEventA.UnixNano() / 1000000) - alice.SendEventSynced(t, roomID, b.Event{ + eventID := sendEvent(t, alice, roomID, event{ Type: "m.room.message", PrevEvents: []string{ eventA, @@ -140,6 +149,10 @@ func TestBackfillingHistory(t *testing.T) { "body": "Message 1", }, }) + + alice.SyncUntilTimelineHas(t, roomID, func(r gjson.Result) bool { + return r.Get("event_id").Str == eventID + }) }) }) } @@ -147,7 +160,7 @@ func TestBackfillingHistory(t *testing.T) { var txnID int = 0 var txnPrefix string = "msc2716-txn" -func sendEvent(t *testing.T, c *client.CSAPI, roomID string, e b.Event) string { +func sendEvent(t *testing.T, c *client.CSAPI, roomID string, e event) string { txnID++ query := make(url.Values, len(e.PrevEvents)) @@ -210,7 +223,7 @@ func backfillMessagesAtTime(t *testing.T, c *client.CSAPI, roomID string, insert insertOriginServerTs := uint64(insertTime.UnixNano() / 1000000) // event1 - event1 := sendEvent(t, c, roomID, b.Event{ + event1 := sendEvent(t, c, roomID, event{ Type: "m.room.message", PrevEvents: []string{ insertAfterEvent, @@ -224,7 +237,7 @@ func backfillMessagesAtTime(t *testing.T, c *client.CSAPI, roomID string, insert }) // event2 - event2 := sendEvent(t, c, roomID, b.Event{ + event2 := sendEvent(t, c, roomID, event{ Type: "m.room.message", PrevEvents: []string{ event1, @@ -238,7 +251,7 @@ func backfillMessagesAtTime(t *testing.T, c *client.CSAPI, roomID string, insert }) // event3 - event3 := sendEvent(t, c, roomID, b.Event{ + event3 := sendEvent(t, c, roomID, event{ Type: "m.room.message", PrevEvents: []string{ event2, From 169a60d510961aa3ce454c1f20e32a08b3430c4e Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 22 Feb 2021 17:40:02 -0600 Subject: [PATCH 33/81] Revert lint change already in other PR #73 --- build/scripts/find-lint.sh | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/build/scripts/find-lint.sh b/build/scripts/find-lint.sh index a54b8bc2..55f81ed9 100755 --- a/build/scripts/find-lint.sh +++ b/build/scripts/find-lint.sh @@ -19,10 +19,7 @@ if [ ${1:-""} = "fast" ] then args="--fast" fi -if [ -z ${COMPLEMENT_LINT_CONCURRENCY+x} ]; then - # COMPLEMENT_LINT_CONCURRENCY was not set - : -else +if [[ -v COMPLEMENT_LINT_CONCURRENCY ]]; then args="${args} --concurrency $COMPLEMENT_LINT_CONCURRENCY" fi From c6155af04cd029cd3f805de04cf5ca7770924a27 Mon Sep 17 00:00:00 2001 From: Kegan Dougal Date: Tue, 23 Feb 2021 16:00:59 +0000 Subject: [PATCH 34/81] Path escape AS IDs to 
avoid directory traversal attacks --- internal/docker/builder.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/docker/builder.go b/internal/docker/builder.go index 8890f176..4f6fb1bf 100644 --- a/internal/docker/builder.go +++ b/internal/docker/builder.go @@ -22,6 +22,7 @@ import ( "io/ioutil" "log" "net/http" + "net/url" "os" "path" "runtime" @@ -39,7 +40,6 @@ import ( "github.com/docker/go-connections/nat" "github.com/matrix-org/complement/internal/b" - internalClient "github.com/matrix-org/complement/internal/client" "github.com/matrix-org/complement/internal/config" "github.com/matrix-org/complement/internal/instruction" ) @@ -517,7 +517,7 @@ func deployImage( var buf bytes.Buffer tw := tar.NewWriter(&buf) err = tw.WriteHeader(&tar.Header{ - Name: fmt.Sprintf("/appservices/%s.yaml", internalClient.GjsonEscape(asID)), + Name: fmt.Sprintf("/appservices/%s.yaml", url.PathEscape(asID)), Mode: 0777, Size: int64(len(registration)), }) From 30b4f6af942b48e7fd6311f9fcc6b6d9376d70d6 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Tue, 23 Feb 2021 20:40:43 -0600 Subject: [PATCH 35/81] Refactor tests to use variable amount of messages --- tests/msc2716_test.go | 232 ++++++++++++++++++++++-------------------- 1 file changed, 123 insertions(+), 109 deletions(-) diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go index 4656170a..6bcc6184 100644 --- a/tests/msc2716_test.go +++ b/tests/msc2716_test.go @@ -43,18 +43,18 @@ func TestBackfillingHistory(t *testing.T) { alice := deployment.Client(t, "hs1", userID) alice.JoinRoom(t, roomID, nil) - eventA, eventB, eventC, timeAfterEventA := createMessagesInRoom(t, alice, roomID) + eventsBefore := createMessagesInRoom(t, alice, roomID, 1) + eventBefore := eventsBefore[0] + timeAfterEventBefore := time.Now() - event1, event2, event3 := backfillMessagesAtTime(t, as, roomID, eventA, timeAfterEventA) + numBackfilledMessages := 3 + // wait X number of ms to ensure that the timestamp changes enough for each of the messages we try to backfill later + time.Sleep(time.Duration(numBackfilledMessages) * time.Millisecond) - // eventStar - eventStar := alice.SendEventSynced(t, roomID, b.Event{ - Type: "m.room.message", - Content: map[string]interface{}{ - "msgtype": "m.text", - "body": "Message *", - }, - }) + eventsAfter := createMessagesInRoom(t, alice, roomID, 2) + + // We backfill a bunch of events after eventBefore + backfilledEvents := backfillMessagesAtTime(t, as, roomID, eventBefore, timeAfterEventBefore, numBackfilledMessages) t.Run("parallel", func(t *testing.T) { t.Run("Backfilled messages come back in correct order", func(t *testing.T) { @@ -65,26 +65,43 @@ func TestBackfillingHistory(t *testing.T) { "limit": []string{"100"}, }) - expectedMessageOrder := []string{ - eventStar, eventC, eventB, event3, event2, event1, eventA, - } + // Order events from newest to oldest + var expectedMessageOrder []string + expectedMessageOrder = append(reversed(eventsAfter), reversed(backfilledEvents)...) + expectedMessageOrder = append(expectedMessageOrder, reversed(eventsBefore)...) 
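+ // To make the expected order concrete (an illustrative walk-through using the counts
+ // in this test: one event before, three backfilled, two after): reversed(eventsAfter)
+ // gives [A2, A1], reversed(backfilledEvents) gives [H3, H2, H1], and
+ // reversed(eventsBefore) gives [B1], so /messages with dir=b should list
+ // A2, A1, H3, H2, H1, B1 among the m.room.message events, newest first.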
must.MatchResponse(t, messagesRes, match.HTTPResponse{ JSON: []match.JSON{ - match.JSONArrayEach("chunk", func(r gjson.Result) error { - // Find all events in order - if len(r.Get("content").Get("body").Str) > 0 { - // Pop the next message off the expected list - nextEventInOrder := expectedMessageOrder[0] - expectedMessageOrder = expectedMessageOrder[1:] - - if r.Get("event_id").Str != nextEventInOrder { - return fmt.Errorf("Next event found was %s but expected %s", r.Get("event_id").Str, nextEventInOrder) - } + func(body []byte) error { + eventIDsFromResponse, err := getEventIDsFromResponseBody(body) + if err != nil { + return err } - return nil - }), + // Copy the array by value so we can modify it as we iterate in the foreach loop + workingExpectedMessageOrder := expectedMessageOrder + + // Match each event from the response in order to the list of expected events + matcher := match.JSONArrayEach("chunk", func(r gjson.Result) error { + // Find all events in order + if len(r.Get("content").Get("body").Str) > 0 { + // Pop the next message off the expected list + nextEventInOrder := workingExpectedMessageOrder[0] + // Update the list as we go for the next loop + workingExpectedMessageOrder = workingExpectedMessageOrder[1:] + + if r.Get("event_id").Str != nextEventInOrder { + return fmt.Errorf("Next event found was %s but expected %s\nActualEvents: %v\nExpectedEvents: %v", r.Get("event_id").Str, nextEventInOrder, eventIDsFromResponse, expectedMessageOrder) + } + } + + return nil + }) + + err = matcher(body) + + return err + }, }, }) }) @@ -93,14 +110,16 @@ func TestBackfillingHistory(t *testing.T) { t.Parallel() roomID := alice.CreateRoom(t, struct{}{}) - eventA, _, _, timeAfterEventA := createMessagesInRoom(t, alice, roomID) - insertOriginServerTs := uint64(timeAfterEventA.UnixNano() / 1000000) + eventsBefore := createMessagesInRoom(t, alice, roomID, 1) + eventBefore := eventsBefore[0] + timeAfterEventBefore := time.Now() + insertOriginServerTs := uint64(timeAfterEventBefore.UnixNano() / 1000000) - // If we see this message in the /sync, then something went wrong + // If we see this message in the /sync response, then something went wrong event1 := sendEvent(t, alice, roomID, event{ Type: "m.room.message", PrevEvents: []string{ - eventA, + eventBefore, }, OriginServerTS: insertOriginServerTs, Content: map[string]interface{}{ @@ -135,28 +154,61 @@ func TestBackfillingHistory(t *testing.T) { t.Parallel() roomID := alice.CreateRoom(t, struct{}{}) - eventA, _, _, timeAfterEventA := createMessagesInRoom(t, alice, roomID) - insertOriginServerTs := uint64(timeAfterEventA.UnixNano() / 1000000) + eventsBefore := createMessagesInRoom(t, alice, roomID, 1) + eventBefore := eventsBefore[0] + timeAfterEventBefore := time.Now() + insertOriginServerTs := uint64(timeAfterEventBefore.UnixNano() / 1000000) - eventID := sendEvent(t, alice, roomID, event{ + event1 := sendEvent(t, alice, roomID, event{ Type: "m.room.message", PrevEvents: []string{ - eventA, + eventBefore, }, OriginServerTS: insertOriginServerTs, Content: map[string]interface{}{ "msgtype": "m.text", "body": "Message 1", + // This is commented out on purpse. 
+ // We are explicitely testing when m.historical isn't present + //"m.historical": true, }, }) alice.SyncUntilTimelineHas(t, roomID, func(r gjson.Result) bool { - return r.Get("event_id").Str == eventID + return r.Get("event_id").Str == event1 }) }) }) } +func reversed(in []string) []string { + out := make([]string, len(in)) + for i := 0; i < len(in); i++ { + out[i] = in[len(in)-i-1] + } + return out +} + +func getEventIDsFromResponseBody(body []byte) (eventIDsFromResponse []string, err error) { + wantKey := "chunk" + res := gjson.GetBytes(body, wantKey) + if !res.Exists() { + return eventIDsFromResponse, fmt.Errorf("missing key '%s'", wantKey) + } + if !res.IsArray() { + return eventIDsFromResponse, fmt.Errorf("key '%s' is not an array (was %s)", wantKey, res.Type) + } + + res.ForEach(func(key, r gjson.Result) bool { + if len(r.Get("content").Get("body").Str) > 0 { + eventIDsFromResponse = append(eventIDsFromResponse, r.Get("event_id").Str) + } + return true + }) + + return eventIDsFromResponse, nil +} + var txnID int = 0 var txnPrefix string = "msc2716-txn" @@ -184,85 +236,47 @@ func sendEvent(t *testing.T, c *client.CSAPI, roomID string, e event) string { return eventID } -func createMessagesInRoom(t *testing.T, c *client.CSAPI, roomID string) (string, string, string, time.Time) { - // eventA - eventA := c.SendEventSynced(t, roomID, b.Event{ - Type: "m.room.message", - Content: map[string]interface{}{ - "msgtype": "m.text", - "body": "Message A", - }, - }) - - timeAfterEventA := time.Now() - - // wait 3ms to ensure that the timestamp changes enough for each of the 3 message we try to insert later - time.Sleep(3 * time.Millisecond) - - // eventB - eventB := c.SendEventSynced(t, roomID, b.Event{ - Type: "m.room.message", - Content: map[string]interface{}{ - "msgtype": "m.text", - "body": "Message B", - }, - }) - // eventC - eventC := c.SendEventSynced(t, roomID, b.Event{ - Type: "m.room.message", - Content: map[string]interface{}{ - "msgtype": "m.text", - "body": "Message C", - }, - }) +func createMessagesInRoom(t *testing.T, c *client.CSAPI, roomID string, count int) []string { + evs := make([]string, count) + for i := 0; i < len(evs); i++ { + newEvent := b.Event{ + Type: "m.room.message", + Content: map[string]interface{}{ + "msgtype": "m.text", + "body": fmt.Sprintf("Message %d", i), + }, + } + newEventId := c.SendEventSynced(t, roomID, newEvent) + evs[i] = newEventId + } - return eventA, eventB, eventC, timeAfterEventA + return evs } -func backfillMessagesAtTime(t *testing.T, c *client.CSAPI, roomID string, insertAfterEvent string, insertTime time.Time) (string, string, string) { +func backfillMessagesAtTime(t *testing.T, c *client.CSAPI, roomID string, insertAfterEventId string, insertTime time.Time, count int) []string { insertOriginServerTs := uint64(insertTime.UnixNano() / 1000000) - // event1 - event1 := sendEvent(t, c, roomID, event{ - Type: "m.room.message", - PrevEvents: []string{ - insertAfterEvent, - }, - OriginServerTS: insertOriginServerTs, - Content: map[string]interface{}{ - "msgtype": "m.text", - "body": "Message 1", - "m.historical": true, - }, - }) - - // event2 - event2 := sendEvent(t, c, roomID, event{ - Type: "m.room.message", - PrevEvents: []string{ - event1, - }, - OriginServerTS: insertOriginServerTs + 1, - Content: map[string]interface{}{ - "msgtype": "m.text", - "body": "Message 2", - "m.historical": true, - }, - }) - - // event3 - event3 := sendEvent(t, c, roomID, event{ - Type: "m.room.message", - PrevEvents: []string{ - event2, - }, - OriginServerTS: 
insertOriginServerTs + 2, - Content: map[string]interface{}{ - "msgtype": "m.text", - "body": "Message 3", - "m.historical": true, - }, - }) + evs := make([]string, count) + + prevEventId := insertAfterEventId + for i := 0; i < len(evs); i++ { + newEvent := event{ + Type: "m.room.message", + PrevEvents: []string{ + prevEventId, + }, + OriginServerTS: insertOriginServerTs + uint64(i), + Content: map[string]interface{}{ + "msgtype": "m.text", + "body": fmt.Sprintf("Backfilled %d", i), + "m.historical": true, + }, + } + newEventId := sendEvent(t, c, roomID, newEvent) + evs[i] = newEventId + + prevEventId = newEventId + } - return event1, event2, event3 + return evs } From 312feaa80d07d589a294329a97e1e5fd6bb53ad4 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Tue, 23 Feb 2021 20:45:43 -0600 Subject: [PATCH 36/81] Move test setup to the test itself --- tests/msc2716_test.go | 30 ++++++++++++++++-------------- 1 file changed, 16 insertions(+), 14 deletions(-) diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go index 6bcc6184..d202d0ae 100644 --- a/tests/msc2716_test.go +++ b/tests/msc2716_test.go @@ -37,28 +37,30 @@ func TestBackfillingHistory(t *testing.T) { asUserID := "@the-bridge-user:hs1" as := deployment.Client(t, "hs1", asUserID) - roomID := as.CreateRoom(t, struct{}{}) userID := "@alice:hs1" alice := deployment.Client(t, "hs1", userID) - alice.JoinRoom(t, roomID, nil) - eventsBefore := createMessagesInRoom(t, alice, roomID, 1) - eventBefore := eventsBefore[0] - timeAfterEventBefore := time.Now() + t.Run("parallel", func(t *testing.T) { + t.Run("Backfilled messages come back in correct order", func(t *testing.T) { + t.Parallel() - numBackfilledMessages := 3 - // wait X number of ms to ensure that the timestamp changes enough for each of the messages we try to backfill later - time.Sleep(time.Duration(numBackfilledMessages) * time.Millisecond) + roomID := as.CreateRoom(t, struct{}{}) - eventsAfter := createMessagesInRoom(t, alice, roomID, 2) + alice.JoinRoom(t, roomID, nil) - // We backfill a bunch of events after eventBefore - backfilledEvents := backfillMessagesAtTime(t, as, roomID, eventBefore, timeAfterEventBefore, numBackfilledMessages) + eventsBefore := createMessagesInRoom(t, alice, roomID, 1) + eventBefore := eventsBefore[0] + timeAfterEventBefore := time.Now() - t.Run("parallel", func(t *testing.T) { - t.Run("Backfilled messages come back in correct order", func(t *testing.T) { - t.Parallel() + numBackfilledMessages := 3 + // wait X number of ms to ensure that the timestamp changes enough for each of the messages we try to backfill later + time.Sleep(time.Duration(numBackfilledMessages) * time.Millisecond) + + eventsAfter := createMessagesInRoom(t, alice, roomID, 2) + + // We backfill a bunch of events after eventBefore + backfilledEvents := backfillMessagesAtTime(t, as, roomID, eventBefore, timeAfterEventBefore, numBackfilledMessages) messagesRes := alice.MustDoRaw(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, nil, "application/json", url.Values{ "dir": []string{"b"}, From 923c41ef0a265f02dfd6b4249a8945170fed9aed Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 24 Feb 2021 01:17:20 -0600 Subject: [PATCH 37/81] Add more messages to fill up sync and limit response See explanation in https://github.com/matrix-org/synapse/pull/9247#issuecomment-784066647 > Basically, for initial sync we will return the latest N events, which can involve backfilled events, e.g. 
if you have a graph `A2 -> A1 -> B1 -> B2 -> B3` where An are "live" events and Bn are backfilled events (i.e. the server only has two live events, the rest are backfilled) then initial sync will return `A1`, `A2` and then a bunch of the backfilled events (up to the limit). Incremental syncs from that, however, will just return new, non-backfilled events received after `A2`. > > So I'm wondering if your tests are correctly seeing some backfilled events, due to the server not having enough non-backfilled recent events to fill up the timeline section in the initial sync --- internal/client/client.go | 8 +++-- tests/msc2403_test.go | 2 ++ tests/msc2716_test.go | 62 +++++++++++++++++---------------------- 3 files changed, 35 insertions(+), 37 deletions(-)
diff --git a/internal/client/client.go b/internal/client/client.go index 28ef89d5..1f0759ff 100644 --- a/internal/client/client.go +++ b/internal/client/client.go @@ -108,13 +108,13 @@ func (c *CSAPI) SendEventSynced(t *testing.T, roomID string, e b.Event) string { // Will time out after CSAPI.SyncUntilTimeout. func (c *CSAPI) SyncUntilTimelineHas(t *testing.T, roomID string, check func(gjson.Result) bool) { t.Helper() - c.SyncUntil(t, "", "rooms.join."+GjsonEscape(roomID)+".timeline.events", check) + c.SyncUntil(t, "", "", "rooms.join."+GjsonEscape(roomID)+".timeline.events", check) } // SyncUntil blocks and continually calls /sync until the `check` function returns true. // If the `check` function fails the test, the failing event will be automatically logged. // Will time out after CSAPI.SyncUntilTimeout. -func (c *CSAPI) SyncUntil(t *testing.T, since, key string, check func(gjson.Result) bool) { +func (c *CSAPI) SyncUntil(t *testing.T, since, filter, key string, check func(gjson.Result) bool) { t.Helper() start := time.Now() checkCounter := 0 @@ -129,6 +129,10 @@ func (c *CSAPI) SyncUntil(t *testing.T, since, key string, check func(gjson.Resu if since != "" { query["since"] = []string{since} } + if filter != "" { + query["filter"] = []string{filter} + } + res, err := c.Do(t, "GET", []string{"_matrix", "client", "r0", "sync"}, nil, query) if err != nil { t.Fatalf("CSAPI.syncUntil since=%s error: %s", since, err)
diff --git a/tests/msc2403_test.go b/tests/msc2403_test.go index ea000f07..91f7b0bb 100644 --- a/tests/msc2403_test.go +++ b/tests/msc2403_test.go @@ -154,6 +154,7 @@ func knockingBetweenTwoUsersTest(t *testing.T, roomID string, inRoomUser, knocki knockingUser.SyncUntil( t, since, + "", "rooms.leave."+client.GjsonEscape(roomID)+".timeline.events", func(ev gjson.Result) bool { if ev.Get("type").Str != "m.room.member" || ev.Get("sender").Str != knockingUser.UserID { @@ -285,6 +286,7 @@ func knockOnRoomSynced(t *testing.T, c *client.CSAPI, roomID, reason string, ser c.SyncUntil( t, "", + "", "rooms."+client.GjsonEscape(knockUnstableIdentifier)+"."+client.GjsonEscape(roomID)+".knock_state.events", func(ev gjson.Result) bool { // We don't currently define any required state event types to be sent.
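The new filter argument threads straight through to /sync as the `filter` query parameter. A hedged usage sketch (this exact call shape is what the MSC2716 test below uses; `check` stands for any func(gjson.Result) bool predicate):

alice.SyncUntil(t, "", `{ "room": { "timeline": { "limit": 3 } } }`, "rooms.join."+client.GjsonEscape(roomID)+".timeline.events", check)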
diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go index d202d0ae..93e5926a 100644 --- a/tests/msc2716_test.go +++ b/tests/msc2716_test.go @@ -46,7 +46,6 @@ func TestBackfillingHistory(t *testing.T) { t.Parallel() roomID := as.CreateRoom(t, struct{}{}) - alice.JoinRoom(t, roomID, nil) eventsBefore := createMessagesInRoom(t, alice, roomID, 1) @@ -67,10 +66,12 @@ func TestBackfillingHistory(t *testing.T) { "limit": []string{"100"}, }) - // Order events from newest to oldest var expectedMessageOrder []string - expectedMessageOrder = append(reversed(eventsAfter), reversed(backfilledEvents)...) - expectedMessageOrder = append(expectedMessageOrder, reversed(eventsBefore)...) + expectedMessageOrder = append(expectedMessageOrder, eventsBefore...) + expectedMessageOrder = append(expectedMessageOrder, backfilledEvents...) + expectedMessageOrder = append(expectedMessageOrder, eventsAfter...) + // Order events from newest to oldest + expectedMessageOrder = reversed(expectedMessageOrder) must.MatchResponse(t, messagesRes, match.HTTPResponse{ JSON: []match.JSON{ @@ -111,51 +112,42 @@ func TestBackfillingHistory(t *testing.T) { t.Run("Backfilled events with m.historical do not come down /sync", func(t *testing.T) { t.Parallel() - roomID := alice.CreateRoom(t, struct{}{}) + roomID := as.CreateRoom(t, struct{}{}) + alice.JoinRoom(t, roomID, nil) + eventsBefore := createMessagesInRoom(t, alice, roomID, 1) eventBefore := eventsBefore[0] timeAfterEventBefore := time.Now() - insertOriginServerTs := uint64(timeAfterEventBefore.UnixNano() / 1000000) - // If we see this message in the /sync response, then something went wrong - event1 := sendEvent(t, alice, roomID, event{ - Type: "m.room.message", - PrevEvents: []string{ - eventBefore, - }, - OriginServerTS: insertOriginServerTs, - Content: map[string]interface{}{ - "msgtype": "m.text", - "body": "Message 1", - "m.historical": true, - }, - }) + // Create some more events to fill up the /sync response + createMessagesInRoom(t, alice, roomID, 5) - // This is just a dummy event we search for after event1 - eventStar := sendEvent(t, alice, roomID, event{ - Type: "m.room.message", - Content: map[string]interface{}{ - "msgtype": "m.text", - "body": "Message *", - }, - }) + // Insert a backfilled event + backfilledEvents := backfillMessagesAtTime(t, as, roomID, eventBefore, timeAfterEventBefore, 1) + backfilledEvent := backfilledEvents[0] + + // This is just a dummy event we search for after the backfilledEvent + eventsAfterBackfill := createMessagesInRoom(t, alice, roomID, 1) + eventAfterBackfill := eventsAfterBackfill[0] - // Sync until we find the star message. If we're able to see the star message - // that occurs after event1 without seeing event1 in the mean-time, I think we're safe to - // assume it won't sync - alice.SyncUntil(t, "", "rooms.join."+client.GjsonEscape(roomID)+".timeline.events", func(r gjson.Result) bool { - if r.Get("event_id").Str == event1 { - t.Fatalf("We should not see the %s event in /sync response but it was present", event1) + // Sync until we find the eventAfterBackfill. 
If we're able to see the eventAfterBackfill + // that occurs after the backfilledEvent without seeing the backfilledEvent in between, + // we're probably safe to assume it won't sync + alice.SyncUntil(t, "", `{ "room": { "timeline": { "limit": 3 } } }`, "rooms.join."+client.GjsonEscape(roomID)+".timeline.events", func(r gjson.Result) bool { + if r.Get("event_id").Str == backfilledEvent { + t.Fatalf("We should not see the %s backfilled event in /sync response but it was present", backfilledEvent) } - return r.Get("event_id").Str == eventStar + return r.Get("event_id").Str == eventAfterBackfill }) }) t.Run("Backfilled events without m.historical come down /sync", func(t *testing.T) { t.Parallel() - roomID := alice.CreateRoom(t, struct{}{}) + roomID := as.CreateRoom(t, struct{}{}) + alice.JoinRoom(t, roomID, nil) + eventsBefore := createMessagesInRoom(t, alice, roomID, 1) eventBefore := eventsBefore[0] timeAfterEventBefore := time.Now()
From 63a59afce291ca89e3312436b2eb2b738e8e8eaa Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 24 Feb 2021 02:31:28 -0600 Subject: [PATCH 38/81] Add some better comments about what we're actually doing and testing for --- tests/msc2716_test.go | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go index 93e5926a..abf2636f 100644 --- a/tests/msc2716_test.go +++ b/tests/msc2716_test.go @@ -35,9 +35,11 @@ func TestBackfillingHistory(t *testing.T) { deployment := Deploy(t, "rooms_state", b.BlueprintHSWithApplicationService) defer deployment.Destroy(t) + // Create the application service bridge user that is able to backfill messages asUserID := "@the-bridge-user:hs1" as := deployment.Client(t, "hs1", asUserID) + // Create the normal user which will send messages in the room userID := "@alice:hs1" alice := deployment.Client(t, "hs1", userID) t.Run("parallel", func(t *testing.T) { @@ -48,6 +50,7 @@ func TestBackfillingHistory(t *testing.T) { roomID := as.CreateRoom(t, struct{}{}) alice.JoinRoom(t, roomID, nil) + // Create the "live" event we are going to insert our backfilled events next to eventsBefore := createMessagesInRoom(t, alice, roomID, 1) eventBefore := eventsBefore[0] timeAfterEventBefore := time.Now() @@ -56,9 +59,10 @@ func TestBackfillingHistory(t *testing.T) { // wait X number of ms to ensure that the timestamp changes enough for each of the messages we try to backfill later time.Sleep(time.Duration(numBackfilledMessages) * time.Millisecond) + // Create some more "live" events after our insertion point eventsAfter := createMessagesInRoom(t, alice, roomID, 2) - // We backfill a bunch of events after eventBefore + // Then backfill a bunch of events between eventBefore and eventsAfter backfilledEvents := backfillMessagesAtTime(t, as, roomID, eventBefore, timeAfterEventBefore, numBackfilledMessages) messagesRes := alice.MustDoRaw(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, nil, "application/json", url.Values{ "dir": []string{"b"}, "limit": []string{"100"}, }) @@ -75,6 +79,9 @@ func TestBackfillingHistory(t *testing.T) { must.MatchResponse(t, messagesRes, match.HTTPResponse{ JSON: []match.JSON{ + // We're using this weird custom matcher function because we want to iterate the full response + // to see all of the events from the response. This way we can use it to easily compare in + // the fail error message when the test fails and compare the actual order to the expected order.
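+ // For instance (illustrative event IDs, not from a real run), a mismatch fails with:
+ //   Next event found was $event5 but expected $event3
+ //   ActualEvents: [$event1 $event2 $event5]
+ //   ExpectedEvents: [$event1 $event2 $event3]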
func(body []byte) error { eventIDsFromResponse, err := getEventIDsFromResponseBody(body) if err != nil { @@ -82,6 +89,7 @@ func TestBackfillingHistory(t *testing.T) { } // Copy the array by value so we can modify it as we iterate in the foreach loop + // We save the full untouched `expectedMessageOrder` for use in the log messages workingExpectedMessageOrder := expectedMessageOrder // Match each event from the response in order to the list of expected events @@ -90,7 +98,6 @@ func TestBackfillingHistory(t *testing.T) { if len(r.Get("content").Get("body").Str) > 0 { // Pop the next message off the expected list nextEventInOrder := workingExpectedMessageOrder[0] - // Update the list as we go for the next loop workingExpectedMessageOrder = workingExpectedMessageOrder[1:] if r.Get("event_id").Str != nextEventInOrder { @@ -115,11 +122,12 @@ func TestBackfillingHistory(t *testing.T) { roomID := as.CreateRoom(t, struct{}{}) alice.JoinRoom(t, roomID, nil) + // Create the "live" event we are going to insert our backfilled events next to eventsBefore := createMessagesInRoom(t, alice, roomID, 1) eventBefore := eventsBefore[0] timeAfterEventBefore := time.Now() - // Create some more events to fill up the /sync response + // Create some "live" events to saturate and fill up the /sync response createMessagesInRoom(t, alice, roomID, 5) // Insert a backfilled event @@ -153,7 +161,9 @@ func TestBackfillingHistory(t *testing.T) { timeAfterEventBefore := time.Now() insertOriginServerTs := uint64(timeAfterEventBefore.UnixNano() / 1000000) - event1 := sendEvent(t, alice, roomID, event{ + // Send an event that has `prev_event` and `ts` set but not `m.historical`. + // We should see these type of events in the `/sync` response + eventWeShouldSee := sendEvent(t, alice, roomID, event{ Type: "m.room.message", PrevEvents: []string{ eventBefore, @@ -169,7 +179,7 @@ func TestBackfillingHistory(t *testing.T) { }) alice.SyncUntilTimelineHas(t, roomID, func(r gjson.Result) bool { - return r.Get("event_id").Str == event1 + return r.Get("event_id").Str == eventWeShouldSee }) }) }) @@ -204,6 +214,8 @@ func getEventIDsFromResponseBody(body []byte) (eventIDsFromResponse []string, er } var txnID int = 0 + +// The transactions need to be prefixed so they don't collide with the txnID in client.go var txnPrefix string = "msc2716-txn" func sendEvent(t *testing.T, c *client.CSAPI, roomID string, e event) string { From 172ecf21fd7f80ad2c54d5a108c522eef0db4431 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 24 Feb 2021 03:03:55 -0600 Subject: [PATCH 39/81] Start of test that normal users can't backfill messages --- tests/msc2716_test.go | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go index abf2636f..d581cb09 100644 --- a/tests/msc2716_test.go +++ b/tests/msc2716_test.go @@ -163,7 +163,7 @@ func TestBackfillingHistory(t *testing.T) { // Send an event that has `prev_event` and `ts` set but not `m.historical`. // We should see these type of events in the `/sync` response - eventWeShouldSee := sendEvent(t, alice, roomID, event{ + eventWeShouldSee := sendEvent(t, as, roomID, event{ Type: "m.room.message", PrevEvents: []string{ eventBefore, @@ -171,7 +171,7 @@ func TestBackfillingHistory(t *testing.T) { OriginServerTS: insertOriginServerTs, Content: map[string]interface{}{ "msgtype": "m.text", - "body": "Message 1", + "body": "Message with prev_event and ts but no m.historical", // This is commented out on purpse. 
// We are explicitely testing when m.historical isn't present //"m.historical": true, @@ -182,6 +182,25 @@ func TestBackfillingHistory(t *testing.T) { return r.Get("event_id").Str == eventWeShouldSee }) }) + + t.Run("Normal users aren't allowed to backfill messages", func(t *testing.T) { + t.Parallel() + + roomID := as.CreateRoom(t, struct{}{}) + alice.JoinRoom(t, roomID, nil) + + eventsBefore := createMessagesInRoom(t, alice, roomID, 1) + eventBefore := eventsBefore[0] + timeAfterEventBefore := time.Now() + + // Normal user alice should not be able to backfill messages + backfillMessagesAtTime(t, alice, roomID, eventBefore, timeAfterEventBefore, 1) + + // TODO: Check that prev_events not on message + // Also check response to https://github.com/matrix-org/synapse/pull/9247#discussion_r581761053 + // because it would be nice to just throw a 403 at the user when they try to do this + + }) }) } From 711ca3c08128541726d14d404b4593babee658df Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Fri, 12 Mar 2021 16:40:56 -0600 Subject: [PATCH 40/81] Proper message order using Synapse backfilled events and point all to the insert point See https://github.com/matrix-org/synapse/pull/9247#discussion_r592122962 --- tests/msc2716_test.go | 44 ++++++++++++++++++++++++++++--------------- 1 file changed, 29 insertions(+), 15 deletions(-) diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go index d581cb09..2b4ea347 100644 --- a/tests/msc2716_test.go +++ b/tests/msc2716_test.go @@ -18,6 +18,7 @@ import ( "github.com/matrix-org/complement/internal/client" "github.com/matrix-org/complement/internal/match" "github.com/matrix-org/complement/internal/must" + "github.com/sirupsen/logrus" "github.com/tidwall/gjson" ) @@ -55,15 +56,15 @@ func TestBackfillingHistory(t *testing.T) { eventBefore := eventsBefore[0] timeAfterEventBefore := time.Now() - numBackfilledMessages := 3 + numHistoricalMessages := 3 // wait X number of ms to ensure that the timestamp changes enough for each of the messages we try to backfill later - time.Sleep(time.Duration(numBackfilledMessages) * time.Millisecond) + time.Sleep(time.Duration(numHistoricalMessages) * time.Millisecond) // Create some more "live" events after our insertion point eventsAfter := createMessagesInRoom(t, alice, roomID, 2) // Then backfill a bunch of events between eventBefore and eventsAfter - backfilledEvents := backfillMessagesAtTime(t, as, roomID, eventBefore, timeAfterEventBefore, numBackfilledMessages) + historticalEvents := reversed(backfillHistoricalMessagesInReverseChronologicalAtTime(t, as, roomID, eventBefore, timeAfterEventBefore, numHistoricalMessages)) messagesRes := alice.MustDoRaw(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, nil, "application/json", url.Values{ "dir": []string{"b"}, @@ -72,11 +73,19 @@ func TestBackfillingHistory(t *testing.T) { var expectedMessageOrder []string expectedMessageOrder = append(expectedMessageOrder, eventsBefore...) - expectedMessageOrder = append(expectedMessageOrder, backfilledEvents...) + expectedMessageOrder = append(expectedMessageOrder, historticalEvents...) expectedMessageOrder = append(expectedMessageOrder, eventsAfter...) 
// Order events from newest to oldest expectedMessageOrder = reversed(expectedMessageOrder) + contextRes := alice.MustDoRaw(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "context", eventsAfter[1]}, nil, "application/json", url.Values{ + "limit": []string{"100"}, + }) + contextResBody := client.ParseJSON(t, contextRes) + logrus.WithFields(logrus.Fields{ + "contextResBody": string(contextResBody), + }).Error("context res") + must.MatchResponse(t, messagesRes, match.HTTPResponse{ JSON: []match.JSON{ // We're using this weird custom matcher function because we want to iterate the full response @@ -131,8 +140,8 @@ func TestBackfillingHistory(t *testing.T) { createMessagesInRoom(t, alice, roomID, 5) // Insert a backfilled event - backfilledEvents := backfillMessagesAtTime(t, as, roomID, eventBefore, timeAfterEventBefore, 1) - backfilledEvent := backfilledEvents[0] + historticalEvents := backfillHistoricalMessagesInReverseChronologicalAtTime(t, as, roomID, eventBefore, timeAfterEventBefore, 1) + backfilledEvent := historticalEvents[0] // This is just a dummy event we search for after the backfilledEvent eventsAfterBackfill := createMessagesInRoom(t, alice, roomID, 1) @@ -194,7 +203,7 @@ func TestBackfillingHistory(t *testing.T) { timeAfterEventBefore := time.Now() // Normal user alice should not be able to backfill messages - backfillMessagesAtTime(t, alice, roomID, eventBefore, timeAfterEventBefore, 1) + backfillHistoricalMessagesInReverseChronologicalAtTime(t, alice, roomID, eventBefore, timeAfterEventBefore, 1) // TODO: Check that prev_events not on message // Also check response to https://github.com/matrix-org/synapse/pull/9247#discussion_r581761053 @@ -224,7 +233,7 @@ func getEventIDsFromResponseBody(body []byte) (eventIDsFromResponse []string, er res.ForEach(func(key, r gjson.Result) bool { if len(r.Get("content").Get("body").Str) > 0 { - eventIDsFromResponse = append(eventIDsFromResponse, r.Get("event_id").Str) + eventIDsFromResponse = append(eventIDsFromResponse, r.Get("event_id").Str+" ("+r.Get("content").Get("body").Str+")") } return true }) @@ -278,29 +287,34 @@ func createMessagesInRoom(t *testing.T, c *client.CSAPI, roomID string, count in return evs } -func backfillMessagesAtTime(t *testing.T, c *client.CSAPI, roomID string, insertAfterEventId string, insertTime time.Time, count int) []string { +// Backfill in a reverse-chronogical order (most recent history to oldest history) +// Reverse-chronogical is a constraint of the Synapse implementation. 
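+// An illustrative trace of the function below (an example, not part of the patch):
+// with count = 3 the loop sends messageIndex 2 first ("Historical 2", ts+2), then 1,
+// then 0, each with prev_event pointing at insertAfterEventId, so the newest
+// historical event is inserted first while the bodies still read 0, 1, 2 in
+// chronological order.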
+func backfillHistoricalMessagesInReverseChronologicalAtTime(t *testing.T, c *client.CSAPI, roomID string, insertAfterEventId string, insertTime time.Time, count int) []string { insertOriginServerTs := uint64(insertTime.UnixNano() / 1000000) evs := make([]string, count) - prevEventId := insertAfterEventId for i := 0; i < len(evs); i++ { + // We have to backfill historical messages from most recent to oldest + // since backfilled messages decrement their `stream_order` and we want messages + // to appear in order from the `/messages` endpoint + messageIndex := (count - 1) - i + newEvent := event{ Type: "m.room.message", PrevEvents: []string{ - prevEventId, + // Hang all histortical messages off of the insert point + insertAfterEventId, }, - OriginServerTS: insertOriginServerTs + uint64(i), + OriginServerTS: insertOriginServerTs + uint64(messageIndex), Content: map[string]interface{}{ "msgtype": "m.text", - "body": fmt.Sprintf("Backfilled %d", i), + "body": fmt.Sprintf("Historical %d", messageIndex), "m.historical": true, }, } newEventId := sendEvent(t, c, roomID, newEvent) evs[i] = newEventId - - prevEventId = newEventId } return evs From 95b20af8dcf3e05b69d35180603957f63b7c8bc0 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 15 Mar 2021 16:34:11 -0500 Subject: [PATCH 41/81] Finish 403 forbidden for normal users trying to use ?prev_event --- tests/msc2716_test.go | 28 +++++++++++++++++++++++----- 1 file changed, 23 insertions(+), 5 deletions(-) diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go index 2b4ea347..342ae923 100644 --- a/tests/msc2716_test.go +++ b/tests/msc2716_test.go @@ -201,14 +201,32 @@ func TestBackfillingHistory(t *testing.T) { eventsBefore := createMessagesInRoom(t, alice, roomID, 1) eventBefore := eventsBefore[0] timeAfterEventBefore := time.Now() + insertOriginServerTs := uint64(timeAfterEventBefore.UnixNano() / 1000000) - // Normal user alice should not be able to backfill messages - backfillHistoricalMessagesInReverseChronologicalAtTime(t, alice, roomID, eventBefore, timeAfterEventBefore, 1) + e := event{ + Type: "m.room.message", + PrevEvents: []string{ + eventBefore, + }, + OriginServerTS: insertOriginServerTs, + Content: map[string]interface{}{ + "msgtype": "m.text", + "body": "Historical message", + "m.historical": true, + }, + } - // TODO: Check that prev_events not on message - // Also check response to https://github.com/matrix-org/synapse/pull/9247#discussion_r581761053 - // because it would be nice to just throw a 403 at the user when they try to do this + query := make(url.Values, len(e.PrevEvents)) + query.Add("prev_event", e.PrevEvents[0]) + query.Add("ts", strconv.FormatUint(e.OriginServerTS, 10)) + b, err := json.Marshal(e.Content) + if err != nil { + t.Fatalf("msc2716.sendEvent failed to marshal JSON body: %s", err) + } + + // Normal user alice should not be able to backfill messages + alice.MustDoWithStatusRaw(t, "PUT", []string{"_matrix", "client", "r0", "rooms", roomID, "send", e.Type, txnPrefix + strconv.Itoa(txnID)}, b, "application/json", query, 403) }) }) } From dee8369e7c30e37568990ee342d39f8d45ff5604 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 15 Mar 2021 17:12:07 -0500 Subject: [PATCH 42/81] Add test around unknown prev event --- tests/msc2716_test.go | 44 ++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 43 insertions(+), 1 deletion(-) diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go index 342ae923..99dca09e 100644 --- a/tests/msc2716_test.go +++ b/tests/msc2716_test.go @@ -192,6 +192,48 @@ func 
TestBackfillingHistory(t *testing.T) { }) }) + t.Run("Unrecognised prev_event ID will throw an error", func(t *testing.T) { + t.Parallel() + + roomID := as.CreateRoom(t, struct{}{}) + + e := event{ + Type: "m.room.message", + PrevEvents: []string{ + // Here is the area of interest in the event + "$some-non-existant-event-id", + }, + OriginServerTS: uint64(time.Now().UnixNano() / 1000000), + Content: map[string]interface{}{ + "msgtype": "m.text", + "body": "Historical message", + "m.historical": true, + }, + } + + query := make(url.Values, len(e.PrevEvents)) + query.Add("prev_event", e.PrevEvents[0]) + query.Add("ts", strconv.FormatUint(e.OriginServerTS, 10)) + + b, err := json.Marshal(e.Content) + if err != nil { + t.Fatalf("msc2716.sendEvent failed to marshal JSON body: %s", err) + } + + as.MustDoWithStatusRaw( + t, + "PUT", + []string{"_matrix", "client", "r0", "rooms", roomID, "send", e.Type, txnPrefix + "404-unrecognized-prev-event-id"}, + b, + "application/json", + query, + // TODO: Seems like this makes more sense as a 404 + // But the current Synapse code around unknown prev events will throw -> + // `403: No create event in auth events` + 403, + ) + }) + t.Run("Normal users aren't allowed to backfill messages", func(t *testing.T) { t.Parallel() @@ -226,7 +268,7 @@ func TestBackfillingHistory(t *testing.T) { } // Normal user alice should not be able to backfill messages - alice.MustDoWithStatusRaw(t, "PUT", []string{"_matrix", "client", "r0", "rooms", roomID, "send", e.Type, txnPrefix + strconv.Itoa(txnID)}, b, "application/json", query, 403) + alice.MustDoWithStatusRaw(t, "PUT", []string{"_matrix", "client", "r0", "rooms", roomID, "send", e.Type, txnPrefix + "403-no-normal-user-test"}, b, "application/json", query, 403) }) }) } From 8978f589f83045f70e1c50ca7fdb676e80f1f3d6 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 15 Mar 2021 17:31:01 -0500 Subject: [PATCH 43/81] Fix historical typo --- tests/msc2716_test.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go index 99dca09e..f3e83797 100644 --- a/tests/msc2716_test.go +++ b/tests/msc2716_test.go @@ -45,7 +45,7 @@ func TestBackfillingHistory(t *testing.T) { alice := deployment.Client(t, "hs1", userID) t.Run("parallel", func(t *testing.T) { - t.Run("Backfilled messages come back in correct order", func(t *testing.T) { + t.Run("Backfilled historical messages come back in correct order", func(t *testing.T) { t.Parallel() roomID := as.CreateRoom(t, struct{}{}) @@ -64,7 +64,7 @@ func TestBackfillingHistory(t *testing.T) { eventsAfter := createMessagesInRoom(t, alice, roomID, 2) // Then backfill a bunch of events between eventBefore and eventsAfter - historticalEvents := reversed(backfillHistoricalMessagesInReverseChronologicalAtTime(t, as, roomID, eventBefore, timeAfterEventBefore, numHistoricalMessages)) + historicalEvents := reversed(backfillHistoricalMessagesInReverseChronologicalAtTime(t, as, roomID, eventBefore, timeAfterEventBefore, numHistoricalMessages)) messagesRes := alice.MustDoRaw(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, nil, "application/json", url.Values{ "dir": []string{"b"}, @@ -73,7 +73,7 @@ func TestBackfillingHistory(t *testing.T) { var expectedMessageOrder []string expectedMessageOrder = append(expectedMessageOrder, eventsBefore...) - expectedMessageOrder = append(expectedMessageOrder, historticalEvents...) + expectedMessageOrder = append(expectedMessageOrder, historicalEvents...) 
expectedMessageOrder = append(expectedMessageOrder, eventsAfter...) // Order events from newest to oldest expectedMessageOrder = reversed(expectedMessageOrder) @@ -125,7 +125,7 @@ func TestBackfillingHistory(t *testing.T) { }) }) - t.Run("Backfilled events with m.historical do not come down /sync", func(t *testing.T) { + t.Run("Backfilled historical events with m.historical do not come down /sync", func(t *testing.T) { t.Parallel() roomID := as.CreateRoom(t, struct{}{}) @@ -140,8 +140,8 @@ func TestBackfillingHistory(t *testing.T) { createMessagesInRoom(t, alice, roomID, 5) // Insert a backfilled event - historticalEvents := backfillHistoricalMessagesInReverseChronologicalAtTime(t, as, roomID, eventBefore, timeAfterEventBefore, 1) - backfilledEvent := historticalEvents[0] + historicalEvents := backfillHistoricalMessagesInReverseChronologicalAtTime(t, as, roomID, eventBefore, timeAfterEventBefore, 1) + backfilledEvent := historicalEvents[0] // This is just a dummy event we search for after the backfilledEvent eventsAfterBackfill := createMessagesInRoom(t, alice, roomID, 1) @@ -159,7 +159,7 @@ func TestBackfillingHistory(t *testing.T) { }) }) - t.Run("Backfilled events without m.historical come down /sync", func(t *testing.T) { + t.Run("Backfilled historical events without m.historical come down /sync", func(t *testing.T) { t.Parallel() roomID := as.CreateRoom(t, struct{}{}) @@ -363,7 +363,7 @@ func backfillHistoricalMessagesInReverseChronologicalAtTime(t *testing.T, c *cli newEvent := event{ Type: "m.room.message", PrevEvents: []string{ - // Hang all histortical messages off of the insert point + // Hang all historical messages off of the insert point insertAfterEventId, }, OriginServerTS: insertOriginServerTs + uint64(messageIndex), From c5264d326fdec5a62794bb8e700882abfe980c82 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Tue, 16 Mar 2021 04:13:43 -0500 Subject: [PATCH 44/81] Work on backfilling history for a user who hasn't joined before --- dockerfiles/synapse/homeserver.yaml | 6 ++ internal/docker/builder.go | 4 +- tests/msc2716_test.go | 107 ++++++++++++++++++++++++++-- 3 files changed, 110 insertions(+), 7 deletions(-) diff --git a/dockerfiles/synapse/homeserver.yaml b/dockerfiles/synapse/homeserver.yaml index 2375efff..b0bc96bc 100644 --- a/dockerfiles/synapse/homeserver.yaml +++ b/dockerfiles/synapse/homeserver.yaml @@ -92,6 +92,12 @@ rc_joins: federation_rr_transactions_per_room_per_second: 9999 + +## Registration ## + +enable_registration: True +registration_shared_secret: "$FZjMa&9fAAi9Xf[F)jAY[C#y?QwT[!qnBi+:ZLj.-)zVf]:C39H4Y99c$LPCh}{" + ## API Configuration ## # A list of application service config files to use diff --git a/internal/docker/builder.go b/internal/docker/builder.go index 4f6fb1bf..3b211e79 100644 --- a/internal/docker/builder.go +++ b/internal/docker/builder.go @@ -442,7 +442,9 @@ func generateASRegistrationYaml(as b.ApplicationService) string { fmt.Sprintf("sender_localpart: %s\n", as.SenderLocalpart) + fmt.Sprintf("rate_limited: %v\n", as.RateLimited) + "namespaces:\n" + - " users: []\n" + + " users:\n" + + " - exclusive: false\n" + + " regex: .*\n" + " rooms: []\n" + " aliases: []\n" } diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go index f3e83797..45236b04 100644 --- a/tests/msc2716_test.go +++ b/tests/msc2716_test.go @@ -35,6 +35,7 @@ type event struct { func TestBackfillingHistory(t *testing.T) { deployment := Deploy(t, "rooms_state", b.BlueprintHSWithApplicationService) defer deployment.Destroy(t) + //defer time.Sleep(2 * time.Hour) // 
Create the application service bridge user that is able to backfill messages asUserID := "@the-bridge-user:hs1" @@ -64,7 +65,7 @@ func TestBackfillingHistory(t *testing.T) { eventsAfter := createMessagesInRoom(t, alice, roomID, 2) // Then backfill a bunch of events between eventBefore and eventsAfter - historicalEvents := reversed(backfillHistoricalMessagesInReverseChronologicalAtTime(t, as, roomID, eventBefore, timeAfterEventBefore, numHistoricalMessages)) + historicalEvents := reversed(backfillHistoricalMessagesInReverseChronologicalAtTime(t, as, "", roomID, eventBefore, timeAfterEventBefore, numHistoricalMessages)) messagesRes := alice.MustDoRaw(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, nil, "application/json", url.Values{ "dir": []string{"b"}, @@ -125,6 +126,47 @@ func TestBackfillingHistory(t *testing.T) { }) }) + t.Run("Backfilled historical events resolve with proper state", func(t *testing.T) { + t.Parallel() + + roomID := as.CreateRoom(t, map[string]interface{}{ + "preset": "public_chat", + "name": "the hangout spot", + }) + alice.JoinRoom(t, roomID, nil) + + // Create the "live" event we are going to insert our backfilled events next to + eventsBefore := createMessagesInRoom(t, alice, roomID, 10) + eventBefore := eventsBefore[0] + timeAfterEventBefore := time.Now() + + numHistoricalMessages := 6 + // wait X number of ms to ensure that the timestamp changes enough for each of the messages we try to backfill later + time.Sleep(time.Duration(numHistoricalMessages) * time.Millisecond) + + // Create some events after. + // Fill up the buffer so we have to scrollback to the inserted history later + createMessagesInRoom(t, alice, roomID, 200) + + virtualUserLocalpart := "maria" + virtualUserID := fmt.Sprintf("@%s:hs1", virtualUserLocalpart) + // Register and join the virtual user + ensureRegistered(t, as, virtualUserLocalpart) + joinRoom(t, as, virtualUserID, roomID) + + // TODO: Figure out why after joining the virtual user and it gets a 200 OK, + // we still see `SynapseError: 403 - User @maria:hs1 not in room !ZIIflEwiKrDFeMEMKV:hs1` + // when sending a message. Debugging with an Element interface on top even, shows Maria as joined! + + // Insert the most recent chunk of backfilled history + backfillHistoricalMessagesInReverseChronologicalAtTime(t, as, virtualUserID, roomID, eventBefore, timeAfterEventBefore.Add(time.Millisecond*time.Duration(numHistoricalMessages)), 3) + + // Insert another older chunk of backfilled history from the same user. + // See if the joins and meta data still are visible on the subsequent chunk + backfillHistoricalMessagesInReverseChronologicalAtTime(t, as, virtualUserID, roomID, eventBefore, timeAfterEventBefore, 3) + + }) + t.Run("Backfilled historical events with m.historical do not come down /sync", func(t *testing.T) { t.Parallel() @@ -140,7 +182,7 @@ func TestBackfillingHistory(t *testing.T) { createMessagesInRoom(t, alice, roomID, 5) // Insert a backfilled event - historicalEvents := backfillHistoricalMessagesInReverseChronologicalAtTime(t, as, roomID, eventBefore, timeAfterEventBefore, 1) + historicalEvents := backfillHistoricalMessagesInReverseChronologicalAtTime(t, as, "", roomID, eventBefore, timeAfterEventBefore, 1) backfilledEvent := historicalEvents[0] // This is just a dummy event we search for after the backfilledEvent @@ -172,7 +214,7 @@ func TestBackfillingHistory(t *testing.T) { // Send an event that has `prev_event` and `ts` set but not `m.historical`. 
// We should see these type of events in the `/sync` response - eventWeShouldSee := sendEvent(t, as, roomID, event{ + eventWeShouldSee := sendEvent(t, as, "", roomID, event{ Type: "m.room.message", PrevEvents: []string{ eventBefore, @@ -306,7 +348,7 @@ var txnID int = 0 // The transactions need to be prefixed so they don't collide with the txnID in client.go var txnPrefix string = "msc2716-txn" -func sendEvent(t *testing.T, c *client.CSAPI, roomID string, e event) string { +func sendEvent(t *testing.T, c *client.CSAPI, virtualUserID string, roomID string, e event) string { txnID++ query := make(url.Values, len(e.PrevEvents)) @@ -318,6 +360,10 @@ func sendEvent(t *testing.T, c *client.CSAPI, roomID string, e event) string { query.Add("ts", strconv.FormatUint(e.OriginServerTS, 10)) } + if virtualUserID != "" { + query.Add("user_id", virtualUserID) + } + b, err := json.Marshal(e.Content) if err != nil { t.Fatalf("msc2716.sendEvent failed to marshal JSON body: %s", err) @@ -330,6 +376,55 @@ func sendEvent(t *testing.T, c *client.CSAPI, roomID string, e event) string { return eventID } +// ensureRegistered makes sure the user is registered for the homeserver regardless +// if they are already registered or not. If unable to register, fails the test +func ensureRegistered(t *testing.T, c *client.CSAPI, virtualUserLocalpart string) { + // b, err := json.Marshal(map[string]interface{}{ + // "username": virtualUserLocalpart, + // }) + // if err != nil { + // t.Fatalf("msc2716.ensureRegistered failed to marshal JSON body: %s", err) + // } + + res, err := c.DoWithAuthRaw(t, "POST", []string{"_matrix", "client", "r0", "register"}, json.RawMessage(fmt.Sprintf(`{ "username": "%s" }`, virtualUserLocalpart)), "application/json", url.Values{}) + + if err != nil { + t.Error(err) + } + + if res.StatusCode == 200 { + return + } + + body := client.ParseJSON(t, res) + errcode := client.GetJSONFieldStr(t, body, "errcode") + + if res.StatusCode == 400 && errcode == "M_USER_IN_USE" { + return + } else { + errorMessage := client.GetJSONFieldStr(t, body, "error") + t.Fatalf("msc2716.ensureRegistered failed to register: (%s) %s", errcode, errorMessage) + } +} + +// joinRoom joins the room ID or alias given, else fails the test. Returns the room ID. +func joinRoom(t *testing.T, c *client.CSAPI, virtualUserID string, roomIDOrAlias string) string { + query := url.Values{} + if virtualUserID != "" { + query.Add("user_id", virtualUserID) + } + + // join the room + res := c.MustDoRaw(t, "POST", []string{"_matrix", "client", "r0", "join", roomIDOrAlias}, nil, "application/json", query) + // return the room ID if we joined with it + if roomIDOrAlias[0] == '!' { + return roomIDOrAlias + } + // otherwise we should be told the room ID if we joined via an alias + body := client.ParseJSON(t, res) + return client.GetJSONFieldStr(t, body, "room_id") +} + func createMessagesInRoom(t *testing.T, c *client.CSAPI, roomID string, count int) []string { evs := make([]string, count) for i := 0; i < len(evs); i++ { @@ -349,7 +444,7 @@ func createMessagesInRoom(t *testing.T, c *client.CSAPI, roomID string, count in // Backfill in a reverse-chronogical order (most recent history to oldest history) // Reverse-chronogical is a constraint of the Synapse implementation. 
-func backfillHistoricalMessagesInReverseChronologicalAtTime(t *testing.T, c *client.CSAPI, roomID string, insertAfterEventId string, insertTime time.Time, count int) []string { +func backfillHistoricalMessagesInReverseChronologicalAtTime(t *testing.T, c *client.CSAPI, virtualUserID string, roomID string, insertAfterEventId string, insertTime time.Time, count int) []string { insertOriginServerTs := uint64(insertTime.UnixNano() / 1000000) evs := make([]string, count) @@ -373,7 +468,7 @@ func backfillHistoricalMessagesInReverseChronologicalAtTime(t *testing.T, c *cli "m.historical": true, }, } - newEventId := sendEvent(t, c, roomID, newEvent) + newEventId := sendEvent(t, c, virtualUserID, roomID, newEvent) evs[i] = newEventId } From 16c50bbfc70eee539f7e48461d44545aaf67b377 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 31 Mar 2021 02:02:16 -0500 Subject: [PATCH 45/81] Try to work with bulk send endpoint As described in https://github.com/matrix-org/matrix-doc/pull/2716#discussion_r598896168 --- internal/client/client.go | 23 ++++++ internal/docker/deployment.go | 2 +- tests/msc2716_test.go | 138 +++++++++++++++++++++++++++++++--- 3 files changed, 153 insertions(+), 10 deletions(-) diff --git a/internal/client/client.go b/internal/client/client.go index 1f0759ff..376b1032 100644 --- a/internal/client/client.go +++ b/internal/client/client.go @@ -325,6 +325,29 @@ func GetJSONFieldStr(t *testing.T, body []byte, wantKey string) string { return res.Str } +func GetJSONFieldArray(t *testing.T, body []byte, wantKey string) []string { + t.Helper() + + res := gjson.GetBytes(body, wantKey) + + if !res.Exists() { + t.Fatalf("GetJSONFieldArray: key '%s' missing from %s", wantKey, string(body)) + } + + arrLength := len(res.Array()) + arr := make([]string, arrLength) + i := 0 + res.ForEach(func(key, value gjson.Result) bool { + arr[i] = value.Str + + // Keep iterating + i++ + return true + }) + + return arr +} + // ParseJSON parses a JSON-encoded HTTP Response body into a byte slice func ParseJSON(t *testing.T, res *http.Response) []byte { t.Helper() diff --git a/internal/docker/deployment.go b/internal/docker/deployment.go index 489b69e2..7b584d92 100644 --- a/internal/docker/deployment.go +++ b/internal/docker/deployment.go @@ -31,7 +31,7 @@ type HomeserverDeployment struct { // will print container logs before killing the container. func (d *Deployment) Destroy(t *testing.T) { t.Helper() - d.Deployer.Destroy(d, t.Failed()) + d.Deployer.Destroy(d, true) // t.Failed() } // Client returns a CSAPI client targeting the given hsName, using the access token for the given userID. diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go index 45236b04..769cf8b6 100644 --- a/tests/msc2716_test.go +++ b/tests/msc2716_test.go @@ -136,8 +136,8 @@ func TestBackfillingHistory(t *testing.T) { alice.JoinRoom(t, roomID, nil) // Create the "live" event we are going to insert our backfilled events next to - eventsBefore := createMessagesInRoom(t, alice, roomID, 10) - eventBefore := eventsBefore[0] + eventsBefore := createMessagesInRoom(t, alice, roomID, 2) + eventBefore := eventsBefore[len(eventsBefore)-1] timeAfterEventBefore := time.Now() numHistoricalMessages := 6 @@ -146,25 +146,88 @@ func TestBackfillingHistory(t *testing.T) { // Create some events after.
// Fill up the buffer so we have to scrollback to the inserted history later - createMessagesInRoom(t, alice, roomID, 200) + eventsAfter := createMessagesInRoom(t, alice, roomID, 2) virtualUserLocalpart := "maria" virtualUserID := fmt.Sprintf("@%s:hs1", virtualUserLocalpart) // Register and join the virtual user ensureRegistered(t, as, virtualUserLocalpart) - joinRoom(t, as, virtualUserID, roomID) - // TODO: Figure out why after joining the virtual user and it gets a 200 OK, - // we still see `SynapseError: 403 - User @maria:hs1 not in room !ZIIflEwiKrDFeMEMKV:hs1` - // when sending a message. Debugging with an Element interface on top even, shows Maria as joined! + // TODO: Try adding avatar and displayName and see if historical messages get this info // Insert the most recent chunk of backfilled history - backfillHistoricalMessagesInReverseChronologicalAtTime(t, as, virtualUserID, roomID, eventBefore, timeAfterEventBefore.Add(time.Millisecond*time.Duration(numHistoricalMessages)), 3) + //backfillHistoricalMessagesInReverseChronologicalAtTime(t, as, virtualUserID, roomID, eventBefore, timeAfterEventBefore.Add(time.Millisecond*time.Duration(numHistoricalMessages)), 3) + _, historicalEvents := backfillBulkHistoricalMessagesAtTime( + t, + as, + virtualUserID, + roomID, + eventBefore, + timeAfterEventBefore, + 3, + ) // Insert another older chunk of backfilled history from the same user. // See if the joins and meta data still are visible on the subsequent chunk - backfillHistoricalMessagesInReverseChronologicalAtTime(t, as, virtualUserID, roomID, eventBefore, timeAfterEventBefore, 3) + //backfillHistoricalMessagesInReverseChronologicalAtTime(t, as, virtualUserID, roomID, eventBefore, timeAfterEventBefore, 3) + + var expectedMessageOrder []string + expectedMessageOrder = append(expectedMessageOrder, eventsBefore...) + expectedMessageOrder = append(expectedMessageOrder, historicalEvents...) + expectedMessageOrder = append(expectedMessageOrder, eventsAfter...) + // Order events from newest to oldest + expectedMessageOrder = reversed(expectedMessageOrder) + + messagesRes := alice.MustDoRaw(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, nil, "application/json", url.Values{ + "dir": []string{"b"}, + "limit": []string{"100"}, + }) + + contextRes := alice.MustDoRaw(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "context", eventsAfter[len(eventsAfter)-1]}, nil, "application/json", url.Values{ + "limit": []string{"100"}, + }) + contextResBody := client.ParseJSON(t, contextRes) + logrus.WithFields(logrus.Fields{ + "contextResBody": string(contextResBody), + }).Error("context res") + + must.MatchResponse(t, messagesRes, match.HTTPResponse{ + JSON: []match.JSON{ + // We're using this weird custom matcher function because we want to iterate the full response + // to see all of the events from the response. This way we can use it to easily compare in + // the fail error message when the test fails and compare the actual order to the expected order. 
+ func(body []byte) error { + eventIDsFromResponse, err := getEventIDsFromResponseBody(body) + if err != nil { + return err + } + + // Copy the array by value so we can modify it as we iterate in the foreach loop + // We save the full untouched `expectedMessageOrder` for use in the log messages + workingExpectedMessageOrder := expectedMessageOrder + + // Match each event from the response in order to the list of expected events + matcher := match.JSONArrayEach("chunk", func(r gjson.Result) error { + // Find all events in order + if len(r.Get("content").Get("body").Str) > 0 { + // Pop the next message off the expected list + nextEventInOrder := workingExpectedMessageOrder[0] + workingExpectedMessageOrder = workingExpectedMessageOrder[1:] + + if r.Get("event_id").Str != nextEventInOrder { + return fmt.Errorf("Next event found was %s but expected %s\nActualEvents (%d): %v\nExpectedEvents (%d): %v", r.Get("event_id").Str, nextEventInOrder, len(eventIDsFromResponse), eventIDsFromResponse, len(expectedMessageOrder), expectedMessageOrder) + } + } + return nil + }) + + err = matcher(body) + + return err + }, + }, + }) }) t.Run("Backfilled historical events with m.historical do not come down /sync", func(t *testing.T) { @@ -474,3 +537,60 @@ func backfillHistoricalMessagesInReverseChronologicalAtTime(t *testing.T, c *cli return evs } + +func backfillBulkHistoricalMessagesAtTime( + t *testing.T, + c *client.CSAPI, + virtualUserID string, + roomID string, + insertAfterEventId string, + insertTime time.Time, + count int, +) (state_event_ids []string, event_ids []string) { + insertOriginServerTs := uint64(insertTime.UnixNano() / 1000000) + + evs := make([]map[string]interface{}, count) + for i := 0; i < len(evs); i++ { + newEvent := map[string]interface{}{ + "type": "m.room.message", + "sender": virtualUserID, + "origin_server_ts": insertOriginServerTs + uint64(i), + "content": map[string]interface{}{ + "msgtype": "m.text", + "body": fmt.Sprintf("Historical %d", i), + "m.historical": true, + }, + } + evs[i] = newEvent + } + + joinEvent := map[string]interface{}{ + "type": "m.room.member", + "sender": virtualUserID, + "origin_server_ts": insertOriginServerTs, + "content": map[string]interface{}{ + "membership": "join", + }, + "state_key": virtualUserID, + } + + query := make(url.Values, 2) + query.Add("prev_event", insertAfterEventId) + query.Add("user_id", virtualUserID) + + b, err := json.Marshal(map[string]interface{}{ + "events": evs, + "state_events_at_start": []map[string]interface{}{joinEvent}, + }) + if err != nil { + t.Fatalf("msc2716.backfillBulkHistoricalMessagesAtTime failed to marshal JSON body: %s", err) + } + + res := c.MustDoRaw(t, "POST", []string{"_matrix", "client", "r0", "rooms", roomID, "bulksend"}, b, "application/json", query) + body := client.ParseJSON(t, res) + + stateEvents := client.GetJSONFieldArray(t, body, "state_events") + events := client.GetJSONFieldArray(t, body, "events") + + return stateEvents, events +} From e8ca419607fd95d4df7378ae6206d21ad9539932 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 31 Mar 2021 02:33:26 -0500 Subject: [PATCH 46/81] State test now passing after we just persist the event in Synapse and skip all the auth stuff --- tests/msc2716_test.go | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go index 769cf8b6..cedbc60e 100644 --- a/tests/msc2716_test.go +++ b/tests/msc2716_test.go @@ -65,7 +65,7 @@ func TestBackfillingHistory(t *testing.T) { eventsAfter := 
createMessagesInRoom(t, alice, roomID, 2) // Then backfill a bunch of events between eventBefore and eventsAfter - historicalEvents := reversed(backfillHistoricalMessagesInReverseChronologicalAtTime(t, as, "", roomID, eventBefore, timeAfterEventBefore, numHistoricalMessages)) + historicalEvents := backfillHistoricalMessagesInReverseChronologicalAtTime(t, as, "", roomID, eventBefore, timeAfterEventBefore, numHistoricalMessages) messagesRes := alice.MustDoRaw(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, nil, "application/json", url.Values{ "dir": []string{"b"}, @@ -74,7 +74,9 @@ func TestBackfillingHistory(t *testing.T) { var expectedMessageOrder []string expectedMessageOrder = append(expectedMessageOrder, eventsBefore...) - expectedMessageOrder = append(expectedMessageOrder, historicalEvents...) + // Historical events were inserted in reverse chronological + // But we expect them to come out in /messages in the correct order + expectedMessageOrder = append(expectedMessageOrder, reversed(historicalEvents)...) expectedMessageOrder = append(expectedMessageOrder, eventsAfter...) // Order events from newest to oldest expectedMessageOrder = reversed(expectedMessageOrder) @@ -157,7 +159,7 @@ func TestBackfillingHistory(t *testing.T) { // Insert the most recent chunk of backfilled history //backfillHistoricalMessagesInReverseChronologicalAtTime(t, as, virtualUserID, roomID, eventBefore, timeAfterEventBefore.Add(time.Millisecond*time.Duration(numHistoricalMessages)), 3) - _, historicalEvents := backfillBulkHistoricalMessagesAtTime( + _, historicalEvents := backfillBulkHistoricalMessagesInReverseChronologicalAtTime( t, as, virtualUserID, @@ -173,7 +175,9 @@ func TestBackfillingHistory(t *testing.T) { var expectedMessageOrder []string expectedMessageOrder = append(expectedMessageOrder, eventsBefore...) - expectedMessageOrder = append(expectedMessageOrder, historicalEvents...) + // Historical events were inserted in reverse chronological + // But we expect them to come out in /messages in the correct order + expectedMessageOrder = append(expectedMessageOrder, reversed(historicalEvents)...) expectedMessageOrder = append(expectedMessageOrder, eventsAfter...) 
// Order events from newest to oldest expectedMessageOrder = reversed(expectedMessageOrder) @@ -538,7 +542,7 @@ func backfillHistoricalMessagesInReverseChronologicalAtTime(t *testing.T, c *cli return evs } -func backfillBulkHistoricalMessagesAtTime( +func backfillBulkHistoricalMessagesInReverseChronologicalAtTime( t *testing.T, c *client.CSAPI, virtualUserID string, @@ -551,13 +555,18 @@ func backfillBulkHistoricalMessagesAtTime( evs := make([]map[string]interface{}, count) for i := 0; i < len(evs); i++ { + // We have to backfill historical messages from most recent to oldest + // since backfilled messages decrement their `stream_order` and we want messages + // to appear in order from the `/messages` endpoint + messageIndex := (count - 1) - i + newEvent := map[string]interface{}{ "type": "m.room.message", "sender": virtualUserID, - "origin_server_ts": insertOriginServerTs + uint64(i), + "origin_server_ts": insertOriginServerTs + uint64(messageIndex), "content": map[string]interface{}{ "msgtype": "m.text", - "body": fmt.Sprintf("Historical %d", i), + "body": fmt.Sprintf("Historical %d", messageIndex), "m.historical": true, }, } @@ -583,7 +592,7 @@ func backfillBulkHistoricalMessagesAtTime( "state_events_at_start": []map[string]interface{}{joinEvent}, }) if err != nil { - t.Fatalf("msc2716.backfillBulkHistoricalMessagesAtTime failed to marshal JSON body: %s", err) + t.Fatalf("msc2716.backfillBulkHistoricalMessagesInReverseChronologicalAtTime failed to marshal JSON body: %s", err) } res := c.MustDoRaw(t, "POST", []string{"_matrix", "client", "r0", "rooms", roomID, "bulksend"}, b, "application/json", query) From 621572619ad58c8d9c612cd8d8ffcdfdd4593fec Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 14 Apr 2021 14:43:02 -0500 Subject: [PATCH 47/81] Add better chunk identifiers --- tests/msc2716_test.go | 45 ++++++++++++++++++++++++++++++------------- 1 file changed, 32 insertions(+), 13 deletions(-) diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go index cedbc60e..4e1b6963 100644 --- a/tests/msc2716_test.go +++ b/tests/msc2716_test.go @@ -31,6 +31,10 @@ type event struct { Content map[string]interface{} } +// This is configurable because it can be nice to change it to `time.Second` while +// checking out the test result in a Synapse instance +const TimeBetweenMessages = time.Millisecond + // Test that the message events we insert between A and B come back in the correct order from /messages func TestBackfillingHistory(t *testing.T) { deployment := Deploy(t, "rooms_state", b.BlueprintHSWithApplicationService) @@ -59,7 +63,7 @@ func TestBackfillingHistory(t *testing.T) { numHistoricalMessages := 3 // wait X number of ms to ensure that the timestamp changes enough for each of the messages we try to backfill later - time.Sleep(time.Duration(numHistoricalMessages) * time.Millisecond) + time.Sleep(time.Duration(numHistoricalMessages) * TimeBetweenMessages) // Create some more "live" events after our insertion point eventsAfter := createMessagesInRoom(t, alice, roomID, 2) @@ -144,7 +148,7 @@ func TestBackfillingHistory(t *testing.T) { numHistoricalMessages := 6 // wait X number of ms to ensure that the timestamp changes enough for each of the messages we try to backfill later - time.Sleep(time.Duration(numHistoricalMessages) * time.Millisecond) + time.Sleep(time.Duration(numHistoricalMessages) * TimeBetweenMessages) // Create some events after. 
// Fill up the buffer so we have to scrollback to the inserted history later @@ -158,25 +162,33 @@ func TestBackfillingHistory(t *testing.T) { // TODO: Try adding avatar and displayName and see if historical messages get this info // Insert the most recent chunk of backfilled history - //backfillHistoricalMessagesInReverseChronologicalAtTime(t, as, virtualUserID, roomID, eventBefore, timeAfterEventBefore.Add(time.Millisecond*time.Duration(numHistoricalMessages)), 3) _, historicalEvents := backfillBulkHistoricalMessagesInReverseChronologicalAtTime( t, as, virtualUserID, roomID, eventBefore, - timeAfterEventBefore, + timeAfterEventBefore.Add(TimeBetweenMessages*3), 3, ) // Insert another older chunk of backfilled history from the same user. - // See if the joins and meta data still are visible on the subsequent chunk - //backfillHistoricalMessagesInReverseChronologicalAtTime(t, as, virtualUserID, roomID, eventBefore, timeAfterEventBefore, 3) + // Make sure the meta data and joins still work on the subsequent chunk + _, historicalEvents2 := backfillBulkHistoricalMessagesInReverseChronologicalAtTime( + t, + as, + virtualUserID, + roomID, + eventBefore, + timeAfterEventBefore, + 3, + ) var expectedMessageOrder []string expectedMessageOrder = append(expectedMessageOrder, eventsBefore...) // Historical events were inserted in reverse chronological // But we expect them to come out in /messages in the correct order + expectedMessageOrder = append(expectedMessageOrder, reversed(historicalEvents2)...) expectedMessageOrder = append(expectedMessageOrder, reversed(historicalEvents)...) expectedMessageOrder = append(expectedMessageOrder, eventsAfter...) // Order events from newest to oldest @@ -277,7 +289,7 @@ func TestBackfillingHistory(t *testing.T) { eventsBefore := createMessagesInRoom(t, alice, roomID, 1) eventBefore := eventsBefore[0] timeAfterEventBefore := time.Now() - insertOriginServerTs := uint64(timeAfterEventBefore.UnixNano() / 1000000) + insertOriginServerTs := uint64(timeAfterEventBefore.UnixNano() / int64(time.Millisecond)) // Send an event that has `prev_event` and `ts` set but not `m.historical`. // We should see these type of events in the `/sync` response @@ -312,7 +324,7 @@ func TestBackfillingHistory(t *testing.T) { // Here is the area of interest in the event "$some-non-existant-event-id", }, - OriginServerTS: uint64(time.Now().UnixNano() / 1000000), + OriginServerTS: uint64(time.Now().UnixNano() / int64(time.Millisecond)), Content: map[string]interface{}{ "msgtype": "m.text", "body": "Historical message", @@ -352,7 +364,7 @@ func TestBackfillingHistory(t *testing.T) { eventsBefore := createMessagesInRoom(t, alice, roomID, 1) eventBefore := eventsBefore[0] timeAfterEventBefore := time.Now() - insertOriginServerTs := uint64(timeAfterEventBefore.UnixNano() / 1000000) + insertOriginServerTs := uint64(timeAfterEventBefore.UnixNano() / int64(time.Millisecond)) e := event{ Type: "m.room.message", @@ -512,7 +524,7 @@ func createMessagesInRoom(t *testing.T, c *client.CSAPI, roomID string, count in // Backfill in a reverse-chronogical order (most recent history to oldest history) // Reverse-chronogical is a constraint of the Synapse implementation. 
func backfillHistoricalMessagesInReverseChronologicalAtTime(t *testing.T, c *client.CSAPI, virtualUserID string, roomID string, insertAfterEventId string, insertTime time.Time, count int) []string { - insertOriginServerTs := uint64(insertTime.UnixNano() / 1000000) + insertOriginServerTs := uint64(insertTime.UnixNano() / int64(time.Millisecond)) evs := make([]string, count) @@ -542,6 +554,8 @@ func backfillHistoricalMessagesInReverseChronologicalAtTime(t *testing.T, c *cli return evs } +var chunkCount int64 = 0 + func backfillBulkHistoricalMessagesInReverseChronologicalAtTime( t *testing.T, c *client.CSAPI, @@ -551,7 +565,10 @@ func backfillBulkHistoricalMessagesInReverseChronologicalAtTime( insertTime time.Time, count int, ) (state_event_ids []string, event_ids []string) { - insertOriginServerTs := uint64(insertTime.UnixNano() / 1000000) + // Timestamp in milliseconds + insertOriginServerTs := uint64(insertTime.UnixNano() / int64(time.Millisecond)) + + timeBetweenMessagesMS := uint64(TimeBetweenMessages / time.Millisecond) evs := make([]map[string]interface{}, count) for i := 0; i < len(evs); i++ { @@ -563,10 +580,10 @@ func backfillBulkHistoricalMessagesInReverseChronologicalAtTime( newEvent := map[string]interface{}{ "type": "m.room.message", "sender": virtualUserID, - "origin_server_ts": insertOriginServerTs + uint64(messageIndex), + "origin_server_ts": insertOriginServerTs + (timeBetweenMessagesMS * uint64(messageIndex)), "content": map[string]interface{}{ "msgtype": "m.text", - "body": fmt.Sprintf("Historical %d", messageIndex), + "body": fmt.Sprintf("Historical %d (chunk=%d)", messageIndex, chunkCount), "m.historical": true, }, } @@ -601,5 +618,7 @@ func backfillBulkHistoricalMessagesInReverseChronologicalAtTime( stateEvents := client.GetJSONFieldArray(t, body, "state_events") events := client.GetJSONFieldArray(t, body, "events") + chunkCount++ + return stateEvents, events } From 367db560904dd1a693100ca0bf31d14c2034a0c7 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 14 Apr 2021 18:52:46 -0500 Subject: [PATCH 48/81] Simplify JSON matcher for order of messages --- tests/msc2716_test.go | 112 +++++++++++++++++------------------ 1 file changed, 46 insertions(+), 66 deletions(-) diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go index 4e1b6963..71b044ca 100644 --- a/tests/msc2716_test.go +++ b/tests/msc2716_test.go @@ -7,8 +7,10 @@ package tests import ( + "bytes" "encoding/json" "fmt" + "io/ioutil" "net/url" "strconv" "testing" @@ -75,6 +77,10 @@ func TestBackfillingHistory(t *testing.T) { "dir": []string{"b"}, "limit": []string{"100"}, }) + messsageResBody := client.ParseJSON(t, messagesRes) + eventIDsFromResponse := getEventIDsFromResponseBody(t, messsageResBody) + // Since the original body can only be read once, create a new one from the body bytes we just read + messagesRes.Body = ioutil.NopCloser(bytes.NewBuffer(messsageResBody)) var expectedMessageOrder []string expectedMessageOrder = append(expectedMessageOrder, eventsBefore...)
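(An aside before the next hunk: client.ParseJSON consumes messagesRes.Body, which is why the hunk above re-wraps the parsed bytes with ioutil.NopCloser before must.MatchResponse reads the response a second time. This read-then-restore dance repeats in most of the tests below; if it ever gets factored out, a helper along these lines would do it. This is only a sketch: parseJSONAndRestoreBody is a hypothetical name, not an existing Complement helper, and it assumes the same imports as this test file.)

// parseJSONAndRestoreBody reads the JSON body of a response and then restores
// res.Body so later consumers, like must.MatchResponse, can read it again.
// Hypothetical helper, not part of internal/client.
func parseJSONAndRestoreBody(t *testing.T, res *http.Response) []byte {
	t.Helper()
	// ParseJSON drains res.Body, so keep the bytes it returns...
	body := client.ParseJSON(t, res)
	// ...and swap in a fresh reader over those bytes
	res.Body = ioutil.NopCloser(bytes.NewBuffer(body))
	return body
}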
@@ -93,41 +99,26 @@ func TestBackfillingHistory(t *testing.T) { "contextResBody": string(contextResBody), }).Error("context res") + // Copy the array by value so we can modify it as we iterate in the foreach loop + // We save the full untouched `expectedMessageOrder` for use in the log messages + workingExpectedMessageOrder := expectedMessageOrder + must.MatchResponse(t, messagesRes, match.HTTPResponse{ JSON: []match.JSON{ - // We're using this weird custom matcher function because we want to iterate the full response - // to see all of the events from the response. This way we can use it to easily compare in - // the fail error message when the test fails and compare the actual order to the expected order. - func(body []byte) error { - eventIDsFromResponse, err := getEventIDsFromResponseBody(body) - if err != nil { - return err - } - - // Copy the array by value so we can modify it as we iterate in the foreach loop - // We save the full untouched `expectedMessageOrder` for use in the log messages - workingExpectedMessageOrder := expectedMessageOrder - - // Match each event from the response in order to the list of expected events - matcher := match.JSONArrayEach("chunk", func(r gjson.Result) error { - // Find all events in order - if len(r.Get("content").Get("body").Str) > 0 { - // Pop the next message off the expected list - nextEventInOrder := workingExpectedMessageOrder[0] - workingExpectedMessageOrder = workingExpectedMessageOrder[1:] - - if r.Get("event_id").Str != nextEventInOrder { - return fmt.Errorf("Next event found was %s but expected %s\nActualEvents: %v\nExpectedEvents: %v", r.Get("event_id").Str, nextEventInOrder, eventIDsFromResponse, expectedMessageOrder) - } + match.JSONArrayEach("chunk", func(r gjson.Result) error { + // Find all events in order + if len(r.Get("content").Get("body").Str) > 0 { + // Pop the next message off the expected list + nextEventInOrder := workingExpectedMessageOrder[0] + workingExpectedMessageOrder = workingExpectedMessageOrder[1:] + + if r.Get("event_id").Str != nextEventInOrder { + return fmt.Errorf("Next event found was %s but expected %s\nActualEvents: %v\nExpectedEvents: %v", r.Get("event_id").Str, nextEventInOrder, eventIDsFromResponse, expectedMessageOrder) } + } - return nil - }) - - err = matcher(body) - - return err - }, + return nil + }), }, }) }) @@ -198,6 +189,10 @@ func TestBackfillingHistory(t *testing.T) { "dir": []string{"b"}, "limit": []string{"100"}, }) + messsageResBody := client.ParseJSON(t, messagesRes) + eventIDsFromResponse := getEventIDsFromResponseBody(t, messsageResBody) + // Since the original body can only be read once, create a new one from the body bytes we just read + messagesRes.Body = ioutil.NopCloser(bytes.NewBuffer(messsageResBody)) contextRes := alice.MustDoRaw(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "context", eventsAfter[len(eventsAfter)-1]}, nil, "application/json", url.Values{ "limit": []string{"100"}, @@ -207,41 +202,26 @@ func TestBackfillingHistory(t *testing.T) { "contextResBody": string(contextResBody), }).Error("context res") + // Copy the array by value so we can modify it as we iterate in the foreach loop + // We save the full untouched `expectedMessageOrder` for use in the log messages + workingExpectedMessageOrder := expectedMessageOrder + must.MatchResponse(t, messagesRes, match.HTTPResponse{ JSON: []match.JSON{ - // We're using this weird custom matcher function because we want to iterate the full response - // to see all of the events from the response. 
This way we can use it to easily compare in - // the fail error message when the test fails and compare the actual order to the expected order. - func(body []byte) error { - eventIDsFromResponse, err := getEventIDsFromResponseBody(body) - if err != nil { - return err - } - - // Copy the array by value so we can modify it as we iterate in the foreach loop - // We save the full untouched `expectedMessageOrder` for use in the log messages - workingExpectedMessageOrder := expectedMessageOrder - - // Match each event from the response in order to the list of expected events - matcher := match.JSONArrayEach("chunk", func(r gjson.Result) error { - // Find all events in order - if len(r.Get("content").Get("body").Str) > 0 { - // Pop the next message off the expected list - nextEventInOrder := workingExpectedMessageOrder[0] - workingExpectedMessageOrder = workingExpectedMessageOrder[1:] - - if r.Get("event_id").Str != nextEventInOrder { - return fmt.Errorf("Next event found was %s but expected %s\nActualEvents (%d): %v\nExpectedEvents (%d): %v", r.Get("event_id").Str, nextEventInOrder, len(eventIDsFromResponse), eventIDsFromResponse, len(expectedMessageOrder), expectedMessageOrder) - } + match.JSONArrayEach("chunk", func(r gjson.Result) error { + // Find all events in order + if len(r.Get("content").Get("body").Str) > 0 { + // Pop the next message off the expected list + nextEventInOrder := workingExpectedMessageOrder[0] + workingExpectedMessageOrder = workingExpectedMessageOrder[1:] + + if r.Get("event_id").Str != nextEventInOrder { + return fmt.Errorf("Next event found was %s but expected %s\nActualEvents (%d): %v\nExpectedEvents (%d): %v", r.Get("event_id").Str, nextEventInOrder, len(eventIDsFromResponse), eventIDsFromResponse, len(expectedMessageOrder), expectedMessageOrder) } + } - return nil - }) - - err = matcher(body) - - return err - }, + return nil + }), }, }) }) @@ -402,14 +382,14 @@ func reversed(in []string) []string { return out } -func getEventIDsFromResponseBody(body []byte) (eventIDsFromResponse []string, err error) { +func getEventIDsFromResponseBody(t *testing.T, body []byte) (eventIDsFromResponse []string) { wantKey := "chunk" res := gjson.GetBytes(body, wantKey) if !res.Exists() { - return eventIDsFromResponse, fmt.Errorf("missing key '%s'", wantKey) + t.Fatalf("missing key '%s'", wantKey) } if !res.IsArray() { - return eventIDsFromResponse, fmt.Errorf("key '%s' is not an array (was %s)", wantKey, res.Type) + t.Fatalf("key '%s' is not an array (was %s)", wantKey, res.Type) } res.ForEach(func(key, r gjson.Result) bool { @@ -419,7 +399,7 @@ func getEventIDsFromResponseBody(body []byte) (eventIDsFromResponse []string, er return true }) - return eventIDsFromResponse, nil + return eventIDsFromResponse } var txnID int = 0 From 88b4e989aee77e262649b2d7a3a10c3a92f85421 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 14 Apr 2021 19:57:28 -0500 Subject: [PATCH 49/81] Update remaining tests to use new /bulksend --- tests/msc2716_test.go | 231 ++++++++++++------------------------------ 1 file changed, 63 insertions(+), 168 deletions(-) diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go index 71b044ca..2eec38d8 100644 --- a/tests/msc2716_test.go +++ b/tests/msc2716_test.go @@ -11,6 +11,7 @@ import ( "encoding/json" "fmt" "io/ioutil" + "net/http" "net/url" "strconv" "testing" @@ -51,79 +52,11 @@ func TestBackfillingHistory(t *testing.T) { userID := "@alice:hs1" alice := deployment.Client(t, "hs1", userID) - t.Run("parallel", func(t *testing.T) { - t.Run("Backfilled historical 
messages come back in correct order", func(t *testing.T) { - t.Parallel() - - roomID := as.CreateRoom(t, struct{}{}) - alice.JoinRoom(t, roomID, nil) - - // Create the "live" event we are going to insert our backfilled events next to - eventsBefore := createMessagesInRoom(t, alice, roomID, 1) - eventBefore := eventsBefore[0] - timeAfterEventBefore := time.Now() - - numHistoricalMessages := 3 - // wait X number of ms to ensure that the timestamp changes enough for each of the messages we try to backfill later - time.Sleep(time.Duration(numHistoricalMessages) * TimeBetweenMessages) - - // Create some more "live" events after our insertion point - eventsAfter := createMessagesInRoom(t, alice, roomID, 2) - - // Then backfill a bunch of events between eventBefore and eventsAfter - historicalEvents := backfillHistoricalMessagesInReverseChronologicalAtTime(t, as, "", roomID, eventBefore, timeAfterEventBefore, numHistoricalMessages) - - messagesRes := alice.MustDoRaw(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, nil, "application/json", url.Values{ - "dir": []string{"b"}, - "limit": []string{"100"}, - }) - messsageResBody := client.ParseJSON(t, messagesRes) - eventIDsFromResponse := getEventIDsFromResponseBody(t, messsageResBody) - // Since the original body can only be read once, create a new one from the body bytes we just read - messagesRes.Body = ioutil.NopCloser(bytes.NewBuffer(messsageResBody)) - - var expectedMessageOrder []string - expectedMessageOrder = append(expectedMessageOrder, eventsBefore...) - // Historical events were inserted in reverse chronological - // But we expect them to come out in /messages in the correct order - expectedMessageOrder = append(expectedMessageOrder, reversed(historicalEvents)...) - expectedMessageOrder = append(expectedMessageOrder, eventsAfter...) 
- // Order events from newest to oldest - expectedMessageOrder = reversed(expectedMessageOrder) + virtualUserLocalpart := "maria" + virtualUserID := fmt.Sprintf("@%s:hs1", virtualUserLocalpart) - contextRes := alice.MustDoRaw(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "context", eventsAfter[1]}, nil, "application/json", url.Values{ - "limit": []string{"100"}, - }) - contextResBody := client.ParseJSON(t, contextRes) - logrus.WithFields(logrus.Fields{ - "contextResBody": string(contextResBody), - }).Error("context res") - - // Copy the array by value so we can modify it as we iterate in the foreach loop - // We save the full untouched `expectedMessageOrder` for use in the log messages - workingExpectedMessageOrder := expectedMessageOrder - - must.MatchResponse(t, messagesRes, match.HTTPResponse{ - JSON: []match.JSON{ - match.JSONArrayEach("chunk", func(r gjson.Result) error { - // Find all events in order - if len(r.Get("content").Get("body").Str) > 0 { - // Pop the next message off the expected list - nextEventInOrder := workingExpectedMessageOrder[0] - workingExpectedMessageOrder = workingExpectedMessageOrder[1:] - - if r.Get("event_id").Str != nextEventInOrder { - return fmt.Errorf("Next event found was %s but expected %s\nActualEvents: %v\nExpectedEvents: %v", r.Get("event_id").Str, nextEventInOrder, eventIDsFromResponse, expectedMessageOrder) - } - } - - return nil - }), - }, - }) - }) - - t.Run("Backfilled historical events resolve with proper state", func(t *testing.T) { + t.Run("parallel", func(t *testing.T) { + t.Run("Backfilled historical events resolve with proper state in correct order", func(t *testing.T) { t.Parallel() roomID := as.CreateRoom(t, map[string]interface{}{ @@ -145,15 +78,13 @@ func TestBackfillingHistory(t *testing.T) { // Fill up the buffer so we have to scrollback to the inserted history later eventsAfter := createMessagesInRoom(t, alice, roomID, 2) - virtualUserLocalpart := "maria" - virtualUserID := fmt.Sprintf("@%s:hs1", virtualUserLocalpart) // Register and join the virtual user ensureRegistered(t, as, virtualUserLocalpart) // TODO: Try adding avatar and displayName and see if historical messages get this info // Insert the most recent chunk of backfilled history - _, historicalEvents := backfillBulkHistoricalMessagesInReverseChronologicalAtTime( + backfillRes := backfillBulkHistoricalMessagesInReverseChronologicalAtTime( t, as, virtualUserID, @@ -161,11 +92,14 @@ func TestBackfillingHistory(t *testing.T) { eventBefore, timeAfterEventBefore.Add(TimeBetweenMessages*3), 3, + // Status + 200, ) + _, historicalEvents := getEventsFromBulkSendResponse(t, backfillRes) // Insert another older chunk of backfilled history from the same user. // Make sure the meta data and joins still work on the subsequent chunk - _, historicalEvents2 := backfillBulkHistoricalMessagesInReverseChronologicalAtTime( + backfillRes2 := backfillBulkHistoricalMessagesInReverseChronologicalAtTime( t, as, virtualUserID, @@ -173,7 +107,10 @@ func TestBackfillingHistory(t *testing.T) { eventBefore, timeAfterEventBefore, 3, + // Status + 200, ) + _, historicalEvents2 := getEventsFromBulkSendResponse(t, backfillRes2) var expectedMessageOrder []string expectedMessageOrder = append(expectedMessageOrder, eventsBefore...) 
@@ -241,7 +178,18 @@ func TestBackfillingHistory(t *testing.T) { createMessagesInRoom(t, alice, roomID, 5) // Insert a backfilled event - historicalEvents := backfillHistoricalMessagesInReverseChronologicalAtTime(t, as, "", roomID, eventBefore, timeAfterEventBefore, 1) + backfillRes := backfillBulkHistoricalMessagesInReverseChronologicalAtTime( + t, + as, + virtualUserID, + roomID, + eventBefore, + timeAfterEventBefore, + 1, + // Status + 200, + ) + _, historicalEvents := getEventsFromBulkSendResponse(t, backfillRes) backfilledEvent := historicalEvents[0] // This is just a dummy event we search for after the backfilledEvent @@ -298,36 +246,15 @@ func TestBackfillingHistory(t *testing.T) { roomID := as.CreateRoom(t, struct{}{}) - e := event{ - Type: "m.room.message", - PrevEvents: []string{ - // Here is the area of interest in the event - "$some-non-existant-event-id", - }, - OriginServerTS: uint64(time.Now().UnixNano() / int64(time.Millisecond)), - Content: map[string]interface{}{ - "msgtype": "m.text", - "body": "Historical message", - "m.historical": true, - }, - } - - query := make(url.Values, len(e.PrevEvents)) - query.Add("prev_event", e.PrevEvents[0]) - query.Add("ts", strconv.FormatUint(e.OriginServerTS, 10)) - - b, err := json.Marshal(e.Content) - if err != nil { - t.Fatalf("msc2716.sendEvent failed to marshal JSON body: %s", err) - } - - as.MustDoWithStatusRaw( + backfillBulkHistoricalMessagesInReverseChronologicalAtTime( t, - "PUT", - []string{"_matrix", "client", "r0", "rooms", roomID, "send", e.Type, txnPrefix + "404-unrecognized-prev-event-id"}, - b, - "application/json", - query, + as, + virtualUserID, + roomID, + "$some-non-existant-event-id", + time.Now(), + 1, + // Status // TODO: Seems like this makes more sense as a 404 // But the current Synapse code around unknown prev events will throw -> // `403: No create event in auth events` @@ -344,32 +271,19 @@ func TestBackfillingHistory(t *testing.T) { eventsBefore := createMessagesInRoom(t, alice, roomID, 1) eventBefore := eventsBefore[0] timeAfterEventBefore := time.Now() - insertOriginServerTs := uint64(timeAfterEventBefore.UnixNano() / int64(time.Millisecond)) - - e := event{ - Type: "m.room.message", - PrevEvents: []string{ - eventBefore, - }, - OriginServerTS: insertOriginServerTs, - Content: map[string]interface{}{ - "msgtype": "m.text", - "body": "Historical message", - "m.historical": true, - }, - } - - query := make(url.Values, len(e.PrevEvents)) - query.Add("prev_event", e.PrevEvents[0]) - query.Add("ts", strconv.FormatUint(e.OriginServerTS, 10)) - b, err := json.Marshal(e.Content) - if err != nil { - t.Fatalf("msc2716.sendEvent failed to marshal JSON body: %s", err) - } - - // Normal user alice should not be able to backfill messages - alice.MustDoWithStatusRaw(t, "PUT", []string{"_matrix", "client", "r0", "rooms", roomID, "send", e.Type, txnPrefix + "403-no-normal-user-test"}, b, "application/json", query, 403) + backfillBulkHistoricalMessagesInReverseChronologicalAtTime( + t, + alice, + virtualUserID, + roomID, + eventBefore, + timeAfterEventBefore, + 1, + // Status + // Normal user alice should not be able to backfill messages + 403, + ) }) }) } @@ -501,39 +415,6 @@ func createMessagesInRoom(t *testing.T, c *client.CSAPI, roomID string, count in return evs } -// Backfill in a reverse-chronogical order (most recent history to oldest history) -// Reverse-chronogical is a constraint of the Synapse implementation. 
-func backfillHistoricalMessagesInReverseChronologicalAtTime(t *testing.T, c *client.CSAPI, virtualUserID string, roomID string, insertAfterEventId string, insertTime time.Time, count int) []string { - insertOriginServerTs := uint64(insertTime.UnixNano() / int64(time.Millisecond)) - - evs := make([]string, count) - - for i := 0; i < len(evs); i++ { - // We have to backfill historical messages from most recent to oldest - // since backfilled messages decrement their `stream_order` and we want messages - // to appear in order from the `/messages` endpoint - messageIndex := (count - 1) - i - - newEvent := event{ - Type: "m.room.message", - PrevEvents: []string{ - // Hang all historical messages off of the insert point - insertAfterEventId, - }, - OriginServerTS: insertOriginServerTs + uint64(messageIndex), - Content: map[string]interface{}{ - "msgtype": "m.text", - "body": fmt.Sprintf("Historical %d", messageIndex), - "m.historical": true, - }, - } - newEventId := sendEvent(t, c, virtualUserID, roomID, newEvent) - evs[i] = newEventId - } - - return evs -} - var chunkCount int64 = 0 func backfillBulkHistoricalMessagesInReverseChronologicalAtTime( @@ -544,7 +425,8 @@ func backfillBulkHistoricalMessagesInReverseChronologicalAtTime( insertAfterEventId string, insertTime time.Time, count int, -) (state_event_ids []string, event_ids []string) { + status int, +) (res *http.Response) { // Timestamp in milliseconds insertOriginServerTs := uint64(insertTime.UnixNano() / int64(time.Millisecond)) @@ -592,13 +474,26 @@ func backfillBulkHistoricalMessagesInReverseChronologicalAtTime( t.Fatalf("msc2716.backfillBulkHistoricalMessagesInReverseChronologicalAtTime failed to marshal JSON body: %s", err) } - res := c.MustDoRaw(t, "POST", []string{"_matrix", "client", "r0", "rooms", roomID, "bulksend"}, b, "application/json", query) + res = c.MustDoWithStatusRaw( + t, + "POST", + []string{"_matrix", "client", "r0", "rooms", roomID, "bulksend"}, + b, + "application/json", + query, + status, + ) + + chunkCount++ + + return res +} + +func getEventsFromBulkSendResponse(t *testing.T, res *http.Response) (state_event_ids []string, event_ids []string) { body := client.ParseJSON(t, res) stateEvents := client.GetJSONFieldArray(t, body, "state_events") events := client.GetJSONFieldArray(t, body, "events") - chunkCount++ - return stateEvents, events } From 90d8f7ec0ad26709b062f2bd1ee35006f3790e74 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Thu, 22 Apr 2021 02:43:02 -0500 Subject: [PATCH 50/81] Test that historical messages can be federated --- internal/b/hs_with_application_service.go | 9 +++ internal/match/json.go | 73 ++++++++++++++------- tests/msc2716_test.go | 77 +++++++++++++++++------ 3 files changed, 118 insertions(+), 41 deletions(-) diff --git a/internal/b/hs_with_application_service.go b/internal/b/hs_with_application_service.go index ba9db923..348cd7c3 100644 --- a/internal/b/hs_with_application_service.go +++ b/internal/b/hs_with_application_service.go @@ -21,5 +21,14 @@ var BlueprintHSWithApplicationService = MustValidate(Blueprint{ }, }, }, + { + Name: "hs2", + Users: []User{ + { + Localpart: "@charlie", + DisplayName: "Charlie", + }, + }, + }, }, }) diff --git a/internal/match/json.go b/internal/match/json.go index d6de20d0..b0c73026 100644 --- a/internal/match/json.go +++ b/internal/match/json.go @@ -55,25 +55,7 @@ func JSONKeyTypeEqual(wantKey string, wantType gjson.Type) JSON { } } -// JSONCheckOff returns a matcher which will loop over `wantKey` and ensure that the items -// (which can be array 
elements or object keys) -// are present exactly once in any order in `wantItems`. If there are unexpected items or items -// appear more than once then the match fails. This matcher can be used to check off items in -// an array/object. The `mapper` function should map the item to an interface which will be -// comparable via `reflect.DeepEqual` with items in `wantItems`. The optional `fn` callback -// allows more checks to be performed other than checking off the item from the list. It is -// called with 2 args: the result of the `mapper` function and the element itself (or value if -// it's an object). -// -// Usage: (ensures `events` has these events in any order, with the right event type) -// JSONCheckOff("events", []interface{}{"$foo:bar", "$baz:quuz"}, func(r gjson.Result) interface{} { -// return r.Get("event_id").Str -// }, func(eventID interface{}, eventBody gjson.Result) error { -// if eventBody.Get("type").Str != "m.room.message" { -// return fmt.Errorf("expected event to be 'm.room.message'") -// } -// }) -func JSONCheckOff(wantKey string, wantItems []interface{}, mapper func(gjson.Result) interface{}, fn func(interface{}, gjson.Result) error) JSON { +func jsonCheckOffInternal(wantKey string, wantItems []interface{}, allowUnwantedItems bool, mapper func(gjson.Result) interface{}, fn func(interface{}, gjson.Result) error) JSON { return func(body []byte) error { res := gjson.GetBytes(body, wantKey) if !res.Exists() { @@ -103,12 +85,15 @@ func JSONCheckOff(wantKey string, wantItems []interface{}, mapper func(gjson.Res break } } - if want == -1 { + if !allowUnwantedItems && want == -1 { err = fmt.Errorf("JSONCheckOff: unexpected item %s", item) return false } - // delete the wanted item - wantItems = append(wantItems[:want], wantItems[want+1:]...) + + if want != -1 { + // delete the wanted item + wantItems = append(wantItems[:want], wantItems[want+1:]...) + } // do further checks if fn != nil { @@ -130,6 +115,50 @@ func JSONCheckOff(wantKey string, wantItems []interface{}, mapper func(gjson.Res } } +// JSONCheckOffAllowUnwanted returns a matcher which will loop over `wantKey` and ensure that the items +// (which can be array elements or object keys) +// are present at least once in any order in `wantItems`. Allows unexpected items, or items that +// appear more than once. This matcher can be used to check off items in +// an array/object. The `mapper` function should map the item to an interface which will be +// comparable via `reflect.DeepEqual` with items in `wantItems`. The optional `fn` callback +// allows more checks to be performed other than checking off the item from the list. It is +// called with 2 args: the result of the `mapper` function and the element itself (or value if +// it's an object). +// +// Usage: (ensures `events` has these events in any order, with the right event type) +// JSONCheckOffAllowUnwanted("events", []interface{}{"$foo:bar", "$baz:quuz"}, func(r gjson.Result) interface{} { +// return r.Get("event_id").Str +// }, func(eventID interface{}, eventBody gjson.Result) error { +// if eventBody.Get("type").Str != "m.room.message" { +// return fmt.Errorf("expected event to be 'm.room.message'") +// } +// }) +func JSONCheckOffAllowUnwanted(wantKey string, wantItems []interface{}, mapper func(gjson.Result) interface{}, fn func(interface{}, gjson.Result) error) JSON { + return jsonCheckOffInternal(wantKey, wantItems, true, mapper, fn) +} + +// JSONCheckOff returns a matcher which will loop over `wantKey` and ensure that the items +// (which can be array elements or object keys) +// are present exactly once in any order in `wantItems`. If there are unexpected items or items +// appear more than once then the match fails. This matcher can be used to check off items in +// an array/object. The `mapper` function should map the item to an interface which will be +// comparable via `reflect.DeepEqual` with items in `wantItems`. The optional `fn` callback +// allows more checks to be performed other than checking off the item from the list. It is +// called with 2 args: the result of the `mapper` function and the element itself (or value if +// it's an object). +// +// Usage: (ensures `events` has these events in any order, with the right event type) +// JSONCheckOff("events", []interface{}{"$foo:bar", "$baz:quuz"}, func(r gjson.Result) interface{} { +// return r.Get("event_id").Str +// }, func(eventID interface{}, eventBody gjson.Result) error { +// if eventBody.Get("type").Str != "m.room.message" { +// return fmt.Errorf("expected event to be 'm.room.message'") +// } +// }) +func JSONCheckOff(wantKey string, wantItems []interface{}, mapper func(gjson.Result) interface{}, fn func(interface{}, gjson.Result) error) JSON { + return jsonCheckOffInternal(wantKey, wantItems, false, mapper, fn) +} + // JSONArrayEach returns a matcher which will check that `wantKey` is an array then loops over each // item calling `fn`. If `fn` returns an error, iterating stops and an error is returned.
func JSONArrayEach(wantKey string, fn func(gjson.Result) error) JSON { diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go index 2eec38d8..d2a87b57 100644 --- a/tests/msc2716_test.go +++ b/tests/msc2716_test.go @@ -52,6 +52,10 @@ func TestBackfillingHistory(t *testing.T) { userID := "@alice:hs1" alice := deployment.Client(t, "hs1", userID) + // Create the federated user which will fetch the messages from a remote homeserver + remoteUserID := "@charlie:hs2" + remoteCharlie := deployment.Client(t, "hs2", remoteUserID) + virtualUserLocalpart := "maria" virtualUserID := fmt.Sprintf("@%s:hs1", virtualUserLocalpart) @@ -131,6 +135,7 @@ func TestBackfillingHistory(t *testing.T) { // Since the original body can only be read once, create a new one from the body bytes we just read messagesRes.Body = ioutil.NopCloser(bytes.NewBuffer(messsageResBody)) + // TODO: Remove, the context request is just for TARDIS visualizations contextRes := alice.MustDoRaw(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "context", eventsAfter[len(eventsAfter)-1]}, nil, "application/json", url.Values{ "limit": []string{"100"}, }) @@ -139,7 +144,7 @@ func TestBackfillingHistory(t *testing.T) { "contextResBody": string(contextResBody), }).Error("context res") - // Copy the array by value so we can modify it as we iterate in the foreach loop + // Copy the array by value so we can modify it as we iterate in the foreach loop. // We save the full untouched `expectedMessageOrder` for use in the log messages workingExpectedMessageOrder := expectedMessageOrder @@ -285,6 +290,58 @@ func TestBackfillingHistory(t *testing.T) { 403, ) }) + + t.Run("Historical messages are visible on federated server", func(t *testing.T) { + t.Parallel() + + roomID := as.CreateRoom(t, struct{}{}) + alice.JoinRoom(t, roomID, nil) + + eventsBefore := createMessagesInRoom(t, alice, roomID, 1) + eventBefore := eventsBefore[0] + timeAfterEventBefore := time.Now() + + // Register and join the virtual user + ensureRegistered(t, as, virtualUserLocalpart) + + backfillRes := backfillBulkHistoricalMessagesInReverseChronologicalAtTime( + t, + as, + virtualUserID, + roomID, + eventBefore, + timeAfterEventBefore, + 1, + // Status + 200, + ) + _, historicalEvents := getEventsFromBulkSendResponse(t, backfillRes) + + // Join the room from a remote homeserver + remoteCharlie.JoinRoom(t, roomID, []string{"hs1"}) + + messagesRes := remoteCharlie.MustDoRaw(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, nil, "application/json", url.Values{ + "dir": []string{"b"}, + "limit": []string{"100"}, + }) + messsageResBody := client.ParseJSON(t, messagesRes) + eventIDsFromResponse := getEventIDsFromResponseBody(t, messsageResBody) + // Since the original body can only be read once, create a new one from the body bytes we just read + messagesRes.Body = ioutil.NopCloser(bytes.NewBuffer(messsageResBody)) + + logrus.WithFields(logrus.Fields{ + "eventIDsFromResponse": eventIDsFromResponse, + "historicalEvents": historicalEvents, + }).Error("can we see historical?") + + must.MatchResponse(t, messagesRes, match.HTTPResponse{ + JSON: []match.JSON{ + match.JSONCheckOffAllowUnwanted("chunk", []interface{}{historicalEvents[0]}, func(r gjson.Result) interface{} { + return r.Get("event_id").Str + }, nil), + }, + }) + }) }) } @@ -380,24 +437,6 @@ func ensureRegistered(t *testing.T, c *client.CSAPI, virtualUserLocalpart string } } -// joinRoom joins the room ID or alias given, else fails the test. Returns the room ID. 
-func joinRoom(t *testing.T, c *client.CSAPI, virtualUserID string, roomIDOrAlias string) string { - query := url.Values{} - if virtualUserID != "" { - query.Add("user_id", virtualUserID) - } - - // join the room - res := c.MustDoRaw(t, "POST", []string{"_matrix", "client", "r0", "join", roomIDOrAlias}, nil, "application/json", query) - // return the room ID if we joined with it - if roomIDOrAlias[0] == '!' { - return roomIDOrAlias - } - // otherwise we should be told the room ID if we joined via an alias - body := client.ParseJSON(t, res) - return client.GetJSONFieldStr(t, body, "room_id") -} - func createMessagesInRoom(t *testing.T, c *client.CSAPI, roomID string, count int) []string { evs := make([]string, count) for i := 0; i < len(evs); i++ { From ac8e7705a2b6ddfdee1cb4350a859085a512dda4 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Tue, 27 Apr 2021 01:48:35 -0500 Subject: [PATCH 51/81] Federation test debugging --- tests/msc2716_test.go | 25 +++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go index d2a87b57..0191d7da 100644 --- a/tests/msc2716_test.go +++ b/tests/msc2716_test.go @@ -317,9 +317,20 @@ func TestBackfillingHistory(t *testing.T) { ) _, historicalEvents := getEventsFromBulkSendResponse(t, backfillRes) + eventsAfter := createMessagesInRoom(t, alice, roomID, 3) + // Join the room from a remote homeserver remoteCharlie.JoinRoom(t, roomID, []string{"hs1"}) + localMessagesRes := alice.MustDoRaw(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, nil, "application/json", url.Values{ + "dir": []string{"b"}, + "limit": []string{"100"}, + }) + localMesssageResBody := client.ParseJSON(t, localMessagesRes) + localEventIDsFromResponse := getEventIDsFromResponseBody(t, localMesssageResBody) + // Since the original body can only be read once, create a new one from the body bytes we just read + localMessagesRes.Body = ioutil.NopCloser(bytes.NewBuffer(localMesssageResBody)) + messagesRes := remoteCharlie.MustDoRaw(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, nil, "application/json", url.Values{ "dir": []string{"b"}, "limit": []string{"100"}, @@ -330,10 +341,20 @@ func TestBackfillingHistory(t *testing.T) { messagesRes.Body = ioutil.NopCloser(bytes.NewBuffer(messsageResBody)) logrus.WithFields(logrus.Fields{ - "eventIDsFromResponse": eventIDsFromResponse, - "historicalEvents": historicalEvents, + "localEventIDsFromResponse": localEventIDsFromResponse, + "eventIDsFromResponse": eventIDsFromResponse, + "historicalEvents": historicalEvents, }).Error("can we see historical?") + // TODO: Remove, the context request is just for TARDIS visualizations + contextRes := alice.MustDoRaw(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "context", eventsAfter[len(eventsAfter)-1]}, nil, "application/json", url.Values{ + "limit": []string{"100"}, + }) + contextResBody := client.ParseJSON(t, contextRes) + logrus.WithFields(logrus.Fields{ + "contextResBody": string(contextResBody), + }).Error("context res") + must.MatchResponse(t, messagesRes, match.HTTPResponse{ JSON: []match.JSON{ match.JSONCheckOffAllowUnwanted("chunk", []interface{}{historicalEvents[0]}, func(r gjson.Result) interface{} { From e1d203c9b7aeffd09aafb60a99a267d603780634 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 5 May 2021 23:30:17 -0500 Subject: [PATCH 52/81] Log historical state events --- tests/msc2716_test.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) 
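A note on what this patch logs: getEventsFromBulkSendResponse treats the experimental bulksend response as two flat arrays of event IDs, so the historicalStateEvents added to the log line below should come out looking roughly like this. This is only a sketch of the assumed shape with made-up event IDs, since the endpoint is still in flux on the Synapse side:

{
  "state_events": ["$virtual-user-join-event:hs1"],
  "events": ["$historical-message-1:hs1", "$historical-message-2:hs1"]
}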
diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go index 0191d7da..96bdd684 100644 --- a/tests/msc2716_test.go +++ b/tests/msc2716_test.go @@ -301,6 +301,8 @@ func TestBackfillingHistory(t *testing.T) { eventBefore := eventsBefore[0] timeAfterEventBefore := time.Now() + eventsAfter := createMessagesInRoom(t, alice, roomID, 3) + // Register and join the virtual user ensureRegistered(t, as, virtualUserLocalpart) @@ -311,13 +313,11 @@ func TestBackfillingHistory(t *testing.T) { roomID, eventBefore, timeAfterEventBefore, - 1, + 2, // Status 200, ) - _, historicalEvents := getEventsFromBulkSendResponse(t, backfillRes) - - eventsAfter := createMessagesInRoom(t, alice, roomID, 3) + historicalStateEvents, historicalEvents := getEventsFromBulkSendResponse(t, backfillRes) // Join the room from a remote homeserver remoteCharlie.JoinRoom(t, roomID, []string{"hs1"}) @@ -344,6 +344,7 @@ func TestBackfillingHistory(t *testing.T) { "localEventIDsFromResponse": localEventIDsFromResponse, "eventIDsFromResponse": eventIDsFromResponse, "historicalEvents": historicalEvents, + "historicalStateEvents": historicalStateEvents, }).Error("can we see historical?") // TODO: Remove, the context request is just for TARDIS visualizations From ec5fc158d8382da6e55fe1761e8c9a04aa3d251a Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Thu, 6 May 2021 01:17:08 -0500 Subject: [PATCH 53/81] Add tests to ensure historical messages are visible to federated users who are already in the room --- tests/msc2716_test.go | 131 +++++++++++++++++++++++++++++++++--------- 1 file changed, 105 insertions(+), 26 deletions(-) diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go index 96bdd684..91e4312b 100644 --- a/tests/msc2716_test.go +++ b/tests/msc2716_test.go @@ -291,7 +291,7 @@ func TestBackfillingHistory(t *testing.T) { ) }) - t.Run("Historical messages are visible on federated server", func(t *testing.T) { + t.Run("Historical messages are visible when joining on federated server", func(t *testing.T) { t.Parallel() roomID := as.CreateRoom(t, struct{}{}) @@ -301,7 +301,8 @@ func TestBackfillingHistory(t *testing.T) { eventBefore := eventsBefore[0] timeAfterEventBefore := time.Now() - eventsAfter := createMessagesInRoom(t, alice, roomID, 3) + // eventsAfter + createMessagesInRoom(t, alice, roomID, 3) // Register and join the virtual user ensureRegistered(t, as, virtualUserLocalpart) @@ -317,48 +318,126 @@ func TestBackfillingHistory(t *testing.T) { // Status 200, ) - historicalStateEvents, historicalEvents := getEventsFromBulkSendResponse(t, backfillRes) + _, historicalEvents := getEventsFromBulkSendResponse(t, backfillRes) - // Join the room from a remote homeserver + // Join the room from a remote homeserver after the backfilled messages were sent remoteCharlie.JoinRoom(t, roomID, []string{"hs1"}) - localMessagesRes := alice.MustDoRaw(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, nil, "application/json", url.Values{ + messagesRes := remoteCharlie.MustDoRaw(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, nil, "application/json", url.Values{ "dir": []string{"b"}, "limit": []string{"100"}, }) - localMesssageResBody := client.ParseJSON(t, localMessagesRes) - localEventIDsFromResponse := getEventIDsFromResponseBody(t, localMesssageResBody) - // Since the original body can only be read once, create a new one from the body bytes we just read - localMessagesRes.Body = ioutil.NopCloser(bytes.NewBuffer(localMesssageResBody)) + + must.MatchResponse(t, messagesRes, 
match.HTTPResponse{ + JSON: []match.JSON{ + match.JSONCheckOffAllowUnwanted("chunk", []interface{}{historicalEvents[0], historicalEvents[1]}, func(r gjson.Result) interface{} { + return r.Get("event_id").Str + }, nil), + }, + }) + }) + + t.Run("Historical messages are visible when already joined on federated server", func(t *testing.T) { + t.Parallel() + + roomID := as.CreateRoom(t, struct{}{}) + alice.JoinRoom(t, roomID, nil) + + // Join the room from a remote homeserver before any backfilled messages are sent + remoteCharlie.JoinRoom(t, roomID, []string{"hs1"}) + + eventsBefore := createMessagesInRoom(t, alice, roomID, 1) + eventBefore := eventsBefore[0] + timeAfterEventBefore := time.Now() + + // eventsAfter + createMessagesInRoom(t, alice, roomID, 10) + + // Mimic scrollback just through the latest messages + remoteCharlie.MustDoRaw(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, nil, "application/json", url.Values{ + "dir": []string{"b"}, + // Limited so we can only see a few of the latest messages + "limit": []string{"5"}, + }) + + // Register and join the virtual user + ensureRegistered(t, as, virtualUserLocalpart) + + backfillRes := backfillBulkHistoricalMessagesInReverseChronologicalAtTime( + t, + as, + virtualUserID, + roomID, + eventBefore, + timeAfterEventBefore, + 2, + // Status + 200, + ) + _, historicalEvents := getEventsFromBulkSendResponse(t, backfillRes) messagesRes := remoteCharlie.MustDoRaw(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, nil, "application/json", url.Values{ "dir": []string{"b"}, "limit": []string{"100"}, }) - messsageResBody := client.ParseJSON(t, messagesRes) - eventIDsFromResponse := getEventIDsFromResponseBody(t, messsageResBody) - // Since the original body can only be read once, create a new one from the body bytes we just read - messagesRes.Body = ioutil.NopCloser(bytes.NewBuffer(messsageResBody)) - logrus.WithFields(logrus.Fields{ - "localEventIDsFromResponse": localEventIDsFromResponse, - "eventIDsFromResponse": eventIDsFromResponse, - "historicalEvents": historicalEvents, - "historicalStateEvents": historicalStateEvents, - }).Error("can we see historical?") + must.MatchResponse(t, messagesRes, match.HTTPResponse{ + JSON: []match.JSON{ + match.JSONCheckOffAllowUnwanted("chunk", []interface{}{historicalEvents[0], historicalEvents[1]}, func(r gjson.Result) interface{} { + return r.Get("event_id").Str + }, nil), + }, + }) + }) - // TODO: Remove, the context request is just for TARDIS visualizations - contextRes := alice.MustDoRaw(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "context", eventsAfter[len(eventsAfter)-1]}, nil, "application/json", url.Values{ + t.Run("When messages have already been scrolled back through, new historical messages are visible in next scroll back on federated server", func(t *testing.T) { + t.Parallel() + + roomID := as.CreateRoom(t, struct{}{}) + alice.JoinRoom(t, roomID, nil) + + // Join the room from a remote homeserver before any backfilled messages are sent + remoteCharlie.JoinRoom(t, roomID, []string{"hs1"}) + + eventsBefore := createMessagesInRoom(t, alice, roomID, 1) + eventBefore := eventsBefore[0] + timeAfterEventBefore := time.Now() + + // eventsAfter + createMessagesInRoom(t, alice, roomID, 3) + + // Register and join the virtual user + ensureRegistered(t, as, virtualUserLocalpart) + + // Mimic scrollback to all of the messages + // scrollbackMessagesRes + remoteCharlie.MustDoRaw(t, "GET", []string{"_matrix", "client", "r0", "rooms", 
roomID, "messages"}, nil, "application/json", url.Values{ + "dir": []string{"b"}, + "limit": []string{"100"}, + }) + + // Historical messages are inserted where we have already scrolled back to + backfillRes := backfillBulkHistoricalMessagesInReverseChronologicalAtTime( + t, + as, + virtualUserID, + roomID, + eventBefore, + timeAfterEventBefore, + 2, + // Status + 200, + ) + _, historicalEvents := getEventsFromBulkSendResponse(t, backfillRes) + + messagesRes := remoteCharlie.MustDoRaw(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, nil, "application/json", url.Values{ + "dir": []string{"b"}, "limit": []string{"100"}, }) - contextResBody := client.ParseJSON(t, contextRes) - logrus.WithFields(logrus.Fields{ - "contextResBody": string(contextResBody), - }).Error("context res") must.MatchResponse(t, messagesRes, match.HTTPResponse{ JSON: []match.JSON{ - match.JSONCheckOffAllowUnwanted("chunk", []interface{}{historicalEvents[0]}, func(r gjson.Result) interface{} { + match.JSONCheckOffAllowUnwanted("chunk", []interface{}{historicalEvents[0], historicalEvents[1]}, func(r gjson.Result) interface{} { return r.Get("event_id").Str }, nil), }, From 7150004c6057f9f269166f9a71c033f8f55d8725 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Fri, 14 May 2021 03:37:48 -0500 Subject: [PATCH 54/81] Utilize chunk_id to connect to insertion points --- tests/msc2716_test.go | 44 +++++++++++++++++++++++++++++++++++++++---- 1 file changed, 40 insertions(+), 4 deletions(-) diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go index 91e4312b..152c9c36 100644 --- a/tests/msc2716_test.go +++ b/tests/msc2716_test.go @@ -38,6 +38,14 @@ type event struct { // checking out the test result in a Synapse instance const TimeBetweenMessages = time.Millisecond +var ( + MSC2716_INSERTION = "org.matrix.msc2716.insertion" + MSC2716_MARKER = "org.matrix.msc2716.marker" + + MSC2716_NEXT_CHUNK_ID = "org.matrix.msc2716.next_chunk_id" + MSC2716_CHUNK_ID = "org.matrix.msc2716.chunk_id" +) + // Test that the message events we insert between A and B come back in the correct order from /messages func TestBackfillingHistory(t *testing.T) { deployment := Deploy(t, "rooms_state", b.BlueprintHSWithApplicationService) @@ -95,11 +103,13 @@ func TestBackfillingHistory(t *testing.T) { roomID, eventBefore, timeAfterEventBefore.Add(TimeBetweenMessages*3), + "", 3, // Status 200, ) _, historicalEvents := getEventsFromBulkSendResponse(t, backfillRes) + nextChunkID := getNextChunkIdFromBulkSendResponse(t, backfillRes) // Insert another older chunk of backfilled history from the same user. 
// Make sure the meta data and joins still work on the subsequent chunk @@ -110,6 +120,7 @@ func TestBackfillingHistory(t *testing.T) { roomID, eventBefore, timeAfterEventBefore, + nextChunkID, 3, // Status 200, @@ -190,6 +201,7 @@ func TestBackfillingHistory(t *testing.T) { roomID, eventBefore, timeAfterEventBefore, + "", 1, // Status 200, @@ -258,6 +270,7 @@ func TestBackfillingHistory(t *testing.T) { roomID, "$some-non-existant-event-id", time.Now(), + "", 1, // Status // TODO: Seems like this makes more sense as a 404 @@ -284,6 +297,7 @@ func TestBackfillingHistory(t *testing.T) { roomID, eventBefore, timeAfterEventBefore, + "", 1, // Status // Normal user alice should not be able to backfill messages @@ -314,6 +328,7 @@ func TestBackfillingHistory(t *testing.T) { roomID, eventBefore, timeAfterEventBefore, + "", 2, // Status 200, @@ -370,6 +385,7 @@ func TestBackfillingHistory(t *testing.T) { roomID, eventBefore, timeAfterEventBefore, + "", 2, // Status 200, @@ -424,6 +440,7 @@ func TestBackfillingHistory(t *testing.T) { roomID, eventBefore, timeAfterEventBefore, + "", 2, // Status 200, @@ -564,6 +581,7 @@ func backfillBulkHistoricalMessagesInReverseChronologicalAtTime( roomID string, insertAfterEventId string, insertTime time.Time, + chunkID string, count int, status int, ) (res *http.Response) { @@ -589,6 +607,12 @@ func backfillBulkHistoricalMessagesInReverseChronologicalAtTime( "m.historical": true, }, } + + // If provided, connect the chunk to the last insertion point + if chunkID != "" && i == 0 { + newEvent["content"].(map[string]interface{})[MSC2716_CHUNK_ID] = chunkID + } + evs[i] = newEvent } @@ -629,11 +653,23 @@ func backfillBulkHistoricalMessagesInReverseChronologicalAtTime( return res } -func getEventsFromBulkSendResponse(t *testing.T, res *http.Response) (state_event_ids []string, event_ids []string) { +func getEventsFromBulkSendResponse(t *testing.T, res *http.Response) (stateEventsIDs []string, eventIDs []string) { + body := client.ParseJSON(t, res) + // Since the original body can only be read once, create a new one from the body bytes we just read + res.Body = ioutil.NopCloser(bytes.NewBuffer(body)) + + stateEventsIDs = client.GetJSONFieldArray(t, body, "state_events") + eventIDs = client.GetJSONFieldArray(t, body, "events") + + return stateEventsIDs, eventIDs +} + +func getNextChunkIdFromBulkSendResponse(t *testing.T, res *http.Response) (nextChunkID string) { body := client.ParseJSON(t, res) + // Since the original body can only be read once, create a new one from the body bytes we just read + res.Body = ioutil.NopCloser(bytes.NewBuffer(body)) - stateEvents := client.GetJSONFieldArray(t, body, "state_events") - events := client.GetJSONFieldArray(t, body, "events") + nextChunkID = client.GetJSONFieldStr(t, body, "next_chunk_id") - return stateEvents, events + return nextChunkID } From 8d65ac718489dba53a20ab66239e47757d1ae708 Mon Sep 17 00:00:00 2001 From: Kegan Dougal Date: Fri, 14 May 2021 15:18:28 +0100 Subject: [PATCH 55/81] Remove namespace --- tests/msc2716_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go index 152c9c36..8c1ea6e4 100644 --- a/tests/msc2716_test.go +++ b/tests/msc2716_test.go @@ -48,7 +48,7 @@ var ( // Test that the message events we insert between A and B come back in the correct order from /messages func TestBackfillingHistory(t *testing.T) { - deployment := Deploy(t, "rooms_state", b.BlueprintHSWithApplicationService) + deployment := Deploy(t, 
b.BlueprintHSWithApplicationService) defer deployment.Destroy(t) //defer time.Sleep(2 * time.Hour) From 0145930941cafe0dab84801bc4d740992e32eb1a Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 26 May 2021 21:50:34 -0500 Subject: [PATCH 56/81] Add ?chunk_id query param to connect chunks --- tests/msc2716_test.go | 67 ++++++++++++++++++++----------------------- 1 file changed, 31 insertions(+), 36 deletions(-) diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go index 152c9c36..bdc36aaa 100644 --- a/tests/msc2716_test.go +++ b/tests/msc2716_test.go @@ -42,8 +42,11 @@ var ( MSC2716_INSERTION = "org.matrix.msc2716.insertion" MSC2716_MARKER = "org.matrix.msc2716.marker" - MSC2716_NEXT_CHUNK_ID = "org.matrix.msc2716.next_chunk_id" - MSC2716_CHUNK_ID = "org.matrix.msc2716.chunk_id" + MSC2716_HISTORICAL = "org.matrix.msc2716.historical" + MSC2716_NEXT_CHUNK_ID = "org.matrix.msc2716.next_chunk_id" + MSC2716_CHUNK_ID = "org.matrix.msc2716.chunk_id" + MSC2716_MARKER_INSERTION = "org.matrix.msc2716.marker.insertion" + MSC2716_MARKER_INSERTION_PREV_EVENTS = "org.matrix.msc2716.marker.insertion_prev_events" ) // Test that the message events we insert between A and B come back in the correct order from /messages @@ -96,7 +99,7 @@ func TestBackfillingHistory(t *testing.T) { // TODO: Try adding avatar and displayName and see if historical messages get this info // Insert the most recent chunk of backfilled history - backfillRes := backfillBulkHistoricalMessagesInReverseChronologicalAtTime( + backfillRes := backfillBulkHistoricalMessages( t, as, virtualUserID, @@ -113,7 +116,7 @@ func TestBackfillingHistory(t *testing.T) { // Insert another older chunk of backfilled history from the same user. // Make sure the meta data and joins still work on the subsequent chunk - backfillRes2 := backfillBulkHistoricalMessagesInReverseChronologicalAtTime( + backfillRes2 := backfillBulkHistoricalMessages( t, as, virtualUserID, @@ -129,10 +132,8 @@ func TestBackfillingHistory(t *testing.T) { var expectedMessageOrder []string expectedMessageOrder = append(expectedMessageOrder, eventsBefore...) - // Historical events were inserted in reverse chronological - // But we expect them to come out in /messages in the correct order - expectedMessageOrder = append(expectedMessageOrder, reversed(historicalEvents2)...) - expectedMessageOrder = append(expectedMessageOrder, reversed(historicalEvents)...) + expectedMessageOrder = append(expectedMessageOrder, historicalEvents2...) + expectedMessageOrder = append(expectedMessageOrder, historicalEvents...) expectedMessageOrder = append(expectedMessageOrder, eventsAfter...) 
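For reference, the `reversed` helper that the next hunk relies on is defined elsewhere in msc2716_test.go and does not appear in these patches; a plausible implementation is just a copying loop:

```go
// reversed returns a copy of in with the elements in the opposite order,
// turning an oldest-to-newest list of event IDs into the newest-to-oldest
// order that /messages?dir=b is expected to return.
func reversed(in []string) []string {
	out := make([]string, len(in))
	for i := range in {
		out[i] = in[len(in)-1-i]
	}
	return out
}
```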
// Order events from newest to oldest expectedMessageOrder = reversed(expectedMessageOrder) @@ -179,7 +180,7 @@ func TestBackfillingHistory(t *testing.T) { }) }) - t.Run("Backfilled historical events with m.historical do not come down /sync", func(t *testing.T) { + t.Run("Backfilled historical events with MSC2716_HISTORICAL do not come down /sync", func(t *testing.T) { t.Parallel() roomID := as.CreateRoom(t, struct{}{}) @@ -194,7 +195,7 @@ func TestBackfillingHistory(t *testing.T) { createMessagesInRoom(t, alice, roomID, 5) // Insert a backfilled event - backfillRes := backfillBulkHistoricalMessagesInReverseChronologicalAtTime( + backfillRes := backfillBulkHistoricalMessages( t, as, virtualUserID, @@ -225,7 +226,7 @@ func TestBackfillingHistory(t *testing.T) { }) }) - t.Run("Backfilled historical events without m.historical come down /sync", func(t *testing.T) { + t.Run("Backfilled historical events without MSC2716_HISTORICAL come down /sync", func(t *testing.T) { t.Parallel() roomID := as.CreateRoom(t, struct{}{}) @@ -236,7 +237,7 @@ func TestBackfillingHistory(t *testing.T) { timeAfterEventBefore := time.Now() insertOriginServerTs := uint64(timeAfterEventBefore.UnixNano() / int64(time.Millisecond)) - // Send an event that has `prev_event` and `ts` set but not `m.historical`. + // Send an event that has `prev_event` and `ts` set but not `MSC2716_HISTORICAL`. // We should see these types of events in the `/sync` response eventWeShouldSee := sendEvent(t, as, "", roomID, event{ Type: "m.room.message", PrevEvents: []string{ eventBefore, }, OriginServerTS: insertOriginServerTs, Content: map[string]interface{}{ "msgtype": "m.text", - "body": "Message with prev_event and ts but no m.historical", + "body": "Message with prev_event and ts but no MSC2716_HISTORICAL", // This is commented out on purpose. 
- // We are explicitly testing when m.historical isn't present - //"m.historical": true, + // We are explicitly testing when MSC2716_HISTORICAL isn't present + //MSC2716_HISTORICAL: true, }, }) @@ -263,7 +264,7 @@ func TestBackfillingHistory(t *testing.T) { roomID := as.CreateRoom(t, struct{}{}) - backfillBulkHistoricalMessagesInReverseChronologicalAtTime( + backfillBulkHistoricalMessages( t, as, virtualUserID, @@ -290,7 +291,7 @@ func TestBackfillingHistory(t *testing.T) { eventBefore := eventsBefore[0] timeAfterEventBefore := time.Now() - backfillBulkHistoricalMessagesInReverseChronologicalAtTime( + backfillBulkHistoricalMessages( t, alice, virtualUserID, @@ -321,7 +322,7 @@ func TestBackfillingHistory(t *testing.T) { // Register and join the virtual user ensureRegistered(t, as, virtualUserLocalpart) - backfillRes := backfillBulkHistoricalMessagesInReverseChronologicalAtTime( + backfillRes := backfillBulkHistoricalMessages( t, as, virtualUserID, @@ -378,7 +379,7 @@ func TestBackfillingHistory(t *testing.T) { // Register and join the virtual user ensureRegistered(t, as, virtualUserLocalpart) - backfillRes := backfillBulkHistoricalMessagesInReverseChronologicalAtTime( + backfillRes := backfillBulkHistoricalMessages( t, as, virtualUserID, @@ -433,7 +434,7 @@ func TestBackfillingHistory(t *testing.T) { }) // Historical messages are inserted where we have already scrolled back to - backfillRes := backfillBulkHistoricalMessagesInReverseChronologicalAtTime( + backfillRes := backfillBulkHistoricalMessages( t, as, virtualUserID, @@ -574,7 +575,7 @@ func createMessagesInRoom(t *testing.T, c *client.CSAPI, roomID string, count in var chunkCount int64 = 0 -func backfillBulkHistoricalMessagesInReverseChronologicalAtTime( +func backfillBulkHistoricalMessages( t *testing.T, c *client.CSAPI, virtualUserID string, @@ -592,27 +593,17 @@ func backfillBulkHistoricalMessages( evs := make([]map[string]interface{}, count) for i := 0; i < len(evs); i++ { - // We have to backfill historical messages from most recent to oldest - // since backfilled messages decrement their `stream_order` and we want messages - // to appear in order from the `/messages` endpoint - messageIndex := (count - 1) - i - newEvent := map[string]interface{}{ "type": "m.room.message", "sender": virtualUserID, - "origin_server_ts": insertOriginServerTs + (timeBetweenMessagesMS * uint64(messageIndex)), + "origin_server_ts": insertOriginServerTs + (timeBetweenMessagesMS * uint64(i)), "content": map[string]interface{}{ - "msgtype": "m.text", - "body": fmt.Sprintf("Historical %d (chunk=%d)", messageIndex, chunkCount), - "m.historical": true, + "msgtype": "m.text", + "body": fmt.Sprintf("Historical %d (chunk=%d)", i, chunkCount), + MSC2716_HISTORICAL: true, }, } - // If provided, connect the chunk to the last insertion point - if chunkID != "" && i == 0 { - newEvent["content"].(map[string]interface{})[MSC2716_CHUNK_ID] = chunkID - } - evs[i] = newEvent } @@ -629,13 +620,17 @@ query := make(url.Values, 2) query.Add("prev_event", insertAfterEventId) query.Add("user_id", virtualUserID) + // If provided, connect the chunk to the last insertion point + if chunkID != "" { + query.Add("chunk_id", chunkID) + } b, err := json.Marshal(map[string]interface{}{ "events": evs, "state_events_at_start": []map[string]interface{}{joinEvent}, }) if err != nil { - t.Fatalf("msc2716.backfillBulkHistoricalMessagesInReverseChronologicalAtTime failed to marshal JSON body: %s", 
err) + t.Fatalf("msc2716.backfillBulkHistoricalMessages failed to marshal JSON body: %s", err) } res = c.MustDoWithStatusRaw( From 4c720f4fb8eb516902190a1ffae74406b4d24287 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 26 May 2021 21:57:21 -0500 Subject: [PATCH 57/81] Add required type to register user from application service See https://github.com/matrix-org/synapse/blob/develop/CHANGES.md#synapse-1320-2021-04-20 --- tests/msc2716_test.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go index b7e737e4..a20e576a 100644 --- a/tests/msc2716_test.go +++ b/tests/msc2716_test.go @@ -535,7 +535,14 @@ func ensureRegistered(t *testing.T, c *client.CSAPI, virtualUserLocalpart string // t.Fatalf("msc2716.ensureRegistered failed to marshal JSON body: %s", err) // } - res, err := c.DoWithAuthRaw(t, "POST", []string{"_matrix", "client", "r0", "register"}, json.RawMessage(fmt.Sprintf(`{ "username": "%s" }`, virtualUserLocalpart)), "application/json", url.Values{}) + res, err := c.DoWithAuthRaw( + t, + "POST", + []string{"_matrix", "client", "r0", "register"}, + json.RawMessage(fmt.Sprintf(`{ "type": "m.login.application_service", "username": "%s" }`, virtualUserLocalpart)), + "application/json", + url.Values{}, + ) if err != nil { t.Error(err) From f970444970ce0632a4be76f4f50089b3c37e3764 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 7 Jun 2021 09:52:02 -0500 Subject: [PATCH 58/81] Also filter in insertion/marker events --- tests/msc2716_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go index a20e576a..25a22f1b 100644 --- a/tests/msc2716_test.go +++ b/tests/msc2716_test.go @@ -164,7 +164,7 @@ func TestBackfillingHistory(t *testing.T) { JSON: []match.JSON{ match.JSONArrayEach("chunk", func(r gjson.Result) error { // Find all events in order - if len(r.Get("content").Get("body").Str) > 0 { + if len(r.Get("content").Get("body").Str) > 0 || r.Get("type").Str == MSC2716_INSERTION || r.Get("type").Str == MSC2716_MARKER { // Pop the next message off the expected list nextEventInOrder := workingExpectedMessageOrder[0] workingExpectedMessageOrder = workingExpectedMessageOrder[1:] @@ -483,7 +483,7 @@ func getEventIDsFromResponseBody(t *testing.T, body []byte) (eventIDsFromRespons } res.ForEach(func(key, r gjson.Result) bool { - if len(r.Get("content").Get("body").Str) > 0 { + if len(r.Get("content").Get("body").Str) > 0 || r.Get("type").Str == MSC2716_INSERTION || r.Get("type").Str == MSC2716_MARKER { eventIDsFromResponse = append(eventIDsFromResponse, r.Get("event_id").Str+" ("+r.Get("content").Get("body").Str+")") } return true From 9ee5ad14961e5cc45930c11705eb2d0ef67d2009 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 7 Jun 2021 15:22:56 -0500 Subject: [PATCH 59/81] Switch from bulk to batch wording --- tests/msc2716_test.go | 40 ++++++++++++++++++++-------------------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go index 25a22f1b..eb743b26 100644 --- a/tests/msc2716_test.go +++ b/tests/msc2716_test.go @@ -99,7 +99,7 @@ func TestBackfillingHistory(t *testing.T) { // TODO: Try adding avatar and displayName and see if historical messages get this info // Insert the most recent chunk of backfilled history - backfillRes := backfillBulkHistoricalMessages( + backfillRes := backfillBatchHistoricalMessages( t, as, virtualUserID, @@ -111,12 +111,12 @@ func TestBackfillingHistory(t 
*testing.T) { // Status 200, ) - _, historicalEvents := getEventsFromBulkSendResponse(t, backfillRes) - nextChunkID := getNextChunkIdFromBulkSendResponse(t, backfillRes) + _, historicalEvents := getEventsFromBatchSendResponse(t, backfillRes) + nextChunkID := getNextChunkIdFromBatchSendResponse(t, backfillRes) // Insert another older chunk of backfilled history from the same user. // Make sure the meta data and joins still work on the subsequent chunk - backfillRes2 := backfillBulkHistoricalMessages( + backfillRes2 := backfillBatchHistoricalMessages( t, as, virtualUserID, @@ -128,7 +128,7 @@ func TestBackfillingHistory(t *testing.T) { // Status 200, ) - _, historicalEvents2 := getEventsFromBulkSendResponse(t, backfillRes2) + _, historicalEvents2 := getEventsFromBatchSendResponse(t, backfillRes2) var expectedMessageOrder []string expectedMessageOrder = append(expectedMessageOrder, eventsBefore...) @@ -195,7 +195,7 @@ func TestBackfillingHistory(t *testing.T) { createMessagesInRoom(t, alice, roomID, 5) // Insert a backfilled event - backfillRes := backfillBulkHistoricalMessages( + backfillRes := backfillBatchHistoricalMessages( t, as, virtualUserID, @@ -207,7 +207,7 @@ func TestBackfillingHistory(t *testing.T) { // Status 200, ) - _, historicalEvents := getEventsFromBulkSendResponse(t, backfillRes) + _, historicalEvents := getEventsFromBatchSendResponse(t, backfillRes) backfilledEvent := historicalEvents[0] // This is just a dummy event we search for after the backfilledEvent @@ -264,7 +264,7 @@ func TestBackfillingHistory(t *testing.T) { roomID := as.CreateRoom(t, struct{}{}) - backfillBulkHistoricalMessages( + backfillBatchHistoricalMessages( t, as, virtualUserID, @@ -291,7 +291,7 @@ func TestBackfillingHistory(t *testing.T) { eventBefore := eventsBefore[0] timeAfterEventBefore := time.Now() - backfillBulkHistoricalMessages( + backfillBatchHistoricalMessages( t, alice, virtualUserID, @@ -322,7 +322,7 @@ func TestBackfillingHistory(t *testing.T) { // Register and join the virtual user ensureRegistered(t, as, virtualUserLocalpart) - backfillRes := backfillBulkHistoricalMessages( + backfillRes := backfillBatchHistoricalMessages( t, as, virtualUserID, @@ -334,7 +334,7 @@ func TestBackfillingHistory(t *testing.T) { // Status 200, ) - _, historicalEvents := getEventsFromBulkSendResponse(t, backfillRes) + _, historicalEvents := getEventsFromBatchSendResponse(t, backfillRes) // Join the room from a remote homeserver after the backfilled messages were sent remoteCharlie.JoinRoom(t, roomID, []string{"hs1"}) @@ -379,7 +379,7 @@ func TestBackfillingHistory(t *testing.T) { // Register and join the virtual user ensureRegistered(t, as, virtualUserLocalpart) - backfillRes := backfillBulkHistoricalMessages( + backfillRes := backfillBatchHistoricalMessages( t, as, virtualUserID, @@ -391,7 +391,7 @@ func TestBackfillingHistory(t *testing.T) { // Status 200, ) - _, historicalEvents := getEventsFromBulkSendResponse(t, backfillRes) + _, historicalEvents := getEventsFromBatchSendResponse(t, backfillRes) messagesRes := remoteCharlie.MustDoRaw(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, nil, "application/json", url.Values{ "dir": []string{"b"}, @@ -434,7 +434,7 @@ func TestBackfillingHistory(t *testing.T) { }) // Historical messages are inserted where we have already scrolled back to - backfillRes := backfillBulkHistoricalMessages( + backfillRes := backfillBatchHistoricalMessages( t, as, virtualUserID, @@ -446,7 +446,7 @@ func TestBackfillingHistory(t *testing.T) { // Status 200, ) - 
_, historicalEvents := getEventsFromBulkSendResponse(t, backfillRes) + _, historicalEvents := getEventsFromBatchSendResponse(t, backfillRes) messagesRes := remoteCharlie.MustDoRaw(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, nil, "application/json", url.Values{ "dir": []string{"b"}, @@ -582,7 +582,7 @@ func createMessagesInRoom(t *testing.T, c *client.CSAPI, roomID string, count in var chunkCount int64 = 0 -func backfillBulkHistoricalMessages( +func backfillBatchHistoricalMessages( t *testing.T, c *client.CSAPI, virtualUserID string, @@ -637,13 +637,13 @@ func backfillBulkHistoricalMessages( "state_events_at_start": []map[string]interface{}{joinEvent}, }) if err != nil { - t.Fatalf("msc2716.backfillBulkHistoricalMessages failed to marshal JSON body: %s", err) + t.Fatalf("msc2716.backfillBatchHistoricalMessages failed to marshal JSON body: %s", err) } res = c.MustDoWithStatusRaw( t, "POST", - []string{"_matrix", "client", "r0", "rooms", roomID, "bulksend"}, + []string{"_matrix", "client", "r0", "rooms", roomID, "batchsend"}, b, "application/json", query, @@ -655,7 +655,7 @@ func backfillBulkHistoricalMessages( return res } -func getEventsFromBulkSendResponse(t *testing.T, res *http.Response) (stateEventsIDs []string, eventIDs []string) { +func getEventsFromBatchSendResponse(t *testing.T, res *http.Response) (stateEventsIDs []string, eventIDs []string) { body := client.ParseJSON(t, res) // Since the original body can only be read once, create a new one from the body bytes we just read res.Body = ioutil.NopCloser(bytes.NewBuffer(body)) @@ -666,7 +666,7 @@ func getEventsFromBulkSendResponse(t *testing.T, res *http.Response) (stateEvent return stateEventsIDs, eventIDs } -func getNextChunkIdFromBulkSendResponse(t *testing.T, res *http.Response) (nextChunkID string) { +func getNextChunkIdFromBatchSendResponse(t *testing.T, res *http.Response) (nextChunkID string) { body := client.ParseJSON(t, res) // Since the original body can only be read once, create a new one from the body bytes we just read res.Body = ioutil.NopCloser(bytes.NewBuffer(body)) From 35c8f76122070532c98811fac9ec2602e3369f97 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 16 Jun 2021 22:22:19 -0500 Subject: [PATCH 60/81] Use unstable endpoint for MSC2716 batch send --- tests/msc2716_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go index eb743b26..b2b8255e 100644 --- a/tests/msc2716_test.go +++ b/tests/msc2716_test.go @@ -643,7 +643,7 @@ func backfillBatchHistoricalMessages( res = c.MustDoWithStatusRaw( t, "POST", - []string{"_matrix", "client", "r0", "rooms", roomID, "batchsend"}, + []string{"_matrix", "client", "unstable", "org.matrix.msc2716", "rooms", roomID, "batch_send"}, b, "application/json", query, From 007877f8445cd4886aabac18de53c6f4c6d48748 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 21 Jun 2021 16:16:47 -0500 Subject: [PATCH 61/81] Refactor to use functional client functions See https://github.com/matrix-org/complement/commit/3acc9bbfb0c91cae3ccd488aff990bceb8854e62 --- tests/msc2716_test.go | 76 +++++++++++++++++++++++-------------------- 1 file changed, 40 insertions(+), 36 deletions(-) diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go index b2b8255e..e78df50a 100644 --- a/tests/msc2716_test.go +++ b/tests/msc2716_test.go @@ -94,7 +94,7 @@ func TestBackfillingHistory(t *testing.T) { eventsAfter := createMessagesInRoom(t, alice, roomID, 2) // Register and join the virtual user - 
ensureRegistered(t, as, virtualUserLocalpart) + ensureVirtualUserRegistered(t, as, virtualUserLocalpart) // TODO: Try adding avatar and displayName and see if historical messages get this info @@ -138,19 +138,19 @@ func TestBackfillingHistory(t *testing.T) { // Order events from newest to oldest expectedMessageOrder = reversed(expectedMessageOrder) - messagesRes := alice.MustDoRaw(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, nil, "application/json", url.Values{ + messagesRes := alice.MustDoFunc(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, client.WithContentType("application/json"), client.WithQueries(url.Values{ "dir": []string{"b"}, "limit": []string{"100"}, - }) + })) messsageResBody := client.ParseJSON(t, messagesRes) eventIDsFromResponse := getEventIDsFromResponseBody(t, messsageResBody) // Since the original body can only be read once, create a new one from the body bytes we just read messagesRes.Body = ioutil.NopCloser(bytes.NewBuffer(messsageResBody)) // TODO: Remove, the context request is just for TARDIS visualizations - contextRes := alice.MustDoRaw(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "context", eventsAfter[len(eventsAfter)-1]}, nil, "application/json", url.Values{ + contextRes := alice.MustDoFunc(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "context", eventsAfter[len(eventsAfter)-1]}, client.WithContentType("application/json"), client.WithQueries(url.Values{ "limit": []string{"100"}, - }) + })) contextResBody := client.ParseJSON(t, contextRes) logrus.WithFields(logrus.Fields{ "contextResBody": string(contextResBody), @@ -320,7 +320,7 @@ func TestBackfillingHistory(t *testing.T) { createMessagesInRoom(t, alice, roomID, 3) // Register and join the virtual user - ensureRegistered(t, as, virtualUserLocalpart) + ensureVirtualUserRegistered(t, as, virtualUserLocalpart) backfillRes := backfillBatchHistoricalMessages( t, @@ -339,10 +339,10 @@ func TestBackfillingHistory(t *testing.T) { // Join the room from a remote homeserver after the backfilled messages were sent remoteCharlie.JoinRoom(t, roomID, []string{"hs1"}) - messagesRes := remoteCharlie.MustDoRaw(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, nil, "application/json", url.Values{ + messagesRes := remoteCharlie.MustDoFunc(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, client.WithContentType("application/json"), client.WithQueries(url.Values{ "dir": []string{"b"}, "limit": []string{"100"}, - }) + })) must.MatchResponse(t, messagesRes, match.HTTPResponse{ JSON: []match.JSON{ @@ -370,14 +370,14 @@ func TestBackfillingHistory(t *testing.T) { createMessagesInRoom(t, alice, roomID, 10) // Mimic scrollback just through the latest messages - remoteCharlie.MustDoRaw(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, nil, "application/json", url.Values{ + remoteCharlie.MustDoFunc(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, client.WithContentType("application/json"), client.WithQueries(url.Values{ "dir": []string{"b"}, // Limited so we can only see a few of the latest messages "limit": []string{"5"}, - }) + })) // Register and join the virtual user - ensureRegistered(t, as, virtualUserLocalpart) + ensureVirtualUserRegistered(t, as, virtualUserLocalpart) backfillRes := backfillBatchHistoricalMessages( t, @@ -393,10 +393,10 @@ func TestBackfillingHistory(t *testing.T) { ) _, historicalEvents := 
getEventsFromBatchSendResponse(t, backfillRes) - messagesRes := remoteCharlie.MustDoRaw(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, nil, "application/json", url.Values{ + messagesRes := remoteCharlie.MustDoFunc(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, client.WithContentType("application/json"), client.WithQueries(url.Values{ "dir": []string{"b"}, "limit": []string{"100"}, - }) + })) must.MatchResponse(t, messagesRes, match.HTTPResponse{ JSON: []match.JSON{ @@ -424,14 +424,14 @@ func TestBackfillingHistory(t *testing.T) { createMessagesInRoom(t, alice, roomID, 3) // Register and join the virtual user - ensureRegistered(t, as, virtualUserLocalpart) + ensureVirtualUserRegistered(t, as, virtualUserLocalpart) // Mimic scrollback to all of the messages // scrollbackMessagesRes - remoteCharlie.MustDoRaw(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, nil, "application/json", url.Values{ + remoteCharlie.MustDoFunc(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, client.WithContentType("application/json"), client.WithQueries(url.Values{ "dir": []string{"b"}, "limit": []string{"100"}, - }) + })) // Historical messages are inserted where we have already scrolled back to backfillRes := backfillBatchHistoricalMessages( @@ -448,10 +448,10 @@ func TestBackfillingHistory(t *testing.T) { ) _, historicalEvents := getEventsFromBatchSendResponse(t, backfillRes) - messagesRes := remoteCharlie.MustDoRaw(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, nil, "application/json", url.Values{ + messagesRes := remoteCharlie.MustDoFunc(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, client.WithContentType("application/json"), client.WithQueries(url.Values{ "dir": []string{"b"}, "limit": []string{"100"}, - }) + })) must.MatchResponse(t, messagesRes, match.HTTPResponse{ JSON: []match.JSON{ @@ -518,36 +518,31 @@ func sendEvent(t *testing.T, c *client.CSAPI, virtualUserID string, roomID strin t.Fatalf("msc2716.sendEvent failed to marshal JSON body: %s", err) } - res := c.MustDoRaw(t, "PUT", []string{"_matrix", "client", "r0", "rooms", roomID, "send", e.Type, txnPrefix + strconv.Itoa(txnID)}, b, "application/json", query) + res := c.MustDoFunc(t, "PUT", []string{"_matrix", "client", "r0", "rooms", roomID, "send", e.Type, txnPrefix + strconv.Itoa(txnID)}, client.WithRawBody(b), client.WithContentType("application/json"), client.WithQueries(query)) body := client.ParseJSON(t, res) eventID := client.GetJSONFieldStr(t, body, "event_id") return eventID } -// ensureRegistered makes sure the user is registered for the homeserver regardless +// ensureVirtualUserRegistered makes sure the user is registered for the homeserver regardless // if they are already registered or not. 
If unable to register, fails the test -func ensureRegistered(t *testing.T, c *client.CSAPI, virtualUserLocalpart string) { +func ensureVirtualUserRegistered(t *testing.T, c *client.CSAPI, virtualUserLocalpart string) { // b, err := json.Marshal(map[string]interface{}{ // "username": virtualUserLocalpart, // }) // if err != nil { - // t.Fatalf("msc2716.ensureRegistered failed to marshal JSON body: %s", err) + // t.Fatalf("msc2716.ensureVirtualUserRegistered failed to marshal JSON body: %s", err) // } - res, err := c.DoWithAuthRaw( + res := c.DoFunc( t, "POST", []string{"_matrix", "client", "r0", "register"}, - json.RawMessage(fmt.Sprintf(`{ "type": "m.login.application_service", "username": "%s" }`, virtualUserLocalpart)), - "application/json", - url.Values{}, + client.WithRawBody(json.RawMessage(fmt.Sprintf(`{ "type": "m.login.application_service", "username": "%s" }`, virtualUserLocalpart))), + client.WithContentType("application/json"), ) - if err != nil { - t.Error(err) - } - if res.StatusCode == 200 { return } @@ -559,7 +554,7 @@ func ensureRegistered(t *testing.T, c *client.CSAPI, virtualUserLocalpart string return } else { errorMessage := client.GetJSONFieldStr(t, body, "error") - t.Fatalf("msc2716.ensureRegistered failed to register: (%s) %s", errcode, errorMessage) + t.Fatalf("msc2716.ensureVirtualUserRegistered failed to register: (%s) %s", errcode, errorMessage) } } @@ -640,15 +635,24 @@ func backfillBatchHistoricalMessages( t.Fatalf("msc2716.backfillBatchHistoricalMessages failed to marshal JSON body: %s", err) } - res = c.MustDoWithStatusRaw( + res = c.DoFunc( t, "POST", []string{"_matrix", "client", "unstable", "org.matrix.msc2716", "rooms", roomID, "batch_send"}, - b, - "application/json", - query, - status, + client.WithRawBody(b), + client.WithContentType("application/json"), + client.WithQueries(query), ) + // Save the body so we can re-create after the buffer closes + body := client.ParseJSON(t, res) + // Since the original body can only be read once, create a new one from the body bytes we just read + res.Body = ioutil.NopCloser(bytes.NewBuffer(body)) + must.MatchResponse(t, res, match.HTTPResponse{ + StatusCode: status, + }) + // After using up the body in the must.MatchResponse above, create the body again + // Since the original body can only be read once, create a new one from the body bytes we just read + res.Body = ioutil.NopCloser(bytes.NewBuffer(body)) chunkCount++ From c03e49e95f233ef3191829d35896b23550edc8cf Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 21 Jun 2021 16:35:27 -0500 Subject: [PATCH 62/81] Fix blueprint name conflict The name conflict would result in the following error when some alice tests ran before the MSC2716 tests. 
``` === RUN TestBackfillingHistory msc2716_test.go:54: Deploy times: 18.499364ms blueprints, 1.581230486s containers msc2716_test.go:60: Deployment.Client - HS name 'hs1' - user ID '@the-bridge-user:hs1' not found ``` --- internal/b/hs_with_application_service.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/b/hs_with_application_service.go b/internal/b/hs_with_application_service.go index 348cd7c3..e3aa059b 100644 --- a/internal/b/hs_with_application_service.go +++ b/internal/b/hs_with_application_service.go @@ -2,7 +2,7 @@ package b // BlueprintHSWithApplicationService who has an application service to interact with var BlueprintHSWithApplicationService = MustValidate(Blueprint{ - Name: "alice", + Name: "hs_with_application_service", Homeservers: []Homeserver{ { Name: "hs1", From 3611002a49ed225fad3cfd400253869f1cb69ec4 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 21 Jun 2021 16:45:26 -0500 Subject: [PATCH 63/81] Skip federation tests --- tests/msc2716_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go index e78df50a..693d46b8 100644 --- a/tests/msc2716_test.go +++ b/tests/msc2716_test.go @@ -307,6 +307,7 @@ func TestBackfillingHistory(t *testing.T) { }) t.Run("Historical messages are visible when joining on federated server", func(t *testing.T) { + t.Skip("Skipping until federation is implemented") t.Parallel() roomID := as.CreateRoom(t, struct{}{}) @@ -354,6 +355,7 @@ func TestBackfillingHistory(t *testing.T) { }) t.Run("Historical messages are visible when already joined on federated server", func(t *testing.T) { + t.Skip("Skipping until federation is implemented") t.Parallel() roomID := as.CreateRoom(t, struct{}{}) @@ -408,6 +410,7 @@ func TestBackfillingHistory(t *testing.T) { }) t.Run("When messages have already been scrolled back through, new historical messages are visible in next scroll back on federated server", func(t *testing.T) { + t.Skip("Skipping until federation is implemented") t.Parallel() roomID := as.CreateRoom(t, struct{}{}) From 29582e547f3a7f59097cf9b72d98c750757948d8 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 21 Jun 2021 17:41:27 -0500 Subject: [PATCH 64/81] Some cleanup --- internal/docker/deployment.go | 2 +- internal/match/json.go | 2 +- tests/msc2716_test.go | 53 ++++------------------------------- 3 files changed, 8 insertions(+), 49 deletions(-) diff --git a/internal/docker/deployment.go b/internal/docker/deployment.go index d36c7d8f..6b303afa 100644 --- a/internal/docker/deployment.go +++ b/internal/docker/deployment.go @@ -31,7 +31,7 @@ type HomeserverDeployment struct { // will print container logs before killing the container. func (d *Deployment) Destroy(t *testing.T) { t.Helper() - d.Deployer.Destroy(d, true) // t.Failed() + d.Deployer.Destroy(d, t.Failed()) } // Client returns a CSAPI client targeting the given hsName, using the access token for the given userID. diff --git a/internal/match/json.go b/internal/match/json.go index b0c73026..4a2a1133 100644 --- a/internal/match/json.go +++ b/internal/match/json.go @@ -126,7 +126,7 @@ func jsonCheckOffInternal(wantKey string, wantItems []interface{}, allowUnwanted // it's an object). 
// // Usage: (ensures `events` has these events in any order, with the right event type) -// JSONCheckOff("events", []interface{}{"$foo:bar", "$baz:quuz"}, func(r gjson.Result) interface{} { +// JSONCheckOffAllowUnwanted("events", []interface{}{"$foo:bar", "$baz:quuz"}, func(r gjson.Result) interface{} { // return r.Get("event_id").Str // }, func(eventID interface{}, eventBody gjson.Result) error { // if eventBody.Get("type").Str != "m.room.message" { diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go index 693d46b8..2df46b83 100644 --- a/tests/msc2716_test.go +++ b/tests/msc2716_test.go @@ -21,7 +21,6 @@ import ( "github.com/matrix-org/complement/internal/client" "github.com/matrix-org/complement/internal/match" "github.com/matrix-org/complement/internal/must" - "github.com/sirupsen/logrus" "github.com/tidwall/gjson" ) @@ -53,7 +52,6 @@ var ( func TestBackfillingHistory(t *testing.T) { deployment := Deploy(t, b.BlueprintHSWithApplicationService) defer deployment.Destroy(t) - //defer time.Sleep(2 * time.Hour) // Create the application service bridge user that is able to backfill messages asUserID := "@the-bridge-user:hs1" @@ -96,8 +94,6 @@ func TestBackfillingHistory(t *testing.T) { // Register and join the virtual user ensureVirtualUserRegistered(t, as, virtualUserLocalpart) - // TODO: Try adding avatar and displayName and see if historical messages get this info - // Insert the most recent chunk of backfilled history backfillRes := backfillBatchHistoricalMessages( t, @@ -147,15 +143,6 @@ func TestBackfillingHistory(t *testing.T) { // Since the original body can only be read once, create a new one from the body bytes we just read messagesRes.Body = ioutil.NopCloser(bytes.NewBuffer(messsageResBody)) - // TODO: Remove, the context request is just for TARDIS visualizations - contextRes := alice.MustDoFunc(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "context", eventsAfter[len(eventsAfter)-1]}, client.WithContentType("application/json"), client.WithQueries(url.Values{ - "limit": []string{"100"}, - })) - contextResBody := client.ParseJSON(t, contextRes) - logrus.WithFields(logrus.Fields{ - "contextResBody": string(contextResBody), - }).Error("context res") - // Copy the array by value so we can modify it as we iterate in the foreach loop. // We save the full untouched `expectedMessageOrder` for use in the log messages workingExpectedMessageOrder := expectedMessageOrder @@ -180,7 +167,7 @@ func TestBackfillingHistory(t *testing.T) { }) }) - t.Run("Backfilled historical events with MSC2716_HISTORICAL do not come down /sync", func(t *testing.T) { + t.Run("Backfilled historical events with MSC2716_HISTORICAL do not come down in an incremental sync", func(t *testing.T) { t.Parallel() roomID := as.CreateRoom(t, struct{}{}) @@ -226,39 +213,6 @@ func TestBackfillingHistory(t *testing.T) { }) }) - t.Run("Backfilled historical events without MSC2716_HISTORICAL come down /sync", func(t *testing.T) { - t.Parallel() - - roomID := as.CreateRoom(t, struct{}{}) - alice.JoinRoom(t, roomID, nil) - - eventsBefore := createMessagesInRoom(t, alice, roomID, 1) - eventBefore := eventsBefore[0] - timeAfterEventBefore := time.Now() - insertOriginServerTs := uint64(timeAfterEventBefore.UnixNano() / int64(time.Millisecond)) - - // Send an event that has `prev_event` and `ts` set but not `MSC2716_HISTORICAL`. 
- // We should see these types of events in the `/sync` response - eventWeShouldSee := sendEvent(t, as, "", roomID, event{ - Type: "m.room.message", - PrevEvents: []string{ - eventBefore, - }, - OriginServerTS: insertOriginServerTs, - Content: map[string]interface{}{ - "msgtype": "m.text", - "body": "Message with prev_event and ts but no MSC2716_HISTORICAL", - // This is commented out on purpose. - // We are explicitly testing when MSC2716_HISTORICAL isn't present - //MSC2716_HISTORICAL: true, - }, - }) - - alice.SyncUntilTimelineHas(t, roomID, func(r gjson.Result) bool { - return r.Get("event_id").Str == eventWeShouldSee - }) - }) - t.Run("Unrecognised prev_event ID will throw an error", func(t *testing.T) { t.Parallel() @@ -306,6 +260,11 @@ func TestBackfillingHistory(t *testing.T) { ) }) + t.Run("TODO: Test if historical avatar/display name set back in time are picked up on historical messages", func(t *testing.T) { + t.Skip("Skipping until implemented") + // TODO: Try adding avatar and displayName and see if historical messages get this info + }) + t.Run("Historical messages are visible when joining on federated server", func(t *testing.T) { t.Skip("Skipping until federation is implemented") t.Parallel() From 603fd8f63195a51d9ea0caa4b709373a5d5c5591 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Fri, 25 Jun 2021 17:21:49 -0500 Subject: [PATCH 65/81] Remove some unused bits --- dockerfiles/synapse/homeserver.yaml | 6 ---- tests/msc2716_test.go | 49 +++-------------------------- 2 files changed, 4 insertions(+), 51 deletions(-) diff --git a/dockerfiles/synapse/homeserver.yaml b/dockerfiles/synapse/homeserver.yaml index 8abf7e20..fab6922e 100644 --- a/dockerfiles/synapse/homeserver.yaml +++ b/dockerfiles/synapse/homeserver.yaml @@ -93,12 +93,6 @@ rc_joins: federation_rr_transactions_per_room_per_second: 9999 - -## Registration ## - -enable_registration: True -registration_shared_secret: "$FZjMa&9fAAi9Xf[F)jAY[C#y?QwT[!qnBi+:ZLj.-)zVf]:C39H4Y99c$LPCh}{" - ## API Configuration ## # A list of application service config files to use diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go index 2df46b83..25adfa50 100644 --- a/tests/msc2716_test.go +++ b/tests/msc2716_test.go @@ -13,7 +13,6 @@ import ( "io/ioutil" "net/http" "net/url" - "strconv" "testing" "time" @@ -35,7 +34,7 @@ type event struct { // This is configurable because it can be nice to change it to `time.Second` while // checking out the test result in a Synapse instance -const TimeBetweenMessages = time.Millisecond +const timeBetweenMessages = time.Millisecond var ( MSC2716_INSERTION = "org.matrix.msc2716.insertion" @@ -85,7 +84,7 @@ func TestBackfillingHistory(t *testing.T) { numHistoricalMessages := 6 // wait X number of ms to ensure that the timestamp changes enough for each of the messages we try to backfill later - time.Sleep(time.Duration(numHistoricalMessages) * TimeBetweenMessages) + time.Sleep(time.Duration(numHistoricalMessages) * timeBetweenMessages) // Create some events after. 
// Fill up the buffer so we have to scrollback to the inserted history later @@ -101,7 +100,7 @@ func TestBackfillingHistory(t *testing.T) { virtualUserID, roomID, eventBefore, - timeAfterEventBefore.Add(TimeBetweenMessages*3), + timeAfterEventBefore.Add(timeBetweenMessages*3), "", 3, // Status @@ -454,49 +453,9 @@ func getEventIDsFromResponseBody(t *testing.T, body []byte) (eventIDsFromRespons return eventIDsFromResponse } -var txnID int = 0 - -// The transactions need to be prefixed so they don't collide with the txnID in client.go -var txnPrefix string = "msc2716-txn" - -func sendEvent(t *testing.T, c *client.CSAPI, virtualUserID string, roomID string, e event) string { - txnID++ - - query := make(url.Values, len(e.PrevEvents)) - for _, prevEvent := range e.PrevEvents { - query.Add("prev_event", prevEvent) - } - - if e.OriginServerTS != 0 { - query.Add("ts", strconv.FormatUint(e.OriginServerTS, 10)) - } - - if virtualUserID != "" { - query.Add("user_id", virtualUserID) - } - - b, err := json.Marshal(e.Content) - if err != nil { - t.Fatalf("msc2716.sendEvent failed to marshal JSON body: %s", err) - } - - res := c.MustDoFunc(t, "PUT", []string{"_matrix", "client", "r0", "rooms", roomID, "send", e.Type, txnPrefix + strconv.Itoa(txnID)}, client.WithRawBody(b), client.WithContentType("application/json"), client.WithQueries(query)) - body := client.ParseJSON(t, res) - eventID := client.GetJSONFieldStr(t, body, "event_id") - - return eventID -} - // ensureVirtualUserRegistered makes sure the user is registered for the homeserver regardless // if they are already registered or not. If unable to register, fails the test func ensureVirtualUserRegistered(t *testing.T, c *client.CSAPI, virtualUserLocalpart string) { - // b, err := json.Marshal(map[string]interface{}{ - // "username": virtualUserLocalpart, - // }) - // if err != nil { - // t.Fatalf("msc2716.ensureVirtualUserRegistered failed to marshal JSON body: %s", err) - // } - res := c.DoFunc( t, "POST", @@ -553,7 +512,7 @@ func backfillBatchHistoricalMessages( // Timestamp in milliseconds insertOriginServerTs := uint64(insertTime.UnixNano() / int64(time.Millisecond)) - timeBetweenMessagesMS := uint64(TimeBetweenMessages / time.Millisecond) + timeBetweenMessagesMS := uint64(timeBetweenMessages / time.Millisecond) evs := make([]map[string]interface{}, count) for i := 0; i < len(evs); i++ { From b6b615d048b1bea199083ebd5e4365ac88351280 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Fri, 25 Jun 2021 20:02:27 -0500 Subject: [PATCH 66/81] Rename to eventIDs and better comments --- internal/client/client.go | 2 +- tests/msc2716_test.go | 166 ++++++++++++++++++++------------------ 2 files changed, 89 insertions(+), 79 deletions(-) diff --git a/internal/client/client.go b/internal/client/client.go index 86e6a7c8..0fe79b69 100644 --- a/internal/client/client.go +++ b/internal/client/client.go @@ -371,7 +371,7 @@ func GetJSONFieldStr(t *testing.T, body []byte, wantKey string) string { return res.Str } -func GetJSONFieldArray(t *testing.T, body []byte, wantKey string) []string { +func GetJSONFieldStringArray(t *testing.T, body []byte, wantKey string) []string { t.Helper() res := gjson.GetBytes(body, wantKey) diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go index 25adfa50..05ecc3fb 100644 --- a/tests/msc2716_test.go +++ b/tests/msc2716_test.go @@ -37,14 +37,14 @@ type event struct { const timeBetweenMessages = time.Millisecond var ( - MSC2716_INSERTION = "org.matrix.msc2716.insertion" - MSC2716_MARKER = "org.matrix.msc2716.marker" - - 
MSC2716_HISTORICAL = "org.matrix.msc2716.historical" - MSC2716_NEXT_CHUNK_ID = "org.matrix.msc2716.next_chunk_id" - MSC2716_CHUNK_ID = "org.matrix.msc2716.chunk_id" - MSC2716_MARKER_INSERTION = "org.matrix.msc2716.marker.insertion" - MSC2716_MARKER_INSERTION_PREV_EVENTS = "org.matrix.msc2716.marker.insertion_prev_events" + insertionEventType = "org.matrix.msc2716.insertion" + markerEventType = "org.matrix.msc2716.marker" + + historicalContentField = "org.matrix.msc2716.historical" + nextChunkIdContentField = "org.matrix.msc2716.next_chunk_id" + chunkIdContentField = "org.matrix.msc2716.chunk_id" + markerInsertionContentField = "org.matrix.msc2716.marker.insertion" + markerInsertionPrevEventsContentField = "org.matrix.msc2716.marker.insertion_prev_events" ) // Test that the message events we insert between A and B come back in the correct order from /messages @@ -68,6 +68,9 @@ func TestBackfillingHistory(t *testing.T) { virtualUserID := fmt.Sprintf("@%s:hs1", virtualUserLocalpart) t.Run("parallel", func(t *testing.T) { + // Final timeline output: ( [n] = historical chunk ) + // (oldest) A, B, [c, d, e] [f, g, h], I, J (newest) + // chunk 1 chunk 0 t.Run("Backfilled historical events resolve with proper state in correct order", func(t *testing.T) { t.Parallel() @@ -77,18 +80,25 @@ func TestBackfillingHistory(t *testing.T) { }) alice.JoinRoom(t, roomID, nil) - // Create the "live" event we are going to insert our backfilled events next to - eventsBefore := createMessagesInRoom(t, alice, roomID, 2) - eventBefore := eventsBefore[len(eventsBefore)-1] + // Create some normal messages in the timeline. We're creating them in + // two batches so we can create some time in between where we are going + // to backfill. + // + // Create the first batch including the "live" event we are going to + // insert our backfilled events next to. + eventIDsBefore := createMessagesInRoom(t, alice, roomID, 2) + eventIdBefore := eventIDsBefore[len(eventIDsBefore)-1] timeAfterEventBefore := time.Now() + // wait X number of ms to ensure that the timestamp changes enough for + // each of the messages we try to backfill later numHistoricalMessages := 6 - // wait X number of ms to ensure that the timestamp changes enough for each of the messages we try to backfill later time.Sleep(time.Duration(numHistoricalMessages) * timeBetweenMessages) - // Create some events after. - // Fill up the buffer so we have to scrollback to the inserted history later - eventsAfter := createMessagesInRoom(t, alice, roomID, 2) + // Create the second batch of events. + // This will also fill up the buffer so we have to scrollback to the + // inserted history later. + eventIDsAfter := createMessagesInRoom(t, alice, roomID, 2) // Register and join the virtual user ensureVirtualUserRegistered(t, as, virtualUserLocalpart) @@ -99,14 +109,14 @@ func TestBackfillingHistory(t *testing.T) { as, virtualUserID, roomID, - eventBefore, + eventIdBefore, timeAfterEventBefore.Add(timeBetweenMessages*3), "", 3, // Status 200, ) - _, historicalEvents := getEventsFromBatchSendResponse(t, backfillRes) + _, historicalEventIDs := getEventsFromBatchSendResponse(t, backfillRes) nextChunkID := getNextChunkIdFromBatchSendResponse(t, backfillRes) // Insert another older chunk of backfilled history from the same user. 
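The chunk linking above works as follows: the first `batch_send` response returns a `next_chunk_id`, and the request for the next (older) chunk echoes that value back via the `chunk_id` query parameter so the older chunk attaches to the first chunk's insertion event. A runnable sketch of just the two request URLs, mirroring the query parameters these tests use (room, user, and event IDs are placeholders):

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	base := "/_matrix/client/unstable/org.matrix.msc2716/rooms/" +
		url.PathEscape("!room:hs1") + "/batch_send"

	// First chunk: no chunk_id, so the server creates a fresh insertion
	// point next to the prev_event anchor.
	q := url.Values{}
	q.Add("prev_event", "$live-event-id")
	q.Add("user_id", "@virtual-user:hs1")
	fmt.Println(base + "?" + q.Encode())

	// Older chunk: pass the next_chunk_id returned by the first response
	// as chunk_id, chaining the two chunks into one continuous history.
	q.Set("chunk_id", "next-chunk-id-from-first-response")
	fmt.Println(base + "?" + q.Encode())
}
```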
@@ -116,22 +126,22 @@ func TestBackfillingHistory(t *testing.T) { as, virtualUserID, roomID, - eventBefore, + eventIdBefore, timeAfterEventBefore, nextChunkID, 3, // Status 200, ) - _, historicalEvents2 := getEventsFromBatchSendResponse(t, backfillRes2) + _, historicalEventIDs2 := getEventsFromBatchSendResponse(t, backfillRes2) - var expectedMessageOrder []string - expectedMessageOrder = append(expectedMessageOrder, eventsBefore...) - expectedMessageOrder = append(expectedMessageOrder, historicalEvents2...) - expectedMessageOrder = append(expectedMessageOrder, historicalEvents...) - expectedMessageOrder = append(expectedMessageOrder, eventsAfter...) + var expectedEventIDOrder []string + expectedEventIDOrder = append(expectedEventIDOrder, eventIDsBefore...) + expectedEventIDOrder = append(expectedEventIDOrder, historicalEventIDs2...) + expectedEventIDOrder = append(expectedEventIDOrder, historicalEventIDs...) + expectedEventIDOrder = append(expectedEventIDOrder, eventIDsAfter...) // Order events from newest to oldest - expectedMessageOrder = reversed(expectedMessageOrder) + expectedEventIDOrder = reversed(expectedEventIDOrder) messagesRes := alice.MustDoFunc(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, client.WithContentType("application/json"), client.WithQueries(url.Values{ "dir": []string{"b"}, @@ -143,20 +153,20 @@ func TestBackfillingHistory(t *testing.T) { messagesRes.Body = ioutil.NopCloser(bytes.NewBuffer(messsageResBody)) // Copy the array by value so we can modify it as we iterate in the foreach loop. - // We save the full untouched `expectedMessageOrder` for use in the log messages - workingExpectedMessageOrder := expectedMessageOrder + // We save the full untouched `expectedEventIDOrder` for use in the log messages + workingExpectedEventIDOrder := expectedEventIDOrder must.MatchResponse(t, messagesRes, match.HTTPResponse{ JSON: []match.JSON{ match.JSONArrayEach("chunk", func(r gjson.Result) error { // Find all events in order - if len(r.Get("content").Get("body").Str) > 0 || r.Get("type").Str == MSC2716_INSERTION || r.Get("type").Str == MSC2716_MARKER { + if len(r.Get("content").Get("body").Str) > 0 || r.Get("type").Str == insertionEventType || r.Get("type").Str == markerEventType { // Pop the next message off the expected list - nextEventInOrder := workingExpectedMessageOrder[0] - workingExpectedMessageOrder = workingExpectedMessageOrder[1:] + nextEventIdInOrder := workingExpectedEventIDOrder[0] + workingExpectedEventIDOrder = workingExpectedEventIDOrder[1:] - if r.Get("event_id").Str != nextEventInOrder { - return fmt.Errorf("Next event found was %s but expected %s\nActualEvents (%d): %v\nExpectedEvents (%d): %v", r.Get("event_id").Str, nextEventInOrder, len(eventIDsFromResponse), eventIDsFromResponse, len(expectedMessageOrder), expectedMessageOrder) + if r.Get("event_id").Str != nextEventIdInOrder { + return fmt.Errorf("Next event found was %s but expected %s\nActualEvents (%d): %v\nExpectedEvents (%d): %v", r.Get("event_id").Str, nextEventIdInOrder, len(eventIDsFromResponse), eventIDsFromResponse, len(expectedEventIDOrder), expectedEventIDOrder) } } @@ -166,15 +176,15 @@ func TestBackfillingHistory(t *testing.T) { }) }) - t.Run("Backfilled historical events with MSC2716_HISTORICAL do not come down in an incremental sync", func(t *testing.T) { + t.Run("Backfilled historical events with m.historical do not come down in an incremental sync", func(t *testing.T) { t.Parallel() roomID := as.CreateRoom(t, struct{}{}) alice.JoinRoom(t, roomID, nil) // 
Create the "live" event we are going to insert our backfilled events next to - eventsBefore := createMessagesInRoom(t, alice, roomID, 1) - eventBefore := eventsBefore[0] + eventIDsBefore := createMessagesInRoom(t, alice, roomID, 1) + eventIdBefore := eventIDsBefore[0] timeAfterEventBefore := time.Now() // Create some "live" events to saturate and fill up the /sync response @@ -186,29 +196,29 @@ func TestBackfillingHistory(t *testing.T) { as, virtualUserID, roomID, - eventBefore, + eventIdBefore, timeAfterEventBefore, "", 1, // Status 200, ) - _, historicalEvents := getEventsFromBatchSendResponse(t, backfillRes) - backfilledEvent := historicalEvents[0] + _, historicalEventIDs := getEventsFromBatchSendResponse(t, backfillRes) + backfilledEventId := historicalEventIDs[0] - // This is just a dummy event we search for after the backfilledEvent - eventsAfterBackfill := createMessagesInRoom(t, alice, roomID, 1) - eventAfterBackfill := eventsAfterBackfill[0] + // This is just a dummy event we search for after the backfilledEventId + eventIDsAfterBackfill := createMessagesInRoom(t, alice, roomID, 1) + eventIdAfterBackfill := eventIDsAfterBackfill[0] - // Sync until we find the eventAfterBackfill. If we're able to see the eventAfterBackfill - // that occurs after the backfilledEvent without seeing eventAfterBackfill in between, + // Sync until we find the eventIdAfterBackfill. If we're able to see the eventIdAfterBackfill + // that occurs after the backfilledEventId without seeing eventIdAfterBackfill in between, // we're probably safe to assume it won't sync alice.SyncUntil(t, "", `{ "room": { "timeline": { "limit": 3 } } }`, "rooms.join."+client.GjsonEscape(roomID)+".timeline.events", func(r gjson.Result) bool { - if r.Get("event_id").Str == backfilledEvent { - t.Fatalf("We should not see the %s backfilled event in /sync response but it was present", backfilledEvent) + if r.Get("event_id").Str == backfilledEventId { + t.Fatalf("We should not see the %s backfilled event in /sync response but it was present", backfilledEventId) } - return r.Get("event_id").Str == eventAfterBackfill + return r.Get("event_id").Str == eventIdAfterBackfill }) }) @@ -240,8 +250,8 @@ func TestBackfillingHistory(t *testing.T) { roomID := as.CreateRoom(t, struct{}{}) alice.JoinRoom(t, roomID, nil) - eventsBefore := createMessagesInRoom(t, alice, roomID, 1) - eventBefore := eventsBefore[0] + eventIDsBefore := createMessagesInRoom(t, alice, roomID, 1) + eventIdBefore := eventIDsBefore[0] timeAfterEventBefore := time.Now() backfillBatchHistoricalMessages( @@ -249,7 +259,7 @@ func TestBackfillingHistory(t *testing.T) { alice, virtualUserID, roomID, - eventBefore, + eventIdBefore, timeAfterEventBefore, "", 1, @@ -271,11 +281,11 @@ func TestBackfillingHistory(t *testing.T) { roomID := as.CreateRoom(t, struct{}{}) alice.JoinRoom(t, roomID, nil) - eventsBefore := createMessagesInRoom(t, alice, roomID, 1) - eventBefore := eventsBefore[0] + eventIDsBefore := createMessagesInRoom(t, alice, roomID, 1) + eventIdBefore := eventIDsBefore[0] timeAfterEventBefore := time.Now() - // eventsAfter + // eventIDsAfter createMessagesInRoom(t, alice, roomID, 3) // Register and join the virtual user @@ -286,14 +296,14 @@ func TestBackfillingHistory(t *testing.T) { as, virtualUserID, roomID, - eventBefore, + eventIdBefore, timeAfterEventBefore, "", 2, // Status 200, ) - _, historicalEvents := getEventsFromBatchSendResponse(t, backfillRes) + _, historicalEventIDs := getEventsFromBatchSendResponse(t, backfillRes) // Join the room from a remote 
homeserver after the backfilled messages were sent remoteCharlie.JoinRoom(t, roomID, []string{"hs1"}) @@ -305,7 +315,7 @@ func TestBackfillingHistory(t *testing.T) { must.MatchResponse(t, messagesRes, match.HTTPResponse{ JSON: []match.JSON{ - match.JSONCheckOffAllowUnwanted("chunk", []interface{}{historicalEvents[0], historicalEvents[1]}, func(r gjson.Result) interface{} { + match.JSONCheckOffAllowUnwanted("chunk", []interface{}{historicalEventIDs[0], historicalEventIDs[1]}, func(r gjson.Result) interface{} { return r.Get("event_id").Str }, nil), }, @@ -322,11 +332,11 @@ func TestBackfillingHistory(t *testing.T) { // Join the room from a remote homeserver before any backfilled messages are sent remoteCharlie.JoinRoom(t, roomID, []string{"hs1"}) - eventsBefore := createMessagesInRoom(t, alice, roomID, 1) - eventBefore := eventsBefore[0] + eventIDsBefore := createMessagesInRoom(t, alice, roomID, 1) + eventIdBefore := eventIDsBefore[0] timeAfterEventBefore := time.Now() - // eventsAfter + // eventIDsAfter createMessagesInRoom(t, alice, roomID, 10) // Mimic scrollback just through the latest messages @@ -344,14 +354,14 @@ func TestBackfillingHistory(t *testing.T) { as, virtualUserID, roomID, - eventBefore, + eventIdBefore, timeAfterEventBefore, "", 2, // Status 200, ) - _, historicalEvents := getEventsFromBatchSendResponse(t, backfillRes) + _, historicalEventIDs := getEventsFromBatchSendResponse(t, backfillRes) messagesRes := remoteCharlie.MustDoFunc(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, client.WithContentType("application/json"), client.WithQueries(url.Values{ "dir": []string{"b"}, @@ -360,7 +370,7 @@ func TestBackfillingHistory(t *testing.T) { must.MatchResponse(t, messagesRes, match.HTTPResponse{ JSON: []match.JSON{ - match.JSONCheckOffAllowUnwanted("chunk", []interface{}{historicalEvents[0], historicalEvents[1]}, func(r gjson.Result) interface{} { + match.JSONCheckOffAllowUnwanted("chunk", []interface{}{historicalEventIDs[0], historicalEventIDs[1]}, func(r gjson.Result) interface{} { return r.Get("event_id").Str }, nil), }, @@ -377,11 +387,11 @@ func TestBackfillingHistory(t *testing.T) { // Join the room from a remote homeserver before any backfilled messages are sent remoteCharlie.JoinRoom(t, roomID, []string{"hs1"}) - eventsBefore := createMessagesInRoom(t, alice, roomID, 1) - eventBefore := eventsBefore[0] + eventIDsBefore := createMessagesInRoom(t, alice, roomID, 1) + eventIdBefore := eventIDsBefore[0] timeAfterEventBefore := time.Now() - // eventsAfter + // eventIDsAfter createMessagesInRoom(t, alice, roomID, 3) // Register and join the virtual user @@ -400,14 +410,14 @@ func TestBackfillingHistory(t *testing.T) { as, virtualUserID, roomID, - eventBefore, + eventIdBefore, timeAfterEventBefore, "", 2, // Status 200, ) - _, historicalEvents := getEventsFromBatchSendResponse(t, backfillRes) + _, historicalEventIDs := getEventsFromBatchSendResponse(t, backfillRes) messagesRes := remoteCharlie.MustDoFunc(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, client.WithContentType("application/json"), client.WithQueries(url.Values{ "dir": []string{"b"}, @@ -416,7 +426,7 @@ func TestBackfillingHistory(t *testing.T) { must.MatchResponse(t, messagesRes, match.HTTPResponse{ JSON: []match.JSON{ - match.JSONCheckOffAllowUnwanted("chunk", []interface{}{historicalEvents[0], historicalEvents[1]}, func(r gjson.Result) interface{} { + match.JSONCheckOffAllowUnwanted("chunk", []interface{}{historicalEventIDs[0], historicalEventIDs[1]}, func(r 
gjson.Result) interface{} { return r.Get("event_id").Str }, nil), }, @@ -444,7 +454,7 @@ func getEventIDsFromResponseBody(t *testing.T, body []byte) (eventIDsFromRespons } res.ForEach(func(key, r gjson.Result) bool { - if len(r.Get("content").Get("body").Str) > 0 || r.Get("type").Str == MSC2716_INSERTION || r.Get("type").Str == MSC2716_MARKER { + if len(r.Get("content").Get("body").Str) > 0 || r.Get("type").Str == insertionEventType || r.Get("type").Str == markerEventType { eventIDsFromResponse = append(eventIDsFromResponse, r.Get("event_id").Str+" ("+r.Get("content").Get("body").Str+")") } return true @@ -479,9 +489,9 @@ func ensureVirtualUserRegistered(t *testing.T, c *client.CSAPI, virtualUserLocal } } -func createMessagesInRoom(t *testing.T, c *client.CSAPI, roomID string, count int) []string { - evs := make([]string, count) - for i := 0; i < len(evs); i++ { +func createMessagesInRoom(t *testing.T, c *client.CSAPI, roomID string, count int) (eventIDs []string) { + eventIDs = make([]string, count) + for i := 0; i < len(eventIDs); i++ { newEvent := b.Event{ Type: "m.room.message", Content: map[string]interface{}{ @@ -490,10 +500,10 @@ func createMessagesInRoom(t *testing.T, c *client.CSAPI, roomID string, count in }, } newEventId := c.SendEventSynced(t, roomID, newEvent) - evs[i] = newEventId + eventIDs[i] = newEventId } - return evs + return eventIDs } var chunkCount int64 = 0 @@ -521,9 +531,9 @@ func backfillBatchHistoricalMessages( "sender": virtualUserID, "origin_server_ts": insertOriginServerTs + (timeBetweenMessagesMS * uint64(i)), "content": map[string]interface{}{ - "msgtype": "m.text", - "body": fmt.Sprintf("Historical %d (chunk=%d)", i, chunkCount), - MSC2716_HISTORICAL: true, + "msgtype": "m.text", + "body": fmt.Sprintf("Historical %d (chunk=%d)", i, chunkCount), + historicalContentField: true, }, } @@ -585,8 +595,8 @@ func getEventsFromBatchSendResponse(t *testing.T, res *http.Response) (stateEven // Since the original body can only be read once, create a new one from the body bytes we just read res.Body = ioutil.NopCloser(bytes.NewBuffer(body)) - stateEventsIDs = client.GetJSONFieldArray(t, body, "state_events") - eventIDs = client.GetJSONFieldArray(t, body, "events") + stateEventsIDs = client.GetJSONFieldStringArray(t, body, "state_events") + eventIDs = client.GetJSONFieldStringArray(t, body, "events") return stateEventsIDs, eventIDs } From 9a3da4e75176a7ed116b857344a7f0990930b3d0 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Sun, 27 Jun 2021 21:50:30 -0500 Subject: [PATCH 67/81] Fix assertion when no events are returned and other nits --- tests/msc2716_test.go | 56 +++++++++++++++++++++++-------------------- 1 file changed, 30 insertions(+), 26 deletions(-) diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go index 05ecc3fb..0f89c8dc 100644 --- a/tests/msc2716_test.go +++ b/tests/msc2716_test.go @@ -8,7 +8,6 @@ package tests import ( "bytes" - "encoding/json" "fmt" "io/ioutil" "net/http" @@ -69,8 +68,8 @@ func TestBackfillingHistory(t *testing.T) { t.Run("parallel", func(t *testing.T) { // Final timeline output: ( [n] = historical chunk ) - // (oldest) A, B, [c, d, e] [f, g, h], I, J (newest) - // chunk 1 chunk 0 + // (oldest) A, B, [insertion, c, d, e] [insertion, f, g, h, insertion], I, J (newest) + // chunk 1 chunk 0 t.Run("Backfilled historical events resolve with proper state in correct order", func(t *testing.T) { t.Parallel() @@ -116,7 +115,7 @@ func TestBackfillingHistory(t *testing.T) { // Status 200, ) - _, historicalEventIDs := 
getEventsFromBatchSendResponse(t, backfillRes)
+	historicalEventIDs := getEventsFromBatchSendResponse(t, backfillRes)
 	nextChunkID := getNextChunkIdFromBatchSendResponse(t, backfillRes)
 
 	// Insert another older chunk of backfilled history from the same user.
@@ -133,7 +132,7 @@ func TestBackfillingHistory(t *testing.T) {
 		// Status
 		200,
 	)
-	_, historicalEventIDs2 := getEventsFromBatchSendResponse(t, backfillRes2)
+	historicalEventIDs2 := getEventsFromBatchSendResponse(t, backfillRes2)
 
 	var expectedEventIDOrder []string
 	expectedEventIDOrder = append(expectedEventIDOrder, eventIDsBefore...)
@@ -143,16 +142,21 @@ func TestBackfillingHistory(t *testing.T) {
 	// Order events from newest to oldest
 	expectedEventIDOrder = reversed(expectedEventIDOrder)
 
+	// 2 eventIDsBefore + 6 historical events + 3 insertion events + 2 eventIDsAfter
+	if len(expectedEventIDOrder) != 13 {
+		t.Fatalf("Expected eventID list should be length 13 but saw %d: %s", len(expectedEventIDOrder), expectedEventIDOrder)
+	}
+
 	messagesRes := alice.MustDoFunc(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, client.WithContentType("application/json"), client.WithQueries(url.Values{
 		"dir":   []string{"b"},
 		"limit": []string{"100"},
 	}))
 	messsageResBody := client.ParseJSON(t, messagesRes)
-	eventIDsFromResponse := getEventIDsFromResponseBody(t, messsageResBody)
+	eventDebugStringsFromResponse := getRelevantEventDebugStringsFromMessagesResponse(t, messsageResBody)
 	// Since the original body can only be read once, create a new one from the body bytes we just read
 	messagesRes.Body = ioutil.NopCloser(bytes.NewBuffer(messsageResBody))
 
-	// Copy the array by value so we can modify it as we iterate in the foreach loop.
+	// Copy the slice header so we can consume the working copy as we iterate in the loop below.
// We save the full untouched `expectedEventIDOrder` for use in the log messages workingExpectedEventIDOrder := expectedEventIDOrder @@ -166,7 +170,7 @@ func TestBackfillingHistory(t *testing.T) { workingExpectedEventIDOrder = workingExpectedEventIDOrder[1:] if r.Get("event_id").Str != nextEventIdInOrder { - return fmt.Errorf("Next event found was %s but expected %s\nActualEvents (%d): %v\nExpectedEvents (%d): %v", r.Get("event_id").Str, nextEventIdInOrder, len(eventIDsFromResponse), eventIDsFromResponse, len(expectedEventIDOrder), expectedEventIDOrder) + return fmt.Errorf("Next event found was %s but expected %s\nActualEvents (%d): %v\nExpectedEvents (%d): %v", r.Get("event_id").Str, nextEventIdInOrder, len(eventDebugStringsFromResponse), eventDebugStringsFromResponse, len(expectedEventIDOrder), expectedEventIDOrder) } } @@ -174,6 +178,10 @@ func TestBackfillingHistory(t *testing.T) { }), }, }) + + if len(workingExpectedEventIDOrder) != 0 { + t.Fatalf("Expected all events to be matched in message response but there were some left-over events: %s", workingExpectedEventIDOrder) + } }) t.Run("Backfilled historical events with m.historical do not come down in an incremental sync", func(t *testing.T) { @@ -203,7 +211,7 @@ func TestBackfillingHistory(t *testing.T) { // Status 200, ) - _, historicalEventIDs := getEventsFromBatchSendResponse(t, backfillRes) + historicalEventIDs := getEventsFromBatchSendResponse(t, backfillRes) backfilledEventId := historicalEventIDs[0] // This is just a dummy event we search for after the backfilledEventId @@ -303,7 +311,7 @@ func TestBackfillingHistory(t *testing.T) { // Status 200, ) - _, historicalEventIDs := getEventsFromBatchSendResponse(t, backfillRes) + historicalEventIDs := getEventsFromBatchSendResponse(t, backfillRes) // Join the room from a remote homeserver after the backfilled messages were sent remoteCharlie.JoinRoom(t, roomID, []string{"hs1"}) @@ -361,7 +369,7 @@ func TestBackfillingHistory(t *testing.T) { // Status 200, ) - _, historicalEventIDs := getEventsFromBatchSendResponse(t, backfillRes) + historicalEventIDs := getEventsFromBatchSendResponse(t, backfillRes) messagesRes := remoteCharlie.MustDoFunc(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, client.WithContentType("application/json"), client.WithQueries(url.Values{ "dir": []string{"b"}, @@ -417,7 +425,7 @@ func TestBackfillingHistory(t *testing.T) { // Status 200, ) - _, historicalEventIDs := getEventsFromBatchSendResponse(t, backfillRes) + historicalEventIDs := getEventsFromBatchSendResponse(t, backfillRes) messagesRes := remoteCharlie.MustDoFunc(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, client.WithContentType("application/json"), client.WithQueries(url.Values{ "dir": []string{"b"}, @@ -443,7 +451,9 @@ func reversed(in []string) []string { return out } -func getEventIDsFromResponseBody(t *testing.T, body []byte) (eventIDsFromResponse []string) { +func getRelevantEventDebugStringsFromMessagesResponse(t *testing.T, body []byte) (eventIDsFromResponse []string) { + t.Helper() + wantKey := "chunk" res := gjson.GetBytes(body, wantKey) if !res.Exists() { @@ -470,7 +480,7 @@ func ensureVirtualUserRegistered(t *testing.T, c *client.CSAPI, virtualUserLocal t, "POST", []string{"_matrix", "client", "r0", "register"}, - client.WithRawBody(json.RawMessage(fmt.Sprintf(`{ "type": "m.login.application_service", "username": "%s" }`, virtualUserLocalpart))), + client.WithJSONBody(t, map[string]interface{}{"type": "m.login.application_service", 
"username": virtualUserLocalpart}), client.WithContentType("application/json"), ) @@ -558,19 +568,14 @@ func backfillBatchHistoricalMessages( query.Add("chunk_id", chunkID) } - b, err := json.Marshal(map[string]interface{}{ - "events": evs, - "state_events_at_start": []map[string]interface{}{joinEvent}, - }) - if err != nil { - t.Fatalf("msc2716.backfillBatchHistoricalMessages failed to marshal JSON body: %s", err) - } - res = c.DoFunc( t, "POST", []string{"_matrix", "client", "unstable", "org.matrix.msc2716", "rooms", roomID, "batch_send"}, - client.WithRawBody(b), + client.WithJSONBody(t, map[string]interface{}{ + "events": evs, + "state_events_at_start": []map[string]interface{}{joinEvent}, + }), client.WithContentType("application/json"), client.WithQueries(query), ) @@ -590,15 +595,14 @@ func backfillBatchHistoricalMessages( return res } -func getEventsFromBatchSendResponse(t *testing.T, res *http.Response) (stateEventsIDs []string, eventIDs []string) { +func getEventsFromBatchSendResponse(t *testing.T, res *http.Response) (eventIDs []string) { body := client.ParseJSON(t, res) // Since the original body can only be read once, create a new one from the body bytes we just read res.Body = ioutil.NopCloser(bytes.NewBuffer(body)) - stateEventsIDs = client.GetJSONFieldStringArray(t, body, "state_events") eventIDs = client.GetJSONFieldStringArray(t, body, "events") - return stateEventsIDs, eventIDs + return eventIDs } func getNextChunkIdFromBatchSendResponse(t *testing.T, res *http.Response) (nextChunkID string) { From dcd84ad906370762bde6444a95d79f8e0ef04772 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 28 Jun 2021 01:55:20 -0500 Subject: [PATCH 68/81] Re-use one test setup with sub-tests See https://github.com/matrix-org/complement/pull/68#discussion_r658663991 --- tests/msc2716_test.go | 326 ++++++++++++++---------------------------- 1 file changed, 104 insertions(+), 222 deletions(-) diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go index 0f89c8dc..20636bf6 100644 --- a/tests/msc2716_test.go +++ b/tests/msc2716_test.go @@ -60,12 +60,99 @@ func TestBackfillingHistory(t *testing.T) { alice := deployment.Client(t, "hs1", userID) // Create the federated user which will fetch the messages from a remote homeserver - remoteUserID := "@charlie:hs2" - remoteCharlie := deployment.Client(t, "hs2", remoteUserID) + remoteCharlieUserID := "@charlie:hs2" + remoteCharlie := deployment.Client(t, "hs2", remoteCharlieUserID) + remoteCharlieAlreadyJoined := deployment.RegisterUser(t, "hs2", "remoteCharlieAlready", "123") + remoteCharlieWithFullScrollback := deployment.RegisterUser(t, "hs2", "remoteCharlieWithFullScrollback", "123") virtualUserLocalpart := "maria" virtualUserID := fmt.Sprintf("@%s:hs1", virtualUserLocalpart) + roomID := as.CreateRoom(t, map[string]interface{}{ + "preset": "public_chat", + "name": "the hangout spot", + }) + alice.JoinRoom(t, roomID, nil) + + // Join the room from a remote homeserver before any backfilled messages are sent + remoteCharlieAlreadyJoined.JoinRoom(t, roomID, []string{"hs1"}) + + // Create some normal messages in the timeline. We're creating them in + // two batches so we can create some time in between where we are going + // to backfill. + // + // Create the first batch including the "live" event we are going to + // insert our backfilled events next to. 
+ eventIDsBefore := createMessagesInRoom(t, alice, roomID, 2) + eventIdBefore := eventIDsBefore[len(eventIDsBefore)-1] + timeAfterEventBefore := time.Now() + + // wait X number of ms to ensure that the timestamp changes enough for + // each of the messages we try to backfill later + numHistoricalMessages := 6 + time.Sleep(time.Duration(numHistoricalMessages) * timeBetweenMessages) + + // Create the second batch of events. + // This will also fill up the buffer so we have to scrollback to the + // inserted history later. + eventIDsAfter := createMessagesInRoom(t, alice, roomID, 2) + + // Mimic scrollback to all of the messages + // scrollbackMessagesRes + remoteCharlieWithFullScrollback.JoinRoom(t, roomID, []string{"hs1"}) + remoteCharlieWithFullScrollback.MustDoFunc(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, client.WithContentType("application/json"), client.WithQueries(url.Values{ + "dir": []string{"b"}, + "limit": []string{"100"}, + })) + + // Register and join the virtual user + ensureVirtualUserRegistered(t, as, virtualUserLocalpart) + + // Insert the most recent chunk of backfilled history + backfillRes := backfillBatchHistoricalMessages( + t, + as, + virtualUserID, + roomID, + eventIdBefore, + timeAfterEventBefore.Add(timeBetweenMessages*3), + "", + 3, + // Status + 200, + ) + historicalEventIDs := getEventsFromBatchSendResponse(t, backfillRes) + nextChunkID := getNextChunkIdFromBatchSendResponse(t, backfillRes) + + // Insert another older chunk of backfilled history from the same user. + // Make sure the meta data and joins still work on the subsequent chunk + backfillRes2 := backfillBatchHistoricalMessages( + t, + as, + virtualUserID, + roomID, + eventIdBefore, + timeAfterEventBefore, + nextChunkID, + 3, + // Status + 200, + ) + historicalEventIDs2 := getEventsFromBatchSendResponse(t, backfillRes2) + + var expectedEventIDOrder []string + expectedEventIDOrder = append(expectedEventIDOrder, eventIDsBefore...) + expectedEventIDOrder = append(expectedEventIDOrder, historicalEventIDs2...) + expectedEventIDOrder = append(expectedEventIDOrder, historicalEventIDs...) + expectedEventIDOrder = append(expectedEventIDOrder, eventIDsAfter...) + // Order events from newest to oldest + expectedEventIDOrder = reversed(expectedEventIDOrder) + + // 2 eventIDsBefore + 6 historical events + 3 insertion events + 2 eventIDsAfter + if len(expectedEventIDOrder) != 13 { + t.Fatalf("Expected eventID list should be length 13 but saw %d: %s", len(expectedEventIDOrder), expectedEventIDOrder) + } + t.Run("parallel", func(t *testing.T) { // Final timeline output: ( [n] = historical chunk ) // (oldest) A, B, [insertion, c, d, e] [insertion, f, g, h, insertion], I, J (newest) @@ -73,80 +160,6 @@ func TestBackfillingHistory(t *testing.T) { t.Run("Backfilled historical events resolve with proper state in correct order", func(t *testing.T) { t.Parallel() - roomID := as.CreateRoom(t, map[string]interface{}{ - "preset": "public_chat", - "name": "the hangout spot", - }) - alice.JoinRoom(t, roomID, nil) - - // Create some normal messages in the timeline. We're creating them in - // two batches so we can create some time in between where we are going - // to backfill. - // - // Create the first batch including the "live" event we are going to - // insert our backfilled events next to. 
- eventIDsBefore := createMessagesInRoom(t, alice, roomID, 2) - eventIdBefore := eventIDsBefore[len(eventIDsBefore)-1] - timeAfterEventBefore := time.Now() - - // wait X number of ms to ensure that the timestamp changes enough for - // each of the messages we try to backfill later - numHistoricalMessages := 6 - time.Sleep(time.Duration(numHistoricalMessages) * timeBetweenMessages) - - // Create the second batch of events. - // This will also fill up the buffer so we have to scrollback to the - // inserted history later. - eventIDsAfter := createMessagesInRoom(t, alice, roomID, 2) - - // Register and join the virtual user - ensureVirtualUserRegistered(t, as, virtualUserLocalpart) - - // Insert the most recent chunk of backfilled history - backfillRes := backfillBatchHistoricalMessages( - t, - as, - virtualUserID, - roomID, - eventIdBefore, - timeAfterEventBefore.Add(timeBetweenMessages*3), - "", - 3, - // Status - 200, - ) - historicalEventIDs := getEventsFromBatchSendResponse(t, backfillRes) - nextChunkID := getNextChunkIdFromBatchSendResponse(t, backfillRes) - - // Insert another older chunk of backfilled history from the same user. - // Make sure the meta data and joins still work on the subsequent chunk - backfillRes2 := backfillBatchHistoricalMessages( - t, - as, - virtualUserID, - roomID, - eventIdBefore, - timeAfterEventBefore, - nextChunkID, - 3, - // Status - 200, - ) - historicalEventIDs2 := getEventsFromBatchSendResponse(t, backfillRes2) - - var expectedEventIDOrder []string - expectedEventIDOrder = append(expectedEventIDOrder, eventIDsBefore...) - expectedEventIDOrder = append(expectedEventIDOrder, historicalEventIDs2...) - expectedEventIDOrder = append(expectedEventIDOrder, historicalEventIDs...) - expectedEventIDOrder = append(expectedEventIDOrder, eventIDsAfter...) 
- // Order events from newest to oldest - expectedEventIDOrder = reversed(expectedEventIDOrder) - - // 2 eventIDsBefore + 6 historical events + 3 insertion events + 2 eventIDsAfter - if len(expectedEventIDOrder) != 13 { - t.Fatalf("Expected eventID list should be length 13 but saw %d: %s", len(expectedEventIDOrder), expectedEventIDOrder) - } - messagesRes := alice.MustDoFunc(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, client.WithContentType("application/json"), client.WithQueries(url.Values{ "dir": []string{"b"}, "limit": []string{"100"}, @@ -184,49 +197,18 @@ func TestBackfillingHistory(t *testing.T) { } }) - t.Run("Backfilled historical events with m.historical do not come down in an incremental sync", func(t *testing.T) { + t.Run("Backfilled historical events do not come down in an incremental sync", func(t *testing.T) { t.Parallel() - roomID := as.CreateRoom(t, struct{}{}) - alice.JoinRoom(t, roomID, nil) - - // Create the "live" event we are going to insert our backfilled events next to - eventIDsBefore := createMessagesInRoom(t, alice, roomID, 1) - eventIdBefore := eventIDsBefore[0] - timeAfterEventBefore := time.Now() - - // Create some "live" events to saturate and fill up the /sync response - createMessagesInRoom(t, alice, roomID, 5) - - // Insert a backfilled event - backfillRes := backfillBatchHistoricalMessages( - t, - as, - virtualUserID, - roomID, - eventIdBefore, - timeAfterEventBefore, - "", - 1, - // Status - 200, - ) - historicalEventIDs := getEventsFromBatchSendResponse(t, backfillRes) - backfilledEventId := historicalEventIDs[0] - - // This is just a dummy event we search for after the backfilledEventId - eventIDsAfterBackfill := createMessagesInRoom(t, alice, roomID, 1) - eventIdAfterBackfill := eventIDsAfterBackfill[0] - // Sync until we find the eventIdAfterBackfill. 
If we're able to see the eventIdAfterBackfill // that occurs after the backfilledEventId without seeing eventIdAfterBackfill in between, // we're probably safe to assume it won't sync - alice.SyncUntil(t, "", `{ "room": { "timeline": { "limit": 3 } } }`, "rooms.join."+client.GjsonEscape(roomID)+".timeline.events", func(r gjson.Result) bool { - if r.Get("event_id").Str == backfilledEventId { - t.Fatalf("We should not see the %s backfilled event in /sync response but it was present", backfilledEventId) + alice.SyncUntil(t, "", `{ "room": { "timeline": { "limit": 2 } } }`, "rooms.join."+client.GjsonEscape(roomID)+".timeline.events", func(r gjson.Result) bool { + if containsItem(historicalEventIDs, r.Get("event_id").Str) || containsItem(historicalEventIDs2, r.Get("event_id").Str) { + t.Fatalf("We should not see the %s backfilled event in /sync response but it was present", r.Get("event_id").Str) } - return r.Get("event_id").Str == eventIdAfterBackfill + return containsItem(eventIDsAfter, r.Get("event_id").Str) }) }) @@ -255,13 +237,6 @@ func TestBackfillingHistory(t *testing.T) { t.Run("Normal users aren't allowed to backfill messages", func(t *testing.T) { t.Parallel() - roomID := as.CreateRoom(t, struct{}{}) - alice.JoinRoom(t, roomID, nil) - - eventIDsBefore := createMessagesInRoom(t, alice, roomID, 1) - eventIdBefore := eventIDsBefore[0] - timeAfterEventBefore := time.Now() - backfillBatchHistoricalMessages( t, alice, @@ -286,33 +261,6 @@ func TestBackfillingHistory(t *testing.T) { t.Skip("Skipping until federation is implemented") t.Parallel() - roomID := as.CreateRoom(t, struct{}{}) - alice.JoinRoom(t, roomID, nil) - - eventIDsBefore := createMessagesInRoom(t, alice, roomID, 1) - eventIdBefore := eventIDsBefore[0] - timeAfterEventBefore := time.Now() - - // eventIDsAfter - createMessagesInRoom(t, alice, roomID, 3) - - // Register and join the virtual user - ensureVirtualUserRegistered(t, as, virtualUserLocalpart) - - backfillRes := backfillBatchHistoricalMessages( - t, - as, - virtualUserID, - roomID, - eventIdBefore, - timeAfterEventBefore, - "", - 2, - // Status - 200, - ) - historicalEventIDs := getEventsFromBatchSendResponse(t, backfillRes) - // Join the room from a remote homeserver after the backfilled messages were sent remoteCharlie.JoinRoom(t, roomID, []string{"hs1"}) @@ -334,43 +282,6 @@ func TestBackfillingHistory(t *testing.T) { t.Skip("Skipping until federation is implemented") t.Parallel() - roomID := as.CreateRoom(t, struct{}{}) - alice.JoinRoom(t, roomID, nil) - - // Join the room from a remote homeserver before any backfilled messages are sent - remoteCharlie.JoinRoom(t, roomID, []string{"hs1"}) - - eventIDsBefore := createMessagesInRoom(t, alice, roomID, 1) - eventIdBefore := eventIDsBefore[0] - timeAfterEventBefore := time.Now() - - // eventIDsAfter - createMessagesInRoom(t, alice, roomID, 10) - - // Mimic scrollback just through the latest messages - remoteCharlie.MustDoFunc(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, client.WithContentType("application/json"), client.WithQueries(url.Values{ - "dir": []string{"b"}, - // Limited so we can only see a few of the latest messages - "limit": []string{"5"}, - })) - - // Register and join the virtual user - ensureVirtualUserRegistered(t, as, virtualUserLocalpart) - - backfillRes := backfillBatchHistoricalMessages( - t, - as, - virtualUserID, - roomID, - eventIdBefore, - timeAfterEventBefore, - "", - 2, - // Status - 200, - ) - historicalEventIDs := getEventsFromBatchSendResponse(t, backfillRes) 
- messagesRes := remoteCharlie.MustDoFunc(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, client.WithContentType("application/json"), client.WithQueries(url.Values{ "dir": []string{"b"}, "limit": []string{"100"}, @@ -389,45 +300,7 @@ func TestBackfillingHistory(t *testing.T) { t.Skip("Skipping until federation is implemented") t.Parallel() - roomID := as.CreateRoom(t, struct{}{}) - alice.JoinRoom(t, roomID, nil) - - // Join the room from a remote homeserver before any backfilled messages are sent - remoteCharlie.JoinRoom(t, roomID, []string{"hs1"}) - - eventIDsBefore := createMessagesInRoom(t, alice, roomID, 1) - eventIdBefore := eventIDsBefore[0] - timeAfterEventBefore := time.Now() - - // eventIDsAfter - createMessagesInRoom(t, alice, roomID, 3) - - // Register and join the virtual user - ensureVirtualUserRegistered(t, as, virtualUserLocalpart) - - // Mimic scrollback to all of the messages - // scrollbackMessagesRes - remoteCharlie.MustDoFunc(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, client.WithContentType("application/json"), client.WithQueries(url.Values{ - "dir": []string{"b"}, - "limit": []string{"100"}, - })) - - // Historical messages are inserted where we have already scrolled back to - backfillRes := backfillBatchHistoricalMessages( - t, - as, - virtualUserID, - roomID, - eventIdBefore, - timeAfterEventBefore, - "", - 2, - // Status - 200, - ) - historicalEventIDs := getEventsFromBatchSendResponse(t, backfillRes) - - messagesRes := remoteCharlie.MustDoFunc(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, client.WithContentType("application/json"), client.WithQueries(url.Values{ + messagesRes := remoteCharlieWithFullScrollback.MustDoFunc(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, client.WithContentType("application/json"), client.WithQueries(url.Values{ "dir": []string{"b"}, "limit": []string{"100"}, })) @@ -451,6 +324,15 @@ func reversed(in []string) []string { return out } +func containsItem(slice []string, item string) bool { + for _, a := range slice { + if a == item { + return true + } + } + return false +} + func getRelevantEventDebugStringsFromMessagesResponse(t *testing.T, body []byte) (eventIDsFromResponse []string) { t.Helper() From 4875ef8aacacf9c8c202ddb717c6246f9827a7f8 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 28 Jun 2021 02:01:41 -0500 Subject: [PATCH 69/81] Revert "Re-use one test setup with sub-tests" This reverts commit dcd84ad906370762bde6444a95d79f8e0ef04772. 
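For context: what this revert gives up is Go's shared-fixture pattern, where the expensive room setup is hoisted above `t.Run` so every parallel sub-test reuses one room. A minimal sketch of that shape, with `createRoomFixture` as a hypothetical stand-in for the real deployment and room-creation calls (not code from this series):

package tests

import "testing"

// createRoomFixture is a hypothetical stand-in for the deployment and
// room-creation work the real test performs.
func createRoomFixture(t *testing.T) string {
	t.Helper()
	return "!shared-room:hs1"
}

func TestSharedSetupShape(t *testing.T) {
	// Created once; every sub-test below observes the same room state,
	// which is faster but couples the sub-tests together.
	roomID := createRoomFixture(t)

	t.Run("parallel", func(t *testing.T) {
		t.Run("first assertion", func(t *testing.T) {
			t.Parallel()
			_ = roomID // reads the shared room
		})
		t.Run("second assertion", func(t *testing.T) {
			t.Parallel()
			_ = roomID // events created here are visible to sibling sub-tests
		})
	})
}

Reverting restores hermetic per-sub-test setup at the cost of repeating the room creation in each sub-test.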
--- tests/msc2716_test.go | 326 ++++++++++++++++++++++++++++-------------- 1 file changed, 222 insertions(+), 104 deletions(-) diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go index 20636bf6..0f89c8dc 100644 --- a/tests/msc2716_test.go +++ b/tests/msc2716_test.go @@ -60,99 +60,12 @@ func TestBackfillingHistory(t *testing.T) { alice := deployment.Client(t, "hs1", userID) // Create the federated user which will fetch the messages from a remote homeserver - remoteCharlieUserID := "@charlie:hs2" - remoteCharlie := deployment.Client(t, "hs2", remoteCharlieUserID) - remoteCharlieAlreadyJoined := deployment.RegisterUser(t, "hs2", "remoteCharlieAlready", "123") - remoteCharlieWithFullScrollback := deployment.RegisterUser(t, "hs2", "remoteCharlieWithFullScrollback", "123") + remoteUserID := "@charlie:hs2" + remoteCharlie := deployment.Client(t, "hs2", remoteUserID) virtualUserLocalpart := "maria" virtualUserID := fmt.Sprintf("@%s:hs1", virtualUserLocalpart) - roomID := as.CreateRoom(t, map[string]interface{}{ - "preset": "public_chat", - "name": "the hangout spot", - }) - alice.JoinRoom(t, roomID, nil) - - // Join the room from a remote homeserver before any backfilled messages are sent - remoteCharlieAlreadyJoined.JoinRoom(t, roomID, []string{"hs1"}) - - // Create some normal messages in the timeline. We're creating them in - // two batches so we can create some time in between where we are going - // to backfill. - // - // Create the first batch including the "live" event we are going to - // insert our backfilled events next to. - eventIDsBefore := createMessagesInRoom(t, alice, roomID, 2) - eventIdBefore := eventIDsBefore[len(eventIDsBefore)-1] - timeAfterEventBefore := time.Now() - - // wait X number of ms to ensure that the timestamp changes enough for - // each of the messages we try to backfill later - numHistoricalMessages := 6 - time.Sleep(time.Duration(numHistoricalMessages) * timeBetweenMessages) - - // Create the second batch of events. - // This will also fill up the buffer so we have to scrollback to the - // inserted history later. - eventIDsAfter := createMessagesInRoom(t, alice, roomID, 2) - - // Mimic scrollback to all of the messages - // scrollbackMessagesRes - remoteCharlieWithFullScrollback.JoinRoom(t, roomID, []string{"hs1"}) - remoteCharlieWithFullScrollback.MustDoFunc(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, client.WithContentType("application/json"), client.WithQueries(url.Values{ - "dir": []string{"b"}, - "limit": []string{"100"}, - })) - - // Register and join the virtual user - ensureVirtualUserRegistered(t, as, virtualUserLocalpart) - - // Insert the most recent chunk of backfilled history - backfillRes := backfillBatchHistoricalMessages( - t, - as, - virtualUserID, - roomID, - eventIdBefore, - timeAfterEventBefore.Add(timeBetweenMessages*3), - "", - 3, - // Status - 200, - ) - historicalEventIDs := getEventsFromBatchSendResponse(t, backfillRes) - nextChunkID := getNextChunkIdFromBatchSendResponse(t, backfillRes) - - // Insert another older chunk of backfilled history from the same user. - // Make sure the meta data and joins still work on the subsequent chunk - backfillRes2 := backfillBatchHistoricalMessages( - t, - as, - virtualUserID, - roomID, - eventIdBefore, - timeAfterEventBefore, - nextChunkID, - 3, - // Status - 200, - ) - historicalEventIDs2 := getEventsFromBatchSendResponse(t, backfillRes2) - - var expectedEventIDOrder []string - expectedEventIDOrder = append(expectedEventIDOrder, eventIDsBefore...) 
- expectedEventIDOrder = append(expectedEventIDOrder, historicalEventIDs2...) - expectedEventIDOrder = append(expectedEventIDOrder, historicalEventIDs...) - expectedEventIDOrder = append(expectedEventIDOrder, eventIDsAfter...) - // Order events from newest to oldest - expectedEventIDOrder = reversed(expectedEventIDOrder) - - // 2 eventIDsBefore + 6 historical events + 3 insertion events + 2 eventIDsAfter - if len(expectedEventIDOrder) != 13 { - t.Fatalf("Expected eventID list should be length 13 but saw %d: %s", len(expectedEventIDOrder), expectedEventIDOrder) - } - t.Run("parallel", func(t *testing.T) { // Final timeline output: ( [n] = historical chunk ) // (oldest) A, B, [insertion, c, d, e] [insertion, f, g, h, insertion], I, J (newest) @@ -160,6 +73,80 @@ func TestBackfillingHistory(t *testing.T) { t.Run("Backfilled historical events resolve with proper state in correct order", func(t *testing.T) { t.Parallel() + roomID := as.CreateRoom(t, map[string]interface{}{ + "preset": "public_chat", + "name": "the hangout spot", + }) + alice.JoinRoom(t, roomID, nil) + + // Create some normal messages in the timeline. We're creating them in + // two batches so we can create some time in between where we are going + // to backfill. + // + // Create the first batch including the "live" event we are going to + // insert our backfilled events next to. + eventIDsBefore := createMessagesInRoom(t, alice, roomID, 2) + eventIdBefore := eventIDsBefore[len(eventIDsBefore)-1] + timeAfterEventBefore := time.Now() + + // wait X number of ms to ensure that the timestamp changes enough for + // each of the messages we try to backfill later + numHistoricalMessages := 6 + time.Sleep(time.Duration(numHistoricalMessages) * timeBetweenMessages) + + // Create the second batch of events. + // This will also fill up the buffer so we have to scrollback to the + // inserted history later. + eventIDsAfter := createMessagesInRoom(t, alice, roomID, 2) + + // Register and join the virtual user + ensureVirtualUserRegistered(t, as, virtualUserLocalpart) + + // Insert the most recent chunk of backfilled history + backfillRes := backfillBatchHistoricalMessages( + t, + as, + virtualUserID, + roomID, + eventIdBefore, + timeAfterEventBefore.Add(timeBetweenMessages*3), + "", + 3, + // Status + 200, + ) + historicalEventIDs := getEventsFromBatchSendResponse(t, backfillRes) + nextChunkID := getNextChunkIdFromBatchSendResponse(t, backfillRes) + + // Insert another older chunk of backfilled history from the same user. + // Make sure the meta data and joins still work on the subsequent chunk + backfillRes2 := backfillBatchHistoricalMessages( + t, + as, + virtualUserID, + roomID, + eventIdBefore, + timeAfterEventBefore, + nextChunkID, + 3, + // Status + 200, + ) + historicalEventIDs2 := getEventsFromBatchSendResponse(t, backfillRes2) + + var expectedEventIDOrder []string + expectedEventIDOrder = append(expectedEventIDOrder, eventIDsBefore...) + expectedEventIDOrder = append(expectedEventIDOrder, historicalEventIDs2...) + expectedEventIDOrder = append(expectedEventIDOrder, historicalEventIDs...) + expectedEventIDOrder = append(expectedEventIDOrder, eventIDsAfter...) 
+ // Order events from newest to oldest + expectedEventIDOrder = reversed(expectedEventIDOrder) + + // 2 eventIDsBefore + 6 historical events + 3 insertion events + 2 eventIDsAfter + if len(expectedEventIDOrder) != 13 { + t.Fatalf("Expected eventID list should be length 13 but saw %d: %s", len(expectedEventIDOrder), expectedEventIDOrder) + } + messagesRes := alice.MustDoFunc(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, client.WithContentType("application/json"), client.WithQueries(url.Values{ "dir": []string{"b"}, "limit": []string{"100"}, @@ -197,18 +184,49 @@ func TestBackfillingHistory(t *testing.T) { } }) - t.Run("Backfilled historical events do not come down in an incremental sync", func(t *testing.T) { + t.Run("Backfilled historical events with m.historical do not come down in an incremental sync", func(t *testing.T) { t.Parallel() + roomID := as.CreateRoom(t, struct{}{}) + alice.JoinRoom(t, roomID, nil) + + // Create the "live" event we are going to insert our backfilled events next to + eventIDsBefore := createMessagesInRoom(t, alice, roomID, 1) + eventIdBefore := eventIDsBefore[0] + timeAfterEventBefore := time.Now() + + // Create some "live" events to saturate and fill up the /sync response + createMessagesInRoom(t, alice, roomID, 5) + + // Insert a backfilled event + backfillRes := backfillBatchHistoricalMessages( + t, + as, + virtualUserID, + roomID, + eventIdBefore, + timeAfterEventBefore, + "", + 1, + // Status + 200, + ) + historicalEventIDs := getEventsFromBatchSendResponse(t, backfillRes) + backfilledEventId := historicalEventIDs[0] + + // This is just a dummy event we search for after the backfilledEventId + eventIDsAfterBackfill := createMessagesInRoom(t, alice, roomID, 1) + eventIdAfterBackfill := eventIDsAfterBackfill[0] + // Sync until we find the eventIdAfterBackfill. 
If we're able to see the eventIdAfterBackfill // that occurs after the backfilledEventId without seeing eventIdAfterBackfill in between, // we're probably safe to assume it won't sync - alice.SyncUntil(t, "", `{ "room": { "timeline": { "limit": 2 } } }`, "rooms.join."+client.GjsonEscape(roomID)+".timeline.events", func(r gjson.Result) bool { - if containsItem(historicalEventIDs, r.Get("event_id").Str) || containsItem(historicalEventIDs2, r.Get("event_id").Str) { - t.Fatalf("We should not see the %s backfilled event in /sync response but it was present", r.Get("event_id").Str) + alice.SyncUntil(t, "", `{ "room": { "timeline": { "limit": 3 } } }`, "rooms.join."+client.GjsonEscape(roomID)+".timeline.events", func(r gjson.Result) bool { + if r.Get("event_id").Str == backfilledEventId { + t.Fatalf("We should not see the %s backfilled event in /sync response but it was present", backfilledEventId) } - return containsItem(eventIDsAfter, r.Get("event_id").Str) + return r.Get("event_id").Str == eventIdAfterBackfill }) }) @@ -237,6 +255,13 @@ func TestBackfillingHistory(t *testing.T) { t.Run("Normal users aren't allowed to backfill messages", func(t *testing.T) { t.Parallel() + roomID := as.CreateRoom(t, struct{}{}) + alice.JoinRoom(t, roomID, nil) + + eventIDsBefore := createMessagesInRoom(t, alice, roomID, 1) + eventIdBefore := eventIDsBefore[0] + timeAfterEventBefore := time.Now() + backfillBatchHistoricalMessages( t, alice, @@ -261,6 +286,33 @@ func TestBackfillingHistory(t *testing.T) { t.Skip("Skipping until federation is implemented") t.Parallel() + roomID := as.CreateRoom(t, struct{}{}) + alice.JoinRoom(t, roomID, nil) + + eventIDsBefore := createMessagesInRoom(t, alice, roomID, 1) + eventIdBefore := eventIDsBefore[0] + timeAfterEventBefore := time.Now() + + // eventIDsAfter + createMessagesInRoom(t, alice, roomID, 3) + + // Register and join the virtual user + ensureVirtualUserRegistered(t, as, virtualUserLocalpart) + + backfillRes := backfillBatchHistoricalMessages( + t, + as, + virtualUserID, + roomID, + eventIdBefore, + timeAfterEventBefore, + "", + 2, + // Status + 200, + ) + historicalEventIDs := getEventsFromBatchSendResponse(t, backfillRes) + // Join the room from a remote homeserver after the backfilled messages were sent remoteCharlie.JoinRoom(t, roomID, []string{"hs1"}) @@ -282,6 +334,43 @@ func TestBackfillingHistory(t *testing.T) { t.Skip("Skipping until federation is implemented") t.Parallel() + roomID := as.CreateRoom(t, struct{}{}) + alice.JoinRoom(t, roomID, nil) + + // Join the room from a remote homeserver before any backfilled messages are sent + remoteCharlie.JoinRoom(t, roomID, []string{"hs1"}) + + eventIDsBefore := createMessagesInRoom(t, alice, roomID, 1) + eventIdBefore := eventIDsBefore[0] + timeAfterEventBefore := time.Now() + + // eventIDsAfter + createMessagesInRoom(t, alice, roomID, 10) + + // Mimic scrollback just through the latest messages + remoteCharlie.MustDoFunc(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, client.WithContentType("application/json"), client.WithQueries(url.Values{ + "dir": []string{"b"}, + // Limited so we can only see a few of the latest messages + "limit": []string{"5"}, + })) + + // Register and join the virtual user + ensureVirtualUserRegistered(t, as, virtualUserLocalpart) + + backfillRes := backfillBatchHistoricalMessages( + t, + as, + virtualUserID, + roomID, + eventIdBefore, + timeAfterEventBefore, + "", + 2, + // Status + 200, + ) + historicalEventIDs := getEventsFromBatchSendResponse(t, backfillRes) 
+ messagesRes := remoteCharlie.MustDoFunc(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, client.WithContentType("application/json"), client.WithQueries(url.Values{ "dir": []string{"b"}, "limit": []string{"100"}, @@ -300,7 +389,45 @@ func TestBackfillingHistory(t *testing.T) { t.Skip("Skipping until federation is implemented") t.Parallel() - messagesRes := remoteCharlieWithFullScrollback.MustDoFunc(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, client.WithContentType("application/json"), client.WithQueries(url.Values{ + roomID := as.CreateRoom(t, struct{}{}) + alice.JoinRoom(t, roomID, nil) + + // Join the room from a remote homeserver before any backfilled messages are sent + remoteCharlie.JoinRoom(t, roomID, []string{"hs1"}) + + eventIDsBefore := createMessagesInRoom(t, alice, roomID, 1) + eventIdBefore := eventIDsBefore[0] + timeAfterEventBefore := time.Now() + + // eventIDsAfter + createMessagesInRoom(t, alice, roomID, 3) + + // Register and join the virtual user + ensureVirtualUserRegistered(t, as, virtualUserLocalpart) + + // Mimic scrollback to all of the messages + // scrollbackMessagesRes + remoteCharlie.MustDoFunc(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, client.WithContentType("application/json"), client.WithQueries(url.Values{ + "dir": []string{"b"}, + "limit": []string{"100"}, + })) + + // Historical messages are inserted where we have already scrolled back to + backfillRes := backfillBatchHistoricalMessages( + t, + as, + virtualUserID, + roomID, + eventIdBefore, + timeAfterEventBefore, + "", + 2, + // Status + 200, + ) + historicalEventIDs := getEventsFromBatchSendResponse(t, backfillRes) + + messagesRes := remoteCharlie.MustDoFunc(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, client.WithContentType("application/json"), client.WithQueries(url.Values{ "dir": []string{"b"}, "limit": []string{"100"}, })) @@ -324,15 +451,6 @@ func reversed(in []string) []string { return out } -func containsItem(slice []string, item string) bool { - for _, a := range slice { - if a == item { - return true - } - } - return false -} - func getRelevantEventDebugStringsFromMessagesResponse(t *testing.T, body []byte) (eventIDsFromResponse []string) { t.Helper() From e56c87281d3b1ac7ac9edd74cc46ea4f620183b0 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Tue, 29 Jun 2021 19:02:15 -0500 Subject: [PATCH 70/81] Add test for multiple senders within a single chunk Add tests for https://github.com/matrix-org/synapse/pull/10276 Also removes the `?user_id` usage because it's no longer necessary. We now create the proper requester in Synapse for each given sender in the event. 
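The core of the diff below is cycling through the supplied senders while building a chunk. A self-contained sketch of just that round-robin indexing (the user IDs are made up for illustration):

package main

import "fmt"

// roundRobinSenders picks a sender for each of count events by cycling
// through userIDs with i % len(userIDs), mirroring the assignment the
// patch performs when it builds the historical events in a chunk.
func roundRobinSenders(userIDs []string, count int) []string {
	senders := make([]string, count)
	for i := range senders {
		senders[i] = userIDs[i%len(userIDs)]
	}
	return senders
}

func main() {
	senders := roundRobinSenders([]string{"@maria:hs1", "@ricky:hs1", "@carol:hs1"}, 5)
	fmt.Println(senders)
	// [@maria:hs1 @ricky:hs1 @carol:hs1 @maria:hs1 @ricky:hs1]
}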
--- tests/msc2716_test.go | 86 +++++++++++++++++++++++++++++++++---------- 1 file changed, 67 insertions(+), 19 deletions(-) diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go index 0f89c8dc..8d5a21f1 100644 --- a/tests/msc2716_test.go +++ b/tests/msc2716_test.go @@ -106,7 +106,7 @@ func TestBackfillingHistory(t *testing.T) { backfillRes := backfillBatchHistoricalMessages( t, as, - virtualUserID, + []string{virtualUserID}, roomID, eventIdBefore, timeAfterEventBefore.Add(timeBetweenMessages*3), @@ -123,7 +123,7 @@ func TestBackfillingHistory(t *testing.T) { backfillRes2 := backfillBatchHistoricalMessages( t, as, - virtualUserID, + []string{virtualUserID}, roomID, eventIdBefore, timeAfterEventBefore, @@ -184,6 +184,48 @@ func TestBackfillingHistory(t *testing.T) { } }) + t.Run("Backfilled historical events from multiple users in the same chunk", func(t *testing.T) { + t.Parallel() + + roomID := as.CreateRoom(t, struct{}{}) + alice.JoinRoom(t, roomID, nil) + + // Create the "live" event we are going to insert our backfilled events next to + eventIDsBefore := createMessagesInRoom(t, alice, roomID, 1) + eventIdBefore := eventIDsBefore[0] + timeAfterEventBefore := time.Now() + + // Insert a backfilled event + virtualUserID2 := "@ricky:hs1" + virtualUserID3 := "@carol:hs1" + backfillRes := backfillBatchHistoricalMessages( + t, + as, + []string{virtualUserID, virtualUserID2, virtualUserID3}, + roomID, + eventIdBefore, + timeAfterEventBefore, + "", + 3, + // Status + 200, + ) + historicalEventIDs := getEventsFromBatchSendResponse(t, backfillRes) + + messagesRes := alice.MustDoFunc(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, client.WithContentType("application/json"), client.WithQueries(url.Values{ + "dir": []string{"b"}, + "limit": []string{"100"}, + })) + + must.MatchResponse(t, messagesRes, match.HTTPResponse{ + JSON: []match.JSON{ + match.JSONCheckOffAllowUnwanted("chunk", []interface{}{historicalEventIDs[0], historicalEventIDs[1], historicalEventIDs[2]}, func(r gjson.Result) interface{} { + return r.Get("event_id").Str + }, nil), + }, + }) + }) + t.Run("Backfilled historical events with m.historical do not come down in an incremental sync", func(t *testing.T) { t.Parallel() @@ -202,7 +244,7 @@ func TestBackfillingHistory(t *testing.T) { backfillRes := backfillBatchHistoricalMessages( t, as, - virtualUserID, + []string{virtualUserID}, roomID, eventIdBefore, timeAfterEventBefore, @@ -238,7 +280,7 @@ func TestBackfillingHistory(t *testing.T) { backfillBatchHistoricalMessages( t, as, - virtualUserID, + []string{virtualUserID}, roomID, "$some-non-existant-event-id", time.Now(), @@ -265,7 +307,7 @@ func TestBackfillingHistory(t *testing.T) { backfillBatchHistoricalMessages( t, alice, - virtualUserID, + []string{virtualUserID}, roomID, eventIdBefore, timeAfterEventBefore, @@ -302,7 +344,7 @@ func TestBackfillingHistory(t *testing.T) { backfillRes := backfillBatchHistoricalMessages( t, as, - virtualUserID, + []string{virtualUserID}, roomID, eventIdBefore, timeAfterEventBefore, @@ -360,7 +402,7 @@ func TestBackfillingHistory(t *testing.T) { backfillRes := backfillBatchHistoricalMessages( t, as, - virtualUserID, + []string{virtualUserID}, roomID, eventIdBefore, timeAfterEventBefore, @@ -416,7 +458,7 @@ func TestBackfillingHistory(t *testing.T) { backfillRes := backfillBatchHistoricalMessages( t, as, - virtualUserID, + []string{virtualUserID}, roomID, eventIdBefore, timeAfterEventBefore, @@ -521,7 +563,7 @@ var chunkCount int64 = 0 func 
backfillBatchHistoricalMessages(
 	t *testing.T,
 	c *client.CSAPI,
-	virtualUserID string,
+	virtualUserIDs []string,
 	roomID string,
 	insertAfterEventId string,
 	insertTime time.Time,
@@ -536,6 +578,8 @@ func backfillBatchHistoricalMessages(
 
 	evs := make([]map[string]interface{}, count)
 	for i := 0; i < len(evs); i++ {
+		virtualUserID := virtualUserIDs[i%len(virtualUserIDs)]
+
 		newEvent := map[string]interface{}{
 			"type":             "m.room.message",
 			"sender":           virtualUserID,
@@ -550,19 +594,23 @@ func backfillBatchHistoricalMessages(
 		evs[i] = newEvent
 	}
 
-	joinEvent := map[string]interface{}{
-		"type":             "m.room.member",
-		"sender":           virtualUserID,
-		"origin_server_ts": insertOriginServerTs,
-		"content": map[string]interface{}{
-			"membership": "join",
-		},
-		"state_key": virtualUserID,
+	stateEvents := make([]map[string]interface{}, len(virtualUserIDs))
+	for i, virtualUserID := range virtualUserIDs {
+		joinEvent := map[string]interface{}{
+			"type":             "m.room.member",
+			"sender":           virtualUserID,
+			"origin_server_ts": insertOriginServerTs,
+			"content": map[string]interface{}{
+				"membership": "join",
+			},
+			"state_key": virtualUserID,
+		}
+
+		stateEvents[i] = joinEvent
 	}
 
 	query := make(url.Values, 2)
 	query.Add("prev_event", insertAfterEventId)
-	query.Add("user_id", virtualUserID)
 	// If provided, connect the chunk to the last insertion point
 	if chunkID != "" {
 		query.Add("chunk_id", chunkID)
@@ -574,7 +622,7 @@
 		[]string{"_matrix", "client", "unstable", "org.matrix.msc2716", "rooms", roomID, "batch_send"},
 		client.WithJSONBody(t, map[string]interface{}{
 			"events":                evs,
-			"state_events_at_start": []map[string]interface{}{joinEvent},
+			"state_events_at_start": stateEvents,
 		}),
 		client.WithContentType("application/json"),
 		client.WithQueries(query),

From d63fb7fb84d1f3e973f76c2700d01ab9b24edd42 Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Tue, 6 Jul 2021 19:11:33 -0500
Subject: [PATCH 71/81] Make backfill vs batch less confusing

---
 tests/msc2716_test.go | 36 +++++++++++++++---------------------
 1 file changed, 15 insertions(+), 21 deletions(-)

diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go
index 8d5a21f1..f170d7f5 100644
--- a/tests/msc2716_test.go
+++ b/tests/msc2716_test.go
@@ -103,7 +103,7 @@ func TestBackfillingHistory(t *testing.T) {
 		ensureVirtualUserRegistered(t, as, virtualUserLocalpart)
 
 		// Insert the most recent chunk of backfilled history
-		backfillRes := backfillBatchHistoricalMessages(
+		backfillRes := batchSendHistoricalMessages(
 			t,
 			as,
 			[]string{virtualUserID},
@@ -120,7 +120,7 @@ func TestBackfillingHistory(t *testing.T) {
 
 		// Insert another older chunk of backfilled history from the same user.
// Make sure the meta data and joins still work on the subsequent chunk - backfillRes2 := backfillBatchHistoricalMessages( + backfillRes2 := batchSendHistoricalMessages( t, as, []string{virtualUserID}, @@ -198,7 +198,7 @@ func TestBackfillingHistory(t *testing.T) { // Insert a backfilled event virtualUserID2 := "@ricky:hs1" virtualUserID3 := "@carol:hs1" - backfillRes := backfillBatchHistoricalMessages( + backfillRes := batchSendHistoricalMessages( t, as, []string{virtualUserID, virtualUserID2, virtualUserID3}, @@ -241,7 +241,7 @@ func TestBackfillingHistory(t *testing.T) { createMessagesInRoom(t, alice, roomID, 5) // Insert a backfilled event - backfillRes := backfillBatchHistoricalMessages( + backfillRes := batchSendHistoricalMessages( t, as, []string{virtualUserID}, @@ -277,7 +277,7 @@ func TestBackfillingHistory(t *testing.T) { roomID := as.CreateRoom(t, struct{}{}) - backfillBatchHistoricalMessages( + batchSendHistoricalMessages( t, as, []string{virtualUserID}, @@ -304,7 +304,7 @@ func TestBackfillingHistory(t *testing.T) { eventIdBefore := eventIDsBefore[0] timeAfterEventBefore := time.Now() - backfillBatchHistoricalMessages( + batchSendHistoricalMessages( t, alice, []string{virtualUserID}, @@ -341,7 +341,7 @@ func TestBackfillingHistory(t *testing.T) { // Register and join the virtual user ensureVirtualUserRegistered(t, as, virtualUserLocalpart) - backfillRes := backfillBatchHistoricalMessages( + backfillRes := batchSendHistoricalMessages( t, as, []string{virtualUserID}, @@ -399,7 +399,7 @@ func TestBackfillingHistory(t *testing.T) { // Register and join the virtual user ensureVirtualUserRegistered(t, as, virtualUserLocalpart) - backfillRes := backfillBatchHistoricalMessages( + backfillRes := batchSendHistoricalMessages( t, as, []string{virtualUserID}, @@ -455,7 +455,7 @@ func TestBackfillingHistory(t *testing.T) { })) // Historical messages are inserted where we have already scrolled back to - backfillRes := backfillBatchHistoricalMessages( + backfillRes := batchSendHistoricalMessages( t, as, []string{virtualUserID}, @@ -560,7 +560,7 @@ func createMessagesInRoom(t *testing.T, c *client.CSAPI, roomID string, count in var chunkCount int64 = 0 -func backfillBatchHistoricalMessages( +func batchSendHistoricalMessages( t *testing.T, c *client.CSAPI, virtualUserIDs []string, @@ -569,7 +569,7 @@ func backfillBatchHistoricalMessages( insertTime time.Time, chunkID string, count int, - status int, + expectedStatus int, ) (res *http.Response) { // Timestamp in milliseconds insertOriginServerTs := uint64(insertTime.UnixNano() / int64(time.Millisecond)) @@ -627,16 +627,10 @@ func backfillBatchHistoricalMessages( client.WithContentType("application/json"), client.WithQueries(query), ) - // Save the body so we can re-create after the buffer closes - body := client.ParseJSON(t, res) - // Since the original body can only be read once, create a new one from the body bytes we just read - res.Body = ioutil.NopCloser(bytes.NewBuffer(body)) - must.MatchResponse(t, res, match.HTTPResponse{ - StatusCode: status, - }) - // After using up the body in the must.MatchResponse above, create the body again - // Since the original body can only be read once, create a new one from the body bytes we just read - res.Body = ioutil.NopCloser(bytes.NewBuffer(body)) + + if res.StatusCode != expectedStatus { + t.Fatalf("msc2716.batchSendHistoricalMessages got %d HTTP status code from batch send response but want %d", res.StatusCode, expectedStatus) + } chunkCount++ From 34756a04608915a98f55bc1225babc90c1bcc7c9 Mon Sep 17 
00:00:00 2001 From: Eric Eastwood Date: Tue, 6 Jul 2021 21:03:28 -0500 Subject: [PATCH 72/81] Use body bytes instead of reforming a stream buffer again See https://github.com/matrix-org/complement/pull/68#discussion_r658651317 --- tests/msc2716_test.go | 35 +++++++++++++++++------------------ 1 file changed, 17 insertions(+), 18 deletions(-) diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go index f170d7f5..5dccb535 100644 --- a/tests/msc2716_test.go +++ b/tests/msc2716_test.go @@ -115,8 +115,9 @@ func TestBackfillingHistory(t *testing.T) { // Status 200, ) - historicalEventIDs := getEventsFromBatchSendResponse(t, backfillRes) - nextChunkID := getNextChunkIdFromBatchSendResponse(t, backfillRes) + backfillResBody := client.ParseJSON(t, backfillRes) + historicalEventIDs := getEventsFromBatchSendResponseBody(t, backfillResBody) + nextChunkID := getNextChunkIdFromBatchSendResponseBody(t, backfillResBody) // Insert another older chunk of backfilled history from the same user. // Make sure the meta data and joins still work on the subsequent chunk @@ -132,7 +133,8 @@ func TestBackfillingHistory(t *testing.T) { // Status 200, ) - historicalEventIDs2 := getEventsFromBatchSendResponse(t, backfillRes2) + backfillResBody2 := client.ParseJSON(t, backfillRes2) + historicalEventIDs2 := getEventsFromBatchSendResponseBody(t, backfillResBody2) var expectedEventIDOrder []string expectedEventIDOrder = append(expectedEventIDOrder, eventIDsBefore...) @@ -210,7 +212,8 @@ func TestBackfillingHistory(t *testing.T) { // Status 200, ) - historicalEventIDs := getEventsFromBatchSendResponse(t, backfillRes) + backfillResBody := client.ParseJSON(t, backfillRes) + historicalEventIDs := getEventsFromBatchSendResponseBody(t, backfillResBody) messagesRes := alice.MustDoFunc(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, client.WithContentType("application/json"), client.WithQueries(url.Values{ "dir": []string{"b"}, @@ -253,7 +256,8 @@ func TestBackfillingHistory(t *testing.T) { // Status 200, ) - historicalEventIDs := getEventsFromBatchSendResponse(t, backfillRes) + backfillResBody := client.ParseJSON(t, backfillRes) + historicalEventIDs := getEventsFromBatchSendResponseBody(t, backfillResBody) backfilledEventId := historicalEventIDs[0] // This is just a dummy event we search for after the backfilledEventId @@ -353,7 +357,8 @@ func TestBackfillingHistory(t *testing.T) { // Status 200, ) - historicalEventIDs := getEventsFromBatchSendResponse(t, backfillRes) + backfillResBody := client.ParseJSON(t, backfillRes) + historicalEventIDs := getEventsFromBatchSendResponseBody(t, backfillResBody) // Join the room from a remote homeserver after the backfilled messages were sent remoteCharlie.JoinRoom(t, roomID, []string{"hs1"}) @@ -411,7 +416,8 @@ func TestBackfillingHistory(t *testing.T) { // Status 200, ) - historicalEventIDs := getEventsFromBatchSendResponse(t, backfillRes) + backfillResBody := client.ParseJSON(t, backfillRes) + historicalEventIDs := getEventsFromBatchSendResponseBody(t, backfillResBody) messagesRes := remoteCharlie.MustDoFunc(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, client.WithContentType("application/json"), client.WithQueries(url.Values{ "dir": []string{"b"}, @@ -467,7 +473,8 @@ func TestBackfillingHistory(t *testing.T) { // Status 200, ) - historicalEventIDs := getEventsFromBatchSendResponse(t, backfillRes) + backfillResBody := client.ParseJSON(t, backfillRes) + historicalEventIDs := getEventsFromBatchSendResponseBody(t, backfillResBody) 
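// A minimal sketch of the read-once pattern this patch adopts, using helpers
// that already exist in the repo (client.ParseJSON, client.GetJSONFieldStr,
// client.GetJSONFieldStringArray): drain res.Body into a byte slice a single
// time, then extract every field from those same bytes,
//
//	body := client.ParseJSON(t, res)                                // reads res.Body once
//	eventIDs := client.GetJSONFieldStringArray(t, body, "events")   // first field, from bytes
//	nextChunkID := client.GetJSONFieldStr(t, body, "next_chunk_id") // second field, same bytes
//
// instead of re-wrapping res.Body with ioutil.NopCloser(bytes.NewBuffer(body))
// between reads.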
messagesRes := remoteCharlie.MustDoFunc(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, client.WithContentType("application/json"), client.WithQueries(url.Values{ "dir": []string{"b"}, @@ -637,21 +644,13 @@ func batchSendHistoricalMessages( return res } -func getEventsFromBatchSendResponse(t *testing.T, res *http.Response) (eventIDs []string) { - body := client.ParseJSON(t, res) - // Since the original body can only be read once, create a new one from the body bytes we just read - res.Body = ioutil.NopCloser(bytes.NewBuffer(body)) - +func getEventsFromBatchSendResponseBody(t *testing.T, body []byte) (eventIDs []string) { eventIDs = client.GetJSONFieldStringArray(t, body, "events") return eventIDs } -func getNextChunkIdFromBatchSendResponse(t *testing.T, res *http.Response) (nextChunkID string) { - body := client.ParseJSON(t, res) - // Since the original body can only be read once, create a new one from the body bytes we just read - res.Body = ioutil.NopCloser(bytes.NewBuffer(body)) - +func getNextChunkIdFromBatchSendResponseBody(t *testing.T, body []byte) (nextChunkID string) { nextChunkID = client.GetJSONFieldStr(t, body, "next_chunk_id") return nextChunkID From 322403027acc1eb6824f0069c41afa7185ba3254 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Tue, 6 Jul 2021 21:11:55 -0500 Subject: [PATCH 73/81] Move comment to relevant test --- tests/msc2716_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go index 5dccb535..2c5e42f7 100644 --- a/tests/msc2716_test.go +++ b/tests/msc2716_test.go @@ -46,7 +46,6 @@ var ( markerInsertionPrevEventsContentField = "org.matrix.msc2716.marker.insertion_prev_events" ) -// Test that the message events we insert between A and B come back in the correct order from /messages func TestBackfillingHistory(t *testing.T) { deployment := Deploy(t, b.BlueprintHSWithApplicationService) defer deployment.Destroy(t) @@ -67,6 +66,8 @@ func TestBackfillingHistory(t *testing.T) { virtualUserID := fmt.Sprintf("@%s:hs1", virtualUserLocalpart) t.Run("parallel", func(t *testing.T) { + // Test that the message events we insert between A and B come back in the correct order from /messages + // // Final timeline output: ( [n] = historical chunk ) // (oldest) A, B, [insertion, c, d, e] [insertion, f, g, h, insertion], I, J (newest) // chunk 1 chunk 0 From 3ac2176948cad09c7c53f66efd073b48c8a36e89 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Tue, 6 Jul 2021 21:53:49 -0500 Subject: [PATCH 74/81] Backfill to batch send rename --- tests/msc2716_test.go | 44 +++++++++++++++++++++---------------------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go index 2c5e42f7..0bfabd8e 100644 --- a/tests/msc2716_test.go +++ b/tests/msc2716_test.go @@ -104,7 +104,7 @@ func TestBackfillingHistory(t *testing.T) { ensureVirtualUserRegistered(t, as, virtualUserLocalpart) // Insert the most recent chunk of backfilled history - backfillRes := batchSendHistoricalMessages( + batchSendRes := batchSendHistoricalMessages( t, as, []string{virtualUserID}, @@ -116,13 +116,13 @@ func TestBackfillingHistory(t *testing.T) { // Status 200, ) - backfillResBody := client.ParseJSON(t, backfillRes) - historicalEventIDs := getEventsFromBatchSendResponseBody(t, backfillResBody) - nextChunkID := getNextChunkIdFromBatchSendResponseBody(t, backfillResBody) + batchSendResBody := client.ParseJSON(t, batchSendRes) + historicalEventIDs := 
getEventsFromBatchSendResponseBody(t, batchSendResBody) + nextChunkID := getNextChunkIdFromBatchSendResponseBody(t, batchSendResBody) // Insert another older chunk of backfilled history from the same user. // Make sure the meta data and joins still work on the subsequent chunk - backfillRes2 := batchSendHistoricalMessages( + batchSendRes2 := batchSendHistoricalMessages( t, as, []string{virtualUserID}, @@ -134,8 +134,8 @@ func TestBackfillingHistory(t *testing.T) { // Status 200, ) - backfillResBody2 := client.ParseJSON(t, backfillRes2) - historicalEventIDs2 := getEventsFromBatchSendResponseBody(t, backfillResBody2) + batchSendResBody2 := client.ParseJSON(t, batchSendRes2) + historicalEventIDs2 := getEventsFromBatchSendResponseBody(t, batchSendResBody2) var expectedEventIDOrder []string expectedEventIDOrder = append(expectedEventIDOrder, eventIDsBefore...) @@ -201,7 +201,7 @@ func TestBackfillingHistory(t *testing.T) { // Insert a backfilled event virtualUserID2 := "@ricky:hs1" virtualUserID3 := "@carol:hs1" - backfillRes := batchSendHistoricalMessages( + batchSendRes := batchSendHistoricalMessages( t, as, []string{virtualUserID, virtualUserID2, virtualUserID3}, @@ -213,8 +213,8 @@ func TestBackfillingHistory(t *testing.T) { // Status 200, ) - backfillResBody := client.ParseJSON(t, backfillRes) - historicalEventIDs := getEventsFromBatchSendResponseBody(t, backfillResBody) + batchSendResBody := client.ParseJSON(t, batchSendRes) + historicalEventIDs := getEventsFromBatchSendResponseBody(t, batchSendResBody) messagesRes := alice.MustDoFunc(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, client.WithContentType("application/json"), client.WithQueries(url.Values{ "dir": []string{"b"}, @@ -245,7 +245,7 @@ func TestBackfillingHistory(t *testing.T) { createMessagesInRoom(t, alice, roomID, 5) // Insert a backfilled event - backfillRes := batchSendHistoricalMessages( + batchSendRes := batchSendHistoricalMessages( t, as, []string{virtualUserID}, @@ -257,8 +257,8 @@ func TestBackfillingHistory(t *testing.T) { // Status 200, ) - backfillResBody := client.ParseJSON(t, backfillRes) - historicalEventIDs := getEventsFromBatchSendResponseBody(t, backfillResBody) + batchSendResBody := client.ParseJSON(t, batchSendRes) + historicalEventIDs := getEventsFromBatchSendResponseBody(t, batchSendResBody) backfilledEventId := historicalEventIDs[0] // This is just a dummy event we search for after the backfilledEventId @@ -346,7 +346,7 @@ func TestBackfillingHistory(t *testing.T) { // Register and join the virtual user ensureVirtualUserRegistered(t, as, virtualUserLocalpart) - backfillRes := batchSendHistoricalMessages( + batchSendRes := batchSendHistoricalMessages( t, as, []string{virtualUserID}, @@ -358,8 +358,8 @@ func TestBackfillingHistory(t *testing.T) { // Status 200, ) - backfillResBody := client.ParseJSON(t, backfillRes) - historicalEventIDs := getEventsFromBatchSendResponseBody(t, backfillResBody) + batchSendResBody := client.ParseJSON(t, batchSendRes) + historicalEventIDs := getEventsFromBatchSendResponseBody(t, batchSendResBody) // Join the room from a remote homeserver after the backfilled messages were sent remoteCharlie.JoinRoom(t, roomID, []string{"hs1"}) @@ -405,7 +405,7 @@ func TestBackfillingHistory(t *testing.T) { // Register and join the virtual user ensureVirtualUserRegistered(t, as, virtualUserLocalpart) - backfillRes := batchSendHistoricalMessages( + batchSendRes := batchSendHistoricalMessages( t, as, []string{virtualUserID}, @@ -417,8 +417,8 @@ func 
TestBackfillingHistory(t *testing.T) { // Status 200, ) - backfillResBody := client.ParseJSON(t, backfillRes) - historicalEventIDs := getEventsFromBatchSendResponseBody(t, backfillResBody) + batchSendResBody := client.ParseJSON(t, batchSendRes) + historicalEventIDs := getEventsFromBatchSendResponseBody(t, batchSendResBody) messagesRes := remoteCharlie.MustDoFunc(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, client.WithContentType("application/json"), client.WithQueries(url.Values{ "dir": []string{"b"}, @@ -462,7 +462,7 @@ func TestBackfillingHistory(t *testing.T) { })) // Historical messages are inserted where we have already scrolled back to - backfillRes := batchSendHistoricalMessages( + batchSendRes := batchSendHistoricalMessages( t, as, []string{virtualUserID}, @@ -474,8 +474,8 @@ func TestBackfillingHistory(t *testing.T) { // Status 200, ) - backfillResBody := client.ParseJSON(t, backfillRes) - historicalEventIDs := getEventsFromBatchSendResponseBody(t, backfillResBody) + batchSendResBody := client.ParseJSON(t, batchSendRes) + historicalEventIDs := getEventsFromBatchSendResponseBody(t, batchSendResBody) messagesRes := remoteCharlie.MustDoFunc(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, client.WithContentType("application/json"), client.WithQueries(url.Values{ "dir": []string{"b"}, From 1d09b18d5d1b5d193c640c8d2f1a97b9b0a33518 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Thu, 8 Jul 2021 20:24:35 -0500 Subject: [PATCH 75/81] Make sure to register all virtual users --- tests/msc2716_test.go | 20 +++++++------------- 1 file changed, 7 insertions(+), 13 deletions(-) diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go index 0bfabd8e..2ef46323 100644 --- a/tests/msc2716_test.go +++ b/tests/msc2716_test.go @@ -64,6 +64,8 @@ func TestBackfillingHistory(t *testing.T) { virtualUserLocalpart := "maria" virtualUserID := fmt.Sprintf("@%s:hs1", virtualUserLocalpart) + // Register and join the virtual user + ensureVirtualUserRegistered(t, as, virtualUserLocalpart) t.Run("parallel", func(t *testing.T) { // Test that the message events we insert between A and B come back in the correct order from /messages @@ -100,9 +102,6 @@ func TestBackfillingHistory(t *testing.T) { // inserted history later. 
eventIDsAfter := createMessagesInRoom(t, alice, roomID, 2) - // Register and join the virtual user - ensureVirtualUserRegistered(t, as, virtualUserLocalpart) - // Insert the most recent chunk of backfilled history batchSendRes := batchSendHistoricalMessages( t, @@ -198,9 +197,13 @@ func TestBackfillingHistory(t *testing.T) { eventIdBefore := eventIDsBefore[0] timeAfterEventBefore := time.Now() - // Insert a backfilled event + // Register and join the other virtual users virtualUserID2 := "@ricky:hs1" + ensureVirtualUserRegistered(t, as, "ricky") virtualUserID3 := "@carol:hs1" + ensureVirtualUserRegistered(t, as, "carol") + + // Insert a backfilled event batchSendRes := batchSendHistoricalMessages( t, as, @@ -343,9 +346,6 @@ func TestBackfillingHistory(t *testing.T) { // eventIDsAfter createMessagesInRoom(t, alice, roomID, 3) - // Register and join the virtual user - ensureVirtualUserRegistered(t, as, virtualUserLocalpart) - batchSendRes := batchSendHistoricalMessages( t, as, @@ -402,9 +402,6 @@ func TestBackfillingHistory(t *testing.T) { "limit": []string{"5"}, })) - // Register and join the virtual user - ensureVirtualUserRegistered(t, as, virtualUserLocalpart) - batchSendRes := batchSendHistoricalMessages( t, as, @@ -451,9 +448,6 @@ func TestBackfillingHistory(t *testing.T) { // eventIDsAfter createMessagesInRoom(t, alice, roomID, 3) - // Register and join the virtual user - ensureVirtualUserRegistered(t, as, virtualUserLocalpart) - // Mimic scrollback to all of the messages // scrollbackMessagesRes remoteCharlie.MustDoFunc(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, client.WithContentType("application/json"), client.WithQueries(url.Values{ From 9c68cb568ee11187a297477d35df3ba3f3cc334c Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Fri, 9 Jul 2021 17:34:29 -0500 Subject: [PATCH 76/81] Add homeserver name to client request logging to differentiate who/where Before: ``` === CONT TestBackfillingHistory client.go:356: GET /_matrix/client/r0/sync => 200 OK (31.496008ms) client.go:356: POST /_matrix/client/unstable/org.matrix.msc2716/rooms/!GkmAGvcDmllsuqLgZA:hs1/batch_send => 200 OK (96.308307ms) client.go:356: POST /_matrix/client/r0/join/!GkmAGvcDmllsuqLgZA:hs1 => 200 OK (808.747133ms) client.go:356: GET /_matrix/client/r0/rooms/!GkmAGvcDmllsuqLgZA:hs1/messages => 200 OK (83.415512ms) ``` After: ``` === CONT TestBackfillingHistory client.go:357: GET hs1/_matrix/client/r0/sync => 200 OK (29.885812ms) client.go:357: POST hs1/_matrix/client/unstable/org.matrix.msc2716/rooms/!jbwgZJKNOedwNWRdop:hs1/batch_send => 200 OK (96.173807ms) client.go:357: POST hs2/_matrix/client/r0/join/!jbwgZJKNOedwNWRdop:hs1 => 200 OK (808.849665ms) client.go:357: GET hs2/_matrix/client/r0/rooms/!jbwgZJKNOedwNWRdop:hs1/messages => 200 OK (73.667196ms) ``` --- internal/client/client.go | 13 +++++++------ internal/docker/deployment.go | 4 ++-- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/internal/client/client.go b/internal/client/client.go index 0fe79b69..06125615 100644 --- a/internal/client/client.go +++ b/internal/client/client.go @@ -327,7 +327,7 @@ func (c *CSAPI) DoFunc(t *testing.T, method string, paths []string, opts ...Requ } // NewLoggedClient returns an http.Client which logs requests/responses -func NewLoggedClient(t *testing.T, cli *http.Client) *http.Client { +func NewLoggedClient(t *testing.T, hsName string, cli *http.Client) *http.Client { t.Helper() if cli == nil { cli = &http.Client{ @@ -338,22 +338,23 @@ func NewLoggedClient(t *testing.T, cli 
*http.Client) *http.Client { if transport == nil { transport = http.DefaultTransport } - cli.Transport = &loggedRoundTripper{t, transport} + cli.Transport = &loggedRoundTripper{t, hsName, transport} return cli } type loggedRoundTripper struct { - t *testing.T - wrap http.RoundTripper + t *testing.T + hsName string + wrap http.RoundTripper } func (t *loggedRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { start := time.Now() res, err := t.wrap.RoundTrip(req) if err != nil { - t.t.Logf("%s %s => error: %s (%s)", req.Method, req.URL.Path, err, time.Since(start)) + t.t.Logf("%s %s%s => error: %s (%s)", req.Method, t.hsName, req.URL.Path, err, time.Since(start)) } else { - t.t.Logf("%s %s => %s (%s)", req.Method, req.URL.Path, res.Status, time.Since(start)) + t.t.Logf("%s %s%s => %s (%s)", req.Method, t.hsName, req.URL.Path, res.Status, time.Since(start)) } return res, err } diff --git a/internal/docker/deployment.go b/internal/docker/deployment.go index 6b303afa..0e4082fa 100644 --- a/internal/docker/deployment.go +++ b/internal/docker/deployment.go @@ -53,7 +53,7 @@ func (d *Deployment) Client(t *testing.T, hsName, userID string) *client.CSAPI { UserID: userID, AccessToken: token, BaseURL: dep.BaseURL, - Client: client.NewLoggedClient(t, nil), + Client: client.NewLoggedClient(t, hsName, nil), SyncUntilTimeout: 5 * time.Second, Debug: d.Deployer.debugLogging, } @@ -69,7 +69,7 @@ func (d *Deployment) RegisterUser(t *testing.T, hsName, localpart, password stri } client := &client.CSAPI{ BaseURL: dep.BaseURL, - Client: client.NewLoggedClient(t, nil), + Client: client.NewLoggedClient(t, hsName, nil), SyncUntilTimeout: 5 * time.Second, Debug: d.Deployer.debugLogging, } From 1f780ec8ad9429f2d1b1099dbcfda468f76fedb6 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Fri, 9 Jul 2021 20:18:55 -0500 Subject: [PATCH 77/81] Allow subsequent joiners to be able to see the message history --- tests/msc2716_test.go | 35 ++++++++++++++++++++++++++++------- 1 file changed, 28 insertions(+), 7 deletions(-) diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go index 2ef46323..2eaca92c 100644 --- a/tests/msc2716_test.go +++ b/tests/msc2716_test.go @@ -189,7 +189,10 @@ func TestBackfillingHistory(t *testing.T) { t.Run("Backfilled historical events from multiple users in the same chunk", func(t *testing.T) { t.Parallel() - roomID := as.CreateRoom(t, struct{}{}) + roomID := as.CreateRoom(t, map[string]interface{}{ + "preset": "public_chat", + "name": "the hangout spot", + }) alice.JoinRoom(t, roomID, nil) // Create the "live" event we are going to insert our backfilled events next to @@ -236,7 +239,10 @@ func TestBackfillingHistory(t *testing.T) { t.Run("Backfilled historical events with m.historical do not come down in an incremental sync", func(t *testing.T) { t.Parallel() - roomID := as.CreateRoom(t, struct{}{}) + roomID := as.CreateRoom(t, map[string]interface{}{ + "preset": "public_chat", + "name": "the hangout spot", + }) alice.JoinRoom(t, roomID, nil) // Create the "live" event we are going to insert our backfilled events next to @@ -283,7 +289,10 @@ func TestBackfillingHistory(t *testing.T) { t.Run("Unrecognised prev_event ID will throw an error", func(t *testing.T) { t.Parallel() - roomID := as.CreateRoom(t, struct{}{}) + roomID := as.CreateRoom(t, map[string]interface{}{ + "preset": "public_chat", + "name": "the hangout spot", + }) batchSendHistoricalMessages( t, @@ -305,7 +314,10 @@ func TestBackfillingHistory(t *testing.T) { t.Run("Normal users aren't allowed to backfill messages", 
func(t *testing.T) { t.Parallel() - roomID := as.CreateRoom(t, struct{}{}) + roomID := as.CreateRoom(t, map[string]interface{}{ + "preset": "public_chat", + "name": "the hangout spot", + }) alice.JoinRoom(t, roomID, nil) eventIDsBefore := createMessagesInRoom(t, alice, roomID, 1) @@ -336,7 +348,10 @@ func TestBackfillingHistory(t *testing.T) { t.Skip("Skipping until federation is implemented") t.Parallel() - roomID := as.CreateRoom(t, struct{}{}) + roomID := as.CreateRoom(t, map[string]interface{}{ + "preset": "public_chat", + "name": "the hangout spot", + }) alice.JoinRoom(t, roomID, nil) eventIDsBefore := createMessagesInRoom(t, alice, roomID, 1) @@ -382,7 +397,10 @@ func TestBackfillingHistory(t *testing.T) { t.Skip("Skipping until federation is implemented") t.Parallel() - roomID := as.CreateRoom(t, struct{}{}) + roomID := as.CreateRoom(t, map[string]interface{}{ + "preset": "public_chat", + "name": "the hangout spot", + }) alice.JoinRoom(t, roomID, nil) // Join the room from a remote homeserver before any backfilled messages are sent @@ -435,7 +453,10 @@ func TestBackfillingHistory(t *testing.T) { t.Skip("Skipping until federation is implemented") t.Parallel() - roomID := as.CreateRoom(t, struct{}{}) + roomID := as.CreateRoom(t, map[string]interface{}{ + "preset": "public_chat", + "name": "the hangout spot", + }) alice.JoinRoom(t, roomID, nil) // Join the room from a remote homeserver before any backfilled messages are sent From 425206d250be130c9477cb50438b55dcc1eac14c Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Tue, 13 Jul 2021 19:12:37 -0500 Subject: [PATCH 78/81] Some cleanup and improving federation tests --- tests/msc2716_test.go | 88 +++++++++++++++++++++++++++++++++++++------ 1 file changed, 77 insertions(+), 11 deletions(-) diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go index 2eaca92c..424fb4a5 100644 --- a/tests/msc2716_test.go +++ b/tests/msc2716_test.go @@ -39,11 +39,9 @@ var ( insertionEventType = "org.matrix.msc2716.insertion" markerEventType = "org.matrix.msc2716.marker" - historicalContentField = "org.matrix.msc2716.historical" - nextChunkIdContentField = "org.matrix.msc2716.next_chunk_id" - chunkIdContentField = "org.matrix.msc2716.chunk_id" - markerInsertionContentField = "org.matrix.msc2716.marker.insertion" - markerInsertionPrevEventsContentField = "org.matrix.msc2716.marker.insertion_prev_events" + historicalContentField = "org.matrix.msc2716.historical" + nextChunkIDContentField = "org.matrix.msc2716.next_chunk_id" + markerInsertionContentField = "org.matrix.msc2716.marker.insertion" ) func TestBackfillingHistory(t *testing.T) { @@ -166,7 +164,7 @@ func TestBackfillingHistory(t *testing.T) { JSON: []match.JSON{ match.JSONArrayEach("chunk", func(r gjson.Result) error { // Find all events in order - if len(r.Get("content").Get("body").Str) > 0 || r.Get("type").Str == insertionEventType || r.Get("type").Str == markerEventType { + if isRelevantEvent(r) { // Pop the next message off the expected list nextEventIdInOrder := workingExpectedEventIDOrder[0] workingExpectedEventIDOrder = workingExpectedEventIDOrder[1:] @@ -229,7 +227,7 @@ func TestBackfillingHistory(t *testing.T) { must.MatchResponse(t, messagesRes, match.HTTPResponse{ JSON: []match.JSON{ - match.JSONCheckOffAllowUnwanted("chunk", []interface{}{historicalEventIDs[0], historicalEventIDs[1], historicalEventIDs[2]}, func(r gjson.Result) interface{} { + match.JSONCheckOffAllowUnwanted("chunk", makeInterfaceSlice(historicalEventIDs), func(r gjson.Result) interface{} { return 
r.Get("event_id").Str }, nil), }, @@ -379,6 +377,8 @@ func TestBackfillingHistory(t *testing.T) { // Join the room from a remote homeserver after the backfilled messages were sent remoteCharlie.JoinRoom(t, roomID, []string{"hs1"}) + // TODO: I think we need to update this to be similar to + // SyncUntilTimelineHas but going back in time because this can be flakey messagesRes := remoteCharlie.MustDoFunc(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, client.WithContentType("application/json"), client.WithQueries(url.Values{ "dir": []string{"b"}, "limit": []string{"100"}, @@ -386,7 +386,7 @@ func TestBackfillingHistory(t *testing.T) { must.MatchResponse(t, messagesRes, match.HTTPResponse{ JSON: []match.JSON{ - match.JSONCheckOffAllowUnwanted("chunk", []interface{}{historicalEventIDs[0], historicalEventIDs[1]}, func(r gjson.Result) interface{} { + match.JSONCheckOffAllowUnwanted("chunk", makeInterfaceSlice(historicalEventIDs), func(r gjson.Result) interface{} { return r.Get("event_id").Str }, nil), }, @@ -434,6 +434,57 @@ func TestBackfillingHistory(t *testing.T) { ) batchSendResBody := client.ParseJSON(t, batchSendRes) historicalEventIDs := getEventsFromBatchSendResponseBody(t, batchSendResBody) + baseInsertionEventID := historicalEventIDs[len(historicalEventIDs)-1] + + // 2 historical events + 2 insertion events + if len(historicalEventIDs) != 4 { + t.Fatalf("Expected eventID list should be length 15 but saw %d: %s", len(historicalEventIDs), historicalEventIDs) + } + + beforeMarkerMessagesRes := remoteCharlie.MustDoFunc(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, client.WithContentType("application/json"), client.WithQueries(url.Values{ + "dir": []string{"b"}, + "limit": []string{"100"}, + })) + beforeMarkerMesssageResBody := client.ParseJSON(t, beforeMarkerMessagesRes) + eventDebugStringsFromBeforeMarkerResponse := getRelevantEventDebugStringsFromMessagesResponse(t, beforeMarkerMesssageResBody) + // Since the original body can only be read once, create a new one from the body bytes we just read + beforeMarkerMessagesRes.Body = ioutil.NopCloser(bytes.NewBuffer(beforeMarkerMesssageResBody)) + + // Make sure the history isn't visible before we expect it to be there. + // This is to avoid some bug in the homeserver using some unknown + // mechanism to distribute the historical messages to other homeservers. + must.MatchResponse(t, beforeMarkerMessagesRes, match.HTTPResponse{ + JSON: []match.JSON{ + match.JSONArrayEach("chunk", func(r gjson.Result) error { + // Throw if we find one of the historical events in the message response + for _, historicalEventID := range historicalEventIDs { + if r.Get("event_id").Str == historicalEventID { + return fmt.Errorf("Historical event (%s) found on remote homeserver before marker event was sent out\nmessage response (%d): %v\nhistoricalEventIDs (%d): %v", historicalEventID, len(eventDebugStringsFromBeforeMarkerResponse), eventDebugStringsFromBeforeMarkerResponse, len(historicalEventIDs), historicalEventIDs) + } + } + + return nil + }), + }, + }) + + // Send a marker event to let all of the homeservers know about the + // insertion point where all of the historical messages are at + markerEvent := b.Event{ + Type: markerEventType, + Content: map[string]interface{}{ + markerInsertionContentField: baseInsertionEventID, + }, + } + // We can't use as.SendEventSynced(...) 
because application services can't use the /sync API + markerSendRes := as.MustDoFunc(t, "PUT", []string{"_matrix", "client", "r0", "rooms", roomID, "send", markerEvent.Type, "txn-m123"}, client.WithJSONBody(t, markerEvent.Content)) + markerSendBody := client.ParseJSON(t, markerSendRes) + markerEventID := client.GetJSONFieldStr(t, markerSendBody, "event_id") + + // Make sure the marker event has reached the remote homeserver + remoteCharlie.SyncUntilTimelineHas(t, roomID, func(ev gjson.Result) bool { + return ev.Get("event_id").Str == markerEventID + }) messagesRes := remoteCharlie.MustDoFunc(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, client.WithContentType("application/json"), client.WithQueries(url.Values{ "dir": []string{"b"}, @@ -442,7 +493,7 @@ func TestBackfillingHistory(t *testing.T) { must.MatchResponse(t, messagesRes, match.HTTPResponse{ JSON: []match.JSON{ - match.JSONCheckOffAllowUnwanted("chunk", []interface{}{historicalEventIDs[0], historicalEventIDs[1]}, func(r gjson.Result) interface{} { + match.JSONCheckOffAllowUnwanted("chunk", makeInterfaceSlice(historicalEventIDs), func(r gjson.Result) interface{} { return r.Get("event_id").Str }, nil), }, @@ -492,6 +543,8 @@ func TestBackfillingHistory(t *testing.T) { batchSendResBody := client.ParseJSON(t, batchSendRes) historicalEventIDs := getEventsFromBatchSendResponseBody(t, batchSendResBody) + // TODO: Send marker event + messagesRes := remoteCharlie.MustDoFunc(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, client.WithContentType("application/json"), client.WithQueries(url.Values{ "dir": []string{"b"}, "limit": []string{"100"}, @@ -499,7 +552,7 @@ func TestBackfillingHistory(t *testing.T) { must.MatchResponse(t, messagesRes, match.HTTPResponse{ JSON: []match.JSON{ - match.JSONCheckOffAllowUnwanted("chunk", []interface{}{historicalEventIDs[0], historicalEventIDs[1]}, func(r gjson.Result) interface{} { + match.JSONCheckOffAllowUnwanted("chunk", makeInterfaceSlice(historicalEventIDs), func(r gjson.Result) interface{} { return r.Get("event_id").Str }, nil), }, @@ -508,6 +561,15 @@ func TestBackfillingHistory(t *testing.T) { }) } +func makeInterfaceSlice(slice []string) []interface{} { + interfaceSlice := make([]interface{}, len(slice)) + for i := range slice { + interfaceSlice[i] = slice[i] + } + + return interfaceSlice +} + func reversed(in []string) []string { out := make([]string, len(in)) for i := 0; i < len(in); i++ { @@ -516,6 +578,10 @@ func reversed(in []string) []string { return out } +func isRelevantEvent(r gjson.Result) bool { + return len(r.Get("content").Get("body").Str) > 0 || r.Get("type").Str == insertionEventType || r.Get("type").Str == markerEventType +} + func getRelevantEventDebugStringsFromMessagesResponse(t *testing.T, body []byte) (eventIDsFromResponse []string) { t.Helper() @@ -529,7 +595,7 @@ func getRelevantEventDebugStringsFromMessagesResponse(t *testing.T, body []byte) } res.ForEach(func(key, r gjson.Result) bool { - if len(r.Get("content").Get("body").Str) > 0 || r.Get("type").Str == insertionEventType || r.Get("type").Str == markerEventType { + if isRelevantEvent(r) { eventIDsFromResponse = append(eventIDsFromResponse, r.Get("event_id").Str+" ("+r.Get("content").Get("body").Str+")") } return true From 53f5347ed43703c1eb775b0c0af8b11c320b1206 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 14 Jul 2021 02:28:02 -0500 Subject: [PATCH 79/81] Add another test for pre-made insertion event --- tests/msc2716_test.go | 71 
++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 70 insertions(+), 1 deletion(-) diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go index 424fb4a5..041f0e5b 100644 --- a/tests/msc2716_test.go +++ b/tests/msc2716_test.go @@ -342,7 +342,7 @@ func TestBackfillingHistory(t *testing.T) { // TODO: Try adding avatar and displayName and see if historical messages get this info }) - t.Run("Historical messages are visible when joining on federated server", func(t *testing.T) { + t.Run("Historical messages are visible when joining on federated server - auto-generated base insertion event", func(t *testing.T) { t.Skip("Skipping until federation is implemented") t.Parallel() @@ -393,6 +393,75 @@ func TestBackfillingHistory(t *testing.T) { }) }) + t.Run("Historical messages are visible when joining on federated server - pre-made insertion event", func(t *testing.T) { + t.Skip("Skipping until federation is implemented") + t.Parallel() + + roomID := as.CreateRoom(t, map[string]interface{}{ + "preset": "public_chat", + "name": "the hangout spot", + }) + alice.JoinRoom(t, roomID, nil) + + eventIDsBefore := createMessagesInRoom(t, alice, roomID, 1) + eventIdBefore := eventIDsBefore[0] + timeAfterEventBefore := time.Now() + + // Create insertion event in the normal DAG + chunkId := "mynextchunkid123" + insertionEvent := b.Event{ + Type: insertionEventType, + Content: map[string]interface{}{ + nextChunkIDContentField: chunkId, + historicalContentField: true, + }, + } + // We can't use as.SendEventSynced(...) because application services can't use the /sync API + insertionSendRes := as.MustDoFunc(t, "PUT", []string{"_matrix", "client", "r0", "rooms", roomID, "send", insertionEvent.Type, "txn-m123"}, client.WithJSONBody(t, insertionEvent.Content)) + insertionSendBody := client.ParseJSON(t, insertionSendRes) + insertionEventID := client.GetJSONFieldStr(t, insertionSendBody, "event_id") + // Make sure the insertion event has reached the homeserver + alice.SyncUntilTimelineHas(t, roomID, func(ev gjson.Result) bool { + return ev.Get("event_id").Str == insertionEventID + }) + + // eventIDsAfter + createMessagesInRoom(t, alice, roomID, 3) + + batchSendRes := batchSendHistoricalMessages( + t, + as, + []string{virtualUserID}, + roomID, + eventIdBefore, + timeAfterEventBefore, + chunkId, + 2, + // Status + 200, + ) + batchSendResBody := client.ParseJSON(t, batchSendRes) + historicalEventIDs := getEventsFromBatchSendResponseBody(t, batchSendResBody) + + // Join the room from a remote homeserver after the backfilled messages were sent + remoteCharlie.JoinRoom(t, roomID, []string{"hs1"}) + + // TODO: I think we need to update this to be similar to + // SyncUntilTimelineHas but going back in time because this can be flakey + messagesRes := remoteCharlie.MustDoFunc(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, client.WithContentType("application/json"), client.WithQueries(url.Values{ + "dir": []string{"b"}, + "limit": []string{"100"}, + })) + + must.MatchResponse(t, messagesRes, match.HTTPResponse{ + JSON: []match.JSON{ + match.JSONCheckOffAllowUnwanted("chunk", makeInterfaceSlice(historicalEventIDs), func(r gjson.Result) interface{} { + return r.Get("event_id").Str + }, nil), + }, + }) + }) + t.Run("Historical messages are visible when already joined on federated server", func(t *testing.T) { t.Skip("Skipping until federation is implemented") t.Parallel() From 991e91c790254d084dbab83e4ed65d4d6600df82 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 14 Jul 2021 23:34:22 
-0500
Subject: [PATCH 80/81] Make federation tests more robust

---
 tests/msc2716_test.go | 58 ++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 54 insertions(+), 4 deletions(-)

diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go
index 041f0e5b..3ae6410b 100644
--- a/tests/msc2716_test.go
+++ b/tests/msc2716_test.go
@@ -377,8 +377,15 @@ func TestBackfillingHistory(t *testing.T) {
 			// Join the room from a remote homeserver after the backfilled messages were sent
 			remoteCharlie.JoinRoom(t, roomID, []string{"hs1"})
 
-			// TODO: I think we need to update this to be similar to
-			// SyncUntilTimelineHas but going back in time because this can be flakey
+			// Make sure all of the events have been backfilled
+			fetchUntilMessagesResponseHas(t, remoteCharlie, roomID, func(ev gjson.Result) bool {
+				if ev.Get("event_id").Str == eventIdBefore {
+					return true
+				}
+
+				return false
+			})
+
 			messagesRes := remoteCharlie.MustDoFunc(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, client.WithContentType("application/json"), client.WithQueries(url.Values{
 				"dir":   []string{"b"},
 				"limit": []string{"100"},
@@ -446,8 +453,15 @@ func TestBackfillingHistory(t *testing.T) {
 			// Join the room from a remote homeserver after the backfilled messages were sent
 			remoteCharlie.JoinRoom(t, roomID, []string{"hs1"})
 
-			// TODO: I think we need to update this to be similar to
-			// SyncUntilTimelineHas but going back in time because this can be flakey
+			// Make sure all of the events have been backfilled
+			fetchUntilMessagesResponseHas(t, remoteCharlie, roomID, func(ev gjson.Result) bool {
+				if ev.Get("event_id").Str == eventIdBefore {
+					return true
+				}
+
+				return false
+			})
+
 			messagesRes := remoteCharlie.MustDoFunc(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, client.WithContentType("application/json"), client.WithQueries(url.Values{
 				"dir":   []string{"b"},
 				"limit": []string{"100"},
@@ -647,6 +661,42 @@ func reversed(in []string) []string {
 	return out
 }
 
+func fetchUntilMessagesResponseHas(t *testing.T, c *client.CSAPI, roomID string, check func(gjson.Result) bool) {
+	t.Helper()
+	start := time.Now()
+	checkCounter := 0
+	for {
+		if time.Since(start) > c.SyncUntilTimeout {
+			t.Fatalf("fetchUntilMessagesResponseHas timed out. Called check function %d times", checkCounter)
+		}
+
+		messagesRes := c.MustDoFunc(t, "GET", []string{"_matrix", "client", "r0", "rooms", roomID, "messages"}, client.WithContentType("application/json"), client.WithQueries(url.Values{
+			"dir":   []string{"b"},
+			"limit": []string{"100"},
+		}))
+		messageResBody := client.ParseJSON(t, messagesRes)
+		wantKey := "chunk"
+		keyRes := gjson.GetBytes(messageResBody, wantKey)
+		if !keyRes.Exists() {
+			t.Fatalf("missing key '%s'", wantKey)
+		}
+		if !keyRes.IsArray() {
+			t.Fatalf("key '%s' is not an array (was %s)", wantKey, keyRes.Type)
+		}
+
+		events := keyRes.Array()
+		for _, ev := range events {
+			if check(ev) {
+				return
+			}
+		}
+
+		checkCounter++
+		// Add a slight delay so we don't hammer the messages endpoint
+		time.Sleep(500 * time.Millisecond)
+	}
+}
+
 func isRelevantEvent(r gjson.Result) bool {
 	return len(r.Get("content").Get("body").Str) > 0 || r.Get("type").Str == insertionEventType || r.Get("type").Str == markerEventType
 }
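The polling helper above replaces the TODO about flaky federation assertions with a deterministic wait. A typical call site, condensed from the hunks in this patch (`remoteCharlie` and `eventIdBefore` come from the surrounding test):

```go
// Block until the remote homeserver's /messages response contains the event
// the batch was anchored to, so the assertions that follow don't race the
// federation backfill.
fetchUntilMessagesResponseHas(t, remoteCharlie, roomID, func(ev gjson.Result) bool {
	return ev.Get("event_id").Str == eventIdBefore
})
```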
From 0b0355bb82ed1dd2e69fd919be74f0ad4b8ae76c Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Thu, 15 Jul 2021 10:29:43 -0500
Subject: [PATCH 81/81] Make chunk breakdown comment more clear

See https://github.com/matrix-org/complement/pull/68#discussion_r670360430

---
 tests/msc2716_test.go | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/tests/msc2716_test.go b/tests/msc2716_test.go
index 3ae6410b..8f99881b 100644
--- a/tests/msc2716_test.go
+++ b/tests/msc2716_test.go
@@ -142,7 +142,8 @@ func TestBackfillingHistory(t *testing.T) {
 			// Order events from newest to oldest
 			expectedEventIDOrder = reversed(expectedEventIDOrder)
 
-			// 2 eventIDsBefore + 6 historical events + 3 insertion events + 2 eventIDsAfter
+			// 2 eventIDsBefore + [1 insertion event + 3 historical events + 1 insertion event] + [3 historical events + 1 insertion event] + 2 eventIDsAfter
+			//                    ^ chunk1                                                        ^ chunk2
 			if len(expectedEventIDOrder) != 13 {
 				t.Fatalf("Expected eventID list should be length 13 but saw %d: %s", len(expectedEventIDOrder), expectedEventIDOrder)
 			}
@@ -519,7 +520,7 @@ func TestBackfillingHistory(t *testing.T) {
 			historicalEventIDs := getEventsFromBatchSendResponseBody(t, batchSendResBody)
 			baseInsertionEventID := historicalEventIDs[len(historicalEventIDs)-1]
 
-			// 2 historical events + 2 insertion events
+			// [1 insertion event + 2 historical events + 1 insertion event]
 			if len(historicalEventIDs) != 4 {
 				t.Fatalf("Expected eventID list should be length 4 but saw %d: %s", len(historicalEventIDs), historicalEventIDs)
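The hard-coded counts asserted in these tests follow directly from the chunk layout: each batch of `n` historical messages comes back with one insertion event, and the newest chunk additionally carries the auto-generated base insertion event. A small sketch of that arithmetic (`expectedEventCount` is illustrative only, not part of the test file):

```go
// expectedEventCount derives the assertion constants used in the tests.
// chunkSizes is ordered newest chunk first, matching the order in which the
// test sends its batches.
func expectedEventCount(numBefore, numAfter int, chunkSizes []int) int {
	total := numBefore + numAfter
	for i, size := range chunkSizes {
		total += size + 1 // n historical messages + 1 insertion event per chunk
		if i == 0 {
			total++ // the newest chunk also gets the base insertion event
		}
	}
	return total
}
```

For the two-chunk ordering test, `expectedEventCount(2, 2, []int{3, 3})` yields the 13 asserted above; for the single-chunk federation test, `expectedEventCount(0, 0, []int{2})` yields 4.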