author     S7evinK <tfaelligen@gmail.com>            2022-01-05 18:44:49 +0100
committer  GitHub <noreply@github.com>               2022-01-05 17:44:49 +0000
commit     161f14517669410d3e8207dc41eea5c9695f7e17 (patch)
tree       20db8ed83d92c688206242f84880ff2e35a1d5eb
parent     a47b12dc7d692e0ddd4aaa0801dafc9bb462aad9 (diff)
Add NATS JetStream support (#1866)
* Add NATS JetStream support
  Update shopify/sarama
* Fix addresses
* Don't change Addresses in Defaults
* Update saramajetstream
* Add missing error check
  Keep typing events for at least one minute
* Use all configured NATS addresses
* Update saramajetstream
* Try setting up with NATS
* Make sure NATS uses own persistent directory (TODO: make this configurable)
* Update go.mod/go.sum
* Jetstream package
* Various other refactoring
* Build fixes
* Config tweaks, make random jetstream storage path for CI
* Disable interest policies
* Try to sane default on jetstream base path
* Try to use in-memory for CI
* Restore storage/retention
* Update nats.go dependency
* Adapt changes to config
* Remove unneeded TopicFor
* Dep update
* Revert "Remove unneeded TopicFor"
  This reverts commit f5a4e4a339b6f94ec215778dca22204adaa893d1.
* Revert changes made to streams
* Fix build problems
* Update nats-server
* Update go.mod/go.sum
* Roomserver input API queuing using NATS
* Fix topic naming
* Prometheus metrics
* More refactoring to remove saramajetstream
* Add missing topic
* Don't try to populate map that doesn't exist
* Roomserver output topic
* Update go.mod/go.sum
* Message acknowledgements
* Ack tweaks
* Try to resume transaction re-sends
* Try to resume transaction re-sends
* Update to matrix-org/gomatrixserverlib@91dadfb
* Remove internal.PartitionStorer from components that don't consume keychanges
* Try to reduce re-allocations a bit in resolveConflictsV2
* Tweak delivery options on RS input
* Publish send-to-device messages into correct JetStream subject
* Async and sync roomserver input
* Update dendrite-config.yaml
* Remove roomserver tests for now (they need rewriting)
* Remove roomserver test again (was merged back in)
* Update documentation
* Docker updates
* More Docker updates
* Update Docker readme again
* Fix lint issues
* Send final event in `processEvent` synchronously (since this might stop Sytest from being so upset)
* Don't report event rejection errors via `/send`, since apparently this is upsetting tests that don't expect that
* Go 1.16 instead of Go 1.13 for upgrade tests and Complement
* Revert "Don't report event rejection errors via `/send`, since apparently this is upsetting tests that don't expect that"
  This reverts commit 368675283fc44501f227639811bdb16dd5deef8c.
* Don't report any errors on `/send` to see what fun that creates
* Fix panics on closed channel sends
* Enforce state key matches sender
* Do the same for leave
* Various tweaks to make tests happier

  Squashed commit of the following:

  commit 13f9028e7a63662759ce7c55504a9d2423058668
  Author: Neil Alexander <neilalexander@users.noreply.github.com>
  Date:   Tue Jan 4 15:47:14 2022 +0000

      Do the same for leave

  commit e6be7f05c349fafbdddfe818337a17a60c867be1
  Author: Neil Alexander <neilalexander@users.noreply.github.com>
  Date:   Tue Jan 4 15:33:42 2022 +0000

      Enforce state key matches sender

  commit 85ede6d64bf10ce9b91cdd6d80f87350ee55242f
  Author: Neil Alexander <neilalexander@users.noreply.github.com>
  Date:   Tue Jan 4 14:07:04 2022 +0000

      Fix panics on closed channel sends

  commit 9755494a98bed62450f8001d8128e40481d27e15
  Author: Neil Alexander <neilalexander@users.noreply.github.com>
  Date:   Tue Jan 4 13:38:22 2022 +0000

      Don't report any errors on `/send` to see what fun that creates

  commit 3bb4f87b5dd56882febb4db5621db484c8789b7c
  Author: Neil Alexander <neilalexander@users.noreply.github.com>
  Date:   Tue Jan 4 13:00:26 2022 +0000

      Revert "Don't report event rejection errors via `/send`, since apparently this is upsetting tests that don't expect that"
      This reverts commit 368675283fc44501f227639811bdb16dd5deef8c.

  commit fe2673ed7be9559eaca134424e403a4faca100b0
  Author: Neil Alexander <neilalexander@users.noreply.github.com>
  Date:   Tue Jan 4 12:09:34 2022 +0000

      Go 1.16 instead of Go 1.13 for upgrade tests and Complement

  commit 368675283fc44501f227639811bdb16dd5deef8c
  Author: Neil Alexander <neilalexander@users.noreply.github.com>
  Date:   Tue Jan 4 11:51:45 2022 +0000

      Don't report event rejection errors via `/send`, since apparently this is upsetting tests that don't expect that

  commit b028dfc08577bcf52e6cb498026e15fa5d46d07c
  Author: Neil Alexander <neilalexander@users.noreply.github.com>
  Date:   Tue Jan 4 10:29:08 2022 +0000

      Send final event in `processEvent` synchronously (since this might stop Sytest from being so upset)

* Merge in NATS Server v2.6.6 and nats.go v1.13 into the in-process connection fork
* Add `jetstream.WithJetStreamMessage` to make ack/nak-ing less messy, use process context in consumers
* Fix consumer component name in federation API
* Add comment explaining where streams are defined
* Tweaks to roomserver input with comments
* Finish that sentence that I apparently forgot to finish in INSTALL.md
* Bump version number of config to 2
* Add comments around asynchronous sends to roomserver in processEventWithMissingState
* More useful error message when the config version does not match
* Set version in generate-config
* Fix version in config.Defaults

Co-authored-by: Neil Alexander <neilalexander@users.noreply.github.com>
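One of the headline changes above is that, in monolith mode, Dendrite can now start a NATS JetStream node in-process instead of requiring Kafka. The sketch below shows the general shape of embedding NATS Server with JetStream enabled using the stock `nats-server` and `nats.go` APIs; it is an illustration only, and the actual wiring (including the in-process connection added by the matrix-org fork mentioned above) lives in `setup/jetstream/nats.go`.

```go
package main

import (
	"log"
	"time"

	natsserver "github.com/nats-io/nats-server/v2/server"
	"github.com/nats-io/nats.go"
)

func main() {
	// Start an embedded NATS server with JetStream enabled, persisting
	// streams under ./jetstream (roughly what a monolith would do when
	// no external addresses are configured). Option values are examples.
	ns, err := natsserver.NewServer(&natsserver.Options{
		ServerName: "DendriteNATS",
		JetStream:  true,
		StoreDir:   "./jetstream",
	})
	if err != nil {
		log.Fatal(err)
	}
	go ns.Start()
	if !ns.ReadyForConnections(5 * time.Second) {
		log.Fatal("NATS server did not start in time")
	}

	// Connect over loopback; the Dendrite fork instead connects in-process.
	nc, err := nats.Connect(ns.ClientURL())
	if err != nil {
		log.Fatal(err)
	}
	js, err := nc.JetStream()
	if err != nil {
		log.Fatal(err)
	}
	_ = js // hand the JetStream context to the components that need it
}
```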
-rw-r--r--  appservice/appservice.go | 6
-rw-r--r--  appservice/consumers/roomserver.go | 82
-rw-r--r--  appservice/storage/interface.go | 2
-rw-r--r--  build/docker/README.md | 23
-rw-r--r--  build/docker/config/dendrite.yaml (renamed from build/docker/config/dendrite-config.yaml) | 50
-rw-r--r--  build/docker/docker-compose.deps.yml | 42
-rw-r--r--  build/docker/docker-compose.monolith.yml | 23
-rw-r--r--  build/docker/docker-compose.polylith.yml | 65
-rwxr-xr-x  build/docker/postgres/create_db.sh | 2
-rw-r--r--  build/gobind-pinecone/monolith.go | 3
-rw-r--r--  build/gobind-yggdrasil/monolith.go | 3
-rw-r--r--  build/scripts/Complement.Dockerfile | 2
-rw-r--r--  clientapi/clientapi.go | 8
-rw-r--r--  clientapi/producers/syncapi.go | 21
-rw-r--r--  clientapi/routing/createroom.go | 1
-rw-r--r--  clientapi/routing/membership.go | 1
-rw-r--r--  clientapi/routing/profile.go | 4
-rw-r--r--  clientapi/routing/redaction.go | 2
-rw-r--r--  clientapi/routing/sendevent.go | 1
-rw-r--r--  clientapi/threepid/invites.go | 1
-rw-r--r--  cmd/dendrite-demo-libp2p/main.go | 3
-rw-r--r--  cmd/dendrite-demo-pinecone/main.go | 3
-rw-r--r--  cmd/dendrite-demo-yggdrasil/main.go | 3
-rw-r--r--  cmd/dendrite-upgrade-tests/main.go | 2
-rw-r--r--  cmd/dendritejs-pinecone/main.go | 3
-rw-r--r--  cmd/dendritejs/main.go | 3
-rw-r--r--  cmd/generate-config/main.go | 6
-rw-r--r--  dendrite-config.yaml | 79
-rw-r--r--  docs/INSTALL.md | 96
-rw-r--r--  eduserver/eduserver.go | 15
-rw-r--r--  eduserver/input/input.go | 51
-rw-r--r--  federationapi/consumers/eduserver.go | 361
-rw-r--r--  federationapi/consumers/keychange.go | 16
-rw-r--r--  federationapi/consumers/roomserver.go | 144
-rw-r--r--  federationapi/federationapi.go | 8
-rw-r--r--  federationapi/federationapi_keys_test.go | 2
-rw-r--r--  federationapi/federationapi_test.go | 5
-rw-r--r--  federationapi/internal/perform.go | 4
-rw-r--r--  federationapi/routing/join.go | 6
-rw-r--r--  federationapi/routing/leave.go | 10
-rw-r--r--  federationapi/routing/send.go | 10
-rw-r--r--  federationapi/routing/threepid.go | 3
-rw-r--r--  go.mod | 17
-rw-r--r--  go.sum | 70
-rw-r--r--  internal/test/config.go | 2
-rw-r--r--  keyserver/consumers/cross_signing.go | 3
-rw-r--r--  keyserver/keyserver.go | 6
-rw-r--r--  roomserver/api/input.go | 1
-rw-r--r--  roomserver/api/wrapper.go | 15
-rw-r--r--  roomserver/internal/api.go | 17
-rw-r--r--  roomserver/internal/input/input.go | 250
-rw-r--r--  roomserver/internal/input/input_events.go | 28
-rw-r--r--  roomserver/internal/input/input_fifo.go | 64
-rw-r--r--  roomserver/roomserver.go | 11
-rw-r--r--  roomserver/roomserver_test.go | 407
-rw-r--r--  setup/base/base.go | 2
-rw-r--r--  setup/config/config.go | 11
-rw-r--r--  setup/config/config_global.go | 8
-rw-r--r--  setup/config/config_jetstream.go | 40
-rw-r--r--  setup/config/config_kafka.go | 63
-rw-r--r--  setup/config/config_test.go | 2
-rw-r--r--  setup/jetstream/helpers.go | 11
-rw-r--r--  setup/jetstream/nats.go | 93
-rw-r--r--  setup/jetstream/streams.go | 61
-rw-r--r--  setup/kafka/kafka.go | 58
-rw-r--r--  setup/mscs/msc2836/msc2836.go | 2
-rw-r--r--  syncapi/consumers/clientapi.go | 98
-rw-r--r--  syncapi/consumers/eduserver_receipts.go | 92
-rw-r--r--  syncapi/consumers/eduserver_sendtodevice.go | 134
-rw-r--r--  syncapi/consumers/eduserver_typing.go | 106
-rw-r--r--  syncapi/consumers/keychange.go | 9
-rw-r--r--  syncapi/consumers/roomserver.go | 138
-rw-r--r--  syncapi/notifier/notifier_test.go | 10
-rw-r--r--  syncapi/syncapi.go | 19
-rw-r--r--  userapi/storage/accounts/interface.go | 2
75 files changed, 1323 insertions, 1702 deletions
diff --git a/appservice/appservice.go b/appservice/appservice.go
index 5f16c10b..924a609e 100644
--- a/appservice/appservice.go
+++ b/appservice/appservice.go
@@ -32,7 +32,7 @@ import (
roomserverAPI "github.com/matrix-org/dendrite/roomserver/api"
"github.com/matrix-org/dendrite/setup/base"
"github.com/matrix-org/dendrite/setup/config"
- "github.com/matrix-org/dendrite/setup/kafka"
+ "github.com/matrix-org/dendrite/setup/jetstream"
userapi "github.com/matrix-org/dendrite/userapi/api"
"github.com/sirupsen/logrus"
)
@@ -58,7 +58,7 @@ func NewInternalAPI(
},
},
}
- consumer, _ := kafka.SetupConsumerProducer(&base.Cfg.Global.Kafka)
+ js, _, _ := jetstream.Prepare(&base.Cfg.Global.JetStream)
// Create a connection to the appservice postgres DB
appserviceDB, err := storage.NewDatabase(&base.Cfg.AppServiceAPI.Database)
@@ -97,7 +97,7 @@ func NewInternalAPI(
// We can't add ASes at runtime so this is safe to do.
if len(workerStates) > 0 {
consumer := consumers.NewOutputRoomEventConsumer(
- base.ProcessContext, base.Cfg, consumer, appserviceDB,
+ base.ProcessContext, base.Cfg, js, appserviceDB,
rsAPI, workerStates,
)
if err := consumer.Start(); err != nil {
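Across the components, `kafka.SetupConsumerProducer` calls are replaced with `jetstream.Prepare`, which hands back a `nats.JetStreamContext`. As a rough illustration of what such a helper wraps when external addresses are configured (the real helper in `setup/jetstream/nats.go` also handles starting the embedded server and is not reproduced here), a hypothetical `connectJetStream` might look like this:

```go
package jetstream

import (
	"strings"

	"github.com/nats-io/nats.go"
)

// connectJetStream is a hypothetical helper illustrating the pattern:
// dial the configured NATS addresses and return a JetStream context
// that producers and consumers can share.
func connectJetStream(addresses []string) (nats.JetStreamContext, *nats.Conn, error) {
	nc, err := nats.Connect(strings.Join(addresses, ","))
	if err != nil {
		return nil, nil, err
	}
	js, err := nc.JetStream()
	if err != nil {
		nc.Close()
		return nil, nil, err
	}
	return js, nc, nil
}
```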
diff --git a/appservice/consumers/roomserver.go b/appservice/consumers/roomserver.go
index 2ad7f68f..139b5724 100644
--- a/appservice/consumers/roomserver.go
+++ b/appservice/consumers/roomserver.go
@@ -20,23 +20,25 @@ import (
"github.com/matrix-org/dendrite/appservice/storage"
"github.com/matrix-org/dendrite/appservice/types"
- "github.com/matrix-org/dendrite/internal"
"github.com/matrix-org/dendrite/roomserver/api"
"github.com/matrix-org/dendrite/setup/config"
+ "github.com/matrix-org/dendrite/setup/jetstream"
"github.com/matrix-org/dendrite/setup/process"
"github.com/matrix-org/gomatrixserverlib"
+ "github.com/nats-io/nats.go"
- "github.com/Shopify/sarama"
log "github.com/sirupsen/logrus"
)
// OutputRoomEventConsumer consumes events that originated in the room server.
type OutputRoomEventConsumer struct {
- roomServerConsumer *internal.ContinualConsumer
- asDB storage.Database
- rsAPI api.RoomserverInternalAPI
- serverName string
- workerStates []types.ApplicationServiceWorkerState
+ ctx context.Context
+ jetstream nats.JetStreamContext
+ topic string
+ asDB storage.Database
+ rsAPI api.RoomserverInternalAPI
+ serverName string
+ workerStates []types.ApplicationServiceWorkerState
}
// NewOutputRoomEventConsumer creates a new OutputRoomEventConsumer. Call
@@ -44,55 +46,55 @@ type OutputRoomEventConsumer struct {
func NewOutputRoomEventConsumer(
process *process.ProcessContext,
cfg *config.Dendrite,
- kafkaConsumer sarama.Consumer,
+ js nats.JetStreamContext,
appserviceDB storage.Database,
rsAPI api.RoomserverInternalAPI,
workerStates []types.ApplicationServiceWorkerState,
) *OutputRoomEventConsumer {
- consumer := internal.ContinualConsumer{
- Process: process,
- ComponentName: "appservice/roomserver",
- Topic: cfg.Global.Kafka.TopicFor(config.TopicOutputRoomEvent),
- Consumer: kafkaConsumer,
- PartitionStore: appserviceDB,
+ return &OutputRoomEventConsumer{
+ ctx: process.Context(),
+ jetstream: js,
+ topic: cfg.Global.JetStream.TopicFor(jetstream.OutputRoomEvent),
+ asDB: appserviceDB,
+ rsAPI: rsAPI,
+ serverName: string(cfg.Global.ServerName),
+ workerStates: workerStates,
}
- s := &OutputRoomEventConsumer{
- roomServerConsumer: &consumer,
- asDB: appserviceDB,
- rsAPI: rsAPI,
- serverName: string(cfg.Global.ServerName),
- workerStates: workerStates,
- }
- consumer.ProcessMessage = s.onMessage
-
- return s
}
// Start consuming from room servers
func (s *OutputRoomEventConsumer) Start() error {
- return s.roomServerConsumer.Start()
+ _, err := s.jetstream.Subscribe(s.topic, s.onMessage)
+ return err
}
// onMessage is called when the appservice component receives a new event from
// the room server output log.
-func (s *OutputRoomEventConsumer) onMessage(msg *sarama.ConsumerMessage) error {
- // Parse out the event JSON
- var output api.OutputEvent
- if err := json.Unmarshal(msg.Value, &output); err != nil {
- // If the message was invalid, log it and move on to the next message in the stream
- log.WithError(err).Errorf("roomserver output log: message parse failure")
- return nil
- }
+func (s *OutputRoomEventConsumer) onMessage(msg *nats.Msg) {
+ jetstream.WithJetStreamMessage(msg, func(msg *nats.Msg) bool {
+ // Parse out the event JSON
+ var output api.OutputEvent
+ if err := json.Unmarshal(msg.Data, &output); err != nil {
+ // If the message was invalid, log it and move on to the next message in the stream
+ log.WithError(err).Errorf("roomserver output log: message parse failure")
+ return true
+ }
- if output.Type != api.OutputTypeNewRoomEvent {
- return nil
- }
+ if output.Type != api.OutputTypeNewRoomEvent {
+ return true
+ }
+
+ events := []*gomatrixserverlib.HeaderedEvent{output.NewRoomEvent.Event}
+ events = append(events, output.NewRoomEvent.AddStateEvents...)
- events := []*gomatrixserverlib.HeaderedEvent{output.NewRoomEvent.Event}
- events = append(events, output.NewRoomEvent.AddStateEvents...)
+ // Send event to any relevant application services
+ if err := s.filterRoomserverEvents(context.TODO(), events); err != nil {
+ log.WithError(err).Errorf("roomserver output log: filter error")
+ return true
+ }
- // Send event to any relevant application services
- return s.filterRoomserverEvents(context.TODO(), events)
+ return true
+ })
}
// filterRoomserverEvents takes in events and decides whether any of them need
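The consumer above relies on `jetstream.WithJetStreamMessage` (added in `setup/jetstream/helpers.go`, not shown in this diff) to keep acknowledgement handling out of the business logic. A plausible sketch of such a wrapper, assuming it acks when the callback reports success and naks otherwise; the real helper may differ in detail:

```go
package jetstream

import "github.com/nats-io/nats.go"

// WithJetStreamMessage runs f and acknowledges the message if f reports
// success, so handlers don't have to remember to call Ack themselves.
// Sketch only; the actual helper lives in setup/jetstream/helpers.go.
func WithJetStreamMessage(msg *nats.Msg, f func(msg *nats.Msg) bool) {
	if f(msg) {
		_ = msg.Ack()
	} else {
		_ = msg.Nak() // ask the server to redeliver later
	}
}
```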
diff --git a/appservice/storage/interface.go b/appservice/storage/interface.go
index 735e2f90..25d35af6 100644
--- a/appservice/storage/interface.go
+++ b/appservice/storage/interface.go
@@ -17,12 +17,10 @@ package storage
import (
"context"
- "github.com/matrix-org/dendrite/internal"
"github.com/matrix-org/gomatrixserverlib"
)
type Database interface {
- internal.PartitionStorer
StoreEvent(ctx context.Context, appServiceID string, event *gomatrixserverlib.HeaderedEvent) error
GetEventsWithAppServiceID(ctx context.Context, appServiceID string, limit int) (int, int, []gomatrixserverlib.HeaderedEvent, bool, error)
CountEventsWithAppServiceID(ctx context.Context, appServiceID string) (int, error)
diff --git a/build/docker/README.md b/build/docker/README.md
index 19e4234c..7425d96c 100644
--- a/build/docker/README.md
+++ b/build/docker/README.md
@@ -19,7 +19,6 @@ not contain the Go toolchain etc.
There are three sample `docker-compose` files:
-- `docker-compose.deps.yml` which runs the Postgres and Kafka prerequisites
- `docker-compose.monolith.yml` which runs a monolith Dendrite deployment
- `docker-compose.polylith.yml` which runs a polylith Dendrite deployment
@@ -28,7 +27,7 @@ There are three sample `docker-compose` files:
The `docker-compose` files refer to the `/etc/dendrite` volume as where the
runtime config should come from. The mounted folder must contain:
-- `dendrite.yaml` configuration file (based on the [`dendrite-config.yaml`](https://raw.githubusercontent.com/matrix-org/dendrite/master/dendrite-config.yaml)
+- `dendrite.yaml` configuration file (from the [Docker config folder](https://github.com/matrix-org/dendrite/tree/master/build/docker/config)
sample in the `build/docker/config` folder of this repository.)
- `matrix_key.pem` server key, as generated using `cmd/generate-keys`
- `server.crt` certificate file
@@ -50,15 +49,9 @@ The key files will now exist in your current working directory, and can be mount
## Starting Dendrite as a monolith deployment
-Create your config based on the [`dendrite-config.yaml`](https://raw.githubusercontent.com/matrix-org/dendrite/master/dendrite-config.yaml) configuration file in the `build/docker/config` folder of this repository. And rename the config file to `dendrite.yml` (and put it in your `config` directory).
+Create your config based on the [`dendrite.yaml`](https://github.com/matrix-org/dendrite/tree/master/build/docker/config) configuration file in the `build/docker/config` folder of this repository.
-Once in place, start the PostgreSQL dependency:
-
-```
-docker-compose -f docker-compose.deps.yml up postgres
-```
-
-Wait a few seconds for PostgreSQL to finish starting up, and then start a monolith:
+Then start the deployment:
```
docker-compose -f docker-compose.monolith.yml up
@@ -66,15 +59,9 @@ docker-compose -f docker-compose.monolith.yml up
## Starting Dendrite as a polylith deployment
-Create your config based on the [`dendrite-config.yaml`](https://raw.githubusercontent.com/matrix-org/dendrite/master/dendrite-config.yaml) configuration file in the `build/docker/config` folder of this repository. And rename the config file to `dendrite.yml` (and put it in your `config` directory).
-
-Once in place, start all the dependencies:
-
-```
-docker-compose -f docker-compose.deps.yml up
-```
+Create your config based on the [`dendrite.yaml`](https://github.com/matrix-org/dendrite/tree/master/build/docker/config) configuration file in the `build/docker/config` folder of this repository.
-Wait a few seconds for PostgreSQL and Kafka to finish starting up, and then start a polylith:
+Then start the deployment:
```
docker-compose -f docker-compose.polylith.yml up
diff --git a/build/docker/config/dendrite-config.yaml b/build/docker/config/dendrite.yaml
index d6357747..6d5ebc9f 100644
--- a/build/docker/config/dendrite-config.yaml
+++ b/build/docker/config/dendrite.yaml
@@ -28,7 +28,7 @@
# connection can be idle in seconds - a negative value is unlimited.
# The version of the configuration file.
-version: 1
+version: 2
# Global Matrix configuration. This configuration applies to all components.
global:
@@ -62,28 +62,27 @@ global:
- matrix.org
- vector.im
- # Configuration for Kafka/Naffka.
- kafka:
- # List of Kafka broker addresses to connect to. This is not needed if using
- # Naffka in monolith mode.
+ # Configuration for NATS JetStream
+ jetstream:
+ # A list of NATS Server addresses to connect to. If none are specified, an
+ # internal NATS server will be started automatically when running Dendrite
+ # in monolith mode. It is required to specify the address of at least one
+ # NATS Server node if running in polylith mode.
addresses:
- - kafka:9092
+ - jetstream:4222
- # The prefix to use for Kafka topic names for this homeserver. Change this only if
- # you are running more than one Dendrite homeserver on the same Kafka deployment.
- topic_prefix: Dendrite
+ # Keep all NATS streams in memory, rather than persisting them to the storage
+ # path below. This option is present primarily for integration testing and
+ # should not be used on a real-world Dendrite deployment.
+ in_memory: false
- # Whether to use Naffka instead of Kafka. This is only available in monolith
- # mode, but means that you can run a single-process server without requiring
- # Kafka.
- use_naffka: false
+ # Persistent directory to store JetStream streams in. This directory
+ # should be preserved across Dendrite restarts.
+ storage_path: ./
- # Naffka database options. Not required when using Kafka.
- naffka_database:
- connection_string: postgresql://dendrite:itsasecret@postgres/dendrite_naffka?sslmode=disable
- max_open_conns: 10
- max_idle_conns: 2
- conn_max_lifetime: -1
+ # The prefix to use for stream names for this homeserver - really only
+ # useful if running more than one Dendrite on the same NATS deployment.
+ topic_prefix: Dendrite
# Configuration for Prometheus metric collection.
metrics:
@@ -266,6 +265,19 @@ media_api:
height: 480
method: scale
+# Configuration for experimental MSCs
+mscs:
+ # A list of enabled MSCs
+ # Currently valid values are:
+ # - msc2836 (Threading, see https://github.com/matrix-org/matrix-doc/pull/2836)
+ # - msc2946 (Spaces Summary, see https://github.com/matrix-org/matrix-doc/pull/2946)
+ mscs: []
+ database:
+ connection_string: postgresql://dendrite:itsasecret@postgres/dendrite_mscs?sslmode=disable
+ max_open_conns: 5
+ max_idle_conns: 2
+ conn_max_lifetime: -1
+
# Configuration for the Room Server.
room_server:
internal_api:
diff --git a/build/docker/docker-compose.deps.yml b/build/docker/docker-compose.deps.yml
deleted file mode 100644
index aa065188..00000000
--- a/build/docker/docker-compose.deps.yml
+++ /dev/null
@@ -1,42 +0,0 @@
-version: "3.4"
-services:
- # PostgreSQL is needed for both polylith and monolith modes.
- postgres:
- hostname: postgres
- image: postgres:14
- restart: always
- volumes:
- - ./postgres/create_db.sh:/docker-entrypoint-initdb.d/20-create_db.sh
- # To persist your PostgreSQL databases outside of the Docker image, to
- # prevent data loss, you will need to add something like this:
- # - ./path/to/persistent/storage:/var/lib/postgresql/data
- environment:
- POSTGRES_PASSWORD: itsasecret
- POSTGRES_USER: dendrite
- networks:
- - internal
-
- # Zookeeper is only needed for polylith mode!
- zookeeper:
- hostname: zookeeper
- image: zookeeper
- networks:
- - internal
-
- # Kafka is only needed for polylith mode!
- kafka:
- container_name: dendrite_kafka
- hostname: kafka
- image: wurstmeister/kafka
- environment:
- KAFKA_ADVERTISED_HOST_NAME: "kafka"
- KAFKA_DELETE_TOPIC_ENABLE: "true"
- KAFKA_ZOOKEEPER_CONNECT: "zookeeper:2181"
- depends_on:
- - zookeeper
- networks:
- - internal
-
-networks:
- internal:
- attachable: true
diff --git a/build/docker/docker-compose.monolith.yml b/build/docker/docker-compose.monolith.yml
index ef8975b9..1a8fe4ee 100644
--- a/build/docker/docker-compose.monolith.yml
+++ b/build/docker/docker-compose.monolith.yml
@@ -1,5 +1,25 @@
version: "3.4"
services:
+ postgres:
+ hostname: postgres
+ image: postgres:14
+ restart: always
+ volumes:
+ - ./postgres/create_db.sh:/docker-entrypoint-initdb.d/20-create_db.sh
+ # To persist your PostgreSQL databases outside of the Docker image,
+ # to prevent data loss, modify the following ./path_to path:
+ - ./path_to/postgresql:/var/lib/postgresql/data
+ environment:
+ POSTGRES_PASSWORD: itsasecret
+ POSTGRES_USER: dendrite
+ healthcheck:
+ test: ["CMD-SHELL", "pg_isready -U dendrite"]
+ interval: 5s
+ timeout: 5s
+ retries: 5
+ networks:
+ - internal
+
monolith:
hostname: monolith
image: matrixdotorg/dendrite-monolith:latest
@@ -13,8 +33,11 @@ services:
volumes:
- ./config:/etc/dendrite
- ./media:/var/dendrite/media
+ depends_on:
+ - postgres
networks:
- internal
+ restart: unless-stopped
networks:
internal:
diff --git a/build/docker/docker-compose.polylith.yml b/build/docker/docker-compose.polylith.yml
index 9bbd6a8f..207d0451 100644
--- a/build/docker/docker-compose.polylith.yml
+++ b/build/docker/docker-compose.polylith.yml
@@ -1,13 +1,51 @@
version: "3.4"
services:
+ postgres:
+ hostname: postgres
+ image: postgres:14
+ restart: always
+ volumes:
+ - ./postgres/create_db.sh:/docker-entrypoint-initdb.d/20-create_db.sh
+ # To persist your PostgreSQL databases outside of the Docker image,
+ # to prevent data loss, modify the following ./path_to path:
+ - ./path_to/postgresql:/var/lib/postgresql/data
+ environment:
+ POSTGRES_PASSWORD: itsasecret
+ POSTGRES_USER: dendrite
+ healthcheck:
+ test: ["CMD-SHELL", "pg_isready -U dendrite"]
+ interval: 5s
+ timeout: 5s
+ retries: 5
+ networks:
+ - internal
+
+ jetstream:
+ hostname: jetstream
+ image: nats:latest
+ command: |
+ --jetstream
+ --store_dir /var/lib/nats
+ --cluster_name Dendrite
+ volumes:
+ # To persist your NATS JetStream streams outside of the Docker image,
+ # to prevent data loss, modify the following ./path_to path:
+ - ./path_to/nats:/var/lib/nats
+ networks:
+ - internal
+
client_api:
hostname: client_api
image: matrixdotorg/dendrite-polylith:latest
command: clientapi
volumes:
- ./config:/etc/dendrite
+ depends_on:
+ - jetstream
+ - postgres
networks:
- internal
+ restart: unless-stopped
media_api:
hostname: media_api
@@ -18,6 +56,7 @@ services:
- ./media:/var/dendrite/media
networks:
- internal
+ restart: unless-stopped
sync_api:
hostname: sync_api
@@ -25,8 +64,12 @@ services:
command: syncapi
volumes:
- ./config:/etc/dendrite
+ depends_on:
+ - jetstream
+ - postgres
networks:
- internal
+ restart: unless-stopped
room_server:
hostname: room_server
@@ -34,8 +77,12 @@ services:
command: roomserver
volumes:
- ./config:/etc/dendrite
+ depends_on:
+ - jetstream
+ - postgres
networks:
- internal
+ restart: unless-stopped
edu_server:
hostname: edu_server
@@ -43,8 +90,11 @@ services:
command: eduserver
volumes:
- ./config:/etc/dendrite
+ depends_on:
+ - jetstream
networks:
- internal
+ restart: unless-stopped
federation_api:
hostname: federation_api
@@ -52,8 +102,12 @@ services:
command: federationapi
volumes:
- ./config:/etc/dendrite
+ depends_on:
+ - jetstream
+ - postgres
networks:
- internal
+ restart: unless-stopped
key_server:
hostname: key_server
@@ -61,8 +115,12 @@ services:
command: keyserver
volumes:
- ./config:/etc/dendrite
+ depends_on:
+ - jetstream
+ - postgres
networks:
- internal
+ restart: unless-stopped
user_api:
hostname: user_api
@@ -70,8 +128,12 @@ services:
command: userapi
volumes:
- ./config:/etc/dendrite
+ depends_on:
+ - jetstream
+ - postgres
networks:
- internal
+ restart: unless-stopped
appservice_api:
hostname: appservice_api
@@ -82,8 +144,11 @@ services:
networks:
- internal
depends_on:
+ - jetstream
+ - postgres
- room_server
- user_api
+ restart: unless-stopped
networks:
internal:
diff --git a/build/docker/postgres/create_db.sh b/build/docker/postgres/create_db.sh
index a7107e27..9093738e 100755
--- a/build/docker/postgres/create_db.sh
+++ b/build/docker/postgres/create_db.sh
@@ -1,5 +1,5 @@
#!/bin/sh
-for db in userapi_accounts userapi_devices mediaapi syncapi roomserver keyserver federationapi appservice naffka; do
+for db in userapi_accounts userapi_devices mediaapi syncapi roomserver keyserver federationapi appservice mscs; do
createdb -U dendrite -O dendrite dendrite_$db
done
diff --git a/build/gobind-pinecone/monolith.go b/build/gobind-pinecone/monolith.go
index 1f7a889d..1c9c0ac4 100644
--- a/build/gobind-pinecone/monolith.go
+++ b/build/gobind-pinecone/monolith.go
@@ -281,8 +281,7 @@ func (m *DendriteMonolith) Start() {
cfg.Global.ServerName = gomatrixserverlib.ServerName(hex.EncodeToString(pk))
cfg.Global.PrivateKey = sk
cfg.Global.KeyID = gomatrixserverlib.KeyID(signing.KeyID)
- cfg.Global.Kafka.UseNaffka = true
- cfg.Global.Kafka.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/%s-naffka.db", m.StorageDirectory, prefix))
+ cfg.Global.JetStream.StoragePath = config.Path(fmt.Sprintf("file:%s/%s", m.StorageDirectory, prefix))
cfg.UserAPI.AccountDatabase.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/%s-account.db", m.StorageDirectory, prefix))
cfg.UserAPI.DeviceDatabase.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/%s-device.db", m.StorageDirectory, prefix))
cfg.MediaAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/%s-mediaapi.db", m.CacheDirectory, prefix))
diff --git a/build/gobind-yggdrasil/monolith.go b/build/gobind-yggdrasil/monolith.go
index 582a2372..1aae418d 100644
--- a/build/gobind-yggdrasil/monolith.go
+++ b/build/gobind-yggdrasil/monolith.go
@@ -86,8 +86,7 @@ func (m *DendriteMonolith) Start() {
cfg.Global.ServerName = gomatrixserverlib.ServerName(ygg.DerivedServerName())
cfg.Global.PrivateKey = ygg.PrivateKey()
cfg.Global.KeyID = gomatrixserverlib.KeyID(signing.KeyID)
- cfg.Global.Kafka.UseNaffka = true
- cfg.Global.Kafka.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/dendrite-p2p-naffka.db", m.StorageDirectory))
+ cfg.Global.JetStream.StoragePath = config.Path(fmt.Sprintf("file:%s/", m.StorageDirectory))
cfg.UserAPI.AccountDatabase.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/dendrite-p2p-account.db", m.StorageDirectory))
cfg.UserAPI.DeviceDatabase.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/dendrite-p2p-device.db", m.StorageDirectory))
cfg.MediaAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s/dendrite-p2p-mediaapi.db", m.StorageDirectory))
diff --git a/build/scripts/Complement.Dockerfile b/build/scripts/Complement.Dockerfile
index de51f16d..55b381ba 100644
--- a/build/scripts/Complement.Dockerfile
+++ b/build/scripts/Complement.Dockerfile
@@ -1,4 +1,4 @@
-FROM golang:1.13-stretch as build
+FROM golang:1.16-stretch as build
RUN apt-get update && apt-get install -y sqlite3
WORKDIR /build
diff --git a/clientapi/clientapi.go b/clientapi/clientapi.go
index 64a7aa5e..7c772125 100644
--- a/clientapi/clientapi.go
+++ b/clientapi/clientapi.go
@@ -26,7 +26,7 @@ import (
keyserverAPI "github.com/matrix-org/dendrite/keyserver/api"
roomserverAPI "github.com/matrix-org/dendrite/roomserver/api"
"github.com/matrix-org/dendrite/setup/config"
- "github.com/matrix-org/dendrite/setup/kafka"
+ "github.com/matrix-org/dendrite/setup/jetstream"
userapi "github.com/matrix-org/dendrite/userapi/api"
"github.com/matrix-org/dendrite/userapi/storage/accounts"
"github.com/matrix-org/gomatrixserverlib"
@@ -49,11 +49,11 @@ func AddPublicRoutes(
extRoomsProvider api.ExtraPublicRoomsProvider,
mscCfg *config.MSCs,
) {
- _, producer := kafka.SetupConsumerProducer(&cfg.Matrix.Kafka)
+ js, _, _ := jetstream.Prepare(&cfg.Matrix.JetStream)
syncProducer := &producers.SyncAPIProducer{
- Producer: producer,
- Topic: cfg.Matrix.Kafka.TopicFor(config.TopicOutputClientData),
+ JetStream: js,
+ Topic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputClientData),
}
routing.Setup(
diff --git a/clientapi/producers/syncapi.go b/clientapi/producers/syncapi.go
index 6ab8eef2..bd6af5f1 100644
--- a/clientapi/producers/syncapi.go
+++ b/clientapi/producers/syncapi.go
@@ -17,39 +17,42 @@ package producers
import (
"encoding/json"
- "github.com/Shopify/sarama"
"github.com/matrix-org/dendrite/internal/eventutil"
+ "github.com/matrix-org/dendrite/setup/jetstream"
+ "github.com/nats-io/nats.go"
log "github.com/sirupsen/logrus"
)
// SyncAPIProducer produces events for the sync API server to consume
type SyncAPIProducer struct {
- Topic string
- Producer sarama.SyncProducer
+ Topic string
+ JetStream nats.JetStreamContext
}
// SendData sends account data to the sync API server
func (p *SyncAPIProducer) SendData(userID string, roomID string, dataType string) error {
- var m sarama.ProducerMessage
+ m := &nats.Msg{
+ Subject: p.Topic,
+ Header: nats.Header{},
+ }
+ m.Header.Set(jetstream.UserID, userID)
data := eventutil.AccountData{
RoomID: roomID,
Type: dataType,
}
- value, err := json.Marshal(data)
+ var err error
+ m.Data, err = json.Marshal(data)
if err != nil {
return err
}
- m.Topic = string(p.Topic)
- m.Key = sarama.StringEncoder(userID)
- m.Value = sarama.ByteEncoder(value)
log.WithFields(log.Fields{
"user_id": userID,
"room_id": roomID,
"data_type": dataType,
}).Infof("Producing to topic '%s'", p.Topic)
- _, _, err = p.Producer.SendMessage(&m)
+ _, err = p.JetStream.PublishMsg(m)
return err
}
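Note that the user ID now travels as a NATS message header (`jetstream.UserID`) rather than as a Kafka partition key. A hedged sketch of how a consumer on the other end might read it back, assuming the same header name and the `eventutil.AccountData` payload produced by `SendData` above; the real consumer is `syncapi/consumers/clientapi.go` and is not reproduced here:

```go
package consumers

import (
	"encoding/json"

	"github.com/matrix-org/dendrite/internal/eventutil"
	"github.com/matrix-org/dendrite/setup/jetstream"
	"github.com/nats-io/nats.go"
	log "github.com/sirupsen/logrus"
)

// handleClientData is an illustrative handler for the client data topic.
func handleClientData(msg *nats.Msg) {
	userID := msg.Header.Get(jetstream.UserID) // set by SyncAPIProducer.SendData
	var data eventutil.AccountData
	if err := json.Unmarshal(msg.Data, &data); err != nil {
		log.WithError(err).Error("client API output: message parse failure")
		return
	}
	log.WithField("user_id", userID).Debugf("received account data for room %s", data.RoomID)
}
```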
diff --git a/clientapi/routing/createroom.go b/clientapi/routing/createroom.go
index 8f96c3d3..85331192 100644
--- a/clientapi/routing/createroom.go
+++ b/clientapi/routing/createroom.go
@@ -463,6 +463,7 @@ func createRoom(
},
ev.Headered(roomVersion),
nil,
+ false,
); err != nil {
util.GetLogger(req.Context()).WithError(err).Error("SendEventWithState failed")
return jsonerror.InternalServerError()
diff --git a/clientapi/routing/membership.go b/clientapi/routing/membership.go
index 33fb3883..7ddb827e 100644
--- a/clientapi/routing/membership.go
+++ b/clientapi/routing/membership.go
@@ -110,6 +110,7 @@ func sendMembership(ctx context.Context, accountDB accounts.Database, device *us
[]*gomatrixserverlib.HeaderedEvent{event.Event.Headered(roomVer)},
cfg.Matrix.ServerName,
nil,
+ false,
); err != nil {
util.GetLogger(ctx).WithError(err).Error("SendEvents failed")
return jsonerror.InternalServerError()
diff --git a/clientapi/routing/profile.go b/clientapi/routing/profile.go
index 7bea35e5..9de1869b 100644
--- a/clientapi/routing/profile.go
+++ b/clientapi/routing/profile.go
@@ -169,7 +169,7 @@ func SetAvatarURL(
return jsonerror.InternalServerError()
}
- if err := api.SendEvents(req.Context(), rsAPI, api.KindNew, events, cfg.Matrix.ServerName, nil); err != nil {
+ if err := api.SendEvents(req.Context(), rsAPI, api.KindNew, events, cfg.Matrix.ServerName, nil, false); err != nil {
util.GetLogger(req.Context()).WithError(err).Error("SendEvents failed")
return jsonerror.InternalServerError()
}
@@ -286,7 +286,7 @@ func SetDisplayName(
return jsonerror.InternalServerError()
}
- if err := api.SendEvents(req.Context(), rsAPI, api.KindNew, events, cfg.Matrix.ServerName, nil); err != nil {
+ if err := api.SendEvents(req.Context(), rsAPI, api.KindNew, events, cfg.Matrix.ServerName, nil, false); err != nil {
util.GetLogger(req.Context()).WithError(err).Error("SendEvents failed")
return jsonerror.InternalServerError()
}
diff --git a/clientapi/routing/redaction.go b/clientapi/routing/redaction.go
index c25ca4ef..8492236b 100644
--- a/clientapi/routing/redaction.go
+++ b/clientapi/routing/redaction.go
@@ -120,7 +120,7 @@ func SendRedaction(
JSON: jsonerror.NotFound("Room does not exist"),
}
}
- if err = roomserverAPI.SendEvents(context.Background(), rsAPI, roomserverAPI.KindNew, []*gomatrixserverlib.HeaderedEvent{e}, cfg.Matrix.ServerName, nil); err != nil {
+ if err = roomserverAPI.SendEvents(context.Background(), rsAPI, roomserverAPI.KindNew, []*gomatrixserverlib.HeaderedEvent{e}, cfg.Matrix.ServerName, nil, false); err != nil {
util.GetLogger(req.Context()).WithError(err).Errorf("failed to SendEvents")
return jsonerror.InternalServerError()
}
diff --git a/clientapi/routing/sendevent.go b/clientapi/routing/sendevent.go
index 204d2592..f0498312 100644
--- a/clientapi/routing/sendevent.go
+++ b/clientapi/routing/sendevent.go
@@ -122,6 +122,7 @@ func SendEvent(
},
cfg.Matrix.ServerName,
txnAndSessionID,
+ false,
); err != nil {
util.GetLogger(req.Context()).WithError(err).Error("SendEvents failed")
return jsonerror.InternalServerError()
diff --git a/clientapi/threepid/invites.go b/clientapi/threepid/invites.go
index 53cd6b8c..985cf00c 100644
--- a/clientapi/threepid/invites.go
+++ b/clientapi/threepid/invites.go
@@ -367,5 +367,6 @@ func emit3PIDInviteEvent(
},
cfg.Matrix.ServerName,
nil,
+ false,
)
}
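Each of these call sites gains a trailing `false` argument, which appears to correspond to the "Async and sync roomserver input" change in the commit message: the roomserver input API can now either queue events asynchronously or wait for them to be processed. A minimal sketch of a synchronous call, with the parameter name assumed rather than taken from `roomserver/api/wrapper.go`:

```go
package example

import (
	"context"

	"github.com/matrix-org/dendrite/roomserver/api"
	"github.com/matrix-org/gomatrixserverlib"
)

// sendSynchronously illustrates the new trailing argument: passing false
// (assumed to mean "not async") asks the roomserver to process the events
// before returning, matching the call sites in this commit.
func sendSynchronously(
	ctx context.Context,
	rsAPI api.RoomserverInternalAPI,
	events []*gomatrixserverlib.HeaderedEvent,
	serverName gomatrixserverlib.ServerName,
) error {
	return api.SendEvents(ctx, rsAPI, api.KindNew, events, serverName, nil, false)
}
```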
diff --git a/cmd/dendrite-demo-libp2p/main.go b/cmd/dendrite-demo-libp2p/main.go
index 5f26e00c..7cbd0b6d 100644
--- a/cmd/dendrite-demo-libp2p/main.go
+++ b/cmd/dendrite-demo-libp2p/main.go
@@ -123,8 +123,8 @@ func main() {
cfg.Global.ServerName = "p2p"
cfg.Global.PrivateKey = privKey
cfg.Global.KeyID = gomatrixserverlib.KeyID(fmt.Sprintf("ed25519:%s", *instanceName))
- cfg.Global.Kafka.UseNaffka = true
cfg.FederationAPI.FederationMaxRetries = 6
+ cfg.Global.JetStream.StoragePath = config.Path(fmt.Sprintf("%s/", *instanceName))
cfg.UserAPI.AccountDatabase.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-account.db", *instanceName))
cfg.UserAPI.DeviceDatabase.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-device.db", *instanceName))
cfg.MediaAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-mediaapi.db", *instanceName))
@@ -132,7 +132,6 @@ func main() {
cfg.RoomServer.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-roomserver.db", *instanceName))
cfg.FederationAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-federationapi.db", *instanceName))
cfg.AppServiceAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-appservice.db", *instanceName))
- cfg.Global.Kafka.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-naffka.db", *instanceName))
cfg.KeyServer.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-e2ekey.db", *instanceName))
cfg.MSCs.MSCs = []string{"msc2836"}
cfg.MSCs.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-mscs.db", *instanceName))
diff --git a/cmd/dendrite-demo-pinecone/main.go b/cmd/dendrite-demo-pinecone/main.go
index 180f8ae0..a897dcd1 100644
--- a/cmd/dendrite-demo-pinecone/main.go
+++ b/cmd/dendrite-demo-pinecone/main.go
@@ -158,7 +158,7 @@ func main() {
cfg.Global.ServerName = gomatrixserverlib.ServerName(hex.EncodeToString(pk))
cfg.Global.PrivateKey = sk
cfg.Global.KeyID = gomatrixserverlib.KeyID(signing.KeyID)
- cfg.Global.Kafka.UseNaffka = true
+ cfg.Global.JetStream.StoragePath = config.Path(fmt.Sprintf("%s/", *instanceName))
cfg.UserAPI.AccountDatabase.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-account.db", *instanceName))
cfg.UserAPI.DeviceDatabase.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-device.db", *instanceName))
cfg.MediaAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-mediaapi.db", *instanceName))
@@ -167,7 +167,6 @@ func main() {
cfg.KeyServer.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-keyserver.db", *instanceName))
cfg.FederationAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-federationapi.db", *instanceName))
cfg.AppServiceAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-appservice.db", *instanceName))
- cfg.Global.Kafka.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-naffka.db", *instanceName))
cfg.MSCs.MSCs = []string{"msc2836", "msc2946"}
if err := cfg.Derive(); err != nil {
panic(err)
diff --git a/cmd/dendrite-demo-yggdrasil/main.go b/cmd/dendrite-demo-yggdrasil/main.go
index b8ac3f72..52e69ee5 100644
--- a/cmd/dendrite-demo-yggdrasil/main.go
+++ b/cmd/dendrite-demo-yggdrasil/main.go
@@ -77,7 +77,7 @@ func main() {
cfg.Global.ServerName = gomatrixserverlib.ServerName(ygg.DerivedServerName())
cfg.Global.PrivateKey = ygg.PrivateKey()
cfg.Global.KeyID = gomatrixserverlib.KeyID(signing.KeyID)
- cfg.Global.Kafka.UseNaffka = true
+ cfg.Global.JetStream.StoragePath = config.Path(fmt.Sprintf("%s/", *instanceName))
cfg.UserAPI.AccountDatabase.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-account.db", *instanceName))
cfg.UserAPI.DeviceDatabase.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-device.db", *instanceName))
cfg.MediaAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-mediaapi.db", *instanceName))
@@ -86,7 +86,6 @@ func main() {
cfg.KeyServer.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-keyserver.db", *instanceName))
cfg.FederationAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-federationapi.db", *instanceName))
cfg.AppServiceAPI.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-appservice.db", *instanceName))
- cfg.Global.Kafka.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-naffka.db", *instanceName))
cfg.MSCs.MSCs = []string{"msc2836"}
cfg.MSCs.Database.ConnectionString = config.DataSource(fmt.Sprintf("file:%s-mscs.db", *instanceName))
if err = cfg.Derive(); err != nil {
diff --git a/cmd/dendrite-upgrade-tests/main.go b/cmd/dendrite-upgrade-tests/main.go
index aa8c7fdc..35852e8a 100644
--- a/cmd/dendrite-upgrade-tests/main.go
+++ b/cmd/dendrite-upgrade-tests/main.go
@@ -48,7 +48,7 @@ const HEAD = "HEAD"
// due to the error:
// When using COPY with more than one source file, the destination must be a directory and end with a /
// We need to run a postgres anyway, so use the dockerfile associated with Complement instead.
-const Dockerfile = `FROM golang:1.13-stretch as build
+const Dockerfile = `FROM golang:1.16-stretch as build
RUN apt-get update && apt-get install -y postgresql
WORKDIR /build
diff --git a/cmd/dendritejs-pinecone/main.go b/cmd/dendritejs-pinecone/main.go
index d83e9ce0..62eea78f 100644
--- a/cmd/dendritejs-pinecone/main.go
+++ b/cmd/dendritejs-pinecone/main.go
@@ -170,8 +170,7 @@ func startup() {
cfg.RoomServer.Database.ConnectionString = "file:/idb/dendritejs_roomserver.db"
cfg.SyncAPI.Database.ConnectionString = "file:/idb/dendritejs_syncapi.db"
cfg.KeyServer.Database.ConnectionString = "file:/idb/dendritejs_e2ekey.db"
- cfg.Global.Kafka.UseNaffka = true
- cfg.Global.Kafka.Database.ConnectionString = "file:/idb/dendritejs_naffka.db"
+ cfg.Global.JetStream.StoragePath = "file:/idb/dendritejs/"
cfg.Global.TrustedIDServers = []string{}
cfg.Global.KeyID = gomatrixserverlib.KeyID(signing.KeyID)
cfg.Global.PrivateKey = sk
diff --git a/cmd/dendritejs/main.go b/cmd/dendritejs/main.go
index 13a0872d..59de07cd 100644
--- a/cmd/dendritejs/main.go
+++ b/cmd/dendritejs/main.go
@@ -173,8 +173,7 @@ func main() {
cfg.RoomServer.Database.ConnectionString = "file:/idb/dendritejs_roomserver.db"
cfg.SyncAPI.Database.ConnectionString = "file:/idb/dendritejs_syncapi.db"
cfg.KeyServer.Database.ConnectionString = "file:/idb/dendritejs_e2ekey.db"
- cfg.Global.Kafka.UseNaffka = true
- cfg.Global.Kafka.Database.ConnectionString = "file:/idb/dendritejs_naffka.db"
+ cfg.Global.JetStream.StoragePath = "file:/idb/dendritejs/"
cfg.Global.TrustedIDServers = []string{
"matrix.org", "vector.im",
}
diff --git a/cmd/generate-config/main.go b/cmd/generate-config/main.go
index ff91e753..2ea12e34 100644
--- a/cmd/generate-config/main.go
+++ b/cmd/generate-config/main.go
@@ -16,13 +16,14 @@ func main() {
dbURI := flag.String("db", "", "The DB URI to use for all components if not SQLite files")
flag.Parse()
- cfg := &config.Dendrite{}
+ cfg := &config.Dendrite{
+ Version: config.Version,
+ }
cfg.Defaults(true)
if *serverName != "" {
cfg.Global.ServerName = gomatrixserverlib.ServerName(*serverName)
}
if *dbURI != "" {
- cfg.Global.Kafka.Database.ConnectionString = config.DataSource(*dbURI)
cfg.AppServiceAPI.Database.ConnectionString = config.DataSource(*dbURI)
cfg.FederationAPI.Database.ConnectionString = config.DataSource(*dbURI)
cfg.KeyServer.Database.ConnectionString = config.DataSource(*dbURI)
@@ -88,6 +89,7 @@ func main() {
cfg.MSCs.MSCs = []string{"msc2836", "msc2946", "msc2444", "msc2753"}
cfg.Logging[0].Level = "trace"
cfg.UserAPI.BCryptCost = bcrypt.MinCost
+ cfg.Global.JetStream.InMemory = true
}
j, err := yaml.Marshal(cfg)
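The "Bump version number of config to 2" and "More useful error message when the config version does not match" bullets imply a check along the following lines when the configuration is loaded. This is a hedged sketch only; the real check lives in `setup/config` and its exact wording and types may differ:

```go
package config

import "fmt"

// verifyConfigVersion is an illustrative sketch of the version check
// implied by this commit; the actual implementation may differ.
func verifyConfigVersion(loaded int) error {
	const expected = 2 // bumped from 1 to 2 when Kafka/Naffka was replaced by JetStream
	if loaded != expected {
		return fmt.Errorf(
			"config version %d not supported, expected version %d (the kafka section has been replaced by jetstream)",
			loaded, expected,
		)
	}
	return nil
}
```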
diff --git a/dendrite-config.yaml b/dendrite-config.yaml
index bf3811a2..7593988f 100644
--- a/dendrite-config.yaml
+++ b/dendrite-config.yaml
@@ -28,7 +28,7 @@
# connection can be idle in seconds - a negative value is unlimited.
# The version of the configuration file.
-version: 1
+version: 2
# Global Matrix configuration. This configuration applies to all components.
global:
@@ -68,34 +68,27 @@ global:
# to other servers and the federation API will not be exposed.
disable_federation: false
- # Configuration for Kafka/Naffka.
- kafka:
- # List of Kafka broker addresses to connect to. This is not needed if using
- # Naffka in monolith mode.
+ # Configuration for NATS JetStream
+ jetstream:
+ # A list of NATS Server addresses to connect to. If none are specified, an
+ # internal NATS server will be started automatically when running Dendrite
+ # in monolith mode. It is required to specify the address of at least one
+ # NATS Server node if running in polylith mode.
addresses:
- - localhost:2181
+ # - localhost:4222
- # The prefix to use for Kafka topic names for this homeserver. Change this only if
- # you are running more than one Dendrite homeserver on the same Kafka deployment.
- topic_prefix: Dendrite
-
- # Whether to use Naffka instead of Kafka. This is only available in monolith
- # mode, but means that you can run a single-process server without requiring
- # Kafka.
- use_naffka: true
+ # Keep all NATS streams in memory, rather than persisting them to the storage
+ # path below. This option is present primarily for integration testing and
+ # should not be used on a real-world Dendrite deployment.
+ in_memory: false
- # The max size a Kafka message is allowed to use.
- # You only need to change this value, if you encounter issues with too large messages.
- # Must be less than/equal to "max.message.bytes" configured in Kafka.
- # Defaults to 8388608 bytes.
- # max_message_bytes: 8388608
+ # Persistent directory to store JetStream streams in. This directory
+ # should be preserved across Dendrite restarts.
+ storage_path: ./
- # Naffka database options. Not required when using Kafka.
- naffka_database:
- connection_string: file:naffka.db
- max_open_conns: 10
- max_idle_conns: 2
- conn_max_lifetime: -1
+ # The prefix to use for stream names for this homeserver - really only
+ # useful if running more than one Dendrite on the same NATS deployment.
+ topic_prefix: Dendrite
# Configuration for Prometheus metric collection.
metrics:
@@ -121,8 +114,8 @@ global:
# Configuration for the Appservice API.
app_service_api:
internal_api:
- listen: http://localhost:7777
- connect: http://localhost:7777
+ listen: http://localhost:7777 # Only used in polylith deployments
+ connect: http://localhost:7777 # Only used in polylith deployments
database:
connection_string: file:appservice.db
max_open_conns: 10
@@ -140,8 +133,8 @@ app_service_api:
# Configuration for the Client API.
client_api:
internal_api:
- listen: http://localhost:7771
- connect: http://localhost:7771
+ listen: http://localhost:7771 # Only used in polylith deployments
+ connect: http://localhost:7771 # Only used in polylith deployments
external_api:
listen: http://[::]:8071
@@ -181,14 +174,14 @@ client_api:
# Configuration for the EDU server.
edu_server:
internal_api:
- listen: http://localhost:7778
- connect: http://localhost:7778
+ listen: http://localhost:7778 # Only used in polylith deployments
+ connect: http://localhost:7778 # Only used in polylith deployments
# Configuration for the Federation API.
federation_api:
internal_api:
- listen: http://localhost:7772
- connect: http://localhost:7772
+ listen: http://localhost:7772 # Only used in polylith deployments
+ connect: http://localhost:7772 # Only used in polylith deployments
external_api:
listen: http://[::]:8072
database:
@@ -237,8 +230,8 @@ federation_api:
# Configuration for the Key Server (for end-to-end encryption).
key_server:
internal_api:
- listen: http://localhost:7779
- connect: http://localhost:7779
+ listen: http://localhost:7779 # Only used in polylith deployments
+ connect: http://localhost:7779 # Only used in polylith deployments
database:
connection_string: file:keyserver.db
max_open_conns: 10
@@ -248,8 +241,8 @@ key_server:
# Configuration for the Media API.
media_api:
internal_api:
- listen: http://localhost:7774
- connect: http://localhost:7774
+ listen: http://localhost:7774 # Only used in polylith deployments
+ connect: http://localhost:7774 # Only used in polylith deployments
external_api:
listen: http://[::]:8074
database:
@@ -300,8 +293,8 @@ mscs:
# Configuration for the Room Server.
room_server:
internal_api:
- listen: http://localhost:7770
- connect: http://localhost:7770
+ listen: http://localhost:7770 # Only used in polylith deployments
+ connect: http://localhost:7770 # Only used in polylith deployments
database:
connection_string: file:roomserver.db
max_open_conns: 10
@@ -311,8 +304,8 @@ room_server:
# Configuration for the Sync API.
sync_api:
internal_api:
- listen: http://localhost:7773
- connect: http://localhost:7773
+ listen: http://localhost:7773 # Only used in polylith deployments
+ connect: http://localhost:7773 # Only used in polylith deployments
external_api:
listen: http://[::]:8073
database:
@@ -336,8 +329,8 @@ user_api:
# This value can be low if performing tests or on embedded Dendrite instances (e.g WASM builds)
# bcrypt_cost: 10
internal_api:
- listen: http://localhost:7781
- connect: http://localhost:7781
+ listen: http://localhost:7781 # Only used in polylith deployments
+ connect: http://localhost:7781 # Only used in polylith deployments
account_database:
connection_string: file:userapi_accounts.db
max_open_conns: 10
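The `in_memory` and `storage_path` options above determine how the JetStream streams are stored (the commit adds a comment pointing out that the stream definitions live in `setup/jetstream/streams.go`). A hedged sketch of that mapping using stock nats.go; the stream name, subject and lack of retention settings here are placeholders, not the values Dendrite actually uses:

```go
package jetstream

import "github.com/nats-io/nats.go"

// ensureStream declares a stream if it does not already exist. The real
// stream definitions are in setup/jetstream/streams.go; the name and
// subject here are placeholders.
func ensureStream(js nats.JetStreamContext, prefix string, inMemory bool) error {
	name := prefix + "OutputRoomEvent" // e.g. "DendriteOutputRoomEvent"
	if _, err := js.StreamInfo(name); err == nil {
		return nil // stream already exists
	}
	storage := nats.FileStorage // persisted under storage_path
	if inMemory {
		storage = nats.MemoryStorage // the in_memory: true case
	}
	_, err := js.AddStream(&nats.StreamConfig{
		Name:     name,
		Subjects: []string{name},
		Storage:  storage,
	})
	return err
}
```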
diff --git a/docs/INSTALL.md b/docs/INSTALL.md
index 1752d7e8..2afb43c6 100644
--- a/docs/INSTALL.md
+++ b/docs/INSTALL.md
@@ -2,21 +2,23 @@
Dendrite can be run in one of two configurations:
-* **Polylith mode**: A cluster of individual components, dealing with different
- aspects of the Matrix protocol (see [WIRING.md](WIRING-Current.md)). Components communicate
- with each other using internal HTTP APIs and [Apache Kafka](https://kafka.apache.org).
- This will almost certainly be the preferred model for large-scale deployments.
-
* **Monolith mode**: All components run in the same process. In this mode,
- Kafka is completely optional and can instead be replaced with an in-process
- lightweight implementation called [Naffka](https://github.com/matrix-org/naffka). This
- will usually be the preferred model for low-volume, low-user or experimental deployments.
+ it is possible to run an in-process [NATS Server](https://github.com/nats-io/nats-server)
+ instead of running a standalone deployment. This will usually be the preferred model for
+ low-to-mid volume deployments, providing the best balance between performance and resource usage.
+
+* **Polylith mode**: A cluster of individual components running in their own processes, dealing
+ with different aspects of the Matrix protocol (see [WIRING.md](WIRING-Current.md)). Components
+ communicate with each other using internal HTTP APIs and [NATS Server](https://github.com/nats-io/nats-server).
+ This will almost certainly be the preferred model for very large deployments but scalability
+ comes with a cost. API calls are expensive and therefore a polylith deployment may end up using
+ disproportionately more resources for a smaller number of users compared to a monolith deployment.
-For most deployments, it is **recommended to run in monolith mode with PostgreSQL databases**.
+In almost all cases, it is **recommended to run in monolith mode with PostgreSQL databases**.
Regardless of whether you are running in polylith or monolith mode, each Dendrite component that
-requires storage has its own database. Both Postgres and SQLite are supported and can be
-mixed-and-matched across components as needed in the configuration file.
+requires storage has its own database connections. Both Postgres and SQLite are supported and can
+be mixed-and-matched across components as needed in the configuration file.
Be advised that Dendrite is still in development and it's not recommended for
use in production environments just yet!
@@ -26,13 +28,11 @@ use in production environments just yet!
Dendrite requires:
* Go 1.15 or higher
-* Postgres 9.6 or higher (if using Postgres databases, not needed for SQLite)
+* PostgreSQL 12 or higher (if using PostgreSQL databases, not needed for SQLite)
If you want to run a polylith deployment, you also need:
-* Apache Kafka 0.10.2+
-
-Please note that Kafka is **not required** for a monolith deployment.
+* A standalone [NATS Server](https://github.com/nats-io/nats-server) deployment with JetStream enabled
## Building Dendrite
@@ -49,40 +49,18 @@ Then build it:
./build.sh
```
-## Install Kafka (polylith only)
-
-Install and start Kafka (c.f. [scripts/install-local-kafka.sh](scripts/install-local-kafka.sh)):
-
-```bash
-KAFKA_URL=http://archive.apache.org/dist/kafka/2.1.0/kafka_2.11-2.1.0.tgz
-
-# Only download the kafka if it isn't already downloaded.
-test -f kafka.tgz || wget $KAFKA_URL -O kafka.tgz
-# Unpack the kafka over the top of any existing installation
-mkdir -p kafka && tar xzf kafka.tgz -C kafka --strip-components 1
-
-# Start the zookeeper running in the background.
-# By default the zookeeper listens on localhost:2181
-kafka/bin/zookeeper-server-start.sh -daemon kafka/config/zookeeper.properties
-
-# Start the kafka server running in the background.
-# By default the kafka listens on localhost:9092
-kafka/bin/kafka-server-start.sh -daemon kafka/config/server.properties
-```
+## Install NATS Server
-On macOS, you can use [Homebrew](https://brew.sh/) for easier setup of Kafka:
+Follow the [NATS Server installation instructions](https://docs.nats.io/running-a-nats-service/introduction/installation) and then [start your NATS deployment](https://docs.nats.io/running-a-nats-service/introduction/running).
-```bash
-brew install kafka
-brew services start zookeeper
-brew services start kafka
-```
+JetStream must be enabled, either by passing the `-js` flag to `nats-server`,
+or by specifying the `store_dir` option in the `jetstream` configuration.
## Configuration
### PostgreSQL database setup
-Assuming that PostgreSQL 9.6 (or later) is installed:
+Assuming that PostgreSQL 12 (or later) is installed:
* Create role, choosing a new password when prompted:
@@ -109,7 +87,7 @@ On macOS, omit `sudo -u postgres` from the below commands.
* If you want to run each Dendrite component with its own database:
```bash
- for i in mediaapi syncapi roomserver signingkeyserver federationsender appservice keyserver userapi_accounts userapi_devices naffka; do
+ for i in mediaapi syncapi roomserver federationapi appservice keyserver userapi_accounts userapi_devices; do
sudo -u postgres createdb -O dendrite dendrite_$i
done
```
@@ -163,7 +141,11 @@ Create config file, based on `dendrite-config.yaml`. Call it `dendrite.yaml`. Th
* `postgres://dendrite:password@localhost/dendrite_userapi_account?sslmode=disable` to connect to PostgreSQL without SSL/TLS
* For SQLite on disk: `file:component.db` or `file:///path/to/component.db`, e.g. `file:userapi_account.db`
* Postgres and SQLite can be mixed and matched on different components as desired.
-* The `use_naffka` option if using Naffka in a monolith deployment
+* Either one of the following in the `jetstream` configuration section:
+ * The `addresses` option — a list of one or more addresses of an external standalone
+ NATS Server deployment
+ * The `storage_path` — where on the filesystem the built-in NATS server should
+ store durable queues, if using the built-in NATS server
There are other options which may be useful so review them all. In particular,
if you are trying to federate from your Dendrite instance into public rooms
@@ -177,11 +159,6 @@ using SQLite, all components **MUST** use their own database file.
## Starting a monolith server
-It is possible to use Naffka as an in-process replacement to Kafka when using
-the monolith server. To do this, set `use_naffka: true` in your `dendrite.yaml`
-configuration and uncomment the relevant Naffka line in the `database` section.
-Be sure to update the database username and password if needed.
-
The monolith server can be started as shown below. By default it listens for
HTTP connections on port 8008, so you can configure your Matrix client to use
`http://servername:8008` as the server:
@@ -197,6 +174,10 @@ for HTTPS connections on port 8448:
./bin/dendrite-monolith-server --tls-cert=server.crt --tls-key=server.key
```
+If the `jetstream` section of the configuration contains no `addresses` but does
+contain a `storage_path`, Dendrite will start up a built-in NATS JetStream node
+automatically, eliminating the need to run a separate NATS server.
+
## Starting a polylith deployment
The following contains scripts which will run all the required processes in order to point a Matrix client at Dendrite.
@@ -263,15 +244,6 @@ This is what implements the room DAG. Clients do not talk to this.
./bin/dendrite-polylith-multi --config=dendrite.yaml roomserver
```
-#### Federation sender
-
-This sends events from our users to other servers. This is only required if
-you want to support federation.
-
-```bash
-./bin/dendrite-polylith-multi --config=dendrite.yaml federationsender
-```
-
#### Appservice server
This sends events from the network to [application
@@ -291,14 +263,6 @@ This manages end-to-end encryption keys for users.
./bin/dendrite-polylith-multi --config=dendrite.yaml keyserver
```
-#### Signing key server
-
-This manages signing keys for servers.
-
-```bash
-./bin/dendrite-polylith-multi --config=dendrite.yaml signingkeyserver
-```
-
#### EDU server
This manages processing EDUs such as typing, send-to-device events and presence. Clients do not talk to
diff --git a/eduserver/eduserver.go b/eduserver/eduserver.go
index 97831f2b..db03001b 100644
--- a/eduserver/eduserver.go
+++ b/eduserver/eduserver.go
@@ -23,8 +23,7 @@ import (
"github.com/matrix-org/dendrite/eduserver/input"
"github.com/matrix-org/dendrite/eduserver/inthttp"
"github.com/matrix-org/dendrite/setup/base"
- "github.com/matrix-org/dendrite/setup/config"
- "github.com/matrix-org/dendrite/setup/kafka"
+ "github.com/matrix-org/dendrite/setup/jetstream"
userapi "github.com/matrix-org/dendrite/userapi/api"
)
@@ -43,16 +42,16 @@ func NewInternalAPI(
) api.EDUServerInputAPI {
cfg := &base.Cfg.EDUServer
- _, producer := kafka.SetupConsumerProducer(&cfg.Matrix.Kafka)
+ js, _, _ := jetstream.Prepare(&cfg.Matrix.JetStream)
return &input.EDUServerInputAPI{
Cache: eduCache,
UserAPI: userAPI,
- Producer: producer,
- OutputTypingEventTopic: cfg.Matrix.Kafka.TopicFor(config.TopicOutputTypingEvent),
- OutputSendToDeviceEventTopic: cfg.Matrix.Kafka.TopicFor(config.TopicOutputSendToDeviceEvent),
- OutputReceiptEventTopic: cfg.Matrix.Kafka.TopicFor(config.TopicOutputReceiptEvent),
- OutputKeyChangeEventTopic: cfg.Matrix.Kafka.TopicFor(config.TopicOutputKeyChangeEvent),
+ JetStream: js,
+ OutputTypingEventTopic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputTypingEvent),
+ OutputSendToDeviceEventTopic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputSendToDeviceEvent),
+ OutputReceiptEventTopic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputReceiptEvent),
+ OutputKeyChangeEventTopic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputKeyChangeEvent),
ServerName: cfg.Matrix.ServerName,
}
}
diff --git a/eduserver/input/input.go b/eduserver/input/input.go
index bdc24374..e7501a90 100644
--- a/eduserver/input/input.go
+++ b/eduserver/input/input.go
@@ -21,12 +21,12 @@ import (
"encoding/json"
"time"
- "github.com/Shopify/sarama"
"github.com/matrix-org/dendrite/eduserver/api"
"github.com/matrix-org/dendrite/eduserver/cache"
keyapi "github.com/matrix-org/dendrite/keyserver/api"
userapi "github.com/matrix-org/dendrite/userapi/api"
"github.com/matrix-org/gomatrixserverlib"
+ "github.com/nats-io/nats.go"
"github.com/sirupsen/logrus"
)
@@ -43,7 +43,7 @@ type EDUServerInputAPI struct {
// The kafka topic to output new key change events to
OutputKeyChangeEventTopic string
// kafka producer
- Producer sarama.SyncProducer
+ JetStream nats.JetStreamContext
// Internal user query API
UserAPI userapi.UserInternalAPI
// our server name
@@ -100,13 +100,11 @@ func (t *EDUServerInputAPI) InputCrossSigningKeyUpdate(
"user_id": request.UserID,
}).Infof("Producing to topic '%s'", t.OutputKeyChangeEventTopic)
- m := &sarama.ProducerMessage{
- Topic: string(t.OutputKeyChangeEventTopic),
- Key: sarama.StringEncoder(request.UserID),
- Value: sarama.ByteEncoder(eventJSON),
- }
-
- _, _, err = t.Producer.SendMessage(m)
+ _, err = t.JetStream.PublishMsg(&nats.Msg{
+ Subject: t.OutputKeyChangeEventTopic,
+ Header: nats.Header{},
+ Data: eventJSON,
+ })
return err
}
@@ -138,13 +136,11 @@ func (t *EDUServerInputAPI) sendTypingEvent(ite *api.InputTypingEvent) error {
"typing": ite.Typing,
}).Infof("Producing to topic '%s'", t.OutputTypingEventTopic)
- m := &sarama.ProducerMessage{
- Topic: string(t.OutputTypingEventTopic),
- Key: sarama.StringEncoder(ite.RoomID),
- Value: sarama.ByteEncoder(eventJSON),
- }
-
- _, _, err = t.Producer.SendMessage(m)
+ _, err = t.JetStream.PublishMsg(&nats.Msg{
+ Subject: t.OutputTypingEventTopic,
+ Header: nats.Header{},
+ Data: eventJSON,
+ })
return err
}
@@ -193,14 +189,10 @@ func (t *EDUServerInputAPI) sendToDeviceEvent(ise *api.InputSendToDeviceEvent) e
return err
}
- m := &sarama.ProducerMessage{
- Topic: string(t.OutputSendToDeviceEventTopic),
- Key: sarama.StringEncoder(ote.UserID),
- Value: sarama.ByteEncoder(eventJSON),
- }
-
- _, _, err = t.Producer.SendMessage(m)
- if err != nil {
+ if _, err = t.JetStream.PublishMsg(&nats.Msg{
+ Subject: t.OutputSendToDeviceEventTopic,
+ Data: eventJSON,
+ }); err != nil {
logrus.WithError(err).Error("sendToDevice failed t.Producer.SendMessage")
return err
}
@@ -228,11 +220,10 @@ func (t *EDUServerInputAPI) InputReceiptEvent(
if err != nil {
return err
}
- m := &sarama.ProducerMessage{
- Topic: t.OutputReceiptEventTopic,
- Key: sarama.StringEncoder(request.InputReceiptEvent.RoomID + ":" + request.InputReceiptEvent.UserID),
- Value: sarama.ByteEncoder(js),
- }
- _, _, err = t.Producer.SendMessage(m)
+
+ _, err = t.JetStream.PublishMsg(&nats.Msg{
+ Subject: t.OutputReceiptEventTopic,
+ Data: js,
+ })
return err
}
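The producer changes above swap sarama's `SendMessage` for nats.go's `PublishMsg`. One detail the diff relies on but does not show in this hunk is that a JetStream publish only succeeds if a stream already exists whose subjects cover the published subject. The self-contained example below demonstrates that contract with stock nats.go; the stream and subject names are made up for illustration.

```go
package main

import (
	"log"

	"github.com/nats-io/nats.go"
)

func main() {
	nc, err := nats.Connect(nats.DefaultURL)
	if err != nil {
		log.Fatal(err)
	}
	defer nc.Close()

	js, err := nc.JetStream()
	if err != nil {
		log.Fatal(err)
	}

	// Without a stream covering the subject, PublishMsg returns an error.
	// Names here are illustrative only.
	if _, err := js.AddStream(&nats.StreamConfig{
		Name:     "EXAMPLE",
		Subjects: []string{"example.typing"},
	}); err != nil {
		log.Fatal(err)
	}

	// Same shape as the producer calls in the hunk above.
	if _, err := js.PublishMsg(&nats.Msg{
		Subject: "example.typing",
		Data:    []byte(`{"room_id":"!abc:example.org","typing":true}`),
	}); err != nil {
		log.Fatal(err)
	}
}
```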
diff --git a/federationapi/consumers/eduserver.go b/federationapi/consumers/eduserver.go
index 56ec9eaf..9e52acef 100644
--- a/federationapi/consumers/eduserver.go
+++ b/federationapi/consumers/eduserver.go
@@ -17,233 +17,236 @@ package consumers
import (
"context"
"encoding/json"
- "fmt"
- "github.com/Shopify/sarama"
"github.com/matrix-org/dendrite/eduserver/api"
"github.com/matrix-org/dendrite/federationapi/queue"
"github.com/matrix-org/dendrite/federationapi/storage"
- "github.com/matrix-org/dendrite/internal"
"github.com/matrix-org/dendrite/setup/config"
+ "github.com/matrix-org/dendrite/setup/jetstream"
"github.com/matrix-org/dendrite/setup/process"
"github.com/matrix-org/gomatrixserverlib"
"github.com/matrix-org/util"
+ "github.com/nats-io/nats.go"
log "github.com/sirupsen/logrus"
)
// OutputEDUConsumer consumes events that originate in EDU server.
type OutputEDUConsumer struct {
- typingConsumer *internal.ContinualConsumer
- sendToDeviceConsumer *internal.ContinualConsumer
- receiptConsumer *internal.ContinualConsumer
- db storage.Database
- queues *queue.OutgoingQueues
- ServerName gomatrixserverlib.ServerName
- TypingTopic string
- SendToDeviceTopic string
+ ctx context.Context
+ jetstream nats.JetStreamContext
+ db storage.Database
+ queues *queue.OutgoingQueues
+ ServerName gomatrixserverlib.ServerName
+ typingTopic string
+ sendToDeviceTopic string
+ receiptTopic string
}
// NewOutputEDUConsumer creates a new OutputEDUConsumer. Call Start() to begin consuming from EDU servers.
func NewOutputEDUConsumer(
process *process.ProcessContext,
cfg *config.FederationAPI,
- kafkaConsumer sarama.Consumer,
+ js nats.JetStreamContext,
queues *queue.OutgoingQueues,
store storage.Database,
) *OutputEDUConsumer {
- c := &OutputEDUConsumer{
- typingConsumer: &internal.ContinualConsumer{
- Process: process,
- ComponentName: "eduserver/typing",
- Topic: cfg.Matrix.Kafka.TopicFor(config.TopicOutputTypingEvent),
- Consumer: kafkaConsumer,
- PartitionStore: store,
- },
- sendToDeviceConsumer: &internal.ContinualConsumer{
- Process: process,
- ComponentName: "eduserver/sendtodevice",
- Topic: cfg.Matrix.Kafka.TopicFor(config.TopicOutputSendToDeviceEvent),
- Consumer: kafkaConsumer,
- PartitionStore: store,
- },
- receiptConsumer: &internal.ContinualConsumer{
- Process: process,
- ComponentName: "eduserver/receipt",
- Topic: cfg.Matrix.Kafka.TopicFor(config.TopicOutputReceiptEvent),
- Consumer: kafkaConsumer,
- PartitionStore: store,
- },
+ return &OutputEDUConsumer{
+ ctx: process.Context(),
+ jetstream: js,
queues: queues,
db: store,
ServerName: cfg.Matrix.ServerName,
- TypingTopic: cfg.Matrix.Kafka.TopicFor(config.TopicOutputTypingEvent),
- SendToDeviceTopic: cfg.Matrix.Kafka.TopicFor(config.TopicOutputSendToDeviceEvent),
+ typingTopic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputTypingEvent),
+ sendToDeviceTopic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputSendToDeviceEvent),
+ receiptTopic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputReceiptEvent),
}
- c.typingConsumer.ProcessMessage = c.onTypingEvent
- c.sendToDeviceConsumer.ProcessMessage = c.onSendToDeviceEvent
- c.receiptConsumer.ProcessMessage = c.onReceiptEvent
-
- return c
}
// Start consuming from EDU servers
func (t *OutputEDUConsumer) Start() error {
- if err := t.typingConsumer.Start(); err != nil {
- return fmt.Errorf("t.typingConsumer.Start: %w", err)
+ if _, err := t.jetstream.Subscribe(t.typingTopic, t.onTypingEvent); err != nil {
+ return err
}
- if err := t.sendToDeviceConsumer.Start(); err != nil {
- return fmt.Errorf("t.sendToDeviceConsumer.Start: %w", err)
+ if _, err := t.jetstream.Subscribe(t.sendToDeviceTopic, t.onSendToDeviceEvent); err != nil {
+ return err
}
- if err := t.receiptConsumer.Start(); err != nil {
- return fmt.Errorf("t.receiptConsumer.Start: %w", err)
+ if _, err := t.jetstream.Subscribe(t.receiptTopic, t.onReceiptEvent); err != nil {
+ return err
}
return nil
}
// onSendToDeviceEvent is called in response to a message received on the
// send-to-device events topic from the EDU server.
-func (t *OutputEDUConsumer) onSendToDeviceEvent(msg *sarama.ConsumerMessage) error {
+func (t *OutputEDUConsumer) onSendToDeviceEvent(msg *nats.Msg) {
// Extract the send-to-device event from msg.
- var ote api.OutputSendToDeviceEvent
- if err := json.Unmarshal(msg.Value, &ote); err != nil {
- log.WithError(err).Errorf("eduserver output log: message parse failed (expected send-to-device)")
- return nil
- }
-
- // only send send-to-device events which originated from us
- _, originServerName, err := gomatrixserverlib.SplitID('@', ote.Sender)
- if err != nil {
- log.WithError(err).WithField("user_id", ote.Sender).Error("Failed to extract domain from send-to-device sender")
- return nil
- }
- if originServerName != t.ServerName {
- log.WithField("other_server", originServerName).Info("Suppressing send-to-device: originated elsewhere")
- return nil
- }
-
- _, destServerName, err := gomatrixserverlib.SplitID('@', ote.UserID)
- if err != nil {
- log.WithError(err).WithField("user_id", ote.UserID).Error("Failed to extract domain from send-to-device destination")
- return nil
- }
-
- // Pack the EDU and marshal it
- edu := &gomatrixserverlib.EDU{
- Type: gomatrixserverlib.MDirectToDevice,
- Origin: string(t.ServerName),
- }
- tdm := gomatrixserverlib.ToDeviceMessage{
- Sender: ote.Sender,
- Type: ote.Type,
- MessageID: util.RandomString(32),
- Messages: map[string]map[string]json.RawMessage{
- ote.UserID: {
- ote.DeviceID: ote.Content,
+ jetstream.WithJetStreamMessage(msg, func(msg *nats.Msg) bool {
+ var ote api.OutputSendToDeviceEvent
+ if err := json.Unmarshal(msg.Data, &ote); err != nil {
+ log.WithError(err).Errorf("eduserver output log: message parse failed (expected send-to-device)")
+ return true
+ }
+
+ // only send send-to-device events which originated from us
+ _, originServerName, err := gomatrixserverlib.SplitID('@', ote.Sender)
+ if err != nil {
+ log.WithError(err).WithField("user_id", ote.Sender).Error("Failed to extract domain from send-to-device sender")
+ return true
+ }
+ if originServerName != t.ServerName {
+ log.WithField("other_server", originServerName).Info("Suppressing send-to-device: originated elsewhere")
+ return true
+ }
+
+ _, destServerName, err := gomatrixserverlib.SplitID('@', ote.UserID)
+ if err != nil {
+ log.WithError(err).WithField("user_id", ote.UserID).Error("Failed to extract domain from send-to-device destination")
+ return true
+ }
+
+ // Pack the EDU and marshal it
+ edu := &gomatrixserverlib.EDU{
+ Type: gomatrixserverlib.MDirectToDevice,
+ Origin: string(t.ServerName),
+ }
+ tdm := gomatrixserverlib.ToDeviceMessage{
+ Sender: ote.Sender,
+ Type: ote.Type,
+ MessageID: util.RandomString(32),
+ Messages: map[string]map[string]json.RawMessage{
+ ote.UserID: {
+ ote.DeviceID: ote.Content,
+ },
},
- },
- }
- if edu.Content, err = json.Marshal(tdm); err != nil {
- return err
- }
-
- log.Infof("Sending send-to-device message into %q destination queue", destServerName)
- return t.queues.SendEDU(edu, t.ServerName, []gomatrixserverlib.ServerName{destServerName})
+ }
+ if edu.Content, err = json.Marshal(tdm); err != nil {
+ log.WithError(err).Error("failed to marshal EDU JSON")
+ return true
+ }
+
+ log.Infof("Sending send-to-device message into %q destination queue", destServerName)
+ if err := t.queues.SendEDU(edu, t.ServerName, []gomatrixserverlib.ServerName{destServerName}); err != nil {
+ log.WithError(err).Error("failed to send EDU")
+ return false
+ }
+
+ return true
+ })
}
// onTypingEvent is called in response to a message received on the typing
// events topic from the EDU server.
-func (t *OutputEDUConsumer) onTypingEvent(msg *sarama.ConsumerMessage) error {
- // Extract the typing event from msg.
- var ote api.OutputTypingEvent
- if err := json.Unmarshal(msg.Value, &ote); err != nil {
- // Skip this msg but continue processing messages.
- log.WithError(err).Errorf("eduserver output log: message parse failed (expected typing)")
- return nil
- }
-
- // only send typing events which originated from us
- _, typingServerName, err := gomatrixserverlib.SplitID('@', ote.Event.UserID)
- if err != nil {
- log.WithError(err).WithField("user_id", ote.Event.UserID).Error("Failed to extract domain from typing sender")
- return nil
- }
- if typingServerName != t.ServerName {
- log.WithField("other_server", typingServerName).Info("Suppressing typing notif: originated elsewhere")
- return nil
- }
-
- joined, err := t.db.GetJoinedHosts(context.TODO(), ote.Event.RoomID)
- if err != nil {
- return err
- }
-
- names := make([]gomatrixserverlib.ServerName, len(joined))
- for i := range joined {
- names[i] = joined[i].ServerName
- }
-
- edu := &gomatrixserverlib.EDU{Type: ote.Event.Type}
- if edu.Content, err = json.Marshal(map[string]interface{}{
- "room_id": ote.Event.RoomID,
- "user_id": ote.Event.UserID,
- "typing": ote.Event.Typing,
- }); err != nil {
- return err
- }
-
- return t.queues.SendEDU(edu, t.ServerName, names)
+func (t *OutputEDUConsumer) onTypingEvent(msg *nats.Msg) {
+ jetstream.WithJetStreamMessage(msg, func(msg *nats.Msg) bool {
+ // Extract the typing event from msg.
+ var ote api.OutputTypingEvent
+ if err := json.Unmarshal(msg.Data, &ote); err != nil {
+ // Skip this msg but continue processing messages.
+ log.WithError(err).Errorf("eduserver output log: message parse failed (expected typing)")
+ _ = msg.Ack()
+ return true
+ }
+
+ // only send typing events which originated from us
+ _, typingServerName, err := gomatrixserverlib.SplitID('@', ote.Event.UserID)
+ if err != nil {
+ log.WithError(err).WithField("user_id", ote.Event.UserID).Error("Failed to extract domain from typing sender")
+ _ = msg.Ack()
+ return true
+ }
+ if typingServerName != t.ServerName {
+ return true
+ }
+
+ joined, err := t.db.GetJoinedHosts(t.ctx, ote.Event.RoomID)
+ if err != nil {
+ log.WithError(err).WithField("room_id", ote.Event.RoomID).Error("failed to get joined hosts for room")
+ return false
+ }
+
+ names := make([]gomatrixserverlib.ServerName, len(joined))
+ for i := range joined {
+ names[i] = joined[i].ServerName
+ }
+
+ edu := &gomatrixserverlib.EDU{Type: ote.Event.Type}
+ if edu.Content, err = json.Marshal(map[string]interface{}{
+ "room_id": ote.Event.RoomID,
+ "user_id": ote.Event.UserID,
+ "typing": ote.Event.Typing,
+ }); err != nil {
+ log.WithError(err).Error("failed to marshal EDU JSON")
+ return true
+ }
+
+ if err := t.queues.SendEDU(edu, t.ServerName, names); err != nil {
+ log.WithError(err).Error("failed to send EDU")
+ return false
+ }
+
+ return true
+ })
}
// onReceiptEvent is called in response to a message received on the receipt
// events topic from the EDU server.
-func (t *OutputEDUConsumer) onReceiptEvent(msg *sarama.ConsumerMessage) error {
- // Extract the typing event from msg.
- var receipt api.OutputReceiptEvent
- if err := json.Unmarshal(msg.Value, &receipt); err != nil {
- // Skip this msg but continue processing messages.
- log.WithError(err).Errorf("eduserver output log: message parse failed (expected receipt)")
- return nil
- }
-
- // only send receipt events which originated from us
- _, receiptServerName, err := gomatrixserverlib.SplitID('@', receipt.UserID)
- if err != nil {
- log.WithError(err).WithField("user_id", receipt.UserID).Error("Failed to extract domain from receipt sender")
- return nil
- }
- if receiptServerName != t.ServerName {
- return nil // don't log, very spammy as it logs for each remote receipt
- }
-
- joined, err := t.db.GetJoinedHosts(context.TODO(), receipt.RoomID)
- if err != nil {
- return err
- }
-
- names := make([]gomatrixserverlib.ServerName, len(joined))
- for i := range joined {
- names[i] = joined[i].ServerName
- }
-
- content := map[string]api.FederationReceiptMRead{}
- content[receipt.RoomID] = api.FederationReceiptMRead{
- User: map[string]api.FederationReceiptData{
- receipt.UserID: {
- Data: api.ReceiptTS{
- TS: receipt.Timestamp,
+func (t *OutputEDUConsumer) onReceiptEvent(msg *nats.Msg) {
+ jetstream.WithJetStreamMessage(msg, func(msg *nats.Msg) bool {
+ // Extract the typing event from msg.
+ var receipt api.OutputReceiptEvent
+ if err := json.Unmarshal(msg.Data, &receipt); err != nil {
+ // Skip this msg but continue processing messages.
+ log.WithError(err).Errorf("eduserver output log: message parse failed (expected receipt)")
+ return true
+ }
+
+ // only send receipt events which originated from us
+ _, receiptServerName, err := gomatrixserverlib.SplitID('@', receipt.UserID)
+ if err != nil {
+ log.WithError(err).WithField("user_id", receipt.UserID).Error("failed to extract domain from receipt sender")
+ return true
+ }
+ if receiptServerName != t.ServerName {
+ return true
+ }
+
+ joined, err := t.db.GetJoinedHosts(t.ctx, receipt.RoomID)
+ if err != nil {
+ log.WithError(err).WithField("room_id", receipt.RoomID).Error("failed to get joined hosts for room")
+ return false
+ }
+
+ names := make([]gomatrixserverlib.ServerName, len(joined))
+ for i := range joined {
+ names[i] = joined[i].ServerName
+ }
+
+ content := map[string]api.FederationReceiptMRead{}
+ content[receipt.RoomID] = api.FederationReceiptMRead{
+ User: map[string]api.FederationReceiptData{
+ receipt.UserID: {
+ Data: api.ReceiptTS{
+ TS: receipt.Timestamp,
+ },
+ EventIDs: []string{receipt.EventID},
},
- EventIDs: []string{receipt.EventID},
},
- },
- }
-
- edu := &gomatrixserverlib.EDU{
- Type: gomatrixserverlib.MReceipt,
- Origin: string(t.ServerName),
- }
- if edu.Content, err = json.Marshal(content); err != nil {
- return err
- }
-
- return t.queues.SendEDU(edu, t.ServerName, names)
+ }
+
+ edu := &gomatrixserverlib.EDU{
+ Type: gomatrixserverlib.MReceipt,
+ Origin: string(t.ServerName),
+ }
+ if edu.Content, err = json.Marshal(content); err != nil {
+ log.WithError(err).Error("failed to marshal EDU JSON")
+ return true
+ }
+
+ if err := t.queues.SendEDU(edu, t.ServerName, names); err != nil {
+ log.WithError(err).Error("failed to send EDU")
+ return false
+ }
+
+ return true
+ })
}
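All three consumers above wrap their handler bodies in `jetstream.WithJetStreamMessage` and report the outcome as a boolean: the visible convention in this file is that `true` means the message is done with, while `false` is returned for transient failures (a failed `SendEDU` or database lookup) so the message can be redelivered. A minimal sketch of a helper with that shape, assuming `true` maps to an acknowledgement and `false` to a negative acknowledgement, is shown below; the real Dendrite helper may differ in detail.

```go
package jetstream

import "github.com/nats-io/nats.go"

// WithJetStreamMessage runs f against a consumed message and translates its
// boolean result into JetStream flow control: true acknowledges the message,
// false negatively acknowledges it so that the server redelivers it later.
// This is a sketch of the assumed behaviour, not the verified implementation.
func WithJetStreamMessage(msg *nats.Msg, f func(msg *nats.Msg) bool) {
	if f(msg) {
		_ = msg.Ack()
	} else {
		_ = msg.Nak()
	}
}
```

Under that reading, the explicit `_ = msg.Ack()` calls in `onTypingEvent` above would be redundant but harmless.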
diff --git a/federationapi/consumers/keychange.go b/federationapi/consumers/keychange.go
index a8ae0894..8231fcf4 100644
--- a/federationapi/consumers/keychange.go
+++ b/federationapi/consumers/keychange.go
@@ -27,6 +27,7 @@ import (
"github.com/matrix-org/dendrite/keyserver/api"
roomserverAPI "github.com/matrix-org/dendrite/roomserver/api"
"github.com/matrix-org/dendrite/setup/config"
+ "github.com/matrix-org/dendrite/setup/jetstream"
"github.com/matrix-org/dendrite/setup/process"
"github.com/matrix-org/gomatrixserverlib"
"github.com/sirupsen/logrus"
@@ -34,6 +35,7 @@ import (
// KeyChangeConsumer consumes events that originate in key server.
type KeyChangeConsumer struct {
+ ctx context.Context
consumer *internal.ContinualConsumer
db storage.Database
queues *queue.OutgoingQueues
@@ -51,10 +53,11 @@ func NewKeyChangeConsumer(
rsAPI roomserverAPI.RoomserverInternalAPI,
) *KeyChangeConsumer {
c := &KeyChangeConsumer{
+ ctx: process.Context(),
consumer: &internal.ContinualConsumer{
Process: process,
ComponentName: "federationapi/keychange",
- Topic: string(cfg.Matrix.Kafka.TopicFor(config.TopicOutputKeyChangeEvent)),
+ Topic: string(cfg.Matrix.JetStream.TopicFor(jetstream.OutputKeyChangeEvent)),
Consumer: kafkaConsumer,
PartitionStore: store,
},
@@ -100,6 +103,9 @@ func (t *KeyChangeConsumer) onMessage(msg *sarama.ConsumerMessage) error {
}
func (t *KeyChangeConsumer) onDeviceKeyMessage(m api.DeviceMessage) error {
+ if m.DeviceKeys == nil {
+ return nil
+ }
logger := logrus.WithField("user_id", m.UserID)
// only send key change events which originated from us
@@ -113,7 +119,7 @@ func (t *KeyChangeConsumer) onDeviceKeyMessage(m api.DeviceMessage) error {
}
var queryRes roomserverAPI.QueryRoomsForUserResponse
- err = t.rsAPI.QueryRoomsForUser(context.Background(), &roomserverAPI.QueryRoomsForUserRequest{
+ err = t.rsAPI.QueryRoomsForUser(t.ctx, &roomserverAPI.QueryRoomsForUserRequest{
UserID: m.UserID,
WantMembership: "join",
}, &queryRes)
@@ -122,7 +128,7 @@ func (t *KeyChangeConsumer) onDeviceKeyMessage(m api.DeviceMessage) error {
return nil
}
// send this key change to all servers who share rooms with this user.
- destinations, err := t.db.GetJoinedHostsForRooms(context.Background(), queryRes.RoomIDs)
+ destinations, err := t.db.GetJoinedHostsForRooms(t.ctx, queryRes.RoomIDs)
if err != nil {
logger.WithError(err).Error("failed to calculate joined hosts for rooms user is in")
return nil
@@ -165,7 +171,7 @@ func (t *KeyChangeConsumer) onCrossSigningMessage(m api.DeviceMessage) error {
logger := logrus.WithField("user_id", output.UserID)
var queryRes roomserverAPI.QueryRoomsForUserResponse
- err = t.rsAPI.QueryRoomsForUser(context.Background(), &roomserverAPI.QueryRoomsForUserRequest{
+ err = t.rsAPI.QueryRoomsForUser(t.ctx, &roomserverAPI.QueryRoomsForUserRequest{
UserID: output.UserID,
WantMembership: "join",
}, &queryRes)
@@ -174,7 +180,7 @@ func (t *KeyChangeConsumer) onCrossSigningMessage(m api.DeviceMessage) error {
return nil
}
// send this key change to all servers who share rooms with this user.
- destinations, err := t.db.GetJoinedHostsForRooms(context.Background(), queryRes.RoomIDs)
+ destinations, err := t.db.GetJoinedHostsForRooms(t.ctx, queryRes.RoomIDs)
if err != nil {
logger.WithError(err).Error("fedsender key change consumer: failed to calculate joined hosts for rooms user is in")
return nil
diff --git a/federationapi/consumers/roomserver.go b/federationapi/consumers/roomserver.go
index 20b1bacb..12410bb7 100644
--- a/federationapi/consumers/roomserver.go
+++ b/federationapi/consumers/roomserver.go
@@ -19,117 +19,115 @@ import (
"encoding/json"
"fmt"
- "github.com/Shopify/sarama"
"github.com/matrix-org/dendrite/federationapi/queue"
"github.com/matrix-org/dendrite/federationapi/storage"
"github.com/matrix-org/dendrite/federationapi/types"
- "github.com/matrix-org/dendrite/internal"
"github.com/matrix-org/dendrite/roomserver/api"
"github.com/matrix-org/dendrite/setup/config"
+ "github.com/matrix-org/dendrite/setup/jetstream"
"github.com/matrix-org/dendrite/setup/process"
"github.com/matrix-org/gomatrixserverlib"
+ "github.com/nats-io/nats.go"
log "github.com/sirupsen/logrus"
)
// OutputRoomEventConsumer consumes events that originated in the room server.
type OutputRoomEventConsumer struct {
- cfg *config.FederationAPI
- rsAPI api.RoomserverInternalAPI
- rsConsumer *internal.ContinualConsumer
- db storage.Database
- queues *queue.OutgoingQueues
+ ctx context.Context
+ cfg *config.FederationAPI
+ rsAPI api.RoomserverInternalAPI
+ jetstream nats.JetStreamContext
+ db storage.Database
+ queues *queue.OutgoingQueues
+ topic string
}
// NewOutputRoomEventConsumer creates a new OutputRoomEventConsumer. Call Start() to begin consuming from room servers.
func NewOutputRoomEventConsumer(
process *process.ProcessContext,
cfg *config.FederationAPI,
- kafkaConsumer sarama.Consumer,
+ js nats.JetStreamContext,
queues *queue.OutgoingQueues,
store storage.Database,
rsAPI api.RoomserverInternalAPI,
) *OutputRoomEventConsumer {
- consumer := internal.ContinualConsumer{
- Process: process,
- ComponentName: "federationapi/roomserver",
- Topic: string(cfg.Matrix.Kafka.TopicFor(config.TopicOutputRoomEvent)),
- Consumer: kafkaConsumer,
- PartitionStore: store,
+ return &OutputRoomEventConsumer{
+ ctx: process.Context(),
+ cfg: cfg,
+ jetstream: js,
+ db: store,
+ queues: queues,
+ rsAPI: rsAPI,
+ topic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputRoomEvent),
}
- s := &OutputRoomEventConsumer{
- cfg: cfg,
- rsConsumer: &consumer,
- db: store,
- queues: queues,
- rsAPI: rsAPI,
- }
- consumer.ProcessMessage = s.onMessage
-
- return s
}
// Start consuming from room servers
func (s *OutputRoomEventConsumer) Start() error {
- return s.rsConsumer.Start()
+ _, err := s.jetstream.Subscribe(s.topic, s.onMessage)
+ return err
}
// onMessage is called when the federation server receives a new event from the room server output log.
// It is unsafe to call this with messages for the same room in multiple goroutines
// because it will likely fail with a types.EventIDMismatchError when it
// realises that it cannot update the room state using the deltas.
-func (s *OutputRoomEventConsumer) onMessage(msg *sarama.ConsumerMessage) error {
- // Parse out the event JSON
- var output api.OutputEvent
- if err := json.Unmarshal(msg.Value, &output); err != nil {
- // If the message was invalid, log it and move on to the next message in the stream
- log.WithError(err).Errorf("roomserver output log: message parse failure")
- return nil
- }
+func (s *OutputRoomEventConsumer) onMessage(msg *nats.Msg) {
+ jetstream.WithJetStreamMessage(msg, func(msg *nats.Msg) bool {
+ // Parse out the event JSON
+ var output api.OutputEvent
+ if err := json.Unmarshal(msg.Data, &output); err != nil {
+ // If the message was invalid, log it and move on to the next message in the stream
+ log.WithError(err).Errorf("roomserver output log: message parse failure")
+ return true
+ }
- switch output.Type {
- case api.OutputTypeNewRoomEvent:
- ev := output.NewRoomEvent.Event
+ switch output.Type {
+ case api.OutputTypeNewRoomEvent:
+ ev := output.NewRoomEvent.Event
- if output.NewRoomEvent.RewritesState {
- if err := s.db.PurgeRoomState(context.TODO(), ev.RoomID()); err != nil {
- return fmt.Errorf("s.db.PurgeRoom: %w", err)
+ if output.NewRoomEvent.RewritesState {
+ if err := s.db.PurgeRoomState(s.ctx, ev.RoomID()); err != nil {
+ log.WithError(err).Errorf("roomserver output log: purge room state failure")
+ return false
+ }
}
- }
- if err := s.processMessage(*output.NewRoomEvent); err != nil {
- switch err.(type) {
- case *queue.ErrorFederationDisabled:
- log.WithField("error", output.Type).Info(
- err.Error(),
- )
- default:
- // panic rather than continue with an inconsistent database
+ if err := s.processMessage(*output.NewRoomEvent); err != nil {
+ switch err.(type) {
+ case *queue.ErrorFederationDisabled:
+ log.WithField("error", output.Type).Info(
+ err.Error(),
+ )
+ default:
+ // panic rather than continue with an inconsistent database
+ log.WithFields(log.Fields{
+ "event_id": ev.EventID(),
+ "event": string(ev.JSON()),
+ "add": output.NewRoomEvent.AddsStateEventIDs,
+ "del": output.NewRoomEvent.RemovesStateEventIDs,
+ log.ErrorKey: err,
+ }).Panicf("roomserver output log: write room event failure")
+ }
+ }
+
+ case api.OutputTypeNewInboundPeek:
+ if err := s.processInboundPeek(*output.NewInboundPeek); err != nil {
log.WithFields(log.Fields{
- "event_id": ev.EventID(),
- "event": string(ev.JSON()),
- "add": output.NewRoomEvent.AddsStateEventIDs,
- "del": output.NewRoomEvent.RemovesStateEventIDs,
+ "event": output.NewInboundPeek,
log.ErrorKey: err,
- }).Panicf("roomserver output log: write room event failure")
+ }).Panicf("roomserver output log: remote peek event failure")
+ return false
}
- return nil
- }
- case api.OutputTypeNewInboundPeek:
- if err := s.processInboundPeek(*output.NewInboundPeek); err != nil {
- log.WithFields(log.Fields{
- "event": output.NewInboundPeek,
- log.ErrorKey: err,
- }).Panicf("roomserver output log: remote peek event failure")
- return nil
+
+ default:
+ log.WithField("type", output.Type).Debug(
+ "roomserver output log: ignoring unknown output type",
+ )
}
- default:
- log.WithField("type", output.Type).Debug(
- "roomserver output log: ignoring unknown output type",
- )
- return nil
- }
- return nil
+ return true
+ })
}
// processInboundPeek starts tracking a new federated inbound peek (replacing the existing one if any)
@@ -146,7 +144,7 @@ func (s *OutputRoomEventConsumer) processInboundPeek(orp api.OutputNewInboundPee
//
// This is making the tests flakey.
- return s.db.AddInboundPeek(context.TODO(), orp.ServerName, orp.RoomID, orp.PeekID, orp.RenewalInterval)
+ return s.db.AddInboundPeek(s.ctx, orp.ServerName, orp.RoomID, orp.PeekID, orp.RenewalInterval)
}
// processMessage updates the list of currently joined hosts in the room
@@ -162,7 +160,7 @@ func (s *OutputRoomEventConsumer) processMessage(ore api.OutputNewRoomEvent) err
// TODO(#290): handle EventIDMismatchError and recover the current state by
// talking to the roomserver
oldJoinedHosts, err := s.db.UpdateRoom(
- context.TODO(),
+ s.ctx,
ore.Event.RoomID(),
ore.LastSentEventID,
ore.Event.EventID(),
@@ -255,7 +253,7 @@ func (s *OutputRoomEventConsumer) joinedHostsAtEvent(
}
// handle peeking hosts
- inboundPeeks, err := s.db.GetInboundPeeks(context.TODO(), ore.Event.Event.RoomID())
+ inboundPeeks, err := s.db.GetInboundPeeks(s.ctx, ore.Event.Event.RoomID())
if err != nil {
return nil, err
}
@@ -373,7 +371,7 @@ func (s *OutputRoomEventConsumer) lookupStateEvents(
// from the roomserver using the query API.
eventReq := api.QueryEventsByIDRequest{EventIDs: missing}
var eventResp api.QueryEventsByIDResponse
- if err := s.rsAPI.QueryEventsByID(context.TODO(), &eventReq, &eventResp); err != nil {
+ if err := s.rsAPI.QueryEventsByID(s.ctx, &eventReq, &eventResp); err != nil {
return nil, err
}
diff --git a/federationapi/federationapi.go b/federationapi/federationapi.go
index 02c4cfdb..0b181606 100644
--- a/federationapi/federationapi.go
+++ b/federationapi/federationapi.go
@@ -30,7 +30,7 @@ import (
roomserverAPI "github.com/matrix-org/dendrite/roomserver/api"
"github.com/matrix-org/dendrite/setup/base"
"github.com/matrix-org/dendrite/setup/config"
- "github.com/matrix-org/dendrite/setup/kafka"
+ "github.com/matrix-org/dendrite/setup/jetstream"
userapi "github.com/matrix-org/dendrite/userapi/api"
"github.com/sirupsen/logrus"
@@ -92,7 +92,7 @@ func NewInternalAPI(
FailuresUntilBlacklist: cfg.FederationMaxRetries,
}
- consumer, _ := kafka.SetupConsumerProducer(&cfg.Matrix.Kafka)
+ js, consumer, _ := jetstream.Prepare(&cfg.Matrix.JetStream)
queues := queue.NewOutgoingQueues(
federationDB, base.ProcessContext,
@@ -106,7 +106,7 @@ func NewInternalAPI(
)
rsConsumer := consumers.NewOutputRoomEventConsumer(
- base.ProcessContext, cfg, consumer, queues,
+ base.ProcessContext, cfg, js, queues,
federationDB, rsAPI,
)
if err = rsConsumer.Start(); err != nil {
@@ -114,7 +114,7 @@ func NewInternalAPI(
}
tsConsumer := consumers.NewOutputEDUConsumer(
- base.ProcessContext, cfg, consumer, queues, federationDB,
+ base.ProcessContext, cfg, js, queues, federationDB,
)
if err := tsConsumer.Start(); err != nil {
logrus.WithError(err).Panic("failed to start typing server consumer")
diff --git a/federationapi/federationapi_keys_test.go b/federationapi/federationapi_keys_test.go
index 9e6c47cd..b9503963 100644
--- a/federationapi/federationapi_keys_test.go
+++ b/federationapi/federationapi_keys_test.go
@@ -74,7 +74,7 @@ func TestMain(m *testing.M) {
cfg.Defaults(true)
cfg.Global.ServerName = gomatrixserverlib.ServerName(s.name)
cfg.Global.PrivateKey = testPriv
- cfg.Global.Kafka.UseNaffka = true
+ cfg.Global.JetStream.InMemory = true
cfg.Global.KeyID = serverKeyID
cfg.Global.KeyValidityPeriod = s.validity
cfg.FederationAPI.Database.ConnectionString = config.DataSource("file::memory:")
diff --git a/federationapi/federationapi_test.go b/federationapi/federationapi_test.go
index 8b5bdd03..c660f12e 100644
--- a/federationapi/federationapi_test.go
+++ b/federationapi/federationapi_test.go
@@ -23,10 +23,9 @@ func TestRoomsV3URLEscapeDoNot404(t *testing.T) {
cfg.Global.KeyID = gomatrixserverlib.KeyID("ed25519:auto")
cfg.Global.ServerName = gomatrixserverlib.ServerName("localhost")
cfg.Global.PrivateKey = privKey
- cfg.Global.Kafka.UseNaffka = true
- cfg.Global.Kafka.Database.ConnectionString = config.DataSource("file::memory:")
+ cfg.Global.JetStream.InMemory = true
cfg.FederationAPI.Database.ConnectionString = config.DataSource("file::memory:")
- base := base.NewBaseDendrite(cfg, "Monolith", base.NoCacheMetrics)
+ base := base.NewBaseDendrite(cfg, "Monolith")
keyRing := &test.NopJSONVerifier{}
fsAPI := base.FederationAPIHTTPClient()
// TODO: This is pretty fragile, as if anything calls anything on these nils this test will break.
diff --git a/federationapi/internal/perform.go b/federationapi/internal/perform.go
index 82d04c21..b6c35842 100644
--- a/federationapi/internal/perform.go
+++ b/federationapi/internal/perform.go
@@ -249,7 +249,7 @@ func (r *FederationInternalAPI) performJoinUsingServer(
roomserverAPI.KindNew,
respState,
event.Headered(respMakeJoin.RoomVersion),
- nil,
+ nil, false,
); err != nil {
logrus.WithFields(logrus.Fields{
"room_id": roomID,
@@ -430,7 +430,7 @@ func (r *FederationInternalAPI) performOutboundPeekUsingServer(
roomserverAPI.KindNew,
&respState,
respPeek.LatestEvent.Headered(respPeek.RoomVersion),
- nil,
+ nil, false,
); err != nil {
return fmt.Errorf("r.producer.SendEventWithState: %w", err)
}
diff --git a/federationapi/routing/join.go b/federationapi/routing/join.go
index f0e1ae0d..7310a305 100644
--- a/federationapi/routing/join.go
+++ b/federationapi/routing/join.go
@@ -194,6 +194,12 @@ func SendJoin(
JSON: jsonerror.BadJSON("No state key was provided in the join event."),
}
}
+ if !event.StateKeyEquals(event.Sender()) {
+ return util.JSONResponse{
+ Code: http.StatusBadRequest,
+ JSON: jsonerror.BadJSON("Event state key must match the event sender."),
+ }
+ }
// Check that the room ID is correct.
if event.RoomID() != roomID {
diff --git a/federationapi/routing/leave.go b/federationapi/routing/leave.go
index 38f4ca76..6312adfa 100644
--- a/federationapi/routing/leave.go
+++ b/federationapi/routing/leave.go
@@ -175,10 +175,16 @@ func SendLeave(
}
}
- if event.StateKey() == nil {
+ if event.StateKey() == nil || event.StateKeyEquals("") {
return util.JSONResponse{
Code: http.StatusBadRequest,
- JSON: jsonerror.InvalidArgumentValue("missing state_key"),
+ JSON: jsonerror.BadJSON("No state key was provided in the leave event."),
+ }
+ }
+ if !event.StateKeyEquals(event.Sender()) {
+ return util.JSONResponse{
+ Code: http.StatusBadRequest,
+ JSON: jsonerror.BadJSON("Event state key must match the event sender."),
}
}
diff --git a/federationapi/routing/send.go b/federationapi/routing/send.go
index 4b5f0d66..fad23a5c 100644
--- a/federationapi/routing/send.go
+++ b/federationapi/routing/send.go
@@ -371,7 +371,7 @@ func (t *txnReq) processTransaction(ctx context.Context) (*gomatrixserverlib.Res
for _, task := range tasks {
if task.err != nil {
results[task.event.EventID()] = gomatrixserverlib.PDUResult{
- Error: task.err.Error(),
+ // Error: task.err.Error(), TODO: this upsets tests if uncommented
}
} else {
results[task.event.EventID()] = gomatrixserverlib.PDUResult{}
@@ -692,6 +692,7 @@ func (t *txnReq) processEvent(ctx context.Context, e *gomatrixserverlib.Event) e
},
api.DoNotSendToOtherServers,
nil,
+ false,
)
}
@@ -734,6 +735,7 @@ withNextEvent:
SendAsServer: api.DoNotSendToOtherServers,
},
},
+ false,
); err != nil {
return fmt.Errorf("api.SendEvents: %w", err)
}
@@ -882,6 +884,9 @@ func (t *txnReq) processEventWithMissingState(
resolvedState,
backwardsExtremity.Headered(roomVersion),
hadEvents,
+ // Send the events to the roomserver asynchronously, so they will be
+ // processed when the roomserver is able, without blocking here.
+ true,
)
if err != nil {
return fmt.Errorf("api.SendEventWithState: %w", err)
@@ -902,6 +907,9 @@ func (t *txnReq) processEventWithMissingState(
append(headeredNewEvents, e.Headered(roomVersion)),
api.DoNotSendToOtherServers,
nil,
+ // Send the events to the roomserver asynchronously, so they will be
+ // processed when the roomserver is able, without blocking here.
+ true,
); err != nil {
return fmt.Errorf("api.SendEvents: %w", err)
}
diff --git a/federationapi/routing/threepid.go b/federationapi/routing/threepid.go
index 5ba28881..fb919a59 100644
--- a/federationapi/routing/threepid.go
+++ b/federationapi/routing/threepid.go
@@ -89,7 +89,7 @@ func CreateInvitesFrom3PIDInvites(
}
// Send all the events
- if err := api.SendEvents(req.Context(), rsAPI, api.KindNew, evs, cfg.Matrix.ServerName, nil); err != nil {
+ if err := api.SendEvents(req.Context(), rsAPI, api.KindNew, evs, cfg.Matrix.ServerName, nil, false); err != nil {
util.GetLogger(req.Context()).WithError(err).Error("SendEvents failed")
return jsonerror.InternalServerError()
}
@@ -180,6 +180,7 @@ func ExchangeThirdPartyInvite(
},
cfg.Matrix.ServerName,
nil,
+ false,
); err != nil {
util.GetLogger(httpReq.Context()).WithError(err).Error("SendEvents failed")
return jsonerror.InternalServerError()
diff --git a/go.mod b/go.mod
index 5cd4b0e1..eb421ce1 100644
--- a/go.mod
+++ b/go.mod
@@ -1,23 +1,31 @@
module github.com/matrix-org/dendrite
+replace github.com/nats-io/nats-server/v2 => github.com/neilalexander/nats-server/v2 v2.3.3-0.20220104162330-c76d5fd70423
+
+replace github.com/nats-io/nats.go => github.com/neilalexander/nats.go v1.11.1-0.20220104162523-f4ddebe1061c
+
require (
github.com/Arceliar/ironwood v0.0.0-20210619124114-6ad55cae5031
+ github.com/Arceliar/phony v0.0.0-20210209235338-dde1a8dca979
github.com/DATA-DOG/go-sqlmock v1.5.0
github.com/HdrHistogram/hdrhistogram-go v1.0.1 // indirect
github.com/MFAshby/stdemuxerhook v1.0.0 // indirect
github.com/Masterminds/semver/v3 v3.1.1
- github.com/Shopify/sarama v1.29.1
+ github.com/S7evinK/saramajetstream v0.0.0-20210709110708-de6efc8c4a32
+ github.com/Shopify/sarama v1.29.0
github.com/codeclysm/extract v2.2.0+incompatible
github.com/containerd/containerd v1.5.7 // indirect
github.com/docker/docker v20.10.7+incompatible
github.com/docker/go-connections v0.4.0
github.com/getsentry/sentry-go v0.11.0
+ github.com/golang/snappy v0.0.4 // indirect
github.com/gologme/log v1.2.0
github.com/gorilla/mux v1.8.0
github.com/gorilla/websocket v1.4.2
github.com/h2non/filetype v1.1.1 // indirect
github.com/hashicorp/golang-lru v0.5.4
github.com/juju/testing v0.0.0-20210324180055-18c50b0c2098 // indirect
+ github.com/klauspost/compress v1.13.6 // indirect
github.com/lib/pq v1.10.1
github.com/libp2p/go-libp2p v0.13.0
github.com/libp2p/go-libp2p-circuit v0.4.0
@@ -33,16 +41,18 @@ require (
github.com/matrix-org/go-sqlite3-js v0.0.0-20210709140738-b0d1ba599a6d
github.com/matrix-org/gomatrix v0.0.0-20210324163249-be2af5ef2e16
github.com/matrix-org/gomatrixserverlib v0.0.0-20211115192839-15a64d244aa2
- github.com/matrix-org/naffka v0.0.0-20210623111924-14ff508b58e0
- github.com/matrix-org/pinecone v0.0.0-20211213132835-aa2808d77947
+ github.com/matrix-org/pinecone v0.0.0-20211216094739-095c5ea64d02
github.com/matrix-org/util v0.0.0-20200807132607-55161520e1d4
github.com/mattn/go-sqlite3 v1.14.8
github.com/morikuni/aec v1.0.0 // indirect
+ github.com/nats-io/nats-server/v2 v2.3.2
+ github.com/nats-io/nats.go v1.13.1-0.20211122170419-d7c1d78a50fc
github.com/neilalexander/utp v0.1.1-0.20210727203401-54ae7b1cd5f9
github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646
github.com/ngrok/sqlmw v0.0.0-20200129213757-d5c93a81bec6
github.com/opentracing/opentracing-go v1.2.0
github.com/patrickmn/go-cache v2.1.0+incompatible
+ github.com/pierrec/lz4 v2.6.1+incompatible // indirect
github.com/pkg/errors v0.9.1
github.com/pressly/goose v2.7.0+incompatible
github.com/prometheus/client_golang v1.11.0
@@ -58,6 +68,7 @@ require (
golang.org/x/mobile v0.0.0-20210716004757-34ab1303b554
golang.org/x/net v0.0.0-20210927181540-4e4d966f7476
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b
+ golang.org/x/text v0.3.7 // indirect
gopkg.in/h2non/bimg.v1 v1.1.5
gopkg.in/yaml.v2 v2.4.0
nhooyr.io/websocket v1.8.7
diff --git a/go.sum b/go.sum
index f14e2e98..377d9062 100644
--- a/go.sum
+++ b/go.sum
@@ -84,17 +84,17 @@ github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5
github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/PuerkitoBio/goquery v1.5.1/go.mod h1:GsLWisAFVj4WgDibEWF4pvYnkVQBpKBKeU+7zCJoLcc=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/RoaringBitmap/roaring v0.4.7/go.mod h1:8khRDP4HmeXns4xIj9oGrKSz7XTQiJx2zgh7AcNke4w=
github.com/RyanCarrier/dijkstra v1.0.0/go.mod h1:5agGUBNEtUAGIANmbw09fuO3a2htPEkc1jNH01qxCWA=
github.com/RyanCarrier/dijkstra-1 v0.0.0-20170512020943-0e5801a26345/go.mod h1:OK4EvWJ441LQqGzed5NGB6vKBAE34n3z7iayPcEwr30=
+github.com/S7evinK/saramajetstream v0.0.0-20210709110708-de6efc8c4a32 h1:i3fOph9Hjleo6LbuqN9ODFxnwt7mOtYMpCGeC8qJN50=
+github.com/S7evinK/saramajetstream v0.0.0-20210709110708-de6efc8c4a32/go.mod h1:ne+jkLlzafIzaE4Q0Ze81T27dNgXe1wxovVEoAtSHTc=
github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0=
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
-github.com/Shopify/sarama v1.26.1/go.mod h1:NbSGBSSndYaIhRcBtY9V0U7AyH+x71bG668AuWys/yU=
-github.com/Shopify/sarama v1.29.1 h1:wBAacXbYVLmWieEA/0X/JagDdCZ8NVFOfS6l6+2u5S0=
-github.com/Shopify/sarama v1.29.1/go.mod h1:mdtqvCSg8JOxk8PmpTNGyo6wzd4BMm4QXSfDnTXmgkE=
+github.com/Shopify/sarama v1.29.0 h1:ARid8o8oieau9XrHI55f/L3EoRAhm9px6sonbD7yuUE=
+github.com/Shopify/sarama v1.29.0/go.mod h1:2QpgD79wpdAESqNQMxNc0KYMkycd4slxGdV3TWSVqrU=
github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA=
@@ -119,7 +119,6 @@ github.com/anacrolix/missinggo v1.2.1 h1:0IE3TqX5y5D0IxeMwTyIgqdDew4QrzcXaaEnJQy
github.com/anacrolix/missinggo v1.2.1/go.mod h1:J5cMhif8jPmFoC3+Uvob3OXXNIhOUikzMt+uUjeM21Y=
github.com/anacrolix/missinggo/perf v1.0.0/go.mod h1:ljAFWkBuzkO12MQclXzZrosP5urunoLS0Cbvb4V0uMQ=
github.com/anacrolix/tagflag v0.0.0-20180109131632-2146c8d41bf0/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw=
-github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
@@ -476,9 +475,9 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
+github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/gologme/log v1.2.0 h1:Ya5Ip/KD6FX7uH0S31QO87nCCSucKtF44TLbTtO7V4c=
github.com/gologme/log v1.2.0/go.mod h1:gq31gQ8wEHkR+WekdWsqDuf8pXTUZA9BnnzTuPz1Y9U=
github.com/gomodule/redigo v1.7.1-0.20190724094224-574c33c3df38/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
@@ -488,7 +487,6 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
@@ -718,16 +716,16 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
-github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.12.2 h1:2KCfW3I9M7nSc5wOqXAlW2v2U6v+w6cbjvbfp+OykW8=
github.com/klauspost/compress v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
+github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
+github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
+github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc=
+github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d h1:68u9r4wEvL3gYg2jvAOgROwZ3H+Y3hIDk4tbbmIjcYQ=
@@ -767,7 +765,6 @@ github.com/libp2p/go-flow-metrics v0.0.1/go.mod h1:Iv1GH0sG8DtYN3SVJ2eG221wMiNpZ
github.com/libp2p/go-flow-metrics v0.0.2/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs=
github.com/libp2p/go-flow-metrics v0.0.3 h1:8tAs/hSdNvUiLgtlSy3mxwxWP4I9y/jlkPFT7epKdeM=
github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs=
-github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs=
github.com/libp2p/go-libp2p v0.6.1/go.mod h1:CTFnWXogryAHjXAKEbOf1OWY+VeAP3lDMZkfEI5sT54=
github.com/libp2p/go-libp2p v0.7.0/go.mod h1:hZJf8txWeCduQRDC/WSqBGMxaTHCOYHt2xSU1ivxn0k=
github.com/libp2p/go-libp2p v0.7.4/go.mod h1:oXsBlTLF1q7pxr+9w6lqzS1ILpyHsaBPniVO7zIHGMw=
@@ -923,7 +920,6 @@ github.com/libp2p/go-netroute v0.1.3/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdm
github.com/libp2p/go-openssl v0.0.2/go.mod h1:v8Zw2ijCSWBQi8Pq5GAixw6DbFfa9u6VIYDXnvOXkc0=
github.com/libp2p/go-openssl v0.0.3/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc=
github.com/libp2p/go-openssl v0.0.4/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc=
-github.com/libp2p/go-openssl v0.0.4/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc=
github.com/libp2p/go-openssl v0.0.5/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc=
github.com/libp2p/go-openssl v0.0.7 h1:eCAzdLejcNVBzP/iZM9vqHnQm+XyCEbSSIheIPRGNsw=
github.com/libp2p/go-openssl v0.0.7/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc=
@@ -968,7 +964,6 @@ github.com/lxn/walk v0.0.0-20210112085537-c389da54e794/go.mod h1:E23UucZGqpuUANJ
github.com/lxn/win v0.0.0-20210218163916-a377121e959e/go.mod h1:KxxjdtRkfNoYDCUP5ryK7XJJNTnpC8atvtmTheChOtk=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
@@ -997,10 +992,8 @@ github.com/matrix-org/gomatrix v0.0.0-20210324163249-be2af5ef2e16 h1:ZtO5uywdd5d
github.com/matrix-org/gomatrix v0.0.0-20210324163249-be2af5ef2e16/go.mod h1:/gBX06Kw0exX1HrwmoBibFA98yBk/jxKpGVeyQbff+s=
github.com/matrix-org/gomatrixserverlib v0.0.0-20211115192839-15a64d244aa2 h1:RFsBN3509Ql6NJ7TDVkcKoN3bb/tmqUqzur5c0AwIHQ=
github.com/matrix-org/gomatrixserverlib v0.0.0-20211115192839-15a64d244aa2/go.mod h1:rB8tBUUUo1rzUqpzklRDSooxZ6YMhoaEPx4SO5fGeUc=
-github.com/matrix-org/naffka v0.0.0-20210623111924-14ff508b58e0 h1:HZCzy4oVzz55e+cOMiX/JtSF2UOY1evBl2raaE7ACcU=
-github.com/matrix-org/naffka v0.0.0-20210623111924-14ff508b58e0/go.mod h1:sjyPyRxKM5uw1nD2cJ6O2OxI6GOqyVBfNXqKjBZTBZE=
-github.com/matrix-org/pinecone v0.0.0-20211213132835-aa2808d77947 h1:TxO9TMFAuF+Vz3vZV53z5mjycWtF1+naY9ffs6QfZxc=
-github.com/matrix-org/pinecone v0.0.0-20211213132835-aa2808d77947/go.mod h1:r6dsL+ylE0yXe/7zh8y/Bdh6aBYI1r+u4yZni9A4iyk=
+github.com/matrix-org/pinecone v0.0.0-20211216094739-095c5ea64d02 h1:tLn95Nqq3KPOZAjogGZTKMEkn4mMIzKu09biRTz/Ack=
+github.com/matrix-org/pinecone v0.0.0-20211216094739-095c5ea64d02/go.mod h1:r6dsL+ylE0yXe/7zh8y/Bdh6aBYI1r+u4yZni9A4iyk=
github.com/matrix-org/util v0.0.0-20190711121626-527ce5ddefc7/go.mod h1:vVQlW/emklohkZnOPwD3LrZUBqdfsbiyO3p1lNV8F6U=
github.com/matrix-org/util v0.0.0-20200807132607-55161520e1d4 h1:eCEHXWDv9Rm335MSuB49mFUK44bwZPFSDde3ORE3syk=
github.com/matrix-org/util v0.0.0-20200807132607-55161520e1d4/go.mod h1:vVQlW/emklohkZnOPwD3LrZUBqdfsbiyO3p1lNV8F6U=
@@ -1022,7 +1015,6 @@ github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzp
github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk=
github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
-github.com/mattn/go-sqlite3 v1.14.2/go.mod h1:JIl7NbARA7phWnGvh0LKTyg7S9BA+6gx71ShQilpsus=
github.com/mattn/go-sqlite3 v1.14.8 h1:gDp86IdQsN/xWjIEmr9MF6o9mpksUgh0fu+9ByFxzIU=
github.com/mattn/go-sqlite3 v1.14.8/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
@@ -1042,6 +1034,8 @@ github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7
github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g=
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ=
+github.com/minio/highwayhash v1.0.1 h1:dZ6IIu8Z14VlC0VpfKofAhCy74wu/Qb5gcn52yWoz/0=
+github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY=
github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U=
github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U=
github.com/minio/sha256-simd v0.1.0/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U=
@@ -1132,15 +1126,21 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
-github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
-github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w=
-github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
+github.com/nats-io/jwt/v2 v2.2.0 h1:Yg/4WFK6vsqMudRg91eBb7Dh6XeVcDMPHycDE8CfltE=
+github.com/nats-io/jwt/v2 v2.2.0/go.mod h1:0tqz9Hlu6bCBFLWAASKhE5vUA4c24L9KPUUgvwumE/k=
+github.com/nats-io/nkeys v0.3.0 h1:cgM5tL53EvYRU+2YLXIK0G2mJtK12Ft9oeooSZMA2G8=
+github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4=
+github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32 h1:W6apQkHrMkS0Muv8G/TipAy/FJl/rCYT0+EuS8+Z0z4=
github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32/go.mod h1:9wM+0iRr9ahx58uYLpLIr5fm8diHn0JbqRycJi6w0Ms=
github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
+github.com/neilalexander/nats-server/v2 v2.3.3-0.20220104162330-c76d5fd70423 h1:BLQVdjMH5XD4BYb0fa+c2Oh2Nr1vrO7GKvRnIJDxChc=
+github.com/neilalexander/nats-server/v2 v2.3.3-0.20220104162330-c76d5fd70423/go.mod h1:9sdEkBhyZMQG1M9TevnlYUwMusRACn2vlgOeqoHKwVo=
+github.com/neilalexander/nats.go v1.11.1-0.20220104162523-f4ddebe1061c h1:G2qsv7D0rY94HAu8pXmElMluuMHQ85waxIDQBhIzV2Q=
+github.com/neilalexander/nats.go v1.11.1-0.20220104162523-f4ddebe1061c/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w=
github.com/neilalexander/utp v0.1.1-0.20210622132614-ee9a34a30488/go.mod h1:NPHGhPc0/wudcaCqL/H5AOddkRf8GPRhzOujuUKGQu8=
github.com/neilalexander/utp v0.1.1-0.20210727203401-54ae7b1cd5f9 h1:lrVQzBtkeQEGGYUHwSX1XPe1E5GL6U3KYCNe2G4bncQ=
github.com/neilalexander/utp v0.1.1-0.20210727203401-54ae7b1cd5f9/go.mod h1:NPHGhPc0/wudcaCqL/H5AOddkRf8GPRhzOujuUKGQu8=
@@ -1216,9 +1216,9 @@ github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/9
github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
-github.com/pierrec/lz4 v2.4.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
-github.com/pierrec/lz4 v2.6.0+incompatible h1:Ix9yFKn1nSPBLFl/yZknTp8TU5G4Ps0JDmguYK6iH1A=
github.com/pierrec/lz4 v2.6.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM=
+github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4=
github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -1269,7 +1269,6 @@ github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O
github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
-github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
@@ -1426,9 +1425,7 @@ github.com/willf/bitset v1.1.9/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPyS
github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE=
-github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
github.com/xdg/scram v1.0.3/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
-github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
github.com/xdg/stringprep v1.0.3/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
@@ -1503,7 +1500,6 @@ golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191227163750-53104e6ec876/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
@@ -1512,6 +1508,7 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
+golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
@@ -1558,7 +1555,6 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180406214816-61147c48b25b/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -1604,10 +1600,11 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20210427231257-85d9c07bbe3a/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210927181540-4e4d966f7476 h1:s5hu7bTnLKswvidgtqc4GwsW83m9LZu8UAqzmWOZtI4=
golang.org/x/net v0.0.0-20210927181540-4e4d966f7476/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -1637,6 +1634,7 @@ golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1729,12 +1727,14 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.7-0.20210503195748-5c7c50ebbd4f h1:yQJrRE0hDxDFmZLlRaw+3vusO4fwNHgHIjUOMO7bHYI=
golang.org/x/text v0.3.7-0.20210503195748-5c7c50ebbd4f/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s=
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -1886,7 +1886,6 @@ gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/check.v1 v1.0.0-20160105164936-4f90aeace3a2/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
@@ -1905,11 +1904,6 @@ gopkg.in/h2non/gock.v1 v1.0.14/go.mod h1:sX4zAkdYX1TRGJ2JY156cFspQn4yRWn6p9EMdOD
gopkg.in/httprequest.v1 v1.1.1/go.mod h1:/CkavNL+g3qLOrpFHVrEx4NKepeqR4XTZWNj4sGGjz0=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/ini.v1 v1.51.1/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo=
-gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q=
-gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4=
-gopkg.in/jcmturner/gokrb5.v7 v7.5.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM=
-gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8=
gopkg.in/macaroon.v2 v2.1.0 h1:HZcsjBCzq9t0eBPMKqTN/uSN6JOm78ZJ2INbqcBQOUI=
gopkg.in/macaroon.v2 v2.1.0/go.mod h1:OUb+TQP/OP0WOerC2Jp/3CwhIKyIa9kQjuc7H24e6/o=
gopkg.in/mgo.v2 v2.0.0-20160818015218-f2b6f6c918c4/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
diff --git a/internal/test/config.go b/internal/test/config.go
index 826d2ce0..bb2f8a4c 100644
--- a/internal/test/config.go
+++ b/internal/test/config.go
@@ -81,7 +81,7 @@ func MakeConfig(configDir, kafkaURI, database, host string, startPort int) (*con
cfg.MediaAPI.BasePath = config.Path(mediaBasePath)
- cfg.Global.Kafka.Addresses = []string{kafkaURI}
+ cfg.Global.JetStream.Addresses = []string{kafkaURI}
// TODO: Use different databases for the different schemas.
// Using the same database for every schema currently works because
diff --git a/keyserver/consumers/cross_signing.go b/keyserver/consumers/cross_signing.go
index 2ba62797..4b2bd4a9 100644
--- a/keyserver/consumers/cross_signing.go
+++ b/keyserver/consumers/cross_signing.go
@@ -22,6 +22,7 @@ import (
"github.com/matrix-org/dendrite/keyserver/api"
"github.com/matrix-org/dendrite/keyserver/storage"
"github.com/matrix-org/dendrite/setup/config"
+ "github.com/matrix-org/dendrite/setup/jetstream"
"github.com/matrix-org/dendrite/setup/process"
"github.com/matrix-org/gomatrixserverlib"
"github.com/sirupsen/logrus"
@@ -50,7 +51,7 @@ func NewOutputCrossSigningKeyUpdateConsumer(
consumer := internal.ContinualConsumer{
Process: process,
ComponentName: "keyserver/keyserver",
- Topic: cfg.Global.Kafka.TopicFor(config.TopicOutputKeyChangeEvent),
+ Topic: cfg.Global.JetStream.TopicFor(jetstream.OutputKeyChangeEvent),
Consumer: kafkaConsumer,
PartitionStore: keyDB,
}
diff --git a/keyserver/keyserver.go b/keyserver/keyserver.go
index 477efafd..03a221a6 100644
--- a/keyserver/keyserver.go
+++ b/keyserver/keyserver.go
@@ -25,7 +25,7 @@ import (
"github.com/matrix-org/dendrite/keyserver/storage"
"github.com/matrix-org/dendrite/setup/base"
"github.com/matrix-org/dendrite/setup/config"
- "github.com/matrix-org/dendrite/setup/kafka"
+ "github.com/matrix-org/dendrite/setup/jetstream"
"github.com/sirupsen/logrus"
)
@@ -40,14 +40,14 @@ func AddInternalRoutes(router *mux.Router, intAPI api.KeyInternalAPI) {
func NewInternalAPI(
base *base.BaseDendrite, cfg *config.KeyServer, fedClient fedsenderapi.FederationClient,
) api.KeyInternalAPI {
- consumer, producer := kafka.SetupConsumerProducer(&cfg.Matrix.Kafka)
+ _, consumer, producer := jetstream.Prepare(&cfg.Matrix.JetStream)
db, err := storage.NewDatabase(&cfg.Database)
if err != nil {
logrus.WithError(err).Panicf("failed to connect to key server database")
}
keyChangeProducer := &producers.KeyChange{
- Topic: string(cfg.Matrix.Kafka.TopicFor(config.TopicOutputKeyChangeEvent)),
+ Topic: string(cfg.Matrix.JetStream.TopicFor(jetstream.OutputKeyChangeEvent)),
Producer: producer,
DB: db,
}
diff --git a/roomserver/api/input.go b/roomserver/api/input.go
index 8e6e4ac7..a537e64e 100644
--- a/roomserver/api/input.go
+++ b/roomserver/api/input.go
@@ -86,6 +86,7 @@ type TransactionID struct {
// InputRoomEventsRequest is a request to InputRoomEvents
type InputRoomEventsRequest struct {
InputRoomEvents []InputRoomEvent `json:"input_room_events"`
+ Asynchronous bool `json:"async"`
}
// InputRoomEventsResponse is a response to InputRoomEvents
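(Sketch, not part of the patch.) The new Asynchronous field serialises as "async", so a caller of the internal API that wants fire-and-forget delivery sets it directly on the request; only the synchronous path reports per-event errors back through the response:

    req := api.InputRoomEventsRequest{
        InputRoomEvents: events, // []api.InputRoomEvent to persist
        Asynchronous:    true,   // publish to the JetStream input topic and return immediately
    }
    var res api.InputRoomEventsResponse
    rsAPI.InputRoomEvents(ctx, &req, &res)
    if err := res.Err(); err != nil {
        // Only populated on the synchronous path (Asynchronous: false).
    }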
diff --git a/roomserver/api/wrapper.go b/roomserver/api/wrapper.go
index de66df80..cdb186c0 100644
--- a/roomserver/api/wrapper.go
+++ b/roomserver/api/wrapper.go
@@ -27,6 +27,7 @@ func SendEvents(
ctx context.Context, rsAPI RoomserverInternalAPI,
kind Kind, events []*gomatrixserverlib.HeaderedEvent,
sendAsServer gomatrixserverlib.ServerName, txnID *TransactionID,
+ async bool,
) error {
ires := make([]InputRoomEvent, len(events))
for i, event := range events {
@@ -38,7 +39,7 @@ func SendEvents(
TransactionID: txnID,
}
}
- return SendInputRoomEvents(ctx, rsAPI, ires)
+ return SendInputRoomEvents(ctx, rsAPI, ires, async)
}
// SendEventWithState writes an event with the specified kind to the roomserver
@@ -47,7 +48,7 @@ func SendEvents(
func SendEventWithState(
ctx context.Context, rsAPI RoomserverInternalAPI, kind Kind,
state *gomatrixserverlib.RespState, event *gomatrixserverlib.HeaderedEvent,
- haveEventIDs map[string]bool,
+ haveEventIDs map[string]bool, async bool,
) error {
outliers, err := state.Events()
if err != nil {
@@ -79,14 +80,18 @@ func SendEventWithState(
StateEventIDs: stateEventIDs,
})
- return SendInputRoomEvents(ctx, rsAPI, ires)
+ return SendInputRoomEvents(ctx, rsAPI, ires, async)
}
// SendInputRoomEvents to the roomserver.
func SendInputRoomEvents(
- ctx context.Context, rsAPI RoomserverInternalAPI, ires []InputRoomEvent,
+ ctx context.Context, rsAPI RoomserverInternalAPI,
+ ires []InputRoomEvent, async bool,
) error {
- request := InputRoomEventsRequest{InputRoomEvents: ires}
+ request := InputRoomEventsRequest{
+ InputRoomEvents: ires,
+ Asynchronous: async,
+ }
var response InputRoomEventsResponse
rsAPI.InputRoomEvents(ctx, &request, &response)
return response.Err()
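(Sketch, not part of the patch.) For callers of these helpers the only visible change is the trailing async argument:

    // Synchronous: block until the roomserver has processed the events and
    // surface any rejection error to the caller (e.g. the /send path).
    if err := api.SendEvents(ctx, rsAPI, api.KindNew, events, serverName, txnID, false); err != nil {
        return err
    }

    // Asynchronous: hand the events to the JetStream input topic and return
    // immediately (useful for catch-up paths that must not block).
    _ = api.SendEvents(ctx, rsAPI, api.KindNew, events, serverName, nil, true)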
diff --git a/roomserver/internal/api.go b/roomserver/internal/api.go
index 67bbc7ab..5cfe68da 100644
--- a/roomserver/internal/api.go
+++ b/roomserver/internal/api.go
@@ -3,7 +3,6 @@ package internal
import (
"context"
- "github.com/Shopify/sarama"
"github.com/getsentry/sentry-go"
asAPI "github.com/matrix-org/dendrite/appservice/api"
fsAPI "github.com/matrix-org/dendrite/federationapi/api"
@@ -16,6 +15,8 @@ import (
"github.com/matrix-org/dendrite/roomserver/storage"
"github.com/matrix-org/dendrite/setup/config"
"github.com/matrix-org/gomatrixserverlib"
+ "github.com/nats-io/nats.go"
+ "github.com/sirupsen/logrus"
)
// RoomserverInternalAPI is an implementation of api.RoomserverInternalAPI
@@ -33,19 +34,19 @@ type RoomserverInternalAPI struct {
*perform.Forgetter
DB storage.Database
Cfg *config.RoomServer
- Producer sarama.SyncProducer
Cache caching.RoomServerCaches
ServerName gomatrixserverlib.ServerName
KeyRing gomatrixserverlib.JSONVerifier
fsAPI fsAPI.FederationInternalAPI
asAPI asAPI.AppServiceQueryAPI
- OutputRoomEventTopic string // Kafka topic for new output room events
+ InputRoomEventTopic string // JetStream topic for new input room events
+ OutputRoomEventTopic string // JetStream topic for new output room events
PerspectiveServerNames []gomatrixserverlib.ServerName
}
func NewRoomserverAPI(
- cfg *config.RoomServer, roomserverDB storage.Database, producer sarama.SyncProducer,
- outputRoomEventTopic string, caches caching.RoomServerCaches,
+ cfg *config.RoomServer, roomserverDB storage.Database, consumer nats.JetStreamContext,
+ inputRoomEventTopic, outputRoomEventTopic string, caches caching.RoomServerCaches,
perspectiveServerNames []gomatrixserverlib.ServerName,
) *RoomserverInternalAPI {
serverACLs := acls.NewServerACLs(roomserverDB)
@@ -63,13 +64,17 @@ func NewRoomserverAPI(
},
Inputer: &input.Inputer{
DB: roomserverDB,
+ InputRoomEventTopic: inputRoomEventTopic,
OutputRoomEventTopic: outputRoomEventTopic,
- Producer: producer,
+ JetStream: consumer,
ServerName: cfg.Matrix.ServerName,
ACLs: serverACLs,
},
// perform-er structs get initialised when we have a federation sender to use
}
+ if err := a.Inputer.Start(); err != nil {
+ logrus.WithError(err).Panic("failed to start roomserver input API")
+ }
return a
}
diff --git a/roomserver/internal/input/input.go b/roomserver/internal/input/input.go
index de40e133..1eab6780 100644
--- a/roomserver/internal/input/input.go
+++ b/roomserver/internal/input/input.go
@@ -19,19 +19,19 @@ import (
"context"
"encoding/json"
"sync"
- "time"
- "github.com/Shopify/sarama"
+ "github.com/Arceliar/phony"
"github.com/getsentry/sentry-go"
"github.com/matrix-org/dendrite/internal/hooks"
"github.com/matrix-org/dendrite/roomserver/acls"
"github.com/matrix-org/dendrite/roomserver/api"
"github.com/matrix-org/dendrite/roomserver/storage"
+ "github.com/matrix-org/dendrite/setup/jetstream"
"github.com/matrix-org/gomatrixserverlib"
+ "github.com/nats-io/nats.go"
"github.com/prometheus/client_golang/prometheus"
log "github.com/sirupsen/logrus"
"github.com/tidwall/gjson"
- "go.uber.org/atomic"
)
var keyContentFields = map[string]string{
@@ -42,105 +42,161 @@ var keyContentFields = map[string]string{
type Inputer struct {
DB storage.Database
- Producer sarama.SyncProducer
+ JetStream nats.JetStreamContext
ServerName gomatrixserverlib.ServerName
ACLs *acls.ServerACLs
+ InputRoomEventTopic string
OutputRoomEventTopic string
- workers sync.Map // room ID -> *inputWorker
+ workers sync.Map // room ID -> *phony.Inbox
}
-type inputTask struct {
- ctx context.Context
- event *api.InputRoomEvent
- wg *sync.WaitGroup
- err error // written back by worker, only safe to read when all tasks are done
-}
-
-type inputWorker struct {
- r *Inputer
- running atomic.Bool
- input *fifoQueue
+// Start subscribes to the roomserver input stream and begins processing input events as they arrive.
+func (r *Inputer) Start() error {
+ _, err := r.JetStream.Subscribe(
+ r.InputRoomEventTopic,
+ // We specifically don't use jetstream.WithJetStreamMessage here because we
+ // queue the task off to a room-specific queue and the ACK needs to be sent
+ // later, possibly with an error response to the inputter if synchronous.
+ func(msg *nats.Msg) {
+ roomID := msg.Header.Get("room_id")
+ defer roomserverInputBackpressure.With(prometheus.Labels{"room_id": roomID}).Dec()
+ var inputRoomEvent api.InputRoomEvent
+ if err := json.Unmarshal(msg.Data, &inputRoomEvent); err != nil {
+ _ = msg.Term()
+ return
+ }
+ inbox, _ := r.workers.LoadOrStore(roomID, &phony.Inbox{})
+ inbox.(*phony.Inbox).Act(nil, func() {
+ if err := r.processRoomEvent(context.TODO(), &inputRoomEvent); err != nil {
+ sentry.CaptureException(err)
+ } else {
+ hooks.Run(hooks.KindNewEventPersisted, inputRoomEvent.Event)
+ }
+ _ = msg.Ack()
+ })
+ },
+ // NATS wants to acknowledge automatically by default when the message is
+ // read from the stream, but we want to override that behaviour by making
+ // sure that we only acknowledge when we're happy we've done everything we
+ // can. This ensures we retry things when it makes sense to do so.
+ nats.ManualAck(),
+ // NATS will try to redeliver things to us automatically if we don't ack
+ // or nak them within a certain amount of time. This stops that from
+ // happening, so we don't end up doing a lot of unnecessary duplicate work.
+ nats.MaxDeliver(0),
+ )
+ return err
}
-// Guarded by a CAS on w.running
-func (w *inputWorker) start() {
- defer w.running.Store(false)
- for {
- select {
- case <-w.input.wait():
- task, ok := w.input.pop()
- if !ok {
- continue
+// InputRoomEvents implements api.RoomserverInternalAPI
+func (r *Inputer) InputRoomEvents(
+ ctx context.Context,
+ request *api.InputRoomEventsRequest,
+ response *api.InputRoomEventsResponse,
+) {
+ if request.Asynchronous {
+ var err error
+ for _, e := range request.InputRoomEvents {
+ msg := &nats.Msg{
+ Subject: r.InputRoomEventTopic,
+ Header: nats.Header{},
+ }
+ roomID := e.Event.RoomID()
+ msg.Header.Set("room_id", roomID)
+ msg.Data, err = json.Marshal(e)
+ if err != nil {
+ response.ErrMsg = err.Error()
+ return
}
- roomserverInputBackpressure.With(prometheus.Labels{
- "room_id": task.event.Event.RoomID(),
- }).Dec()
- hooks.Run(hooks.KindNewEventReceived, task.event.Event)
- _, task.err = w.r.processRoomEvent(task.ctx, task.event)
- if task.err == nil {
- hooks.Run(hooks.KindNewEventPersisted, task.event.Event)
- } else {
- sentry.CaptureException(task.err)
+ if _, err = r.JetStream.PublishMsg(msg); err != nil {
+ return
+ }
+ roomserverInputBackpressure.With(prometheus.Labels{"room_id": roomID}).Inc()
+ }
+ } else {
+ responses := make(chan error, len(request.InputRoomEvents))
+ defer close(responses)
+ for _, e := range request.InputRoomEvents {
+ inputRoomEvent := e
+ inbox, _ := r.workers.LoadOrStore(inputRoomEvent.Event.RoomID(), &phony.Inbox{})
+ inbox.(*phony.Inbox).Act(nil, func() {
+ err := r.processRoomEvent(context.TODO(), &inputRoomEvent)
+ if err != nil {
+ sentry.CaptureException(err)
+ } else {
+ hooks.Run(hooks.KindNewEventPersisted, inputRoomEvent.Event)
+ }
+ select {
+ case <-ctx.Done():
+ default:
+ responses <- err
+ }
+ })
+ }
+ for i := 0; i < len(request.InputRoomEvents); i++ {
+ select {
+ case <-ctx.Done():
+ return
+ case err := <-responses:
+ if err != nil {
+ response.ErrMsg = err.Error()
+ return
+ }
}
- task.wg.Done()
- case <-time.After(time.Second * 5):
- return
}
}
}
// WriteOutputEvents implements OutputRoomEventWriter
func (r *Inputer) WriteOutputEvents(roomID string, updates []api.OutputEvent) error {
- messages := make([]*sarama.ProducerMessage, len(updates))
- for i := range updates {
- value, err := json.Marshal(updates[i])
+ var err error
+ for _, update := range updates {
+ msg := &nats.Msg{
+ Subject: r.OutputRoomEventTopic,
+ Header: nats.Header{},
+ }
+ msg.Header.Set(jetstream.RoomID, roomID)
+ msg.Data, err = json.Marshal(update)
if err != nil {
return err
}
logger := log.WithFields(log.Fields{
"room_id": roomID,
- "type": updates[i].Type,
+ "type": update.Type,
})
- if updates[i].NewRoomEvent != nil {
- eventType := updates[i].NewRoomEvent.Event.Type()
+ if update.NewRoomEvent != nil {
+ eventType := update.NewRoomEvent.Event.Type()
logger = logger.WithFields(log.Fields{
"event_type": eventType,
- "event_id": updates[i].NewRoomEvent.Event.EventID(),
- "adds_state": len(updates[i].NewRoomEvent.AddsStateEventIDs),
- "removes_state": len(updates[i].NewRoomEvent.RemovesStateEventIDs),
- "send_as_server": updates[i].NewRoomEvent.SendAsServer,
- "sender": updates[i].NewRoomEvent.Event.Sender(),
+ "event_id": update.NewRoomEvent.Event.EventID(),
+ "adds_state": len(update.NewRoomEvent.AddsStateEventIDs),
+ "removes_state": len(update.NewRoomEvent.RemovesStateEventIDs),
+ "send_as_server": update.NewRoomEvent.SendAsServer,
+ "sender": update.NewRoomEvent.Event.Sender(),
})
- if updates[i].NewRoomEvent.Event.StateKey() != nil {
- logger = logger.WithField("state_key", *updates[i].NewRoomEvent.Event.StateKey())
+ if update.NewRoomEvent.Event.StateKey() != nil {
+ logger = logger.WithField("state_key", *update.NewRoomEvent.Event.StateKey())
}
contentKey := keyContentFields[eventType]
if contentKey != "" {
- value := gjson.GetBytes(updates[i].NewRoomEvent.Event.Content(), contentKey)
+ value := gjson.GetBytes(update.NewRoomEvent.Event.Content(), contentKey)
if value.Exists() {
logger = logger.WithField("content_value", value.String())
}
}
- if eventType == "m.room.server_acl" && updates[i].NewRoomEvent.Event.StateKeyEquals("") {
- ev := updates[i].NewRoomEvent.Event.Unwrap()
+ if eventType == "m.room.server_acl" && update.NewRoomEvent.Event.StateKeyEquals("") {
+ ev := update.NewRoomEvent.Event.Unwrap()
defer r.ACLs.OnServerACLUpdate(ev)
}
}
- logger.Infof("Producing to topic '%s'", r.OutputRoomEventTopic)
- messages[i] = &sarama.ProducerMessage{
- Topic: r.OutputRoomEventTopic,
- Key: sarama.StringEncoder(roomID),
- Value: sarama.ByteEncoder(value),
- }
- }
- errs := r.Producer.SendMessages(messages)
- if errs != nil {
- for _, err := range errs.(sarama.ProducerErrors) {
- log.WithError(err).WithField("message_bytes", err.Msg.Value.Length()).Error("Write to kafka failed")
+ logger.Tracef("Producing to topic '%s'", r.OutputRoomEventTopic)
+ if _, err := r.JetStream.PublishMsg(msg); err != nil {
+ logger.WithError(err).Errorf("Failed to produce to topic '%s': %s", r.OutputRoomEventTopic, err)
+ return err
}
}
- return errs
+ return nil
}
func init() {
@@ -156,67 +212,3 @@ var roomserverInputBackpressure = prometheus.NewGaugeVec(
},
[]string{"room_id"},
)
-
-// InputRoomEvents implements api.RoomserverInternalAPI
-func (r *Inputer) InputRoomEvents(
- _ context.Context,
- request *api.InputRoomEventsRequest,
- response *api.InputRoomEventsResponse,
-) {
- // Create a wait group. Each task that we dispatch will call Done on
- // this wait group so that we know when all of our events have been
- // processed.
- wg := &sync.WaitGroup{}
- wg.Add(len(request.InputRoomEvents))
- tasks := make([]*inputTask, len(request.InputRoomEvents))
-
- for i, e := range request.InputRoomEvents {
- // Work out if we are running per-room workers or if we're just doing
- // it on a global basis (e.g. SQLite).
- roomID := "global"
- if r.DB.SupportsConcurrentRoomInputs() {
- roomID = e.Event.RoomID()
- }
-
- // Look up the worker, or create it if it doesn't exist. This channel
- // is buffered to reduce the chance that we'll be blocked by another
- // room - the channel will be quite small as it's just pointer types.
- w, _ := r.workers.LoadOrStore(roomID, &inputWorker{
- r: r,
- input: newFIFOQueue(),
- })
- worker := w.(*inputWorker)
-
- // Create a task. This contains the input event and a reference to
- // the wait group, so that the worker can notify us when this specific
- // task has been finished.
- tasks[i] = &inputTask{
- ctx: context.Background(),
- event: &request.InputRoomEvents[i],
- wg: wg,
- }
-
- // Send the task to the worker.
- if worker.running.CAS(false, true) {
- go worker.start()
- }
- worker.input.push(tasks[i])
- roomserverInputBackpressure.With(prometheus.Labels{
- "room_id": roomID,
- }).Inc()
- }
-
- // Wait for all of the workers to return results about our tasks.
- wg.Wait()
-
- // If any of the tasks returned an error, we should probably report
- // that back to the caller.
- for _, task := range tasks {
- if task.err != nil {
- response.ErrMsg = task.err.Error()
- _, rejected := task.err.(*gomatrixserverlib.NotAllowed)
- response.NotAllowed = rejected
- return
- }
- }
-}
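(Sketch, not part of the patch.) The per-room ordering that the removed worker/FIFO machinery used to provide (here and in input_fifo.go below) now comes from phony.Inbox: one inbox per room ID, and closures queued on the same inbox run strictly one at a time in submission order. The pattern in isolation:

    package example

    import (
        "sync"

        "github.com/Arceliar/phony"
    )

    var inboxes sync.Map // key -> *phony.Inbox

    // enqueue schedules work for the given key. Work items sharing a key never
    // run concurrently, which is what gives the roomserver per-room ordering.
    func enqueue(key string, work func()) {
        v, _ := inboxes.LoadOrStore(key, &phony.Inbox{})
        v.(*phony.Inbox).Act(nil, work)
    }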
diff --git a/roomserver/internal/input/input_events.go b/roomserver/internal/input/input_events.go
index fc712f47..791f7f30 100644
--- a/roomserver/internal/input/input_events.go
+++ b/roomserver/internal/input/input_events.go
@@ -62,7 +62,7 @@ var processRoomEventDuration = prometheus.NewHistogramVec(
func (r *Inputer) processRoomEvent(
ctx context.Context,
input *api.InputRoomEvent,
-) (eventID string, err error) {
+) (err error) {
// Measure how long it takes to process this event.
started := time.Now()
defer func() {
@@ -88,11 +88,11 @@ func (r *Inputer) processRoomEvent(
case gomatrixserverlib.EventIDFormatV1:
if bytes.Equal(event.EventReference().EventSHA256, evs[0].EventReference().EventSHA256) {
util.GetLogger(ctx).WithField("event_id", event.EventID()).Infof("Already processed event; ignoring")
- return event.EventID(), nil
+ return nil
}
default:
util.GetLogger(ctx).WithField("event_id", event.EventID()).Infof("Already processed event; ignoring")
- return event.EventID(), nil
+ return nil
}
}
}
@@ -124,14 +124,14 @@ func (r *Inputer) processRoomEvent(
// Store the event.
_, _, stateAtEvent, redactionEvent, redactedEventID, err := r.DB.StoreEvent(ctx, event, authEventNIDs, isRejected)
if err != nil {
- return "", fmt.Errorf("r.DB.StoreEvent: %w", err)
+ return fmt.Errorf("r.DB.StoreEvent: %w", err)
}
// if storing this event results in it being redacted then do so.
if !isRejected && redactedEventID == event.EventID() {
r, rerr := eventutil.RedactEvent(redactionEvent, event)
if rerr != nil {
- return "", fmt.Errorf("eventutil.RedactEvent: %w", rerr)
+ return fmt.Errorf("eventutil.RedactEvent: %w", rerr)
}
event = r
}
@@ -146,15 +146,15 @@ func (r *Inputer) processRoomEvent(
"room": event.RoomID(),
"sender": event.Sender(),
}).Debug("Stored outlier")
- return event.EventID(), nil
+ return nil
}
roomInfo, err := r.DB.RoomInfo(ctx, event.RoomID())
if err != nil {
- return "", fmt.Errorf("r.DB.RoomInfo: %w", err)
+ return fmt.Errorf("r.DB.RoomInfo: %w", err)
}
if roomInfo == nil {
- return "", fmt.Errorf("r.DB.RoomInfo missing for room %s", event.RoomID())
+ return fmt.Errorf("r.DB.RoomInfo missing for room %s", event.RoomID())
}
if stateAtEvent.BeforeStateSnapshotNID == 0 {
@@ -162,7 +162,7 @@ func (r *Inputer) processRoomEvent(
// Lets calculate one.
err = r.calculateAndSetState(ctx, input, *roomInfo, &stateAtEvent, event, isRejected)
if err != nil && input.Kind != api.KindOld {
- return "", fmt.Errorf("r.calculateAndSetState: %w", err)
+ return fmt.Errorf("r.calculateAndSetState: %w", err)
}
}
@@ -175,7 +175,7 @@ func (r *Inputer) processRoomEvent(
"soft_fail": softfail,
"sender": event.Sender(),
}).Debug("Stored rejected event")
- return event.EventID(), rejectionErr
+ return rejectionErr
}
switch input.Kind {
@@ -189,7 +189,7 @@ func (r *Inputer) processRoomEvent(
input.TransactionID, // transaction ID
input.HasState, // rewrites state?
); err != nil {
- return "", fmt.Errorf("r.updateLatestEvents: %w", err)
+ return fmt.Errorf("r.updateLatestEvents: %w", err)
}
case api.KindOld:
err = r.WriteOutputEvents(event.RoomID(), []api.OutputEvent{
@@ -201,7 +201,7 @@ func (r *Inputer) processRoomEvent(
},
})
if err != nil {
- return "", fmt.Errorf("r.WriteOutputEvents (old): %w", err)
+ return fmt.Errorf("r.WriteOutputEvents (old): %w", err)
}
}
@@ -220,12 +220,12 @@ func (r *Inputer) processRoomEvent(
},
})
if err != nil {
- return "", fmt.Errorf("r.WriteOutputEvents (redactions): %w", err)
+ return fmt.Errorf("r.WriteOutputEvents (redactions): %w", err)
}
}
// Update the extremities of the event graph for the room
- return event.EventID(), nil
+ return nil
}
func (r *Inputer) calculateAndSetState(
diff --git a/roomserver/internal/input/input_fifo.go b/roomserver/internal/input/input_fifo.go
deleted file mode 100644
index 694b1724..00000000
--- a/roomserver/internal/input/input_fifo.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package input
-
-import (
- "sync"
-)
-
-type fifoQueue struct {
- tasks []*inputTask
- count int
- mutex sync.Mutex
- notifs chan struct{}
-}
-
-func newFIFOQueue() *fifoQueue {
- q := &fifoQueue{
- notifs: make(chan struct{}, 1),
- }
- return q
-}
-
-func (q *fifoQueue) push(frame *inputTask) {
- q.mutex.Lock()
- defer q.mutex.Unlock()
- q.tasks = append(q.tasks, frame)
- q.count++
- select {
- case q.notifs <- struct{}{}:
- default:
- }
-}
-
-// pop returns the first item of the queue, if there is one.
-// The second return value will indicate if a task was returned.
-// You must check this value, even after calling wait().
-func (q *fifoQueue) pop() (*inputTask, bool) {
- q.mutex.Lock()
- defer q.mutex.Unlock()
- if q.count == 0 {
- return nil, false
- }
- frame := q.tasks[0]
- q.tasks[0] = nil
- q.tasks = q.tasks[1:]
- q.count--
- if q.count == 0 {
- // Force a GC of the underlying array, since it might have
- // grown significantly if the queue was hammered for some reason
- q.tasks = nil
- }
- return frame, true
-}
-
-// wait returns a channel which can be used to detect when an
-// item is waiting in the queue.
-func (q *fifoQueue) wait() <-chan struct{} {
- q.mutex.Lock()
- defer q.mutex.Unlock()
- if q.count > 0 && len(q.notifs) == 0 {
- ch := make(chan struct{})
- close(ch)
- return ch
- }
- return q.notifs
-}
diff --git a/roomserver/roomserver.go b/roomserver/roomserver.go
index e4742100..669957be 100644
--- a/roomserver/roomserver.go
+++ b/roomserver/roomserver.go
@@ -23,8 +23,7 @@ import (
"github.com/matrix-org/dendrite/roomserver/internal"
"github.com/matrix-org/dendrite/roomserver/storage"
"github.com/matrix-org/dendrite/setup/base"
- "github.com/matrix-org/dendrite/setup/config"
- "github.com/matrix-org/dendrite/setup/kafka"
+ "github.com/matrix-org/dendrite/setup/jetstream"
"github.com/sirupsen/logrus"
)
@@ -41,8 +40,6 @@ func NewInternalAPI(
) api.RoomserverInternalAPI {
cfg := &base.Cfg.RoomServer
- _, producer := kafka.SetupConsumerProducer(&cfg.Matrix.Kafka)
-
var perspectiveServerNames []gomatrixserverlib.ServerName
for _, kp := range base.Cfg.FederationAPI.KeyPerspectives {
perspectiveServerNames = append(perspectiveServerNames, kp.ServerName)
@@ -53,8 +50,12 @@ func NewInternalAPI(
logrus.WithError(err).Panicf("failed to connect to room server db")
}
+ js, _, _ := jetstream.Prepare(&cfg.Matrix.JetStream)
+
return internal.NewRoomserverAPI(
- cfg, roomserverDB, producer, string(cfg.Matrix.Kafka.TopicFor(config.TopicOutputRoomEvent)),
+ cfg, roomserverDB, js,
+ cfg.Matrix.JetStream.TopicFor(jetstream.InputRoomEvent),
+ cfg.Matrix.JetStream.TopicFor(jetstream.OutputRoomEvent),
base.Caches, perspectiveServerNames,
)
}
diff --git a/roomserver/roomserver_test.go b/roomserver/roomserver_test.go
deleted file mode 100644
index 40e8e92d..00000000
--- a/roomserver/roomserver_test.go
+++ /dev/null
@@ -1,407 +0,0 @@
-package roomserver
-
-import (
- "bytes"
- "context"
- "crypto/ed25519"
- "encoding/json"
- "fmt"
- "os"
- "reflect"
- "testing"
- "time"
-
- "github.com/Shopify/sarama"
- "github.com/matrix-org/dendrite/internal/caching"
- "github.com/matrix-org/dendrite/roomserver/api"
- "github.com/matrix-org/dendrite/roomserver/internal"
- "github.com/matrix-org/dendrite/roomserver/storage"
- "github.com/matrix-org/dendrite/setup/base"
- "github.com/matrix-org/dendrite/setup/config"
- "github.com/matrix-org/gomatrixserverlib"
- "github.com/sirupsen/logrus"
-)
-
-const (
- testOrigin = gomatrixserverlib.ServerName("kaer.morhen")
- // we have to use an on-disk DB because we open multiple connections due to the *Updater structs.
- // Using :memory: results in a brand new DB for each open connection, and sharing memory via
- // ?cache=shared just allows read-only sharing, so writes to the database on other connections are lost.
- roomserverDBFileURI = "file:roomserver_test.db"
- roomserverDBFilePath = "./roomserver_test.db"
-)
-
-var (
- ctx = context.Background()
-)
-
-type dummyProducer struct {
- topic string
- producedMessages []*api.OutputEvent
-}
-
-// SendMessage produces a given message, and returns only when it either has
-// succeeded or failed to produce. It will return the partition and the offset
-// of the produced message, or an error if the message failed to produce.
-func (p *dummyProducer) SendMessage(msg *sarama.ProducerMessage) (partition int32, offset int64, err error) {
- if msg.Topic != p.topic {
- return 0, 0, nil
- }
- be := msg.Value.(sarama.ByteEncoder)
- b := json.RawMessage(be)
- fmt.Println("SENDING >>>>>>>> ", string(b))
- var out api.OutputEvent
- err = json.Unmarshal(b, &out)
- if err != nil {
- return 0, 0, err
- }
- p.producedMessages = append(p.producedMessages, &out)
- return 0, 0, nil
-}
-
-// SendMessages produces a given set of messages, and returns only when all
-// messages in the set have either succeeded or failed. Note that messages
-// can succeed and fail individually; if some succeed and some fail,
-// SendMessages will return an error.
-func (p *dummyProducer) SendMessages(msgs []*sarama.ProducerMessage) error {
- for _, m := range msgs {
- p.SendMessage(m)
- }
- return nil
-}
-
-// Close shuts down the producer and waits for any buffered messages to be
-// flushed. You must call this function before a producer object passes out of
-// scope, as it may otherwise leak memory. You must call this before calling
-// Close on the underlying client.
-func (p *dummyProducer) Close() error {
- return nil
-}
-
-func deleteDatabase() {
- err := os.Remove(roomserverDBFilePath)
- if err != nil {
- fmt.Printf("failed to delete database %s: %s\n", roomserverDBFilePath, err)
- }
-}
-
-type fledglingEvent struct {
- Type string
- StateKey *string
- Content interface{}
- Sender string
- RoomID string
-}
-
-func mustCreateEvents(t *testing.T, roomVer gomatrixserverlib.RoomVersion, events []fledglingEvent) (result []*gomatrixserverlib.HeaderedEvent) {
- t.Helper()
- depth := int64(1)
- seed := make([]byte, ed25519.SeedSize) // zero seed
- key := ed25519.NewKeyFromSeed(seed)
- var prevs []string
- roomState := make(map[gomatrixserverlib.StateKeyTuple]string) // state -> event ID
- for _, ev := range events {
- eb := gomatrixserverlib.EventBuilder{
- Sender: ev.Sender,
- Depth: depth,
- Type: ev.Type,
- StateKey: ev.StateKey,
- RoomID: ev.RoomID,
- PrevEvents: prevs,
- }
- err := eb.SetContent(ev.Content)
- if err != nil {
- t.Fatalf("mustCreateEvent: failed to marshal event content %+v", ev.Content)
- }
- stateNeeded, err := gomatrixserverlib.StateNeededForEventBuilder(&eb)
- if err != nil {
- t.Fatalf("mustCreateEvent: failed to work out auth_events : %s", err)
- }
- var authEvents []string
- for _, tuple := range stateNeeded.Tuples() {
- eventID := roomState[tuple]
- if eventID != "" {
- authEvents = append(authEvents, eventID)
- }
- }
- eb.AuthEvents = authEvents
- signedEvent, err := eb.Build(time.Now(), testOrigin, "ed25519:test", key, roomVer)
- if err != nil {
- t.Fatalf("mustCreateEvent: failed to sign event: %s", err)
- }
- depth++
- prevs = []string{signedEvent.EventID()}
- if ev.StateKey != nil {
- roomState[gomatrixserverlib.StateKeyTuple{
- EventType: ev.Type,
- StateKey: *ev.StateKey,
- }] = signedEvent.EventID()
- }
- result = append(result, signedEvent.Headered(roomVer))
- }
- return
-}
-
-func mustLoadRawEvents(t *testing.T, ver gomatrixserverlib.RoomVersion, events []json.RawMessage) []*gomatrixserverlib.HeaderedEvent {
- t.Helper()
- hs := make([]*gomatrixserverlib.HeaderedEvent, len(events))
- for i := range events {
- e, err := gomatrixserverlib.NewEventFromTrustedJSON(events[i], false, ver)
- if err != nil {
- t.Fatalf("cannot load test data: " + err.Error())
- }
- hs[i] = e.Headered(ver)
- }
- return hs
-}
-
-func mustCreateRoomserverAPI(t *testing.T) (api.RoomserverInternalAPI, *dummyProducer) {
- t.Helper()
- cfg := &config.Dendrite{}
- cfg.Defaults(true)
- cfg.Global.ServerName = testOrigin
- cfg.Global.Kafka.UseNaffka = true
- cfg.RoomServer.Database = config.DatabaseOptions{
- ConnectionString: roomserverDBFileURI,
- }
- dp := &dummyProducer{
- topic: cfg.Global.Kafka.TopicFor(config.TopicOutputRoomEvent),
- }
- cache, err := caching.NewInMemoryLRUCache(false)
- if err != nil {
- t.Fatalf("failed to make caches: %s", err)
- }
- base := &base.BaseDendrite{
- Caches: cache,
- Cfg: cfg,
- }
- roomserverDB, err := storage.Open(&cfg.RoomServer.Database, base.Caches)
- if err != nil {
- logrus.WithError(err).Panicf("failed to connect to room server db")
- }
- return internal.NewRoomserverAPI(
- &cfg.RoomServer, roomserverDB, dp, string(cfg.Global.Kafka.TopicFor(config.TopicOutputRoomEvent)),
- base.Caches, nil,
- ), dp
-}
-
-func mustSendEvents(t *testing.T, ver gomatrixserverlib.RoomVersion, events []json.RawMessage) (api.RoomserverInternalAPI, *dummyProducer, []*gomatrixserverlib.HeaderedEvent) {
- t.Helper()
- rsAPI, dp := mustCreateRoomserverAPI(t)
- hevents := mustLoadRawEvents(t, ver, events)
- if err := api.SendEvents(ctx, rsAPI, api.KindNew, hevents, testOrigin, nil); err != nil {
- t.Errorf("failed to SendEvents: %s", err)
- }
- return rsAPI, dp, hevents
-}
-
-func TestOutputRedactedEvent(t *testing.T) {
- redactionEvents := []json.RawMessage{
- // create event
- []byte(`{"auth_events":[],"content":{"creator":"@userid:kaer.morhen"},"depth":0,"event_id":"$N4us6vqqq3RjvpKd:kaer.morhen","hashes":{"sha256":"WTdrCn/YsiounXcJPsLP8xT0ZjHiO5Ov0NvXYmK2onE"},"origin":"kaer.morhen","origin_server_ts":0,"prev_events":[],"prev_state":[],"room_id":"!roomid:kaer.morhen","sender":"@userid:kaer.morhen","signatures":{"kaer.morhen":{"ed25519:auto":"9+5JcpaN5b5KlHYHGp6r+GoNDH98lbfzGYwjfxensa5C5D/bDACaYnMDLnhwsHOE5nxgI+jT/GV271pz6PMSBQ"}},"state_key":"","type":"m.room.create"}`),
- // join event
- []byte(`{"auth_events":[["$N4us6vqqq3RjvpKd:kaer.morhen",{"sha256":"SylirfgfXFhscZL7p10NmOa1nFFEckiwz0lAideQMIM"}]],"content":{"membership":"join"},"depth":1,"event_id":"$6sUiGPQ0a3tqYGKo:kaer.morhen","hashes":{"sha256":"eYVBC7RO+FlxRyW1aXYf/ad4Dzi7T93tArdGw3r4RwQ"},"origin":"kaer.morhen","origin_server_ts":0,"prev_events":[["$N4us6vqqq3RjvpKd:kaer.morhen",{"sha256":"SylirfgfXFhscZL7p10NmOa1nFFEckiwz0lAideQMIM"}]],"prev_state":[],"room_id":"!roomid:kaer.morhen","sender":"@userid:kaer.morhen","signatures":{"kaer.morhen":{"ed25519:auto":"tiDBTPFa53YMfHiupX3vSRE/ZcCiCjmGt7gDpIpDpwZapeays5Vqqcqb7KiywrDldpTkrrdJBAw2jXcq6ZyhDw"}},"state_key":"@userid:kaer.morhen","type":"m.room.member"}`),
- // room name
- []byte(`{"auth_events":[["$N4us6vqqq3RjvpKd:kaer.morhen",{"sha256":"SylirfgfXFhscZL7p10NmOa1nFFEckiwz0lAideQMIM"}],["$6sUiGPQ0a3tqYGKo:kaer.morhen",{"sha256":"IS4HSMqpqVUGh1Z3qgC99YcaizjCoO4yFhYYe8j53IE"}]],"content":{"name":"My Room Name"},"depth":2,"event_id":"$VC1zZ9YWwuUbSNHD:kaer.morhen","hashes":{"sha256":"bpqTkfLx6KHzWz7/wwpsXnXwJWEGW14aV63ffexzDFg"},"origin":"kaer.morhen","origin_server_ts":0,"prev_events":[["$6sUiGPQ0a3tqYGKo:kaer.morhen",{"sha256":"IS4HSMqpqVUGh1Z3qgC99YcaizjCoO4yFhYYe8j53IE"}]],"prev_state":[],"room_id":"!roomid:kaer.morhen","sender":"@userid:kaer.morhen","signatures":{"kaer.morhen":{"ed25519:auto":"mhJZ3X4bAKrF/T0mtPf1K2Tmls0h6xGY1IPDpJ/SScQBqDlu3HQR2BPa7emqj5bViyLTWVNh+ZCpzx/6STTrAg"}},"state_key":"","type":"m.room.name"}`),
- // redact room name
- []byte(`{"auth_events":[["$N4us6vqqq3RjvpKd:kaer.morhen",{"sha256":"SylirfgfXFhscZL7p10NmOa1nFFEckiwz0lAideQMIM"}],["$6sUiGPQ0a3tqYGKo:kaer.morhen",{"sha256":"IS4HSMqpqVUGh1Z3qgC99YcaizjCoO4yFhYYe8j53IE"}]],"content":{"reason":"Spamming"},"depth":3,"event_id":"$tJI0pE3b8u9UMYpT:kaer.morhen","hashes":{"sha256":"/3TStqa5SQqYaEtl7ajEvSRvu6d12MMKfICUzrBpd2Q"},"origin":"kaer.morhen","origin_server_ts":0,"prev_events":[["$VC1zZ9YWwuUbSNHD:kaer.morhen",{"sha256":"+l8cNa7syvm0EF7CAmQRlYknLEMjivnI4FLhB/TUBEY"}]],"redacts":"$VC1zZ9YWwuUbSNHD:kaer.morhen","room_id":"!roomid:kaer.morhen","sender":"@userid:kaer.morhen","signatures":{"kaer.morhen":{"ed25519:auto":"QBOh+amf0vTJbm6+9VwAcR9uJviBIor2KON0Y7+EyQx5YbUZEzW1HPeJxarLIHBcxMzgOVzjuM+StzjbUgDzAg"}},"type":"m.room.redaction"}`),
- // message
- []byte(`{"auth_events":[["$N4us6vqqq3RjvpKd:kaer.morhen",{"sha256":"SylirfgfXFhscZL7p10NmOa1nFFEckiwz0lAideQMIM"}],["$6sUiGPQ0a3tqYGKo:kaer.morhen",{"sha256":"IS4HSMqpqVUGh1Z3qgC99YcaizjCoO4yFhYYe8j53IE"}]],"content":{"body":"Test Message"},"depth":4,"event_id":"$o8KHsgSIYbJrddnd:kaer.morhen","hashes":{"sha256":"IE/rGVlKOpiGWeIo887g1CK1drYqcWDZhL6THZHkJ1c"},"origin":"kaer.morhen","origin_server_ts":0,"prev_events":[["$tJI0pE3b8u9UMYpT:kaer.morhen",{"sha256":"zvmwyXuDox7jpA16JRH6Fc1zbfQht2tpkBbMTUOi3Jw"}]],"room_id":"!roomid:kaer.morhen","sender":"@userid:kaer.morhen","signatures":{"kaer.morhen":{"ed25519:auto":"/3z+pJjiJXWhwfqIEzmNksvBHCoXTktK/y0rRuWJXw6i1+ygRG/suDCKhFuuz6gPapRmEMPVILi2mJqHHXPKAg"}},"type":"m.room.message"}`),
- // redact previous message
- []byte(`{"auth_events":[["$N4us6vqqq3RjvpKd:kaer.morhen",{"sha256":"SylirfgfXFhscZL7p10NmOa1nFFEckiwz0lAideQMIM"}],["$6sUiGPQ0a3tqYGKo:kaer.morhen",{"sha256":"IS4HSMqpqVUGh1Z3qgC99YcaizjCoO4yFhYYe8j53IE"}]],"content":{"reason":"Spamming more"},"depth":5,"event_id":"$UpsE8belb2gJItJG:kaer.morhen","hashes":{"sha256":"zU8PWJOld/I7OtjdpltFSKC+DMNm2ZyEXAHcprsafD0"},"origin":"kaer.morhen","origin_server_ts":0,"prev_events":[["$o8KHsgSIYbJrddnd:kaer.morhen",{"sha256":"UgjMuCFXH4warIjKuwlRq9zZ6dSJrZWCd+CkqtgLSHM"}]],"redacts":"$o8KHsgSIYbJrddnd:kaer.morhen","room_id":"!roomid:kaer.morhen","sender":"@userid:kaer.morhen","signatures":{"kaer.morhen":{"ed25519:auto":"zxFGr/7aGOzqOEN6zRNrBpFkkMnfGFPbCteYL33wC+PycBPIK+2WRa5qlAR2+lcLiK3HjIzwRYkKNsVFTqvRAw"}},"type":"m.room.redaction"}`),
- }
- var redactedOutputs []api.OutputEvent
- deleteDatabase()
- _, producer, hevents := mustSendEvents(t, gomatrixserverlib.RoomVersionV1, redactionEvents)
- defer deleteDatabase()
- for _, msg := range producer.producedMessages {
- if msg.Type == api.OutputTypeRedactedEvent {
- redactedOutputs = append(redactedOutputs, *msg)
- }
- }
- wantRedactedOutputs := []api.OutputEvent{
- {
- Type: api.OutputTypeRedactedEvent,
- RedactedEvent: &api.OutputRedactedEvent{
- RedactedEventID: hevents[2].EventID(),
- RedactedBecause: hevents[3],
- },
- },
- {
- Type: api.OutputTypeRedactedEvent,
- RedactedEvent: &api.OutputRedactedEvent{
- RedactedEventID: hevents[4].EventID(),
- RedactedBecause: hevents[5],
- },
- },
- }
- t.Logf("redactedOutputs: %+v", redactedOutputs)
- if len(wantRedactedOutputs) != len(redactedOutputs) {
- t.Fatalf("Got %d redacted events, want %d", len(redactedOutputs), len(wantRedactedOutputs))
- }
- for i := 0; i < len(wantRedactedOutputs); i++ {
- if !reflect.DeepEqual(*redactedOutputs[i].RedactedEvent, *wantRedactedOutputs[i].RedactedEvent) {
- t.Errorf("OutputRedactionEvent %d: wrong event got:\n%+v want:\n%+v", i+1, redactedOutputs[i].RedactedEvent, wantRedactedOutputs[i].RedactedEvent)
- }
- }
-}
-
-// This tests that rewriting state works correctly.
-// This creates a small room with a create/join/name state, then replays it
-// with a new room name. We expect the output events to contain the original events,
-// followed by a single OutputNewRoomEvent with RewritesState set to true with the
-// rewritten state events (with the 2nd room name).
-func TestOutputRewritesState(t *testing.T) {
- roomID := "!foo:" + string(testOrigin)
- alice := "@alice:" + string(testOrigin)
- emptyKey := ""
- originalEvents := mustCreateEvents(t, gomatrixserverlib.RoomVersionV6, []fledglingEvent{
- {
- RoomID: roomID,
- Sender: alice,
- Content: map[string]interface{}{
- "creator": alice,
- "room_version": "6",
- },
- StateKey: &emptyKey,
- Type: gomatrixserverlib.MRoomCreate,
- },
- {
- RoomID: roomID,
- Sender: alice,
- Content: map[string]interface{}{
- "membership": "join",
- },
- StateKey: &alice,
- Type: gomatrixserverlib.MRoomMember,
- },
- {
- RoomID: roomID,
- Sender: alice,
- Content: map[string]interface{}{
- "body": "hello world",
- },
- StateKey: nil,
- Type: "m.room.message",
- },
- {
- RoomID: roomID,
- Sender: alice,
- Content: map[string]interface{}{
- "name": "Room Name",
- },
- StateKey: &emptyKey,
- Type: "m.room.name",
- },
- })
- rewriteEvents := mustCreateEvents(t, gomatrixserverlib.RoomVersionV6, []fledglingEvent{
- {
- RoomID: roomID,
- Sender: alice,
- Content: map[string]interface{}{
- "creator": alice,
- },
- StateKey: &emptyKey,
- Type: gomatrixserverlib.MRoomCreate,
- },
- {
- RoomID: roomID,
- Sender: alice,
- Content: map[string]interface{}{
- "membership": "join",
- },
- StateKey: &alice,
- Type: gomatrixserverlib.MRoomMember,
- },
- {
- RoomID: roomID,
- Sender: alice,
- Content: map[string]interface{}{
- "name": "Room Name 2",
- },
- StateKey: &emptyKey,
- Type: "m.room.name",
- },
- {
- RoomID: roomID,
- Sender: alice,
- Content: map[string]interface{}{
- "body": "hello world 2",
- },
- StateKey: nil,
- Type: "m.room.message",
- },
- })
- deleteDatabase()
- rsAPI, producer := mustCreateRoomserverAPI(t)
- defer deleteDatabase()
- err := api.SendEvents(context.Background(), rsAPI, api.KindNew, originalEvents, testOrigin, nil)
- if err != nil {
- t.Fatalf("failed to send original events: %s", err)
- }
- // assert we got them produced, this is just a sanity check and isn't the intention of this test
- if len(producer.producedMessages) != len(originalEvents) {
- t.Fatalf("SendEvents didn't result in same number of produced output events: got %d want %d", len(producer.producedMessages), len(originalEvents))
- }
- producer.producedMessages = nil // we aren't actually interested in these events, just the rewrite ones
-
- var inputEvents []api.InputRoomEvent
- // slowly build up the state IDs again, we're basically telling the roomserver what to store as a snapshot
- var stateIDs []string
- // skip the last event, we'll use this to tie together the rewrite as the KindNew event
- for i := 0; i < len(rewriteEvents)-1; i++ {
- ev := rewriteEvents[i]
- inputEvents = append(inputEvents, api.InputRoomEvent{
- Kind: api.KindOutlier,
- Event: ev,
- AuthEventIDs: ev.AuthEventIDs(),
- HasState: true,
- StateEventIDs: stateIDs,
- })
- if ev.StateKey() != nil {
- stateIDs = append(stateIDs, ev.EventID())
- }
- }
- lastEv := rewriteEvents[len(rewriteEvents)-1]
- inputEvents = append(inputEvents, api.InputRoomEvent{
- Kind: api.KindNew,
- Event: lastEv,
- AuthEventIDs: lastEv.AuthEventIDs(),
- HasState: true,
- StateEventIDs: stateIDs,
- })
- if err := api.SendInputRoomEvents(context.Background(), rsAPI, inputEvents); err != nil {
- t.Fatalf("SendInputRoomEvents returned error for rewrite events: %s", err)
- }
- // we should just have one output event with the entire state of the room in it
- if len(producer.producedMessages) != 1 {
- t.Fatalf("Rewritten events got output, want only 1 got %d", len(producer.producedMessages))
- }
- outputEvent := producer.producedMessages[len(producer.producedMessages)-1]
- if !outputEvent.NewRoomEvent.RewritesState {
- t.Errorf("RewritesState flag not set on output event")
- }
- if !reflect.DeepEqual(stateIDs, outputEvent.NewRoomEvent.AddsStateEventIDs) {
- t.Errorf("Output event is missing room state event IDs, got %v want %v", outputEvent.NewRoomEvent.AddsStateEventIDs, stateIDs)
- }
- if !bytes.Equal(outputEvent.NewRoomEvent.Event.JSON(), lastEv.JSON()) {
- t.Errorf(
- "Output event isn't the latest KindNew event:\ngot %s\nwant %s",
- string(outputEvent.NewRoomEvent.Event.JSON()),
- string(lastEv.JSON()),
- )
- }
- if len(outputEvent.NewRoomEvent.AddStateEvents) != len(stateIDs) {
- t.Errorf("Output event is missing room state events themselves, got %d want %d", len(outputEvent.NewRoomEvent.AddStateEvents), len(stateIDs))
- }
- // make sure the state got overwritten, check the room name
- hasRoomName := false
- for _, ev := range outputEvent.NewRoomEvent.AddStateEvents {
- if ev.Type() == "m.room.name" {
- hasRoomName = string(ev.Content()) == `{"name":"Room Name 2"}`
- }
- }
- if !hasRoomName {
- t.Errorf("Output event did not overwrite room state")
- }
-}
diff --git a/setup/base/base.go b/setup/base/base.go
index 06c97117..819fe1ad 100644
--- a/setup/base/base.go
+++ b/setup/base/base.go
@@ -81,8 +81,6 @@ type BaseDendrite struct {
Cfg *config.Dendrite
Caches *caching.Caches
DNSCache *gomatrixserverlib.DNSCache
- // KafkaConsumer sarama.Consumer
- // KafkaProducer sarama.SyncProducer
}
const NoListener = ""
diff --git a/setup/config/config.go b/setup/config/config.go
index 404b7178..eb371a54 100644
--- a/setup/config/config.go
+++ b/setup/config/config.go
@@ -40,7 +40,7 @@ var keyIDRegexp = regexp.MustCompile("^ed25519:[a-zA-Z0-9_]+$")
// Version is the current version of the config format.
// This will change whenever we make breaking changes to the config format.
-const Version = 1
+const Version = 2
// Dendrite contains all the config used by a dendrite process.
// Relative paths are resolved relative to the current working directory
@@ -292,7 +292,7 @@ func (config *Dendrite) Derive() error {
// SetDefaults sets default config values if they are not explicitly set.
func (c *Dendrite) Defaults(generate bool) {
- c.Version = 1
+ c.Version = Version
c.Global.Defaults(generate)
c.ClientAPI.Defaults(generate)
@@ -325,6 +325,7 @@ func (c *Dendrite) Verify(configErrs *ConfigErrors, isMonolith bool) {
}
func (c *Dendrite) Wiring() {
+ c.Global.JetStream.Matrix = &c.Global
c.ClientAPI.Matrix = &c.Global
c.EDUServer.Matrix = &c.Global
c.FederationAPI.Matrix = &c.Global
@@ -420,7 +421,11 @@ func (config *Dendrite) check(_ bool) error { // monolithic
if config.Version != Version {
configErrs.Add(fmt.Sprintf(
- "unknown config version %q, expected %q", config.Version, Version,
+ "config version is %q, expected %q - this means that the format of the configuration "+
+ "file has changed in some significant way, so please revisit the sample config "+
+ "and ensure you are not missing any important options that may have been added "+
+ "or changed recently!",
+ config.Version, Version,
))
return configErrs
}
diff --git a/setup/config/config_global.go b/setup/config/config_global.go
index 20ee6d37..6f2306a6 100644
--- a/setup/config/config_global.go
+++ b/setup/config/config_global.go
@@ -46,8 +46,8 @@ type Global struct {
// Defaults to an empty array.
TrustedIDServers []string `yaml:"trusted_third_party_id_servers"`
- // Kafka/Naffka configuration
- Kafka Kafka `yaml:"kafka"`
+ // JetStream configuration
+ JetStream JetStream `yaml:"jetstream"`
// Metrics configuration
Metrics Metrics `yaml:"metrics"`
@@ -68,7 +68,7 @@ func (c *Global) Defaults(generate bool) {
}
c.KeyValidityPeriod = time.Hour * 24 * 7
- c.Kafka.Defaults(generate)
+ c.JetStream.Defaults(generate)
c.Metrics.Defaults(generate)
c.DNSCache.Defaults()
c.Sentry.Defaults()
@@ -78,7 +78,7 @@ func (c *Global) Verify(configErrs *ConfigErrors, isMonolith bool) {
checkNotEmpty(configErrs, "global.server_name", string(c.ServerName))
checkNotEmpty(configErrs, "global.private_key", string(c.PrivateKeyPath))
- c.Kafka.Verify(configErrs, isMonolith)
+ c.JetStream.Verify(configErrs, isMonolith)
c.Metrics.Verify(configErrs, isMonolith)
c.Sentry.Verify(configErrs, isMonolith)
c.DNSCache.Verify(configErrs, isMonolith)
diff --git a/setup/config/config_jetstream.go b/setup/config/config_jetstream.go
new file mode 100644
index 00000000..0bd84899
--- /dev/null
+++ b/setup/config/config_jetstream.go
@@ -0,0 +1,40 @@
+package config
+
+import (
+ "fmt"
+)
+
+type JetStream struct {
+ Matrix *Global `yaml:"-"`
+
+ // Persistent directory to store JetStream streams in.
+ StoragePath Path `yaml:"storage_path"`
+ // A list of NATS addresses to connect to. If none are specified, an
+ // internal NATS server will be used when running in monolith mode only.
+ Addresses []string `yaml:"addresses"`
+ // The prefix to use for stream names for this homeserver - really only
+ // useful if running more than one Dendrite on the same NATS deployment.
+ TopicPrefix string `yaml:"topic_prefix"`
+ // Keep all storage in memory. This is mostly useful for unit tests.
+ InMemory bool `yaml:"in_memory"`
+}
+
+func (c *JetStream) TopicFor(name string) string {
+ return fmt.Sprintf("%s%s", c.TopicPrefix, name)
+}
+
+func (c *JetStream) Defaults(generate bool) {
+ c.Addresses = []string{}
+ c.TopicPrefix = "Dendrite"
+ if generate {
+ c.StoragePath = Path("./")
+ }
+}
+
+func (c *JetStream) Verify(configErrs *ConfigErrors, isMonolith bool) {
+ // If we are running in a polylith deployment then we need at least
+ // one NATS JetStream server to talk to.
+ if !isMonolith {
+ checkNotZero(configErrs, "global.jetstream.addresses", int64(len(c.Addresses)))
+ }
+}
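(Illustrative only.) TopicFor simply prefixes the stream name, so with the default prefix a stream named "OutputRoomEvent" resolves to the subject "DendriteOutputRoomEvent"; the real stream name constants live in setup/jetstream/streams.go, and InMemory exists so tests can switch streams to nats.MemoryStorage without touching disk:

    cfg := &config.JetStream{TopicPrefix: "Dendrite"}
    subject := cfg.TopicFor("OutputRoomEvent") // -> "DendriteOutputRoomEvent"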
diff --git a/setup/config/config_kafka.go b/setup/config/config_kafka.go
deleted file mode 100644
index 5a61f17e..00000000
--- a/setup/config/config_kafka.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package config
-
-import "fmt"
-
-// Defined Kafka topics.
-const (
- TopicOutputTypingEvent = "OutputTypingEvent"
- TopicOutputSendToDeviceEvent = "OutputSendToDeviceEvent"
- TopicOutputKeyChangeEvent = "OutputKeyChangeEvent"
- TopicOutputRoomEvent = "OutputRoomEvent"
- TopicOutputClientData = "OutputClientData"
- TopicOutputReceiptEvent = "OutputReceiptEvent"
-)
-
-type Kafka struct {
- // A list of kafka addresses to connect to.
- Addresses []string `yaml:"addresses"`
- // The prefix to use for Kafka topic names for this homeserver - really only
- // useful if running more than one Dendrite on the same Kafka deployment.
- TopicPrefix string `yaml:"topic_prefix"`
- // Whether to use naffka instead of kafka.
- // Naffka can only be used when running dendrite as a single monolithic server.
- // Kafka can be used both with a monolithic server and when running the
- // components as separate servers.
- UseNaffka bool `yaml:"use_naffka"`
- // The Naffka database is used internally by the naffka library, if used.
- Database DatabaseOptions `yaml:"naffka_database"`
- // The max size a Kafka message passed between consumer/producer can have
- // Equals roughly max.message.bytes / fetch.message.max.bytes in Kafka
- MaxMessageBytes *int `yaml:"max_message_bytes"`
-}
-
-func (k *Kafka) TopicFor(name string) string {
- return fmt.Sprintf("%s%s", k.TopicPrefix, name)
-}
-
-func (c *Kafka) Defaults(generate bool) {
- c.UseNaffka = true
- c.Database.Defaults(10)
- if generate {
- c.Addresses = []string{"localhost:2181"}
- c.Database.ConnectionString = DataSource("file:naffka.db")
- }
- c.TopicPrefix = "Dendrite"
-
- maxBytes := 1024 * 1024 * 8 // about 8MB
- c.MaxMessageBytes = &maxBytes
-}
-
-func (c *Kafka) Verify(configErrs *ConfigErrors, isMonolith bool) {
- if c.UseNaffka {
- if !isMonolith {
- configErrs.Add("naffka can only be used in a monolithic server")
- }
- checkNotEmpty(configErrs, "global.kafka.database.connection_string", string(c.Database.ConnectionString))
- } else {
- // If we aren't using naffka then we need to have at least one kafka
- // server to talk to.
- checkNotZero(configErrs, "global.kafka.addresses", int64(len(c.Addresses)))
- }
- checkNotEmpty(configErrs, "global.kafka.topic_prefix", string(c.TopicPrefix))
- checkPositive(configErrs, "global.kafka.max_message_bytes", int64(*c.MaxMessageBytes))
-}
diff --git a/setup/config/config_test.go b/setup/config/config_test.go
index ffe9edab..5aa54929 100644
--- a/setup/config/config_test.go
+++ b/setup/config/config_test.go
@@ -33,7 +33,7 @@ func TestLoadConfigRelative(t *testing.T) {
}
const testConfig = `
-version: 1
+version: 2
global:
server_name: localhost
private_key: matrix_key.pem
diff --git a/setup/jetstream/helpers.go b/setup/jetstream/helpers.go
new file mode 100644
index 00000000..2d563226
--- /dev/null
+++ b/setup/jetstream/helpers.go
@@ -0,0 +1,11 @@
+package jetstream
+
+import "github.com/nats-io/nats.go"
+
+func WithJetStreamMessage(msg *nats.Msg, f func(msg *nats.Msg) bool) {
+ if f(msg) {
+ _ = msg.Ack()
+ } else {
+ _ = msg.Nak()
+ }
+}
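
A hedged usage sketch for the helper above, assuming a JetStream context obtained from jetstream.Prepare (defined in the next file): the callback returns true to Ack the message and false to Nak it so that JetStream redelivers it later. The subject name and handler body are illustrative only.

package main

import (
	"log"

	"github.com/matrix-org/dendrite/setup/jetstream"
	"github.com/nats-io/nats.go"
)

// consumeExample shows the pattern the sync API consumers below follow:
// subscribe to a JetStream subject and wrap the handler in
// WithJetStreamMessage so acknowledgement is handled in one place.
func consumeExample(js nats.JetStreamContext, subject string) error {
	_, err := js.Subscribe(subject, func(msg *nats.Msg) {
		jetstream.WithJetStreamMessage(msg, func(msg *nats.Msg) bool {
			log.Printf("received %d bytes on %q", len(msg.Data), msg.Subject)
			return true // return false instead to Nak and have the message redelivered
		})
	})
	return err
}

// no-op main so the sketch compiles as a standalone file
func main() {}
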
diff --git a/setup/jetstream/nats.go b/setup/jetstream/nats.go
new file mode 100644
index 00000000..eec68d82
--- /dev/null
+++ b/setup/jetstream/nats.go
@@ -0,0 +1,93 @@
+package jetstream
+
+import (
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/Shopify/sarama"
+ "github.com/matrix-org/dendrite/setup/config"
+ "github.com/sirupsen/logrus"
+
+ saramajs "github.com/S7evinK/saramajetstream"
+ natsserver "github.com/nats-io/nats-server/v2/server"
+ "github.com/nats-io/nats.go"
+ natsclient "github.com/nats-io/nats.go"
+)
+
+var natsServer *natsserver.Server
+var natsServerMutex sync.Mutex
+
+func Prepare(cfg *config.JetStream) (nats.JetStreamContext, sarama.Consumer, sarama.SyncProducer) {
+ // If external NATS addresses are configured, connect to those; otherwise start (or reuse) an in-process NATS server.
+ if len(cfg.Addresses) != 0 {
+ return setupNATS(cfg, nil)
+ }
+ natsServerMutex.Lock()
+ if natsServer == nil {
+ var err error
+ natsServer, err = natsserver.NewServer(&natsserver.Options{
+ ServerName: "monolith",
+ DontListen: true,
+ JetStream: true,
+ StoreDir: string(cfg.StoragePath),
+ NoSystemAccount: true,
+ AllowNewAccounts: false,
+ })
+ if err != nil {
+ panic(err)
+ }
+ natsServer.ConfigureLogger()
+ go natsServer.Start()
+ }
+ natsServerMutex.Unlock()
+ if !natsServer.ReadyForConnections(time.Second * 10) {
+ logrus.Fatalln("NATS did not start in time")
+ }
+ nc, err := natsclient.Connect("", natsclient.InProcessServer(natsServer))
+ if err != nil {
+ logrus.WithError(err).Fatalln("Failed to create NATS client")
+ }
+ return setupNATS(cfg, nc)
+}
+
+func setupNATS(cfg *config.JetStream, nc *natsclient.Conn) (nats.JetStreamContext, sarama.Consumer, sarama.SyncProducer) {
+ if nc == nil {
+ var err error
+ nc, err = nats.Connect(strings.Join(cfg.Addresses, ","))
+ if err != nil {
+ logrus.WithError(err).Panic("Unable to connect to NATS")
+ return nil, nil, nil
+ }
+ }
+
+ s, err := nc.JetStream()
+ if err != nil {
+ logrus.WithError(err).Panic("Unable to get JetStream context")
+ return nil, nil, nil
+ }
+
+ for _, stream := range streams { // streams are defined in streams.go
+ name := cfg.TopicFor(stream.Name)
+ info, err := s.StreamInfo(name)
+ if err != nil && err != natsclient.ErrStreamNotFound {
+ logrus.WithError(err).Fatal("Unable to get stream info")
+ }
+ if info == nil {
+ stream.Subjects = []string{name}
+ // If we're trying to keep everything in memory (e.g. unit tests)
+ // then override the storage policy.
+ if cfg.InMemory {
+ stream.Storage = nats.MemoryStorage
+ }
+
+ if _, err = s.AddStream(stream); err != nil {
+ logrus.WithError(err).WithField("stream", name).Fatal("Unable to add stream")
+ }
+ }
+ }
+
+ consumer := saramajs.NewJetStreamConsumer(nc, s, "")
+ producer := saramajs.NewJetStreamProducer(nc, s, "")
+ return s, consumer, producer
+}
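
A hedged sketch of how a component might call Prepare, mirroring what syncapi.AddPublicRoutes does later in this diff: with no addresses configured it starts the in-process NATS server and declares the streams from streams.go under the configured prefix. The storage path used here is illustrative.

package main

import (
	"log"

	"github.com/matrix-org/dendrite/setup/config"
	"github.com/matrix-org/dendrite/setup/jetstream"
)

func main() {
	var cfg config.JetStream
	cfg.Defaults(true)              // empty Addresses => in-process NATS server
	cfg.StoragePath = "./jetstream" // illustrative persistent stream directory

	// Prepare returns a JetStream context plus sarama-compatible consumer and
	// producer wrappers; it also creates any missing streams from streams.go,
	// named with cfg.TopicPrefix.
	js, consumer, producer := jetstream.Prepare(&cfg)
	_ = consumer
	_ = producer

	info, err := js.StreamInfo(cfg.TopicFor(jetstream.OutputRoomEvent))
	if err != nil {
		log.Fatalf("stream lookup failed: %v", err)
	}
	log.Printf("stream %q has %d messages", info.Config.Name, info.State.Msgs)
}
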
diff --git a/setup/jetstream/streams.go b/setup/jetstream/streams.go
new file mode 100644
index 00000000..0fd31082
--- /dev/null
+++ b/setup/jetstream/streams.go
@@ -0,0 +1,61 @@
+package jetstream
+
+import (
+ "time"
+
+ "github.com/nats-io/nats.go"
+)
+
+const (
+ UserID = "user_id"
+ RoomID = "room_id"
+)
+
+var (
+ InputRoomEvent = "InputRoomEvent"
+ OutputRoomEvent = "OutputRoomEvent"
+ OutputSendToDeviceEvent = "OutputSendToDeviceEvent"
+ OutputKeyChangeEvent = "OutputKeyChangeEvent"
+ OutputTypingEvent = "OutputTypingEvent"
+ OutputClientData = "OutputClientData"
+ OutputReceiptEvent = "OutputReceiptEvent"
+)
+
+var streams = []*nats.StreamConfig{
+ {
+ Name: InputRoomEvent,
+ Retention: nats.InterestPolicy,
+ Storage: nats.FileStorage,
+ },
+ {
+ Name: OutputRoomEvent,
+ Retention: nats.InterestPolicy,
+ Storage: nats.FileStorage,
+ },
+ {
+ Name: OutputSendToDeviceEvent,
+ Retention: nats.InterestPolicy,
+ Storage: nats.FileStorage,
+ },
+ {
+ Name: OutputKeyChangeEvent,
+ Retention: nats.LimitsPolicy,
+ Storage: nats.FileStorage,
+ },
+ {
+ Name: OutputTypingEvent,
+ Retention: nats.InterestPolicy,
+ Storage: nats.MemoryStorage,
+ MaxAge: time.Second * 60,
+ },
+ {
+ Name: OutputClientData,
+ Retention: nats.InterestPolicy,
+ Storage: nats.FileStorage,
+ },
+ {
+ Name: OutputReceiptEvent,
+ Retention: nats.InterestPolicy,
+ Storage: nats.FileStorage,
+ },
+}
diff --git a/setup/kafka/kafka.go b/setup/kafka/kafka.go
deleted file mode 100644
index a2902c96..00000000
--- a/setup/kafka/kafka.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package kafka
-
-import (
- "github.com/Shopify/sarama"
- "github.com/matrix-org/dendrite/setup/config"
- "github.com/matrix-org/naffka"
- naffkaStorage "github.com/matrix-org/naffka/storage"
- "github.com/sirupsen/logrus"
-)
-
-func SetupConsumerProducer(cfg *config.Kafka) (sarama.Consumer, sarama.SyncProducer) {
- if cfg.UseNaffka {
- return setupNaffka(cfg)
- }
- return setupKafka(cfg)
-}
-
-// setupKafka creates kafka consumer/producer pair from the config.
-func setupKafka(cfg *config.Kafka) (sarama.Consumer, sarama.SyncProducer) {
- sCfg := sarama.NewConfig()
- sCfg.Producer.MaxMessageBytes = *cfg.MaxMessageBytes
- sCfg.Producer.Return.Successes = true
- sCfg.Consumer.Fetch.Default = int32(*cfg.MaxMessageBytes)
-
- consumer, err := sarama.NewConsumer(cfg.Addresses, sCfg)
- if err != nil {
- logrus.WithError(err).Panic("failed to start kafka consumer")
- }
-
- producer, err := sarama.NewSyncProducer(cfg.Addresses, sCfg)
- if err != nil {
- logrus.WithError(err).Panic("failed to setup kafka producers")
- }
-
- return consumer, producer
-}
-
-// In monolith mode with Naffka, we don't have the same constraints about
-// consuming the same topic from more than one place like we do with Kafka.
-// Therefore, we will only open one Naffka connection in case Naffka is
-// running on SQLite.
-var naffkaInstance *naffka.Naffka
-
-// setupNaffka creates kafka consumer/producer pair from the config.
-func setupNaffka(cfg *config.Kafka) (sarama.Consumer, sarama.SyncProducer) {
- if naffkaInstance != nil {
- return naffkaInstance, naffkaInstance
- }
- naffkaDB, err := naffkaStorage.NewDatabase(string(cfg.Database.ConnectionString))
- if err != nil {
- logrus.WithError(err).Panic("Failed to setup naffka database")
- }
- naffkaInstance, err = naffka.New(naffkaDB)
- if err != nil {
- logrus.WithError(err).Panic("Failed to setup naffka")
- }
- return naffkaInstance, naffkaInstance
-}
diff --git a/setup/mscs/msc2836/msc2836.go b/setup/mscs/msc2836/msc2836.go
index 7e2ecfb9..e048d736 100644
--- a/setup/mscs/msc2836/msc2836.go
+++ b/setup/mscs/msc2836/msc2836.go
@@ -649,7 +649,7 @@ func (rc *reqCtx) injectResponseToRoomserver(res *gomatrixserverlib.MSC2836Event
})
}
// we've got the data by this point so use a background context
- err = roomserver.SendInputRoomEvents(context.Background(), rc.rsAPI, ires)
+ err = roomserver.SendInputRoomEvents(context.Background(), rc.rsAPI, ires, false)
if err != nil {
util.GetLogger(rc.ctx).WithError(err).Error("failed to inject MSC2836EventRelationshipsResponse into the roomserver")
}
diff --git a/syncapi/consumers/clientapi.go b/syncapi/consumers/clientapi.go
index a166ae14..85710cdd 100644
--- a/syncapi/consumers/clientapi.go
+++ b/syncapi/consumers/clientapi.go
@@ -18,90 +18,88 @@ import (
"context"
"encoding/json"
- "github.com/Shopify/sarama"
"github.com/getsentry/sentry-go"
- "github.com/matrix-org/dendrite/internal"
"github.com/matrix-org/dendrite/internal/eventutil"
"github.com/matrix-org/dendrite/setup/config"
+ "github.com/matrix-org/dendrite/setup/jetstream"
"github.com/matrix-org/dendrite/setup/process"
"github.com/matrix-org/dendrite/syncapi/notifier"
"github.com/matrix-org/dendrite/syncapi/storage"
"github.com/matrix-org/dendrite/syncapi/types"
+ "github.com/nats-io/nats.go"
log "github.com/sirupsen/logrus"
)
// OutputClientDataConsumer consumes events that originated in the client API server.
type OutputClientDataConsumer struct {
- clientAPIConsumer *internal.ContinualConsumer
- db storage.Database
- stream types.StreamProvider
- notifier *notifier.Notifier
+ ctx context.Context
+ jetstream nats.JetStreamContext
+ topic string
+ db storage.Database
+ stream types.StreamProvider
+ notifier *notifier.Notifier
}
// NewOutputClientDataConsumer creates a new OutputClientData consumer. Call Start() to begin consuming from room servers.
func NewOutputClientDataConsumer(
process *process.ProcessContext,
cfg *config.SyncAPI,
- kafkaConsumer sarama.Consumer,
+ js nats.JetStreamContext,
store storage.Database,
notifier *notifier.Notifier,
stream types.StreamProvider,
) *OutputClientDataConsumer {
- consumer := internal.ContinualConsumer{
- Process: process,
- ComponentName: "syncapi/clientapi",
- Topic: string(cfg.Matrix.Kafka.TopicFor(config.TopicOutputClientData)),
- Consumer: kafkaConsumer,
- PartitionStore: store,
+ return &OutputClientDataConsumer{
+ ctx: process.Context(),
+ jetstream: js,
+ topic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputClientData),
+ db: store,
+ notifier: notifier,
+ stream: stream,
}
- s := &OutputClientDataConsumer{
- clientAPIConsumer: &consumer,
- db: store,
- notifier: notifier,
- stream: stream,
- }
- consumer.ProcessMessage = s.onMessage
-
- return s
}
// Start consuming from room servers
func (s *OutputClientDataConsumer) Start() error {
- return s.clientAPIConsumer.Start()
+ _, err := s.jetstream.Subscribe(s.topic, s.onMessage)
+ return err
}
// onMessage is called when the sync server receives a new event from the client API server output log.
// It is not safe for this function to be called from multiple goroutines, or else the
// sync stream position may race and be incorrectly calculated.
-func (s *OutputClientDataConsumer) onMessage(msg *sarama.ConsumerMessage) error {
- // Parse out the event JSON
- var output eventutil.AccountData
- if err := json.Unmarshal(msg.Value, &output); err != nil {
- // If the message was invalid, log it and move on to the next message in the stream
- log.WithError(err).Errorf("client API server output log: message parse failure")
- sentry.CaptureException(err)
- return nil
- }
+func (s *OutputClientDataConsumer) onMessage(msg *nats.Msg) {
+ jetstream.WithJetStreamMessage(msg, func(msg *nats.Msg) bool {
+ // Parse out the event JSON
+ userID := msg.Header.Get(jetstream.UserID)
+ var output eventutil.AccountData
+ if err := json.Unmarshal(msg.Data, &output); err != nil {
+ // If the message was invalid, log it and move on to the next message in the stream
+ log.WithError(err).Errorf("client API server output log: message parse failure")
+ sentry.CaptureException(err)
+ return true
+ }
- log.WithFields(log.Fields{
- "type": output.Type,
- "room_id": output.RoomID,
- }).Info("received data from client API server")
-
- streamPos, err := s.db.UpsertAccountData(
- context.TODO(), string(msg.Key), output.RoomID, output.Type,
- )
- if err != nil {
- sentry.CaptureException(err)
log.WithFields(log.Fields{
- "type": output.Type,
- "room_id": output.RoomID,
- log.ErrorKey: err,
- }).Panicf("could not save account data")
- }
+ "type": output.Type,
+ "room_id": output.RoomID,
+ }).Info("received data from client API server")
+
+ streamPos, err := s.db.UpsertAccountData(
+ s.ctx, userID, output.RoomID, output.Type,
+ )
+ if err != nil {
+ sentry.CaptureException(err)
+ log.WithFields(log.Fields{
+ "type": output.Type,
+ "room_id": output.RoomID,
+ log.ErrorKey: err,
+ }).Panicf("could not save account data")
+ }
- s.stream.Advance(streamPos)
- s.notifier.OnNewAccountData(string(msg.Key), types.StreamingToken{AccountDataPosition: streamPos})
+ s.stream.Advance(streamPos)
+ s.notifier.OnNewAccountData(userID, types.StreamingToken{AccountDataPosition: streamPos})
- return nil
+ return true
+ })
}
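
The consumer above now reads the user ID from a message header rather than from the Kafka message key. The producer side is not shown in this hunk; the following is only a hedged sketch of what publishing with that header could look like, with the subject and payload left as assumptions.

package main

import (
	"github.com/matrix-org/dendrite/setup/jetstream"
	"github.com/nats-io/nats.go"
)

// publishAccountData is a hedged sketch of the producer side that
// OutputClientDataConsumer above expects: the JSON-encoded account data goes
// in the message body and the user ID travels in the "user_id" header.
func publishAccountData(js nats.JetStreamContext, subject, userID string, accountDataJSON []byte) error {
	msg := nats.NewMsg(subject) // e.g. cfg.Matrix.JetStream.TopicFor(jetstream.OutputClientData)
	msg.Header.Set(jetstream.UserID, userID)
	msg.Data = accountDataJSON
	_, err := js.PublishMsg(msg)
	return err
}

// no-op main so the sketch compiles as a standalone file
func main() {}
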
diff --git a/syncapi/consumers/eduserver_receipts.go b/syncapi/consumers/eduserver_receipts.go
index 668f945b..582e1d64 100644
--- a/syncapi/consumers/eduserver_receipts.go
+++ b/syncapi/consumers/eduserver_receipts.go
@@ -18,24 +18,26 @@ import (
"context"
"encoding/json"
- "github.com/Shopify/sarama"
"github.com/getsentry/sentry-go"
"github.com/matrix-org/dendrite/eduserver/api"
- "github.com/matrix-org/dendrite/internal"
"github.com/matrix-org/dendrite/setup/config"
+ "github.com/matrix-org/dendrite/setup/jetstream"
"github.com/matrix-org/dendrite/setup/process"
"github.com/matrix-org/dendrite/syncapi/notifier"
"github.com/matrix-org/dendrite/syncapi/storage"
"github.com/matrix-org/dendrite/syncapi/types"
+ "github.com/nats-io/nats.go"
log "github.com/sirupsen/logrus"
)
// OutputReceiptEventConsumer consumes events that originated in the EDU server.
type OutputReceiptEventConsumer struct {
- receiptConsumer *internal.ContinualConsumer
- db storage.Database
- stream types.StreamProvider
- notifier *notifier.Notifier
+ ctx context.Context
+ jetstream nats.JetStreamContext
+ topic string
+ db storage.Database
+ stream types.StreamProvider
+ notifier *notifier.Notifier
}
// NewOutputReceiptEventConsumer creates a new OutputReceiptEventConsumer.
@@ -43,61 +45,53 @@ type OutputReceiptEventConsumer struct {
func NewOutputReceiptEventConsumer(
process *process.ProcessContext,
cfg *config.SyncAPI,
- kafkaConsumer sarama.Consumer,
+ js nats.JetStreamContext,
store storage.Database,
notifier *notifier.Notifier,
stream types.StreamProvider,
) *OutputReceiptEventConsumer {
-
- consumer := internal.ContinualConsumer{
- Process: process,
- ComponentName: "syncapi/eduserver/receipt",
- Topic: cfg.Matrix.Kafka.TopicFor(config.TopicOutputReceiptEvent),
- Consumer: kafkaConsumer,
- PartitionStore: store,
- }
-
- s := &OutputReceiptEventConsumer{
- receiptConsumer: &consumer,
- db: store,
- notifier: notifier,
- stream: stream,
+ return &OutputReceiptEventConsumer{
+ ctx: process.Context(),
+ jetstream: js,
+ topic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputReceiptEvent),
+ db: store,
+ notifier: notifier,
+ stream: stream,
}
-
- consumer.ProcessMessage = s.onMessage
-
- return s
}
// Start consuming from EDU api
func (s *OutputReceiptEventConsumer) Start() error {
- return s.receiptConsumer.Start()
+ _, err := s.jetstream.Subscribe(s.topic, s.onMessage)
+ return err
}
-func (s *OutputReceiptEventConsumer) onMessage(msg *sarama.ConsumerMessage) error {
- var output api.OutputReceiptEvent
- if err := json.Unmarshal(msg.Value, &output); err != nil {
- // If the message was invalid, log it and move on to the next message in the stream
- log.WithError(err).Errorf("EDU server output log: message parse failure")
- sentry.CaptureException(err)
- return nil
- }
+func (s *OutputReceiptEventConsumer) onMessage(msg *nats.Msg) {
+ jetstream.WithJetStreamMessage(msg, func(msg *nats.Msg) bool {
+ var output api.OutputReceiptEvent
+ if err := json.Unmarshal(msg.Data, &output); err != nil {
+ // If the message was invalid, log it and move on to the next message in the stream
+ log.WithError(err).Errorf("EDU server output log: message parse failure")
+ sentry.CaptureException(err)
+ return true
+ }
- streamPos, err := s.db.StoreReceipt(
- context.TODO(),
- output.RoomID,
- output.Type,
- output.UserID,
- output.EventID,
- output.Timestamp,
- )
- if err != nil {
- sentry.CaptureException(err)
- return err
- }
+ streamPos, err := s.db.StoreReceipt(
+ s.ctx,
+ output.RoomID,
+ output.Type,
+ output.UserID,
+ output.EventID,
+ output.Timestamp,
+ )
+ if err != nil {
+ sentry.CaptureException(err)
+ return true
+ }
- s.stream.Advance(streamPos)
- s.notifier.OnNewReceipt(output.RoomID, types.StreamingToken{ReceiptPosition: streamPos})
+ s.stream.Advance(streamPos)
+ s.notifier.OnNewReceipt(output.RoomID, types.StreamingToken{ReceiptPosition: streamPos})
- return nil
+ return true
+ })
}
diff --git a/syncapi/consumers/eduserver_sendtodevice.go b/syncapi/consumers/eduserver_sendtodevice.go
index 5e626aef..6579c303 100644
--- a/syncapi/consumers/eduserver_sendtodevice.go
+++ b/syncapi/consumers/eduserver_sendtodevice.go
@@ -18,27 +18,29 @@ import (
"context"
"encoding/json"
- "github.com/Shopify/sarama"
"github.com/getsentry/sentry-go"
"github.com/matrix-org/dendrite/eduserver/api"
- "github.com/matrix-org/dendrite/internal"
"github.com/matrix-org/dendrite/setup/config"
+ "github.com/matrix-org/dendrite/setup/jetstream"
"github.com/matrix-org/dendrite/setup/process"
"github.com/matrix-org/dendrite/syncapi/notifier"
"github.com/matrix-org/dendrite/syncapi/storage"
"github.com/matrix-org/dendrite/syncapi/types"
"github.com/matrix-org/gomatrixserverlib"
"github.com/matrix-org/util"
+ "github.com/nats-io/nats.go"
log "github.com/sirupsen/logrus"
)
// OutputSendToDeviceEventConsumer consumes events that originated in the EDU server.
type OutputSendToDeviceEventConsumer struct {
- sendToDeviceConsumer *internal.ContinualConsumer
- db storage.Database
- serverName gomatrixserverlib.ServerName // our server name
- stream types.StreamProvider
- notifier *notifier.Notifier
+ ctx context.Context
+ jetstream nats.JetStreamContext
+ topic string
+ db storage.Database
+ serverName gomatrixserverlib.ServerName // our server name
+ stream types.StreamProvider
+ notifier *notifier.Notifier
}
// NewOutputSendToDeviceEventConsumer creates a new OutputSendToDeviceEventConsumer.
@@ -46,78 +48,70 @@ type OutputSendToDeviceEventConsumer struct {
func NewOutputSendToDeviceEventConsumer(
process *process.ProcessContext,
cfg *config.SyncAPI,
- kafkaConsumer sarama.Consumer,
+ js nats.JetStreamContext,
store storage.Database,
notifier *notifier.Notifier,
stream types.StreamProvider,
) *OutputSendToDeviceEventConsumer {
-
- consumer := internal.ContinualConsumer{
- Process: process,
- ComponentName: "syncapi/eduserver/sendtodevice",
- Topic: string(cfg.Matrix.Kafka.TopicFor(config.TopicOutputSendToDeviceEvent)),
- Consumer: kafkaConsumer,
- PartitionStore: store,
- }
-
- s := &OutputSendToDeviceEventConsumer{
- sendToDeviceConsumer: &consumer,
- db: store,
- serverName: cfg.Matrix.ServerName,
- notifier: notifier,
- stream: stream,
+ return &OutputSendToDeviceEventConsumer{
+ ctx: process.Context(),
+ jetstream: js,
+ topic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputSendToDeviceEvent),
+ db: store,
+ serverName: cfg.Matrix.ServerName,
+ notifier: notifier,
+ stream: stream,
}
-
- consumer.ProcessMessage = s.onMessage
-
- return s
}
// Start consuming from EDU api
func (s *OutputSendToDeviceEventConsumer) Start() error {
- return s.sendToDeviceConsumer.Start()
+ _, err := s.jetstream.Subscribe(s.topic, s.onMessage)
+ return err
}
-func (s *OutputSendToDeviceEventConsumer) onMessage(msg *sarama.ConsumerMessage) error {
- var output api.OutputSendToDeviceEvent
- if err := json.Unmarshal(msg.Value, &output); err != nil {
- // If the message was invalid, log it and move on to the next message in the stream
- log.WithError(err).Errorf("EDU server output log: message parse failure")
- sentry.CaptureException(err)
- return err
- }
-
- _, domain, err := gomatrixserverlib.SplitID('@', output.UserID)
- if err != nil {
- sentry.CaptureException(err)
- return err
- }
- if domain != s.serverName {
- return nil
- }
-
- util.GetLogger(context.TODO()).WithFields(log.Fields{
- "sender": output.Sender,
- "user_id": output.UserID,
- "device_id": output.DeviceID,
- "event_type": output.Type,
- }).Info("sync API received send-to-device event from EDU server")
-
- streamPos, err := s.db.StoreNewSendForDeviceMessage(
- context.TODO(), output.UserID, output.DeviceID, output.SendToDeviceEvent,
- )
- if err != nil {
- sentry.CaptureException(err)
- log.WithError(err).Errorf("failed to store send-to-device message")
- return err
- }
-
- s.stream.Advance(streamPos)
- s.notifier.OnNewSendToDevice(
- output.UserID,
- []string{output.DeviceID},
- types.StreamingToken{SendToDevicePosition: streamPos},
- )
-
- return nil
+func (s *OutputSendToDeviceEventConsumer) onMessage(msg *nats.Msg) {
+ jetstream.WithJetStreamMessage(msg, func(msg *nats.Msg) bool {
+ var output api.OutputSendToDeviceEvent
+ if err := json.Unmarshal(msg.Data, &output); err != nil {
+ // If the message was invalid, log it and move on to the next message in the stream
+ log.WithError(err).Errorf("EDU server output log: message parse failure")
+ sentry.CaptureException(err)
+ return true
+ }
+
+ _, domain, err := gomatrixserverlib.SplitID('@', output.UserID)
+ if err != nil {
+ sentry.CaptureException(err)
+ return true
+ }
+ if domain != s.serverName {
+ return true
+ }
+
+ util.GetLogger(context.TODO()).WithFields(log.Fields{
+ "sender": output.Sender,
+ "user_id": output.UserID,
+ "device_id": output.DeviceID,
+ "event_type": output.Type,
+ }).Info("sync API received send-to-device event from EDU server")
+
+ streamPos, err := s.db.StoreNewSendForDeviceMessage(
+ s.ctx, output.UserID, output.DeviceID, output.SendToDeviceEvent,
+ )
+ if err != nil {
+ sentry.CaptureException(err)
+ log.WithError(err).Errorf("failed to store send-to-device message")
+ return false
+ }
+
+ s.stream.Advance(streamPos)
+ s.notifier.OnNewSendToDevice(
+ output.UserID,
+ []string{output.DeviceID},
+ types.StreamingToken{SendToDevicePosition: streamPos},
+ )
+
+ return true
+ })
}
diff --git a/syncapi/consumers/eduserver_typing.go b/syncapi/consumers/eduserver_typing.go
index 8d06e3ca..487befe8 100644
--- a/syncapi/consumers/eduserver_typing.go
+++ b/syncapi/consumers/eduserver_typing.go
@@ -15,27 +15,30 @@
package consumers
import (
+ "context"
"encoding/json"
- "github.com/Shopify/sarama"
"github.com/getsentry/sentry-go"
"github.com/matrix-org/dendrite/eduserver/api"
"github.com/matrix-org/dendrite/eduserver/cache"
- "github.com/matrix-org/dendrite/internal"
"github.com/matrix-org/dendrite/setup/config"
+ "github.com/matrix-org/dendrite/setup/jetstream"
"github.com/matrix-org/dendrite/setup/process"
"github.com/matrix-org/dendrite/syncapi/notifier"
"github.com/matrix-org/dendrite/syncapi/storage"
"github.com/matrix-org/dendrite/syncapi/types"
+ "github.com/nats-io/nats.go"
log "github.com/sirupsen/logrus"
)
// OutputTypingEventConsumer consumes events that originated in the EDU server.
type OutputTypingEventConsumer struct {
- typingConsumer *internal.ContinualConsumer
- eduCache *cache.EDUCache
- stream types.StreamProvider
- notifier *notifier.Notifier
+ ctx context.Context
+ jetstream nats.JetStreamContext
+ topic string
+ eduCache *cache.EDUCache
+ stream types.StreamProvider
+ notifier *notifier.Notifier
}
// NewOutputTypingEventConsumer creates a new OutputTypingEventConsumer.
@@ -43,72 +46,59 @@ type OutputTypingEventConsumer struct {
func NewOutputTypingEventConsumer(
process *process.ProcessContext,
cfg *config.SyncAPI,
- kafkaConsumer sarama.Consumer,
+ js nats.JetStreamContext,
store storage.Database,
eduCache *cache.EDUCache,
notifier *notifier.Notifier,
stream types.StreamProvider,
) *OutputTypingEventConsumer {
-
- consumer := internal.ContinualConsumer{
- Process: process,
- ComponentName: "syncapi/eduserver/typing",
- Topic: string(cfg.Matrix.Kafka.TopicFor(config.TopicOutputTypingEvent)),
- Consumer: kafkaConsumer,
- PartitionStore: store,
- }
-
- s := &OutputTypingEventConsumer{
- typingConsumer: &consumer,
- eduCache: eduCache,
- notifier: notifier,
- stream: stream,
+ return &OutputTypingEventConsumer{
+ ctx: process.Context(),
+ jetstream: js,
+ topic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputTypingEvent),
+ eduCache: eduCache,
+ notifier: notifier,
+ stream: stream,
}
-
- consumer.ProcessMessage = s.onMessage
-
- return s
}
// Start consuming from EDU api
func (s *OutputTypingEventConsumer) Start() error {
- s.eduCache.SetTimeoutCallback(func(userID, roomID string, latestSyncPosition int64) {
- pos := types.StreamPosition(latestSyncPosition)
- s.stream.Advance(pos)
- s.notifier.OnNewTyping(roomID, types.StreamingToken{TypingPosition: pos})
- })
- return s.typingConsumer.Start()
+ _, err := s.jetstream.Subscribe(s.topic, s.onMessage)
+ return err
}
-func (s *OutputTypingEventConsumer) onMessage(msg *sarama.ConsumerMessage) error {
- var output api.OutputTypingEvent
- if err := json.Unmarshal(msg.Value, &output); err != nil {
- // If the message was invalid, log it and move on to the next message in the stream
- log.WithError(err).Errorf("EDU server output log: message parse failure")
- sentry.CaptureException(err)
- return nil
- }
+func (s *OutputTypingEventConsumer) onMessage(msg *nats.Msg) {
+ jetstream.WithJetStreamMessage(msg, func(msg *nats.Msg) bool {
+ var output api.OutputTypingEvent
+ if err := json.Unmarshal(msg.Data, &output); err != nil {
+ // If the message was invalid, log it and move on to the next message in the stream
+ log.WithError(err).Errorf("EDU server output log: message parse failure")
+ sentry.CaptureException(err)
+ return true
+ }
- log.WithFields(log.Fields{
- "room_id": output.Event.RoomID,
- "user_id": output.Event.UserID,
- "typing": output.Event.Typing,
- }).Debug("received data from EDU server")
+ log.WithFields(log.Fields{
+ "room_id": output.Event.RoomID,
+ "user_id": output.Event.UserID,
+ "typing": output.Event.Typing,
+ }).Debug("received data from EDU server")
- var typingPos types.StreamPosition
- typingEvent := output.Event
- if typingEvent.Typing {
- typingPos = types.StreamPosition(
- s.eduCache.AddTypingUser(typingEvent.UserID, typingEvent.RoomID, output.ExpireTime),
- )
- } else {
- typingPos = types.StreamPosition(
- s.eduCache.RemoveUser(typingEvent.UserID, typingEvent.RoomID),
- )
- }
+ var typingPos types.StreamPosition
+ typingEvent := output.Event
+ if typingEvent.Typing {
+ typingPos = types.StreamPosition(
+ s.eduCache.AddTypingUser(typingEvent.UserID, typingEvent.RoomID, output.ExpireTime),
+ )
+ } else {
+ typingPos = types.StreamPosition(
+ s.eduCache.RemoveUser(typingEvent.UserID, typingEvent.RoomID),
+ )
+ }
- s.stream.Advance(typingPos)
- s.notifier.OnNewTyping(output.Event.RoomID, types.StreamingToken{TypingPosition: typingPos})
+ s.stream.Advance(typingPos)
+ s.notifier.OnNewTyping(output.Event.RoomID, types.StreamingToken{TypingPosition: typingPos})
- return nil
+ return true
+ })
}
diff --git a/syncapi/consumers/keychange.go b/syncapi/consumers/keychange.go
index dfedc640..76b143d8 100644
--- a/syncapi/consumers/keychange.go
+++ b/syncapi/consumers/keychange.go
@@ -34,6 +34,7 @@ import (
// OutputKeyChangeEventConsumer consumes events that originated in the key server.
type OutputKeyChangeEventConsumer struct {
+ ctx context.Context
keyChangeConsumer *internal.ContinualConsumer
db storage.Database
notifier *notifier.Notifier
@@ -68,6 +69,7 @@ func NewOutputKeyChangeEventConsumer(
}
s := &OutputKeyChangeEventConsumer{
+ ctx: process.Context(),
keyChangeConsumer: &consumer,
db: store,
serverName: serverName,
@@ -125,10 +127,13 @@ func (s *OutputKeyChangeEventConsumer) onMessage(msg *sarama.ConsumerMessage) er
}
func (s *OutputKeyChangeEventConsumer) onDeviceKeyMessage(m api.DeviceMessage, offset int64, partition int32) error {
+ if m.DeviceKeys == nil {
+ return nil
+ }
output := m.DeviceKeys
// work out who we need to notify about the new key
var queryRes roomserverAPI.QuerySharedUsersResponse
- err := s.rsAPI.QuerySharedUsers(context.Background(), &roomserverAPI.QuerySharedUsersRequest{
+ err := s.rsAPI.QuerySharedUsers(s.ctx, &roomserverAPI.QuerySharedUsersRequest{
UserID: output.UserID,
}, &queryRes)
if err != nil {
@@ -155,7 +160,7 @@ func (s *OutputKeyChangeEventConsumer) onCrossSigningMessage(m api.DeviceMessage
output := m.CrossSigningKeyUpdate
// work out who we need to notify about the new key
var queryRes roomserverAPI.QuerySharedUsersResponse
- err := s.rsAPI.QuerySharedUsers(context.Background(), &roomserverAPI.QuerySharedUsersRequest{
+ err := s.rsAPI.QuerySharedUsers(s.ctx, &roomserverAPI.QuerySharedUsersRequest{
UserID: output.UserID,
}, &queryRes)
if err != nil {
diff --git a/syncapi/consumers/roomserver.go b/syncapi/consumers/roomserver.go
index 0d6f528a..5b008e3d 100644
--- a/syncapi/consumers/roomserver.go
+++ b/syncapi/consumers/roomserver.go
@@ -19,24 +19,26 @@ import (
"encoding/json"
"fmt"
- "github.com/Shopify/sarama"
"github.com/getsentry/sentry-go"
- "github.com/matrix-org/dendrite/internal"
"github.com/matrix-org/dendrite/roomserver/api"
"github.com/matrix-org/dendrite/setup/config"
+ "github.com/matrix-org/dendrite/setup/jetstream"
"github.com/matrix-org/dendrite/setup/process"
"github.com/matrix-org/dendrite/syncapi/notifier"
"github.com/matrix-org/dendrite/syncapi/storage"
"github.com/matrix-org/dendrite/syncapi/types"
"github.com/matrix-org/gomatrixserverlib"
+ "github.com/nats-io/nats.go"
log "github.com/sirupsen/logrus"
)
// OutputRoomEventConsumer consumes events that originated in the room server.
type OutputRoomEventConsumer struct {
+ ctx context.Context
cfg *config.SyncAPI
rsAPI api.RoomserverInternalAPI
- rsConsumer *internal.ContinualConsumer
+ jetstream nats.JetStreamContext
+ topic string
db storage.Database
pduStream types.StreamProvider
inviteStream types.StreamProvider
@@ -47,83 +49,83 @@ type OutputRoomEventConsumer struct {
func NewOutputRoomEventConsumer(
process *process.ProcessContext,
cfg *config.SyncAPI,
- kafkaConsumer sarama.Consumer,
+ js nats.JetStreamContext,
store storage.Database,
notifier *notifier.Notifier,
pduStream types.StreamProvider,
inviteStream types.StreamProvider,
rsAPI api.RoomserverInternalAPI,
) *OutputRoomEventConsumer {
-
- consumer := internal.ContinualConsumer{
- Process: process,
- ComponentName: "syncapi/roomserver",
- Topic: string(cfg.Matrix.Kafka.TopicFor(config.TopicOutputRoomEvent)),
- Consumer: kafkaConsumer,
- PartitionStore: store,
- }
- s := &OutputRoomEventConsumer{
+ return &OutputRoomEventConsumer{
+ ctx: process.Context(),
cfg: cfg,
- rsConsumer: &consumer,
+ jetstream: js,
+ topic: cfg.Matrix.JetStream.TopicFor(jetstream.OutputRoomEvent),
db: store,
notifier: notifier,
pduStream: pduStream,
inviteStream: inviteStream,
rsAPI: rsAPI,
}
- consumer.ProcessMessage = s.onMessage
-
- return s
}
// Start consuming from room servers
func (s *OutputRoomEventConsumer) Start() error {
- return s.rsConsumer.Start()
+ _, err := s.jetstream.Subscribe(s.topic, s.onMessage)
+ return err
}
// onMessage is called when the sync server receives a new event from the room server output log.
// It is not safe for this function to be called from multiple goroutines, or else the
// sync stream position may race and be incorrectly calculated.
-func (s *OutputRoomEventConsumer) onMessage(msg *sarama.ConsumerMessage) error {
- // Parse out the event JSON
- var output api.OutputEvent
- if err := json.Unmarshal(msg.Value, &output); err != nil {
- // If the message was invalid, log it and move on to the next message in the stream
- log.WithError(err).Errorf("roomserver output log: message parse failure")
- return nil
- }
+func (s *OutputRoomEventConsumer) onMessage(msg *nats.Msg) {
+ jetstream.WithJetStreamMessage(msg, func(msg *nats.Msg) bool {
+ // Parse out the event JSON
+ var err error
+ var output api.OutputEvent
+ if err = json.Unmarshal(msg.Data, &output); err != nil {
+ // If the message was invalid, log it and move on to the next message in the stream
+ log.WithError(err).Errorf("roomserver output log: message parse failure")
+ return true
+ }
- switch output.Type {
- case api.OutputTypeNewRoomEvent:
- // Ignore redaction events. We will add them to the database when they are
- // validated (when we receive OutputTypeRedactedEvent)
- event := output.NewRoomEvent.Event
- if event.Type() == gomatrixserverlib.MRoomRedaction && event.StateKey() == nil {
- // in the special case where the event redacts itself, just pass the message through because
- // we will never see the other part of the pair
- if event.Redacts() != event.EventID() {
- return nil
+ switch output.Type {
+ case api.OutputTypeNewRoomEvent:
+ // Ignore redaction events. We will add them to the database when they are
+ // validated (when we receive OutputTypeRedactedEvent)
+ event := output.NewRoomEvent.Event
+ if event.Type() == gomatrixserverlib.MRoomRedaction && event.StateKey() == nil {
+ // in the special case where the event redacts itself, just pass the message through because
+ // we will never see the other part of the pair
+ if event.Redacts() != event.EventID() {
+ return true
+ }
}
+ err = s.onNewRoomEvent(s.ctx, *output.NewRoomEvent)
+ case api.OutputTypeOldRoomEvent:
+ err = s.onOldRoomEvent(s.ctx, *output.OldRoomEvent)
+ case api.OutputTypeNewInviteEvent:
+ s.onNewInviteEvent(s.ctx, *output.NewInviteEvent)
+ case api.OutputTypeRetireInviteEvent:
+ s.onRetireInviteEvent(s.ctx, *output.RetireInviteEvent)
+ case api.OutputTypeNewPeek:
+ s.onNewPeek(s.ctx, *output.NewPeek)
+ case api.OutputTypeRetirePeek:
+ s.onRetirePeek(s.ctx, *output.RetirePeek)
+ case api.OutputTypeRedactedEvent:
+ err = s.onRedactEvent(s.ctx, *output.RedactedEvent)
+ default:
+ log.WithField("type", output.Type).Debug(
+ "roomserver output log: ignoring unknown output type",
+ )
}
- return s.onNewRoomEvent(context.TODO(), *output.NewRoomEvent)
- case api.OutputTypeOldRoomEvent:
- return s.onOldRoomEvent(context.TODO(), *output.OldRoomEvent)
- case api.OutputTypeNewInviteEvent:
- return s.onNewInviteEvent(context.TODO(), *output.NewInviteEvent)
- case api.OutputTypeRetireInviteEvent:
- return s.onRetireInviteEvent(context.TODO(), *output.RetireInviteEvent)
- case api.OutputTypeNewPeek:
- return s.onNewPeek(context.TODO(), *output.NewPeek)
- case api.OutputTypeRetirePeek:
- return s.onRetirePeek(context.TODO(), *output.RetirePeek)
- case api.OutputTypeRedactedEvent:
- return s.onRedactEvent(context.TODO(), *output.RedactedEvent)
- default:
- log.WithField("type", output.Type).Debug(
- "roomserver output log: ignoring unknown output type",
- )
- return nil
- }
+ if err != nil {
+ log.WithError(err).Error("roomserver output log: failed to process event")
+ return false
+ }
+
+ return true
+ })
}
func (s *OutputRoomEventConsumer) onRedactEvent(
@@ -275,12 +277,12 @@ func (s *OutputRoomEventConsumer) notifyJoinedPeeks(ctx context.Context, ev *gom
func (s *OutputRoomEventConsumer) onNewInviteEvent(
ctx context.Context, msg api.OutputNewInviteEvent,
-) error {
+) {
if msg.Event.StateKey() == nil {
log.WithFields(log.Fields{
"event": string(msg.Event.JSON()),
}).Panicf("roomserver output log: invite has no state key")
- return nil
+ return
}
pduPos, err := s.db.AddInviteEvent(ctx, msg.Event)
if err != nil {
@@ -292,18 +294,16 @@ func (s *OutputRoomEventConsumer) onNewInviteEvent(
"pdupos": pduPos,
log.ErrorKey: err,
}).Panicf("roomserver output log: write invite failure")
- return nil
+ return
}
s.inviteStream.Advance(pduPos)
s.notifier.OnNewInvite(types.StreamingToken{InvitePosition: pduPos}, *msg.Event.StateKey())
-
- return nil
}
func (s *OutputRoomEventConsumer) onRetireInviteEvent(
ctx context.Context, msg api.OutputRetireInviteEvent,
-) error {
+) {
pduPos, err := s.db.RetireInviteEvent(ctx, msg.EventID)
if err != nil {
sentry.CaptureException(err)
@@ -312,19 +312,17 @@ func (s *OutputRoomEventConsumer) onRetireInviteEvent(
"event_id": msg.EventID,
log.ErrorKey: err,
}).Panicf("roomserver output log: remove invite failure")
- return nil
+ return
}
// Notify any active sync requests that the invite has been retired.
s.inviteStream.Advance(pduPos)
s.notifier.OnNewInvite(types.StreamingToken{InvitePosition: pduPos}, msg.TargetUserID)
-
- return nil
}
func (s *OutputRoomEventConsumer) onNewPeek(
ctx context.Context, msg api.OutputNewPeek,
-) error {
+) {
sp, err := s.db.AddPeek(ctx, msg.RoomID, msg.UserID, msg.DeviceID)
if err != nil {
sentry.CaptureException(err)
@@ -332,7 +330,7 @@ func (s *OutputRoomEventConsumer) onNewPeek(
log.WithFields(log.Fields{
log.ErrorKey: err,
}).Panicf("roomserver output log: write peek failure")
- return nil
+ return
}
// tell the notifier about the new peek so it knows to wake up new devices
@@ -340,20 +338,18 @@ func (s *OutputRoomEventConsumer) onNewPeek(
// index as PDUs, but we should fix this
s.pduStream.Advance(sp)
s.notifier.OnNewPeek(msg.RoomID, msg.UserID, msg.DeviceID, types.StreamingToken{PDUPosition: sp})
-
- return nil
}
func (s *OutputRoomEventConsumer) onRetirePeek(
ctx context.Context, msg api.OutputRetirePeek,
-) error {
+) {
sp, err := s.db.DeletePeek(ctx, msg.RoomID, msg.UserID, msg.DeviceID)
if err != nil {
// panic rather than continue with an inconsistent database
log.WithFields(log.Fields{
log.ErrorKey: err,
}).Panicf("roomserver output log: write peek failure")
- return nil
+ return
}
// tell the notifier about the new peek so it knows to wake up new devices
@@ -361,8 +357,6 @@ func (s *OutputRoomEventConsumer) onRetirePeek(
// index as PDUs, but we should fix this
s.pduStream.Advance(sp)
s.notifier.OnRetirePeek(msg.RoomID, msg.UserID, msg.DeviceID, types.StreamingToken{PDUPosition: sp})
-
- return nil
}
func (s *OutputRoomEventConsumer) updateStateEvent(event *gomatrixserverlib.HeaderedEvent) (*gomatrixserverlib.HeaderedEvent, error) {
diff --git a/syncapi/notifier/notifier_test.go b/syncapi/notifier/notifier_test.go
index 1401fc67..c6d3df7e 100644
--- a/syncapi/notifier/notifier_test.go
+++ b/syncapi/notifier/notifier_test.go
@@ -127,7 +127,7 @@ func TestNewEventAndJoinedToRoom(t *testing.T) {
go func() {
pos, err := waitForEvents(n, newTestSyncRequest(bob, bobDev, syncPositionBefore))
if err != nil {
- t.Errorf("TestNewEventAndJoinedToRoom error: %w", err)
+ t.Errorf("TestNewEventAndJoinedToRoom error: %s", err)
}
mustEqualPositions(t, pos, syncPositionAfter)
wg.Done()
@@ -190,7 +190,7 @@ func TestNewInviteEventForUser(t *testing.T) {
go func() {
pos, err := waitForEvents(n, newTestSyncRequest(bob, bobDev, syncPositionBefore))
if err != nil {
- t.Errorf("TestNewInviteEventForUser error: %w", err)
+ t.Errorf("TestNewInviteEventForUser error: %s", err)
}
mustEqualPositions(t, pos, syncPositionAfter)
wg.Done()
@@ -246,7 +246,7 @@ func TestMultipleRequestWakeup(t *testing.T) {
poll := func() {
pos, err := waitForEvents(n, newTestSyncRequest(bob, bobDev, syncPositionBefore))
if err != nil {
- t.Errorf("TestMultipleRequestWakeup error: %w", err)
+ t.Errorf("TestMultipleRequestWakeup error: %s", err)
}
mustEqualPositions(t, pos, syncPositionAfter)
wg.Done()
@@ -284,7 +284,7 @@ func TestNewEventAndWasPreviouslyJoinedToRoom(t *testing.T) {
go func() {
pos, err := waitForEvents(n, newTestSyncRequest(bob, bobDev, syncPositionBefore))
if err != nil {
- t.Errorf("TestNewEventAndWasPreviouslyJoinedToRoom error: %w", err)
+ t.Errorf("TestNewEventAndWasPreviouslyJoinedToRoom error: %s", err)
}
mustEqualPositions(t, pos, syncPositionAfter)
leaveWG.Done()
@@ -301,7 +301,7 @@ func TestNewEventAndWasPreviouslyJoinedToRoom(t *testing.T) {
go func() {
pos, err := waitForEvents(n, newTestSyncRequest(alice, aliceDev, syncPositionAfter))
if err != nil {
- t.Errorf("TestNewEventAndWasPreviouslyJoinedToRoom error: %w", err)
+ t.Errorf("TestNewEventAndWasPreviouslyJoinedToRoom error: %s", err)
}
mustEqualPositions(t, pos, syncPositionAfter2)
aliceWG.Done()
diff --git a/syncapi/syncapi.go b/syncapi/syncapi.go
index 84c7140c..39bc233a 100644
--- a/syncapi/syncapi.go
+++ b/syncapi/syncapi.go
@@ -24,7 +24,7 @@ import (
keyapi "github.com/matrix-org/dendrite/keyserver/api"
"github.com/matrix-org/dendrite/roomserver/api"
"github.com/matrix-org/dendrite/setup/config"
- "github.com/matrix-org/dendrite/setup/kafka"
+ "github.com/matrix-org/dendrite/setup/jetstream"
"github.com/matrix-org/dendrite/setup/process"
userapi "github.com/matrix-org/dendrite/userapi/api"
"github.com/matrix-org/gomatrixserverlib"
@@ -48,7 +48,7 @@ func AddPublicRoutes(
federation *gomatrixserverlib.FederationClient,
cfg *config.SyncAPI,
) {
- consumer, _ := kafka.SetupConsumerProducer(&cfg.Matrix.Kafka)
+ js, consumer, _ := jetstream.Prepare(&cfg.Matrix.JetStream)
syncDB, err := storage.NewSyncServerDatasource(&cfg.Database)
if err != nil {
@@ -65,15 +65,16 @@ func AddPublicRoutes(
requestPool := sync.NewRequestPool(syncDB, cfg, userAPI, keyAPI, rsAPI, streams, notifier)
keyChangeConsumer := consumers.NewOutputKeyChangeEventConsumer(
- process, cfg.Matrix.ServerName, string(cfg.Matrix.Kafka.TopicFor(config.TopicOutputKeyChangeEvent)),
- consumer, keyAPI, rsAPI, syncDB, notifier, streams.DeviceListStreamProvider,
+ process, cfg.Matrix.ServerName, cfg.Matrix.JetStream.TopicFor(jetstream.OutputKeyChangeEvent),
+ consumer, keyAPI, rsAPI, syncDB, notifier,
+ streams.DeviceListStreamProvider,
)
if err = keyChangeConsumer.Start(); err != nil {
logrus.WithError(err).Panicf("failed to start key change consumer")
}
roomConsumer := consumers.NewOutputRoomEventConsumer(
- process, cfg, consumer, syncDB, notifier, streams.PDUStreamProvider,
+ process, cfg, js, syncDB, notifier, streams.PDUStreamProvider,
streams.InviteStreamProvider, rsAPI,
)
if err = roomConsumer.Start(); err != nil {
@@ -81,28 +82,28 @@ func AddPublicRoutes(
}
clientConsumer := consumers.NewOutputClientDataConsumer(
- process, cfg, consumer, syncDB, notifier, streams.AccountDataStreamProvider,
+ process, cfg, js, syncDB, notifier, streams.AccountDataStreamProvider,
)
if err = clientConsumer.Start(); err != nil {
logrus.WithError(err).Panicf("failed to start client data consumer")
}
typingConsumer := consumers.NewOutputTypingEventConsumer(
- process, cfg, consumer, syncDB, eduCache, notifier, streams.TypingStreamProvider,
+ process, cfg, js, syncDB, eduCache, notifier, streams.TypingStreamProvider,
)
if err = typingConsumer.Start(); err != nil {
logrus.WithError(err).Panicf("failed to start typing consumer")
}
sendToDeviceConsumer := consumers.NewOutputSendToDeviceEventConsumer(
- process, cfg, consumer, syncDB, notifier, streams.SendToDeviceStreamProvider,
+ process, cfg, js, syncDB, notifier, streams.SendToDeviceStreamProvider,
)
if err = sendToDeviceConsumer.Start(); err != nil {
logrus.WithError(err).Panicf("failed to start send-to-device consumer")
}
receiptConsumer := consumers.NewOutputReceiptEventConsumer(
- process, cfg, consumer, syncDB, notifier, streams.ReceiptStreamProvider,
+ process, cfg, js, syncDB, notifier, streams.ReceiptStreamProvider,
)
if err = receiptConsumer.Start(); err != nil {
logrus.WithError(err).Panicf("failed to start receipts consumer")
diff --git a/userapi/storage/accounts/interface.go b/userapi/storage/accounts/interface.go
index 7af2f15f..f03b3774 100644
--- a/userapi/storage/accounts/interface.go
+++ b/userapi/storage/accounts/interface.go
@@ -20,12 +20,10 @@ import (
"errors"
"github.com/matrix-org/dendrite/clientapi/auth/authtypes"
- "github.com/matrix-org/dendrite/internal"
"github.com/matrix-org/dendrite/userapi/api"
)
type Database interface {
- internal.PartitionStorer
GetAccountByPassword(ctx context.Context, localpart, plaintextPassword string) (*api.Account, error)
GetProfileByLocalpart(ctx context.Context, localpart string) (*authtypes.Profile, error)
SetPassword(ctx context.Context, localpart string, plaintextPassword string) error