The server, yes all of it (well, except for auth)
This commit is contained in:
parent
48727833ca
commit
55ee8de9a0
41 changed files with 11534 additions and 173 deletions
7
.gitignore
vendored
|
@ -1,14 +1,7 @@
|
|||
node_modules
|
||||
.DS_Store
|
||||
dist
|
||||
dist-ssr
|
||||
*.local
|
||||
/certificates/
|
||||
|
||||
/.devenv/
|
||||
/.direnv/
|
||||
|
||||
# Serwist
|
||||
public/sw*
|
||||
public/swe-worker*
|
||||
|
||||
|
|
|
@ -3,7 +3,7 @@ Yes, another rewrite was needed. Again.
|
|||
|
||||
## TODO
|
||||
- Server
|
||||
- Yes all of it
|
||||
- Auth
|
||||
- Client
|
||||
- Overview
|
||||
- Graph
|
||||
|
|
2
backend/.gitignore
vendored
Normal file
|
@ -0,0 +1,2 @@
|
|||
target/
|
||||
data/
|
1719
backend/Cargo.lock
generated
Normal file
File diff suppressed because it is too large
|
@ -4,3 +4,11 @@ version = "0.1.0"
|
|||
edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
futures-util = "0.3.31"
|
||||
rocksdb = "0.22.0"
|
||||
tokio = { version = "1.44.1", features = ["full"] }
|
||||
warp = "0.3.7"
|
||||
yrs = { version = "0.19.2", features = ["sync"] }
|
||||
yrs-kvstore = "0.3.0"
|
||||
yrs-rocksdb = "0.3.0"
|
||||
yrs-warp = { path = "./yrs-warp" }
|
||||
|
|
|
@ -1,3 +1,137 @@
|
|||
fn main() {
|
||||
println!("Hello, world!");
|
||||
use futures_util::StreamExt;
|
||||
use rocksdb::TransactionDB;
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
sync::{Arc, Weak},
|
||||
};
|
||||
use tokio::sync::{Mutex, RwLock};
|
||||
use warp::{
|
||||
ws::{WebSocket, Ws},
|
||||
Filter, Rejection, Reply,
|
||||
};
|
||||
use yrs::{sync::Awareness, Doc, Transact};
|
||||
use yrs_kvstore::DocOps;
|
||||
use yrs_rocksdb::RocksDBStore;
|
||||
use yrs_warp::{
|
||||
broadcast::BroadcastGroup,
|
||||
ws::{WarpSink, WarpStream},
|
||||
};
|
||||
|
||||
const DATA_DIR: &str = "data";
|
||||
const STATIC_DIR: &str = "../frontend/dist";
|
||||
const INDEX_HTML: &str = "../frontend/dist/index.html";
|
||||
|
||||
// Web socket flow:
|
||||
// -> Handle connection (ws_handler)
|
||||
// -> Check permissions (Not doing for now)
|
||||
// -> Get or load document
|
||||
// -> Subscribe the connection to the document's broadcast group
|
||||
//
|
||||
//
|
||||
|
||||
struct Connection {
|
||||
bcast: BroadcastGroup,
|
||||
// This is purely here to keep the connection to the DB alive while there is at least one
|
||||
// connection.
|
||||
_db_subscription: Arc<dyn Drop + Send + Sync>,
|
||||
}
|
||||
|
||||
struct Server {
|
||||
// There is something to be said for keeping the broadcast group in memory for a bit after all
|
||||
// clients disconnect, but for now we don't bother.
|
||||
pub open_docs: RwLock<HashMap<String, Weak<Connection>>>,
|
||||
pub db: Arc<TransactionDB>,
|
||||
}
|
||||
|
||||
impl Server {
|
||||
pub fn new(db: TransactionDB) -> Self {
|
||||
Self {
|
||||
open_docs: RwLock::default(),
|
||||
db: Arc::new(db),
|
||||
}
|
||||
}
|
||||
pub async fn get_or_create_doc(&self, name: String) -> Arc<Connection> {
|
||||
let open_docs = self.open_docs.read().await;
|
||||
match open_docs.get(&name).and_then(Weak::upgrade) {
|
||||
Some(group) => group.clone(),
|
||||
None => {
|
||||
drop(open_docs);
|
||||
let mut open_docs = self.open_docs.write().await;
|
||||
|
||||
let doc = Doc::new();
|
||||
|
||||
// Subscribe the DB to updates
|
||||
let sub = {
|
||||
let db = self.db.clone();
|
||||
let name = name.clone();
|
||||
doc.observe_update_v1(move |_, e| {
|
||||
let txn = RocksDBStore::from(db.transaction());
|
||||
let i = txn.push_update(&name, &e.update).unwrap();
|
||||
if i % 128 == 0 {
|
||||
// compact updates into document
|
||||
txn.flush_doc(&name).unwrap();
|
||||
}
|
||||
txn.commit().unwrap();
|
||||
})
|
||||
.unwrap()
|
||||
};
|
||||
{
|
||||
// Load document from DB
|
||||
let mut txn = doc.transact_mut();
|
||||
let db_txn = RocksDBStore::from(self.db.transaction());
|
||||
db_txn.load_doc(&name, &mut txn).unwrap();
|
||||
}
|
||||
|
||||
let awareness = Arc::new(RwLock::new(Awareness::new(doc)));
|
||||
let group = BroadcastGroup::new(awareness, 32).await;
|
||||
let connection = Arc::new(Connection {
|
||||
bcast: group,
|
||||
_db_subscription: sub,
|
||||
});
|
||||
|
||||
open_docs.insert(name, Arc::downgrade(&connection));
|
||||
|
||||
connection
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
let db = TransactionDB::open_default(DATA_DIR).expect("Failed to open DB");
|
||||
let server = Arc::new(Server::new(db));
|
||||
|
||||
let ws = warp::path("sync")
|
||||
.and(warp::path::param())
|
||||
.and(warp::ws())
|
||||
.and(warp::any().map(move || server.clone()))
|
||||
.and_then(ws_handler);
|
||||
|
||||
let static_files = warp::fs::dir(STATIC_DIR);
|
||||
|
||||
let index = warp::fs::file(INDEX_HTML);
|
||||
|
||||
let routes = ws.or(static_files).or(index);
|
||||
|
||||
warp::serve(routes).run(([0, 0, 0, 0], 9000)).await;
|
||||
}
|
||||
|
||||
async fn ws_handler(name: String, ws: Ws, server: Arc<Server>) -> Result<impl Reply, Rejection> {
|
||||
// TODO: Check permissions before upgrading
|
||||
|
||||
println!("ws_handler: {}", name);
|
||||
let connection = server.get_or_create_doc(name).await;
|
||||
Ok(ws.on_upgrade(move |socket| peer(socket, connection)))
|
||||
}
|
||||
|
||||
async fn peer(ws: WebSocket, connection: Arc<Connection>) {
|
||||
let (sink, stream) = ws.split();
|
||||
let sink = Arc::new(Mutex::new(WarpSink::from(sink)));
|
||||
let stream = WarpStream::from(stream);
|
||||
let sub = connection.bcast.subscribe(sink, stream);
|
||||
match sub.completed().await {
|
||||
Ok(_) => println!("broadcasting for channel finished successfully"),
|
||||
Err(e) => eprintln!("broadcasting for channel finished abruptly: {}", e),
|
||||
}
|
||||
}
|
||||
|
|
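For context, here is a minimal client-side sketch (hypothetical, not part of this commit) of how a frontend could attach to the `/sync/<name>` route that `ws_handler` above serves on port 9000. It assumes the same `y-websocket` provider used in the yrs-warp examples further down; the document name `my-document` is made up for illustration.

```js
import * as Y from 'yjs'
import { WebsocketProvider } from 'y-websocket'

const doc = new Y.Doc()

// y-websocket appends the room name to the server URL, so this opens
// ws://localhost:9000/sync/my-document, which the warp route above
// passes to ws_handler as the `name` path parameter.
const provider = new WebsocketProvider('ws://localhost:9000/sync', 'my-document', doc, { disableBc: true })

provider.on('status', ({ status }) => console.log('sync status:', status))
```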
5
backend/yrs-warp/.gitignore
vendored
Normal file
|
@ -0,0 +1,5 @@
|
|||
/.idea
|
||||
/target
|
||||
/Cargo.lock/
|
||||
/**/*/node_modules
|
||||
/examples/*/frontend/dist
|
1315
backend/yrs-warp/Cargo.lock
generated
Normal file
File diff suppressed because it is too large
28
backend/yrs-warp/Cargo.toml
Normal file
|
@ -0,0 +1,28 @@
|
|||
[package]
|
||||
name = "yrs-warp"
|
||||
version = "0.8.0"
|
||||
edition = "2021"
|
||||
description = "Yrs synchronization protocol using Warp web sockets"
|
||||
license = "MIT"
|
||||
authors = ["Bartosz Sypytkowski <b.sypytkowski@gmail.com>"]
|
||||
keywords = ["crdt", "yrs", "warp"]
|
||||
homepage = "https://github.com/y-crdt/yrs-warp/"
|
||||
repository = "https://github.com/y-crdt/yrs-warp/"
|
||||
readme = "./README.md"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
yrs = { version = "0.19.2", features = ["sync"] }
|
||||
warp = "0.3"
|
||||
futures-util = { version = "0.3", features = ["sink"] }
|
||||
tokio = { version = "1.36", features = ["rt", "net", "sync", "macros"] }
|
||||
serde = { version = "1.0", features = ["derive", "rc"] }
|
||||
serde_json = "1.0"
|
||||
tracing = { version = "0.1", features = ["log"] }
|
||||
tokio-util = { version = "0.7.10", features = ["codec"] }
|
||||
|
||||
[dev-dependencies]
|
||||
tokio-tungstenite = "0.21"
|
||||
tokio = { version = "1", features = ["full"] }
|
||||
bytes = "1.6"
|
22
backend/yrs-warp/LICENSE
Normal file
|
@ -0,0 +1,22 @@
|
|||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2022
|
||||
- Bartosz Sypytkowski <b.sypytkowski@gmail.com>
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
111
backend/yrs-warp/README.md
Normal file
|
@ -0,0 +1,111 @@
|
|||
# Yrs web socket connections
|
||||
|
||||
This library is an extension over the [Yjs](https://yjs.dev)/[Yrs](https://github.com/y-crdt/y-crdt) Conflict-Free Replicated Data Type (CRDT) message exchange protocol. It provides utilities for connecting with the Yjs web socket provider using Rust tokio's [warp](https://github.com/seanmonstar/warp) web server.
|
||||
|
||||
### Demo
|
||||
|
||||
A working demo can be seen under [examples](./examples) subfolder. It integrates this library API with Code Mirror 6, enhancing it with collaborative rich text document editing capabilities.
|
||||
|
||||
### Example
|
||||
|
||||
In order to gossip updates between different web socket connections from clients collaborating over the same logical document, a broadcast group can be used:
|
||||
|
||||
```rust
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
// We're using a single static document shared among all the peers.
|
||||
let awareness = Arc::new(RwLock::new(Awareness::new(Doc::new())));
|
||||
|
||||
// open a broadcast group that listens to awareness and document updates
|
||||
// and has a pending message buffer of up to 32 updates
|
||||
let bcast = Arc::new(BroadcastGroup::new(awareness, 32).await);
|
||||
|
||||
let ws = warp::path("my-room")
|
||||
.and(warp::ws())
|
||||
.and(warp::any().map(move || bcast.clone()))
|
||||
.and_then(ws_handler);
|
||||
|
||||
warp::serve(ws).run(([0, 0, 0, 0], 8000)).await;
|
||||
}
|
||||
|
||||
async fn ws_handler(ws: Ws, bcast: Arc<BroadcastGroup>) -> Result<impl Reply, Rejection> {
|
||||
Ok(ws.on_upgrade(move |socket| peer(socket, bcast)))
|
||||
}
|
||||
|
||||
async fn peer(ws: WebSocket, bcast: Arc<BroadcastGroup>) {
|
||||
let (sink, stream) = ws.split();
|
||||
let sink = Arc::new(Mutex::new(WarpSink::from(sink)));
|
||||
let stream = WarpStream::from(stream);
|
||||
let sub = bcast.subscribe(sink, stream);
|
||||
match sub.completed().await {
|
||||
Ok(_) => println!("broadcasting for channel finished successfully"),
|
||||
Err(e) => eprintln!("broadcasting for channel finished abruptly: {}", e),
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Custom protocol extensions
|
||||
|
||||
The [y-sync](https://crates.io/crates/y-sync) protocol can be extended with custom messages, and yrs-warp supports this as well.
|
||||
This can be done by implementing your own protocol, e.g.:
|
||||
|
||||
```rust
|
||||
use y_sync::sync::Protocol;
|
||||
|
||||
struct EchoProtocol;
|
||||
impl Protocol for EchoProtocol {
|
||||
fn missing_handle(
|
||||
&self,
|
||||
awareness: &mut Awareness,
|
||||
tag: u8,
|
||||
data: Vec<u8>,
|
||||
) -> Result<Option<Message>, Error> {
|
||||
// all messages prefixed with tags unknown to y-sync protocol
|
||||
// will be echo-ed back to the sender
|
||||
Ok(Some(Message::Custom(tag, data)))
|
||||
}
|
||||
}
|
||||
|
||||
async fn peer(ws: WebSocket, awareness: AwarenessRef) {
|
||||
//.. later in code subscribe with custom protocol parameter
|
||||
let sub = bcast.subscribe_with(sink, stream, EchoProtocol);
|
||||
// .. rest of the code
|
||||
}
|
||||
```
|
||||
|
||||
## y-webrtc and signaling service
|
||||
|
||||
In addition to performing its role as a [y-websocket](https://docs.yjs.dev/ecosystem/connection-provider/y-websocket)
|
||||
server, `yrs-warp` also provides a signaling server implementation used by [y-webrtc](https://github.com/yjs/y-webrtc)
|
||||
clients to exchange information necessary to connect WebRTC peers together and make them subscribe/unsubscribe from specific rooms.
|
||||
|
||||
```rust
|
||||
use warp::{Filter, Rejection, Reply};
|
||||
use warp::ws::{Ws, WebSocket};
|
||||
use yrs_warp::signaling::{SignalingService, signaling_conn};
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
let signaling = SignalingService::new();
|
||||
let ws = warp::path("signaling")
|
||||
.and(warp::ws())
|
||||
.and(warp::any().map(move || signaling.clone()))
|
||||
.and_then(ws_handler);
|
||||
warp::serve(ws).run(([0, 0, 0, 0], 8000)).await;
|
||||
}
|
||||
async fn ws_handler(ws: Ws, svc: SignalingService) -> Result<impl Reply, Rejection> {
|
||||
Ok(ws.on_upgrade(move |socket| peer(socket, svc)))
|
||||
}
|
||||
async fn peer(ws: WebSocket, svc: SignalingService) {
|
||||
match signaling_conn(ws, svc).await {
|
||||
Ok(_) => println!("signaling connection stopped"),
|
||||
Err(e) => eprintln!("signaling connection failed: {}", e),
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
## Sponsors
|
||||
|
||||
[](https://nlnet.nl/)
|
20
backend/yrs-warp/examples/code-mirror/README.md
Normal file
|
@ -0,0 +1,20 @@
|
|||
|
||||
### Running an example
|
||||
|
||||
In order to generate static website content, you first need to build it. This can be done via npm.
|
||||
|
||||
```bash
|
||||
cd examples/code-mirror/frontend
|
||||
npm install
|
||||
npm run build
|
||||
```
|
||||
|
||||
These commands will install all dependencies and run [rollup.js](https://rollupjs.org/), which is used for bundling the JavaScript code and dependencies for Code Mirror.
|
||||
|
||||
Once the steps above are done, a `./frontend/dist` directory should appear. If so, all you need to do is run the following command from the *main git repository directory*:
|
||||
|
||||
```bash
|
||||
cargo run --example code-mirror
|
||||
```
|
||||
|
||||
It will run a local warp server with an index page at [http://localhost:8000](http://localhost:8000).
|
26
backend/yrs-warp/examples/code-mirror/frontend/index.html
Normal file
|
@ -0,0 +1,26 @@
|
|||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<meta charset="utf-8" />
|
||||
<title>Yjs CodeMirror6 Demo + Yrs warp websocket</title>
|
||||
<style>
|
||||
.cm-wrap { height: 300px; border: 1px solid #ddd}
|
||||
.cm-scroller { overflow: auto; }
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<button type="button" id="y-connect-btn">Disconnect</button>
|
||||
<p></p>
|
||||
<p>
|
||||
This is a demo of the <a href="https://github.com/yjs/yjs">Yjs</a> ⇔
|
||||
<a href="https://codemirror.net">CodeMirror</a> binding:
|
||||
<a href="https://github.com/yjs/y-codemirror.next">y-codemirror.next</a>.
|
||||
</p>
|
||||
<p>
|
||||
The content of this editor is shared with every client that visits this
|
||||
domain.
|
||||
</p>
|
||||
<div id=editor></div>
|
||||
<script type="text/javascript" src="../main.js" async></script>
|
||||
</body>
|
||||
</html>
|
64
backend/yrs-warp/examples/code-mirror/frontend/index.js
Normal file
|
@ -0,0 +1,64 @@
|
|||
import * as Y from 'yjs'
|
||||
import { yCollab } from 'y-codemirror.next'
|
||||
import { EditorView, basicSetup } from "codemirror";
|
||||
import { EditorState } from "@codemirror/state";
|
||||
import { javascript } from '@codemirror/lang-javascript'
|
||||
import {syntaxHighlighting, defaultHighlightStyle} from "@codemirror/language"
|
||||
import * as random from 'lib0/random'
|
||||
import {WebsocketProvider} from "y-websocket";
|
||||
|
||||
export const usercolors = [
|
||||
{ color: '#30bced', light: '#30bced33' },
|
||||
{ color: '#6eeb83', light: '#6eeb8333' },
|
||||
{ color: '#ffbc42', light: '#ffbc4233' },
|
||||
{ color: '#ecd444', light: '#ecd44433' },
|
||||
{ color: '#ee6352', light: '#ee635233' },
|
||||
{ color: '#9ac2c9', light: '#9ac2c933' },
|
||||
{ color: '#8acb88', light: '#8acb8833' },
|
||||
{ color: '#1be7ff', light: '#1be7ff33' }
|
||||
]
|
||||
|
||||
// select a random color for this user
|
||||
export const userColor = usercolors[random.uint32() % usercolors.length]
|
||||
|
||||
const doc = new Y.Doc()
|
||||
const ytext = doc.getText('codemirror')
|
||||
|
||||
const provider = new WebsocketProvider('ws://localhost:8000', 'my-room', doc, { disableBc: true })
|
||||
|
||||
const undoManager = new Y.UndoManager(ytext)
|
||||
|
||||
provider.awareness.setLocalStateField('user', {
|
||||
name: 'Anonymous ' + Math.floor(Math.random() * 100),
|
||||
color: userColor.color,
|
||||
colorLight: userColor.light
|
||||
})
|
||||
|
||||
|
||||
|
||||
const state = EditorState.create({
|
||||
doc: ytext.toString(),
|
||||
extensions: [
|
||||
javascript(),
|
||||
syntaxHighlighting(defaultHighlightStyle),
|
||||
yCollab(ytext, provider.awareness, { undoManager })
|
||||
]
|
||||
})
|
||||
|
||||
const view = new EditorView({ state, parent: document.querySelector('#editor') })
|
||||
|
||||
// toggle connection by clicking on connect-btn
|
||||
const toggleButton = document.getElementById('y-connect-btn')
|
||||
let connected = true
|
||||
|
||||
toggleButton.addEventListener('click', () => {
|
||||
if (connected) {
|
||||
provider.disconnect()
|
||||
connected = false
|
||||
toggleButton.innerText = 'Reconnect'
|
||||
} else {
|
||||
provider.connect()
|
||||
connected = true
|
||||
toggleButton.innerText = 'Disconnect'
|
||||
}
|
||||
})
|
3004
backend/yrs-warp/examples/code-mirror/frontend/package-lock.json
generated
Normal file
File diff suppressed because it is too large
33
backend/yrs-warp/examples/code-mirror/frontend/package.json
Normal file
|
@ -0,0 +1,33 @@
|
|||
{
|
||||
"name": "frontend",
|
||||
"version": "1.0.0",
|
||||
"description": "",
|
||||
"main": "index.js",
|
||||
"scripts": {
|
||||
"clean": "rm -rf ./dist",
|
||||
"build": "npm run clean && mkdir ./dist && rollup -c && cp ./index.html ./dist/index.html"
|
||||
},
|
||||
"keywords": [
|
||||
"yjs",
|
||||
"yrs",
|
||||
"codemirror"
|
||||
],
|
||||
"author": "Bartosz Sypytkowski <b.sypytkowski@gmail.com>",
|
||||
"license": "ISC",
|
||||
"dependencies": {
|
||||
"@codemirror/lang-javascript": "^6.0.2",
|
||||
"@codemirror/language": "github:codemirror/language",
|
||||
"@codemirror/state": "^6.1.1",
|
||||
"codemirror": "^6.0.1",
|
||||
"lib0": "^0.2.52",
|
||||
"y-codemirror.next": "^0.3.2",
|
||||
"y-websocket": "^1.4.4",
|
||||
"yjs": "^13.5.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"rollup": "^2.79.0",
|
||||
"@rollup/plugin-babel": "^5.3.1",
|
||||
"@rollup/plugin-commonjs": "^22.0.2",
|
||||
"@rollup/plugin-node-resolve": "^13.3.0"
|
||||
}
|
||||
}
|
|
@ -0,0 +1,21 @@
|
|||
import { babel } from '@rollup/plugin-babel';
|
||||
import { nodeResolve } from '@rollup/plugin-node-resolve';
|
||||
import commonjs from '@rollup/plugin-commonjs';
|
||||
|
||||
export default [
|
||||
{
|
||||
input: "index.js",
|
||||
output: {
|
||||
file: "dist/main.js",
|
||||
format: "iife",
|
||||
},
|
||||
plugins: [
|
||||
nodeResolve({ browser: true }),
|
||||
commonjs(),
|
||||
babel({
|
||||
exclude: 'node_modules/**',
|
||||
babelHelpers: 'bundled'
|
||||
})
|
||||
]
|
||||
}
|
||||
];
|
62
backend/yrs-warp/examples/code-mirror/main.rs
Normal file
|
@ -0,0 +1,62 @@
|
|||
use futures_util::StreamExt;
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::{Mutex, RwLock};
|
||||
use warp::ws::{WebSocket, Ws};
|
||||
use warp::{Filter, Rejection, Reply};
|
||||
use yrs::sync::Awareness;
|
||||
use yrs::{Doc, Text, Transact};
|
||||
use yrs_warp::broadcast::BroadcastGroup;
|
||||
use yrs_warp::ws::{WarpSink, WarpStream};
|
||||
use yrs_warp::AwarenessRef;
|
||||
|
||||
const STATIC_FILES_DIR: &str = "examples/code-mirror/frontend/dist";
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
// We're using a single static document shared among all the peers.
|
||||
let awareness: AwarenessRef = {
|
||||
let doc = Doc::new();
|
||||
{
|
||||
// pre-initialize code mirror document with some text
|
||||
let txt = doc.get_or_insert_text("codemirror");
|
||||
let mut txn = doc.transact_mut();
|
||||
txt.push(
|
||||
&mut txn,
|
||||
r#"function hello() {
|
||||
console.log('hello world');
|
||||
}"#,
|
||||
);
|
||||
}
|
||||
Arc::new(RwLock::new(Awareness::new(doc)))
|
||||
};
|
||||
|
||||
// open a broadcast group that listens to awareness and document updates
|
||||
// and has a pending message buffer of up to 32 updates
|
||||
let bcast = Arc::new(BroadcastGroup::new(awareness.clone(), 32).await);
|
||||
|
||||
let static_files = warp::get().and(warp::fs::dir(STATIC_FILES_DIR));
|
||||
|
||||
let ws = warp::path("my-room")
|
||||
.and(warp::ws())
|
||||
.and(warp::any().map(move || bcast.clone()))
|
||||
.and_then(ws_handler);
|
||||
|
||||
let routes = ws.or(static_files);
|
||||
|
||||
warp::serve(routes).run(([0, 0, 0, 0], 8000)).await;
|
||||
}
|
||||
|
||||
async fn ws_handler(ws: Ws, bcast: Arc<BroadcastGroup>) -> Result<impl Reply, Rejection> {
|
||||
Ok(ws.on_upgrade(move |socket| peer(socket, bcast)))
|
||||
}
|
||||
|
||||
async fn peer(ws: WebSocket, bcast: Arc<BroadcastGroup>) {
|
||||
let (sink, stream) = ws.split();
|
||||
let sink = Arc::new(Mutex::new(WarpSink::from(sink)));
|
||||
let stream = WarpStream::from(stream);
|
||||
let sub = bcast.subscribe(sink, stream);
|
||||
match sub.completed().await {
|
||||
Ok(_) => println!("broadcasting for channel finished successfully"),
|
||||
Err(e) => eprintln!("broadcasting for channel finished abruptly: {}", e),
|
||||
}
|
||||
}
|
20
backend/yrs-warp/examples/webrtc-signaling-server/README.md
Normal file
|
@ -0,0 +1,20 @@
|
|||
|
||||
### Running an example
|
||||
|
||||
In order to generate static website content, you first need to build it. This can be done via npm.
|
||||
|
||||
```bash
|
||||
cd examples/webrtc-signaling-server/frontend
|
||||
npm install
|
||||
npm run build
|
||||
```
|
||||
|
||||
These commands will install all dependencies and run [rollup.js](https://rollupjs.org/), which is used for bundling the JavaScript code and dependencies for Code Mirror.
|
||||
|
||||
Once the steps above are done, a `./frontend/dist` directory should appear. If so, all you need to do is run the following command from the *main git repository directory*:
|
||||
|
||||
```bash
|
||||
cargo run --example webrtc-signaling-server
|
||||
```
|
||||
|
||||
It will run a local warp server with an index page at [http://localhost:8000](http://localhost:8000).
|
|
@ -0,0 +1,26 @@
|
|||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<meta charset="utf-8" />
|
||||
<title>Yjs CodeMirror6 Demo + Yrs warp websocket</title>
|
||||
<style>
|
||||
.cm-wrap { height: 300px; border: 1px solid #ddd}
|
||||
.cm-scroller { overflow: auto; }
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<button type="button" id="y-connect-btn">Disconnect</button>
|
||||
<p></p>
|
||||
<p>
|
||||
This is a demo of the <a href="https://github.com/yjs/yjs">Yjs</a> ⇔
|
||||
<a href="https://codemirror.net">CodeMirror</a> binding:
|
||||
<a href="https://github.com/yjs/y-codemirror.next">y-codemirror.next</a>.
|
||||
</p>
|
||||
<p>
|
||||
The content of this editor is shared with every client that visits this
|
||||
domain.
|
||||
</p>
|
||||
<div id=editor></div>
|
||||
<script type="text/javascript" src="../main.js" async></script>
|
||||
</body>
|
||||
</html>
|
|
@ -0,0 +1,64 @@
|
|||
import * as Y from 'yjs'
|
||||
import { yCollab } from 'y-codemirror.next'
|
||||
import { EditorView, basicSetup } from "codemirror";
|
||||
import { EditorState } from "@codemirror/state";
|
||||
import { javascript } from '@codemirror/lang-javascript'
|
||||
import {syntaxHighlighting, defaultHighlightStyle} from "@codemirror/language"
|
||||
import * as random from 'lib0/random'
|
||||
import {WebrtcProvider} from "y-webrtc";
|
||||
|
||||
export const usercolors = [
|
||||
{ color: '#30bced', light: '#30bced33' },
|
||||
{ color: '#6eeb83', light: '#6eeb8333' },
|
||||
{ color: '#ffbc42', light: '#ffbc4233' },
|
||||
{ color: '#ecd444', light: '#ecd44433' },
|
||||
{ color: '#ee6352', light: '#ee635233' },
|
||||
{ color: '#9ac2c9', light: '#9ac2c933' },
|
||||
{ color: '#8acb88', light: '#8acb8833' },
|
||||
{ color: '#1be7ff', light: '#1be7ff33' }
|
||||
]
|
||||
|
||||
// select a random color for this user
|
||||
export const userColor = usercolors[random.uint32() % usercolors.length]
|
||||
|
||||
const doc = new Y.Doc()
|
||||
const ytext = doc.getText('codemirror')
|
||||
|
||||
const provider = new WebrtcProvider('sample', doc, { signaling: ['ws://localhost:8000/signaling'] })
|
||||
|
||||
const undoManager = new Y.UndoManager(ytext)
|
||||
|
||||
provider.awareness.setLocalStateField('user', {
|
||||
name: 'Anonymous ' + Math.floor(Math.random() * 100),
|
||||
color: userColor.color,
|
||||
colorLight: userColor.light
|
||||
})
|
||||
|
||||
|
||||
|
||||
const state = EditorState.create({
|
||||
doc: ytext.toString(),
|
||||
extensions: [
|
||||
javascript(),
|
||||
syntaxHighlighting(defaultHighlightStyle),
|
||||
yCollab(ytext, provider.awareness, { undoManager })
|
||||
]
|
||||
})
|
||||
|
||||
const view = new EditorView({ state, parent: document.querySelector('#editor') })
|
||||
|
||||
// toggle connection by clicking on connect-btn
|
||||
const toggleButton = document.getElementById('y-connect-btn')
|
||||
let connected = true
|
||||
|
||||
toggleButton.addEventListener('click', () => {
|
||||
if (connected) {
|
||||
provider.disconnect()
|
||||
connected = false
|
||||
toggleButton.innerText = 'Reconnect'
|
||||
} else {
|
||||
provider.connect()
|
||||
connected = true
|
||||
toggleButton.innerText = 'Disconnect'
|
||||
}
|
||||
})
|
2630
backend/yrs-warp/examples/webrtc-signaling-server/frontend/package-lock.json
generated
Normal file
File diff suppressed because it is too large
|
@ -0,0 +1,33 @@
|
|||
{
|
||||
"name": "frontend",
|
||||
"version": "1.0.0",
|
||||
"description": "",
|
||||
"main": "index.js",
|
||||
"scripts": {
|
||||
"clean": "rm -rf ./dist",
|
||||
"build": "npm run clean && mkdir ./dist && rollup -c && cp ./index.html ./dist/index.html"
|
||||
},
|
||||
"keywords": [
|
||||
"yjs",
|
||||
"yrs",
|
||||
"codemirror"
|
||||
],
|
||||
"author": "Bartosz Sypytkowski <b.sypytkowski@gmail.com>",
|
||||
"license": "ISC",
|
||||
"dependencies": {
|
||||
"@codemirror/lang-javascript": "^6.0.2",
|
||||
"@codemirror/language": "github:codemirror/language",
|
||||
"@codemirror/state": "^6.1.1",
|
||||
"codemirror": "^6.0.1",
|
||||
"lib0": "^0.2.52",
|
||||
"y-codemirror.next": "^0.3.2",
|
||||
"y-webrtc": "^10.2.4",
|
||||
"yjs": "^13.5.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"rollup": "^2.79.0",
|
||||
"@rollup/plugin-babel": "^5.3.1",
|
||||
"@rollup/plugin-commonjs": "^22.0.2",
|
||||
"@rollup/plugin-node-resolve": "^13.3.0"
|
||||
}
|
||||
}
|
|
@ -0,0 +1,21 @@
|
|||
import { babel } from '@rollup/plugin-babel';
|
||||
import { nodeResolve } from '@rollup/plugin-node-resolve';
|
||||
import commonjs from '@rollup/plugin-commonjs';
|
||||
|
||||
export default [
|
||||
{
|
||||
input: "index.js",
|
||||
output: {
|
||||
file: "dist/main.js",
|
||||
format: "iife",
|
||||
},
|
||||
plugins: [
|
||||
nodeResolve({ browser: true }),
|
||||
commonjs(),
|
||||
babel({
|
||||
exclude: 'node_modules/**',
|
||||
babelHelpers: 'bundled'
|
||||
})
|
||||
]
|
||||
}
|
||||
];
|
33
backend/yrs-warp/examples/webrtc-signaling-server/main.rs
Normal file
|
@ -0,0 +1,33 @@
|
|||
use warp::ws::{WebSocket, Ws};
|
||||
use warp::{Filter, Rejection, Reply};
|
||||
use yrs_warp::signaling::{signaling_conn, SignalingService};
|
||||
|
||||
const STATIC_FILES_DIR: &str = "examples/webrtc-signaling-server/frontend/dist";
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
let signaling = SignalingService::new();
|
||||
|
||||
let static_files = warp::get().and(warp::fs::dir(STATIC_FILES_DIR));
|
||||
|
||||
let ws = warp::path("signaling")
|
||||
.and(warp::ws())
|
||||
.and(warp::any().map(move || signaling.clone()))
|
||||
.and_then(ws_handler);
|
||||
|
||||
let routes = ws.or(static_files);
|
||||
|
||||
warp::serve(routes).run(([0, 0, 0, 0], 8000)).await;
|
||||
}
|
||||
|
||||
async fn ws_handler(ws: Ws, svc: SignalingService) -> Result<impl Reply, Rejection> {
|
||||
Ok(ws.on_upgrade(move |socket| peer(socket, svc)))
|
||||
}
|
||||
|
||||
async fn peer(ws: WebSocket, svc: SignalingService) {
|
||||
println!("new incoming signaling connection");
|
||||
match signaling_conn(ws, svc).await {
|
||||
Ok(_) => println!("signaling connection stopped"),
|
||||
Err(e) => eprintln!("signaling connection failed: {}", e),
|
||||
}
|
||||
}
|
376
backend/yrs-warp/src/broadcast.rs
Normal file
|
@ -0,0 +1,376 @@
|
|||
#![allow(dead_code)]
|
||||
use crate::AwarenessRef;
|
||||
use futures_util::{SinkExt, StreamExt};
|
||||
use std::sync::Arc;
|
||||
use tokio::select;
|
||||
use tokio::sync::broadcast::error::SendError;
|
||||
use tokio::sync::broadcast::{channel, Receiver, Sender};
|
||||
use tokio::sync::Mutex;
|
||||
use tokio::task::JoinHandle;
|
||||
use yrs::encoding::write::Write;
|
||||
use yrs::sync::protocol::{MSG_SYNC, MSG_SYNC_UPDATE};
|
||||
use yrs::sync::{DefaultProtocol, Error, Message, Protocol, SyncMessage};
|
||||
use yrs::updates::decoder::Decode;
|
||||
use yrs::updates::encoder::{Encode, Encoder, EncoderV1};
|
||||
use yrs::Update;
|
||||
|
||||
/// A broadcast group can be used to propagate updates produced by yrs [yrs::Doc] and [Awareness]
|
||||
/// structures in a binary form that conforms to a y-sync protocol.
|
||||
///
|
||||
/// New receivers can subscribe to a broadcasting group via [BroadcastGroup::subscribe] method.
|
||||
pub struct BroadcastGroup {
|
||||
awareness_sub: yrs::Subscription,
|
||||
doc_sub: yrs::Subscription,
|
||||
awareness_ref: AwarenessRef,
|
||||
sender: Sender<Vec<u8>>,
|
||||
receiver: Receiver<Vec<u8>>,
|
||||
awareness_updater: JoinHandle<()>,
|
||||
}
|
||||
|
||||
unsafe impl Send for BroadcastGroup {}
|
||||
unsafe impl Sync for BroadcastGroup {}
|
||||
|
||||
impl BroadcastGroup {
|
||||
/// Creates a new [BroadcastGroup] over a provided `awareness` instance. All changes triggered
|
||||
/// by this awareness structure or its underlying document will be propagated to all subscribers
|
||||
/// which have been registered via [BroadcastGroup::subscribe] method.
|
||||
///
|
||||
/// The overflow of incoming events that need to be propagated will be buffered up to the
|
||||
/// provided `buffer_capacity` size.
|
||||
pub async fn new(awareness: AwarenessRef, buffer_capacity: usize) -> Self {
|
||||
let (sender, receiver) = channel(buffer_capacity);
|
||||
let awareness_c = Arc::downgrade(&awareness);
|
||||
let mut lock = awareness.write().await;
|
||||
let sink = sender.clone();
|
||||
let doc_sub = {
|
||||
lock.doc_mut()
|
||||
.observe_update_v1(move |_txn, u| {
|
||||
// we manually construct msg here to avoid update data copying
|
||||
let mut encoder = EncoderV1::new();
|
||||
encoder.write_var(MSG_SYNC);
|
||||
encoder.write_var(MSG_SYNC_UPDATE);
|
||||
encoder.write_buf(&u.update);
|
||||
let msg = encoder.to_vec();
|
||||
if let Err(_e) = sink.send(msg) {
|
||||
// current broadcast group is being closed
|
||||
}
|
||||
})
|
||||
.unwrap()
|
||||
};
|
||||
let (tx, mut rx) = tokio::sync::mpsc::unbounded_channel();
|
||||
let sink = sender.clone();
|
||||
let awareness_sub = lock.on_update(move |_, e, _| {
|
||||
let added = e.added();
|
||||
let updated = e.updated();
|
||||
let removed = e.removed();
|
||||
let mut changed = Vec::with_capacity(added.len() + updated.len() + removed.len());
|
||||
changed.extend_from_slice(added);
|
||||
changed.extend_from_slice(updated);
|
||||
changed.extend_from_slice(removed);
|
||||
|
||||
if let Err(_) = tx.send(changed) {
|
||||
tracing::warn!("failed to send awareness update");
|
||||
}
|
||||
});
|
||||
drop(lock);
|
||||
let awareness_updater = tokio::task::spawn(async move {
|
||||
while let Some(changed_clients) = rx.recv().await {
|
||||
if let Some(awareness) = awareness_c.upgrade() {
|
||||
let awareness = awareness.read().await;
|
||||
match awareness.update_with_clients(changed_clients) {
|
||||
Ok(update) => {
|
||||
if let Err(_) = sink.send(Message::Awareness(update).encode_v1()) {
|
||||
tracing::warn!("couldn't broadcast awareness update");
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
tracing::warn!("error while computing awareness update: {}", e)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return;
|
||||
}
|
||||
}
|
||||
});
|
||||
BroadcastGroup {
|
||||
awareness_ref: awareness,
|
||||
awareness_updater,
|
||||
sender,
|
||||
receiver,
|
||||
awareness_sub,
|
||||
doc_sub,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns a reference to an underlying [Awareness] instance.
|
||||
pub fn awareness(&self) -> &AwarenessRef {
|
||||
&self.awareness_ref
|
||||
}
|
||||
|
||||
/// Broadcasts user message to all active subscribers. Returns error if message could not have
|
||||
/// been broadcasted.
|
||||
pub fn broadcast(&self, msg: Vec<u8>) -> Result<(), SendError<Vec<u8>>> {
|
||||
self.sender.send(msg)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Subscribes a new connection - represented by `sink`/`stream` pair implementing a futures
|
||||
/// Sink and Stream protocols - to a current broadcast group.
|
||||
///
|
||||
/// Returns a subscription structure, which can be dropped in order to unsubscribe or awaited
|
||||
/// via the [Subscription::completed] method until it completes of its own volition (due to
|
||||
/// an internal connection error or closed connection).
|
||||
pub fn subscribe<Sink, Stream, E>(&self, sink: Arc<Mutex<Sink>>, stream: Stream) -> Subscription
|
||||
where
|
||||
Sink: SinkExt<Vec<u8>> + Send + Sync + Unpin + 'static,
|
||||
Stream: StreamExt<Item = Result<Vec<u8>, E>> + Send + Sync + Unpin + 'static,
|
||||
<Sink as futures_util::Sink<Vec<u8>>>::Error: std::error::Error + Send + Sync,
|
||||
E: std::error::Error + Send + Sync + 'static,
|
||||
{
|
||||
self.subscribe_with(sink, stream, DefaultProtocol)
|
||||
}
|
||||
|
||||
/// Subscribes a new connection - represented by `sink`/`stream` pair implementing a futures
|
||||
/// Sink and Stream protocols - to a current broadcast group.
|
||||
///
|
||||
/// Returns a subscription structure, which can be dropped in order to unsubscribe or awaited
|
||||
/// via the [Subscription::completed] method until it completes of its own volition (due to
|
||||
/// an internal connection error or closed connection).
|
||||
///
|
||||
/// Unlike [BroadcastGroup::subscribe], this method can take a [Protocol] parameter that allows to
|
||||
/// customize the y-sync protocol behavior.
|
||||
pub fn subscribe_with<Sink, Stream, E, P>(
|
||||
&self,
|
||||
sink: Arc<Mutex<Sink>>,
|
||||
mut stream: Stream,
|
||||
protocol: P,
|
||||
) -> Subscription
|
||||
where
|
||||
Sink: SinkExt<Vec<u8>> + Send + Sync + Unpin + 'static,
|
||||
Stream: StreamExt<Item = Result<Vec<u8>, E>> + Send + Sync + Unpin + 'static,
|
||||
<Sink as futures_util::Sink<Vec<u8>>>::Error: std::error::Error + Send + Sync,
|
||||
E: std::error::Error + Send + Sync + 'static,
|
||||
P: Protocol + Send + Sync + 'static,
|
||||
{
|
||||
let sink_task = {
|
||||
let sink = sink.clone();
|
||||
let mut receiver = self.sender.subscribe();
|
||||
tokio::spawn(async move {
|
||||
while let Ok(msg) = receiver.recv().await {
|
||||
let mut sink = sink.lock().await;
|
||||
if let Err(e) = sink.send(msg).await {
|
||||
println!("broadcast failed to sent sync message");
|
||||
return Err(Error::Other(Box::new(e)));
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
})
|
||||
};
|
||||
let stream_task = {
|
||||
let awareness = self.awareness().clone();
|
||||
tokio::spawn(async move {
|
||||
while let Some(res) = stream.next().await {
|
||||
let msg = Message::decode_v1(&res.map_err(|e| Error::Other(Box::new(e)))?)?;
|
||||
let reply = Self::handle_msg(&protocol, &awareness, msg).await?;
|
||||
match reply {
|
||||
None => {}
|
||||
Some(reply) => {
|
||||
let mut sink = sink.lock().await;
|
||||
sink.send(reply.encode_v1())
|
||||
.await
|
||||
.map_err(|e| Error::Other(Box::new(e)))?;
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
})
|
||||
};
|
||||
|
||||
Subscription {
|
||||
sink_task,
|
||||
stream_task,
|
||||
}
|
||||
}
|
||||
|
||||
async fn handle_msg<P: Protocol>(
|
||||
protocol: &P,
|
||||
awareness: &AwarenessRef,
|
||||
msg: Message,
|
||||
) -> Result<Option<Message>, Error> {
|
||||
match msg {
|
||||
Message::Sync(msg) => match msg {
|
||||
SyncMessage::SyncStep1(state_vector) => {
|
||||
let awareness = awareness.read().await;
|
||||
protocol.handle_sync_step1(&*awareness, state_vector)
|
||||
}
|
||||
SyncMessage::SyncStep2(update) => {
|
||||
let mut awareness = awareness.write().await;
|
||||
let update = Update::decode_v1(&update)?;
|
||||
protocol.handle_sync_step2(&mut *awareness, update)
|
||||
}
|
||||
SyncMessage::Update(update) => {
|
||||
let mut awareness = awareness.write().await;
|
||||
let update = Update::decode_v1(&update)?;
|
||||
protocol.handle_sync_step2(&mut *awareness, update)
|
||||
}
|
||||
},
|
||||
Message::Auth(deny_reason) => {
|
||||
let awareness = awareness.read().await;
|
||||
protocol.handle_auth(&*awareness, deny_reason)
|
||||
}
|
||||
Message::AwarenessQuery => {
|
||||
let awareness = awareness.read().await;
|
||||
protocol.handle_awareness_query(&*awareness)
|
||||
}
|
||||
Message::Awareness(update) => {
|
||||
let mut awareness = awareness.write().await;
|
||||
protocol.handle_awareness_update(&mut *awareness, update)
|
||||
}
|
||||
Message::Custom(tag, data) => {
|
||||
let mut awareness = awareness.write().await;
|
||||
protocol.missing_handle(&mut *awareness, tag, data)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for BroadcastGroup {
|
||||
fn drop(&mut self) {
|
||||
self.awareness_updater.abort();
|
||||
}
|
||||
}
|
||||
|
||||
/// A subscription structure returned from [BroadcastGroup::subscribe], which represents a
|
||||
/// subscribed connection. It can be dropped in order to unsubscribe or awaited via
|
||||
/// the [Subscription::completed] method until it completes of its own volition (due to an internal
|
||||
/// connection error or closed connection).
|
||||
#[derive(Debug)]
|
||||
pub struct Subscription {
|
||||
sink_task: JoinHandle<Result<(), Error>>,
|
||||
stream_task: JoinHandle<Result<(), Error>>,
|
||||
}
|
||||
|
||||
impl Subscription {
|
||||
/// Consumes current subscription, waiting for it to complete. If an underlying connection was
|
||||
/// closed because of failure, an error which caused it to happen will be returned.
|
||||
///
|
||||
/// This method doesn't invoke close procedure. If you need that, drop current subscription instead.
|
||||
pub async fn completed(self) -> Result<(), Error> {
|
||||
let res = select! {
|
||||
r1 = self.sink_task => r1,
|
||||
r2 = self.stream_task => r2,
|
||||
};
|
||||
res.map_err(|e| Error::Other(e.into()))?
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use crate::broadcast::BroadcastGroup;
|
||||
use futures_util::{ready, SinkExt, StreamExt};
|
||||
use std::collections::HashMap;
|
||||
use std::pin::Pin;
|
||||
use std::sync::Arc;
|
||||
use std::task::{Context, Poll};
|
||||
use tokio::sync::{Mutex, RwLock};
|
||||
use tokio_util::sync::PollSender;
|
||||
use yrs::sync::awareness::AwarenessUpdateEntry;
|
||||
use yrs::sync::{Awareness, AwarenessUpdate, Error, Message, SyncMessage};
|
||||
use yrs::updates::decoder::Decode;
|
||||
use yrs::updates::encoder::Encode;
|
||||
use yrs::{Doc, StateVector, Text, Transact};
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct ReceiverStream<T> {
|
||||
inner: tokio::sync::mpsc::Receiver<T>,
|
||||
}
|
||||
|
||||
impl<T> ReceiverStream<T> {
|
||||
/// Create a new `ReceiverStream`.
|
||||
pub fn new(recv: tokio::sync::mpsc::Receiver<T>) -> Self {
|
||||
Self { inner: recv }
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> futures_util::Stream for ReceiverStream<T> {
|
||||
type Item = Result<T, Error>;
|
||||
|
||||
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
|
||||
match ready!(self.inner.poll_recv(cx)) {
|
||||
None => Poll::Ready(None),
|
||||
Some(v) => Poll::Ready(Some(Ok(v))),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn test_channel(capacity: usize) -> (PollSender<Vec<u8>>, ReceiverStream<Vec<u8>>) {
|
||||
let (s, r) = tokio::sync::mpsc::channel::<Vec<u8>>(capacity);
|
||||
let s = PollSender::new(s);
|
||||
let r = ReceiverStream::new(r);
|
||||
(s, r)
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn broadcast_changes() -> Result<(), Box<dyn std::error::Error>> {
|
||||
let doc = Doc::with_client_id(1);
|
||||
let text = doc.get_or_insert_text("test");
|
||||
let awareness = Arc::new(RwLock::new(Awareness::new(doc)));
|
||||
let group = BroadcastGroup::new(awareness.clone(), 1).await;
|
||||
|
||||
let (server_sender, mut client_receiver) = test_channel(1);
|
||||
let (mut client_sender, server_receiver) = test_channel(1);
|
||||
let _sub1 = group.subscribe(Arc::new(Mutex::new(server_sender)), server_receiver);
|
||||
|
||||
// check update propagation
|
||||
{
|
||||
let a = awareness.write().await;
|
||||
text.push(&mut a.doc().transact_mut(), "a");
|
||||
}
|
||||
let msg = client_receiver.next().await;
|
||||
let msg = msg.map(|x| Message::decode_v1(&x.unwrap()).unwrap());
|
||||
assert_eq!(
|
||||
msg,
|
||||
Some(Message::Sync(SyncMessage::Update(vec![
|
||||
1, 1, 1, 0, 4, 1, 4, 116, 101, 115, 116, 1, 97, 0,
|
||||
])))
|
||||
);
|
||||
|
||||
// check awareness update propagation
|
||||
{
|
||||
let mut a = awareness.write().await;
|
||||
a.set_local_state(r#"{"key":"value"}"#)
|
||||
}
|
||||
|
||||
let msg = client_receiver.next().await;
|
||||
let msg = msg.map(|x| Message::decode_v1(&x.unwrap()).unwrap());
|
||||
assert_eq!(
|
||||
msg,
|
||||
Some(Message::Awareness(AwarenessUpdate {
|
||||
clients: HashMap::from([(
|
||||
1,
|
||||
AwarenessUpdateEntry {
|
||||
clock: 1,
|
||||
json: r#"{"key":"value"}"#.to_string(),
|
||||
},
|
||||
)]),
|
||||
}))
|
||||
);
|
||||
|
||||
// check sync state request/response
|
||||
{
|
||||
client_sender
|
||||
.send(Message::Sync(SyncMessage::SyncStep1(StateVector::default())).encode_v1())
|
||||
.await?;
|
||||
let msg = client_receiver.next().await;
|
||||
let msg = msg.map(|x| Message::decode_v1(&x.unwrap()).unwrap());
|
||||
assert_eq!(
|
||||
msg,
|
||||
Some(Message::Sync(SyncMessage::SyncStep2(vec![
|
||||
1, 1, 1, 0, 4, 1, 4, 116, 101, 115, 116, 1, 97, 0,
|
||||
])))
|
||||
);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
530
backend/yrs-warp/src/conn.rs
Normal file
|
@ -0,0 +1,530 @@
|
|||
#![allow(dead_code)]
|
||||
use futures_util::sink::SinkExt;
|
||||
use futures_util::StreamExt;
|
||||
use std::future::Future;
|
||||
use std::marker::PhantomData;
|
||||
use std::pin::Pin;
|
||||
use std::sync::{Arc, Weak};
|
||||
use std::task::{Context, Poll};
|
||||
use tokio::spawn;
|
||||
use tokio::sync::{Mutex, RwLock};
|
||||
use tokio::task::JoinHandle;
|
||||
use yrs::encoding::read::Cursor;
|
||||
use yrs::sync::Awareness;
|
||||
use yrs::sync::{DefaultProtocol, Error, Message, MessageReader, Protocol, SyncMessage};
|
||||
use yrs::updates::decoder::{Decode, DecoderV1};
|
||||
use yrs::updates::encoder::{Encode, Encoder, EncoderV1};
|
||||
use yrs::Update;
|
||||
|
||||
/// Connection handler over a pair of message streams, which implements a Yjs/Yrs awareness and
|
||||
/// update exchange protocol.
|
||||
///
|
||||
/// This connection implements the Future pattern and can be awaited upon in order for a caller to
|
||||
/// recognize whether the underlying websocket connection finished gracefully or abruptly.
|
||||
#[derive(Debug)]
|
||||
pub struct Connection<Sink, Stream> {
|
||||
processing_loop: JoinHandle<Result<(), Error>>,
|
||||
awareness: Arc<RwLock<Awareness>>,
|
||||
inbox: Arc<Mutex<Sink>>,
|
||||
_stream: PhantomData<Stream>,
|
||||
}
|
||||
|
||||
impl<Sink, Stream, E> Connection<Sink, Stream>
|
||||
where
|
||||
Sink: SinkExt<Vec<u8>, Error = E> + Send + Sync + Unpin + 'static,
|
||||
E: Into<Error> + Send + Sync,
|
||||
{
|
||||
pub async fn send(&self, msg: Vec<u8>) -> Result<(), Error> {
|
||||
let mut inbox = self.inbox.lock().await;
|
||||
match inbox.send(msg).await {
|
||||
Ok(_) => Ok(()),
|
||||
Err(err) => Err(err.into()),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn close(self) -> Result<(), E> {
|
||||
let mut inbox = self.inbox.lock().await;
|
||||
inbox.close().await
|
||||
}
|
||||
|
||||
pub fn sink(&self) -> Weak<Mutex<Sink>> {
|
||||
Arc::downgrade(&self.inbox)
|
||||
}
|
||||
}
|
||||
|
||||
impl<Sink, Stream, E> Connection<Sink, Stream>
|
||||
where
|
||||
Stream: StreamExt<Item = Result<Vec<u8>, E>> + Send + Sync + Unpin + 'static,
|
||||
Sink: SinkExt<Vec<u8>, Error = E> + Send + Sync + Unpin + 'static,
|
||||
E: Into<Error> + Send + Sync,
|
||||
{
|
||||
/// Wraps incoming [WebSocket] connection and supplied [Awareness] accessor into a new
|
||||
/// connection handler capable of exchanging Yrs/Yjs messages.
|
||||
///
|
||||
/// While creation of new [WarpConn] always succeeds, a connection itself can possibly fail
|
||||
/// while processing incoming input/output. This can be detected by awaiting for returned
|
||||
/// [WarpConn] and handling the awaited result.
|
||||
pub fn new(awareness: Arc<RwLock<Awareness>>, sink: Sink, stream: Stream) -> Self {
|
||||
Self::with_protocol(awareness, sink, stream, DefaultProtocol)
|
||||
}
|
||||
|
||||
/// Returns an underlying [Awareness] structure, that contains client state of that connection.
|
||||
pub fn awareness(&self) -> &Arc<RwLock<Awareness>> {
|
||||
&self.awareness
|
||||
}
|
||||
|
||||
/// Wraps incoming [WebSocket] connection and supplied [Awareness] accessor into a new
|
||||
/// connection handler capable of exchanging Yrs/Yjs messages.
|
||||
///
|
||||
/// While creation of new [WarpConn] always succeeds, a connection itself can possibly fail
|
||||
/// while processing incoming input/output. This can be detected by awaiting for returned
|
||||
/// [WarpConn] and handling the awaited result.
|
||||
pub fn with_protocol<P>(
|
||||
awareness: Arc<RwLock<Awareness>>,
|
||||
sink: Sink,
|
||||
mut stream: Stream,
|
||||
protocol: P,
|
||||
) -> Self
|
||||
where
|
||||
P: Protocol + Send + Sync + 'static,
|
||||
{
|
||||
let sink = Arc::new(Mutex::new(sink));
|
||||
let inbox = sink.clone();
|
||||
let loop_sink = Arc::downgrade(&sink);
|
||||
let loop_awareness = Arc::downgrade(&awareness);
|
||||
let processing_loop: JoinHandle<Result<(), Error>> = spawn(async move {
|
||||
// at the beginning send SyncStep1 and AwarenessUpdate
|
||||
let payload = {
|
||||
let awareness = loop_awareness.upgrade().unwrap();
|
||||
let mut encoder = EncoderV1::new();
|
||||
let awareness = awareness.read().await;
|
||||
protocol.start(&awareness, &mut encoder)?;
|
||||
encoder.to_vec()
|
||||
};
|
||||
if !payload.is_empty() {
|
||||
if let Some(sink) = loop_sink.upgrade() {
|
||||
let mut s = sink.lock().await;
|
||||
if let Err(e) = s.send(payload).await {
|
||||
return Err(e.into());
|
||||
}
|
||||
} else {
|
||||
return Ok(()); // parent ConnHandler has been dropped
|
||||
}
|
||||
}
|
||||
|
||||
while let Some(input) = stream.next().await {
|
||||
match input {
|
||||
Ok(data) => {
|
||||
if let Some(mut sink) = loop_sink.upgrade() {
|
||||
if let Some(awareness) = loop_awareness.upgrade() {
|
||||
match Self::process(&protocol, &awareness, &mut sink, data).await {
|
||||
Ok(()) => { /* continue */ }
|
||||
Err(e) => {
|
||||
return Err(e);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return Ok(()); // parent ConnHandler has been dropped
|
||||
}
|
||||
} else {
|
||||
return Ok(()); // parent ConnHandler has been dropped
|
||||
}
|
||||
}
|
||||
Err(e) => return Err(e.into()),
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
});
|
||||
Connection {
|
||||
processing_loop,
|
||||
awareness,
|
||||
inbox,
|
||||
_stream: PhantomData::default(),
|
||||
}
|
||||
}
|
||||
|
||||
async fn process<P: Protocol>(
|
||||
protocol: &P,
|
||||
awareness: &Arc<RwLock<Awareness>>,
|
||||
sink: &mut Arc<Mutex<Sink>>,
|
||||
input: Vec<u8>,
|
||||
) -> Result<(), Error> {
|
||||
let mut decoder = DecoderV1::new(Cursor::new(&input));
|
||||
let reader = MessageReader::new(&mut decoder);
|
||||
for r in reader {
|
||||
let msg = r?;
|
||||
if let Some(reply) = handle_msg(protocol, &awareness, msg).await? {
|
||||
let mut sender = sink.lock().await;
|
||||
if let Err(e) = sender.send(reply.encode_v1()).await {
|
||||
println!("connection failed to send back the reply");
|
||||
return Err(e.into());
|
||||
} else {
|
||||
println!("connection send back the reply");
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl<Sink, Stream> Unpin for Connection<Sink, Stream> {}
|
||||
|
||||
impl<Sink, Stream> Future for Connection<Sink, Stream> {
|
||||
type Output = Result<(), Error>;
|
||||
|
||||
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
match Pin::new(&mut self.processing_loop).poll(cx) {
|
||||
Poll::Pending => Poll::Pending,
|
||||
Poll::Ready(Err(e)) => Poll::Ready(Err(Error::Other(e.into()))),
|
||||
Poll::Ready(Ok(r)) => Poll::Ready(r),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn handle_msg<P: Protocol>(
|
||||
protocol: &P,
|
||||
a: &Arc<RwLock<Awareness>>,
|
||||
msg: Message,
|
||||
) -> Result<Option<Message>, Error> {
|
||||
match msg {
|
||||
Message::Sync(msg) => match msg {
|
||||
SyncMessage::SyncStep1(sv) => {
|
||||
let awareness = a.read().await;
|
||||
protocol.handle_sync_step1(&awareness, sv)
|
||||
}
|
||||
SyncMessage::SyncStep2(update) => {
|
||||
let mut awareness = a.write().await;
|
||||
protocol.handle_sync_step2(&mut awareness, Update::decode_v1(&update)?)
|
||||
}
|
||||
SyncMessage::Update(update) => {
|
||||
let mut awareness = a.write().await;
|
||||
protocol.handle_update(&mut awareness, Update::decode_v1(&update)?)
|
||||
}
|
||||
},
|
||||
Message::Auth(reason) => {
|
||||
let awareness = a.read().await;
|
||||
protocol.handle_auth(&awareness, reason)
|
||||
}
|
||||
Message::AwarenessQuery => {
|
||||
let awareness = a.read().await;
|
||||
protocol.handle_awareness_query(&awareness)
|
||||
}
|
||||
Message::Awareness(update) => {
|
||||
let mut awareness = a.write().await;
|
||||
protocol.handle_awareness_update(&mut awareness, update)
|
||||
}
|
||||
Message::Custom(tag, data) => {
|
||||
let mut awareness = a.write().await;
|
||||
protocol.missing_handle(&mut awareness, tag, data)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use crate::broadcast::BroadcastGroup;
|
||||
use crate::conn::Connection;
|
||||
use bytes::{Bytes, BytesMut};
|
||||
use futures_util::SinkExt;
|
||||
use std::net::SocketAddr;
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use tokio::net::tcp::{OwnedReadHalf, OwnedWriteHalf};
|
||||
use tokio::net::{TcpListener, TcpSocket};
|
||||
use tokio::sync::{Mutex, Notify, RwLock};
|
||||
use tokio::task;
|
||||
use tokio::task::JoinHandle;
|
||||
use tokio::time::{sleep, timeout};
|
||||
use tokio_util::codec::{Decoder, Encoder, FramedRead, FramedWrite, LengthDelimitedCodec};
|
||||
use yrs::sync::{Awareness, Error, Message, SyncMessage};
|
||||
use yrs::updates::encoder::Encode;
|
||||
use yrs::{Doc, GetString, Subscription, Text, Transact};
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
struct YrsCodec(LengthDelimitedCodec);
|
||||
|
||||
impl Encoder<Vec<u8>> for YrsCodec {
|
||||
type Error = Error;
|
||||
|
||||
fn encode(&mut self, item: Vec<u8>, dst: &mut BytesMut) -> Result<(), Self::Error> {
|
||||
self.0.encode(Bytes::from(item), dst)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl Decoder for YrsCodec {
|
||||
type Item = Vec<u8>;
|
||||
type Error = Error;
|
||||
|
||||
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
|
||||
if let Some(bytes) = self.0.decode(src)? {
|
||||
Ok(Some(bytes.freeze().to_vec()))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type WrappedStream = FramedRead<OwnedReadHalf, YrsCodec>;
|
||||
type WrappedSink = FramedWrite<OwnedWriteHalf, YrsCodec>;
|
||||
|
||||
async fn start_server(
|
||||
addr: SocketAddr,
|
||||
bcast: BroadcastGroup,
|
||||
) -> Result<JoinHandle<()>, Box<dyn std::error::Error>> {
|
||||
let server = TcpListener::bind(addr).await?;
|
||||
Ok(tokio::spawn(async move {
|
||||
let mut subscribers = Vec::new();
|
||||
while let Ok((stream, _)) = server.accept().await {
|
||||
let (reader, writer) = stream.into_split();
|
||||
let stream = WrappedStream::new(reader, YrsCodec::default());
|
||||
let sink = WrappedSink::new(writer, YrsCodec::default());
|
||||
let sub = bcast.subscribe(Arc::new(Mutex::new(sink)), stream);
|
||||
subscribers.push(sub);
|
||||
}
|
||||
}))
|
||||
}
|
||||
|
||||
async fn client(
|
||||
addr: SocketAddr,
|
||||
doc: Doc,
|
||||
) -> Result<Connection<WrappedSink, WrappedStream>, Box<dyn std::error::Error>> {
|
||||
let stream = TcpSocket::new_v4()?.connect(addr).await?;
|
||||
let (reader, writer) = stream.into_split();
|
||||
let stream: WrappedStream = WrappedStream::new(reader, YrsCodec::default());
|
||||
let sink: WrappedSink = WrappedSink::new(writer, YrsCodec::default());
|
||||
Ok(Connection::new(
|
||||
Arc::new(RwLock::new(Awareness::new(doc))),
|
||||
sink,
|
||||
stream,
|
||||
))
|
||||
}
|
||||
|
||||
fn create_notifier(doc: &Doc) -> (Arc<Notify>, Subscription) {
|
||||
let n = Arc::new(Notify::new());
|
||||
let sub = {
|
||||
let n = n.clone();
|
||||
doc.observe_update_v1(move |_, _| n.notify_waiters())
|
||||
.unwrap()
|
||||
};
|
||||
(n, sub)
|
||||
}
|
||||
|
||||
const TIMEOUT: Duration = Duration::from_secs(5);
|
||||
|
||||
#[tokio::test]
|
||||
async fn change_introduced_by_server_reaches_subscribed_clients(
|
||||
) -> Result<(), Box<dyn std::error::Error>> {
|
||||
let server_addr = SocketAddr::from_str("127.0.0.1:6600").unwrap();
|
||||
let doc = Doc::with_client_id(1);
|
||||
let text = doc.get_or_insert_text("test");
|
||||
let awareness = Arc::new(RwLock::new(Awareness::new(doc)));
|
||||
let bcast = BroadcastGroup::new(awareness.clone(), 10).await;
|
||||
let _server = start_server(server_addr.clone(), bcast).await?;
|
||||
|
||||
let doc = Doc::new();
|
||||
let (n, _sub) = create_notifier(&doc);
|
||||
let c1 = client(server_addr.clone(), doc).await?;
|
||||
|
||||
{
|
||||
let lock = awareness.write().await;
|
||||
text.push(&mut lock.doc().transact_mut(), "abc");
|
||||
}
|
||||
|
||||
timeout(TIMEOUT, n.notified()).await?;
|
||||
|
||||
{
|
||||
let awareness = c1.awareness().read().await;
|
||||
let doc = awareness.doc();
|
||||
let text = doc.get_or_insert_text("test");
|
||||
let str = text.get_string(&doc.transact());
|
||||
assert_eq!(str, "abc".to_string());
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn subscribed_client_fetches_initial_state() -> Result<(), Box<dyn std::error::Error>> {
|
||||
let server_addr = SocketAddr::from_str("127.0.0.1:6601").unwrap();
|
||||
let doc = Doc::with_client_id(1);
|
||||
let text = doc.get_or_insert_text("test");
|
||||
|
||||
text.push(&mut doc.transact_mut(), "abc");
|
||||
|
||||
let awareness = Arc::new(RwLock::new(Awareness::new(doc)));
|
||||
let bcast = BroadcastGroup::new(awareness.clone(), 10).await;
|
||||
let _server = start_server(server_addr.clone(), bcast).await?;
|
||||
|
||||
let doc = Doc::new();
|
||||
let (n, _sub) = create_notifier(&doc);
|
||||
let c1 = client(server_addr.clone(), doc).await?;
|
||||
|
||||
timeout(TIMEOUT, n.notified()).await?;
|
||||
|
||||
{
|
||||
let awareness = c1.awareness().read().await;
|
||||
let doc = awareness.doc();
|
||||
let text = doc.get_or_insert_text("test");
|
||||
let str = text.get_string(&doc.transact());
|
||||
assert_eq!(str, "abc".to_string());
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn changes_from_one_client_reach_others() -> Result<(), Box<dyn std::error::Error>> {
|
||||
let server_addr = SocketAddr::from_str("127.0.0.1:6602").unwrap();
|
||||
let doc = Doc::with_client_id(1);
|
||||
let _text = doc.get_or_insert_text("test");
|
||||
|
||||
let awareness = Arc::new(RwLock::new(Awareness::new(doc)));
|
||||
let bcast = BroadcastGroup::new(awareness.clone(), 10).await;
|
||||
let _server = start_server(server_addr.clone(), bcast).await?;
|
||||
|
||||
let d1 = Doc::with_client_id(2);
|
||||
let c1 = client(server_addr.clone(), d1).await?;
|
||||
// by default, changes made to the document on the client side are not propagated automatically
|
||||
let _sub11 = {
|
||||
let sink = c1.sink();
|
||||
let a = c1.awareness().write().await;
|
||||
let doc = a.doc();
|
||||
doc.observe_update_v1(move |_, e| {
|
||||
let update = e.update.to_owned();
|
||||
if let Some(sink) = sink.upgrade() {
|
||||
task::spawn(async move {
|
||||
let msg = Message::Sync(SyncMessage::Update(update)).encode_v1();
|
||||
let mut sink = sink.lock().await;
|
||||
sink.send(msg).await.unwrap();
|
||||
});
|
||||
}
|
||||
})
|
||||
.unwrap()
|
||||
};
|
||||
|
||||
let d2 = Doc::with_client_id(3);
|
||||
let (n2, _sub2) = create_notifier(&d2);
|
||||
let c2 = client(server_addr.clone(), d2).await?;
|
||||
|
||||
{
|
||||
let a = c1.awareness().write().await;
|
||||
let doc = a.doc();
|
||||
let text = doc.get_or_insert_text("test");
|
||||
text.push(&mut doc.transact_mut(), "def");
|
||||
}
|
||||
|
||||
timeout(TIMEOUT, n2.notified()).await?;
|
||||
|
||||
{
|
||||
let awareness = c2.awareness().read().await;
|
||||
let doc = awareness.doc();
|
||||
let text = doc.get_or_insert_text("test");
|
||||
let str = text.get_string(&doc.transact());
|
||||
assert_eq!(str, "def".to_string());
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn client_failure_doesnt_affect_others() -> Result<(), Box<dyn std::error::Error>> {
|
||||
let server_addr = SocketAddr::from_str("127.0.0.1:6604").unwrap();
|
||||
let doc = Doc::with_client_id(1);
|
||||
let _ = doc.get_or_insert_text("test");
|
||||
|
||||
let awareness = Arc::new(RwLock::new(Awareness::new(doc)));
|
||||
let bcast = BroadcastGroup::new(awareness.clone(), 10).await;
|
||||
let _server = start_server(server_addr.clone(), bcast).await?;
|
||||
|
||||
let d1 = Doc::with_client_id(2);
|
||||
let c1 = client(server_addr.clone(), d1).await?;
|
||||
// by default, changes made to the document on the client side are not propagated automatically
|
||||
let _sub11 = {
|
||||
let sink = c1.sink();
|
||||
let a = c1.awareness().write().await;
|
||||
let doc = a.doc();
|
||||
doc.observe_update_v1(move |_, e| {
|
||||
let update = e.update.to_owned();
|
||||
if let Some(sink) = sink.upgrade() {
|
||||
task::spawn(async move {
|
||||
let msg = Message::Sync(SyncMessage::Update(update)).encode_v1();
|
||||
let mut sink = sink.lock().await;
|
||||
sink.send(msg).await.unwrap();
|
||||
});
|
||||
}
|
||||
})
|
||||
.unwrap()
|
||||
};
|
||||
|
||||
let d2 = Doc::with_client_id(3);
|
||||
let (n2, sub2) = create_notifier(&d2);
|
||||
let c2 = client(server_addr.clone(), d2).await?;
|
||||
|
||||
let d3 = Doc::with_client_id(4);
|
||||
let (n3, sub3) = create_notifier(&d3);
|
||||
let c3 = client(server_addr.clone(), d3).await?;
|
||||
|
||||
{
|
||||
let a = c1.awareness().write().await;
|
||||
let doc = a.doc();
|
||||
let text = doc.get_or_insert_text("test");
|
||||
text.push(&mut doc.transact_mut(), "abc");
|
||||
}
|
||||
|
||||
// on the first try both C2 and C3 should receive the update
|
||||
//timeout(TIMEOUT, n2.notified()).await.unwrap();
|
||||
//timeout(TIMEOUT, n3.notified()).await.unwrap();
|
||||
sleep(TIMEOUT).await;
|
||||
|
||||
{
|
||||
let awareness = c2.awareness().read().await;
|
||||
let doc = awareness.doc();
|
||||
let text = doc.get_or_insert_text("test");
|
||||
let str = text.get_string(&doc.transact());
|
||||
assert_eq!(str, "abc".to_string());
|
||||
}
|
||||
{
|
||||
let awareness = c3.awareness().read().await;
|
||||
let doc = awareness.doc();
|
||||
let text = doc.get_or_insert_text("test");
|
||||
let str = text.get_string(&doc.transact());
|
||||
assert_eq!(str, "abc".to_string());
|
||||
}
|
||||
|
||||
// drop client, causing abrupt ending
|
||||
drop(c3);
|
||||
drop(n3);
|
||||
drop(sub3);
|
||||
// C2's notification subscription has already been consumed, so we need to recreate it
|
||||
drop(n2);
|
||||
drop(sub2);
|
||||
|
||||
let (n2, _sub2) = {
|
||||
let a = c2.awareness().write().await;
|
||||
let doc = a.doc();
|
||||
create_notifier(doc)
|
||||
};
|
||||
|
||||
{
|
||||
let a = c1.awareness().write().await;
|
||||
let doc = a.doc();
|
||||
let text = doc.get_or_insert_text("test");
|
||||
text.push(&mut doc.transact_mut(), "def");
|
||||
}
|
||||
|
||||
timeout(TIMEOUT, n2.notified()).await.unwrap();
|
||||
|
||||
{
|
||||
let awareness = c2.awareness().read().await;
|
||||
let doc = awareness.doc();
|
||||
let text = doc.get_or_insert_text("test");
|
||||
let str = text.get_string(&doc.transact());
|
||||
assert_eq!(str, "abcdef".to_string());
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
9
backend/yrs-warp/src/lib.rs
Normal file
|
@ -0,0 +1,9 @@
|
|||
use std::sync::Arc;
|
||||
use tokio::sync::RwLock;
|
||||
|
||||
pub mod broadcast;
|
||||
pub mod conn;
|
||||
pub mod signaling;
|
||||
pub mod ws;
|
||||
|
||||
pub type AwarenessRef = Arc<RwLock<yrs::sync::Awareness>>;
|
330
backend/yrs-warp/src/signaling.rs
Normal file
|
@ -0,0 +1,330 @@
|
|||
use futures_util::stream::SplitSink;
|
||||
use futures_util::{SinkExt, StreamExt};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::hash::{Hash, Hasher};
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use tokio::select;
|
||||
use tokio::sync::{Mutex, RwLock};
|
||||
use tokio::time::interval;
|
||||
use warp::ws::{Message, WebSocket};
|
||||
use warp::Error;
|
||||
|
||||
const PING_TIMEOUT: Duration = Duration::from_secs(30);
|
||||
|
||||
/// Signaling service is used by the y-webrtc protocol in order to exchange WebRTC offers between
|
||||
/// clients subscribing to particular rooms.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```rust
|
||||
/// use warp::{Filter, Rejection, Reply};
|
||||
/// use warp::ws::{Ws, WebSocket};
|
||||
/// use yrs_warp::signaling::{SignalingService, signaling_conn};
|
||||
///
|
||||
/// fn main() {
|
||||
/// let signaling = SignalingService::new();
|
||||
/// let ws = warp::path("signaling")
|
||||
/// .and(warp::ws())
|
||||
/// .and(warp::any().map(move || signaling.clone()))
|
||||
/// .and_then(ws_handler);
|
||||
///
|
||||
/// //warp::serve(ws).run(([0, 0, 0, 0], 8000)).await;
|
||||
/// }
|
||||
///
|
||||
/// async fn ws_handler(ws: Ws, svc: SignalingService) -> Result<impl Reply, Rejection> {
|
||||
/// Ok(ws.on_upgrade(move |socket| peer(socket, svc)))
|
||||
/// }
|
||||
///
|
||||
/// async fn peer(ws: WebSocket, svc: SignalingService) {
|
||||
/// match signaling_conn(ws, svc).await {
|
||||
/// Ok(_) => println!("signaling connection stopped"),
|
||||
/// Err(e) => eprintln!("signaling connection failed: {}", e),
|
||||
/// }
|
||||
/// }
|
||||
/// ```
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct SignalingService(Topics);
|
||||
|
||||
impl SignalingService {
|
||||
pub fn new() -> Self {
|
||||
SignalingService(Arc::new(RwLock::new(Default::default())))
|
||||
}
|
||||
|
||||
pub async fn publish(&self, topic: &str, msg: Message) -> Result<(), Error> {
|
||||
let mut failed = Vec::new();
|
||||
{
|
||||
let topics = self.0.read().await;
|
||||
if let Some(subs) = topics.get(topic) {
|
||||
let client_count = subs.len();
|
||||
tracing::info!("publishing message to {client_count} clients: {msg:?}");
|
||||
for sub in subs {
|
||||
if let Err(e) = sub.try_send(msg.clone()).await {
|
||||
tracing::info!("failed to send {msg:?}: {e}");
|
||||
failed.push(sub.clone());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if !failed.is_empty() {
|
||||
let mut topics = self.0.write().await;
|
||||
if let Some(subs) = topics.get_mut(topic) {
|
||||
for f in failed {
|
||||
subs.remove(&f);
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn close_topic(&self, topic: &str) -> Result<(), Error> {
|
||||
let mut topics = self.0.write().await;
|
||||
if let Some(subs) = topics.remove(topic) {
|
||||
for sub in subs {
|
||||
if let Err(e) = sub.close().await {
|
||||
tracing::warn!("failed to close connection on topic '{topic}': {e}");
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn close(self) -> Result<(), Error> {
|
||||
let mut topics = self.0.write_owned().await;
|
||||
let mut all_conns = HashSet::new();
|
||||
for (_, subs) in topics.drain() {
|
||||
for sub in subs {
|
||||
all_conns.insert(sub);
|
||||
}
|
||||
}
|
||||
|
||||
for conn in all_conns {
|
||||
if let Err(e) = conn.close().await {
|
||||
tracing::warn!("failed to close connection: {e}");
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for SignalingService {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
type Topics = Arc<RwLock<HashMap<Arc<str>, HashSet<WsSink>>>>;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
struct WsSink(Arc<Mutex<SplitSink<WebSocket, Message>>>);
|
||||
|
||||
impl WsSink {
|
||||
fn new(sink: SplitSink<WebSocket, Message>) -> Self {
|
||||
WsSink(Arc::new(Mutex::new(sink)))
|
||||
}
|
||||
|
||||
async fn try_send(&self, msg: Message) -> Result<(), Error> {
|
||||
let mut sink = self.0.lock().await;
|
||||
if let Err(e) = sink.send(msg).await {
|
||||
sink.close().await?;
|
||||
Err(e)
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
async fn close(&self) -> Result<(), Error> {
|
||||
let mut sink = self.0.lock().await;
|
||||
sink.close().await
|
||||
}
|
||||
}
|
||||
|
||||
impl Hash for WsSink {
|
||||
fn hash<H: Hasher>(&self, state: &mut H) {
|
||||
let ptr = Arc::as_ptr(&self.0) as usize;
|
||||
ptr.hash(state);
|
||||
}
|
||||
}
|
||||
|
||||
impl PartialEq<Self> for WsSink {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
Arc::ptr_eq(&self.0, &other.0)
|
||||
}
|
||||
}
|
||||
|
||||
impl Eq for WsSink {}
|
||||
|
||||
/// Handle incoming signaling connection - it's a websocket connection used by y-webrtc protocol
|
||||
/// to exchange offering metadata between y-webrtc peers. It also manages topic/room access.
|
||||
pub async fn signaling_conn(ws: WebSocket, service: SignalingService) -> Result<(), Error> {
|
||||
let mut topics: Topics = service.0;
|
||||
let (sink, mut stream) = ws.split();
|
||||
let ws = WsSink::new(sink);
|
||||
let mut ping_interval = interval(PING_TIMEOUT);
|
||||
let mut state = ConnState::default();
|
||||
loop {
|
||||
select! {
|
||||
_ = ping_interval.tick() => {
|
||||
if !state.pong_received {
|
||||
ws.close().await?;
|
||||
drop(ping_interval);
|
||||
return Ok(());
|
||||
} else {
|
||||
state.pong_received = false;
|
||||
if let Err(e) = ws.try_send(Message::ping(Vec::default())).await {
|
||||
ws.close().await?;
|
||||
return Err(e);
|
||||
}
|
||||
}
|
||||
},
|
||||
res = stream.next() => {
|
||||
match res {
|
||||
None => {
|
||||
ws.close().await?;
|
||||
return Ok(());
|
||||
},
|
||||
Some(Err(e)) => {
|
||||
ws.close().await?;
|
||||
return Err(e);
|
||||
},
|
||||
Some(Ok(msg)) => {
|
||||
process_msg(msg, &ws, &mut state, &mut topics).await?;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const PING_MSG: &'static str = r#"{"type":"ping"}"#;
|
||||
const PONG_MSG: &'static str = r#"{"type":"pong"}"#;
|
||||
|
||||
async fn process_msg(
|
||||
msg: Message,
|
||||
ws: &WsSink,
|
||||
state: &mut ConnState,
|
||||
topics: &mut Topics,
|
||||
) -> Result<(), Error> {
|
||||
if msg.is_text() {
|
||||
let json = msg.to_str().unwrap();
|
||||
let msg = serde_json::from_str(json).unwrap();
|
||||
match msg {
|
||||
Signal::Subscribe {
|
||||
topics: topic_names,
|
||||
} => {
|
||||
if !topic_names.is_empty() {
|
||||
let mut topics = topics.write().await;
|
||||
for topic in topic_names {
|
||||
tracing::trace!("subscribing new client to '{topic}'");
|
||||
if let Some((key, _)) = topics.get_key_value(topic) {
|
||||
state.subscribed_topics.insert(key.clone());
|
||||
let subs = topics.get_mut(topic).unwrap();
|
||||
subs.insert(ws.clone());
|
||||
} else {
|
||||
let topic: Arc<str> = topic.into();
|
||||
state.subscribed_topics.insert(topic.clone());
|
||||
let mut subs = HashSet::new();
|
||||
subs.insert(ws.clone());
|
||||
topics.insert(topic, subs);
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
Signal::Unsubscribe {
|
||||
topics: topic_names,
|
||||
} => {
|
||||
if !topic_names.is_empty() {
|
||||
let mut topics = topics.write().await;
|
||||
for topic in topic_names {
|
||||
if let Some(subs) = topics.get_mut(topic) {
|
||||
tracing::trace!("unsubscribing client from '{topic}'");
|
||||
subs.remove(ws);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Signal::Publish { topic } => {
|
||||
let mut failed = Vec::new();
|
||||
{
|
||||
let topics = topics.read().await;
|
||||
if let Some(receivers) = topics.get(topic) {
|
||||
let client_count = receivers.len();
|
||||
tracing::trace!(
|
||||
"publishing on {client_count} clients at '{topic}': {json}"
|
||||
);
|
||||
for receiver in receivers.iter() {
|
||||
if let Err(e) = receiver.try_send(Message::text(json)).await {
|
||||
tracing::info!(
|
||||
"failed to publish message {json} on '{topic}': {e}"
|
||||
);
|
||||
failed.push(receiver.clone());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if !failed.is_empty() {
|
||||
let mut topics = topics.write().await;
|
||||
if let Some(receivers) = topics.get_mut(topic) {
|
||||
for f in failed {
|
||||
receivers.remove(&f);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Signal::Ping => {
|
||||
ws.try_send(Message::text(PONG_MSG)).await?;
|
||||
}
|
||||
Signal::Pong => {
|
||||
ws.try_send(Message::text(PING_MSG)).await?;
|
||||
}
|
||||
}
|
||||
} else if msg.is_close() {
|
||||
let mut topics = topics.write().await;
|
||||
for topic in state.subscribed_topics.drain() {
|
||||
if let Some(subs) = topics.get_mut(&topic) {
|
||||
subs.remove(ws);
|
||||
if subs.is_empty() {
|
||||
topics.remove(&topic);
|
||||
}
|
||||
}
|
||||
}
|
||||
state.closed = true;
|
||||
} else if msg.is_ping() {
|
||||
ws.try_send(Message::ping(Vec::default())).await?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct ConnState {
|
||||
closed: bool,
|
||||
pong_received: bool,
|
||||
subscribed_topics: HashSet<Arc<str>>,
|
||||
}
|
||||
|
||||
impl Default for ConnState {
|
||||
fn default() -> Self {
|
||||
ConnState {
|
||||
closed: false,
|
||||
pong_received: true,
|
||||
subscribed_topics: HashSet::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(tag = "type")]
|
||||
pub(crate) enum Signal<'a> {
|
||||
#[serde(rename = "publish")]
|
||||
Publish { topic: &'a str },
|
||||
#[serde(rename = "subscribe")]
|
||||
Subscribe { topics: Vec<&'a str> },
|
||||
#[serde(rename = "unsubscribe")]
|
||||
Unsubscribe { topics: Vec<&'a str> },
|
||||
#[serde(rename = "ping")]
|
||||
Ping,
|
||||
#[serde(rename = "pong")]
|
||||
Pong,
|
||||
}
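For reference, a rough sketch of the JSON wire format these variants correspond to. This is an illustration only: it assumes the test lives inside this crate (since `Signal` is `pub(crate)`) and relies on `serde_json`, which `process_msg` already uses.

#[cfg(test)]
mod signal_format_tests {
    use super::Signal;

    #[test]
    fn signal_wire_format() {
        // y-webrtc signaling messages are plain JSON objects tagged by a "type" field.
        let json = r#"{"type":"subscribe","topics":["room-1"]}"#;
        let msg: Signal = serde_json::from_str(json).unwrap();
        assert_eq!(msg, Signal::Subscribe { topics: vec!["room-1"] });

        // Publishing uses the same envelope with a single "topic" field; the raw JSON is
        // what gets relayed verbatim to every other subscriber of that topic.
        let json = r#"{"type":"publish","topic":"room-1"}"#;
        let msg: Signal = serde_json::from_str(json).unwrap();
        assert_eq!(msg, Signal::Publish { topic: "room-1" });
    }
}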
|
572
backend/yrs-warp/src/ws.rs
Normal file
|
@ -0,0 +1,572 @@
|
|||
use crate::conn::Connection;
|
||||
use crate::AwarenessRef;
|
||||
use futures_util::stream::{SplitSink, SplitStream};
|
||||
use futures_util::{Stream, StreamExt};
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll};
|
||||
use warp::ws::{Message, WebSocket};
|
||||
use yrs::sync::Error;
|
||||
|
||||
/// Connection Wrapper over a [WebSocket], which implements a Yjs/Yrs awareness and update exchange
|
||||
/// protocol.
|
||||
///
|
||||
/// This connection implements the `Future` trait and can be awaited, allowing a caller to
|
||||
/// recognize whether the underlying websocket connection finished gracefully or abruptly.
|
||||
#[repr(transparent)]
|
||||
#[derive(Debug)]
|
||||
pub struct WarpConn(Connection<WarpSink, WarpStream>);
|
||||
|
||||
impl WarpConn {
|
||||
pub fn new(awareness: AwarenessRef, socket: WebSocket) -> Self {
|
||||
let (sink, stream) = socket.split();
|
||||
let conn = Connection::new(awareness, WarpSink(sink), WarpStream(stream));
|
||||
WarpConn(conn)
|
||||
}
|
||||
}
|
||||
|
||||
impl core::future::Future for WarpConn {
|
||||
type Output = Result<(), Error>;
|
||||
|
||||
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
match Pin::new(&mut self.0).poll(cx) {
|
||||
Poll::Pending => Poll::Pending,
|
||||
Poll::Ready(Err(e)) => Poll::Ready(Err(Error::Other(e.into()))),
|
||||
Poll::Ready(Ok(_)) => Poll::Ready(Ok(())),
|
||||
}
|
||||
}
|
||||
}
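As a minimal usage sketch (not part of this commit): the handler below assumes an `AwarenessRef` has been cloned into warp's filter chain, and simply awaits the connection to learn how it ended.

// Hypothetical upgrade callback; `awareness` is an assumed AwarenessRef provided by the route.
async fn peer(ws: warp::ws::WebSocket, awareness: crate::AwarenessRef) {
    let conn = WarpConn::new(awareness, ws);
    // Awaiting the connection drives the y-sync exchange until the socket closes.
    match conn.await {
        Ok(()) => println!("websocket connection closed gracefully"),
        Err(e) => eprintln!("websocket connection closed abruptly: {e}"),
    }
}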
|
||||
|
||||
/// A warp websocket sink wrapper that implements the futures `Sink` trait in a way that makes it
|
||||
/// compatible with the y-sync protocol, so that it can be used by the y-sync crate's [BroadcastGroup].
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```rust
|
||||
/// use std::net::SocketAddr;
|
||||
/// use std::str::FromStr;
|
||||
/// use std::sync::Arc;
|
||||
/// use futures_util::StreamExt;
|
||||
/// use tokio::sync::Mutex;
|
||||
/// use tokio::task::JoinHandle;
|
||||
/// use warp::{Filter, Rejection, Reply};
|
||||
/// use warp::ws::{WebSocket, Ws};
|
||||
/// use yrs_warp::broadcast::BroadcastGroup;
|
||||
/// use yrs_warp::ws::{WarpSink, WarpStream};
|
||||
///
|
||||
/// async fn start_server(
|
||||
/// addr: &str,
|
||||
/// bcast: Arc<BroadcastGroup>,
|
||||
/// ) -> Result<JoinHandle<()>, Box<dyn std::error::Error>> {
|
||||
/// let addr = SocketAddr::from_str(addr)?;
|
||||
/// let ws = warp::path("my-room")
|
||||
/// .and(warp::ws())
|
||||
/// .and(warp::any().map(move || bcast.clone()))
|
||||
/// .and_then(ws_handler);
|
||||
///
|
||||
/// Ok(tokio::spawn(async move {
|
||||
/// warp::serve(ws).run(addr).await;
|
||||
/// }))
|
||||
/// }
|
||||
///
|
||||
/// async fn ws_handler(ws: Ws, bcast: Arc<BroadcastGroup>) -> Result<impl Reply, Rejection> {
|
||||
/// Ok(ws.on_upgrade(move |socket| peer(socket, bcast)))
|
||||
/// }
|
||||
///
|
||||
/// async fn peer(ws: WebSocket, bcast: Arc<BroadcastGroup>) {
|
||||
/// let (sink, stream) = ws.split();
|
||||
/// // convert warp web socket into compatible sink/stream
|
||||
/// let sink = Arc::new(Mutex::new(WarpSink::from(sink)));
|
||||
/// let stream = WarpStream::from(stream);
|
||||
/// // subscribe to broadcast group
|
||||
/// let sub = bcast.subscribe(sink, stream);
|
||||
/// // wait for subscribed connection to close itself
|
||||
/// match sub.completed().await {
|
||||
/// Ok(_) => println!("broadcasting for channel finished successfully"),
|
||||
/// Err(e) => eprintln!("broadcasting for channel finished abruptly: {}", e),
|
||||
/// }
|
||||
/// }
|
||||
/// ```
|
||||
#[repr(transparent)]
|
||||
#[derive(Debug)]
|
||||
pub struct WarpSink(SplitSink<WebSocket, Message>);
|
||||
|
||||
impl From<SplitSink<WebSocket, Message>> for WarpSink {
|
||||
fn from(sink: SplitSink<WebSocket, Message>) -> Self {
|
||||
WarpSink(sink)
|
||||
}
|
||||
}
|
||||
|
||||
impl Into<SplitSink<WebSocket, Message>> for WarpSink {
|
||||
fn into(self) -> SplitSink<WebSocket, Message> {
|
||||
self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl futures_util::Sink<Vec<u8>> for WarpSink {
|
||||
type Error = Error;
|
||||
|
||||
fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
match Pin::new(&mut self.0).poll_ready(cx) {
|
||||
Poll::Pending => Poll::Pending,
|
||||
Poll::Ready(Err(e)) => Poll::Ready(Err(Error::Other(e.into()))),
|
||||
Poll::Ready(_) => Poll::Ready(Ok(())),
|
||||
}
|
||||
}
|
||||
|
||||
fn start_send(mut self: Pin<&mut Self>, item: Vec<u8>) -> Result<(), Self::Error> {
|
||||
if let Err(e) = Pin::new(&mut self.0).start_send(Message::binary(item)) {
|
||||
Err(Error::Other(e.into()))
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
match Pin::new(&mut self.0).poll_flush(cx) {
|
||||
Poll::Pending => Poll::Pending,
|
||||
Poll::Ready(Err(e)) => Poll::Ready(Err(Error::Other(e.into()))),
|
||||
Poll::Ready(_) => Poll::Ready(Ok(())),
|
||||
}
|
||||
}
|
||||
|
||||
fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
match Pin::new(&mut self.0).poll_close(cx) {
|
||||
Poll::Pending => Poll::Pending,
|
||||
Poll::Ready(Err(e)) => Poll::Ready(Err(Error::Other(e.into()))),
|
||||
Poll::Ready(_) => Poll::Ready(Ok(())),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A warp websocket stream wrapper that implements the futures `Stream` trait in a way that makes it
|
||||
/// compatible with the y-sync protocol, so that it can be used by the y-sync crate's [BroadcastGroup].
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```rust
|
||||
/// use std::net::SocketAddr;
|
||||
/// use std::str::FromStr;
|
||||
/// use std::sync::Arc;
|
||||
/// use futures_util::StreamExt;
|
||||
/// use tokio::sync::Mutex;
|
||||
/// use tokio::task::JoinHandle;
|
||||
/// use warp::{Filter, Rejection, Reply};
|
||||
/// use warp::ws::{WebSocket, Ws};
|
||||
/// use yrs_warp::broadcast::BroadcastGroup;
|
||||
/// use yrs_warp::ws::{WarpSink, WarpStream};
|
||||
///
|
||||
/// async fn start_server(
|
||||
/// addr: &str,
|
||||
/// bcast: Arc<BroadcastGroup>,
|
||||
/// ) -> Result<JoinHandle<()>, Box<dyn std::error::Error>> {
|
||||
/// let addr = SocketAddr::from_str(addr)?;
|
||||
/// let ws = warp::path("my-room")
|
||||
/// .and(warp::ws())
|
||||
/// .and(warp::any().map(move || bcast.clone()))
|
||||
/// .and_then(ws_handler);
|
||||
///
|
||||
/// Ok(tokio::spawn(async move {
|
||||
/// warp::serve(ws).run(addr).await;
|
||||
/// }))
|
||||
/// }
|
||||
///
|
||||
/// async fn ws_handler(ws: Ws, bcast: Arc<BroadcastGroup>) -> Result<impl Reply, Rejection> {
|
||||
/// Ok(ws.on_upgrade(move |socket| peer(socket, bcast)))
|
||||
/// }
|
||||
///
|
||||
/// async fn peer(ws: WebSocket, bcast: Arc<BroadcastGroup>) {
|
||||
/// let (sink, stream) = ws.split();
|
||||
/// // convert warp web socket into compatible sink/stream
|
||||
/// let sink = Arc::new(Mutex::new(WarpSink::from(sink)));
|
||||
/// let stream = WarpStream::from(stream);
|
||||
/// // subscribe to broadcast group
|
||||
/// let sub = bcast.subscribe(sink, stream);
|
||||
/// // wait for subscribed connection to close itself
|
||||
/// match sub.completed().await {
|
||||
/// Ok(_) => println!("broadcasting for channel finished successfully"),
|
||||
/// Err(e) => eprintln!("broadcasting for channel finished abruptly: {}", e),
|
||||
/// }
|
||||
/// }
|
||||
/// ```
|
||||
#[derive(Debug)]
|
||||
pub struct WarpStream(SplitStream<WebSocket>);
|
||||
|
||||
impl From<SplitStream<WebSocket>> for WarpStream {
|
||||
fn from(stream: SplitStream<WebSocket>) -> Self {
|
||||
WarpStream(stream)
|
||||
}
|
||||
}
|
||||
|
||||
impl Into<SplitStream<WebSocket>> for WarpStream {
|
||||
fn into(self) -> SplitStream<WebSocket> {
|
||||
self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl Stream for WarpStream {
|
||||
type Item = Result<Vec<u8>, Error>;
|
||||
|
||||
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
|
||||
match Pin::new(&mut self.0).poll_next(cx) {
|
||||
Poll::Pending => Poll::Pending,
|
||||
Poll::Ready(None) => Poll::Ready(None),
|
||||
Poll::Ready(Some(res)) => match res {
|
||||
Ok(item) => Poll::Ready(Some(Ok(item.into_bytes()))),
|
||||
Err(e) => Poll::Ready(Some(Err(Error::Other(e.into())))),
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use crate::broadcast::BroadcastGroup;
|
||||
use crate::conn::Connection;
|
||||
use crate::ws::{WarpSink, WarpStream};
|
||||
use futures_util::stream::{SplitSink, SplitStream};
|
||||
use futures_util::{ready, SinkExt, Stream, StreamExt};
|
||||
use std::net::SocketAddr;
|
||||
use std::pin::Pin;
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
use std::task::{Context, Poll};
|
||||
use std::time::Duration;
|
||||
use tokio::net::TcpStream;
|
||||
use tokio::sync::{Mutex, Notify, RwLock};
|
||||
use tokio::task;
|
||||
use tokio::task::JoinHandle;
|
||||
use tokio::time::{sleep, timeout};
|
||||
use tokio_tungstenite::tungstenite::Message;
|
||||
use tokio_tungstenite::{MaybeTlsStream, WebSocketStream};
|
||||
use warp::ws::{WebSocket, Ws};
|
||||
use warp::{Filter, Rejection, Reply, Sink};
|
||||
use yrs::sync::{Awareness, Error};
|
||||
use yrs::updates::encoder::Encode;
|
||||
use yrs::{Doc, GetString, Subscription, Text, Transact};
|
||||
|
||||
async fn start_server(
|
||||
addr: &str,
|
||||
bcast: Arc<BroadcastGroup>,
|
||||
) -> Result<JoinHandle<()>, Box<dyn std::error::Error>> {
|
||||
let addr = SocketAddr::from_str(addr)?;
|
||||
let ws = warp::path("my-room")
|
||||
.and(warp::ws())
|
||||
.and(warp::any().map(move || bcast.clone()))
|
||||
.and_then(ws_handler);
|
||||
|
||||
Ok(tokio::spawn(async move {
|
||||
warp::serve(ws).run(addr).await;
|
||||
}))
|
||||
}
|
||||
|
||||
async fn ws_handler(ws: Ws, bcast: Arc<BroadcastGroup>) -> Result<impl Reply, Rejection> {
|
||||
Ok(ws.on_upgrade(move |socket| peer(socket, bcast)))
|
||||
}
|
||||
|
||||
async fn peer(ws: WebSocket, bcast: Arc<BroadcastGroup>) {
|
||||
let (sink, stream) = ws.split();
|
||||
let sink = Arc::new(Mutex::new(WarpSink::from(sink)));
|
||||
let stream = WarpStream::from(stream);
|
||||
let sub = bcast.subscribe(sink, stream);
|
||||
match sub.completed().await {
|
||||
Ok(_) => println!("broadcasting for channel finished successfully"),
|
||||
Err(e) => eprintln!("broadcasting for channel finished abruptly: {}", e),
|
||||
}
|
||||
}
|
||||
|
||||
struct TungsteniteSink(SplitSink<WebSocketStream<MaybeTlsStream<TcpStream>>, Message>);
|
||||
|
||||
impl Sink<Vec<u8>> for TungsteniteSink {
|
||||
type Error = Error;
|
||||
|
||||
fn poll_ready(
|
||||
mut self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<Result<(), Self::Error>> {
|
||||
let sink = unsafe { Pin::new_unchecked(&mut self.0) };
|
||||
let result = ready!(sink.poll_ready(cx));
|
||||
match result {
|
||||
Ok(_) => Poll::Ready(Ok(())),
|
||||
Err(e) => Poll::Ready(Err(Error::Other(Box::new(e)))),
|
||||
}
|
||||
}
|
||||
|
||||
fn start_send(mut self: Pin<&mut Self>, item: Vec<u8>) -> Result<(), Self::Error> {
|
||||
let sink = unsafe { Pin::new_unchecked(&mut self.0) };
|
||||
let result = sink.start_send(Message::binary(item));
|
||||
match result {
|
||||
Ok(_) => Ok(()),
|
||||
Err(e) => Err(Error::Other(Box::new(e))),
|
||||
}
|
||||
}
|
||||
|
||||
fn poll_flush(
|
||||
mut self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<Result<(), Self::Error>> {
|
||||
let sink = unsafe { Pin::new_unchecked(&mut self.0) };
|
||||
let result = ready!(sink.poll_flush(cx));
|
||||
match result {
|
||||
Ok(_) => Poll::Ready(Ok(())),
|
||||
Err(e) => Poll::Ready(Err(Error::Other(Box::new(e)))),
|
||||
}
|
||||
}
|
||||
|
||||
fn poll_close(
|
||||
mut self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<Result<(), Self::Error>> {
|
||||
let sink = unsafe { Pin::new_unchecked(&mut self.0) };
|
||||
let result = ready!(sink.poll_close(cx));
|
||||
match result {
|
||||
Ok(_) => Poll::Ready(Ok(())),
|
||||
Err(e) => Poll::Ready(Err(Error::Other(Box::new(e)))),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
struct TungsteniteStream(SplitStream<WebSocketStream<MaybeTlsStream<TcpStream>>>);
|
||||
impl Stream for TungsteniteStream {
|
||||
type Item = Result<Vec<u8>, Error>;
|
||||
|
||||
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
|
||||
let stream = unsafe { Pin::new_unchecked(&mut self.0) };
|
||||
let result = ready!(stream.poll_next(cx));
|
||||
match result {
|
||||
None => Poll::Ready(None),
|
||||
Some(Ok(msg)) => Poll::Ready(Some(Ok(msg.into_data()))),
|
||||
Some(Err(e)) => Poll::Ready(Some(Err(Error::Other(Box::new(e))))),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn client(
|
||||
addr: &str,
|
||||
doc: Doc,
|
||||
) -> Result<Connection<TungsteniteSink, TungsteniteStream>, Box<dyn std::error::Error>> {
|
||||
let (stream, _) = tokio_tungstenite::connect_async(addr).await?;
|
||||
let (sink, stream) = stream.split();
|
||||
let sink = TungsteniteSink(sink);
|
||||
let stream = TungsteniteStream(stream);
|
||||
Ok(Connection::new(
|
||||
Arc::new(RwLock::new(Awareness::new(doc))),
|
||||
sink,
|
||||
stream,
|
||||
))
|
||||
}
|
||||
|
||||
fn create_notifier(doc: &Doc) -> (Arc<Notify>, Subscription) {
|
||||
let n = Arc::new(Notify::new());
|
||||
let sub = {
|
||||
let n = n.clone();
|
||||
doc.observe_update_v1(move |_, _| n.notify_waiters())
|
||||
.unwrap()
|
||||
};
|
||||
(n, sub)
|
||||
}
|
||||
|
||||
const TIMEOUT: Duration = Duration::from_secs(5);
|
||||
|
||||
#[tokio::test]
|
||||
async fn change_introduced_by_server_reaches_subscribed_clients() {
|
||||
let doc = Doc::with_client_id(1);
|
||||
let text = doc.get_or_insert_text("test");
|
||||
let awareness = Arc::new(RwLock::new(Awareness::new(doc)));
|
||||
let bcast = BroadcastGroup::new(awareness.clone(), 10).await;
|
||||
let _server = start_server("0.0.0.0:6600", Arc::new(bcast)).await.unwrap();
|
||||
|
||||
let doc = Doc::new();
|
||||
let (n, _sub) = create_notifier(&doc);
|
||||
let c1 = client("ws://localhost:6600/my-room", doc).await.unwrap();
|
||||
|
||||
{
|
||||
let lock = awareness.write().await;
|
||||
text.push(&mut lock.doc().transact_mut(), "abc");
|
||||
}
|
||||
|
||||
timeout(TIMEOUT, n.notified()).await.unwrap();
|
||||
|
||||
{
|
||||
let awareness = c1.awareness().read().await;
|
||||
let doc = awareness.doc();
|
||||
let text = doc.get_or_insert_text("test");
|
||||
let str = text.get_string(&doc.transact());
|
||||
assert_eq!(str, "abc".to_string());
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn subscribed_client_fetches_initial_state() {
|
||||
let doc = Doc::with_client_id(1);
|
||||
let text = doc.get_or_insert_text("test");
|
||||
|
||||
text.push(&mut doc.transact_mut(), "abc");
|
||||
|
||||
let awareness = Arc::new(RwLock::new(Awareness::new(doc)));
|
||||
let bcast = BroadcastGroup::new(awareness.clone(), 10).await;
|
||||
let _server = start_server("0.0.0.0:6601", Arc::new(bcast)).await.unwrap();
|
||||
|
||||
let doc = Doc::new();
|
||||
let (n, _sub) = create_notifier(&doc);
|
||||
let c1 = client("ws://localhost:6601/my-room", doc).await.unwrap();
|
||||
|
||||
timeout(TIMEOUT, n.notified()).await.unwrap();
|
||||
|
||||
{
|
||||
let awareness = c1.awareness().read().await;
|
||||
let doc = awareness.doc();
|
||||
let text = doc.get_or_insert_text("test");
|
||||
let str = text.get_string(&doc.transact());
|
||||
assert_eq!(str, "abc".to_string());
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn changes_from_one_client_reach_others() {
|
||||
let doc = Doc::with_client_id(1);
|
||||
let _ = doc.get_or_insert_text("test");
|
||||
|
||||
let awareness = Arc::new(RwLock::new(Awareness::new(doc)));
|
||||
let bcast = BroadcastGroup::new(awareness.clone(), 10).await;
|
||||
let _server = start_server("0.0.0.0:6602", Arc::new(bcast)).await.unwrap();
|
||||
|
||||
let d1 = Doc::with_client_id(2);
|
||||
let c1 = client("ws://localhost:6602/my-room", d1).await.unwrap();
|
||||
// by default, changes made to the document on the client side are not propagated automatically
|
||||
let _sub11 = {
|
||||
let sink = c1.sink();
|
||||
let a = c1.awareness().write().await;
|
||||
let doc = a.doc();
|
||||
doc.observe_update_v1(move |_, e| {
|
||||
let update = e.update.to_owned();
|
||||
if let Some(sink) = sink.upgrade() {
|
||||
task::spawn(async move {
|
||||
let msg = yrs::sync::Message::Sync(yrs::sync::SyncMessage::Update(update))
|
||||
.encode_v1();
|
||||
let mut sink = sink.lock().await;
|
||||
sink.send(msg).await.unwrap();
|
||||
});
|
||||
}
|
||||
})
|
||||
.unwrap()
|
||||
};
|
||||
|
||||
let d2 = Doc::with_client_id(3);
|
||||
let (n2, _sub2) = create_notifier(&d2);
|
||||
let c2 = client("ws://localhost:6602/my-room", d2).await.unwrap();
|
||||
|
||||
{
|
||||
let a = c1.awareness().write().await;
|
||||
let doc = a.doc();
|
||||
let text = doc.get_or_insert_text("test");
|
||||
text.push(&mut doc.transact_mut(), "def");
|
||||
}
|
||||
|
||||
timeout(TIMEOUT, n2.notified()).await.unwrap();
|
||||
|
||||
{
|
||||
let awareness = c2.awareness().read().await;
|
||||
let doc = awareness.doc();
|
||||
let text = doc.get_or_insert_text("test");
|
||||
let str = text.get_string(&doc.transact());
|
||||
assert_eq!(str, "def".to_string());
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn client_failure_doesnt_affect_others() {
|
||||
let doc = Doc::with_client_id(1);
|
||||
let _text = doc.get_or_insert_text("test");
|
||||
|
||||
let awareness = Arc::new(RwLock::new(Awareness::new(doc)));
|
||||
let bcast = BroadcastGroup::new(awareness.clone(), 10).await;
|
||||
let _server = start_server("0.0.0.0:6603", Arc::new(bcast)).await.unwrap();
|
||||
|
||||
let d1 = Doc::with_client_id(2);
|
||||
let c1 = client("ws://localhost:6603/my-room", d1).await.unwrap();
|
||||
// by default, changes made to the document on the client side are not propagated automatically
|
||||
let _sub11 = {
|
||||
let sink = c1.sink();
|
||||
let a = c1.awareness().write().await;
|
||||
let doc = a.doc();
|
||||
doc.observe_update_v1(move |_, e| {
|
||||
let update = e.update.to_owned();
|
||||
if let Some(sink) = sink.upgrade() {
|
||||
task::spawn(async move {
|
||||
let msg = yrs::sync::Message::Sync(yrs::sync::SyncMessage::Update(update))
|
||||
.encode_v1();
|
||||
let mut sink = sink.lock().await;
|
||||
sink.send(msg).await.unwrap();
|
||||
});
|
||||
}
|
||||
})
|
||||
.unwrap()
|
||||
};
|
||||
|
||||
let d2 = Doc::with_client_id(3);
|
||||
let (n2, sub2) = create_notifier(&d2);
|
||||
let c2 = client("ws://localhost:6603/my-room", d2).await.unwrap();
|
||||
|
||||
let d3 = Doc::with_client_id(4);
|
||||
let (n3, sub3) = create_notifier(&d3);
|
||||
let c3 = client("ws://localhost:6603/my-room", d3).await.unwrap();
|
||||
|
||||
{
|
||||
let a = c1.awareness().write().await;
|
||||
let doc = a.doc();
|
||||
let text = doc.get_or_insert_text("test");
|
||||
text.push(&mut doc.transact_mut(), "abc");
|
||||
}
|
||||
|
||||
// on the first try both C2 and C3 should receive the update
|
||||
//timeout(TIMEOUT, n2.notified()).await.unwrap();
|
||||
//timeout(TIMEOUT, n3.notified()).await.unwrap();
|
||||
sleep(TIMEOUT).await;
|
||||
|
||||
{
|
||||
let awareness = c2.awareness().read().await;
|
||||
let doc = awareness.doc();
|
||||
let text = doc.get_or_insert_text("test");
|
||||
let str = text.get_string(&doc.transact());
|
||||
assert_eq!(str, "abc".to_string());
|
||||
}
|
||||
{
|
||||
let awareness = c3.awareness().read().await;
|
||||
let doc = awareness.doc();
|
||||
let text = doc.get_or_insert_text("test");
|
||||
let str = text.get_string(&doc.transact());
|
||||
assert_eq!(str, "abc".to_string());
|
||||
}
|
||||
|
||||
// drop client, causing abrupt ending
|
||||
drop(c3);
|
||||
drop(n3);
|
||||
drop(sub3);
|
||||
// C2's notification subscription has already been consumed, so we need to recreate it
|
||||
drop(n2);
|
||||
drop(sub2);
|
||||
|
||||
let (n2, _sub2) = {
|
||||
let a = c2.awareness().write().await;
|
||||
let doc = a.doc();
|
||||
create_notifier(doc)
|
||||
};
|
||||
|
||||
{
|
||||
let a = c1.awareness().write().await;
|
||||
let doc = a.doc();
|
||||
let text = doc.get_or_insert_text("test");
|
||||
text.push(&mut doc.transact_mut(), "def");
|
||||
}
|
||||
|
||||
timeout(TIMEOUT, n2.notified()).await.unwrap();
|
||||
|
||||
{
|
||||
let awareness = c2.awareness().read().await;
|
||||
let doc = awareness.doc();
|
||||
let text = doc.get_or_insert_text("test");
|
||||
let str = text.get_string(&doc.transact());
|
||||
assert_eq!(str, "abcdef".to_string());
|
||||
}
|
||||
}
|
||||
}
|
|
@ -44,6 +44,7 @@
|
|||
|
||||
packages = with pkgs; [
|
||||
nodejs
|
||||
rustPlatform.bindgenHook
|
||||
|
||||
nodePackages.pnpm
|
||||
|
||||
|
|
8
frontend/.gitignore
vendored
Normal file
|
@ -0,0 +1,8 @@
|
|||
node_modules
|
||||
|
||||
dist
|
||||
dist-ssr
|
||||
|
||||
# Serwist
|
||||
public/sw*
|
||||
public/swe-worker*
|
|
@ -5,7 +5,7 @@
|
|||
"tsx": true,
|
||||
"tailwind": {
|
||||
"config": "",
|
||||
"css": "src/styles.css",
|
||||
"css": "src/styles/global.css",
|
||||
"baseColor": "neutral",
|
||||
"cssVariables": true,
|
||||
"prefix": ""
|
||||
|
@ -18,4 +18,4 @@
|
|||
"hooks": "~/hooks"
|
||||
},
|
||||
"iconLibrary": "lucide"
|
||||
}
|
||||
}
|
||||
|
|
|
@ -10,9 +10,9 @@
|
|||
},
|
||||
"dependencies": {
|
||||
"@excalidraw/excalidraw": "^0.18.0",
|
||||
"@lexical/react": "^0.28.0",
|
||||
"@lexical/utils": "^0.28.0",
|
||||
"@lexical/yjs": "^0.28.0",
|
||||
"@lexical/react": "^0.25.0",
|
||||
"@lexical/utils": "^0.25.0",
|
||||
"@lexical/yjs": "^0.25.0",
|
||||
"@mdi/js": "^7.4.47",
|
||||
"@mdi/react": "^1.6.1",
|
||||
"@radix-ui/react-alert-dialog": "^1.1.6",
|
||||
|
@ -33,7 +33,7 @@
|
|||
"class-variance-authority": "^0.7.1",
|
||||
"clsx": "^2.1.1",
|
||||
"cmdk": "1.0.0",
|
||||
"lexical": "^0.28.0",
|
||||
"lexical": "^0.25.0",
|
||||
"lucide-react": "^0.483.0",
|
||||
"react": "^19.0.0",
|
||||
"react-dom": "^19.0.0",
|
||||
|
|
296
frontend/pnpm-lock.yaml
generated
|
@ -12,14 +12,14 @@ importers:
|
|||
specifier: ^0.18.0
|
||||
version: 0.18.0(@types/react-dom@19.0.4(@types/react@19.0.11))(@types/react@19.0.11)(react-dom@19.0.0(react@19.0.0))(react@19.0.0)
|
||||
'@lexical/react':
|
||||
specifier: ^0.28.0
|
||||
version: 0.28.0(react-dom@19.0.0(react@19.0.0))(react@19.0.0)(yjs@13.6.24)
|
||||
specifier: ^0.25.0
|
||||
version: 0.25.0(react-dom@19.0.0(react@19.0.0))(react@19.0.0)(yjs@13.6.24)
|
||||
'@lexical/utils':
|
||||
specifier: ^0.28.0
|
||||
version: 0.28.0
|
||||
specifier: ^0.25.0
|
||||
version: 0.25.0
|
||||
'@lexical/yjs':
|
||||
specifier: ^0.28.0
|
||||
version: 0.28.0(yjs@13.6.24)
|
||||
specifier: ^0.25.0
|
||||
version: 0.25.0(yjs@13.6.24)
|
||||
'@mdi/js':
|
||||
specifier: ^7.4.47
|
||||
version: 7.4.47
|
||||
|
@ -81,8 +81,8 @@ importers:
|
|||
specifier: 1.0.0
|
||||
version: 1.0.0(@types/react-dom@19.0.4(@types/react@19.0.11))(@types/react@19.0.11)(react-dom@19.0.0(react@19.0.0))(react@19.0.0)
|
||||
lexical:
|
||||
specifier: ^0.28.0
|
||||
version: 0.28.0
|
||||
specifier: ^0.25.0
|
||||
version: 0.25.0
|
||||
lucide-react:
|
||||
specifier: ^0.483.0
|
||||
version: 0.483.0(react@19.0.0)
|
||||
|
@ -501,74 +501,74 @@ packages:
|
|||
'@jridgewell/trace-mapping@0.3.25':
|
||||
resolution: {integrity: sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==}
|
||||
|
||||
'@lexical/clipboard@0.28.0':
|
||||
resolution: {integrity: sha512-LYqion+kAwFQJStA37JAEMxTL/m1WlZbotDfM/2WuONmlO0yWxiyRDI18oeCwhBD6LQQd9c3Ccxp9HFwUG1AVw==}
|
||||
'@lexical/clipboard@0.25.0':
|
||||
resolution: {integrity: sha512-z0Dx6fDSLfbfK/LsfjkFbOTO87kHXQ6FNi8eCvVHtMLnME91kHTaotIalEdlS69Hw6032AThnEqeN5rAB24BlQ==}
|
||||
|
||||
'@lexical/code@0.28.0':
|
||||
resolution: {integrity: sha512-9LOKSWdRhxqAKRq5yveNC21XKtW4h2rmFNTucwMWZ9vLu9xteOHEwZdO1Qv82PFUmgCpAhg6EntmnZu9xD3K7Q==}
|
||||
'@lexical/code@0.25.0':
|
||||
resolution: {integrity: sha512-jTUFIYbbsud4qdLV+zAYj1UhzC2LiKSly5uprsJgTvBwdZppexcNOiHFvH1kEODVXVSLgU6WCg0XROPB+fXh9A==}
|
||||
|
||||
'@lexical/devtools-core@0.28.0':
|
||||
resolution: {integrity: sha512-Fk4itAjZ+MqTYXN84aE5RDf+wQX67N5nyo3JVxQTFZGAghx7Ux1xLWHB25zzD0YfjMtJ0NQROAbE3xdecZzxcQ==}
|
||||
'@lexical/devtools-core@0.25.0':
|
||||
resolution: {integrity: sha512-DJl5reXPdohZulAfemZCzhkglJfSJnQbu7bAtBMhUx0U0t51YOycCqXcKeXwNge74h3T3aG4/N4fPV50Cq1izA==}
|
||||
peerDependencies:
|
||||
react: '>=17.x'
|
||||
react-dom: '>=17.x'
|
||||
|
||||
'@lexical/dragon@0.28.0':
|
||||
resolution: {integrity: sha512-T6T8YaHnhU863ruuqmRHTLUYa8sfg/ArYcrnNGZGfpvvFTfFjpWb/ELOvOWo8N6Y/4fnSLjQ20aXexVW1KcTBQ==}
|
||||
'@lexical/dragon@0.25.0':
|
||||
resolution: {integrity: sha512-mMMX2clk8nQHZEgPuVlTOPYMCH0Kg4EevJ4dg6q5sGJzkHjXerooGm+PYfLEcOk7SQC0YOhvW7gkM/UQYYHHWA==}
|
||||
|
||||
'@lexical/hashtag@0.28.0':
|
||||
resolution: {integrity: sha512-zcqX9Qna4lj96bAUfwSQSVEhYQ0O5erSjrIhOVqEgeQ5ubz0EvqnnMbbwNHIb2n6jzSwAvpD/3UZJZtolh+zVg==}
|
||||
'@lexical/hashtag@0.25.0':
|
||||
resolution: {integrity: sha512-di9pcBeCS/hx5z4EudOp7J6m4bNVqxVzm4J/15D4WDz6Jen3dh2Jha0N2pmFuHbOiuLyPAhYAV4PacFi5+ne+w==}
|
||||
|
||||
'@lexical/history@0.28.0':
|
||||
resolution: {integrity: sha512-CHzDxaGDn6qCFFhU0YKP1B8sgEb++0Ksqsj6BfDL/6TMxoLNQwRQhP3BUNNXl1kvUhxTQZgk3b9MjJZRaFKG9Q==}
|
||||
'@lexical/history@0.25.0':
|
||||
resolution: {integrity: sha512-mUkXhVZDbgUYkpjFLcMDSOorOVeX/l2eV2wxnju9hjodGenEfVK1yWv1A2siCiG0K9y8o1Eksa4F7zf3ZlXRuA==}
|
||||
|
||||
'@lexical/html@0.28.0':
|
||||
resolution: {integrity: sha512-ayb0FPxr55Ko99/d9ewbfrApul4L0z+KpU2ZG03im7EvUPVLyIGLx4S0QguMDvQh0Vu+eJ7/EESuonDs5BCe3A==}
|
||||
'@lexical/html@0.25.0':
|
||||
resolution: {integrity: sha512-bjO5RbMpyFw4H6kCgYcdAlxzhoF1tjtRZ1w0MxSHReLg9wqNaeg4uNwdW+uVMbxRv+cYqaO0To66qMAcND6tiQ==}
|
||||
|
||||
'@lexical/link@0.28.0':
|
||||
resolution: {integrity: sha512-T5VKxpOnML5DcXv2lW3Le0vjNlcbdohZjS9f6PAvm6eX8EzBKDpLQCopr1/0KGdlLd1QrzQsykQrdU7ieC4LRg==}
|
||||
'@lexical/link@0.25.0':
|
||||
resolution: {integrity: sha512-epFwgLWfmYgtXgI1n7oJJ3ana48qTm+WHEAYnFK7mxXEV6V93j5hm3MJgAJL4Lut/iHkrBXhT9PVMwkg75ImRQ==}
|
||||
|
||||
'@lexical/list@0.28.0':
|
||||
resolution: {integrity: sha512-3a8QcZ75n2TLxP+xkSPJ2V15jsysMLMe0YoObG+ew/sioVelIU8GciYsWBo5GgQmwSzJNQJeK5cJ9p1b71z2cg==}
|
||||
'@lexical/list@0.25.0':
|
||||
resolution: {integrity: sha512-hAEcNuE9WxDHJGUcwkTN+4sGn1/j5/wBqkuaYrBrhf7vCHeKUAu7er7T7Gdfj1HPfjqZm6YxdqFF4LYqejwuRg==}
|
||||
|
||||
'@lexical/mark@0.28.0':
|
||||
resolution: {integrity: sha512-v5PzmTACsJrw3GvNZy2rgPxrNn9InLvLFoKqrSlNhhyvYNIAcuC4KVy00LKLja43Gw/fuB3QwKohYfAtM3yR3g==}
|
||||
'@lexical/mark@0.25.0':
|
||||
resolution: {integrity: sha512-J3T35abTjvri5rq+3P/exWdB6uHTQDmAi3gy1PkHSGLODhfKypo1gZdFY80QZmxcR9yewhMr9d9o5YwRsfZqSw==}
|
||||
|
||||
'@lexical/markdown@0.28.0':
|
||||
resolution: {integrity: sha512-F3JXClqN4cjmXYLDK0IztxkbZuqkqS/AVbxnhGvnDYHQ9Gp8l7BonczhOiPwmJCDubJrAACP0L9LCqyt0jDRFw==}
|
||||
'@lexical/markdown@0.25.0':
|
||||
resolution: {integrity: sha512-KHj/AdjJIzCYtWRPyb8LjPo19gqGuLHu5Q3S2+n1OgCEwhpemPFHQPGk8AGd21jyF4h2psAe7d7ikMQj81wOrw==}
|
||||
|
||||
'@lexical/offset@0.28.0':
|
||||
resolution: {integrity: sha512-/SMDQgBPeWM936t04mtH6UAn3xAjP/meu9q136bcT3S7p7V8ew9JfNp9aznTPTx+2W3brJORAvUow7Xn1fSHmw==}
|
||||
'@lexical/offset@0.25.0':
|
||||
resolution: {integrity: sha512-3qcqMb07OyA174nB/ghWCj6fgGSPlqjYNzJ75xuvg1n8S3RpobDarin2iEEUb/n74wA4w5fwufJPe/Nu0NRJ/g==}
|
||||
|
||||
'@lexical/overflow@0.28.0':
|
||||
resolution: {integrity: sha512-ppmhHXEZVicBm05w9EVflzwFavTVNAe4q0bkabWUeW0IoCT3Vg2A3JT7PC9ypmp+mboUD195foFEr1BBSv1Y8Q==}
|
||||
'@lexical/overflow@0.25.0':
|
||||
resolution: {integrity: sha512-TEgn3HMx1pNI3VwlPcmFo29H+yLKvuX+zNlvdGwQh6tHHvw7DEyJ7iAuK74V4UZZT2bWcOr0ZArBYSDV/9bZtQ==}
|
||||
|
||||
'@lexical/plain-text@0.28.0':
|
||||
resolution: {integrity: sha512-Jj2dCMDEfRuVetfDKcUes8J5jvAfZrLnILFlHxnu7y+lC+7R/NR403DYb3NJ8H7+lNiH1K15+U2K7ewbjxS6KQ==}
|
||||
'@lexical/plain-text@0.25.0':
|
||||
resolution: {integrity: sha512-k0SG0mmsF/pKsvMMSnilysfu22NqapaNtJHtF3QPvOv8ryDuILm9HgwD8svALVvR9BI8PeCSspnoVKEELwVzCw==}
|
||||
|
||||
'@lexical/react@0.28.0':
|
||||
resolution: {integrity: sha512-dWPnxrKrbQFjNqExqnaAsV0UEUgw/5M1ZYRWd5FGBGjHqVTCaX2jNHlKLMA68Od0VPIoOX2Zy1TYZ8ZKtsj5Dg==}
|
||||
'@lexical/react@0.25.0':
|
||||
resolution: {integrity: sha512-cdrwfNyknAwbFyO+3vUDjRADJ/3yckVeUKrZSQN32/71kdIas4OXYDYbPNdsUVEiIvicTkQJof8naG+cj9LtTQ==}
|
||||
peerDependencies:
|
||||
react: '>=17.x'
|
||||
react-dom: '>=17.x'
|
||||
|
||||
'@lexical/rich-text@0.28.0':
|
||||
resolution: {integrity: sha512-y+vUWI+9uFupIb9UvssKU/DKcT9dFUZuQBu7utFkLadxCNyXQHeRjxzjzmvFiM3DBV0guPUDGu5VS5TPnIA+OA==}
|
||||
'@lexical/rich-text@0.25.0':
|
||||
resolution: {integrity: sha512-cNQeqKtJGnAp1ndRlohGoWcEE59WS17bfjMmPbbHbWUSlvFMIsa/7GopauW4KyuEjFkNNhDVEB21r1zDX97unA==}
|
||||
|
||||
'@lexical/selection@0.28.0':
|
||||
resolution: {integrity: sha512-AJDi67Nsexyejzp4dEQSVoPov4P+FJ0t1v6DxUU+YmcvV56QyJQi6ue0i/xd8unr75ZufzLsAC0cDJJCEI7QDA==}
|
||||
'@lexical/selection@0.25.0':
|
||||
resolution: {integrity: sha512-zbUflzM5bAER5JyDXNbaw4s9w5kp8Y2oYqpplkZhMXj8Ri3CUE5bQgt4vH6XbyKVl4hsJlVcZ3qmw3HJAcnWNQ==}
|
||||
|
||||
'@lexical/table@0.28.0':
|
||||
resolution: {integrity: sha512-HMPCwXdj0sRWdlDzsHcNWRgbeKbEhn3L8LPhFnTq7q61gZ4YW2umdmuvQFKnIBcKq49drTH8cUwZoIwI8+AEEw==}
|
||||
'@lexical/table@0.25.0':
|
||||
resolution: {integrity: sha512-gWOvqyz1eGv504fxH7r0hZACYgxEldr5GlzS2GoII3S1OjGhEOjBkM5JNMzATFaxvfdUkL2ywbmBfhk5hI/spg==}
|
||||
|
||||
'@lexical/text@0.28.0':
|
||||
resolution: {integrity: sha512-PT/A2RZv+ktn7SG/tJkOpGlYE6zjOND59VtRHnV/xciZ+jEJVaqAHtWjhbWibAIZQAkv/O7UouuDqzDaNTSGAA==}
|
||||
'@lexical/text@0.25.0':
|
||||
resolution: {integrity: sha512-u5WtmkbYYMePRQGAMFKqtbq4JhEMtSFwPYvtbS/5WUoYBf55IE+0kQA3meMK9MqBsxWil6TTekWOnKYmoLXv4w==}
|
||||
|
||||
'@lexical/utils@0.28.0':
|
||||
resolution: {integrity: sha512-Qw00DjkS1nRK7DLSgqJpJ77Ti2AuiOQ6m5eM38YojoWXkVmoxqKAUMaIbVNVKqjFgrQvKFF46sXxIJPbUQkB0w==}
|
||||
'@lexical/utils@0.25.0':
|
||||
resolution: {integrity: sha512-LAB/5OniVCDFMWh8zuKejqU4blAP5CEIpPXBtevbHROtbz3fqo/xqcHK3bGGP9w1UKYH0S6n+MA8Uv9hdVzf7w==}
|
||||
|
||||
'@lexical/yjs@0.28.0':
|
||||
resolution: {integrity: sha512-rKHpUEd3nrvMY7ghmOC0AeGSYT7YIviba+JViaOzrCX4/Wtv5C/3Sl7Io12Z9k+s1BKmy7C28bOdQHvRWaD7vQ==}
|
||||
'@lexical/yjs@0.25.0':
|
||||
resolution: {integrity: sha512-fRXLfwt7Qp897GqRnzQ5Ew+kWaFmdixRtD3BhGoMh8/KfvccbFeL7M4d29KbQe+jOWljlpO57X1sKBZ/OWNjJw==}
|
||||
peerDependencies:
|
||||
yjs: '>=13.5.22'
|
||||
|
||||
|
@ -2368,8 +2368,8 @@ packages:
|
|||
engines: {node: '>=6'}
|
||||
deprecated: Superseded by abstract-level (https://github.com/Level/community#faq)
|
||||
|
||||
lexical@0.28.0:
|
||||
resolution: {integrity: sha512-dLE3O1PZg0TlZxRQo9YDpjCjDUj8zluGyBO9MHdjo21qZmMUNrxQPeCRt8fn2s5l4HKYFQ1YNgl7k1pOJB/vZQ==}
|
||||
lexical@0.25.0:
|
||||
resolution: {integrity: sha512-yZNfC+3Df/qGAbGD1Lsh+dkG0COJKGuDlT1e/t0qS9F1jaKvoz7Fm0tACQ3eRTbOYncJiaJveLiT9GADac2hrA==}
|
||||
|
||||
lib0@0.2.99:
|
||||
resolution: {integrity: sha512-vwztYuUf1uf/1zQxfzRfO5yzfNKhTtgOByCruuiQQxWQXnPb8Itaube5ylofcV0oM0aKal9Mv+S1s1Ky0UYP1w==}
|
||||
|
@ -3601,149 +3601,151 @@ snapshots:
|
|||
'@jridgewell/resolve-uri': 3.1.2
|
||||
'@jridgewell/sourcemap-codec': 1.5.0
|
||||
|
||||
'@lexical/clipboard@0.28.0':
|
||||
'@lexical/clipboard@0.25.0':
|
||||
dependencies:
|
||||
'@lexical/html': 0.28.0
|
||||
'@lexical/list': 0.28.0
|
||||
'@lexical/selection': 0.28.0
|
||||
'@lexical/utils': 0.28.0
|
||||
lexical: 0.28.0
|
||||
'@lexical/html': 0.25.0
|
||||
'@lexical/list': 0.25.0
|
||||
'@lexical/selection': 0.25.0
|
||||
'@lexical/utils': 0.25.0
|
||||
lexical: 0.25.0
|
||||
|
||||
'@lexical/code@0.28.0':
|
||||
'@lexical/code@0.25.0':
|
||||
dependencies:
|
||||
'@lexical/utils': 0.28.0
|
||||
lexical: 0.28.0
|
||||
'@lexical/utils': 0.25.0
|
||||
lexical: 0.25.0
|
||||
prismjs: 1.30.0
|
||||
|
||||
'@lexical/devtools-core@0.28.0(react-dom@19.0.0(react@19.0.0))(react@19.0.0)':
|
||||
'@lexical/devtools-core@0.25.0(react-dom@19.0.0(react@19.0.0))(react@19.0.0)':
|
||||
dependencies:
|
||||
'@lexical/html': 0.28.0
|
||||
'@lexical/link': 0.28.0
|
||||
'@lexical/mark': 0.28.0
|
||||
'@lexical/table': 0.28.0
|
||||
'@lexical/utils': 0.28.0
|
||||
lexical: 0.28.0
|
||||
'@lexical/html': 0.25.0
|
||||
'@lexical/link': 0.25.0
|
||||
'@lexical/mark': 0.25.0
|
||||
'@lexical/table': 0.25.0
|
||||
'@lexical/utils': 0.25.0
|
||||
lexical: 0.25.0
|
||||
react: 19.0.0
|
||||
react-dom: 19.0.0(react@19.0.0)
|
||||
|
||||
'@lexical/dragon@0.28.0':
|
||||
'@lexical/dragon@0.25.0':
|
||||
dependencies:
|
||||
lexical: 0.28.0
|
||||
lexical: 0.25.0
|
||||
|
||||
'@lexical/hashtag@0.28.0':
|
||||
'@lexical/hashtag@0.25.0':
|
||||
dependencies:
|
||||
'@lexical/utils': 0.28.0
|
||||
lexical: 0.28.0
|
||||
'@lexical/utils': 0.25.0
|
||||
lexical: 0.25.0
|
||||
|
||||
'@lexical/history@0.28.0':
|
||||
'@lexical/history@0.25.0':
|
||||
dependencies:
|
||||
'@lexical/utils': 0.28.0
|
||||
lexical: 0.28.0
|
||||
'@lexical/utils': 0.25.0
|
||||
lexical: 0.25.0
|
||||
|
||||
'@lexical/html@0.28.0':
|
||||
'@lexical/html@0.25.0':
|
||||
dependencies:
|
||||
'@lexical/selection': 0.28.0
|
||||
'@lexical/utils': 0.28.0
|
||||
lexical: 0.28.0
|
||||
'@lexical/selection': 0.25.0
|
||||
'@lexical/utils': 0.25.0
|
||||
lexical: 0.25.0
|
||||
|
||||
'@lexical/link@0.28.0':
|
||||
'@lexical/link@0.25.0':
|
||||
dependencies:
|
||||
'@lexical/utils': 0.28.0
|
||||
lexical: 0.28.0
|
||||
'@lexical/utils': 0.25.0
|
||||
lexical: 0.25.0
|
||||
|
||||
'@lexical/list@0.28.0':
|
||||
'@lexical/list@0.25.0':
|
||||
dependencies:
|
||||
'@lexical/selection': 0.28.0
|
||||
'@lexical/utils': 0.28.0
|
||||
lexical: 0.28.0
|
||||
'@lexical/utils': 0.25.0
|
||||
lexical: 0.25.0
|
||||
|
||||
'@lexical/mark@0.28.0':
|
||||
'@lexical/mark@0.25.0':
|
||||
dependencies:
|
||||
'@lexical/utils': 0.28.0
|
||||
lexical: 0.28.0
|
||||
'@lexical/utils': 0.25.0
|
||||
lexical: 0.25.0
|
||||
|
||||
'@lexical/markdown@0.28.0':
|
||||
'@lexical/markdown@0.25.0':
|
||||
dependencies:
|
||||
'@lexical/code': 0.28.0
|
||||
'@lexical/link': 0.28.0
|
||||
'@lexical/list': 0.28.0
|
||||
'@lexical/rich-text': 0.28.0
|
||||
'@lexical/text': 0.28.0
|
||||
'@lexical/utils': 0.28.0
|
||||
lexical: 0.28.0
|
||||
'@lexical/code': 0.25.0
|
||||
'@lexical/link': 0.25.0
|
||||
'@lexical/list': 0.25.0
|
||||
'@lexical/rich-text': 0.25.0
|
||||
'@lexical/text': 0.25.0
|
||||
'@lexical/utils': 0.25.0
|
||||
lexical: 0.25.0
|
||||
|
||||
'@lexical/offset@0.28.0':
|
||||
'@lexical/offset@0.25.0':
|
||||
dependencies:
|
||||
lexical: 0.28.0
|
||||
lexical: 0.25.0
|
||||
|
||||
'@lexical/overflow@0.28.0':
|
||||
'@lexical/overflow@0.25.0':
|
||||
dependencies:
|
||||
lexical: 0.28.0
|
||||
lexical: 0.25.0
|
||||
|
||||
'@lexical/plain-text@0.28.0':
|
||||
'@lexical/plain-text@0.25.0':
|
||||
dependencies:
|
||||
'@lexical/clipboard': 0.28.0
|
||||
'@lexical/selection': 0.28.0
|
||||
'@lexical/utils': 0.28.0
|
||||
lexical: 0.28.0
|
||||
'@lexical/clipboard': 0.25.0
|
||||
'@lexical/selection': 0.25.0
|
||||
'@lexical/utils': 0.25.0
|
||||
lexical: 0.25.0
|
||||
|
||||
'@lexical/react@0.28.0(react-dom@19.0.0(react@19.0.0))(react@19.0.0)(yjs@13.6.24)':
|
||||
'@lexical/react@0.25.0(react-dom@19.0.0(react@19.0.0))(react@19.0.0)(yjs@13.6.24)':
|
||||
dependencies:
|
||||
'@lexical/devtools-core': 0.28.0(react-dom@19.0.0(react@19.0.0))(react@19.0.0)
|
||||
'@lexical/dragon': 0.28.0
|
||||
'@lexical/hashtag': 0.28.0
|
||||
'@lexical/history': 0.28.0
|
||||
'@lexical/link': 0.28.0
|
||||
'@lexical/list': 0.28.0
|
||||
'@lexical/mark': 0.28.0
|
||||
'@lexical/markdown': 0.28.0
|
||||
'@lexical/overflow': 0.28.0
|
||||
'@lexical/plain-text': 0.28.0
|
||||
'@lexical/rich-text': 0.28.0
|
||||
'@lexical/table': 0.28.0
|
||||
'@lexical/text': 0.28.0
|
||||
'@lexical/utils': 0.28.0
|
||||
'@lexical/yjs': 0.28.0(yjs@13.6.24)
|
||||
lexical: 0.28.0
|
||||
'@lexical/clipboard': 0.25.0
|
||||
'@lexical/code': 0.25.0
|
||||
'@lexical/devtools-core': 0.25.0(react-dom@19.0.0(react@19.0.0))(react@19.0.0)
|
||||
'@lexical/dragon': 0.25.0
|
||||
'@lexical/hashtag': 0.25.0
|
||||
'@lexical/history': 0.25.0
|
||||
'@lexical/link': 0.25.0
|
||||
'@lexical/list': 0.25.0
|
||||
'@lexical/mark': 0.25.0
|
||||
'@lexical/markdown': 0.25.0
|
||||
'@lexical/overflow': 0.25.0
|
||||
'@lexical/plain-text': 0.25.0
|
||||
'@lexical/rich-text': 0.25.0
|
||||
'@lexical/selection': 0.25.0
|
||||
'@lexical/table': 0.25.0
|
||||
'@lexical/text': 0.25.0
|
||||
'@lexical/utils': 0.25.0
|
||||
'@lexical/yjs': 0.25.0(yjs@13.6.24)
|
||||
lexical: 0.25.0
|
||||
react: 19.0.0
|
||||
react-dom: 19.0.0(react@19.0.0)
|
||||
react-error-boundary: 3.1.4(react@19.0.0)
|
||||
transitivePeerDependencies:
|
||||
- yjs
|
||||
|
||||
'@lexical/rich-text@0.28.0':
|
||||
'@lexical/rich-text@0.25.0':
|
||||
dependencies:
|
||||
'@lexical/clipboard': 0.28.0
|
||||
'@lexical/selection': 0.28.0
|
||||
'@lexical/utils': 0.28.0
|
||||
lexical: 0.28.0
|
||||
'@lexical/clipboard': 0.25.0
|
||||
'@lexical/selection': 0.25.0
|
||||
'@lexical/utils': 0.25.0
|
||||
lexical: 0.25.0
|
||||
|
||||
'@lexical/selection@0.28.0':
|
||||
'@lexical/selection@0.25.0':
|
||||
dependencies:
|
||||
lexical: 0.28.0
|
||||
lexical: 0.25.0
|
||||
|
||||
'@lexical/table@0.28.0':
|
||||
'@lexical/table@0.25.0':
|
||||
dependencies:
|
||||
'@lexical/clipboard': 0.28.0
|
||||
'@lexical/utils': 0.28.0
|
||||
lexical: 0.28.0
|
||||
'@lexical/clipboard': 0.25.0
|
||||
'@lexical/utils': 0.25.0
|
||||
lexical: 0.25.0
|
||||
|
||||
'@lexical/text@0.28.0':
|
||||
'@lexical/text@0.25.0':
|
||||
dependencies:
|
||||
lexical: 0.28.0
|
||||
lexical: 0.25.0
|
||||
|
||||
'@lexical/utils@0.28.0':
|
||||
'@lexical/utils@0.25.0':
|
||||
dependencies:
|
||||
'@lexical/list': 0.28.0
|
||||
'@lexical/selection': 0.28.0
|
||||
'@lexical/table': 0.28.0
|
||||
lexical: 0.28.0
|
||||
'@lexical/list': 0.25.0
|
||||
'@lexical/selection': 0.25.0
|
||||
'@lexical/table': 0.25.0
|
||||
lexical: 0.25.0
|
||||
|
||||
'@lexical/yjs@0.28.0(yjs@13.6.24)':
|
||||
'@lexical/yjs@0.25.0(yjs@13.6.24)':
|
||||
dependencies:
|
||||
'@lexical/offset': 0.28.0
|
||||
'@lexical/selection': 0.28.0
|
||||
lexical: 0.28.0
|
||||
'@lexical/offset': 0.25.0
|
||||
'@lexical/selection': 0.25.0
|
||||
lexical: 0.25.0
|
||||
yjs: 13.6.24
|
||||
|
||||
'@mdi/js@7.4.47': {}
|
||||
|
@ -5560,7 +5562,7 @@ snapshots:
|
|||
xtend: 4.0.2
|
||||
optional: true
|
||||
|
||||
lexical@0.28.0: {}
|
||||
lexical@0.25.0: {}
|
||||
|
||||
lib0@0.2.99:
|
||||
dependencies:
|
||||
|
|
|
@ -1,5 +1,3 @@
|
|||
"use client";
|
||||
|
||||
import * as Y from "yjs";
|
||||
import { type InitialConfigType, LexicalComposer } from "@lexical/react/LexicalComposer";
|
||||
import { PlainTextPlugin } from "@lexical/react/LexicalPlainTextPlugin";
|
||||
|
@ -47,7 +45,7 @@ export function Editor(props: { noteId: NoteId }) {
|
|||
<CollaborationPlugin
|
||||
id={`note-${props.noteId}`}
|
||||
providerFactory={providerFactory}
|
||||
shouldBootstrap={false}
|
||||
shouldBootstrap={true}
|
||||
excludedProperties={excludedProperties}
|
||||
/>
|
||||
|
||||
|
|
|
@ -1,6 +1,4 @@
|
|||
import { $isRootOrShadowRoot, ElementNode, LexicalNode } from "lexical";
|
||||
import { $isLinkUrlNode } from "./nodes/link_node";
|
||||
import { $isTermNode } from "./nodes/term_node";
|
||||
import { $isRootOrShadowRoot, ElementNode, type LexicalNode } from "lexical";
|
||||
|
||||
export function findBlockNode(node: LexicalNode): LexicalNode {
|
||||
let toCheck: LexicalNode | null = node
|
||||
|
|
|
@ -16,9 +16,8 @@ export function useYDoc(name: string) {
|
|||
const indexeddbProvider = new IndexeddbPersistence(name, ydoc);
|
||||
setIndexeddbProvider(indexeddbProvider);
|
||||
|
||||
const websocketProvider = new WebsocketProvider(`ws://localhost:1234`, name, ydoc, {
|
||||
// TODO: For now we don't connect to a server yet, this obviously needs to change.
|
||||
connect: false,
|
||||
const websocketProvider = new WebsocketProvider(`/sync`, name, ydoc, {
|
||||
connect: true,
|
||||
});
|
||||
setWebsocketProvider(websocketProvider);
|
||||
|
||||
|
|
|
@ -5,7 +5,7 @@ import {
|
|||
createRouter,
|
||||
} from '@tanstack/react-router'
|
||||
|
||||
import '~/styles.css'
|
||||
import "~/styles/global.css"
|
||||
import reportWebVitals from '~/reportWebVitals.ts'
|
||||
|
||||
// Import the generated route tree
|
||||
|
|
142
frontend/src/styles/editor.css
Normal file
|
@ -0,0 +1,142 @@
|
|||
.editor-content {
|
||||
@apply text-wrap;
|
||||
|
||||
p {
|
||||
@apply my-2;
|
||||
}
|
||||
|
||||
.header {
|
||||
@apply mt-4 mb-2 text-blue-200 font-bold;
|
||||
}
|
||||
|
||||
.header.header-1 {
|
||||
@apply text-3xl;
|
||||
}
|
||||
|
||||
.header.header-2 {
|
||||
@apply text-2xl;
|
||||
}
|
||||
|
||||
.header.header-3 {
|
||||
@apply text-xl;
|
||||
}
|
||||
|
||||
.header.header-4 {
|
||||
@apply text-lg;
|
||||
}
|
||||
|
||||
.header-marker {
|
||||
font-size: 0;
|
||||
display: inline-block;
|
||||
height: 0;
|
||||
}
|
||||
|
||||
.header.focus > .header-marker {
|
||||
font-size: unset;
|
||||
height: unset;
|
||||
}
|
||||
|
||||
.formatted-text {
|
||||
@apply inline;
|
||||
}
|
||||
|
||||
.formatted-text-marker {
|
||||
font-size: 0;
|
||||
display: inline-block;
|
||||
height: 0;
|
||||
}
|
||||
|
||||
.formatted-text.focus > .formatted-text-marker {
|
||||
font-size: unset;
|
||||
height: unset;
|
||||
}
|
||||
|
||||
.formatted-text.format-bold {
|
||||
@apply font-bold text-cyan-200;
|
||||
}
|
||||
|
||||
.formatted-text.format-italic {
|
||||
@apply italic text-lime-200;
|
||||
}
|
||||
|
||||
.formatted-text.format-underline {
|
||||
@apply underline text-purple-200;
|
||||
}
|
||||
|
||||
.formatted-text.format-strikethrough {
|
||||
@apply line-through text-red-200;
|
||||
}
|
||||
|
||||
.formatted-text.format-code {
|
||||
@apply font-mono;
|
||||
}
|
||||
|
||||
.task[data-task-type=TODO] .task-marker {
|
||||
@apply text-green-400;
|
||||
}
|
||||
|
||||
.task[data-task-type=DOING] .task-marker {
|
||||
@apply text-yellow-400;
|
||||
}
|
||||
|
||||
.task[data-task-type=DOING] .task-icon {
|
||||
@apply bg-yellow-400 text-black border-black;
|
||||
}
|
||||
|
||||
.task[data-task-type=DONE] .task-marker {
|
||||
@apply line-through;
|
||||
}
|
||||
|
||||
.task[data-task-type=DONE]:not(.focus) {
|
||||
@apply line-through;
|
||||
}
|
||||
|
||||
.task[data-task-type=DONE] .task-icon {
|
||||
@apply bg-green-400 text-black border-black;
|
||||
}
|
||||
|
||||
.task[data-task-type=IDEA] .task-marker {
|
||||
@apply text-fuchsia-400;
|
||||
}
|
||||
|
||||
.task[data-task-type=IDEA] .task-icon {
|
||||
@apply bg-fuchsia-400 text-black border-black;
|
||||
}
|
||||
|
||||
.task[data-task-type=DEADLINE] .task-marker {
|
||||
@apply text-red-400;
|
||||
}
|
||||
|
||||
.task[data-task-type=DEADLINE] .task-icon {
|
||||
@apply bg-red-400 text-black border-black;
|
||||
}
|
||||
|
||||
.link {
|
||||
@apply text-teal-300 underline inline;
|
||||
}
|
||||
|
||||
.link-marker,.link-url {
|
||||
font-size: 0;
|
||||
display: inline-block;
|
||||
height: 0;
|
||||
}
|
||||
|
||||
.link.focus > .link-marker, .link.focus > .link-url {
|
||||
font-size: unset;
|
||||
height: unset;
|
||||
}
|
||||
|
||||
.term {
|
||||
@apply text-blue-300 inline;
|
||||
}
|
||||
|
||||
.term-marker {
|
||||
font-size: 0;
|
||||
height: 0;
|
||||
}
|
||||
|
||||
.term.focus > .term-marker {
|
||||
font-size: unset;
|
||||
height: unset;
|
||||
}
|
||||
}
|
|
@ -1,6 +1,8 @@
|
|||
@import "tailwindcss";
|
||||
@import "tw-animate-css";
|
||||
|
||||
@import "./editor.css";
|
||||
|
||||
@custom-variant dark (&:is([data-theme="dark"] *));
|
||||
|
||||
body {
|