From 2a96243474027379ca50520868c10973ecbf0ef0 Mon Sep 17 00:00:00 2001 From: Tienson Qin Date: Sat, 27 Dec 2025 06:36:33 +0800 Subject: [PATCH] add publish worker --- .gitignore | 2 + deps/publish/README.md | 6 +- deps/publish/worker/README.md | 38 ++ .../publish/worker/scripts/clear_dev_state.sh | 12 + deps/publish/worker/scripts/dev_test.sh | 32 ++ deps/publish/worker/src/index.js | 501 ++++++++++++++++++ deps/publish/worker/wrangler.toml | 25 + src/main/frontend/config.cljs | 4 + src/main/frontend/handler/publish.cljs | 68 ++- src/main/frontend/publish/client.cljs | 96 ++++ 10 files changed, 777 insertions(+), 7 deletions(-) create mode 100644 deps/publish/worker/README.md create mode 100755 deps/publish/worker/scripts/clear_dev_state.sh create mode 100755 deps/publish/worker/scripts/dev_test.sh create mode 100644 deps/publish/worker/src/index.js create mode 100644 deps/publish/worker/wrangler.toml create mode 100644 src/main/frontend/publish/client.cljs diff --git a/.gitignore b/.gitignore index 9efc3a3ad0..8bdd393102 100644 --- a/.gitignore +++ b/.gitignore @@ -67,6 +67,8 @@ packages/ui/.storybook/cljs deps/shui/.lsp deps/shui/.lsp-cache deps/shui/.clj-kondo +deps/publish/worker/.wrangler + tx-log* clj-e2e/.wally clj-e2e/resources diff --git a/deps/publish/README.md b/deps/publish/README.md index 46012a31ca..452a458ea8 100644 --- a/deps/publish/README.md +++ b/deps/publish/README.md @@ -3,7 +3,11 @@ Shared library for page publishing (snapshot payloads, SSR helpers, shared schemas, and storage contracts). The Cloudflare Durable Object implementation is expected to use SQLite with the -Logseq datascript fork layered on top. +Logseq datascript fork layered on top. Page publish payloads are expected to +send datoms (transit) so the DO can reconstruct/query datascript state. + +See `deps/publish/worker` for a Cloudflare Worker skeleton that stores transit +blobs in R2 and metadata in a SQLite-backed Durable Object. 
## API diff --git a/deps/publish/worker/README.md b/deps/publish/worker/README.md new file mode 100644 index 0000000000..a0788b1f23 --- /dev/null +++ b/deps/publish/worker/README.md @@ -0,0 +1,38 @@ +## Cloudflare Publish Worker (Skeleton) + +This worker accepts publish payloads and stores transit blobs in R2 while keeping +metadata in a Durable Object backed by SQLite. + +### Bindings + +- `PUBLISH_META_DO`: Durable Object namespace +- `PUBLISH_R2`: R2 bucket +- `R2_ACCOUNT_ID`: Cloudflare account id for signing +- `R2_BUCKET`: R2 bucket name for signing +- `R2_ACCESS_KEY_ID`: R2 access key for signing +- `R2_SECRET_ACCESS_KEY`: R2 secret key for signing +- `COGNITO_JWKS_URL`: JWKS URL for Cognito user pool +- `COGNITO_ISSUER`: Cognito issuer URL +- `COGNITO_CLIENT_ID`: Cognito client ID +- `DEV_SKIP_AUTH`: set to `true` to bypass JWT verification in local dev + +### Routes + +- `POST /pages` + - Requires `Authorization: Bearer ` + - Requires `x-publish-meta` header (JSON) + - Body is transit payload (stored in R2 as-is) +- `GET /pages/:page-uuid` + - Returns metadata for the page +- `GET /pages/:page-uuid/transit` + - Returns JSON with a signed R2 URL and `etag` +- `GET /pages` + - Lists metadata entries (from the index DO) + +### Notes + +- This is a starter implementation. Integrate with your deployment tooling + (wrangler, etc.) as needed. +- For local testing, run `wrangler dev` and use `deps/publish/worker/scripts/dev_test.sh`. +- If you switch schema versions, clear local DO state with + `deps/publish/worker/scripts/clear_dev_state.sh`. 
diff --git a/deps/publish/worker/scripts/clear_dev_state.sh b/deps/publish/worker/scripts/clear_dev_state.sh new file mode 100755 index 0000000000..26006a1544 --- /dev/null +++ b/deps/publish/worker/scripts/clear_dev_state.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash +set -euo pipefail + +GRAPH_UUID=${GRAPH_UUID:-"00000000-0000-0000-0000-000000000000"} + +cat < b.toString(16).padStart(2, "0")) + .join(""); +} + +async function sha256Hex(message) { + const data = new TextEncoder().encode(message); + const digest = await crypto.subtle.digest("SHA-256", data); + return toHex(digest); +} + +async function hmacSha256(key, message) { + const cryptoKey = await crypto.subtle.importKey( + "raw", + key, + { name: "HMAC", hash: "SHA-256" }, + false, + ["sign"] + ); + return crypto.subtle.sign("HMAC", cryptoKey, message); +} + +function encodeRfc3986(value) { + return encodeURIComponent(value).replace(/[!'()*]/g, (c) => + `%${c.charCodeAt(0).toString(16).toUpperCase()}` + ); +} + +function encodePath(path) { + return path + .split("/") + .map((part) => encodeRfc3986(part)) + .join("/"); +} + +async function getSignatureKey(secret, dateStamp, region, service) { + const kDate = await hmacSha256( + new TextEncoder().encode(`AWS4${secret}`), + new TextEncoder().encode(dateStamp) + ); + const kRegion = await hmacSha256(kDate, new TextEncoder().encode(region)); + const kService = await hmacSha256(kRegion, new TextEncoder().encode(service)); + return hmacSha256(kService, new TextEncoder().encode("aws4_request")); +} + +async function presignR2Url(r2Key, env, expiresSeconds = 300) { + const region = "auto"; + const service = "s3"; + const host = `${env.R2_ACCOUNT_ID}.r2.cloudflarestorage.com`; + const bucket = env.R2_BUCKET; + const method = "GET"; + const now = new Date(); + const amzDate = now + .toISOString() + .replace(/[:-]|\.\d{3}/g, ""); + const dateStamp = amzDate.slice(0, 8); + const credentialScope = `${dateStamp}/${region}/${service}/aws4_request`; + + const params = [ + 
["X-Amz-Algorithm", "AWS4-HMAC-SHA256"], + ["X-Amz-Credential", `${env.R2_ACCESS_KEY_ID}/${credentialScope}`], + ["X-Amz-Date", amzDate], + ["X-Amz-Expires", String(expiresSeconds)], + ["X-Amz-SignedHeaders", "host"], + ]; + params.sort((a, b) => (a[0] < b[0] ? -1 : 1)); + const canonicalQueryString = params + .map(([k, v]) => `${encodeRfc3986(k)}=${encodeRfc3986(v)}`) + .join("&"); + + const canonicalUri = `/${bucket}/${encodePath(r2Key)}`; + const canonicalHeaders = `host:${host}\n`; + const signedHeaders = "host"; + const payloadHash = "UNSIGNED-PAYLOAD"; + const canonicalRequest = [ + method, + canonicalUri, + canonicalQueryString, + canonicalHeaders, + signedHeaders, + payloadHash, + ].join("\n"); + + const stringToSign = [ + "AWS4-HMAC-SHA256", + amzDate, + credentialScope, + await sha256Hex(canonicalRequest), + ].join("\n"); + + const signingKey = await getSignatureKey( + env.R2_SECRET_ACCESS_KEY, + dateStamp, + region, + service + ); + const signature = toHex(await hmacSha256(signingKey, new TextEncoder().encode(stringToSign))); + const signedQuery = `${canonicalQueryString}&X-Amz-Signature=${signature}`; + + return `https://${host}${canonicalUri}?${signedQuery}`; +} + +function decodeJwtPart(part) { + const bytes = base64UrlToUint8Array(part); + return JSON.parse(textDecoder.decode(bytes)); +} + +async function importRsaKey(jwk) { + return crypto.subtle.importKey( + "jwk", + jwk, + { + name: "RSASSA-PKCS1-v1_5", + hash: "SHA-256", + }, + false, + ["verify"] + ); +} + +async function verifyJwt(token, env) { + const parts = token.split("."); + if (parts.length !== 3) { + return null; + } + const [headerPart, payloadPart, signaturePart] = parts; + const header = decodeJwtPart(headerPart); + const payload = decodeJwtPart(payloadPart); + + if (payload.iss !== env.COGNITO_ISSUER) { + return null; + } + if (payload.aud !== env.COGNITO_CLIENT_ID) { + return null; + } + const now = Math.floor(Date.now() / 1000); + if (payload.exp && payload.exp < now) { + return 
/**
 * POST /pages — store a publish payload.
 *
 * Auth: `Authorization: Bearer <jwt>` verified against Cognito, unless
 * DEV_SKIP_AUTH === "true" (local dev). Metadata arrives in the
 * `x-publish-meta` JSON header; the raw transit body is written to R2 under a
 * content-addressed key so identical payloads share one object. The metadata
 * record is then written to the per-page Durable Object and mirrored into the
 * shared "index" DO used by GET /pages.
 *
 * Returns 200 {page_uuid, r2_key, updated_at}, 400 on malformed/missing
 * metadata, 401 when auth fails, 500 when the per-page DO write fails.
 */
async function handlePostPages(request, env) {
  const authHeader = request.headers.get("authorization") || "";
  const token = authHeader.startsWith("Bearer ") ? authHeader.slice(7) : null;
  const devSkipAuth = env.DEV_SKIP_AUTH === "true";
  if (!token && !devSkipAuth) {
    return unauthorized();
  }

  const claims = devSkipAuth ? { sub: "dev" } : await verifyJwt(token, env);
  if (!claims && !devSkipAuth) {
    return unauthorized();
  }

  const metaHeader = request.headers.get("x-publish-meta");
  if (!metaHeader) {
    return badRequest("missing x-publish-meta header");
  }

  let meta;
  try {
    meta = JSON.parse(metaHeader);
  } catch (_err) {
    return badRequest("invalid x-publish-meta header");
  }

  if (!meta["publish/content-hash"] || !meta["publish/graph"] || !meta["page-uuid"]) {
    return badRequest("missing publish metadata");
  }

  const body = await request.arrayBuffer();
  // Content-addressed key: re-publishing unchanged content is a no-op in R2.
  const r2Key = `publish/${meta["publish/graph"]}/${meta["publish/content-hash"]}.transit`;

  const existing = await env.PUBLISH_R2.head(r2Key);
  if (!existing) {
    await env.PUBLISH_R2.put(r2Key, body, {
      httpMetadata: {
        contentType: "application/transit+json",
      },
    });
  }

  // Build the record once so the per-page DO, the index DO, and the HTTP
  // response all carry the same `updated_at`. (The original serialized the
  // record twice and called Date.now() three times, so the three copies
  // could disagree by a few milliseconds.)
  const updatedAt = Date.now();
  const recordBody = JSON.stringify({
    ...meta,
    r2_key: r2Key,
    owner_sub: claims.sub,
    updated_at: updatedAt,
  });
  const postOpts = {
    method: "POST",
    headers: { "content-type": "application/json" },
    body: recordBody,
  };

  const doStub = env.PUBLISH_META_DO.get(env.PUBLISH_META_DO.idFromName(meta["page-uuid"]));
  const metaResponse = await doStub.fetch("https://publish/pages", postOpts);
  if (!metaResponse.ok) {
    return jsonResponse({ error: "metadata store failed" }, 500);
  }

  // Mirror into the shared "index" DO (best-effort: result intentionally
  // unchecked, matching the original behavior).
  const indexStub = env.PUBLISH_META_DO.get(env.PUBLISH_META_DO.idFromName("index"));
  await indexStub.fetch("https://publish/pages", postOpts);

  return jsonResponse({
    page_uuid: meta["page-uuid"],
    r2_key: r2Key,
    updated_at: updatedAt,
  });
}

/**
 * GET /pages/:page-uuid — return stored metadata for one page.
 *
 * Honors `If-None-Match` against the publish content hash and answers 304
 * (with the etag header) when the caller's copy is current.
 */
async function handleGetPage(request, env) {
  const url = new URL(request.url);
  const pageUuid = url.pathname.split("/")[2];
  if (!pageUuid) {
    return badRequest("missing page uuid");
  }
  const doStub = env.PUBLISH_META_DO.get(env.PUBLISH_META_DO.idFromName(pageUuid));
  const metaResponse = await doStub.fetch(`https://publish/pages/${pageUuid}`);
  if (!metaResponse.ok) {
    return jsonResponse({ error: "not found" }, 404);
  }
  const meta = await metaResponse.json();
  const etag = meta["publish/content-hash"];
  const ifNoneMatch = request.headers.get("if-none-match");
  // Strip quotes so both `"<hash>"` and a bare hash validator match.
  if (etag && ifNoneMatch && ifNoneMatch.replace(/"/g, "") === etag) {
    return new Response(null, {
      status: 304,
      headers: { etag },
    });
  }
  return jsonResponse(meta, 200);
}
/** GET /pages — proxy the listing request to the shared "index" DO. */
async function handleListPages(request, env) {
  const indexStub = env.PUBLISH_META_DO.get(env.PUBLISH_META_DO.idFromName("index"));
  const metaResponse = await indexStub.fetch("https://publish/pages", {
    method: "GET",
  });
  if (!metaResponse.ok) {
    return jsonResponse({ error: "not found" }, 404);
  }
  return jsonResponse(await metaResponse.json(), 200);
}

/** Worker entry point: routes /pages requests to the handlers above. */
export default {
  async fetch(request, env) {
    const url = new URL(request.url);
    if (url.pathname === "/pages" && request.method === "POST") {
      return handlePostPages(request, env);
    }
    if (url.pathname === "/pages" && request.method === "GET") {
      return handleListPages(request, env);
    }
    if (url.pathname.startsWith("/pages/") && request.method === "GET") {
      // "/pages/:uuid" → metadata; "/pages/:uuid/transit" → signed R2 URL.
      const parts = url.pathname.split("/");
      if (parts[3] === "transit") {
        return handleGetPageTransit(request, env);
      }
      return handleGetPage(request, env);
    }
    return jsonResponse({ error: "not found" }, 404);
  },
};

/**
 * SQLite-backed Durable Object holding publish metadata.
 *
 * Used in two roles: one instance per page-uuid, plus one shared "index"
 * instance that accumulates every published page for GET /pages listings.
 */
export class PublishMetaDO extends DurableObject {
  constructor(state, env) {
    super(state, env);
    this.state = state;
    this.env = env;
    this.sql = state.storage.sql;
    // Schema is created lazily on the first request to this instance.
    this.schemaReady = false;
  }

  async initSchema() {
    // Run at most once per instance; the original re-executed the PRAGMA and
    // CREATE TABLE on every single fetch.
    if (this.schemaReady) {
      return;
    }
    // Drop the pre-v2 table (keyed by numeric page_id) so the new
    // (graph, page_uuid) schema can be created in its place.
    const cols = getSqlRows(this.sql.exec("PRAGMA table_info(pages);"));
    if (cols.some((col) => col.name === "page_id")) {
      this.sql.exec("DROP TABLE IF EXISTS pages;");
    }
    this.sql.exec(`
      CREATE TABLE IF NOT EXISTS pages (
        page_uuid TEXT NOT NULL,
        graph TEXT NOT NULL,
        schema_version TEXT,
        block_count INTEGER,
        content_hash TEXT NOT NULL,
        content_length INTEGER,
        r2_key TEXT NOT NULL,
        owner_sub TEXT,
        created_at INTEGER,
        updated_at INTEGER,
        PRIMARY KEY (graph, page_uuid)
      );
    `);
    this.schemaReady = true;
  }

  async fetch(request) {
    await this.initSchema();
    if (request.method === "POST") {
      return this.upsertPage(await request.json());
    }
    if (request.method === "GET") {
      const pageUuid = new URL(request.url).pathname.split("/")[2];
      return pageUuid ? this.getPage(pageUuid) : this.listPages();
    }
    return jsonResponse({ error: "method not allowed" }, 405);
  }

  /**
   * Insert or update one page row. On conflict, created_at is preserved and
   * every other column is refreshed. (The original also set
   * page_uuid=excluded.page_uuid, a no-op since page_uuid is part of the
   * conflict key — removed.)
   */
  upsertPage(body) {
    this.sql.exec(
      `
      INSERT INTO pages (
        page_uuid, graph, schema_version, block_count, content_hash,
        content_length, r2_key, owner_sub, created_at, updated_at
      ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
      ON CONFLICT(graph, page_uuid) DO UPDATE SET
        schema_version=excluded.schema_version,
        block_count=excluded.block_count,
        content_hash=excluded.content_hash,
        content_length=excluded.content_length,
        r2_key=excluded.r2_key,
        owner_sub=excluded.owner_sub,
        updated_at=excluded.updated_at;
      `,
      body["page-uuid"],
      body["publish/graph"],
      body["schema-version"],
      body["block-count"],
      body["publish/content-hash"],
      body["publish/content-length"],
      body["r2_key"],
      body["owner_sub"],
      body["publish/created-at"],
      body["updated_at"]
    );
    return jsonResponse({ ok: true });
  }

  /** Mirror the hash/length columns under their publish/* namespaced keys. */
  static rowToMeta(row) {
    return {
      ...row,
      "publish/content-hash": row.content_hash,
      "publish/content-length": row.content_length,
    };
  }

  /** Fetch one page's metadata row, or a 404 JSON response. */
  getPage(pageUuid) {
    const rows = getSqlRows(
      this.sql.exec(
        `
        SELECT page_uuid, graph, schema_version, block_count,
               content_hash, content_length, r2_key, owner_sub, created_at, updated_at
        FROM pages WHERE page_uuid = ? LIMIT 1;
        `,
        pageUuid
      )
    );
    const row = rows[0];
    if (!row) {
      return jsonResponse({ error: "not found" }, 404);
    }
    return jsonResponse(PublishMetaDO.rowToMeta(row));
  }

  /** List every stored page, newest first. */
  listPages() {
    const rows = getSqlRows(
      this.sql.exec(`
        SELECT page_uuid, graph, schema_version, block_count,
               content_hash, content_length, r2_key, owner_sub, created_at, updated_at
        FROM pages ORDER BY updated_at DESC;
      `)
    );
    return jsonResponse({
      pages: rows.map((row) => PublishMetaDO.rowToMeta(row)),
    });
  }
}
"https://logseq-prod.auth.us-east-1.amazoncognito.com/login?client_id=3c7np6bjtb4r1k1bi9i049ops5&response_type=code&scope=email+openid+phone&redirect_uri=logseq%3A%2F%2Fauth-callback") (def API-DOMAIN "api.logseq.com") + (def PUBLISH-API-DOMAIN "publish.logseq.com") + (def PUBLISH-API-BASE (str "https://" PUBLISH-API-DOMAIN)) (def COGNITO-IDP "https://cognito-idp.us-east-1.amazonaws.com/") (def COGNITO-CLIENT-ID "69cs1lgme7p8kbgld8n5kseii6") (def REGION "us-east-1") @@ -39,6 +41,8 @@ (do (def LOGIN-URL "https://logseq-test2.auth.us-east-2.amazoncognito.com/login?client_id=3ji1a0059hspovjq5fhed3uil8&response_type=code&scope=email+openid+phone&redirect_uri=logseq%3A%2F%2Fauth-callback") (def API-DOMAIN "api-dev.logseq.com") + (def PUBLISH-API-DOMAIN "publish-dev.logseq.com") + (def PUBLISH-API-BASE (str "https://" PUBLISH-API-DOMAIN)) (def COGNITO-IDP "https://cognito-idp.us-east-2.amazonaws.com/") (def COGNITO-CLIENT-ID "1qi1uijg8b6ra70nejvbptis0q") (def REGION "us-east-2") diff --git a/src/main/frontend/handler/publish.cljs b/src/main/frontend/handler/publish.cljs index 637d6a3126..9280023c9e 100644 --- a/src/main/frontend/handler/publish.cljs +++ b/src/main/frontend/handler/publish.cljs @@ -1,12 +1,15 @@ (ns frontend.handler.publish "Prepare publish payloads for pages." 
(:require [datascript.core :as d] + [frontend.config :as config] [frontend.db :as db] [frontend.handler.notification :as notification] [frontend.state :as state] + [frontend.util :as util] [logseq.db :as ldb] [logseq.db.common.entity-util :as entity-util] - [logseq.db.frontend.schema :as db-schema])) + [logseq.db.frontend.schema :as db-schema] + [promesa.core :as p])) (defn- datom->vec [datom] @@ -36,20 +39,73 @@ (map datom->vec (d/datoms db :eavt eid))) eids)] {:page (entity-util/entity->map page-entity) - :page-id (:db/id page-entity) + :page-uuid (:block/uuid page-entity) :block-count (count blocks) :schema-version (db-schema/schema-version->string db-schema/version) :datoms (vec datoms)})) +(defn- > bytes + (map (fn [b] + (.padStart (.toString b 16) 2 "0"))) + (apply str)))) + +(defn- publish-endpoint + [] + (str config/PUBLISH-API-BASE "/pages")) + +(defn- {"content-type" "application/transit+json"} + token (assoc "authorization" (str "Bearer " token)))] + (p/let [body (ldb/write-transit-str payload) + content-hash ( (ldb/get-graph-rtc-uuid (db/get-db)) str) + _ (when-not graph-uuid + (throw (ex-info "Missing graph UUID" {:repo (state/get-current-repo)}))) + publish-graph graph-uuid + publish-meta {:page-uuid (:page-uuid payload) + :block-count (:block-count payload) + :schema-version (:schema-version payload) + :publish/format :transit + :publish/compression :none + :publish/content-hash content-hash + :publish/content-length (count body) + :publish/graph publish-graph + :publish/created-at (util/time-ms)} + publish-body (assoc payload + :publish/meta publish-meta) + headers (assoc headers "x-publish-meta" (js/JSON.stringify (clj->js publish-meta))) + resp (js/fetch (publish-endpoint) + (clj->js {:method "POST" + :headers headers + :body (ldb/write-transit-str publish-body)}))] + (if (.-ok resp) + resp + (p/let [body (.text resp)] + (throw (ex-info "Publish failed" + {:status (.-status resp) + :body body}))))))) + (defn publish-page! 
- "Prepares the publish payload for a page. The upload step is stubbed for now." + "Prepares and uploads the publish payload for a page." [page] (let [repo (state/get-current-repo)] (if-let [db* (and repo (db/get-db repo))] (if (and page (:db/id page)) (let [payload (build-page-publish-datoms db* page)] - (notification/show! "Publish payload prepared." :success) - (js/console.log "Publish payload" (clj->js payload)) - payload) + (notification/show! "Publishing page..." :success) + (-> (js {:headers headers}))] + (cond + (= 304 (.-status resp)) {:status 304} + (.-ok resp) (p/let [data (.json resp)] {:status 200 :data data}) + :else (p/let [body (.text resp)] + (throw (ex-info "Publish fetch failed" {:status (.-status resp) :body body})))))) + +(defn- cache-key + [page-uuid] + (str "publish/" page-uuid)) + +(defn- get-cache + [page-uuid] + (when-let [raw (js/localStorage.getItem (cache-key page-uuid))] + (try + (js/JSON.parse raw) + (catch :default _e nil)))) + +(defn- set-cache! + [page-uuid value] + (js/localStorage.setItem (cache-key page-uuid) + (js/JSON.stringify (clj->js value)))) + +(defn } or {:status 304}. + " + [page-uuid] + (let [cached (get-cache page-uuid) + headers (cond-> {} + (and cached (.-etag cached)) + (assoc "if-none-match" (.-etag cached)))] + (p/let [resp (clj (:data resp) :keywordize-keys true) + etag (get meta :publish/content-hash)] + (set-cache! page-uuid {:etag etag :meta meta}) + {:status 200 :data meta})))) + +(defn {} + (and cached (.-etag cached)) + (assoc "if-none-match" (.-etag cached)))] + (p/let [resp (clj (:data resp) :keywordize-keys true)] + {:status 200 :data data})))) + +(defn (ldb/get-graph-rtc-uuid (state/get-current-repo)) str))