synops/maskinrommet/src/queries.rs
vegard 63630eb55a Fullfører oppgave 14.16: Presentasjonselementer som noder
Publisert tittel, ingress, OG-bilde og undertittel er nå egne noder
koblet til artikler via title/subtitle/summary/og_image-edges.
Rendering bruker presentasjonselementer med fallback til artikkelfelt.

Backend:
- Ny query: GET /query/presentation_elements?article_id=...
- render_article_to_cas henter presentasjonselementer via edges
- fetch_article + fetch_index_articles bruker pres.elementer
- Batch-henting for forsideartikler (én SQL-spørring)
- ArticleData utvides med subtitle + og_image
- Alle fire temaer viser subtitle og OG-bilde
- SEO og_image-tag fylles fra presentasjonselement

Frontend:
- PresentationEditor.svelte: opprett/rediger tittel, undertittel,
  ingress, OG-bilde med variantvelger (editorial/ai/social/rss)
- Integrert i PublishDialog via <details>-seksjon
- API-klient: fetchPresentationElements(), deleteNode()

Grunnlag for A/B-testing (oppgave 14.17): edge-metadata støtter
ab_status/impressions/clicks/ctr, best_of() prioriterer winner > testing.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-18 02:55:23 +00:00

1400 lines
44 KiB
Rust

// Tunge spørringer — lesestien via PostgreSQL med RLS.
//
// For søk, statistikk, og graf-traversering brukes PG direkte (ikke STDB).
// Alle spørringer kjøres med SET LOCAL ROLE synops_reader, som er underlagt
// RLS-policies. Brukerens node_id settes som sesjonsvariabel.
//
// Ref: docs/retninger/datalaget.md (tunge spørringer-seksjonen)
use axum::{extract::State, http::StatusCode, Json};
use axum::response::{IntoResponse, Response};
use serde::{Deserialize, Serialize};
use sqlx::PgPool;
use uuid::Uuid;
use crate::auth::AuthUser;
use crate::AppState;
use crate::intentions::ErrorResponse;
// =============================================================================
// GET /query/segments — transkripsjons-segmenter for en node
// =============================================================================
/// Query parameters for GET /query/segments.
#[derive(Deserialize)]
pub struct QuerySegmentsRequest {
    /// Node ID of the media node.
    pub node_id: Uuid,
}
/// One transcription segment row, mapped straight from Postgres.
#[derive(Serialize, sqlx::FromRow)]
pub struct SegmentResult {
    // Row id of the segment.
    pub id: i64,
    // Ordering within a transcription run (queries sort by this).
    pub seq: i32,
    // Segment start offset in milliseconds.
    pub start_ms: i32,
    // Segment end offset in milliseconds.
    pub end_ms: i32,
    // The transcribed text for this segment.
    pub content: String,
    // True when the segment has been manually edited.
    pub edited: bool,
}
/// Response for GET /query/segments: the segments of one transcription run,
/// plus the run's RFC 3339 timestamp (None when no transcription exists or
/// the node is not visible to the caller).
#[derive(Serialize)]
pub struct QuerySegmentsResponse {
    pub segments: Vec<SegmentResult>,
    pub transcribed_at: Option<String>,
}
/// GET /query/segments?node_id=...
///
/// Henter nyeste transkripsjons-segmenter for en media-node.
/// Verifiserer tilgang via RLS på nodes-tabellen først.
pub async fn query_segments(
State(state): State<AppState>,
user: AuthUser,
axum::extract::Query(params): axum::extract::Query<QuerySegmentsRequest>,
) -> Result<Json<QuerySegmentsResponse>, (StatusCode, Json<ErrorResponse>)> {
let result = run_query_segments(&state.db, user.node_id, params.node_id).await;
match result {
Ok(resp) => Ok(Json(resp)),
Err(e) => {
tracing::error!(error = %e, "query_segments feilet");
Err(internal_error("Databasefeil ved henting av segmenter"))
}
}
}
/// Shared implementation for `query_segments` and `export_srt`.
///
/// The access check runs inside an RLS transaction; the actual segment
/// reads then run on the pool without RLS — the node-level check is the
/// access gate. Invisible nodes and nodes without any transcription both
/// yield an empty response rather than an error.
async fn run_query_segments(
    db: &PgPool,
    user_node_id: Uuid,
    node_id: Uuid,
) -> Result<QuerySegmentsResponse, sqlx::Error> {
    // Verify access: under RLS, EXISTS is false both when the node is
    // missing and when the caller simply cannot see it.
    let mut tx = db.begin().await?;
    set_rls_context(&mut tx, user_node_id).await?;
    let exists = sqlx::query_scalar::<_, bool>(
        "SELECT EXISTS(SELECT 1 FROM nodes WHERE id = $1)",
    )
    .bind(node_id)
    .fetch_one(&mut *tx)
    .await?;
    tx.commit().await?;
    if !exists {
        return Ok(QuerySegmentsResponse {
            segments: vec![],
            transcribed_at: None,
        });
    }
    // Newest transcription run for this node; runs are keyed by their
    // `transcribed_at` timestamp.
    let latest: Option<(chrono::DateTime<chrono::Utc>,)> = sqlx::query_as(
        "SELECT transcribed_at FROM transcription_segments WHERE node_id = $1 ORDER BY transcribed_at DESC LIMIT 1",
    )
    .bind(node_id)
    .fetch_optional(db)
    .await?;
    let Some((transcribed_at,)) = latest else {
        return Ok(QuerySegmentsResponse {
            segments: vec![],
            transcribed_at: None,
        });
    };
    // All segments belonging to that newest run, in `seq` order.
    let segments = sqlx::query_as::<_, SegmentResult>(
        r#"
SELECT id, seq, start_ms, end_ms, content, edited
FROM transcription_segments
WHERE node_id = $1 AND transcribed_at = $2
ORDER BY seq
"#,
    )
    .bind(node_id)
    .bind(transcribed_at)
    .fetch_all(db)
    .await?;
    Ok(QuerySegmentsResponse {
        segments,
        transcribed_at: Some(transcribed_at.to_rfc3339()),
    })
}
// =============================================================================
// GET /query/transcription_versions — alle transkripsjonsversjoner for en node
// =============================================================================
/// Query parameters for GET /query/transcription_versions.
#[derive(Deserialize)]
pub struct QueryVersionsRequest {
    // Node ID of the media node.
    pub node_id: Uuid,
}
/// Summary of one transcription run for a node.
#[derive(Serialize)]
pub struct TranscriptionVersion {
    // RFC 3339 timestamp identifying the run.
    pub transcribed_at: String,
    // Total number of segments in the run.
    pub segment_count: i64,
    // Number of segments flagged as manually edited.
    pub edited_count: i64,
}
/// Response for GET /query/transcription_versions, newest run first.
#[derive(Serialize)]
pub struct QueryVersionsResponse {
    pub versions: Vec<TranscriptionVersion>,
}
/// GET /query/transcription_versions?node_id=...
///
/// Lists every transcription run for a node, sorted newest first.
pub async fn query_transcription_versions(
    State(state): State<AppState>,
    user: AuthUser,
    axum::extract::Query(params): axum::extract::Query<QueryVersionsRequest>,
) -> Result<Json<QueryVersionsResponse>, (StatusCode, Json<ErrorResponse>)> {
    // Verify access inside an RLS transaction: EXISTS is false both for
    // missing nodes and nodes the caller cannot see — both yield an empty
    // list below instead of an error.
    let mut tx = state.db.begin().await.map_err(|e| {
        tracing::error!(error = %e, "Transaksjon feilet");
        internal_error("Databasefeil")
    })?;
    set_rls_context(&mut tx, user.node_id).await.map_err(|e| {
        tracing::error!(error = %e, "RLS-kontekst feilet");
        internal_error("Databasefeil")
    })?;
    let exists = sqlx::query_scalar::<_, bool>(
        "SELECT EXISTS(SELECT 1 FROM nodes WHERE id = $1)",
    )
    .bind(params.node_id)
    .fetch_one(&mut *tx)
    .await
    .map_err(|e| {
        tracing::error!(error = %e, "Tilgangssjekk feilet");
        internal_error("Databasefeil")
    })?;
    tx.commit().await.map_err(|e| {
        tracing::error!(error = %e, "Commit feilet");
        internal_error("Databasefeil")
    })?;
    if !exists {
        return Ok(Json(QueryVersionsResponse { versions: vec![] }));
    }
    // Aggregate per run (grouped on transcribed_at). This read runs on the
    // pool without RLS — the node-level check above is the access gate.
    let rows: Vec<(chrono::DateTime<chrono::Utc>, i64, i64)> = sqlx::query_as(
        r#"
SELECT transcribed_at, COUNT(*) as segment_count,
COUNT(*) FILTER (WHERE edited) as edited_count
FROM transcription_segments
WHERE node_id = $1
GROUP BY transcribed_at
ORDER BY transcribed_at DESC
"#,
    )
    .bind(params.node_id)
    .fetch_all(&state.db)
    .await
    .map_err(|e| {
        tracing::error!(error = %e, "Feil ved henting av versjoner");
        internal_error("Databasefeil")
    })?;
    let versions = rows
        .into_iter()
        .map(|(ts, count, edited)| TranscriptionVersion {
            transcribed_at: ts.to_rfc3339(),
            segment_count: count,
            edited_count: edited,
        })
        .collect();
    Ok(Json(QueryVersionsResponse { versions }))
}
// =============================================================================
// GET /query/segments_version — segmenter for en spesifikk versjon
// =============================================================================
/// Query parameters for GET /query/segments_version.
#[derive(Deserialize)]
pub struct QuerySegmentsVersionRequest {
    // Node ID of the media node.
    pub node_id: Uuid,
    // Timestamp of the run; parsed as an RFC 3339 datetime by the handler.
    pub transcribed_at: String,
}
/// GET /query/segments_version?node_id=...&transcribed_at=...
///
/// Fetches the segments of one specific transcription run.
///
/// Returns 400 for an unparseable `transcribed_at`, an empty response when
/// the node is missing or invisible to the caller, and 500 on DB errors.
pub async fn query_segments_version(
    State(state): State<AppState>,
    user: AuthUser,
    axum::extract::Query(params): axum::extract::Query<QuerySegmentsVersionRequest>,
) -> Result<Json<QuerySegmentsResponse>, (StatusCode, Json<ErrorResponse>)> {
    // Validate input before touching the database: previously malformed
    // timestamps were only rejected after a full RLS transaction
    // round-trip. Fail fast with 400 instead.
    let ts: chrono::DateTime<chrono::Utc> = params.transcribed_at.parse()
        .map_err(|_| {
            (StatusCode::BAD_REQUEST, Json(ErrorResponse {
                error: "Ugyldig transcribed_at-tidsstempel".to_string(),
            }))
        })?;
    // Verify access inside an RLS transaction: EXISTS is false both for
    // missing nodes and nodes the caller cannot see.
    let mut tx = state.db.begin().await.map_err(|e| {
        tracing::error!(error = %e, "Transaksjon feilet");
        internal_error("Databasefeil")
    })?;
    set_rls_context(&mut tx, user.node_id).await.map_err(|e| {
        tracing::error!(error = %e, "RLS-kontekst feilet");
        internal_error("Databasefeil")
    })?;
    let exists = sqlx::query_scalar::<_, bool>(
        "SELECT EXISTS(SELECT 1 FROM nodes WHERE id = $1)",
    )
    .bind(params.node_id)
    .fetch_one(&mut *tx)
    .await
    .map_err(|e| {
        tracing::error!(error = %e, "Tilgangssjekk feilet");
        internal_error("Databasefeil")
    })?;
    tx.commit().await.map_err(|e| {
        tracing::error!(error = %e, "Commit feilet");
        internal_error("Databasefeil")
    })?;
    if !exists {
        return Ok(Json(QuerySegmentsResponse {
            segments: vec![],
            transcribed_at: None,
        }));
    }
    // Segment reads run on the pool without RLS — the node-level check
    // above is the access gate (same pattern as run_query_segments).
    let segments = sqlx::query_as::<_, SegmentResult>(
        r#"
SELECT id, seq, start_ms, end_ms, content, edited
FROM transcription_segments
WHERE node_id = $1 AND transcribed_at = $2
ORDER BY seq
"#,
    )
    .bind(params.node_id)
    .bind(ts)
    .fetch_all(&state.db)
    .await
    .map_err(|e| {
        tracing::error!(error = %e, "Feil ved henting av segmenter");
        internal_error("Databasefeil ved henting av segmenter")
    })?;
    Ok(Json(QuerySegmentsResponse {
        segments,
        transcribed_at: Some(ts.to_rfc3339()),
    }))
}
// =============================================================================
// GET /query/segments/srt — eksporter segmenter som nedlastbar SRT-fil
// =============================================================================
/// GET /query/segments/srt?node_id=...
///
/// Builds an SRT file from the newest transcription segments and returns
/// it with Content-Disposition: attachment so the browser downloads it.
pub async fn export_srt(
    State(state): State<AppState>,
    user: AuthUser,
    axum::extract::Query(params): axum::extract::Query<QuerySegmentsRequest>,
) -> Result<Response, (StatusCode, Json<ErrorResponse>)> {
    let data = match run_query_segments(&state.db, user.node_id, params.node_id).await {
        Ok(d) => d,
        Err(e) => {
            tracing::error!(error = %e, "export_srt feilet");
            return Err(internal_error("Databasefeil ved henting av segmenter"));
        }
    };
    if data.segments.is_empty() {
        let body = ErrorResponse {
            error: "Ingen transkripsjons-segmenter funnet".to_string(),
        };
        return Err((StatusCode::NOT_FOUND, Json(body)));
    }
    let headers = [
        (axum::http::header::CONTENT_TYPE, "application/x-subrip; charset=utf-8"),
        (axum::http::header::CONTENT_DISPOSITION, "attachment; filename=\"transcription.srt\""),
    ];
    Ok((StatusCode::OK, headers, segments_to_srt(&data.segments)).into_response())
}
/// Converts segments to SRT format.
///
/// SRT format:
/// ```text
/// 1
/// 00:00:00,000 --> 00:00:05,230
/// Hei og velkommen.
///
/// 2
/// 00:00:05,230 --> 00:00:10,500
/// I dag snakker vi om...
/// ```
fn segments_to_srt(segments: &[SegmentResult]) -> String {
    let mut srt = String::new();
    for (i, seg) in segments.iter().enumerate() {
        if i > 0 {
            srt.push('\n');
        }
        // SRT requires consecutive cue counters starting at 1. The stored
        // `seq` may be 0-based or have gaps (e.g. after edits), so we
        // renumber on export instead of emitting it verbatim.
        srt.push_str(&format!(
            "{}\n{} --> {}\n{}\n",
            i + 1,
            format_srt_timestamp(seg.start_ms),
            format_srt_timestamp(seg.end_ms),
            seg.content,
        ));
    }
    srt
}
/// Formats milliseconds as an SRT timestamp: HH:MM:SS,mmm
///
/// Negative inputs are clamped to zero — SRT has no representation for
/// negative times, and `ms % 1000` on a negative value would otherwise
/// leak a minus sign into the output (e.g. "00:00:00,-500").
fn format_srt_timestamp(ms: i32) -> String {
    let ms = ms.max(0);
    let total_seconds = ms / 1000;
    let millis = ms % 1000;
    let hours = total_seconds / 3600;
    let minutes = (total_seconds % 3600) / 60;
    let seconds = total_seconds % 60;
    format!("{:02}:{:02}:{:02},{:03}", hours, minutes, seconds, millis)
}
// =============================================================================
// RLS-kontekst
// =============================================================================
/// Sets up the RLS context for a transaction.
/// After this call every SELECT is filtered through node_access policies.
///
/// MUST be called inside a transaction — both SET LOCAL and
/// set_config(..., is_local := true) only last until the tx ends.
async fn set_rls_context(
    tx: &mut sqlx::Transaction<'_, sqlx::Postgres>,
    user_node_id: Uuid,
) -> Result<(), sqlx::Error> {
    // Set the user's node_id as a transaction-local session variable.
    // SET LOCAL cannot take bind parameters, so the original interpolated
    // the value with format!(). A Uuid renders safely, but parameterized
    // set_config() removes the string-building pattern entirely.
    sqlx::query("SELECT set_config('app.current_node_id', $1, true)")
        .bind(user_node_id.to_string())
        .execute(&mut **tx)
        .await?;
    // Switch to the RLS-restricted reader role. The role name is a
    // compile-time constant, so a static statement is fine here.
    sqlx::query("SET LOCAL ROLE synops_reader")
        .execute(&mut **tx)
        .await?;
    Ok(())
}
// =============================================================================
// GET /query/nodes — søk og filtrering av noder
// =============================================================================
/// Query parameters for GET /query/nodes.
#[derive(Deserialize)]
pub struct QueryNodesRequest {
    /// Free-text search over title and content. Optional.
    pub q: Option<String>,
    /// Filter on node_kind. Optional.
    pub kind: Option<String>,
    /// Max number of results. Default: 50.
    pub limit: Option<i64>,
    /// Offset for pagination. Default: 0.
    pub offset: Option<i64>,
}
/// One node row returned by GET /query/nodes.
#[derive(Serialize, sqlx::FromRow)]
pub struct QueryNodeResult {
    pub id: Uuid,
    pub node_kind: String,
    pub title: Option<String>,
    pub content: Option<String>,
    // Visibility enum cast to text in SQL (`visibility::text`).
    pub visibility: String,
    pub metadata: serde_json::Value,
    pub created_at: chrono::DateTime<chrono::Utc>,
    pub created_by: Option<Uuid>,
}
/// Response for GET /query/nodes: one page of nodes plus the total
/// match count (ignoring limit/offset) for pagination.
#[derive(Serialize)]
pub struct QueryNodesResponse {
    pub nodes: Vec<QueryNodeResult>,
    pub total: i64,
}
fn internal_error(msg: &str) -> (StatusCode, Json<ErrorResponse>) {
(
StatusCode::INTERNAL_SERVER_ERROR,
Json(ErrorResponse {
error: msg.to_string(),
}),
)
}
/// GET /query/nodes?q=...&kind=...&limit=...&offset=...
///
/// Node search and filtering. Runs under RLS — only nodes the caller is
/// allowed to see are returned.
pub async fn query_nodes(
    State(state): State<AppState>,
    user: AuthUser,
    axum::extract::Query(params): axum::extract::Query<QueryNodesRequest>,
) -> Result<Json<QueryNodesResponse>, (StatusCode, Json<ErrorResponse>)> {
    // clamp() instead of min(): a negative client-supplied limit would
    // otherwise reach Postgres, which rejects negative LIMIT values and
    // would turn bad input into a 500.
    let limit = params.limit.unwrap_or(50).clamp(0, 200);
    let offset = params.offset.unwrap_or(0).max(0);
    let result = run_query_nodes(&state.db, user.node_id, &params.q, &params.kind, limit, offset).await;
    match result {
        Ok(resp) => Ok(Json(resp)),
        Err(e) => {
            tracing::error!(error = %e, "query_nodes feilet");
            Err(internal_error("Databasefeil ved søk"))
        }
    }
}
/// Shared implementation for `query_nodes`.
///
/// All statements run inside one RLS transaction, so both the result page
/// and the total count are filtered by the caller's visibility.
///
/// Previously the `kind` filter was silently dropped whenever `q` was
/// present; the two filters are now combined when both are given.
async fn run_query_nodes(
    db: &PgPool,
    user_node_id: Uuid,
    q: &Option<String>,
    kind: &Option<String>,
    limit: i64,
    offset: i64,
) -> Result<QueryNodesResponse, sqlx::Error> {
    let mut tx = db.begin().await?;
    set_rls_context(&mut tx, user_node_id).await?;
    // Treat empty strings as "no filter".
    let q = q.as_deref().filter(|s| !s.is_empty());
    let kind = kind.as_deref().filter(|s| !s.is_empty());
    // Escape ILIKE metacharacters in user input. Backslash must be escaped
    // first — it is the default escape character itself.
    let pattern = q.map(|search| {
        format!(
            "%{}%",
            search
                .replace('\\', "\\\\")
                .replace('%', "\\%")
                .replace('_', "\\_")
        )
    });
    let (nodes, total) = match (pattern.as_deref(), kind) {
        // Both free-text search and kind filter.
        (Some(pattern), Some(kind)) => {
            let nodes = sqlx::query_as::<_, QueryNodeResult>(
                r#"
SELECT id, node_kind, title, content, visibility::text as visibility,
metadata, created_at, created_by
FROM nodes
WHERE (title ILIKE $1 OR content ILIKE $1) AND node_kind = $2
ORDER BY created_at DESC
LIMIT $3 OFFSET $4
"#,
            )
            .bind(pattern)
            .bind(kind)
            .bind(limit)
            .bind(offset)
            .fetch_all(&mut *tx)
            .await?;
            let total: (i64,) = sqlx::query_as(
                "SELECT COUNT(*) FROM nodes WHERE (title ILIKE $1 OR content ILIKE $1) AND node_kind = $2",
            )
            .bind(pattern)
            .bind(kind)
            .fetch_one(&mut *tx)
            .await?;
            (nodes, total.0)
        }
        // Free-text search only.
        (Some(pattern), None) => {
            let nodes = sqlx::query_as::<_, QueryNodeResult>(
                r#"
SELECT id, node_kind, title, content, visibility::text as visibility,
metadata, created_at, created_by
FROM nodes
WHERE (title ILIKE $1 OR content ILIKE $1)
ORDER BY created_at DESC
LIMIT $2 OFFSET $3
"#,
            )
            .bind(pattern)
            .bind(limit)
            .bind(offset)
            .fetch_all(&mut *tx)
            .await?;
            let total: (i64,) = sqlx::query_as(
                "SELECT COUNT(*) FROM nodes WHERE (title ILIKE $1 OR content ILIKE $1)",
            )
            .bind(pattern)
            .fetch_one(&mut *tx)
            .await?;
            (nodes, total.0)
        }
        // Kind filter only.
        (None, Some(kind)) => {
            let nodes = sqlx::query_as::<_, QueryNodeResult>(
                r#"
SELECT id, node_kind, title, content, visibility::text as visibility,
metadata, created_at, created_by
FROM nodes
WHERE node_kind = $1
ORDER BY created_at DESC
LIMIT $2 OFFSET $3
"#,
            )
            .bind(kind)
            .bind(limit)
            .bind(offset)
            .fetch_all(&mut *tx)
            .await?;
            let total: (i64,) = sqlx::query_as(
                "SELECT COUNT(*) FROM nodes WHERE node_kind = $1",
            )
            .bind(kind)
            .fetch_one(&mut *tx)
            .await?;
            (nodes, total.0)
        }
        // No filters: newest nodes first.
        (None, None) => {
            let nodes = sqlx::query_as::<_, QueryNodeResult>(
                r#"
SELECT id, node_kind, title, content, visibility::text as visibility,
metadata, created_at, created_by
FROM nodes
ORDER BY created_at DESC
LIMIT $1 OFFSET $2
"#,
            )
            .bind(limit)
            .bind(offset)
            .fetch_all(&mut *tx)
            .await?;
            let total: (i64,) = sqlx::query_as("SELECT COUNT(*) FROM nodes")
                .fetch_one(&mut *tx)
                .await?;
            (nodes, total.0)
        }
    };
    // Ending the transaction resets SET LOCAL / set_config automatically.
    tx.commit().await?;
    Ok(QueryNodesResponse { nodes, total })
}
// =============================================================================
// GET /query/aliases — brukerens alias-noder
// =============================================================================
/// One alias node belonging to the authenticated user.
#[derive(Serialize, sqlx::FromRow)]
pub struct AliasResult {
    pub node_id: Uuid,
    pub title: Option<String>,
    pub metadata: serde_json::Value,
    pub created_at: chrono::DateTime<chrono::Utc>,
}
/// Response for GET /query/aliases, ordered by creation time.
#[derive(Serialize)]
pub struct QueryAliasesResponse {
    pub aliases: Vec<AliasResult>,
}
/// GET /query/aliases
///
/// Returns every alias node for the authenticated user, resolved through
/// system alias-edges (system = true) from the user's main node.
/// No RLS needed — the query is keyed directly on the caller's node_id.
pub async fn query_aliases(
    State(state): State<AppState>,
    user: AuthUser,
) -> Result<Json<QueryAliasesResponse>, (StatusCode, Json<ErrorResponse>)> {
    let sql = r#"
SELECT n.id AS node_id, n.title, n.metadata, n.created_at
FROM edges e
JOIN nodes n ON n.id = e.target_id
WHERE e.source_id = $1
AND e.edge_type = 'alias'
AND e.system = true
ORDER BY n.created_at
"#;
    let result = sqlx::query_as::<_, AliasResult>(sql)
        .bind(user.node_id)
        .fetch_all(&state.db)
        .await;
    match result {
        Ok(aliases) => Ok(Json(QueryAliasesResponse { aliases })),
        Err(e) => {
            tracing::error!(error = %e, "query_aliases feilet");
            Err(internal_error("Databasefeil ved henting av aliaser"))
        }
    }
}
// =============================================================================
// GET /query/board — kanban-brett: noder med belongs_to-edge, gruppert på status
// =============================================================================
/// Query parameters for GET /query/board.
#[derive(Deserialize)]
pub struct QueryBoardRequest {
    /// ID of the board node.
    pub board_id: Uuid,
}
/// One card on a kanban board: a node with a belongs_to edge to the board.
#[derive(Serialize)]
pub struct BoardCard {
    pub node_id: Uuid,
    pub title: Option<String>,
    pub content: Option<String>,
    pub node_kind: String,
    pub metadata: serde_json::Value,
    pub created_at: chrono::DateTime<chrono::Utc>,
    pub created_by: Option<Uuid>,
    /// Status value from the status-edge metadata (null if no status edge).
    pub status: Option<String>,
    /// Position from the belongs_to-edge metadata (default 0).
    pub position: f64,
    /// belongs_to-edge ID (for reference).
    pub belongs_to_edge_id: Uuid,
    /// status-edge ID (null if none).
    pub status_edge_id: Option<Uuid>,
}
/// Response for GET /query/board.
#[derive(Serialize)]
pub struct QueryBoardResponse {
    pub board_id: Uuid,
    pub board_title: Option<String>,
    /// Column definitions from the board node's metadata.
    pub columns: Vec<String>,
    pub cards: Vec<BoardCard>,
}
/// GET /query/board?board_id=...
///
/// Fetches all cards (nodes with a belongs_to edge) on a kanban board,
/// including status edges and position data.
pub async fn query_board(
    State(state): State<AppState>,
    user: AuthUser,
    axum::extract::Query(params): axum::extract::Query<QueryBoardRequest>,
) -> Result<Json<QueryBoardResponse>, (StatusCode, Json<ErrorResponse>)> {
    // Verify access to the board node via RLS. fetch_optional below is
    // None both when the board is missing and when it is invisible.
    let mut tx = state.db.begin().await.map_err(|e| {
        tracing::error!(error = %e, "Transaksjon feilet");
        internal_error("Databasefeil")
    })?;
    set_rls_context(&mut tx, user.node_id).await.map_err(|e| {
        tracing::error!(error = %e, "RLS-kontekst feilet");
        internal_error("Databasefeil")
    })?;
    // Fetch the board node (title + column config in metadata).
    let board = sqlx::query_as::<_, (Option<String>, serde_json::Value)>(
        "SELECT title, metadata FROM nodes WHERE id = $1",
    )
    .bind(params.board_id)
    .fetch_optional(&mut *tx)
    .await
    .map_err(|e| {
        tracing::error!(error = %e, "Feil ved henting av board-node");
        internal_error("Databasefeil")
    })?;
    tx.commit().await.map_err(|e| {
        tracing::error!(error = %e, "Commit feilet");
        internal_error("Databasefeil")
    })?;
    let Some((board_title, board_metadata)) = board else {
        return Err((
            StatusCode::NOT_FOUND,
            Json(ErrorResponse {
                error: format!("Board {} finnes ikke eller du har ikke tilgang", params.board_id),
            }),
        ));
    };
    // Columns from board metadata, falling back to a standard set.
    let columns: Vec<String> = board_metadata
        .get("columns")
        .and_then(|v| serde_json::from_value(v.clone()).ok())
        .unwrap_or_else(|| vec!["todo".to_string(), "in_progress".to_string(), "done".to_string()]);
    // All cards: nodes with a belongs_to edge to this board. Runs on the
    // pool without RLS — the board-level check above is the access gate.
    // NOTE(review): if a card has more than one status edge to the board,
    // this LEFT JOIN yields one row per status edge — confirm that status
    // edges are unique per (card, board) pair.
    let cards = sqlx::query_as::<_, (
        Uuid,                          // n.id
        String,                        // n.node_kind
        Option<String>,                // n.title
        Option<String>,                // n.content
        serde_json::Value,             // n.metadata
        chrono::DateTime<chrono::Utc>, // n.created_at
        Option<Uuid>,                  // n.created_by
        Uuid,                          // bt.id (belongs_to edge)
        serde_json::Value,             // bt.metadata
        Option<Uuid>,                  // st.id (status edge)
        Option<serde_json::Value>,     // st.metadata
    )>(
        r#"
SELECT
n.id, n.node_kind, n.title, n.content, n.metadata,
n.created_at, n.created_by,
bt.id AS belongs_to_edge_id, bt.metadata AS bt_metadata,
st.id AS status_edge_id, st.metadata AS st_metadata
FROM edges bt
JOIN nodes n ON n.id = bt.source_id
LEFT JOIN edges st ON st.source_id = n.id
AND st.target_id = $1
AND st.edge_type = 'status'
WHERE bt.target_id = $1
AND bt.edge_type = 'belongs_to'
ORDER BY n.created_at
"#,
    )
    .bind(params.board_id)
    .fetch_all(&state.db)
    .await
    .map_err(|e| {
        tracing::error!(error = %e, "Feil ved henting av board-kort");
        internal_error("Databasefeil ved henting av kort")
    })?;
    let board_cards: Vec<BoardCard> = cards
        .into_iter()
        .map(|(node_id, node_kind, title, content, metadata, created_at, created_by,
            belongs_to_edge_id, bt_metadata, status_edge_id, st_metadata)| {
            // Status lives in the status-edge metadata under "value".
            let status = st_metadata
                .as_ref()
                .and_then(|m| m.get("value"))
                .and_then(|v| v.as_str())
                .map(|s| s.to_string());
            // Sort position within a column, from belongs_to metadata.
            let position = bt_metadata
                .get("position")
                .and_then(|v| v.as_f64())
                .unwrap_or(0.0);
            BoardCard {
                node_id,
                title,
                content,
                node_kind,
                metadata,
                created_at,
                created_by,
                status,
                position,
                belongs_to_edge_id,
                status_edge_id,
            }
        })
        .collect();
    Ok(Json(QueryBoardResponse {
        board_id: params.board_id,
        board_title,
        columns,
        cards: board_cards,
    }))
}
// =============================================================================
// GET /query/editorial_board — redaktørens arbeidsflate
// =============================================================================
//
// Viser noder med submitted_to-edge til en samling, gruppert på status.
// Brukes av frontend for Kanban-visning av redaksjonelle innsendinger.
/// Query parameters for GET /query/editorial_board.
#[derive(Deserialize)]
pub struct QueryEditorialBoardRequest {
    /// ID of the collection node (target of submitted_to edges).
    pub collection_id: Uuid,
}
/// One editorial submission: a node with a submitted_to edge to a collection.
#[derive(Serialize)]
pub struct EditorialCard {
    pub node_id: Uuid,
    pub title: Option<String>,
    pub content: Option<String>,
    pub node_kind: String,
    pub metadata: serde_json::Value,
    pub created_at: chrono::DateTime<chrono::Utc>,
    pub created_by: Option<Uuid>,
    /// Author's name (title of the person node), when resolvable.
    pub author_name: Option<String>,
    /// Status from the submitted_to-edge metadata.
    pub status: String,
    /// submitted_to-edge ID.
    pub submitted_to_edge_id: Uuid,
    /// Full metadata from the submitted_to edge (incl. feedback, publish_at, etc.).
    pub edge_metadata: serde_json::Value,
    /// IDs of communication node(s) linked to the article (editorial discussion).
    pub discussion_ids: Vec<Uuid>,
}
/// Response for GET /query/editorial_board.
#[derive(Serialize)]
pub struct QueryEditorialBoardResponse {
    pub collection_id: Uuid,
    pub collection_title: Option<String>,
    /// Fixed columns for the editorial workflow.
    pub columns: Vec<String>,
    /// Display names for the columns.
    pub column_labels: std::collections::HashMap<String, String>,
    pub cards: Vec<EditorialCard>,
}
/// GET /query/editorial_board?collection_id=...
///
/// Henter alle noder med submitted_to-edge til en samling,
/// inkludert status, metadata og forfatterinfo.
pub async fn query_editorial_board(
State(state): State<AppState>,
user: AuthUser,
axum::extract::Query(params): axum::extract::Query<QueryEditorialBoardRequest>,
) -> Result<Json<QueryEditorialBoardResponse>, (StatusCode, Json<ErrorResponse>)> {
// Verifiser tilgang til samlings-noden via RLS
let mut tx = state.db.begin().await.map_err(|e| {
tracing::error!(error = %e, "Transaksjon feilet");
internal_error("Databasefeil")
})?;
set_rls_context(&mut tx, user.node_id).await.map_err(|e| {
tracing::error!(error = %e, "RLS-kontekst feilet");
internal_error("Databasefeil")
})?;
let collection = sqlx::query_as::<_, (Option<String>,)>(
"SELECT title FROM nodes WHERE id = $1",
)
.bind(params.collection_id)
.fetch_optional(&mut *tx)
.await
.map_err(|e| {
tracing::error!(error = %e, "Feil ved henting av samlings-node");
internal_error("Databasefeil")
})?;
tx.commit().await.map_err(|e| {
tracing::error!(error = %e, "Commit feilet");
internal_error("Databasefeil")
})?;
let Some((collection_title,)) = collection else {
return Err((
StatusCode::NOT_FOUND,
Json(ErrorResponse {
error: format!(
"Samling {} finnes ikke eller du har ikke tilgang",
params.collection_id
),
}),
));
};
// Hent alle noder med submitted_to-edge til denne samlingen,
// pluss forfatterens navn via created_by → person-node
let rows = sqlx::query_as::<_, (
Uuid, // n.id
String, // n.node_kind
Option<String>, // n.title
Option<String>, // n.content
serde_json::Value, // n.metadata
chrono::DateTime<chrono::Utc>, // n.created_at
Option<Uuid>, // n.created_by
Option<String>, // author.title (forfatterens navn)
Uuid, // e.id (submitted_to edge)
serde_json::Value, // e.metadata
)>(
r#"
SELECT
n.id, n.node_kind, n.title, n.content, n.metadata,
n.created_at, n.created_by,
author.title AS author_name,
e.id AS edge_id, e.metadata AS edge_metadata
FROM edges e
JOIN nodes n ON n.id = e.source_id
LEFT JOIN nodes author ON author.id = n.created_by
AND author.node_kind = 'person'
WHERE e.target_id = $1
AND e.edge_type = 'submitted_to'
ORDER BY e.created_at DESC
"#,
)
.bind(params.collection_id)
.fetch_all(&state.db)
.await
.map_err(|e| {
tracing::error!(error = %e, "Feil ved henting av editorial-kort");
internal_error("Databasefeil ved henting av innsendinger")
})?;
// Samle alle artikkel-IDer for å hente diskusjoner i én spørring
let article_ids: Vec<Uuid> = rows.iter().map(|r| r.0).collect();
// Hent kommunikasjonsnoder som har belongs_to-edge til artiklene
let discussions: Vec<(Uuid, Uuid)> = if !article_ids.is_empty() {
sqlx::query_as::<_, (Uuid, Uuid)>(
r#"
SELECT e.source_id AS communication_id, e.target_id AS article_id
FROM edges e
JOIN nodes n ON n.id = e.source_id AND n.node_kind = 'communication'
WHERE e.edge_type = 'belongs_to'
AND e.target_id = ANY($1)
"#,
)
.bind(&article_ids)
.fetch_all(&state.db)
.await
.unwrap_or_default()
} else {
vec![]
};
// Bygg oppslag: artikkel_id → liste med diskusjons-IDer
let mut discussion_map: std::collections::HashMap<Uuid, Vec<Uuid>> =
std::collections::HashMap::new();
for (comm_id, article_id) in discussions {
discussion_map.entry(article_id).or_default().push(comm_id);
}
let cards: Vec<EditorialCard> = rows
.into_iter()
.map(|(node_id, node_kind, title, content, metadata, created_at, created_by,
author_name, edge_id, edge_metadata)| {
let status = edge_metadata
.get("status")
.and_then(|v| v.as_str())
.unwrap_or("pending")
.to_string();
let discussion_ids = discussion_map
.get(&node_id)
.cloned()
.unwrap_or_default();
EditorialCard {
node_id,
title,
content,
node_kind,
metadata,
created_at,
created_by,
author_name,
status,
submitted_to_edge_id: edge_id,
edge_metadata,
discussion_ids,
}
})
.collect();
let columns = vec![
"pending".to_string(),
"in_review".to_string(),
"approved".to_string(),
"scheduled".to_string(),
];
let mut column_labels = std::collections::HashMap::new();
column_labels.insert("pending".to_string(), "Innkomne".to_string());
column_labels.insert("in_review".to_string(), "Under vurdering".to_string());
column_labels.insert("approved".to_string(), "Godkjent".to_string());
column_labels.insert("scheduled".to_string(), "Planlagt".to_string());
Ok(Json(QueryEditorialBoardResponse {
collection_id: params.collection_id,
collection_title,
columns,
column_labels,
cards,
}))
}
// =============================================================================
// GET /query/graph — graf-traversering fra en fokusnode
// =============================================================================
/// Query parameters for GET /query/graph.
#[derive(Deserialize)]
pub struct QueryGraphRequest {
    /// Focus node to start traversal from. Optional — without it the whole
    /// (capped) graph is returned.
    pub focus_id: Option<Uuid>,
    /// Max traversal depth (1-3). Default: 2.
    pub depth: Option<i32>,
    /// Filter on edge_type (comma-separated). Optional.
    pub edge_types: Option<String>,
    /// Filter on node_kind (comma-separated). Optional.
    pub node_kinds: Option<String>,
}
/// One node in the graph visualisation payload.
#[derive(Serialize)]
pub struct GraphNode {
    pub id: Uuid,
    pub node_kind: String,
    pub title: Option<String>,
    // Visibility enum cast to text in SQL.
    pub visibility: String,
    pub metadata: serde_json::Value,
    pub created_at: chrono::DateTime<chrono::Utc>,
}
/// One edge in the graph visualisation payload.
#[derive(Serialize)]
pub struct GraphEdge {
    pub id: Uuid,
    pub source_id: Uuid,
    pub target_id: Uuid,
    pub edge_type: String,
    pub metadata: serde_json::Value,
}
/// Response for GET /query/graph: the visible subgraph after filtering.
#[derive(Serialize)]
pub struct QueryGraphResponse {
    pub nodes: Vec<GraphNode>,
    pub edges: Vec<GraphEdge>,
}
/// GET /query/graph?focus_id=...&depth=...&edge_types=...&node_kinds=...
///
/// Returns nodes and edges for graph visualisation.
/// With focus_id: traverses the graph N hops out from the focus node.
/// Without focus_id: returns all visible nodes (max 200) with the edges
/// between them.
pub async fn query_graph(
    State(state): State<AppState>,
    user: AuthUser,
    axum::extract::Query(params): axum::extract::Query<QueryGraphRequest>,
) -> Result<Json<QueryGraphResponse>, (StatusCode, Json<ErrorResponse>)> {
    // Split a comma-separated filter string into non-empty trimmed names.
    fn parse_csv(raw: &String) -> Vec<String> {
        raw.split(',')
            .map(str::trim)
            .filter(|t| !t.is_empty())
            .map(str::to_string)
            .collect()
    }
    let depth = params.depth.unwrap_or(2).clamp(1, 3);
    let edge_type_filter: Option<Vec<String>> = params.edge_types.as_ref().map(parse_csv);
    let node_kind_filter: Option<Vec<String>> = params.node_kinds.as_ref().map(parse_csv);
    match run_query_graph(
        &state.db,
        user.node_id,
        params.focus_id,
        depth,
        &edge_type_filter,
        &node_kind_filter,
    )
    .await
    {
        Ok(resp) => Ok(Json(resp)),
        Err(e) => {
            tracing::error!(error = %e, "query_graph feilet");
            Err(internal_error("Databasefeil ved graf-spørring"))
        }
    }
}
/// Shared implementation for `query_graph`.
///
/// Node discovery runs inside one RLS transaction; the edge_type and
/// node_kind filters are applied client-side afterwards so the focus node
/// can always be kept in the result regardless of filters.
async fn run_query_graph(
    db: &PgPool,
    user_node_id: Uuid,
    focus_id: Option<Uuid>,
    depth: i32,
    edge_type_filter: &Option<Vec<String>>,
    node_kind_filter: &Option<Vec<String>>,
) -> Result<QueryGraphResponse, sqlx::Error> {
    let mut tx = db.begin().await?;
    set_rls_context(&mut tx, user_node_id).await?;
    let (nodes, edges) = if let Some(focus) = focus_id {
        // Traverse from the focus node with a recursive CTE, following
        // edges in both directions up to `depth` hops. UNION (not UNION
        // ALL) deduplicates revisited nodes, which also terminates cycles.
        let node_rows = sqlx::query_as::<_, (Uuid, String, Option<String>, String, serde_json::Value, chrono::DateTime<chrono::Utc>)>(
            r#"
WITH RECURSIVE reachable(id, depth) AS (
SELECT $1::uuid, 0
UNION
SELECT CASE WHEN e.source_id = r.id THEN e.target_id ELSE e.source_id END, r.depth + 1
FROM reachable r
JOIN edges e ON (e.source_id = r.id OR e.target_id = r.id)
WHERE r.depth < $2
)
SELECT DISTINCT n.id, n.node_kind, n.title, n.visibility::text, n.metadata, n.created_at
FROM reachable r
JOIN nodes n ON n.id = r.id
ORDER BY n.created_at DESC
LIMIT 200
"#,
        )
        .bind(focus)
        .bind(depth)
        .fetch_all(&mut *tx)
        .await?;
        // Collect node ids so only edges between fetched nodes are loaded.
        let node_ids: Vec<Uuid> = node_rows.iter().map(|r| r.0).collect();
        let edge_rows = sqlx::query_as::<_, (Uuid, Uuid, Uuid, String, serde_json::Value)>(
            r#"
SELECT e.id, e.source_id, e.target_id, e.edge_type, e.metadata
FROM edges e
WHERE e.source_id = ANY($1) AND e.target_id = ANY($1)
AND e.system = false
"#,
        )
        .bind(&node_ids)
        .fetch_all(&mut *tx)
        .await?;
        (node_rows, edge_rows)
    } else {
        // No focus — return all visible nodes (capped at 200, newest first).
        let node_rows = sqlx::query_as::<_, (Uuid, String, Option<String>, String, serde_json::Value, chrono::DateTime<chrono::Utc>)>(
            r#"
SELECT n.id, n.node_kind, n.title, n.visibility::text, n.metadata, n.created_at
FROM nodes n
ORDER BY n.created_at DESC
LIMIT 200
"#,
        )
        .fetch_all(&mut *tx)
        .await?;
        let node_ids: Vec<Uuid> = node_rows.iter().map(|r| r.0).collect();
        let edge_rows = sqlx::query_as::<_, (Uuid, Uuid, Uuid, String, serde_json::Value)>(
            r#"
SELECT e.id, e.source_id, e.target_id, e.edge_type, e.metadata
FROM edges e
WHERE e.source_id = ANY($1) AND e.target_id = ANY($1)
AND e.system = false
"#,
        )
        .bind(&node_ids)
        .fetch_all(&mut *tx)
        .await?;
        (node_rows, edge_rows)
    };
    tx.commit().await?;
    // Apply client-side filters.
    let mut graph_nodes: Vec<GraphNode> = nodes
        .into_iter()
        .map(|(id, node_kind, title, visibility, metadata, created_at)| GraphNode {
            id,
            node_kind,
            title,
            visibility,
            metadata,
            created_at,
        })
        .collect();
    if let Some(kinds) = node_kind_filter {
        if !kinds.is_empty() {
            // Always keep the focus node even when it doesn't match the filter.
            graph_nodes.retain(|n| kinds.contains(&n.node_kind) || focus_id == Some(n.id));
        }
    }
    // Drop edges whose endpoints were filtered away above.
    let visible_ids: std::collections::HashSet<Uuid> =
        graph_nodes.iter().map(|n| n.id).collect();
    let mut graph_edges: Vec<GraphEdge> = edges
        .into_iter()
        .map(|(id, source_id, target_id, edge_type, metadata)| GraphEdge {
            id,
            source_id,
            target_id,
            edge_type,
            metadata,
        })
        .filter(|e| visible_ids.contains(&e.source_id) && visible_ids.contains(&e.target_id))
        .collect();
    if let Some(types) = edge_type_filter {
        if !types.is_empty() {
            graph_edges.retain(|e| types.contains(&e.edge_type));
        }
    }
    Ok(QueryGraphResponse {
        nodes: graph_nodes,
        edges: graph_edges,
    })
}
// =============================================================================
// GET /query/presentation_elements — presentasjonselementer for en artikkel
// =============================================================================
//
// Henter noder koblet til en artikkel via title/subtitle/summary/og_image/
// og_description-edges. Disse er separate noder med variantmetadata.
// Ref: docs/concepts/publisering.md § "Presentasjonselementer"
/// Query parameters for GET /query/presentation_elements.
#[derive(Deserialize)]
pub struct QueryPresentationRequest {
    /// Node ID of the article whose presentation elements are requested.
    pub article_id: Uuid,
}
/// One presentation element attached to an article, flattened from a
/// node + connecting-edge pair.
#[derive(Serialize)]
pub struct PresentationElement {
    /// Node ID of the presentation element itself.
    pub node_id: Uuid,
    /// Edge ID (clients use this for updating/deleting the link).
    pub edge_id: Uuid,
    /// Edge type: title, subtitle, summary, og_image, og_description.
    pub element_type: String,
    /// Node title (used for title/subtitle elements).
    pub title: Option<String>,
    /// Node content (used for summary/og_description elements).
    pub content: Option<String>,
    /// Node kind (content or media).
    pub node_kind: String,
    /// Node metadata (incl. cas_hash for media).
    pub metadata: serde_json::Value,
    /// Edge metadata (variant, language, ab_status etc.).
    pub edge_metadata: serde_json::Value,
    /// Creation timestamp of the element node.
    pub created_at: chrono::DateTime<chrono::Utc>,
}
/// Response body for GET /query/presentation_elements.
#[derive(Serialize)]
pub struct QueryPresentationResponse {
    /// The article the elements belong to (echoed from the request).
    pub article_id: Uuid,
    /// All presentation elements found for the article.
    pub elements: Vec<PresentationElement>,
}
/// GET /query/presentation_elements?article_id=...
///
/// Henter alle presentasjonselementer (tittel, undertittel, ingress,
/// OG-bilde, OG-beskrivelse) knyttet til en artikkel.
/// Returnerer nodene med edge-metadata (variant, ab_status etc.).
pub async fn query_presentation_elements(
State(state): State<AppState>,
user: AuthUser,
axum::extract::Query(params): axum::extract::Query<QueryPresentationRequest>,
) -> Result<Json<QueryPresentationResponse>, (StatusCode, Json<ErrorResponse>)> {
// Verifiser tilgang til artikkelen via RLS
let mut tx = state.db.begin().await.map_err(|e| {
tracing::error!(error = %e, "Transaksjon feilet");
internal_error("Databasefeil")
})?;
set_rls_context(&mut tx, user.node_id).await.map_err(|e| {
tracing::error!(error = %e, "RLS-kontekst feilet");
internal_error("Databasefeil")
})?;
let exists = sqlx::query_scalar::<_, bool>(
"SELECT EXISTS(SELECT 1 FROM nodes WHERE id = $1)",
)
.bind(params.article_id)
.fetch_one(&mut *tx)
.await
.map_err(|e| {
tracing::error!(error = %e, "Tilgangssjekk feilet");
internal_error("Databasefeil")
})?;
tx.commit().await.map_err(|e| {
tracing::error!(error = %e, "Commit feilet");
internal_error("Databasefeil")
})?;
if !exists {
return Err((
StatusCode::NOT_FOUND,
Json(ErrorResponse {
error: format!("Artikkel {} finnes ikke eller du har ikke tilgang", params.article_id),
}),
));
}
// Hent presentasjonselementer: noder med title/subtitle/summary/og_image/og_description-edge
// til artikkelen (source_id = element-node, target_id = article)
let rows = sqlx::query_as::<_, (
Uuid, // n.id
Uuid, // e.id
String, // e.edge_type
Option<String>, // n.title
Option<String>, // n.content
String, // n.node_kind
serde_json::Value, // n.metadata
serde_json::Value, // e.metadata
chrono::DateTime<chrono::Utc>, // n.created_at
)>(
r#"
SELECT
n.id, e.id AS edge_id, e.edge_type,
n.title, n.content, n.node_kind, n.metadata,
e.metadata AS edge_metadata, n.created_at
FROM edges e
JOIN nodes n ON n.id = e.source_id
WHERE e.target_id = $1
AND e.edge_type IN ('title', 'subtitle', 'summary', 'og_image', 'og_description')
ORDER BY e.edge_type, e.created_at
"#,
)
.bind(params.article_id)
.fetch_all(&state.db)
.await
.map_err(|e| {
tracing::error!(error = %e, "Feil ved henting av presentasjonselementer");
internal_error("Databasefeil ved henting av presentasjonselementer")
})?;
let elements: Vec<PresentationElement> = rows
.into_iter()
.map(|(node_id, edge_id, element_type, title, content, node_kind, metadata, edge_metadata, created_at)| {
PresentationElement {
node_id,
edge_id,
element_type,
title,
content,
node_kind,
metadata,
edge_metadata,
created_at,
}
})
.collect();
Ok(Json(QueryPresentationResponse {
article_id: params.article_id,
elements,
}))
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Timestamp formatting, table-driven: seconds, minutes and hours,
    /// with the SRT comma separator before milliseconds.
    #[test]
    fn test_format_srt_timestamp() {
        let cases = [
            (0, "00:00:00,000"),
            (5230, "00:00:05,230"),
            (83456, "00:01:23,456"),
            (3_600_000, "01:00:00,000"),
            (3_723_456, "01:02:03,456"),
        ];
        for (ms, expected) in cases {
            assert_eq!(format_srt_timestamp(ms), expected);
        }
    }

    /// Two consecutive segments render as two numbered SRT cues.
    ///
    /// NOTE(review): standard SRT separates cues with a blank line; the
    /// expected output here has none — confirm downstream consumers accept
    /// this before changing segments_to_srt.
    #[test]
    fn test_segments_to_srt() {
        let first = SegmentResult {
            id: 1,
            seq: 1,
            start_ms: 0,
            end_ms: 5230,
            content: String::from("Hei og velkommen."),
            edited: false,
        };
        let second = SegmentResult {
            id: 2,
            seq: 2,
            start_ms: 5230,
            end_ms: 10500,
            content: String::from("I dag snakker vi om fotball."),
            edited: true,
        };
        let rendered = segments_to_srt(&[first, second]);
        assert_eq!(
            rendered,
            "1\n00:00:00,000 --> 00:00:05,230\nHei og velkommen.\n\
             2\n00:00:05,230 --> 00:00:10,500\nI dag snakker vi om fotball.\n"
        );
    }

    /// A lone segment renders as a single cue with a trailing newline.
    #[test]
    fn test_segments_to_srt_single() {
        let only = SegmentResult {
            id: 1,
            seq: 1,
            start_ms: 0,
            end_ms: 3000,
            content: String::from("Bare ett segment."),
            edited: false,
        };
        assert_eq!(
            segments_to_srt(&[only]),
            "1\n00:00:00,000 --> 00:00:03,000\nBare ett segment.\n"
        );
    }
}