diff --git a/docs/concepts/publisering.md b/docs/concepts/publisering.md index 6b7f757..8863e0b 100644 --- a/docs/concepts/publisering.md +++ b/docs/concepts/publisering.md @@ -595,12 +595,17 @@ LIMIT 20 OFFSET $page; Med indeks på `(target_id, edge_type)` og GIN-indeks på `metadata` er dette raskt uansett samlingsstørrelse. -**Forside-rendering:** -- `index_mode: "static"` — full HTML rendres til CAS ved publisering. +**Forside-rendering (implementert):** +- `index_mode: "static"` — full HTML rendres til CAS via `render_index`-jobb + ved publisering. Samlingens `metadata.rendered_index.index_hash` peker til + CAS-filen. Serveres med `Cache-Control: immutable`. Passer for magasin/blogg med lav frekvens. -- `index_mode: "dynamic"` — maskinrommet serverer on-demand med - in-memory cache, invalidert ved publisering. `index_cache_ttl` - styrer cachens levetid. Passer for nyhetsavis med høy frekvens. +- `index_mode: "dynamic"` (default) — maskinrommet serverer on-demand med + in-memory cache (`IndexCache`), invalidert ved publisering (belongs_to-endring). + `index_cache_ttl` (default 300s) styrer cachens levetid. + Passer for nyhetsavis med høy frekvens. +- Tre separate indekserte PG-spørringer for hero/featured/strøm — + filtrerer på `slot` i edge-metadata, bruker GIN-indeks. **Bulk re-rendering ved temaendring:** Temaendring trigger batch-jobb via jobbkøen. 
Maskinrommet paginerer 100 artikler om gangen, rendrer diff --git a/maskinrommet/src/intentions.rs b/maskinrommet/src/intentions.rs index 78f4add..674be50 100644 --- a/maskinrommet/src/intentions.rs +++ b/maskinrommet/src/intentions.rs @@ -448,6 +448,7 @@ pub async fn create_node( spawn_pg_insert_edge( state.db.clone(), state.stdb.clone(), + state.index_cache.clone(), edge_id, node_id, ctx_id, @@ -636,6 +637,7 @@ pub async fn create_edge( spawn_pg_insert_edge( state.db.clone(), state.stdb.clone(), + state.index_cache.clone(), edge_id, req.source_id, req.target_id, @@ -1075,6 +1077,7 @@ pub async fn create_communication( spawn_pg_insert_edge( state.db.clone(), state.stdb.clone(), + state.index_cache.clone(), owner_edge_id, user.node_id, node_id, @@ -1116,6 +1119,7 @@ pub async fn create_communication( spawn_pg_insert_edge( state.db.clone(), state.stdb.clone(), + state.index_cache.clone(), edge_id, *participant_id, node_id, @@ -1339,6 +1343,7 @@ pub async fn upload_media( spawn_pg_insert_edge( state.db.clone(), state.stdb.clone(), + state.index_cache.clone(), edge_id, src_id, media_node_id, @@ -1649,6 +1654,7 @@ fn edge_type_to_access_level(edge_type: &str) -> Option<&'static str> { fn spawn_pg_insert_edge( db: PgPool, stdb: crate::stdb::StdbClient, + index_cache: crate::publishing::IndexCache, edge_id: Uuid, source_id: Uuid, target_id: Uuid, @@ -1707,7 +1713,7 @@ fn spawn_pg_insert_edge( // Trigger artikkelrendering ved belongs_to til publiseringssamling if edge_type == "belongs_to" { - trigger_render_if_publishing(&db, source_id, target_id).await; + trigger_render_if_publishing(&db, &index_cache, source_id, target_id).await; } } Err(e) => { @@ -1719,16 +1725,23 @@ fn spawn_pg_insert_edge( } /// Sjekker om target er en samling med publishing-trait, og legger i så fall -/// en `render_article`-jobb i køen for å rendere artikkelens HTML til CAS. 
-async fn trigger_render_if_publishing(db: &PgPool, source_id: Uuid, target_id: Uuid) { +/// en `render_article`-jobb i køen. For statisk modus legges også en +/// `render_index`-jobb. For dynamisk modus invalideres in-memory-cachen. +async fn trigger_render_if_publishing( + db: &PgPool, + index_cache: &crate::publishing::IndexCache, + source_id: Uuid, + target_id: Uuid, +) { match crate::publishing::find_publishing_collection_by_id(db, target_id).await { - Ok(Some(_config)) => { - let payload = serde_json::json!({ + Ok(Some(config)) => { + // Render artikkelen + let article_payload = serde_json::json!({ "node_id": source_id.to_string(), "collection_id": target_id.to_string(), }); - match crate::jobs::enqueue(db, "render_article", payload, Some(target_id), 5).await { + match crate::jobs::enqueue(db, "render_article", article_payload, Some(target_id), 5).await { Ok(job_id) => { tracing::info!( job_id = %job_id, @@ -1746,6 +1759,35 @@ async fn trigger_render_if_publishing(db: &PgPool, source_id: Uuid, target_id: U ); } } + + // Re-render forsiden + let index_mode = config.index_mode.as_deref().unwrap_or("dynamic"); + if index_mode == "static" { + // Statisk modus: legg render_index-jobb i køen + let index_payload = serde_json::json!({ + "collection_id": target_id.to_string(), + }); + + match crate::jobs::enqueue(db, "render_index", index_payload, Some(target_id), 4).await { + Ok(job_id) => { + tracing::info!( + job_id = %job_id, + collection_id = %target_id, + "render_index-jobb lagt i kø (statisk modus)" + ); + } + Err(e) => { + tracing::error!( + collection_id = %target_id, + error = %e, + "Kunne ikke legge render_index-jobb i kø" + ); + } + } + } else { + // Dynamisk modus: invalider in-memory-cache + crate::publishing::invalidate_index_cache(index_cache, target_id).await; + } } Ok(None) => { // Target er ikke en publiseringssamling — ingen rendering nødvendig diff --git a/maskinrommet/src/jobs.rs b/maskinrommet/src/jobs.rs index 19aa3a6..2b3b1b9 100644 --- 
a/maskinrommet/src/jobs.rs +++ b/maskinrommet/src/jobs.rs @@ -175,6 +175,9 @@ async fn dispatch( "render_article" => { handle_render_article(job, db, cas).await } + "render_index" => { + handle_render_index(job, db, cas).await + } other => Err(format!("Ukjent jobbtype: {other}")), } } @@ -206,6 +209,26 @@ async fn handle_render_article( publishing::render_article_to_cas(db, cas, node_id, collection_id).await } +/// Handler for `render_index`-jobb. +/// +/// Payload: `{ "collection_id": "..." }` +/// Rendrer forsiden til HTML via Tera, lagrer i CAS, +/// oppdaterer samlingens metadata.rendered_index. +async fn handle_render_index( + job: &JobRow, + db: &PgPool, + cas: &CasStore, +) -> Result { + let collection_id: Uuid = job + .payload + .get("collection_id") + .and_then(|v| v.as_str()) + .and_then(|s| s.parse().ok()) + .ok_or("Mangler collection_id i payload")?; + + publishing::render_index_to_cas(db, cas, collection_id).await +} + /// Starter worker-loopen som poller job_queue. /// Kjører som en bakgrunnsoppgave i tokio. 
pub fn start_worker(db: PgPool, stdb: StdbClient, cas: CasStore) { diff --git a/maskinrommet/src/main.rs b/maskinrommet/src/main.rs index b05cb90..1a2ebbb 100644 --- a/maskinrommet/src/main.rs +++ b/maskinrommet/src/main.rs @@ -35,6 +35,7 @@ pub struct AppState { pub jwks: JwksKeys, pub stdb: StdbClient, pub cas: CasStore, + pub index_cache: publishing::IndexCache, } #[derive(Serialize)] @@ -138,7 +139,8 @@ async fn main() { // Start periodisk CAS-pruning i bakgrunnen pruning::start_pruning_loop(db.clone(), cas.clone()); - let state = AppState { db, jwks, stdb, cas }; + let index_cache = publishing::new_index_cache(); + let state = AppState { db, jwks, stdb, cas, index_cache }; // Ruter: /health er offentlig, /me krever gyldig JWT let app = Router::new() diff --git a/maskinrommet/src/publishing.rs b/maskinrommet/src/publishing.rs index 818ec0e..d906679 100644 --- a/maskinrommet/src/publishing.rs +++ b/maskinrommet/src/publishing.rs @@ -10,6 +10,9 @@ //! //! Ref: docs/concepts/publisering.md § "Temaer", "HTML-rendering og CAS" +use std::collections::HashMap; +use std::sync::Arc; + use axum::{ extract::{Path, State}, http::{header, StatusCode}, @@ -19,6 +22,7 @@ use chrono::{DateTime, Utc}; use serde::{Deserialize, Serialize}; use sqlx::PgPool; use tera::{Context, Tera}; +use tokio::sync::RwLock; use uuid::Uuid; use crate::cas::CasStore; @@ -41,6 +45,7 @@ pub struct PublishingConfig { pub theme_config: ThemeConfig, pub custom_domain: Option, pub index_mode: Option, + pub index_cache_ttl: Option, pub featured_max: Option, pub stream_page_size: Option, } @@ -284,6 +289,33 @@ pub struct IndexData { pub stream: Vec, } +// ============================================================================= +// In-memory index-cache (dynamisk modus) +// ============================================================================= + +/// Cachet forside-HTML med utløpstid. 
+pub struct CachedIndex { + html: String, + expires_at: DateTime<Utc>, +} + +/// Thread-safe cache for forside-rendering (dynamisk modus). +/// Nøkkel: collection UUID. Verdi: rendret HTML med TTL. +pub type IndexCache = Arc<RwLock<HashMap<Uuid, CachedIndex>>>; + +/// Opprett en ny tom IndexCache. +pub fn new_index_cache() -> IndexCache { + Arc::new(RwLock::new(HashMap::new())) +} + +/// Invalider cache for en gitt samling. +pub async fn invalidate_index_cache(cache: &IndexCache, collection_id: Uuid) { + let mut map = cache.write().await; + if map.remove(&collection_id).is_some() { + tracing::info!(collection_id = %collection_id, "Forside-cache invalidert"); + } +} + // ============================================================================= // Render-funksjoner // ============================================================================= @@ -527,6 +559,130 @@ pub async fn render_article_to_cas( })) } +/// Render forsiden til CAS (statisk modus). +/// +/// Henter hero/featured/stream med tre indekserte spørringer, +/// rendrer via Tera-template, lagrer HTML i CAS, og oppdaterer +/// samlingens metadata.rendered_index med index_hash.
+pub async fn render_index_to_cas( + db: &PgPool, + cas: &CasStore, + collection_id: Uuid, +) -> Result { + // Hent samlingens konfig + let collection_row: Option<(Option, serde_json::Value)> = sqlx::query_as( + r#" + SELECT title, metadata + FROM nodes + WHERE id = $1 AND node_kind = 'collection' + "#, + ) + .bind(collection_id) + .fetch_optional(db) + .await + .map_err(|e| format!("Feil ved henting av samling: {e}"))?; + + let Some((collection_title_opt, collection_metadata)) = collection_row else { + return Err(format!("Samling {collection_id} finnes ikke")); + }; + + let publishing_config: PublishingConfig = collection_metadata + .get("traits") + .and_then(|t| t.get("publishing")) + .cloned() + .map(|v| serde_json::from_value(v).unwrap_or_default()) + .unwrap_or_default(); + + let slug = publishing_config.slug.as_deref().unwrap_or("unknown"); + let theme = publishing_config.theme.as_deref().unwrap_or("blogg"); + let config = &publishing_config.theme_config; + let collection_title = collection_title_opt.unwrap_or_else(|| slug.to_string()); + let featured_max = publishing_config.featured_max.unwrap_or(4); + let stream_page_size = publishing_config.stream_page_size.unwrap_or(20); + + let base_url = publishing_config + .custom_domain + .as_deref() + .map(|d| format!("https://{d}")) + .unwrap_or_else(|| format!("/pub/{slug}")); + + // Hent artikler med tre indekserte spørringer + let (hero, featured, stream) = + fetch_index_articles_optimized(db, collection_id, featured_max, stream_page_size).await + .map_err(|e| format!("Feil ved henting av forsideartikler: {e}"))?; + + let index_data = IndexData { + title: collection_title, + description: None, + hero, + featured, + stream, + }; + + // Render med Tera + let tera = build_tera(); + let html = render_index(&tera, theme, config, &index_data, &base_url) + .map_err(|e| format!("Tera render-feil (index): {e}"))?; + + // Lagre i CAS + let store_result = cas + .store(html.as_bytes()) + .await + .map_err(|e| 
format!("CAS-lagring feilet: {e}"))?; + + tracing::info!( + collection_id = %collection_id, + hash = %store_result.hash, + size = store_result.size, + deduplicated = store_result.already_existed, + "Forside rendret og lagret i CAS" + ); + + // Oppdater samlingens metadata.rendered_index + let now = Utc::now(); + sqlx::query( + r#" + UPDATE nodes + SET metadata = jsonb_set( + jsonb_set( + jsonb_set( + CASE WHEN metadata ? 'rendered_index' + THEN metadata + ELSE jsonb_set(metadata, '{rendered_index}', '{}'::jsonb) + END, + '{rendered_index,index_hash}', + to_jsonb($2::text) + ), + '{rendered_index,rendered_at}', + to_jsonb($3::text) + ), + '{rendered_index,renderer_version}', + to_jsonb($4::bigint) + ) + WHERE id = $1 + "#, + ) + .bind(collection_id) + .bind(&store_result.hash) + .bind(now.to_rfc3339()) + .bind(RENDERER_VERSION) + .execute(db) + .await + .map_err(|e| format!("Feil ved oppdatering av metadata.rendered_index: {e}"))?; + + tracing::info!( + collection_id = %collection_id, + index_hash = %store_result.hash, + "metadata.rendered_index oppdatert" + ); + + Ok(serde_json::json!({ + "index_hash": store_result.hash, + "size": store_result.size, + "renderer_version": RENDERER_VERSION + })) +} + // ============================================================================= // Database-spørringer // ============================================================================= @@ -691,42 +847,25 @@ struct FetchedArticle { edge_meta: Option, } -/// Hent artikler for forsiden, sortert i slots. -async fn fetch_index_articles( +/// Hent forsideartikler med tre separate, indekserte spørringer. +/// +/// Hver spørring filtrerer på slot i edge.metadata og bruker +/// GIN-indeks på edges.metadata. Mer effektivt enn å hente alt +/// og filtrere i Rust, spesielt med mange artikler. 
+async fn fetch_index_articles_optimized( db: &PgPool, collection_id: Uuid, featured_max: i64, stream_page_size: i64, ) -> Result<(Option, Vec, Vec), sqlx::Error> { - // Hent alle publiserte artikler med edge-metadata - let rows: Vec<(Uuid, Option, Option, DateTime, Option)> = sqlx::query_as( - r#" - SELECT n.id, n.title, n.content, n.created_at, e.metadata - FROM edges e - JOIN nodes n ON n.id = e.source_id - WHERE e.target_id = $1 - AND e.edge_type = 'belongs_to' - ORDER BY COALESCE( - (e.metadata->>'publish_at')::timestamptz, - n.created_at - ) DESC - "#, - ) - .bind(collection_id) - .fetch_all(db) - .await?; - - let mut hero: Option = None; - let mut featured: Vec = Vec::new(); - let mut stream: Vec = Vec::new(); - - for (id, title, content, created_at, edge_meta) in rows { - let slot = edge_meta - .as_ref() - .and_then(|m| m.get("slot")) - .and_then(|v| v.as_str()) - .unwrap_or(""); - + // Hjelpefunksjon for å konvertere rader til ArticleData + fn row_to_article( + id: Uuid, + title: Option, + content: Option, + created_at: DateTime, + edge_meta: Option, + ) -> ArticleData { let publish_at = edge_meta .as_ref() .and_then(|m| m.get("publish_at")) @@ -734,11 +873,9 @@ async fn fetch_index_articles( .and_then(|s| s.parse::>().ok()) .unwrap_or(created_at); - let summary = content - .as_deref() - .map(|c| truncate(c, 200)); + let summary = content.as_deref().map(|c| truncate(c, 200)); - let article = ArticleData { + ArticleData { id: id.to_string(), short_id: id.to_string()[..8].to_string(), title: title.unwrap_or_else(|| "Uten tittel".to_string()), @@ -746,19 +883,84 @@ async fn fetch_index_articles( summary, published_at: publish_at.to_rfc3339(), published_at_short: publish_at.format("%e. 
%B %Y").to_string(), - }; - - match slot { - "hero" if hero.is_none() => hero = Some(article), - "featured" if (featured.len() as i64) < featured_max => featured.push(article), - _ => { - if (stream.len() as i64) < stream_page_size { - stream.push(article); - } - } } } + type Row = (Uuid, Option, Option, DateTime, Option); + + // 1. Hero: slot = "hero", maks 1 + let hero_row: Option = sqlx::query_as( + r#" + SELECT n.id, n.title, n.content, n.created_at, e.metadata + FROM edges e + JOIN nodes n ON n.id = e.source_id + WHERE e.target_id = $1 + AND e.edge_type = 'belongs_to' + AND e.metadata->>'slot' = 'hero' + LIMIT 1 + "#, + ) + .bind(collection_id) + .fetch_optional(db) + .await?; + + let hero = hero_row.map(|(id, title, content, created_at, edge_meta)| { + row_to_article(id, title, content, created_at, edge_meta) + }); + + // 2. Featured: slot = "featured", sortert på slot_order + let featured_rows: Vec = sqlx::query_as( + r#" + SELECT n.id, n.title, n.content, n.created_at, e.metadata + FROM edges e + JOIN nodes n ON n.id = e.source_id + WHERE e.target_id = $1 + AND e.edge_type = 'belongs_to' + AND e.metadata->>'slot' = 'featured' + ORDER BY (e.metadata->>'slot_order')::int ASC NULLS LAST + LIMIT $2 + "#, + ) + .bind(collection_id) + .bind(featured_max) + .fetch_all(db) + .await?; + + let featured: Vec = featured_rows + .into_iter() + .map(|(id, title, content, created_at, edge_meta)| { + row_to_article(id, title, content, created_at, edge_meta) + }) + .collect(); + + // 3. 
Strøm: slot IS NULL (eller mangler), sortert på publish_at + let stream_rows: Vec = sqlx::query_as( + r#" + SELECT n.id, n.title, n.content, n.created_at, e.metadata + FROM edges e + JOIN nodes n ON n.id = e.source_id + WHERE e.target_id = $1 + AND e.edge_type = 'belongs_to' + AND (e.metadata->>'slot' IS NULL OR e.metadata->>'slot' = '') + ORDER BY COALESCE( + (e.metadata->>'publish_at')::timestamptz, + n.created_at + ) DESC + LIMIT $2 + "#, + ) + .bind(collection_id) + .bind(stream_page_size) + .fetch_all(db) + .await?; + + let stream: Vec = stream_rows + .into_iter() + .map(|(id, title, content, created_at, edge_meta)| { + row_to_article(id, title, content, created_at, edge_meta) + }) + .collect(); + Ok((hero, featured, stream)) } @@ -777,6 +979,12 @@ fn truncate(s: &str, max: usize) -> String { // ============================================================================= /// GET /pub/{slug} — forside for en publikasjon. +/// +/// Støtter to moduser styrt av `index_mode` i trait-konfig: +/// - **static**: Serverer pre-rendret HTML fra CAS (immutable cache). +/// Forsiden rendres til CAS via `render_index`-jobb ved publisering. +/// - **dynamic** (default): Rendrer on-the-fly med in-memory cache. +/// TTL styres av `index_cache_ttl` (default 300s). pub async fn serve_index( State(state): State, Path(slug): Path, @@ -789,12 +997,74 @@ pub async fn serve_index( })? 
.ok_or(StatusCode::NOT_FOUND)?; + let index_mode = collection.publishing_config.index_mode.as_deref().unwrap_or("dynamic"); + let cache_ttl = collection.publishing_config.index_cache_ttl.unwrap_or(300); + + // --- Statisk modus: server fra CAS --- + if index_mode == "static" { + // Sjekk metadata.rendered_index.index_hash + let hash_row: Option<(serde_json::Value,)> = sqlx::query_as( + "SELECT metadata FROM nodes WHERE id = $1", + ) + .bind(collection.id) + .fetch_optional(&state.db) + .await + .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?; + + if let Some((metadata,)) = hash_row { + if let Some(index_hash) = metadata + .get("rendered_index") + .and_then(|r| r.get("index_hash")) + .and_then(|h| h.as_str()) + { + let cas_path = state.cas.path_for(index_hash); + if cas_path.exists() { + let html_bytes = tokio::fs::read(&cas_path).await.map_err(|e| { + tracing::error!(hash = %index_hash, error = %e, "Kunne ikke lese CAS-fil for index"); + StatusCode::INTERNAL_SERVER_ERROR + })?; + + return Ok(Response::builder() + .header(header::CONTENT_TYPE, "text/html; charset=utf-8") + .header( + header::CACHE_CONTROL, + "public, max-age=31536000, immutable", + ) + .body(html_bytes.into()) + .unwrap()); + } + } + } + + // Fallthrough: ingen CAS-versjon — render on-the-fly som fallback + tracing::warn!(slug = %slug, "Statisk index mangler i CAS, faller tilbake til dynamisk rendering"); + } + + // --- Dynamisk modus: in-memory cache med TTL --- + { + let cache = state.index_cache.read().await; + if let Some(cached) = cache.get(&collection.id) { + if cached.expires_at > Utc::now() { + let max_age = (cached.expires_at - Utc::now()).num_seconds().max(0); + return Ok(Response::builder() + .header(header::CONTENT_TYPE, "text/html; charset=utf-8") + .header( + header::CACHE_CONTROL, + format!("public, max-age={max_age}"), + ) + .body(cached.html.clone().into()) + .unwrap()); + } + } + } + + // Cache miss eller utløpt — render og cache let theme = 
collection.publishing_config.theme.as_deref().unwrap_or("blogg"); - let config = &collection.publishing_config.theme_config; + let config = collection.publishing_config.theme_config.clone(); let featured_max = collection.publishing_config.featured_max.unwrap_or(4); let stream_page_size = collection.publishing_config.stream_page_size.unwrap_or(20); - let (hero, featured, stream) = fetch_index_articles( + let (hero, featured, stream) = fetch_index_articles_optimized( &state.db, collection.id, featured_max, @@ -823,14 +1093,27 @@ pub async fn serve_index( }; let tera = build_tera(); - let html = render_index(&tera, theme, config, &index_data, &base_url).map_err(|e| { + let html = render_index(&tera, theme, &config, &index_data, &base_url).map_err(|e| { tracing::error!(slug = %slug, theme = %theme, error = %e, "Tera render-feil (index)"); StatusCode::INTERNAL_SERVER_ERROR })?; + // Legg i cache + let expires_at = Utc::now() + chrono::Duration::seconds(cache_ttl as i64); + { + let mut cache = state.index_cache.write().await; + cache.insert(collection.id, CachedIndex { + html: html.clone(), + expires_at, + }); + } + Ok(Response::builder() .header(header::CONTENT_TYPE, "text/html; charset=utf-8") - .header(header::CACHE_CONTROL, "public, max-age=60") + .header( + header::CACHE_CONTROL, + format!("public, max-age={cache_ttl}"), + ) .body(html.into()) .unwrap()) } diff --git a/tasks.md b/tasks.md index 525d4ef..3614b1b 100644 --- a/tasks.md +++ b/tasks.md @@ -141,8 +141,7 @@ Uavhengige faser kan fortsatt plukkes. - [x] 14.1 Tera-templates: innebygde temaer (avis, magasin, blogg, tidsskrift) med Tera i Rust. Artikkelmal + forside-mal per tema. CSS-variabler for theme_config-overstyring. Ref: `docs/concepts/publisering.md` § "Temaer". - [x] 14.2 HTML-rendering av enkeltartikler: maskinrommet rendrer `metadata.document` til HTML via Tera, lagrer i CAS. Noden får `metadata.rendered.html_hash` + `renderer_version`. SEO-metadata (OG-tags, canonical, JSON-LD). 
-- [~] 14.3 Forside-rendering: maskinrommet spør PG for hero/featured/strøm (tre indekserte spørringer), appliserer tema-template, rendrer til CAS (statisk modus) eller serverer med in-memory cache (dynamisk modus). `index_mode` og `index_cache_ttl` i trait-konfig. - > Påbegynt: 2026-03-18T00:54 +- [x] 14.3 Forside-rendering: maskinrommet spør PG for hero/featured/strøm (tre indekserte spørringer), appliserer tema-template, rendrer til CAS (statisk modus) eller serverer med in-memory cache (dynamisk modus). `index_mode` og `index_cache_ttl` i trait-konfig. - [ ] 14.4 Caddy-ruting for synops.no/pub: Caddy reverse-proxyer til maskinrommet som gjør slug→hash-oppslag og streamer CAS-fil. `Cache-Control: immutable` for artikler. Kategori/arkiv/søk serveres dynamisk av maskinrommet med kortere cache-TTL. - [ ] 14.5 Slot-håndtering i maskinrommet: `slot` og `slot_order` i `belongs_to`-edge metadata. Ved ny hero → gammel hero flyttes til strøm. Ved featured over `featured_max` → FIFO tilbake til strøm. `pinned`-flagg forhindrer automatisk fjerning. - [ ] 14.6 Forside-admin i frontend: visuell editor for hero/featured/strøm. Drag-and-drop mellom plasser. Pin-knapp. Forhåndsvisning. Oppdaterer edge-metadata via maskinrommet.