synops/maskinrommet/src/usage_overview.rs
vegard d7dffa06e6 Fullfører oppgave 15.8: Forbruksoversikt i admin
Aggregert ressursforbruk-dashboard som spør mot resource_usage_log
(oppgave 15.7). Tre visninger: totaler per ressurstype, per samling,
og daglig tidsserie. AI drill-down viser forbruk per jobbtype og
modellnivå (fast/smart/deep).

Backend: GET /admin/usage med days- og collection_id-filtre.
Frontend: /admin/usage med filterbare tabeller og fargekodede kort.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-18 04:34:08 +00:00

238 lines
7.9 KiB
Rust

// Forbruksoversikt — aggregert ressursforbruk (oppgave 15.8)
//
// Admin-API for å se totalt forbruk per samling, per ressurstype,
// per tidsperiode. Drill-down til jobbtype og modellnivå for AI.
//
// Spør mot resource_usage_log (oppgave 15.7); AI-detaljer (jobbtype,
// modellnivå, tokens) leses fra detail-JSON i samme tabell.
//
// Ref: docs/features/ressursforbruk.md
use axum::extract::State;
use axum::http::StatusCode;
use axum::Json;
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use sqlx::PgPool;
use uuid::Uuid;
use crate::auth::AuthUser;
use crate::AppState;
// =============================================================================
// Datatyper
// =============================================================================
/// Aggregated usage for one (collection, resource type) pair.
///
/// Produced by `fetch_by_collection`; column names must match the SQL aliases
/// because `sqlx::FromRow` maps by name.
#[derive(Serialize, sqlx::FromRow)]
pub struct CollectionUsageSummary {
// NULL when the usage event was not tied to a collection.
pub collection_id: Option<Uuid>,
// Resolved via LEFT JOIN against nodes; NULL when no matching node exists.
pub collection_title: Option<String>,
// One of: ai, whisper, tts, cas, bandwidth, livekit (see the CASE in fetch_by_collection).
pub resource_type: String,
pub event_count: i64,
/// Primary metric in the type's natural unit (tokens, seconds, bytes, characters, minutes).
pub total_value: f64,
/// Secondary metric (e.g. tokens_out for AI; 0 for other resource types).
pub secondary_value: f64,
}
/// AI drill-down: usage aggregated per job type and model level.
///
/// Only covers rows with resource_type = 'ai'; values are extracted from the
/// detail JSON column by `fetch_ai_drilldown`.
#[derive(Serialize, sqlx::FromRow)]
pub struct AiDrillDown {
pub collection_id: Option<Uuid>,
pub collection_title: Option<String>,
// From detail->>'job_type'; NULL groups untagged events into their own bucket.
pub job_type: Option<String>,
// Model tier (fast/smart/deep per the file header); from detail->>'model_level'.
pub model_level: Option<String>,
pub tokens_in: i64,
pub tokens_out: i64,
pub event_count: i64,
}
/// One time-series point: a single day's usage for a given resource type.
#[derive(Serialize, sqlx::FromRow)]
pub struct DailyUsage {
// Day-truncated timestamp (date_trunc('day', created_at) in fetch_daily).
pub day: DateTime<Utc>,
pub resource_type: String,
pub event_count: i64,
// Same type-specific metric mapping as CollectionUsageSummary::total_value.
pub total_value: f64,
}
/// Combined response for the usage overview endpoint (GET /admin/usage).
#[derive(Serialize)]
pub struct UsageOverviewResponse {
/// Totals per (collection, resource type).
pub by_collection: Vec<CollectionUsageSummary>,
/// AI usage broken down by job type and model level.
pub ai_drilldown: Vec<AiDrillDown>,
/// Daily time series per resource type.
pub daily: Vec<DailyUsage>,
}
/// Query parameters for GET /admin/usage.
#[derive(Deserialize)]
pub struct UsageOverviewParams {
/// Lookback window in days; defaults to 30 and is clamped to [1, 365] by the handler.
pub days: Option<i32>,
/// Optional filter: restrict all three views to a single collection.
pub collection_id: Option<Uuid>,
}
/// JSON error body returned alongside a non-2xx status code.
#[derive(Serialize)]
pub struct ErrorResponse {
pub error: String,
}
/// Map an internal failure to a 500 response with a JSON error body.
fn internal_error(msg: &str) -> (StatusCode, Json<ErrorResponse>) {
    let body = ErrorResponse {
        error: msg.to_string(),
    };
    (StatusCode::INTERNAL_SERVER_ERROR, Json(body))
}
// =============================================================================
// GET /admin/usage — forbruksoversikt
// =============================================================================
/// GET /admin/usage — aggregated resource-usage overview.
///
/// Runs the three aggregate queries (per collection, AI drill-down, daily
/// time series) over the requested window and returns them together.
/// Requires an authenticated user (extractor); any query failure maps to 500.
pub async fn usage_overview(
    State(state): State<AppState>,
    _user: AuthUser,
    axum::extract::Query(params): axum::extract::Query<UsageOverviewParams>,
) -> Result<Json<UsageOverviewResponse>, (StatusCode, Json<ErrorResponse>)> {
    // Window defaults to 30 days; clamp keeps it within [1, 365].
    let days = params.days.unwrap_or(30).clamp(1, 365);
    let db = &state.db;
    let filter = params.collection_id;

    let by_collection = fetch_by_collection(db, days, filter)
        .await
        .map_err(|e| internal_error(&format!("Feil i samlingsoversikt: {e}")))?;
    let ai_drilldown = fetch_ai_drilldown(db, days, filter)
        .await
        .map_err(|e| internal_error(&format!("Feil i AI drill-down: {e}")))?;
    let daily = fetch_daily(db, days, filter)
        .await
        .map_err(|e| internal_error(&format!("Feil i daglig oversikt: {e}")))?;

    let response = UsageOverviewResponse {
        by_collection,
        ai_drilldown,
        daily,
    };
    Ok(Json(response))
}
// =============================================================================
// Spørringer
// =============================================================================
/// Aggregated usage per collection and resource type.
///
/// The primary metric (total_value) is type-specific, read from the detail
/// JSON column:
///   ai        → tokens_in (detail->>'tokens_in')
///   whisper   → duration_seconds
///   tts       → characters
///   cas       → size_bytes (store events only, per the original comment)
///   bandwidth → size_bytes
///   livekit   → participant_minutes
///
/// Unknown resource types contribute 0 (ELSE branch); a missing JSON key
/// yields NULL, which SUM skips and the outer COALESCE folds to 0.
/// A NULL $2 disables the collection filter entirely.
///
/// NOTE(review): ORDER BY total_value DESC compares across unit systems
/// (bytes vs tokens vs seconds) — the ranking is only meaningful within a
/// single resource_type.
async fn fetch_by_collection(
db: &PgPool,
days: i32,
collection_filter: Option<Uuid>,
) -> Result<Vec<CollectionUsageSummary>, sqlx::Error> {
sqlx::query_as::<_, CollectionUsageSummary>(
r#"
SELECT
r.collection_id,
n.title AS collection_title,
r.resource_type,
COUNT(*)::BIGINT AS event_count,
COALESCE(SUM(
CASE r.resource_type
WHEN 'ai' THEN (r.detail->>'tokens_in')::FLOAT8
WHEN 'whisper' THEN (r.detail->>'duration_seconds')::FLOAT8
WHEN 'tts' THEN (r.detail->>'characters')::FLOAT8
WHEN 'cas' THEN (r.detail->>'size_bytes')::FLOAT8
WHEN 'bandwidth' THEN (r.detail->>'size_bytes')::FLOAT8
WHEN 'livekit' THEN (r.detail->>'participant_minutes')::FLOAT8
ELSE 0
END
), 0) AS total_value,
COALESCE(SUM(
CASE r.resource_type
WHEN 'ai' THEN (r.detail->>'tokens_out')::FLOAT8
ELSE 0
END
), 0) AS secondary_value
FROM resource_usage_log r
LEFT JOIN nodes n ON n.id = r.collection_id
WHERE r.created_at >= now() - make_interval(days := $1)
AND ($2::UUID IS NULL OR r.collection_id = $2)
GROUP BY r.collection_id, n.title, r.resource_type
ORDER BY total_value DESC
"#,
)
.bind(days)
.bind(collection_filter)
.fetch_all(db)
.await
}
/// AI drill-down: token totals per job type and model level.
///
/// Considers only rows with resource_type = 'ai' in resource_usage_log.
/// job_type and model_level come from the detail JSON and may be NULL,
/// in which case untagged events group into their own bucket. Token sums
/// skip rows where the JSON key is missing (NULL is ignored by SUM) and
/// COALESCE guarantees a 0 instead of NULL. A NULL $2 disables the
/// collection filter.
async fn fetch_ai_drilldown(
db: &PgPool,
days: i32,
collection_filter: Option<Uuid>,
) -> Result<Vec<AiDrillDown>, sqlx::Error> {
sqlx::query_as::<_, AiDrillDown>(
r#"
SELECT
r.collection_id,
n.title AS collection_title,
r.detail->>'job_type' AS job_type,
r.detail->>'model_level' AS model_level,
COALESCE(SUM((r.detail->>'tokens_in')::BIGINT), 0)::BIGINT AS tokens_in,
COALESCE(SUM((r.detail->>'tokens_out')::BIGINT), 0)::BIGINT AS tokens_out,
COUNT(*)::BIGINT AS event_count
FROM resource_usage_log r
LEFT JOIN nodes n ON n.id = r.collection_id
WHERE r.resource_type = 'ai'
AND r.created_at >= now() - make_interval(days := $1)
AND ($2::UUID IS NULL OR r.collection_id = $2)
GROUP BY r.collection_id, n.title, r.detail->>'job_type', r.detail->>'model_level'
ORDER BY tokens_in DESC
"#,
)
.bind(days)
.bind(collection_filter)
.fetch_all(db)
.await
}
/// Daily time series per resource type, most recent day first.
///
/// total_value uses the same type-specific metric mapping as
/// `fetch_by_collection` (tokens for ai, seconds for whisper, etc.).
/// A NULL $2 disables the collection filter.
///
/// NOTE(review): date_trunc('day', created_at) truncates timestamptz in the
/// session time zone — confirm the server runs with TimeZone=UTC if exact
/// day boundaries matter.
async fn fetch_daily(
db: &PgPool,
days: i32,
collection_filter: Option<Uuid>,
) -> Result<Vec<DailyUsage>, sqlx::Error> {
sqlx::query_as::<_, DailyUsage>(
r#"
SELECT
date_trunc('day', r.created_at) AS day,
r.resource_type,
COUNT(*)::BIGINT AS event_count,
COALESCE(SUM(
CASE r.resource_type
WHEN 'ai' THEN (r.detail->>'tokens_in')::FLOAT8
WHEN 'whisper' THEN (r.detail->>'duration_seconds')::FLOAT8
WHEN 'tts' THEN (r.detail->>'characters')::FLOAT8
WHEN 'cas' THEN (r.detail->>'size_bytes')::FLOAT8
WHEN 'bandwidth' THEN (r.detail->>'size_bytes')::FLOAT8
WHEN 'livekit' THEN (r.detail->>'participant_minutes')::FLOAT8
ELSE 0
END
), 0) AS total_value
FROM resource_usage_log r
WHERE r.created_at >= now() - make_interval(days := $1)
AND ($2::UUID IS NULL OR r.collection_id = $2)
GROUP BY date_trunc('day', r.created_at), r.resource_type
ORDER BY day DESC, resource_type
"#,
)
.bind(days)
.bind(collection_filter)
.fetch_all(db)
.await
}