swarm repositories / source
aboutsummaryrefslogtreecommitdiff
path: root/crates
diff options
context:
space:
mode:
authormain <main@swarm.moe>2026-03-20 16:00:30 -0400
committermain <main@swarm.moe>2026-03-20 16:00:30 -0400
commit9d63844f3a28fde70b19500422f17379e99e588a (patch)
tree163cfbd65a8d3528346561410ef39eb1183a16f2 /crates
parent22fe3d2ce7478450a1d7443c4ecbd85fd4c46716 (diff)
downloadfidget_spinner-9d63844f3a28fde70b19500422f17379e99e588a.zip
Refound Spinner as an austere frontier ledger
Diffstat (limited to 'crates')
-rw-r--r--crates/fidget-spinner-cli/Cargo.toml3
-rw-r--r--crates/fidget-spinner-cli/src/main.rs2014
-rw-r--r--crates/fidget-spinner-cli/src/mcp/catalog.rs1380
-rw-r--r--crates/fidget-spinner-cli/src/mcp/host/runtime.rs53
-rw-r--r--crates/fidget-spinner-cli/src/mcp/service.rs3448
-rw-r--r--crates/fidget-spinner-cli/src/ui.rs1603
-rw-r--r--crates/fidget-spinner-cli/tests/mcp_hardening.rs1574
-rw-r--r--crates/fidget-spinner-core/Cargo.toml2
-rw-r--r--crates/fidget-spinner-core/src/error.rs6
-rw-r--r--crates/fidget-spinner-core/src/id.rs5
-rw-r--r--crates/fidget-spinner-core/src/lib.rs28
-rw-r--r--crates/fidget-spinner-core/src/model.rs956
-rw-r--r--crates/fidget-spinner-store-sqlite/Cargo.toml2
-rw-r--r--crates/fidget-spinner-store-sqlite/src/lib.rs6259
14 files changed, 7277 insertions, 10056 deletions
diff --git a/crates/fidget-spinner-cli/Cargo.toml b/crates/fidget-spinner-cli/Cargo.toml
index bf8ffb7..58263ad 100644
--- a/crates/fidget-spinner-cli/Cargo.toml
+++ b/crates/fidget-spinner-cli/Cargo.toml
@@ -18,14 +18,13 @@ clap.workspace = true
dirs.workspace = true
fidget-spinner-core = { path = "../fidget-spinner-core" }
fidget-spinner-store-sqlite = { path = "../fidget-spinner-store-sqlite" }
-linkify.workspace = true
libmcp = { git = "https://git.swarm.moe/libmcp.git", rev = "84e898d9ba699451d5d13fe384e7bbe220564bc1" }
maud.workspace = true
+percent-encoding.workspace = true
serde.workspace = true
serde_json.workspace = true
time.workspace = true
tokio.workspace = true
-uuid.workspace = true
[lints]
workspace = true
diff --git a/crates/fidget-spinner-cli/src/main.rs b/crates/fidget-spinner-cli/src/main.rs
index f56e751..9de2515 100644
--- a/crates/fidget-spinner-cli/src/main.rs
+++ b/crates/fidget-spinner-cli/src/main.rs
@@ -4,27 +4,28 @@ mod ui;
use std::collections::{BTreeMap, BTreeSet};
use std::fs;
+use std::io;
use std::net::SocketAddr;
use std::path::{Path, PathBuf};
use camino::{Utf8Path, Utf8PathBuf};
use clap::{Args, Parser, Subcommand, ValueEnum};
use fidget_spinner_core::{
- AnnotationVisibility, CommandRecipe, DiagnosticSeverity, ExecutionBackend, FieldPresence,
- FieldRole, FieldValueType, FrontierContract, FrontierNote, FrontierVerdict, InferencePolicy,
- MetricSpec, MetricUnit, MetricValue, NodeAnnotation, NodeClass, NodePayload, NonEmptyText,
- OptimizationObjective, ProjectFieldSpec, TagName,
+ ArtifactKind, CommandRecipe, ExecutionBackend, ExperimentAnalysis, ExperimentStatus,
+ FieldValueType, FrontierVerdict, MetricUnit, MetricVisibility, NonEmptyText,
+ OptimizationObjective, RunDimensionValue, Slug, TagName,
};
use fidget_spinner_store_sqlite::{
- CloseExperimentRequest, CreateFrontierRequest, CreateNodeRequest, DefineMetricRequest,
- DefineRunDimensionRequest, EdgeAttachment, EdgeAttachmentDirection, ExperimentAnalysisDraft,
- ListNodesQuery, MetricBestQuery, MetricFieldSource, MetricKeyQuery, MetricRankOrder,
- OpenExperimentRequest, ProjectStore, RemoveSchemaFieldRequest, STORE_DIR_NAME, StoreError,
- UpsertSchemaFieldRequest,
+ AttachmentSelector, CloseExperimentRequest, CreateArtifactRequest, CreateFrontierRequest,
+ CreateHypothesisRequest, DefineMetricRequest, DefineRunDimensionRequest,
+ ExperimentOutcomePatch, FrontierRoadmapItemDraft, ListArtifactsQuery, ListExperimentsQuery,
+ ListHypothesesQuery, MetricBestQuery, MetricKeysQuery, MetricRankOrder, MetricScope,
+ OpenExperimentRequest, ProjectStore, STORE_DIR_NAME, StoreError, TextPatch,
+ UpdateArtifactRequest, UpdateExperimentRequest, UpdateFrontierBriefRequest,
+ UpdateHypothesisRequest, VertexSelector,
};
use serde::Serialize;
-use serde_json::{Map, Value, json};
-use uuid::Uuid;
+use serde_json::Value;
#[derive(Parser)]
#[command(
@@ -41,53 +42,52 @@ struct Cli {
enum Command {
/// Initialize a project-local `.fidget_spinner/` store.
Init(InitArgs),
- /// Read the local project payload schema.
- Schema {
+ /// Inspect project metadata and coarse counts.
+ Project {
#[command(subcommand)]
- command: SchemaCommand,
+ command: ProjectCommand,
},
- /// Create and inspect frontiers.
+ /// Manage the repo-local tag registry.
+ Tag {
+ #[command(subcommand)]
+ command: TagCommand,
+ },
+ /// Create and inspect frontier scopes.
Frontier {
#[command(subcommand)]
command: FrontierCommand,
},
- /// Create, inspect, and mutate DAG nodes.
- Node {
+ /// Record and inspect hypotheses.
+ Hypothesis {
#[command(subcommand)]
- command: NodeCommand,
+ command: HypothesisCommand,
},
- /// Record terse off-path notes.
- Note(NoteCommand),
- /// Record core-path hypotheses before experimental work begins.
- Hypothesis(HypothesisCommand),
- /// Manage the repo-local tag registry.
- Tag {
+ /// Open, inspect, update, and close experiments.
+ Experiment {
#[command(subcommand)]
- command: TagCommand,
+ command: ExperimentCommand,
},
- /// Record imported sources and documentary context.
- Source(SourceCommand),
- /// Inspect rankable metrics across closed experiments.
+ /// Register external references and attach them to the ledger.
+ Artifact {
+ #[command(subcommand)]
+ command: ArtifactCommand,
+ },
+ /// Manage project-level metric definitions and rankings.
Metric {
#[command(subcommand)]
command: MetricCommand,
},
- /// Define and inspect run dimensions used to slice experiment metrics.
+ /// Define the typed dimension vocabulary used to slice experiments.
Dimension {
#[command(subcommand)]
command: DimensionCommand,
},
- /// Close a core-path experiment atomically.
- Experiment {
- #[command(subcommand)]
- command: ExperimentCommand,
- },
/// Serve the hardened stdio MCP endpoint.
Mcp {
#[command(subcommand)]
command: McpCommand,
},
- /// Serve the minimal local web navigator.
+ /// Serve the local navigator.
Ui {
#[command(subcommand)]
command: UiCommand,
@@ -101,295 +101,148 @@ enum Command {
#[derive(Args)]
struct InitArgs {
- /// Project root to initialize.
#[arg(long, default_value = ".")]
project: PathBuf,
- /// Human-facing project name. Defaults to the directory name.
#[arg(long)]
name: Option<String>,
- /// Payload schema namespace written into `.fidget_spinner/schema.json`.
- #[arg(long, default_value = "local.project")]
- namespace: String,
}
#[derive(Subcommand)]
-enum SchemaCommand {
- /// Show the current project schema as JSON.
- Show(ProjectArg),
- /// Add or replace one project schema field definition.
- UpsertField(SchemaFieldUpsertArgs),
- /// Remove one project schema field definition.
- RemoveField(SchemaFieldRemoveArgs),
+enum ProjectCommand {
+ Status(ProjectArg),
}
#[derive(Subcommand)]
-enum FrontierCommand {
- /// Create a frontier and root contract node.
- Init(FrontierInitArgs),
- /// Show one frontier projection or list frontiers when omitted.
- Status(FrontierStatusArgs),
-}
-
-#[derive(Args)]
-struct FrontierInitArgs {
- #[command(flatten)]
- project: ProjectArg,
- #[arg(long)]
- label: String,
- #[arg(long)]
- objective: String,
- #[arg(long, default_value = "frontier contract")]
- contract_title: String,
- #[arg(long)]
- contract_summary: Option<String>,
- #[arg(long = "benchmark-suite")]
- benchmark_suites: Vec<String>,
- #[arg(long = "promotion-criterion")]
- promotion_criteria: Vec<String>,
- #[arg(long = "primary-metric-key")]
- primary_metric_key: String,
- #[arg(long = "primary-metric-unit", value_enum)]
- primary_metric_unit: CliMetricUnit,
- #[arg(long = "primary-metric-objective", value_enum)]
- primary_metric_objective: CliOptimizationObjective,
-}
-
-#[derive(Args)]
-struct FrontierStatusArgs {
- #[command(flatten)]
- project: ProjectArg,
- #[arg(long)]
- frontier: Option<String>,
+enum TagCommand {
+ Add(TagAddArgs),
+ List(ProjectArg),
}
#[derive(Subcommand)]
-enum NodeCommand {
- /// Create a generic DAG node.
- Add(NodeAddArgs),
- /// List recent nodes.
- List(NodeListArgs),
- /// Show one node in full.
- Show(NodeShowArgs),
- /// Attach an annotation to a node.
- Annotate(NodeAnnotateArgs),
- /// Archive a node without deleting it.
- Archive(NodeArchiveArgs),
-}
-
-#[derive(Args)]
-struct NodeAddArgs {
- #[command(flatten)]
- project: ProjectArg,
- #[arg(long, value_enum)]
- class: CliNodeClass,
- #[arg(long)]
- frontier: Option<String>,
- #[arg(long)]
- title: String,
- #[arg(long)]
- /// Required for `note` and `source` nodes.
- summary: Option<String>,
- #[arg(long = "payload-json")]
- /// JSON object payload. `note` and `source` nodes require a non-empty `body` string.
- payload_json: Option<String>,
- #[arg(long = "payload-file")]
- payload_file: Option<PathBuf>,
- #[command(flatten)]
- tag_selection: ExplicitTagSelectionArgs,
- #[arg(long = "field")]
- fields: Vec<String>,
- #[arg(long = "annotation")]
- annotations: Vec<String>,
- #[arg(long = "parent")]
- parents: Vec<String>,
-}
-
-#[derive(Args)]
-struct NodeListArgs {
- #[command(flatten)]
- project: ProjectArg,
- #[arg(long)]
- frontier: Option<String>,
- #[arg(long, value_enum)]
- class: Option<CliNodeClass>,
- #[arg(long = "tag")]
- tags: Vec<String>,
- #[arg(long)]
- include_archived: bool,
- #[arg(long, default_value_t = 20)]
- limit: u32,
-}
-
-#[derive(Args, Default)]
-struct ExplicitTagSelectionArgs {
- #[arg(long = "tag")]
- tags: Vec<String>,
- #[arg(long, conflicts_with = "tags")]
- no_tags: bool,
-}
-
-#[derive(Args)]
-struct NodeShowArgs {
- #[command(flatten)]
- project: ProjectArg,
- #[arg(long)]
- node: String,
-}
-
-#[derive(Args)]
-struct NodeAnnotateArgs {
- #[command(flatten)]
- project: ProjectArg,
- #[arg(long)]
- node: String,
- #[arg(long)]
- body: String,
- #[arg(long)]
- label: Option<String>,
- #[arg(long)]
- visible: bool,
-}
-
-#[derive(Args)]
-struct NodeArchiveArgs {
- #[command(flatten)]
- project: ProjectArg,
- #[arg(long)]
- node: String,
+enum FrontierCommand {
+ Create(FrontierCreateArgs),
+ List(ProjectArg),
+ Read(FrontierSelectorArgs),
+ Open(FrontierSelectorArgs),
+ UpdateBrief(FrontierBriefUpdateArgs),
+ History(FrontierSelectorArgs),
}
-#[derive(Args)]
-struct NoteCommand {
- #[command(subcommand)]
- command: NoteSubcommand,
+#[derive(Subcommand)]
+enum HypothesisCommand {
+ Record(HypothesisRecordArgs),
+ List(HypothesisListArgs),
+ Read(HypothesisSelectorArgs),
+ Update(HypothesisUpdateArgs),
+ History(HypothesisSelectorArgs),
}
-#[derive(Args)]
-struct HypothesisCommand {
- #[command(subcommand)]
- command: HypothesisSubcommand,
+#[derive(Subcommand)]
+enum ExperimentCommand {
+ Open(ExperimentOpenArgs),
+ List(ExperimentListArgs),
+ Read(ExperimentSelectorArgs),
+ Update(ExperimentUpdateArgs),
+ Close(ExperimentCloseArgs),
+ History(ExperimentSelectorArgs),
}
#[derive(Subcommand)]
-enum NoteSubcommand {
- /// Record a quick off-path note.
- Quick(QuickNoteArgs),
+enum ArtifactCommand {
+ Record(ArtifactRecordArgs),
+ List(ArtifactListArgs),
+ Read(ArtifactSelectorArgs),
+ Update(ArtifactUpdateArgs),
+ History(ArtifactSelectorArgs),
}
#[derive(Subcommand)]
-enum HypothesisSubcommand {
- /// Record a core-path hypothesis with low ceremony.
- Add(QuickHypothesisArgs),
+enum MetricCommand {
+ Define(MetricDefineArgs),
+ Keys(MetricKeysArgs),
+ Best(MetricBestArgs),
}
#[derive(Subcommand)]
-enum TagCommand {
- /// Register a new repo-local tag.
- Add(TagAddArgs),
- /// List registered repo-local tags.
+enum DimensionCommand {
+ Define(DimensionDefineArgs),
List(ProjectArg),
}
-#[derive(Args)]
-struct SourceCommand {
- #[command(subcommand)]
- command: SourceSubcommand,
+#[derive(Subcommand)]
+enum McpCommand {
+ Serve(McpServeArgs),
+ Worker(McpWorkerArgs),
}
#[derive(Subcommand)]
-enum SourceSubcommand {
- /// Record imported source material or documentary context.
- Add(QuickSourceArgs),
+enum UiCommand {
+ Serve(UiServeArgs),
}
#[derive(Subcommand)]
-enum MetricCommand {
- /// Register a project-level metric definition.
- Define(MetricDefineArgs),
- /// List rankable numeric keys observed in completed experiments.
- Keys(MetricKeysArgs),
- /// Rank completed experiments by one numeric key.
- Best(MetricBestArgs),
- /// Re-run the idempotent legacy metric-plane normalization.
- Migrate(ProjectArg),
+enum SkillCommand {
+ List,
+ Install(SkillInstallArgs),
+ Show(SkillShowArgs),
}
-#[derive(Subcommand)]
-enum DimensionCommand {
- /// Register a project-level run dimension definition.
- Define(DimensionDefineArgs),
- /// List run dimensions and sample values observed in completed runs.
- List(ProjectArg),
+#[derive(Args, Clone)]
+struct ProjectArg {
+ #[arg(long, default_value = ".")]
+ project: PathBuf,
}
#[derive(Args)]
-struct MetricDefineArgs {
+struct TagAddArgs {
#[command(flatten)]
project: ProjectArg,
- /// Metric key used in experiment closure and ranking.
#[arg(long)]
- key: String,
- /// Canonical unit for this metric key.
- #[arg(long, value_enum)]
- unit: CliMetricUnit,
- /// Optimization direction for this metric key.
- #[arg(long, value_enum)]
- objective: CliOptimizationObjective,
- /// Optional human description shown in metric listings.
+ name: String,
#[arg(long)]
- description: Option<String>,
+ description: String,
}
#[derive(Args)]
-struct MetricKeysArgs {
+struct FrontierCreateArgs {
#[command(flatten)]
project: ProjectArg,
- /// Restrict results to one frontier.
#[arg(long)]
- frontier: Option<String>,
- /// Restrict results to one metric source.
- #[arg(long, value_enum)]
- source: Option<CliMetricSource>,
- /// Exact run-dimension filter in the form `key=value`.
- #[arg(long = "dimension")]
- dimensions: Vec<String>,
+ label: String,
+ #[arg(long)]
+ objective: String,
+ #[arg(long)]
+ slug: Option<String>,
}
#[derive(Args)]
-struct DimensionDefineArgs {
+struct FrontierSelectorArgs {
#[command(flatten)]
project: ProjectArg,
- /// Run-dimension key used to slice experiments.
#[arg(long)]
- key: String,
- /// Canonical value type for this run dimension.
- #[arg(long = "type", value_enum)]
- value_type: CliFieldValueType,
- /// Optional human description shown in dimension listings.
- #[arg(long)]
- description: Option<String>,
+ frontier: String,
}
#[derive(Args)]
-struct QuickNoteArgs {
+struct FrontierBriefUpdateArgs {
#[command(flatten)]
project: ProjectArg,
#[arg(long)]
- frontier: Option<String>,
+ frontier: String,
#[arg(long)]
- title: String,
+ expected_revision: Option<u64>,
#[arg(long)]
- summary: String,
+ situation: Option<String>,
#[arg(long)]
- body: String,
- #[command(flatten)]
- tag_selection: ExplicitTagSelectionArgs,
- #[arg(long = "parent")]
- parents: Vec<String>,
+ clear_situation: bool,
+ #[arg(long = "unknown")]
+ unknowns: Vec<String>,
+ #[arg(long = "roadmap")]
+ roadmap: Vec<String>,
}
#[derive(Args)]
-struct QuickHypothesisArgs {
+struct HypothesisRecordArgs {
#[command(flatten)]
project: ProjectArg,
#[arg(long)]
@@ -400,255 +253,330 @@ struct QuickHypothesisArgs {
summary: String,
#[arg(long)]
body: String,
+ #[arg(long)]
+ slug: Option<String>,
+ #[arg(long = "tag")]
+ tags: Vec<String>,
#[arg(long = "parent")]
parents: Vec<String>,
}
#[derive(Args)]
-struct TagAddArgs {
+struct HypothesisListArgs {
#[command(flatten)]
project: ProjectArg,
#[arg(long)]
- name: String,
+ frontier: Option<String>,
+ #[arg(long = "tag")]
+ tags: Vec<String>,
#[arg(long)]
- description: String,
+ include_archived: bool,
+ #[arg(long)]
+ limit: Option<u32>,
}
#[derive(Args)]
-struct QuickSourceArgs {
+struct HypothesisSelectorArgs {
#[command(flatten)]
project: ProjectArg,
#[arg(long)]
- frontier: Option<String>,
+ hypothesis: String,
+}
+
+#[derive(Args)]
+struct HypothesisUpdateArgs {
+ #[command(flatten)]
+ project: ProjectArg,
#[arg(long)]
- title: String,
+ hypothesis: String,
#[arg(long)]
- summary: String,
+ expected_revision: Option<u64>,
#[arg(long)]
- body: String,
+ title: Option<String>,
+ #[arg(long)]
+ summary: Option<String>,
+ #[arg(long)]
+ body: Option<String>,
+ #[arg(long = "tag")]
+ tags: Vec<String>,
+ #[arg(long = "replace-tags")]
+ replace_tags: bool,
+ #[arg(long = "parent")]
+ parents: Vec<String>,
+ #[arg(long = "replace-parents")]
+ replace_parents: bool,
+ #[arg(long, value_enum)]
+ state: Option<CliArchivePatch>,
+}
+
+#[derive(Args)]
+struct ExperimentOpenArgs {
#[command(flatten)]
- tag_selection: ExplicitTagSelectionArgs,
+ project: ProjectArg,
+ #[arg(long)]
+ hypothesis: String,
+ #[arg(long)]
+ title: String,
+ #[arg(long)]
+ summary: Option<String>,
+ #[arg(long)]
+ slug: Option<String>,
+ #[arg(long = "tag")]
+ tags: Vec<String>,
#[arg(long = "parent")]
parents: Vec<String>,
}
#[derive(Args)]
-struct SchemaFieldUpsertArgs {
+struct ExperimentListArgs {
#[command(flatten)]
project: ProjectArg,
#[arg(long)]
- name: String,
- #[arg(long = "class", value_enum)]
- classes: Vec<CliNodeClass>,
- #[arg(long, value_enum)]
- presence: CliFieldPresence,
- #[arg(long, value_enum)]
- severity: CliDiagnosticSeverity,
+ frontier: Option<String>,
+ #[arg(long)]
+ hypothesis: Option<String>,
#[arg(long, value_enum)]
- role: CliFieldRole,
- #[arg(long = "inference", value_enum)]
- inference_policy: CliInferencePolicy,
- #[arg(long = "type", value_enum)]
- value_type: Option<CliFieldValueType>,
+ status: Option<CliExperimentStatus>,
+ #[arg(long = "tag")]
+ tags: Vec<String>,
+ #[arg(long)]
+ include_archived: bool,
+ #[arg(long)]
+ limit: Option<u32>,
}
#[derive(Args)]
-struct SchemaFieldRemoveArgs {
+struct ExperimentSelectorArgs {
#[command(flatten)]
project: ProjectArg,
#[arg(long)]
- name: String,
- #[arg(long = "class", value_enum)]
- classes: Vec<CliNodeClass>,
+ experiment: String,
}
#[derive(Args)]
-struct MetricBestArgs {
+struct ExperimentUpdateArgs {
#[command(flatten)]
project: ProjectArg,
- /// Metric key to rank on.
#[arg(long)]
- key: String,
- /// Restrict results to one frontier.
+ experiment: String,
#[arg(long)]
- frontier: Option<String>,
- /// Restrict results to one metric source.
- #[arg(long, value_enum)]
- source: Option<CliMetricSource>,
- /// Explicit ordering for sources whose objective cannot be inferred.
+ expected_revision: Option<u64>,
+ #[arg(long)]
+ title: Option<String>,
+ #[arg(long)]
+ summary: Option<String>,
+ #[arg(long)]
+ clear_summary: bool,
+ #[arg(long = "tag")]
+ tags: Vec<String>,
+ #[arg(long = "replace-tags")]
+ replace_tags: bool,
+ #[arg(long = "parent")]
+ parents: Vec<String>,
+ #[arg(long = "replace-parents")]
+ replace_parents: bool,
#[arg(long, value_enum)]
- order: Option<CliMetricOrder>,
- /// Exact run-dimension filter in the form `key=value`.
- #[arg(long = "dimension")]
- dimensions: Vec<String>,
- /// Maximum number of ranked experiments to return.
- #[arg(long, default_value_t = 10)]
- limit: u32,
-}
-
-#[derive(Subcommand)]
-enum ExperimentCommand {
- /// Open a stateful experiment against one hypothesis.
- Open(ExperimentOpenArgs),
- /// List open experiments, optionally narrowed to one frontier.
- List(ExperimentListArgs),
- /// Close a core-path experiment with run data, note, and verdict.
- Close(Box<ExperimentCloseArgs>),
-}
-
-#[derive(Subcommand)]
-enum McpCommand {
- /// Serve the public stdio MCP host. If `--project` is omitted, the host starts unbound.
- Serve(McpServeArgs),
- #[command(hide = true)]
- Worker(McpWorkerArgs),
-}
-
-#[derive(Subcommand)]
-enum UiCommand {
- /// Serve the local read-only navigator.
- Serve(UiServeArgs),
+ state: Option<CliArchivePatch>,
+ #[arg(long = "outcome-json")]
+ outcome_json: Option<String>,
+ #[arg(long = "outcome-file")]
+ outcome_file: Option<PathBuf>,
}
#[derive(Args)]
struct ExperimentCloseArgs {
#[command(flatten)]
project: ProjectArg,
- #[arg(long = "experiment")]
- experiment_id: String,
- #[arg(long = "run-title")]
- run_title: String,
- #[arg(long = "run-summary")]
- run_summary: Option<String>,
- /// Repeat for each run dimension as `key=value`.
- #[arg(long = "dimension")]
- dimensions: Vec<String>,
- #[arg(long = "backend", value_enum, default_value_t = CliExecutionBackend::Worktree)]
+ #[arg(long)]
+ experiment: String,
+ #[arg(long)]
+ expected_revision: Option<u64>,
+ #[arg(long, value_enum)]
backend: CliExecutionBackend,
- #[arg(long = "cwd")]
- working_directory: Option<PathBuf>,
- /// Repeat for each argv token passed to the recorded command.
#[arg(long = "argv")]
argv: Vec<String>,
- /// Repeat for each environment override as `KEY=VALUE`.
+ #[arg(long)]
+ working_directory: Option<PathBuf>,
#[arg(long = "env")]
env: Vec<String>,
- /// Primary metric in the form `key=value`; key must be preregistered.
+ #[arg(long = "dimension")]
+ dimensions: Vec<String>,
#[arg(long = "primary-metric")]
primary_metric: String,
- /// Supporting metric in the form `key=value`; repeat as needed.
#[arg(long = "metric")]
- metrics: Vec<String>,
- #[arg(long)]
- note: String,
- #[arg(long = "next-hypothesis")]
- next_hypotheses: Vec<String>,
- #[arg(long = "verdict", value_enum)]
+ supporting_metrics: Vec<String>,
+ #[arg(long, value_enum)]
verdict: CliFrontierVerdict,
- #[arg(long = "analysis-title")]
- analysis_title: Option<String>,
- #[arg(long = "analysis-summary")]
+ #[arg(long)]
+ rationale: String,
+ #[arg(long)]
analysis_summary: Option<String>,
- #[arg(long = "analysis-body")]
+ #[arg(long)]
analysis_body: Option<String>,
- #[arg(long = "decision-title")]
- decision_title: String,
- #[arg(long = "decision-rationale")]
- decision_rationale: String,
}
#[derive(Args)]
-struct ExperimentOpenArgs {
+struct ArtifactRecordArgs {
#[command(flatten)]
project: ProjectArg,
#[arg(long)]
- frontier: String,
- #[arg(long = "hypothesis-node")]
- hypothesis_node: String,
+ kind: CliArtifactKind,
#[arg(long)]
- title: String,
+ label: String,
#[arg(long)]
summary: Option<String>,
+ #[arg(long)]
+ locator: String,
+ #[arg(long)]
+ media_type: Option<String>,
+ #[arg(long)]
+ slug: Option<String>,
+ #[arg(long = "attach")]
+ attachments: Vec<String>,
}
#[derive(Args)]
-struct ExperimentListArgs {
+struct ArtifactListArgs {
#[command(flatten)]
project: ProjectArg,
#[arg(long)]
frontier: Option<String>,
+ #[arg(long)]
+ kind: Option<CliArtifactKind>,
+ #[arg(long)]
+ attached_to: Option<String>,
+ #[arg(long)]
+ limit: Option<u32>,
}
-#[derive(Subcommand)]
-enum SkillCommand {
- /// List bundled skills.
- List,
- /// Install bundled skills into a Codex skill directory.
- Install(SkillInstallArgs),
- /// Print one bundled skill body.
- Show(SkillShowArgs),
+#[derive(Args)]
+struct ArtifactSelectorArgs {
+ #[command(flatten)]
+ project: ProjectArg,
+ #[arg(long)]
+ artifact: String,
}
#[derive(Args)]
-struct SkillInstallArgs {
- /// Bundled skill name. Defaults to all bundled skills.
+struct ArtifactUpdateArgs {
+ #[command(flatten)]
+ project: ProjectArg,
#[arg(long)]
- name: Option<String>,
- /// Destination root. Defaults to `~/.codex/skills`.
+ artifact: String,
#[arg(long)]
- destination: Option<PathBuf>,
+ expected_revision: Option<u64>,
+ #[arg(long)]
+ kind: Option<CliArtifactKind>,
+ #[arg(long)]
+ label: Option<String>,
+ #[arg(long)]
+ summary: Option<String>,
+ #[arg(long)]
+ clear_summary: bool,
+ #[arg(long)]
+ locator: Option<String>,
+ #[arg(long)]
+ media_type: Option<String>,
+ #[arg(long)]
+ clear_media_type: bool,
+ #[arg(long = "attach")]
+ attachments: Vec<String>,
+ #[arg(long = "replace-attachments")]
+ replace_attachments: bool,
}
#[derive(Args)]
-struct SkillShowArgs {
- /// Bundled skill name. Defaults to `fidget-spinner`.
+struct MetricDefineArgs {
+ #[command(flatten)]
+ project: ProjectArg,
#[arg(long)]
- name: Option<String>,
+ key: String,
+ #[arg(long, value_enum)]
+ unit: CliMetricUnit,
+ #[arg(long, value_enum)]
+ objective: CliOptimizationObjective,
+ #[arg(long, value_enum, default_value_t = CliMetricVisibility::Canonical)]
+ visibility: CliMetricVisibility,
+ #[arg(long)]
+ description: Option<String>,
}
#[derive(Args)]
-struct ProjectArg {
- /// Project root or any nested path inside a project containing `.fidget_spinner/`.
- #[arg(long, default_value = ".")]
- project: PathBuf,
+struct MetricKeysArgs {
+ #[command(flatten)]
+ project: ProjectArg,
+ #[arg(long)]
+ frontier: Option<String>,
+ #[arg(long, value_enum, default_value_t = CliMetricScope::Live)]
+ scope: CliMetricScope,
+}
+
+#[derive(Args)]
+struct MetricBestArgs {
+ #[command(flatten)]
+ project: ProjectArg,
+ #[arg(long)]
+ frontier: Option<String>,
+ #[arg(long)]
+ hypothesis: Option<String>,
+ #[arg(long)]
+ key: String,
+ #[arg(long = "dimension")]
+ dimensions: Vec<String>,
+ #[arg(long)]
+ include_rejected: bool,
+ #[arg(long)]
+ limit: Option<u32>,
+ #[arg(long, value_enum)]
+ order: Option<CliMetricRankOrder>,
+}
+
+#[derive(Args)]
+struct DimensionDefineArgs {
+ #[command(flatten)]
+ project: ProjectArg,
+ #[arg(long)]
+ key: String,
+ #[arg(long, value_enum)]
+ value_type: CliFieldValueType,
+ #[arg(long)]
+ description: Option<String>,
}
#[derive(Args)]
struct McpServeArgs {
- /// Optional initial project binding. When omitted, the MCP starts unbound.
#[arg(long)]
project: Option<PathBuf>,
}
#[derive(Args)]
struct McpWorkerArgs {
- #[arg(long)]
+ #[arg(long, default_value = ".")]
project: PathBuf,
}
#[derive(Args)]
struct UiServeArgs {
- /// Path to serve. Accepts a project root, `.fidget_spinner/`, descendants inside it,
- /// or a parent directory containing one unique descendant project store.
- #[arg(long = "path", alias = "project", default_value = ".")]
+ #[arg(long, default_value = ".")]
path: PathBuf,
- /// Bind address for the local navigator.
#[arg(long, default_value = "127.0.0.1:8913")]
bind: SocketAddr,
- /// Maximum rows rendered in list views.
- #[arg(long, default_value_t = 200)]
- limit: u32,
+ #[arg(long)]
+ limit: Option<u32>,
}
-#[derive(Clone, Copy, Debug, Eq, PartialEq, ValueEnum)]
-enum CliNodeClass {
- Contract,
- Hypothesis,
- Run,
- Analysis,
- Decision,
- Source,
- Note,
+#[derive(Args)]
+struct SkillInstallArgs {
+ #[arg(long)]
+ name: Option<String>,
+ #[arg(long)]
+ destination: Option<PathBuf>,
+}
+
+#[derive(Args)]
+struct SkillShowArgs {
+ #[arg(long)]
+ name: Option<String>,
}
#[derive(Clone, Copy, Debug, Eq, PartialEq, ValueEnum)]
@@ -668,23 +596,22 @@ enum CliOptimizationObjective {
}
#[derive(Clone, Copy, Debug, Eq, PartialEq, ValueEnum)]
-enum CliExecutionBackend {
- Local,
- Worktree,
- Ssh,
+enum CliMetricVisibility {
+ Canonical,
+ Minor,
+ Hidden,
+ Archived,
}
#[derive(Clone, Copy, Debug, Eq, PartialEq, ValueEnum)]
-enum CliMetricSource {
- RunMetric,
- HypothesisPayload,
- RunPayload,
- AnalysisPayload,
- DecisionPayload,
+enum CliMetricScope {
+ Live,
+ Visible,
+ All,
}
#[derive(Clone, Copy, Debug, Eq, PartialEq, ValueEnum)]
-enum CliMetricOrder {
+enum CliMetricRankOrder {
Asc,
Desc,
}
@@ -698,31 +625,23 @@ enum CliFieldValueType {
}
#[derive(Clone, Copy, Debug, Eq, PartialEq, ValueEnum)]
-enum CliDiagnosticSeverity {
- Error,
- Warning,
- Info,
-}
-
-#[derive(Clone, Copy, Debug, Eq, PartialEq, ValueEnum)]
-enum CliFieldPresence {
- Required,
- Recommended,
- Optional,
+enum CliArtifactKind {
+ Document,
+ Link,
+ Log,
+ Table,
+ Plot,
+ Dump,
+ Binary,
+ Other,
}
#[derive(Clone, Copy, Debug, Eq, PartialEq, ValueEnum)]
-enum CliFieldRole {
- Index,
- ProjectionGate,
- RenderOnly,
- Opaque,
-}
-
-#[derive(Clone, Copy, Debug, Eq, PartialEq, ValueEnum)]
-enum CliInferencePolicy {
- ManualOnly,
- ModelMayInfer,
+enum CliExecutionBackend {
+ Manual,
+ LocalProcess,
+ WorktreeProcess,
+ SshProcess,
}
#[derive(Clone, Copy, Debug, Eq, PartialEq, ValueEnum)]
@@ -733,63 +652,89 @@ enum CliFrontierVerdict {
Rejected,
}
-fn main() {
- if let Err(error) = run() {
- eprintln!("error: {error}");
- std::process::exit(1);
- }
+#[derive(Clone, Copy, Debug, Eq, PartialEq, ValueEnum)]
+enum CliExperimentStatus {
+ Open,
+ Closed,
}
-fn run() -> Result<(), StoreError> {
+#[derive(Clone, Copy, Debug, Eq, PartialEq, ValueEnum)]
+enum CliArchivePatch {
+ Archive,
+ Restore,
+}
+
+fn main() -> Result<(), StoreError> {
let cli = Cli::parse();
match cli.command {
Command::Init(args) => run_init(args),
- Command::Schema { command } => match command {
- SchemaCommand::Show(project) => {
- let store = open_store(&project.project)?;
- print_json(store.schema())
- }
- SchemaCommand::UpsertField(args) => run_schema_field_upsert(args),
- SchemaCommand::RemoveField(args) => run_schema_field_remove(args),
+ Command::Project { command } => match command {
+ ProjectCommand::Status(args) => print_json(&open_store(&args.project)?.status()?),
},
- Command::Frontier { command } => match command {
- FrontierCommand::Init(args) => run_frontier_init(args),
- FrontierCommand::Status(args) => run_frontier_status(args),
- },
- Command::Node { command } => match command {
- NodeCommand::Add(args) => run_node_add(args),
- NodeCommand::List(args) => run_node_list(args),
- NodeCommand::Show(args) => run_node_show(args),
- NodeCommand::Annotate(args) => run_node_annotate(args),
- NodeCommand::Archive(args) => run_node_archive(args),
+ Command::Tag { command } => match command {
+ TagCommand::Add(args) => run_tag_add(args),
+ TagCommand::List(args) => print_json(&open_store(&args.project)?.list_tags()?),
},
- Command::Note(command) => match command.command {
- NoteSubcommand::Quick(args) => run_quick_note(args),
+ Command::Frontier { command } => match command {
+ FrontierCommand::Create(args) => run_frontier_create(args),
+ FrontierCommand::List(args) => {
+ print_json(&open_store(&args.project)?.list_frontiers()?)
+ }
+ FrontierCommand::Read(args) => {
+ print_json(&open_store(&args.project.project)?.read_frontier(&args.frontier)?)
+ }
+ FrontierCommand::Open(args) => {
+ print_json(&open_store(&args.project.project)?.frontier_open(&args.frontier)?)
+ }
+ FrontierCommand::UpdateBrief(args) => run_frontier_brief_update(args),
+ FrontierCommand::History(args) => {
+ print_json(&open_store(&args.project.project)?.frontier_history(&args.frontier)?)
+ }
},
- Command::Hypothesis(command) => match command.command {
- HypothesisSubcommand::Add(args) => run_quick_hypothesis(args),
+ Command::Hypothesis { command } => match command {
+ HypothesisCommand::Record(args) => run_hypothesis_record(args),
+ HypothesisCommand::List(args) => run_hypothesis_list(args),
+ HypothesisCommand::Read(args) => {
+ print_json(&open_store(&args.project.project)?.read_hypothesis(&args.hypothesis)?)
+ }
+ HypothesisCommand::Update(args) => run_hypothesis_update(args),
+ HypothesisCommand::History(args) => print_json(
+ &open_store(&args.project.project)?.hypothesis_history(&args.hypothesis)?,
+ ),
},
- Command::Tag { command } => match command {
- TagCommand::Add(args) => run_tag_add(args),
- TagCommand::List(project) => run_tag_list(project),
+ Command::Experiment { command } => match command {
+ ExperimentCommand::Open(args) => run_experiment_open(args),
+ ExperimentCommand::List(args) => run_experiment_list(args),
+ ExperimentCommand::Read(args) => {
+ print_json(&open_store(&args.project.project)?.read_experiment(&args.experiment)?)
+ }
+ ExperimentCommand::Update(args) => run_experiment_update(args),
+ ExperimentCommand::Close(args) => run_experiment_close(args),
+ ExperimentCommand::History(args) => print_json(
+ &open_store(&args.project.project)?.experiment_history(&args.experiment)?,
+ ),
},
- Command::Source(command) => match command.command {
- SourceSubcommand::Add(args) => run_quick_source(args),
+ Command::Artifact { command } => match command {
+ ArtifactCommand::Record(args) => run_artifact_record(args),
+ ArtifactCommand::List(args) => run_artifact_list(args),
+ ArtifactCommand::Read(args) => {
+ print_json(&open_store(&args.project.project)?.read_artifact(&args.artifact)?)
+ }
+ ArtifactCommand::Update(args) => run_artifact_update(args),
+ ArtifactCommand::History(args) => {
+ print_json(&open_store(&args.project.project)?.artifact_history(&args.artifact)?)
+ }
},
Command::Metric { command } => match command {
MetricCommand::Define(args) => run_metric_define(args),
MetricCommand::Keys(args) => run_metric_keys(args),
MetricCommand::Best(args) => run_metric_best(args),
- MetricCommand::Migrate(project) => run_metric_migrate(project),
},
Command::Dimension { command } => match command {
DimensionCommand::Define(args) => run_dimension_define(args),
- DimensionCommand::List(project) => run_dimension_list(project),
- },
- Command::Experiment { command } => match command {
- ExperimentCommand::Open(args) => run_experiment_open(args),
- ExperimentCommand::List(args) => run_experiment_list(args),
- ExperimentCommand::Close(args) => run_experiment_close(*args),
+ DimensionCommand::List(args) => {
+ print_json(&open_store(&args.project)?.list_run_dimensions()?)
+ }
},
Command::Mcp { command } => match command {
McpCommand::Serve(args) => mcp::serve(args.project),
@@ -811,385 +756,278 @@ fn run() -> Result<(), StoreError> {
fn run_init(args: InitArgs) -> Result<(), StoreError> {
let project_root = utf8_path(args.project);
- let display_name = args
- .name
- .map(NonEmptyText::new)
- .transpose()?
- .unwrap_or(default_display_name_for_root(&project_root)?);
- let namespace = NonEmptyText::new(args.namespace)?;
- let store = ProjectStore::init(&project_root, display_name, namespace)?;
- println!("initialized {}", store.state_root());
- println!("project: {}", store.config().display_name);
- println!("schema: {}", store.state_root().join("schema.json"));
- maybe_print_gitignore_hint(&project_root)?;
- Ok(())
+ let store = ProjectStore::init(
+ &project_root,
+ args.name
+ .map(NonEmptyText::new)
+ .transpose()?
+ .unwrap_or(default_display_name_for_root(&project_root)?),
+ )?;
+ print_json(&store.status()?)
}
-fn run_frontier_init(args: FrontierInitArgs) -> Result<(), StoreError> {
+fn run_tag_add(args: TagAddArgs) -> Result<(), StoreError> {
let mut store = open_store(&args.project.project)?;
- let projection = store.create_frontier(CreateFrontierRequest {
- label: NonEmptyText::new(args.label)?,
- contract_title: NonEmptyText::new(args.contract_title)?,
- contract_summary: args.contract_summary.map(NonEmptyText::new).transpose()?,
- contract: FrontierContract {
- objective: NonEmptyText::new(args.objective)?,
- evaluation: fidget_spinner_core::EvaluationProtocol {
- benchmark_suites: to_text_set(args.benchmark_suites)?,
- primary_metric: MetricSpec {
- metric_key: NonEmptyText::new(args.primary_metric_key)?,
- unit: args.primary_metric_unit.into(),
- objective: args.primary_metric_objective.into(),
- },
- supporting_metrics: BTreeSet::new(),
- },
- promotion_criteria: to_text_vec(args.promotion_criteria)?,
- },
- })?;
- print_json(&projection)
+ print_json(&store.register_tag(
+ TagName::new(args.name)?,
+ NonEmptyText::new(args.description)?,
+ )?)
}
-fn run_frontier_status(args: FrontierStatusArgs) -> Result<(), StoreError> {
- let store = open_store(&args.project.project)?;
- if let Some(frontier) = args.frontier {
- let projection = store.frontier_projection(parse_frontier_id(&frontier)?)?;
- return print_json(&projection);
- }
- let frontiers = store.list_frontiers()?;
- if frontiers.len() == 1 {
- return print_json(&store.frontier_projection(frontiers[0].id)?);
- }
- print_json(&frontiers)
+fn run_frontier_create(args: FrontierCreateArgs) -> Result<(), StoreError> {
+ let mut store = open_store(&args.project.project)?;
+ print_json(&store.create_frontier(CreateFrontierRequest {
+ label: NonEmptyText::new(args.label)?,
+ objective: NonEmptyText::new(args.objective)?,
+ slug: args.slug.map(Slug::new).transpose()?,
+ })?)
}
-fn run_schema_field_upsert(args: SchemaFieldUpsertArgs) -> Result<(), StoreError> {
- let mut store = open_store(&args.project.project)?;
- let field = store.upsert_schema_field(UpsertSchemaFieldRequest {
- name: NonEmptyText::new(args.name)?,
- node_classes: parse_node_class_set(args.classes),
- presence: args.presence.into(),
- severity: args.severity.into(),
- role: args.role.into(),
- inference_policy: args.inference_policy.into(),
- value_type: args.value_type.map(Into::into),
- })?;
- print_json(&json!({
- "schema": store.schema().schema_ref(),
- "field": schema_field_json(&field),
- }))
-}
-
-fn run_schema_field_remove(args: SchemaFieldRemoveArgs) -> Result<(), StoreError> {
+fn run_frontier_brief_update(args: FrontierBriefUpdateArgs) -> Result<(), StoreError> {
let mut store = open_store(&args.project.project)?;
- let removed_count = store.remove_schema_field(RemoveSchemaFieldRequest {
- name: NonEmptyText::new(args.name)?,
- node_classes: (!args.classes.is_empty()).then(|| parse_node_class_set(args.classes)),
- })?;
- print_json(&json!({
- "schema": store.schema().schema_ref(),
- "removed_count": removed_count,
- }))
+ let roadmap = if args.roadmap.is_empty() {
+ None
+ } else {
+ Some(
+ args.roadmap
+ .into_iter()
+ .map(parse_roadmap_item)
+ .collect::<Result<Vec<_>, _>>()?,
+ )
+ };
+ let unknowns = if args.unknowns.is_empty() {
+ None
+ } else {
+ Some(to_non_empty_texts(args.unknowns)?)
+ };
+ print_json(&store.update_frontier_brief(UpdateFrontierBriefRequest {
+ frontier: args.frontier,
+ expected_revision: args.expected_revision,
+ situation: cli_text_patch(args.situation, args.clear_situation)?,
+ roadmap,
+ unknowns,
+ })?)
}
-fn run_node_add(args: NodeAddArgs) -> Result<(), StoreError> {
+fn run_hypothesis_record(args: HypothesisRecordArgs) -> Result<(), StoreError> {
let mut store = open_store(&args.project.project)?;
- let class: NodeClass = args.class.into();
- let frontier_id = args
- .frontier
- .as_deref()
- .map(parse_frontier_id)
- .transpose()?;
- let tags = optional_cli_tags(args.tag_selection, class == NodeClass::Note)?;
- let payload = load_payload(
- store.schema().schema_ref(),
- args.payload_json,
- args.payload_file,
- args.fields,
- )?;
- validate_cli_prose_payload(class, args.summary.as_deref(), &payload)?;
- let annotations = args
- .annotations
- .into_iter()
- .map(|body| Ok(NodeAnnotation::hidden(NonEmptyText::new(body)?)))
- .collect::<Result<Vec<_>, StoreError>>()?;
- let node = store.add_node(CreateNodeRequest {
- class,
- frontier_id,
+ print_json(&store.create_hypothesis(CreateHypothesisRequest {
+ frontier: args.frontier,
+ slug: args.slug.map(Slug::new).transpose()?,
title: NonEmptyText::new(args.title)?,
- summary: args.summary.map(NonEmptyText::new).transpose()?,
- tags,
- payload,
- annotations,
- attachments: lineage_attachments(args.parents)?,
- })?;
- print_json(&node)
+ summary: NonEmptyText::new(args.summary)?,
+ body: NonEmptyText::new(args.body)?,
+ tags: parse_tag_set(args.tags)?,
+ parents: parse_vertex_selectors(args.parents)?,
+ })?)
}
-fn run_node_list(args: NodeListArgs) -> Result<(), StoreError> {
+fn run_hypothesis_list(args: HypothesisListArgs) -> Result<(), StoreError> {
let store = open_store(&args.project.project)?;
- let items = store.list_nodes(ListNodesQuery {
- frontier_id: args
- .frontier
- .as_deref()
- .map(parse_frontier_id)
- .transpose()?,
- class: args.class.map(Into::into),
+ print_json(&store.list_hypotheses(ListHypothesesQuery {
+ frontier: args.frontier,
tags: parse_tag_set(args.tags)?,
include_archived: args.include_archived,
limit: args.limit,
- })?;
- print_json(&items)
-}
-
-fn run_node_show(args: NodeShowArgs) -> Result<(), StoreError> {
- let store = open_store(&args.project.project)?;
- let node_id = parse_node_id(&args.node)?;
- let node = store
- .get_node(node_id)?
- .ok_or(StoreError::NodeNotFound(node_id))?;
- print_json(&node)
+ })?)
}
-fn run_node_annotate(args: NodeAnnotateArgs) -> Result<(), StoreError> {
+fn run_hypothesis_update(args: HypothesisUpdateArgs) -> Result<(), StoreError> {
let mut store = open_store(&args.project.project)?;
- let annotation = NodeAnnotation {
- id: fidget_spinner_core::AnnotationId::fresh(),
- visibility: if args.visible {
- AnnotationVisibility::Visible
- } else {
- AnnotationVisibility::HiddenByDefault
- },
- label: args.label.map(NonEmptyText::new).transpose()?,
- body: NonEmptyText::new(args.body)?,
- created_at: time::OffsetDateTime::now_utc(),
+ let tags = if args.replace_tags {
+ Some(parse_tag_set(args.tags)?)
+ } else {
+ None
};
- store.annotate_node(parse_node_id(&args.node)?, annotation)?;
- println!("annotated {}", args.node);
- Ok(())
+ let parents = if args.replace_parents {
+ Some(parse_vertex_selectors(args.parents)?)
+ } else {
+ None
+ };
+ print_json(&store.update_hypothesis(UpdateHypothesisRequest {
+ hypothesis: args.hypothesis,
+ expected_revision: args.expected_revision,
+ title: args.title.map(NonEmptyText::new).transpose()?,
+ summary: args.summary.map(NonEmptyText::new).transpose()?,
+ body: args.body.map(NonEmptyText::new).transpose()?,
+ tags,
+ parents,
+ archived: archive_patch(args.state),
+ })?)
}
-fn run_node_archive(args: NodeArchiveArgs) -> Result<(), StoreError> {
+fn run_experiment_open(args: ExperimentOpenArgs) -> Result<(), StoreError> {
let mut store = open_store(&args.project.project)?;
- store.archive_node(parse_node_id(&args.node)?)?;
- println!("archived {}", args.node);
- Ok(())
+ print_json(&store.open_experiment(OpenExperimentRequest {
+ hypothesis: args.hypothesis,
+ slug: args.slug.map(Slug::new).transpose()?,
+ title: NonEmptyText::new(args.title)?,
+ summary: args.summary.map(NonEmptyText::new).transpose()?,
+ tags: parse_tag_set(args.tags)?,
+ parents: parse_vertex_selectors(args.parents)?,
+ })?)
}
-fn run_quick_note(args: QuickNoteArgs) -> Result<(), StoreError> {
+fn run_experiment_list(args: ExperimentListArgs) -> Result<(), StoreError> {
+ let store = open_store(&args.project.project)?;
+ print_json(&store.list_experiments(ListExperimentsQuery {
+ frontier: args.frontier,
+ hypothesis: args.hypothesis,
+ tags: parse_tag_set(args.tags)?,
+ include_archived: args.include_archived,
+ status: args.status.map(Into::into),
+ limit: args.limit,
+ })?)
+}
+
+fn run_experiment_update(args: ExperimentUpdateArgs) -> Result<(), StoreError> {
let mut store = open_store(&args.project.project)?;
- let payload = NodePayload::with_schema(
- store.schema().schema_ref(),
- json_object(json!({ "body": args.body }))?,
- );
- let node = store.add_node(CreateNodeRequest {
- class: NodeClass::Note,
- frontier_id: args
- .frontier
- .as_deref()
- .map(parse_frontier_id)
- .transpose()?,
- title: NonEmptyText::new(args.title)?,
- summary: Some(NonEmptyText::new(args.summary)?),
- tags: Some(explicit_cli_tags(args.tag_selection)?),
- payload,
- annotations: Vec::new(),
- attachments: lineage_attachments(args.parents)?,
- })?;
- print_json(&node)
+ let outcome =
+ load_optional_json::<ExperimentOutcomePatch>(args.outcome_json, args.outcome_file)?;
+ print_json(&store.update_experiment(UpdateExperimentRequest {
+ experiment: args.experiment,
+ expected_revision: args.expected_revision,
+ title: args.title.map(NonEmptyText::new).transpose()?,
+ summary: cli_text_patch(args.summary, args.clear_summary)?,
+ tags: if args.replace_tags {
+ Some(parse_tag_set(args.tags)?)
+ } else {
+ None
+ },
+ parents: if args.replace_parents {
+ Some(parse_vertex_selectors(args.parents)?)
+ } else {
+ None
+ },
+ archived: archive_patch(args.state),
+ outcome,
+ })?)
}
-fn run_quick_hypothesis(args: QuickHypothesisArgs) -> Result<(), StoreError> {
+fn run_experiment_close(args: ExperimentCloseArgs) -> Result<(), StoreError> {
let mut store = open_store(&args.project.project)?;
- let payload = NodePayload::with_schema(
- store.schema().schema_ref(),
- json_object(json!({ "body": args.body }))?,
- );
- let node = store.add_node(CreateNodeRequest {
- class: NodeClass::Hypothesis,
- frontier_id: Some(parse_frontier_id(&args.frontier)?),
- title: NonEmptyText::new(args.title)?,
- summary: Some(NonEmptyText::new(args.summary)?),
- tags: None,
- payload,
- annotations: Vec::new(),
- attachments: lineage_attachments(args.parents)?,
- })?;
- print_json(&node)
+ let analysis = match (args.analysis_summary, args.analysis_body) {
+ (Some(summary), Some(body)) => Some(ExperimentAnalysis {
+ summary: NonEmptyText::new(summary)?,
+ body: NonEmptyText::new(body)?,
+ }),
+ (None, None) => None,
+ _ => {
+ return Err(invalid_input(
+ "analysis requires both --analysis-summary and --analysis-body",
+ ));
+ }
+ };
+ print_json(
+ &store.close_experiment(CloseExperimentRequest {
+ experiment: args.experiment,
+ expected_revision: args.expected_revision,
+ backend: args.backend.into(),
+ command: CommandRecipe::new(
+ args.working_directory.map(utf8_path),
+ to_non_empty_texts(args.argv)?,
+ parse_env(args.env),
+ )?,
+ dimensions: parse_dimension_assignments(args.dimensions)?,
+ primary_metric: parse_metric_value_assignment(&args.primary_metric)?,
+ supporting_metrics: args
+ .supporting_metrics
+ .into_iter()
+ .map(|raw| parse_metric_value_assignment(&raw))
+ .collect::<Result<Vec<_>, _>>()?,
+ verdict: args.verdict.into(),
+ rationale: NonEmptyText::new(args.rationale)?,
+ analysis,
+ })?,
+ )
}
-fn run_tag_add(args: TagAddArgs) -> Result<(), StoreError> {
+fn run_artifact_record(args: ArtifactRecordArgs) -> Result<(), StoreError> {
let mut store = open_store(&args.project.project)?;
- let tag = store.add_tag(
- TagName::new(args.name)?,
- NonEmptyText::new(args.description)?,
- )?;
- print_json(&tag)
+ print_json(&store.create_artifact(CreateArtifactRequest {
+ slug: args.slug.map(Slug::new).transpose()?,
+ kind: args.kind.into(),
+ label: NonEmptyText::new(args.label)?,
+ summary: args.summary.map(NonEmptyText::new).transpose()?,
+ locator: NonEmptyText::new(args.locator)?,
+ media_type: args.media_type.map(NonEmptyText::new).transpose()?,
+ attachments: parse_attachment_selectors(args.attachments)?,
+ })?)
}
-fn run_tag_list(args: ProjectArg) -> Result<(), StoreError> {
- let store = open_store(&args.project)?;
- print_json(&store.list_tags()?)
+fn run_artifact_list(args: ArtifactListArgs) -> Result<(), StoreError> {
+ let store = open_store(&args.project.project)?;
+ print_json(
+ &store.list_artifacts(ListArtifactsQuery {
+ frontier: args.frontier,
+ kind: args.kind.map(Into::into),
+ attached_to: args
+ .attached_to
+ .as_deref()
+ .map(parse_attachment_selector)
+ .transpose()?,
+ limit: args.limit,
+ })?,
+ )
}
-fn run_quick_source(args: QuickSourceArgs) -> Result<(), StoreError> {
+fn run_artifact_update(args: ArtifactUpdateArgs) -> Result<(), StoreError> {
let mut store = open_store(&args.project.project)?;
- let payload = NodePayload::with_schema(
- store.schema().schema_ref(),
- json_object(json!({ "body": args.body }))?,
- );
- let node = store.add_node(CreateNodeRequest {
- class: NodeClass::Source,
- frontier_id: args
- .frontier
- .as_deref()
- .map(parse_frontier_id)
- .transpose()?,
- title: NonEmptyText::new(args.title)?,
- summary: Some(NonEmptyText::new(args.summary)?),
- tags: optional_cli_tags(args.tag_selection, false)?,
- payload,
- annotations: Vec::new(),
- attachments: lineage_attachments(args.parents)?,
- })?;
- print_json(&node)
+ print_json(&store.update_artifact(UpdateArtifactRequest {
+ artifact: args.artifact,
+ expected_revision: args.expected_revision,
+ kind: args.kind.map(Into::into),
+ label: args.label.map(NonEmptyText::new).transpose()?,
+ summary: cli_text_patch(args.summary, args.clear_summary)?,
+ locator: args.locator.map(NonEmptyText::new).transpose()?,
+ media_type: cli_text_patch(args.media_type, args.clear_media_type)?,
+ attachments: if args.replace_attachments {
+ Some(parse_attachment_selectors(args.attachments)?)
+ } else {
+ None
+ },
+ })?)
}
fn run_metric_define(args: MetricDefineArgs) -> Result<(), StoreError> {
let mut store = open_store(&args.project.project)?;
- let record = store.define_metric(DefineMetricRequest {
+ print_json(&store.define_metric(DefineMetricRequest {
key: NonEmptyText::new(args.key)?,
unit: args.unit.into(),
objective: args.objective.into(),
+ visibility: args.visibility.into(),
description: args.description.map(NonEmptyText::new).transpose()?,
- })?;
- print_json(&record)
+ })?)
}
/// `metric keys`: lists metric keys within a frontier/scope and prints them
/// as JSON.
fn run_metric_keys(args: MetricKeysArgs) -> Result<(), StoreError> {
    let store = open_store(&args.project.project)?;
    let query = MetricKeysQuery {
        frontier: args.frontier,
        scope: args.scope.into(),
    };
    print_json(&store.metric_keys(query)?)
}
fn run_metric_best(args: MetricBestArgs) -> Result<(), StoreError> {
let store = open_store(&args.project.project)?;
- let entries = store.best_metrics(MetricBestQuery {
+ print_json(&store.metric_best(MetricBestQuery {
+ frontier: args.frontier,
+ hypothesis: args.hypothesis,
key: NonEmptyText::new(args.key)?,
- frontier_id: args
- .frontier
- .as_deref()
- .map(parse_frontier_id)
- .transpose()?,
- source: args.source.map(Into::into),
- dimensions: coerce_cli_dimension_filters(&store, args.dimensions)?,
- order: args.order.map(Into::into),
+ dimensions: parse_dimension_assignments(args.dimensions)?,
+ include_rejected: args.include_rejected,
limit: args.limit,
- })?;
- print_json(&entries)
-}
-
-fn run_metric_migrate(args: ProjectArg) -> Result<(), StoreError> {
- let mut store = open_store(&args.project)?;
- print_json(&store.migrate_metric_plane()?)
+ order: args.order.map(Into::into),
+ })?)
}
fn run_dimension_define(args: DimensionDefineArgs) -> Result<(), StoreError> {
let mut store = open_store(&args.project.project)?;
- let record = store.define_run_dimension(DefineRunDimensionRequest {
+ print_json(&store.define_run_dimension(DefineRunDimensionRequest {
key: NonEmptyText::new(args.key)?,
value_type: args.value_type.into(),
description: args.description.map(NonEmptyText::new).transpose()?,
- })?;
- print_json(&record)
-}
-
-fn run_dimension_list(args: ProjectArg) -> Result<(), StoreError> {
- let store = open_store(&args.project)?;
- print_json(&store.list_run_dimensions()?)
-}
-
-fn run_experiment_open(args: ExperimentOpenArgs) -> Result<(), StoreError> {
- let mut store = open_store(&args.project.project)?;
- let summary = args.summary.map(NonEmptyText::new).transpose()?;
- let experiment = store.open_experiment(OpenExperimentRequest {
- frontier_id: parse_frontier_id(&args.frontier)?,
- hypothesis_node_id: parse_node_id(&args.hypothesis_node)?,
- title: NonEmptyText::new(args.title)?,
- summary,
- })?;
- print_json(&experiment)
-}
-
-fn run_experiment_list(args: ExperimentListArgs) -> Result<(), StoreError> {
- let store = open_store(&args.project.project)?;
- let frontier_id = args
- .frontier
- .as_deref()
- .map(parse_frontier_id)
- .transpose()?;
- print_json(&store.list_open_experiments(frontier_id)?)
-}
-
-fn run_experiment_close(args: ExperimentCloseArgs) -> Result<(), StoreError> {
- let mut store = open_store(&args.project.project)?;
- let command = CommandRecipe::new(
- args.working_directory
- .map(utf8_path)
- .unwrap_or_else(|| store.project_root().to_path_buf()),
- to_text_vec(args.argv)?,
- parse_env(args.env),
- )?;
- let analysis = match (
- args.analysis_title,
- args.analysis_summary,
- args.analysis_body,
- ) {
- (Some(title), Some(summary), Some(body)) => Some(ExperimentAnalysisDraft {
- title: NonEmptyText::new(title)?,
- summary: NonEmptyText::new(summary)?,
- body: NonEmptyText::new(body)?,
- }),
- (None, None, None) => None,
- _ => {
- return Err(StoreError::Json(serde_json::Error::io(
- std::io::Error::new(
- std::io::ErrorKind::InvalidInput,
- "analysis-title, analysis-summary, and analysis-body must be provided together",
- ),
- )));
- }
- };
- let receipt = store.close_experiment(CloseExperimentRequest {
- experiment_id: parse_experiment_id(&args.experiment_id)?,
- run_title: NonEmptyText::new(args.run_title)?,
- run_summary: args.run_summary.map(NonEmptyText::new).transpose()?,
- backend: args.backend.into(),
- dimensions: coerce_cli_dimension_filters(&store, args.dimensions)?,
- command,
- primary_metric: parse_metric_value(args.primary_metric)?,
- supporting_metrics: args
- .metrics
- .into_iter()
- .map(parse_metric_value)
- .collect::<Result<Vec<_>, _>>()?,
- note: FrontierNote {
- summary: NonEmptyText::new(args.note)?,
- next_hypotheses: to_text_vec(args.next_hypotheses)?,
- },
- verdict: args.verdict.into(),
- analysis,
- decision_title: NonEmptyText::new(args.decision_title)?,
- decision_rationale: NonEmptyText::new(args.decision_rationale)?,
- })?;
- print_json(&receipt)
+ })?)
}
fn run_skill_install(args: SkillInstallArgs) -> Result<(), StoreError> {
@@ -1240,11 +1078,11 @@ fn install_skill(skill: bundled_skill::BundledSkill, destination: &Path) -> Resu
Ok(())
}
/// Opens the project store rooted at `path`, converting it to a UTF-8 path
/// first via [`utf8_path`].
pub(crate) fn open_store(path: &Path) -> Result<ProjectStore, StoreError> {
    ProjectStore::open(utf8_path(path.to_path_buf()))
}
-fn resolve_ui_project_root(path: &Utf8Path) -> Result<Utf8PathBuf, StoreError> {
+pub(crate) fn resolve_ui_project_root(path: &Utf8Path) -> Result<Utf8PathBuf, StoreError> {
if let Some(project_root) = fidget_spinner_store_sqlite::discover_project_root(path) {
return Ok(project_root);
}
@@ -1266,7 +1104,7 @@ fn resolve_ui_project_root(path: &Utf8Path) -> Result<Utf8PathBuf, StoreError> {
}
}
-fn open_or_init_store_for_binding(path: &Path) -> Result<ProjectStore, StoreError> {
+pub(crate) fn open_or_init_store_for_binding(path: &Path) -> Result<ProjectStore, StoreError> {
let requested_root = utf8_path(path.to_path_buf());
match ProjectStore::open(requested_root.clone()) {
Ok(store) => Ok(store),
@@ -1275,17 +1113,13 @@ fn open_or_init_store_for_binding(path: &Path) -> Result<ProjectStore, StoreErro
if !is_empty_directory(&project_root)? {
return Err(StoreError::MissingProjectStore(requested_root));
}
- ProjectStore::init(
- &project_root,
- default_display_name_for_root(&project_root)?,
- default_namespace_for_root(&project_root)?,
- )
+ ProjectStore::init(&project_root, default_display_name_for_root(&project_root)?)
}
Err(error) => Err(error),
}
}
/// Converts any std path into a `Utf8PathBuf` via `to_string_lossy`.
///
/// NOTE(review): the lossy conversion replaces non-UTF-8 bytes, so exotic
/// paths can be silently altered — confirm that is acceptable for all call
/// sites.
pub(crate) fn utf8_path(path: impl Into<PathBuf>) -> Utf8PathBuf {
    Utf8PathBuf::from(path.into().to_string_lossy().into_owned())
}
@@ -1295,7 +1129,7 @@ fn binding_bootstrap_root(path: &Utf8Path) -> Result<Utf8PathBuf, StoreError> {
.parent()
.map_or_else(|| path.to_path_buf(), Utf8Path::to_path_buf)),
Ok(_) => Ok(path.to_path_buf()),
- Err(error) if error.kind() == std::io::ErrorKind::NotFound => Ok(path.to_path_buf()),
+ Err(error) if error.kind() == io::ErrorKind::NotFound => Ok(path.to_path_buf()),
Err(error) => Err(StoreError::from(error)),
}
}
@@ -1307,7 +1141,7 @@ fn is_empty_directory(path: &Utf8Path) -> Result<bool, StoreError> {
Ok(entries.next().transpose()?.is_none())
}
Ok(_) => Ok(false),
- Err(error) if error.kind() == std::io::ErrorKind::NotFound => Ok(false),
+ Err(error) if error.kind() == io::ErrorKind::NotFound => Ok(false),
Err(error) => Err(StoreError::from(error)),
}
}
@@ -1325,7 +1159,7 @@ fn collect_descendant_project_roots(
) -> Result<(), StoreError> {
let metadata = match fs::metadata(path.as_std_path()) {
Ok(metadata) => metadata,
- Err(error) if error.kind() == std::io::ErrorKind::NotFound => return Ok(()),
+ Err(error) if error.kind() == io::ErrorKind::NotFound => return Ok(()),
Err(error) => return Err(StoreError::from(error)),
};
if metadata.is_file() {
@@ -1362,45 +1196,6 @@ fn default_display_name_for_root(project_root: &Utf8Path) -> Result<NonEmptyText
.map_err(StoreError::from)
}
-fn default_namespace_for_root(project_root: &Utf8Path) -> Result<NonEmptyText, StoreError> {
- let slug = slugify_namespace_component(project_root.file_name().unwrap_or("project"));
- NonEmptyText::new(format!("local.{slug}")).map_err(StoreError::from)
-}
-
-fn slugify_namespace_component(raw: &str) -> String {
- let mut slug = String::new();
- let mut previous_was_separator = false;
- for character in raw.chars().flat_map(char::to_lowercase) {
- if character.is_ascii_alphanumeric() {
- slug.push(character);
- previous_was_separator = false;
- continue;
- }
- if !previous_was_separator {
- slug.push('_');
- previous_was_separator = true;
- }
- }
- let slug = slug.trim_matches('_').to_owned();
- if slug.is_empty() {
- "project".to_owned()
- } else {
- slug
- }
-}
-
-fn to_text_vec(values: Vec<String>) -> Result<Vec<NonEmptyText>, StoreError> {
- values
- .into_iter()
- .map(NonEmptyText::new)
- .collect::<Result<Vec<_>, _>>()
- .map_err(StoreError::from)
-}
-
-fn to_text_set(values: Vec<String>) -> Result<BTreeSet<NonEmptyText>, StoreError> {
- to_text_vec(values).map(BTreeSet::from_iter)
-}
-
fn parse_tag_set(values: Vec<String>) -> Result<BTreeSet<TagName>, StoreError> {
values
.into_iter()
@@ -1409,290 +1204,198 @@ fn parse_tag_set(values: Vec<String>) -> Result<BTreeSet<TagName>, StoreError> {
.map_err(StoreError::from)
}
-fn explicit_cli_tags(selection: ExplicitTagSelectionArgs) -> Result<BTreeSet<TagName>, StoreError> {
- optional_cli_tags(selection, true)?.ok_or(StoreError::NoteTagsRequired)
-}
-
-fn optional_cli_tags(
- selection: ExplicitTagSelectionArgs,
- required: bool,
-) -> Result<Option<BTreeSet<TagName>>, StoreError> {
- if selection.no_tags {
- return Ok(Some(BTreeSet::new()));
- }
- if selection.tags.is_empty() {
- return if required {
- Err(StoreError::NoteTagsRequired)
- } else {
- Ok(None)
- };
- }
- Ok(Some(parse_tag_set(selection.tags)?))
-}
-
-fn parse_env(values: Vec<String>) -> BTreeMap<String, String> {
+pub(crate) fn parse_vertex_selectors(
+ values: Vec<String>,
+) -> Result<Vec<VertexSelector>, StoreError> {
values
.into_iter()
- .filter_map(|entry| {
- let (key, value) = entry.split_once('=')?;
- Some((key.to_owned(), value.to_owned()))
+ .map(|raw| {
+ let (kind, selector) = raw
+ .split_once(':')
+ .ok_or_else(|| invalid_input("expected parent selector in the form `hypothesis:<selector>` or `experiment:<selector>`"))?;
+ match kind {
+ "hypothesis" => Ok(VertexSelector::Hypothesis(selector.to_owned())),
+ "experiment" => Ok(VertexSelector::Experiment(selector.to_owned())),
+ _ => Err(invalid_input(format!("unknown parent kind `{kind}`"))),
+ }
})
.collect()
}
-fn lineage_attachments(parents: Vec<String>) -> Result<Vec<EdgeAttachment>, StoreError> {
- parents
+pub(crate) fn parse_attachment_selectors(
+ values: Vec<String>,
+) -> Result<Vec<AttachmentSelector>, StoreError> {
+ values
.into_iter()
- .map(|parent| {
- Ok(EdgeAttachment {
- node_id: parse_node_id(&parent)?,
- kind: fidget_spinner_core::EdgeKind::Lineage,
- direction: EdgeAttachmentDirection::ExistingToNew,
- })
- })
+ .map(|raw| parse_attachment_selector(&raw))
.collect()
}
-fn load_payload(
- schema: fidget_spinner_core::PayloadSchemaRef,
- payload_json: Option<String>,
- payload_file: Option<PathBuf>,
- fields: Vec<String>,
-) -> Result<NodePayload, StoreError> {
- let mut map = Map::new();
- if let Some(text) = payload_json {
- map.extend(json_object(serde_json::from_str::<Value>(&text)?)?);
- }
- if let Some(path) = payload_file {
- let text = fs::read_to_string(path)?;
- map.extend(json_object(serde_json::from_str::<Value>(&text)?)?);
+pub(crate) fn parse_attachment_selector(raw: &str) -> Result<AttachmentSelector, StoreError> {
+ let (kind, selector) = raw
+ .split_once(':')
+ .ok_or_else(|| invalid_input("expected attachment selector in the form `frontier:<selector>`, `hypothesis:<selector>`, or `experiment:<selector>`"))?;
+ match kind {
+ "frontier" => Ok(AttachmentSelector::Frontier(selector.to_owned())),
+ "hypothesis" => Ok(AttachmentSelector::Hypothesis(selector.to_owned())),
+ "experiment" => Ok(AttachmentSelector::Experiment(selector.to_owned())),
+ _ => Err(invalid_input(format!("unknown attachment kind `{kind}`"))),
}
- for field in fields {
- let Some((key, raw_value)) = field.split_once('=') else {
- continue;
- };
- let value = serde_json::from_str::<Value>(raw_value).unwrap_or_else(|_| json!(raw_value));
- let _ = map.insert(key.to_owned(), value);
- }
- Ok(NodePayload::with_schema(schema, map))
}
-fn validate_cli_prose_payload(
- class: NodeClass,
- summary: Option<&str>,
- payload: &NodePayload,
-) -> Result<(), StoreError> {
- if !matches!(class, NodeClass::Note | NodeClass::Source) {
- return Ok(());
- }
- if summary.is_none() {
- return Err(StoreError::ProseSummaryRequired(class));
- }
- match payload.field("body") {
- Some(Value::String(body)) if !body.trim().is_empty() => Ok(()),
- _ => Err(StoreError::ProseBodyRequired(class)),
- }
+fn parse_roadmap_item(raw: String) -> Result<FrontierRoadmapItemDraft, StoreError> {
+ let mut parts = raw.splitn(3, ':');
+ let rank = parts
+ .next()
+ .ok_or_else(|| invalid_input("roadmap items must look like `rank:hypothesis[:summary]`"))?
+ .parse::<u32>()
+ .map_err(|error| invalid_input(format!("invalid roadmap rank: {error}")))?;
+ let hypothesis = parts
+ .next()
+ .ok_or_else(|| invalid_input("roadmap items must include a hypothesis selector"))?
+ .to_owned();
+ let summary = parts
+ .next()
+ .map(NonEmptyText::new)
+ .transpose()
+ .map_err(StoreError::from)?;
+ Ok(FrontierRoadmapItemDraft {
+ rank,
+ hypothesis,
+ summary,
+ })
}
-fn json_object(value: Value) -> Result<Map<String, Value>, StoreError> {
- match value {
- Value::Object(map) => Ok(map),
- other => Err(invalid_input(format!(
- "expected JSON object, got {other:?}"
- ))),
- }
/// Splits `KEY=VALUE` entries into an environment map.
///
/// Entries without an `=` are silently skipped (best-effort parsing); later
/// duplicates of a key overwrite earlier ones.
pub(crate) fn parse_env(values: Vec<String>) -> BTreeMap<String, String> {
    let mut env = BTreeMap::new();
    for entry in values {
        if let Some((key, value)) = entry.split_once('=') {
            env.insert(key.to_owned(), value.to_owned());
        }
    }
    env
}
-fn schema_field_json(field: &ProjectFieldSpec) -> Value {
- json!({
- "name": field.name,
- "node_classes": field.node_classes.iter().map(ToString::to_string).collect::<Vec<_>>(),
- "presence": field.presence.as_str(),
- "severity": field.severity.as_str(),
- "role": field.role.as_str(),
- "inference_policy": field.inference_policy.as_str(),
- "value_type": field.value_type.map(FieldValueType::as_str),
+fn parse_metric_value_assignment(
+ raw: &str,
+) -> Result<fidget_spinner_core::MetricValue, StoreError> {
+ let (key, value) = raw
+ .split_once('=')
+ .ok_or_else(|| invalid_input("expected metric assignment in the form `key=value`"))?;
+ let value = value
+ .parse::<f64>()
+ .map_err(|error| invalid_input(format!("invalid metric value `{value}`: {error}")))?;
+ Ok(fidget_spinner_core::MetricValue {
+ key: NonEmptyText::new(key.to_owned())?,
+ value,
})
}
-fn parse_node_class_set(classes: Vec<CliNodeClass>) -> BTreeSet<NodeClass> {
- classes.into_iter().map(Into::into).collect()
-}
-
-fn run_git(project_root: &Utf8Path, args: &[&str]) -> Result<Option<String>, StoreError> {
- let output = std::process::Command::new("git")
- .arg("-C")
- .arg(project_root.as_str())
- .args(args)
- .output()?;
- if !output.status.success() {
- return Ok(None);
- }
- let text = String::from_utf8_lossy(&output.stdout).trim().to_owned();
- if text.is_empty() {
- return Ok(None);
- }
- Ok(Some(text))
+pub(crate) fn parse_dimension_assignments(
+ values: Vec<String>,
+) -> Result<BTreeMap<NonEmptyText, RunDimensionValue>, StoreError> {
+ values
+ .into_iter()
+ .map(|entry| {
+ let (key, raw_value) = entry.split_once('=').ok_or_else(|| {
+ invalid_input("expected dimension assignment in the form `key=value`")
+ })?;
+ let json_value = serde_json::from_str::<Value>(raw_value)
+ .unwrap_or_else(|_| Value::String(raw_value.to_owned()));
+ Ok((
+ NonEmptyText::new(key.to_owned())?,
+ json_to_dimension_value(json_value)?,
+ ))
+ })
+ .collect()
}
-fn maybe_print_gitignore_hint(project_root: &Utf8Path) -> Result<(), StoreError> {
- if run_git(project_root, &["rev-parse", "--show-toplevel"])?.is_none() {
- return Ok(());
- }
-
- let status = std::process::Command::new("git")
- .arg("-C")
- .arg(project_root.as_str())
- .args(["check-ignore", "-q", ".fidget_spinner"])
- .status()?;
-
- match status.code() {
- Some(0) => Ok(()),
- Some(1) => {
- println!(
- "note: add `.fidget_spinner/` to `.gitignore` or `.git/info/exclude` if you do not want local state in `git status`"
- );
- Ok(())
+fn json_to_dimension_value(value: Value) -> Result<RunDimensionValue, StoreError> {
+ match value {
+ Value::String(raw) => {
+ if time::OffsetDateTime::parse(&raw, &time::format_description::well_known::Rfc3339)
+ .is_ok()
+ {
+ Ok(RunDimensionValue::Timestamp(NonEmptyText::new(raw)?))
+ } else {
+ Ok(RunDimensionValue::String(NonEmptyText::new(raw)?))
+ }
}
- _ => Ok(()),
+ Value::Number(number) => number
+ .as_f64()
+ .map(RunDimensionValue::Numeric)
+ .ok_or_else(|| invalid_input("numeric dimension values must fit into f64")),
+ Value::Bool(value) => Ok(RunDimensionValue::Boolean(value)),
+ _ => Err(invalid_input(
+ "dimension values must be string, number, boolean, or RFC3339 timestamp",
+ )),
}
}
-fn parse_metric_value(raw: String) -> Result<MetricValue, StoreError> {
- let Some((key, value)) = raw.split_once('=') else {
- return Err(invalid_input("metrics must look like key=value"));
- };
- Ok(MetricValue {
- key: NonEmptyText::new(key)?,
- value: value
- .parse::<f64>()
- .map_err(|error| invalid_input(format!("invalid metric value: {error}")))?,
- })
-}
-
-fn coerce_cli_dimension_filters(
- store: &ProjectStore,
- raw_dimensions: Vec<String>,
-) -> Result<BTreeMap<NonEmptyText, fidget_spinner_core::RunDimensionValue>, StoreError> {
- let definitions = store
- .list_run_dimensions()?
- .into_iter()
- .map(|summary| (summary.key.to_string(), summary.value_type))
- .collect::<BTreeMap<_, _>>();
- let raw_dimensions = parse_dimension_assignments(raw_dimensions)?
- .into_iter()
- .map(|(key, raw_value)| {
- let Some(value_type) = definitions.get(&key) else {
- return Err(invalid_input(format!(
- "unknown run dimension `{key}`; register it first"
- )));
- };
- Ok((key, parse_cli_dimension_value(*value_type, &raw_value)?))
- })
- .collect::<Result<BTreeMap<_, _>, StoreError>>()?;
- store.coerce_run_dimensions(raw_dimensions)
-}
-
-fn parse_dimension_assignments(
- raw_dimensions: Vec<String>,
-) -> Result<BTreeMap<String, String>, StoreError> {
- raw_dimensions
+fn to_non_empty_texts(values: Vec<String>) -> Result<Vec<NonEmptyText>, StoreError> {
+ values
.into_iter()
- .map(|raw| {
- let Some((key, value)) = raw.split_once('=') else {
- return Err(invalid_input("dimensions must look like key=value"));
- };
- Ok((key.to_owned(), value.to_owned()))
- })
- .collect()
+ .map(NonEmptyText::new)
+ .collect::<Result<Vec<_>, _>>()
+ .map_err(StoreError::from)
}
-fn parse_cli_dimension_value(value_type: FieldValueType, raw: &str) -> Result<Value, StoreError> {
- match value_type {
- FieldValueType::String | FieldValueType::Timestamp => Ok(Value::String(raw.to_owned())),
- FieldValueType::Numeric => Ok(json!(raw.parse::<f64>().map_err(|error| {
- invalid_input(format!("invalid numeric dimension value: {error}"))
- })?)),
- FieldValueType::Boolean => match raw {
- "true" => Ok(Value::Bool(true)),
- "false" => Ok(Value::Bool(false)),
- other => Err(invalid_input(format!(
- "invalid boolean dimension value `{other}`"
- ))),
- },
+fn load_optional_json<T: for<'de> serde::Deserialize<'de>>(
+ inline: Option<String>,
+ file: Option<PathBuf>,
+) -> Result<Option<T>, StoreError> {
+ match (inline, file) {
+ (Some(raw), None) => serde_json::from_str(&raw)
+ .map(Some)
+ .map_err(StoreError::from),
+ (None, Some(path)) => serde_json::from_slice(&fs::read(path)?)
+ .map(Some)
+ .map_err(StoreError::from),
+ (None, None) => Ok(None),
+ (Some(_), Some(_)) => Err(invalid_input(
+ "use only one of --outcome-json or --outcome-file",
+ )),
}
}
-fn parse_metric_unit(raw: &str) -> Result<MetricUnit, StoreError> {
- match raw {
- "seconds" => Ok(MetricUnit::Seconds),
- "bytes" => Ok(MetricUnit::Bytes),
- "count" => Ok(MetricUnit::Count),
- "ratio" => Ok(MetricUnit::Ratio),
- "custom" => Ok(MetricUnit::Custom),
- other => Err(invalid_input(format!("unknown metric unit `{other}`"))),
+const fn archive_patch(state: Option<CliArchivePatch>) -> Option<bool> {
+ match state {
+ None => None,
+ Some(CliArchivePatch::Archive) => Some(true),
+ Some(CliArchivePatch::Restore) => Some(false),
}
}
-fn parse_optimization_objective(raw: &str) -> Result<OptimizationObjective, StoreError> {
- match raw {
- "minimize" => Ok(OptimizationObjective::Minimize),
- "maximize" => Ok(OptimizationObjective::Maximize),
- "target" => Ok(OptimizationObjective::Target),
- other => Err(invalid_input(format!(
- "unknown optimization objective `{other}`"
- ))),
+fn cli_text_patch(
+ value: Option<String>,
+ clear: bool,
+) -> Result<Option<TextPatch<NonEmptyText>>, StoreError> {
+ if clear {
+ if value.is_some() {
+ return Err(invalid_input("cannot set and clear the same field"));
+ }
+ return Ok(Some(TextPatch::Clear));
}
+ value
+ .map(NonEmptyText::new)
+ .transpose()
+ .map(|value| value.map(TextPatch::Set))
+ .map_err(StoreError::from)
}
-fn parse_node_id(raw: &str) -> Result<fidget_spinner_core::NodeId, StoreError> {
- Ok(fidget_spinner_core::NodeId::from_uuid(Uuid::parse_str(
- raw,
- )?))
-}
-
-fn parse_frontier_id(raw: &str) -> Result<fidget_spinner_core::FrontierId, StoreError> {
- Ok(fidget_spinner_core::FrontierId::from_uuid(Uuid::parse_str(
- raw,
- )?))
+fn invalid_input(message: impl Into<String>) -> StoreError {
+ StoreError::InvalidInput(message.into())
}
-fn parse_experiment_id(raw: &str) -> Result<fidget_spinner_core::ExperimentId, StoreError> {
- Ok(fidget_spinner_core::ExperimentId::from_uuid(
- Uuid::parse_str(raw)?,
- ))
+pub(crate) fn to_pretty_json(value: &impl Serialize) -> Result<String, StoreError> {
+ serde_json::to_string_pretty(value).map_err(StoreError::from)
}
-fn print_json<T: Serialize>(value: &T) -> Result<(), StoreError> {
+fn print_json(value: &impl Serialize) -> Result<(), StoreError> {
println!("{}", to_pretty_json(value)?);
Ok(())
}
-fn to_pretty_json<T: Serialize>(value: &T) -> Result<String, StoreError> {
- serde_json::to_string_pretty(value).map_err(StoreError::from)
-}
-
-fn invalid_input(message: impl Into<String>) -> StoreError {
- StoreError::Json(serde_json::Error::io(std::io::Error::new(
- std::io::ErrorKind::InvalidInput,
- message.into(),
- )))
-}
-
-impl From<CliNodeClass> for NodeClass {
- fn from(value: CliNodeClass) -> Self {
- match value {
- CliNodeClass::Contract => Self::Contract,
- CliNodeClass::Hypothesis => Self::Hypothesis,
- CliNodeClass::Run => Self::Run,
- CliNodeClass::Analysis => Self::Analysis,
- CliNodeClass::Decision => Self::Decision,
- CliNodeClass::Source => Self::Source,
- CliNodeClass::Note => Self::Note,
- }
- }
-}
-
impl From<CliMetricUnit> for MetricUnit {
fn from(value: CliMetricUnit) -> Self {
match value {
@@ -1715,33 +1418,32 @@ impl From<CliOptimizationObjective> for OptimizationObjective {
}
}
-impl From<CliExecutionBackend> for ExecutionBackend {
- fn from(value: CliExecutionBackend) -> Self {
+impl From<CliMetricVisibility> for MetricVisibility {
+ fn from(value: CliMetricVisibility) -> Self {
match value {
- CliExecutionBackend::Local => Self::LocalProcess,
- CliExecutionBackend::Worktree => Self::WorktreeProcess,
- CliExecutionBackend::Ssh => Self::SshProcess,
+ CliMetricVisibility::Canonical => Self::Canonical,
+ CliMetricVisibility::Minor => Self::Minor,
+ CliMetricVisibility::Hidden => Self::Hidden,
+ CliMetricVisibility::Archived => Self::Archived,
}
}
}
-impl From<CliMetricSource> for MetricFieldSource {
- fn from(value: CliMetricSource) -> Self {
+impl From<CliMetricScope> for MetricScope {
+ fn from(value: CliMetricScope) -> Self {
match value {
- CliMetricSource::RunMetric => Self::RunMetric,
- CliMetricSource::HypothesisPayload => Self::HypothesisPayload,
- CliMetricSource::RunPayload => Self::RunPayload,
- CliMetricSource::AnalysisPayload => Self::AnalysisPayload,
- CliMetricSource::DecisionPayload => Self::DecisionPayload,
+ CliMetricScope::Live => Self::Live,
+ CliMetricScope::Visible => Self::Visible,
+ CliMetricScope::All => Self::All,
}
}
}
-impl From<CliMetricOrder> for MetricRankOrder {
- fn from(value: CliMetricOrder) -> Self {
+impl From<CliMetricRankOrder> for MetricRankOrder {
+ fn from(value: CliMetricRankOrder) -> Self {
match value {
- CliMetricOrder::Asc => Self::Asc,
- CliMetricOrder::Desc => Self::Desc,
+ CliMetricRankOrder::Asc => Self::Asc,
+ CliMetricRankOrder::Desc => Self::Desc,
}
}
}
@@ -1757,42 +1459,28 @@ impl From<CliFieldValueType> for FieldValueType {
}
}
-impl From<CliDiagnosticSeverity> for DiagnosticSeverity {
- fn from(value: CliDiagnosticSeverity) -> Self {
- match value {
- CliDiagnosticSeverity::Error => Self::Error,
- CliDiagnosticSeverity::Warning => Self::Warning,
- CliDiagnosticSeverity::Info => Self::Info,
- }
- }
-}
-
-impl From<CliFieldPresence> for FieldPresence {
- fn from(value: CliFieldPresence) -> Self {
- match value {
- CliFieldPresence::Required => Self::Required,
- CliFieldPresence::Recommended => Self::Recommended,
- CliFieldPresence::Optional => Self::Optional,
- }
- }
-}
-
-impl From<CliFieldRole> for FieldRole {
- fn from(value: CliFieldRole) -> Self {
+impl From<CliArtifactKind> for ArtifactKind {
+ fn from(value: CliArtifactKind) -> Self {
match value {
- CliFieldRole::Index => Self::Index,
- CliFieldRole::ProjectionGate => Self::ProjectionGate,
- CliFieldRole::RenderOnly => Self::RenderOnly,
- CliFieldRole::Opaque => Self::Opaque,
+ CliArtifactKind::Document => Self::Document,
+ CliArtifactKind::Link => Self::Link,
+ CliArtifactKind::Log => Self::Log,
+ CliArtifactKind::Table => Self::Table,
+ CliArtifactKind::Plot => Self::Plot,
+ CliArtifactKind::Dump => Self::Dump,
+ CliArtifactKind::Binary => Self::Binary,
+ CliArtifactKind::Other => Self::Other,
}
}
}
-impl From<CliInferencePolicy> for InferencePolicy {
- fn from(value: CliInferencePolicy) -> Self {
+impl From<CliExecutionBackend> for ExecutionBackend {
+ fn from(value: CliExecutionBackend) -> Self {
match value {
- CliInferencePolicy::ManualOnly => Self::ManualOnly,
- CliInferencePolicy::ModelMayInfer => Self::ModelMayInfer,
+ CliExecutionBackend::Manual => Self::Manual,
+ CliExecutionBackend::LocalProcess => Self::LocalProcess,
+ CliExecutionBackend::WorktreeProcess => Self::WorktreeProcess,
+ CliExecutionBackend::SshProcess => Self::SshProcess,
}
}
}
@@ -1808,89 +1496,11 @@ impl From<CliFrontierVerdict> for FrontierVerdict {
}
}
-#[cfg(test)]
-mod tests {
- use super::resolve_ui_project_root;
- use std::fs;
-
- use camino::Utf8PathBuf;
- use fidget_spinner_core::NonEmptyText;
- use fidget_spinner_store_sqlite::{
- PROJECT_CONFIG_NAME, ProjectStore, STORE_DIR_NAME, StoreError,
- };
-
- fn temp_project_root(label: &str) -> Utf8PathBuf {
- let mut path = std::env::temp_dir();
- path.push(format!(
- "fidget_spinner_cli_test_{}_{}",
- label,
- uuid::Uuid::now_v7()
- ));
- Utf8PathBuf::from(path.to_string_lossy().into_owned())
- }
-
- #[test]
- fn ui_resolver_accepts_state_root_and_descendants() -> Result<(), StoreError> {
- let project_root = temp_project_root("ui_resolve_state_root");
- let _store = ProjectStore::init(
- &project_root,
- NonEmptyText::new("ui dogfood")?,
- NonEmptyText::new("local.ui")?,
- )?;
- let state_root = project_root.join(STORE_DIR_NAME);
- let config_path = state_root.join(PROJECT_CONFIG_NAME);
-
- assert_eq!(resolve_ui_project_root(&state_root)?, project_root);
- assert_eq!(resolve_ui_project_root(&config_path)?, project_root);
- Ok(())
- }
-
- #[test]
- fn ui_resolver_accepts_unique_descendant_store_from_parent() -> Result<(), StoreError> {
- let parent_root = temp_project_root("ui_resolve_parent");
- let nested_project = parent_root.join("nested/libgrid");
- fs::create_dir_all(nested_project.as_std_path())?;
- let _store = ProjectStore::init(
- &nested_project,
- NonEmptyText::new("nested ui dogfood")?,
- NonEmptyText::new("local.nested.ui")?,
- )?;
-
- assert_eq!(resolve_ui_project_root(&parent_root)?, nested_project);
- Ok(())
- }
-
- #[test]
- fn ui_resolver_rejects_ambiguous_descendant_stores() -> Result<(), StoreError> {
- let parent_root = temp_project_root("ui_resolve_ambiguous");
- let alpha_project = parent_root.join("alpha");
- let beta_project = parent_root.join("beta");
- fs::create_dir_all(alpha_project.as_std_path())?;
- fs::create_dir_all(beta_project.as_std_path())?;
- let _alpha = ProjectStore::init(
- &alpha_project,
- NonEmptyText::new("alpha")?,
- NonEmptyText::new("local.alpha")?,
- )?;
- let _beta = ProjectStore::init(
- &beta_project,
- NonEmptyText::new("beta")?,
- NonEmptyText::new("local.beta")?,
- )?;
-
- let error = match resolve_ui_project_root(&parent_root) {
- Ok(project_root) => {
- return Err(StoreError::Io(std::io::Error::other(format!(
- "expected ambiguous descendant discovery failure, got {project_root}"
- ))));
- }
- Err(error) => error,
- };
- assert!(
- error
- .to_string()
- .contains("multiple descendant project stores")
- );
- Ok(())
+impl From<CliExperimentStatus> for ExperimentStatus {
+ fn from(value: CliExperimentStatus) -> Self {
+ match value {
+ CliExperimentStatus::Open => Self::Open,
+ CliExperimentStatus::Closed => Self::Closed,
+ }
}
}
diff --git a/crates/fidget-spinner-cli/src/mcp/catalog.rs b/crates/fidget-spinner-cli/src/mcp/catalog.rs
index ae3ca78..9b486bc 100644
--- a/crates/fidget-spinner-cli/src/mcp/catalog.rs
+++ b/crates/fidget-spinner-cli/src/mcp/catalog.rs
@@ -46,756 +46,814 @@ impl ToolSpec {
}
}
/// Static catalog of every MCP tool exposed by this host.
///
/// Each entry carries the client-facing name and description plus two routing
/// contracts: the `DispatchTarget` that handles the call (`Host` vs `Worker`)
/// and the `ReplayContract` governing whether a retried request may be
/// replayed (`Convergent` reads vs `NeverReplay` mutations). Entry order is
/// the order tools are listed to clients.
const TOOL_SPECS: &[ToolSpec] = &[
    // Session binding and project overview.
    ToolSpec {
        name: "project.bind",
        description: "Bind this MCP session to a project root or nested path inside a project store.",
        dispatch: DispatchTarget::Host,
        replay: ReplayContract::NeverReplay,
    },
    ToolSpec {
        name: "project.status",
        description: "Read coarse project metadata and ledger counts for the bound project.",
        dispatch: DispatchTarget::Worker,
        replay: ReplayContract::Convergent,
    },
    // Repo-local tag registry.
    ToolSpec {
        name: "tag.add",
        description: "Register one repo-local tag with a required description.",
        dispatch: DispatchTarget::Worker,
        replay: ReplayContract::NeverReplay,
    },
    ToolSpec {
        name: "tag.list",
        description: "List the repo-local tag registry.",
        dispatch: DispatchTarget::Worker,
        replay: ReplayContract::Convergent,
    },
    // Frontier scopes and the singleton brief.
    ToolSpec {
        name: "frontier.create",
        description: "Create a new frontier scope.",
        dispatch: DispatchTarget::Worker,
        replay: ReplayContract::NeverReplay,
    },
    ToolSpec {
        name: "frontier.list",
        description: "List frontier scopes in the current project.",
        dispatch: DispatchTarget::Worker,
        replay: ReplayContract::Convergent,
    },
    ToolSpec {
        name: "frontier.read",
        description: "Read one frontier record, including its brief.",
        dispatch: DispatchTarget::Worker,
        replay: ReplayContract::Convergent,
    },
    ToolSpec {
        name: "frontier.open",
        description: "Open the bounded frontier overview: brief, active tags, live metrics, active hypotheses, and open experiments.",
        dispatch: DispatchTarget::Worker,
        replay: ReplayContract::Convergent,
    },
    ToolSpec {
        name: "frontier.brief.update",
        description: "Replace or patch the singleton frontier brief.",
        dispatch: DispatchTarget::Worker,
        replay: ReplayContract::NeverReplay,
    },
    ToolSpec {
        name: "frontier.history",
        description: "Read the frontier revision history.",
        dispatch: DispatchTarget::Worker,
        replay: ReplayContract::Convergent,
    },
    // Hypotheses.
    ToolSpec {
        name: "hypothesis.record",
        description: "Record one hypothesis. The body must stay a single paragraph.",
        dispatch: DispatchTarget::Worker,
        replay: ReplayContract::NeverReplay,
    },
    ToolSpec {
        name: "hypothesis.list",
        description: "List hypotheses, optionally narrowed by frontier or tag.",
        dispatch: DispatchTarget::Worker,
        replay: ReplayContract::Convergent,
    },
    ToolSpec {
        name: "hypothesis.read",
        description: "Read one hypothesis with its local neighborhood, experiments, and artifacts.",
        dispatch: DispatchTarget::Worker,
        replay: ReplayContract::Convergent,
    },
    ToolSpec {
        name: "hypothesis.update",
        description: "Patch hypothesis title, summary, body, tags, influence parents, or archive state.",
        dispatch: DispatchTarget::Worker,
        replay: ReplayContract::NeverReplay,
    },
    ToolSpec {
        name: "hypothesis.history",
        description: "Read the revision history for one hypothesis.",
        dispatch: DispatchTarget::Worker,
        replay: ReplayContract::Convergent,
    },
    // Experiments.
    ToolSpec {
        name: "experiment.open",
        description: "Open one experiment anchored to exactly one hypothesis.",
        dispatch: DispatchTarget::Worker,
        replay: ReplayContract::NeverReplay,
    },
    ToolSpec {
        name: "experiment.list",
        description: "List experiments, optionally narrowed by frontier, hypothesis, status, or tags.",
        dispatch: DispatchTarget::Worker,
        replay: ReplayContract::Convergent,
    },
    ToolSpec {
        name: "experiment.read",
        description: "Read one experiment with its owning hypothesis, local neighborhood, outcome, and artifacts.",
        dispatch: DispatchTarget::Worker,
        replay: ReplayContract::Convergent,
    },
    ToolSpec {
        name: "experiment.update",
        description: "Patch experiment metadata, influence parents, archive state, or replace the closed outcome wholesale.",
        dispatch: DispatchTarget::Worker,
        replay: ReplayContract::NeverReplay,
    },
    ToolSpec {
        name: "experiment.close",
        description: "Close one open experiment with typed dimensions, structured metrics, verdict, rationale, and optional analysis.",
        dispatch: DispatchTarget::Worker,
        replay: ReplayContract::NeverReplay,
    },
    ToolSpec {
        name: "experiment.history",
        description: "Read the revision history for one experiment.",
        dispatch: DispatchTarget::Worker,
        replay: ReplayContract::Convergent,
    },
    // Artifact references (metadata only; bodies never flow through Spinner).
    ToolSpec {
        name: "artifact.record",
        description: "Register an external artifact reference and attach it to frontiers, hypotheses, or experiments. Artifact bodies are never read through Spinner.",
        dispatch: DispatchTarget::Worker,
        replay: ReplayContract::NeverReplay,
    },
    ToolSpec {
        name: "artifact.list",
        description: "List artifact references, optionally narrowed by frontier, kind, or attachment target.",
        dispatch: DispatchTarget::Worker,
        replay: ReplayContract::Convergent,
    },
    ToolSpec {
        name: "artifact.read",
        description: "Read one artifact reference and its attachment targets.",
        dispatch: DispatchTarget::Worker,
        replay: ReplayContract::Convergent,
    },
    ToolSpec {
        name: "artifact.update",
        description: "Patch artifact metadata or replace its attachment set.",
        dispatch: DispatchTarget::Worker,
        replay: ReplayContract::NeverReplay,
    },
    ToolSpec {
        name: "artifact.history",
        description: "Read the revision history for one artifact.",
        dispatch: DispatchTarget::Worker,
        replay: ReplayContract::Convergent,
    },
    // Metric and run-dimension planes.
    ToolSpec {
        name: "metric.define",
        description: "Register one project-level metric definition.",
        dispatch: DispatchTarget::Worker,
        replay: ReplayContract::NeverReplay,
    },
    ToolSpec {
        name: "metric.keys",
        description: "List metric keys, defaulting to the live frontier comparison set.",
        dispatch: DispatchTarget::Worker,
        replay: ReplayContract::Convergent,
    },
    ToolSpec {
        name: "metric.best",
        description: "Rank closed experiments by one metric key with optional frontier, hypothesis, or dimension narrowing.",
        dispatch: DispatchTarget::Worker,
        replay: ReplayContract::Convergent,
    },
    ToolSpec {
        name: "run.dimension.define",
        description: "Register one typed run-dimension key.",
        dispatch: DispatchTarget::Worker,
        replay: ReplayContract::NeverReplay,
    },
    ToolSpec {
        name: "run.dimension.list",
        description: "List registered run dimensions.",
        dispatch: DispatchTarget::Worker,
        replay: ReplayContract::Convergent,
    },
    // Host-side skills and introspection.
    ToolSpec {
        name: "skill.list",
        description: "List bundled skills shipped with this package.",
        dispatch: DispatchTarget::Host,
        replay: ReplayContract::Convergent,
    },
    ToolSpec {
        name: "skill.show",
        description: "Return one bundled skill text shipped with this package. Defaults to `fidget-spinner` when name is omitted.",
        dispatch: DispatchTarget::Host,
        replay: ReplayContract::Convergent,
    },
    ToolSpec {
        name: "system.health",
        description: "Read MCP host health, session binding, worker generation, and rollout state.",
        dispatch: DispatchTarget::Host,
        replay: ReplayContract::Convergent,
    },
    ToolSpec {
        name: "system.telemetry",
        description: "Read aggregate MCP host telemetry for this session.",
        dispatch: DispatchTarget::Host,
        replay: ReplayContract::Convergent,
    },
];
+
/// Static catalog of the MCP resources exposed by this host.
///
/// Both bundled skill texts are served host-side and are safe to re-read on
/// retry (`Convergent`).
const RESOURCE_SPECS: &[ResourceSpec] = &[
    ResourceSpec {
        uri: "fidget-spinner://skill/fidget-spinner",
        dispatch: DispatchTarget::Host,
        replay: ReplayContract::Convergent,
    },
    ResourceSpec {
        uri: "fidget-spinner://skill/frontier-loop",
        dispatch: DispatchTarget::Host,
        replay: ReplayContract::Convergent,
    },
];
+
#[must_use]
pub(crate) fn tool_spec(name: &str) -> Option<ToolSpec> {
- match name {
- "project.bind" => Some(ToolSpec {
- name: "project.bind",
- description: "Bind this MCP session to a project root or nested path inside a project store.",
- dispatch: DispatchTarget::Host,
- replay: ReplayContract::NeverReplay,
- }),
- "project.status" => Some(ToolSpec {
- name: "project.status",
- description: "Read local project status, store paths, and git availability for the currently bound project.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::Convergent,
- }),
- "project.schema" => Some(ToolSpec {
- name: "project.schema",
- description: "Read the project-local payload schema and field validation tiers.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::Convergent,
- }),
- "schema.field.upsert" => Some(ToolSpec {
- name: "schema.field.upsert",
- description: "Add or replace one project-local payload schema field definition.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::NeverReplay,
- }),
- "schema.field.remove" => Some(ToolSpec {
- name: "schema.field.remove",
- description: "Remove one project-local payload schema field definition, optionally narrowed by node-class set.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::NeverReplay,
- }),
- "tag.add" => Some(ToolSpec {
- name: "tag.add",
- description: "Register one repo-local tag with a required description. Notes may only reference tags from this registry.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::NeverReplay,
- }),
- "tag.list" => Some(ToolSpec {
- name: "tag.list",
- description: "List repo-local tags available for note and node tagging.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::Convergent,
- }),
- "frontier.list" => Some(ToolSpec {
- name: "frontier.list",
- description: "List frontiers for the current project.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::Convergent,
- }),
- "frontier.status" => Some(ToolSpec {
- name: "frontier.status",
- description: "Read one frontier projection, including open/completed experiment counts and verdict totals.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::Convergent,
- }),
- "frontier.init" => Some(ToolSpec {
- name: "frontier.init",
- description: "Create a new frontier rooted in a contract node.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::NeverReplay,
- }),
- "node.create" => Some(ToolSpec {
- name: "node.create",
- description: "Create a generic DAG node with project payload fields and optional lineage parents.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::NeverReplay,
- }),
- "hypothesis.record" => Some(ToolSpec {
- name: "hypothesis.record",
- description: "Record a core-path hypothesis with low ceremony.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::NeverReplay,
- }),
- "node.list" => Some(ToolSpec {
- name: "node.list",
- description: "List recent nodes. Archived nodes are hidden unless explicitly requested.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::Convergent,
- }),
- "node.read" => Some(ToolSpec {
- name: "node.read",
- description: "Read one node including payload, diagnostics, and hidden annotations.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::Convergent,
- }),
- "node.annotate" => Some(ToolSpec {
- name: "node.annotate",
- description: "Attach a free-form annotation to any node.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::NeverReplay,
- }),
- "node.archive" => Some(ToolSpec {
- name: "node.archive",
- description: "Archive a node so it falls out of default enumeration without being deleted.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::NeverReplay,
- }),
- "note.quick" => Some(ToolSpec {
- name: "note.quick",
- description: "Push a quick off-path note without bureaucratic experiment closure.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::NeverReplay,
- }),
- "source.record" => Some(ToolSpec {
- name: "source.record",
- description: "Record imported sources and documentary context that should live in the DAG without polluting the core path.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::NeverReplay,
- }),
- "metric.define" => Some(ToolSpec {
- name: "metric.define",
- description: "Register one project-level metric definition so experiment ingestion only has to send key/value observations.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::NeverReplay,
- }),
- "run.dimension.define" => Some(ToolSpec {
- name: "run.dimension.define",
- description: "Register one project-level run dimension used to slice metrics across scenarios, budgets, and flags.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::NeverReplay,
- }),
- "run.dimension.list" => Some(ToolSpec {
- name: "run.dimension.list",
- description: "List registered run dimensions together with observed value counts and sample values.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::Convergent,
- }),
- "metric.keys" => Some(ToolSpec {
- name: "metric.keys",
- description: "List rankable metric keys, including registered run metrics and observed payload-derived numeric fields.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::Convergent,
- }),
- "metric.best" => Some(ToolSpec {
- name: "metric.best",
- description: "Rank completed experiments by one numeric key, with optional run-dimension filters.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::Convergent,
- }),
- "metric.migrate" => Some(ToolSpec {
- name: "metric.migrate",
- description: "Re-run the idempotent legacy metric-plane normalization that registers canonical metrics and backfills benchmark_suite dimensions.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::NeverReplay,
- }),
- "experiment.open" => Some(ToolSpec {
- name: "experiment.open",
- description: "Open a stateful experiment against one hypothesis.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::NeverReplay,
- }),
- "experiment.list" => Some(ToolSpec {
- name: "experiment.list",
- description: "List currently open experiments, optionally narrowed to one frontier.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::Convergent,
- }),
- "experiment.read" => Some(ToolSpec {
- name: "experiment.read",
- description: "Read one currently open experiment by id.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::Convergent,
- }),
- "experiment.close" => Some(ToolSpec {
- name: "experiment.close",
- description: "Close one open experiment with typed run dimensions, preregistered metric observations, optional analysis, note, and verdict.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::NeverReplay,
- }),
- "skill.list" => Some(ToolSpec {
- name: "skill.list",
- description: "List bundled skills shipped with this package.",
- dispatch: DispatchTarget::Host,
- replay: ReplayContract::Convergent,
- }),
- "skill.show" => Some(ToolSpec {
- name: "skill.show",
- description: "Return one bundled skill text shipped with this package. Defaults to `fidget-spinner` when name is omitted.",
- dispatch: DispatchTarget::Host,
- replay: ReplayContract::Convergent,
- }),
- "system.health" => Some(ToolSpec {
- name: "system.health",
- description: "Read MCP host health, session binding, worker generation, rollout state, and the last fault.",
- dispatch: DispatchTarget::Host,
- replay: ReplayContract::Convergent,
- }),
- "system.telemetry" => Some(ToolSpec {
- name: "system.telemetry",
- description: "Read aggregate request, retry, restart, and per-operation telemetry for this MCP session.",
- dispatch: DispatchTarget::Host,
- replay: ReplayContract::Convergent,
- }),
- _ => None,
- }
+ TOOL_SPECS.iter().copied().find(|spec| spec.name == name)
}
#[must_use]
pub(crate) fn resource_spec(uri: &str) -> Option<ResourceSpec> {
- match uri {
- "fidget-spinner://project/config" => Some(ResourceSpec {
- uri: "fidget-spinner://project/config",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::Convergent,
- }),
- "fidget-spinner://project/schema" => Some(ResourceSpec {
- uri: "fidget-spinner://project/schema",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::Convergent,
- }),
- "fidget-spinner://skill/fidget-spinner" => Some(ResourceSpec {
- uri: "fidget-spinner://skill/fidget-spinner",
- dispatch: DispatchTarget::Host,
- replay: ReplayContract::Convergent,
- }),
- "fidget-spinner://skill/frontier-loop" => Some(ResourceSpec {
- uri: "fidget-spinner://skill/frontier-loop",
- dispatch: DispatchTarget::Host,
- replay: ReplayContract::Convergent,
- }),
- _ => None,
- }
+ RESOURCE_SPECS.iter().copied().find(|spec| spec.uri == uri)
}
#[must_use]
pub(crate) fn tool_definitions() -> Vec<Value> {
- [
- "project.bind",
- "project.status",
- "project.schema",
- "schema.field.upsert",
- "schema.field.remove",
- "tag.add",
- "tag.list",
- "frontier.list",
- "frontier.status",
- "frontier.init",
- "node.create",
- "hypothesis.record",
- "node.list",
- "node.read",
- "node.annotate",
- "node.archive",
- "note.quick",
- "source.record",
- "metric.define",
- "run.dimension.define",
- "run.dimension.list",
- "metric.keys",
- "metric.best",
- "metric.migrate",
- "experiment.open",
- "experiment.list",
- "experiment.read",
- "experiment.close",
- "skill.list",
- "skill.show",
- "system.health",
- "system.telemetry",
- ]
- .into_iter()
- .filter_map(tool_spec)
- .map(|spec| {
- json!({
- "name": spec.name,
- "description": spec.description,
- "inputSchema": with_common_presentation(input_schema(spec.name)),
- "annotations": spec.annotation_json(),
+ TOOL_SPECS
+ .iter()
+ .copied()
+ .map(|spec| {
+ json!({
+ "name": spec.name,
+ "description": spec.description,
+ "annotations": spec.annotation_json(),
+ "inputSchema": tool_input_schema(spec.name),
+ })
})
- })
- .collect()
+ .collect()
}
#[must_use]
pub(crate) fn list_resources() -> Vec<Value> {
- vec![
- json!({
- "uri": "fidget-spinner://project/config",
- "name": "project-config",
- "description": "Project-local store configuration",
- "mimeType": "application/json"
- }),
- json!({
- "uri": "fidget-spinner://project/schema",
- "name": "project-schema",
- "description": "Project-local payload schema and validation tiers",
- "mimeType": "application/json"
- }),
- json!({
- "uri": "fidget-spinner://skill/fidget-spinner",
- "name": "fidget-spinner-skill",
- "description": "Bundled base Fidget Spinner skill text for this package",
- "mimeType": "text/markdown"
- }),
- json!({
- "uri": "fidget-spinner://skill/frontier-loop",
- "name": "frontier-loop-skill",
- "description": "Bundled frontier-loop specialization skill text for this package",
- "mimeType": "text/markdown"
- }),
- ]
+ RESOURCE_SPECS
+ .iter()
+ .map(|spec| {
+ json!({
+ "uri": spec.uri,
+ "name": spec.uri.rsplit('/').next().unwrap_or(spec.uri),
+ "description": resource_description(spec.uri),
+ })
+ })
+ .collect()
}
-fn input_schema(name: &str) -> Value {
- match name {
- "project.status" | "project.schema" | "tag.list" | "skill.list" | "system.health"
- | "system.telemetry" | "run.dimension.list" | "metric.migrate" => {
- json!({"type":"object","additionalProperties":false})
- }
- "schema.field.upsert" => json!({
- "type": "object",
- "properties": {
- "name": { "type": "string", "description": "Project payload field name." },
- "node_classes": { "type": "array", "items": node_class_schema(), "description": "Optional node-class scope. Omit or pass [] for all classes." },
- "presence": field_presence_schema(),
- "severity": diagnostic_severity_schema(),
- "role": field_role_schema(),
- "inference_policy": inference_policy_schema(),
- "value_type": field_value_type_schema(),
- },
- "required": ["name", "presence", "severity", "role", "inference_policy"],
- "additionalProperties": false
- }),
- "schema.field.remove" => json!({
- "type": "object",
- "properties": {
- "name": { "type": "string", "description": "Project payload field name." },
- "node_classes": { "type": "array", "items": node_class_schema(), "description": "Optional exact node-class scope to remove." }
- },
- "required": ["name"],
- "additionalProperties": false
- }),
- "project.bind" => json!({
- "type": "object",
- "properties": {
- "path": { "type": "string", "description": "Project root or any nested path inside a project with .fidget_spinner state." }
- },
- "required": ["path"],
- "additionalProperties": false
- }),
- "tag.add" => json!({
- "type": "object",
- "properties": {
- "name": { "type": "string", "description": "Lowercase repo-local tag name." },
- "description": { "type": "string", "description": "Human-facing tag description." }
- },
- "required": ["name", "description"],
- "additionalProperties": false
- }),
- "skill.show" => json!({
- "type": "object",
- "properties": {
- "name": { "type": "string", "description": "Bundled skill name. Defaults to `fidget-spinner`." }
- },
- "additionalProperties": false
- }),
- "frontier.list" => json!({"type":"object","additionalProperties":false}),
- "frontier.status" => json!({
- "type": "object",
- "properties": {
- "frontier_id": { "type": "string", "description": "Frontier UUID" }
- },
- "required": ["frontier_id"],
- "additionalProperties": false
- }),
- "frontier.init" => json!({
- "type": "object",
- "properties": {
- "label": { "type": "string" },
- "objective": { "type": "string" },
- "contract_title": { "type": "string" },
- "contract_summary": { "type": "string" },
- "benchmark_suites": { "type": "array", "items": { "type": "string" } },
- "promotion_criteria": { "type": "array", "items": { "type": "string" } },
- "primary_metric": metric_spec_schema(),
- "supporting_metrics": { "type": "array", "items": metric_spec_schema() },
- "seed_summary": { "type": "string" }
- },
- "required": ["label", "objective", "contract_title", "benchmark_suites", "promotion_criteria", "primary_metric"],
- "additionalProperties": false
- }),
- "node.create" => json!({
- "type": "object",
- "properties": {
- "class": node_class_schema(),
- "frontier_id": { "type": "string" },
- "title": { "type": "string" },
- "summary": { "type": "string", "description": "Required for `note` and `source` nodes." },
- "tags": { "type": "array", "items": tag_name_schema(), "description": "Required for `note` nodes; optional for other classes." },
- "payload": { "type": "object", "description": "`note` and `source` nodes require a non-empty string `body` field." },
- "annotations": { "type": "array", "items": annotation_schema() },
- "parents": { "type": "array", "items": { "type": "string" } }
- },
- "required": ["class", "title"],
- "additionalProperties": false
- }),
- "hypothesis.record" => json!({
- "type": "object",
- "properties": {
- "frontier_id": { "type": "string" },
- "title": { "type": "string" },
- "summary": { "type": "string" },
- "body": { "type": "string" },
- "annotations": { "type": "array", "items": annotation_schema() },
- "parents": { "type": "array", "items": { "type": "string" } }
- },
- "required": ["frontier_id", "title", "summary", "body"],
- "additionalProperties": false
- }),
- "node.list" => json!({
- "type": "object",
- "properties": {
- "frontier_id": { "type": "string" },
- "class": node_class_schema(),
- "tags": { "type": "array", "items": tag_name_schema() },
- "include_archived": { "type": "boolean" },
- "limit": { "type": "integer", "minimum": 1, "maximum": 500 }
- },
- "additionalProperties": false
- }),
- "node.read" | "node.archive" => json!({
- "type": "object",
- "properties": {
- "node_id": { "type": "string" }
- },
- "required": ["node_id"],
- "additionalProperties": false
- }),
- "node.annotate" => json!({
- "type": "object",
- "properties": {
- "node_id": { "type": "string" },
- "body": { "type": "string" },
- "label": { "type": "string" },
- "visible": { "type": "boolean" }
- },
- "required": ["node_id", "body"],
- "additionalProperties": false
- }),
- "note.quick" => json!({
- "type": "object",
- "properties": {
- "frontier_id": { "type": "string" },
- "title": { "type": "string" },
- "summary": { "type": "string" },
- "body": { "type": "string" },
- "tags": { "type": "array", "items": tag_name_schema() },
- "annotations": { "type": "array", "items": annotation_schema() },
- "parents": { "type": "array", "items": { "type": "string" } }
- },
- "required": ["title", "summary", "body", "tags"],
- "additionalProperties": false
- }),
- "source.record" => json!({
- "type": "object",
- "properties": {
- "frontier_id": { "type": "string" },
- "title": { "type": "string" },
- "summary": { "type": "string" },
- "body": { "type": "string" },
- "tags": { "type": "array", "items": tag_name_schema() },
- "annotations": { "type": "array", "items": annotation_schema() },
- "parents": { "type": "array", "items": { "type": "string" } }
- },
- "required": ["title", "summary", "body"],
- "additionalProperties": false
- }),
- "metric.define" => json!({
- "type": "object",
- "properties": {
- "key": { "type": "string" },
- "unit": metric_unit_schema(),
- "objective": optimization_objective_schema(),
- "description": { "type": "string" }
- },
- "required": ["key", "unit", "objective"],
- "additionalProperties": false
- }),
- "run.dimension.define" => json!({
- "type": "object",
- "properties": {
- "key": { "type": "string" },
- "value_type": field_value_type_schema(),
- "description": { "type": "string" }
- },
- "required": ["key", "value_type"],
- "additionalProperties": false
- }),
- "metric.keys" => json!({
- "type": "object",
- "properties": {
- "frontier_id": { "type": "string" },
- "source": metric_source_schema(),
- "dimensions": { "type": "object" }
- },
- "additionalProperties": false
- }),
- "metric.best" => json!({
- "type": "object",
- "properties": {
- "key": { "type": "string" },
- "frontier_id": { "type": "string" },
- "source": metric_source_schema(),
- "dimensions": { "type": "object" },
- "order": metric_order_schema(),
- "limit": { "type": "integer", "minimum": 1, "maximum": 500 }
- },
- "required": ["key"],
- "additionalProperties": false
- }),
- "experiment.open" => json!({
- "type": "object",
- "properties": {
- "frontier_id": { "type": "string" },
- "hypothesis_node_id": { "type": "string" },
- "title": { "type": "string" },
- "summary": { "type": "string" }
- },
- "required": ["frontier_id", "hypothesis_node_id", "title"],
- "additionalProperties": false
- }),
- "experiment.list" => json!({
- "type": "object",
- "properties": {
- "frontier_id": { "type": "string" }
- },
- "additionalProperties": false
- }),
- "experiment.read" => json!({
- "type": "object",
- "properties": {
- "experiment_id": { "type": "string" }
- },
- "required": ["experiment_id"],
- "additionalProperties": false
- }),
- "experiment.close" => json!({
- "type": "object",
- "properties": {
- "experiment_id": { "type": "string" },
- "run": run_schema(),
- "primary_metric": metric_value_schema(),
- "supporting_metrics": { "type": "array", "items": metric_value_schema() },
- "note": note_schema(),
- "verdict": verdict_schema(),
- "decision_title": { "type": "string" },
- "decision_rationale": { "type": "string" },
- "analysis": analysis_schema()
- },
- "required": [
- "experiment_id",
- "run",
/// Human-facing description for a catalog resource URI.
///
/// Only the two bundled skill resources carry dedicated text; any other
/// URI falls back to a generic description.
fn resource_description(uri: &str) -> &'static str {
    if uri == "fidget-spinner://skill/fidget-spinner" {
        "Bundled Fidget Spinner operating doctrine."
    } else if uri == "fidget-spinner://skill/frontier-loop" {
        "Bundled frontier-loop specialization."
    } else {
        "Fidget Spinner resource."
    }
}
+
/// Build the JSON-Schema `inputSchema` advertised for the named MCP tool.
///
/// Every branch produces a closed object (`additionalProperties: false`);
/// unknown tool names fall back to an empty closed object so the catalog
/// never advertises an unconstrained schema. The result is post-processed
/// by `with_common_presentation` before being returned — presumably to add
/// shared presentation controls (TODO confirm against that helper).
fn tool_input_schema(name: &str) -> Value {
    let schema = match name {
        "project.bind" => object_schema(
            &[(
                "path",
                string_schema("Project root or any nested path inside it."),
            )],
            &["path"],
        ),
        // Argument-free tools share a single closed empty object.
        "project.status" | "tag.list" | "frontier.list" | "run.dimension.list" | "skill.list"
        | "system.health" | "system.telemetry" => empty_object_schema(),
        "tag.add" => object_schema(
            &[
                ("name", string_schema("Repo-local tag token.")),
                (
                    "description",
                    string_schema("Human-facing tag description."),
                ),
            ],
            &["name", "description"],
        ),
        "frontier.create" => object_schema(
            &[
                ("label", string_schema("Short frontier label.")),
                ("objective", string_schema("Frontier objective.")),
                ("slug", string_schema("Optional stable frontier slug.")),
            ],
            &["label", "objective"],
        ),
        // Read-style frontier tools take a single selector argument.
        "frontier.read" | "frontier.open" | "frontier.history" => object_schema(
            &[("frontier", selector_schema("Frontier UUID or slug."))],
            &["frontier"],
        ),
        "frontier.brief.update" => object_schema(
            &[
                ("frontier", selector_schema("Frontier UUID or slug.")),
                (
                    "expected_revision",
                    integer_schema("Optimistic concurrency guard."),
                ),
                (
                    "situation",
                    nullable_string_schema("Optional frontier situation text."),
                ),
                ("roadmap", roadmap_schema()),
                (
                    "unknowns",
                    string_array_schema("Ordered frontier unknowns."),
                ),
            ],
            &["frontier"],
        ),
        "hypothesis.record" => object_schema(
            &[
                ("frontier", selector_schema("Owning frontier UUID or slug.")),
                ("title", string_schema("Terse hypothesis title.")),
                ("summary", string_schema("One-line hypothesis summary.")),
                ("body", string_schema("Single-paragraph hypothesis body.")),
                ("slug", string_schema("Optional stable hypothesis slug.")),
                ("tags", string_array_schema("Tag names.")),
                ("parents", vertex_selector_array_schema()),
            ],
            &["frontier", "title", "summary", "body"],
        ),
        "hypothesis.list" => object_schema(
            &[
                (
                    "frontier",
                    selector_schema("Optional frontier UUID or slug."),
                ),
                ("tags", string_array_schema("Require all listed tags.")),
                (
                    "include_archived",
                    boolean_schema("Include archived hypotheses."),
                ),
                ("limit", integer_schema("Optional row cap.")),
            ],
            &[],
        ),
        "hypothesis.read" | "hypothesis.history" => object_schema(
            &[("hypothesis", selector_schema("Hypothesis UUID or slug."))],
            &["hypothesis"],
        ),
        // Update tools: only the selector is required; all other fields are
        // replacement patches guarded by `expected_revision`.
        "hypothesis.update" => object_schema(
            &[
                ("hypothesis", selector_schema("Hypothesis UUID or slug.")),
                (
                    "expected_revision",
                    integer_schema("Optimistic concurrency guard."),
                ),
                ("title", string_schema("Replacement title.")),
                ("summary", string_schema("Replacement summary.")),
                ("body", string_schema("Replacement single-paragraph body.")),
                ("tags", string_array_schema("Replacement tag set.")),
                ("parents", vertex_selector_array_schema()),
                ("archived", boolean_schema("Archive state override.")),
            ],
            &["hypothesis"],
        ),
        "experiment.open" => object_schema(
            &[
                (
                    "hypothesis",
                    selector_schema("Owning hypothesis UUID or slug."),
                ),
                ("title", string_schema("Experiment title.")),
                ("summary", string_schema("Optional experiment summary.")),
                ("slug", string_schema("Optional stable experiment slug.")),
                ("tags", string_array_schema("Tag names.")),
                ("parents", vertex_selector_array_schema()),
            ],
            &["hypothesis", "title"],
        ),
        "experiment.list" => object_schema(
            &[
                (
                    "frontier",
                    selector_schema("Optional frontier UUID or slug."),
                ),
                (
                    "hypothesis",
                    selector_schema("Optional hypothesis UUID or slug."),
                ),
                ("tags", string_array_schema("Require all listed tags.")),
                (
                    "status",
                    enum_string_schema(&["open", "closed"], "Optional experiment status filter."),
                ),
                (
                    "include_archived",
                    boolean_schema("Include archived experiments."),
                ),
                ("limit", integer_schema("Optional row cap.")),
            ],
            &[],
        ),
        "experiment.read" | "experiment.history" => object_schema(
            &[("experiment", selector_schema("Experiment UUID or slug."))],
            &["experiment"],
        ),
        "experiment.update" => object_schema(
            &[
                ("experiment", selector_schema("Experiment UUID or slug.")),
                (
                    "expected_revision",
                    integer_schema("Optimistic concurrency guard."),
                ),
                ("title", string_schema("Replacement title.")),
                (
                    "summary",
                    nullable_string_schema("Replacement summary or explicit null."),
                ),
                ("tags", string_array_schema("Replacement tag set.")),
                ("parents", vertex_selector_array_schema()),
                ("archived", boolean_schema("Archive state override.")),
                ("outcome", experiment_outcome_schema()),
            ],
            &["experiment"],
        ),
        // Closing an experiment demands the full outcome in one call: run
        // backend + command + dimensions, the primary metric, a verdict,
        // and a rationale.
        "experiment.close" => object_schema(
            &[
                ("experiment", selector_schema("Experiment UUID or slug.")),
                (
                    "expected_revision",
                    integer_schema("Optimistic concurrency guard."),
                ),
                (
                    "backend",
                    enum_string_schema(
                        &["manual", "local_process", "worktree_process", "ssh_process"],
                        "Execution backend.",
                    ),
                ),
                ("command", command_schema()),
                ("dimensions", run_dimensions_schema()),
                ("primary_metric", metric_value_schema()),
                ("supporting_metrics", metric_value_array_schema()),
                (
                    "verdict",
                    enum_string_schema(
                        &["accepted", "kept", "parked", "rejected"],
                        "Closed verdict.",
                    ),
                ),
                ("rationale", string_schema("Decision rationale.")),
                ("analysis", experiment_analysis_schema()),
            ],
            &[
                "experiment",
                "backend",
                "command",
                "dimensions",
                "primary_metric",
                "verdict",
                "rationale",
            ],
        ),
        "artifact.record" => object_schema(
            &[
                (
                    "kind",
                    enum_string_schema(
                        &[
                            "document", "link", "log", "table", "plot", "dump", "binary", "other",
                        ],
                        "Artifact kind.",
                    ),
                ),
                ("label", string_schema("Human-facing artifact label.")),
                ("summary", string_schema("Optional summary.")),
                (
                    "locator",
                    string_schema(
                        "Opaque locator or URI. Artifact bodies are never read through Spinner.",
                    ),
                ),
                ("media_type", string_schema("Optional media type.")),
                ("slug", string_schema("Optional stable artifact slug.")),
                ("attachments", attachment_selector_array_schema()),
            ],
            &["kind", "label", "locator"],
        ),
        "artifact.list" => object_schema(
            &[
                (
                    "frontier",
                    selector_schema("Optional frontier UUID or slug."),
                ),
                (
                    "kind",
                    enum_string_schema(
                        &[
                            "document", "link", "log", "table", "plot", "dump", "binary", "other",
                        ],
                        "Optional artifact kind.",
                    ),
                ),
                ("attached_to", attachment_selector_schema()),
                ("limit", integer_schema("Optional row cap.")),
            ],
            &[],
        ),
        "artifact.read" | "artifact.history" => object_schema(
            &[("artifact", selector_schema("Artifact UUID or slug."))],
            &["artifact"],
        ),
        "artifact.update" => object_schema(
            &[
                ("artifact", selector_schema("Artifact UUID or slug.")),
                (
                    "expected_revision",
                    integer_schema("Optimistic concurrency guard."),
                ),
                (
                    "kind",
                    enum_string_schema(
                        &[
                            "document", "link", "log", "table", "plot", "dump", "binary", "other",
                        ],
                        "Replacement artifact kind.",
                    ),
                ),
                ("label", string_schema("Replacement label.")),
                (
                    "summary",
                    nullable_string_schema("Replacement summary or explicit null."),
                ),
                ("locator", string_schema("Replacement locator.")),
                (
                    "media_type",
                    nullable_string_schema("Replacement media type or explicit null."),
                ),
                ("attachments", attachment_selector_array_schema()),
            ],
            &["artifact"],
        ),
        "metric.define" => object_schema(
            &[
                ("key", string_schema("Metric key.")),
                (
                    "unit",
                    enum_string_schema(
                        &["seconds", "bytes", "count", "ratio", "custom"],
                        "Metric unit.",
                    ),
                ),
                (
                    "objective",
                    enum_string_schema(
                        &["minimize", "maximize", "target"],
                        "Optimization objective.",
                    ),
                ),
                (
                    "visibility",
                    enum_string_schema(
                        &["canonical", "minor", "hidden", "archived"],
                        "Metric visibility tier.",
                    ),
                ),
                ("description", string_schema("Optional description.")),
            ],
            &["key", "unit", "objective"],
        ),
        "metric.keys" => object_schema(
            &[
                (
                    "frontier",
                    selector_schema("Optional frontier UUID or slug."),
                ),
                (
                    "scope",
                    enum_string_schema(&["live", "visible", "all"], "Registry slice to enumerate."),
                ),
            ],
            &[],
        ),
        "metric.best" => object_schema(
            &[
                (
                    "frontier",
                    selector_schema("Optional frontier UUID or slug."),
                ),
                (
                    "hypothesis",
                    selector_schema("Optional hypothesis UUID or slug."),
                ),
                ("key", string_schema("Metric key.")),
                ("dimensions", run_dimensions_schema()),
                (
                    "include_rejected",
                    boolean_schema("Include rejected experiments."),
                ),
                ("limit", integer_schema("Optional row cap.")),
                (
                    "order",
                    enum_string_schema(&["asc", "desc"], "Optional explicit ranking direction."),
                ),
            ],
            &["key"],
        ),
        "run.dimension.define" => object_schema(
            &[
                ("key", string_schema("Dimension key.")),
                (
                    "value_type",
                    enum_string_schema(
                        &["string", "numeric", "boolean", "timestamp"],
                        "Dimension value type.",
                    ),
                ),
                ("description", string_schema("Optional description.")),
            ],
            &["key", "value_type"],
        ),
        "skill.show" => object_schema(&[("name", string_schema("Bundled skill name."))], &[]),
        // Safe default: a closed empty object rejects every argument.
        _ => empty_object_schema(),
    };
    with_common_presentation(schema)
}
-fn metric_spec_schema() -> Value {
+fn empty_object_schema() -> Value {
json!({
"type": "object",
- "properties": {
- "key": { "type": "string" },
- "unit": metric_unit_schema(),
- "objective": optimization_objective_schema()
- },
- "required": ["key", "unit", "objective"],
- "additionalProperties": false
+ "properties": {},
+ "additionalProperties": false,
})
}
-fn metric_value_schema() -> Value {
+fn object_schema(properties: &[(&str, Value)], required: &[&str]) -> Value {
+ let mut map = serde_json::Map::new();
+ for (key, value) in properties {
+ let _ = map.insert((*key).to_owned(), value.clone());
+ }
json!({
"type": "object",
- "properties": {
- "key": { "type": "string" },
- "value": { "type": "number" }
- },
- "required": ["key", "value"],
- "additionalProperties": false
+ "properties": Value::Object(map),
+ "required": required,
+ "additionalProperties": false,
})
}
-fn annotation_schema() -> Value {
- json!({
- "type": "object",
- "properties": {
- "body": { "type": "string" },
- "label": { "type": "string" },
- "visible": { "type": "boolean" }
- },
- "required": ["body"],
- "additionalProperties": false
- })
+fn string_schema(description: &str) -> Value {
+ json!({ "type": "string", "description": description })
}
-fn analysis_schema() -> Value {
+fn nullable_string_schema(description: &str) -> Value {
json!({
- "type": "object",
- "properties": {
- "title": { "type": "string" },
- "summary": { "type": "string" },
- "body": { "type": "string" }
- },
- "required": ["title", "summary", "body"],
- "additionalProperties": false
+ "description": description,
+ "oneOf": [
+ { "type": "string" },
+ { "type": "null" }
+ ]
})
}
-fn tag_name_schema() -> Value {
- json!({
- "type": "string",
- "pattern": "^[a-z0-9]+(?:[-_/][a-z0-9]+)*$"
- })
+fn integer_schema(description: &str) -> Value {
+ json!({ "type": "integer", "minimum": 0, "description": description })
}
-fn node_class_schema() -> Value {
- json!({
- "type": "string",
- "enum": ["contract", "hypothesis", "run", "analysis", "decision", "source", "note"]
- })
+fn boolean_schema(description: &str) -> Value {
+ json!({ "type": "boolean", "description": description })
}
-fn metric_unit_schema() -> Value {
- json!({
- "type": "string",
- "enum": ["seconds", "bytes", "count", "ratio", "custom"]
- })
+fn enum_string_schema(values: &[&str], description: &str) -> Value {
+ json!({ "type": "string", "enum": values, "description": description })
}
-fn metric_source_schema() -> Value {
+fn string_array_schema(description: &str) -> Value {
json!({
- "type": "string",
- "enum": [
- "run_metric",
- "hypothesis_payload",
- "run_payload",
- "analysis_payload",
- "decision_payload"
- ]
+ "type": "array",
+ "items": { "type": "string" },
+ "description": description
})
}
-fn metric_order_schema() -> Value {
- json!({
- "type": "string",
- "enum": ["asc", "desc"]
- })
+fn selector_schema(description: &str) -> Value {
+ string_schema(description)
}
-fn field_value_type_schema() -> Value {
+fn vertex_selector_schema() -> Value {
json!({
- "type": "string",
- "enum": ["string", "numeric", "boolean", "timestamp"]
+ "type": "object",
+ "properties": {
+ "kind": { "type": "string", "enum": ["hypothesis", "experiment"] },
+ "selector": { "type": "string" }
+ },
+ "required": ["kind", "selector"],
+ "additionalProperties": false
})
}
-fn diagnostic_severity_schema() -> Value {
+fn attachment_selector_schema() -> Value {
json!({
- "type": "string",
- "enum": ["error", "warning", "info"]
+ "type": "object",
+ "properties": {
+ "kind": { "type": "string", "enum": ["frontier", "hypothesis", "experiment"] },
+ "selector": { "type": "string" }
+ },
+ "required": ["kind", "selector"],
+ "additionalProperties": false
})
}
-fn field_presence_schema() -> Value {
- json!({
- "type": "string",
- "enum": ["required", "recommended", "optional"]
- })
+fn vertex_selector_array_schema() -> Value {
+ json!({ "type": "array", "items": vertex_selector_schema() })
+}
+
+fn attachment_selector_array_schema() -> Value {
+ json!({ "type": "array", "items": attachment_selector_schema() })
}
-fn field_role_schema() -> Value {
+fn roadmap_schema() -> Value {
json!({
- "type": "string",
- "enum": ["index", "projection_gate", "render_only", "opaque"]
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "rank": { "type": "integer", "minimum": 0 },
+ "hypothesis": { "type": "string" },
+ "summary": { "type": "string" }
+ },
+ "required": ["rank", "hypothesis"],
+ "additionalProperties": false
+ }
})
}
-fn inference_policy_schema() -> Value {
+fn command_schema() -> Value {
json!({
- "type": "string",
- "enum": ["manual_only", "model_may_infer"]
+ "type": "object",
+ "properties": {
+ "working_directory": { "type": "string" },
+ "argv": { "type": "array", "items": { "type": "string" } },
+ "env": {
+ "type": "object",
+ "additionalProperties": { "type": "string" }
+ }
+ },
+ "required": ["argv"],
+ "additionalProperties": false
})
}
-fn optimization_objective_schema() -> Value {
+fn metric_value_schema() -> Value {
json!({
- "type": "string",
- "enum": ["minimize", "maximize", "target"]
+ "type": "object",
+ "properties": {
+ "key": { "type": "string" },
+ "value": { "type": "number" }
+ },
+ "required": ["key", "value"],
+ "additionalProperties": false
})
}
-fn verdict_schema() -> Value {
+fn metric_value_array_schema() -> Value {
+ json!({ "type": "array", "items": metric_value_schema() })
+}
+
+fn run_dimensions_schema() -> Value {
json!({
- "type": "string",
- "enum": [
- "accepted",
- "kept",
- "parked",
- "rejected"
- ]
+ "type": "object",
+ "additionalProperties": true,
+ "description": "Exact run-dimension filter or outcome dimension map. Values may be strings, numbers, booleans, or RFC3339 timestamps."
})
}
-fn run_schema() -> Value {
+fn experiment_analysis_schema() -> Value {
json!({
"type": "object",
"properties": {
- "title": { "type": "string" },
"summary": { "type": "string" },
- "backend": {
- "type": "string",
- "enum": ["local_process", "worktree_process", "ssh_process"]
- },
- "dimensions": { "type": "object" },
- "command": {
- "type": "object",
- "properties": {
- "working_directory": { "type": "string" },
- "argv": { "type": "array", "items": { "type": "string" } },
- "env": {
- "type": "object",
- "additionalProperties": { "type": "string" }
- }
- },
- "required": ["argv"],
- "additionalProperties": false
- }
+ "body": { "type": "string" }
},
- "required": ["title", "backend", "dimensions", "command"],
+ "required": ["summary", "body"],
"additionalProperties": false
})
}
-fn note_schema() -> Value {
+fn experiment_outcome_schema() -> Value {
json!({
"type": "object",
"properties": {
- "summary": { "type": "string" },
- "next_hypotheses": { "type": "array", "items": { "type": "string" } }
+ "backend": { "type": "string", "enum": ["manual", "local_process", "worktree_process", "ssh_process"] },
+ "command": command_schema(),
+ "dimensions": run_dimensions_schema(),
+ "primary_metric": metric_value_schema(),
+ "supporting_metrics": metric_value_array_schema(),
+ "verdict": { "type": "string", "enum": ["accepted", "kept", "parked", "rejected"] },
+ "rationale": { "type": "string" },
+ "analysis": experiment_analysis_schema()
},
- "required": ["summary"],
+ "required": ["backend", "command", "dimensions", "primary_metric", "verdict", "rationale"],
"additionalProperties": false
})
}
diff --git a/crates/fidget-spinner-cli/src/mcp/host/runtime.rs b/crates/fidget-spinner-cli/src/mcp/host/runtime.rs
index d57a21e..bf0484a 100644
--- a/crates/fidget-spinner-cli/src/mcp/host/runtime.rs
+++ b/crates/fidget-spinner-cli/src/mcp/host/runtime.rs
@@ -230,7 +230,7 @@ impl HostRuntime {
"name": SERVER_NAME,
"version": env!("CARGO_PKG_VERSION")
},
- "instructions": "The DAG is canonical truth. Frontier state is derived. Bind the session with project.bind before project-local DAG operations when the MCP is running unbound."
+ "instructions": "Bind the session with project.bind before project-local work when the MCP is unbound. Use frontier.open as the only overview surface, then walk hypotheses and experiments deliberately by selector. Artifacts are references only; Spinner does not read artifact bodies."
}))),
"notifications/initialized" => {
if !self.seed_captured() {
@@ -598,8 +598,11 @@ struct ProjectBindStatus {
project_root: String,
state_root: String,
display_name: fidget_spinner_core::NonEmptyText,
- schema: fidget_spinner_core::PayloadSchemaRef,
- git_repo_detected: bool,
+ frontier_count: u64,
+ hypothesis_count: u64,
+ experiment_count: u64,
+ open_experiment_count: u64,
+ artifact_count: u64,
}
struct ResolvedProjectBinding {
@@ -611,6 +614,7 @@ fn resolve_project_binding(
requested_path: PathBuf,
) -> Result<ResolvedProjectBinding, fidget_spinner_store_sqlite::StoreError> {
let store = crate::open_or_init_store_for_binding(&requested_path)?;
+ let project_status = store.status()?;
Ok(ResolvedProjectBinding {
binding: ProjectBinding {
requested_path: requested_path.clone(),
@@ -621,12 +625,11 @@ fn resolve_project_binding(
project_root: store.project_root().to_string(),
state_root: store.state_root().to_string(),
display_name: store.config().display_name.clone(),
- schema: store.schema().schema_ref(),
- git_repo_detected: crate::run_git(
- store.project_root(),
- &["rev-parse", "--show-toplevel"],
- )?
- .is_some(),
+ frontier_count: project_status.frontier_count,
+ hypothesis_count: project_status.hypothesis_count,
+ experiment_count: project_status.experiment_count,
+ open_experiment_count: project_status.open_experiment_count,
+ artifact_count: project_status.artifact_count,
},
})
}
@@ -728,17 +731,20 @@ fn project_bind_output(status: &ProjectBindStatus) -> Result<ToolOutput, FaultRe
let _ = concise.insert("project_root".to_owned(), json!(status.project_root));
let _ = concise.insert("state_root".to_owned(), json!(status.state_root));
let _ = concise.insert("display_name".to_owned(), json!(status.display_name));
+ let _ = concise.insert("frontier_count".to_owned(), json!(status.frontier_count));
let _ = concise.insert(
- "schema".to_owned(),
- json!(format!(
- "{}@{}",
- status.schema.namespace, status.schema.version
- )),
+ "hypothesis_count".to_owned(),
+ json!(status.hypothesis_count),
);
let _ = concise.insert(
- "git_repo_detected".to_owned(),
- json!(status.git_repo_detected),
+ "experiment_count".to_owned(),
+ json!(status.experiment_count),
);
+ let _ = concise.insert(
+ "open_experiment_count".to_owned(),
+ json!(status.open_experiment_count),
+ );
+ let _ = concise.insert("artifact_count".to_owned(), json!(status.artifact_count));
if status.requested_path != status.project_root {
let _ = concise.insert("requested_path".to_owned(), json!(status.requested_path));
}
@@ -749,18 +755,13 @@ fn project_bind_output(status: &ProjectBindStatus) -> Result<ToolOutput, FaultRe
format!("bound project {}", status.display_name),
format!("root: {}", status.project_root),
format!("state: {}", status.state_root),
+ format!("frontiers: {}", status.frontier_count),
+ format!("hypotheses: {}", status.hypothesis_count),
format!(
- "schema: {}@{}",
- status.schema.namespace, status.schema.version
- ),
- format!(
- "git: {}",
- if status.git_repo_detected {
- "detected"
- } else {
- "not detected"
- }
+ "experiments: {} total, {} open",
+ status.experiment_count, status.open_experiment_count
),
+ format!("artifacts: {}", status.artifact_count),
]
.join("\n"),
None,
diff --git a/crates/fidget-spinner-cli/src/mcp/service.rs b/crates/fidget-spinner-cli/src/mcp/service.rs
index f0cca1e..adc29f9 100644
--- a/crates/fidget-spinner-cli/src/mcp/service.rs
+++ b/crates/fidget-spinner-cli/src/mcp/service.rs
@@ -1,20 +1,22 @@
use std::collections::{BTreeMap, BTreeSet};
+use std::fmt::Write as _;
use std::fs;
use camino::{Utf8Path, Utf8PathBuf};
use fidget_spinner_core::{
- AdmissionState, AnnotationVisibility, CommandRecipe, DiagnosticSeverity, ExecutionBackend,
- FieldPresence, FieldRole, FieldValueType, FrontierContract, FrontierNote, FrontierProjection,
- FrontierRecord, FrontierVerdict, InferencePolicy, MetricSpec, MetricUnit, MetricValue,
- NodeAnnotation, NodeClass, NodePayload, NonEmptyText, ProjectFieldSpec, ProjectSchema,
- RunDimensionValue, TagName, TagRecord,
+ ArtifactKind, CommandRecipe, ExecutionBackend, ExperimentAnalysis, ExperimentStatus,
+ FieldValueType, FrontierVerdict, MetricUnit, MetricVisibility, NonEmptyText,
+ OptimizationObjective, RunDimensionValue, Slug, TagName,
};
use fidget_spinner_store_sqlite::{
- CloseExperimentRequest, CreateFrontierRequest, CreateNodeRequest, DefineMetricRequest,
- DefineRunDimensionRequest, EdgeAttachment, EdgeAttachmentDirection, ExperimentAnalysisDraft,
- ExperimentReceipt, ListNodesQuery, MetricBestQuery, MetricFieldSource, MetricKeyQuery,
- MetricKeySummary, MetricRankOrder, NodeSummary, OpenExperimentRequest, OpenExperimentSummary,
- ProjectStore, RemoveSchemaFieldRequest, StoreError, UpsertSchemaFieldRequest,
+ AttachmentSelector, CloseExperimentRequest, CreateArtifactRequest, CreateFrontierRequest,
+ CreateHypothesisRequest, DefineMetricRequest, DefineRunDimensionRequest, EntityHistoryEntry,
+ ExperimentOutcomePatch, FrontierOpenProjection, FrontierRoadmapItemDraft, FrontierSummary,
+ ListArtifactsQuery, ListExperimentsQuery, ListHypothesesQuery, MetricBestEntry,
+ MetricBestQuery, MetricKeySummary, MetricKeysQuery, MetricRankOrder, MetricScope,
+ OpenExperimentRequest, ProjectStatus, ProjectStore, StoreError, TextPatch,
+ UpdateArtifactRequest, UpdateExperimentRequest, UpdateFrontierBriefRequest,
+ UpdateHypothesisRequest, VertexSelector,
};
use serde::Deserialize;
use serde_json::{Map, Value, json};
@@ -42,10 +44,9 @@ impl WorkerService {
WorkerOperation::ReadResource { uri } => format!("resources/read:{uri}"),
};
Self::maybe_inject_transient(&operation_key)?;
-
match operation {
WorkerOperation::CallTool { name, arguments } => self.call_tool(&name, arguments),
- WorkerOperation::ReadResource { uri } => self.read_resource(&uri),
+ WorkerOperation::ReadResource { uri } => Self::read_resource(&uri),
}
}
@@ -53,796 +54,449 @@ impl WorkerService {
let operation = format!("tools/call:{name}");
let (presentation, arguments) =
split_presentation(arguments, &operation, FaultStage::Worker)?;
- match name {
- "project.status" => {
- let status = json!({
- "project_root": self.store.project_root(),
- "state_root": self.store.state_root(),
- "display_name": self.store.config().display_name,
- "schema": self.store.schema().schema_ref(),
- "git_repo_detected": crate::run_git(self.store.project_root(), &["rev-parse", "--show-toplevel"])
- .map_err(store_fault("tools/call:project.status"))?
- .is_some(),
- });
- tool_success(
- project_status_output(&status, self.store.schema()),
- presentation,
- FaultStage::Worker,
- "tools/call:project.status",
- )
+ macro_rules! lift {
+ ($expr:expr) => {
+ with_fault($expr, &operation)?
+ };
+ }
+ let output = match name {
+ "project.status" => project_status_output(&lift!(self.store.status()), &operation)?,
+ "tag.add" => {
+ let args = deserialize::<TagAddArgs>(arguments)?;
+ let tag = lift!(self.store.register_tag(
+ TagName::new(args.name).map_err(store_fault(&operation))?,
+ NonEmptyText::new(args.description).map_err(store_fault(&operation))?,
+ ));
+ tool_output(&tag, FaultStage::Worker, &operation)?
}
- "project.schema" => tool_success(
- project_schema_output(self.store.schema())?,
- presentation,
- FaultStage::Worker,
- "tools/call:project.schema",
- ),
- "schema.field.upsert" => {
- let args = deserialize::<SchemaFieldUpsertToolArgs>(arguments)?;
- let field = self
- .store
- .upsert_schema_field(UpsertSchemaFieldRequest {
- name: NonEmptyText::new(args.name)
- .map_err(store_fault("tools/call:schema.field.upsert"))?,
- node_classes: args
- .node_classes
- .unwrap_or_default()
- .into_iter()
- .map(|class| {
- parse_node_class_name(&class)
- .map_err(store_fault("tools/call:schema.field.upsert"))
- })
- .collect::<Result<_, _>>()?,
- presence: parse_field_presence_name(&args.presence)
- .map_err(store_fault("tools/call:schema.field.upsert"))?,
- severity: parse_diagnostic_severity_name(&args.severity)
- .map_err(store_fault("tools/call:schema.field.upsert"))?,
- role: parse_field_role_name(&args.role)
- .map_err(store_fault("tools/call:schema.field.upsert"))?,
- inference_policy: parse_inference_policy_name(&args.inference_policy)
- .map_err(store_fault("tools/call:schema.field.upsert"))?,
- value_type: args
- .value_type
- .as_deref()
- .map(parse_field_value_type_name)
+ "tag.list" => tag_list_output(&lift!(self.store.list_tags()), &operation)?,
+ "frontier.create" => {
+ let args = deserialize::<FrontierCreateArgs>(arguments)?;
+ let frontier = lift!(
+ self.store.create_frontier(CreateFrontierRequest {
+ label: NonEmptyText::new(args.label).map_err(store_fault(&operation))?,
+ objective: NonEmptyText::new(args.objective)
+ .map_err(store_fault(&operation))?,
+ slug: args
+ .slug
+ .map(Slug::new)
.transpose()
- .map_err(store_fault("tools/call:schema.field.upsert"))?,
+ .map_err(store_fault(&operation))?,
})
- .map_err(store_fault("tools/call:schema.field.upsert"))?;
- tool_success(
- schema_field_upsert_output(self.store.schema(), &field)?,
- presentation,
- FaultStage::Worker,
- "tools/call:schema.field.upsert",
- )
+ );
+ frontier_record_output(&frontier, &operation)?
}
- "schema.field.remove" => {
- let args = deserialize::<SchemaFieldRemoveToolArgs>(arguments)?;
- let removed_count = self
- .store
- .remove_schema_field(RemoveSchemaFieldRequest {
- name: NonEmptyText::new(args.name)
- .map_err(store_fault("tools/call:schema.field.remove"))?,
- node_classes: args
- .node_classes
- .map(|node_classes| {
- node_classes
- .into_iter()
- .map(|class| {
- parse_node_class_name(&class)
- .map_err(store_fault("tools/call:schema.field.remove"))
- })
- .collect::<Result<_, _>>()
- })
- .transpose()?,
- })
- .map_err(store_fault("tools/call:schema.field.remove"))?;
- tool_success(
- schema_field_remove_output(self.store.schema(), removed_count)?,
- presentation,
- FaultStage::Worker,
- "tools/call:schema.field.remove",
- )
+ "frontier.list" => {
+ frontier_list_output(&lift!(self.store.list_frontiers()), &operation)?
}
- "tag.add" => {
- let args = deserialize::<TagAddToolArgs>(arguments)?;
- let tag = self
- .store
- .add_tag(
- TagName::new(args.name).map_err(store_fault("tools/call:tag.add"))?,
- NonEmptyText::new(args.description)
- .map_err(store_fault("tools/call:tag.add"))?,
- )
- .map_err(store_fault("tools/call:tag.add"))?;
- tool_success(
- tag_add_output(&tag)?,
- presentation,
- FaultStage::Worker,
- "tools/call:tag.add",
- )
+ "frontier.read" => {
+ let args = deserialize::<FrontierSelectorArgs>(arguments)?;
+ frontier_record_output(
+ &lift!(self.store.read_frontier(&args.frontier)),
+ &operation,
+ )?
}
- "tag.list" => {
- let tags = self
- .store
- .list_tags()
- .map_err(store_fault("tools/call:tag.list"))?;
- tool_success(
- tag_list_output(tags.as_slice())?,
- presentation,
- FaultStage::Worker,
- "tools/call:tag.list",
- )
+ "frontier.open" => {
+ let args = deserialize::<FrontierSelectorArgs>(arguments)?;
+ frontier_open_output(&lift!(self.store.frontier_open(&args.frontier)), &operation)?
}
- "frontier.list" => {
- let frontiers = self
- .store
- .list_frontiers()
- .map_err(store_fault("tools/call:frontier.list"))?;
- tool_success(
- frontier_list_output(frontiers.as_slice())?,
- presentation,
- FaultStage::Worker,
- "tools/call:frontier.list",
- )
+ "frontier.brief.update" => {
+ let args = deserialize::<FrontierBriefUpdateArgs>(arguments)?;
+ let frontier = lift!(
+ self.store
+ .update_frontier_brief(UpdateFrontierBriefRequest {
+ frontier: args.frontier,
+ expected_revision: args.expected_revision,
+ situation: nullable_text_patch_from_wire(args.situation, &operation)?,
+ roadmap: args
+ .roadmap
+ .map(|items| {
+ items
+ .into_iter()
+ .map(|item| {
+ Ok(FrontierRoadmapItemDraft {
+ rank: item.rank,
+ hypothesis: item.hypothesis,
+ summary: item
+ .summary
+ .map(NonEmptyText::new)
+ .transpose()
+ .map_err(store_fault(&operation))?,
+ })
+ })
+ .collect::<Result<Vec<_>, FaultRecord>>()
+ })
+ .transpose()?,
+ unknowns: args
+ .unknowns
+ .map(|items| {
+ items
+ .into_iter()
+ .map(NonEmptyText::new)
+ .collect::<Result<Vec<_>, _>>()
+ .map_err(store_fault(&operation))
+ })
+ .transpose()?,
+ })
+ );
+ frontier_record_output(&frontier, &operation)?
}
- "frontier.status" => {
- let args = deserialize::<FrontierStatusToolArgs>(arguments)?;
- let projection = self
- .store
- .frontier_projection(
- crate::parse_frontier_id(&args.frontier_id)
- .map_err(store_fault("tools/call:frontier.status"))?,
- )
- .map_err(store_fault("tools/call:frontier.status"))?;
- tool_success(
- frontier_status_output(&projection)?,
- presentation,
- FaultStage::Worker,
- "tools/call:frontier.status",
- )
+ "frontier.history" => {
+ let args = deserialize::<FrontierSelectorArgs>(arguments)?;
+ history_output(
+ &lift!(self.store.frontier_history(&args.frontier)),
+ &operation,
+ )?
}
- "frontier.init" => {
- let args = deserialize::<FrontierInitToolArgs>(arguments)?;
- let projection = self
- .store
- .create_frontier(CreateFrontierRequest {
- label: NonEmptyText::new(args.label)
- .map_err(store_fault("tools/call:frontier.init"))?,
- contract_title: NonEmptyText::new(args.contract_title)
- .map_err(store_fault("tools/call:frontier.init"))?,
- contract_summary: args
- .contract_summary
- .map(NonEmptyText::new)
+ "hypothesis.record" => {
+ let args = deserialize::<HypothesisRecordArgs>(arguments)?;
+ let hypothesis = lift!(
+ self.store.create_hypothesis(CreateHypothesisRequest {
+ frontier: args.frontier,
+ slug: args
+ .slug
+ .map(Slug::new)
.transpose()
- .map_err(store_fault("tools/call:frontier.init"))?,
- contract: FrontierContract {
- objective: NonEmptyText::new(args.objective)
- .map_err(store_fault("tools/call:frontier.init"))?,
- evaluation: fidget_spinner_core::EvaluationProtocol {
- benchmark_suites: crate::to_text_set(args.benchmark_suites)
- .map_err(store_fault("tools/call:frontier.init"))?,
- primary_metric: MetricSpec {
- metric_key: NonEmptyText::new(args.primary_metric.key)
- .map_err(store_fault("tools/call:frontier.init"))?,
- unit: parse_metric_unit_name(&args.primary_metric.unit)
- .map_err(store_fault("tools/call:frontier.init"))?,
- objective: crate::parse_optimization_objective(
- &args.primary_metric.objective,
- )
- .map_err(store_fault("tools/call:frontier.init"))?,
- },
- supporting_metrics: args
- .supporting_metrics
- .into_iter()
- .map(metric_spec_from_wire)
- .collect::<Result<_, _>>()
- .map_err(store_fault("tools/call:frontier.init"))?,
- },
- promotion_criteria: crate::to_text_vec(args.promotion_criteria)
- .map_err(store_fault("tools/call:frontier.init"))?,
- },
+ .map_err(store_fault(&operation))?,
+ title: NonEmptyText::new(args.title).map_err(store_fault(&operation))?,
+ summary: NonEmptyText::new(args.summary)
+ .map_err(store_fault(&operation))?,
+ body: NonEmptyText::new(args.body).map_err(store_fault(&operation))?,
+ tags: tags_to_set(args.tags.unwrap_or_default())
+ .map_err(store_fault(&operation))?,
+ parents: args.parents.unwrap_or_default(),
})
- .map_err(store_fault("tools/call:frontier.init"))?;
- tool_success(
- frontier_created_output(&projection)?,
- presentation,
- FaultStage::Worker,
- "tools/call:frontier.init",
- )
+ );
+ hypothesis_record_output(&hypothesis, &operation)?
+ }
+ "hypothesis.list" => {
+ let args = deserialize::<HypothesisListArgs>(arguments)?;
+ let hypotheses = lift!(
+ self.store.list_hypotheses(ListHypothesesQuery {
+ frontier: args.frontier,
+ tags: tags_to_set(args.tags.unwrap_or_default())
+ .map_err(store_fault(&operation))?,
+ include_archived: args.include_archived.unwrap_or(false),
+ limit: args.limit,
+ })
+ );
+ hypothesis_list_output(&hypotheses, &operation)?
}
- "node.create" => {
- let args = deserialize::<NodeCreateToolArgs>(arguments)?;
- let node = self
- .store
- .add_node(CreateNodeRequest {
- class: parse_node_class_name(&args.class)
- .map_err(store_fault("tools/call:node.create"))?,
- frontier_id: args
- .frontier_id
- .as_deref()
- .map(crate::parse_frontier_id)
+ "hypothesis.read" => {
+ let args = deserialize::<HypothesisSelectorArgs>(arguments)?;
+ hypothesis_detail_output(
+ &lift!(self.store.read_hypothesis(&args.hypothesis)),
+ &operation,
+ )?
+ }
+ "hypothesis.update" => {
+ let args = deserialize::<HypothesisUpdateArgs>(arguments)?;
+ let hypothesis = lift!(
+ self.store.update_hypothesis(UpdateHypothesisRequest {
+ hypothesis: args.hypothesis,
+ expected_revision: args.expected_revision,
+ title: args
+ .title
+ .map(NonEmptyText::new)
.transpose()
- .map_err(store_fault("tools/call:node.create"))?,
- title: NonEmptyText::new(args.title)
- .map_err(store_fault("tools/call:node.create"))?,
+ .map_err(store_fault(&operation))?,
summary: args
.summary
.map(NonEmptyText::new)
.transpose()
- .map_err(store_fault("tools/call:node.create"))?,
+ .map_err(store_fault(&operation))?,
+ body: args
+ .body
+ .map(NonEmptyText::new)
+ .transpose()
+ .map_err(store_fault(&operation))?,
tags: args
.tags
- .map(parse_tag_set)
+ .map(tags_to_set)
.transpose()
- .map_err(store_fault("tools/call:node.create"))?,
- payload: NodePayload::with_schema(
- self.store.schema().schema_ref(),
- args.payload.unwrap_or_default(),
- ),
- annotations: tool_annotations(args.annotations)
- .map_err(store_fault("tools/call:node.create"))?,
- attachments: lineage_attachments(args.parents)
- .map_err(store_fault("tools/call:node.create"))?,
+ .map_err(store_fault(&operation))?,
+ parents: args.parents,
+ archived: args.archived,
})
- .map_err(store_fault("tools/call:node.create"))?;
- tool_success(
- created_node_output("created node", &node, "tools/call:node.create")?,
- presentation,
- FaultStage::Worker,
- "tools/call:node.create",
- )
+ );
+ hypothesis_record_output(&hypothesis, &operation)?
}
- "hypothesis.record" => {
- let args = deserialize::<HypothesisRecordToolArgs>(arguments)?;
- let node = self
- .store
- .add_node(CreateNodeRequest {
- class: NodeClass::Hypothesis,
- frontier_id: Some(
- crate::parse_frontier_id(&args.frontier_id)
- .map_err(store_fault("tools/call:hypothesis.record"))?,
- ),
- title: NonEmptyText::new(args.title)
- .map_err(store_fault("tools/call:hypothesis.record"))?,
- summary: Some(
- NonEmptyText::new(args.summary)
- .map_err(store_fault("tools/call:hypothesis.record"))?,
- ),
- tags: None,
- payload: NodePayload::with_schema(
- self.store.schema().schema_ref(),
- crate::json_object(json!({ "body": args.body }))
- .map_err(store_fault("tools/call:hypothesis.record"))?,
- ),
- annotations: tool_annotations(args.annotations)
- .map_err(store_fault("tools/call:hypothesis.record"))?,
- attachments: lineage_attachments(args.parents)
- .map_err(store_fault("tools/call:hypothesis.record"))?,
- })
- .map_err(store_fault("tools/call:hypothesis.record"))?;
- tool_success(
- created_node_output(
- "recorded hypothesis",
- &node,
- "tools/call:hypothesis.record",
- )?,
- presentation,
- FaultStage::Worker,
- "tools/call:hypothesis.record",
- )
+ "hypothesis.history" => {
+ let args = deserialize::<HypothesisSelectorArgs>(arguments)?;
+ history_output(
+ &lift!(self.store.hypothesis_history(&args.hypothesis)),
+ &operation,
+ )?
}
- "node.list" => {
- let args = deserialize::<NodeListToolArgs>(arguments)?;
- let nodes = self
- .store
- .list_nodes(ListNodesQuery {
- frontier_id: args
- .frontier_id
- .as_deref()
- .map(crate::parse_frontier_id)
+ "experiment.open" => {
+ let args = deserialize::<ExperimentOpenArgs>(arguments)?;
+ let experiment = lift!(
+ self.store.open_experiment(OpenExperimentRequest {
+ hypothesis: args.hypothesis,
+ slug: args
+ .slug
+ .map(Slug::new)
.transpose()
- .map_err(store_fault("tools/call:node.list"))?,
- class: args
- .class
- .as_deref()
- .map(parse_node_class_name)
+ .map_err(store_fault(&operation))?,
+ title: NonEmptyText::new(args.title).map_err(store_fault(&operation))?,
+ summary: args
+ .summary
+ .map(NonEmptyText::new)
.transpose()
- .map_err(store_fault("tools/call:node.list"))?,
- tags: parse_tag_set(args.tags)
- .map_err(store_fault("tools/call:node.list"))?,
- include_archived: args.include_archived,
- limit: args.limit.unwrap_or(20),
+ .map_err(store_fault(&operation))?,
+ tags: tags_to_set(args.tags.unwrap_or_default())
+ .map_err(store_fault(&operation))?,
+ parents: args.parents.unwrap_or_default(),
})
- .map_err(store_fault("tools/call:node.list"))?;
- tool_success(
- node_list_output(nodes.as_slice())?,
- presentation,
- FaultStage::Worker,
- "tools/call:node.list",
- )
- }
- "node.read" => {
- let args = deserialize::<NodeReadToolArgs>(arguments)?;
- let node_id = crate::parse_node_id(&args.node_id)
- .map_err(store_fault("tools/call:node.read"))?;
- let node = self
- .store
- .get_node(node_id)
- .map_err(store_fault("tools/call:node.read"))?
- .ok_or_else(|| {
- FaultRecord::new(
- FaultKind::InvalidInput,
- FaultStage::Store,
- "tools/call:node.read",
- format!("node {node_id} was not found"),
- )
- })?;
- tool_success(
- node_read_output(&node)?,
- presentation,
- FaultStage::Worker,
- "tools/call:node.read",
- )
- }
- "node.annotate" => {
- let args = deserialize::<NodeAnnotateToolArgs>(arguments)?;
- let annotation = NodeAnnotation {
- id: fidget_spinner_core::AnnotationId::fresh(),
- visibility: if args.visible {
- AnnotationVisibility::Visible
- } else {
- AnnotationVisibility::HiddenByDefault
- },
- label: args
- .label
- .map(NonEmptyText::new)
- .transpose()
- .map_err(store_fault("tools/call:node.annotate"))?,
- body: NonEmptyText::new(args.body)
- .map_err(store_fault("tools/call:node.annotate"))?,
- created_at: time::OffsetDateTime::now_utc(),
- };
- self.store
- .annotate_node(
- crate::parse_node_id(&args.node_id)
- .map_err(store_fault("tools/call:node.annotate"))?,
- annotation,
- )
- .map_err(store_fault("tools/call:node.annotate"))?;
- tool_success(
- tool_output(
- &json!({"annotated": args.node_id}),
- FaultStage::Worker,
- "tools/call:node.annotate",
- )?,
- presentation,
- FaultStage::Worker,
- "tools/call:node.annotate",
- )
- }
- "node.archive" => {
- let args = deserialize::<NodeArchiveToolArgs>(arguments)?;
- self.store
- .archive_node(
- crate::parse_node_id(&args.node_id)
- .map_err(store_fault("tools/call:node.archive"))?,
- )
- .map_err(store_fault("tools/call:node.archive"))?;
- tool_success(
- tool_output(
- &json!({"archived": args.node_id}),
- FaultStage::Worker,
- "tools/call:node.archive",
- )?,
- presentation,
- FaultStage::Worker,
- "tools/call:node.archive",
- )
+ );
+ experiment_record_output(&experiment, &operation)?
}
- "note.quick" => {
- let args = deserialize::<QuickNoteToolArgs>(arguments)?;
- let node = self
- .store
- .add_node(CreateNodeRequest {
- class: NodeClass::Note,
- frontier_id: args
- .frontier_id
- .as_deref()
- .map(crate::parse_frontier_id)
- .transpose()
- .map_err(store_fault("tools/call:note.quick"))?,
- title: NonEmptyText::new(args.title)
- .map_err(store_fault("tools/call:note.quick"))?,
- summary: Some(
- NonEmptyText::new(args.summary)
- .map_err(store_fault("tools/call:note.quick"))?,
- ),
- tags: Some(
- parse_tag_set(args.tags)
- .map_err(store_fault("tools/call:note.quick"))?,
- ),
- payload: NodePayload::with_schema(
- self.store.schema().schema_ref(),
- crate::json_object(json!({ "body": args.body }))
- .map_err(store_fault("tools/call:note.quick"))?,
- ),
- annotations: tool_annotations(args.annotations)
- .map_err(store_fault("tools/call:note.quick"))?,
- attachments: lineage_attachments(args.parents)
- .map_err(store_fault("tools/call:note.quick"))?,
+ "experiment.list" => {
+ let args = deserialize::<ExperimentListArgs>(arguments)?;
+ let experiments = lift!(
+ self.store.list_experiments(ListExperimentsQuery {
+ frontier: args.frontier,
+ hypothesis: args.hypothesis,
+ tags: tags_to_set(args.tags.unwrap_or_default())
+ .map_err(store_fault(&operation))?,
+ include_archived: args.include_archived.unwrap_or(false),
+ status: args.status,
+ limit: args.limit,
})
- .map_err(store_fault("tools/call:note.quick"))?;
- tool_success(
- created_node_output("recorded note", &node, "tools/call:note.quick")?,
- presentation,
- FaultStage::Worker,
- "tools/call:note.quick",
- )
+ );
+ experiment_list_output(&experiments, &operation)?
+ }
+ "experiment.read" => {
+ let args = deserialize::<ExperimentSelectorArgs>(arguments)?;
+ experiment_detail_output(
+ &lift!(self.store.read_experiment(&args.experiment)),
+ &operation,
+ )?
}
- "source.record" => {
- let args = deserialize::<SourceRecordToolArgs>(arguments)?;
- let node = self
- .store
- .add_node(CreateNodeRequest {
- class: NodeClass::Source,
- frontier_id: args
- .frontier_id
- .as_deref()
- .map(crate::parse_frontier_id)
+ "experiment.update" => {
+ let args = deserialize::<ExperimentUpdateArgs>(arguments)?;
+ let experiment = lift!(
+ self.store.update_experiment(UpdateExperimentRequest {
+ experiment: args.experiment,
+ expected_revision: args.expected_revision,
+ title: args
+ .title
+ .map(NonEmptyText::new)
.transpose()
- .map_err(store_fault("tools/call:source.record"))?,
- title: NonEmptyText::new(args.title)
- .map_err(store_fault("tools/call:source.record"))?,
- summary: Some(
- NonEmptyText::new(args.summary)
- .map_err(store_fault("tools/call:source.record"))?,
- ),
+ .map_err(store_fault(&operation))?,
+ summary: nullable_text_patch_from_wire(args.summary, &operation)?,
tags: args
.tags
- .map(parse_tag_set)
+ .map(tags_to_set)
.transpose()
- .map_err(store_fault("tools/call:source.record"))?,
- payload: NodePayload::with_schema(
- self.store.schema().schema_ref(),
- crate::json_object(json!({ "body": args.body }))
- .map_err(store_fault("tools/call:source.record"))?,
- ),
- annotations: tool_annotations(args.annotations)
- .map_err(store_fault("tools/call:source.record"))?,
- attachments: lineage_attachments(args.parents)
- .map_err(store_fault("tools/call:source.record"))?,
+ .map_err(store_fault(&operation))?,
+ parents: args.parents,
+ archived: args.archived,
+ outcome: args
+ .outcome
+ .map(|wire| experiment_outcome_patch_from_wire(wire, &operation))
+ .transpose()?,
})
- .map_err(store_fault("tools/call:source.record"))?;
- tool_success(
- created_node_output("recorded source", &node, "tools/call:source.record")?,
- presentation,
- FaultStage::Worker,
- "tools/call:source.record",
- )
+ );
+ experiment_record_output(&experiment, &operation)?
}
- "metric.define" => {
- let args = deserialize::<MetricDefineToolArgs>(arguments)?;
- let metric = self
- .store
- .define_metric(DefineMetricRequest {
- key: NonEmptyText::new(args.key)
- .map_err(store_fault("tools/call:metric.define"))?,
- unit: parse_metric_unit_name(&args.unit)
- .map_err(store_fault("tools/call:metric.define"))?,
- objective: crate::parse_optimization_objective(&args.objective)
- .map_err(store_fault("tools/call:metric.define"))?,
- description: args
- .description
- .map(NonEmptyText::new)
- .transpose()
- .map_err(store_fault("tools/call:metric.define"))?,
+ "experiment.close" => {
+ let args = deserialize::<ExperimentCloseArgs>(arguments)?;
+ let experiment = lift!(
+ self.store.close_experiment(CloseExperimentRequest {
+ experiment: args.experiment,
+ expected_revision: args.expected_revision,
+ backend: args.backend,
+ command: args.command,
+ dimensions: dimension_map_from_wire(args.dimensions)?,
+ primary_metric: metric_value_from_wire(args.primary_metric, &operation)?,
+ supporting_metrics: args
+ .supporting_metrics
+ .unwrap_or_default()
+ .into_iter()
+ .map(|metric| metric_value_from_wire(metric, &operation))
+ .collect::<Result<Vec<_>, _>>()?,
+ verdict: args.verdict,
+ rationale: NonEmptyText::new(args.rationale)
+ .map_err(store_fault(&operation))?,
+ analysis: args
+ .analysis
+ .map(|analysis| experiment_analysis_from_wire(analysis, &operation))
+ .transpose()?,
})
- .map_err(store_fault("tools/call:metric.define"))?;
- tool_success(
- json_created_output(
- "registered metric",
- json!({
- "key": metric.key,
- "unit": metric_unit_name(metric.unit),
- "objective": metric_objective_name(metric.objective),
- "description": metric.description,
- }),
- "tools/call:metric.define",
- )?,
- presentation,
- FaultStage::Worker,
- "tools/call:metric.define",
- )
+ );
+ experiment_record_output(&experiment, &operation)?
}
- "run.dimension.define" => {
- let args = deserialize::<RunDimensionDefineToolArgs>(arguments)?;
- let dimension = self
- .store
- .define_run_dimension(DefineRunDimensionRequest {
- key: NonEmptyText::new(args.key)
- .map_err(store_fault("tools/call:run.dimension.define"))?,
- value_type: parse_field_value_type_name(&args.value_type)
- .map_err(store_fault("tools/call:run.dimension.define"))?,
- description: args
- .description
+ "experiment.history" => {
+ let args = deserialize::<ExperimentSelectorArgs>(arguments)?;
+ history_output(
+ &lift!(self.store.experiment_history(&args.experiment)),
+ &operation,
+ )?
+ }
+ "artifact.record" => {
+ let args = deserialize::<ArtifactRecordArgs>(arguments)?;
+ let artifact = lift!(
+ self.store.create_artifact(CreateArtifactRequest {
+ slug: args
+ .slug
+ .map(Slug::new)
+ .transpose()
+ .map_err(store_fault(&operation))?,
+ kind: args.kind,
+ label: NonEmptyText::new(args.label).map_err(store_fault(&operation))?,
+ summary: args
+ .summary
.map(NonEmptyText::new)
.transpose()
- .map_err(store_fault("tools/call:run.dimension.define"))?,
+ .map_err(store_fault(&operation))?,
+ locator: NonEmptyText::new(args.locator)
+ .map_err(store_fault(&operation))?,
+ media_type: args
+ .media_type
+ .map(NonEmptyText::new)
+ .transpose()
+ .map_err(store_fault(&operation))?,
+ attachments: args.attachments.unwrap_or_default(),
})
- .map_err(store_fault("tools/call:run.dimension.define"))?;
- tool_success(
- json_created_output(
- "registered run dimension",
- json!({
- "key": dimension.key,
- "value_type": dimension.value_type.as_str(),
- "description": dimension.description,
- }),
- "tools/call:run.dimension.define",
- )?,
- presentation,
- FaultStage::Worker,
- "tools/call:run.dimension.define",
- )
+ );
+ artifact_record_output(&artifact, &operation)?
}
- "run.dimension.list" => {
- let items = self
- .store
- .list_run_dimensions()
- .map_err(store_fault("tools/call:run.dimension.list"))?;
- tool_success(
- run_dimension_list_output(items.as_slice())?,
- presentation,
- FaultStage::Worker,
- "tools/call:run.dimension.list",
- )
+ "artifact.list" => {
+ let args = deserialize::<ArtifactListArgs>(arguments)?;
+ let artifacts = lift!(self.store.list_artifacts(ListArtifactsQuery {
+ frontier: args.frontier,
+ kind: args.kind,
+ attached_to: args.attached_to,
+ limit: args.limit,
+ }));
+ artifact_list_output(&artifacts, &operation)?
}
- "metric.keys" => {
- let args = deserialize::<MetricKeysToolArgs>(arguments)?;
- let keys = self
- .store
- .list_metric_keys_filtered(MetricKeyQuery {
- frontier_id: args
- .frontier_id
- .as_deref()
- .map(crate::parse_frontier_id)
- .transpose()
- .map_err(store_fault("tools/call:metric.keys"))?,
- source: args
- .source
- .as_deref()
- .map(parse_metric_source_name)
- .transpose()
- .map_err(store_fault("tools/call:metric.keys"))?,
- dimensions: coerce_tool_dimensions(
- &self.store,
- args.dimensions.unwrap_or_default(),
- "tools/call:metric.keys",
- )?,
- })
- .map_err(store_fault("tools/call:metric.keys"))?;
- tool_success(
- metric_keys_output(keys.as_slice())?,
- presentation,
- FaultStage::Worker,
- "tools/call:metric.keys",
- )
+ "artifact.read" => {
+ let args = deserialize::<ArtifactSelectorArgs>(arguments)?;
+ artifact_detail_output(
+ &lift!(self.store.read_artifact(&args.artifact)),
+ &operation,
+ )?
}
- "metric.best" => {
- let args = deserialize::<MetricBestToolArgs>(arguments)?;
- let items = self
- .store
- .best_metrics(MetricBestQuery {
- key: NonEmptyText::new(args.key)
- .map_err(store_fault("tools/call:metric.best"))?,
- frontier_id: args
- .frontier_id
- .as_deref()
- .map(crate::parse_frontier_id)
- .transpose()
- .map_err(store_fault("tools/call:metric.best"))?,
- source: args
- .source
- .as_deref()
- .map(parse_metric_source_name)
+ "artifact.update" => {
+ let args = deserialize::<ArtifactUpdateArgs>(arguments)?;
+ let artifact = lift!(
+ self.store.update_artifact(UpdateArtifactRequest {
+ artifact: args.artifact,
+ expected_revision: args.expected_revision,
+ kind: args.kind,
+ label: args
+ .label
+ .map(NonEmptyText::new)
.transpose()
- .map_err(store_fault("tools/call:metric.best"))?,
- dimensions: coerce_tool_dimensions(
- &self.store,
- args.dimensions.unwrap_or_default(),
- "tools/call:metric.best",
- )?,
- order: args
- .order
- .as_deref()
- .map(parse_metric_order_name)
+ .map_err(store_fault(&operation))?,
+ summary: nullable_text_patch_from_wire(args.summary, &operation)?,
+ locator: args
+ .locator
+ .map(NonEmptyText::new)
.transpose()
- .map_err(store_fault("tools/call:metric.best"))?,
- limit: args.limit.unwrap_or(10),
+ .map_err(store_fault(&operation))?,
+ media_type: nullable_text_patch_from_wire(args.media_type, &operation)?,
+ attachments: args.attachments,
})
- .map_err(store_fault("tools/call:metric.best"))?;
- tool_success(
- metric_best_output(items.as_slice())?,
- presentation,
- FaultStage::Worker,
- "tools/call:metric.best",
- )
+ );
+ artifact_record_output(&artifact, &operation)?
}
- "metric.migrate" => {
- let report = self
- .store
- .migrate_metric_plane()
- .map_err(store_fault("tools/call:metric.migrate"))?;
- tool_success(
- json_created_output(
- "normalized legacy metric plane",
- json!(report),
- "tools/call:metric.migrate",
- )?,
- presentation,
- FaultStage::Worker,
- "tools/call:metric.migrate",
- )
+ "artifact.history" => {
+ let args = deserialize::<ArtifactSelectorArgs>(arguments)?;
+ history_output(
+ &lift!(self.store.artifact_history(&args.artifact)),
+ &operation,
+ )?
}
- "experiment.open" => {
- let args = deserialize::<ExperimentOpenToolArgs>(arguments)?;
- let item = self
- .store
- .open_experiment(OpenExperimentRequest {
- frontier_id: crate::parse_frontier_id(&args.frontier_id)
- .map_err(store_fault("tools/call:experiment.open"))?,
- hypothesis_node_id: crate::parse_node_id(&args.hypothesis_node_id)
- .map_err(store_fault("tools/call:experiment.open"))?,
- title: NonEmptyText::new(args.title)
- .map_err(store_fault("tools/call:experiment.open"))?,
- summary: args
- .summary
- .map(NonEmptyText::new)
- .transpose()
- .map_err(store_fault("tools/call:experiment.open"))?,
- })
- .map_err(store_fault("tools/call:experiment.open"))?;
- tool_success(
- experiment_open_output(
- &item,
- "tools/call:experiment.open",
- "opened experiment",
- )?,
- presentation,
+ "metric.define" => {
+ let args = deserialize::<MetricDefineArgs>(arguments)?;
+ tool_output(
+ &lift!(
+ self.store.define_metric(DefineMetricRequest {
+ key: NonEmptyText::new(args.key).map_err(store_fault(&operation))?,
+ unit: args.unit,
+ objective: args.objective,
+ visibility: args.visibility.unwrap_or(MetricVisibility::Canonical),
+ description: args
+ .description
+ .map(NonEmptyText::new)
+ .transpose()
+ .map_err(store_fault(&operation))?,
+ })
+ ),
FaultStage::Worker,
- "tools/call:experiment.open",
- )
+ &operation,
+ )?
}
- "experiment.list" => {
- let args = deserialize::<ExperimentListToolArgs>(arguments)?;
- let items = self
- .store
- .list_open_experiments(
- args.frontier_id
- .as_deref()
- .map(crate::parse_frontier_id)
- .transpose()
- .map_err(store_fault("tools/call:experiment.list"))?,
- )
- .map_err(store_fault("tools/call:experiment.list"))?;
- tool_success(
- experiment_list_output(items.as_slice())?,
- presentation,
- FaultStage::Worker,
- "tools/call:experiment.list",
- )
+ "metric.keys" => {
+ let args = deserialize::<MetricKeysArgs>(arguments)?;
+ metric_keys_output(
+ &lift!(self.store.metric_keys(MetricKeysQuery {
+ frontier: args.frontier,
+ scope: args.scope.unwrap_or(MetricScope::Live),
+ })),
+ &operation,
+ )?
}
- "experiment.read" => {
- let args = deserialize::<ExperimentReadToolArgs>(arguments)?;
- let item = self
- .store
- .read_open_experiment(
- crate::parse_experiment_id(&args.experiment_id)
- .map_err(store_fault("tools/call:experiment.read"))?,
- )
- .map_err(store_fault("tools/call:experiment.read"))?;
- tool_success(
- experiment_open_output(&item, "tools/call:experiment.read", "open experiment")?,
- presentation,
- FaultStage::Worker,
- "tools/call:experiment.read",
- )
+ "metric.best" => {
+ let args = deserialize::<MetricBestArgs>(arguments)?;
+ metric_best_output(
+ &lift!(self.store.metric_best(MetricBestQuery {
+ frontier: args.frontier,
+ hypothesis: args.hypothesis,
+ key: NonEmptyText::new(args.key).map_err(store_fault(&operation))?,
+ dimensions: dimension_map_from_wire(args.dimensions)?,
+ include_rejected: args.include_rejected.unwrap_or(false),
+ limit: args.limit,
+ order: args.order,
+ })),
+ &operation,
+ )?
}
- "experiment.close" => {
- let args = deserialize::<ExperimentCloseToolArgs>(arguments)?;
- let receipt = self
- .store
- .close_experiment(CloseExperimentRequest {
- experiment_id: crate::parse_experiment_id(&args.experiment_id)
- .map_err(store_fault("tools/call:experiment.close"))?,
- run_title: NonEmptyText::new(args.run.title)
- .map_err(store_fault("tools/call:experiment.close"))?,
- run_summary: args
- .run
- .summary
- .map(NonEmptyText::new)
- .transpose()
- .map_err(store_fault("tools/call:experiment.close"))?,
- backend: parse_backend_name(&args.run.backend)
- .map_err(store_fault("tools/call:experiment.close"))?,
- dimensions: coerce_tool_dimensions(
- &self.store,
- args.run.dimensions,
- "tools/call:experiment.close",
- )?,
- command: command_recipe_from_wire(
- args.run.command,
- self.store.project_root(),
- )
- .map_err(store_fault("tools/call:experiment.close"))?,
- primary_metric: metric_value_from_wire(args.primary_metric)
- .map_err(store_fault("tools/call:experiment.close"))?,
- supporting_metrics: args
- .supporting_metrics
- .into_iter()
- .map(metric_value_from_wire)
- .collect::<Result<Vec<_>, _>>()
- .map_err(store_fault("tools/call:experiment.close"))?,
- note: FrontierNote {
- summary: NonEmptyText::new(args.note.summary)
- .map_err(store_fault("tools/call:experiment.close"))?,
- next_hypotheses: crate::to_text_vec(args.note.next_hypotheses)
- .map_err(store_fault("tools/call:experiment.close"))?,
- },
- verdict: parse_verdict_name(&args.verdict)
- .map_err(store_fault("tools/call:experiment.close"))?,
- analysis: args
- .analysis
- .map(experiment_analysis_from_wire)
- .transpose()
- .map_err(store_fault("tools/call:experiment.close"))?,
- decision_title: NonEmptyText::new(args.decision_title)
- .map_err(store_fault("tools/call:experiment.close"))?,
- decision_rationale: NonEmptyText::new(args.decision_rationale)
- .map_err(store_fault("tools/call:experiment.close"))?,
- })
- .map_err(store_fault("tools/call:experiment.close"))?;
- tool_success(
- experiment_close_output(&self.store, &receipt)?,
- presentation,
+ "run.dimension.define" => {
+ let args = deserialize::<DimensionDefineArgs>(arguments)?;
+ tool_output(
+ &lift!(
+ self.store.define_run_dimension(DefineRunDimensionRequest {
+ key: NonEmptyText::new(args.key).map_err(store_fault(&operation))?,
+ value_type: args.value_type,
+ description: args
+ .description
+ .map(NonEmptyText::new)
+ .transpose()
+ .map_err(store_fault(&operation))?,
+ })
+ ),
FaultStage::Worker,
- "tools/call:experiment.close",
- )
+ &operation,
+ )?
}
- other => Err(FaultRecord::new(
- FaultKind::InvalidInput,
+ "run.dimension.list" => tool_output(
+ &lift!(self.store.list_run_dimensions()),
FaultStage::Worker,
- format!("tools/call:{other}"),
- format!("unknown tool `{other}`"),
- )),
- }
+ &operation,
+ )?,
+ other => {
+ return Err(FaultRecord::new(
+ FaultKind::InvalidInput,
+ FaultStage::Worker,
+ &operation,
+ format!("unknown worker tool `{other}`"),
+ ));
+ }
+ };
+ tool_success(output, presentation, FaultStage::Worker, &operation)
}
- fn read_resource(&mut self, uri: &str) -> Result<Value, FaultRecord> {
- match uri {
- "fidget-spinner://project/config" => Ok(json!({
- "contents": [{
- "uri": uri,
- "mimeType": "application/json",
- "text": crate::to_pretty_json(self.store.config())
- .map_err(store_fault("resources/read:fidget-spinner://project/config"))?,
- }]
- })),
- "fidget-spinner://project/schema" => Ok(json!({
- "contents": [{
- "uri": uri,
- "mimeType": "application/json",
- "text": crate::to_pretty_json(self.store.schema())
- .map_err(store_fault("resources/read:fidget-spinner://project/schema"))?,
- }]
- })),
- _ => Err(FaultRecord::new(
- FaultKind::InvalidInput,
- FaultStage::Worker,
- format!("resources/read:{uri}"),
- format!("unknown resource `{uri}`"),
- )),
- }
+ fn read_resource(uri: &str) -> Result<Value, FaultRecord> {
+ Err(FaultRecord::new(
+ FaultKind::InvalidInput,
+ FaultStage::Worker,
+ format!("resources/read:{uri}"),
+ format!("unknown worker resource `{uri}`"),
+ ))
}
fn maybe_inject_transient(operation: &str) -> Result<(), FaultRecord> {
@@ -877,6 +531,227 @@ impl WorkerService {
}
}
+#[derive(Debug, Deserialize)]
+struct TagAddArgs {
+ name: String,
+ description: String,
+}
+
+#[derive(Debug, Deserialize)]
+struct FrontierCreateArgs {
+ label: String,
+ objective: String,
+ slug: Option<String>,
+}
+
+#[derive(Debug, Deserialize)]
+struct FrontierSelectorArgs {
+ frontier: String,
+}
+
+#[derive(Debug, Deserialize)]
+struct FrontierBriefUpdateArgs {
+ frontier: String,
+ expected_revision: Option<u64>,
+ situation: Option<NullableStringArg>,
+ roadmap: Option<Vec<FrontierRoadmapItemWire>>,
+ unknowns: Option<Vec<String>>,
+}
+
+#[derive(Debug, Deserialize)]
+struct FrontierRoadmapItemWire {
+ rank: u32,
+ hypothesis: String,
+ summary: Option<String>,
+}
+
+#[derive(Debug, Deserialize)]
+struct HypothesisRecordArgs {
+ frontier: String,
+ title: String,
+ summary: String,
+ body: String,
+ slug: Option<String>,
+ tags: Option<Vec<String>>,
+ parents: Option<Vec<VertexSelector>>,
+}
+
+#[derive(Debug, Deserialize)]
+struct HypothesisListArgs {
+ frontier: Option<String>,
+ tags: Option<Vec<String>>,
+ include_archived: Option<bool>,
+ limit: Option<u32>,
+}
+
+#[derive(Debug, Deserialize)]
+struct HypothesisSelectorArgs {
+ hypothesis: String,
+}
+
+#[derive(Debug, Deserialize)]
+struct HypothesisUpdateArgs {
+ hypothesis: String,
+ expected_revision: Option<u64>,
+ title: Option<String>,
+ summary: Option<String>,
+ body: Option<String>,
+ tags: Option<Vec<String>>,
+ parents: Option<Vec<VertexSelector>>,
+ archived: Option<bool>,
+}
+
+#[derive(Debug, Deserialize)]
+struct ExperimentOpenArgs {
+ hypothesis: String,
+ title: String,
+ summary: Option<String>,
+ slug: Option<String>,
+ tags: Option<Vec<String>>,
+ parents: Option<Vec<VertexSelector>>,
+}
+
+#[derive(Debug, Deserialize)]
+struct ExperimentListArgs {
+ frontier: Option<String>,
+ hypothesis: Option<String>,
+ tags: Option<Vec<String>>,
+ include_archived: Option<bool>,
+ status: Option<ExperimentStatus>,
+ limit: Option<u32>,
+}
+
+#[derive(Debug, Deserialize)]
+struct ExperimentSelectorArgs {
+ experiment: String,
+}
+
+#[derive(Debug, Deserialize)]
+struct ExperimentUpdateArgs {
+ experiment: String,
+ expected_revision: Option<u64>,
+ title: Option<String>,
+ summary: Option<NullableStringArg>,
+ tags: Option<Vec<String>>,
+ parents: Option<Vec<VertexSelector>>,
+ archived: Option<bool>,
+ outcome: Option<ExperimentOutcomeWire>,
+}
+
+#[derive(Debug, Deserialize)]
+struct ExperimentCloseArgs {
+ experiment: String,
+ expected_revision: Option<u64>,
+ backend: ExecutionBackend,
+ command: CommandRecipe,
+ dimensions: Option<Map<String, Value>>,
+ primary_metric: MetricValueWire,
+ supporting_metrics: Option<Vec<MetricValueWire>>,
+ verdict: FrontierVerdict,
+ rationale: String,
+ analysis: Option<ExperimentAnalysisWire>,
+}
+
+#[derive(Debug, Deserialize)]
+struct ExperimentOutcomeWire {
+ backend: ExecutionBackend,
+ command: CommandRecipe,
+ dimensions: Option<Map<String, Value>>,
+ primary_metric: MetricValueWire,
+ supporting_metrics: Option<Vec<MetricValueWire>>,
+ verdict: FrontierVerdict,
+ rationale: String,
+ analysis: Option<ExperimentAnalysisWire>,
+}
+
+#[derive(Debug, Deserialize)]
+struct ExperimentAnalysisWire {
+ summary: String,
+ body: String,
+}
+
+#[derive(Debug, Deserialize)]
+struct MetricValueWire {
+ key: String,
+ value: f64,
+}
+
+#[derive(Debug, Deserialize)]
+struct ArtifactRecordArgs {
+ kind: ArtifactKind,
+ label: String,
+ summary: Option<String>,
+ locator: String,
+ media_type: Option<String>,
+ slug: Option<String>,
+ attachments: Option<Vec<AttachmentSelector>>,
+}
+
+#[derive(Debug, Deserialize)]
+struct ArtifactListArgs {
+ frontier: Option<String>,
+ kind: Option<ArtifactKind>,
+ attached_to: Option<AttachmentSelector>,
+ limit: Option<u32>,
+}
+
+#[derive(Debug, Deserialize)]
+struct ArtifactSelectorArgs {
+ artifact: String,
+}
+
+#[derive(Debug, Deserialize)]
+struct ArtifactUpdateArgs {
+ artifact: String,
+ expected_revision: Option<u64>,
+ kind: Option<ArtifactKind>,
+ label: Option<String>,
+ summary: Option<NullableStringArg>,
+ locator: Option<String>,
+ media_type: Option<NullableStringArg>,
+ attachments: Option<Vec<AttachmentSelector>>,
+}
+
+#[derive(Debug, Deserialize)]
+#[serde(untagged)]
+enum NullableStringArg {
+ Set(String),
+ Clear(()),
+}
+
+#[derive(Debug, Deserialize)]
+struct MetricDefineArgs {
+ key: String,
+ unit: MetricUnit,
+ objective: OptimizationObjective,
+ visibility: Option<MetricVisibility>,
+ description: Option<String>,
+}
+
+#[derive(Debug, Deserialize)]
+struct MetricKeysArgs {
+ frontier: Option<String>,
+ scope: Option<MetricScope>,
+}
+
+#[derive(Debug, Deserialize)]
+struct MetricBestArgs {
+ frontier: Option<String>,
+ hypothesis: Option<String>,
+ key: String,
+ dimensions: Option<Map<String, Value>>,
+ include_rejected: Option<bool>,
+ limit: Option<u32>,
+ order: Option<MetricRankOrder>,
+}
+
+#[derive(Debug, Deserialize)]
+struct DimensionDefineArgs {
+ key: String,
+ value_type: FieldValueType,
+ description: Option<String>,
+}
+
fn deserialize<T: for<'de> Deserialize<'de>>(value: Value) -> Result<T, FaultRecord> {
serde_json::from_value(value).map_err(|error| {
FaultRecord::new(
@@ -888,256 +763,302 @@ fn deserialize<T: for<'de> Deserialize<'de>>(value: Value) -> Result<T, FaultRec
})
}
-fn project_status_output(full: &Value, schema: &ProjectSchema) -> ToolOutput {
- let concise = json!({
- "display_name": full["display_name"],
- "project_root": full["project_root"],
- "state_root": full["state_root"],
- "schema": schema_label(schema),
- "git_repo_detected": full["git_repo_detected"],
- });
- let git = if full["git_repo_detected"].as_bool().unwrap_or(false) {
- "detected"
- } else {
- "not detected"
- };
- ToolOutput::from_values(
- concise,
- full.clone(),
- [
- format!("project {}", value_summary(&full["display_name"])),
- format!("root: {}", value_summary(&full["project_root"])),
- format!("state: {}", value_summary(&full["state_root"])),
- format!("schema: {}", schema_label(schema)),
- format!("git: {git}"),
- ]
- .join("\n"),
- None,
- )
+fn store_fault<E>(operation: &str) -> impl FnOnce(E) -> FaultRecord + '_
+where
+ E: Into<StoreError>,
+{
+ move |error| {
+ let error: StoreError = error.into();
+ let kind = match error {
+ StoreError::MissingProjectStore(_)
+ | StoreError::AmbiguousProjectStoreDiscovery { .. }
+ | StoreError::UnknownTag(_)
+ | StoreError::UnknownMetricDefinition(_)
+ | StoreError::UnknownRunDimension(_)
+ | StoreError::UnknownFrontierSelector(_)
+ | StoreError::UnknownHypothesisSelector(_)
+ | StoreError::UnknownExperimentSelector(_)
+ | StoreError::UnknownArtifactSelector(_)
+ | StoreError::RevisionMismatch { .. }
+ | StoreError::HypothesisBodyMustBeSingleParagraph
+ | StoreError::ExperimentHypothesisRequired
+ | StoreError::ExperimentAlreadyClosed(_)
+ | StoreError::ExperimentStillOpen(_)
+ | StoreError::CrossFrontierInfluence
+ | StoreError::SelfEdge
+ | StoreError::UnknownRoadmapHypothesis(_)
+ | StoreError::ManualExperimentRequiresCommand
+ | StoreError::MetricOrderRequired { .. }
+ | StoreError::UnknownDimensionFilter(_)
+ | StoreError::DuplicateTag(_)
+ | StoreError::DuplicateMetricDefinition(_)
+ | StoreError::DuplicateRunDimension(_)
+ | StoreError::InvalidInput(_) => FaultKind::InvalidInput,
+ StoreError::IncompatibleStoreFormatVersion { .. } => FaultKind::Unavailable,
+ StoreError::Io(_)
+ | StoreError::Sql(_)
+ | StoreError::Json(_)
+ | StoreError::TimeParse(_)
+ | StoreError::TimeFormat(_)
+ | StoreError::Core(_)
+ | StoreError::Uuid(_) => FaultKind::Internal,
+ };
+ FaultRecord::new(kind, FaultStage::Store, operation, error.to_string())
+ }
}
-fn project_schema_output(schema: &ProjectSchema) -> Result<ToolOutput, FaultRecord> {
- let field_previews = schema
- .fields
- .iter()
- .take(8)
- .map(project_schema_field_value)
- .collect::<Vec<_>>();
- let concise = json!({
- "namespace": schema.namespace,
- "version": schema.version,
- "field_count": schema.fields.len(),
- "fields": field_previews,
- "truncated": schema.fields.len() > 8,
- });
- let mut lines = vec![
- format!("schema {}", schema_label(schema)),
- format!("{} field(s)", schema.fields.len()),
- ];
- for field in schema.fields.iter().take(8) {
- lines.push(format!(
- "{} [{}] {} {}",
- field.name,
- if field.node_classes.is_empty() {
- "any".to_owned()
- } else {
- field
- .node_classes
- .iter()
- .map(ToString::to_string)
- .collect::<Vec<_>>()
- .join(",")
- },
- field.presence.as_str(),
- field.role.as_str(),
- ));
- }
- if schema.fields.len() > 8 {
- lines.push(format!("... +{} more field(s)", schema.fields.len() - 8));
+fn with_fault<T, E>(result: Result<T, E>, operation: &str) -> Result<T, FaultRecord>
+where
+ E: Into<StoreError>,
+{
+ result.map_err(store_fault(operation))
+}
+
+fn tags_to_set(tags: Vec<String>) -> Result<BTreeSet<TagName>, StoreError> {
+ tags.into_iter()
+ .map(TagName::new)
+ .collect::<Result<BTreeSet<_>, _>>()
+ .map_err(StoreError::from)
+}
+
+fn metric_value_from_wire(
+ wire: MetricValueWire,
+ operation: &str,
+) -> Result<fidget_spinner_core::MetricValue, FaultRecord> {
+ Ok(fidget_spinner_core::MetricValue {
+ key: NonEmptyText::new(wire.key).map_err(store_fault(operation))?,
+ value: wire.value,
+ })
+}
+
+fn experiment_analysis_from_wire(
+ wire: ExperimentAnalysisWire,
+ operation: &str,
+) -> Result<ExperimentAnalysis, FaultRecord> {
+ Ok(ExperimentAnalysis {
+ summary: NonEmptyText::new(wire.summary).map_err(store_fault(operation))?,
+ body: NonEmptyText::new(wire.body).map_err(store_fault(operation))?,
+ })
+}
+
+fn experiment_outcome_patch_from_wire(
+ wire: ExperimentOutcomeWire,
+ operation: &str,
+) -> Result<ExperimentOutcomePatch, FaultRecord> {
+ Ok(ExperimentOutcomePatch {
+ backend: wire.backend,
+ command: wire.command,
+ dimensions: dimension_map_from_wire(wire.dimensions)?,
+ primary_metric: metric_value_from_wire(wire.primary_metric, operation)?,
+ supporting_metrics: wire
+ .supporting_metrics
+ .unwrap_or_default()
+ .into_iter()
+ .map(|metric| metric_value_from_wire(metric, operation))
+ .collect::<Result<Vec<_>, _>>()?,
+ verdict: wire.verdict,
+ rationale: NonEmptyText::new(wire.rationale).map_err(store_fault(operation))?,
+ analysis: wire
+ .analysis
+ .map(|analysis| experiment_analysis_from_wire(analysis, operation))
+ .transpose()?,
+ })
+}
+
+fn nullable_text_patch_from_wire(
+ patch: Option<NullableStringArg>,
+ operation: &str,
+) -> Result<Option<TextPatch<NonEmptyText>>, FaultRecord> {
+ match patch {
+ None => Ok(None),
+ Some(NullableStringArg::Clear(())) => Ok(Some(TextPatch::Clear)),
+ Some(NullableStringArg::Set(value)) => Ok(Some(TextPatch::Set(
+ NonEmptyText::new(value).map_err(store_fault(operation))?,
+ ))),
}
- detailed_tool_output(
- &concise,
- schema,
- lines.join("\n"),
- None,
- FaultStage::Worker,
- "tools/call:project.schema",
- )
}
-fn schema_field_upsert_output(
- schema: &ProjectSchema,
- field: &ProjectFieldSpec,
-) -> Result<ToolOutput, FaultRecord> {
- let concise = json!({
- "schema": schema.schema_ref(),
- "field": project_schema_field_value(field),
- });
- detailed_tool_output(
- &concise,
- &concise,
- format!(
- "upserted schema field {}\nschema: {}\nclasses: {}\npresence: {}\nseverity: {}\nrole: {}\ninference: {}{}",
- field.name,
- schema_label(schema),
- render_schema_node_classes(&field.node_classes),
- field.presence.as_str(),
- field.severity.as_str(),
- field.role.as_str(),
- field.inference_policy.as_str(),
- field
- .value_type
- .map(|value_type| format!("\nvalue_type: {}", value_type.as_str()))
- .unwrap_or_default(),
- ),
- None,
- FaultStage::Worker,
- "tools/call:schema.field.upsert",
- )
+fn dimension_map_from_wire(
+ dimensions: Option<Map<String, Value>>,
+) -> Result<BTreeMap<NonEmptyText, RunDimensionValue>, FaultRecord> {
+ dimensions
+ .unwrap_or_default()
+ .into_iter()
+ .map(|(key, value)| {
+ Ok((
+ NonEmptyText::new(key).map_err(store_fault("dimension-map"))?,
+ json_value_to_dimension(value)?,
+ ))
+ })
+ .collect()
+}
+
+fn json_value_to_dimension(value: Value) -> Result<RunDimensionValue, FaultRecord> {
+ match value {
+ Value::String(raw) => {
+ if time::OffsetDateTime::parse(&raw, &time::format_description::well_known::Rfc3339)
+ .is_ok()
+ {
+ NonEmptyText::new(raw)
+ .map(RunDimensionValue::Timestamp)
+ .map_err(store_fault("dimension-map"))
+ } else {
+ NonEmptyText::new(raw)
+ .map(RunDimensionValue::String)
+ .map_err(store_fault("dimension-map"))
+ }
+ }
+ Value::Number(number) => number
+ .as_f64()
+ .map(RunDimensionValue::Numeric)
+ .ok_or_else(|| {
+ FaultRecord::new(
+ FaultKind::InvalidInput,
+ FaultStage::Protocol,
+ "dimension-map",
+ "numeric dimension values must fit into f64",
+ )
+ }),
+ Value::Bool(value) => Ok(RunDimensionValue::Boolean(value)),
+ _ => Err(FaultRecord::new(
+ FaultKind::InvalidInput,
+ FaultStage::Protocol,
+ "dimension-map",
+ "dimension values must be string, number, boolean, or RFC3339 timestamp",
+ )),
+ }
}
-fn schema_field_remove_output(
- schema: &ProjectSchema,
- removed_count: u64,
+fn project_status_output(
+ status: &ProjectStatus,
+ operation: &str,
) -> Result<ToolOutput, FaultRecord> {
let concise = json!({
- "schema": schema.schema_ref(),
- "removed_count": removed_count,
+ "display_name": status.display_name,
+ "project_root": status.project_root,
+ "frontier_count": status.frontier_count,
+ "hypothesis_count": status.hypothesis_count,
+ "experiment_count": status.experiment_count,
+ "open_experiment_count": status.open_experiment_count,
+ "artifact_count": status.artifact_count,
});
detailed_tool_output(
&concise,
- &concise,
- format!(
- "removed {} schema field definition(s)\nschema: {}",
- removed_count,
- schema_label(schema),
- ),
+ status,
+ [
+ format!("project {}", status.display_name),
+ format!("root: {}", status.project_root),
+ format!("frontiers: {}", status.frontier_count),
+ format!("hypotheses: {}", status.hypothesis_count),
+ format!(
+ "experiments: {} (open {})",
+ status.experiment_count, status.open_experiment_count
+ ),
+ format!("artifacts: {}", status.artifact_count),
+ ]
+ .join("\n"),
None,
FaultStage::Worker,
- "tools/call:schema.field.remove",
+ operation,
)
}
-fn tag_add_output(tag: &TagRecord) -> Result<ToolOutput, FaultRecord> {
+fn tag_list_output(
+ tags: &[fidget_spinner_core::TagRecord],
+ operation: &str,
+) -> Result<ToolOutput, FaultRecord> {
let concise = json!({
- "name": tag.name,
- "description": tag.description,
+ "count": tags.len(),
+ "tags": tags,
});
detailed_tool_output(
&concise,
- tag,
- format!("registered tag {}\n{}", tag.name, tag.description),
- None,
- FaultStage::Worker,
- "tools/call:tag.add",
- )
-}
-
-fn tag_list_output(tags: &[TagRecord]) -> Result<ToolOutput, FaultRecord> {
- let concise = tags
- .iter()
- .map(|tag| {
- json!({
- "name": tag.name,
- "description": tag.description,
- })
- })
- .collect::<Vec<_>>();
- let mut lines = vec![format!("{} tag(s)", tags.len())];
- lines.extend(
- tags.iter()
- .map(|tag| format!("{}: {}", tag.name, tag.description)),
- );
- detailed_tool_output(
- &concise,
- &tags,
- lines.join("\n"),
- None,
- FaultStage::Worker,
- "tools/call:tag.list",
- )
-}
-
-fn frontier_list_output(frontiers: &[FrontierRecord]) -> Result<ToolOutput, FaultRecord> {
- let concise = frontiers
- .iter()
- .map(|frontier| {
- json!({
- "frontier_id": frontier.id,
- "label": frontier.label,
- "status": format!("{:?}", frontier.status).to_ascii_lowercase(),
- })
- })
- .collect::<Vec<_>>();
- let mut lines = vec![format!("{} frontier(s)", frontiers.len())];
- lines.extend(frontiers.iter().map(|frontier| {
- format!(
- "{} {} {}",
- frontier.id,
- format!("{:?}", frontier.status).to_ascii_lowercase(),
- frontier.label,
- )
- }));
- detailed_tool_output(
&concise,
- &frontiers,
- lines.join("\n"),
+ if tags.is_empty() {
+ "no tags".to_owned()
+ } else {
+ tags.iter()
+ .map(|tag| format!("{} — {}", tag.name, tag.description))
+ .collect::<Vec<_>>()
+ .join("\n")
+ },
None,
FaultStage::Worker,
- "tools/call:frontier.list",
+ operation,
)
}
-fn frontier_status_output(projection: &FrontierProjection) -> Result<ToolOutput, FaultRecord> {
- let concise = frontier_projection_summary_value(projection);
+fn frontier_list_output(
+ frontiers: &[FrontierSummary],
+ operation: &str,
+) -> Result<ToolOutput, FaultRecord> {
+ let concise = json!({ "count": frontiers.len(), "frontiers": frontiers });
detailed_tool_output(
&concise,
- projection,
- frontier_projection_text("frontier", projection),
- None,
- FaultStage::Worker,
- "tools/call:frontier.status",
- )
-}
-
-fn frontier_created_output(projection: &FrontierProjection) -> Result<ToolOutput, FaultRecord> {
- let concise = frontier_projection_summary_value(projection);
- detailed_tool_output(
&concise,
- projection,
- frontier_projection_text("created frontier", projection),
+ if frontiers.is_empty() {
+ "no frontiers".to_owned()
+ } else {
+ frontiers
+ .iter()
+ .map(|frontier| {
+ format!(
+ "{} — {} | active hypotheses {} | open experiments {}",
+ frontier.slug,
+ frontier.objective,
+ frontier.active_hypothesis_count,
+ frontier.open_experiment_count
+ )
+ })
+ .collect::<Vec<_>>()
+ .join("\n")
+ },
None,
FaultStage::Worker,
- "tools/call:frontier.init",
+ operation,
)
}
-fn created_node_output(
- action: &str,
- node: &fidget_spinner_core::DagNode,
- operation: &'static str,
+fn frontier_record_output(
+ frontier: &fidget_spinner_core::FrontierRecord,
+ operation: &str,
) -> Result<ToolOutput, FaultRecord> {
- let concise = node_brief_value(node);
- let mut lines = vec![format!("{action}: {} {}", node.class, node.id)];
- lines.push(format!("title: {}", node.title));
- if let Some(summary) = node.summary.as_ref() {
- lines.push(format!("summary: {summary}"));
- }
- if !node.tags.is_empty() {
- lines.push(format!("tags: {}", format_tags(&node.tags)));
- }
- if let Some(frontier_id) = node.frontier_id {
- lines.push(format!("frontier: {frontier_id}"));
+ let mut lines = vec![format!(
+ "frontier {} — {}",
+ frontier.slug, frontier.objective
+ )];
+ lines.push(format!("status: {}", frontier.status.as_str()));
+ if let Some(situation) = frontier.brief.situation.as_ref() {
+ lines.push(format!("situation: {}", situation));
+ }
+ if !frontier.brief.roadmap.is_empty() {
+ lines.push("roadmap:".to_owned());
+ for item in &frontier.brief.roadmap {
+ lines.push(format!(
+ " {}. {}{}",
+ item.rank,
+ item.hypothesis_id,
+ item.summary
+ .as_ref()
+ .map_or_else(String::new, |summary| format!(" — {summary}"))
+ ));
+ }
}
- if !node.diagnostics.items.is_empty() {
+ if !frontier.brief.unknowns.is_empty() {
lines.push(format!(
- "diagnostics: {}",
- diagnostic_summary_text(&node.diagnostics)
+ "unknowns: {}",
+ frontier
+ .brief
+ .unknowns
+ .iter()
+ .map(ToString::to_string)
+ .collect::<Vec<_>>()
+ .join("; ")
));
}
detailed_tool_output(
- &concise,
- node,
+ &frontier,
+ frontier,
lines.join("\n"),
None,
FaultStage::Worker,
@@ -1145,434 +1066,285 @@ fn created_node_output(
)
}
-fn node_list_output(nodes: &[NodeSummary]) -> Result<ToolOutput, FaultRecord> {
- let concise = nodes.iter().map(node_summary_value).collect::<Vec<_>>();
- let mut lines = vec![format!("{} node(s)", nodes.len())];
- lines.extend(nodes.iter().map(render_node_summary_line));
- detailed_tool_output(
- &concise,
- &nodes,
- lines.join("\n"),
- None,
- FaultStage::Worker,
- "tools/call:node.list",
- )
-}
-
-fn node_read_output(node: &fidget_spinner_core::DagNode) -> Result<ToolOutput, FaultRecord> {
- let visible_annotations = node
- .annotations
- .iter()
- .filter(|annotation| annotation.visibility == AnnotationVisibility::Visible)
- .map(|annotation| {
- let mut value = Map::new();
- if let Some(label) = annotation.label.as_ref() {
- let _ = value.insert("label".to_owned(), json!(label));
- }
- let _ = value.insert("body".to_owned(), json!(annotation.body));
- Value::Object(value)
- })
- .collect::<Vec<_>>();
- let visible_annotation_count = visible_annotations.len();
- let hidden_annotation_count = node
- .annotations
- .iter()
- .filter(|annotation| annotation.visibility == AnnotationVisibility::HiddenByDefault)
- .count();
- let mut concise = Map::new();
- let _ = concise.insert("id".to_owned(), json!(node.id));
- let _ = concise.insert("class".to_owned(), json!(node.class.as_str()));
- let _ = concise.insert("title".to_owned(), json!(node.title));
- if let Some(summary) = node.summary.as_ref() {
- let _ = concise.insert("summary".to_owned(), json!(summary));
- }
- if let Some(frontier_id) = node.frontier_id {
- let _ = concise.insert("frontier_id".to_owned(), json!(frontier_id));
- }
- if !node.tags.is_empty() {
- let _ = concise.insert(
- "tags".to_owned(),
- json!(
- node.tags
- .iter()
- .map(ToString::to_string)
- .collect::<Vec<_>>()
- ),
- );
- }
- if !node.payload.fields.is_empty() {
- let filtered_fields =
- filtered_payload_fields(node.class, &node.payload.fields).collect::<Vec<_>>();
- if !filtered_fields.is_empty() {
- let _ = concise.insert(
- "payload_field_count".to_owned(),
- json!(filtered_fields.len()),
- );
- if is_prose_node(node.class) {
- let _ = concise.insert(
- "payload_fields".to_owned(),
- json!(
- filtered_fields
- .iter()
- .take(6)
- .map(|(name, _)| (*name).clone())
- .collect::<Vec<_>>()
- ),
- );
- } else {
- let payload_preview = payload_preview_value(node.class, &node.payload.fields);
- if let Value::Object(object) = &payload_preview
- && !object.is_empty()
- {
- let _ = concise.insert("payload_preview".to_owned(), payload_preview);
- }
- }
- }
- }
- if !node.diagnostics.items.is_empty() {
- let _ = concise.insert(
- "diagnostics".to_owned(),
- diagnostic_summary_value(&node.diagnostics),
- );
- }
- if visible_annotation_count > 0 {
- let _ = concise.insert(
- "visible_annotations".to_owned(),
- Value::Array(visible_annotations),
- );
- }
- if hidden_annotation_count > 0 {
- let _ = concise.insert(
- "hidden_annotation_count".to_owned(),
- json!(hidden_annotation_count),
- );
- }
-
- let mut lines = vec![format!("{} {} {}", node.class, node.id, node.title)];
- if let Some(summary) = node.summary.as_ref() {
- lines.push(format!("summary: {summary}"));
- }
- if let Some(frontier_id) = node.frontier_id {
- lines.push(format!("frontier: {frontier_id}"));
- }
- if !node.tags.is_empty() {
- lines.push(format!("tags: {}", format_tags(&node.tags)));
+fn frontier_open_output(
+ projection: &FrontierOpenProjection,
+ operation: &str,
+) -> Result<ToolOutput, FaultRecord> {
+ let mut lines = vec![format!(
+ "frontier {} — {}",
+ projection.frontier.slug, projection.frontier.objective
+ )];
+ if let Some(situation) = projection.frontier.brief.situation.as_ref() {
+ lines.push(format!("situation: {}", situation));
+ }
+ if !projection.active_tags.is_empty() {
+ lines.push(format!(
+ "active tags: {}",
+ projection
+ .active_tags
+ .iter()
+ .map(ToString::to_string)
+ .collect::<Vec<_>>()
+ .join(", ")
+ ));
}
- lines.extend(payload_preview_lines(node.class, &node.payload.fields));
- if !node.diagnostics.items.is_empty() {
+ if !projection.active_metric_keys.is_empty() {
lines.push(format!(
- "diagnostics: {}",
- diagnostic_summary_text(&node.diagnostics)
+ "live metrics: {}",
+ projection
+ .active_metric_keys
+ .iter()
+ .map(|metric| metric.key.to_string())
+ .collect::<Vec<_>>()
+ .join(", ")
));
}
- if visible_annotation_count > 0 {
- lines.push(format!("visible annotations: {}", visible_annotation_count));
- for annotation in node
- .annotations
- .iter()
- .filter(|annotation| annotation.visibility == AnnotationVisibility::Visible)
- .take(4)
- {
- let label = annotation
- .label
+ if !projection.active_hypotheses.is_empty() {
+ lines.push("active hypotheses:".to_owned());
+ for state in &projection.active_hypotheses {
+ let status = state
+ .latest_closed_experiment
.as_ref()
- .map(|label| format!("{label}: "))
- .unwrap_or_default();
- lines.push(format!("annotation: {label}{}", annotation.body));
- }
- if visible_annotation_count > 4 {
+ .and_then(|experiment| experiment.verdict)
+ .map_or_else(
+ || "unjudged".to_owned(),
+ |verdict| verdict.as_str().to_owned(),
+ );
lines.push(format!(
- "... +{} more visible annotation(s)",
- visible_annotation_count - 4
+ " {} — {} | open {} | latest {}",
+ state.hypothesis.slug,
+ state.hypothesis.summary,
+ state.open_experiments.len(),
+ status
));
}
}
- if hidden_annotation_count > 0 {
- lines.push(format!("hidden annotations: {hidden_annotation_count}"));
+ if !projection.open_experiments.is_empty() {
+ lines.push("open experiments:".to_owned());
+ for experiment in &projection.open_experiments {
+ lines.push(format!(
+ " {} — {}",
+ experiment.slug,
+ experiment
+ .summary
+ .as_ref()
+ .map_or_else(|| experiment.title.to_string(), ToString::to_string)
+ ));
+ }
}
detailed_tool_output(
- &Value::Object(concise),
- node,
+ projection,
+ projection,
lines.join("\n"),
None,
FaultStage::Worker,
- "tools/call:node.read",
+ operation,
)
}
-fn experiment_close_output(
- store: &ProjectStore,
- receipt: &ExperimentReceipt,
+fn hypothesis_record_output(
+ hypothesis: &fidget_spinner_core::HypothesisRecord,
+ operation: &str,
) -> Result<ToolOutput, FaultRecord> {
- let concise = json!({
- "experiment_id": receipt.experiment.id,
- "frontier_id": receipt.experiment.frontier_id,
- "experiment_title": receipt.experiment.title,
- "verdict": metric_verdict_name(receipt.experiment.verdict),
- "run_id": receipt.run.run_id,
- "hypothesis_node_id": receipt.experiment.hypothesis_node_id,
- "decision_node_id": receipt.decision_node.id,
- "dimensions": run_dimensions_value(&receipt.experiment.result.dimensions),
- "primary_metric": metric_value(store, &receipt.experiment.result.primary_metric)?,
- });
detailed_tool_output(
- &concise,
- receipt,
- [
- format!(
- "closed experiment {} on frontier {}",
- receipt.experiment.id, receipt.experiment.frontier_id
- ),
- format!("title: {}", receipt.experiment.title),
- format!("hypothesis: {}", receipt.experiment.hypothesis_node_id),
- format!(
- "verdict: {}",
- metric_verdict_name(receipt.experiment.verdict)
- ),
- format!(
- "primary metric: {}",
- metric_text(store, &receipt.experiment.result.primary_metric)?
- ),
- format!(
- "dimensions: {}",
- render_dimension_kv(&receipt.experiment.result.dimensions)
- ),
- format!("run: {}", receipt.run.run_id),
- ]
- .join("\n"),
+ hypothesis,
+ hypothesis,
+ format!("hypothesis {} — {}", hypothesis.slug, hypothesis.summary),
None,
FaultStage::Worker,
- "tools/call:experiment.close",
+ operation,
)
}
-fn experiment_open_output(
- item: &OpenExperimentSummary,
- operation: &'static str,
- action: &'static str,
+fn hypothesis_list_output(
+ hypotheses: &[fidget_spinner_store_sqlite::HypothesisSummary],
+ operation: &str,
) -> Result<ToolOutput, FaultRecord> {
- let concise = json!({
- "experiment_id": item.id,
- "frontier_id": item.frontier_id,
- "hypothesis_node_id": item.hypothesis_node_id,
- "title": item.title,
- "summary": item.summary,
- });
+ let concise = json!({ "count": hypotheses.len(), "hypotheses": hypotheses });
detailed_tool_output(
&concise,
- item,
- [
- format!("{action} {}", item.id),
- format!("frontier: {}", item.frontier_id),
- format!("hypothesis: {}", item.hypothesis_node_id),
- format!("title: {}", item.title),
- item.summary
- .as_ref()
- .map(|summary| format!("summary: {summary}"))
- .unwrap_or_else(|| "summary: <none>".to_owned()),
- ]
- .join("\n"),
+ &concise,
+ if hypotheses.is_empty() {
+ "no hypotheses".to_owned()
+ } else {
+ hypotheses
+ .iter()
+ .map(|hypothesis| {
+ let verdict = hypothesis.latest_verdict.map_or_else(
+ || "unjudged".to_owned(),
+ |verdict| verdict.as_str().to_owned(),
+ );
+ format!(
+ "{} — {} | open {} | latest {}",
+ hypothesis.slug,
+ hypothesis.summary,
+ hypothesis.open_experiment_count,
+ verdict
+ )
+ })
+ .collect::<Vec<_>>()
+ .join("\n")
+ },
None,
FaultStage::Worker,
operation,
)
}
-fn experiment_list_output(items: &[OpenExperimentSummary]) -> Result<ToolOutput, FaultRecord> {
- let concise = items
- .iter()
- .map(|item| {
- json!({
- "experiment_id": item.id,
- "frontier_id": item.frontier_id,
- "hypothesis_node_id": item.hypothesis_node_id,
- "title": item.title,
- "summary": item.summary,
- })
- })
- .collect::<Vec<_>>();
- let mut lines = vec![format!("{} open experiment(s)", items.len())];
- lines.extend(items.iter().map(|item| {
+fn hypothesis_detail_output(
+ detail: &fidget_spinner_store_sqlite::HypothesisDetail,
+ operation: &str,
+) -> Result<ToolOutput, FaultRecord> {
+ let mut lines = vec![
format!(
- "{} {} | hypothesis={}",
- item.id, item.title, item.hypothesis_node_id,
- )
- }));
+ "hypothesis {} — {}",
+ detail.record.slug, detail.record.summary
+ ),
+ detail.record.body.to_string(),
+ ];
+ if !detail.record.tags.is_empty() {
+ lines.push(format!(
+ "tags: {}",
+ detail
+ .record
+ .tags
+ .iter()
+ .map(ToString::to_string)
+ .collect::<Vec<_>>()
+ .join(", ")
+ ));
+ }
+ lines.push(format!(
+ "parents: {} | children: {} | open experiments: {} | closed experiments: {} | artifacts: {}",
+ detail.parents.len(),
+ detail.children.len(),
+ detail.open_experiments.len(),
+ detail.closed_experiments.len(),
+ detail.artifacts.len()
+ ));
detailed_tool_output(
- &concise,
- &items,
+ detail,
+ detail,
lines.join("\n"),
None,
FaultStage::Worker,
- "tools/call:experiment.list",
+ operation,
)
}
-fn metric_keys_output(keys: &[MetricKeySummary]) -> Result<ToolOutput, FaultRecord> {
- let concise = keys
- .iter()
- .map(|key| {
- json!({
- "key": key.key,
- "source": key.source.as_str(),
- "experiment_count": key.experiment_count,
- "unit": key.unit.map(metric_unit_name),
- "objective": key.objective.map(metric_objective_name),
- "description": key.description,
- "requires_order": key.requires_order,
- })
- })
- .collect::<Vec<_>>();
- let mut lines = vec![format!("{} metric key(s)", keys.len())];
- lines.extend(keys.iter().map(|key| {
- let mut line = format!(
- "{} [{}] experiments={}",
- key.key,
- key.source.as_str(),
- key.experiment_count
+fn experiment_record_output(
+ experiment: &fidget_spinner_core::ExperimentRecord,
+ operation: &str,
+) -> Result<ToolOutput, FaultRecord> {
+ let mut line = format!("experiment {} — {}", experiment.slug, experiment.title);
+ if let Some(outcome) = experiment.outcome.as_ref() {
+ let _ = write!(
+ line,
+ " | {} {}={}",
+ outcome.verdict.as_str(),
+ outcome.primary_metric.key,
+ outcome.primary_metric.value
);
- if let Some(unit) = key.unit {
- line.push_str(format!(" unit={}", metric_unit_name(unit)).as_str());
- }
- if let Some(objective) = key.objective {
- line.push_str(format!(" objective={}", metric_objective_name(objective)).as_str());
- }
- if let Some(description) = key.description.as_ref() {
- line.push_str(format!(" | {description}").as_str());
- }
- if key.requires_order {
- line.push_str(" order=required");
- }
- line
- }));
+ } else {
+ let _ = write!(line, " | open");
+ }
detailed_tool_output(
- &concise,
- &keys,
- lines.join("\n"),
+ experiment,
+ experiment,
+ line,
None,
FaultStage::Worker,
- "tools/call:metric.keys",
+ operation,
)
}
-fn metric_best_output(
- items: &[fidget_spinner_store_sqlite::MetricBestEntry],
+fn experiment_list_output(
+ experiments: &[fidget_spinner_store_sqlite::ExperimentSummary],
+ operation: &str,
) -> Result<ToolOutput, FaultRecord> {
- let concise = items
- .iter()
- .enumerate()
- .map(|(index, item)| {
- json!({
- "rank": index + 1,
- "key": item.key,
- "source": item.source.as_str(),
- "value": item.value,
- "order": item.order.as_str(),
- "experiment_id": item.experiment_id,
- "experiment_title": item.experiment_title,
- "frontier_id": item.frontier_id,
- "hypothesis_node_id": item.hypothesis_node_id,
- "hypothesis_title": item.hypothesis_title,
- "verdict": metric_verdict_name(item.verdict),
- "run_id": item.run_id,
- "unit": item.unit.map(metric_unit_name),
- "objective": item.objective.map(metric_objective_name),
- "dimensions": run_dimensions_value(&item.dimensions),
- })
- })
- .collect::<Vec<_>>();
- let mut lines = vec![format!("{} ranked experiment(s)", items.len())];
- lines.extend(items.iter().enumerate().map(|(index, item)| {
- format!(
- "{}. {}={} [{}] {} | verdict={} | hypothesis={}",
- index + 1,
- item.key,
- item.value,
- item.source.as_str(),
- item.experiment_title,
- metric_verdict_name(item.verdict),
- item.hypothesis_title,
- )
- }));
- lines.extend(
- items
- .iter()
- .map(|item| format!(" dims: {}", render_dimension_kv(&item.dimensions))),
- );
+ let concise = json!({ "count": experiments.len(), "experiments": experiments });
detailed_tool_output(
&concise,
- &items,
- lines.join("\n"),
+ &concise,
+ if experiments.is_empty() {
+ "no experiments".to_owned()
+ } else {
+ experiments
+ .iter()
+ .map(|experiment| {
+ let status = experiment.verdict.map_or_else(
+ || experiment.status.as_str().to_owned(),
+ |verdict| verdict.as_str().to_owned(),
+ );
+ let metric = experiment
+ .primary_metric
+ .as_ref()
+ .map_or_else(String::new, |metric| {
+ format!(" | {}={}", metric.key, metric.value)
+ });
+ format!(
+ "{} — {} | {}{}",
+ experiment.slug, experiment.title, status, metric
+ )
+ })
+ .collect::<Vec<_>>()
+ .join("\n")
+ },
None,
FaultStage::Worker,
- "tools/call:metric.best",
+ operation,
)
}
-fn run_dimension_list_output(
- items: &[fidget_spinner_store_sqlite::RunDimensionSummary],
+fn experiment_detail_output(
+ detail: &fidget_spinner_store_sqlite::ExperimentDetail,
+ operation: &str,
) -> Result<ToolOutput, FaultRecord> {
- let concise = items
- .iter()
- .map(|item| {
- json!({
- "key": item.key,
- "value_type": item.value_type.as_str(),
- "description": item.description,
- "observed_run_count": item.observed_run_count,
- "distinct_value_count": item.distinct_value_count,
- "sample_values": item.sample_values,
- })
- })
- .collect::<Vec<_>>();
- let mut lines = vec![format!("{} run dimension(s)", items.len())];
- lines.extend(items.iter().map(|item| {
- let mut line = format!(
- "{} [{}] runs={} distinct={}",
- item.key,
- item.value_type.as_str(),
- item.observed_run_count,
- item.distinct_value_count
- );
- if let Some(description) = item.description.as_ref() {
- line.push_str(format!(" | {description}").as_str());
- }
- if !item.sample_values.is_empty() {
- line.push_str(
- format!(
- " | samples={}",
- item.sample_values
- .iter()
- .map(value_summary)
- .collect::<Vec<_>>()
- .join(", ")
- )
- .as_str(),
- );
- }
- line
- }));
+ let mut lines = vec![format!(
+ "experiment {} — {}",
+ detail.record.slug, detail.record.title
+ )];
+ lines.push(format!("hypothesis: {}", detail.owning_hypothesis.slug));
+ lines.push(format!(
+ "status: {}",
+ detail.record.outcome.as_ref().map_or_else(
+ || "open".to_owned(),
+ |outcome| outcome.verdict.as_str().to_owned()
+ )
+ ));
+ if let Some(outcome) = detail.record.outcome.as_ref() {
+ lines.push(format!(
+ "primary metric: {}={}",
+ outcome.primary_metric.key, outcome.primary_metric.value
+ ));
+ lines.push(format!("rationale: {}", outcome.rationale));
+ }
+ lines.push(format!(
+ "parents: {} | children: {} | artifacts: {}",
+ detail.parents.len(),
+ detail.children.len(),
+ detail.artifacts.len()
+ ));
detailed_tool_output(
- &concise,
- &items,
+ detail,
+ detail,
lines.join("\n"),
None,
FaultStage::Worker,
- "tools/call:run.dimension.list",
+ operation,
)
}
-fn json_created_output(
- headline: &str,
- structured: Value,
- operation: &'static str,
+fn artifact_record_output(
+ artifact: &fidget_spinner_core::ArtifactRecord,
+ operation: &str,
) -> Result<ToolOutput, FaultRecord> {
detailed_tool_output(
- &structured,
- &structured,
+ artifact,
+ artifact,
format!(
- "{headline}\n{}",
- crate::to_pretty_json(&structured).map_err(store_fault(operation))?
+ "artifact {} — {} -> {}",
+ artifact.slug, artifact.label, artifact.locator
),
None,
FaultStage::Worker,
@@ -1580,938 +1352,146 @@ fn json_created_output(
)
}
-fn project_schema_field_value(field: &ProjectFieldSpec) -> Value {
- let mut value = Map::new();
- let _ = value.insert("name".to_owned(), json!(field.name));
- if !field.node_classes.is_empty() {
- let _ = value.insert(
- "node_classes".to_owned(),
- json!(
- field
- .node_classes
- .iter()
- .map(ToString::to_string)
- .collect::<Vec<_>>()
- ),
- );
- }
- let _ = value.insert("presence".to_owned(), json!(field.presence.as_str()));
- let _ = value.insert("severity".to_owned(), json!(field.severity.as_str()));
- let _ = value.insert("role".to_owned(), json!(field.role.as_str()));
- let _ = value.insert(
- "inference_policy".to_owned(),
- json!(field.inference_policy.as_str()),
- );
- if let Some(value_type) = field.value_type {
- let _ = value.insert("value_type".to_owned(), json!(value_type.as_str()));
- }
- Value::Object(value)
-}
-
-fn render_schema_node_classes(node_classes: &BTreeSet<NodeClass>) -> String {
- if node_classes.is_empty() {
- return "any".to_owned();
- }
- node_classes
- .iter()
- .map(ToString::to_string)
- .collect::<Vec<_>>()
- .join(", ")
-}
-
-fn frontier_projection_summary_value(projection: &FrontierProjection) -> Value {
- json!({
- "frontier_id": projection.frontier.id,
- "label": projection.frontier.label,
- "status": format!("{:?}", projection.frontier.status).to_ascii_lowercase(),
- "open_experiment_count": projection.open_experiment_count,
- "completed_experiment_count": projection.completed_experiment_count,
- "verdict_counts": projection.verdict_counts,
- })
-}
-
-fn frontier_projection_text(prefix: &str, projection: &FrontierProjection) -> String {
- [
- format!(
- "{prefix} {} {}",
- projection.frontier.id, projection.frontier.label
- ),
- format!(
- "status: {}",
- format!("{:?}", projection.frontier.status).to_ascii_lowercase()
- ),
- format!("open experiments: {}", projection.open_experiment_count),
- format!(
- "completed experiments: {}",
- projection.completed_experiment_count
- ),
- format!(
- "verdicts: accepted={} kept={} parked={} rejected={}",
- projection.verdict_counts.accepted,
- projection.verdict_counts.kept,
- projection.verdict_counts.parked,
- projection.verdict_counts.rejected,
- ),
- ]
- .join("\n")
-}
-
-fn node_summary_value(node: &NodeSummary) -> Value {
- let mut value = Map::new();
- let _ = value.insert("id".to_owned(), json!(node.id));
- let _ = value.insert("class".to_owned(), json!(node.class.as_str()));
- let _ = value.insert("title".to_owned(), json!(node.title));
- if let Some(summary) = node.summary.as_ref() {
- let _ = value.insert("summary".to_owned(), json!(summary));
- }
- if let Some(frontier_id) = node.frontier_id {
- let _ = value.insert("frontier_id".to_owned(), json!(frontier_id));
- }
- if !node.tags.is_empty() {
- let _ = value.insert(
- "tags".to_owned(),
- json!(
- node.tags
- .iter()
- .map(ToString::to_string)
- .collect::<Vec<_>>()
- ),
- );
- }
- if node.archived {
- let _ = value.insert("archived".to_owned(), json!(true));
- }
- if node.diagnostic_count > 0 {
- let _ = value.insert("diagnostic_count".to_owned(), json!(node.diagnostic_count));
- }
- if node.hidden_annotation_count > 0 {
- let _ = value.insert(
- "hidden_annotation_count".to_owned(),
- json!(node.hidden_annotation_count),
- );
- }
- Value::Object(value)
-}
-
-fn node_brief_value(node: &fidget_spinner_core::DagNode) -> Value {
- let mut value = Map::new();
- let _ = value.insert("id".to_owned(), json!(node.id));
- let _ = value.insert("class".to_owned(), json!(node.class.as_str()));
- let _ = value.insert("title".to_owned(), json!(node.title));
- if let Some(summary) = node.summary.as_ref() {
- let _ = value.insert("summary".to_owned(), json!(summary));
- }
- if let Some(frontier_id) = node.frontier_id {
- let _ = value.insert("frontier_id".to_owned(), json!(frontier_id));
- }
- if !node.tags.is_empty() {
- let _ = value.insert(
- "tags".to_owned(),
- json!(
- node.tags
- .iter()
- .map(ToString::to_string)
- .collect::<Vec<_>>()
- ),
- );
- }
- if !node.diagnostics.items.is_empty() {
- let _ = value.insert(
- "diagnostics".to_owned(),
- diagnostic_summary_value(&node.diagnostics),
- );
- }
- Value::Object(value)
-}
-
-fn render_node_summary_line(node: &NodeSummary) -> String {
- let mut line = format!("{} {} {}", node.class, node.id, node.title);
- if let Some(summary) = node.summary.as_ref() {
- line.push_str(format!(" | {summary}").as_str());
- }
- if let Some(frontier_id) = node.frontier_id {
- line.push_str(format!(" | frontier={frontier_id}").as_str());
- }
- if !node.tags.is_empty() {
- line.push_str(format!(" | tags={}", format_tags(&node.tags)).as_str());
- }
- if node.diagnostic_count > 0 {
- line.push_str(format!(" | diag={}", node.diagnostic_count).as_str());
- }
- if node.hidden_annotation_count > 0 {
- line.push_str(format!(" | hidden-ann={}", node.hidden_annotation_count).as_str());
- }
- if node.archived {
- line.push_str(" | archived");
- }
- line
-}
-
-fn diagnostic_summary_value(diagnostics: &fidget_spinner_core::NodeDiagnostics) -> Value {
- let tally = diagnostic_tally(diagnostics);
- json!({
- "admission": match diagnostics.admission {
- AdmissionState::Admitted => "admitted",
- AdmissionState::Rejected => "rejected",
- },
- "count": tally.total,
- "error_count": tally.errors,
- "warning_count": tally.warnings,
- "info_count": tally.infos,
- })
-}
-
-fn diagnostic_summary_text(diagnostics: &fidget_spinner_core::NodeDiagnostics) -> String {
- let tally = diagnostic_tally(diagnostics);
- let mut parts = vec![format!("{}", tally.total)];
- if tally.errors > 0 {
- parts.push(format!("{} error", tally.errors));
- }
- if tally.warnings > 0 {
- parts.push(format!("{} warning", tally.warnings));
- }
- if tally.infos > 0 {
- parts.push(format!("{} info", tally.infos));
- }
- format!(
- "{} ({})",
- match diagnostics.admission {
- AdmissionState::Admitted => "admitted",
- AdmissionState::Rejected => "rejected",
+fn artifact_list_output(
+ artifacts: &[fidget_spinner_store_sqlite::ArtifactSummary],
+ operation: &str,
+) -> Result<ToolOutput, FaultRecord> {
+ let concise = json!({ "count": artifacts.len(), "artifacts": artifacts });
+ detailed_tool_output(
+ &concise,
+ &concise,
+ if artifacts.is_empty() {
+ "no artifacts".to_owned()
+ } else {
+ artifacts
+ .iter()
+ .map(|artifact| {
+ format!(
+ "{} — {} -> {}",
+ artifact.slug, artifact.label, artifact.locator
+ )
+ })
+ .collect::<Vec<_>>()
+ .join("\n")
},
- parts.join(", ")
+ None,
+ FaultStage::Worker,
+ operation,
)
}
-fn diagnostic_tally(diagnostics: &fidget_spinner_core::NodeDiagnostics) -> DiagnosticTally {
- diagnostics
- .items
- .iter()
- .fold(DiagnosticTally::default(), |mut tally, item| {
- tally.total += 1;
- match item.severity {
- DiagnosticSeverity::Error => tally.errors += 1,
- DiagnosticSeverity::Warning => tally.warnings += 1,
- DiagnosticSeverity::Info => tally.infos += 1,
- }
- tally
- })
-}
-
-fn payload_preview_value(class: NodeClass, fields: &Map<String, Value>) -> Value {
- let mut preview = Map::new();
- for (index, (name, value)) in filtered_payload_fields(class, fields).enumerate() {
- if index == 6 {
- let _ = preview.insert(
- "...".to_owned(),
- json!(format!("+{} more field(s)", fields.len() - index)),
- );
- break;
- }
- let _ = preview.insert(name.clone(), payload_value_preview(value));
- }
- Value::Object(preview)
-}
-
-fn payload_preview_lines(class: NodeClass, fields: &Map<String, Value>) -> Vec<String> {
- let filtered = filtered_payload_fields(class, fields).collect::<Vec<_>>();
- if filtered.is_empty() {
- return Vec::new();
- }
- if is_prose_node(class) {
- let preview_names = filtered
- .iter()
- .take(6)
- .map(|(name, _)| (*name).clone())
- .collect::<Vec<_>>();
- let mut lines = vec![format!("payload fields: {}", preview_names.join(", "))];
- if filtered.len() > preview_names.len() {
- lines.push(format!(
- "payload fields: +{} more field(s)",
- filtered.len() - preview_names.len()
- ));
- }
- return lines;
- }
- let mut lines = vec![format!("payload fields: {}", filtered.len())];
- for (index, (name, value)) in filtered.iter().enumerate() {
- if index == 6 {
- lines.push(format!(
- "payload: +{} more field(s)",
- filtered.len() - index
- ));
- break;
- }
- lines.push(format!(
- "payload.{}: {}",
- name,
- value_summary(&payload_value_preview(value))
- ));
+fn artifact_detail_output(
+ detail: &fidget_spinner_store_sqlite::ArtifactDetail,
+ operation: &str,
+) -> Result<ToolOutput, FaultRecord> {
+ let mut lines = vec![format!(
+ "artifact {} — {} -> {}",
+ detail.record.slug, detail.record.label, detail.record.locator
+ )];
+ if !detail.attachments.is_empty() {
+ lines.push(format!("attachments: {}", detail.attachments.len()));
}
- lines
+ detailed_tool_output(
+ detail,
+ detail,
+ lines.join("\n"),
+ None,
+ FaultStage::Worker,
+ operation,
+ )
}
-fn filtered_payload_fields(
- class: NodeClass,
- fields: &Map<String, Value>,
-) -> impl Iterator<Item = (&String, &Value)> + '_ {
- fields.iter().filter(move |(name, _)| {
- !matches!(class, NodeClass::Note | NodeClass::Source) || name.as_str() != "body"
- })
+fn metric_keys_output(
+ keys: &[MetricKeySummary],
+ operation: &str,
+) -> Result<ToolOutput, FaultRecord> {
+ let concise = json!({ "count": keys.len(), "metrics": keys });
+ detailed_tool_output(
+ &concise,
+ &concise,
+ if keys.is_empty() {
+ "no metrics".to_owned()
+ } else {
+ keys.iter()
+ .map(|metric| {
+ format!(
+ "{} [{} {} {}] refs={}",
+ metric.key,
+ metric.unit.as_str(),
+ metric.objective.as_str(),
+ metric.visibility.as_str(),
+ metric.reference_count
+ )
+ })
+ .collect::<Vec<_>>()
+ .join("\n")
+ },
+ None,
+ FaultStage::Worker,
+ operation,
+ )
}
-fn payload_value_preview(value: &Value) -> Value {
- match value {
- Value::Null | Value::Bool(_) | Value::Number(_) => value.clone(),
- Value::String(text) => Value::String(truncated_inline_preview(text, 96)),
- Value::Array(items) => {
- let preview = items
+fn metric_best_output(
+ entries: &[MetricBestEntry],
+ operation: &str,
+) -> Result<ToolOutput, FaultRecord> {
+ let concise = json!({ "count": entries.len(), "entries": entries });
+ detailed_tool_output(
+ &concise,
+ &concise,
+ if entries.is_empty() {
+ "no matching experiments".to_owned()
+ } else {
+ entries
.iter()
- .take(3)
- .map(payload_value_preview)
- .collect::<Vec<_>>();
- if items.len() > 3 {
- json!({
- "items": preview,
- "truncated": true,
- "total_count": items.len(),
+ .enumerate()
+ .map(|(index, entry)| {
+ format!(
+ "{}. {} / {} = {} ({})",
+ index + 1,
+ entry.experiment.slug,
+ entry.hypothesis.slug,
+ entry.value,
+ entry.experiment.verdict.map_or_else(
+ || entry.experiment.status.as_str().to_owned(),
+ |verdict| verdict.as_str().to_owned()
+ )
+ )
})
- } else {
- Value::Array(preview)
- }
- }
- Value::Object(object) => {
- let mut preview = Map::new();
- for (index, (name, nested)) in object.iter().enumerate() {
- if index == 4 {
- let _ = preview.insert(
- "...".to_owned(),
- json!(format!("+{} more field(s)", object.len() - index)),
- );
- break;
- }
- let _ = preview.insert(name.clone(), payload_value_preview(nested));
- }
- Value::Object(preview)
- }
- }
-}
-
-fn is_prose_node(class: NodeClass) -> bool {
- matches!(class, NodeClass::Note | NodeClass::Source)
-}
-
-fn truncated_inline_preview(text: &str, limit: usize) -> String {
- let collapsed = libmcp::collapse_inline_whitespace(text);
- let truncated = libmcp::render::truncate_chars(&collapsed, Some(limit));
- if truncated.truncated {
- format!("{}...", truncated.text)
- } else {
- truncated.text
- }
-}
-
-fn metric_value(store: &ProjectStore, metric: &MetricValue) -> Result<Value, FaultRecord> {
- let definition = metric_definition(store, &metric.key)?;
- Ok(json!({
- "key": metric.key,
- "value": metric.value,
- "unit": metric_unit_name(definition.unit),
- "objective": metric_objective_name(definition.objective),
- }))
-}
-
-fn metric_text(store: &ProjectStore, metric: &MetricValue) -> Result<String, FaultRecord> {
- let definition = metric_definition(store, &metric.key)?;
- Ok(format!(
- "{}={} {} ({})",
- metric.key,
- metric.value,
- metric_unit_name(definition.unit),
- metric_objective_name(definition.objective),
- ))
-}
-
-fn metric_unit_name(unit: MetricUnit) -> &'static str {
- match unit {
- MetricUnit::Seconds => "seconds",
- MetricUnit::Bytes => "bytes",
- MetricUnit::Count => "count",
- MetricUnit::Ratio => "ratio",
- MetricUnit::Custom => "custom",
- }
-}
-
-fn metric_objective_name(objective: fidget_spinner_core::OptimizationObjective) -> &'static str {
- match objective {
- fidget_spinner_core::OptimizationObjective::Minimize => "minimize",
- fidget_spinner_core::OptimizationObjective::Maximize => "maximize",
- fidget_spinner_core::OptimizationObjective::Target => "target",
- }
-}
-
-fn metric_verdict_name(verdict: FrontierVerdict) -> &'static str {
- match verdict {
- FrontierVerdict::Accepted => "accepted",
- FrontierVerdict::Kept => "kept",
- FrontierVerdict::Parked => "parked",
- FrontierVerdict::Rejected => "rejected",
- }
-}
-
-fn run_dimensions_value(dimensions: &BTreeMap<NonEmptyText, RunDimensionValue>) -> Value {
- Value::Object(
- dimensions
- .iter()
- .map(|(key, value)| (key.to_string(), value.as_json()))
- .collect::<Map<String, Value>>(),
+ .collect::<Vec<_>>()
+ .join("\n")
+ },
+ None,
+ FaultStage::Worker,
+ operation,
)
}
-fn render_dimension_kv(dimensions: &BTreeMap<NonEmptyText, RunDimensionValue>) -> String {
- if dimensions.is_empty() {
- return "none".to_owned();
- }
- dimensions
- .iter()
- .map(|(key, value)| format!("{key}={}", value_summary(&value.as_json())))
- .collect::<Vec<_>>()
- .join(", ")
-}
-
-fn format_tags(tags: &BTreeSet<TagName>) -> String {
- tags.iter()
- .map(ToString::to_string)
- .collect::<Vec<_>>()
- .join(", ")
-}
-
-fn schema_label(schema: &ProjectSchema) -> String {
- format!("{}@{}", schema.namespace, schema.version)
-}
-
-fn value_summary(value: &Value) -> String {
- match value {
- Value::Null => "null".to_owned(),
- Value::Bool(flag) => flag.to_string(),
- Value::Number(number) => number.to_string(),
- Value::String(text) => text.clone(),
- Value::Array(items) => format!("{} item(s)", items.len()),
- Value::Object(object) => format!("{} field(s)", object.len()),
- }
-}
-
-#[derive(Default)]
-struct DiagnosticTally {
- total: usize,
- errors: usize,
- warnings: usize,
- infos: usize,
-}
-
-fn store_fault<E>(operation: &'static str) -> impl FnOnce(E) -> FaultRecord
-where
- E: std::fmt::Display,
-{
- move |error| {
- FaultRecord::new(
- classify_fault_kind(&error.to_string()),
- FaultStage::Store,
- operation,
- error.to_string(),
- )
- }
-}
-
-fn classify_fault_kind(message: &str) -> FaultKind {
- if message.contains("was not found")
- || message.contains("invalid")
- || message.contains("unknown")
- || message.contains("empty")
- || message.contains("already exists")
- || message.contains("require an explicit tag list")
- || message.contains("requires a non-empty summary")
- || message.contains("requires a non-empty string payload field `body`")
- || message.contains("requires an explicit order")
- || message.contains("is ambiguous across sources")
- || message.contains("has conflicting semantics")
- || message.contains("conflicts with existing definition")
- {
- FaultKind::InvalidInput
- } else {
- FaultKind::Internal
- }
-}
-
-fn tool_annotations(raw: Vec<WireAnnotation>) -> Result<Vec<NodeAnnotation>, StoreError> {
- raw.into_iter()
- .map(|annotation| {
- Ok(NodeAnnotation {
- id: fidget_spinner_core::AnnotationId::fresh(),
- visibility: if annotation.visible {
- AnnotationVisibility::Visible
- } else {
- AnnotationVisibility::HiddenByDefault
- },
- label: annotation.label.map(NonEmptyText::new).transpose()?,
- body: NonEmptyText::new(annotation.body)?,
- created_at: time::OffsetDateTime::now_utc(),
- })
- })
- .collect()
-}
-
-fn lineage_attachments(parents: Vec<String>) -> Result<Vec<EdgeAttachment>, StoreError> {
- parents
- .into_iter()
- .map(|parent| {
- Ok(EdgeAttachment {
- node_id: crate::parse_node_id(&parent)?,
- kind: fidget_spinner_core::EdgeKind::Lineage,
- direction: EdgeAttachmentDirection::ExistingToNew,
- })
- })
- .collect()
-}
-
-fn parse_tag_set(values: Vec<String>) -> Result<BTreeSet<TagName>, StoreError> {
- values
- .into_iter()
- .map(TagName::new)
- .collect::<Result<BTreeSet<_>, _>>()
- .map_err(StoreError::from)
-}
-
-fn metric_spec_from_wire(raw: WireMetricSpec) -> Result<MetricSpec, StoreError> {
- Ok(MetricSpec {
- metric_key: NonEmptyText::new(raw.key)?,
- unit: parse_metric_unit_name(&raw.unit)?,
- objective: crate::parse_optimization_objective(&raw.objective)?,
- })
-}
-
-fn metric_value_from_wire(raw: WireMetricValue) -> Result<MetricValue, StoreError> {
- Ok(MetricValue {
- key: NonEmptyText::new(raw.key)?,
- value: raw.value,
- })
-}
-
-fn experiment_analysis_from_wire(raw: WireAnalysis) -> Result<ExperimentAnalysisDraft, StoreError> {
- Ok(ExperimentAnalysisDraft {
- title: NonEmptyText::new(raw.title)?,
- summary: NonEmptyText::new(raw.summary)?,
- body: NonEmptyText::new(raw.body)?,
- })
-}
-
-fn metric_definition(store: &ProjectStore, key: &NonEmptyText) -> Result<MetricSpec, FaultRecord> {
- store
- .list_metric_definitions()
- .map_err(store_fault("tools/call:experiment.close"))?
- .into_iter()
- .find(|definition| definition.key == *key)
- .map(|definition| MetricSpec {
- metric_key: definition.key,
- unit: definition.unit,
- objective: definition.objective,
- })
- .ok_or_else(|| {
- FaultRecord::new(
- FaultKind::InvalidInput,
- FaultStage::Store,
- "tools/call:experiment.close",
- format!("metric `{key}` is not registered"),
- )
- })
-}
-
-fn coerce_tool_dimensions(
- store: &ProjectStore,
- raw_dimensions: BTreeMap<String, Value>,
- operation: &'static str,
-) -> Result<BTreeMap<NonEmptyText, RunDimensionValue>, FaultRecord> {
- store
- .coerce_run_dimensions(raw_dimensions)
- .map_err(store_fault(operation))
-}
-
-fn command_recipe_from_wire(
- raw: WireRunCommand,
- project_root: &Utf8Path,
-) -> Result<CommandRecipe, StoreError> {
- let working_directory = raw
- .working_directory
- .map(Utf8PathBuf::from)
- .unwrap_or_else(|| project_root.to_path_buf());
- CommandRecipe::new(
- working_directory,
- crate::to_text_vec(raw.argv)?,
- raw.env.into_iter().collect::<BTreeMap<_, _>>(),
+fn history_output(
+ history: &[EntityHistoryEntry],
+ operation: &str,
+) -> Result<ToolOutput, FaultRecord> {
+ let concise = json!({ "count": history.len(), "history": history });
+ detailed_tool_output(
+ &concise,
+ &concise,
+ if history.is_empty() {
+ "no history".to_owned()
+ } else {
+ history
+ .iter()
+ .map(|entry| {
+ format!(
+ "rev {} {} @ {}",
+ entry.revision, entry.event_kind, entry.occurred_at
+ )
+ })
+ .collect::<Vec<_>>()
+ .join("\n")
+ },
+ None,
+ FaultStage::Worker,
+ operation,
)
- .map_err(StoreError::from)
-}
-
-fn parse_node_class_name(raw: &str) -> Result<NodeClass, StoreError> {
- match raw {
- "contract" => Ok(NodeClass::Contract),
- "hypothesis" => Ok(NodeClass::Hypothesis),
- "run" => Ok(NodeClass::Run),
- "analysis" => Ok(NodeClass::Analysis),
- "decision" => Ok(NodeClass::Decision),
- "source" => Ok(NodeClass::Source),
- "note" => Ok(NodeClass::Note),
- other => Err(crate::invalid_input(format!(
- "unknown node class `{other}`"
- ))),
- }
-}
-
-fn parse_metric_unit_name(raw: &str) -> Result<MetricUnit, StoreError> {
- crate::parse_metric_unit(raw)
-}
-
-fn parse_metric_source_name(raw: &str) -> Result<MetricFieldSource, StoreError> {
- match raw {
- "run_metric" => Ok(MetricFieldSource::RunMetric),
- "hypothesis_payload" => Ok(MetricFieldSource::HypothesisPayload),
- "run_payload" => Ok(MetricFieldSource::RunPayload),
- "analysis_payload" => Ok(MetricFieldSource::AnalysisPayload),
- "decision_payload" => Ok(MetricFieldSource::DecisionPayload),
- other => Err(StoreError::Json(serde_json::Error::io(
- std::io::Error::new(
- std::io::ErrorKind::InvalidInput,
- format!("unknown metric source `{other}`"),
- ),
- ))),
- }
-}
-
-fn parse_metric_order_name(raw: &str) -> Result<MetricRankOrder, StoreError> {
- match raw {
- "asc" => Ok(MetricRankOrder::Asc),
- "desc" => Ok(MetricRankOrder::Desc),
- other => Err(StoreError::Json(serde_json::Error::io(
- std::io::Error::new(
- std::io::ErrorKind::InvalidInput,
- format!("unknown metric order `{other}`"),
- ),
- ))),
- }
-}
-
-fn parse_field_value_type_name(raw: &str) -> Result<FieldValueType, StoreError> {
- match raw {
- "string" => Ok(FieldValueType::String),
- "numeric" => Ok(FieldValueType::Numeric),
- "boolean" => Ok(FieldValueType::Boolean),
- "timestamp" => Ok(FieldValueType::Timestamp),
- other => Err(crate::invalid_input(format!(
- "unknown field value type `{other}`"
- ))),
- }
-}
-
-fn parse_diagnostic_severity_name(raw: &str) -> Result<DiagnosticSeverity, StoreError> {
- match raw {
- "error" => Ok(DiagnosticSeverity::Error),
- "warning" => Ok(DiagnosticSeverity::Warning),
- "info" => Ok(DiagnosticSeverity::Info),
- other => Err(crate::invalid_input(format!(
- "unknown diagnostic severity `{other}`"
- ))),
- }
-}
-
-fn parse_field_presence_name(raw: &str) -> Result<FieldPresence, StoreError> {
- match raw {
- "required" => Ok(FieldPresence::Required),
- "recommended" => Ok(FieldPresence::Recommended),
- "optional" => Ok(FieldPresence::Optional),
- other => Err(crate::invalid_input(format!(
- "unknown field presence `{other}`"
- ))),
- }
-}
-
-fn parse_field_role_name(raw: &str) -> Result<FieldRole, StoreError> {
- match raw {
- "index" => Ok(FieldRole::Index),
- "projection_gate" => Ok(FieldRole::ProjectionGate),
- "render_only" => Ok(FieldRole::RenderOnly),
- "opaque" => Ok(FieldRole::Opaque),
- other => Err(crate::invalid_input(format!(
- "unknown field role `{other}`"
- ))),
- }
-}
-
-fn parse_inference_policy_name(raw: &str) -> Result<InferencePolicy, StoreError> {
- match raw {
- "manual_only" => Ok(InferencePolicy::ManualOnly),
- "model_may_infer" => Ok(InferencePolicy::ModelMayInfer),
- other => Err(crate::invalid_input(format!(
- "unknown inference policy `{other}`"
- ))),
- }
-}
-
-fn parse_backend_name(raw: &str) -> Result<ExecutionBackend, StoreError> {
- match raw {
- "local_process" => Ok(ExecutionBackend::LocalProcess),
- "worktree_process" => Ok(ExecutionBackend::WorktreeProcess),
- "ssh_process" => Ok(ExecutionBackend::SshProcess),
- other => Err(crate::invalid_input(format!("unknown backend `{other}`"))),
- }
-}
-
-fn parse_verdict_name(raw: &str) -> Result<FrontierVerdict, StoreError> {
- match raw {
- "accepted" => Ok(FrontierVerdict::Accepted),
- "kept" => Ok(FrontierVerdict::Kept),
- "parked" => Ok(FrontierVerdict::Parked),
- "rejected" => Ok(FrontierVerdict::Rejected),
- other => Err(crate::invalid_input(format!("unknown verdict `{other}`"))),
- }
-}
-
-#[derive(Debug, Deserialize)]
-struct FrontierStatusToolArgs {
- frontier_id: String,
-}
-
-#[derive(Debug, Deserialize)]
-struct TagAddToolArgs {
- name: String,
- description: String,
-}
-
-#[derive(Debug, Deserialize)]
-struct FrontierInitToolArgs {
- label: String,
- objective: String,
- contract_title: String,
- contract_summary: Option<String>,
- benchmark_suites: Vec<String>,
- promotion_criteria: Vec<String>,
- primary_metric: WireMetricSpec,
- #[serde(default)]
- supporting_metrics: Vec<WireMetricSpec>,
-}
-
-#[derive(Debug, Deserialize)]
-struct NodeCreateToolArgs {
- class: String,
- frontier_id: Option<String>,
- title: String,
- summary: Option<String>,
- tags: Option<Vec<String>>,
- #[serde(default)]
- payload: Option<Map<String, Value>>,
- #[serde(default)]
- annotations: Vec<WireAnnotation>,
- #[serde(default)]
- parents: Vec<String>,
-}
-
-#[derive(Debug, Deserialize)]
-struct HypothesisRecordToolArgs {
- frontier_id: String,
- title: String,
- summary: String,
- body: String,
- #[serde(default)]
- annotations: Vec<WireAnnotation>,
- #[serde(default)]
- parents: Vec<String>,
-}
-
-#[derive(Debug, Deserialize)]
-struct NodeListToolArgs {
- frontier_id: Option<String>,
- class: Option<String>,
- #[serde(default)]
- tags: Vec<String>,
- #[serde(default)]
- include_archived: bool,
- limit: Option<u32>,
-}
-
-#[derive(Debug, Deserialize)]
-struct NodeReadToolArgs {
- node_id: String,
-}
-
-#[derive(Debug, Deserialize)]
-struct NodeAnnotateToolArgs {
- node_id: String,
- body: String,
- label: Option<String>,
- #[serde(default)]
- visible: bool,
-}
-
-#[derive(Debug, Deserialize)]
-struct NodeArchiveToolArgs {
- node_id: String,
-}
-
-#[derive(Debug, Deserialize)]
-struct QuickNoteToolArgs {
- frontier_id: Option<String>,
- title: String,
- summary: String,
- body: String,
- tags: Vec<String>,
- #[serde(default)]
- annotations: Vec<WireAnnotation>,
- #[serde(default)]
- parents: Vec<String>,
-}
-
-#[derive(Debug, Deserialize)]
-struct SourceRecordToolArgs {
- frontier_id: Option<String>,
- title: String,
- summary: String,
- body: String,
- tags: Option<Vec<String>>,
- #[serde(default)]
- annotations: Vec<WireAnnotation>,
- #[serde(default)]
- parents: Vec<String>,
-}
-
-#[derive(Debug, Deserialize)]
-struct SchemaFieldUpsertToolArgs {
- name: String,
- node_classes: Option<Vec<String>>,
- presence: String,
- severity: String,
- role: String,
- inference_policy: String,
- value_type: Option<String>,
-}
-
-#[derive(Debug, Deserialize)]
-struct SchemaFieldRemoveToolArgs {
- name: String,
- node_classes: Option<Vec<String>>,
-}
-
-#[derive(Debug, Deserialize)]
-struct MetricDefineToolArgs {
- key: String,
- unit: String,
- objective: String,
- description: Option<String>,
-}
-
-#[derive(Debug, Deserialize)]
-struct RunDimensionDefineToolArgs {
- key: String,
- value_type: String,
- description: Option<String>,
-}
-
-#[derive(Debug, Deserialize, Default)]
-struct MetricKeysToolArgs {
- frontier_id: Option<String>,
- source: Option<String>,
- dimensions: Option<BTreeMap<String, Value>>,
-}
-
-#[derive(Debug, Deserialize)]
-struct MetricBestToolArgs {
- key: String,
- frontier_id: Option<String>,
- source: Option<String>,
- dimensions: Option<BTreeMap<String, Value>>,
- order: Option<String>,
- limit: Option<u32>,
-}
-
-#[derive(Debug, Deserialize)]
-struct ExperimentOpenToolArgs {
- frontier_id: String,
- hypothesis_node_id: String,
- title: String,
- summary: Option<String>,
-}
-
-#[derive(Debug, Deserialize, Default)]
-struct ExperimentListToolArgs {
- frontier_id: Option<String>,
-}
-
-#[derive(Debug, Deserialize)]
-struct ExperimentReadToolArgs {
- experiment_id: String,
-}
-
-#[derive(Debug, Deserialize)]
-struct ExperimentCloseToolArgs {
- experiment_id: String,
- run: WireRun,
- primary_metric: WireMetricValue,
- #[serde(default)]
- supporting_metrics: Vec<WireMetricValue>,
- note: WireFrontierNote,
- verdict: String,
- decision_title: String,
- decision_rationale: String,
- analysis: Option<WireAnalysis>,
-}
-
-#[derive(Debug, Deserialize)]
-struct WireAnnotation {
- body: String,
- label: Option<String>,
- #[serde(default)]
- visible: bool,
-}
-
-#[derive(Debug, Deserialize)]
-struct WireMetricSpec {
- key: String,
- unit: String,
- objective: String,
-}
-
-#[derive(Debug, Deserialize)]
-struct WireMetricValue {
- key: String,
- value: f64,
-}
-
-#[derive(Debug, Deserialize)]
-struct WireRun {
- title: String,
- summary: Option<String>,
- backend: String,
- #[serde(default)]
- dimensions: BTreeMap<String, Value>,
- command: WireRunCommand,
-}
-
-#[derive(Debug, Deserialize)]
-struct WireAnalysis {
- title: String,
- summary: String,
- body: String,
-}
-
-#[derive(Debug, Deserialize)]
-struct WireRunCommand {
- working_directory: Option<String>,
- argv: Vec<String>,
- #[serde(default)]
- env: BTreeMap<String, String>,
-}
-
-#[derive(Debug, Deserialize)]
-struct WireFrontierNote {
- summary: String,
- #[serde(default)]
- next_hypotheses: Vec<String>,
}
diff --git a/crates/fidget-spinner-cli/src/ui.rs b/crates/fidget-spinner-cli/src/ui.rs
index 29b5058..98cc95d 100644
--- a/crates/fidget-spinner-cli/src/ui.rs
+++ b/crates/fidget-spinner-cli/src/ui.rs
@@ -1,79 +1,113 @@
-use std::collections::BTreeMap;
use std::io;
use std::net::SocketAddr;
use axum::Router;
-use axum::extract::{Query, State};
+use axum::extract::{Path, State};
use axum::http::StatusCode;
use axum::response::{Html, IntoResponse, Response};
use axum::routing::get;
use camino::Utf8PathBuf;
-use fidget_spinner_core::{DagNode, FieldValueType, NodeClass, ProjectSchema, TagName};
-use linkify::{LinkFinder, LinkKind};
+use fidget_spinner_core::{
+ AttachmentTargetRef, ExperimentAnalysis, ExperimentOutcome, ExperimentStatus, FrontierRecord,
+ FrontierVerdict, MetricUnit, RunDimensionValue, Slug, VertexRef,
+};
+use fidget_spinner_store_sqlite::{
+ ExperimentDetail, ExperimentSummary, FrontierOpenProjection, FrontierSummary,
+ HypothesisCurrentState, HypothesisDetail, ProjectStatus, StoreError, VertexSummary,
+};
use maud::{DOCTYPE, Markup, PreEscaped, html};
-use serde::Deserialize;
-use serde_json::Value;
+use percent_encoding::{NON_ALPHANUMERIC, utf8_percent_encode};
use time::OffsetDateTime;
use time::format_description::well_known::Rfc3339;
use time::macros::format_description;
-use crate::{open_store, to_pretty_json};
+use crate::open_store;
#[derive(Clone)]
struct NavigatorState {
project_root: Utf8PathBuf,
- limit: u32,
+ limit: Option<u32>,
}
-#[derive(Debug, Default, Deserialize)]
-struct NavigatorQuery {
- tag: Option<String>,
-}
-
-struct NavigatorEntry {
- node: DagNode,
- frontier_label: Option<String>,
-}
-
-struct TagFacet {
- name: TagName,
- description: String,
- count: usize,
+struct AttachmentDisplay {
+ kind: &'static str,
+ href: String,
+ title: String,
+ summary: Option<String>,
}
pub(crate) fn serve(
project_root: Utf8PathBuf,
bind: SocketAddr,
- limit: u32,
-) -> Result<(), fidget_spinner_store_sqlite::StoreError> {
+ limit: Option<u32>,
+) -> Result<(), StoreError> {
let runtime = tokio::runtime::Builder::new_multi_thread()
.enable_io()
.build()
- .map_err(fidget_spinner_store_sqlite::StoreError::from)?;
+ .map_err(StoreError::from)?;
runtime.block_on(async move {
let state = NavigatorState {
project_root,
limit,
};
let app = Router::new()
- .route("/", get(navigator))
+ .route("/", get(project_home))
+ .route("/frontier/{selector}", get(frontier_detail))
+ .route("/hypothesis/{selector}", get(hypothesis_detail))
+ .route("/experiment/{selector}", get(experiment_detail))
+ .route("/artifact/{selector}", get(artifact_detail))
.with_state(state.clone());
let listener = tokio::net::TcpListener::bind(bind)
.await
- .map_err(fidget_spinner_store_sqlite::StoreError::from)?;
+ .map_err(StoreError::from)?;
println!("navigator: http://{bind}/");
- axum::serve(listener, app).await.map_err(|error| {
- fidget_spinner_store_sqlite::StoreError::Io(io::Error::other(error.to_string()))
- })
+ axum::serve(listener, app)
+ .await
+ .map_err(|error| StoreError::Io(io::Error::other(error.to_string())))
})
}
-async fn navigator(
+async fn project_home(State(state): State<NavigatorState>) -> Response {
+ render_response(render_project_home(state))
+}
+
+async fn frontier_detail(
State(state): State<NavigatorState>,
- Query(query): Query<NavigatorQuery>,
+ Path(selector): Path<String>,
) -> Response {
- match render_navigator(state, query) {
+ render_response(render_frontier_detail(state, selector))
+}
+
+async fn hypothesis_detail(
+ State(state): State<NavigatorState>,
+ Path(selector): Path<String>,
+) -> Response {
+ render_response(render_hypothesis_detail(state, selector))
+}
+
+async fn experiment_detail(
+ State(state): State<NavigatorState>,
+ Path(selector): Path<String>,
+) -> Response {
+ render_response(render_experiment_detail(state, selector))
+}
+
+async fn artifact_detail(
+ State(state): State<NavigatorState>,
+ Path(selector): Path<String>,
+) -> Response {
+ render_response(render_artifact_detail(state, selector))
+}
+
+/// Translate a page-render result into an HTTP response: 200 with the
+/// HTML on success, 404 for any unknown-selector store error, and 500
+/// (with the error text) for everything else.
+fn render_response(result: Result<Markup, StoreError>) -> Response {
+ match result {
 Ok(markup) => Html(markup.into_string()).into_response(),
+ // Unknown selectors are client errors, not render failures.
+ Err(StoreError::UnknownFrontierSelector(_))
+ | Err(StoreError::UnknownHypothesisSelector(_))
+ | Err(StoreError::UnknownExperimentSelector(_))
+ | Err(StoreError::UnknownArtifactSelector(_)) => {
+ (StatusCode::NOT_FOUND, "not found".to_owned()).into_response()
+ }
 Err(error) => (
 StatusCode::INTERNAL_SERVER_ERROR,
 format!("navigator render failed: {error}"),
@@ -82,565 +116,1226 @@ async fn navigator(
 }
 }
-fn render_navigator(
- state: NavigatorState,
- query: NavigatorQuery,
-) -> Result<Markup, fidget_spinner_store_sqlite::StoreError> {
+/// Render the project home page: opens the store for this request,
+/// reads project status and the frontier list, and wraps a status card
+/// plus frontier grid in the shared shell (no breadcrumb on home).
+fn render_project_home(state: NavigatorState) -> Result<Markup, StoreError> {
+ let store = open_store(state.project_root.as_std_path())?;
+ let project_status = store.status()?;
+ let frontiers = store.list_frontiers()?;
+ let title = format!("{} navigator", project_status.display_name);
+ let content = html! {
+ (render_project_status(&project_status))
+ (render_frontier_grid(&frontiers, state.limit))
+ };
+ Ok(render_shell(
+ &title,
+ Some(&project_status.display_name.to_string()),
+ None,
+ content,
+ ))
+}
+
+/// Render one frontier's detail page from its `frontier_open`
+/// projection: header, brief, active tag/metric surface, then the
+/// active-hypothesis and open-experiment grids. `state.limit` caps how
+/// many items each grid shows.
+fn render_frontier_detail(state: NavigatorState, selector: String) -> Result<Markup, StoreError> {
+ let store = open_store(state.project_root.as_std_path())?;
+ let projection = store.frontier_open(&selector)?;
+ let title = format!("{} · frontier", projection.frontier.label);
+ let subtitle = format!(
+ "{} hypotheses active · {} experiments open",
+ projection.active_hypotheses.len(),
+ projection.open_experiments.len()
+ );
+ let content = html! {
+ (render_frontier_header(&projection.frontier))
+ (render_frontier_brief(&projection))
+ (render_frontier_active_sets(&projection))
+ (render_hypothesis_current_state_grid(
+ &projection.active_hypotheses,
+ state.limit,
+ ))
+ (render_open_experiment_grid(
+ &projection.open_experiments,
+ state.limit,
+ ))
+ };
+ Ok(render_shell(&title, Some(&subtitle), None, content))
+}
+
+/// Render the hypothesis detail page. The owning frontier is re-read
+/// by id for the header and the breadcrumb link back to its page.
+fn render_hypothesis_detail(state: NavigatorState, selector: String) -> Result<Markup, StoreError> {
+ let store = open_store(state.project_root.as_std_path())?;
+ let detail = store.read_hypothesis(&selector)?;
+ let frontier = store.read_frontier(&detail.record.frontier_id.to_string())?;
+ let title = format!("{} · hypothesis", detail.record.title);
+ let subtitle = detail.record.summary.to_string();
+ let content = html! {
+ (render_hypothesis_header(&detail, &frontier))
+ (render_prose_block("Body", detail.record.body.as_str()))
+ (render_vertex_relation_sections(&detail.parents, &detail.children, state.limit))
+ (render_artifact_section(&detail.artifacts, state.limit))
+ (render_experiment_section(
+ "Open Experiments",
+ &detail.open_experiments,
+ state.limit,
+ ))
+ (render_experiment_section(
+ "Closed Experiments",
+ &detail.closed_experiments,
+ state.limit,
+ ))
+ };
+ Ok(render_shell(
+ &title,
+ Some(&subtitle),
+ Some((frontier.label.as_str(), frontier_href(&frontier.slug))),
+ content,
+ ))
+}
+
+fn render_experiment_detail(state: NavigatorState, selector: String) -> Result<Markup, StoreError> {
let store = open_store(state.project_root.as_std_path())?;
- let selected_tag = query.tag.map(TagName::new).transpose()?;
- let schema = store.schema().clone();
- let frontiers = store
- .list_frontiers()?
- .into_iter()
- .map(|frontier| (frontier.id, frontier.label.to_string()))
- .collect::<BTreeMap<_, _>>();
-
- let recent_nodes = load_recent_nodes(&store, None, state.limit)?;
- let visible_nodes = load_recent_nodes(&store, selected_tag.clone(), state.limit)?;
- let tag_facets = store
- .list_tags()?
- .into_iter()
- .map(|tag| TagFacet {
- count: recent_nodes
- .iter()
- .filter(|node| node.tags.contains(&tag.name))
- .count(),
- description: tag.description.to_string(),
- name: tag.name,
- })
- .collect::<Vec<_>>();
- let entries = visible_nodes
- .into_iter()
- .map(|node| NavigatorEntry {
- frontier_label: node
- .frontier_id
- .and_then(|frontier_id| frontiers.get(&frontier_id).cloned()),
- node,
- })
- .collect::<Vec<_>>();
-
- let title = selected_tag.as_ref().map_or_else(
- || "all recent nodes".to_owned(),
- |tag| format!("tag: {tag}"),
+ let detail = store.read_experiment(&selector)?;
+ let frontier = store.read_frontier(&detail.record.frontier_id.to_string())?;
+ let title = format!("{} · experiment", detail.record.title);
+ let subtitle = detail.record.summary.as_ref().map_or_else(
+ || detail.record.status.as_str().to_owned(),
+ ToString::to_string,
);
- let project_name = store.config().display_name.to_string();
+ let content = html! {
+ (render_experiment_header(&detail, &frontier))
+ (render_vertex_relation_sections(&detail.parents, &detail.children, state.limit))
+ (render_artifact_section(&detail.artifacts, state.limit))
+ @if let Some(outcome) = detail.record.outcome.as_ref() {
+ (render_experiment_outcome(outcome))
+ } @else {
+ section.card {
+ h2 { "Outcome" }
+ p.muted { "Open experiment. No outcome recorded yet." }
+ }
+ }
+ };
+ Ok(render_shell(
+ &title,
+ Some(&subtitle),
+ Some((frontier.label.as_str(), frontier_href(&frontier.slug))),
+ content,
+ ))
+}
- Ok(html! {
- (DOCTYPE)
- html {
- head {
- meta charset="utf-8";
- meta name="viewport" content="width=device-width, initial-scale=1";
- title { "Fidget Spinner Navigator" }
- style { (PreEscaped(stylesheet().to_owned())) }
+fn render_artifact_detail(state: NavigatorState, selector: String) -> Result<Markup, StoreError> {
+ let store = open_store(state.project_root.as_std_path())?;
+ let detail = store.read_artifact(&selector)?;
+ let attachments = detail
+ .attachments
+ .iter()
+ .map(|target| resolve_attachment_display(&store, *target))
+ .collect::<Result<Vec<_>, StoreError>>()?;
+ let title = format!("{} · artifact", detail.record.label);
+ let subtitle = detail.record.summary.as_ref().map_or_else(
+ || detail.record.kind.as_str().to_owned(),
+ ToString::to_string,
+ );
+ let content = html! {
+ section.card {
+ h2 { "Artifact" }
+ div.kv-grid {
+ (render_kv("Kind", detail.record.kind.as_str()))
+ (render_kv("Slug", detail.record.slug.as_str()))
+ (render_kv("Locator", detail.record.locator.as_str()))
+ @if let Some(media_type) = detail.record.media_type.as_ref() {
+ (render_kv("Media type", media_type.as_str()))
+ }
+ (render_kv("Updated", &format_timestamp(detail.record.updated_at)))
}
- body {
- main class="shell" {
- aside class="rail" {
- h1 { "Navigator" }
- p class="project" { (project_name) }
- nav class="tag-list" {
- a
- href="/"
- class={ "tag-link " (if selected_tag.is_none() { "selected" } else { "" }) } {
- span class="tag-name" { "all" }
- span class="tag-count" { (recent_nodes.len()) }
- }
- @for facet in &tag_facets {
- a
- href={ "/?tag=" (facet.name.as_str()) }
- class={ "tag-link " (if selected_tag.as_ref() == Some(&facet.name) { "selected" } else { "" }) } {
- span class="tag-name" { (facet.name.as_str()) }
- span class="tag-count" { (facet.count) }
- span class="tag-description" { (facet.description.as_str()) }
- }
- }
- }
+ @if let Some(summary) = detail.record.summary.as_ref() {
+ p.prose { (summary) }
+ }
+ p.muted {
+ "Artifact bodies are intentionally out of band. Spinner only preserves references."
+ }
+ }
+ section.card {
+ h2 { "Attachments" }
+ @if attachments.is_empty() {
+ p.muted { "No attachments." }
+ } @else {
+ div.link-list {
+ @for attachment in &attachments {
+ (render_attachment_chip(attachment))
}
- section class="feed" {
- header class="feed-header" {
- h2 { (title) }
- p class="feed-meta" {
- (entries.len()) " shown"
- " · "
- (recent_nodes.len()) " recent"
- " · "
- (state.limit) " max"
+ }
+ }
+ }
+ };
+ Ok(render_shell(&title, Some(&subtitle), None, content))
+}
+
+fn render_frontier_grid(frontiers: &[FrontierSummary], limit: Option<u32>) -> Markup {
+ html! {
+ section.card {
+ h2 { "Frontiers" }
+ @if frontiers.is_empty() {
+ p.muted { "No frontiers yet." }
+ } @else {
+ div.card-grid {
+ @for frontier in limit_items(frontiers, limit) {
+ article.mini-card {
+ div.card-header {
+ a.title-link href=(frontier_href(&frontier.slug)) { (frontier.label) }
+ span.status-chip class=(frontier_status_class(frontier.status.as_str())) {
+ (frontier.status.as_str())
}
}
- @if entries.is_empty() {
- article class="empty-state" {
- h3 { "No matching nodes" }
- p { "Try clearing the tag filter or recording new notes." }
- }
- } @else {
- @for entry in &entries {
- (render_entry(entry, &schema))
- }
+ p.prose { (frontier.objective) }
+ div.meta-row {
+ span { (format!("{} active hypotheses", frontier.active_hypothesis_count)) }
+ span { (format!("{} open experiments", frontier.open_experiment_count)) }
+ }
+ div.meta-row.muted {
+ span { "updated " (format_timestamp(frontier.updated_at)) }
}
}
}
}
}
- })
+ }
+ }
}
-fn load_recent_nodes(
- store: &fidget_spinner_store_sqlite::ProjectStore,
- tag: Option<TagName>,
- limit: u32,
-) -> Result<Vec<DagNode>, fidget_spinner_store_sqlite::StoreError> {
- let summaries = store.list_nodes(fidget_spinner_store_sqlite::ListNodesQuery {
- tags: tag.into_iter().collect(),
- limit,
- ..fidget_spinner_store_sqlite::ListNodesQuery::default()
- })?;
- summaries
- .into_iter()
- .map(|summary| {
- store.get_node(summary.id)?.ok_or(
- fidget_spinner_store_sqlite::StoreError::NodeNotFound(summary.id),
- )
- })
- .collect()
-}
-
-fn render_entry(entry: &NavigatorEntry, schema: &ProjectSchema) -> Markup {
- let body = entry.node.payload.field("body").and_then(Value::as_str);
- let mut keys = entry
- .node
- .payload
- .fields
- .keys()
- .filter(|name| name.as_str() != "body")
- .cloned()
- .collect::<Vec<_>>();
- keys.sort_unstable();
+/// Render the home-page status card: display name, a short manifesto
+/// paragraph, and a key/value grid of store-wide counts.
+fn render_project_status(status: &ProjectStatus) -> Markup {
+ html! {
+ section.card {
+ h1 { (status.display_name) }
+ p.prose {
+ "Austere experimental ledger. Frontier overview is the only sanctioned dump; everything else is deliberate traversal."
+ }
+ div.kv-grid {
+ (render_kv("Project root", status.project_root.as_str()))
+ (render_kv("Store format", &status.store_format_version.to_string()))
+ (render_kv("Frontiers", &status.frontier_count.to_string()))
+ (render_kv("Hypotheses", &status.hypothesis_count.to_string()))
+ (render_kv("Experiments", &status.experiment_count.to_string()))
+ (render_kv("Open experiments", &status.open_experiment_count.to_string()))
+ (render_kv("Artifacts", &status.artifact_count.to_string()))
+ }
+ }
+ }
+}
+
+/// Render the frontier page header card: label, objective prose, and a
+/// meta row with slug, status chip, and last-updated timestamp.
+fn render_frontier_header(frontier: &FrontierRecord) -> Markup {
+ html! {
+ section.card {
+ h1 { (frontier.label) }
+ p.prose { (frontier.objective) }
+ div.meta-row {
+ span { "slug " code { (frontier.slug) } }
+ span.status-chip class=(frontier_status_class(frontier.status.as_str())) {
+ (frontier.status.as_str())
+ }
+ span.muted { "updated " (format_timestamp(frontier.updated_at)) }
+ }
+ }
+ }
+}
+fn render_frontier_brief(projection: &FrontierOpenProjection) -> Markup {
+ let frontier = &projection.frontier;
html! {
- article class="entry" id={ "node-" (entry.node.id) } {
- header class="entry-header" {
- div class="entry-title-row" {
- span class={ "class-badge class-" (entry.node.class.as_str()) } {
- (entry.node.class.as_str())
+ section.card {
+ h2 { "Frontier Brief" }
+ @if let Some(situation) = frontier.brief.situation.as_ref() {
+ div.block {
+ h3 { "Situation" }
+ p.prose { (situation) }
+ }
+ } @else {
+ p.muted { "No situation summary recorded." }
+ }
+ div.split {
+ div.subcard {
+ h3 { "Roadmap" }
+ @if frontier.brief.roadmap.is_empty() {
+ p.muted { "No roadmap ordering recorded." }
+ } @else {
+ ol.roadmap-list {
+ @for item in &frontier.brief.roadmap {
+ @let title = hypothesis_title_for_roadmap_item(projection, item.hypothesis_id);
+ li {
+ a href=(hypothesis_href_from_id(item.hypothesis_id)) {
+ (format!("{}.", item.rank)) " "
+ (title)
+ }
+ @if let Some(summary) = item.summary.as_ref() {
+ span.muted { " · " (summary) }
+ }
+ }
+ }
}
- h3 class="entry-title" {
- a href={ "#node-" (entry.node.id) } { (entry.node.title.as_str()) }
+ }
+ }
+ div.subcard {
+ h3 { "Unknowns" }
+ @if frontier.brief.unknowns.is_empty() {
+ p.muted { "No explicit unknowns." }
+ } @else {
+ ul.simple-list {
+ @for unknown in &frontier.brief.unknowns {
+ li { (unknown) }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+fn render_frontier_active_sets(projection: &FrontierOpenProjection) -> Markup {
+ html! {
+ section.card {
+ h2 { "Active Surface" }
+ div.split {
+ div.subcard {
+ h3 { "Active Tags" }
+ @if projection.active_tags.is_empty() {
+ p.muted { "No active tags." }
+ } @else {
+ div.chip-row {
+ @for tag in &projection.active_tags {
+ span.tag-chip { (tag) }
+ }
}
}
- div class="entry-meta" {
- span { (render_timestamp(entry.node.updated_at)) }
- @if let Some(label) = &entry.frontier_label {
- span { "frontier: " (label.as_str()) }
+ }
+ div.subcard {
+ h3 { "Live Metrics" }
+ @if projection.active_metric_keys.is_empty() {
+ p.muted { "No live metrics." }
+ } @else {
+ table.metric-table {
+ thead {
+ tr {
+ th { "Key" }
+ th { "Unit" }
+ th { "Objective" }
+ th { "Refs" }
+ }
+ }
+ tbody {
+ @for metric in &projection.active_metric_keys {
+ tr {
+ td { (metric.key) }
+ td { (metric.unit.as_str()) }
+ td { (metric.objective.as_str()) }
+ td { (metric.reference_count) }
+ }
+ }
+ }
}
- @if !entry.node.tags.is_empty() {
- span class="tag-strip" {
- @for tag in &entry.node.tags {
- a class="entry-tag" href={ "/?tag=" (tag.as_str()) } { (tag.as_str()) }
+ }
+ }
+ }
+ }
+ }
+}
+
+fn render_hypothesis_current_state_grid(
+ states: &[HypothesisCurrentState],
+ limit: Option<u32>,
+) -> Markup {
+ html! {
+ section.card {
+ h2 { "Active Hypotheses" }
+ @if states.is_empty() {
+ p.muted { "No active hypotheses." }
+ } @else {
+ div.card-grid {
+ @for state in limit_items(states, limit) {
+ article.mini-card {
+ div.card-header {
+ a.title-link href=(hypothesis_href(&state.hypothesis.slug)) {
+ (state.hypothesis.title)
+ }
+ @if let Some(verdict) = state.hypothesis.latest_verdict {
+ span.status-chip class=(verdict_class(verdict)) {
+ (verdict.as_str())
+ }
+ }
+ }
+ p.prose { (state.hypothesis.summary) }
+ @if !state.hypothesis.tags.is_empty() {
+ div.chip-row {
+ @for tag in &state.hypothesis.tags {
+ span.tag-chip { (tag) }
+ }
+ }
+ }
+ div.meta-row {
+ span { (format!("{} open", state.open_experiments.len())) }
+ @if let Some(latest) = state.latest_closed_experiment.as_ref() {
+ span {
+ "latest "
+ a href=(experiment_href(&latest.slug)) { (latest.title) }
+ }
+ } @else {
+ span.muted { "no closed experiments" }
+ }
+ }
+ @if !state.open_experiments.is_empty() {
+ div.related-block {
+ h3 { "Open" }
+ div.link-list {
+ @for experiment in &state.open_experiments {
+ (render_experiment_link_chip(experiment))
+ }
+ }
+ }
+ }
+ @if let Some(latest) = state.latest_closed_experiment.as_ref() {
+ div.related-block {
+ h3 { "Latest Closed" }
+ (render_experiment_summary_line(latest))
}
}
}
}
}
- @if let Some(summary) = &entry.node.summary {
- p class="entry-summary" { (summary.as_str()) }
+ }
+ }
+ }
+}
+
+/// Render the "Open Experiments" card as a grid of experiment cards,
+/// truncated to `limit` items (None = show all).
+fn render_open_experiment_grid(experiments: &[ExperimentSummary], limit: Option<u32>) -> Markup {
+ html! {
+ section.card {
+ h2 { "Open Experiments" }
+ @if experiments.is_empty() {
+ p.muted { "No open experiments." }
+ } @else {
+ div.card-grid {
+ @for experiment in limit_items(experiments, limit) {
+ (render_experiment_card(experiment))
+ }
+ }
+ }
+ }
+ }
+}
+
+fn render_hypothesis_header(detail: &HypothesisDetail, frontier: &FrontierRecord) -> Markup {
+ html! {
+ section.card {
+ h1 { (detail.record.title) }
+ p.prose { (detail.record.summary) }
+ div.meta-row {
+ span { "frontier " a href=(frontier_href(&frontier.slug)) { (frontier.label) } }
+ span { "slug " code { (detail.record.slug) } }
+ @if detail.record.archived {
+ span.status-chip.archived { "archived" }
}
- @if let Some(body) = body {
- section class="entry-body" {
- (render_string_value(body))
+ span.muted { "updated " (format_timestamp(detail.record.updated_at)) }
+ }
+ @if !detail.record.tags.is_empty() {
+ div.chip-row {
+ @for tag in &detail.record.tags {
+ span.tag-chip { (tag) }
}
}
- @if !keys.is_empty() {
- dl class="field-list" {
- @for key in &keys {
- @if let Some(value) = entry.node.payload.field(key) {
- (render_field(entry.node.class, schema, key, value))
- }
- }
+ }
+ }
+ }
+}
+
+fn render_experiment_header(detail: &ExperimentDetail, frontier: &FrontierRecord) -> Markup {
+ html! {
+ section.card {
+ h1 { (detail.record.title) }
+ @if let Some(summary) = detail.record.summary.as_ref() {
+ p.prose { (summary) }
+ }
+ div.meta-row {
+ span {
+ "frontier "
+ a href=(frontier_href(&frontier.slug)) { (frontier.label) }
+ }
+ span {
+ "hypothesis "
+ a href=(hypothesis_href(&detail.owning_hypothesis.slug)) {
+ (detail.owning_hypothesis.title)
}
}
- @if !entry.node.diagnostics.items.is_empty() {
- section class="diagnostics" {
- h4 { "diagnostics" }
- ul {
- @for item in &entry.node.diagnostics.items {
- li {
- span class="diag-severity" { (format!("{:?}", item.severity).to_ascii_lowercase()) }
- " "
- (item.message.as_str())
+ span.status-chip class=(experiment_status_class(detail.record.status)) {
+ (detail.record.status.as_str())
+ }
+ @if let Some(verdict) = detail
+ .record
+ .outcome
+ .as_ref()
+ .map(|outcome| outcome.verdict)
+ {
+ span.status-chip class=(verdict_class(verdict)) { (verdict.as_str()) }
+ }
+ span.muted { "updated " (format_timestamp(detail.record.updated_at)) }
+ }
+ @if !detail.record.tags.is_empty() {
+ div.chip-row {
+ @for tag in &detail.record.tags {
+ span.tag-chip { (tag) }
+ }
+ }
+ }
+ }
+ }
+}
+
+fn render_experiment_outcome(outcome: &ExperimentOutcome) -> Markup {
+ html! {
+ section.card {
+ h2 { "Outcome" }
+ div.kv-grid {
+ (render_kv("Verdict", outcome.verdict.as_str()))
+ (render_kv("Backend", outcome.backend.as_str()))
+ (render_kv("Closed", &format_timestamp(outcome.closed_at)))
+ }
+ (render_command_recipe(&outcome.command))
+ (render_metric_panel("Primary metric", std::slice::from_ref(&outcome.primary_metric), outcome))
+ @if !outcome.supporting_metrics.is_empty() {
+ (render_metric_panel("Supporting metrics", &outcome.supporting_metrics, outcome))
+ }
+ @if !outcome.dimensions.is_empty() {
+ section.subcard {
+ h3 { "Dimensions" }
+ table.metric-table {
+ thead { tr { th { "Key" } th { "Value" } } }
+ tbody {
+ @for (key, value) in &outcome.dimensions {
+ tr {
+ td { (key) }
+ td { (render_dimension_value(value)) }
}
}
}
}
}
}
+ section.subcard {
+ h3 { "Rationale" }
+ p.prose { (outcome.rationale) }
+ }
+ @if let Some(analysis) = outcome.analysis.as_ref() {
+ (render_experiment_analysis(analysis))
+ }
+ }
}
}
-fn render_field(class: NodeClass, schema: &ProjectSchema, key: &str, value: &Value) -> Markup {
- let value_type = schema
- .field_spec(class, key)
- .and_then(|field| field.value_type);
- let is_plottable = schema
- .field_spec(class, key)
- .is_some_and(|field| field.is_plottable());
+/// Render the optional analysis attached to an experiment outcome:
+/// summary prose followed by the full body in a code block.
+fn render_experiment_analysis(analysis: &ExperimentAnalysis) -> Markup {
 html! {
- dt {
- (key)
- @if let Some(value_type) = value_type {
- span class="field-type" { (value_type.as_str()) }
- }
- @if is_plottable {
- span class="field-type plottable" { "plot" }
+ section.subcard {
+ h3 { "Analysis" }
+ p.prose { (analysis.summary) }
+ div.code-block {
+ (analysis.body)
+ }
+ }
+ }
+}
+
+fn render_command_recipe(command: &fidget_spinner_core::CommandRecipe) -> Markup {
+ html! {
+ section.subcard {
+ h3 { "Command" }
+ div.kv-grid {
+ (render_kv(
+ "argv",
+ &command
+ .argv
+ .iter()
+ .map(ToString::to_string)
+ .collect::<Vec<_>>()
+ .join(" "),
+ ))
+ @if let Some(working_directory) = command.working_directory.as_ref() {
+ (render_kv("cwd", working_directory.as_str()))
}
}
- dd {
- @match value_type {
- Some(FieldValueType::String) => {
- @if let Some(text) = value.as_str() {
- (render_string_value(text))
- } @else {
- (render_json_value(value))
+ @if !command.env.is_empty() {
+ table.metric-table {
+ thead { tr { th { "Env" } th { "Value" } } }
+ tbody {
+ @for (key, value) in &command.env {
+ tr {
+ td { (key) }
+ td { (value) }
+ }
}
}
- Some(FieldValueType::Numeric) => {
- @if let Some(number) = value.as_f64() {
- code class="numeric" { (number) }
- } @else {
- (render_json_value(value))
+ }
+ }
+ }
+ }
+}
+
+fn render_metric_panel(
+ title: &str,
+ metrics: &[fidget_spinner_core::MetricValue],
+ outcome: &ExperimentOutcome,
+) -> Markup {
+ html! {
+ section.subcard {
+ h3 { (title) }
+ table.metric-table {
+ thead {
+ tr {
+ th { "Key" }
+ th { "Value" }
+ }
+ }
+ tbody {
+ @for metric in metrics {
+ tr {
+ td { (metric.key) }
+ td { (format_metric_value(metric.value, metric_unit_for(metric, outcome))) }
}
}
- Some(FieldValueType::Boolean) => {
- @if let Some(boolean) = value.as_bool() {
- span class={ "boolean " (if boolean { "true" } else { "false" }) } {
- (if boolean { "true" } else { "false" })
- }
+ }
+ }
+ }
+ }
+}
+
+fn metric_unit_for(
+ metric: &fidget_spinner_core::MetricValue,
+ outcome: &ExperimentOutcome,
+) -> MetricUnit {
+ if metric.key == outcome.primary_metric.key {
+ return MetricUnit::Custom;
+ }
+ MetricUnit::Custom
+}
+
+fn render_vertex_relation_sections(
+ parents: &[VertexSummary],
+ children: &[VertexSummary],
+ limit: Option<u32>,
+) -> Markup {
+ html! {
+ section.card {
+ h2 { "Influence Network" }
+ div.split {
+ div.subcard {
+ h3 { "Parents" }
+ @if parents.is_empty() {
+ p.muted { "No parent influences." }
} @else {
- (render_json_value(value))
+ div.link-list {
+ @for parent in limit_items(parents, limit) {
+ (render_vertex_chip(parent))
+ }
+ }
}
}
- Some(FieldValueType::Timestamp) => {
- @if let Some(raw) = value.as_str() {
- time datetime=(raw) { (render_timestamp_value(raw)) }
+ div.subcard {
+ h3 { "Children" }
+ @if children.is_empty() {
+ p.muted { "No downstream influences." }
} @else {
- (render_untyped_value(value))
+ div.link-list {
+ @for child in limit_items(children, limit) {
+ (render_vertex_chip(child))
+ }
+ }
}
}
- None => (render_untyped_value(value)),
}
}
}
}
-fn render_string_value(text: &str) -> Markup {
- let finder = LinkFinder::new();
+fn render_artifact_section(
+ artifacts: &[fidget_spinner_store_sqlite::ArtifactSummary],
+ limit: Option<u32>,
+) -> Markup {
html! {
- div class="rich-text" {
- @for line in text.lines() {
- p {
- @for span in finder.spans(line) {
- @match span.kind() {
- Some(LinkKind::Url) => a href=(span.as_str()) { (span.as_str()) },
- _ => (span.as_str()),
+ section.card {
+ h2 { "Artifacts" }
+ @if artifacts.is_empty() {
+ p.muted { "No attached artifacts." }
+ } @else {
+ div.card-grid {
+ @for artifact in limit_items(artifacts, limit) {
+ article.mini-card {
+ div.card-header {
+ a.title-link href=(artifact_href(&artifact.slug)) { (artifact.label) }
+ span.status-chip.classless { (artifact.kind.as_str()) }
+ }
+ @if let Some(summary) = artifact.summary.as_ref() {
+ p.prose { (summary) }
+ }
+ div.meta-row {
+ span.muted { (artifact.locator) }
}
}
}
}
}
}
+ }
}
-fn render_json_value(value: &Value) -> Markup {
- let text = to_pretty_json(value).unwrap_or_else(|_| value.to_string());
+fn render_experiment_section(
+ title: &str,
+ experiments: &[ExperimentSummary],
+ limit: Option<u32>,
+) -> Markup {
html! {
- pre class="json-value" { (text) }
+ section.card {
+ h2 { (title) }
+ @if experiments.is_empty() {
+ p.muted { "None." }
+ } @else {
+ div.card-grid {
+ @for experiment in limit_items(experiments, limit) {
+ (render_experiment_card(experiment))
+ }
+ }
+ }
+ }
}
}
-fn render_untyped_value(value: &Value) -> Markup {
- match value {
- Value::String(text) => render_string_value(text),
- Value::Number(number) => html! {
- code class="numeric" { (number) }
- },
- Value::Bool(boolean) => html! {
- span class={ "boolean " (if *boolean { "true" } else { "false" }) } {
- (if *boolean { "true" } else { "false" })
+fn render_experiment_card(experiment: &ExperimentSummary) -> Markup {
+ html! {
+ article.mini-card {
+ div.card-header {
+ a.title-link href=(experiment_href(&experiment.slug)) { (experiment.title) }
+ span.status-chip class=(experiment_status_class(experiment.status)) {
+ (experiment.status.as_str())
+ }
+ @if let Some(verdict) = experiment.verdict {
+ span.status-chip class=(verdict_class(verdict)) { (verdict.as_str()) }
}
- },
- _ => render_json_value(value),
+ }
+ @if let Some(summary) = experiment.summary.as_ref() {
+ p.prose { (summary) }
+ }
+ @if let Some(metric) = experiment.primary_metric.as_ref() {
+ div.meta-row {
+ span.metric-pill {
+ (metric.key) ": "
+ (format_metric_value(metric.value, metric.unit))
+ }
+ }
+ }
+ @if !experiment.tags.is_empty() {
+ div.chip-row {
+ @for tag in &experiment.tags {
+ span.tag-chip { (tag) }
+ }
+ }
+ }
+ div.meta-row.muted {
+ span { "updated " (format_timestamp(experiment.updated_at)) }
+ }
+ }
}
}
-fn render_timestamp(timestamp: OffsetDateTime) -> String {
- timestamp
- .format(&format_description!(
- "[year]-[month]-[day] [hour]:[minute]:[second]Z"
- ))
- .unwrap_or_else(|_| timestamp.to_string())
+/// One-line experiment summary: the link chip, plus the primary-metric
+/// pill when the experiment has one.
+fn render_experiment_summary_line(experiment: &ExperimentSummary) -> Markup {
+ html! {
+ div.link-list {
+ (render_experiment_link_chip(experiment))
+ @if let Some(metric) = experiment.primary_metric.as_ref() {
+ span.metric-pill {
+ (metric.key) ": "
+ (format_metric_value(metric.value, metric.unit))
+ }
+ }
+ }
+ }
+}
-fn render_timestamp_value(raw: &str) -> String {
- OffsetDateTime::parse(raw, &Rfc3339)
- .map(render_timestamp)
- .unwrap_or_else(|_| raw.to_owned())
+/// Compact link chip to an experiment's page, with a verdict chip
+/// appended once the experiment has one.
+fn render_experiment_link_chip(experiment: &ExperimentSummary) -> Markup {
+ html! {
+ a.link-chip href=(experiment_href(&experiment.slug)) {
+ span { (experiment.title) }
+ @if let Some(verdict) = experiment.verdict {
+ span.status-chip class=(verdict_class(verdict)) { (verdict.as_str()) }
+ }
+ }
+ }
+}
-fn stylesheet() -> &'static str {
- r#"
- :root {
- color-scheme: light;
- --bg: #f6f3ec;
- --panel: #fffdf8;
- --line: #d8d1c4;
- --text: #22201a;
- --muted: #746e62;
- --accent: #2d5c4d;
- --accent-soft: #dbe8e2;
- --tag: #ece5d8;
- --warn: #8b5b24;
+/// Link chip for a DAG vertex (hypothesis or experiment): picks the
+/// href and kind label from the vertex variant, then renders kind,
+/// title, and an optional summary tail.
+fn render_vertex_chip(summary: &VertexSummary) -> Markup {
+ let href = match summary.vertex {
+ VertexRef::Hypothesis(_) => hypothesis_href(&summary.slug),
+ VertexRef::Experiment(_) => experiment_href(&summary.slug),
+ };
+ let kind = match summary.vertex {
+ VertexRef::Hypothesis(_) => "hypothesis",
+ VertexRef::Experiment(_) => "experiment",
+ };
+ html! {
+ a.link-chip href=(href) {
+ span.kind-chip { (kind) }
+ span { (summary.title) }
+ @if let Some(summary_text) = summary.summary.as_ref() {
+ span.muted { " — " (summary_text) }
+ }
+ }
 }
+}
- * { box-sizing: border-box; }
-
- body {
- margin: 0;
- background: var(--bg);
- color: var(--text);
- font: 15px/1.5 "Iosevka Web", "IBM Plex Mono", "SFMono-Regular", monospace;
+/// Link chip for a resolved attachment (see `resolve_attachment_display`):
+/// kind label, title, and optional summary tail.
+fn render_attachment_chip(attachment: &AttachmentDisplay) -> Markup {
+ html! {
+ a.link-chip href=(&attachment.href) {
+ span.kind-chip { (attachment.kind) }
+ span { (&attachment.title) }
+ @if let Some(summary) = attachment.summary.as_ref() {
+ span.muted { " — " (summary) }
+ }
+ }
 }
+}
- a {
- color: var(--accent);
- text-decoration: none;
+/// Simple titled card wrapping a single prose paragraph.
+fn render_prose_block(title: &str, body: &str) -> Markup {
+ html! {
+ section.card {
+ h2 { (title) }
+ p.prose { (body) }
+ }
 }
+}
- a:hover {
- text-decoration: underline;
+/// Common page shell shared by every navigator page: full HTML document
+/// with inline stylesheet, a header carrying the "home" link, an
+/// optional breadcrumb `(label, href)` pair, title, and optional
+/// subtitle, followed by the page-specific `content`.
+fn render_shell(
+ title: &str,
+ subtitle: Option<&str>,
+ breadcrumb: Option<(&str, String)>,
+ content: Markup,
+) -> Markup {
+ html! {
+ (DOCTYPE)
+ html {
+ head {
+ meta charset="utf-8";
+ meta name="viewport" content="width=device-width, initial-scale=1";
+ title { (title) }
+ // Styles are inlined; the navigator serves no separate assets.
+ style { (PreEscaped(styles())) }
+ }
+ body {
+ main.shell {
+ header.page-header {
+ div.eyebrow {
+ a href="/" { "home" }
+ @if let Some((label, href)) = breadcrumb {
+ span.sep { "/" }
+ a href=(href) { (label) }
+ }
+ }
+ h1.page-title { (title) }
+ @if let Some(subtitle) = subtitle {
+ p.page-subtitle { (subtitle) }
+ }
+ }
+ (content)
+ }
+ }
+ }
 }
+}
- .shell {
- display: grid;
- grid-template-columns: 18rem minmax(0, 1fr);
- min-height: 100vh;
+/// One label/value cell for a `.kv-grid` definition grid.
+fn render_kv(label: &str, value: &str) -> Markup {
+ html! {
+ div.kv {
+ div.kv-label { (label) }
+ div.kv-value { (value) }
+ }
 }
+}
- .rail {
- border-right: 1px solid var(--line);
- padding: 1.25rem 1rem;
- position: sticky;
- top: 0;
- align-self: start;
- height: 100vh;
- overflow: auto;
- background: rgba(255, 253, 248, 0.85);
- backdrop-filter: blur(6px);
+/// Stringify a run-dimension value for the dimensions table; numerics
+/// reuse the shared float formatter, the other variants use their
+/// `to_string` form.
+fn render_dimension_value(value: &RunDimensionValue) -> String {
+ match value {
+ RunDimensionValue::String(value) => value.to_string(),
+ RunDimensionValue::Numeric(value) => format_float(*value),
+ RunDimensionValue::Boolean(value) => value.to_string(),
+ RunDimensionValue::Timestamp(value) => value.to_string(),
 }
+}
- .project, .feed-meta, .entry-meta, .entry-summary, .tag-description {
- color: var(--muted);
+/// Format a metric value by unit: grouped integers for bytes and
+/// counts, fixed 3 decimals for seconds, 4 for ratios, and the generic
+/// float form for custom units.
+fn format_metric_value(value: f64, unit: MetricUnit) -> String {
+ match unit {
+ MetricUnit::Bytes => format!("{} B", format_integerish(value)),
+ MetricUnit::Seconds => format!("{value:.3} s"),
+ MetricUnit::Count => format_integerish(value),
+ MetricUnit::Ratio => format!("{value:.4}"),
+ MetricUnit::Custom => format_float(value),
+ }
+}
- .tag-list {
- display: grid;
- gap: 0.5rem;
+fn format_float(value: f64) -> String {
+ if value.fract() == 0.0 {
+ format_integerish(value)
+ } else {
+ format!("{value:.4}")
}
+}
- .tag-link {
- display: grid;
- grid-template-columns: minmax(0, 1fr) auto;
- gap: 0.2rem 0.75rem;
- padding: 0.55rem 0.7rem;
- border: 1px solid var(--line);
- background: var(--panel);
/// Format `value` as a rounded integer with commas grouping every three
/// digits, preserving the sign (including `-0` for small negatives,
/// matching `format!("{:.0}", …)` semantics).
fn format_integerish(value: f64) -> String {
    // `{:.0}` rounds the magnitude to a whole number of digits.
    let digits = format!("{:.0}", value.abs());
    // Build the result reversed: walk digits right-to-left, splicing a
    // comma before every third position, then append the sign last.
    let mut reversed: Vec<char> = Vec::with_capacity(digits.len() + digits.len() / 3 + 1);
    for (position, ch) in digits.chars().rev().enumerate() {
        if position > 0 && position % 3 == 0 {
            reversed.push(',');
        }
        reversed.push(ch);
    }
    if value.is_sign_negative() {
        reversed.push('-');
    }
    reversed.into_iter().rev().collect()
}
+
+/// Format a timestamp as `YYYY-MM-DD HH:MM` for display; if that fails,
+/// fall back to RFC 3339, then to the raw unix timestamp.
+fn format_timestamp(value: OffsetDateTime) -> String {
+ const TIMESTAMP: &[time::format_description::FormatItem<'static>] =
+ format_description!("[year]-[month]-[day] [hour]:[minute]");
+ value.format(TIMESTAMP).unwrap_or_else(|_| {
+ value
+ .format(&Rfc3339)
+ .unwrap_or_else(|_| value.unix_timestamp().to_string())
+ })
+}
+
+/// Link to a frontier detail page by slug (percent-encoded segment).
+fn frontier_href(slug: &Slug) -> String {
+ format!("/frontier/{}", encode_path_segment(slug.as_str()))
+}
+
+/// Link to a hypothesis detail page by slug (percent-encoded segment).
+fn hypothesis_href(slug: &Slug) -> String {
+ format!("/hypothesis/{}", encode_path_segment(slug.as_str()))
+}
- .tag-link.selected {
- border-color: var(--accent);
- background: var(--accent-soft);
+/// Link to a hypothesis page by raw id — used where only the id is at
+/// hand (roadmap entries); the detail route resolves it like any other
+/// selector (store reads elsewhere accept id strings).
+fn hypothesis_href_from_id(id: fidget_spinner_core::HypothesisId) -> String {
+ format!("/hypothesis/{}", encode_path_segment(&id.to_string()))
+}
+
+fn hypothesis_title_for_roadmap_item(
+ projection: &FrontierOpenProjection,
+ hypothesis_id: fidget_spinner_core::HypothesisId,
+) -> String {
+ projection
+ .active_hypotheses
+ .iter()
+ .find(|state| state.hypothesis.id == hypothesis_id)
+ .map(|state| state.hypothesis.title.to_string())
+ .unwrap_or_else(|| hypothesis_id.to_string())
+}
+
+/// Link to an experiment detail page by slug (percent-encoded segment).
+fn experiment_href(slug: &Slug) -> String {
+ format!("/experiment/{}", encode_path_segment(slug.as_str()))
+}
+
+/// Link to an artifact detail page by slug (percent-encoded segment).
+fn artifact_href(slug: &Slug) -> String {
+ format!("/artifact/{}", encode_path_segment(slug.as_str()))
+}
+
+/// Resolve an attachment reference into link-chip display data (kind
+/// label, href, title, optional summary) by reading the target record
+/// back from the store by id; any store lookup error is propagated.
+fn resolve_attachment_display(
+ store: &fidget_spinner_store_sqlite::ProjectStore,
+ target: AttachmentTargetRef,
+) -> Result<AttachmentDisplay, StoreError> {
+ match target {
+ AttachmentTargetRef::Frontier(id) => {
+ let frontier = store.read_frontier(&id.to_string())?;
+ Ok(AttachmentDisplay {
+ kind: "frontier",
+ href: frontier_href(&frontier.slug),
+ title: frontier.label.to_string(),
+ summary: Some(frontier.objective.to_string()),
+ })
+ }
+ AttachmentTargetRef::Hypothesis(id) => {
+ let detail = store.read_hypothesis(&id.to_string())?;
+ Ok(AttachmentDisplay {
+ kind: "hypothesis",
+ href: hypothesis_href(&detail.record.slug),
+ title: detail.record.title.to_string(),
+ summary: Some(detail.record.summary.to_string()),
+ })
+ }
+ AttachmentTargetRef::Experiment(id) => {
+ let detail = store.read_experiment(&id.to_string())?;
+ Ok(AttachmentDisplay {
+ kind: "experiment",
+ href: experiment_href(&detail.record.slug),
+ title: detail.record.title.to_string(),
+ summary: detail.record.summary.as_ref().map(ToString::to_string),
+ })
+ }
 }
+}
- .tag-name {
- font-weight: 700;
- overflow-wrap: anywhere;
+/// Percent-encode `value` for use as a single URL path segment.
+/// `NON_ALPHANUMERIC` escapes everything outside `[0-9A-Za-z]` —
+/// stricter than RFC 3986 requires, but unambiguous for slugs and ids.
+fn encode_path_segment(value: &str) -> String {
+ utf8_percent_encode(value, NON_ALPHANUMERIC).to_string()
+}
+
+fn frontier_status_class(status: &str) -> &'static str {
+ match status {
+ "exploring" => "status-exploring",
+ "paused" => "status-parked",
+ "archived" => "status-archived",
+ _ => "status-neutral",
}
+}
- .tag-count {
- color: var(--muted);
+fn experiment_status_class(status: ExperimentStatus) -> &'static str {
+ match status {
+ ExperimentStatus::Open => "status-open",
+ ExperimentStatus::Closed => "status-neutral",
}
+}
- .tag-description {
- grid-column: 1 / -1;
- font-size: 0.9rem;
- overflow-wrap: anywhere;
+fn verdict_class(verdict: FrontierVerdict) -> &'static str {
+ match verdict {
+ FrontierVerdict::Accepted => "status-accepted",
+ FrontierVerdict::Kept => "status-kept",
+ FrontierVerdict::Parked => "status-parked",
+ FrontierVerdict::Rejected => "status-rejected",
}
+}
+
+fn limit_items<T>(items: &[T], limit: Option<u32>) -> &[T] {
+ let Some(limit) = limit else {
+ return items;
+ };
+ let Ok(limit) = usize::try_from(limit) else {
+ return items;
+ };
+ let end = items.len().min(limit);
+ &items[..end]
+}
- .feed {
- padding: 1.5rem;
+fn styles() -> &'static str {
+ r#"
+ :root {
+ color-scheme: dark;
+ --bg: #091019;
+ --panel: #0f1823;
+ --panel-2: #131f2d;
+ --border: #1e3850;
+ --text: #d8e6f3;
+ --muted: #87a0b8;
+ --accent: #6dc7ff;
+ --accepted: #7ce38b;
+ --kept: #8de0c0;
+ --parked: #d9c17d;
+ --rejected: #ee7a7a;
+ }
+ * { box-sizing: border-box; }
+ body {
+ margin: 0;
+ background: var(--bg);
+ color: var(--text);
+ font: 15px/1.5 "Iosevka Web", "Iosevka", "JetBrains Mono", monospace;
+ }
+ a {
+ color: var(--accent);
+ text-decoration: none;
+ }
+ a:hover { text-decoration: underline; }
+ .shell {
+ width: min(1500px, 100%);
+ margin: 0 auto;
+ padding: 20px;
display: grid;
- gap: 1rem;
- min-width: 0;
+ gap: 16px;
}
-
- .feed-header {
- padding-bottom: 0.5rem;
- border-bottom: 1px solid var(--line);
+ .page-header {
+ display: grid;
+ gap: 8px;
+ padding: 16px 18px;
+ border: 1px solid var(--border);
+ background: var(--panel);
}
-
- .entry, .empty-state {
+ .eyebrow {
+ display: flex;
+ gap: 10px;
+ color: var(--muted);
+ font-size: 13px;
+ text-transform: uppercase;
+ letter-spacing: 0.05em;
+ }
+ .sep { color: #4d6478; }
+ .page-title {
+ margin: 0;
+ font-size: clamp(22px, 3.8vw, 34px);
+ line-height: 1.1;
+ }
+ .page-subtitle {
+ margin: 0;
+ color: var(--muted);
+ max-width: 90ch;
+ }
+ .card {
+ border: 1px solid var(--border);
background: var(--panel);
- border: 1px solid var(--line);
- padding: 1rem 1.1rem;
+ padding: 16px 18px;
+ display: grid;
+ gap: 12px;
+ }
+ .subcard {
+ border: 1px solid #1a2b3c;
+ background: var(--panel-2);
+ padding: 12px 14px;
+ display: grid;
+ gap: 10px;
min-width: 0;
- overflow: hidden;
}
-
- .entry-header {
+ .block { display: grid; gap: 10px; }
+ .split {
display: grid;
- gap: 0.35rem;
- margin-bottom: 0.75rem;
+ gap: 16px;
+ grid-template-columns: repeat(auto-fit, minmax(320px, 1fr));
}
-
- .entry-title-row {
+ .card-grid {
+ display: grid;
+ gap: 12px;
+ grid-template-columns: repeat(auto-fit, minmax(280px, 1fr));
+ }
+ .mini-card {
+ border: 1px solid #1a2b3c;
+ background: var(--panel-2);
+ padding: 12px 14px;
+ display: grid;
+ gap: 9px;
+ min-width: 0;
+ }
+ .card-header {
display: flex;
+ gap: 10px;
+ align-items: center;
flex-wrap: wrap;
- gap: 0.75rem;
- align-items: baseline;
}
-
- .entry-title {
+ .title-link {
+ font-size: 16px;
+ font-weight: 700;
+ color: #f2f8ff;
+ }
+ h1, h2, h3 {
margin: 0;
- font-size: 1.05rem;
- min-width: 0;
- overflow-wrap: anywhere;
+ line-height: 1.15;
}
-
- .entry-meta {
+ h2 { font-size: 19px; }
+ h3 { font-size: 14px; color: #c9d8e6; }
+ .prose {
+ margin: 0;
+ color: #dce9f6;
+ max-width: 92ch;
+ white-space: pre-wrap;
+ }
+ .muted { color: var(--muted); }
+ .meta-row {
display: flex;
flex-wrap: wrap;
- gap: 0.75rem;
- font-size: 0.9rem;
+ gap: 14px;
+ align-items: center;
+ font-size: 13px;
+ }
+ .kv-grid {
+ display: grid;
+ gap: 10px 14px;
+ grid-template-columns: repeat(auto-fit, minmax(220px, 1fr));
+ }
+ .kv {
+ display: grid;
+ gap: 4px;
min-width: 0;
}
-
- .class-badge, .field-type, .entry-tag {
- display: inline-block;
- padding: 0.08rem 0.4rem;
- border: 1px solid var(--line);
- background: var(--tag);
- font-size: 0.82rem;
+ .kv-label {
+ color: var(--muted);
+ font-size: 12px;
+ text-transform: uppercase;
+ letter-spacing: 0.05em;
}
-
- .field-type.plottable {
- background: var(--accent-soft);
- border-color: var(--accent);
+ .kv-value {
+ overflow-wrap: anywhere;
}
-
- .tag-strip {
- display: inline-flex;
+ .chip-row, .link-list {
+ display: flex;
flex-wrap: wrap;
- gap: 0.35rem;
+ gap: 8px;
}
-
- .entry-body {
- margin-bottom: 0.9rem;
- min-width: 0;
+ .tag-chip, .kind-chip, .status-chip, .metric-pill, .link-chip {
+ border: 1px solid #24425b;
+ background: rgba(109, 199, 255, 0.06);
+ padding: 4px 8px;
+ font-size: 12px;
+ line-height: 1.2;
}
-
- .rich-text p {
- margin: 0 0 0.55rem;
- overflow-wrap: anywhere;
- word-break: break-word;
- max-width: 100%;
+ .link-chip {
+ display: inline-flex;
+ gap: 8px;
+ align-items: center;
}
-
- .rich-text p:last-child {
- margin-bottom: 0;
+ .kind-chip {
+ color: var(--muted);
+ text-transform: uppercase;
+ letter-spacing: 0.05em;
}
-
- .field-list {
- display: grid;
- grid-template-columns: minmax(12rem, 18rem) minmax(0, 1fr);
- gap: 0.55rem 1rem;
- margin: 0;
+ .status-chip {
+ text-transform: uppercase;
+ letter-spacing: 0.05em;
+ font-weight: 700;
+ }
+ .status-accepted { color: var(--accepted); border-color: rgba(124, 227, 139, 0.35); }
+ .status-kept { color: var(--kept); border-color: rgba(141, 224, 192, 0.35); }
+ .status-parked { color: var(--parked); border-color: rgba(217, 193, 125, 0.35); }
+ .status-rejected { color: var(--rejected); border-color: rgba(238, 122, 122, 0.35); }
+ .status-open { color: var(--accent); border-color: rgba(109, 199, 255, 0.35); }
+ .status-exploring { color: var(--accent); border-color: rgba(109, 199, 255, 0.35); }
+ .status-neutral, .classless { color: #a7c0d4; border-color: #2a4358; }
+ .status-archived { color: #7f8da0; border-color: #2b3540; }
+ .metric-table {
width: 100%;
- min-width: 0;
+ border-collapse: collapse;
+ font-size: 13px;
}
-
- .field-list dt {
+ .metric-table th,
+ .metric-table td {
+ padding: 7px 8px;
+ border-top: 1px solid #1b2d3e;
+ text-align: left;
+ vertical-align: top;
+ }
+ .metric-table th {
+ color: var(--muted);
font-weight: 700;
- display: flex;
- flex-wrap: wrap;
- gap: 0.4rem;
- align-items: center;
- overflow-wrap: anywhere;
- min-width: 0;
+ text-transform: uppercase;
+ letter-spacing: 0.05em;
+ font-size: 12px;
}
-
- .field-list dd {
- margin: 0;
- min-width: 0;
+ .related-block {
+ display: grid;
+ gap: 8px;
}
-
- .json-value {
+ .roadmap-list, .simple-list {
margin: 0;
- padding: 0.6rem 0.7rem;
- background: #f3eee4;
- overflow: auto;
+ padding-left: 18px;
+ display: grid;
+ gap: 6px;
+ }
+ .code-block {
white-space: pre-wrap;
overflow-wrap: anywhere;
+ border: 1px solid #1a2b3c;
+ background: #0b131c;
+ padding: 12px 14px;
}
-
- .boolean.true { color: var(--accent); }
- .boolean.false { color: #8a2f2f; }
- .numeric { font-size: 1rem; }
-
- .diagnostics {
- margin-top: 1rem;
- padding-top: 0.8rem;
- border-top: 1px dashed var(--line);
- }
-
- .diagnostics h4 {
- margin: 0 0 0.4rem;
- font-size: 0.9rem;
- text-transform: lowercase;
- }
-
- .diagnostics ul {
- margin: 0;
- padding-left: 1.1rem;
- }
-
- .diag-severity {
- color: var(--warn);
- font-weight: 700;
+ code {
+ font-family: inherit;
+ font-size: 0.95em;
}
-
- @media (max-width: 900px) {
- .shell {
- grid-template-columns: 1fr;
- }
-
- .rail {
- position: static;
- height: auto;
- border-right: 0;
- border-bottom: 1px solid var(--line);
- padding: 1rem 0.85rem;
- }
-
- .field-list {
- grid-template-columns: minmax(0, 1fr);
- }
-
- .feed {
- padding: 1rem;
- }
-
- .entry, .empty-state {
- padding: 0.85rem 0.9rem;
- }
+ @media (max-width: 720px) {
+ .shell { padding: 12px; }
+ .card, .page-header { padding: 14px; }
+ .subcard, .mini-card { padding: 12px; }
+ .card-grid, .split, .kv-grid { grid-template-columns: 1fr; }
}
"#
}
diff --git a/crates/fidget-spinner-cli/tests/mcp_hardening.rs b/crates/fidget-spinner-cli/tests/mcp_hardening.rs
index 21a3d04..fad4937 100644
--- a/crates/fidget-spinner-cli/tests/mcp_hardening.rs
+++ b/crates/fidget-spinner-cli/tests/mcp_hardening.rs
@@ -1,22 +1,21 @@
use axum as _;
+use clap as _;
+use dirs as _;
use std::fs;
use std::io::{self, BufRead, BufReader, Write};
use std::path::PathBuf;
use std::process::{Child, ChildStdin, ChildStdout, Command, Stdio};
use camino::Utf8PathBuf;
-use clap as _;
-use dirs as _;
use fidget_spinner_core::NonEmptyText;
-use fidget_spinner_store_sqlite::{ListNodesQuery, ProjectStore};
+use fidget_spinner_store_sqlite::ProjectStore;
use libmcp as _;
-use linkify as _;
use maud as _;
+use percent_encoding as _;
use serde as _;
use serde_json::{Value, json};
use time as _;
use tokio as _;
-use uuid as _;
type TestResult<T = ()> = Result<T, Box<dyn std::error::Error>>;
@@ -50,7 +49,6 @@ fn init_project(root: &Utf8PathBuf) -> TestResult {
ProjectStore::init(
root,
must(NonEmptyText::new("mcp test project"), "display name")?,
- must(NonEmptyText::new("local.mcp.test"), "namespace")?,
),
"init project store",
)?;
@@ -68,7 +66,7 @@ struct McpHarness {
}
impl McpHarness {
- fn spawn(project_root: Option<&Utf8PathBuf>, envs: &[(&str, String)]) -> TestResult<Self> {
+ fn spawn(project_root: Option<&Utf8PathBuf>) -> TestResult<Self> {
let mut command = Command::new(binary_path());
let _ = command
.arg("mcp")
@@ -79,9 +77,6 @@ impl McpHarness {
if let Some(project_root) = project_root {
let _ = command.arg("--project").arg(project_root.as_str());
}
- for (key, value) in envs {
- let _ = command.env(key, value);
- }
let mut child = must(command.spawn(), "spawn mcp host")?;
let stdin = must_some(child.stdin.take(), "host stdin")?;
let stdout = BufReader::new(must_some(child.stdout.take(), "host stdout")?);
@@ -137,6 +132,13 @@ impl McpHarness {
}))
}
+ fn call_tool_full(&mut self, id: u64, name: &str, arguments: Value) -> TestResult<Value> {
+ let mut arguments = arguments.as_object().cloned().unwrap_or_default();
+ let _ = arguments.insert("render".to_owned(), json!("json"));
+ let _ = arguments.insert("detail".to_owned(), json!("full"));
+ self.call_tool(id, name, Value::Object(arguments))
+ }
+
fn request(&mut self, message: Value) -> TestResult<Value> {
let encoded = must(serde_json::to_string(&message), "request json")?;
must(writeln!(self.stdin, "{encoded}"), "write request")?;
@@ -168,1401 +170,429 @@ fn tool_content(response: &Value) -> &Value {
&response["result"]["structuredContent"]
}
-fn tool_text(response: &Value) -> Option<&str> {
- response["result"]["content"]
- .as_array()
- .and_then(|content| content.first())
- .and_then(|entry| entry["text"].as_str())
-}
-
-fn fault_message(response: &Value) -> Option<&str> {
+fn tool_error_message(response: &Value) -> Option<&str> {
response["result"]["structuredContent"]["message"].as_str()
}
-#[test]
-fn cold_start_exposes_health_and_telemetry() -> TestResult {
- let project_root = temp_project_root("cold_start")?;
- init_project(&project_root)?;
-
- let mut harness = McpHarness::spawn(None, &[])?;
- let initialize = harness.initialize()?;
- assert_eq!(
- initialize["result"]["protocolVersion"].as_str(),
- Some("2025-11-25")
- );
- harness.notify_initialized()?;
-
- let tools = harness.tools_list()?;
- let tool_count = must_some(tools["result"]["tools"].as_array(), "tools array")?.len();
- assert!(tool_count >= 20);
-
- let health = harness.call_tool(3, "system.health", json!({}))?;
- assert_eq!(tool_content(&health)["ready"].as_bool(), Some(true));
- assert_eq!(tool_content(&health)["bound"].as_bool(), Some(false));
-
- let telemetry = harness.call_tool(4, "system.telemetry", json!({}))?;
- assert!(tool_content(&telemetry)["requests"].as_u64().unwrap_or(0) >= 3);
-
- let skills = harness.call_tool(15, "skill.list", json!({}))?;
- let skill_names = must_some(
- tool_content(&skills)["skills"].as_array(),
- "bundled skills array",
- )?
- .iter()
- .filter_map(|skill| skill["name"].as_str())
- .collect::<Vec<_>>();
- assert!(skill_names.contains(&"fidget-spinner"));
- assert!(skill_names.contains(&"frontier-loop"));
-
- let base_skill = harness.call_tool(16, "skill.show", json!({"name": "fidget-spinner"}))?;
+fn assert_tool_ok(response: &Value) {
assert_eq!(
- tool_content(&base_skill)["name"].as_str(),
- Some("fidget-spinner")
+ response["result"]["isError"].as_bool(),
+ Some(false),
+ "tool response unexpectedly errored: {response:#}"
);
- Ok(())
}
-#[test]
-fn tool_output_defaults_to_porcelain_and_supports_json_render() -> TestResult {
- let project_root = temp_project_root("render_modes")?;
- init_project(&project_root)?;
-
- let mut harness = McpHarness::spawn(None, &[])?;
- let _ = harness.initialize()?;
- harness.notify_initialized()?;
- let bind = harness.bind_project(21, &project_root)?;
- assert_eq!(bind["result"]["isError"].as_bool(), Some(false));
-
- let porcelain = harness.call_tool(22, "project.status", json!({}))?;
- let porcelain_text = must_some(tool_text(&porcelain), "porcelain project.status text")?;
- assert!(porcelain_text.contains("root:"));
- assert!(!porcelain_text.contains("\"project_root\":"));
-
- let health = harness.call_tool(23, "system.health", json!({}))?;
- let health_text = must_some(tool_text(&health), "porcelain system.health text")?;
- assert!(health_text.contains("ready | bound"));
- assert!(health_text.contains("binary:"));
-
- let frontier = harness.call_tool(
- 24,
- "frontier.init",
- json!({
- "label": "render frontier",
- "objective": "exercise porcelain output",
- "contract_title": "render contract",
- "benchmark_suites": ["smoke"],
- "promotion_criteria": ["retain key fields in porcelain"],
- "primary_metric": {
- "key": "score",
- "unit": "count",
- "objective": "maximize"
- }
- }),
- )?;
- assert_eq!(frontier["result"]["isError"].as_bool(), Some(false));
-
- let frontier_list = harness.call_tool(25, "frontier.list", json!({}))?;
- let frontier_text = must_some(tool_text(&frontier_list), "porcelain frontier.list text")?;
- assert!(frontier_text.contains("render frontier"));
- assert!(!frontier_text.contains("root_contract_node_id"));
-
- let json_render = harness.call_tool(26, "project.status", json!({"render": "json"}))?;
- let json_text = must_some(tool_text(&json_render), "json project.status text")?;
- assert!(json_text.contains("\"project_root\":"));
- assert!(json_text.trim_start().starts_with('{'));
-
- let json_full = harness.call_tool(
- 27,
- "project.status",
- json!({"render": "json", "detail": "full"}),
- )?;
- let json_full_text = must_some(tool_text(&json_full), "json full project.status text")?;
- assert!(json_full_text.contains("\"schema\": {"));
- Ok(())
-}
-
-#[test]
-fn safe_request_retries_after_worker_crash() -> TestResult {
- let project_root = temp_project_root("crash_retry")?;
- init_project(&project_root)?;
-
- let mut harness = McpHarness::spawn(
- None,
- &[(
- "FIDGET_SPINNER_MCP_TEST_HOST_CRASH_ONCE_KEY",
- "tools/call:project.status".to_owned(),
- )],
- )?;
- let _ = harness.initialize()?;
- harness.notify_initialized()?;
- let bind = harness.bind_project(3, &project_root)?;
- assert_eq!(bind["result"]["isError"].as_bool(), Some(false));
-
- let response = harness.call_tool(5, "project.status", json!({}))?;
- assert_eq!(response["result"]["isError"].as_bool(), Some(false));
-
- let telemetry = harness.call_tool(6, "system.telemetry", json!({}))?;
- assert_eq!(tool_content(&telemetry)["retries"].as_u64(), Some(1));
+fn assert_tool_error(response: &Value) {
assert_eq!(
- tool_content(&telemetry)["worker_restarts"].as_u64(),
- Some(1)
+ response["result"]["isError"].as_bool(),
+ Some(true),
+ "tool response unexpectedly succeeded: {response:#}"
);
- Ok(())
}
-#[test]
-fn safe_request_retries_after_worker_transient_fault() -> TestResult {
- let project_root = temp_project_root("transient_retry")?;
- init_project(&project_root)?;
- let marker = project_root.join("transient_once.marker");
-
- let mut harness = McpHarness::spawn(
- None,
- &[
- (
- "FIDGET_SPINNER_MCP_TEST_WORKER_TRANSIENT_ONCE_KEY",
- "tools/call:project.status".to_owned(),
- ),
- (
- "FIDGET_SPINNER_MCP_TEST_WORKER_TRANSIENT_ONCE_MARKER",
- marker.to_string(),
- ),
- ],
- )?;
- let _ = harness.initialize()?;
- harness.notify_initialized()?;
- let bind = harness.bind_project(12, &project_root)?;
- assert_eq!(bind["result"]["isError"].as_bool(), Some(false));
-
- let response = harness.call_tool(13, "project.status", json!({}))?;
- assert_eq!(response["result"]["isError"].as_bool(), Some(false));
-
- let telemetry = harness.call_tool(14, "system.telemetry", json!({}))?;
- assert_eq!(tool_content(&telemetry)["retries"].as_u64(), Some(1));
- assert_eq!(
- tool_content(&telemetry)["worker_restarts"].as_u64(),
- Some(1)
- );
- Ok(())
-}
-
-#[test]
-fn side_effecting_request_is_not_replayed_after_worker_crash() -> TestResult {
- let project_root = temp_project_root("no_replay")?;
- init_project(&project_root)?;
-
- let mut harness = McpHarness::spawn(
- None,
- &[(
- "FIDGET_SPINNER_MCP_TEST_HOST_CRASH_ONCE_KEY",
- "tools/call:source.record".to_owned(),
- )],
- )?;
- let _ = harness.initialize()?;
- harness.notify_initialized()?;
- let bind = harness.bind_project(6, &project_root)?;
- assert_eq!(bind["result"]["isError"].as_bool(), Some(false));
-
- let response = harness.call_tool(
- 7,
- "source.record",
- json!({
- "title": "should not duplicate",
- "summary": "dedupe check",
- "body": "host crash before worker execution",
- }),
- )?;
- assert_eq!(response["result"]["isError"].as_bool(), Some(true));
-
- let nodes = harness.call_tool(8, "node.list", json!({}))?;
- assert_eq!(
- must_some(tool_content(&nodes).as_array(), "node list")?.len(),
- 0
- );
-
- let telemetry = harness.call_tool(9, "system.telemetry", json!({}))?;
- assert_eq!(tool_content(&telemetry)["retries"].as_u64(), Some(0));
- Ok(())
+fn tool_names(response: &Value) -> Vec<&str> {
+ response["result"]["tools"]
+ .as_array()
+ .into_iter()
+ .flatten()
+ .filter_map(|tool| tool["name"].as_str())
+ .collect()
}
#[test]
-fn forced_rollout_preserves_initialized_state() -> TestResult {
- let project_root = temp_project_root("rollout")?;
+fn cold_start_exposes_bound_surface_and_new_toolset() -> TestResult {
+ let project_root = temp_project_root("cold_start")?;
init_project(&project_root)?;
- let mut harness = McpHarness::spawn(
- None,
- &[(
- "FIDGET_SPINNER_MCP_TEST_FORCE_ROLLOUT_KEY",
- "tools/call:project.status".to_owned(),
- )],
- )?;
- let _ = harness.initialize()?;
- harness.notify_initialized()?;
- let bind = harness.bind_project(9, &project_root)?;
- assert_eq!(bind["result"]["isError"].as_bool(), Some(false));
-
- let first = harness.call_tool(10, "project.status", json!({}))?;
- assert_eq!(first["result"]["isError"].as_bool(), Some(false));
-
- let second = harness.call_tool(11, "project.status", json!({}))?;
- assert_eq!(second["result"]["isError"].as_bool(), Some(false));
-
- let telemetry = harness.call_tool(12, "system.telemetry", json!({}))?;
- assert_eq!(tool_content(&telemetry)["host_rollouts"].as_u64(), Some(1));
- Ok(())
-}
-
-#[test]
-fn unbound_project_tools_fail_with_bind_hint() -> TestResult {
- let mut harness = McpHarness::spawn(None, &[])?;
- let _ = harness.initialize()?;
- harness.notify_initialized()?;
-
- let response = harness.call_tool(20, "project.status", json!({}))?;
- assert_eq!(response["result"]["isError"].as_bool(), Some(true));
- let message = response["result"]["structuredContent"]["message"].as_str();
- assert!(message.is_some_and(|message| message.contains("project.bind")));
- Ok(())
-}
-
-#[test]
-fn bind_bootstraps_empty_project_root() -> TestResult {
- let project_root = temp_project_root("bind_bootstrap")?;
-
- let mut harness = McpHarness::spawn(None, &[])?;
- let _ = harness.initialize()?;
- harness.notify_initialized()?;
-
- let bind = harness.bind_project(28, &project_root)?;
- assert_eq!(bind["result"]["isError"].as_bool(), Some(false));
- assert_eq!(
- tool_content(&bind)["project_root"].as_str(),
- Some(project_root.as_str())
- );
-
- let status = harness.call_tool(29, "project.status", json!({}))?;
- assert_eq!(status["result"]["isError"].as_bool(), Some(false));
- assert_eq!(
- tool_content(&status)["project_root"].as_str(),
- Some(project_root.as_str())
- );
-
- let store = must(ProjectStore::open(&project_root), "open bootstrapped store")?;
- assert_eq!(store.project_root().as_str(), project_root.as_str());
- Ok(())
-}
-
-#[test]
-fn bind_rejects_nonempty_uninitialized_root() -> TestResult {
- let project_root = temp_project_root("bind_nonempty")?;
- must(
- fs::write(project_root.join("README.txt").as_std_path(), "occupied"),
- "seed nonempty directory",
- )?;
-
- let mut harness = McpHarness::spawn(None, &[])?;
- let _ = harness.initialize()?;
- harness.notify_initialized()?;
-
- let bind = harness.bind_project(30, &project_root)?;
- assert_eq!(bind["result"]["isError"].as_bool(), Some(true));
- Ok(())
-}
-
-#[test]
-fn successful_bind_clears_stale_fault_from_health() -> TestResult {
- let bad_root = temp_project_root("bind_fault_bad")?;
- must(
- fs::write(bad_root.join("README.txt").as_std_path(), "occupied"),
- "seed bad bind root",
- )?;
- let good_root = temp_project_root("bind_fault_good")?;
- init_project(&good_root)?;
-
- let mut harness = McpHarness::spawn(None, &[])?;
- let _ = harness.initialize()?;
- harness.notify_initialized()?;
-
- let failed_bind = harness.bind_project(301, &bad_root)?;
- assert_eq!(failed_bind["result"]["isError"].as_bool(), Some(true));
-
- let failed_health = harness.call_tool(302, "system.health", json!({ "detail": "full" }))?;
- assert_eq!(
- tool_content(&failed_health)["last_fault"]["operation"].as_str(),
- Some("tools/call:project.bind")
- );
-
- let good_bind = harness.bind_project(303, &good_root)?;
- assert_eq!(good_bind["result"]["isError"].as_bool(), Some(false));
-
- let recovered_health = harness.call_tool(304, "system.health", json!({}))?;
- assert_eq!(recovered_health["result"]["isError"].as_bool(), Some(false));
- assert!(tool_content(&recovered_health).get("last_fault").is_none());
- assert!(!must_some(tool_text(&recovered_health), "recovered health text")?.contains("fault:"));
-
- let recovered_health_full =
- harness.call_tool(306, "system.health", json!({ "detail": "full" }))?;
- assert_eq!(
- tool_content(&recovered_health_full)["last_fault"],
- Value::Null,
- );
-
- let recovered_telemetry = harness.call_tool(305, "system.telemetry", json!({}))?;
- assert_eq!(
- recovered_telemetry["result"]["isError"].as_bool(),
- Some(false)
- );
+ let mut harness = McpHarness::spawn(None)?;
+ let initialize = harness.initialize()?;
assert_eq!(
- tool_content(&recovered_telemetry)["errors"].as_u64(),
- Some(1)
+ initialize["result"]["protocolVersion"].as_str(),
+ Some("2025-11-25")
);
- assert!(tool_content(&recovered_telemetry)["last_fault"].is_null());
- Ok(())
-}
-
-#[test]
-fn bind_retargets_writes_to_sibling_project_root() -> TestResult {
- let spinner_root = temp_project_root("spinner_root")?;
- let libgrid_root = temp_project_root("libgrid_root")?;
- init_project(&spinner_root)?;
- init_project(&libgrid_root)?;
- let notes_dir = libgrid_root.join("notes");
- must(
- fs::create_dir_all(notes_dir.as_std_path()),
- "create nested notes dir",
- )?;
-
- let mut harness = McpHarness::spawn(Some(&spinner_root), &[])?;
- let _ = harness.initialize()?;
harness.notify_initialized()?;
- let initial_status = harness.call_tool(31, "project.status", json!({}))?;
- assert_eq!(
- tool_content(&initial_status)["project_root"].as_str(),
- Some(spinner_root.as_str())
- );
+ let tools = harness.tools_list()?;
+ let tool_names = tool_names(&tools);
+ assert!(tool_names.contains(&"frontier.open"));
+ assert!(tool_names.contains(&"hypothesis.record"));
+ assert!(tool_names.contains(&"experiment.close"));
+ assert!(tool_names.contains(&"artifact.record"));
+ assert!(!tool_names.contains(&"node.list"));
+ assert!(!tool_names.contains(&"research.record"));
- let rebind = harness.bind_project(32, &notes_dir)?;
- assert_eq!(rebind["result"]["isError"].as_bool(), Some(false));
- assert_eq!(
- tool_content(&rebind)["project_root"].as_str(),
- Some(libgrid_root.as_str())
- );
+ let health = harness.call_tool(3, "system.health", json!({}))?;
+ assert_tool_ok(&health);
+ assert_eq!(tool_content(&health)["bound"].as_bool(), Some(false));
- let status = harness.call_tool(33, "project.status", json!({}))?;
+ let bind = harness.bind_project(4, &project_root)?;
+ assert_tool_ok(&bind);
assert_eq!(
- tool_content(&status)["project_root"].as_str(),
- Some(libgrid_root.as_str())
+ tool_content(&bind)["display_name"].as_str(),
+ Some("mcp test project")
);
- let note = harness.call_tool(
- 34,
- "note.quick",
- json!({
- "title": "libgrid dogfood note",
- "summary": "rebind summary",
- "body": "rebind should redirect writes",
- "tags": [],
- }),
- )?;
- assert_eq!(note["result"]["isError"].as_bool(), Some(false));
-
- let spinner_store = must(ProjectStore::open(&spinner_root), "open spinner store")?;
- let libgrid_store = must(ProjectStore::open(&libgrid_root), "open libgrid store")?;
- assert_eq!(
- must(
- spinner_store.list_nodes(ListNodesQuery::default()),
- "list spinner nodes after rebind"
- )?
- .len(),
- 0
- );
- assert_eq!(
- must(
- libgrid_store.list_nodes(ListNodesQuery::default()),
- "list libgrid nodes after rebind"
- )?
- .len(),
- 1
- );
+ let rebound_health = harness.call_tool(5, "system.health", json!({}))?;
+ assert_tool_ok(&rebound_health);
+ assert_eq!(tool_content(&rebound_health)["bound"].as_bool(), Some(true));
Ok(())
}
#[test]
-fn tag_registry_drives_note_creation_and_lookup() -> TestResult {
- let project_root = temp_project_root("tag_registry")?;
+fn frontier_open_is_the_grounding_surface_for_live_state() -> TestResult {
+ let project_root = temp_project_root("frontier_open")?;
init_project(&project_root)?;
- let mut harness = McpHarness::spawn(None, &[])?;
+ let mut harness = McpHarness::spawn(Some(&project_root))?;
let _ = harness.initialize()?;
harness.notify_initialized()?;
- let bind = harness.bind_project(40, &project_root)?;
- assert_eq!(bind["result"]["isError"].as_bool(), Some(false));
- let missing_tags = harness.call_tool(
- 41,
- "note.quick",
- json!({
- "title": "untagged",
- "summary": "should fail without explicit tags",
- "body": "should fail",
- }),
- )?;
- assert_eq!(missing_tags["result"]["isError"].as_bool(), Some(true));
-
- let tag = harness.call_tool(
- 42,
+ assert_tool_ok(&harness.call_tool(
+ 10,
"tag.add",
+ json!({"name": "root-conquest", "description": "root work"}),
+ )?);
+ assert_tool_ok(&harness.call_tool(
+ 11,
+ "metric.define",
json!({
- "name": "dogfood/mcp",
- "description": "MCP dogfood observations",
+ "key": "nodes_solved",
+ "unit": "count",
+ "objective": "maximize",
+ "visibility": "canonical",
}),
- )?;
- assert_eq!(tag["result"]["isError"].as_bool(), Some(false));
-
- let tag_list = harness.call_tool(43, "tag.list", json!({}))?;
- let tags = must_some(tool_content(&tag_list).as_array(), "tag list")?;
- assert_eq!(tags.len(), 1);
- assert_eq!(tags[0]["name"].as_str(), Some("dogfood/mcp"));
-
- let note = harness.call_tool(
- 44,
- "note.quick",
+ )?);
+ assert_tool_ok(&harness.call_tool(
+ 12,
+ "run.dimension.define",
+ json!({"key": "instance", "value_type": "string"}),
+ )?);
+ assert_tool_ok(&harness.call_tool(
+ 13,
+ "frontier.create",
+ json!({
+ "label": "LP root frontier",
+ "objective": "Drive root cash-out on braid rails",
+ "slug": "lp-root",
+ }),
+ )?);
+ assert_tool_ok(&harness.call_tool(
+ 14,
+ "hypothesis.record",
+ json!({
+ "frontier": "lp-root",
+ "slug": "node-local-loop",
+ "title": "Node-local logical cut loop",
+ "summary": "Push cut cash-out below root.",
+ "body": "Thread node-local logical cuts through native LP reoptimization so the same intervention can cash out below root on parity rails without corrupting root ownership semantics.",
+ "tags": ["root-conquest"],
+ }),
+ )?);
+ assert_tool_ok(&harness.call_tool(
+ 15,
+ "experiment.open",
json!({
- "title": "tagged note",
- "summary": "tagged lookup summary",
- "body": "tagged lookup should work",
- "tags": ["dogfood/mcp"],
+ "hypothesis": "node-local-loop",
+ "slug": "baseline-20s",
+ "title": "Baseline parity 20s",
+ "summary": "Reference rail.",
+ "tags": ["root-conquest"],
}),
- )?;
- assert_eq!(note["result"]["isError"].as_bool(), Some(false));
-
- let filtered = harness.call_tool(45, "node.list", json!({"tags": ["dogfood/mcp"]}))?;
- let nodes = must_some(tool_content(&filtered).as_array(), "filtered nodes")?;
- assert_eq!(nodes.len(), 1);
- assert_eq!(nodes[0]["tags"][0].as_str(), Some("dogfood/mcp"));
- Ok(())
-}
-
-#[test]
-fn source_record_accepts_tags_and_filtering() -> TestResult {
- let project_root = temp_project_root("research_tags")?;
- init_project(&project_root)?;
-
- let mut harness = McpHarness::spawn(None, &[])?;
- let _ = harness.initialize()?;
- harness.notify_initialized()?;
- let bind = harness.bind_project(451, &project_root)?;
- assert_eq!(bind["result"]["isError"].as_bool(), Some(false));
-
- let tag = harness.call_tool(
- 452,
- "tag.add",
+ )?);
+ assert_tool_ok(&harness.call_tool(
+ 16,
+ "experiment.close",
json!({
- "name": "campaign/libgrid",
- "description": "libgrid migration campaign",
+ "experiment": "baseline-20s",
+ "backend": "manual",
+ "command": {"argv": ["baseline-20s"]},
+ "dimensions": {"instance": "4x5-braid"},
+ "primary_metric": {"key": "nodes_solved", "value": 220.0},
+ "verdict": "kept",
+ "rationale": "Baseline retained as the current comparison line for the slice."
}),
- )?;
- assert_eq!(tag["result"]["isError"].as_bool(), Some(false));
-
- let research = harness.call_tool(
- 453,
- "source.record",
+ )?);
+ assert_tool_ok(&harness.call_tool(
+ 17,
+ "experiment.open",
json!({
- "title": "ingest tranche",
- "summary": "Import the next libgrid tranche.",
- "body": "Full import notes live here.",
- "tags": ["campaign/libgrid"],
+ "hypothesis": "node-local-loop",
+ "slug": "loop-20s",
+ "title": "Loop parity 20s",
+ "summary": "Live challenger.",
+ "tags": ["root-conquest"],
+ "parents": [{"kind": "experiment", "selector": "baseline-20s"}],
}),
- )?;
- assert_eq!(research["result"]["isError"].as_bool(), Some(false));
-
- let filtered = harness.call_tool(454, "node.list", json!({"tags": ["campaign/libgrid"]}))?;
- let nodes = must_some(tool_content(&filtered).as_array(), "filtered source nodes")?;
- assert_eq!(nodes.len(), 1);
- assert_eq!(nodes[0]["class"].as_str(), Some("source"));
- assert_eq!(nodes[0]["tags"][0].as_str(), Some("campaign/libgrid"));
- Ok(())
-}
-
-#[test]
-fn prose_tools_reject_invalid_shapes_over_mcp() -> TestResult {
- let project_root = temp_project_root("prose_invalid")?;
- init_project(&project_root)?;
+ )?);
- let mut harness = McpHarness::spawn(None, &[])?;
- let _ = harness.initialize()?;
- harness.notify_initialized()?;
- let bind = harness.bind_project(46, &project_root)?;
- assert_eq!(bind["result"]["isError"].as_bool(), Some(false));
-
- let missing_note_summary = harness.call_tool(
- 47,
- "note.quick",
- json!({
- "title": "untagged",
- "body": "body only",
- "tags": [],
- }),
- )?;
+ let frontier_open =
+ harness.call_tool_full(18, "frontier.open", json!({"frontier": "lp-root"}))?;
+ assert_tool_ok(&frontier_open);
+ let content = tool_content(&frontier_open);
+ assert_eq!(content["frontier"]["slug"].as_str(), Some("lp-root"));
assert_eq!(
- missing_note_summary["result"]["isError"].as_bool(),
- Some(true)
+ must_some(content["active_tags"].as_array(), "active tags array")?
+ .iter()
+ .filter_map(Value::as_str)
+ .collect::<Vec<_>>(),
+ vec!["root-conquest"]
);
assert!(
- fault_message(&missing_note_summary)
- .is_some_and(|message| message.contains("summary") || message.contains("missing field"))
+ must_some(
+ content["active_metric_keys"].as_array(),
+ "active metric keys array"
+ )?
+ .iter()
+ .any(|metric| metric["key"].as_str() == Some("nodes_solved"))
);
-
- let missing_source_summary = harness.call_tool(
- 48,
- "source.record",
- json!({
- "title": "source only",
- "body": "body only",
- }),
+ let active_hypotheses = must_some(
+ content["active_hypotheses"].as_array(),
+ "active hypotheses array",
)?;
+ assert_eq!(active_hypotheses.len(), 1);
assert_eq!(
- missing_source_summary["result"]["isError"].as_bool(),
- Some(true)
- );
- assert!(
- fault_message(&missing_source_summary)
- .is_some_and(|message| message.contains("summary") || message.contains("missing field"))
+ active_hypotheses[0]["hypothesis"]["slug"].as_str(),
+ Some("node-local-loop")
);
-
- let note_without_body = harness.call_tool(
- 49,
- "node.create",
- json!({
- "class": "note",
- "title": "missing body",
- "summary": "triage layer",
- "tags": [],
- "payload": {},
- }),
- )?;
- assert_eq!(note_without_body["result"]["isError"].as_bool(), Some(true));
- assert!(
- fault_message(&note_without_body)
- .is_some_and(|message| message.contains("payload field `body`"))
- );
-
- let source_without_summary = harness.call_tool(
- 50,
- "node.create",
- json!({
- "class": "source",
- "title": "missing summary",
- "payload": { "body": "full research body" },
- }),
- )?;
assert_eq!(
- source_without_summary["result"]["isError"].as_bool(),
- Some(true)
+ active_hypotheses[0]["latest_closed_experiment"]["slug"].as_str(),
+ Some("baseline-20s")
);
- assert!(
- fault_message(&source_without_summary)
- .is_some_and(|message| message.contains("non-empty summary"))
+ assert_eq!(
+ must_some(
+ content["open_experiments"].as_array(),
+ "open experiments array"
+ )?[0]["slug"]
+ .as_str(),
+ Some("loop-20s")
);
+ assert!(content.get("artifacts").is_none());
+ assert!(active_hypotheses[0]["hypothesis"].get("body").is_none());
Ok(())
}
#[test]
-fn concise_note_reads_do_not_leak_body_text() -> TestResult {
- let project_root = temp_project_root("concise_note_read")?;
+fn hypothesis_body_discipline_is_enforced_over_mcp() -> TestResult {
+ let project_root = temp_project_root("single_paragraph")?;
init_project(&project_root)?;
- let mut harness = McpHarness::spawn(None, &[])?;
+ let mut harness = McpHarness::spawn(Some(&project_root))?;
let _ = harness.initialize()?;
harness.notify_initialized()?;
- let bind = harness.bind_project(50, &project_root)?;
- assert_eq!(bind["result"]["isError"].as_bool(), Some(false));
- let note = harness.call_tool(
- 51,
- "note.quick",
+ assert_tool_ok(&harness.call_tool(
+ 20,
+ "frontier.create",
json!({
- "title": "tagged note",
- "summary": "triage layer",
- "body": "full note body should stay out of concise reads",
- "tags": [],
+ "label": "Import frontier",
+ "objective": "Stress hypothesis discipline",
+ "slug": "discipline",
}),
- )?;
- assert_eq!(note["result"]["isError"].as_bool(), Some(false));
- let node_id = must_some(tool_content(&note)["id"].as_str(), "created note id")?.to_owned();
-
- let concise = harness.call_tool(52, "node.read", json!({ "node_id": node_id }))?;
- let concise_structured = tool_content(&concise);
- assert_eq!(concise_structured["summary"].as_str(), Some("triage layer"));
- assert!(concise_structured["payload_preview"].get("body").is_none());
- assert!(
- !must_some(tool_text(&concise), "concise note.read text")?
- .contains("full note body should stay out of concise reads")
- );
+ )?);
- let full = harness.call_tool(
- 53,
- "node.read",
- json!({ "node_id": node_id, "detail": "full" }),
- )?;
- assert_eq!(
- tool_content(&full)["payload"]["fields"]["body"].as_str(),
- Some("full note body should stay out of concise reads")
- );
- Ok(())
-}
-
-#[test]
-fn concise_prose_reads_only_surface_payload_field_names() -> TestResult {
- let project_root = temp_project_root("concise_prose_field_names")?;
- init_project(&project_root)?;
-
- let mut harness = McpHarness::spawn(None, &[])?;
- let _ = harness.initialize()?;
- harness.notify_initialized()?;
- let bind = harness.bind_project(531, &project_root)?;
- assert_eq!(bind["result"]["isError"].as_bool(), Some(false));
-
- let research = harness.call_tool(
- 532,
- "node.create",
+ let response = harness.call_tool(
+ 21,
+ "hypothesis.record",
json!({
- "class": "source",
- "title": "rich import",
- "summary": "triage layer only",
- "payload": {
- "body": "Body stays out of concise output.",
- "source_excerpt": "This imported excerpt is intentionally long and should never reappear in concise node reads as a value preview.",
- "verbatim_snippet": "Another long snippet that belongs in full payload inspection only, not in triage surfaces."
- }
+ "frontier": "discipline",
+ "title": "Paragraph discipline",
+ "summary": "Should reject multi-paragraph bodies.",
+ "body": "first paragraph\n\nsecond paragraph",
}),
)?;
- assert_eq!(research["result"]["isError"].as_bool(), Some(false));
- let node_id =
- must_some(tool_content(&research)["id"].as_str(), "created source id")?.to_owned();
-
- let concise = harness.call_tool(533, "node.read", json!({ "node_id": node_id }))?;
- let concise_structured = tool_content(&concise);
- assert_eq!(concise_structured["payload_field_count"].as_u64(), Some(2));
- let payload_fields = must_some(
- concise_structured["payload_fields"].as_array(),
- "concise prose payload fields",
- )?;
- assert!(
- payload_fields
- .iter()
- .any(|field| field.as_str() == Some("source_excerpt"))
- );
- assert!(concise_structured.get("payload_preview").is_none());
- let concise_text = must_some(tool_text(&concise), "concise prose read text")?;
- assert!(!concise_text.contains("This imported excerpt is intentionally long"));
- assert!(concise_text.contains("payload fields: source_excerpt, verbatim_snippet"));
+ assert_tool_error(&response);
+ assert!(must_some(tool_error_message(&response), "fault message")?.contains("paragraph"));
Ok(())
}
#[test]
-fn node_list_does_not_enumerate_full_prose_bodies() -> TestResult {
- let project_root = temp_project_root("node_list_no_body_leak")?;
+fn artifact_surface_preserves_reference_only() -> TestResult {
+ let project_root = temp_project_root("artifact_reference")?;
init_project(&project_root)?;
- let mut harness = McpHarness::spawn(None, &[])?;
+ let mut harness = McpHarness::spawn(Some(&project_root))?;
let _ = harness.initialize()?;
harness.notify_initialized()?;
- let bind = harness.bind_project(54, &project_root)?;
- assert_eq!(bind["result"]["isError"].as_bool(), Some(false));
- let note = harness.call_tool(
- 55,
- "note.quick",
+ assert_tool_ok(&harness.call_tool(
+ 30,
+ "frontier.create",
json!({
- "title": "tagged note",
- "summary": "triage summary",
- "body": "full note body should never appear in list-like surfaces",
- "tags": [],
+ "label": "Artifacts frontier",
+ "objective": "Keep dumps out of the token hot path",
+ "slug": "artifacts",
}),
- )?;
- assert_eq!(note["result"]["isError"].as_bool(), Some(false));
-
- let listed = harness.call_tool(56, "node.list", json!({ "class": "note" }))?;
- let listed_rows = must_some(tool_content(&listed).as_array(), "listed note rows")?;
- assert_eq!(listed_rows.len(), 1);
- assert_eq!(listed_rows[0]["summary"].as_str(), Some("triage summary"));
- assert!(listed_rows[0].get("body").is_none());
- assert!(
- !must_some(tool_text(&listed), "node.list text")?
- .contains("full note body should never appear in list-like surfaces")
- );
- Ok(())
-}
-
-#[test]
-fn metric_tools_are_listed_for_discovery() -> TestResult {
- let project_root = temp_project_root("metric_tool_list")?;
- init_project(&project_root)?;
-
- let mut harness = McpHarness::spawn(Some(&project_root), &[])?;
- let _ = harness.initialize()?;
- harness.notify_initialized()?;
- let tools = harness.tools_list()?;
- let names = must_some(tools["result"]["tools"].as_array(), "tool list")?
- .iter()
- .filter_map(|tool| tool["name"].as_str())
- .collect::<Vec<_>>();
- assert!(names.contains(&"metric.define"));
- assert!(names.contains(&"metric.keys"));
- assert!(names.contains(&"metric.best"));
- assert!(names.contains(&"metric.migrate"));
- assert!(names.contains(&"run.dimension.define"));
- assert!(names.contains(&"run.dimension.list"));
- assert!(names.contains(&"schema.field.upsert"));
- assert!(names.contains(&"schema.field.remove"));
- Ok(())
-}
-
-#[test]
-fn schema_field_tools_mutate_project_schema() -> TestResult {
- let project_root = temp_project_root("schema_field_tools")?;
- init_project(&project_root)?;
-
- let mut harness = McpHarness::spawn(Some(&project_root), &[])?;
- let _ = harness.initialize()?;
- harness.notify_initialized()?;
-
- let upsert = harness.call_tool(
- 861,
- "schema.field.upsert",
+ )?);
+ assert_tool_ok(&harness.call_tool(
+ 31,
+ "hypothesis.record",
json!({
- "name": "scenario",
- "node_classes": ["hypothesis", "analysis"],
- "presence": "recommended",
- "severity": "warning",
- "role": "projection_gate",
- "inference_policy": "manual_only",
- "value_type": "string"
+ "frontier": "artifacts",
+ "slug": "sourced-hypothesis",
+ "title": "Sourced hypothesis",
+ "summary": "Attach a large external source by reference only.",
+ "body": "Treat large external writeups as artifact references rather than inline context so the ledger stays scientifically austere.",
}),
- )?;
- assert_eq!(upsert["result"]["isError"].as_bool(), Some(false));
- assert_eq!(
- tool_content(&upsert)["field"]["name"].as_str(),
- Some("scenario")
- );
- assert_eq!(
- tool_content(&upsert)["field"]["node_classes"],
- json!(["hypothesis", "analysis"])
- );
-
- let schema = harness.call_tool(862, "project.schema", json!({ "detail": "full" }))?;
- assert_eq!(schema["result"]["isError"].as_bool(), Some(false));
- let fields = must_some(tool_content(&schema)["fields"].as_array(), "schema fields")?;
- assert!(fields.iter().any(|field| {
- field["name"].as_str() == Some("scenario") && field["value_type"].as_str() == Some("string")
- }));
-
- let remove = harness.call_tool(
- 863,
- "schema.field.remove",
+ )?);
+ assert_tool_ok(&harness.call_tool(
+ 32,
+ "artifact.record",
json!({
- "name": "scenario",
- "node_classes": ["hypothesis", "analysis"]
+ "kind": "document",
+ "slug": "lp-review-doc",
+ "label": "LP review tranche",
+ "summary": "External markdown tranche.",
+ "locator": "/tmp/lp-review.md",
+ "attachments": [{"kind": "hypothesis", "selector": "sourced-hypothesis"}],
}),
- )?;
- assert_eq!(remove["result"]["isError"].as_bool(), Some(false));
- assert_eq!(tool_content(&remove)["removed_count"].as_u64(), Some(1));
-
- let schema_after = harness.call_tool(864, "project.schema", json!({ "detail": "full" }))?;
- let fields_after = must_some(
- tool_content(&schema_after)["fields"].as_array(),
- "schema fields after remove",
- )?;
- assert!(
- !fields_after
- .iter()
- .any(|field| field["name"].as_str() == Some("scenario"))
- );
- Ok(())
-}
-
-#[test]
-fn bind_open_backfills_legacy_missing_summary() -> TestResult {
- let project_root = temp_project_root("bind_backfill")?;
- init_project(&project_root)?;
-
- let node_id = {
- let mut store = must(ProjectStore::open(&project_root), "open project store")?;
- let node = must(
- store.add_node(fidget_spinner_store_sqlite::CreateNodeRequest {
- class: fidget_spinner_core::NodeClass::Source,
- frontier_id: None,
- title: must(NonEmptyText::new("legacy source"), "legacy title")?,
- summary: Some(must(
- NonEmptyText::new("temporary summary"),
- "temporary summary",
- )?),
- tags: None,
- payload: fidget_spinner_core::NodePayload::with_schema(
- store.schema().schema_ref(),
- serde_json::from_value(json!({
- "body": "Derived summary first paragraph.\n\nLonger body follows."
- }))
- .map_err(|error| io::Error::other(format!("payload object: {error}")))?,
- ),
- annotations: Vec::new(),
- attachments: Vec::new(),
- }),
- "create legacy source node",
- )?;
- node.id.to_string()
- };
-
- let database_path = project_root.join(".fidget_spinner").join("state.sqlite");
- let clear_output = must(
- Command::new("sqlite3")
- .current_dir(project_root.as_std_path())
- .arg(database_path.as_str())
- .arg(format!(
- "UPDATE nodes SET summary = NULL WHERE id = '{node_id}';"
- ))
- .output(),
- "spawn sqlite3 for direct summary clear",
- )?;
- if !clear_output.status.success() {
- return Err(io::Error::other(format!(
- "sqlite3 summary clear failed: {}",
- String::from_utf8_lossy(&clear_output.stderr)
- ))
- .into());
- }
-
- let mut harness = McpHarness::spawn(None, &[])?;
- let _ = harness.initialize()?;
- harness.notify_initialized()?;
- let bind = harness.bind_project(60, &project_root)?;
- assert_eq!(bind["result"]["isError"].as_bool(), Some(false));
+ )?);
- let read = harness.call_tool(61, "node.read", json!({ "node_id": node_id }))?;
- assert_eq!(read["result"]["isError"].as_bool(), Some(false));
+ let artifact =
+ harness.call_tool_full(33, "artifact.read", json!({"artifact": "lp-review-doc"}))?;
+ assert_tool_ok(&artifact);
+ let content = tool_content(&artifact);
assert_eq!(
- tool_content(&read)["summary"].as_str(),
- Some("Derived summary first paragraph.")
+ content["record"]["locator"].as_str(),
+ Some("/tmp/lp-review.md")
);
-
- let listed = harness.call_tool(62, "node.list", json!({ "class": "source" }))?;
- let items = must_some(tool_content(&listed).as_array(), "source node list")?;
- assert_eq!(items.len(), 1);
+ assert!(content["record"].get("body").is_none());
assert_eq!(
- items[0]["summary"].as_str(),
- Some("Derived summary first paragraph.")
+ must_some(content["attachments"].as_array(), "artifact attachments")?[0]["kind"].as_str(),
+ Some("hypothesis")
);
Ok(())
}
#[test]
-fn metric_tools_rank_closed_experiments_and_enforce_disambiguation() -> TestResult {
- let project_root = temp_project_root("metric_rank_e2e")?;
+fn experiment_close_drives_metric_best_and_analysis() -> TestResult {
+ let project_root = temp_project_root("metric_best")?;
init_project(&project_root)?;
- let mut harness = McpHarness::spawn(Some(&project_root), &[])?;
+ let mut harness = McpHarness::spawn(Some(&project_root))?;
let _ = harness.initialize()?;
harness.notify_initialized()?;
- let frontier = harness.call_tool(
- 70,
- "frontier.init",
- json!({
- "label": "metric frontier",
- "objective": "exercise metric ranking",
- "contract_title": "metric contract",
- "benchmark_suites": ["smoke"],
- "promotion_criteria": ["rank by one key"],
- "primary_metric": {
- "key": "wall_clock_s",
- "unit": "seconds",
- "objective": "minimize"
- }
- }),
- )?;
- assert_eq!(frontier["result"]["isError"].as_bool(), Some(false));
- let frontier_id = must_some(
- tool_content(&frontier)["frontier_id"].as_str(),
- "frontier id",
- )?
- .to_owned();
- let metric_define = harness.call_tool(
- 701,
+ assert_tool_ok(&harness.call_tool(
+ 40,
"metric.define",
json!({
- "key": "wall_clock_s",
- "unit": "seconds",
- "objective": "minimize",
- "description": "elapsed wall time"
+ "key": "nodes_solved",
+ "unit": "count",
+ "objective": "maximize",
+ "visibility": "canonical",
}),
- )?;
- assert_eq!(metric_define["result"]["isError"].as_bool(), Some(false));
-
- let scenario_dimension = harness.call_tool(
- 702,
- "run.dimension.define",
- json!({
- "key": "scenario",
- "value_type": "string",
- "description": "workload family"
- }),
- )?;
- assert_eq!(
- scenario_dimension["result"]["isError"].as_bool(),
- Some(false)
- );
-
- let duration_dimension = harness.call_tool(
- 703,
+ )?);
+ assert_tool_ok(&harness.call_tool(
+ 41,
"run.dimension.define",
+ json!({"key": "instance", "value_type": "string"}),
+ )?);
+ assert_tool_ok(&harness.call_tool(
+ 42,
+ "frontier.create",
json!({
- "key": "duration_s",
- "value_type": "numeric",
- "description": "time budget in seconds"
- }),
- )?;
- assert_eq!(
- duration_dimension["result"]["isError"].as_bool(),
- Some(false)
- );
-
- let dimensions = harness.call_tool(704, "run.dimension.list", json!({}))?;
- assert_eq!(dimensions["result"]["isError"].as_bool(), Some(false));
- let dimension_rows = must_some(tool_content(&dimensions).as_array(), "run dimension rows")?;
- assert!(dimension_rows.iter().any(|row| {
- row["key"].as_str() == Some("benchmark_suite")
- && row["value_type"].as_str() == Some("string")
- }));
- assert!(dimension_rows.iter().any(|row| {
- row["key"].as_str() == Some("scenario")
- && row["description"].as_str() == Some("workload family")
- }));
- assert!(dimension_rows.iter().any(|row| {
- row["key"].as_str() == Some("duration_s") && row["value_type"].as_str() == Some("numeric")
- }));
-
- let first_change = harness.call_tool(
- 71,
- "node.create",
- json!({
- "class": "hypothesis",
- "frontier_id": frontier_id,
- "title": "first change",
- "summary": "first change summary",
- "payload": {
- "body": "first change body",
- "wall_clock_s": 14.0
- }
- }),
- )?;
- assert_eq!(first_change["result"]["isError"].as_bool(), Some(false));
- let first_change_id = must_some(
- tool_content(&first_change)["id"].as_str(),
- "first change id",
- )?;
- let first_experiment = harness.call_tool(
- 711,
- "experiment.open",
- json!({
- "frontier_id": frontier_id,
- "hypothesis_node_id": first_change_id,
- "title": "first experiment",
- "summary": "first experiment summary"
- }),
- )?;
- assert_eq!(first_experiment["result"]["isError"].as_bool(), Some(false));
- let first_experiment_id = must_some(
- tool_content(&first_experiment)["experiment_id"].as_str(),
- "first experiment id",
- )?;
-
- let first_close = harness.call_tool(
- 72,
- "experiment.close",
- json!({
- "experiment_id": first_experiment_id,
- "run": {
- "title": "first run",
- "summary": "first run summary",
- "backend": "worktree_process",
- "dimensions": {
- "benchmark_suite": "smoke",
- "scenario": "belt_4x5",
- "duration_s": 20.0
- },
- "command": {
- "working_directory": project_root.as_str(),
- "argv": ["true"]
- }
- },
- "primary_metric": {
- "key": "wall_clock_s",
- "value": 10.0
- },
- "note": {
- "summary": "first run note"
- },
- "verdict": "kept",
- "decision_title": "first decision",
- "decision_rationale": "keep first candidate around"
+ "label": "Metric frontier",
+ "objective": "Test best-of ranking",
+ "slug": "metric-frontier",
}),
- )?;
- assert_eq!(first_close["result"]["isError"].as_bool(), Some(false));
-
- let second_change = harness.call_tool(
- 73,
- "node.create",
+ )?);
+ assert_tool_ok(&harness.call_tool(
+ 43,
+ "hypothesis.record",
json!({
- "class": "hypothesis",
- "frontier_id": frontier_id,
- "title": "second change",
- "summary": "second change summary",
- "payload": {
- "body": "second change body",
- "wall_clock_s": 7.0
- }
+ "frontier": "metric-frontier",
+ "slug": "reopt-dominance",
+ "title": "Node reopt dominates native LP spend",
+ "summary": "Track node LP wallclock concentration on braid rails.",
+ "body": "Matched LP site traces indicate native LP spend is dominated by node reoptimization on the braid rails, so the next interventions should target node-local LP churn instead of root-only machinery.",
}),
- )?;
- assert_eq!(second_change["result"]["isError"].as_bool(), Some(false));
- let second_change_id = must_some(
- tool_content(&second_change)["id"].as_str(),
- "second change id",
- )?;
- let second_experiment = harness.call_tool(
- 712,
+ )?);
+ assert_tool_ok(&harness.call_tool(
+ 44,
"experiment.open",
json!({
- "frontier_id": frontier_id,
- "hypothesis_node_id": second_change_id,
- "title": "second experiment",
- "summary": "second experiment summary"
+ "hypothesis": "reopt-dominance",
+ "slug": "trace-baseline",
+ "title": "Trace baseline",
+ "summary": "First matched trace.",
}),
- )?;
- assert_eq!(
- second_experiment["result"]["isError"].as_bool(),
- Some(false)
- );
- let second_experiment_id = must_some(
- tool_content(&second_experiment)["experiment_id"].as_str(),
- "second experiment id",
- )?;
-
- let second_close = harness.call_tool(
- 74,
+ )?);
+ assert_tool_ok(&harness.call_tool(
+ 45,
"experiment.close",
json!({
- "experiment_id": second_experiment_id,
- "run": {
- "title": "second run",
- "summary": "second run summary",
- "backend": "worktree_process",
- "dimensions": {
- "benchmark_suite": "smoke",
- "scenario": "belt_4x5",
- "duration_s": 60.0
- },
- "command": {
- "working_directory": project_root.as_str(),
- "argv": ["true"]
- }
- },
- "primary_metric": {
- "key": "wall_clock_s",
- "value": 5.0
- },
- "note": {
- "summary": "second run note"
- },
+ "experiment": "trace-baseline",
+ "backend": "manual",
+ "command": {"argv": ["trace-baseline"]},
+ "dimensions": {"instance": "4x5-braid"},
+ "primary_metric": {"key": "nodes_solved", "value": 217.0},
"verdict": "kept",
- "decision_title": "second decision",
- "decision_rationale": "second candidate looks stronger"
+ "rationale": "Baseline trace is real but not dominant.",
}),
- )?;
- assert_eq!(second_close["result"]["isError"].as_bool(), Some(false));
-
- let second_frontier = harness.call_tool(
- 80,
- "frontier.init",
- json!({
- "label": "metric frontier two",
- "objective": "exercise frontier filtering",
- "contract_title": "metric contract two",
- "benchmark_suites": ["smoke"],
- "promotion_criteria": ["frontier filters should isolate rankings"],
- "primary_metric": {
- "key": "wall_clock_s",
- "unit": "seconds",
- "objective": "minimize"
- }
- }),
- )?;
- assert_eq!(second_frontier["result"]["isError"].as_bool(), Some(false));
- let second_frontier_id = must_some(
- tool_content(&second_frontier)["frontier_id"].as_str(),
- "second frontier id",
- )?
- .to_owned();
-
- let third_change = harness.call_tool(
- 81,
- "node.create",
- json!({
- "class": "hypothesis",
- "frontier_id": second_frontier_id,
- "title": "third change",
- "summary": "third change summary",
- "payload": {
- "body": "third change body",
- "wall_clock_s": 3.0
- }
- }),
- )?;
- assert_eq!(third_change["result"]["isError"].as_bool(), Some(false));
- let third_change_id = must_some(
- tool_content(&third_change)["id"].as_str(),
- "third change id",
- )?;
- let third_experiment = harness.call_tool(
- 811,
+ )?);
+ assert_tool_ok(&harness.call_tool(
+ 46,
"experiment.open",
json!({
- "frontier_id": second_frontier_id,
- "hypothesis_node_id": third_change_id,
- "title": "third experiment",
- "summary": "third experiment summary"
+ "hypothesis": "reopt-dominance",
+ "slug": "trace-node-reopt",
+ "title": "Trace node reopt",
+ "summary": "Matched LP site traces with node focus.",
+ "parents": [{"kind": "experiment", "selector": "trace-baseline"}],
}),
- )?;
- assert_eq!(third_experiment["result"]["isError"].as_bool(), Some(false));
- let third_experiment_id = must_some(
- tool_content(&third_experiment)["experiment_id"].as_str(),
- "third experiment id",
- )?;
-
- let third_close = harness.call_tool(
- 82,
+ )?);
+ assert_tool_ok(&harness.call_tool(
+ 47,
"experiment.close",
json!({
- "experiment_id": third_experiment_id,
- "run": {
- "title": "third run",
- "summary": "third run summary",
- "backend": "worktree_process",
- "dimensions": {
- "benchmark_suite": "smoke",
- "scenario": "belt_4x5_alt",
- "duration_s": 60.0
- },
- "command": {
- "working_directory": project_root.as_str(),
- "argv": ["true"]
- }
- },
- "primary_metric": {
- "key": "wall_clock_s",
- "value": 3.0
- },
- "note": {
- "summary": "third run note"
- },
- "verdict": "kept",
- "decision_title": "third decision",
- "decision_rationale": "third candidate is best overall but not in the first frontier"
- }),
- )?;
- assert_eq!(third_close["result"]["isError"].as_bool(), Some(false));
-
- let keys = harness.call_tool(75, "metric.keys", json!({}))?;
- assert_eq!(keys["result"]["isError"].as_bool(), Some(false));
- let key_rows = must_some(tool_content(&keys).as_array(), "metric keys array")?;
- assert!(key_rows.iter().any(|row| {
- row["key"].as_str() == Some("wall_clock_s") && row["source"].as_str() == Some("run_metric")
- }));
- assert!(key_rows.iter().any(|row| {
- row["key"].as_str() == Some("wall_clock_s")
- && row["source"].as_str() == Some("run_metric")
- && row["description"].as_str() == Some("elapsed wall time")
- && row["requires_order"].as_bool() == Some(false)
- }));
- assert!(key_rows.iter().any(|row| {
- row["key"].as_str() == Some("wall_clock_s")
- && row["source"].as_str() == Some("hypothesis_payload")
- }));
-
- let filtered_keys = harness.call_tool(
- 750,
- "metric.keys",
- json!({
- "source": "run_metric",
- "dimensions": {
- "scenario": "belt_4x5",
- "duration_s": 60.0
+ "experiment": "trace-node-reopt",
+ "backend": "manual",
+ "command": {"argv": ["matched-lp-site-traces"]},
+ "dimensions": {"instance": "4x5-braid"},
+ "primary_metric": {"key": "nodes_solved", "value": 273.0},
+ "verdict": "accepted",
+ "rationale": "Matched LP site traces show node reoptimization as the dominant sink.",
+ "analysis": {
+ "summary": "Node LP work is now the primary native sink.",
+ "body": "The differential traces isolate node reoptimization as the dominant native LP wallclock site on the matched braid rail, which justifies prioritizing node-local LP control work over further root-only tuning."
}
}),
- )?;
- assert_eq!(filtered_keys["result"]["isError"].as_bool(), Some(false));
- let filtered_key_rows = must_some(
- tool_content(&filtered_keys).as_array(),
- "filtered metric keys array",
- )?;
- assert_eq!(filtered_key_rows.len(), 1);
- assert_eq!(filtered_key_rows[0]["key"].as_str(), Some("wall_clock_s"));
- assert_eq!(filtered_key_rows[0]["experiment_count"].as_u64(), Some(1));
-
- let ambiguous = harness.call_tool(76, "metric.best", json!({ "key": "wall_clock_s" }))?;
- assert_eq!(ambiguous["result"]["isError"].as_bool(), Some(true));
- assert!(
- fault_message(&ambiguous)
- .is_some_and(|message| message.contains("ambiguous across sources"))
- );
-
- let run_metric_best = harness.call_tool(
- 77,
- "metric.best",
- json!({
- "key": "wall_clock_s",
- "source": "run_metric",
- "dimensions": {
- "scenario": "belt_4x5",
- "duration_s": 60.0
- },
- "limit": 5
- }),
- )?;
- assert_eq!(run_metric_best["result"]["isError"].as_bool(), Some(false));
- let run_best_rows = must_some(
- tool_content(&run_metric_best).as_array(),
- "run metric best array",
- )?;
- assert_eq!(run_best_rows[0]["value"].as_f64(), Some(5.0));
- assert_eq!(run_best_rows.len(), 1);
- assert_eq!(
- run_best_rows[0]["experiment_title"].as_str(),
- Some("second experiment")
- );
- assert_eq!(run_best_rows[0]["verdict"].as_str(), Some("kept"));
- assert_eq!(
- run_best_rows[0]["dimensions"]["scenario"].as_str(),
- Some("belt_4x5")
- );
- assert_eq!(
- run_best_rows[0]["dimensions"]["duration_s"].as_f64(),
- Some(60.0)
- );
- assert!(
- must_some(tool_text(&run_metric_best), "run metric best text")?.contains("hypothesis=")
- );
- assert!(must_some(tool_text(&run_metric_best), "run metric best text")?.contains("dims:"));
-
- let payload_requires_order = harness.call_tool(
- 78,
- "metric.best",
- json!({
- "key": "wall_clock_s",
- "source": "hypothesis_payload"
- }),
- )?;
- assert_eq!(
- payload_requires_order["result"]["isError"].as_bool(),
- Some(true)
- );
- assert!(
- fault_message(&payload_requires_order)
- .is_some_and(|message| message.contains("explicit order"))
- );
-
- let payload_best = harness.call_tool(
- 79,
- "metric.best",
- json!({
- "key": "wall_clock_s",
- "source": "hypothesis_payload",
- "dimensions": {
- "scenario": "belt_4x5",
- "duration_s": 60.0
- },
- "order": "asc"
- }),
- )?;
- assert_eq!(payload_best["result"]["isError"].as_bool(), Some(false));
- let payload_best_rows = must_some(
- tool_content(&payload_best).as_array(),
- "payload metric best array",
- )?;
- assert_eq!(payload_best_rows[0]["value"].as_f64(), Some(7.0));
- assert_eq!(payload_best_rows.len(), 1);
- assert_eq!(
- payload_best_rows[0]["experiment_title"].as_str(),
- Some("second experiment")
- );
+ )?);
- let filtered_best = harness.call_tool(
- 83,
+ let best = harness.call_tool_full(
+ 48,
"metric.best",
json!({
- "key": "wall_clock_s",
- "source": "run_metric",
- "frontier_id": frontier_id,
- "dimensions": {
- "scenario": "belt_4x5"
- },
- "limit": 5
+ "frontier": "metric-frontier",
+ "hypothesis": "reopt-dominance",
+ "key": "nodes_solved",
}),
)?;
- assert_eq!(filtered_best["result"]["isError"].as_bool(), Some(false));
- let filtered_rows = must_some(
- tool_content(&filtered_best).as_array(),
- "filtered metric best array",
+ assert_tool_ok(&best);
+ let entries = must_some(
+ tool_content(&best)["entries"].as_array(),
+ "metric best entries",
)?;
- assert_eq!(filtered_rows.len(), 2);
assert_eq!(
- filtered_rows[0]["experiment_title"].as_str(),
- Some("second experiment")
- );
- assert!(
- filtered_rows
- .iter()
- .all(|row| row["frontier_id"].as_str() == Some(frontier_id.as_str()))
+ entries[0]["experiment"]["slug"].as_str(),
+ Some("trace-node-reopt")
);
+ assert_eq!(entries[0]["value"].as_f64(), Some(273.0));
- let global_best = harness.call_tool(
- 84,
- "metric.best",
- json!({
- "key": "wall_clock_s",
- "source": "run_metric",
- "limit": 5
- }),
- )?;
- assert_eq!(global_best["result"]["isError"].as_bool(), Some(false));
- let global_rows = must_some(
- tool_content(&global_best).as_array(),
- "global metric best array",
+ let detail = harness.call_tool_full(
+ 49,
+ "experiment.read",
+ json!({"experiment": "trace-node-reopt"}),
)?;
+ assert_tool_ok(&detail);
+ let content = tool_content(&detail);
assert_eq!(
- global_rows[0]["experiment_title"].as_str(),
- Some("third experiment")
- );
- assert_eq!(
- global_rows[0]["frontier_id"].as_str(),
- Some(second_frontier_id.as_str())
- );
-
- let migrate = harness.call_tool(85, "metric.migrate", json!({}))?;
- assert_eq!(migrate["result"]["isError"].as_bool(), Some(false));
- assert_eq!(
- tool_content(&migrate)["inserted_metric_definitions"].as_u64(),
- Some(0)
- );
- assert_eq!(
- tool_content(&migrate)["inserted_dimension_definitions"].as_u64(),
- Some(0)
+ content["record"]["outcome"]["verdict"].as_str(),
+ Some("accepted")
);
assert_eq!(
- tool_content(&migrate)["inserted_dimension_values"].as_u64(),
- Some(0)
+ content["record"]["outcome"]["analysis"]["summary"].as_str(),
+ Some("Node LP work is now the primary native sink.")
);
Ok(())
}
diff --git a/crates/fidget-spinner-core/Cargo.toml b/crates/fidget-spinner-core/Cargo.toml
index c147ee2..d27163c 100644
--- a/crates/fidget-spinner-core/Cargo.toml
+++ b/crates/fidget-spinner-core/Cargo.toml
@@ -1,7 +1,7 @@
[package]
name = "fidget-spinner-core"
categories.workspace = true
-description = "Core domain model for the Fidget Spinner experimental DAG"
+description = "Core frontier ledger domain model for Fidget Spinner"
edition.workspace = true
keywords.workspace = true
license.workspace = true
diff --git a/crates/fidget-spinner-core/src/error.rs b/crates/fidget-spinner-core/src/error.rs
index eb05ba7..a095f57 100644
--- a/crates/fidget-spinner-core/src/error.rs
+++ b/crates/fidget-spinner-core/src/error.rs
@@ -10,6 +10,12 @@ pub enum CoreError {
"invalid tag name `{0}`; expected lowercase ascii alphanumerics separated by `-`, `_`, or `/`"
)]
InvalidTagName(String),
+ #[error("slug values must not be blank")]
+ EmptySlug,
+ #[error("invalid slug `{0}`; expected lowercase ascii alphanumerics separated by `-` or `_`")]
+ InvalidSlug(String),
+ #[error("slug `{0}` is ambiguous with a UUID selector")]
+ UuidLikeSlug(String),
#[error("command recipes must contain at least one argv element")]
EmptyCommand,
}
diff --git a/crates/fidget-spinner-core/src/id.rs b/crates/fidget-spinner-core/src/id.rs
index 7f696a3..5e22f0e 100644
--- a/crates/fidget-spinner-core/src/id.rs
+++ b/crates/fidget-spinner-core/src/id.rs
@@ -36,10 +36,7 @@ macro_rules! define_id {
};
}
-define_id!(AgentSessionId);
-define_id!(AnnotationId);
define_id!(ArtifactId);
define_id!(ExperimentId);
define_id!(FrontierId);
-define_id!(NodeId);
-define_id!(RunId);
+define_id!(HypothesisId);
diff --git a/crates/fidget-spinner-core/src/lib.rs b/crates/fidget-spinner-core/src/lib.rs
index 1c4108a..903e740 100644
--- a/crates/fidget-spinner-core/src/lib.rs
+++ b/crates/fidget-spinner-core/src/lib.rs
@@ -1,27 +1,21 @@
//! Core domain types for the Fidget Spinner frontier machine.
//!
-//! The product direction is intentionally local-first and agent-first: the DAG
-//! is the canonical truth, while frontier state is a derived operational
-//! projection over that graph. The global spine is intentionally narrow so
-//! projects can carry richer payloads and annotations without fossilizing the
-//! whole system into one universal schema.
+//! Fidget Spinner is intentionally austere. The canonical ledger is a narrow
+//! experimental spine: frontiers scope work, hypotheses and experiments are the
+//! only graph vertices, and bulky context lives off the hot path as artifact
+//! references.
mod error;
mod id;
mod model;
pub use crate::error::CoreError;
-pub use crate::id::{
- AgentSessionId, AnnotationId, ArtifactId, ExperimentId, FrontierId, NodeId, RunId,
-};
+pub use crate::id::{ArtifactId, ExperimentId, FrontierId, HypothesisId};
pub use crate::model::{
- AdmissionState, AnnotationVisibility, ArtifactKind, ArtifactRef, CommandRecipe,
- CompletedExperiment, DagEdge, DagNode, DiagnosticSeverity, EdgeKind, EvaluationProtocol,
- ExecutionBackend, ExperimentResult, FieldPresence, FieldRole, FieldValueType, FrontierContract,
- FrontierNote, FrontierProjection, FrontierRecord, FrontierStatus, FrontierVerdict,
- FrontierVerdictCounts, InferencePolicy, JsonObject, MetricDefinition, MetricObservation,
- MetricSpec, MetricUnit, MetricValue, NodeAnnotation, NodeClass, NodeDiagnostics, NodePayload,
- NodeTrack, NonEmptyText, OpenExperiment, OptimizationObjective, PayloadSchemaRef,
- ProjectFieldSpec, ProjectSchema, RunDimensionDefinition, RunDimensionValue, RunRecord,
- RunStatus, TagName, TagRecord, ValidationDiagnostic,
+ ArtifactKind, ArtifactRecord, AttachmentTargetKind, AttachmentTargetRef, CommandRecipe,
+ ExecutionBackend, ExperimentAnalysis, ExperimentOutcome, ExperimentRecord, ExperimentStatus,
+ FieldValueType, FrontierBrief, FrontierRecord, FrontierRoadmapItem, FrontierStatus,
+ FrontierVerdict, HypothesisRecord, MetricDefinition, MetricUnit, MetricValue, MetricVisibility,
+ NonEmptyText, OptimizationObjective, RunDimensionDefinition, RunDimensionValue, Slug, TagName,
+ TagRecord, VertexKind, VertexRef,
};
diff --git a/crates/fidget-spinner-core/src/model.rs b/crates/fidget-spinner-core/src/model.rs
index 88050a2..cedd882 100644
--- a/crates/fidget-spinner-core/src/model.rs
+++ b/crates/fidget-spinner-core/src/model.rs
@@ -1,15 +1,14 @@
-use std::collections::{BTreeMap, BTreeSet};
+use std::collections::BTreeMap;
use std::fmt::{self, Display, Formatter};
use camino::Utf8PathBuf;
use serde::{Deserialize, Serialize};
-use serde_json::{Map, Value};
+use serde_json::Value;
use time::OffsetDateTime;
use time::format_description::well_known::Rfc3339;
+use uuid::Uuid;
-use crate::{
- AgentSessionId, AnnotationId, ArtifactId, CoreError, ExperimentId, FrontierId, NodeId, RunId,
-};
+use crate::{ArtifactId, CoreError, ExperimentId, FrontierId, HypothesisId};
#[derive(Clone, Debug, Deserialize, Eq, Ord, PartialEq, PartialOrd, Serialize)]
#[serde(transparent)]
@@ -90,203 +89,159 @@ impl Display for TagName {
}
}
-pub type JsonObject = Map<String, Value>;
-
-#[derive(Clone, Copy, Debug, Deserialize, Eq, Ord, PartialEq, PartialOrd, Serialize)]
-pub enum NodeClass {
- Contract,
- Hypothesis,
- Run,
- Analysis,
- Decision,
- Source,
- Note,
-}
+#[derive(Clone, Debug, Eq, Ord, PartialEq, PartialOrd, Serialize, Deserialize)]
+#[serde(try_from = "String", into = "String")]
+pub struct Slug(String);
-impl NodeClass {
- #[must_use]
- pub const fn as_str(self) -> &'static str {
- match self {
- Self::Contract => "contract",
- Self::Hypothesis => "hypothesis",
- Self::Run => "run",
- Self::Analysis => "analysis",
- Self::Decision => "decision",
- Self::Source => "source",
- Self::Note => "note",
+impl Slug {
+ pub fn new(value: impl Into<String>) -> Result<Self, CoreError> {
+ let normalized = value.into().trim().to_ascii_lowercase();
+ if normalized.is_empty() {
+ return Err(CoreError::EmptySlug);
}
- }
-
- #[must_use]
- pub const fn default_track(self) -> NodeTrack {
- match self {
- Self::Contract | Self::Hypothesis | Self::Run | Self::Analysis | Self::Decision => {
- NodeTrack::CorePath
+ if Uuid::parse_str(&normalized).is_ok() {
+ return Err(CoreError::UuidLikeSlug(normalized));
+ }
+ let mut previous_was_separator = true;
+ for character in normalized.chars() {
+ if character.is_ascii_lowercase() || character.is_ascii_digit() {
+ previous_was_separator = false;
+ continue;
+ }
+ if matches!(character, '-' | '_') && !previous_was_separator {
+ previous_was_separator = true;
+ continue;
}
- Self::Source | Self::Note => NodeTrack::OffPath,
+ return Err(CoreError::InvalidSlug(normalized));
}
+ if previous_was_separator {
+ return Err(CoreError::InvalidSlug(normalized));
+ }
+ Ok(Self(normalized))
}
-}
-impl Display for NodeClass {
- fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {
- formatter.write_str(self.as_str())
+ #[must_use]
+ pub fn as_str(&self) -> &str {
+ &self.0
}
}
-#[derive(Clone, Copy, Debug, Deserialize, Eq, Ord, PartialEq, PartialOrd, Serialize)]
-pub enum NodeTrack {
- CorePath,
- OffPath,
-}
+impl TryFrom<String> for Slug {
+ type Error = CoreError;
-#[derive(Clone, Copy, Debug, Deserialize, Eq, Ord, PartialEq, PartialOrd, Serialize)]
-pub enum AnnotationVisibility {
- HiddenByDefault,
- Visible,
+ fn try_from(value: String) -> Result<Self, Self::Error> {
+ Self::new(value)
+ }
}
-#[derive(Clone, Copy, Debug, Deserialize, Eq, Ord, PartialEq, PartialOrd, Serialize)]
-pub enum DiagnosticSeverity {
- Error,
- Warning,
- Info,
+impl From<Slug> for String {
+ fn from(value: Slug) -> Self {
+ value.0
+ }
}
-impl DiagnosticSeverity {
- #[must_use]
- pub const fn as_str(self) -> &'static str {
- match self {
- Self::Error => "error",
- Self::Warning => "warning",
- Self::Info => "info",
- }
+impl Display for Slug {
+ fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {
+ formatter.write_str(&self.0)
}
}
-#[derive(Clone, Copy, Debug, Deserialize, Eq, Ord, PartialEq, PartialOrd, Serialize)]
-pub enum FieldPresence {
- Required,
- Recommended,
- Optional,
+#[derive(Clone, Copy, Debug, Deserialize, Eq, PartialEq, Serialize)]
+#[serde(rename_all = "snake_case")]
+pub enum FrontierStatus {
+ Exploring,
+ Paused,
+ Archived,
}
-impl FieldPresence {
+impl FrontierStatus {
#[must_use]
pub const fn as_str(self) -> &'static str {
match self {
- Self::Required => "required",
- Self::Recommended => "recommended",
- Self::Optional => "optional",
+ Self::Exploring => "exploring",
+ Self::Paused => "paused",
+ Self::Archived => "archived",
}
}
}
#[derive(Clone, Copy, Debug, Deserialize, Eq, Ord, PartialEq, PartialOrd, Serialize)]
-pub enum FieldRole {
- Index,
- ProjectionGate,
- RenderOnly,
- Opaque,
+#[serde(rename_all = "snake_case")]
+pub enum MetricUnit {
+ Seconds,
+ Bytes,
+ Count,
+ Ratio,
+ Custom,
}
-impl FieldRole {
+impl MetricUnit {
#[must_use]
pub const fn as_str(self) -> &'static str {
match self {
- Self::Index => "index",
- Self::ProjectionGate => "projection_gate",
- Self::RenderOnly => "render_only",
- Self::Opaque => "opaque",
+ Self::Seconds => "seconds",
+ Self::Bytes => "bytes",
+ Self::Count => "count",
+ Self::Ratio => "ratio",
+ Self::Custom => "custom",
}
}
}
#[derive(Clone, Copy, Debug, Deserialize, Eq, Ord, PartialEq, PartialOrd, Serialize)]
-pub enum InferencePolicy {
- ManualOnly,
- ModelMayInfer,
+#[serde(rename_all = "snake_case")]
+pub enum OptimizationObjective {
+ Minimize,
+ Maximize,
+ Target,
}
-impl InferencePolicy {
+impl OptimizationObjective {
#[must_use]
pub const fn as_str(self) -> &'static str {
match self {
- Self::ManualOnly => "manual_only",
- Self::ModelMayInfer => "model_may_infer",
+ Self::Minimize => "minimize",
+ Self::Maximize => "maximize",
+ Self::Target => "target",
}
}
}
#[derive(Clone, Copy, Debug, Deserialize, Eq, Ord, PartialEq, PartialOrd, Serialize)]
#[serde(rename_all = "snake_case")]
-pub enum FieldValueType {
- String,
- Numeric,
- Boolean,
- Timestamp,
+pub enum MetricVisibility {
+ Canonical,
+ Minor,
+ Hidden,
+ Archived,
}
-impl FieldValueType {
+impl MetricVisibility {
#[must_use]
- pub const fn is_plottable(self) -> bool {
- matches!(self, Self::Numeric | Self::Timestamp)
- }
-
- #[must_use]
- pub fn accepts(self, value: &Value) -> bool {
+ pub const fn as_str(self) -> &'static str {
match self {
- Self::String => value.is_string(),
- Self::Numeric => value.is_number(),
- Self::Boolean => value.is_boolean(),
- Self::Timestamp => value
- .as_str()
- .is_some_and(|raw| OffsetDateTime::parse(raw, &Rfc3339).is_ok()),
+ Self::Canonical => "canonical",
+ Self::Minor => "minor",
+ Self::Hidden => "hidden",
+ Self::Archived => "archived",
}
}
#[must_use]
- pub const fn as_str(self) -> &'static str {
- match self {
- Self::String => "string",
- Self::Numeric => "numeric",
- Self::Boolean => "boolean",
- Self::Timestamp => "timestamp",
- }
+ pub const fn is_default_visible(self) -> bool {
+ matches!(self, Self::Canonical | Self::Minor)
}
}
-#[derive(Clone, Copy, Debug, Deserialize, Eq, PartialEq, Serialize)]
-pub enum FrontierStatus {
- Exploring,
- Paused,
- Saturated,
- Archived,
-}
-
-#[derive(Clone, Copy, Debug, Deserialize, Eq, Ord, PartialEq, PartialOrd, Serialize)]
-pub enum MetricUnit {
- Seconds,
- Bytes,
- Count,
- Ratio,
- Custom,
-}
-
-#[derive(Clone, Copy, Debug, Deserialize, Eq, Ord, PartialEq, PartialOrd, Serialize)]
-pub enum OptimizationObjective {
- Minimize,
- Maximize,
- Target,
-}
-
#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
pub struct MetricDefinition {
pub key: NonEmptyText,
pub unit: MetricUnit,
pub objective: OptimizationObjective,
+ pub visibility: MetricVisibility,
pub description: Option<NonEmptyText>,
pub created_at: OffsetDateTime,
+ pub updated_at: OffsetDateTime,
}
impl MetricDefinition {
@@ -295,14 +250,51 @@ impl MetricDefinition {
key: NonEmptyText,
unit: MetricUnit,
objective: OptimizationObjective,
+ visibility: MetricVisibility,
description: Option<NonEmptyText>,
) -> Self {
+ let now = OffsetDateTime::now_utc();
Self {
key,
unit,
objective,
+ visibility,
description,
- created_at: OffsetDateTime::now_utc(),
+ created_at: now,
+ updated_at: now,
+ }
+ }
+}
+
+#[derive(Clone, Copy, Debug, Deserialize, Eq, Ord, PartialEq, PartialOrd, Serialize)]
+#[serde(rename_all = "snake_case")]
+pub enum FieldValueType {
+ String,
+ Numeric,
+ Boolean,
+ Timestamp,
+}
+
+impl FieldValueType {
+ #[must_use]
+ pub const fn as_str(self) -> &'static str {
+ match self {
+ Self::String => "string",
+ Self::Numeric => "numeric",
+ Self::Boolean => "boolean",
+ Self::Timestamp => "timestamp",
+ }
+ }
+
+ #[must_use]
+ pub fn accepts(self, value: &Value) -> bool {
+ match self {
+ Self::String => value.is_string(),
+ Self::Numeric => value.is_number(),
+ Self::Boolean => value.is_boolean(),
+ Self::Timestamp => value
+ .as_str()
+ .is_some_and(|raw| OffsetDateTime::parse(raw, &Rfc3339).is_ok()),
}
}
}
@@ -345,6 +337,7 @@ pub struct RunDimensionDefinition {
pub value_type: FieldValueType,
pub description: Option<NonEmptyText>,
pub created_at: OffsetDateTime,
+ pub updated_at: OffsetDateTime,
}
impl RunDimensionDefinition {
@@ -354,38 +347,44 @@ impl RunDimensionDefinition {
value_type: FieldValueType,
description: Option<NonEmptyText>,
) -> Self {
+ let now = OffsetDateTime::now_utc();
Self {
key,
value_type,
description,
- created_at: OffsetDateTime::now_utc(),
+ created_at: now,
+ updated_at: now,
}
}
}
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
pub struct MetricValue {
- #[serde(alias = "metric_key")]
pub key: NonEmptyText,
pub value: f64,
}
#[derive(Clone, Copy, Debug, Deserialize, Eq, PartialEq, Serialize)]
-pub enum RunStatus {
- Queued,
- Running,
- Succeeded,
- Failed,
- Cancelled,
-}
-
-#[derive(Clone, Copy, Debug, Deserialize, Eq, PartialEq, Serialize)]
+#[serde(rename_all = "snake_case")]
pub enum ExecutionBackend {
+ Manual,
LocalProcess,
WorktreeProcess,
SshProcess,
}
+impl ExecutionBackend {
+ #[must_use]
+ pub const fn as_str(self) -> &'static str {
+ match self {
+ Self::Manual => "manual",
+ Self::LocalProcess => "local_process",
+ Self::WorktreeProcess => "worktree_process",
+ Self::SshProcess => "ssh_process",
+ }
+ }
+}
+
#[derive(Clone, Copy, Debug, Deserialize, Eq, PartialEq, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum FrontierVerdict {
@@ -395,65 +394,14 @@ pub enum FrontierVerdict {
Rejected,
}
-#[derive(Clone, Copy, Debug, Deserialize, Eq, PartialEq, Serialize)]
-pub enum AdmissionState {
- Admitted,
- Rejected,
-}
-
-#[derive(Clone, Debug, Deserialize, Eq, Ord, PartialEq, PartialOrd, Serialize)]
-pub struct PayloadSchemaRef {
- pub namespace: NonEmptyText,
- pub version: u32,
-}
-
-#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
-pub struct NodePayload {
- pub schema: Option<PayloadSchemaRef>,
- pub fields: JsonObject,
-}
-
-impl NodePayload {
+impl FrontierVerdict {
#[must_use]
- pub fn empty() -> Self {
- Self {
- schema: None,
- fields: JsonObject::new(),
- }
- }
-
- #[must_use]
- pub fn with_schema(schema: PayloadSchemaRef, fields: JsonObject) -> Self {
- Self {
- schema: Some(schema),
- fields,
- }
- }
-
- #[must_use]
- pub fn field(&self, name: &str) -> Option<&Value> {
- self.fields.get(name)
- }
-}
-
-#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
-pub struct NodeAnnotation {
- pub id: AnnotationId,
- pub visibility: AnnotationVisibility,
- pub label: Option<NonEmptyText>,
- pub body: NonEmptyText,
- pub created_at: OffsetDateTime,
-}
-
-impl NodeAnnotation {
- #[must_use]
- pub fn hidden(body: NonEmptyText) -> Self {
- Self {
- id: AnnotationId::fresh(),
- visibility: AnnotationVisibility::HiddenByDefault,
- label: None,
- body,
- created_at: OffsetDateTime::now_utc(),
+ pub const fn as_str(self) -> &'static str {
+ match self {
+ Self::Accepted => "accepted",
+ Self::Kept => "kept",
+ Self::Parked => "parked",
+ Self::Rejected => "rejected",
}
}
}
@@ -466,490 +414,256 @@ pub struct TagRecord {
}
#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
-pub struct ValidationDiagnostic {
- pub severity: DiagnosticSeverity,
- pub code: String,
- pub message: NonEmptyText,
- pub field_name: Option<String>,
-}
-
-#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
-pub struct NodeDiagnostics {
- pub admission: AdmissionState,
- pub items: Vec<ValidationDiagnostic>,
+pub struct CommandRecipe {
+ #[serde(default)]
+ pub working_directory: Option<Utf8PathBuf>,
+ pub argv: Vec<NonEmptyText>,
+ #[serde(default)]
+ pub env: BTreeMap<String, String>,
}
-impl NodeDiagnostics {
- #[must_use]
- pub const fn admitted() -> Self {
- Self {
- admission: AdmissionState::Admitted,
- items: Vec::new(),
+impl CommandRecipe {
+ pub fn new(
+ working_directory: Option<Utf8PathBuf>,
+ argv: Vec<NonEmptyText>,
+ env: BTreeMap<String, String>,
+ ) -> Result<Self, CoreError> {
+ if argv.is_empty() {
+ return Err(CoreError::EmptyCommand);
}
+ Ok(Self {
+ working_directory,
+ argv,
+ env,
+ })
}
}
#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
-pub struct ProjectFieldSpec {
- pub name: NonEmptyText,
- pub node_classes: BTreeSet<NodeClass>,
- pub presence: FieldPresence,
- pub severity: DiagnosticSeverity,
- pub role: FieldRole,
- pub inference_policy: InferencePolicy,
- #[serde(default)]
- pub value_type: Option<FieldValueType>,
+pub struct FrontierRoadmapItem {
+ pub rank: u32,
+ pub hypothesis_id: HypothesisId,
+ pub summary: Option<NonEmptyText>,
}
-impl ProjectFieldSpec {
- #[must_use]
- pub fn applies_to(&self, class: NodeClass) -> bool {
- self.node_classes.is_empty() || self.node_classes.contains(&class)
- }
-
- #[must_use]
- pub fn is_plottable(&self) -> bool {
- self.value_type.is_some_and(FieldValueType::is_plottable)
- }
+#[derive(Clone, Debug, Default, Deserialize, Eq, PartialEq, Serialize)]
+pub struct FrontierBrief {
+ pub situation: Option<NonEmptyText>,
+ pub roadmap: Vec<FrontierRoadmapItem>,
+ pub unknowns: Vec<NonEmptyText>,
+ pub revision: u64,
+ pub updated_at: Option<OffsetDateTime>,
}
#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
-pub struct ProjectSchema {
- pub namespace: NonEmptyText,
- pub version: u32,
- pub fields: Vec<ProjectFieldSpec>,
+pub struct FrontierRecord {
+ pub id: FrontierId,
+ pub slug: Slug,
+ pub label: NonEmptyText,
+ pub objective: NonEmptyText,
+ pub status: FrontierStatus,
+ pub brief: FrontierBrief,
+ pub revision: u64,
+ pub created_at: OffsetDateTime,
+ pub updated_at: OffsetDateTime,
}
-impl ProjectSchema {
- #[must_use]
- pub fn default_with_namespace(namespace: NonEmptyText) -> Self {
- Self {
- namespace,
- version: 1,
- fields: Vec::new(),
- }
- }
-
- #[must_use]
- pub fn schema_ref(&self) -> PayloadSchemaRef {
- PayloadSchemaRef {
- namespace: self.namespace.clone(),
- version: self.version,
- }
- }
+#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
+pub struct HypothesisRecord {
+ pub id: HypothesisId,
+ pub slug: Slug,
+ pub frontier_id: FrontierId,
+ pub archived: bool,
+ pub title: NonEmptyText,
+ pub summary: NonEmptyText,
+ pub body: NonEmptyText,
+ pub tags: Vec<TagName>,
+ pub revision: u64,
+ pub created_at: OffsetDateTime,
+ pub updated_at: OffsetDateTime,
+}
- #[must_use]
- pub fn field_spec(&self, class: NodeClass, name: &str) -> Option<&ProjectFieldSpec> {
- self.fields
- .iter()
- .find(|field| field.applies_to(class) && field.name.as_str() == name)
- }
+#[derive(Clone, Copy, Debug, Deserialize, Eq, PartialEq, Serialize)]
+#[serde(rename_all = "snake_case")]
+pub enum ExperimentStatus {
+ Open,
+ Closed,
+}
+impl ExperimentStatus {
#[must_use]
- pub fn validate_node(&self, class: NodeClass, payload: &NodePayload) -> NodeDiagnostics {
- let items = self
- .fields
- .iter()
- .filter(|field| field.applies_to(class))
- .filter_map(|field| {
- let value = payload.field(field.name.as_str());
- let is_missing = value.is_none();
- if !is_missing || field.presence == FieldPresence::Optional {
- if let (Some(value), Some(value_type)) = (value, field.value_type)
- && !value_type.accepts(value)
- {
- return Some(ValidationDiagnostic {
- severity: field.severity,
- code: format!("type.{}", field.name.as_str()),
- message: validation_message(format!(
- "project payload field `{}` expected {}, found {}",
- field.name.as_str(),
- value_type.as_str(),
- json_value_kind(value)
- )),
- field_name: Some(field.name.as_str().to_owned()),
- });
- }
- return None;
- }
- Some(ValidationDiagnostic {
- severity: field.severity,
- code: format!("missing.{}", field.name.as_str()),
- message: validation_message(format!(
- "missing project payload field `{}`",
- field.name.as_str()
- )),
- field_name: Some(field.name.as_str().to_owned()),
- })
- })
- .collect();
- NodeDiagnostics {
- admission: AdmissionState::Admitted,
- items,
+ pub const fn as_str(self) -> &'static str {
+ match self {
+ Self::Open => "open",
+ Self::Closed => "closed",
}
}
}
-fn validation_message(value: String) -> NonEmptyText {
- match NonEmptyText::new(value) {
- Ok(message) => message,
- Err(_) => unreachable!("validation diagnostics are never empty"),
- }
+#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
+pub struct ExperimentAnalysis {
+ pub summary: NonEmptyText,
+ pub body: NonEmptyText,
}
-fn json_value_kind(value: &Value) -> &'static str {
- match value {
- Value::Null => "null",
- Value::Bool(_) => "boolean",
- Value::Number(_) => "numeric",
- Value::String(_) => "string",
- Value::Array(_) => "array",
- Value::Object(_) => "object",
- }
+#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
+pub struct ExperimentOutcome {
+ pub backend: ExecutionBackend,
+ pub command: CommandRecipe,
+ pub dimensions: BTreeMap<NonEmptyText, RunDimensionValue>,
+ pub primary_metric: MetricValue,
+ pub supporting_metrics: Vec<MetricValue>,
+ pub verdict: FrontierVerdict,
+ pub rationale: NonEmptyText,
+ pub analysis: Option<ExperimentAnalysis>,
+ pub closed_at: OffsetDateTime,
}
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
-pub struct DagNode {
- pub id: NodeId,
- pub class: NodeClass,
- pub track: NodeTrack,
- pub frontier_id: Option<FrontierId>,
+pub struct ExperimentRecord {
+ pub id: ExperimentId,
+ pub slug: Slug,
+ pub frontier_id: FrontierId,
+ pub hypothesis_id: HypothesisId,
pub archived: bool,
pub title: NonEmptyText,
pub summary: Option<NonEmptyText>,
- pub tags: BTreeSet<TagName>,
- pub payload: NodePayload,
- pub annotations: Vec<NodeAnnotation>,
- pub diagnostics: NodeDiagnostics,
- pub agent_session_id: Option<AgentSessionId>,
+ pub tags: Vec<TagName>,
+ pub status: ExperimentStatus,
+ pub outcome: Option<ExperimentOutcome>,
+ pub revision: u64,
pub created_at: OffsetDateTime,
pub updated_at: OffsetDateTime,
}
-impl DagNode {
- #[must_use]
- pub fn new(
- class: NodeClass,
- frontier_id: Option<FrontierId>,
- title: NonEmptyText,
- summary: Option<NonEmptyText>,
- payload: NodePayload,
- diagnostics: NodeDiagnostics,
- ) -> Self {
- let now = OffsetDateTime::now_utc();
- Self {
- id: NodeId::fresh(),
- class,
- track: class.default_track(),
- frontier_id,
- archived: false,
- title,
- summary,
- tags: BTreeSet::new(),
- payload,
- annotations: Vec::new(),
- diagnostics,
- agent_session_id: None,
- created_at: now,
- updated_at: now,
- }
- }
-
- #[must_use]
- pub fn is_core_path(&self) -> bool {
- self.track == NodeTrack::CorePath
- }
-}
-
-#[derive(Clone, Copy, Debug, Deserialize, Eq, Ord, PartialEq, PartialOrd, Serialize)]
-pub enum EdgeKind {
- Lineage,
- Evidence,
- Comparison,
- Supersedes,
- Annotation,
-}
-
-#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
-pub struct DagEdge {
- pub source_id: NodeId,
- pub target_id: NodeId,
- pub kind: EdgeKind,
-}
-
#[derive(Clone, Copy, Debug, Deserialize, Eq, PartialEq, Serialize)]
+#[serde(rename_all = "snake_case")]
pub enum ArtifactKind {
- Note,
- Patch,
- BenchmarkBundle,
- MetricSeries,
+ Document,
+ Link,
+ Log,
Table,
Plot,
- Log,
+ Dump,
Binary,
- Checkpoint,
+ Other,
+}
+
+impl ArtifactKind {
+ #[must_use]
+ pub const fn as_str(self) -> &'static str {
+ match self {
+ Self::Document => "document",
+ Self::Link => "link",
+ Self::Log => "log",
+ Self::Table => "table",
+ Self::Plot => "plot",
+ Self::Dump => "dump",
+ Self::Binary => "binary",
+ Self::Other => "other",
+ }
+ }
}
#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
-pub struct ArtifactRef {
+pub struct ArtifactRecord {
pub id: ArtifactId,
+ pub slug: Slug,
pub kind: ArtifactKind,
pub label: NonEmptyText,
- pub path: Utf8PathBuf,
+ pub summary: Option<NonEmptyText>,
+ pub locator: NonEmptyText,
pub media_type: Option<NonEmptyText>,
- pub produced_by_run: Option<RunId>,
+ pub revision: u64,
+ pub created_at: OffsetDateTime,
+ pub updated_at: OffsetDateTime,
}
-#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
-pub struct CommandRecipe {
- pub working_directory: Utf8PathBuf,
- pub argv: Vec<NonEmptyText>,
- pub env: BTreeMap<String, String>,
+#[derive(Clone, Copy, Debug, Deserialize, Eq, Ord, PartialEq, PartialOrd, Serialize)]
+#[serde(rename_all = "snake_case")]
+pub enum VertexKind {
+ Hypothesis,
+ Experiment,
}
-impl CommandRecipe {
- pub fn new(
- working_directory: Utf8PathBuf,
- argv: Vec<NonEmptyText>,
- env: BTreeMap<String, String>,
- ) -> Result<Self, CoreError> {
- if argv.is_empty() {
- return Err(CoreError::EmptyCommand);
+impl VertexKind {
+ #[must_use]
+ pub const fn as_str(self) -> &'static str {
+ match self {
+ Self::Hypothesis => "hypothesis",
+ Self::Experiment => "experiment",
}
- Ok(Self {
- working_directory,
- argv,
- env,
- })
}
}
-#[derive(Clone, Debug, Deserialize, Eq, Ord, PartialEq, PartialOrd, Serialize)]
-pub struct MetricSpec {
- pub metric_key: NonEmptyText,
- pub unit: MetricUnit,
- pub objective: OptimizationObjective,
-}
-
-#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
-pub struct EvaluationProtocol {
- pub benchmark_suites: BTreeSet<NonEmptyText>,
- pub primary_metric: MetricSpec,
- pub supporting_metrics: BTreeSet<MetricSpec>,
-}
-
-#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
-pub struct FrontierContract {
- pub objective: NonEmptyText,
- pub evaluation: EvaluationProtocol,
- pub promotion_criteria: Vec<NonEmptyText>,
-}
-
-#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
-pub struct MetricObservation {
- pub metric_key: NonEmptyText,
- pub unit: MetricUnit,
- pub objective: OptimizationObjective,
- pub value: f64,
-}
-
-#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
-pub struct FrontierRecord {
- pub id: FrontierId,
- pub label: NonEmptyText,
- pub root_contract_node_id: NodeId,
- pub status: FrontierStatus,
- pub created_at: OffsetDateTime,
- pub updated_at: OffsetDateTime,
+#[derive(Clone, Copy, Debug, Deserialize, Eq, PartialEq, Serialize)]
+#[serde(tag = "kind", content = "id", rename_all = "snake_case")]
+pub enum VertexRef {
+ Hypothesis(HypothesisId),
+ Experiment(ExperimentId),
}
-impl FrontierRecord {
+impl VertexRef {
#[must_use]
- pub fn new(label: NonEmptyText, root_contract_node_id: NodeId) -> Self {
- Self::with_id(FrontierId::fresh(), label, root_contract_node_id)
+ pub const fn kind(self) -> VertexKind {
+ match self {
+ Self::Hypothesis(_) => VertexKind::Hypothesis,
+ Self::Experiment(_) => VertexKind::Experiment,
+ }
}
#[must_use]
- pub fn with_id(id: FrontierId, label: NonEmptyText, root_contract_node_id: NodeId) -> Self {
- let now = OffsetDateTime::now_utc();
- Self {
- id,
- label,
- root_contract_node_id,
- status: FrontierStatus::Exploring,
- created_at: now,
- updated_at: now,
+ pub fn opaque_id(self) -> String {
+ match self {
+ Self::Hypothesis(id) => id.to_string(),
+ Self::Experiment(id) => id.to_string(),
}
}
}
-#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
-pub struct RunRecord {
- pub node_id: NodeId,
- pub run_id: RunId,
- pub frontier_id: Option<FrontierId>,
- pub status: RunStatus,
- pub backend: ExecutionBackend,
- pub dimensions: BTreeMap<NonEmptyText, RunDimensionValue>,
- pub command: CommandRecipe,
- pub started_at: Option<OffsetDateTime>,
- pub finished_at: Option<OffsetDateTime>,
-}
-
-#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
-pub struct ExperimentResult {
- pub dimensions: BTreeMap<NonEmptyText, RunDimensionValue>,
- pub primary_metric: MetricValue,
- pub supporting_metrics: Vec<MetricValue>,
- pub benchmark_bundle: Option<ArtifactId>,
-}
-
-#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
-pub struct OpenExperiment {
- pub id: ExperimentId,
- pub frontier_id: FrontierId,
- pub hypothesis_node_id: NodeId,
- pub title: NonEmptyText,
- pub summary: Option<NonEmptyText>,
- pub created_at: OffsetDateTime,
+#[derive(Clone, Copy, Debug, Deserialize, Eq, PartialEq, Serialize)]
+#[serde(rename_all = "snake_case")]
+pub enum AttachmentTargetKind {
+ Frontier,
+ Hypothesis,
+ Experiment,
}
-#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
-pub struct FrontierNote {
- pub summary: NonEmptyText,
- pub next_hypotheses: Vec<NonEmptyText>,
+impl AttachmentTargetKind {
+ #[must_use]
+ pub const fn as_str(self) -> &'static str {
+ match self {
+ Self::Frontier => "frontier",
+ Self::Hypothesis => "hypothesis",
+ Self::Experiment => "experiment",
+ }
+ }
}
-#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
-pub struct CompletedExperiment {
- pub id: ExperimentId,
- pub frontier_id: FrontierId,
- pub hypothesis_node_id: NodeId,
- pub run_node_id: NodeId,
- pub run_id: RunId,
- pub analysis_node_id: Option<NodeId>,
- pub decision_node_id: NodeId,
- pub title: NonEmptyText,
- pub summary: Option<NonEmptyText>,
- pub result: ExperimentResult,
- pub note: FrontierNote,
- pub verdict: FrontierVerdict,
- pub created_at: OffsetDateTime,
+#[derive(Clone, Copy, Debug, Deserialize, Eq, PartialEq, Serialize)]
+#[serde(tag = "kind", content = "id", rename_all = "snake_case")]
+pub enum AttachmentTargetRef {
+ Frontier(FrontierId),
+ Hypothesis(HypothesisId),
+ Experiment(ExperimentId),
}
-#[derive(Clone, Debug, Default, Deserialize, Eq, PartialEq, Serialize)]
-pub struct FrontierVerdictCounts {
- pub accepted: u64,
- pub kept: u64,
- pub parked: u64,
- pub rejected: u64,
-}
+impl AttachmentTargetRef {
+ #[must_use]
+ pub const fn kind(self) -> AttachmentTargetKind {
+ match self {
+ Self::Frontier(_) => AttachmentTargetKind::Frontier,
+ Self::Hypothesis(_) => AttachmentTargetKind::Hypothesis,
+ Self::Experiment(_) => AttachmentTargetKind::Experiment,
+ }
+ }
-#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
-pub struct FrontierProjection {
- pub frontier: FrontierRecord,
- pub open_experiment_count: u64,
- pub completed_experiment_count: u64,
- pub verdict_counts: FrontierVerdictCounts,
-}
-
-#[cfg(test)]
-mod tests {
- use std::collections::{BTreeMap, BTreeSet};
-
- use camino::Utf8PathBuf;
- use serde_json::json;
-
- use super::{
- CommandRecipe, DagNode, DiagnosticSeverity, FieldPresence, FieldRole, FieldValueType,
- InferencePolicy, JsonObject, NodeClass, NodePayload, NonEmptyText, ProjectFieldSpec,
- ProjectSchema,
- };
- use crate::CoreError;
-
- #[test]
- fn non_empty_text_rejects_blank_input() {
- let text = NonEmptyText::new(" ");
- assert_eq!(text, Err(CoreError::EmptyText));
- }
-
- #[test]
- fn command_recipe_requires_argv() {
- let recipe = CommandRecipe::new(
- Utf8PathBuf::from("/tmp/worktree"),
- Vec::new(),
- BTreeMap::new(),
- );
- assert_eq!(recipe, Err(CoreError::EmptyCommand));
- }
-
- #[test]
- fn schema_validation_warns_without_rejecting_ingest() -> Result<(), CoreError> {
- let schema = ProjectSchema {
- namespace: NonEmptyText::new("local.libgrid")?,
- version: 1,
- fields: vec![ProjectFieldSpec {
- name: NonEmptyText::new("hypothesis")?,
- node_classes: BTreeSet::from([NodeClass::Hypothesis]),
- presence: FieldPresence::Required,
- severity: DiagnosticSeverity::Warning,
- role: FieldRole::ProjectionGate,
- inference_policy: InferencePolicy::ManualOnly,
- value_type: None,
- }],
- };
- let payload = NodePayload::with_schema(schema.schema_ref(), JsonObject::new());
- let diagnostics = schema.validate_node(NodeClass::Hypothesis, &payload);
-
- assert_eq!(diagnostics.admission, super::AdmissionState::Admitted);
- assert_eq!(diagnostics.items.len(), 1);
- assert_eq!(diagnostics.items[0].severity, DiagnosticSeverity::Warning);
- Ok(())
- }
-
- #[test]
- fn schema_validation_warns_on_type_mismatch() -> Result<(), CoreError> {
- let schema = ProjectSchema {
- namespace: NonEmptyText::new("local.libgrid")?,
- version: 1,
- fields: vec![ProjectFieldSpec {
- name: NonEmptyText::new("improvement")?,
- node_classes: BTreeSet::from([NodeClass::Analysis]),
- presence: FieldPresence::Recommended,
- severity: DiagnosticSeverity::Warning,
- role: FieldRole::RenderOnly,
- inference_policy: InferencePolicy::ManualOnly,
- value_type: Some(FieldValueType::Numeric),
- }],
- };
- let payload = NodePayload::with_schema(
- schema.schema_ref(),
- JsonObject::from_iter([("improvement".to_owned(), json!("not a number"))]),
- );
- let diagnostics = schema.validate_node(NodeClass::Analysis, &payload);
-
- assert_eq!(diagnostics.admission, super::AdmissionState::Admitted);
- assert_eq!(diagnostics.items.len(), 1);
- assert_eq!(diagnostics.items[0].code, "type.improvement");
- Ok(())
- }
-
- #[test]
- fn source_nodes_default_to_off_path() -> Result<(), CoreError> {
- let payload = NodePayload {
- schema: None,
- fields: JsonObject::from_iter([("topic".to_owned(), json!("ideas"))]),
- };
- let node = DagNode::new(
- NodeClass::Source,
- None,
- NonEmptyText::new("feature scouting")?,
- None,
- payload,
- super::NodeDiagnostics::admitted(),
- );
-
- assert!(!node.is_core_path());
- Ok(())
+ #[must_use]
+ pub fn opaque_id(self) -> String {
+ match self {
+ Self::Frontier(id) => id.to_string(),
+ Self::Hypothesis(id) => id.to_string(),
+ Self::Experiment(id) => id.to_string(),
+ }
}
}
diff --git a/crates/fidget-spinner-store-sqlite/Cargo.toml b/crates/fidget-spinner-store-sqlite/Cargo.toml
index 00fd070..01d6f44 100644
--- a/crates/fidget-spinner-store-sqlite/Cargo.toml
+++ b/crates/fidget-spinner-store-sqlite/Cargo.toml
@@ -1,7 +1,7 @@
[package]
name = "fidget-spinner-store-sqlite"
categories.workspace = true
-description = "SQLite-backed per-project store for Fidget Spinner DAG projects"
+description = "SQLite-backed per-project frontier ledger store for Fidget Spinner"
edition.workspace = true
keywords.workspace = true
license.workspace = true
diff --git a/crates/fidget-spinner-store-sqlite/src/lib.rs b/crates/fidget-spinner-store-sqlite/src/lib.rs
index bbe7038..3680471 100644
--- a/crates/fidget-spinner-store-sqlite/src/lib.rs
+++ b/crates/fidget-spinner-store-sqlite/src/lib.rs
@@ -1,23 +1,20 @@
-use std::cmp::Ordering;
use std::collections::{BTreeMap, BTreeSet};
-use std::fmt::Write as _;
use std::fs;
use std::io;
use camino::{Utf8Path, Utf8PathBuf};
use fidget_spinner_core::{
- AnnotationVisibility, CommandRecipe, CompletedExperiment, DagEdge, DagNode, DiagnosticSeverity,
- EdgeKind, ExecutionBackend, ExperimentResult, FieldPresence, FieldRole, FieldValueType,
- FrontierContract, FrontierNote, FrontierProjection, FrontierRecord, FrontierStatus,
- FrontierVerdict, FrontierVerdictCounts, InferencePolicy, JsonObject, MetricDefinition,
- MetricSpec, MetricUnit, MetricValue, NodeAnnotation, NodeClass, NodeDiagnostics, NodePayload,
- NonEmptyText, OpenExperiment, OptimizationObjective, ProjectFieldSpec, ProjectSchema,
- RunDimensionDefinition, RunDimensionValue, RunRecord, RunStatus, TagName, TagRecord,
+ ArtifactId, ArtifactKind, ArtifactRecord, AttachmentTargetRef, CommandRecipe, CoreError,
+ ExecutionBackend, ExperimentAnalysis, ExperimentId, ExperimentOutcome, ExperimentRecord,
+ ExperimentStatus, FieldValueType, FrontierBrief, FrontierId, FrontierRecord,
+ FrontierRoadmapItem, FrontierStatus, FrontierVerdict, HypothesisId, HypothesisRecord,
+ MetricDefinition, MetricUnit, MetricValue, MetricVisibility, NonEmptyText,
+ OptimizationObjective, RunDimensionDefinition, RunDimensionValue, Slug, TagName, TagRecord,
+ VertexRef,
};
-use rusqlite::types::Value as SqlValue;
-use rusqlite::{Connection, OptionalExtension, Transaction, params, params_from_iter};
+use rusqlite::{Connection, OptionalExtension, Transaction, params};
use serde::{Deserialize, Serialize};
-use serde_json::{Value, json};
+use serde_json::Value;
use thiserror::Error;
use time::OffsetDateTime;
use time::format_description::well_known::Rfc3339;
@@ -26,8 +23,7 @@ use uuid::Uuid;
pub const STORE_DIR_NAME: &str = ".fidget_spinner";
pub const STATE_DB_NAME: &str = "state.sqlite";
pub const PROJECT_CONFIG_NAME: &str = "project.json";
-pub const PROJECT_SCHEMA_NAME: &str = "schema.json";
-pub const CURRENT_STORE_FORMAT_VERSION: u32 = 3;
+pub const CURRENT_STORE_FORMAT_VERSION: u32 = 4;
#[derive(Debug, Error)]
pub enum StoreError {
@@ -49,17 +45,11 @@ pub enum StoreError {
#[error("time format failure")]
TimeFormat(#[from] time::error::Format),
#[error("core domain failure")]
- Core(#[from] fidget_spinner_core::CoreError),
+ Core(#[from] CoreError),
#[error("UUID parse failure")]
Uuid(#[from] uuid::Error),
- #[error("node {0} was not found")]
- NodeNotFound(fidget_spinner_core::NodeId),
- #[error("frontier {0} was not found")]
- FrontierNotFound(fidget_spinner_core::FrontierId),
- #[error("experiment {0} was not found")]
- ExperimentNotFound(fidget_spinner_core::ExperimentId),
- #[error("node {0} is not a hypothesis node")]
- NodeNotHypothesis(fidget_spinner_core::NodeId),
+ #[error("{0}")]
+ InvalidInput(String),
#[error(
"project store format {observed} is incompatible with this binary (expected {expected}); reinitialize the store"
)]
@@ -68,50 +58,53 @@ pub enum StoreError {
UnknownTag(TagName),
#[error("tag `{0}` already exists")]
DuplicateTag(TagName),
- #[error("note nodes require an explicit tag list; use an empty list if no tags apply")]
- NoteTagsRequired,
- #[error("{0} nodes require a non-empty summary")]
- ProseSummaryRequired(NodeClass),
- #[error("{0} nodes require a non-empty string payload field `body`")]
- ProseBodyRequired(NodeClass),
#[error("metric `{0}` is not registered")]
UnknownMetricDefinition(NonEmptyText),
- #[error(
- "metric `{key}` conflicts with existing definition ({existing_unit}/{existing_objective} vs {new_unit}/{new_objective})"
- )]
- ConflictingMetricDefinition {
- key: String,
- existing_unit: String,
- existing_objective: String,
- new_unit: String,
- new_objective: String,
- },
+ #[error("metric `{0}` already exists")]
+ DuplicateMetricDefinition(NonEmptyText),
#[error("run dimension `{0}` is not registered")]
UnknownRunDimension(NonEmptyText),
#[error("run dimension `{0}` already exists")]
DuplicateRunDimension(NonEmptyText),
+ #[error("frontier selector `{0}` did not resolve")]
+ UnknownFrontierSelector(String),
+ #[error("hypothesis selector `{0}` did not resolve")]
+ UnknownHypothesisSelector(String),
+ #[error("experiment selector `{0}` did not resolve")]
+ UnknownExperimentSelector(String),
+ #[error("artifact selector `{0}` did not resolve")]
+ UnknownArtifactSelector(String),
#[error(
- "run dimension `{key}` conflicts with existing definition ({existing_type} vs {new_type})"
+ "entity revision mismatch for {kind} `{selector}`: expected {expected}, observed {observed}"
)]
- ConflictingRunDimensionDefinition {
- key: String,
- existing_type: String,
- new_type: String,
+ RevisionMismatch {
+ kind: &'static str,
+ selector: String,
+ expected: u64,
+ observed: u64,
},
- #[error("run dimension `{key}` expects {expected} values, got {observed}")]
- InvalidRunDimensionValue {
- key: String,
- expected: String,
- observed: String,
- },
- #[error("schema field `{0}` was not found")]
- SchemaFieldNotFound(String),
- #[error("metric key `{key}` is ambiguous across sources: {sources}")]
- AmbiguousMetricKey { key: String, sources: String },
- #[error("metric key `{key}` for source `{metric_source}` requires an explicit order")]
- MetricOrderRequired { key: String, metric_source: String },
- #[error("metric key `{key}` for source `{metric_source}` has conflicting semantics")]
- MetricSemanticsAmbiguous { key: String, metric_source: String },
+ #[error("hypothesis body must be exactly one paragraph")]
+ HypothesisBodyMustBeSingleParagraph,
+ #[error("experiments must hang off exactly one hypothesis")]
+ ExperimentHypothesisRequired,
+ #[error("experiment `{0}` is already closed")]
+ ExperimentAlreadyClosed(ExperimentId),
+ #[error("experiment `{0}` is still open")]
+ ExperimentStillOpen(ExperimentId),
+ #[error("influence edge crosses frontier scope")]
+ CrossFrontierInfluence,
+ #[error("self edges are not allowed")]
+ SelfEdge,
+ #[error("unknown roadmap hypothesis `{0}`")]
+ UnknownRoadmapHypothesis(String),
+ #[error(
+ "manual experiments may omit command context only by using an empty argv surrogate explicitly"
+ )]
+ ManualExperimentRequiresCommand,
+ #[error("metric key `{key}` requires an explicit ranking order")]
+ MetricOrderRequired { key: String },
+ #[error("dimension filter references unknown run dimension `{0}`")]
+ UnknownDimensionFilter(String),
}
#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
@@ -132,181 +125,294 @@ impl ProjectConfig {
}
}
-#[derive(Clone, Debug)]
-pub struct CreateNodeRequest {
- pub class: NodeClass,
- pub frontier_id: Option<fidget_spinner_core::FrontierId>,
- pub title: NonEmptyText,
- pub summary: Option<NonEmptyText>,
- pub tags: Option<BTreeSet<TagName>>,
- pub payload: NodePayload,
- pub annotations: Vec<NodeAnnotation>,
- pub attachments: Vec<EdgeAttachment>,
+#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
+pub struct ProjectStatus {
+ pub project_root: Utf8PathBuf,
+ pub display_name: NonEmptyText,
+ pub store_format_version: u32,
+ pub frontier_count: u64,
+ pub hypothesis_count: u64,
+ pub experiment_count: u64,
+ pub open_experiment_count: u64,
+ pub artifact_count: u64,
}
#[derive(Clone, Copy, Debug, Deserialize, Eq, PartialEq, Serialize)]
-pub enum EdgeAttachmentDirection {
- ExistingToNew,
- NewToExisting,
+#[serde(rename_all = "snake_case")]
+pub enum MetricScope {
+ Live,
+ Visible,
+ All,
+}
+
+#[derive(Clone, Copy, Debug, Deserialize, Eq, PartialEq, Serialize)]
+#[serde(rename_all = "snake_case")]
+pub enum MetricRankOrder {
+ Asc,
+ Desc,
}
#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
-pub struct EdgeAttachment {
- pub node_id: fidget_spinner_core::NodeId,
- pub kind: EdgeKind,
- pub direction: EdgeAttachmentDirection,
+#[serde(tag = "kind", content = "selector", rename_all = "snake_case")]
+pub enum VertexSelector {
+ Hypothesis(String),
+ Experiment(String),
}
-impl EdgeAttachment {
- #[must_use]
- pub fn materialize(&self, new_node_id: fidget_spinner_core::NodeId) -> DagEdge {
- match self.direction {
- EdgeAttachmentDirection::ExistingToNew => DagEdge {
- source_id: self.node_id,
- target_id: new_node_id,
- kind: self.kind,
- },
- EdgeAttachmentDirection::NewToExisting => DagEdge {
- source_id: new_node_id,
- target_id: self.node_id,
- kind: self.kind,
- },
- }
- }
+#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
+#[serde(tag = "kind", content = "selector", rename_all = "snake_case")]
+pub enum AttachmentSelector {
+ Frontier(String),
+ Hypothesis(String),
+ Experiment(String),
+}
+
+#[derive(Clone, Debug)]
+pub struct CreateFrontierRequest {
+ pub label: NonEmptyText,
+ pub objective: NonEmptyText,
+ pub slug: Option<Slug>,
+}
+
+#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
+pub struct FrontierSummary {
+ pub id: FrontierId,
+ pub slug: Slug,
+ pub label: NonEmptyText,
+ pub objective: NonEmptyText,
+ pub status: FrontierStatus,
+ pub active_hypothesis_count: u64,
+ pub open_experiment_count: u64,
+ pub updated_at: OffsetDateTime,
+}
+
+#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
+pub struct FrontierRoadmapItemDraft {
+ pub rank: u32,
+ pub hypothesis: String,
+ pub summary: Option<NonEmptyText>,
+}
+
+#[derive(Clone, Debug)]
+pub enum TextPatch<T> {
+ Set(T),
+ Clear,
+}
+
+#[derive(Clone, Debug)]
+pub struct UpdateFrontierBriefRequest {
+ pub frontier: String,
+ pub expected_revision: Option<u64>,
+ pub situation: Option<TextPatch<NonEmptyText>>,
+ pub roadmap: Option<Vec<FrontierRoadmapItemDraft>>,
+ pub unknowns: Option<Vec<NonEmptyText>>,
}
#[derive(Clone, Debug)]
-pub struct ListNodesQuery {
- pub frontier_id: Option<fidget_spinner_core::FrontierId>,
- pub class: Option<NodeClass>,
+pub struct CreateHypothesisRequest {
+ pub frontier: String,
+ pub slug: Option<Slug>,
+ pub title: NonEmptyText,
+ pub summary: NonEmptyText,
+ pub body: NonEmptyText,
pub tags: BTreeSet<TagName>,
- pub include_archived: bool,
- pub limit: u32,
+ pub parents: Vec<VertexSelector>,
}
-impl Default for ListNodesQuery {
- fn default() -> Self {
- Self {
- frontier_id: None,
- class: None,
- tags: BTreeSet::new(),
- include_archived: false,
- limit: 20,
- }
- }
+#[derive(Clone, Debug)]
+pub struct UpdateHypothesisRequest {
+ pub hypothesis: String,
+ pub expected_revision: Option<u64>,
+ pub title: Option<NonEmptyText>,
+ pub summary: Option<NonEmptyText>,
+ pub body: Option<NonEmptyText>,
+ pub tags: Option<BTreeSet<TagName>>,
+ pub parents: Option<Vec<VertexSelector>>,
+ pub archived: Option<bool>,
+}
+
+#[derive(Clone, Debug, Default)]
+pub struct ListHypothesesQuery {
+ pub frontier: Option<String>,
+ pub tags: BTreeSet<TagName>,
+ pub include_archived: bool,
+ pub limit: Option<u32>,
}
#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
-pub struct NodeSummary {
- pub id: fidget_spinner_core::NodeId,
- pub class: NodeClass,
- pub track: fidget_spinner_core::NodeTrack,
- pub frontier_id: Option<fidget_spinner_core::FrontierId>,
+pub struct VertexSummary {
+ pub vertex: VertexRef,
+ pub frontier_id: FrontierId,
+ pub slug: Slug,
pub archived: bool,
pub title: NonEmptyText,
pub summary: Option<NonEmptyText>,
- pub tags: BTreeSet<TagName>,
- pub diagnostic_count: u64,
- pub hidden_annotation_count: u64,
- pub created_at: OffsetDateTime,
pub updated_at: OffsetDateTime,
}
-#[derive(Clone, Copy, Debug, Deserialize, Eq, Ord, PartialEq, PartialOrd, Serialize)]
-#[serde(rename_all = "snake_case")]
-pub enum MetricFieldSource {
- RunMetric,
- HypothesisPayload,
- RunPayload,
- AnalysisPayload,
- DecisionPayload,
+#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
+pub struct HypothesisSummary {
+ pub id: HypothesisId,
+ pub slug: Slug,
+ pub frontier_id: FrontierId,
+ pub archived: bool,
+ pub title: NonEmptyText,
+ pub summary: NonEmptyText,
+ pub tags: Vec<TagName>,
+ pub open_experiment_count: u64,
+ pub latest_verdict: Option<FrontierVerdict>,
+ pub updated_at: OffsetDateTime,
}
-impl MetricFieldSource {
- #[must_use]
- pub const fn as_str(self) -> &'static str {
- match self {
- Self::RunMetric => "run_metric",
- Self::HypothesisPayload => "hypothesis_payload",
- Self::RunPayload => "run_payload",
- Self::AnalysisPayload => "analysis_payload",
- Self::DecisionPayload => "decision_payload",
- }
- }
+#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
+pub struct HypothesisDetail {
+ pub record: HypothesisRecord,
+ pub parents: Vec<VertexSummary>,
+ pub children: Vec<VertexSummary>,
+ pub open_experiments: Vec<ExperimentSummary>,
+ pub closed_experiments: Vec<ExperimentSummary>,
+ pub artifacts: Vec<ArtifactSummary>,
+}
- #[must_use]
- pub const fn from_payload_class(class: NodeClass) -> Option<Self> {
- match class {
- NodeClass::Hypothesis => Some(Self::HypothesisPayload),
- NodeClass::Run => Some(Self::RunPayload),
- NodeClass::Analysis => Some(Self::AnalysisPayload),
- NodeClass::Decision => Some(Self::DecisionPayload),
- NodeClass::Contract | NodeClass::Source | NodeClass::Note => None,
- }
- }
+#[derive(Clone, Debug)]
+pub struct OpenExperimentRequest {
+ pub hypothesis: String,
+ pub slug: Option<Slug>,
+ pub title: NonEmptyText,
+ pub summary: Option<NonEmptyText>,
+ pub tags: BTreeSet<TagName>,
+ pub parents: Vec<VertexSelector>,
}
-#[derive(Clone, Copy, Debug, Deserialize, Eq, Ord, PartialEq, PartialOrd, Serialize)]
-#[serde(rename_all = "snake_case")]
-pub enum MetricRankOrder {
- Asc,
- Desc,
+#[derive(Clone, Debug)]
+pub struct UpdateExperimentRequest {
+ pub experiment: String,
+ pub expected_revision: Option<u64>,
+ pub title: Option<NonEmptyText>,
+ pub summary: Option<TextPatch<NonEmptyText>>,
+ pub tags: Option<BTreeSet<TagName>>,
+ pub parents: Option<Vec<VertexSelector>>,
+ pub archived: Option<bool>,
+ pub outcome: Option<ExperimentOutcomePatch>,
}
-impl MetricRankOrder {
- #[must_use]
- pub const fn as_str(self) -> &'static str {
- match self {
- Self::Asc => "asc",
- Self::Desc => "desc",
- }
- }
+#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
+pub struct ExperimentOutcomePatch {
+ pub backend: ExecutionBackend,
+ pub command: CommandRecipe,
+ pub dimensions: BTreeMap<NonEmptyText, RunDimensionValue>,
+ pub primary_metric: MetricValue,
+ pub supporting_metrics: Vec<MetricValue>,
+ pub verdict: FrontierVerdict,
+ pub rationale: NonEmptyText,
+ pub analysis: Option<ExperimentAnalysis>,
}
#[derive(Clone, Debug)]
-pub struct MetricBestQuery {
- pub key: NonEmptyText,
- pub frontier_id: Option<fidget_spinner_core::FrontierId>,
- pub source: Option<MetricFieldSource>,
+pub struct CloseExperimentRequest {
+ pub experiment: String,
+ pub expected_revision: Option<u64>,
+ pub backend: ExecutionBackend,
+ pub command: CommandRecipe,
pub dimensions: BTreeMap<NonEmptyText, RunDimensionValue>,
- pub order: Option<MetricRankOrder>,
- pub limit: u32,
+ pub primary_metric: MetricValue,
+ pub supporting_metrics: Vec<MetricValue>,
+ pub verdict: FrontierVerdict,
+ pub rationale: NonEmptyText,
+ pub analysis: Option<ExperimentAnalysis>,
}
-#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
-pub struct MetricKeySummary {
- pub key: NonEmptyText,
- pub source: MetricFieldSource,
- pub experiment_count: u64,
- pub unit: Option<MetricUnit>,
- pub objective: Option<OptimizationObjective>,
- pub description: Option<NonEmptyText>,
- pub requires_order: bool,
+#[derive(Clone, Debug, Default)]
+pub struct ListExperimentsQuery {
+ pub frontier: Option<String>,
+ pub hypothesis: Option<String>,
+ pub tags: BTreeSet<TagName>,
+ pub include_archived: bool,
+ pub status: Option<ExperimentStatus>,
+ pub limit: Option<u32>,
}
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
-pub struct MetricBestEntry {
+pub struct MetricObservationSummary {
pub key: NonEmptyText,
- pub source: MetricFieldSource,
pub value: f64,
- pub order: MetricRankOrder,
- pub experiment_id: fidget_spinner_core::ExperimentId,
- pub experiment_title: NonEmptyText,
- pub frontier_id: fidget_spinner_core::FrontierId,
- pub hypothesis_node_id: fidget_spinner_core::NodeId,
- pub hypothesis_title: NonEmptyText,
- pub run_id: fidget_spinner_core::RunId,
- pub verdict: FrontierVerdict,
- pub unit: Option<MetricUnit>,
- pub objective: Option<OptimizationObjective>,
- pub dimensions: BTreeMap<NonEmptyText, RunDimensionValue>,
+ pub unit: MetricUnit,
+ pub objective: OptimizationObjective,
+}
+
+#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
+pub struct ExperimentSummary {
+ pub id: ExperimentId,
+ pub slug: Slug,
+ pub frontier_id: FrontierId,
+ pub hypothesis_id: HypothesisId,
+ pub archived: bool,
+ pub title: NonEmptyText,
+ pub summary: Option<NonEmptyText>,
+ pub tags: Vec<TagName>,
+ pub status: ExperimentStatus,
+ pub verdict: Option<FrontierVerdict>,
+ pub primary_metric: Option<MetricObservationSummary>,
+ pub updated_at: OffsetDateTime,
+ pub closed_at: Option<OffsetDateTime>,
+}
+
+#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
+pub struct ExperimentDetail {
+ pub record: ExperimentRecord,
+ pub owning_hypothesis: HypothesisSummary,
+ pub parents: Vec<VertexSummary>,
+ pub children: Vec<VertexSummary>,
+ pub artifacts: Vec<ArtifactSummary>,
+}
+
+#[derive(Clone, Debug)]
+pub struct CreateArtifactRequest {
+ pub slug: Option<Slug>,
+ pub kind: ArtifactKind,
+ pub label: NonEmptyText,
+ pub summary: Option<NonEmptyText>,
+ pub locator: NonEmptyText,
+ pub media_type: Option<NonEmptyText>,
+ pub attachments: Vec<AttachmentSelector>,
+}
+
+#[derive(Clone, Debug)]
+pub struct UpdateArtifactRequest {
+ pub artifact: String,
+ pub expected_revision: Option<u64>,
+ pub kind: Option<ArtifactKind>,
+ pub label: Option<NonEmptyText>,
+ pub summary: Option<TextPatch<NonEmptyText>>,
+ pub locator: Option<NonEmptyText>,
+ pub media_type: Option<TextPatch<NonEmptyText>>,
+ pub attachments: Option<Vec<AttachmentSelector>>,
}
#[derive(Clone, Debug, Default)]
-pub struct MetricKeyQuery {
- pub frontier_id: Option<fidget_spinner_core::FrontierId>,
- pub source: Option<MetricFieldSource>,
- pub dimensions: BTreeMap<NonEmptyText, RunDimensionValue>,
+pub struct ListArtifactsQuery {
+ pub frontier: Option<String>,
+ pub kind: Option<ArtifactKind>,
+ pub attached_to: Option<AttachmentSelector>,
+ pub limit: Option<u32>,
+}
+
+#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
+pub struct ArtifactSummary {
+ pub id: ArtifactId,
+ pub slug: Slug,
+ pub kind: ArtifactKind,
+ pub label: NonEmptyText,
+ pub summary: Option<NonEmptyText>,
+ pub locator: NonEmptyText,
+ pub media_type: Option<NonEmptyText>,
+ pub updated_at: OffsetDateTime,
+}
+
+#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
+pub struct ArtifactDetail {
+ pub record: ArtifactRecord,
+ pub attachments: Vec<AttachmentTargetRef>,
}
#[derive(Clone, Debug)]
@@ -314,6 +420,7 @@ pub struct DefineMetricRequest {
pub key: NonEmptyText,
pub unit: MetricUnit,
pub objective: OptimizationObjective,
+ pub visibility: MetricVisibility,
pub description: Option<NonEmptyText>,
}
@@ -325,154 +432,131 @@ pub struct DefineRunDimensionRequest {
}
#[derive(Clone, Debug)]
-pub struct UpsertSchemaFieldRequest {
- pub name: NonEmptyText,
- pub node_classes: BTreeSet<NodeClass>,
- pub presence: FieldPresence,
- pub severity: DiagnosticSeverity,
- pub role: FieldRole,
- pub inference_policy: InferencePolicy,
- pub value_type: Option<FieldValueType>,
-}
-
-#[derive(Clone, Debug)]
-pub struct RemoveSchemaFieldRequest {
- pub name: NonEmptyText,
- pub node_classes: Option<BTreeSet<NodeClass>>,
+pub struct MetricKeysQuery {
+ pub frontier: Option<String>,
+ pub scope: MetricScope,
}
#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
-pub struct RunDimensionSummary {
+pub struct MetricKeySummary {
pub key: NonEmptyText,
- pub value_type: FieldValueType,
+ pub unit: MetricUnit,
+ pub objective: OptimizationObjective,
+ pub visibility: MetricVisibility,
pub description: Option<NonEmptyText>,
- pub observed_run_count: u64,
- pub distinct_value_count: u64,
- pub sample_values: Vec<Value>,
-}
-
-#[derive(Clone, Debug, Default, Deserialize, Eq, PartialEq, Serialize)]
-pub struct MetricPlaneMigrationReport {
- pub inserted_metric_definitions: u64,
- pub inserted_dimension_definitions: u64,
- pub inserted_dimension_values: u64,
+ pub reference_count: u64,
}
#[derive(Clone, Debug)]
-pub struct CreateFrontierRequest {
- pub label: NonEmptyText,
- pub contract_title: NonEmptyText,
- pub contract_summary: Option<NonEmptyText>,
- pub contract: FrontierContract,
+pub struct MetricBestQuery {
+ pub frontier: Option<String>,
+ pub hypothesis: Option<String>,
+ pub key: NonEmptyText,
+ pub dimensions: BTreeMap<NonEmptyText, RunDimensionValue>,
+ pub include_rejected: bool,
+ pub limit: Option<u32>,
+ pub order: Option<MetricRankOrder>,
}
-#[derive(Clone, Debug)]
-pub struct OpenExperimentRequest {
- pub frontier_id: fidget_spinner_core::FrontierId,
- pub hypothesis_node_id: fidget_spinner_core::NodeId,
- pub title: NonEmptyText,
- pub summary: Option<NonEmptyText>,
+#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
+pub struct MetricBestEntry {
+ pub experiment: ExperimentSummary,
+ pub hypothesis: HypothesisSummary,
+ pub value: f64,
+ pub dimensions: BTreeMap<NonEmptyText, RunDimensionValue>,
}
#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
-pub struct OpenExperimentSummary {
- pub id: fidget_spinner_core::ExperimentId,
- pub frontier_id: fidget_spinner_core::FrontierId,
- pub hypothesis_node_id: fidget_spinner_core::NodeId,
- pub title: NonEmptyText,
- pub summary: Option<NonEmptyText>,
- pub created_at: OffsetDateTime,
+pub struct EntityHistoryEntry {
+ pub revision: u64,
+ pub event_kind: NonEmptyText,
+ pub occurred_at: OffsetDateTime,
+ pub snapshot: Value,
}
-#[derive(Clone, Debug)]
-pub struct ExperimentAnalysisDraft {
- pub title: NonEmptyText,
- pub summary: NonEmptyText,
- pub body: NonEmptyText,
-}
-
-#[derive(Clone, Debug)]
-pub struct CloseExperimentRequest {
- pub experiment_id: fidget_spinner_core::ExperimentId,
- pub run_title: NonEmptyText,
- pub run_summary: Option<NonEmptyText>,
- pub backend: ExecutionBackend,
- pub dimensions: BTreeMap<NonEmptyText, RunDimensionValue>,
- pub command: CommandRecipe,
- pub primary_metric: MetricValue,
- pub supporting_metrics: Vec<MetricValue>,
- pub note: FrontierNote,
- pub verdict: FrontierVerdict,
- pub analysis: Option<ExperimentAnalysisDraft>,
- pub decision_title: NonEmptyText,
- pub decision_rationale: NonEmptyText,
+#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
+pub struct HypothesisCurrentState {
+ pub hypothesis: HypothesisSummary,
+ pub open_experiments: Vec<ExperimentSummary>,
+ pub latest_closed_experiment: Option<ExperimentSummary>,
}
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
-pub struct ExperimentReceipt {
- pub open_experiment: OpenExperiment,
- pub run_node: DagNode,
- pub run: RunRecord,
- pub analysis_node: Option<DagNode>,
- pub decision_node: DagNode,
- pub experiment: CompletedExperiment,
+pub struct FrontierOpenProjection {
+ pub frontier: FrontierRecord,
+ pub active_tags: Vec<TagName>,
+ pub active_metric_keys: Vec<MetricKeySummary>,
+ pub active_hypotheses: Vec<HypothesisCurrentState>,
+ pub open_experiments: Vec<ExperimentSummary>,
}
pub struct ProjectStore {
project_root: Utf8PathBuf,
state_root: Utf8PathBuf,
- connection: Connection,
config: ProjectConfig,
- schema: ProjectSchema,
+ connection: Connection,
}
impl ProjectStore {
pub fn init(
project_root: impl AsRef<Utf8Path>,
display_name: NonEmptyText,
- schema_namespace: NonEmptyText,
) -> Result<Self, StoreError> {
let project_root = project_root.as_ref().to_path_buf();
+ fs::create_dir_all(project_root.as_std_path())?;
let state_root = state_root(&project_root);
- fs::create_dir_all(state_root.join("blobs"))?;
+ fs::create_dir_all(state_root.as_std_path())?;
let config = ProjectConfig::new(display_name);
write_json_file(&state_root.join(PROJECT_CONFIG_NAME), &config)?;
- let schema = ProjectSchema::default_with_namespace(schema_namespace);
- write_json_file(&state_root.join(PROJECT_SCHEMA_NAME), &schema)?;
- let mut connection = Connection::open(state_root.join(STATE_DB_NAME).as_std_path())?;
- upgrade_store(&mut connection)?;
+ let database_path = state_root.join(STATE_DB_NAME);
+ let connection = Connection::open(database_path.as_std_path())?;
+ connection.pragma_update(None, "foreign_keys", 1_i64)?;
+ connection.pragma_update(
+ None,
+ "user_version",
+ i64::from(CURRENT_STORE_FORMAT_VERSION),
+ )?;
+ install_schema(&connection)?;
Ok(Self {
project_root,
state_root,
- connection,
config,
- schema,
+ connection,
})
}
pub fn open(project_root: impl AsRef<Utf8Path>) -> Result<Self, StoreError> {
- let requested_root = project_root.as_ref().to_path_buf();
- let project_root = discover_project_root(&requested_root)
- .ok_or(StoreError::MissingProjectStore(requested_root))?;
+ let project_root = project_root.as_ref().to_path_buf();
let state_root = state_root(&project_root);
- let config = read_json_file::<ProjectConfig>(&state_root.join(PROJECT_CONFIG_NAME))?;
+ if !state_root.exists() {
+ return Err(StoreError::MissingProjectStore(project_root));
+ }
+ let config: ProjectConfig = read_json_file(&state_root.join(PROJECT_CONFIG_NAME))?;
if config.store_format_version != CURRENT_STORE_FORMAT_VERSION {
return Err(StoreError::IncompatibleStoreFormatVersion {
observed: config.store_format_version,
expected: CURRENT_STORE_FORMAT_VERSION,
});
}
- let schema = read_json_file::<ProjectSchema>(&state_root.join(PROJECT_SCHEMA_NAME))?;
- let mut connection = Connection::open(state_root.join(STATE_DB_NAME).as_std_path())?;
- upgrade_store(&mut connection)?;
+ let database_path = state_root.join(STATE_DB_NAME);
+ let connection = Connection::open(database_path.as_std_path())?;
+ connection.pragma_update(None, "foreign_keys", 1_i64)?;
+ let observed_version: i64 =
+ connection.pragma_query_value(None, "user_version", |row| row.get(0))?;
+ if u32::try_from(observed_version).ok() != Some(CURRENT_STORE_FORMAT_VERSION) {
+ return Err(StoreError::IncompatibleStoreFormatVersion {
+ observed: u32::try_from(observed_version).unwrap_or(0),
+ expected: CURRENT_STORE_FORMAT_VERSION,
+ });
+ }
+
Ok(Self {
project_root,
state_root,
- connection,
config,
- schema,
+ connection,
})
}
@@ -482,64 +566,6 @@ impl ProjectStore {
}
#[must_use]
- pub fn schema(&self) -> &ProjectSchema {
- &self.schema
- }
-
- pub fn upsert_schema_field(
- &mut self,
- request: UpsertSchemaFieldRequest,
- ) -> Result<ProjectFieldSpec, StoreError> {
- let field = ProjectFieldSpec {
- name: request.name,
- node_classes: request.node_classes,
- presence: request.presence,
- severity: request.severity,
- role: request.role,
- inference_policy: request.inference_policy,
- value_type: request.value_type,
- };
- if let Some(existing) = self.schema.fields.iter_mut().find(|existing| {
- existing.name == field.name && existing.node_classes == field.node_classes
- }) {
- if *existing == field {
- return Ok(field);
- }
- *existing = field.clone();
- } else {
- self.schema.fields.push(field.clone());
- }
- sort_schema_fields(&mut self.schema.fields);
- self.bump_schema_version();
- self.save_schema()?;
- Ok(field)
- }
-
- pub fn remove_schema_field(
- &mut self,
- request: RemoveSchemaFieldRequest,
- ) -> Result<u64, StoreError> {
- let before = self.schema.fields.len();
- self.schema.fields.retain(|field| {
- field.name != request.name
- || request
- .node_classes
- .as_ref()
- .is_some_and(|node_classes| field.node_classes != *node_classes)
- });
- let removed = before.saturating_sub(self.schema.fields.len()) as u64;
- if removed == 0 {
- return Err(StoreError::SchemaFieldNotFound(
- request.name.as_str().to_owned(),
- ));
- }
- sort_schema_fields(&mut self.schema.fields);
- self.bump_schema_version();
- self.save_schema()?;
- Ok(removed)
- }
-
- #[must_use]
pub fn project_root(&self) -> &Utf8Path {
&self.project_root
}
@@ -549,3634 +575,2745 @@ impl ProjectStore {
&self.state_root
}
- fn bump_schema_version(&mut self) {
- self.schema.version = self.schema.version.saturating_add(1);
- }
-
- fn save_schema(&self) -> Result<(), StoreError> {
- write_json_file(&self.state_root.join(PROJECT_SCHEMA_NAME), &self.schema)
+ pub fn status(&self) -> Result<ProjectStatus, StoreError> {
+ Ok(ProjectStatus {
+ project_root: self.project_root.clone(),
+ display_name: self.config.display_name.clone(),
+ store_format_version: self.config.store_format_version,
+ frontier_count: count_rows(&self.connection, "frontiers")?,
+ hypothesis_count: count_rows(&self.connection, "hypotheses")?,
+ experiment_count: count_rows(&self.connection, "experiments")?,
+ open_experiment_count: count_rows_where(
+ &self.connection,
+ "experiments",
+ "status = 'open'",
+ )?,
+ artifact_count: count_rows(&self.connection, "artifacts")?,
+ })
}
- pub fn create_frontier(
+ pub fn register_tag(
&mut self,
- request: CreateFrontierRequest,
- ) -> Result<FrontierProjection, StoreError> {
- let frontier_id = fidget_spinner_core::FrontierId::fresh();
- let payload = NodePayload::with_schema(
- self.schema.schema_ref(),
- frontier_contract_payload(&request.contract)?,
- );
- let diagnostics = self.schema.validate_node(NodeClass::Contract, &payload);
- let contract_node = DagNode::new(
- NodeClass::Contract,
- Some(frontier_id),
- request.contract_title,
- request.contract_summary,
- payload,
- diagnostics,
- );
- let frontier = FrontierRecord::with_id(frontier_id, request.label, contract_node.id);
-
- let tx = self.connection.transaction()?;
- let _ = upsert_metric_definition_tx(
- &tx,
- &MetricDefinition::new(
- request
- .contract
- .evaluation
- .primary_metric
- .metric_key
- .clone(),
- request.contract.evaluation.primary_metric.unit,
- request.contract.evaluation.primary_metric.objective,
- None,
- ),
- )?;
- for metric in &request.contract.evaluation.supporting_metrics {
- let _ = upsert_metric_definition_tx(
- &tx,
- &MetricDefinition::new(
- metric.metric_key.clone(),
- metric.unit,
- metric.objective,
- None,
- ),
- )?;
+ name: TagName,
+ description: NonEmptyText,
+ ) -> Result<TagRecord, StoreError> {
+ if self
+ .connection
+ .query_row(
+ "SELECT 1 FROM tags WHERE name = ?1",
+ params![name.as_str()],
+ |_| Ok(()),
+ )
+ .optional()?
+ .is_some()
+ {
+ return Err(StoreError::DuplicateTag(name));
}
- insert_node(&tx, &contract_node)?;
- insert_frontier(&tx, &frontier)?;
- insert_event(
- &tx,
- "frontier",
- &frontier.id.to_string(),
- "frontier.created",
- json!({"root_contract_node_id": contract_node.id}),
+ let created_at = OffsetDateTime::now_utc();
+ let _ = self.connection.execute(
+ "INSERT INTO tags (name, description, created_at) VALUES (?1, ?2, ?3)",
+ params![
+ name.as_str(),
+ description.as_str(),
+ encode_timestamp(created_at)?
+ ],
)?;
- tx.commit()?;
+ Ok(TagRecord {
+ name,
+ description,
+ created_at,
+ })
+ }
- self.frontier_projection(frontier.id)
+ pub fn list_tags(&self) -> Result<Vec<TagRecord>, StoreError> {
+ let mut statement = self
+ .connection
+ .prepare("SELECT name, description, created_at FROM tags ORDER BY name ASC")?;
+ let rows = statement.query_map([], |row| {
+ Ok(TagRecord {
+ name: parse_tag_name(&row.get::<_, String>(0)?)?,
+ description: parse_non_empty_text(&row.get::<_, String>(1)?)?,
+ created_at: parse_timestamp_sql(&row.get::<_, String>(2)?)?,
+ })
+ })?;
+ rows.collect::<Result<Vec<_>, _>>()
+ .map_err(StoreError::from)
}
pub fn define_metric(
&mut self,
request: DefineMetricRequest,
) -> Result<MetricDefinition, StoreError> {
+ if self.metric_definition(&request.key)?.is_some() {
+ return Err(StoreError::DuplicateMetricDefinition(request.key));
+ }
let record = MetricDefinition::new(
request.key,
request.unit,
request.objective,
+ request.visibility,
request.description,
);
- let tx = self.connection.transaction()?;
- let _ = upsert_metric_definition_tx(&tx, &record)?;
- tx.commit()?;
+ let _ = self.connection.execute(
+ "INSERT INTO metric_definitions (key, unit, objective, visibility, description, created_at, updated_at)
+ VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)",
+ params![
+ record.key.as_str(),
+ record.unit.as_str(),
+ record.objective.as_str(),
+ record.visibility.as_str(),
+ record.description.as_ref().map(NonEmptyText::as_str),
+ encode_timestamp(record.created_at)?,
+ encode_timestamp(record.updated_at)?,
+ ],
+ )?;
Ok(record)
}
pub fn list_metric_definitions(&self) -> Result<Vec<MetricDefinition>, StoreError> {
let mut statement = self.connection.prepare(
- "SELECT metric_key, unit, objective, description, created_at
+ "SELECT key, unit, objective, visibility, description, created_at, updated_at
FROM metric_definitions
- ORDER BY metric_key ASC",
+ ORDER BY key ASC",
)?;
- let mut rows = statement.query([])?;
- let mut items = Vec::new();
- while let Some(row) = rows.next()? {
- items.push(MetricDefinition {
- key: NonEmptyText::new(row.get::<_, String>(0)?)?,
- unit: decode_metric_unit(&row.get::<_, String>(1)?)?,
- objective: decode_optimization_objective(&row.get::<_, String>(2)?)?,
- description: row
- .get::<_, Option<String>>(3)?
- .map(NonEmptyText::new)
- .transpose()?,
- created_at: decode_timestamp(&row.get::<_, String>(4)?)?,
- });
- }
- Ok(items)
+ let rows = statement.query_map([], decode_metric_definition_row)?;
+ rows.collect::<Result<Vec<_>, _>>()
+ .map_err(StoreError::from)
}
pub fn define_run_dimension(
&mut self,
request: DefineRunDimensionRequest,
) -> Result<RunDimensionDefinition, StoreError> {
+ if self.run_dimension_definition(&request.key)?.is_some() {
+ return Err(StoreError::DuplicateRunDimension(request.key));
+ }
let record =
RunDimensionDefinition::new(request.key, request.value_type, request.description);
- let tx = self.connection.transaction()?;
- let _ = insert_run_dimension_definition_tx(&tx, &record)?;
- tx.commit()?;
+ let _ = self.connection.execute(
+ "INSERT INTO run_dimension_definitions (key, value_type, description, created_at, updated_at)
+ VALUES (?1, ?2, ?3, ?4, ?5)",
+ params![
+ record.key.as_str(),
+ record.value_type.as_str(),
+ record.description.as_ref().map(NonEmptyText::as_str),
+ encode_timestamp(record.created_at)?,
+ encode_timestamp(record.updated_at)?,
+ ],
+ )?;
Ok(record)
}
- pub fn list_run_dimensions(&self) -> Result<Vec<RunDimensionSummary>, StoreError> {
- load_run_dimension_summaries(self)
- }
-
- pub fn coerce_run_dimensions(
- &self,
- raw_dimensions: BTreeMap<String, Value>,
- ) -> Result<BTreeMap<NonEmptyText, RunDimensionValue>, StoreError> {
- coerce_run_dimension_map(&run_dimension_definitions_by_key(self)?, raw_dimensions)
- }
-
- pub fn migrate_metric_plane(&mut self) -> Result<MetricPlaneMigrationReport, StoreError> {
- let tx = self.connection.transaction()?;
- let report = normalize_metric_plane_tx(&tx)?;
- tx.commit()?;
- Ok(report)
+ pub fn list_run_dimensions(&self) -> Result<Vec<RunDimensionDefinition>, StoreError> {
+ let mut statement = self.connection.prepare(
+ "SELECT key, value_type, description, created_at, updated_at
+ FROM run_dimension_definitions
+ ORDER BY key ASC",
+ )?;
+ let rows = statement.query_map([], decode_run_dimension_definition_row)?;
+ rows.collect::<Result<Vec<_>, _>>()
+ .map_err(StoreError::from)
}
- pub fn add_tag(
+ pub fn create_frontier(
&mut self,
- name: TagName,
- description: NonEmptyText,
- ) -> Result<TagRecord, StoreError> {
- let record = TagRecord {
- name,
- description,
- created_at: OffsetDateTime::now_utc(),
+ request: CreateFrontierRequest,
+ ) -> Result<FrontierRecord, StoreError> {
+ let id = FrontierId::fresh();
+ let slug = self.unique_frontier_slug(request.slug, &request.label)?;
+ let now = OffsetDateTime::now_utc();
+ let record = FrontierRecord {
+ id,
+ slug,
+ label: request.label,
+ objective: request.objective,
+ status: FrontierStatus::Exploring,
+ brief: FrontierBrief::default(),
+ revision: 1,
+ created_at: now,
+ updated_at: now,
};
- let tx = self.connection.transaction()?;
- insert_tag(&tx, &record)?;
- insert_event(
- &tx,
- "tag",
- record.name.as_str(),
- "tag.created",
- json!({"description": record.description.as_str()}),
+ let transaction = self.connection.transaction()?;
+ insert_frontier(&transaction, &record)?;
+ record_event(
+ &transaction,
+ "frontier",
+ &record.id.to_string(),
+ 1,
+ "created",
+ &record,
)?;
- tx.commit()?;
+ transaction.commit()?;
Ok(record)
}
- pub fn list_tags(&self) -> Result<Vec<TagRecord>, StoreError> {
+ pub fn list_frontiers(&self) -> Result<Vec<FrontierSummary>, StoreError> {
let mut statement = self.connection.prepare(
- "SELECT name, description, created_at
- FROM tags
- ORDER BY name ASC",
+ "SELECT id, slug, label, objective, status, brief_json, revision, created_at, updated_at
+ FROM frontiers
+ ORDER BY updated_at DESC, created_at DESC",
)?;
- let mut rows = statement.query([])?;
- let mut items = Vec::new();
- while let Some(row) = rows.next()? {
- items.push(TagRecord {
- name: TagName::new(row.get::<_, String>(0)?)?,
- description: NonEmptyText::new(row.get::<_, String>(1)?)?,
- created_at: decode_timestamp(&row.get::<_, String>(2)?)?,
- });
- }
- Ok(items)
+ let rows = statement.query_map([], decode_frontier_row)?;
+ rows.collect::<Result<Vec<_>, _>>()
+ .map_err(StoreError::from)?
+ .into_iter()
+ .map(|record| {
+ Ok(FrontierSummary {
+ active_hypothesis_count: self.active_hypothesis_count(record.id)?,
+ open_experiment_count: self.open_experiment_count(Some(record.id))?,
+ id: record.id,
+ slug: record.slug,
+ label: record.label,
+ objective: record.objective,
+ status: record.status,
+ updated_at: record.updated_at,
+ })
+ })
+ .collect()
}
- pub fn add_node(&mut self, request: CreateNodeRequest) -> Result<DagNode, StoreError> {
- validate_prose_node_request(&request)?;
- let diagnostics = self.schema.validate_node(request.class, &request.payload);
- let mut node = DagNode::new(
- request.class,
- request.frontier_id,
- request.title,
- request.summary,
- request.payload,
- diagnostics,
- );
- node.tags = match (request.class, request.tags) {
- (NodeClass::Note, Some(tags)) => tags,
- (NodeClass::Note, None) => return Err(StoreError::NoteTagsRequired),
- (_, Some(tags)) => tags,
- (_, None) => BTreeSet::new(),
- };
- node.annotations = request.annotations;
+ pub fn read_frontier(&self, selector: &str) -> Result<FrontierRecord, StoreError> {
+ self.resolve_frontier(selector)
+ }
- let tx = self.connection.transaction()?;
- ensure_known_tags(&tx, &node.tags)?;
- insert_node(&tx, &node)?;
- for attachment in &request.attachments {
- insert_edge(&tx, &attachment.materialize(node.id))?;
- }
- insert_event(
- &tx,
- "node",
- &node.id.to_string(),
- "node.created",
- json!({"class": node.class.as_str(), "frontier_id": node.frontier_id}),
+ pub fn update_frontier_brief(
+ &mut self,
+ request: UpdateFrontierBriefRequest,
+ ) -> Result<FrontierRecord, StoreError> {
+ let frontier = self.resolve_frontier(&request.frontier)?;
+ enforce_revision(
+ "frontier",
+ &request.frontier,
+ request.expected_revision,
+ frontier.revision,
+ )?;
+ let now = OffsetDateTime::now_utc();
+ let brief = FrontierBrief {
+ situation: apply_optional_text_patch(
+ request.situation,
+ frontier.brief.situation.clone(),
+ ),
+ roadmap: match request.roadmap {
+ Some(items) => items
+ .into_iter()
+ .map(|item| {
+ Ok(FrontierRoadmapItem {
+ rank: item.rank,
+ hypothesis_id: self.resolve_hypothesis(&item.hypothesis)?.id,
+ summary: item.summary,
+ })
+ })
+ .collect::<Result<Vec<_>, StoreError>>()?,
+ None => frontier.brief.roadmap.clone(),
+ },
+ unknowns: request.unknowns.unwrap_or(frontier.brief.unknowns.clone()),
+ revision: frontier.brief.revision.saturating_add(1),
+ updated_at: Some(now),
+ };
+ let updated = FrontierRecord {
+ brief,
+ revision: frontier.revision.saturating_add(1),
+ updated_at: now,
+ ..frontier
+ };
+ let transaction = self.connection.transaction()?;
+ update_frontier(&transaction, &updated)?;
+ record_event(
+ &transaction,
+ "frontier",
+ &updated.id.to_string(),
+ updated.revision,
+ "brief_updated",
+ &updated,
)?;
- tx.commit()?;
- Ok(node)
+ transaction.commit()?;
+ Ok(updated)
}
- pub fn list_metric_keys(&self) -> Result<Vec<MetricKeySummary>, StoreError> {
- self.list_metric_keys_filtered(MetricKeyQuery::default())
+ pub fn create_hypothesis(
+ &mut self,
+ request: CreateHypothesisRequest,
+ ) -> Result<HypothesisRecord, StoreError> {
+ validate_hypothesis_body(&request.body)?;
+ self.assert_known_tags(&request.tags)?;
+ let frontier = self.resolve_frontier(&request.frontier)?;
+ let id = HypothesisId::fresh();
+ let slug = self.unique_hypothesis_slug(request.slug, &request.title)?;
+ let now = OffsetDateTime::now_utc();
+ let record = HypothesisRecord {
+ id,
+ slug,
+ frontier_id: frontier.id,
+ archived: false,
+ title: request.title,
+ summary: request.summary,
+ body: request.body,
+ tags: request.tags.iter().cloned().collect(),
+ revision: 1,
+ created_at: now,
+ updated_at: now,
+ };
+ let parents = self.resolve_vertex_parents(
+ frontier.id,
+ &request.parents,
+ Some(VertexRef::Hypothesis(id)),
+ )?;
+ let transaction = self.connection.transaction()?;
+ insert_hypothesis(&transaction, &record)?;
+ replace_hypothesis_tags(&transaction, record.id, &request.tags)?;
+ replace_influence_parents(&transaction, VertexRef::Hypothesis(id), &parents)?;
+ record_event(
+ &transaction,
+ "hypothesis",
+ &record.id.to_string(),
+ 1,
+ "created",
+ &record,
+ )?;
+ transaction.commit()?;
+ Ok(record)
}
- pub fn list_metric_keys_filtered(
+ pub fn list_hypotheses(
&self,
- query: MetricKeyQuery,
- ) -> Result<Vec<MetricKeySummary>, StoreError> {
- let mut summaries = collect_metric_samples(self, &query)?
+ query: ListHypothesesQuery,
+ ) -> Result<Vec<HypothesisSummary>, StoreError> {
+ let frontier_id = query
+ .frontier
+ .as_deref()
+ .map(|selector| self.resolve_frontier(selector).map(|frontier| frontier.id))
+ .transpose()?;
+ let records = self.load_hypothesis_records(frontier_id, query.include_archived)?;
+ let filtered = records
.into_iter()
- .fold(
- BTreeMap::<(MetricFieldSource, String), MetricKeyAccumulator>::new(),
- |mut accumulators, sample| {
- let key = (sample.source, sample.key.as_str().to_owned());
- let _ = accumulators
- .entry(key)
- .and_modify(|entry| entry.observe(&sample))
- .or_insert_with(|| MetricKeyAccumulator::from_sample(&sample));
- accumulators
- },
- )
- .into_values()
- .map(MetricKeyAccumulator::finish)
- .collect::<Vec<_>>();
- if query
- .source
- .is_none_or(|source| source == MetricFieldSource::RunMetric)
- {
- merge_registered_run_metric_summaries(self, &mut summaries)?;
- }
- summaries.sort_by(|left, right| {
- left.key
- .cmp(&right.key)
- .then(left.source.cmp(&right.source))
- });
- Ok(summaries)
+ .filter(|record| {
+ query.tags.is_empty() || query.tags.iter().all(|tag| record.tags.contains(tag))
+ })
+ .map(|record| self.hypothesis_summary_from_record(record))
+ .collect::<Result<Vec<_>, _>>()?;
+ Ok(apply_limit(filtered, query.limit))
}
- pub fn best_metrics(&self, query: MetricBestQuery) -> Result<Vec<MetricBestEntry>, StoreError> {
- let matching = collect_metric_samples(
- self,
- &MetricKeyQuery {
- frontier_id: query.frontier_id,
- source: query.source,
- dimensions: query.dimensions.clone(),
- },
- )?
- .into_iter()
- .filter(|sample| sample.key == query.key)
- .collect::<Vec<_>>();
- if matching.is_empty() {
- return Ok(Vec::new());
- }
-
- let source = if let Some(source) = query.source {
- source
- } else {
- let sources = matching
- .iter()
- .map(|sample| sample.source)
- .collect::<BTreeSet<_>>();
- if sources.len() != 1 {
- return Err(StoreError::AmbiguousMetricKey {
- key: query.key.as_str().to_owned(),
- sources: sources
- .into_iter()
- .map(MetricFieldSource::as_str)
- .collect::<Vec<_>>()
- .join(", "),
- });
- }
- let Some(source) = sources.iter().copied().next() else {
- return Ok(Vec::new());
- };
- source
- };
-
- let mut matching = matching
- .into_iter()
- .filter(|sample| sample.source == source)
- .collect::<Vec<_>>();
- if matching.is_empty() {
- return Ok(Vec::new());
- }
-
- let order = resolve_metric_order(&matching, &query, source)?;
- matching.sort_by(|left, right| compare_metric_samples(left, right, order));
- matching.truncate(query.limit as usize);
- Ok(matching
+ pub fn read_hypothesis(&self, selector: &str) -> Result<HypothesisDetail, StoreError> {
+ let record = self.resolve_hypothesis(selector)?;
+ let parents = self.load_vertex_parents(VertexRef::Hypothesis(record.id))?;
+ let children = self.load_vertex_children(VertexRef::Hypothesis(record.id))?;
+ let experiments = self.list_experiments(ListExperimentsQuery {
+ hypothesis: Some(record.id.to_string()),
+ include_archived: true,
+ limit: None,
+ ..ListExperimentsQuery::default()
+ })?;
+ let (open_experiments, closed_experiments): (Vec<_>, Vec<_>) = experiments
.into_iter()
- .map(|sample| sample.into_entry(order))
- .collect())
+ .partition(|experiment| experiment.status == ExperimentStatus::Open);
+ Ok(HypothesisDetail {
+ artifacts: self.list_artifacts(ListArtifactsQuery {
+ attached_to: Some(AttachmentSelector::Hypothesis(record.id.to_string())),
+ limit: None,
+ ..ListArtifactsQuery::default()
+ })?,
+ children,
+ closed_experiments,
+ open_experiments,
+ parents,
+ record,
+ })
}
- pub fn archive_node(&mut self, node_id: fidget_spinner_core::NodeId) -> Result<(), StoreError> {
- let updated_at = encode_timestamp(OffsetDateTime::now_utc())?;
- let changed = self.connection.execute(
- "UPDATE nodes SET archived = 1, updated_at = ?1 WHERE id = ?2",
- params![updated_at, node_id.to_string()],
+ pub fn update_hypothesis(
+ &mut self,
+ request: UpdateHypothesisRequest,
+ ) -> Result<HypothesisRecord, StoreError> {
+ let record = self.resolve_hypothesis(&request.hypothesis)?;
+ enforce_revision(
+ "hypothesis",
+ &request.hypothesis,
+ request.expected_revision,
+ record.revision,
)?;
- if changed == 0 {
- return Err(StoreError::NodeNotFound(node_id));
+ if let Some(body) = request.body.as_ref() {
+ validate_hypothesis_body(body)?;
}
- Ok(())
+ if let Some(tags) = request.tags.as_ref() {
+ self.assert_known_tags(tags)?;
+ }
+ let updated = HypothesisRecord {
+ title: request.title.unwrap_or(record.title.clone()),
+ summary: request.summary.unwrap_or(record.summary.clone()),
+ body: request.body.unwrap_or(record.body.clone()),
+ tags: request
+ .tags
+ .clone()
+ .map_or_else(|| record.tags.clone(), |tags| tags.into_iter().collect()),
+ archived: request.archived.unwrap_or(record.archived),
+ revision: record.revision.saturating_add(1),
+ updated_at: OffsetDateTime::now_utc(),
+ ..record
+ };
+ let parents = request
+ .parents
+ .as_ref()
+ .map(|selectors| {
+ self.resolve_vertex_parents(
+ updated.frontier_id,
+ selectors,
+ Some(VertexRef::Hypothesis(updated.id)),
+ )
+ })
+ .transpose()?;
+ let transaction = self.connection.transaction()?;
+ update_hypothesis_row(&transaction, &updated)?;
+ replace_hypothesis_tags(
+ &transaction,
+ updated.id,
+ &updated.tags.iter().cloned().collect::<BTreeSet<_>>(),
+ )?;
+ if let Some(parents) = parents.as_ref() {
+ replace_influence_parents(&transaction, VertexRef::Hypothesis(updated.id), parents)?;
+ }
+ record_event(
+ &transaction,
+ "hypothesis",
+ &updated.id.to_string(),
+ updated.revision,
+ "updated",
+ &updated,
+ )?;
+ transaction.commit()?;
+ Ok(updated)
}
- pub fn annotate_node(
+ pub fn open_experiment(
&mut self,
- node_id: fidget_spinner_core::NodeId,
- annotation: NodeAnnotation,
- ) -> Result<(), StoreError> {
- let tx = self.connection.transaction()?;
- let exists = tx
- .query_row(
- "SELECT 1 FROM nodes WHERE id = ?1",
- params![node_id.to_string()],
- |row| row.get::<_, i64>(0),
- )
- .optional()?;
- if exists.is_none() {
- return Err(StoreError::NodeNotFound(node_id));
- }
- insert_annotation(&tx, node_id, &annotation)?;
- let _ = tx.execute(
- "UPDATE nodes SET updated_at = ?1 WHERE id = ?2",
- params![
- encode_timestamp(OffsetDateTime::now_utc())?,
- node_id.to_string()
- ],
+ request: OpenExperimentRequest,
+ ) -> Result<ExperimentRecord, StoreError> {
+ self.assert_known_tags(&request.tags)?;
+ let hypothesis = self.resolve_hypothesis(&request.hypothesis)?;
+ let id = ExperimentId::fresh();
+ let slug = self.unique_experiment_slug(request.slug, &request.title)?;
+ let now = OffsetDateTime::now_utc();
+ let record = ExperimentRecord {
+ id,
+ slug,
+ frontier_id: hypothesis.frontier_id,
+ hypothesis_id: hypothesis.id,
+ archived: false,
+ title: request.title,
+ summary: request.summary,
+ tags: request.tags.iter().cloned().collect(),
+ status: ExperimentStatus::Open,
+ outcome: None,
+ revision: 1,
+ created_at: now,
+ updated_at: now,
+ };
+ let parents = self.resolve_vertex_parents(
+ hypothesis.frontier_id,
+ &request.parents,
+ Some(VertexRef::Experiment(id)),
)?;
- insert_event(
- &tx,
- "node",
- &node_id.to_string(),
- "node.annotated",
- json!({"visibility": format!("{:?}", annotation.visibility)}),
+ let transaction = self.connection.transaction()?;
+ insert_experiment(&transaction, &record)?;
+ replace_experiment_tags(&transaction, record.id, &request.tags)?;
+ replace_influence_parents(&transaction, VertexRef::Experiment(id), &parents)?;
+ record_event(
+ &transaction,
+ "experiment",
+ &record.id.to_string(),
+ 1,
+ "opened",
+ &record,
)?;
- tx.commit()?;
- Ok(())
+ transaction.commit()?;
+ Ok(record)
}
- pub fn get_node(
+ pub fn list_experiments(
&self,
- node_id: fidget_spinner_core::NodeId,
- ) -> Result<Option<DagNode>, StoreError> {
- let mut statement = self.connection.prepare(
- "SELECT
- id,
- class,
- track,
- frontier_id,
- archived,
- title,
- summary,
- payload_schema_namespace,
- payload_schema_version,
- payload_json,
- diagnostics_json,
- agent_session_id,
- created_at,
- updated_at
- FROM nodes
- WHERE id = ?1",
- )?;
- let node = statement
- .query_row(params![node_id.to_string()], read_node_row)
- .optional()?;
- node.map(|mut item| {
- item.tags = self.load_tags(item.id)?;
- item.annotations = self.load_annotations(item.id)?;
- Ok(item)
+ query: ListExperimentsQuery,
+ ) -> Result<Vec<ExperimentSummary>, StoreError> {
+ let frontier_id = query
+ .frontier
+ .as_deref()
+ .map(|selector| self.resolve_frontier(selector).map(|frontier| frontier.id))
+ .transpose()?;
+ let hypothesis_id = query
+ .hypothesis
+ .as_deref()
+ .map(|selector| {
+ self.resolve_hypothesis(selector)
+ .map(|hypothesis| hypothesis.id)
+ })
+ .transpose()?;
+ let records =
+ self.load_experiment_records(frontier_id, hypothesis_id, query.include_archived)?;
+ let filtered = records
+ .into_iter()
+ .filter(|record| query.status.is_none_or(|status| record.status == status))
+ .filter(|record| {
+ query.tags.is_empty() || query.tags.iter().all(|tag| record.tags.contains(tag))
+ })
+ .map(|record| self.experiment_summary_from_record(record))
+ .collect::<Result<Vec<_>, _>>()?;
+ Ok(apply_limit(filtered, query.limit))
+ }
+
+ pub fn read_experiment(&self, selector: &str) -> Result<ExperimentDetail, StoreError> {
+ let record = self.resolve_experiment(selector)?;
+ Ok(ExperimentDetail {
+ artifacts: self.list_artifacts(ListArtifactsQuery {
+ attached_to: Some(AttachmentSelector::Experiment(record.id.to_string())),
+ limit: None,
+ ..ListArtifactsQuery::default()
+ })?,
+ children: self.load_vertex_children(VertexRef::Experiment(record.id))?,
+ owning_hypothesis: self
+ .hypothesis_summary_from_record(self.hypothesis_by_id(record.hypothesis_id)?)?,
+ parents: self.load_vertex_parents(VertexRef::Experiment(record.id))?,
+ record,
})
- .transpose()
}
- pub fn list_nodes(&self, query: ListNodesQuery) -> Result<Vec<NodeSummary>, StoreError> {
- let frontier_id = query.frontier_id.map(|id| id.to_string());
- let class = query.class.map(|item| item.as_str().to_owned());
- let mut sql = String::from(
- "SELECT
- n.id,
- n.class,
- n.track,
- n.frontier_id,
- n.archived,
- n.title,
- n.summary,
- n.diagnostics_json,
- n.created_at,
- n.updated_at,
- (
- SELECT COUNT(*)
- FROM node_annotations AS a
- WHERE a.node_id = n.id AND a.visibility = 'hidden'
- ) AS hidden_annotation_count
- FROM nodes AS n
- WHERE (?1 IS NULL OR n.frontier_id = ?1)
- AND (?2 IS NULL OR n.class = ?2)
- AND (?3 = 1 OR n.archived = 0)",
- );
- let mut parameters = vec![
- frontier_id.map_or(SqlValue::Null, SqlValue::Text),
- class.map_or(SqlValue::Null, SqlValue::Text),
- SqlValue::Integer(i64::from(query.include_archived)),
- ];
- for (index, tag) in query.tags.iter().enumerate() {
- let placeholder = parameters.len() + 1;
- let _ = write!(
- sql,
- "
- AND EXISTS (
- SELECT 1
- FROM node_tags AS nt{index}
- WHERE nt{index}.node_id = n.id AND nt{index}.tag_name = ?{placeholder}
- )"
- );
- parameters.push(SqlValue::Text(tag.as_str().to_owned()));
- }
- let limit_placeholder = parameters.len() + 1;
- let _ = write!(
- sql,
- "
- ORDER BY n.updated_at DESC
- LIMIT ?{limit_placeholder}"
- );
- parameters.push(SqlValue::Integer(i64::from(query.limit)));
- let mut statement = self.connection.prepare(&sql)?;
- let mut rows = statement.query(params_from_iter(parameters.iter()))?;
- let mut items = Vec::new();
- while let Some(row) = rows.next()? {
- let diagnostics = decode_json::<NodeDiagnostics>(&row.get::<_, String>(7)?)?;
- let node_id = parse_node_id(&row.get::<_, String>(0)?)?;
- items.push(NodeSummary {
- id: node_id,
- class: parse_node_class(&row.get::<_, String>(1)?)?,
- track: parse_node_track(&row.get::<_, String>(2)?)?,
- frontier_id: row
- .get::<_, Option<String>>(3)?
- .map(|raw| parse_frontier_id(&raw))
- .transpose()?,
- archived: row.get::<_, i64>(4)? != 0,
- title: NonEmptyText::new(row.get::<_, String>(5)?)?,
- summary: row
- .get::<_, Option<String>>(6)?
- .map(NonEmptyText::new)
- .transpose()?,
- tags: self.load_tags(node_id)?,
- diagnostic_count: diagnostics.items.len() as u64,
- hidden_annotation_count: row.get::<_, i64>(10)? as u64,
- created_at: decode_timestamp(&row.get::<_, String>(8)?)?,
- updated_at: decode_timestamp(&row.get::<_, String>(9)?)?,
- });
+ pub fn update_experiment(
+ &mut self,
+ request: UpdateExperimentRequest,
+ ) -> Result<ExperimentRecord, StoreError> {
+ let record = self.resolve_experiment(&request.experiment)?;
+ enforce_revision(
+ "experiment",
+ &request.experiment,
+ request.expected_revision,
+ record.revision,
+ )?;
+ if let Some(tags) = request.tags.as_ref() {
+ self.assert_known_tags(tags)?;
}
- Ok(items)
- }
-
- pub fn list_frontiers(&self) -> Result<Vec<FrontierRecord>, StoreError> {
- let mut statement = self.connection.prepare(
- "SELECT id, label, root_contract_node_id, status, created_at, updated_at
- FROM frontiers
- ORDER BY updated_at DESC",
+ let outcome = match request.outcome {
+ Some(patch) => Some(self.materialize_outcome(&patch)?),
+ None => record.outcome.clone(),
+ };
+ let updated = ExperimentRecord {
+ title: request.title.unwrap_or(record.title.clone()),
+ summary: apply_optional_text_patch(request.summary, record.summary.clone()),
+ tags: request
+ .tags
+ .clone()
+ .map_or_else(|| record.tags.clone(), |tags| tags.into_iter().collect()),
+ archived: request.archived.unwrap_or(record.archived),
+ status: if outcome.is_some() {
+ ExperimentStatus::Closed
+ } else {
+ record.status
+ },
+ outcome,
+ revision: record.revision.saturating_add(1),
+ updated_at: OffsetDateTime::now_utc(),
+ ..record
+ };
+ let parents = request
+ .parents
+ .as_ref()
+ .map(|selectors| {
+ self.resolve_vertex_parents(
+ updated.frontier_id,
+ selectors,
+ Some(VertexRef::Experiment(updated.id)),
+ )
+ })
+ .transpose()?;
+ let transaction = self.connection.transaction()?;
+ update_experiment_row(&transaction, &updated)?;
+ replace_experiment_tags(
+ &transaction,
+ updated.id,
+ &updated.tags.iter().cloned().collect::<BTreeSet<_>>(),
)?;
- let mut rows = statement.query([])?;
- let mut items = Vec::new();
- while let Some(row) = rows.next()? {
- items.push(read_frontier_row(row)?);
+ replace_experiment_dimensions(&transaction, updated.id, updated.outcome.as_ref())?;
+ replace_experiment_metrics(&transaction, updated.id, updated.outcome.as_ref())?;
+ if let Some(parents) = parents.as_ref() {
+ replace_influence_parents(&transaction, VertexRef::Experiment(updated.id), parents)?;
}
- Ok(items)
+ record_event(
+ &transaction,
+ "experiment",
+ &updated.id.to_string(),
+ updated.revision,
+ "updated",
+ &updated,
+ )?;
+ transaction.commit()?;
+ Ok(updated)
}
- pub fn frontier_projection(
- &self,
- frontier_id: fidget_spinner_core::FrontierId,
- ) -> Result<FrontierProjection, StoreError> {
- let frontier = self.load_frontier(frontier_id)?;
- let open_experiment_count = self.connection.query_row(
- "SELECT COUNT(*) FROM open_experiments WHERE frontier_id = ?1",
- params![frontier_id.to_string()],
- |row| row.get::<_, i64>(0),
- )? as u64;
- let completed_experiment_count = self.connection.query_row(
- "SELECT COUNT(*) FROM experiments WHERE frontier_id = ?1",
- params![frontier_id.to_string()],
- |row| row.get::<_, i64>(0),
- )? as u64;
- let verdict_counts = self.connection.query_row(
- "SELECT
- SUM(CASE WHEN verdict = 'accepted' THEN 1 ELSE 0 END),
- SUM(CASE WHEN verdict = 'kept' THEN 1 ELSE 0 END),
- SUM(CASE WHEN verdict = 'parked' THEN 1 ELSE 0 END),
- SUM(CASE WHEN verdict = 'rejected' THEN 1 ELSE 0 END)
- FROM experiments
- WHERE frontier_id = ?1",
- params![frontier_id.to_string()],
- |row| {
- Ok(FrontierVerdictCounts {
- accepted: row.get::<_, Option<i64>>(0)?.unwrap_or(0) as u64,
- kept: row.get::<_, Option<i64>>(1)?.unwrap_or(0) as u64,
- parked: row.get::<_, Option<i64>>(2)?.unwrap_or(0) as u64,
- rejected: row.get::<_, Option<i64>>(3)?.unwrap_or(0) as u64,
- })
- },
+ pub fn close_experiment(
+ &mut self,
+ request: CloseExperimentRequest,
+ ) -> Result<ExperimentRecord, StoreError> {
+ let record = self.resolve_experiment(&request.experiment)?;
+ if record.status == ExperimentStatus::Closed {
+ return Err(StoreError::ExperimentAlreadyClosed(record.id));
+ }
+ enforce_revision(
+ "experiment",
+ &request.experiment,
+ request.expected_revision,
+ record.revision,
)?;
-
- Ok(FrontierProjection {
- frontier,
- open_experiment_count,
- completed_experiment_count,
- verdict_counts,
- })
+ let outcome = self.materialize_outcome(&ExperimentOutcomePatch {
+ backend: request.backend,
+ command: request.command,
+ dimensions: request.dimensions,
+ primary_metric: request.primary_metric,
+ supporting_metrics: request.supporting_metrics,
+ verdict: request.verdict,
+ rationale: request.rationale,
+ analysis: request.analysis,
+ })?;
+ let updated = ExperimentRecord {
+ status: ExperimentStatus::Closed,
+ outcome: Some(outcome),
+ revision: record.revision.saturating_add(1),
+ updated_at: OffsetDateTime::now_utc(),
+ ..record
+ };
+ let transaction = self.connection.transaction()?;
+ update_experiment_row(&transaction, &updated)?;
+ replace_experiment_dimensions(&transaction, updated.id, updated.outcome.as_ref())?;
+ replace_experiment_metrics(&transaction, updated.id, updated.outcome.as_ref())?;
+ record_event(
+ &transaction,
+ "experiment",
+ &updated.id.to_string(),
+ updated.revision,
+ "closed",
+ &updated,
+ )?;
+ transaction.commit()?;
+ Ok(updated)
}
- pub fn open_experiment(
+ pub fn create_artifact(
&mut self,
- request: OpenExperimentRequest,
- ) -> Result<OpenExperimentSummary, StoreError> {
- let hypothesis_node = self
- .get_node(request.hypothesis_node_id)?
- .ok_or(StoreError::NodeNotFound(request.hypothesis_node_id))?;
- if hypothesis_node.class != NodeClass::Hypothesis {
- return Err(StoreError::NodeNotHypothesis(request.hypothesis_node_id));
- }
- if hypothesis_node.frontier_id != Some(request.frontier_id) {
- return Err(StoreError::FrontierNotFound(request.frontier_id));
- }
- let experiment = OpenExperiment {
- id: fidget_spinner_core::ExperimentId::fresh(),
- frontier_id: request.frontier_id,
- hypothesis_node_id: request.hypothesis_node_id,
- title: request.title,
+ request: CreateArtifactRequest,
+ ) -> Result<ArtifactRecord, StoreError> {
+ let id = ArtifactId::fresh();
+ let slug = self.unique_artifact_slug(request.slug, &request.label)?;
+ let now = OffsetDateTime::now_utc();
+ let record = ArtifactRecord {
+ id,
+ slug,
+ kind: request.kind,
+ label: request.label,
summary: request.summary,
- created_at: OffsetDateTime::now_utc(),
+ locator: request.locator,
+ media_type: request.media_type,
+ revision: 1,
+ created_at: now,
+ updated_at: now,
};
- let tx = self.connection.transaction()?;
- insert_open_experiment(&tx, &experiment)?;
- touch_frontier(&tx, request.frontier_id)?;
- insert_event(
- &tx,
- "experiment",
- &experiment.id.to_string(),
- "experiment.opened",
- json!({
- "frontier_id": experiment.frontier_id,
- "hypothesis_node_id": experiment.hypothesis_node_id,
- }),
+ let attachments = self.resolve_attachment_targets(&request.attachments)?;
+ let transaction = self.connection.transaction()?;
+ insert_artifact(&transaction, &record)?;
+ replace_artifact_attachments(&transaction, record.id, &attachments)?;
+ record_event(
+ &transaction,
+ "artifact",
+ &record.id.to_string(),
+ 1,
+ "created",
+ &record,
)?;
- tx.commit()?;
- Ok(summarize_open_experiment(&experiment))
+ transaction.commit()?;
+ Ok(record)
}
- pub fn list_open_experiments(
+ pub fn list_artifacts(
&self,
- frontier_id: Option<fidget_spinner_core::FrontierId>,
- ) -> Result<Vec<OpenExperimentSummary>, StoreError> {
- let mut statement = self.connection.prepare(
- "SELECT
- id,
- frontier_id,
- hypothesis_node_id,
- title,
- summary,
- created_at
- FROM open_experiments
- WHERE (?1 IS NULL OR frontier_id = ?1)
- ORDER BY created_at DESC",
- )?;
- let mut rows = statement.query(params![frontier_id.map(|id| id.to_string())])?;
- let mut items = Vec::new();
- while let Some(row) = rows.next()? {
- items.push(OpenExperimentSummary {
- id: parse_experiment_id(&row.get::<_, String>(0)?)?,
- frontier_id: parse_frontier_id(&row.get::<_, String>(1)?)?,
- hypothesis_node_id: parse_node_id(&row.get::<_, String>(2)?)?,
- title: NonEmptyText::new(row.get::<_, String>(3)?)?,
- summary: row
- .get::<_, Option<String>>(4)?
- .map(NonEmptyText::new)
- .transpose()?,
- created_at: decode_timestamp(&row.get::<_, String>(5)?)?,
- });
+ query: ListArtifactsQuery,
+ ) -> Result<Vec<ArtifactSummary>, StoreError> {
+ let records = self.load_artifact_records()?;
+ let frontier_id = query
+ .frontier
+ .as_deref()
+ .map(|selector| self.resolve_frontier(selector).map(|frontier| frontier.id))
+ .transpose()?;
+ let mut filtered = Vec::new();
+ for record in records {
+ if query.kind.is_some_and(|kind| record.kind != kind) {
+ continue;
+ }
+ if let Some(frontier_id) = frontier_id
+ && !self.artifact_attached_to_frontier(record.id, frontier_id)?
+ {
+ continue;
+ }
+ filtered.push(record);
}
- Ok(items)
+ let attached_filtered = match query.attached_to {
+ Some(selector) => {
+ let target = self.resolve_attachment_target(&selector)?;
+ filtered
+ .into_iter()
+ .filter(|record| {
+ self.artifact_attachment_targets(record.id)
+ .map(|targets| targets.contains(&target))
+ .unwrap_or(false)
+ })
+ .collect()
+ }
+ None => filtered,
+ };
+ Ok(apply_limit(
+ attached_filtered
+ .into_iter()
+ .map(|record| ArtifactSummary {
+ id: record.id,
+ slug: record.slug,
+ kind: record.kind,
+ label: record.label,
+ summary: record.summary,
+ locator: record.locator,
+ media_type: record.media_type,
+ updated_at: record.updated_at,
+ })
+ .collect(),
+ query.limit,
+ ))
}
- pub fn read_open_experiment(
- &self,
- experiment_id: fidget_spinner_core::ExperimentId,
- ) -> Result<OpenExperimentSummary, StoreError> {
- load_open_experiment(&self.connection, experiment_id)?
- .map(|experiment| summarize_open_experiment(&experiment))
- .ok_or(StoreError::ExperimentNotFound(experiment_id))
+ pub fn read_artifact(&self, selector: &str) -> Result<ArtifactDetail, StoreError> {
+ let record = self.resolve_artifact(selector)?;
+ Ok(ArtifactDetail {
+ attachments: self.artifact_attachment_targets(record.id)?,
+ record,
+ })
}
- pub fn close_experiment(
+ pub fn update_artifact(
&mut self,
- request: CloseExperimentRequest,
- ) -> Result<ExperimentReceipt, StoreError> {
- let open_experiment = load_open_experiment(&self.connection, request.experiment_id)?
- .ok_or(StoreError::ExperimentNotFound(request.experiment_id))?;
- let hypothesis_node = self
- .get_node(open_experiment.hypothesis_node_id)?
- .ok_or(StoreError::NodeNotFound(open_experiment.hypothesis_node_id))?;
- if hypothesis_node.class != NodeClass::Hypothesis {
- return Err(StoreError::NodeNotHypothesis(
- open_experiment.hypothesis_node_id,
- ));
+ request: UpdateArtifactRequest,
+ ) -> Result<ArtifactRecord, StoreError> {
+ let record = self.resolve_artifact(&request.artifact)?;
+ enforce_revision(
+ "artifact",
+ &request.artifact,
+ request.expected_revision,
+ record.revision,
+ )?;
+ let updated = ArtifactRecord {
+ kind: request.kind.unwrap_or(record.kind),
+ label: request.label.unwrap_or(record.label.clone()),
+ summary: apply_optional_text_patch(request.summary, record.summary.clone()),
+ locator: request.locator.unwrap_or(record.locator.clone()),
+ media_type: apply_optional_text_patch(request.media_type, record.media_type.clone()),
+ revision: record.revision.saturating_add(1),
+ updated_at: OffsetDateTime::now_utc(),
+ ..record
+ };
+ let attachments = request
+ .attachments
+ .as_ref()
+ .map(|selectors| self.resolve_attachment_targets(selectors))
+ .transpose()?;
+ let transaction = self.connection.transaction()?;
+ update_artifact_row(&transaction, &updated)?;
+ if let Some(attachments) = attachments.as_ref() {
+ replace_artifact_attachments(&transaction, updated.id, attachments)?;
}
- let tx = self.connection.transaction()?;
- let dimensions = validate_run_dimensions_tx(&tx, &request.dimensions)?;
- let primary_metric_definition =
- load_metric_definition_tx(&tx, &request.primary_metric.key)?.ok_or_else(|| {
- StoreError::UnknownMetricDefinition(request.primary_metric.key.clone())
- })?;
- let supporting_metric_definitions = request
- .supporting_metrics
- .iter()
- .map(|metric| {
- load_metric_definition_tx(&tx, &metric.key)?
- .ok_or_else(|| StoreError::UnknownMetricDefinition(metric.key.clone()))
+ record_event(
+ &transaction,
+ "artifact",
+ &updated.id.to_string(),
+ updated.revision,
+ "updated",
+ &updated,
+ )?;
+ transaction.commit()?;
+ Ok(updated)
+ }
+
+ pub fn frontier_open(&self, selector: &str) -> Result<FrontierOpenProjection, StoreError> {
+ let frontier = self.resolve_frontier(selector)?;
+ let active_hypothesis_ids = self.active_hypothesis_ids(frontier.id, &frontier.brief)?;
+ let active_hypotheses = active_hypothesis_ids
+ .into_iter()
+ .map(|hypothesis_id| {
+ let summary =
+ self.hypothesis_summary_from_record(self.hypothesis_by_id(hypothesis_id)?)?;
+ let open_experiments = self.list_experiments(ListExperimentsQuery {
+ hypothesis: Some(hypothesis_id.to_string()),
+ status: Some(ExperimentStatus::Open),
+ limit: None,
+ ..ListExperimentsQuery::default()
+ })?;
+ let latest_closed_experiment = self
+ .list_experiments(ListExperimentsQuery {
+ hypothesis: Some(hypothesis_id.to_string()),
+ status: Some(ExperimentStatus::Closed),
+ limit: Some(1),
+ ..ListExperimentsQuery::default()
+ })?
+ .into_iter()
+ .next();
+ Ok(HypothesisCurrentState {
+ hypothesis: summary,
+ open_experiments,
+ latest_closed_experiment,
+ })
})
.collect::<Result<Vec<_>, StoreError>>()?;
- let benchmark_suite = benchmark_suite_label(&dimensions);
-
- let run_payload = NodePayload::with_schema(
- self.schema.schema_ref(),
- json_object(json!({
- "dimensions": run_dimensions_json(&dimensions),
- "backend": format!("{:?}", request.backend),
- "command": request.command.argv.iter().map(NonEmptyText::as_str).collect::<Vec<_>>(),
- }))?,
- );
- let run_diagnostics = self.schema.validate_node(NodeClass::Run, &run_payload);
- let run_node = DagNode::new(
- NodeClass::Run,
- Some(open_experiment.frontier_id),
- request.run_title,
- request.run_summary,
- run_payload,
- run_diagnostics,
- );
- let run_id = fidget_spinner_core::RunId::fresh();
- let now = OffsetDateTime::now_utc();
- let run = RunRecord {
- node_id: run_node.id,
- run_id,
- frontier_id: Some(open_experiment.frontier_id),
- status: RunStatus::Succeeded,
- backend: request.backend,
- dimensions: dimensions.clone(),
- command: request.command,
- started_at: Some(now),
- finished_at: Some(now),
- };
+ let open_experiments = self.list_experiments(ListExperimentsQuery {
+ frontier: Some(frontier.id.to_string()),
+ status: Some(ExperimentStatus::Open),
+ limit: None,
+ ..ListExperimentsQuery::default()
+ })?;
+ let active_tags = derive_active_tags(&active_hypotheses, &open_experiments);
+ let active_metric_keys =
+ self.live_metric_keys(frontier.id, &active_hypotheses, &open_experiments)?;
+ Ok(FrontierOpenProjection {
+ frontier,
+ active_tags,
+ active_metric_keys,
+ active_hypotheses,
+ open_experiments,
+ })
+ }
- let analysis_node = request
- .analysis
- .map(|analysis| -> Result<DagNode, StoreError> {
- let payload = NodePayload::with_schema(
- self.schema.schema_ref(),
- json_object(json!({
- "body": analysis.body.as_str(),
- }))?,
- );
- let diagnostics = self.schema.validate_node(NodeClass::Analysis, &payload);
- Ok(DagNode::new(
- NodeClass::Analysis,
- Some(open_experiment.frontier_id),
- analysis.title,
- Some(analysis.summary),
- payload,
- diagnostics,
- ))
+ pub fn metric_keys(&self, query: MetricKeysQuery) -> Result<Vec<MetricKeySummary>, StoreError> {
+ let frontier_id = query
+ .frontier
+ .as_deref()
+ .map(|selector| self.resolve_frontier(selector).map(|frontier| frontier.id))
+ .transpose()?;
+ let definitions = self.list_metric_definitions()?;
+ let live_keys = frontier_id
+ .map(|frontier_id| self.live_metric_key_names(frontier_id))
+ .transpose()?
+ .unwrap_or_default();
+ let mut keys = definitions
+ .into_iter()
+ .filter(|definition| match query.scope {
+ MetricScope::Live => live_keys.contains(definition.key.as_str()),
+ MetricScope::Visible => definition.visibility.is_default_visible(),
+ MetricScope::All => true,
})
+ .map(|definition| {
+ Ok(MetricKeySummary {
+ reference_count: self.metric_reference_count(frontier_id, &definition.key)?,
+ key: definition.key,
+ unit: definition.unit,
+ objective: definition.objective,
+ visibility: definition.visibility,
+ description: definition.description,
+ })
+ })
+ .collect::<Result<Vec<_>, StoreError>>()?;
+ keys.sort_by(|left, right| left.key.as_str().cmp(right.key.as_str()));
+ Ok(keys)
+ }
+
+ pub fn metric_best(&self, query: MetricBestQuery) -> Result<Vec<MetricBestEntry>, StoreError> {
+ let definition = self
+ .metric_definition(&query.key)?
+ .ok_or_else(|| StoreError::UnknownMetricDefinition(query.key.clone()))?;
+ let frontier_id = query
+ .frontier
+ .as_deref()
+ .map(|selector| self.resolve_frontier(selector).map(|frontier| frontier.id))
.transpose()?;
+ let hypothesis_id = query
+ .hypothesis
+ .as_deref()
+ .map(|selector| {
+ self.resolve_hypothesis(selector)
+ .map(|hypothesis| hypothesis.id)
+ })
+ .transpose()?;
+ let order = query.order.unwrap_or(match definition.objective {
+ OptimizationObjective::Minimize => MetricRankOrder::Asc,
+ OptimizationObjective::Maximize => MetricRankOrder::Desc,
+ OptimizationObjective::Target => {
+ return Err(StoreError::MetricOrderRequired {
+ key: query.key.to_string(),
+ });
+ }
+ });
+ let experiments = self
+ .load_experiment_records(frontier_id, hypothesis_id, true)?
+ .into_iter()
+ .filter(|record| record.status == ExperimentStatus::Closed)
+ .filter(|record| {
+ query.include_rejected
+ || record
+ .outcome
+ .as_ref()
+ .is_some_and(|outcome| outcome.verdict != FrontierVerdict::Rejected)
+ })
+ .collect::<Vec<_>>();
+ let mut entries = experiments
+ .into_iter()
+ .filter_map(|record| {
+ let outcome = record.outcome.clone()?;
+ if !dimension_subset_matches(&query.dimensions, &outcome.dimensions) {
+ return None;
+ }
+ let metric = all_metrics(&outcome)
+ .into_iter()
+ .find(|metric| metric.key == query.key)?;
+ Some((record, outcome.dimensions.clone(), metric.value))
+ })
+ .map(|(record, dimensions, value)| {
+ Ok(MetricBestEntry {
+ experiment: self.experiment_summary_from_record(record.clone())?,
+ hypothesis: self.hypothesis_summary_from_record(
+ self.hypothesis_by_id(record.hypothesis_id)?,
+ )?,
+ value,
+ dimensions,
+ })
+ })
+ .collect::<Result<Vec<_>, StoreError>>()?;
+ entries.sort_by(|left, right| compare_metric_values(left.value, right.value, order));
+ Ok(apply_limit(entries, query.limit))
+ }
- let decision_payload = NodePayload::with_schema(
- self.schema.schema_ref(),
- json_object(json!({
- "verdict": format!("{:?}", request.verdict),
- "rationale": request.decision_rationale.as_str(),
- }))?,
- );
- let decision_diagnostics = self
- .schema
- .validate_node(NodeClass::Decision, &decision_payload);
- let decision_node = DagNode::new(
- NodeClass::Decision,
- Some(open_experiment.frontier_id),
- request.decision_title,
- Some(request.decision_rationale.clone()),
- decision_payload,
- decision_diagnostics,
- );
+ pub fn frontier_history(&self, selector: &str) -> Result<Vec<EntityHistoryEntry>, StoreError> {
+ let frontier = self.resolve_frontier(selector)?;
+ self.entity_history("frontier", &frontier.id.to_string())
+ }
- let experiment = CompletedExperiment {
- id: open_experiment.id,
- frontier_id: open_experiment.frontier_id,
- hypothesis_node_id: open_experiment.hypothesis_node_id,
- run_node_id: run_node.id,
- run_id,
- analysis_node_id: analysis_node.as_ref().map(|node| node.id),
- decision_node_id: decision_node.id,
- title: open_experiment.title.clone(),
- summary: open_experiment.summary.clone(),
- result: ExperimentResult {
- dimensions: dimensions.clone(),
- primary_metric: request.primary_metric,
- supporting_metrics: request.supporting_metrics,
- benchmark_bundle: None,
- },
- note: request.note,
- verdict: request.verdict,
- created_at: now,
+ pub fn hypothesis_history(
+ &self,
+ selector: &str,
+ ) -> Result<Vec<EntityHistoryEntry>, StoreError> {
+ let hypothesis = self.resolve_hypothesis(selector)?;
+ self.entity_history("hypothesis", &hypothesis.id.to_string())
+ }
+
+ pub fn experiment_history(
+ &self,
+ selector: &str,
+ ) -> Result<Vec<EntityHistoryEntry>, StoreError> {
+ let experiment = self.resolve_experiment(selector)?;
+ self.entity_history("experiment", &experiment.id.to_string())
+ }
+
+ pub fn artifact_history(&self, selector: &str) -> Result<Vec<EntityHistoryEntry>, StoreError> {
+ let artifact = self.resolve_artifact(selector)?;
+ self.entity_history("artifact", &artifact.id.to_string())
+ }
+
+ fn metric_definition(
+ &self,
+ key: &NonEmptyText,
+ ) -> Result<Option<MetricDefinition>, StoreError> {
+ self.connection
+ .query_row(
+ "SELECT key, unit, objective, visibility, description, created_at, updated_at
+ FROM metric_definitions
+ WHERE key = ?1",
+ params![key.as_str()],
+ decode_metric_definition_row,
+ )
+ .optional()
+ .map_err(StoreError::from)
+ }
+
+ fn run_dimension_definition(
+ &self,
+ key: &NonEmptyText,
+ ) -> Result<Option<RunDimensionDefinition>, StoreError> {
+ self.connection
+ .query_row(
+ "SELECT key, value_type, description, created_at, updated_at
+ FROM run_dimension_definitions
+ WHERE key = ?1",
+ params![key.as_str()],
+ decode_run_dimension_definition_row,
+ )
+ .optional()
+ .map_err(StoreError::from)
+ }
+
+ fn hypothesis_by_id(&self, id: HypothesisId) -> Result<HypothesisRecord, StoreError> {
+ self.connection
+ .query_row(
+ "SELECT id, slug, frontier_id, archived, title, summary, body, revision, created_at, updated_at
+ FROM hypotheses WHERE id = ?1",
+ params![id.to_string()],
+ |row| self.decode_hypothesis_row(row),
+ )
+ .map_err(StoreError::from)
+ }
+
+ fn resolve_frontier(&self, selector: &str) -> Result<FrontierRecord, StoreError> {
+ let record = match resolve_selector(selector)? {
+ Selector::Id(uuid) => self
+ .connection
+ .query_row(
+ "SELECT id, slug, label, objective, status, brief_json, revision, created_at, updated_at
+ FROM frontiers WHERE id = ?1",
+ params![uuid.to_string()],
+ decode_frontier_row,
+ )
+ .optional()?,
+ Selector::Slug(slug) => self
+ .connection
+ .query_row(
+ "SELECT id, slug, label, objective, status, brief_json, revision, created_at, updated_at
+ FROM frontiers WHERE slug = ?1",
+ params![slug.as_str()],
+ decode_frontier_row,
+ )
+ .optional()?,
};
- insert_node(&tx, &run_node)?;
- if let Some(node) = analysis_node.as_ref() {
- insert_node(&tx, node)?;
+ record.ok_or_else(|| StoreError::UnknownFrontierSelector(selector.to_owned()))
+ }
+
+ fn resolve_hypothesis(&self, selector: &str) -> Result<HypothesisRecord, StoreError> {
+ let record = match resolve_selector(selector)? {
+ Selector::Id(uuid) => self
+ .connection
+ .query_row(
+ "SELECT id, slug, frontier_id, archived, title, summary, body, revision, created_at, updated_at
+ FROM hypotheses WHERE id = ?1",
+ params![uuid.to_string()],
+ |row| self.decode_hypothesis_row(row),
+ )
+ .optional()?,
+ Selector::Slug(slug) => self
+ .connection
+ .query_row(
+ "SELECT id, slug, frontier_id, archived, title, summary, body, revision, created_at, updated_at
+ FROM hypotheses WHERE slug = ?1",
+ params![slug.as_str()],
+ |row| self.decode_hypothesis_row(row),
+ )
+ .optional()?,
+ };
+ record.ok_or_else(|| StoreError::UnknownHypothesisSelector(selector.to_owned()))
+ }
+
+ fn resolve_experiment(&self, selector: &str) -> Result<ExperimentRecord, StoreError> {
+ let record = match resolve_selector(selector)? {
+ Selector::Id(uuid) => self
+ .connection
+ .query_row(
+ "SELECT id, slug, frontier_id, hypothesis_id, archived, title, summary, tags_json, status, outcome_json, revision, created_at, updated_at
+ FROM experiments WHERE id = ?1",
+ params![uuid.to_string()],
+ decode_experiment_row,
+ )
+ .optional()?,
+ Selector::Slug(slug) => self
+ .connection
+ .query_row(
+ "SELECT id, slug, frontier_id, hypothesis_id, archived, title, summary, tags_json, status, outcome_json, revision, created_at, updated_at
+ FROM experiments WHERE slug = ?1",
+ params![slug.as_str()],
+ decode_experiment_row,
+ )
+ .optional()?,
+ };
+ record.ok_or_else(|| StoreError::UnknownExperimentSelector(selector.to_owned()))
+ }
+
+ fn resolve_artifact(&self, selector: &str) -> Result<ArtifactRecord, StoreError> {
+ let record = match resolve_selector(selector)? {
+ Selector::Id(uuid) => self
+ .connection
+ .query_row(
+ "SELECT id, slug, kind, label, summary, locator, media_type, revision, created_at, updated_at
+ FROM artifacts WHERE id = ?1",
+ params![uuid.to_string()],
+ decode_artifact_row,
+ )
+ .optional()?,
+ Selector::Slug(slug) => self
+ .connection
+ .query_row(
+ "SELECT id, slug, kind, label, summary, locator, media_type, revision, created_at, updated_at
+ FROM artifacts WHERE slug = ?1",
+ params![slug.as_str()],
+ decode_artifact_row,
+ )
+ .optional()?,
+ };
+ record.ok_or_else(|| StoreError::UnknownArtifactSelector(selector.to_owned()))
+ }
+
+ fn resolve_vertex_parents(
+ &self,
+ frontier_id: FrontierId,
+ selectors: &[VertexSelector],
+ child: Option<VertexRef>,
+ ) -> Result<Vec<VertexRef>, StoreError> {
+ selectors
+ .iter()
+ .map(|selector| {
+ let vertex = match selector {
+ VertexSelector::Hypothesis(selector) => {
+ VertexRef::Hypothesis(self.resolve_hypothesis(selector)?.id)
+ }
+ VertexSelector::Experiment(selector) => {
+ VertexRef::Experiment(self.resolve_experiment(selector)?.id)
+ }
+ };
+ let parent_frontier_id = match vertex {
+ VertexRef::Hypothesis(id) => self.hypothesis_by_id(id)?.frontier_id,
+ VertexRef::Experiment(id) => {
+ self.resolve_experiment(&id.to_string())?.frontier_id
+ }
+ };
+ if parent_frontier_id != frontier_id {
+ return Err(StoreError::CrossFrontierInfluence);
+ }
+ if child.is_some_and(|child| child == vertex) {
+ return Err(StoreError::SelfEdge);
+ }
+ Ok(vertex)
+ })
+ .collect()
+ }
+
+ fn resolve_attachment_targets(
+ &self,
+ selectors: &[AttachmentSelector],
+ ) -> Result<Vec<AttachmentTargetRef>, StoreError> {
+ selectors
+ .iter()
+ .map(|selector| match selector {
+ AttachmentSelector::Frontier(selector) => Ok(AttachmentTargetRef::Frontier(
+ self.resolve_frontier(selector)?.id,
+ )),
+ AttachmentSelector::Hypothesis(selector) => Ok(AttachmentTargetRef::Hypothesis(
+ self.resolve_hypothesis(selector)?.id,
+ )),
+ AttachmentSelector::Experiment(selector) => Ok(AttachmentTargetRef::Experiment(
+ self.resolve_experiment(selector)?.id,
+ )),
+ })
+ .collect()
+ }
+
+ fn resolve_attachment_target(
+ &self,
+ selector: &AttachmentSelector,
+ ) -> Result<AttachmentTargetRef, StoreError> {
+ match selector {
+ AttachmentSelector::Frontier(selector) => Ok(AttachmentTargetRef::Frontier(
+ self.resolve_frontier(selector)?.id,
+ )),
+ AttachmentSelector::Hypothesis(selector) => Ok(AttachmentTargetRef::Hypothesis(
+ self.resolve_hypothesis(selector)?.id,
+ )),
+ AttachmentSelector::Experiment(selector) => Ok(AttachmentTargetRef::Experiment(
+ self.resolve_experiment(selector)?.id,
+ )),
}
- insert_node(&tx, &decision_node)?;
- insert_edge(
- &tx,
- &DagEdge {
- source_id: open_experiment.hypothesis_node_id,
- target_id: run_node.id,
- kind: EdgeKind::Lineage,
- },
- )?;
- if let Some(node) = analysis_node.as_ref() {
- insert_edge(
- &tx,
- &DagEdge {
- source_id: run_node.id,
- target_id: node.id,
- kind: EdgeKind::Evidence,
- },
- )?;
- insert_edge(
- &tx,
- &DagEdge {
- source_id: node.id,
- target_id: decision_node.id,
- kind: EdgeKind::Evidence,
- },
+ }
+
+ fn load_hypothesis_records(
+ &self,
+ frontier_id: Option<FrontierId>,
+ include_archived: bool,
+ ) -> Result<Vec<HypothesisRecord>, StoreError> {
+ let mut records = if let Some(frontier_id) = frontier_id {
+ let mut statement = self.connection.prepare(
+ "SELECT id, slug, frontier_id, archived, title, summary, body, revision, created_at, updated_at
+ FROM hypotheses
+ WHERE frontier_id = ?1
+ ORDER BY updated_at DESC, created_at DESC",
)?;
+ let rows = statement.query_map(params![frontier_id.to_string()], |row| {
+ self.decode_hypothesis_row(row)
+ })?;
+ rows.collect::<Result<Vec<_>, _>>()?
} else {
- insert_edge(
- &tx,
- &DagEdge {
- source_id: run_node.id,
- target_id: decision_node.id,
- kind: EdgeKind::Evidence,
- },
+ let mut statement = self.connection.prepare(
+ "SELECT id, slug, frontier_id, archived, title, summary, body, revision, created_at, updated_at
+ FROM hypotheses
+ ORDER BY updated_at DESC, created_at DESC",
)?;
+ let rows = statement.query_map([], |row| self.decode_hypothesis_row(row))?;
+ rows.collect::<Result<Vec<_>, _>>()?
+ };
+ if !include_archived {
+ records.retain(|record| !record.archived);
}
- insert_run(
- &tx,
- &run,
- benchmark_suite.as_deref(),
- &experiment.result.primary_metric,
- &primary_metric_definition,
- &experiment.result.supporting_metrics,
- supporting_metric_definitions.as_slice(),
- )?;
- insert_run_dimensions(&tx, run.run_id, &dimensions)?;
- insert_experiment(&tx, &experiment)?;
- delete_open_experiment(&tx, open_experiment.id)?;
- touch_frontier(&tx, open_experiment.frontier_id)?;
- insert_event(
- &tx,
- "experiment",
- &experiment.id.to_string(),
- "experiment.closed",
- json!({
- "frontier_id": open_experiment.frontier_id,
- "hypothesis_node_id": open_experiment.hypothesis_node_id,
- "verdict": format!("{:?}", request.verdict),
- }),
- )?;
- tx.commit()?;
-
- Ok(ExperimentReceipt {
- open_experiment,
- run_node,
- run,
- analysis_node,
- decision_node,
- experiment,
- })
+ Ok(records)
}
- fn load_annotations(
+ fn load_experiment_records(
&self,
- node_id: fidget_spinner_core::NodeId,
- ) -> Result<Vec<NodeAnnotation>, StoreError> {
+ frontier_id: Option<FrontierId>,
+ hypothesis_id: Option<HypothesisId>,
+ include_archived: bool,
+ ) -> Result<Vec<ExperimentRecord>, StoreError> {
+ let base_sql = "SELECT id, slug, frontier_id, hypothesis_id, archived, title, summary, tags_json, status, outcome_json, revision, created_at, updated_at FROM experiments";
+ let records = match (frontier_id, hypothesis_id) {
+ (Some(frontier_id), Some(hypothesis_id)) => {
+ let mut statement = self.connection.prepare(&format!(
+ "{base_sql} WHERE frontier_id = ?1 AND hypothesis_id = ?2 ORDER BY updated_at DESC, created_at DESC"
+ ))?;
+ let rows = statement.query_map(
+ params![frontier_id.to_string(), hypothesis_id.to_string()],
+ decode_experiment_row,
+ )?;
+ rows.collect::<Result<Vec<_>, _>>()?
+ }
+ (Some(frontier_id), None) => {
+ let mut statement = self.connection.prepare(&format!(
+ "{base_sql} WHERE frontier_id = ?1 ORDER BY updated_at DESC, created_at DESC"
+ ))?;
+ let rows =
+ statement.query_map(params![frontier_id.to_string()], decode_experiment_row)?;
+ rows.collect::<Result<Vec<_>, _>>()?
+ }
+ (None, Some(hypothesis_id)) => {
+ let mut statement = self.connection.prepare(&format!(
+ "{base_sql} WHERE hypothesis_id = ?1 ORDER BY updated_at DESC, created_at DESC"
+ ))?;
+ let rows = statement
+ .query_map(params![hypothesis_id.to_string()], decode_experiment_row)?;
+ rows.collect::<Result<Vec<_>, _>>()?
+ }
+ (None, None) => {
+ let mut statement = self.connection.prepare(&format!(
+ "{base_sql} ORDER BY updated_at DESC, created_at DESC"
+ ))?;
+ let rows = statement.query_map([], decode_experiment_row)?;
+ rows.collect::<Result<Vec<_>, _>>()?
+ }
+ };
+ Ok(if include_archived {
+ records
+ } else {
+ records
+ .into_iter()
+ .filter(|record| !record.archived)
+ .collect()
+ })
+ }
+
+ fn load_artifact_records(&self) -> Result<Vec<ArtifactRecord>, StoreError> {
let mut statement = self.connection.prepare(
- "SELECT id, visibility, label, body, created_at
- FROM node_annotations
- WHERE node_id = ?1
- ORDER BY created_at ASC",
+ "SELECT id, slug, kind, label, summary, locator, media_type, revision, created_at, updated_at
+ FROM artifacts
+ ORDER BY updated_at DESC, created_at DESC",
)?;
- let mut rows = statement.query(params![node_id.to_string()])?;
- let mut items = Vec::new();
- while let Some(row) = rows.next()? {
- items.push(NodeAnnotation {
- id: parse_annotation_id(&row.get::<_, String>(0)?)?,
- visibility: parse_annotation_visibility(&row.get::<_, String>(1)?)?,
- label: row
- .get::<_, Option<String>>(2)?
- .map(NonEmptyText::new)
- .transpose()?,
- body: NonEmptyText::new(row.get::<_, String>(3)?)?,
- created_at: decode_timestamp(&row.get::<_, String>(4)?)?,
- });
- }
- Ok(items)
+ let rows = statement.query_map([], decode_artifact_row)?;
+ rows.collect::<Result<Vec<_>, _>>()
+ .map_err(StoreError::from)
}
- fn load_tags(
+ fn decode_hypothesis_row(
&self,
- node_id: fidget_spinner_core::NodeId,
- ) -> Result<BTreeSet<TagName>, StoreError> {
+ row: &rusqlite::Row<'_>,
+ ) -> Result<HypothesisRecord, rusqlite::Error> {
+ let id = HypothesisId::from_uuid(parse_uuid_sql(&row.get::<_, String>(0)?)?);
+ Ok(HypothesisRecord {
+ id,
+ slug: parse_slug(&row.get::<_, String>(1)?)?,
+ frontier_id: FrontierId::from_uuid(parse_uuid_sql(&row.get::<_, String>(2)?)?),
+ archived: row.get::<_, i64>(3)? != 0,
+ title: parse_non_empty_text(&row.get::<_, String>(4)?)?,
+ summary: parse_non_empty_text(&row.get::<_, String>(5)?)?,
+ body: parse_non_empty_text(&row.get::<_, String>(6)?)?,
+ tags: self.hypothesis_tags(id)?,
+ revision: row.get::<_, u64>(7)?,
+ created_at: parse_timestamp_sql(&row.get::<_, String>(8)?)?,
+ updated_at: parse_timestamp_sql(&row.get::<_, String>(9)?)?,
+ })
+ }
+
+ fn hypothesis_tags(&self, id: HypothesisId) -> Result<Vec<TagName>, rusqlite::Error> {
let mut statement = self.connection.prepare(
- "SELECT tag_name
- FROM node_tags
- WHERE node_id = ?1
- ORDER BY tag_name ASC",
+ "SELECT tag_name FROM hypothesis_tags WHERE hypothesis_id = ?1 ORDER BY tag_name ASC",
)?;
- let mut rows = statement.query(params![node_id.to_string()])?;
- let mut items = BTreeSet::new();
- while let Some(row) = rows.next()? {
- let _ = items.insert(TagName::new(row.get::<_, String>(0)?)?);
- }
- Ok(items)
+ let rows = statement.query_map(params![id.to_string()], |row| {
+ parse_tag_name(&row.get::<_, String>(0)?)
+ })?;
+ rows.collect::<Result<Vec<_>, _>>()
}
- fn load_frontier(
+ fn hypothesis_summary_from_record(
&self,
- frontier_id: fidget_spinner_core::FrontierId,
- ) -> Result<FrontierRecord, StoreError> {
- let mut statement = self.connection.prepare(
- "SELECT id, label, root_contract_node_id, status, created_at, updated_at
- FROM frontiers
- WHERE id = ?1",
- )?;
- let frontier = statement
- .query_row(params![frontier_id.to_string()], |row| {
- read_frontier_row(row).map_err(to_sql_conversion_error)
- })
- .optional()?;
- frontier.ok_or(StoreError::FrontierNotFound(frontier_id))
+ record: HypothesisRecord,
+ ) -> Result<HypothesisSummary, StoreError> {
+ let latest_verdict = self
+ .latest_closed_experiment(record.id)?
+ .and_then(|experiment| experiment.outcome.map(|outcome| outcome.verdict));
+ Ok(HypothesisSummary {
+ id: record.id,
+ slug: record.slug,
+ frontier_id: record.frontier_id,
+ archived: record.archived,
+ title: record.title,
+ summary: record.summary,
+ tags: record.tags,
+ open_experiment_count: self
+ .list_experiments(ListExperimentsQuery {
+ hypothesis: Some(record.id.to_string()),
+ status: Some(ExperimentStatus::Open),
+ limit: None,
+ ..ListExperimentsQuery::default()
+ })?
+ .len() as u64,
+ latest_verdict,
+ updated_at: record.updated_at,
+ })
}
-}
-fn upgrade_store(connection: &mut Connection) -> Result<(), StoreError> {
- migrate(connection)?;
- backfill_prose_summaries(connection)?;
- let tx = connection.transaction()?;
- let _ = normalize_metric_plane_tx(&tx)?;
- tx.commit()?;
- Ok(())
-}
+ fn experiment_summary_from_record(
+ &self,
+ record: ExperimentRecord,
+ ) -> Result<ExperimentSummary, StoreError> {
+ Ok(ExperimentSummary {
+ id: record.id,
+ slug: record.slug,
+ frontier_id: record.frontier_id,
+ hypothesis_id: record.hypothesis_id,
+ archived: record.archived,
+ title: record.title,
+ summary: record.summary,
+ tags: record.tags,
+ status: record.status,
+ verdict: record.outcome.as_ref().map(|outcome| outcome.verdict),
+ primary_metric: record
+ .outcome
+ .as_ref()
+ .map(|outcome| self.metric_observation_summary(&outcome.primary_metric))
+ .transpose()?,
+ updated_at: record.updated_at,
+ closed_at: record.outcome.as_ref().map(|outcome| outcome.closed_at),
+ })
+ }
-fn validate_prose_node_request(request: &CreateNodeRequest) -> Result<(), StoreError> {
- if !matches!(request.class, NodeClass::Note | NodeClass::Source) {
- return Ok(());
+ fn metric_observation_summary(
+ &self,
+ metric: &MetricValue,
+ ) -> Result<MetricObservationSummary, StoreError> {
+ let definition = self
+ .metric_definition(&metric.key)?
+ .ok_or_else(|| StoreError::UnknownMetricDefinition(metric.key.clone()))?;
+ Ok(MetricObservationSummary {
+ key: metric.key.clone(),
+ value: metric.value,
+ unit: definition.unit,
+ objective: definition.objective,
+ })
}
- if request.summary.is_none() {
- return Err(StoreError::ProseSummaryRequired(request.class));
+
+ fn latest_closed_experiment(
+ &self,
+ hypothesis_id: HypothesisId,
+ ) -> Result<Option<ExperimentRecord>, StoreError> {
+ self.load_experiment_records(None, Some(hypothesis_id), true)
+ .map(|records| {
+ records
+ .into_iter()
+ .filter(|record| record.status == ExperimentStatus::Closed)
+ .max_by_key(|record| {
+ record
+ .outcome
+ .as_ref()
+ .map(|outcome| outcome.closed_at)
+ .unwrap_or(record.updated_at)
+ })
+ })
}
- match request.payload.field("body") {
- Some(Value::String(body)) if !body.trim().is_empty() => Ok(()),
- _ => Err(StoreError::ProseBodyRequired(request.class)),
+
+ fn load_vertex_parents(&self, child: VertexRef) -> Result<Vec<VertexSummary>, StoreError> {
+ let mut statement = self.connection.prepare(
+ "SELECT parent_kind, parent_id
+ FROM influence_edges
+ WHERE child_kind = ?1 AND child_id = ?2
+ ORDER BY ordinal ASC, parent_kind ASC, parent_id ASC",
+ )?;
+ let rows = statement.query_map(
+ params![vertex_kind_name(child), child.opaque_id()],
+ |row| -> Result<VertexRef, rusqlite::Error> {
+ decode_vertex_ref(&row.get::<_, String>(0)?, &row.get::<_, String>(1)?)
+ },
+ )?;
+ rows.collect::<Result<Vec<_>, _>>()?
+ .into_iter()
+ .map(|parent| self.vertex_summary(parent))
+ .collect()
}
-}
-#[derive(Clone, Debug)]
-struct MetricSample {
- key: NonEmptyText,
- source: MetricFieldSource,
- value: f64,
- frontier_id: fidget_spinner_core::FrontierId,
- experiment_id: fidget_spinner_core::ExperimentId,
- experiment_title: NonEmptyText,
- hypothesis_node_id: fidget_spinner_core::NodeId,
- hypothesis_title: NonEmptyText,
- run_id: fidget_spinner_core::RunId,
- verdict: FrontierVerdict,
- unit: Option<MetricUnit>,
- objective: Option<OptimizationObjective>,
- dimensions: BTreeMap<NonEmptyText, RunDimensionValue>,
-}
+ fn load_vertex_children(&self, parent: VertexRef) -> Result<Vec<VertexSummary>, StoreError> {
+ let mut statement = self.connection.prepare(
+ "SELECT child_kind, child_id
+ FROM influence_edges
+ WHERE parent_kind = ?1 AND parent_id = ?2
+ ORDER BY ordinal ASC, child_kind ASC, child_id ASC",
+ )?;
+ let rows = statement.query_map(
+ params![vertex_kind_name(parent), parent.opaque_id()],
+ |row| -> Result<VertexRef, rusqlite::Error> {
+ decode_vertex_ref(&row.get::<_, String>(0)?, &row.get::<_, String>(1)?)
+ },
+ )?;
+ rows.collect::<Result<Vec<_>, _>>()?
+ .into_iter()
+ .map(|child| self.vertex_summary(child))
+ .collect()
+ }
-impl MetricSample {
- fn into_entry(self, order: MetricRankOrder) -> MetricBestEntry {
- MetricBestEntry {
- key: self.key,
- source: self.source,
- value: self.value,
- order,
- experiment_id: self.experiment_id,
- experiment_title: self.experiment_title,
- frontier_id: self.frontier_id,
- hypothesis_node_id: self.hypothesis_node_id,
- hypothesis_title: self.hypothesis_title,
- run_id: self.run_id,
- verdict: self.verdict,
- unit: self.unit,
- objective: self.objective,
- dimensions: self.dimensions,
+ fn vertex_summary(&self, vertex: VertexRef) -> Result<VertexSummary, StoreError> {
+ match vertex {
+ VertexRef::Hypothesis(id) => {
+ let record = self.hypothesis_by_id(id)?;
+ Ok(VertexSummary {
+ vertex,
+ frontier_id: record.frontier_id,
+ slug: record.slug,
+ archived: record.archived,
+ title: record.title,
+ summary: Some(record.summary),
+ updated_at: record.updated_at,
+ })
+ }
+ VertexRef::Experiment(id) => {
+ let record = self.resolve_experiment(&id.to_string())?;
+ Ok(VertexSummary {
+ vertex,
+ frontier_id: record.frontier_id,
+ slug: record.slug,
+ archived: record.archived,
+ title: record.title,
+ summary: record.summary,
+ updated_at: record.updated_at,
+ })
+ }
}
}
-}
-#[derive(Clone, Debug)]
-struct MetricKeyAccumulator {
- key: NonEmptyText,
- source: MetricFieldSource,
- experiment_ids: BTreeSet<fidget_spinner_core::ExperimentId>,
- unit: Option<MetricUnit>,
- objective: Option<OptimizationObjective>,
- ambiguous_semantics: bool,
-}
+ fn artifact_attachment_targets(
+ &self,
+ artifact_id: ArtifactId,
+ ) -> Result<Vec<AttachmentTargetRef>, StoreError> {
+ let mut statement = self.connection.prepare(
+ "SELECT target_kind, target_id
+ FROM artifact_attachments
+ WHERE artifact_id = ?1
+ ORDER BY ordinal ASC, target_kind ASC, target_id ASC",
+ )?;
+ let rows = statement.query_map(params![artifact_id.to_string()], |row| {
+ decode_attachment_target(&row.get::<_, String>(0)?, &row.get::<_, String>(1)?)
+ })?;
+ rows.collect::<Result<Vec<_>, _>>()
+ .map_err(StoreError::from)
+ }
-impl MetricKeyAccumulator {
- fn from_sample(sample: &MetricSample) -> Self {
- Self {
- key: sample.key.clone(),
- source: sample.source,
- experiment_ids: BTreeSet::from([sample.experiment_id]),
- unit: sample.unit,
- objective: sample.objective,
- ambiguous_semantics: false,
+ fn artifact_attached_to_frontier(
+ &self,
+ artifact_id: ArtifactId,
+ frontier_id: FrontierId,
+ ) -> Result<bool, StoreError> {
+ let targets = self.artifact_attachment_targets(artifact_id)?;
+ if targets.contains(&AttachmentTargetRef::Frontier(frontier_id)) {
+ return Ok(true);
+ }
+ for target in targets {
+ match target {
+ AttachmentTargetRef::Hypothesis(hypothesis_id) => {
+ if self.hypothesis_by_id(hypothesis_id)?.frontier_id == frontier_id {
+ return Ok(true);
+ }
+ }
+ AttachmentTargetRef::Experiment(experiment_id) => {
+ if self
+ .resolve_experiment(&experiment_id.to_string())?
+ .frontier_id
+ == frontier_id
+ {
+ return Ok(true);
+ }
+ }
+ AttachmentTargetRef::Frontier(_) => {}
+ }
}
+ Ok(false)
}
- fn observe(&mut self, sample: &MetricSample) {
- let _ = self.experiment_ids.insert(sample.experiment_id);
- if self.unit != sample.unit || self.objective != sample.objective {
- self.ambiguous_semantics = true;
- self.unit = None;
- self.objective = None;
+ fn active_hypothesis_ids(
+ &self,
+ frontier_id: FrontierId,
+ brief: &FrontierBrief,
+ ) -> Result<BTreeSet<HypothesisId>, StoreError> {
+ let mut ids = brief
+ .roadmap
+ .iter()
+ .map(|item| item.hypothesis_id)
+ .collect::<BTreeSet<_>>();
+ for experiment in self.list_experiments(ListExperimentsQuery {
+ frontier: Some(frontier_id.to_string()),
+ status: Some(ExperimentStatus::Open),
+ limit: None,
+ ..ListExperimentsQuery::default()
+ })? {
+ let _ = ids.insert(experiment.hypothesis_id);
}
+ Ok(ids)
}
- fn finish(self) -> MetricKeySummary {
- MetricKeySummary {
- key: self.key,
- source: self.source,
- experiment_count: self.experiment_ids.len() as u64,
- unit: self.unit,
- objective: self.objective,
- description: None,
- requires_order: self.source != MetricFieldSource::RunMetric
- || self.ambiguous_semantics
- || !matches!(
- self.objective,
- Some(OptimizationObjective::Minimize | OptimizationObjective::Maximize)
- ),
- }
+ fn active_hypothesis_count(&self, frontier_id: FrontierId) -> Result<u64, StoreError> {
+ let frontier = self.read_frontier(&frontier_id.to_string())?;
+ Ok(self
+ .active_hypothesis_ids(frontier_id, &frontier.brief)?
+ .len() as u64)
}
-}
-fn collect_metric_samples(
- store: &ProjectStore,
- query: &MetricKeyQuery,
-) -> Result<Vec<MetricSample>, StoreError> {
- let rows = load_experiment_rows(store)?;
- let metric_definitions = metric_definitions_by_key(store)?;
- let mut samples = Vec::new();
- for row in rows {
- if query
- .frontier_id
- .is_some_and(|frontier_id| row.frontier_id != frontier_id)
- {
- continue;
+ fn open_experiment_count(&self, frontier_id: Option<FrontierId>) -> Result<u64, StoreError> {
+ Ok(self
+ .load_experiment_records(frontier_id, None, false)?
+ .into_iter()
+ .filter(|record| record.status == ExperimentStatus::Open)
+ .count() as u64)
+ }
+
    /// Metric-definition summaries for every metric key that is "live" in the
    /// given frontier context (see `live_metric_key_names_with_context`) and
    /// whose definition is default-visible. Result is sorted by key.
    fn live_metric_keys(
        &self,
        frontier_id: FrontierId,
        active_hypotheses: &[HypothesisCurrentState],
        open_experiments: &[ExperimentSummary],
    ) -> Result<Vec<MetricKeySummary>, StoreError> {
        let live_names = self.live_metric_key_names_with_context(
            frontier_id,
            active_hypotheses,
            open_experiments,
        )?;
        let mut keys = self
            .list_metric_definitions()?
            .into_iter()
            .filter(|definition| live_names.contains(definition.key.as_str()))
            .filter(|definition| definition.visibility.is_default_visible())
            .map(|definition| {
                Ok(MetricKeySummary {
                    // Computed first: `definition.key` is moved into the
                    // struct by the `key:` initializer below.
                    reference_count: self
                        .metric_reference_count(Some(frontier_id), &definition.key)?,
                    key: definition.key,
                    unit: definition.unit,
                    objective: definition.objective,
                    visibility: definition.visibility,
                    description: definition.description,
                })
            })
            .collect::<Result<Vec<_>, StoreError>>()?;
        keys.sort_by(|left, right| left.key.as_str().cmp(right.key.as_str()));
        Ok(keys)
    }
+
    /// Live metric key names for a frontier, computed from scratch.
    ///
    /// Rebuilds the per-hypothesis state (open experiments plus the single
    /// most recent closed experiment) and the frontier's open-experiment
    /// list, then delegates to `live_metric_key_names_with_context`.
    fn live_metric_key_names(
        &self,
        frontier_id: FrontierId,
    ) -> Result<BTreeSet<String>, StoreError> {
        let frontier = self.read_frontier(&frontier_id.to_string())?;
        let active_hypotheses = self
            .active_hypothesis_ids(frontier_id, &frontier.brief)?
            .into_iter()
            .map(|hypothesis_id| {
                let summary =
                    self.hypothesis_summary_from_record(self.hypothesis_by_id(hypothesis_id)?)?;
                // Every still-open experiment for this hypothesis.
                let open_experiments = self.list_experiments(ListExperimentsQuery {
                    hypothesis: Some(hypothesis_id.to_string()),
                    status: Some(ExperimentStatus::Open),
                    limit: None,
                    ..ListExperimentsQuery::default()
                })?;
                // limit=1 picks a single closed experiment; assumes
                // `list_experiments` orders newest-first — TODO confirm.
                let latest_closed_experiment = self
                    .list_experiments(ListExperimentsQuery {
                        hypothesis: Some(hypothesis_id.to_string()),
                        status: Some(ExperimentStatus::Closed),
                        limit: Some(1),
                        ..ListExperimentsQuery::default()
                    })?
                    .into_iter()
                    .next();
                Ok(HypothesisCurrentState {
                    hypothesis: summary,
                    open_experiments,
                    latest_closed_experiment,
                })
            })
            .collect::<Result<Vec<_>, StoreError>>()?;
        let open_experiments = self.list_experiments(ListExperimentsQuery {
            frontier: Some(frontier_id.to_string()),
            status: Some(ExperimentStatus::Open),
            limit: None,
            ..ListExperimentsQuery::default()
        })?;
        self.live_metric_key_names_with_context(frontier_id, &active_hypotheses, &open_experiments)
    }
+
    /// Union of metric key names considered "live":
    /// * keys observed on each active hypothesis's latest closed experiment,
    /// * keys observed on the experiment parents of every open experiment.
    ///
    /// `_frontier_id` is currently unused; kept so the signature mirrors the
    /// `live_metric_key_names` entry point.
    fn live_metric_key_names_with_context(
        &self,
        _frontier_id: FrontierId,
        active_hypotheses: &[HypothesisCurrentState],
        open_experiments: &[ExperimentSummary],
    ) -> Result<BTreeSet<String>, StoreError> {
        let mut keys = BTreeSet::new();
        for state in active_hypotheses {
            if let Some(experiment) = state.latest_closed_experiment.as_ref() {
                keys.extend(self.experiment_metric_key_names(experiment.id)?);
            }
        }
        for experiment in open_experiments {
            // Open experiments presumably have no outcome of their own yet,
            // so pull keys from the experiments they descend from — verify.
            for parent in self.load_vertex_parents(VertexRef::Experiment(experiment.id))? {
                if let VertexRef::Experiment(parent_id) = parent.vertex {
                    keys.extend(self.experiment_metric_key_names(parent_id)?);
                }
            }
        }
        Ok(keys)
    }
- Ok(if let Some(source) = query.source {
- samples
- .into_iter()
- .filter(|sample| sample.source == source)
- .collect()
- } else {
- samples
- })
-}
-fn resolve_metric_order(
- matching: &[MetricSample],
- query: &MetricBestQuery,
- source: MetricFieldSource,
-) -> Result<MetricRankOrder, StoreError> {
- if let Some(order) = query.order {
- return Ok(order);
+ fn experiment_metric_key_names(
+ &self,
+ experiment_id: ExperimentId,
+ ) -> Result<BTreeSet<String>, StoreError> {
+ let record = self.resolve_experiment(&experiment_id.to_string())?;
+ Ok(record
+ .outcome
+ .as_ref()
+ .map(all_metrics)
+ .unwrap_or_default()
+ .into_iter()
+ .map(|metric| metric.key.to_string())
+ .collect())
}
- if source != MetricFieldSource::RunMetric {
- return Err(StoreError::MetricOrderRequired {
- key: query.key.as_str().to_owned(),
- metric_source: source.as_str().to_owned(),
- });
+
+ fn metric_reference_count(
+ &self,
+ frontier_id: Option<FrontierId>,
+ key: &NonEmptyText,
+ ) -> Result<u64, StoreError> {
+ let base_sql = "SELECT COUNT(*)
+ FROM experiment_metrics metrics
+ JOIN experiments experiments ON experiments.id = metrics.experiment_id";
+ let count = if let Some(frontier_id) = frontier_id {
+ self.connection.query_row(
+ &format!("{base_sql} WHERE metrics.key = ?1 AND experiments.frontier_id = ?2"),
+ params![key.as_str(), frontier_id.to_string()],
+ |row| row.get::<_, u64>(0),
+ )?
+ } else {
+ self.connection.query_row(
+ &format!("{base_sql} WHERE metrics.key = ?1"),
+ params![key.as_str()],
+ |row| row.get::<_, u64>(0),
+ )?
+ };
+ Ok(count)
}
- let objectives = matching
- .iter()
- .map(|sample| sample.objective)
- .collect::<BTreeSet<_>>();
- match objectives.len() {
- 1 => match objectives.into_iter().next().flatten() {
- Some(OptimizationObjective::Minimize) => Ok(MetricRankOrder::Asc),
- Some(OptimizationObjective::Maximize) => Ok(MetricRankOrder::Desc),
- Some(OptimizationObjective::Target) | None => Err(StoreError::MetricOrderRequired {
- key: query.key.as_str().to_owned(),
- metric_source: source.as_str().to_owned(),
- }),
- },
- _ => Err(StoreError::MetricSemanticsAmbiguous {
- key: query.key.as_str().to_owned(),
- metric_source: source.as_str().to_owned(),
- }),
+
+ fn materialize_outcome(
+ &self,
+ patch: &ExperimentOutcomePatch,
+ ) -> Result<ExperimentOutcome, StoreError> {
+ if patch.backend == ExecutionBackend::Manual && patch.command.argv.is_empty() {
+ return Err(StoreError::ManualExperimentRequiresCommand);
+ }
+ for key in patch.dimensions.keys() {
+ let definition = self
+ .run_dimension_definition(key)?
+ .ok_or_else(|| StoreError::UnknownRunDimension(key.clone()))?;
+ let observed = patch
+ .dimensions
+ .get(key)
+ .map(RunDimensionValue::value_type)
+ .ok_or_else(|| StoreError::UnknownRunDimension(key.clone()))?;
+ if definition.value_type != observed {
+ return Err(StoreError::UnknownDimensionFilter(key.to_string()));
+ }
+ }
+ let _ = self
+ .metric_definition(&patch.primary_metric.key)?
+ .ok_or_else(|| StoreError::UnknownMetricDefinition(patch.primary_metric.key.clone()))?;
+ for metric in &patch.supporting_metrics {
+ let _ = self
+ .metric_definition(&metric.key)?
+ .ok_or_else(|| StoreError::UnknownMetricDefinition(metric.key.clone()))?;
+ }
+ Ok(ExperimentOutcome {
+ backend: patch.backend,
+ command: patch.command.clone(),
+ dimensions: patch.dimensions.clone(),
+ primary_metric: patch.primary_metric.clone(),
+ supporting_metrics: patch.supporting_metrics.clone(),
+ verdict: patch.verdict,
+ rationale: patch.rationale.clone(),
+ analysis: patch.analysis.clone(),
+ closed_at: OffsetDateTime::now_utc(),
+ })
}
-}
-fn compare_metric_samples(
- left: &MetricSample,
- right: &MetricSample,
- order: MetricRankOrder,
-) -> Ordering {
- let metric_order = match order {
- MetricRankOrder::Asc => left
- .value
- .partial_cmp(&right.value)
- .unwrap_or(Ordering::Equal),
- MetricRankOrder::Desc => right
- .value
- .partial_cmp(&left.value)
- .unwrap_or(Ordering::Equal),
- };
- metric_order
- .then_with(|| right.experiment_id.cmp(&left.experiment_id))
- .then_with(|| left.key.cmp(&right.key))
-}
+ fn assert_known_tags(&self, tags: &BTreeSet<TagName>) -> Result<(), StoreError> {
+ for tag in tags {
+ if self
+ .connection
+ .query_row(
+ "SELECT 1 FROM tags WHERE name = ?1",
+ params![tag.as_str()],
+ |_| Ok(()),
+ )
+ .optional()?
+ .is_none()
+ {
+ return Err(StoreError::UnknownTag(tag.clone()));
+ }
+ }
+ Ok(())
+ }
-#[derive(Clone, Debug)]
-struct ExperimentMetricRow {
- experiment_id: fidget_spinner_core::ExperimentId,
- experiment_title: NonEmptyText,
- frontier_id: fidget_spinner_core::FrontierId,
- run_id: fidget_spinner_core::RunId,
- verdict: FrontierVerdict,
- hypothesis_node: DagNode,
- run_node: DagNode,
- analysis_node: Option<DagNode>,
- decision_node: DagNode,
- primary_metric: MetricValue,
- supporting_metrics: Vec<MetricValue>,
- dimensions: BTreeMap<NonEmptyText, RunDimensionValue>,
-}
    /// Derives a collision-free slug for a new frontier from its label, or
    /// returns the explicitly supplied slug verbatim.
    fn unique_frontier_slug(
        &self,
        explicit: Option<Slug>,
        label: &NonEmptyText,
    ) -> Result<Slug, StoreError> {
        self.unique_slug("frontiers", "slug", explicit, label)
    }
-fn load_experiment_rows(store: &ProjectStore) -> Result<Vec<ExperimentMetricRow>, StoreError> {
- let run_dimensions = load_run_dimensions_by_run_id(store)?;
- let mut statement = store.connection.prepare(
- "SELECT
- id,
- title,
- frontier_id,
- run_id,
- hypothesis_node_id,
- run_node_id,
- analysis_node_id,
- decision_node_id,
- primary_metric_json,
- supporting_metrics_json,
- verdict
- FROM experiments",
- )?;
- let mut rows = statement.query([])?;
- let mut items = Vec::new();
- while let Some(row) = rows.next()? {
- let hypothesis_node_id = parse_node_id(&row.get::<_, String>(4)?)?;
- let run_id = parse_run_id(&row.get::<_, String>(3)?)?;
- let run_node_id = parse_node_id(&row.get::<_, String>(5)?)?;
- let analysis_node_id = row
- .get::<_, Option<String>>(6)?
- .map(|raw| parse_node_id(&raw))
- .transpose()?;
- let decision_node_id = parse_node_id(&row.get::<_, String>(7)?)?;
- items.push(ExperimentMetricRow {
- experiment_id: parse_experiment_id(&row.get::<_, String>(0)?)?,
- experiment_title: NonEmptyText::new(row.get::<_, String>(1)?)?,
- frontier_id: parse_frontier_id(&row.get::<_, String>(2)?)?,
- run_id,
- verdict: parse_frontier_verdict(&row.get::<_, String>(10)?)?,
- hypothesis_node: store
- .get_node(hypothesis_node_id)?
- .ok_or(StoreError::NodeNotFound(hypothesis_node_id))?,
- run_node: store
- .get_node(run_node_id)?
- .ok_or(StoreError::NodeNotFound(run_node_id))?,
- analysis_node: analysis_node_id
- .map(|node_id| {
- store
- .get_node(node_id)?
- .ok_or(StoreError::NodeNotFound(node_id))
- })
- .transpose()?,
- decision_node: store
- .get_node(decision_node_id)?
- .ok_or(StoreError::NodeNotFound(decision_node_id))?,
- primary_metric: decode_json(&row.get::<_, String>(8)?)?,
- supporting_metrics: decode_json(&row.get::<_, String>(9)?)?,
- dimensions: run_dimensions.get(&run_id).cloned().unwrap_or_default(),
- });
    /// Derives a collision-free slug for a new hypothesis from its title, or
    /// returns the explicitly supplied slug verbatim.
    fn unique_hypothesis_slug(
        &self,
        explicit: Option<Slug>,
        title: &NonEmptyText,
    ) -> Result<Slug, StoreError> {
        self.unique_slug("hypotheses", "slug", explicit, title)
    }
- Ok(items)
-}
-fn metric_samples_for_row(
- schema: &ProjectSchema,
- row: &ExperimentMetricRow,
- metric_definitions: &BTreeMap<String, MetricDefinition>,
-) -> Vec<MetricSample> {
- let mut samples = vec![metric_sample_from_observation(
- row,
- &row.primary_metric,
- metric_definitions,
- MetricFieldSource::RunMetric,
- )];
- samples.extend(row.supporting_metrics.iter().map(|metric| {
- metric_sample_from_observation(
- row,
- metric,
- metric_definitions,
- MetricFieldSource::RunMetric,
- )
- }));
- samples.extend(metric_samples_from_payload(
- schema,
- row,
- &row.hypothesis_node,
- ));
- samples.extend(metric_samples_from_payload(schema, row, &row.run_node));
- if let Some(node) = row.analysis_node.as_ref() {
- samples.extend(metric_samples_from_payload(schema, row, node));
    /// Derives a collision-free slug for a new experiment from its title, or
    /// returns the explicitly supplied slug verbatim.
    fn unique_experiment_slug(
        &self,
        explicit: Option<Slug>,
        title: &NonEmptyText,
    ) -> Result<Slug, StoreError> {
        self.unique_slug("experiments", "slug", explicit, title)
    }
- samples.extend(metric_samples_from_payload(schema, row, &row.decision_node));
- samples
-}
-fn metric_sample_from_observation(
- row: &ExperimentMetricRow,
- metric: &MetricValue,
- metric_definitions: &BTreeMap<String, MetricDefinition>,
- source: MetricFieldSource,
-) -> MetricSample {
- let registry = metric_definitions.get(metric.key.as_str());
- MetricSample {
- key: metric.key.clone(),
- source,
- value: metric.value,
- frontier_id: row.frontier_id,
- experiment_id: row.experiment_id,
- experiment_title: row.experiment_title.clone(),
- hypothesis_node_id: row.hypothesis_node.id,
- hypothesis_title: row.hypothesis_node.title.clone(),
- run_id: row.run_id,
- verdict: row.verdict,
- unit: registry.map(|definition| definition.unit),
- objective: registry.map(|definition| definition.objective),
- dimensions: row.dimensions.clone(),
    /// Derives a collision-free slug for a new artifact from its label, or
    /// returns the explicitly supplied slug verbatim.
    fn unique_artifact_slug(
        &self,
        explicit: Option<Slug>,
        label: &NonEmptyText,
    ) -> Result<Slug, StoreError> {
        self.unique_slug("artifacts", "slug", explicit, label)
    }
-}
-fn metric_samples_from_payload(
- schema: &ProjectSchema,
- row: &ExperimentMetricRow,
- node: &DagNode,
-) -> Vec<MetricSample> {
- let Some(source) = MetricFieldSource::from_payload_class(node.class) else {
- return Vec::new();
- };
- node.payload
- .fields
- .iter()
- .filter_map(|(key, value)| {
- let value = value.as_f64()?;
- let spec = schema.field_spec(node.class, key);
- if spec.is_some_and(|field| {
- field
- .value_type
- .is_some_and(|kind| kind != FieldValueType::Numeric)
- }) {
- return None;
+ fn unique_slug(
+ &self,
+ table: &str,
+ column: &str,
+ explicit: Option<Slug>,
+ seed: &NonEmptyText,
+ ) -> Result<Slug, StoreError> {
+ if let Some(explicit) = explicit {
+ return Ok(explicit);
+ }
+ let base = slugify(seed.as_str())?;
+ if !self.slug_exists(table, column, &base)? {
+ return Ok(base);
+ }
+ for ordinal in 2..10_000 {
+ let candidate = Slug::new(format!("{}-{ordinal}", base.as_str()))?;
+ if !self.slug_exists(table, column, &candidate)? {
+ return Ok(candidate);
}
- Some(MetricSample {
- key: NonEmptyText::new(key.clone()).ok()?,
- source,
- value,
- frontier_id: row.frontier_id,
- experiment_id: row.experiment_id,
- experiment_title: row.experiment_title.clone(),
- hypothesis_node_id: row.hypothesis_node.id,
- hypothesis_title: row.hypothesis_node.title.clone(),
- run_id: row.run_id,
- verdict: row.verdict,
- unit: None,
- objective: None,
- dimensions: row.dimensions.clone(),
+ }
+ Slug::new(format!("{}-{}", base.as_str(), Uuid::now_v7().simple()))
+ .map_err(StoreError::from)
+ }
+
+ fn slug_exists(&self, table: &str, column: &str, slug: &Slug) -> Result<bool, StoreError> {
+ let sql = format!("SELECT 1 FROM {table} WHERE {column} = ?1");
+ self.connection
+ .query_row(&sql, params![slug.as_str()], |_| Ok(()))
+ .optional()
+ .map(|value| value.is_some())
+ .map_err(StoreError::from)
+ }
+
    /// Full event history for one entity, newest revision first.
    ///
    /// Each `events` row stores a complete JSON snapshot of the entity at
    /// that revision, so history can be rendered without replaying diffs.
    fn entity_history(
        &self,
        entity_kind: &str,
        entity_id: &str,
    ) -> Result<Vec<EntityHistoryEntry>, StoreError> {
        let mut statement = self.connection.prepare(
            "SELECT revision, event_kind, occurred_at, snapshot_json
             FROM events
             WHERE entity_kind = ?1 AND entity_id = ?2
             ORDER BY revision DESC, occurred_at DESC",
        )?;
        let rows = statement.query_map(params![entity_kind, entity_id], |row| {
            Ok(EntityHistoryEntry {
                revision: row.get(0)?,
                event_kind: parse_non_empty_text(&row.get::<_, String>(1)?)?,
                occurred_at: parse_timestamp_sql(&row.get::<_, String>(2)?)?,
                // JSON decode failures are funnelled through the rusqlite
                // conversion-error path, same as column-type mismatches.
                snapshot: decode_json(&row.get::<_, String>(3)?)
                    .map_err(to_sql_conversion_error)?,
            })
        })?;
        rows.collect::<Result<Vec<_>, _>>()
            .map_err(StoreError::from)
    }
}
-fn migrate(connection: &Connection) -> Result<(), StoreError> {
+fn install_schema(connection: &Connection) -> Result<(), StoreError> {
connection.execute_batch(
"
- PRAGMA foreign_keys = ON;
+ CREATE TABLE IF NOT EXISTS tags (
+ name TEXT PRIMARY KEY NOT NULL,
+ description TEXT NOT NULL,
+ created_at TEXT NOT NULL
+ );
- CREATE TABLE IF NOT EXISTS nodes (
- id TEXT PRIMARY KEY,
- class TEXT NOT NULL,
- track TEXT NOT NULL,
- frontier_id TEXT,
+ CREATE TABLE IF NOT EXISTS frontiers (
+ id TEXT PRIMARY KEY NOT NULL,
+ slug TEXT NOT NULL UNIQUE,
+ label TEXT NOT NULL,
+ objective TEXT NOT NULL,
+ status TEXT NOT NULL,
+ brief_json TEXT NOT NULL,
+ revision INTEGER NOT NULL,
+ created_at TEXT NOT NULL,
+ updated_at TEXT NOT NULL
+ );
+
+ CREATE TABLE IF NOT EXISTS hypotheses (
+ id TEXT PRIMARY KEY NOT NULL,
+ slug TEXT NOT NULL UNIQUE,
+ frontier_id TEXT NOT NULL REFERENCES frontiers(id) ON DELETE CASCADE,
archived INTEGER NOT NULL,
title TEXT NOT NULL,
- summary TEXT,
- payload_schema_namespace TEXT,
- payload_schema_version INTEGER,
- payload_json TEXT NOT NULL,
- diagnostics_json TEXT NOT NULL,
- agent_session_id TEXT,
+ summary TEXT NOT NULL,
+ body TEXT NOT NULL,
+ revision INTEGER NOT NULL,
created_at TEXT NOT NULL,
updated_at TEXT NOT NULL
);
- CREATE TABLE IF NOT EXISTS node_annotations (
- id TEXT PRIMARY KEY,
- node_id TEXT NOT NULL REFERENCES nodes(id) ON DELETE CASCADE,
- visibility TEXT NOT NULL,
- label TEXT,
- body TEXT NOT NULL,
- created_at TEXT NOT NULL
+ CREATE TABLE IF NOT EXISTS hypothesis_tags (
+ hypothesis_id TEXT NOT NULL REFERENCES hypotheses(id) ON DELETE CASCADE,
+ tag_name TEXT NOT NULL REFERENCES tags(name) ON DELETE CASCADE,
+ PRIMARY KEY (hypothesis_id, tag_name)
);
- CREATE TABLE IF NOT EXISTS tags (
- name TEXT PRIMARY KEY,
- description TEXT NOT NULL,
- created_at TEXT NOT NULL
+ CREATE TABLE IF NOT EXISTS experiments (
+ id TEXT PRIMARY KEY NOT NULL,
+ slug TEXT NOT NULL UNIQUE,
+ frontier_id TEXT NOT NULL REFERENCES frontiers(id) ON DELETE CASCADE,
+ hypothesis_id TEXT NOT NULL REFERENCES hypotheses(id) ON DELETE CASCADE,
+ archived INTEGER NOT NULL,
+ title TEXT NOT NULL,
+ summary TEXT,
+ tags_json TEXT NOT NULL,
+ status TEXT NOT NULL,
+ outcome_json TEXT,
+ revision INTEGER NOT NULL,
+ created_at TEXT NOT NULL,
+ updated_at TEXT NOT NULL
);
- CREATE TABLE IF NOT EXISTS node_tags (
- node_id TEXT NOT NULL REFERENCES nodes(id) ON DELETE CASCADE,
- tag_name TEXT NOT NULL REFERENCES tags(name) ON DELETE RESTRICT,
- PRIMARY KEY (node_id, tag_name)
+ CREATE TABLE IF NOT EXISTS experiment_tags (
+ experiment_id TEXT NOT NULL REFERENCES experiments(id) ON DELETE CASCADE,
+ tag_name TEXT NOT NULL REFERENCES tags(name) ON DELETE CASCADE,
+ PRIMARY KEY (experiment_id, tag_name)
);
- CREATE TABLE IF NOT EXISTS node_edges (
- source_id TEXT NOT NULL REFERENCES nodes(id) ON DELETE CASCADE,
- target_id TEXT NOT NULL REFERENCES nodes(id) ON DELETE CASCADE,
- kind TEXT NOT NULL,
- PRIMARY KEY (source_id, target_id, kind)
+ CREATE TABLE IF NOT EXISTS influence_edges (
+ parent_kind TEXT NOT NULL,
+ parent_id TEXT NOT NULL,
+ child_kind TEXT NOT NULL,
+ child_id TEXT NOT NULL,
+ ordinal INTEGER NOT NULL,
+ PRIMARY KEY (parent_kind, parent_id, child_kind, child_id)
);
- CREATE TABLE IF NOT EXISTS frontiers (
- id TEXT PRIMARY KEY,
+ CREATE TABLE IF NOT EXISTS artifacts (
+ id TEXT PRIMARY KEY NOT NULL,
+ slug TEXT NOT NULL UNIQUE,
+ kind TEXT NOT NULL,
label TEXT NOT NULL,
- root_contract_node_id TEXT NOT NULL REFERENCES nodes(id) ON DELETE RESTRICT,
- status TEXT NOT NULL,
+ summary TEXT,
+ locator TEXT NOT NULL,
+ media_type TEXT,
+ revision INTEGER NOT NULL,
created_at TEXT NOT NULL,
updated_at TEXT NOT NULL
);
- CREATE TABLE IF NOT EXISTS runs (
- run_id TEXT PRIMARY KEY,
- node_id TEXT NOT NULL REFERENCES nodes(id) ON DELETE CASCADE,
- frontier_id TEXT REFERENCES frontiers(id) ON DELETE SET NULL,
- status TEXT NOT NULL,
- backend TEXT NOT NULL,
- benchmark_suite TEXT,
- working_directory TEXT NOT NULL,
- argv_json TEXT NOT NULL,
- env_json TEXT NOT NULL,
- started_at TEXT,
- finished_at TEXT
- );
-
- CREATE TABLE IF NOT EXISTS metrics (
- run_id TEXT NOT NULL REFERENCES runs(run_id) ON DELETE CASCADE,
- metric_key TEXT NOT NULL,
- unit TEXT NOT NULL,
- objective TEXT NOT NULL,
- value REAL NOT NULL
+ CREATE TABLE IF NOT EXISTS artifact_attachments (
+ artifact_id TEXT NOT NULL REFERENCES artifacts(id) ON DELETE CASCADE,
+ target_kind TEXT NOT NULL,
+ target_id TEXT NOT NULL,
+ ordinal INTEGER NOT NULL,
+ PRIMARY KEY (artifact_id, target_kind, target_id)
);
CREATE TABLE IF NOT EXISTS metric_definitions (
- metric_key TEXT PRIMARY KEY,
+ key TEXT PRIMARY KEY NOT NULL,
unit TEXT NOT NULL,
objective TEXT NOT NULL,
+ visibility TEXT NOT NULL,
description TEXT,
- created_at TEXT NOT NULL
+ created_at TEXT NOT NULL,
+ updated_at TEXT NOT NULL
);
CREATE TABLE IF NOT EXISTS run_dimension_definitions (
- dimension_key TEXT PRIMARY KEY,
+ key TEXT PRIMARY KEY NOT NULL,
value_type TEXT NOT NULL,
description TEXT,
- created_at TEXT NOT NULL
+ created_at TEXT NOT NULL,
+ updated_at TEXT NOT NULL
);
- CREATE TABLE IF NOT EXISTS run_dimensions (
- run_id TEXT NOT NULL REFERENCES runs(run_id) ON DELETE CASCADE,
- dimension_key TEXT NOT NULL REFERENCES run_dimension_definitions(dimension_key) ON DELETE RESTRICT,
- value_type TEXT NOT NULL,
- value_text TEXT,
- value_numeric REAL,
- value_boolean INTEGER,
- value_timestamp TEXT,
- PRIMARY KEY (run_id, dimension_key)
+ CREATE TABLE IF NOT EXISTS experiment_dimensions (
+ experiment_id TEXT NOT NULL REFERENCES experiments(id) ON DELETE CASCADE,
+ key TEXT NOT NULL REFERENCES run_dimension_definitions(key) ON DELETE CASCADE,
+ value_json TEXT NOT NULL,
+ PRIMARY KEY (experiment_id, key)
);
- CREATE TABLE IF NOT EXISTS open_experiments (
- id TEXT PRIMARY KEY,
- frontier_id TEXT NOT NULL REFERENCES frontiers(id) ON DELETE CASCADE,
- hypothesis_node_id TEXT NOT NULL REFERENCES nodes(id) ON DELETE RESTRICT,
- title TEXT NOT NULL,
- summary TEXT,
- created_at TEXT NOT NULL
+ CREATE TABLE IF NOT EXISTS experiment_metrics (
+ experiment_id TEXT NOT NULL REFERENCES experiments(id) ON DELETE CASCADE,
+ key TEXT NOT NULL REFERENCES metric_definitions(key) ON DELETE CASCADE,
+ ordinal INTEGER NOT NULL,
+ is_primary INTEGER NOT NULL,
+ value REAL NOT NULL,
+ PRIMARY KEY (experiment_id, key, ordinal)
);
- CREATE TABLE IF NOT EXISTS experiments (
- id TEXT PRIMARY KEY,
- frontier_id TEXT NOT NULL REFERENCES frontiers(id) ON DELETE CASCADE,
- hypothesis_node_id TEXT NOT NULL REFERENCES nodes(id) ON DELETE RESTRICT,
- run_node_id TEXT NOT NULL REFERENCES nodes(id) ON DELETE RESTRICT,
- run_id TEXT NOT NULL REFERENCES runs(run_id) ON DELETE RESTRICT,
- analysis_node_id TEXT REFERENCES nodes(id) ON DELETE RESTRICT,
- decision_node_id TEXT NOT NULL REFERENCES nodes(id) ON DELETE RESTRICT,
- title TEXT NOT NULL,
- summary TEXT,
- benchmark_suite TEXT NOT NULL,
- primary_metric_json TEXT NOT NULL,
- supporting_metrics_json TEXT NOT NULL,
- note_summary TEXT NOT NULL,
- note_next_json TEXT NOT NULL,
- verdict TEXT NOT NULL,
- created_at TEXT NOT NULL
- );
-
- CREATE INDEX IF NOT EXISTS metrics_by_key ON metrics(metric_key);
- CREATE INDEX IF NOT EXISTS run_dimensions_by_key_text ON run_dimensions(dimension_key, value_text);
- CREATE INDEX IF NOT EXISTS run_dimensions_by_key_numeric ON run_dimensions(dimension_key, value_numeric);
- CREATE INDEX IF NOT EXISTS run_dimensions_by_run ON run_dimensions(run_id, dimension_key);
- CREATE INDEX IF NOT EXISTS open_experiments_by_frontier ON open_experiments(frontier_id, created_at DESC);
- CREATE INDEX IF NOT EXISTS experiments_by_frontier ON experiments(frontier_id, created_at DESC);
-
CREATE TABLE IF NOT EXISTS events (
- id INTEGER PRIMARY KEY AUTOINCREMENT,
entity_kind TEXT NOT NULL,
entity_id TEXT NOT NULL,
+ revision INTEGER NOT NULL,
event_kind TEXT NOT NULL,
- payload_json TEXT NOT NULL,
- created_at TEXT NOT NULL
+ occurred_at TEXT NOT NULL,
+ snapshot_json TEXT NOT NULL,
+ PRIMARY KEY (entity_kind, entity_id, revision)
);
",
)?;
Ok(())
}
-fn backfill_prose_summaries(connection: &Connection) -> Result<(), StoreError> {
- let mut statement = connection.prepare(
- "SELECT id, payload_json
- FROM nodes
- WHERE class IN ('note', 'source')
- AND (summary IS NULL OR trim(summary) = '')",
/// Inserts a brand-new frontier row inside the caller's transaction.
///
/// The affected-row count from `execute` is deliberately discarded; a
/// duplicate id/slug surfaces as a constraint error through `?` instead.
fn insert_frontier(
    transaction: &Transaction<'_>,
    frontier: &FrontierRecord,
) -> Result<(), StoreError> {
    let _ = transaction.execute(
        "INSERT INTO frontiers (id, slug, label, objective, status, brief_json, revision, created_at, updated_at)
         VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9)",
        params![
            frontier.id.to_string(),
            frontier.slug.as_str(),
            frontier.label.as_str(),
            frontier.objective.as_str(),
            frontier.status.as_str(),
            encode_json(&frontier.brief)?,
            frontier.revision,
            encode_timestamp(frontier.created_at)?,
            encode_timestamp(frontier.updated_at)?,
        ],
    )?;
    Ok(())
}
-fn sort_schema_fields(fields: &mut [ProjectFieldSpec]) {
- fields.sort_by(|left, right| {
- left.name
- .cmp(&right.name)
- .then_with(|| left.node_classes.iter().cmp(right.node_classes.iter()))
- });
-}
-
-fn normalize_metric_plane_tx(
- tx: &Transaction<'_>,
-) -> Result<MetricPlaneMigrationReport, StoreError> {
- let mut report = MetricPlaneMigrationReport::default();
-
- if insert_run_dimension_definition_tx(
- tx,
- &RunDimensionDefinition::new(
- NonEmptyText::new("benchmark_suite")?,
- FieldValueType::String,
- Some(NonEmptyText::new("Legacy coarse benchmark label")?),
- ),
- )? {
- report.inserted_dimension_definitions += 1;
- }
-
- {
- let mut statement = tx.prepare(
- "SELECT DISTINCT metric_key, unit, objective
- FROM metrics
- ORDER BY metric_key ASC",
- )?;
- let mut rows = statement.query([])?;
- while let Some(row) = rows.next()? {
- let definition = MetricDefinition::new(
- NonEmptyText::new(row.get::<_, String>(0)?)?,
- decode_metric_unit(&row.get::<_, String>(1)?)?,
- decode_optimization_objective(&row.get::<_, String>(2)?)?,
- None,
- );
- if upsert_metric_definition_tx(tx, &definition)? {
- report.inserted_metric_definitions += 1;
- }
- }
- }
-
- {
- let mut statement = tx.prepare(
- "SELECT payload_json
- FROM nodes
- WHERE class = 'contract'",
- )?;
- let mut rows = statement.query([])?;
- while let Some(row) = rows.next()? {
- let payload = decode_json::<NodePayload>(&row.get::<_, String>(0)?)?;
- for definition in contract_metric_definitions(&payload)? {
- if upsert_metric_definition_tx(tx, &definition)? {
- report.inserted_metric_definitions += 1;
- }
- }
- }
- }
-
- {
- let mut statement = tx.prepare(
- "SELECT run_id, benchmark_suite
- FROM runs
- WHERE benchmark_suite IS NOT NULL
- AND trim(benchmark_suite) != ''",
- )?;
- let mut rows = statement.query([])?;
- while let Some(row) = rows.next()? {
- let run_id = parse_run_id(&row.get::<_, String>(0)?)?;
- let value = RunDimensionValue::String(NonEmptyText::new(row.get::<_, String>(1)?)?);
- if insert_run_dimension_value_tx(
- tx,
- run_id,
- &NonEmptyText::new("benchmark_suite")?,
- &value,
- )? {
- report.inserted_dimension_values += 1;
- }
- }
- }
-
- Ok(report)
-}
-
-fn contract_metric_definitions(payload: &NodePayload) -> Result<Vec<MetricDefinition>, StoreError> {
- let mut definitions = Vec::new();
- if let Some(primary) = payload.field("primary_metric") {
- definitions.push(metric_definition_from_json(primary, None)?);
- }
- if let Some(Value::Array(items)) = payload.field("supporting_metrics") {
- for item in items {
- definitions.push(metric_definition_from_json(item, None)?);
- }
- }
- Ok(definitions)
-}
-
-fn metric_definition_from_json(
- value: &Value,
- description: Option<NonEmptyText>,
-) -> Result<MetricDefinition, StoreError> {
- let Some(object) = value.as_object() else {
- return Err(StoreError::Json(serde_json::Error::io(io::Error::new(
- io::ErrorKind::InvalidData,
- "metric definition payload must be an object",
- ))));
- };
- let key = object
- .get("metric_key")
- .or_else(|| object.get("key"))
- .and_then(Value::as_str)
- .ok_or_else(|| {
- StoreError::Json(serde_json::Error::io(io::Error::new(
- io::ErrorKind::InvalidData,
- "metric definition missing key",
- )))
- })?;
- let unit = object.get("unit").and_then(Value::as_str).ok_or_else(|| {
- StoreError::Json(serde_json::Error::io(io::Error::new(
- io::ErrorKind::InvalidData,
- "metric definition missing unit",
- )))
- })?;
- let objective = object
- .get("objective")
- .and_then(Value::as_str)
- .ok_or_else(|| {
- StoreError::Json(serde_json::Error::io(io::Error::new(
- io::ErrorKind::InvalidData,
- "metric definition missing objective",
- )))
- })?;
- Ok(MetricDefinition::new(
- NonEmptyText::new(key)?,
- decode_metric_unit(unit)?,
- decode_optimization_objective(objective)?,
- description,
- ))
-}
-
-fn upsert_metric_definition_tx(
- tx: &Transaction<'_>,
- definition: &MetricDefinition,
-) -> Result<bool, StoreError> {
- let existing = tx
- .query_row(
- "SELECT unit, objective, description
- FROM metric_definitions
- WHERE metric_key = ?1",
- params![definition.key.as_str()],
- |row| {
- Ok((
- row.get::<_, String>(0)?,
- row.get::<_, String>(1)?,
- row.get::<_, Option<String>>(2)?,
- ))
- },
- )
- .optional()?;
- if let Some((existing_unit, existing_objective, existing_description)) = existing {
- let new_unit = encode_metric_unit(definition.unit).to_owned();
- let new_objective = encode_optimization_objective(definition.objective).to_owned();
- if existing_unit != new_unit || existing_objective != new_objective {
- return Err(StoreError::ConflictingMetricDefinition {
- key: definition.key.as_str().to_owned(),
- existing_unit,
- existing_objective,
- new_unit,
- new_objective,
- });
- }
- if existing_description.is_none() && definition.description.is_some() {
- let _ = tx.execute(
- "UPDATE metric_definitions SET description = ?2 WHERE metric_key = ?1",
- params![
- definition.key.as_str(),
- definition.description.as_ref().map(NonEmptyText::as_str)
- ],
- )?;
- }
- Ok(false)
- } else {
- let _ = tx.execute(
- "INSERT INTO metric_definitions (metric_key, unit, objective, description, created_at)
- VALUES (?1, ?2, ?3, ?4, ?5)",
- params![
- definition.key.as_str(),
- encode_metric_unit(definition.unit),
- encode_optimization_objective(definition.objective),
- definition.description.as_ref().map(NonEmptyText::as_str),
- encode_timestamp(definition.created_at)?,
- ],
- )?;
- Ok(true)
- }
-}
-
-fn insert_run_dimension_definition_tx(
- tx: &Transaction<'_>,
- definition: &RunDimensionDefinition,
-) -> Result<bool, StoreError> {
- let existing = tx
- .query_row(
- "SELECT value_type, description
- FROM run_dimension_definitions
- WHERE dimension_key = ?1",
- params![definition.key.as_str()],
- |row| Ok((row.get::<_, String>(0)?, row.get::<_, Option<String>>(1)?)),
- )
- .optional()?;
- if let Some((existing_type, existing_description)) = existing {
- let new_type = encode_field_value_type(definition.value_type).to_owned();
- if existing_type != new_type {
- return Err(StoreError::ConflictingRunDimensionDefinition {
- key: definition.key.as_str().to_owned(),
- existing_type,
- new_type,
- });
- }
- if existing_description.is_none() && definition.description.is_some() {
- let _ = tx.execute(
- "UPDATE run_dimension_definitions SET description = ?2 WHERE dimension_key = ?1",
- params![
- definition.key.as_str(),
- definition.description.as_ref().map(NonEmptyText::as_str)
- ],
- )?;
- }
- Ok(false)
- } else {
- let _ = tx.execute(
- "INSERT INTO run_dimension_definitions (dimension_key, value_type, description, created_at)
- VALUES (?1, ?2, ?3, ?4)",
- params![
- definition.key.as_str(),
- encode_field_value_type(definition.value_type),
- definition.description.as_ref().map(NonEmptyText::as_str),
- encode_timestamp(definition.created_at)?,
- ],
- )?;
- Ok(true)
- }
-}
-
-fn load_metric_definition_tx(
- tx: &Transaction<'_>,
- key: &NonEmptyText,
-) -> Result<Option<MetricDefinition>, StoreError> {
- tx.query_row(
- "SELECT metric_key, unit, objective, description, created_at
- FROM metric_definitions
- WHERE metric_key = ?1",
- params![key.as_str()],
- |row| {
- Ok(MetricDefinition {
- key: NonEmptyText::new(row.get::<_, String>(0)?)
- .map_err(core_to_sql_conversion_error)?,
- unit: decode_metric_unit(&row.get::<_, String>(1)?)
- .map_err(to_sql_conversion_error)?,
- objective: decode_optimization_objective(&row.get::<_, String>(2)?)
- .map_err(to_sql_conversion_error)?,
- description: row
- .get::<_, Option<String>>(3)?
- .map(NonEmptyText::new)
- .transpose()
- .map_err(core_to_sql_conversion_error)?,
- created_at: decode_timestamp(&row.get::<_, String>(4)?)
- .map_err(to_sql_conversion_error)?,
- })
- },
- )
- .optional()
- .map_err(StoreError::from)
-}
-
-fn metric_definitions_by_key(
- store: &ProjectStore,
-) -> Result<BTreeMap<String, MetricDefinition>, StoreError> {
- Ok(store
- .list_metric_definitions()?
- .into_iter()
- .map(|definition| (definition.key.as_str().to_owned(), definition))
- .collect())
-}
-
-fn run_dimension_definitions_by_key(
- store: &ProjectStore,
-) -> Result<BTreeMap<String, RunDimensionDefinition>, StoreError> {
- let mut statement = store.connection.prepare(
- "SELECT dimension_key, value_type, description, created_at
- FROM run_dimension_definitions",
+/// Overwrites the mutable columns (slug, label, objective, status, brief,
+/// revision, updated_at) of the `frontiers` row keyed by `frontier.id`.
+/// `created_at` is deliberately not touched by this UPDATE.
+///
+/// NOTE(review): the affected-row count from `execute` is discarded, so a
+/// missing row is silently a no-op — confirm callers check existence first.
+fn update_frontier(
+    transaction: &Transaction<'_>,
+    frontier: &FrontierRecord,
+) -> Result<(), StoreError> {
+    let _ = transaction.execute(
+        "UPDATE frontiers
+         SET slug = ?2, label = ?3, objective = ?4, status = ?5, brief_json = ?6, revision = ?7, updated_at = ?8
+         WHERE id = ?1",
+        params![
+            frontier.id.to_string(),
+            frontier.slug.as_str(),
+            frontier.label.as_str(),
+            frontier.objective.as_str(),
+            frontier.status.as_str(),
+            encode_json(&frontier.brief)?,
+            frontier.revision,
+            encode_timestamp(frontier.updated_at)?,
+        ],
    )?;
-    let mut rows = statement.query([])?;
-    let mut items = BTreeMap::new();
-    while let Some(row) = rows.next()? {
-        let definition = RunDimensionDefinition {
-            key: NonEmptyText::new(row.get::<_, String>(0)?)?,
-            value_type: decode_field_value_type(&row.get::<_, String>(1)?)?,
-            description: row
-                .get::<_, Option<String>>(2)?
-                .map(NonEmptyText::new)
-                .transpose()?,
-            created_at: decode_timestamp(&row.get::<_, String>(3)?)?,
-        };
-        let _ = items.insert(definition.key.as_str().to_owned(), definition);
-    }
-    Ok(items)
-}
-
-fn coerce_run_dimension_map(
-    definitions: &BTreeMap<String, RunDimensionDefinition>,
-    raw_dimensions: BTreeMap<String, Value>,
-) -> Result<BTreeMap<NonEmptyText, RunDimensionValue>, StoreError> {
-    let mut dimensions = BTreeMap::new();
-    for (raw_key, raw_value) in raw_dimensions {
-        let key = NonEmptyText::new(raw_key)?;
-        let Some(definition) = definitions.get(key.as_str()) else {
-            return Err(StoreError::UnknownRunDimension(key));
-        };
-        let value = coerce_run_dimension_value(definition, raw_value)?;
-        let _ = dimensions.insert(key, value);
-    }
-    Ok(dimensions)
-}
-
-fn coerce_run_dimension_value(
-    definition: &RunDimensionDefinition,
-    raw_value: Value,
-) -> Result<RunDimensionValue, StoreError> {
-    match definition.value_type {
-        FieldValueType::String => match raw_value {
-            Value::String(value) => Ok(RunDimensionValue::String(NonEmptyText::new(value)?)),
-            other => Err(StoreError::InvalidRunDimensionValue {
-                key: definition.key.as_str().to_owned(),
-                expected: definition.value_type.as_str().to_owned(),
-                observed: value_kind_name(&other).to_owned(),
-            }),
-        },
-        FieldValueType::Numeric => match raw_value.as_f64() {
-            Some(value) => Ok(RunDimensionValue::Numeric(value)),
-            None => Err(StoreError::InvalidRunDimensionValue {
-                key: definition.key.as_str().to_owned(),
-                expected: definition.value_type.as_str().to_owned(),
-                observed: value_kind_name(&raw_value).to_owned(),
-            }),
-        },
-        FieldValueType::Boolean => match raw_value {
-            Value::Bool(value) => Ok(RunDimensionValue::Boolean(value)),
-            other => Err(StoreError::InvalidRunDimensionValue {
-                key: definition.key.as_str().to_owned(),
-                expected: definition.value_type.as_str().to_owned(),
-                observed: value_kind_name(&other).to_owned(),
-            }),
-        },
-        FieldValueType::Timestamp => match raw_value {
-            Value::String(value) => {
-                let _ = OffsetDateTime::parse(&value, &Rfc3339)?;
-                Ok(RunDimensionValue::Timestamp(NonEmptyText::new(value)?))
-            }
-            other => Err(StoreError::InvalidRunDimensionValue {
-                key: definition.key.as_str().to_owned(),
-                expected: definition.value_type.as_str().to_owned(),
-                observed: value_kind_name(&other).to_owned(),
-            }),
-        },
-    }
+    Ok(())
}
-fn insert_run_dimension_value_tx(
- tx: &Transaction<'_>,
- run_id: fidget_spinner_core::RunId,
- key: &NonEmptyText,
- value: &RunDimensionValue,
-) -> Result<bool, StoreError> {
- let (value_text, value_numeric, value_boolean, value_timestamp) =
- encode_run_dimension_columns(value)?;
- let changed = tx.execute(
- "INSERT OR IGNORE INTO run_dimensions (
- run_id,
- dimension_key,
- value_type,
- value_text,
- value_numeric,
- value_boolean,
- value_timestamp
- ) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)",
+/// Inserts a brand-new `hypotheses` row; every column value comes straight
+/// from the given record, with booleans and timestamps encoded via
+/// `bool_to_sql` / `encode_timestamp`.
+fn insert_hypothesis(
+    transaction: &Transaction<'_>,
+    hypothesis: &HypothesisRecord,
+) -> Result<(), StoreError> {
+    let _ = transaction.execute(
+        "INSERT INTO hypotheses (id, slug, frontier_id, archived, title, summary, body, revision, created_at, updated_at)
+         VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10)",
        params![
-            run_id.to_string(),
-            key.as_str(),
-            encode_field_value_type(value.value_type()),
-            value_text,
-            value_numeric,
-            value_boolean,
-            value_timestamp,
+            hypothesis.id.to_string(),
+            hypothesis.slug.as_str(),
+            hypothesis.frontier_id.to_string(),
+            bool_to_sql(hypothesis.archived),
+            hypothesis.title.as_str(),
+            hypothesis.summary.as_str(),
+            hypothesis.body.as_str(),
+            hypothesis.revision,
+            encode_timestamp(hypothesis.created_at)?,
+            encode_timestamp(hypothesis.updated_at)?,
        ],
    )?;
-    Ok(changed > 0)
+    Ok(())
}
-fn insert_run_dimensions(
- tx: &Transaction<'_>,
- run_id: fidget_spinner_core::RunId,
- dimensions: &BTreeMap<NonEmptyText, RunDimensionValue>,
+/// Rewrites the mutable columns of an existing `hypotheses` row keyed by id.
+/// `frontier_id` and `created_at` are immutable here — the UPDATE never sets
+/// them. The matched-row count is discarded (missing id is a silent no-op).
+fn update_hypothesis_row(
+    transaction: &Transaction<'_>,
+    hypothesis: &HypothesisRecord,
) -> Result<(), StoreError> {
-    for (key, value) in dimensions {
-        let _ = insert_run_dimension_value_tx(tx, run_id, key, value)?;
-    }
+    let _ = transaction.execute(
+        "UPDATE hypotheses
+         SET slug = ?2, archived = ?3, title = ?4, summary = ?5, body = ?6, revision = ?7, updated_at = ?8
+         WHERE id = ?1",
+        params![
+            hypothesis.id.to_string(),
+            hypothesis.slug.as_str(),
+            bool_to_sql(hypothesis.archived),
+            hypothesis.title.as_str(),
+            hypothesis.summary.as_str(),
+            hypothesis.body.as_str(),
+            hypothesis.revision,
+            encode_timestamp(hypothesis.updated_at)?,
+        ],
+    )?;
    Ok(())
}
-fn validate_run_dimensions_tx(
- tx: &Transaction<'_>,
- dimensions: &BTreeMap<NonEmptyText, RunDimensionValue>,
-) -> Result<BTreeMap<NonEmptyText, RunDimensionValue>, StoreError> {
- for (key, value) in dimensions {
- let Some(expected_type) = tx
- .query_row(
- "SELECT value_type
- FROM run_dimension_definitions
- WHERE dimension_key = ?1",
- params![key.as_str()],
- |row| row.get::<_, String>(0),
- )
- .optional()?
- else {
- return Err(StoreError::UnknownRunDimension(key.clone()));
- };
- let expected_type = decode_field_value_type(&expected_type)?;
- let observed_type = value.value_type();
- if expected_type != observed_type {
- return Err(StoreError::InvalidRunDimensionValue {
- key: key.as_str().to_owned(),
- expected: expected_type.as_str().to_owned(),
- observed: observed_type.as_str().to_owned(),
- });
- }
- if matches!(value, RunDimensionValue::Timestamp(raw) if OffsetDateTime::parse(raw.as_str(), &Rfc3339).is_err())
- {
- return Err(StoreError::InvalidRunDimensionValue {
- key: key.as_str().to_owned(),
- expected: FieldValueType::Timestamp.as_str().to_owned(),
- observed: "string".to_owned(),
- });
- }
- }
- Ok(dimensions.clone())
-}
-
-fn load_run_dimensions_by_run_id(
- store: &ProjectStore,
-) -> Result<
- BTreeMap<fidget_spinner_core::RunId, BTreeMap<NonEmptyText, RunDimensionValue>>,
- StoreError,
-> {
- let mut statement = store.connection.prepare(
- "SELECT run_id, dimension_key, value_type, value_text, value_numeric, value_boolean, value_timestamp
- FROM run_dimensions
- ORDER BY dimension_key ASC",
+/// Replaces the hypothesis's tag set wholesale: delete every existing
+/// `hypothesis_tags` row for the id, then reinsert one row per tag.
+/// Insertion follows `BTreeSet` iteration order (sorted by tag name).
+fn replace_hypothesis_tags(
+    transaction: &Transaction<'_>,
+    hypothesis_id: HypothesisId,
+    tags: &BTreeSet<TagName>,
+) -> Result<(), StoreError> {
+    let _ = transaction.execute(
+        "DELETE FROM hypothesis_tags WHERE hypothesis_id = ?1",
+        params![hypothesis_id.to_string()],
    )?;
-    let mut rows = statement.query([])?;
-    let mut values =
-        BTreeMap::<fidget_spinner_core::RunId, BTreeMap<NonEmptyText, RunDimensionValue>>::new();
-    while let Some(row) = rows.next()? {
-        let run_id = parse_run_id(&row.get::<_, String>(0)?)?;
-        let key = NonEmptyText::new(row.get::<_, String>(1)?)?;
-        let value_type = decode_field_value_type(&row.get::<_, String>(2)?)?;
-        let value = decode_run_dimension_value(
-            value_type,
-            row.get::<_, Option<String>>(3)?,
-            row.get::<_, Option<f64>>(4)?,
-            row.get::<_, Option<i64>>(5)?,
-            row.get::<_, Option<String>>(6)?,
-        )?;
-        let _ = values.entry(run_id).or_default().insert(key, value);
-    }
-    Ok(values)
-}
-
-fn load_run_dimension_summaries(
-    store: &ProjectStore,
-) -> Result<Vec<RunDimensionSummary>, StoreError> {
-    let definitions = {
-        let mut statement = store.connection.prepare(
-            "SELECT dimension_key, value_type, description, created_at
-             FROM run_dimension_definitions
-             ORDER BY dimension_key ASC",
-        )?;
-        let mut rows = statement.query([])?;
-        let mut items = Vec::new();
-        while let Some(row) = rows.next()? {
-            items.push(RunDimensionDefinition {
-                key: NonEmptyText::new(row.get::<_, String>(0)?)?,
-                value_type: decode_field_value_type(&row.get::<_, String>(1)?)?,
-                description: row
-                    .get::<_, Option<String>>(2)?
-                    .map(NonEmptyText::new)
-                    .transpose()?,
-                created_at: decode_timestamp(&row.get::<_, String>(3)?)?,
-            });
-        }
-        items
-    };
-
-    let mut summaries = Vec::new();
-    for definition in definitions {
-        let mut statement = store.connection.prepare(
-            "SELECT value_text, value_numeric, value_boolean, value_timestamp
-             FROM run_dimensions
-             WHERE dimension_key = ?1",
+    // One INSERT per tag; executed inside the caller-supplied transaction so
+    // the delete+reinsert pair is atomic with the rest of the write.
+    for tag in tags {
+        let _ = transaction.execute(
+            "INSERT INTO hypothesis_tags (hypothesis_id, tag_name) VALUES (?1, ?2)",
+            params![hypothesis_id.to_string(), tag.as_str()],
        )?;
-        let mut rows = statement.query(params![definition.key.as_str()])?;
-        let mut observed_run_count = 0_u64;
-        let mut distinct = BTreeSet::new();
-        let mut sample_values = Vec::new();
-        while let Some(row) = rows.next()? {
-            observed_run_count += 1;
-            let value = decode_run_dimension_value(
-                definition.value_type,
-                row.get::<_, Option<String>>(0)?,
-                row.get::<_, Option<f64>>(1)?,
-                row.get::<_, Option<i64>>(2)?,
-                row.get::<_, Option<String>>(3)?,
-            )?;
-            let serialized = encode_json(&value.as_json())?;
-            if distinct.insert(serialized) && sample_values.len() < 5 {
-                sample_values.push(value.as_json());
-            }
-        }
-        summaries.push(RunDimensionSummary {
-            key: definition.key,
-            value_type: definition.value_type,
-            description: definition.description,
-            observed_run_count,
-            distinct_value_count: distinct.len() as u64,
-            sample_values,
-        });
-    }
-    Ok(summaries)
-}
-
-fn merge_registered_run_metric_summaries(
-    store: &ProjectStore,
-    summaries: &mut Vec<MetricKeySummary>,
-) -> Result<(), StoreError> {
-    let definitions = store.list_metric_definitions()?;
-    for definition in definitions {
-        if let Some(summary) = summaries.iter_mut().find(|summary| {
-            summary.source == MetricFieldSource::RunMetric && summary.key == definition.key
-        }) {
-            summary.unit = Some(definition.unit);
-            summary.objective = Some(definition.objective);
-            summary.description.clone_from(&definition.description);
-            summary.requires_order = matches!(definition.objective, OptimizationObjective::Target);
-            continue;
-        }
-        summaries.push(MetricKeySummary {
-            key: definition.key,
-            source: MetricFieldSource::RunMetric,
-            experiment_count: 0,
-            unit: Some(definition.unit),
-            objective: Some(definition.objective),
-            description: definition.description,
-            requires_order: matches!(definition.objective, OptimizationObjective::Target),
-        });
    }
    Ok(())
}
-fn dimensions_match(
- haystack: &BTreeMap<NonEmptyText, RunDimensionValue>,
- needle: &BTreeMap<NonEmptyText, RunDimensionValue>,
-) -> bool {
- needle
- .iter()
- .all(|(key, value)| haystack.get(key) == Some(value))
-}
-
-fn run_dimensions_json(dimensions: &BTreeMap<NonEmptyText, RunDimensionValue>) -> Value {
- Value::Object(
- dimensions
- .iter()
- .map(|(key, value)| (key.to_string(), value.as_json()))
- .collect::<serde_json::Map<String, Value>>(),
- )
-}
-
-fn benchmark_suite_label(dimensions: &BTreeMap<NonEmptyText, RunDimensionValue>) -> Option<String> {
- dimensions
- .get(&NonEmptyText::new("benchmark_suite").ok()?)
- .and_then(|value| match value {
- RunDimensionValue::String(item) => Some(item.to_string()),
- _ => None,
- })
- .or_else(|| {
- if dimensions.is_empty() {
- None
- } else {
- Some(
- dimensions
- .iter()
- .map(|(key, value)| format!("{key}={}", dimension_value_text(value)))
- .collect::<Vec<_>>()
- .join(", "),
- )
- }
- })
-}
-
-fn derive_summary_from_body(body: &str) -> Option<NonEmptyText> {
- const MAX_SUMMARY_CHARS: usize = 240;
-
- let paragraph = body
- .split("\n\n")
- .map(collapse_inline_whitespace)
- .map(|text| text.trim().to_owned())
- .find(|text| !text.is_empty())?;
- let summary = truncate_chars(&paragraph, MAX_SUMMARY_CHARS);
- NonEmptyText::new(summary).ok()
-}
-
-fn collapse_inline_whitespace(raw: &str) -> String {
- raw.split_whitespace().collect::<Vec<_>>().join(" ")
-}
-
-fn truncate_chars(value: &str, max_chars: usize) -> String {
- if value.chars().count() <= max_chars {
- return value.to_owned();
- }
- let mut truncated = value.chars().take(max_chars).collect::<String>();
- if let Some(index) = truncated.rfind(char::is_whitespace) {
- truncated.truncate(index);
- }
- format!("{}…", truncated.trim_end())
-}
-
-fn insert_node(tx: &Transaction<'_>, node: &DagNode) -> Result<(), StoreError> {
- let schema_namespace = node
- .payload
- .schema
- .as_ref()
- .map(|schema| schema.namespace.as_str());
- let schema_version = node
- .payload
- .schema
- .as_ref()
- .map(|schema| i64::from(schema.version));
- let _ = tx.execute(
- "INSERT INTO nodes (
- id,
- class,
- track,
- frontier_id,
- archived,
- title,
- summary,
- payload_schema_namespace,
- payload_schema_version,
- payload_json,
- diagnostics_json,
- agent_session_id,
- created_at,
- updated_at
- ) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14)",
+/// Inserts a brand-new `experiments` row. Optional fields map to nullable
+/// columns: `summary` via `Option<&str>`, `outcome` serialized to
+/// `outcome_json` only when present. The full tag set is additionally
+/// denormalized into the `tags_json` column.
+fn insert_experiment(
+    transaction: &Transaction<'_>,
+    experiment: &ExperimentRecord,
+) -> Result<(), StoreError> {
+    let _ = transaction.execute(
+        "INSERT INTO experiments (id, slug, frontier_id, hypothesis_id, archived, title, summary, tags_json, status, outcome_json, revision, created_at, updated_at)
+         VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13)",
        params![
-            node.id.to_string(),
-            node.class.as_str(),
-            encode_node_track(node.track),
-            node.frontier_id.map(|id| id.to_string()),
-            i64::from(node.archived),
-            node.title.as_str(),
-            node.summary.as_ref().map(NonEmptyText::as_str),
-            schema_namespace,
-            schema_version,
-            encode_json(&node.payload)?,
-            encode_json(&node.diagnostics)?,
-            node.agent_session_id.map(|id| id.to_string()),
-            encode_timestamp(node.created_at)?,
-            encode_timestamp(node.updated_at)?,
+            experiment.id.to_string(),
+            experiment.slug.as_str(),
+            experiment.frontier_id.to_string(),
+            experiment.hypothesis_id.to_string(),
+            bool_to_sql(experiment.archived),
+            experiment.title.as_str(),
+            experiment.summary.as_ref().map(NonEmptyText::as_str),
+            encode_json(&experiment.tags)?,
+            experiment.status.as_str(),
+            experiment.outcome.as_ref().map(encode_json).transpose()?,
+            experiment.revision,
+            encode_timestamp(experiment.created_at)?,
+            encode_timestamp(experiment.updated_at)?,
        ],
    )?;
-    for annotation in &node.annotations {
-        insert_annotation(tx, node.id, annotation)?;
-    }
-    for tag in &node.tags {
-        insert_node_tag(tx, node.id, tag)?;
-    }
    Ok(())
}
-fn insert_tag(tx: &Transaction<'_>, tag: &TagRecord) -> Result<(), StoreError> {
- let existing = tx
- .query_row(
- "SELECT 1 FROM tags WHERE name = ?1",
- params![tag.name.as_str()],
- |row| row.get::<_, i64>(0),
- )
- .optional()?;
- if existing.is_some() {
- return Err(StoreError::DuplicateTag(tag.name.clone()));
- }
- let _ = tx.execute(
- "INSERT INTO tags (name, description, created_at)
- VALUES (?1, ?2, ?3)",
+/// Rewrites the mutable columns of an existing `experiments` row keyed by id.
+/// `frontier_id`, `hypothesis_id`, and `created_at` are never updated here;
+/// `outcome_json` becomes NULL when `experiment.outcome` is `None`.
+fn update_experiment_row(
+    transaction: &Transaction<'_>,
+    experiment: &ExperimentRecord,
+) -> Result<(), StoreError> {
+    let _ = transaction.execute(
+        "UPDATE experiments
+         SET slug = ?2, archived = ?3, title = ?4, summary = ?5, tags_json = ?6, status = ?7, outcome_json = ?8, revision = ?9, updated_at = ?10
+         WHERE id = ?1",
        params![
-            tag.name.as_str(),
-            tag.description.as_str(),
-            encode_timestamp(tag.created_at)?,
+            experiment.id.to_string(),
+            experiment.slug.as_str(),
+            bool_to_sql(experiment.archived),
+            experiment.title.as_str(),
+            experiment.summary.as_ref().map(NonEmptyText::as_str),
+            encode_json(&experiment.tags)?,
+            experiment.status.as_str(),
+            experiment.outcome.as_ref().map(encode_json).transpose()?,
+            experiment.revision,
+            encode_timestamp(experiment.updated_at)?,
        ],
    )?;
    Ok(())
}
-fn insert_annotation(
- tx: &Transaction<'_>,
- node_id: fidget_spinner_core::NodeId,
- annotation: &NodeAnnotation,
+/// Replaces the experiment's tag join rows wholesale: delete all
+/// `experiment_tags` rows for the id, then reinsert one row per tag in
+/// `BTreeSet` (sorted) order.
+fn replace_experiment_tags(
+    transaction: &Transaction<'_>,
+    experiment_id: ExperimentId,
+    tags: &BTreeSet<TagName>,
) -> Result<(), StoreError> {
-    let _ = tx.execute(
-        "INSERT INTO node_annotations (id, node_id, visibility, label, body, created_at)
-         VALUES (?1, ?2, ?3, ?4, ?5, ?6)",
-        params![
-            annotation.id.to_string(),
-            node_id.to_string(),
-            encode_annotation_visibility(annotation.visibility),
-            annotation.label.as_ref().map(NonEmptyText::as_str),
-            annotation.body.as_str(),
-            encode_timestamp(annotation.created_at)?,
-        ],
+    let _ = transaction.execute(
+        "DELETE FROM experiment_tags WHERE experiment_id = ?1",
+        params![experiment_id.to_string()],
    )?;
+    for tag in tags {
+        let _ = transaction.execute(
+            "INSERT INTO experiment_tags (experiment_id, tag_name) VALUES (?1, ?2)",
+            params![experiment_id.to_string(), tag.as_str()],
+        )?;
+    }
    Ok(())
}
-fn insert_node_tag(
- tx: &Transaction<'_>,
- node_id: fidget_spinner_core::NodeId,
- tag: &TagName,
+/// Rewrites the influence edges pointing at `child`: delete all existing
+/// incoming edges, then reinsert one edge per parent. The `ordinal` column
+/// preserves the caller's parent ordering; on the (practically impossible)
+/// usize→i64 overflow it clamps to `i64::MAX` rather than failing.
+fn replace_influence_parents(
+    transaction: &Transaction<'_>,
+    child: VertexRef,
+    parents: &[VertexRef],
) -> Result<(), StoreError> {
-    let _ = tx.execute(
-        "INSERT INTO node_tags (node_id, tag_name)
-         VALUES (?1, ?2)",
-        params![node_id.to_string(), tag.as_str()],
+    let _ = transaction.execute(
+        "DELETE FROM influence_edges WHERE child_kind = ?1 AND child_id = ?2",
+        params![vertex_kind_name(child), child.opaque_id()],
    )?;
-    Ok(())
-}
-
-fn ensure_known_tags(tx: &Transaction<'_>, tags: &BTreeSet<TagName>) -> Result<(), StoreError> {
-    let mut statement = tx.prepare("SELECT 1 FROM tags WHERE name = ?1")?;
-    for tag in tags {
-        let exists = statement
-            .query_row(params![tag.as_str()], |row| row.get::<_, i64>(0))
-            .optional()?;
-        if exists.is_none() {
-            return Err(StoreError::UnknownTag(tag.clone()));
-        }
+    for (ordinal, parent) in parents.iter().enumerate() {
+        let _ = transaction.execute(
+            "INSERT INTO influence_edges (parent_kind, parent_id, child_kind, child_id, ordinal)
+             VALUES (?1, ?2, ?3, ?4, ?5)",
+            params![
+                vertex_kind_name(*parent),
+                parent.opaque_id(),
+                vertex_kind_name(child),
+                child.opaque_id(),
+                i64::try_from(ordinal).unwrap_or(i64::MAX),
+            ],
+        )?;
    }
    Ok(())
}
-fn insert_edge(tx: &Transaction<'_>, edge: &DagEdge) -> Result<(), StoreError> {
- let _ = tx.execute(
- "INSERT OR IGNORE INTO node_edges (source_id, target_id, kind)
- VALUES (?1, ?2, ?3)",
+/// Inserts a brand-new `artifacts` row. `summary` and `media_type` are
+/// optional and map to nullable columns via `Option<&str>`.
+fn insert_artifact(
+    transaction: &Transaction<'_>,
+    artifact: &ArtifactRecord,
+) -> Result<(), StoreError> {
+    let _ = transaction.execute(
+        "INSERT INTO artifacts (id, slug, kind, label, summary, locator, media_type, revision, created_at, updated_at)
+         VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10)",
        params![
-            edge.source_id.to_string(),
-            edge.target_id.to_string(),
-            encode_edge_kind(edge.kind),
+            artifact.id.to_string(),
+            artifact.slug.as_str(),
+            artifact.kind.as_str(),
+            artifact.label.as_str(),
+            artifact.summary.as_ref().map(NonEmptyText::as_str),
+            artifact.locator.as_str(),
+            artifact.media_type.as_ref().map(NonEmptyText::as_str),
+            artifact.revision,
+            encode_timestamp(artifact.created_at)?,
+            encode_timestamp(artifact.updated_at)?,
        ],
    )?;
    Ok(())
}
-fn insert_frontier(tx: &Transaction<'_>, frontier: &FrontierRecord) -> Result<(), StoreError> {
- let _ = tx.execute(
- "INSERT INTO frontiers (id, label, root_contract_node_id, status, created_at, updated_at)
- VALUES (?1, ?2, ?3, ?4, ?5, ?6)",
+/// Rewrites the mutable columns of an existing `artifacts` row keyed by id;
+/// `created_at` is never updated. The matched-row count is discarded, so an
+/// unknown id is a silent no-op.
+fn update_artifact_row(
+    transaction: &Transaction<'_>,
+    artifact: &ArtifactRecord,
+) -> Result<(), StoreError> {
+    let _ = transaction.execute(
+        "UPDATE artifacts
+         SET slug = ?2, kind = ?3, label = ?4, summary = ?5, locator = ?6, media_type = ?7, revision = ?8, updated_at = ?9
+         WHERE id = ?1",
        params![
-            frontier.id.to_string(),
-            frontier.label.as_str(),
-            frontier.root_contract_node_id.to_string(),
-            encode_frontier_status(frontier.status),
-            encode_timestamp(frontier.created_at)?,
-            encode_timestamp(frontier.updated_at)?,
+            artifact.id.to_string(),
+            artifact.slug.as_str(),
+            artifact.kind.as_str(),
+            artifact.label.as_str(),
+            artifact.summary.as_ref().map(NonEmptyText::as_str),
+            artifact.locator.as_str(),
+            artifact.media_type.as_ref().map(NonEmptyText::as_str),
+            artifact.revision,
+            encode_timestamp(artifact.updated_at)?,
        ],
    )?;
    Ok(())
}
-fn insert_run(
- tx: &Transaction<'_>,
- run: &RunRecord,
- benchmark_suite: Option<&str>,
- primary_metric: &MetricValue,
- primary_metric_definition: &MetricDefinition,
- supporting_metrics: &[MetricValue],
- supporting_metric_definitions: &[MetricDefinition],
+/// Rewrites the artifact's attachment links wholesale: delete every
+/// `artifact_attachments` row for the id, then reinsert one row per target.
+/// The `ordinal` column preserves the slice order of `attachments`, clamping
+/// to `i64::MAX` if the usize→i64 conversion ever overflows.
+fn replace_artifact_attachments(
+    transaction: &Transaction<'_>,
+    artifact_id: ArtifactId,
+    attachments: &[AttachmentTargetRef],
) -> Result<(), StoreError> {
-    let started_at = match run.started_at {
-        Some(timestamp) => Some(encode_timestamp(timestamp)?),
-        None => None,
-    };
-    let finished_at = match run.finished_at {
-        Some(timestamp) => Some(encode_timestamp(timestamp)?),
-        None => None,
-    };
-    let _ = tx.execute(
-        "INSERT INTO runs (
-            run_id,
-            node_id,
-            frontier_id,
-            status,
-            backend,
-            benchmark_suite,
-            working_directory,
-            argv_json,
-            env_json,
-            started_at,
-            finished_at
-        ) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11)",
-        params![
-            run.run_id.to_string(),
-            run.node_id.to_string(),
-            run.frontier_id.map(|id| id.to_string()),
-            encode_run_status(run.status),
-            encode_backend(run.backend),
-            benchmark_suite,
-            run.command.working_directory.as_str(),
-            encode_json(&run.command.argv)?,
-            encode_json(&run.command.env)?,
-            started_at,
-            finished_at,
-        ],
+    let _ = transaction.execute(
+        "DELETE FROM artifact_attachments WHERE artifact_id = ?1",
+        params![artifact_id.to_string()],
    )?;
-
-    for (metric, definition) in std::iter::once((primary_metric, primary_metric_definition)).chain(
-        supporting_metrics
-            .iter()
-            .zip(supporting_metric_definitions.iter()),
-    ) {
-        let _ = tx.execute(
-            "INSERT INTO metrics (run_id, metric_key, unit, objective, value)
-             VALUES (?1, ?2, ?3, ?4, ?5)",
+    for (ordinal, attachment) in attachments.iter().enumerate() {
+        let _ = transaction.execute(
+            "INSERT INTO artifact_attachments (artifact_id, target_kind, target_id, ordinal)
+             VALUES (?1, ?2, ?3, ?4)",
            params![
-                run.run_id.to_string(),
-                metric.key.as_str(),
-                encode_metric_unit(definition.unit),
-                encode_optimization_objective(definition.objective),
-                metric.value,
+                artifact_id.to_string(),
+                attachment_target_kind_name(*attachment),
+                attachment.opaque_id(),
+                i64::try_from(ordinal).unwrap_or(i64::MAX),
            ],
        )?;
    }
    Ok(())
}
-fn insert_open_experiment(
- tx: &Transaction<'_>,
- experiment: &OpenExperiment,
-) -> Result<(), StoreError> {
- let _ = tx.execute(
- "INSERT INTO open_experiments (
- id,
- frontier_id,
- hypothesis_node_id,
- title,
- summary,
- created_at
- ) VALUES (?1, ?2, ?3, ?4, ?5, ?6)",
- params![
- experiment.id.to_string(),
- experiment.frontier_id.to_string(),
- experiment.hypothesis_node_id.to_string(),
- experiment.title.as_str(),
- experiment.summary.as_ref().map(NonEmptyText::as_str),
- encode_timestamp(experiment.created_at)?,
- ],
- )?;
- Ok(())
-}
-
-fn delete_open_experiment(
- tx: &Transaction<'_>,
- experiment_id: fidget_spinner_core::ExperimentId,
+/// Rebuilds the `experiment_dimensions` rows for an experiment from its
+/// outcome: always clears the existing rows first, then, when an outcome is
+/// present, inserts one (key, JSON value) row per dimension entry.
+/// Passing `None` therefore just erases the experiment's dimensions.
+fn replace_experiment_dimensions(
+    transaction: &Transaction<'_>,
+    experiment_id: ExperimentId,
+    outcome: Option<&ExperimentOutcome>,
) -> Result<(), StoreError> {
-    let _ = tx.execute(
-        "DELETE FROM open_experiments WHERE id = ?1",
+    let _ = transaction.execute(
+        "DELETE FROM experiment_dimensions WHERE experiment_id = ?1",
        params![experiment_id.to_string()],
    )?;
+    if let Some(outcome) = outcome {
+        for (key, value) in &outcome.dimensions {
+            let _ = transaction.execute(
+                "INSERT INTO experiment_dimensions (experiment_id, key, value_json) VALUES (?1, ?2, ?3)",
+                params![experiment_id.to_string(), key.as_str(), encode_json(value)?],
+            )?;
+        }
+    }
    Ok(())
}
-fn insert_experiment(
- tx: &Transaction<'_>,
- experiment: &CompletedExperiment,
+/// Rebuilds the `experiment_metrics` rows for an experiment from its outcome:
+/// clears existing rows, then inserts the metrics yielded by `all_metrics` in
+/// order, recording each position as `ordinal`. The row at ordinal 0 is
+/// flagged `is_primary` — presumably `all_metrics` yields the primary metric
+/// first; confirm its ordering contract. `None` outcome just clears the rows.
+fn replace_experiment_metrics(
+    transaction: &Transaction<'_>,
+    experiment_id: ExperimentId,
+    outcome: Option<&ExperimentOutcome>,
) -> Result<(), StoreError> {
-    let _ = tx.execute(
-        "INSERT INTO experiments (
-            id,
-            frontier_id,
-            hypothesis_node_id,
-            run_node_id,
-            run_id,
-            analysis_node_id,
-            decision_node_id,
-            title,
-            summary,
-            benchmark_suite,
-            primary_metric_json,
-            supporting_metrics_json,
-            note_summary,
-            note_next_json,
-            verdict,
-            created_at
-        ) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, ?16)",
-        params![
-            experiment.id.to_string(),
-            experiment.frontier_id.to_string(),
-            experiment.hypothesis_node_id.to_string(),
-            experiment.run_node_id.to_string(),
-            experiment.run_id.to_string(),
-            experiment.analysis_node_id.map(|id| id.to_string()),
-            experiment.decision_node_id.to_string(),
-            experiment.title.as_str(),
-            experiment.summary.as_ref().map(NonEmptyText::as_str),
-            benchmark_suite_label(&experiment.result.dimensions),
-            encode_json(&experiment.result.primary_metric)?,
-            encode_json(&experiment.result.supporting_metrics)?,
-            experiment.note.summary.as_str(),
-            encode_json(&experiment.note.next_hypotheses)?,
-            encode_frontier_verdict(experiment.verdict),
-            encode_timestamp(experiment.created_at)?,
-        ],
+    let _ = transaction.execute(
+        "DELETE FROM experiment_metrics WHERE experiment_id = ?1",
+        params![experiment_id.to_string()],
    )?;
+    if let Some(outcome) = outcome {
+        for (ordinal, metric) in all_metrics(outcome).into_iter().enumerate() {
+            let _ = transaction.execute(
+                "INSERT INTO experiment_metrics (experiment_id, key, ordinal, is_primary, value)
+                 VALUES (?1, ?2, ?3, ?4, ?5)",
+                params![
+                    experiment_id.to_string(),
+                    metric.key.as_str(),
+                    i64::try_from(ordinal).unwrap_or(i64::MAX),
+                    bool_to_sql(ordinal == 0),
+                    metric.value,
+                ],
+            )?;
+        }
+    }
    Ok(())
}
-fn insert_event(
-    tx: &Transaction<'_>,
+/// Appends an audit row to `events`: (entity kind, id, revision, event kind)
+/// plus a full JSON snapshot of the entity. `occurred_at` is stamped with
+/// `OffsetDateTime::now_utc()` at insert time, not supplied by the caller.
+fn record_event(
+    transaction: &Transaction<'_>,
    entity_kind: &str,
    entity_id: &str,
+    revision: u64,
    event_kind: &str,
-    payload: Value,
+    snapshot: &impl Serialize,
) -> Result<(), StoreError> {
-    let _ = tx.execute(
-        "INSERT INTO events (entity_kind, entity_id, event_kind, payload_json, created_at)
-         VALUES (?1, ?2, ?3, ?4, ?5)",
+    let _ = transaction.execute(
+        "INSERT INTO events (entity_kind, entity_id, revision, event_kind, occurred_at, snapshot_json)
+         VALUES (?1, ?2, ?3, ?4, ?5, ?6)",
        params![
            entity_kind,
            entity_id,
+            revision,
            event_kind,
-            payload.to_string(),
            encode_timestamp(OffsetDateTime::now_utc())?,
+            encode_json(snapshot)?,
        ],
    )?;
    Ok(())
}
-fn load_open_experiment(
- connection: &Connection,
- experiment_id: fidget_spinner_core::ExperimentId,
-) -> Result<Option<OpenExperiment>, StoreError> {
- let mut statement = connection.prepare(
- "SELECT
- id,
- frontier_id,
- hypothesis_node_id,
- title,
- summary,
- created_at
- FROM open_experiments
- WHERE id = ?1",
- )?;
- statement
- .query_row(params![experiment_id.to_string()], |row| {
- Ok(OpenExperiment {
- id: parse_experiment_id(&row.get::<_, String>(0)?)
- .map_err(to_sql_conversion_error)?,
- frontier_id: parse_frontier_id(&row.get::<_, String>(1)?)
- .map_err(to_sql_conversion_error)?,
- hypothesis_node_id: parse_node_id(&row.get::<_, String>(2)?)
- .map_err(to_sql_conversion_error)?,
- title: NonEmptyText::new(row.get::<_, String>(3)?)
- .map_err(core_to_sql_conversion_error)?,
- summary: row
- .get::<_, Option<String>>(4)?
- .map(NonEmptyText::new)
- .transpose()
- .map_err(core_to_sql_conversion_error)?,
- created_at: decode_timestamp(&row.get::<_, String>(5)?)
- .map_err(to_sql_conversion_error)?,
- })
- })
- .optional()
- .map_err(StoreError::from)
-}
-
-fn summarize_open_experiment(experiment: &OpenExperiment) -> OpenExperimentSummary {
- OpenExperimentSummary {
- id: experiment.id,
- frontier_id: experiment.frontier_id,
- hypothesis_node_id: experiment.hypothesis_node_id,
- title: experiment.title.clone(),
- summary: experiment.summary.clone(),
- created_at: experiment.created_at,
- }
-}
-
-fn touch_frontier(
- tx: &Transaction<'_>,
- frontier_id: fidget_spinner_core::FrontierId,
-) -> Result<(), StoreError> {
- let _ = tx.execute(
- "UPDATE frontiers SET updated_at = ?1 WHERE id = ?2",
- params![
- encode_timestamp(OffsetDateTime::now_utc())?,
- frontier_id.to_string()
- ],
- )?;
- Ok(())
+/// Maps a SELECT row into a `FrontierRecord`. Column order is positional and
+/// must match the callers' SELECT list: 0=id, 1=slug, 2=label, 3=objective,
+/// 4=status, 5=brief_json, 6=revision, 7=created_at, 8=updated_at.
+fn decode_frontier_row(row: &rusqlite::Row<'_>) -> Result<FrontierRecord, rusqlite::Error> {
+    Ok(FrontierRecord {
+        id: FrontierId::from_uuid(parse_uuid_sql(&row.get::<_, String>(0)?)?),
+        slug: parse_slug(&row.get::<_, String>(1)?)?,
+        label: parse_non_empty_text(&row.get::<_, String>(2)?)?,
+        objective: parse_non_empty_text(&row.get::<_, String>(3)?)?,
+        status: parse_frontier_status(&row.get::<_, String>(4)?)?,
+        brief: decode_json(&row.get::<_, String>(5)?).map_err(to_sql_conversion_error)?,
+        revision: row.get(6)?,
+        created_at: parse_timestamp_sql(&row.get::<_, String>(7)?)?,
+        updated_at: parse_timestamp_sql(&row.get::<_, String>(8)?)?,
+    })
}
-fn read_node_row(row: &rusqlite::Row<'_>) -> Result<DagNode, rusqlite::Error> {
- let payload_json = row.get::<_, String>(9)?;
- let diagnostics_json = row.get::<_, String>(10)?;
- let payload = decode_json::<NodePayload>(&payload_json).map_err(to_sql_conversion_error)?;
- let diagnostics =
- decode_json::<NodeDiagnostics>(&diagnostics_json).map_err(to_sql_conversion_error)?;
- Ok(DagNode {
- id: parse_node_id(&row.get::<_, String>(0)?).map_err(to_sql_conversion_error)?,
- class: parse_node_class(&row.get::<_, String>(1)?).map_err(to_sql_conversion_error)?,
- track: parse_node_track(&row.get::<_, String>(2)?).map_err(to_sql_conversion_error)?,
- frontier_id: row
- .get::<_, Option<String>>(3)?
- .map(|raw| parse_frontier_id(&raw))
- .transpose()
- .map_err(to_sql_conversion_error)?,
+/// Maps a SELECT row into an `ExperimentRecord`. Column order is positional:
+/// 0=id, 1=slug, 2=frontier_id, 3=hypothesis_id, 4=archived (0/1 integer),
+/// 5=title, 6=summary?, 7=tags_json, 8=status, 9=outcome_json?, 10=revision,
+/// 11=created_at, 12=updated_at.
+fn decode_experiment_row(row: &rusqlite::Row<'_>) -> Result<ExperimentRecord, rusqlite::Error> {
+    Ok(ExperimentRecord {
+        id: ExperimentId::from_uuid(parse_uuid_sql(&row.get::<_, String>(0)?)?),
+        slug: parse_slug(&row.get::<_, String>(1)?)?,
+        frontier_id: FrontierId::from_uuid(parse_uuid_sql(&row.get::<_, String>(2)?)?),
+        hypothesis_id: HypothesisId::from_uuid(parse_uuid_sql(&row.get::<_, String>(3)?)?),
        archived: row.get::<_, i64>(4)? != 0,
-    title: NonEmptyText::new(row.get::<_, String>(5)?).map_err(core_to_sql_conversion_error)?,
-    summary: row
-        .get::<_, Option<String>>(6)?
-        .map(NonEmptyText::new)
-        .transpose()
-        .map_err(core_to_sql_conversion_error)?,
-    tags: BTreeSet::new(),
-    payload,
-    annotations: Vec::new(),
-    diagnostics,
-    agent_session_id: row
-        .get::<_, Option<String>>(11)?
-        .map(|raw| parse_agent_session_id(&raw))
-        .transpose()
-        .map_err(to_sql_conversion_error)?,
-    created_at: decode_timestamp(&row.get::<_, String>(12)?)
-        .map_err(to_sql_conversion_error)?,
-    updated_at: decode_timestamp(&row.get::<_, String>(13)?)
-        .map_err(to_sql_conversion_error)?,
+        title: parse_non_empty_text(&row.get::<_, String>(5)?)?,
+        summary: parse_optional_non_empty_text(row.get::<_, Option<String>>(6)?)?,
+        tags: decode_json(&row.get::<_, String>(7)?).map_err(to_sql_conversion_error)?,
+        status: parse_experiment_status(&row.get::<_, String>(8)?)?,
+        outcome: row
+            .get::<_, Option<String>>(9)?
+            .map(|raw| decode_json(&raw).map_err(to_sql_conversion_error))
+            .transpose()?,
+        revision: row.get(10)?,
+        created_at: parse_timestamp_sql(&row.get::<_, String>(11)?)?,
+        updated_at: parse_timestamp_sql(&row.get::<_, String>(12)?)?,
    })
}
-fn read_frontier_row(row: &rusqlite::Row<'_>) -> Result<FrontierRecord, StoreError> {
- Ok(FrontierRecord {
- id: parse_frontier_id(&row.get::<_, String>(0)?)?,
- label: NonEmptyText::new(row.get::<_, String>(1)?)?,
- root_contract_node_id: parse_node_id(&row.get::<_, String>(2)?)?,
- status: parse_frontier_status(&row.get::<_, String>(3)?)?,
- created_at: decode_timestamp(&row.get::<_, String>(4)?)?,
- updated_at: decode_timestamp(&row.get::<_, String>(5)?)?,
+fn decode_artifact_row(row: &rusqlite::Row<'_>) -> Result<ArtifactRecord, rusqlite::Error> {
+ Ok(ArtifactRecord {
+ id: ArtifactId::from_uuid(parse_uuid_sql(&row.get::<_, String>(0)?)?),
+ slug: parse_slug(&row.get::<_, String>(1)?)?,
+ kind: parse_artifact_kind(&row.get::<_, String>(2)?)?,
+ label: parse_non_empty_text(&row.get::<_, String>(3)?)?,
+ summary: parse_optional_non_empty_text(row.get::<_, Option<String>>(4)?)?,
+ locator: parse_non_empty_text(&row.get::<_, String>(5)?)?,
+ media_type: parse_optional_non_empty_text(row.get::<_, Option<String>>(6)?)?,
+ revision: row.get(7)?,
+ created_at: parse_timestamp_sql(&row.get::<_, String>(8)?)?,
+ updated_at: parse_timestamp_sql(&row.get::<_, String>(9)?)?,
})
}
-fn frontier_contract_payload(contract: &FrontierContract) -> Result<JsonObject, StoreError> {
- json_object(json!({
- "objective": contract.objective.as_str(),
- "benchmark_suites": contract
- .evaluation
- .benchmark_suites
- .iter()
- .map(NonEmptyText::as_str)
- .collect::<Vec<_>>(),
- "primary_metric": metric_spec_json(&contract.evaluation.primary_metric),
- "supporting_metrics": contract
- .evaluation
- .supporting_metrics
- .iter()
- .map(metric_spec_json)
- .collect::<Vec<_>>(),
- "promotion_criteria": contract
- .promotion_criteria
- .iter()
- .map(NonEmptyText::as_str)
- .collect::<Vec<_>>(),
- }))
+fn decode_metric_definition_row(
+ row: &rusqlite::Row<'_>,
+) -> Result<MetricDefinition, rusqlite::Error> {
+ Ok(MetricDefinition {
+ key: parse_non_empty_text(&row.get::<_, String>(0)?)?,
+ unit: parse_metric_unit(&row.get::<_, String>(1)?)?,
+ objective: parse_optimization_objective(&row.get::<_, String>(2)?)?,
+ visibility: parse_metric_visibility(&row.get::<_, String>(3)?)?,
+ description: parse_optional_non_empty_text(row.get::<_, Option<String>>(4)?)?,
+ created_at: parse_timestamp_sql(&row.get::<_, String>(5)?)?,
+ updated_at: parse_timestamp_sql(&row.get::<_, String>(6)?)?,
+ })
}
-fn metric_spec_json(metric: &MetricSpec) -> Value {
- json!({
- "metric_key": metric.metric_key.as_str(),
- "unit": encode_metric_unit(metric.unit),
- "objective": encode_optimization_objective(metric.objective),
+fn decode_run_dimension_definition_row(
+ row: &rusqlite::Row<'_>,
+) -> Result<RunDimensionDefinition, rusqlite::Error> {
+ Ok(RunDimensionDefinition {
+ key: parse_non_empty_text(&row.get::<_, String>(0)?)?,
+ value_type: parse_field_value_type(&row.get::<_, String>(1)?)?,
+ description: parse_optional_non_empty_text(row.get::<_, Option<String>>(2)?)?,
+ created_at: parse_timestamp_sql(&row.get::<_, String>(3)?)?,
+ updated_at: parse_timestamp_sql(&row.get::<_, String>(4)?)?,
})
}
-fn json_object(value: Value) -> Result<JsonObject, StoreError> {
- match value {
- Value::Object(map) => Ok(map),
- other => Err(StoreError::Json(serde_json::Error::io(io::Error::new(
- io::ErrorKind::InvalidInput,
- format!("expected JSON object, got {other:?}"),
- )))),
+fn enforce_revision(
+ kind: &'static str,
+ selector: &str,
+ expected: Option<u64>,
+ observed: u64,
+) -> Result<(), StoreError> {
+ if let Some(expected) = expected
+ && expected != observed
+ {
+ return Err(StoreError::RevisionMismatch {
+ kind,
+ selector: selector.to_owned(),
+ expected,
+ observed,
+ });
}
-}
-
-fn write_json_file<T: Serialize>(path: &Utf8Path, value: &T) -> Result<(), StoreError> {
- let serialized = serde_json::to_string_pretty(value)?;
- fs::write(path.as_std_path(), serialized)?;
Ok(())
}
-fn read_json_file<T: for<'de> Deserialize<'de>>(path: &Utf8Path) -> Result<T, StoreError> {
- let bytes = fs::read(path.as_std_path())?;
- serde_json::from_slice(&bytes).map_err(StoreError::from)
-}
-
-fn encode_json<T: Serialize>(value: &T) -> Result<String, StoreError> {
- serde_json::to_string(value).map_err(StoreError::from)
-}
-
-fn decode_json<T: for<'de> Deserialize<'de>>(raw: &str) -> Result<T, StoreError> {
- serde_json::from_str(raw).map_err(StoreError::from)
-}
-
-fn encode_timestamp(timestamp: OffsetDateTime) -> Result<String, StoreError> {
- timestamp.format(&Rfc3339).map_err(StoreError::from)
-}
-
-fn decode_timestamp(raw: &str) -> Result<OffsetDateTime, StoreError> {
- OffsetDateTime::parse(raw, &Rfc3339).map_err(StoreError::from)
-}
-
-fn state_root(project_root: &Utf8Path) -> Utf8PathBuf {
- project_root.join(STORE_DIR_NAME)
-}
-
-#[must_use]
-pub fn discover_project_root(path: impl AsRef<Utf8Path>) -> Option<Utf8PathBuf> {
- let mut cursor = discovery_start(path.as_ref());
- loop {
- if state_root(&cursor).exists() {
- return Some(cursor);
- }
- let parent = cursor.parent()?;
- cursor = parent.to_path_buf();
- }
-}
-
-fn discovery_start(path: &Utf8Path) -> Utf8PathBuf {
- match fs::metadata(path.as_std_path()) {
- Ok(metadata) if metadata.is_file() => path
- .parent()
- .map_or_else(|| path.to_path_buf(), Utf8Path::to_path_buf),
- _ => path.to_path_buf(),
+fn validate_hypothesis_body(body: &NonEmptyText) -> Result<(), StoreError> {
+ let raw = body.as_str().trim();
+ if raw.contains("\n\n")
+ || raw.lines().any(|line| {
+ let trimmed = line.trim_start();
+ trimmed.starts_with('-') || trimmed.starts_with('*') || trimmed.starts_with('#')
+ })
+ {
+ return Err(StoreError::HypothesisBodyMustBeSingleParagraph);
}
+ Ok(())
}
-fn to_sql_conversion_error(error: StoreError) -> rusqlite::Error {
- rusqlite::Error::FromSqlConversionFailure(0, rusqlite::types::Type::Text, Box::new(error))
-}
-
-fn core_to_sql_conversion_error(error: fidget_spinner_core::CoreError) -> rusqlite::Error {
- to_sql_conversion_error(StoreError::from(error))
-}
-
-fn parse_uuid(raw: &str) -> Result<Uuid, StoreError> {
- Uuid::parse_str(raw).map_err(StoreError::from)
-}
-
-fn parse_node_id(raw: &str) -> Result<fidget_spinner_core::NodeId, StoreError> {
- Ok(fidget_spinner_core::NodeId::from_uuid(parse_uuid(raw)?))
-}
-
-fn parse_frontier_id(raw: &str) -> Result<fidget_spinner_core::FrontierId, StoreError> {
- Ok(fidget_spinner_core::FrontierId::from_uuid(parse_uuid(raw)?))
-}
-
-fn parse_experiment_id(raw: &str) -> Result<fidget_spinner_core::ExperimentId, StoreError> {
- Ok(fidget_spinner_core::ExperimentId::from_uuid(parse_uuid(
- raw,
- )?))
-}
-
-fn parse_run_id(raw: &str) -> Result<fidget_spinner_core::RunId, StoreError> {
- Ok(fidget_spinner_core::RunId::from_uuid(parse_uuid(raw)?))
-}
-
-fn parse_agent_session_id(raw: &str) -> Result<fidget_spinner_core::AgentSessionId, StoreError> {
- Ok(fidget_spinner_core::AgentSessionId::from_uuid(parse_uuid(
- raw,
- )?))
-}
-
-fn parse_annotation_id(raw: &str) -> Result<fidget_spinner_core::AnnotationId, StoreError> {
- Ok(fidget_spinner_core::AnnotationId::from_uuid(parse_uuid(
- raw,
- )?))
-}
-
-fn parse_node_class(raw: &str) -> Result<NodeClass, StoreError> {
+fn parse_frontier_status(raw: &str) -> Result<FrontierStatus, rusqlite::Error> {
match raw {
- "contract" => Ok(NodeClass::Contract),
- "hypothesis" => Ok(NodeClass::Hypothesis),
- "run" => Ok(NodeClass::Run),
- "analysis" => Ok(NodeClass::Analysis),
- "decision" => Ok(NodeClass::Decision),
- "source" => Ok(NodeClass::Source),
- "note" => Ok(NodeClass::Note),
- other => Err(StoreError::Json(serde_json::Error::io(io::Error::new(
- io::ErrorKind::InvalidData,
- format!("unknown node class `{other}`"),
- )))),
- }
-}
-
-fn encode_node_track(track: fidget_spinner_core::NodeTrack) -> &'static str {
- match track {
- fidget_spinner_core::NodeTrack::CorePath => "core-path",
- fidget_spinner_core::NodeTrack::OffPath => "off-path",
+ "exploring" => Ok(FrontierStatus::Exploring),
+ "paused" => Ok(FrontierStatus::Paused),
+ "archived" => Ok(FrontierStatus::Archived),
+ _ => Err(to_sql_conversion_error(StoreError::Json(
+ serde_json::Error::io(io::Error::new(
+ io::ErrorKind::InvalidData,
+ format!("invalid frontier status `{raw}`"),
+ )),
+ ))),
}
}
-fn parse_node_track(raw: &str) -> Result<fidget_spinner_core::NodeTrack, StoreError> {
+fn parse_metric_unit(raw: &str) -> Result<MetricUnit, rusqlite::Error> {
match raw {
- "core-path" => Ok(fidget_spinner_core::NodeTrack::CorePath),
- "off-path" => Ok(fidget_spinner_core::NodeTrack::OffPath),
- other => Err(StoreError::Json(serde_json::Error::io(io::Error::new(
- io::ErrorKind::InvalidData,
- format!("unknown node track `{other}`"),
- )))),
- }
-}
-
-fn encode_annotation_visibility(visibility: AnnotationVisibility) -> &'static str {
- match visibility {
- AnnotationVisibility::HiddenByDefault => "hidden",
- AnnotationVisibility::Visible => "visible",
+ "seconds" => Ok(MetricUnit::Seconds),
+ "bytes" => Ok(MetricUnit::Bytes),
+ "count" => Ok(MetricUnit::Count),
+ "ratio" => Ok(MetricUnit::Ratio),
+ "custom" => Ok(MetricUnit::Custom),
+ _ => Err(to_sql_conversion_error(StoreError::Json(
+ serde_json::Error::io(io::Error::new(
+ io::ErrorKind::InvalidData,
+ format!("invalid metric unit `{raw}`"),
+ )),
+ ))),
}
}
-fn parse_annotation_visibility(raw: &str) -> Result<AnnotationVisibility, StoreError> {
+fn parse_optimization_objective(raw: &str) -> Result<OptimizationObjective, rusqlite::Error> {
match raw {
- "hidden" => Ok(AnnotationVisibility::HiddenByDefault),
- "visible" => Ok(AnnotationVisibility::Visible),
- other => Err(StoreError::Json(serde_json::Error::io(io::Error::new(
- io::ErrorKind::InvalidData,
- format!("unknown annotation visibility `{other}`"),
- )))),
+ "minimize" => Ok(OptimizationObjective::Minimize),
+ "maximize" => Ok(OptimizationObjective::Maximize),
+ "target" => Ok(OptimizationObjective::Target),
+ _ => Err(to_sql_conversion_error(StoreError::Json(
+ serde_json::Error::io(io::Error::new(
+ io::ErrorKind::InvalidData,
+ format!("invalid objective `{raw}`"),
+ )),
+ ))),
}
}
-fn encode_edge_kind(kind: EdgeKind) -> &'static str {
- match kind {
- EdgeKind::Lineage => "lineage",
- EdgeKind::Evidence => "evidence",
- EdgeKind::Comparison => "comparison",
- EdgeKind::Supersedes => "supersedes",
- EdgeKind::Annotation => "annotation",
+fn parse_metric_visibility(raw: &str) -> Result<MetricVisibility, rusqlite::Error> {
+ match raw {
+ "canonical" => Ok(MetricVisibility::Canonical),
+ "minor" => Ok(MetricVisibility::Minor),
+ "hidden" => Ok(MetricVisibility::Hidden),
+ "archived" => Ok(MetricVisibility::Archived),
+ _ => Err(to_sql_conversion_error(StoreError::Json(
+ serde_json::Error::io(io::Error::new(
+ io::ErrorKind::InvalidData,
+ format!("invalid metric visibility `{raw}`"),
+ )),
+ ))),
}
}
-fn encode_frontier_status(status: FrontierStatus) -> &'static str {
- match status {
- FrontierStatus::Exploring => "exploring",
- FrontierStatus::Paused => "paused",
- FrontierStatus::Saturated => "saturated",
- FrontierStatus::Archived => "archived",
+fn parse_field_value_type(raw: &str) -> Result<FieldValueType, rusqlite::Error> {
+ match raw {
+ "string" => Ok(FieldValueType::String),
+ "numeric" => Ok(FieldValueType::Numeric),
+ "boolean" => Ok(FieldValueType::Boolean),
+ "timestamp" => Ok(FieldValueType::Timestamp),
+ _ => Err(to_sql_conversion_error(StoreError::Json(
+ serde_json::Error::io(io::Error::new(
+ io::ErrorKind::InvalidData,
+ format!("invalid field type `{raw}`"),
+ )),
+ ))),
}
}
-fn parse_frontier_status(raw: &str) -> Result<FrontierStatus, StoreError> {
+fn parse_experiment_status(raw: &str) -> Result<ExperimentStatus, rusqlite::Error> {
match raw {
- "exploring" => Ok(FrontierStatus::Exploring),
- "paused" => Ok(FrontierStatus::Paused),
- "saturated" => Ok(FrontierStatus::Saturated),
- "archived" => Ok(FrontierStatus::Archived),
- other => Err(StoreError::Json(serde_json::Error::io(io::Error::new(
- io::ErrorKind::InvalidData,
- format!("unknown frontier status `{other}`"),
- )))),
+ "open" => Ok(ExperimentStatus::Open),
+ "closed" => Ok(ExperimentStatus::Closed),
+ _ => Err(to_sql_conversion_error(StoreError::Json(
+ serde_json::Error::io(io::Error::new(
+ io::ErrorKind::InvalidData,
+ format!("invalid experiment status `{raw}`"),
+ )),
+ ))),
}
}
-fn encode_run_status(status: RunStatus) -> &'static str {
- match status {
- RunStatus::Queued => "queued",
- RunStatus::Running => "running",
- RunStatus::Succeeded => "succeeded",
- RunStatus::Failed => "failed",
- RunStatus::Cancelled => "cancelled",
+fn parse_artifact_kind(raw: &str) -> Result<ArtifactKind, rusqlite::Error> {
+ match raw {
+ "document" => Ok(ArtifactKind::Document),
+ "link" => Ok(ArtifactKind::Link),
+ "log" => Ok(ArtifactKind::Log),
+ "table" => Ok(ArtifactKind::Table),
+ "plot" => Ok(ArtifactKind::Plot),
+ "dump" => Ok(ArtifactKind::Dump),
+ "binary" => Ok(ArtifactKind::Binary),
+ "other" => Ok(ArtifactKind::Other),
+ _ => Err(to_sql_conversion_error(StoreError::Json(
+ serde_json::Error::io(io::Error::new(
+ io::ErrorKind::InvalidData,
+ format!("invalid artifact kind `{raw}`"),
+ )),
+ ))),
}
}
-fn encode_backend(backend: ExecutionBackend) -> &'static str {
- match backend {
- ExecutionBackend::LocalProcess => "local-process",
- ExecutionBackend::WorktreeProcess => "worktree-process",
- ExecutionBackend::SshProcess => "ssh-process",
+fn resolve_selector(raw: &str) -> Result<Selector, StoreError> {
+ if let Ok(uuid) = Uuid::parse_str(raw) {
+ Ok(Selector::Id(uuid))
+ } else {
+ Ok(Selector::Slug(Slug::new(raw.to_owned())?))
}
}
-fn encode_field_value_type(value_type: FieldValueType) -> &'static str {
- value_type.as_str()
+enum Selector {
+ Id(Uuid),
+ Slug(Slug),
}
-fn decode_field_value_type(raw: &str) -> Result<FieldValueType, StoreError> {
- match raw {
- "string" => Ok(FieldValueType::String),
- "numeric" => Ok(FieldValueType::Numeric),
- "boolean" => Ok(FieldValueType::Boolean),
- "timestamp" => Ok(FieldValueType::Timestamp),
- other => Err(StoreError::Json(serde_json::Error::io(io::Error::new(
- io::ErrorKind::InvalidData,
- format!("unknown field value type `{other}`"),
- )))),
+fn slugify(raw: &str) -> Result<Slug, CoreError> {
+ let mut slug = String::with_capacity(raw.len());
+ let mut last_was_separator = true;
+ for character in raw.chars().flat_map(char::to_lowercase) {
+ if character.is_ascii_alphanumeric() {
+ slug.push(character);
+ last_was_separator = false;
+ continue;
+ }
+ if matches!(character, ' ' | '-' | '_' | '/' | ':') && !last_was_separator {
+ slug.push('-');
+ last_was_separator = true;
+ }
}
-}
-
-fn encode_metric_unit(unit: MetricUnit) -> &'static str {
- match unit {
- MetricUnit::Seconds => "seconds",
- MetricUnit::Bytes => "bytes",
- MetricUnit::Count => "count",
- MetricUnit::Ratio => "ratio",
- MetricUnit::Custom => "custom",
+ if slug.ends_with('-') {
+ let _ = slug.pop();
}
-}
-
-fn decode_metric_unit(raw: &str) -> Result<MetricUnit, StoreError> {
- match raw {
- "seconds" => Ok(MetricUnit::Seconds),
- "bytes" => Ok(MetricUnit::Bytes),
- "count" => Ok(MetricUnit::Count),
- "ratio" => Ok(MetricUnit::Ratio),
- "custom" => Ok(MetricUnit::Custom),
- other => Err(StoreError::Json(serde_json::Error::io(io::Error::new(
- io::ErrorKind::InvalidData,
- format!("unknown metric unit `{other}`"),
- )))),
+ if slug.is_empty() {
+ slug.push_str("untitled");
}
+ Slug::new(slug)
}
-fn encode_optimization_objective(objective: OptimizationObjective) -> &'static str {
- match objective {
- OptimizationObjective::Minimize => "minimize",
- OptimizationObjective::Maximize => "maximize",
- OptimizationObjective::Target => "target",
+fn vertex_kind_name(vertex: VertexRef) -> &'static str {
+ match vertex {
+ VertexRef::Hypothesis(_) => "hypothesis",
+ VertexRef::Experiment(_) => "experiment",
}
}
-fn decode_optimization_objective(raw: &str) -> Result<OptimizationObjective, StoreError> {
- match raw {
- "minimize" => Ok(OptimizationObjective::Minimize),
- "maximize" => Ok(OptimizationObjective::Maximize),
- "target" => Ok(OptimizationObjective::Target),
- other => Err(StoreError::Json(serde_json::Error::io(io::Error::new(
- io::ErrorKind::InvalidData,
- format!("unknown optimization objective `{other}`"),
- )))),
+fn attachment_target_kind_name(target: AttachmentTargetRef) -> &'static str {
+ match target {
+ AttachmentTargetRef::Frontier(_) => "frontier",
+ AttachmentTargetRef::Hypothesis(_) => "hypothesis",
+ AttachmentTargetRef::Experiment(_) => "experiment",
}
}
-fn encode_frontier_verdict(verdict: FrontierVerdict) -> &'static str {
- match verdict {
- FrontierVerdict::Accepted => "accepted",
- FrontierVerdict::Kept => "kept",
- FrontierVerdict::Parked => "parked",
- FrontierVerdict::Rejected => "rejected",
+fn decode_vertex_ref(kind: &str, raw_id: &str) -> Result<VertexRef, rusqlite::Error> {
+ let uuid = parse_uuid_sql(raw_id)?;
+ match kind {
+ "hypothesis" => Ok(VertexRef::Hypothesis(HypothesisId::from_uuid(uuid))),
+ "experiment" => Ok(VertexRef::Experiment(ExperimentId::from_uuid(uuid))),
+ _ => Err(to_sql_conversion_error(StoreError::Json(
+ serde_json::Error::io(io::Error::new(
+ io::ErrorKind::InvalidData,
+ format!("invalid vertex kind `{kind}`"),
+ )),
+ ))),
}
}
-fn parse_frontier_verdict(raw: &str) -> Result<FrontierVerdict, StoreError> {
- match raw {
- "accepted" => Ok(FrontierVerdict::Accepted),
- "kept" => Ok(FrontierVerdict::Kept),
- "parked" => Ok(FrontierVerdict::Parked),
- "rejected" => Ok(FrontierVerdict::Rejected),
- other => Err(StoreError::Json(serde_json::Error::io(io::Error::new(
- io::ErrorKind::InvalidData,
- format!("unknown frontier verdict `{other}`"),
- )))),
+fn decode_attachment_target(
+ kind: &str,
+ raw_id: &str,
+) -> Result<AttachmentTargetRef, rusqlite::Error> {
+ let uuid = parse_uuid_sql(raw_id)?;
+ match kind {
+ "frontier" => Ok(AttachmentTargetRef::Frontier(FrontierId::from_uuid(uuid))),
+ "hypothesis" => Ok(AttachmentTargetRef::Hypothesis(HypothesisId::from_uuid(
+ uuid,
+ ))),
+ "experiment" => Ok(AttachmentTargetRef::Experiment(ExperimentId::from_uuid(
+ uuid,
+ ))),
+ _ => Err(to_sql_conversion_error(StoreError::Json(
+ serde_json::Error::io(io::Error::new(
+ io::ErrorKind::InvalidData,
+ format!("invalid attachment target kind `{kind}`"),
+ )),
+ ))),
}
}
-type RunDimensionColumns = (Option<String>, Option<f64>, Option<i64>, Option<String>);
-
-fn encode_run_dimension_columns(
- value: &RunDimensionValue,
-) -> Result<RunDimensionColumns, StoreError> {
- match value {
- RunDimensionValue::String(item) => Ok((Some(item.to_string()), None, None, None)),
- RunDimensionValue::Numeric(item) => Ok((None, Some(*item), None, None)),
- RunDimensionValue::Boolean(item) => Ok((None, None, Some(i64::from(*item)), None)),
- RunDimensionValue::Timestamp(item) => {
- let _ = OffsetDateTime::parse(item.as_str(), &Rfc3339)?;
- Ok((None, None, None, Some(item.to_string())))
+fn derive_active_tags(
+ active_hypotheses: &[HypothesisCurrentState],
+ open_experiments: &[ExperimentSummary],
+) -> Vec<TagName> {
+ let mut tags = BTreeSet::new();
+ for state in active_hypotheses {
+ tags.extend(state.hypothesis.tags.iter().cloned());
+ for experiment in &state.open_experiments {
+ tags.extend(experiment.tags.iter().cloned());
}
}
-}
-
-fn decode_run_dimension_value(
- value_type: FieldValueType,
- value_text: Option<String>,
- value_numeric: Option<f64>,
- value_boolean: Option<i64>,
- value_timestamp: Option<String>,
-) -> Result<RunDimensionValue, StoreError> {
- match value_type {
- FieldValueType::String => Ok(RunDimensionValue::String(NonEmptyText::new(
- value_text.ok_or_else(|| {
- StoreError::Json(serde_json::Error::io(io::Error::new(
- io::ErrorKind::InvalidData,
- "missing string dimension value",
- )))
- })?,
- )?)),
- FieldValueType::Numeric => Ok(RunDimensionValue::Numeric(value_numeric.ok_or_else(
- || {
- StoreError::Json(serde_json::Error::io(io::Error::new(
- io::ErrorKind::InvalidData,
- "missing numeric dimension value",
- )))
- },
- )?)),
- FieldValueType::Boolean => Ok(RunDimensionValue::Boolean(
- value_boolean.ok_or_else(|| {
- StoreError::Json(serde_json::Error::io(io::Error::new(
- io::ErrorKind::InvalidData,
- "missing boolean dimension value",
- )))
- })? != 0,
- )),
- FieldValueType::Timestamp => {
- let value = value_timestamp.ok_or_else(|| {
- StoreError::Json(serde_json::Error::io(io::Error::new(
- io::ErrorKind::InvalidData,
- "missing timestamp dimension value",
- )))
- })?;
- let _ = OffsetDateTime::parse(&value, &Rfc3339)?;
- Ok(RunDimensionValue::Timestamp(NonEmptyText::new(value)?))
- }
+ for experiment in open_experiments {
+ tags.extend(experiment.tags.iter().cloned());
}
+ tags.into_iter().collect()
}
-fn dimension_value_text(value: &RunDimensionValue) -> String {
- match value {
- RunDimensionValue::String(item) | RunDimensionValue::Timestamp(item) => item.to_string(),
- RunDimensionValue::Numeric(item) => item.to_string(),
- RunDimensionValue::Boolean(item) => item.to_string(),
- }
+fn dimension_subset_matches(
+ expected: &BTreeMap<NonEmptyText, RunDimensionValue>,
+ observed: &BTreeMap<NonEmptyText, RunDimensionValue>,
+) -> bool {
+ expected.iter().all(|(key, value)| {
+ observed
+ .get(key)
+ .is_some_and(|candidate| candidate == value)
+ })
}
-fn value_kind_name(value: &Value) -> &'static str {
- match value {
- Value::Null => "null",
- Value::Bool(_) => "boolean",
- Value::Number(_) => "numeric",
- Value::String(_) => "string",
- Value::Array(_) => "array",
- Value::Object(_) => "object",
+fn compare_metric_values(left: f64, right: f64, order: MetricRankOrder) -> std::cmp::Ordering {
+ let ordering = left
+ .partial_cmp(&right)
+ .unwrap_or(std::cmp::Ordering::Equal);
+ match order {
+ MetricRankOrder::Asc => ordering,
+ MetricRankOrder::Desc => ordering.reverse(),
}
}
-#[cfg(test)]
-mod tests {
- use std::collections::{BTreeMap, BTreeSet};
-
- use serde_json::json;
-
- use super::{
- CloseExperimentRequest, CreateFrontierRequest, CreateNodeRequest, DefineMetricRequest,
- DefineRunDimensionRequest, ListNodesQuery, MetricBestQuery, MetricFieldSource,
- MetricKeyQuery, MetricRankOrder, OpenExperimentRequest, PROJECT_SCHEMA_NAME, ProjectStore,
- RemoveSchemaFieldRequest, UpsertSchemaFieldRequest,
- };
- use fidget_spinner_core::{
- CommandRecipe, DiagnosticSeverity, EvaluationProtocol, FieldPresence, FieldRole,
- FieldValueType, FrontierContract, FrontierNote, FrontierVerdict, InferencePolicy,
- MetricSpec, MetricUnit, MetricValue, NodeAnnotation, NodeClass, NodePayload, NonEmptyText,
- OptimizationObjective, RunDimensionValue, TagName,
- };
-
- fn temp_project_root(label: &str) -> camino::Utf8PathBuf {
- let mut path = std::env::temp_dir();
- path.push(format!(
- "fidget_spinner_store_test_{}_{}",
- label,
- uuid::Uuid::now_v7()
- ));
- camino::Utf8PathBuf::from(path.to_string_lossy().into_owned())
- }
-
- #[test]
- fn init_writes_model_facing_schema_file() -> Result<(), super::StoreError> {
- let root = temp_project_root("schema");
- let store = ProjectStore::init(
- &root,
- NonEmptyText::new("test project")?,
- NonEmptyText::new("local.test")?,
- )?;
+fn all_metrics(outcome: &ExperimentOutcome) -> Vec<MetricValue> {
+ std::iter::once(outcome.primary_metric.clone())
+ .chain(outcome.supporting_metrics.clone())
+ .collect()
+}
- assert!(store.state_root().join(PROJECT_SCHEMA_NAME).exists());
- Ok(())
- }
+fn bool_to_sql(value: bool) -> i64 {
+ i64::from(value)
+}
- #[test]
- fn add_node_persists_hidden_annotations() -> Result<(), super::StoreError> {
- let root = temp_project_root("notes");
- let mut store = ProjectStore::init(
- &root,
- NonEmptyText::new("test project")?,
- NonEmptyText::new("local.test")?,
- )?;
- let node = store.add_node(CreateNodeRequest {
- class: NodeClass::Source,
- frontier_id: None,
- title: NonEmptyText::new("feature sketch")?,
- summary: Some(NonEmptyText::new("research note")?),
- tags: None,
- payload: NodePayload::with_schema(
- store.schema().schema_ref(),
- super::json_object(json!({"body": "freeform"}))?,
- ),
- annotations: vec![NodeAnnotation::hidden(NonEmptyText::new(
- "private scratch",
- )?)],
- attachments: Vec::new(),
- })?;
- let loaded = store
- .get_node(node.id)?
- .ok_or(super::StoreError::NodeNotFound(node.id))?;
-
- assert_eq!(loaded.annotations.len(), 1);
- assert_eq!(
- loaded.annotations[0].visibility,
- fidget_spinner_core::AnnotationVisibility::HiddenByDefault
- );
- Ok(())
- }
+fn count_rows(connection: &Connection, table: &str) -> Result<u64, StoreError> {
+ let sql = format!("SELECT COUNT(*) FROM {table}");
+ connection
+ .query_row(&sql, [], |row| row.get::<_, u64>(0))
+ .map_err(StoreError::from)
+}
- #[test]
- fn frontier_projection_tracks_experiment_counts() -> Result<(), super::StoreError> {
- let root = temp_project_root("frontier");
- let mut store = ProjectStore::init(
- &root,
- NonEmptyText::new("test project")?,
- NonEmptyText::new("local.test")?,
- )?;
- let projection = store.create_frontier(CreateFrontierRequest {
- label: NonEmptyText::new("optimization frontier")?,
- contract_title: NonEmptyText::new("contract root")?,
- contract_summary: None,
- contract: FrontierContract {
- objective: NonEmptyText::new("improve wall time")?,
- evaluation: EvaluationProtocol {
- benchmark_suites: BTreeSet::from([NonEmptyText::new("smoke")?]),
- primary_metric: MetricSpec {
- metric_key: NonEmptyText::new("wall_clock_s")?,
- unit: MetricUnit::Seconds,
- objective: OptimizationObjective::Minimize,
- },
- supporting_metrics: BTreeSet::new(),
- },
- promotion_criteria: vec![NonEmptyText::new("strict speedup")?],
- },
- })?;
+fn count_rows_where(
+ connection: &Connection,
+ table: &str,
+ predicate: &str,
+) -> Result<u64, StoreError> {
+ let sql = format!("SELECT COUNT(*) FROM {table} WHERE {predicate}");
+ connection
+ .query_row(&sql, [], |row| row.get::<_, u64>(0))
+ .map_err(StoreError::from)
+}
- assert_eq!(projection.open_experiment_count, 0);
- assert_eq!(projection.completed_experiment_count, 0);
- assert_eq!(projection.verdict_counts.accepted, 0);
- assert_eq!(projection.verdict_counts.kept, 0);
- assert_eq!(projection.verdict_counts.parked, 0);
- assert_eq!(projection.verdict_counts.rejected, 0);
- Ok(())
+fn apply_limit<T>(items: Vec<T>, limit: Option<u32>) -> Vec<T> {
+ if let Some(limit) = limit {
+ items.into_iter().take(limit as usize).collect()
+ } else {
+ items
}
+}
- #[test]
- fn list_nodes_hides_archived_by_default() -> Result<(), super::StoreError> {
- let root = temp_project_root("archive");
- let mut store = ProjectStore::init(
- &root,
- NonEmptyText::new("test project")?,
- NonEmptyText::new("local.test")?,
- )?;
- let node = store.add_node(CreateNodeRequest {
- class: NodeClass::Note,
- frontier_id: None,
- title: NonEmptyText::new("quick note")?,
- summary: Some(NonEmptyText::new("quick note summary")?),
- tags: Some(BTreeSet::new()),
- payload: NodePayload::with_schema(
- store.schema().schema_ref(),
- super::json_object(json!({"body": "hello"}))?,
- ),
- annotations: Vec::new(),
- attachments: Vec::new(),
- })?;
- store.archive_node(node.id)?;
-
- let visible = store.list_nodes(ListNodesQuery::default())?;
- let hidden = store.list_nodes(ListNodesQuery {
- include_archived: true,
- ..ListNodesQuery::default()
- })?;
-
- assert!(visible.is_empty());
- assert_eq!(hidden.len(), 1);
- Ok(())
+fn apply_optional_text_patch<T>(patch: Option<TextPatch<T>>, current: Option<T>) -> Option<T> {
+ match patch {
+ None => current,
+ Some(TextPatch::Set(value)) => Some(value),
+ Some(TextPatch::Clear) => None,
}
+}
- #[test]
- fn frontier_filter_includes_root_contract_node() -> Result<(), super::StoreError> {
- let root = temp_project_root("contract-filter");
- let mut store = ProjectStore::init(
- &root,
- NonEmptyText::new("test project")?,
- NonEmptyText::new("local.test")?,
- )?;
- let projection = store.create_frontier(CreateFrontierRequest {
- label: NonEmptyText::new("frontier")?,
- contract_title: NonEmptyText::new("root contract")?,
- contract_summary: None,
- contract: FrontierContract {
- objective: NonEmptyText::new("optimize")?,
- evaluation: EvaluationProtocol {
- benchmark_suites: BTreeSet::from([NonEmptyText::new("smoke")?]),
- primary_metric: MetricSpec {
- metric_key: NonEmptyText::new("wall_clock_s")?,
- unit: MetricUnit::Seconds,
- objective: OptimizationObjective::Minimize,
- },
- supporting_metrics: BTreeSet::new(),
- },
- promotion_criteria: vec![NonEmptyText::new("faster")?],
- },
- })?;
-
- let nodes = store.list_nodes(ListNodesQuery {
- frontier_id: Some(projection.frontier.id),
- ..ListNodesQuery::default()
- })?;
+fn write_json_file<T: Serialize>(path: &Utf8Path, value: &T) -> Result<(), StoreError> {
+ let bytes = serde_json::to_vec_pretty(value)?;
+ fs::write(path.as_std_path(), bytes)?;
+ Ok(())
+}
- assert_eq!(nodes.len(), 1);
- assert_eq!(nodes[0].class, NodeClass::Contract);
- Ok(())
- }
+fn read_json_file<T: for<'de> Deserialize<'de>>(path: &Utf8Path) -> Result<T, StoreError> {
+ let bytes = fs::read(path.as_std_path())?;
+ serde_json::from_slice(&bytes).map_err(StoreError::from)
+}
- #[test]
- fn notes_require_explicit_tags_even_when_empty() -> Result<(), super::StoreError> {
- let root = temp_project_root("note-tags-required");
- let mut store = ProjectStore::init(
- &root,
- NonEmptyText::new("test project")?,
- NonEmptyText::new("local.test")?,
- )?;
+fn encode_json<T: Serialize>(value: &T) -> Result<String, StoreError> {
+ serde_json::to_string(value).map_err(StoreError::from)
+}
- let result = store.add_node(CreateNodeRequest {
- class: NodeClass::Note,
- frontier_id: None,
- title: NonEmptyText::new("quick note")?,
- summary: Some(NonEmptyText::new("quick note summary")?),
- tags: None,
- payload: NodePayload::with_schema(
- store.schema().schema_ref(),
- super::json_object(json!({"body": "hello"}))?,
- ),
- annotations: Vec::new(),
- attachments: Vec::new(),
- });
+fn decode_json<T: for<'de> Deserialize<'de>>(raw: &str) -> Result<T, StoreError> {
+ serde_json::from_str(raw).map_err(StoreError::from)
+}
- assert!(matches!(result, Err(super::StoreError::NoteTagsRequired)));
- Ok(())
- }
+fn encode_timestamp(timestamp: OffsetDateTime) -> Result<String, StoreError> {
+ timestamp.format(&Rfc3339).map_err(StoreError::from)
+}
- #[test]
- fn tags_round_trip_and_filter_node_list() -> Result<(), super::StoreError> {
- let root = temp_project_root("tag-roundtrip");
- let mut store = ProjectStore::init(
- &root,
- NonEmptyText::new("test project")?,
- NonEmptyText::new("local.test")?,
- )?;
- let cuts = store.add_tag(
- TagName::new("cuts/core")?,
- NonEmptyText::new("Core cutset work")?,
- )?;
- let heuristics = store.add_tag(
- TagName::new("heuristic")?,
- NonEmptyText::new("Heuristic tuning")?,
- )?;
- let note = store.add_node(CreateNodeRequest {
- class: NodeClass::Note,
- frontier_id: None,
- title: NonEmptyText::new("tagged note")?,
- summary: Some(NonEmptyText::new("tagged note summary")?),
- tags: Some(BTreeSet::from([cuts.name.clone(), heuristics.name.clone()])),
- payload: NodePayload::with_schema(
- store.schema().schema_ref(),
- super::json_object(json!({"body": "tagged"}))?,
- ),
- annotations: Vec::new(),
- attachments: Vec::new(),
- })?;
+fn decode_timestamp(raw: &str) -> Result<OffsetDateTime, time::error::Parse> {
+ OffsetDateTime::parse(raw, &Rfc3339)
+}
- let loaded = store
- .get_node(note.id)?
- .ok_or(super::StoreError::NodeNotFound(note.id))?;
- assert_eq!(loaded.tags.len(), 2);
+fn state_root(project_root: &Utf8Path) -> Utf8PathBuf {
+ project_root.join(STORE_DIR_NAME)
+}
- let filtered = store.list_nodes(ListNodesQuery {
- tags: BTreeSet::from([cuts.name]),
- ..ListNodesQuery::default()
- })?;
- assert_eq!(filtered.len(), 1);
- assert_eq!(filtered[0].tags.len(), 2);
- Ok(())
+#[must_use]
+pub fn discover_project_root(path: impl AsRef<Utf8Path>) -> Option<Utf8PathBuf> {
+ let mut cursor = discovery_start(path.as_ref());
+ loop {
+ if state_root(&cursor).exists() {
+ return Some(cursor);
+ }
+ let parent = cursor.parent()?;
+ cursor = parent.to_path_buf();
}
+}
- #[test]
- fn prose_nodes_require_summary_and_body() -> Result<(), super::StoreError> {
- let root = temp_project_root("prose-summary");
- let mut store = ProjectStore::init(
- &root,
- NonEmptyText::new("test project")?,
- NonEmptyText::new("local.test")?,
- )?;
-
- let missing_summary = store.add_node(CreateNodeRequest {
- class: NodeClass::Source,
- frontier_id: None,
- title: NonEmptyText::new("research note")?,
- summary: None,
- tags: None,
- payload: NodePayload::with_schema(
- store.schema().schema_ref(),
- super::json_object(json!({"body": "research body"}))?,
- ),
- annotations: Vec::new(),
- attachments: Vec::new(),
- });
- assert!(matches!(
- missing_summary,
- Err(super::StoreError::ProseSummaryRequired(NodeClass::Source))
- ));
-
- let missing_body = store.add_node(CreateNodeRequest {
- class: NodeClass::Note,
- frontier_id: None,
- title: NonEmptyText::new("quick note")?,
- summary: Some(NonEmptyText::new("quick note summary")?),
- tags: Some(BTreeSet::new()),
- payload: NodePayload::with_schema(store.schema().schema_ref(), serde_json::Map::new()),
- annotations: Vec::new(),
- attachments: Vec::new(),
- });
- assert!(matches!(
- missing_body,
- Err(super::StoreError::ProseBodyRequired(NodeClass::Note))
- ));
- Ok(())
+fn discovery_start(path: &Utf8Path) -> Utf8PathBuf {
+ match fs::metadata(path.as_std_path()) {
+ Ok(metadata) if metadata.is_file() => path
+ .parent()
+ .map_or_else(|| path.to_path_buf(), Utf8Path::to_path_buf),
+ _ => path.to_path_buf(),
}
+}
- #[test]
- fn opening_store_backfills_missing_prose_summaries() -> Result<(), super::StoreError> {
- let root = temp_project_root("summary-backfill");
- let mut store = ProjectStore::init(
- &root,
- NonEmptyText::new("test project")?,
- NonEmptyText::new("local.test")?,
- )?;
- let node = store.add_node(CreateNodeRequest {
- class: NodeClass::Source,
- frontier_id: None,
- title: NonEmptyText::new("research note")?,
- summary: Some(NonEmptyText::new("temporary summary")?),
- tags: None,
- payload: NodePayload::with_schema(
- store.schema().schema_ref(),
- super::json_object(json!({"body": "First paragraph.\n\nSecond paragraph."}))?,
- ),
- annotations: Vec::new(),
- attachments: Vec::new(),
- })?;
- drop(store);
-
- let connection = rusqlite::Connection::open(
- root.join(super::STORE_DIR_NAME)
- .join(super::STATE_DB_NAME)
- .as_std_path(),
- )?;
- let _ = connection.execute(
- "UPDATE nodes SET summary = NULL WHERE id = ?1",
- rusqlite::params![node.id.to_string()],
- )?;
- drop(connection);
-
- let reopened = ProjectStore::open(&root)?;
- let loaded = reopened
- .get_node(node.id)?
- .ok_or(super::StoreError::NodeNotFound(node.id))?;
- assert_eq!(
- loaded.summary.as_ref().map(NonEmptyText::as_str),
- Some("First paragraph.")
- );
- Ok(())
- }
+fn to_sql_conversion_error(error: StoreError) -> rusqlite::Error {
+ rusqlite::Error::FromSqlConversionFailure(0, rusqlite::types::Type::Text, Box::new(error))
+}
- #[test]
- fn schema_field_upsert_remove_persists_and_bumps_version() -> Result<(), super::StoreError> {
- let root = temp_project_root("schema-upsert-remove");
- let mut store = ProjectStore::init(
- &root,
- NonEmptyText::new("test project")?,
- NonEmptyText::new("local.test")?,
- )?;
- let initial_version = store.schema().version;
-
- let field = store.upsert_schema_field(UpsertSchemaFieldRequest {
- name: NonEmptyText::new("scenario")?,
- node_classes: BTreeSet::from([NodeClass::Hypothesis, NodeClass::Analysis]),
- presence: FieldPresence::Recommended,
- severity: DiagnosticSeverity::Warning,
- role: FieldRole::ProjectionGate,
- inference_policy: InferencePolicy::ManualOnly,
- value_type: Some(FieldValueType::String),
- })?;
- assert_eq!(field.name.as_str(), "scenario");
- assert_eq!(store.schema().version, initial_version + 1);
- assert!(
- store
- .schema()
- .fields
- .iter()
- .any(|item| item.name.as_str() == "scenario")
- );
- drop(store);
-
- let mut reopened = ProjectStore::open(&root)?;
- assert_eq!(reopened.schema().version, initial_version + 1);
- assert!(
- reopened
- .schema()
- .fields
- .iter()
- .any(|item| item.name.as_str() == "scenario")
- );
+fn core_to_sql_conversion_error(error: CoreError) -> rusqlite::Error {
+ to_sql_conversion_error(StoreError::from(error))
+}
- let removed = reopened.remove_schema_field(RemoveSchemaFieldRequest {
- name: NonEmptyText::new("scenario")?,
- node_classes: Some(BTreeSet::from([NodeClass::Hypothesis, NodeClass::Analysis])),
- })?;
- assert_eq!(removed, 1);
- assert_eq!(reopened.schema().version, initial_version + 2);
- assert!(
- !reopened
- .schema()
- .fields
- .iter()
- .any(|item| item.name.as_str() == "scenario")
- );
- Ok(())
- }
+fn uuid_to_sql_conversion_error(error: uuid::Error) -> rusqlite::Error {
+ to_sql_conversion_error(StoreError::from(error))
+}
- #[test]
- fn metric_queries_surface_canonical_and_payload_numeric_fields() -> Result<(), super::StoreError>
- {
- let root = temp_project_root("metric-best");
- let mut store = ProjectStore::init(
- &root,
- NonEmptyText::new("test project")?,
- NonEmptyText::new("local.test")?,
- )?;
- let projection = store.create_frontier(CreateFrontierRequest {
- label: NonEmptyText::new("optimization frontier")?,
- contract_title: NonEmptyText::new("contract root")?,
- contract_summary: None,
- contract: FrontierContract {
- objective: NonEmptyText::new("improve wall time")?,
- evaluation: EvaluationProtocol {
- benchmark_suites: BTreeSet::from([NonEmptyText::new("smoke")?]),
- primary_metric: MetricSpec {
- metric_key: NonEmptyText::new("wall_clock_s")?,
- unit: MetricUnit::Seconds,
- objective: OptimizationObjective::Minimize,
- },
- supporting_metrics: BTreeSet::new(),
- },
- promotion_criteria: vec![NonEmptyText::new("strict speedup")?],
- },
- })?;
- let frontier_id = projection.frontier.id;
- let _ = store.define_metric(DefineMetricRequest {
- key: NonEmptyText::new("wall_clock_s")?,
- unit: MetricUnit::Seconds,
- objective: OptimizationObjective::Minimize,
- description: Some(NonEmptyText::new("elapsed wall time")?),
- })?;
- let _ = store.define_run_dimension(DefineRunDimensionRequest {
- key: NonEmptyText::new("scenario")?,
- value_type: FieldValueType::String,
- description: Some(NonEmptyText::new("workload family")?),
- })?;
- let _ = store.define_run_dimension(DefineRunDimensionRequest {
- key: NonEmptyText::new("duration_s")?,
- value_type: FieldValueType::Numeric,
- description: Some(NonEmptyText::new("time budget in seconds")?),
- })?;
+fn time_to_sql_conversion_error(error: time::error::Parse) -> rusqlite::Error {
+ to_sql_conversion_error(StoreError::from(error))
+}
- let first_hypothesis = store.add_node(CreateNodeRequest {
- class: NodeClass::Hypothesis,
- frontier_id: Some(frontier_id),
- title: NonEmptyText::new("first hypothesis")?,
- summary: Some(NonEmptyText::new("first hypothesis summary")?),
- tags: None,
- payload: NodePayload::with_schema(
- store.schema().schema_ref(),
- super::json_object(json!({"body": "first body", "latency_hint": 14.0}))?,
- ),
- annotations: Vec::new(),
- attachments: Vec::new(),
- })?;
- let second_hypothesis = store.add_node(CreateNodeRequest {
- class: NodeClass::Hypothesis,
- frontier_id: Some(frontier_id),
- title: NonEmptyText::new("second hypothesis")?,
- summary: Some(NonEmptyText::new("second hypothesis summary")?),
- tags: None,
- payload: NodePayload::with_schema(
- store.schema().schema_ref(),
- super::json_object(json!({"body": "second body", "latency_hint": 7.0}))?,
- ),
- annotations: Vec::new(),
- attachments: Vec::new(),
- })?;
- let first_experiment = store.open_experiment(open_experiment_request(
- frontier_id,
- first_hypothesis.id,
- "first experiment",
- )?)?;
- let second_experiment = store.open_experiment(open_experiment_request(
- frontier_id,
- second_hypothesis.id,
- "second experiment",
- )?)?;
-
- let _first_receipt = store.close_experiment(experiment_request(
- &root,
- first_experiment.id,
- "first run",
- 10.0,
- run_dimensions("belt_4x5", 20.0)?,
- )?)?;
- let second_receipt = store.close_experiment(experiment_request(
- &root,
- second_experiment.id,
- "second run",
- 5.0,
- run_dimensions("belt_4x5", 60.0)?,
- )?)?;
-
- let keys = store.list_metric_keys()?;
- assert!(keys.iter().any(|key| {
- key.key.as_str() == "wall_clock_s" && key.source == MetricFieldSource::RunMetric
- }));
- assert!(keys.iter().any(|key| {
- key.key.as_str() == "latency_hint" && key.source == MetricFieldSource::HypothesisPayload
- }));
- assert!(keys.iter().any(|key| {
- key.key.as_str() == "wall_clock_s"
- && key.source == MetricFieldSource::RunMetric
- && key.description.as_ref().map(NonEmptyText::as_str) == Some("elapsed wall time")
- }));
-
- let filtered_keys = store.list_metric_keys_filtered(MetricKeyQuery {
- frontier_id: Some(frontier_id),
- source: Some(MetricFieldSource::RunMetric),
- dimensions: run_dimensions("belt_4x5", 60.0)?,
- })?;
- assert_eq!(filtered_keys.len(), 1);
- assert_eq!(filtered_keys[0].experiment_count, 1);
-
- let dimension_summaries = store.list_run_dimensions()?;
- assert!(dimension_summaries.iter().any(|dimension| {
- dimension.key.as_str() == "benchmark_suite"
- && dimension.value_type == FieldValueType::String
- && dimension.observed_run_count == 2
- }));
- assert!(dimension_summaries.iter().any(|dimension| {
- dimension.key.as_str() == "scenario"
- && dimension.description.as_ref().map(NonEmptyText::as_str)
- == Some("workload family")
- }));
- assert!(dimension_summaries.iter().any(|dimension| {
- dimension.key.as_str() == "duration_s"
- && dimension.value_type == FieldValueType::Numeric
- && dimension.distinct_value_count == 2
- }));
-
- let canonical_best = store.best_metrics(MetricBestQuery {
- key: NonEmptyText::new("wall_clock_s")?,
- frontier_id: Some(frontier_id),
- source: Some(MetricFieldSource::RunMetric),
- dimensions: run_dimensions("belt_4x5", 60.0)?,
- order: None,
- limit: 5,
- })?;
- assert_eq!(canonical_best.len(), 1);
- assert_eq!(canonical_best[0].value, 5.0);
- assert_eq!(
- canonical_best[0].experiment_title.as_str(),
- "second experiment"
- );
- assert_eq!(canonical_best[0].verdict, FrontierVerdict::Kept);
- assert_eq!(
- canonical_best[0]
- .dimensions
- .get(&NonEmptyText::new("duration_s")?),
- Some(&RunDimensionValue::Numeric(60.0))
- );
+fn parse_non_empty_text(raw: &str) -> Result<NonEmptyText, rusqlite::Error> {
+ NonEmptyText::new(raw.to_owned()).map_err(core_to_sql_conversion_error)
+}
- let payload_best = store.best_metrics(MetricBestQuery {
- key: NonEmptyText::new("latency_hint")?,
- frontier_id: Some(frontier_id),
- source: Some(MetricFieldSource::HypothesisPayload),
- dimensions: run_dimensions("belt_4x5", 60.0)?,
- order: Some(MetricRankOrder::Asc),
- limit: 5,
- })?;
- assert_eq!(payload_best.len(), 1);
- assert_eq!(payload_best[0].value, 7.0);
- assert_eq!(payload_best[0].hypothesis_node_id, second_hypothesis.id);
-
- let missing_order = store.best_metrics(MetricBestQuery {
- key: NonEmptyText::new("latency_hint")?,
- frontier_id: Some(frontier_id),
- source: Some(MetricFieldSource::HypothesisPayload),
- dimensions: BTreeMap::new(),
- order: None,
- limit: 5,
- });
- assert!(matches!(
- missing_order,
- Err(super::StoreError::MetricOrderRequired { .. })
- ));
- assert_eq!(
- second_receipt.experiment.title.as_str(),
- "second experiment"
- );
- Ok(())
- }
+fn parse_optional_non_empty_text(
+ raw: Option<String>,
+) -> Result<Option<NonEmptyText>, rusqlite::Error> {
+ raw.map(|value| parse_non_empty_text(&value)).transpose()
+}
- #[test]
- fn opening_store_backfills_legacy_benchmark_suite_dimensions() -> Result<(), super::StoreError>
- {
- let root = temp_project_root("metric-plane-backfill");
- let mut store = ProjectStore::init(
- &root,
- NonEmptyText::new("test project")?,
- NonEmptyText::new("local.test")?,
- )?;
- let projection = store.create_frontier(CreateFrontierRequest {
- label: NonEmptyText::new("migration frontier")?,
- contract_title: NonEmptyText::new("migration contract")?,
- contract_summary: None,
- contract: FrontierContract {
- objective: NonEmptyText::new("exercise metric migration")?,
- evaluation: EvaluationProtocol {
- benchmark_suites: BTreeSet::from([NonEmptyText::new("smoke")?]),
- primary_metric: MetricSpec {
- metric_key: NonEmptyText::new("wall_clock_s")?,
- unit: MetricUnit::Seconds,
- objective: OptimizationObjective::Minimize,
- },
- supporting_metrics: BTreeSet::new(),
- },
- promotion_criteria: vec![NonEmptyText::new("keep the metric plane queryable")?],
- },
- })?;
- let frontier_id = projection.frontier.id;
- let hypothesis = store.add_node(CreateNodeRequest {
- class: NodeClass::Hypothesis,
- frontier_id: Some(frontier_id),
- title: NonEmptyText::new("candidate hypothesis")?,
- summary: Some(NonEmptyText::new("candidate hypothesis summary")?),
- tags: None,
- payload: NodePayload::with_schema(
- store.schema().schema_ref(),
- super::json_object(json!({"latency_hint": 9.0}))?,
- ),
- annotations: Vec::new(),
- attachments: Vec::new(),
- })?;
- let experiment = store.open_experiment(open_experiment_request(
- frontier_id,
- hypothesis.id,
- "migration experiment",
- )?)?;
- let _ = store.close_experiment(experiment_request(
- &root,
- experiment.id,
- "migration run",
- 11.0,
- BTreeMap::from([(
- NonEmptyText::new("benchmark_suite")?,
- RunDimensionValue::String(NonEmptyText::new("smoke")?),
- )]),
- )?)?;
- drop(store);
-
- let connection = rusqlite::Connection::open(
- root.join(super::STORE_DIR_NAME)
- .join(super::STATE_DB_NAME)
- .as_std_path(),
- )?;
- let _ = connection.execute("DELETE FROM run_dimensions", [])?;
- drop(connection);
-
- let reopened = ProjectStore::open(&root)?;
- let dimensions = reopened.list_run_dimensions()?;
- assert!(dimensions.iter().any(|dimension| {
- dimension.key.as_str() == "benchmark_suite" && dimension.observed_run_count == 1
- }));
-
- let best = reopened.best_metrics(MetricBestQuery {
- key: NonEmptyText::new("wall_clock_s")?,
- frontier_id: Some(frontier_id),
- source: Some(MetricFieldSource::RunMetric),
- dimensions: BTreeMap::from([(
- NonEmptyText::new("benchmark_suite")?,
- RunDimensionValue::String(NonEmptyText::new("smoke")?),
- )]),
- order: None,
- limit: 5,
- })?;
- assert_eq!(best.len(), 1);
- assert_eq!(best[0].value, 11.0);
- Ok(())
- }
+fn parse_slug(raw: &str) -> Result<Slug, rusqlite::Error> {
+ Slug::new(raw.to_owned()).map_err(core_to_sql_conversion_error)
+}
- fn open_experiment_request(
- frontier_id: fidget_spinner_core::FrontierId,
- hypothesis_node_id: fidget_spinner_core::NodeId,
- title: &str,
- ) -> Result<OpenExperimentRequest, super::StoreError> {
- Ok(OpenExperimentRequest {
- frontier_id,
- hypothesis_node_id,
- title: NonEmptyText::new(title)?,
- summary: Some(NonEmptyText::new(format!("{title} summary"))?),
- })
- }
+fn parse_tag_name(raw: &str) -> Result<TagName, rusqlite::Error> {
+ TagName::new(raw.to_owned()).map_err(core_to_sql_conversion_error)
+}
- fn experiment_request(
- root: &camino::Utf8Path,
- experiment_id: fidget_spinner_core::ExperimentId,
- run_title: &str,
- wall_clock_s: f64,
- dimensions: BTreeMap<NonEmptyText, RunDimensionValue>,
- ) -> Result<CloseExperimentRequest, super::StoreError> {
- Ok(CloseExperimentRequest {
- experiment_id,
- run_title: NonEmptyText::new(run_title)?,
- run_summary: Some(NonEmptyText::new("run summary")?),
- backend: fidget_spinner_core::ExecutionBackend::WorktreeProcess,
- dimensions,
- command: CommandRecipe::new(
- root.to_path_buf(),
- vec![NonEmptyText::new("true")?],
- BTreeMap::new(),
- )?,
- primary_metric: MetricValue {
- key: NonEmptyText::new("wall_clock_s")?,
- value: wall_clock_s,
- },
- supporting_metrics: Vec::new(),
- note: FrontierNote {
- summary: NonEmptyText::new("note summary")?,
- next_hypotheses: Vec::new(),
- },
- verdict: FrontierVerdict::Kept,
- analysis: None,
- decision_title: NonEmptyText::new("decision")?,
- decision_rationale: NonEmptyText::new("decision rationale")?,
- })
- }
+fn parse_uuid_sql(raw: &str) -> Result<Uuid, rusqlite::Error> {
+ Uuid::parse_str(raw).map_err(uuid_to_sql_conversion_error)
+}
- fn run_dimensions(
- scenario: &str,
- duration_s: f64,
- ) -> Result<BTreeMap<NonEmptyText, RunDimensionValue>, super::StoreError> {
- Ok(BTreeMap::from([
- (
- NonEmptyText::new("benchmark_suite")?,
- RunDimensionValue::String(NonEmptyText::new("smoke")?),
- ),
- (
- NonEmptyText::new("scenario")?,
- RunDimensionValue::String(NonEmptyText::new(scenario)?),
- ),
- (
- NonEmptyText::new("duration_s")?,
- RunDimensionValue::Numeric(duration_s),
- ),
- ]))
- }
+fn parse_timestamp_sql(raw: &str) -> Result<OffsetDateTime, rusqlite::Error> {
+ decode_timestamp(raw).map_err(time_to_sql_conversion_error)
}