swarm repositories / source
aboutsummaryrefslogtreecommitdiff
path: root/crates/fidget-spinner-cli
diff options
context:
space:
mode:
authormain <main@swarm.moe>2026-03-20 16:00:30 -0400
committermain <main@swarm.moe>2026-03-20 16:00:30 -0400
commit9d63844f3a28fde70b19500422f17379e99e588a (patch)
tree163cfbd65a8d3528346561410ef39eb1183a16f2 /crates/fidget-spinner-cli
parent22fe3d2ce7478450a1d7443c4ecbd85fd4c46716 (diff)
downloadfidget_spinner-9d63844f3a28fde70b19500422f17379e99e588a.zip
Refound Spinner as an austere frontier ledger
Diffstat (limited to 'crates/fidget-spinner-cli')
-rw-r--r--crates/fidget-spinner-cli/Cargo.toml3
-rw-r--r--crates/fidget-spinner-cli/src/main.rs2014
-rw-r--r--crates/fidget-spinner-cli/src/mcp/catalog.rs1380
-rw-r--r--crates/fidget-spinner-cli/src/mcp/host/runtime.rs53
-rw-r--r--crates/fidget-spinner-cli/src/mcp/service.rs3448
-rw-r--r--crates/fidget-spinner-cli/src/ui.rs1603
-rw-r--r--crates/fidget-spinner-cli/tests/mcp_hardening.rs1574
7 files changed, 4224 insertions, 5851 deletions
diff --git a/crates/fidget-spinner-cli/Cargo.toml b/crates/fidget-spinner-cli/Cargo.toml
index bf8ffb7..58263ad 100644
--- a/crates/fidget-spinner-cli/Cargo.toml
+++ b/crates/fidget-spinner-cli/Cargo.toml
@@ -18,14 +18,13 @@ clap.workspace = true
dirs.workspace = true
fidget-spinner-core = { path = "../fidget-spinner-core" }
fidget-spinner-store-sqlite = { path = "../fidget-spinner-store-sqlite" }
-linkify.workspace = true
libmcp = { git = "https://git.swarm.moe/libmcp.git", rev = "84e898d9ba699451d5d13fe384e7bbe220564bc1" }
maud.workspace = true
+percent-encoding.workspace = true
serde.workspace = true
serde_json.workspace = true
time.workspace = true
tokio.workspace = true
-uuid.workspace = true
[lints]
workspace = true
diff --git a/crates/fidget-spinner-cli/src/main.rs b/crates/fidget-spinner-cli/src/main.rs
index f56e751..9de2515 100644
--- a/crates/fidget-spinner-cli/src/main.rs
+++ b/crates/fidget-spinner-cli/src/main.rs
@@ -4,27 +4,28 @@ mod ui;
use std::collections::{BTreeMap, BTreeSet};
use std::fs;
+use std::io;
use std::net::SocketAddr;
use std::path::{Path, PathBuf};
use camino::{Utf8Path, Utf8PathBuf};
use clap::{Args, Parser, Subcommand, ValueEnum};
use fidget_spinner_core::{
- AnnotationVisibility, CommandRecipe, DiagnosticSeverity, ExecutionBackend, FieldPresence,
- FieldRole, FieldValueType, FrontierContract, FrontierNote, FrontierVerdict, InferencePolicy,
- MetricSpec, MetricUnit, MetricValue, NodeAnnotation, NodeClass, NodePayload, NonEmptyText,
- OptimizationObjective, ProjectFieldSpec, TagName,
+ ArtifactKind, CommandRecipe, ExecutionBackend, ExperimentAnalysis, ExperimentStatus,
+ FieldValueType, FrontierVerdict, MetricUnit, MetricVisibility, NonEmptyText,
+ OptimizationObjective, RunDimensionValue, Slug, TagName,
};
use fidget_spinner_store_sqlite::{
- CloseExperimentRequest, CreateFrontierRequest, CreateNodeRequest, DefineMetricRequest,
- DefineRunDimensionRequest, EdgeAttachment, EdgeAttachmentDirection, ExperimentAnalysisDraft,
- ListNodesQuery, MetricBestQuery, MetricFieldSource, MetricKeyQuery, MetricRankOrder,
- OpenExperimentRequest, ProjectStore, RemoveSchemaFieldRequest, STORE_DIR_NAME, StoreError,
- UpsertSchemaFieldRequest,
+ AttachmentSelector, CloseExperimentRequest, CreateArtifactRequest, CreateFrontierRequest,
+ CreateHypothesisRequest, DefineMetricRequest, DefineRunDimensionRequest,
+ ExperimentOutcomePatch, FrontierRoadmapItemDraft, ListArtifactsQuery, ListExperimentsQuery,
+ ListHypothesesQuery, MetricBestQuery, MetricKeysQuery, MetricRankOrder, MetricScope,
+ OpenExperimentRequest, ProjectStore, STORE_DIR_NAME, StoreError, TextPatch,
+ UpdateArtifactRequest, UpdateExperimentRequest, UpdateFrontierBriefRequest,
+ UpdateHypothesisRequest, VertexSelector,
};
use serde::Serialize;
-use serde_json::{Map, Value, json};
-use uuid::Uuid;
+use serde_json::Value;
#[derive(Parser)]
#[command(
@@ -41,53 +42,52 @@ struct Cli {
enum Command {
/// Initialize a project-local `.fidget_spinner/` store.
Init(InitArgs),
- /// Read the local project payload schema.
- Schema {
+ /// Inspect project metadata and coarse counts.
+ Project {
#[command(subcommand)]
- command: SchemaCommand,
+ command: ProjectCommand,
},
- /// Create and inspect frontiers.
+ /// Manage the repo-local tag registry.
+ Tag {
+ #[command(subcommand)]
+ command: TagCommand,
+ },
+ /// Create and inspect frontier scopes.
Frontier {
#[command(subcommand)]
command: FrontierCommand,
},
- /// Create, inspect, and mutate DAG nodes.
- Node {
+ /// Record and inspect hypotheses.
+ Hypothesis {
#[command(subcommand)]
- command: NodeCommand,
+ command: HypothesisCommand,
},
- /// Record terse off-path notes.
- Note(NoteCommand),
- /// Record core-path hypotheses before experimental work begins.
- Hypothesis(HypothesisCommand),
- /// Manage the repo-local tag registry.
- Tag {
+ /// Open, inspect, update, and close experiments.
+ Experiment {
#[command(subcommand)]
- command: TagCommand,
+ command: ExperimentCommand,
},
- /// Record imported sources and documentary context.
- Source(SourceCommand),
- /// Inspect rankable metrics across closed experiments.
+ /// Register external references and attach them to the ledger.
+ Artifact {
+ #[command(subcommand)]
+ command: ArtifactCommand,
+ },
+ /// Manage project-level metric definitions and rankings.
Metric {
#[command(subcommand)]
command: MetricCommand,
},
- /// Define and inspect run dimensions used to slice experiment metrics.
+ /// Define the typed dimension vocabulary used to slice experiments.
Dimension {
#[command(subcommand)]
command: DimensionCommand,
},
- /// Close a core-path experiment atomically.
- Experiment {
- #[command(subcommand)]
- command: ExperimentCommand,
- },
/// Serve the hardened stdio MCP endpoint.
Mcp {
#[command(subcommand)]
command: McpCommand,
},
- /// Serve the minimal local web navigator.
+ /// Serve the local navigator.
Ui {
#[command(subcommand)]
command: UiCommand,
@@ -101,295 +101,148 @@ enum Command {
#[derive(Args)]
struct InitArgs {
- /// Project root to initialize.
#[arg(long, default_value = ".")]
project: PathBuf,
- /// Human-facing project name. Defaults to the directory name.
#[arg(long)]
name: Option<String>,
- /// Payload schema namespace written into `.fidget_spinner/schema.json`.
- #[arg(long, default_value = "local.project")]
- namespace: String,
}
#[derive(Subcommand)]
-enum SchemaCommand {
- /// Show the current project schema as JSON.
- Show(ProjectArg),
- /// Add or replace one project schema field definition.
- UpsertField(SchemaFieldUpsertArgs),
- /// Remove one project schema field definition.
- RemoveField(SchemaFieldRemoveArgs),
+enum ProjectCommand {
+ Status(ProjectArg),
}
#[derive(Subcommand)]
-enum FrontierCommand {
- /// Create a frontier and root contract node.
- Init(FrontierInitArgs),
- /// Show one frontier projection or list frontiers when omitted.
- Status(FrontierStatusArgs),
-}
-
-#[derive(Args)]
-struct FrontierInitArgs {
- #[command(flatten)]
- project: ProjectArg,
- #[arg(long)]
- label: String,
- #[arg(long)]
- objective: String,
- #[arg(long, default_value = "frontier contract")]
- contract_title: String,
- #[arg(long)]
- contract_summary: Option<String>,
- #[arg(long = "benchmark-suite")]
- benchmark_suites: Vec<String>,
- #[arg(long = "promotion-criterion")]
- promotion_criteria: Vec<String>,
- #[arg(long = "primary-metric-key")]
- primary_metric_key: String,
- #[arg(long = "primary-metric-unit", value_enum)]
- primary_metric_unit: CliMetricUnit,
- #[arg(long = "primary-metric-objective", value_enum)]
- primary_metric_objective: CliOptimizationObjective,
-}
-
-#[derive(Args)]
-struct FrontierStatusArgs {
- #[command(flatten)]
- project: ProjectArg,
- #[arg(long)]
- frontier: Option<String>,
+enum TagCommand {
+ Add(TagAddArgs),
+ List(ProjectArg),
}
#[derive(Subcommand)]
-enum NodeCommand {
- /// Create a generic DAG node.
- Add(NodeAddArgs),
- /// List recent nodes.
- List(NodeListArgs),
- /// Show one node in full.
- Show(NodeShowArgs),
- /// Attach an annotation to a node.
- Annotate(NodeAnnotateArgs),
- /// Archive a node without deleting it.
- Archive(NodeArchiveArgs),
-}
-
-#[derive(Args)]
-struct NodeAddArgs {
- #[command(flatten)]
- project: ProjectArg,
- #[arg(long, value_enum)]
- class: CliNodeClass,
- #[arg(long)]
- frontier: Option<String>,
- #[arg(long)]
- title: String,
- #[arg(long)]
- /// Required for `note` and `source` nodes.
- summary: Option<String>,
- #[arg(long = "payload-json")]
- /// JSON object payload. `note` and `source` nodes require a non-empty `body` string.
- payload_json: Option<String>,
- #[arg(long = "payload-file")]
- payload_file: Option<PathBuf>,
- #[command(flatten)]
- tag_selection: ExplicitTagSelectionArgs,
- #[arg(long = "field")]
- fields: Vec<String>,
- #[arg(long = "annotation")]
- annotations: Vec<String>,
- #[arg(long = "parent")]
- parents: Vec<String>,
-}
-
-#[derive(Args)]
-struct NodeListArgs {
- #[command(flatten)]
- project: ProjectArg,
- #[arg(long)]
- frontier: Option<String>,
- #[arg(long, value_enum)]
- class: Option<CliNodeClass>,
- #[arg(long = "tag")]
- tags: Vec<String>,
- #[arg(long)]
- include_archived: bool,
- #[arg(long, default_value_t = 20)]
- limit: u32,
-}
-
-#[derive(Args, Default)]
-struct ExplicitTagSelectionArgs {
- #[arg(long = "tag")]
- tags: Vec<String>,
- #[arg(long, conflicts_with = "tags")]
- no_tags: bool,
-}
-
-#[derive(Args)]
-struct NodeShowArgs {
- #[command(flatten)]
- project: ProjectArg,
- #[arg(long)]
- node: String,
-}
-
-#[derive(Args)]
-struct NodeAnnotateArgs {
- #[command(flatten)]
- project: ProjectArg,
- #[arg(long)]
- node: String,
- #[arg(long)]
- body: String,
- #[arg(long)]
- label: Option<String>,
- #[arg(long)]
- visible: bool,
-}
-
-#[derive(Args)]
-struct NodeArchiveArgs {
- #[command(flatten)]
- project: ProjectArg,
- #[arg(long)]
- node: String,
+enum FrontierCommand {
+ Create(FrontierCreateArgs),
+ List(ProjectArg),
+ Read(FrontierSelectorArgs),
+ Open(FrontierSelectorArgs),
+ UpdateBrief(FrontierBriefUpdateArgs),
+ History(FrontierSelectorArgs),
}
-#[derive(Args)]
-struct NoteCommand {
- #[command(subcommand)]
- command: NoteSubcommand,
+#[derive(Subcommand)]
+enum HypothesisCommand {
+ Record(HypothesisRecordArgs),
+ List(HypothesisListArgs),
+ Read(HypothesisSelectorArgs),
+ Update(HypothesisUpdateArgs),
+ History(HypothesisSelectorArgs),
}
-#[derive(Args)]
-struct HypothesisCommand {
- #[command(subcommand)]
- command: HypothesisSubcommand,
+#[derive(Subcommand)]
+enum ExperimentCommand {
+ Open(ExperimentOpenArgs),
+ List(ExperimentListArgs),
+ Read(ExperimentSelectorArgs),
+ Update(ExperimentUpdateArgs),
+ Close(ExperimentCloseArgs),
+ History(ExperimentSelectorArgs),
}
#[derive(Subcommand)]
-enum NoteSubcommand {
- /// Record a quick off-path note.
- Quick(QuickNoteArgs),
+enum ArtifactCommand {
+ Record(ArtifactRecordArgs),
+ List(ArtifactListArgs),
+ Read(ArtifactSelectorArgs),
+ Update(ArtifactUpdateArgs),
+ History(ArtifactSelectorArgs),
}
#[derive(Subcommand)]
-enum HypothesisSubcommand {
- /// Record a core-path hypothesis with low ceremony.
- Add(QuickHypothesisArgs),
+enum MetricCommand {
+ Define(MetricDefineArgs),
+ Keys(MetricKeysArgs),
+ Best(MetricBestArgs),
}
#[derive(Subcommand)]
-enum TagCommand {
- /// Register a new repo-local tag.
- Add(TagAddArgs),
- /// List registered repo-local tags.
+enum DimensionCommand {
+ Define(DimensionDefineArgs),
List(ProjectArg),
}
-#[derive(Args)]
-struct SourceCommand {
- #[command(subcommand)]
- command: SourceSubcommand,
+#[derive(Subcommand)]
+enum McpCommand {
+ Serve(McpServeArgs),
+ Worker(McpWorkerArgs),
}
#[derive(Subcommand)]
-enum SourceSubcommand {
- /// Record imported source material or documentary context.
- Add(QuickSourceArgs),
+enum UiCommand {
+ Serve(UiServeArgs),
}
#[derive(Subcommand)]
-enum MetricCommand {
- /// Register a project-level metric definition.
- Define(MetricDefineArgs),
- /// List rankable numeric keys observed in completed experiments.
- Keys(MetricKeysArgs),
- /// Rank completed experiments by one numeric key.
- Best(MetricBestArgs),
- /// Re-run the idempotent legacy metric-plane normalization.
- Migrate(ProjectArg),
+enum SkillCommand {
+ List,
+ Install(SkillInstallArgs),
+ Show(SkillShowArgs),
}
-#[derive(Subcommand)]
-enum DimensionCommand {
- /// Register a project-level run dimension definition.
- Define(DimensionDefineArgs),
- /// List run dimensions and sample values observed in completed runs.
- List(ProjectArg),
+#[derive(Args, Clone)]
+struct ProjectArg {
+ #[arg(long, default_value = ".")]
+ project: PathBuf,
}
#[derive(Args)]
-struct MetricDefineArgs {
+struct TagAddArgs {
#[command(flatten)]
project: ProjectArg,
- /// Metric key used in experiment closure and ranking.
#[arg(long)]
- key: String,
- /// Canonical unit for this metric key.
- #[arg(long, value_enum)]
- unit: CliMetricUnit,
- /// Optimization direction for this metric key.
- #[arg(long, value_enum)]
- objective: CliOptimizationObjective,
- /// Optional human description shown in metric listings.
+ name: String,
#[arg(long)]
- description: Option<String>,
+ description: String,
}
#[derive(Args)]
-struct MetricKeysArgs {
+struct FrontierCreateArgs {
#[command(flatten)]
project: ProjectArg,
- /// Restrict results to one frontier.
#[arg(long)]
- frontier: Option<String>,
- /// Restrict results to one metric source.
- #[arg(long, value_enum)]
- source: Option<CliMetricSource>,
- /// Exact run-dimension filter in the form `key=value`.
- #[arg(long = "dimension")]
- dimensions: Vec<String>,
+ label: String,
+ #[arg(long)]
+ objective: String,
+ #[arg(long)]
+ slug: Option<String>,
}
#[derive(Args)]
-struct DimensionDefineArgs {
+struct FrontierSelectorArgs {
#[command(flatten)]
project: ProjectArg,
- /// Run-dimension key used to slice experiments.
#[arg(long)]
- key: String,
- /// Canonical value type for this run dimension.
- #[arg(long = "type", value_enum)]
- value_type: CliFieldValueType,
- /// Optional human description shown in dimension listings.
- #[arg(long)]
- description: Option<String>,
+ frontier: String,
}
#[derive(Args)]
-struct QuickNoteArgs {
+struct FrontierBriefUpdateArgs {
#[command(flatten)]
project: ProjectArg,
#[arg(long)]
- frontier: Option<String>,
+ frontier: String,
#[arg(long)]
- title: String,
+ expected_revision: Option<u64>,
#[arg(long)]
- summary: String,
+ situation: Option<String>,
#[arg(long)]
- body: String,
- #[command(flatten)]
- tag_selection: ExplicitTagSelectionArgs,
- #[arg(long = "parent")]
- parents: Vec<String>,
+ clear_situation: bool,
+ #[arg(long = "unknown")]
+ unknowns: Vec<String>,
+ #[arg(long = "roadmap")]
+ roadmap: Vec<String>,
}
#[derive(Args)]
-struct QuickHypothesisArgs {
+struct HypothesisRecordArgs {
#[command(flatten)]
project: ProjectArg,
#[arg(long)]
@@ -400,255 +253,330 @@ struct QuickHypothesisArgs {
summary: String,
#[arg(long)]
body: String,
+ #[arg(long)]
+ slug: Option<String>,
+ #[arg(long = "tag")]
+ tags: Vec<String>,
#[arg(long = "parent")]
parents: Vec<String>,
}
#[derive(Args)]
-struct TagAddArgs {
+struct HypothesisListArgs {
#[command(flatten)]
project: ProjectArg,
#[arg(long)]
- name: String,
+ frontier: Option<String>,
+ #[arg(long = "tag")]
+ tags: Vec<String>,
#[arg(long)]
- description: String,
+ include_archived: bool,
+ #[arg(long)]
+ limit: Option<u32>,
}
#[derive(Args)]
-struct QuickSourceArgs {
+struct HypothesisSelectorArgs {
#[command(flatten)]
project: ProjectArg,
#[arg(long)]
- frontier: Option<String>,
+ hypothesis: String,
+}
+
+#[derive(Args)]
+struct HypothesisUpdateArgs {
+ #[command(flatten)]
+ project: ProjectArg,
#[arg(long)]
- title: String,
+ hypothesis: String,
#[arg(long)]
- summary: String,
+ expected_revision: Option<u64>,
#[arg(long)]
- body: String,
+ title: Option<String>,
+ #[arg(long)]
+ summary: Option<String>,
+ #[arg(long)]
+ body: Option<String>,
+ #[arg(long = "tag")]
+ tags: Vec<String>,
+ #[arg(long = "replace-tags")]
+ replace_tags: bool,
+ #[arg(long = "parent")]
+ parents: Vec<String>,
+ #[arg(long = "replace-parents")]
+ replace_parents: bool,
+ #[arg(long, value_enum)]
+ state: Option<CliArchivePatch>,
+}
+
+#[derive(Args)]
+struct ExperimentOpenArgs {
#[command(flatten)]
- tag_selection: ExplicitTagSelectionArgs,
+ project: ProjectArg,
+ #[arg(long)]
+ hypothesis: String,
+ #[arg(long)]
+ title: String,
+ #[arg(long)]
+ summary: Option<String>,
+ #[arg(long)]
+ slug: Option<String>,
+ #[arg(long = "tag")]
+ tags: Vec<String>,
#[arg(long = "parent")]
parents: Vec<String>,
}
#[derive(Args)]
-struct SchemaFieldUpsertArgs {
+struct ExperimentListArgs {
#[command(flatten)]
project: ProjectArg,
#[arg(long)]
- name: String,
- #[arg(long = "class", value_enum)]
- classes: Vec<CliNodeClass>,
- #[arg(long, value_enum)]
- presence: CliFieldPresence,
- #[arg(long, value_enum)]
- severity: CliDiagnosticSeverity,
+ frontier: Option<String>,
+ #[arg(long)]
+ hypothesis: Option<String>,
#[arg(long, value_enum)]
- role: CliFieldRole,
- #[arg(long = "inference", value_enum)]
- inference_policy: CliInferencePolicy,
- #[arg(long = "type", value_enum)]
- value_type: Option<CliFieldValueType>,
+ status: Option<CliExperimentStatus>,
+ #[arg(long = "tag")]
+ tags: Vec<String>,
+ #[arg(long)]
+ include_archived: bool,
+ #[arg(long)]
+ limit: Option<u32>,
}
#[derive(Args)]
-struct SchemaFieldRemoveArgs {
+struct ExperimentSelectorArgs {
#[command(flatten)]
project: ProjectArg,
#[arg(long)]
- name: String,
- #[arg(long = "class", value_enum)]
- classes: Vec<CliNodeClass>,
+ experiment: String,
}
#[derive(Args)]
-struct MetricBestArgs {
+struct ExperimentUpdateArgs {
#[command(flatten)]
project: ProjectArg,
- /// Metric key to rank on.
#[arg(long)]
- key: String,
- /// Restrict results to one frontier.
+ experiment: String,
#[arg(long)]
- frontier: Option<String>,
- /// Restrict results to one metric source.
- #[arg(long, value_enum)]
- source: Option<CliMetricSource>,
- /// Explicit ordering for sources whose objective cannot be inferred.
+ expected_revision: Option<u64>,
+ #[arg(long)]
+ title: Option<String>,
+ #[arg(long)]
+ summary: Option<String>,
+ #[arg(long)]
+ clear_summary: bool,
+ #[arg(long = "tag")]
+ tags: Vec<String>,
+ #[arg(long = "replace-tags")]
+ replace_tags: bool,
+ #[arg(long = "parent")]
+ parents: Vec<String>,
+ #[arg(long = "replace-parents")]
+ replace_parents: bool,
#[arg(long, value_enum)]
- order: Option<CliMetricOrder>,
- /// Exact run-dimension filter in the form `key=value`.
- #[arg(long = "dimension")]
- dimensions: Vec<String>,
- /// Maximum number of ranked experiments to return.
- #[arg(long, default_value_t = 10)]
- limit: u32,
-}
-
-#[derive(Subcommand)]
-enum ExperimentCommand {
- /// Open a stateful experiment against one hypothesis.
- Open(ExperimentOpenArgs),
- /// List open experiments, optionally narrowed to one frontier.
- List(ExperimentListArgs),
- /// Close a core-path experiment with run data, note, and verdict.
- Close(Box<ExperimentCloseArgs>),
-}
-
-#[derive(Subcommand)]
-enum McpCommand {
- /// Serve the public stdio MCP host. If `--project` is omitted, the host starts unbound.
- Serve(McpServeArgs),
- #[command(hide = true)]
- Worker(McpWorkerArgs),
-}
-
-#[derive(Subcommand)]
-enum UiCommand {
- /// Serve the local read-only navigator.
- Serve(UiServeArgs),
+ state: Option<CliArchivePatch>,
+ #[arg(long = "outcome-json")]
+ outcome_json: Option<String>,
+ #[arg(long = "outcome-file")]
+ outcome_file: Option<PathBuf>,
}
#[derive(Args)]
struct ExperimentCloseArgs {
#[command(flatten)]
project: ProjectArg,
- #[arg(long = "experiment")]
- experiment_id: String,
- #[arg(long = "run-title")]
- run_title: String,
- #[arg(long = "run-summary")]
- run_summary: Option<String>,
- /// Repeat for each run dimension as `key=value`.
- #[arg(long = "dimension")]
- dimensions: Vec<String>,
- #[arg(long = "backend", value_enum, default_value_t = CliExecutionBackend::Worktree)]
+ #[arg(long)]
+ experiment: String,
+ #[arg(long)]
+ expected_revision: Option<u64>,
+ #[arg(long, value_enum)]
backend: CliExecutionBackend,
- #[arg(long = "cwd")]
- working_directory: Option<PathBuf>,
- /// Repeat for each argv token passed to the recorded command.
#[arg(long = "argv")]
argv: Vec<String>,
- /// Repeat for each environment override as `KEY=VALUE`.
+ #[arg(long)]
+ working_directory: Option<PathBuf>,
#[arg(long = "env")]
env: Vec<String>,
- /// Primary metric in the form `key=value`; key must be preregistered.
+ #[arg(long = "dimension")]
+ dimensions: Vec<String>,
#[arg(long = "primary-metric")]
primary_metric: String,
- /// Supporting metric in the form `key=value`; repeat as needed.
#[arg(long = "metric")]
- metrics: Vec<String>,
- #[arg(long)]
- note: String,
- #[arg(long = "next-hypothesis")]
- next_hypotheses: Vec<String>,
- #[arg(long = "verdict", value_enum)]
+ supporting_metrics: Vec<String>,
+ #[arg(long, value_enum)]
verdict: CliFrontierVerdict,
- #[arg(long = "analysis-title")]
- analysis_title: Option<String>,
- #[arg(long = "analysis-summary")]
+ #[arg(long)]
+ rationale: String,
+ #[arg(long)]
analysis_summary: Option<String>,
- #[arg(long = "analysis-body")]
+ #[arg(long)]
analysis_body: Option<String>,
- #[arg(long = "decision-title")]
- decision_title: String,
- #[arg(long = "decision-rationale")]
- decision_rationale: String,
}
#[derive(Args)]
-struct ExperimentOpenArgs {
+struct ArtifactRecordArgs {
#[command(flatten)]
project: ProjectArg,
#[arg(long)]
- frontier: String,
- #[arg(long = "hypothesis-node")]
- hypothesis_node: String,
+ kind: CliArtifactKind,
#[arg(long)]
- title: String,
+ label: String,
#[arg(long)]
summary: Option<String>,
+ #[arg(long)]
+ locator: String,
+ #[arg(long)]
+ media_type: Option<String>,
+ #[arg(long)]
+ slug: Option<String>,
+ #[arg(long = "attach")]
+ attachments: Vec<String>,
}
#[derive(Args)]
-struct ExperimentListArgs {
+struct ArtifactListArgs {
#[command(flatten)]
project: ProjectArg,
#[arg(long)]
frontier: Option<String>,
+ #[arg(long)]
+ kind: Option<CliArtifactKind>,
+ #[arg(long)]
+ attached_to: Option<String>,
+ #[arg(long)]
+ limit: Option<u32>,
}
-#[derive(Subcommand)]
-enum SkillCommand {
- /// List bundled skills.
- List,
- /// Install bundled skills into a Codex skill directory.
- Install(SkillInstallArgs),
- /// Print one bundled skill body.
- Show(SkillShowArgs),
+#[derive(Args)]
+struct ArtifactSelectorArgs {
+ #[command(flatten)]
+ project: ProjectArg,
+ #[arg(long)]
+ artifact: String,
}
#[derive(Args)]
-struct SkillInstallArgs {
- /// Bundled skill name. Defaults to all bundled skills.
+struct ArtifactUpdateArgs {
+ #[command(flatten)]
+ project: ProjectArg,
#[arg(long)]
- name: Option<String>,
- /// Destination root. Defaults to `~/.codex/skills`.
+ artifact: String,
#[arg(long)]
- destination: Option<PathBuf>,
+ expected_revision: Option<u64>,
+ #[arg(long)]
+ kind: Option<CliArtifactKind>,
+ #[arg(long)]
+ label: Option<String>,
+ #[arg(long)]
+ summary: Option<String>,
+ #[arg(long)]
+ clear_summary: bool,
+ #[arg(long)]
+ locator: Option<String>,
+ #[arg(long)]
+ media_type: Option<String>,
+ #[arg(long)]
+ clear_media_type: bool,
+ #[arg(long = "attach")]
+ attachments: Vec<String>,
+ #[arg(long = "replace-attachments")]
+ replace_attachments: bool,
}
#[derive(Args)]
-struct SkillShowArgs {
- /// Bundled skill name. Defaults to `fidget-spinner`.
+struct MetricDefineArgs {
+ #[command(flatten)]
+ project: ProjectArg,
#[arg(long)]
- name: Option<String>,
+ key: String,
+ #[arg(long, value_enum)]
+ unit: CliMetricUnit,
+ #[arg(long, value_enum)]
+ objective: CliOptimizationObjective,
+ #[arg(long, value_enum, default_value_t = CliMetricVisibility::Canonical)]
+ visibility: CliMetricVisibility,
+ #[arg(long)]
+ description: Option<String>,
}
#[derive(Args)]
-struct ProjectArg {
- /// Project root or any nested path inside a project containing `.fidget_spinner/`.
- #[arg(long, default_value = ".")]
- project: PathBuf,
+struct MetricKeysArgs {
+ #[command(flatten)]
+ project: ProjectArg,
+ #[arg(long)]
+ frontier: Option<String>,
+ #[arg(long, value_enum, default_value_t = CliMetricScope::Live)]
+ scope: CliMetricScope,
+}
+
+#[derive(Args)]
+struct MetricBestArgs {
+ #[command(flatten)]
+ project: ProjectArg,
+ #[arg(long)]
+ frontier: Option<String>,
+ #[arg(long)]
+ hypothesis: Option<String>,
+ #[arg(long)]
+ key: String,
+ #[arg(long = "dimension")]
+ dimensions: Vec<String>,
+ #[arg(long)]
+ include_rejected: bool,
+ #[arg(long)]
+ limit: Option<u32>,
+ #[arg(long, value_enum)]
+ order: Option<CliMetricRankOrder>,
+}
+
+#[derive(Args)]
+struct DimensionDefineArgs {
+ #[command(flatten)]
+ project: ProjectArg,
+ #[arg(long)]
+ key: String,
+ #[arg(long, value_enum)]
+ value_type: CliFieldValueType,
+ #[arg(long)]
+ description: Option<String>,
}
#[derive(Args)]
struct McpServeArgs {
- /// Optional initial project binding. When omitted, the MCP starts unbound.
#[arg(long)]
project: Option<PathBuf>,
}
#[derive(Args)]
struct McpWorkerArgs {
- #[arg(long)]
+ #[arg(long, default_value = ".")]
project: PathBuf,
}
#[derive(Args)]
struct UiServeArgs {
- /// Path to serve. Accepts a project root, `.fidget_spinner/`, descendants inside it,
- /// or a parent directory containing one unique descendant project store.
- #[arg(long = "path", alias = "project", default_value = ".")]
+ #[arg(long, default_value = ".")]
path: PathBuf,
- /// Bind address for the local navigator.
#[arg(long, default_value = "127.0.0.1:8913")]
bind: SocketAddr,
- /// Maximum rows rendered in list views.
- #[arg(long, default_value_t = 200)]
- limit: u32,
+ #[arg(long)]
+ limit: Option<u32>,
}
-#[derive(Clone, Copy, Debug, Eq, PartialEq, ValueEnum)]
-enum CliNodeClass {
- Contract,
- Hypothesis,
- Run,
- Analysis,
- Decision,
- Source,
- Note,
+#[derive(Args)]
+struct SkillInstallArgs {
+ #[arg(long)]
+ name: Option<String>,
+ #[arg(long)]
+ destination: Option<PathBuf>,
+}
+
+#[derive(Args)]
+struct SkillShowArgs {
+ #[arg(long)]
+ name: Option<String>,
}
#[derive(Clone, Copy, Debug, Eq, PartialEq, ValueEnum)]
@@ -668,23 +596,22 @@ enum CliOptimizationObjective {
}
#[derive(Clone, Copy, Debug, Eq, PartialEq, ValueEnum)]
-enum CliExecutionBackend {
- Local,
- Worktree,
- Ssh,
+enum CliMetricVisibility {
+ Canonical,
+ Minor,
+ Hidden,
+ Archived,
}
#[derive(Clone, Copy, Debug, Eq, PartialEq, ValueEnum)]
-enum CliMetricSource {
- RunMetric,
- HypothesisPayload,
- RunPayload,
- AnalysisPayload,
- DecisionPayload,
+enum CliMetricScope {
+ Live,
+ Visible,
+ All,
}
#[derive(Clone, Copy, Debug, Eq, PartialEq, ValueEnum)]
-enum CliMetricOrder {
+enum CliMetricRankOrder {
Asc,
Desc,
}
@@ -698,31 +625,23 @@ enum CliFieldValueType {
}
#[derive(Clone, Copy, Debug, Eq, PartialEq, ValueEnum)]
-enum CliDiagnosticSeverity {
- Error,
- Warning,
- Info,
-}
-
-#[derive(Clone, Copy, Debug, Eq, PartialEq, ValueEnum)]
-enum CliFieldPresence {
- Required,
- Recommended,
- Optional,
+enum CliArtifactKind {
+ Document,
+ Link,
+ Log,
+ Table,
+ Plot,
+ Dump,
+ Binary,
+ Other,
}
#[derive(Clone, Copy, Debug, Eq, PartialEq, ValueEnum)]
-enum CliFieldRole {
- Index,
- ProjectionGate,
- RenderOnly,
- Opaque,
-}
-
-#[derive(Clone, Copy, Debug, Eq, PartialEq, ValueEnum)]
-enum CliInferencePolicy {
- ManualOnly,
- ModelMayInfer,
+enum CliExecutionBackend {
+ Manual,
+ LocalProcess,
+ WorktreeProcess,
+ SshProcess,
}
#[derive(Clone, Copy, Debug, Eq, PartialEq, ValueEnum)]
@@ -733,63 +652,89 @@ enum CliFrontierVerdict {
Rejected,
}
-fn main() {
- if let Err(error) = run() {
- eprintln!("error: {error}");
- std::process::exit(1);
- }
+#[derive(Clone, Copy, Debug, Eq, PartialEq, ValueEnum)]
+enum CliExperimentStatus {
+ Open,
+ Closed,
}
-fn run() -> Result<(), StoreError> {
+#[derive(Clone, Copy, Debug, Eq, PartialEq, ValueEnum)]
+enum CliArchivePatch {
+ Archive,
+ Restore,
+}
+
+fn main() -> Result<(), StoreError> {
let cli = Cli::parse();
match cli.command {
Command::Init(args) => run_init(args),
- Command::Schema { command } => match command {
- SchemaCommand::Show(project) => {
- let store = open_store(&project.project)?;
- print_json(store.schema())
- }
- SchemaCommand::UpsertField(args) => run_schema_field_upsert(args),
- SchemaCommand::RemoveField(args) => run_schema_field_remove(args),
+ Command::Project { command } => match command {
+ ProjectCommand::Status(args) => print_json(&open_store(&args.project)?.status()?),
},
- Command::Frontier { command } => match command {
- FrontierCommand::Init(args) => run_frontier_init(args),
- FrontierCommand::Status(args) => run_frontier_status(args),
- },
- Command::Node { command } => match command {
- NodeCommand::Add(args) => run_node_add(args),
- NodeCommand::List(args) => run_node_list(args),
- NodeCommand::Show(args) => run_node_show(args),
- NodeCommand::Annotate(args) => run_node_annotate(args),
- NodeCommand::Archive(args) => run_node_archive(args),
+ Command::Tag { command } => match command {
+ TagCommand::Add(args) => run_tag_add(args),
+ TagCommand::List(args) => print_json(&open_store(&args.project)?.list_tags()?),
},
- Command::Note(command) => match command.command {
- NoteSubcommand::Quick(args) => run_quick_note(args),
+ Command::Frontier { command } => match command {
+ FrontierCommand::Create(args) => run_frontier_create(args),
+ FrontierCommand::List(args) => {
+ print_json(&open_store(&args.project)?.list_frontiers()?)
+ }
+ FrontierCommand::Read(args) => {
+ print_json(&open_store(&args.project.project)?.read_frontier(&args.frontier)?)
+ }
+ FrontierCommand::Open(args) => {
+ print_json(&open_store(&args.project.project)?.frontier_open(&args.frontier)?)
+ }
+ FrontierCommand::UpdateBrief(args) => run_frontier_brief_update(args),
+ FrontierCommand::History(args) => {
+ print_json(&open_store(&args.project.project)?.frontier_history(&args.frontier)?)
+ }
},
- Command::Hypothesis(command) => match command.command {
- HypothesisSubcommand::Add(args) => run_quick_hypothesis(args),
+ Command::Hypothesis { command } => match command {
+ HypothesisCommand::Record(args) => run_hypothesis_record(args),
+ HypothesisCommand::List(args) => run_hypothesis_list(args),
+ HypothesisCommand::Read(args) => {
+ print_json(&open_store(&args.project.project)?.read_hypothesis(&args.hypothesis)?)
+ }
+ HypothesisCommand::Update(args) => run_hypothesis_update(args),
+ HypothesisCommand::History(args) => print_json(
+ &open_store(&args.project.project)?.hypothesis_history(&args.hypothesis)?,
+ ),
},
- Command::Tag { command } => match command {
- TagCommand::Add(args) => run_tag_add(args),
- TagCommand::List(project) => run_tag_list(project),
+ Command::Experiment { command } => match command {
+ ExperimentCommand::Open(args) => run_experiment_open(args),
+ ExperimentCommand::List(args) => run_experiment_list(args),
+ ExperimentCommand::Read(args) => {
+ print_json(&open_store(&args.project.project)?.read_experiment(&args.experiment)?)
+ }
+ ExperimentCommand::Update(args) => run_experiment_update(args),
+ ExperimentCommand::Close(args) => run_experiment_close(args),
+ ExperimentCommand::History(args) => print_json(
+ &open_store(&args.project.project)?.experiment_history(&args.experiment)?,
+ ),
},
- Command::Source(command) => match command.command {
- SourceSubcommand::Add(args) => run_quick_source(args),
+ Command::Artifact { command } => match command {
+ ArtifactCommand::Record(args) => run_artifact_record(args),
+ ArtifactCommand::List(args) => run_artifact_list(args),
+ ArtifactCommand::Read(args) => {
+ print_json(&open_store(&args.project.project)?.read_artifact(&args.artifact)?)
+ }
+ ArtifactCommand::Update(args) => run_artifact_update(args),
+ ArtifactCommand::History(args) => {
+ print_json(&open_store(&args.project.project)?.artifact_history(&args.artifact)?)
+ }
},
Command::Metric { command } => match command {
MetricCommand::Define(args) => run_metric_define(args),
MetricCommand::Keys(args) => run_metric_keys(args),
MetricCommand::Best(args) => run_metric_best(args),
- MetricCommand::Migrate(project) => run_metric_migrate(project),
},
Command::Dimension { command } => match command {
DimensionCommand::Define(args) => run_dimension_define(args),
- DimensionCommand::List(project) => run_dimension_list(project),
- },
- Command::Experiment { command } => match command {
- ExperimentCommand::Open(args) => run_experiment_open(args),
- ExperimentCommand::List(args) => run_experiment_list(args),
- ExperimentCommand::Close(args) => run_experiment_close(*args),
+ DimensionCommand::List(args) => {
+ print_json(&open_store(&args.project)?.list_run_dimensions()?)
+ }
},
Command::Mcp { command } => match command {
McpCommand::Serve(args) => mcp::serve(args.project),
@@ -811,385 +756,278 @@ fn run() -> Result<(), StoreError> {
fn run_init(args: InitArgs) -> Result<(), StoreError> {
let project_root = utf8_path(args.project);
- let display_name = args
- .name
- .map(NonEmptyText::new)
- .transpose()?
- .unwrap_or(default_display_name_for_root(&project_root)?);
- let namespace = NonEmptyText::new(args.namespace)?;
- let store = ProjectStore::init(&project_root, display_name, namespace)?;
- println!("initialized {}", store.state_root());
- println!("project: {}", store.config().display_name);
- println!("schema: {}", store.state_root().join("schema.json"));
- maybe_print_gitignore_hint(&project_root)?;
- Ok(())
+ let store = ProjectStore::init(
+ &project_root,
+ args.name
+ .map(NonEmptyText::new)
+ .transpose()?
+ .unwrap_or(default_display_name_for_root(&project_root)?),
+ )?;
+ print_json(&store.status()?)
}
-fn run_frontier_init(args: FrontierInitArgs) -> Result<(), StoreError> {
+fn run_tag_add(args: TagAddArgs) -> Result<(), StoreError> {
let mut store = open_store(&args.project.project)?;
- let projection = store.create_frontier(CreateFrontierRequest {
- label: NonEmptyText::new(args.label)?,
- contract_title: NonEmptyText::new(args.contract_title)?,
- contract_summary: args.contract_summary.map(NonEmptyText::new).transpose()?,
- contract: FrontierContract {
- objective: NonEmptyText::new(args.objective)?,
- evaluation: fidget_spinner_core::EvaluationProtocol {
- benchmark_suites: to_text_set(args.benchmark_suites)?,
- primary_metric: MetricSpec {
- metric_key: NonEmptyText::new(args.primary_metric_key)?,
- unit: args.primary_metric_unit.into(),
- objective: args.primary_metric_objective.into(),
- },
- supporting_metrics: BTreeSet::new(),
- },
- promotion_criteria: to_text_vec(args.promotion_criteria)?,
- },
- })?;
- print_json(&projection)
+ print_json(&store.register_tag(
+ TagName::new(args.name)?,
+ NonEmptyText::new(args.description)?,
+ )?)
}
-fn run_frontier_status(args: FrontierStatusArgs) -> Result<(), StoreError> {
- let store = open_store(&args.project.project)?;
- if let Some(frontier) = args.frontier {
- let projection = store.frontier_projection(parse_frontier_id(&frontier)?)?;
- return print_json(&projection);
- }
- let frontiers = store.list_frontiers()?;
- if frontiers.len() == 1 {
- return print_json(&store.frontier_projection(frontiers[0].id)?);
- }
- print_json(&frontiers)
+fn run_frontier_create(args: FrontierCreateArgs) -> Result<(), StoreError> {
+ let mut store = open_store(&args.project.project)?;
+ print_json(&store.create_frontier(CreateFrontierRequest {
+ label: NonEmptyText::new(args.label)?,
+ objective: NonEmptyText::new(args.objective)?,
+ slug: args.slug.map(Slug::new).transpose()?,
+ })?)
}
-fn run_schema_field_upsert(args: SchemaFieldUpsertArgs) -> Result<(), StoreError> {
- let mut store = open_store(&args.project.project)?;
- let field = store.upsert_schema_field(UpsertSchemaFieldRequest {
- name: NonEmptyText::new(args.name)?,
- node_classes: parse_node_class_set(args.classes),
- presence: args.presence.into(),
- severity: args.severity.into(),
- role: args.role.into(),
- inference_policy: args.inference_policy.into(),
- value_type: args.value_type.map(Into::into),
- })?;
- print_json(&json!({
- "schema": store.schema().schema_ref(),
- "field": schema_field_json(&field),
- }))
-}
-
-fn run_schema_field_remove(args: SchemaFieldRemoveArgs) -> Result<(), StoreError> {
+fn run_frontier_brief_update(args: FrontierBriefUpdateArgs) -> Result<(), StoreError> {
let mut store = open_store(&args.project.project)?;
- let removed_count = store.remove_schema_field(RemoveSchemaFieldRequest {
- name: NonEmptyText::new(args.name)?,
- node_classes: (!args.classes.is_empty()).then(|| parse_node_class_set(args.classes)),
- })?;
- print_json(&json!({
- "schema": store.schema().schema_ref(),
- "removed_count": removed_count,
- }))
+ let roadmap = if args.roadmap.is_empty() {
+ None
+ } else {
+ Some(
+ args.roadmap
+ .into_iter()
+ .map(parse_roadmap_item)
+ .collect::<Result<Vec<_>, _>>()?,
+ )
+ };
+ let unknowns = if args.unknowns.is_empty() {
+ None
+ } else {
+ Some(to_non_empty_texts(args.unknowns)?)
+ };
+ print_json(&store.update_frontier_brief(UpdateFrontierBriefRequest {
+ frontier: args.frontier,
+ expected_revision: args.expected_revision,
+ situation: cli_text_patch(args.situation, args.clear_situation)?,
+ roadmap,
+ unknowns,
+ })?)
}
-fn run_node_add(args: NodeAddArgs) -> Result<(), StoreError> {
+fn run_hypothesis_record(args: HypothesisRecordArgs) -> Result<(), StoreError> {
let mut store = open_store(&args.project.project)?;
- let class: NodeClass = args.class.into();
- let frontier_id = args
- .frontier
- .as_deref()
- .map(parse_frontier_id)
- .transpose()?;
- let tags = optional_cli_tags(args.tag_selection, class == NodeClass::Note)?;
- let payload = load_payload(
- store.schema().schema_ref(),
- args.payload_json,
- args.payload_file,
- args.fields,
- )?;
- validate_cli_prose_payload(class, args.summary.as_deref(), &payload)?;
- let annotations = args
- .annotations
- .into_iter()
- .map(|body| Ok(NodeAnnotation::hidden(NonEmptyText::new(body)?)))
- .collect::<Result<Vec<_>, StoreError>>()?;
- let node = store.add_node(CreateNodeRequest {
- class,
- frontier_id,
+ print_json(&store.create_hypothesis(CreateHypothesisRequest {
+ frontier: args.frontier,
+ slug: args.slug.map(Slug::new).transpose()?,
title: NonEmptyText::new(args.title)?,
- summary: args.summary.map(NonEmptyText::new).transpose()?,
- tags,
- payload,
- annotations,
- attachments: lineage_attachments(args.parents)?,
- })?;
- print_json(&node)
+ summary: NonEmptyText::new(args.summary)?,
+ body: NonEmptyText::new(args.body)?,
+ tags: parse_tag_set(args.tags)?,
+ parents: parse_vertex_selectors(args.parents)?,
+ })?)
}
-fn run_node_list(args: NodeListArgs) -> Result<(), StoreError> {
+fn run_hypothesis_list(args: HypothesisListArgs) -> Result<(), StoreError> {
let store = open_store(&args.project.project)?;
- let items = store.list_nodes(ListNodesQuery {
- frontier_id: args
- .frontier
- .as_deref()
- .map(parse_frontier_id)
- .transpose()?,
- class: args.class.map(Into::into),
+ print_json(&store.list_hypotheses(ListHypothesesQuery {
+ frontier: args.frontier,
tags: parse_tag_set(args.tags)?,
include_archived: args.include_archived,
limit: args.limit,
- })?;
- print_json(&items)
-}
-
-fn run_node_show(args: NodeShowArgs) -> Result<(), StoreError> {
- let store = open_store(&args.project.project)?;
- let node_id = parse_node_id(&args.node)?;
- let node = store
- .get_node(node_id)?
- .ok_or(StoreError::NodeNotFound(node_id))?;
- print_json(&node)
+ })?)
}
-fn run_node_annotate(args: NodeAnnotateArgs) -> Result<(), StoreError> {
+fn run_hypothesis_update(args: HypothesisUpdateArgs) -> Result<(), StoreError> {
let mut store = open_store(&args.project.project)?;
- let annotation = NodeAnnotation {
- id: fidget_spinner_core::AnnotationId::fresh(),
- visibility: if args.visible {
- AnnotationVisibility::Visible
- } else {
- AnnotationVisibility::HiddenByDefault
- },
- label: args.label.map(NonEmptyText::new).transpose()?,
- body: NonEmptyText::new(args.body)?,
- created_at: time::OffsetDateTime::now_utc(),
+ let tags = if args.replace_tags {
+ Some(parse_tag_set(args.tags)?)
+ } else {
+ None
};
- store.annotate_node(parse_node_id(&args.node)?, annotation)?;
- println!("annotated {}", args.node);
- Ok(())
+ let parents = if args.replace_parents {
+ Some(parse_vertex_selectors(args.parents)?)
+ } else {
+ None
+ };
+ print_json(&store.update_hypothesis(UpdateHypothesisRequest {
+ hypothesis: args.hypothesis,
+ expected_revision: args.expected_revision,
+ title: args.title.map(NonEmptyText::new).transpose()?,
+ summary: args.summary.map(NonEmptyText::new).transpose()?,
+ body: args.body.map(NonEmptyText::new).transpose()?,
+ tags,
+ parents,
+ archived: archive_patch(args.state),
+ })?)
}
-fn run_node_archive(args: NodeArchiveArgs) -> Result<(), StoreError> {
+fn run_experiment_open(args: ExperimentOpenArgs) -> Result<(), StoreError> {
let mut store = open_store(&args.project.project)?;
- store.archive_node(parse_node_id(&args.node)?)?;
- println!("archived {}", args.node);
- Ok(())
+ print_json(&store.open_experiment(OpenExperimentRequest {
+ hypothesis: args.hypothesis,
+ slug: args.slug.map(Slug::new).transpose()?,
+ title: NonEmptyText::new(args.title)?,
+ summary: args.summary.map(NonEmptyText::new).transpose()?,
+ tags: parse_tag_set(args.tags)?,
+ parents: parse_vertex_selectors(args.parents)?,
+ })?)
}
-fn run_quick_note(args: QuickNoteArgs) -> Result<(), StoreError> {
+fn run_experiment_list(args: ExperimentListArgs) -> Result<(), StoreError> {
+ let store = open_store(&args.project.project)?;
+ print_json(&store.list_experiments(ListExperimentsQuery {
+ frontier: args.frontier,
+ hypothesis: args.hypothesis,
+ tags: parse_tag_set(args.tags)?,
+ include_archived: args.include_archived,
+ status: args.status.map(Into::into),
+ limit: args.limit,
+ })?)
+}
+
+fn run_experiment_update(args: ExperimentUpdateArgs) -> Result<(), StoreError> {
let mut store = open_store(&args.project.project)?;
- let payload = NodePayload::with_schema(
- store.schema().schema_ref(),
- json_object(json!({ "body": args.body }))?,
- );
- let node = store.add_node(CreateNodeRequest {
- class: NodeClass::Note,
- frontier_id: args
- .frontier
- .as_deref()
- .map(parse_frontier_id)
- .transpose()?,
- title: NonEmptyText::new(args.title)?,
- summary: Some(NonEmptyText::new(args.summary)?),
- tags: Some(explicit_cli_tags(args.tag_selection)?),
- payload,
- annotations: Vec::new(),
- attachments: lineage_attachments(args.parents)?,
- })?;
- print_json(&node)
+ let outcome =
+ load_optional_json::<ExperimentOutcomePatch>(args.outcome_json, args.outcome_file)?;
+ print_json(&store.update_experiment(UpdateExperimentRequest {
+ experiment: args.experiment,
+ expected_revision: args.expected_revision,
+ title: args.title.map(NonEmptyText::new).transpose()?,
+ summary: cli_text_patch(args.summary, args.clear_summary)?,
+ tags: if args.replace_tags {
+ Some(parse_tag_set(args.tags)?)
+ } else {
+ None
+ },
+ parents: if args.replace_parents {
+ Some(parse_vertex_selectors(args.parents)?)
+ } else {
+ None
+ },
+ archived: archive_patch(args.state),
+ outcome,
+ })?)
}
-fn run_quick_hypothesis(args: QuickHypothesisArgs) -> Result<(), StoreError> {
+fn run_experiment_close(args: ExperimentCloseArgs) -> Result<(), StoreError> {
let mut store = open_store(&args.project.project)?;
- let payload = NodePayload::with_schema(
- store.schema().schema_ref(),
- json_object(json!({ "body": args.body }))?,
- );
- let node = store.add_node(CreateNodeRequest {
- class: NodeClass::Hypothesis,
- frontier_id: Some(parse_frontier_id(&args.frontier)?),
- title: NonEmptyText::new(args.title)?,
- summary: Some(NonEmptyText::new(args.summary)?),
- tags: None,
- payload,
- annotations: Vec::new(),
- attachments: lineage_attachments(args.parents)?,
- })?;
- print_json(&node)
+ let analysis = match (args.analysis_summary, args.analysis_body) {
+ (Some(summary), Some(body)) => Some(ExperimentAnalysis {
+ summary: NonEmptyText::new(summary)?,
+ body: NonEmptyText::new(body)?,
+ }),
+ (None, None) => None,
+ _ => {
+ return Err(invalid_input(
+ "analysis requires both --analysis-summary and --analysis-body",
+ ));
+ }
+ };
+ print_json(
+ &store.close_experiment(CloseExperimentRequest {
+ experiment: args.experiment,
+ expected_revision: args.expected_revision,
+ backend: args.backend.into(),
+ command: CommandRecipe::new(
+ args.working_directory.map(utf8_path),
+ to_non_empty_texts(args.argv)?,
+ parse_env(args.env),
+ )?,
+ dimensions: parse_dimension_assignments(args.dimensions)?,
+ primary_metric: parse_metric_value_assignment(&args.primary_metric)?,
+ supporting_metrics: args
+ .supporting_metrics
+ .into_iter()
+ .map(|raw| parse_metric_value_assignment(&raw))
+ .collect::<Result<Vec<_>, _>>()?,
+ verdict: args.verdict.into(),
+ rationale: NonEmptyText::new(args.rationale)?,
+ analysis,
+ })?,
+ )
}
-fn run_tag_add(args: TagAddArgs) -> Result<(), StoreError> {
+fn run_artifact_record(args: ArtifactRecordArgs) -> Result<(), StoreError> {
let mut store = open_store(&args.project.project)?;
- let tag = store.add_tag(
- TagName::new(args.name)?,
- NonEmptyText::new(args.description)?,
- )?;
- print_json(&tag)
+ print_json(&store.create_artifact(CreateArtifactRequest {
+ slug: args.slug.map(Slug::new).transpose()?,
+ kind: args.kind.into(),
+ label: NonEmptyText::new(args.label)?,
+ summary: args.summary.map(NonEmptyText::new).transpose()?,
+ locator: NonEmptyText::new(args.locator)?,
+ media_type: args.media_type.map(NonEmptyText::new).transpose()?,
+ attachments: parse_attachment_selectors(args.attachments)?,
+ })?)
}
-fn run_tag_list(args: ProjectArg) -> Result<(), StoreError> {
- let store = open_store(&args.project)?;
- print_json(&store.list_tags()?)
+fn run_artifact_list(args: ArtifactListArgs) -> Result<(), StoreError> {
+ let store = open_store(&args.project.project)?;
+ print_json(
+ &store.list_artifacts(ListArtifactsQuery {
+ frontier: args.frontier,
+ kind: args.kind.map(Into::into),
+ attached_to: args
+ .attached_to
+ .as_deref()
+ .map(parse_attachment_selector)
+ .transpose()?,
+ limit: args.limit,
+ })?,
+ )
}
-fn run_quick_source(args: QuickSourceArgs) -> Result<(), StoreError> {
+fn run_artifact_update(args: ArtifactUpdateArgs) -> Result<(), StoreError> {
let mut store = open_store(&args.project.project)?;
- let payload = NodePayload::with_schema(
- store.schema().schema_ref(),
- json_object(json!({ "body": args.body }))?,
- );
- let node = store.add_node(CreateNodeRequest {
- class: NodeClass::Source,
- frontier_id: args
- .frontier
- .as_deref()
- .map(parse_frontier_id)
- .transpose()?,
- title: NonEmptyText::new(args.title)?,
- summary: Some(NonEmptyText::new(args.summary)?),
- tags: optional_cli_tags(args.tag_selection, false)?,
- payload,
- annotations: Vec::new(),
- attachments: lineage_attachments(args.parents)?,
- })?;
- print_json(&node)
+ print_json(&store.update_artifact(UpdateArtifactRequest {
+ artifact: args.artifact,
+ expected_revision: args.expected_revision,
+ kind: args.kind.map(Into::into),
+ label: args.label.map(NonEmptyText::new).transpose()?,
+ summary: cli_text_patch(args.summary, args.clear_summary)?,
+ locator: args.locator.map(NonEmptyText::new).transpose()?,
+ media_type: cli_text_patch(args.media_type, args.clear_media_type)?,
+ attachments: if args.replace_attachments {
+ Some(parse_attachment_selectors(args.attachments)?)
+ } else {
+ None
+ },
+ })?)
}
fn run_metric_define(args: MetricDefineArgs) -> Result<(), StoreError> {
let mut store = open_store(&args.project.project)?;
- let record = store.define_metric(DefineMetricRequest {
+ print_json(&store.define_metric(DefineMetricRequest {
key: NonEmptyText::new(args.key)?,
unit: args.unit.into(),
objective: args.objective.into(),
+ visibility: args.visibility.into(),
description: args.description.map(NonEmptyText::new).transpose()?,
- })?;
- print_json(&record)
+ })?)
}
/// `spin metric keys`: list the metric keys visible for a frontier and scope.
fn run_metric_keys(args: MetricKeysArgs) -> Result<(), StoreError> {
    let store = open_store(&args.project.project)?;
    let query = MetricKeysQuery {
        frontier: args.frontier,
        scope: args.scope.into(),
    };
    print_json(&store.metric_keys(query)?)
}
fn run_metric_best(args: MetricBestArgs) -> Result<(), StoreError> {
let store = open_store(&args.project.project)?;
- let entries = store.best_metrics(MetricBestQuery {
+ print_json(&store.metric_best(MetricBestQuery {
+ frontier: args.frontier,
+ hypothesis: args.hypothesis,
key: NonEmptyText::new(args.key)?,
- frontier_id: args
- .frontier
- .as_deref()
- .map(parse_frontier_id)
- .transpose()?,
- source: args.source.map(Into::into),
- dimensions: coerce_cli_dimension_filters(&store, args.dimensions)?,
- order: args.order.map(Into::into),
+ dimensions: parse_dimension_assignments(args.dimensions)?,
+ include_rejected: args.include_rejected,
limit: args.limit,
- })?;
- print_json(&entries)
-}
-
-fn run_metric_migrate(args: ProjectArg) -> Result<(), StoreError> {
- let mut store = open_store(&args.project)?;
- print_json(&store.migrate_metric_plane()?)
+ order: args.order.map(Into::into),
+ })?)
}
fn run_dimension_define(args: DimensionDefineArgs) -> Result<(), StoreError> {
let mut store = open_store(&args.project.project)?;
- let record = store.define_run_dimension(DefineRunDimensionRequest {
+ print_json(&store.define_run_dimension(DefineRunDimensionRequest {
key: NonEmptyText::new(args.key)?,
value_type: args.value_type.into(),
description: args.description.map(NonEmptyText::new).transpose()?,
- })?;
- print_json(&record)
-}
-
-fn run_dimension_list(args: ProjectArg) -> Result<(), StoreError> {
- let store = open_store(&args.project)?;
- print_json(&store.list_run_dimensions()?)
-}
-
-fn run_experiment_open(args: ExperimentOpenArgs) -> Result<(), StoreError> {
- let mut store = open_store(&args.project.project)?;
- let summary = args.summary.map(NonEmptyText::new).transpose()?;
- let experiment = store.open_experiment(OpenExperimentRequest {
- frontier_id: parse_frontier_id(&args.frontier)?,
- hypothesis_node_id: parse_node_id(&args.hypothesis_node)?,
- title: NonEmptyText::new(args.title)?,
- summary,
- })?;
- print_json(&experiment)
-}
-
-fn run_experiment_list(args: ExperimentListArgs) -> Result<(), StoreError> {
- let store = open_store(&args.project.project)?;
- let frontier_id = args
- .frontier
- .as_deref()
- .map(parse_frontier_id)
- .transpose()?;
- print_json(&store.list_open_experiments(frontier_id)?)
-}
-
-fn run_experiment_close(args: ExperimentCloseArgs) -> Result<(), StoreError> {
- let mut store = open_store(&args.project.project)?;
- let command = CommandRecipe::new(
- args.working_directory
- .map(utf8_path)
- .unwrap_or_else(|| store.project_root().to_path_buf()),
- to_text_vec(args.argv)?,
- parse_env(args.env),
- )?;
- let analysis = match (
- args.analysis_title,
- args.analysis_summary,
- args.analysis_body,
- ) {
- (Some(title), Some(summary), Some(body)) => Some(ExperimentAnalysisDraft {
- title: NonEmptyText::new(title)?,
- summary: NonEmptyText::new(summary)?,
- body: NonEmptyText::new(body)?,
- }),
- (None, None, None) => None,
- _ => {
- return Err(StoreError::Json(serde_json::Error::io(
- std::io::Error::new(
- std::io::ErrorKind::InvalidInput,
- "analysis-title, analysis-summary, and analysis-body must be provided together",
- ),
- )));
- }
- };
- let receipt = store.close_experiment(CloseExperimentRequest {
- experiment_id: parse_experiment_id(&args.experiment_id)?,
- run_title: NonEmptyText::new(args.run_title)?,
- run_summary: args.run_summary.map(NonEmptyText::new).transpose()?,
- backend: args.backend.into(),
- dimensions: coerce_cli_dimension_filters(&store, args.dimensions)?,
- command,
- primary_metric: parse_metric_value(args.primary_metric)?,
- supporting_metrics: args
- .metrics
- .into_iter()
- .map(parse_metric_value)
- .collect::<Result<Vec<_>, _>>()?,
- note: FrontierNote {
- summary: NonEmptyText::new(args.note)?,
- next_hypotheses: to_text_vec(args.next_hypotheses)?,
- },
- verdict: args.verdict.into(),
- analysis,
- decision_title: NonEmptyText::new(args.decision_title)?,
- decision_rationale: NonEmptyText::new(args.decision_rationale)?,
- })?;
- print_json(&receipt)
+ })?)
}
fn run_skill_install(args: SkillInstallArgs) -> Result<(), StoreError> {
@@ -1240,11 +1078,11 @@ fn install_skill(skill: bundled_skill::BundledSkill, destination: &Path) -> Resu
Ok(())
}
-fn open_store(path: &Path) -> Result<ProjectStore, StoreError> {
+pub(crate) fn open_store(path: &Path) -> Result<ProjectStore, StoreError> {
ProjectStore::open(utf8_path(path.to_path_buf()))
}
-fn resolve_ui_project_root(path: &Utf8Path) -> Result<Utf8PathBuf, StoreError> {
+pub(crate) fn resolve_ui_project_root(path: &Utf8Path) -> Result<Utf8PathBuf, StoreError> {
if let Some(project_root) = fidget_spinner_store_sqlite::discover_project_root(path) {
return Ok(project_root);
}
@@ -1266,7 +1104,7 @@ fn resolve_ui_project_root(path: &Utf8Path) -> Result<Utf8PathBuf, StoreError> {
}
}
-fn open_or_init_store_for_binding(path: &Path) -> Result<ProjectStore, StoreError> {
+pub(crate) fn open_or_init_store_for_binding(path: &Path) -> Result<ProjectStore, StoreError> {
let requested_root = utf8_path(path.to_path_buf());
match ProjectStore::open(requested_root.clone()) {
Ok(store) => Ok(store),
@@ -1275,17 +1113,13 @@ fn open_or_init_store_for_binding(path: &Path) -> Result<ProjectStore, StoreErro
if !is_empty_directory(&project_root)? {
return Err(StoreError::MissingProjectStore(requested_root));
}
- ProjectStore::init(
- &project_root,
- default_display_name_for_root(&project_root)?,
- default_namespace_for_root(&project_root)?,
- )
+ ProjectStore::init(&project_root, default_display_name_for_root(&project_root)?)
}
Err(error) => Err(error),
}
}
-fn utf8_path(path: impl Into<PathBuf>) -> Utf8PathBuf {
+pub(crate) fn utf8_path(path: impl Into<PathBuf>) -> Utf8PathBuf {
Utf8PathBuf::from(path.into().to_string_lossy().into_owned())
}
@@ -1295,7 +1129,7 @@ fn binding_bootstrap_root(path: &Utf8Path) -> Result<Utf8PathBuf, StoreError> {
.parent()
.map_or_else(|| path.to_path_buf(), Utf8Path::to_path_buf)),
Ok(_) => Ok(path.to_path_buf()),
- Err(error) if error.kind() == std::io::ErrorKind::NotFound => Ok(path.to_path_buf()),
+ Err(error) if error.kind() == io::ErrorKind::NotFound => Ok(path.to_path_buf()),
Err(error) => Err(StoreError::from(error)),
}
}
@@ -1307,7 +1141,7 @@ fn is_empty_directory(path: &Utf8Path) -> Result<bool, StoreError> {
Ok(entries.next().transpose()?.is_none())
}
Ok(_) => Ok(false),
- Err(error) if error.kind() == std::io::ErrorKind::NotFound => Ok(false),
+ Err(error) if error.kind() == io::ErrorKind::NotFound => Ok(false),
Err(error) => Err(StoreError::from(error)),
}
}
@@ -1325,7 +1159,7 @@ fn collect_descendant_project_roots(
) -> Result<(), StoreError> {
let metadata = match fs::metadata(path.as_std_path()) {
Ok(metadata) => metadata,
- Err(error) if error.kind() == std::io::ErrorKind::NotFound => return Ok(()),
+ Err(error) if error.kind() == io::ErrorKind::NotFound => return Ok(()),
Err(error) => return Err(StoreError::from(error)),
};
if metadata.is_file() {
@@ -1362,45 +1196,6 @@ fn default_display_name_for_root(project_root: &Utf8Path) -> Result<NonEmptyText
.map_err(StoreError::from)
}
-fn default_namespace_for_root(project_root: &Utf8Path) -> Result<NonEmptyText, StoreError> {
- let slug = slugify_namespace_component(project_root.file_name().unwrap_or("project"));
- NonEmptyText::new(format!("local.{slug}")).map_err(StoreError::from)
-}
-
-fn slugify_namespace_component(raw: &str) -> String {
- let mut slug = String::new();
- let mut previous_was_separator = false;
- for character in raw.chars().flat_map(char::to_lowercase) {
- if character.is_ascii_alphanumeric() {
- slug.push(character);
- previous_was_separator = false;
- continue;
- }
- if !previous_was_separator {
- slug.push('_');
- previous_was_separator = true;
- }
- }
- let slug = slug.trim_matches('_').to_owned();
- if slug.is_empty() {
- "project".to_owned()
- } else {
- slug
- }
-}
-
-fn to_text_vec(values: Vec<String>) -> Result<Vec<NonEmptyText>, StoreError> {
- values
- .into_iter()
- .map(NonEmptyText::new)
- .collect::<Result<Vec<_>, _>>()
- .map_err(StoreError::from)
-}
-
-fn to_text_set(values: Vec<String>) -> Result<BTreeSet<NonEmptyText>, StoreError> {
- to_text_vec(values).map(BTreeSet::from_iter)
-}
-
fn parse_tag_set(values: Vec<String>) -> Result<BTreeSet<TagName>, StoreError> {
values
.into_iter()
@@ -1409,290 +1204,198 @@ fn parse_tag_set(values: Vec<String>) -> Result<BTreeSet<TagName>, StoreError> {
.map_err(StoreError::from)
}
-fn explicit_cli_tags(selection: ExplicitTagSelectionArgs) -> Result<BTreeSet<TagName>, StoreError> {
- optional_cli_tags(selection, true)?.ok_or(StoreError::NoteTagsRequired)
-}
-
-fn optional_cli_tags(
- selection: ExplicitTagSelectionArgs,
- required: bool,
-) -> Result<Option<BTreeSet<TagName>>, StoreError> {
- if selection.no_tags {
- return Ok(Some(BTreeSet::new()));
- }
- if selection.tags.is_empty() {
- return if required {
- Err(StoreError::NoteTagsRequired)
- } else {
- Ok(None)
- };
- }
- Ok(Some(parse_tag_set(selection.tags)?))
-}
-
-fn parse_env(values: Vec<String>) -> BTreeMap<String, String> {
+pub(crate) fn parse_vertex_selectors(
+ values: Vec<String>,
+) -> Result<Vec<VertexSelector>, StoreError> {
values
.into_iter()
- .filter_map(|entry| {
- let (key, value) = entry.split_once('=')?;
- Some((key.to_owned(), value.to_owned()))
+ .map(|raw| {
+ let (kind, selector) = raw
+ .split_once(':')
+ .ok_or_else(|| invalid_input("expected parent selector in the form `hypothesis:<selector>` or `experiment:<selector>`"))?;
+ match kind {
+ "hypothesis" => Ok(VertexSelector::Hypothesis(selector.to_owned())),
+ "experiment" => Ok(VertexSelector::Experiment(selector.to_owned())),
+ _ => Err(invalid_input(format!("unknown parent kind `{kind}`"))),
+ }
})
.collect()
}
-fn lineage_attachments(parents: Vec<String>) -> Result<Vec<EdgeAttachment>, StoreError> {
- parents
+pub(crate) fn parse_attachment_selectors(
+ values: Vec<String>,
+) -> Result<Vec<AttachmentSelector>, StoreError> {
+ values
.into_iter()
- .map(|parent| {
- Ok(EdgeAttachment {
- node_id: parse_node_id(&parent)?,
- kind: fidget_spinner_core::EdgeKind::Lineage,
- direction: EdgeAttachmentDirection::ExistingToNew,
- })
- })
+ .map(|raw| parse_attachment_selector(&raw))
.collect()
}
-fn load_payload(
- schema: fidget_spinner_core::PayloadSchemaRef,
- payload_json: Option<String>,
- payload_file: Option<PathBuf>,
- fields: Vec<String>,
-) -> Result<NodePayload, StoreError> {
- let mut map = Map::new();
- if let Some(text) = payload_json {
- map.extend(json_object(serde_json::from_str::<Value>(&text)?)?);
- }
- if let Some(path) = payload_file {
- let text = fs::read_to_string(path)?;
- map.extend(json_object(serde_json::from_str::<Value>(&text)?)?);
+pub(crate) fn parse_attachment_selector(raw: &str) -> Result<AttachmentSelector, StoreError> {
+ let (kind, selector) = raw
+ .split_once(':')
+ .ok_or_else(|| invalid_input("expected attachment selector in the form `frontier:<selector>`, `hypothesis:<selector>`, or `experiment:<selector>`"))?;
+ match kind {
+ "frontier" => Ok(AttachmentSelector::Frontier(selector.to_owned())),
+ "hypothesis" => Ok(AttachmentSelector::Hypothesis(selector.to_owned())),
+ "experiment" => Ok(AttachmentSelector::Experiment(selector.to_owned())),
+ _ => Err(invalid_input(format!("unknown attachment kind `{kind}`"))),
}
- for field in fields {
- let Some((key, raw_value)) = field.split_once('=') else {
- continue;
- };
- let value = serde_json::from_str::<Value>(raw_value).unwrap_or_else(|_| json!(raw_value));
- let _ = map.insert(key.to_owned(), value);
- }
- Ok(NodePayload::with_schema(schema, map))
}
-fn validate_cli_prose_payload(
- class: NodeClass,
- summary: Option<&str>,
- payload: &NodePayload,
-) -> Result<(), StoreError> {
- if !matches!(class, NodeClass::Note | NodeClass::Source) {
- return Ok(());
- }
- if summary.is_none() {
- return Err(StoreError::ProseSummaryRequired(class));
- }
- match payload.field("body") {
- Some(Value::String(body)) if !body.trim().is_empty() => Ok(()),
- _ => Err(StoreError::ProseBodyRequired(class)),
- }
+fn parse_roadmap_item(raw: String) -> Result<FrontierRoadmapItemDraft, StoreError> {
+ let mut parts = raw.splitn(3, ':');
+ let rank = parts
+ .next()
+ .ok_or_else(|| invalid_input("roadmap items must look like `rank:hypothesis[:summary]`"))?
+ .parse::<u32>()
+ .map_err(|error| invalid_input(format!("invalid roadmap rank: {error}")))?;
+ let hypothesis = parts
+ .next()
+ .ok_or_else(|| invalid_input("roadmap items must include a hypothesis selector"))?
+ .to_owned();
+ let summary = parts
+ .next()
+ .map(NonEmptyText::new)
+ .transpose()
+ .map_err(StoreError::from)?;
+ Ok(FrontierRoadmapItemDraft {
+ rank,
+ hypothesis,
+ summary,
+ })
}
-fn json_object(value: Value) -> Result<Map<String, Value>, StoreError> {
- match value {
- Value::Object(map) => Ok(map),
- other => Err(invalid_input(format!(
- "expected JSON object, got {other:?}"
- ))),
- }
/// Split `KEY=VALUE` entries into an environment map.
///
/// Entries without a `=` are silently dropped (best-effort CLI input); when a
/// key repeats, the last occurrence wins. Only the first `=` separates key
/// from value, so values may themselves contain `=`.
pub(crate) fn parse_env(values: Vec<String>) -> BTreeMap<String, String> {
    let mut env = BTreeMap::new();
    for entry in values {
        if let Some((key, value)) = entry.split_once('=') {
            env.insert(key.to_owned(), value.to_owned());
        }
    }
    env
}
-fn schema_field_json(field: &ProjectFieldSpec) -> Value {
- json!({
- "name": field.name,
- "node_classes": field.node_classes.iter().map(ToString::to_string).collect::<Vec<_>>(),
- "presence": field.presence.as_str(),
- "severity": field.severity.as_str(),
- "role": field.role.as_str(),
- "inference_policy": field.inference_policy.as_str(),
- "value_type": field.value_type.map(FieldValueType::as_str),
+fn parse_metric_value_assignment(
+ raw: &str,
+) -> Result<fidget_spinner_core::MetricValue, StoreError> {
+ let (key, value) = raw
+ .split_once('=')
+ .ok_or_else(|| invalid_input("expected metric assignment in the form `key=value`"))?;
+ let value = value
+ .parse::<f64>()
+ .map_err(|error| invalid_input(format!("invalid metric value `{value}`: {error}")))?;
+ Ok(fidget_spinner_core::MetricValue {
+ key: NonEmptyText::new(key.to_owned())?,
+ value,
})
}
-fn parse_node_class_set(classes: Vec<CliNodeClass>) -> BTreeSet<NodeClass> {
- classes.into_iter().map(Into::into).collect()
-}
-
-fn run_git(project_root: &Utf8Path, args: &[&str]) -> Result<Option<String>, StoreError> {
- let output = std::process::Command::new("git")
- .arg("-C")
- .arg(project_root.as_str())
- .args(args)
- .output()?;
- if !output.status.success() {
- return Ok(None);
- }
- let text = String::from_utf8_lossy(&output.stdout).trim().to_owned();
- if text.is_empty() {
- return Ok(None);
- }
- Ok(Some(text))
+pub(crate) fn parse_dimension_assignments(
+ values: Vec<String>,
+) -> Result<BTreeMap<NonEmptyText, RunDimensionValue>, StoreError> {
+ values
+ .into_iter()
+ .map(|entry| {
+ let (key, raw_value) = entry.split_once('=').ok_or_else(|| {
+ invalid_input("expected dimension assignment in the form `key=value`")
+ })?;
+ let json_value = serde_json::from_str::<Value>(raw_value)
+ .unwrap_or_else(|_| Value::String(raw_value.to_owned()));
+ Ok((
+ NonEmptyText::new(key.to_owned())?,
+ json_to_dimension_value(json_value)?,
+ ))
+ })
+ .collect()
}
-fn maybe_print_gitignore_hint(project_root: &Utf8Path) -> Result<(), StoreError> {
- if run_git(project_root, &["rev-parse", "--show-toplevel"])?.is_none() {
- return Ok(());
- }
-
- let status = std::process::Command::new("git")
- .arg("-C")
- .arg(project_root.as_str())
- .args(["check-ignore", "-q", ".fidget_spinner"])
- .status()?;
-
- match status.code() {
- Some(0) => Ok(()),
- Some(1) => {
- println!(
- "note: add `.fidget_spinner/` to `.gitignore` or `.git/info/exclude` if you do not want local state in `git status`"
- );
- Ok(())
+fn json_to_dimension_value(value: Value) -> Result<RunDimensionValue, StoreError> {
+ match value {
+ Value::String(raw) => {
+ if time::OffsetDateTime::parse(&raw, &time::format_description::well_known::Rfc3339)
+ .is_ok()
+ {
+ Ok(RunDimensionValue::Timestamp(NonEmptyText::new(raw)?))
+ } else {
+ Ok(RunDimensionValue::String(NonEmptyText::new(raw)?))
+ }
}
- _ => Ok(()),
+ Value::Number(number) => number
+ .as_f64()
+ .map(RunDimensionValue::Numeric)
+ .ok_or_else(|| invalid_input("numeric dimension values must fit into f64")),
+ Value::Bool(value) => Ok(RunDimensionValue::Boolean(value)),
+ _ => Err(invalid_input(
+ "dimension values must be string, number, boolean, or RFC3339 timestamp",
+ )),
}
}
-fn parse_metric_value(raw: String) -> Result<MetricValue, StoreError> {
- let Some((key, value)) = raw.split_once('=') else {
- return Err(invalid_input("metrics must look like key=value"));
- };
- Ok(MetricValue {
- key: NonEmptyText::new(key)?,
- value: value
- .parse::<f64>()
- .map_err(|error| invalid_input(format!("invalid metric value: {error}")))?,
- })
-}
-
-fn coerce_cli_dimension_filters(
- store: &ProjectStore,
- raw_dimensions: Vec<String>,
-) -> Result<BTreeMap<NonEmptyText, fidget_spinner_core::RunDimensionValue>, StoreError> {
- let definitions = store
- .list_run_dimensions()?
- .into_iter()
- .map(|summary| (summary.key.to_string(), summary.value_type))
- .collect::<BTreeMap<_, _>>();
- let raw_dimensions = parse_dimension_assignments(raw_dimensions)?
- .into_iter()
- .map(|(key, raw_value)| {
- let Some(value_type) = definitions.get(&key) else {
- return Err(invalid_input(format!(
- "unknown run dimension `{key}`; register it first"
- )));
- };
- Ok((key, parse_cli_dimension_value(*value_type, &raw_value)?))
- })
- .collect::<Result<BTreeMap<_, _>, StoreError>>()?;
- store.coerce_run_dimensions(raw_dimensions)
-}
-
-fn parse_dimension_assignments(
- raw_dimensions: Vec<String>,
-) -> Result<BTreeMap<String, String>, StoreError> {
- raw_dimensions
+fn to_non_empty_texts(values: Vec<String>) -> Result<Vec<NonEmptyText>, StoreError> {
+ values
.into_iter()
- .map(|raw| {
- let Some((key, value)) = raw.split_once('=') else {
- return Err(invalid_input("dimensions must look like key=value"));
- };
- Ok((key.to_owned(), value.to_owned()))
- })
- .collect()
+ .map(NonEmptyText::new)
+ .collect::<Result<Vec<_>, _>>()
+ .map_err(StoreError::from)
}
-fn parse_cli_dimension_value(value_type: FieldValueType, raw: &str) -> Result<Value, StoreError> {
- match value_type {
- FieldValueType::String | FieldValueType::Timestamp => Ok(Value::String(raw.to_owned())),
- FieldValueType::Numeric => Ok(json!(raw.parse::<f64>().map_err(|error| {
- invalid_input(format!("invalid numeric dimension value: {error}"))
- })?)),
- FieldValueType::Boolean => match raw {
- "true" => Ok(Value::Bool(true)),
- "false" => Ok(Value::Bool(false)),
- other => Err(invalid_input(format!(
- "invalid boolean dimension value `{other}`"
- ))),
- },
+fn load_optional_json<T: for<'de> serde::Deserialize<'de>>(
+ inline: Option<String>,
+ file: Option<PathBuf>,
+) -> Result<Option<T>, StoreError> {
+ match (inline, file) {
+ (Some(raw), None) => serde_json::from_str(&raw)
+ .map(Some)
+ .map_err(StoreError::from),
+ (None, Some(path)) => serde_json::from_slice(&fs::read(path)?)
+ .map(Some)
+ .map_err(StoreError::from),
+ (None, None) => Ok(None),
+ (Some(_), Some(_)) => Err(invalid_input(
+ "use only one of --outcome-json or --outcome-file",
+ )),
}
}
-fn parse_metric_unit(raw: &str) -> Result<MetricUnit, StoreError> {
- match raw {
- "seconds" => Ok(MetricUnit::Seconds),
- "bytes" => Ok(MetricUnit::Bytes),
- "count" => Ok(MetricUnit::Count),
- "ratio" => Ok(MetricUnit::Ratio),
- "custom" => Ok(MetricUnit::Custom),
- other => Err(invalid_input(format!("unknown metric unit `{other}`"))),
+const fn archive_patch(state: Option<CliArchivePatch>) -> Option<bool> {
+ match state {
+ None => None,
+ Some(CliArchivePatch::Archive) => Some(true),
+ Some(CliArchivePatch::Restore) => Some(false),
}
}
-fn parse_optimization_objective(raw: &str) -> Result<OptimizationObjective, StoreError> {
- match raw {
- "minimize" => Ok(OptimizationObjective::Minimize),
- "maximize" => Ok(OptimizationObjective::Maximize),
- "target" => Ok(OptimizationObjective::Target),
- other => Err(invalid_input(format!(
- "unknown optimization objective `{other}`"
- ))),
+fn cli_text_patch(
+ value: Option<String>,
+ clear: bool,
+) -> Result<Option<TextPatch<NonEmptyText>>, StoreError> {
+ if clear {
+ if value.is_some() {
+ return Err(invalid_input("cannot set and clear the same field"));
+ }
+ return Ok(Some(TextPatch::Clear));
}
+ value
+ .map(NonEmptyText::new)
+ .transpose()
+ .map(|value| value.map(TextPatch::Set))
+ .map_err(StoreError::from)
}
-fn parse_node_id(raw: &str) -> Result<fidget_spinner_core::NodeId, StoreError> {
- Ok(fidget_spinner_core::NodeId::from_uuid(Uuid::parse_str(
- raw,
- )?))
-}
-
-fn parse_frontier_id(raw: &str) -> Result<fidget_spinner_core::FrontierId, StoreError> {
- Ok(fidget_spinner_core::FrontierId::from_uuid(Uuid::parse_str(
- raw,
- )?))
+fn invalid_input(message: impl Into<String>) -> StoreError {
+ StoreError::InvalidInput(message.into())
}
-fn parse_experiment_id(raw: &str) -> Result<fidget_spinner_core::ExperimentId, StoreError> {
- Ok(fidget_spinner_core::ExperimentId::from_uuid(
- Uuid::parse_str(raw)?,
- ))
+pub(crate) fn to_pretty_json(value: &impl Serialize) -> Result<String, StoreError> {
+ serde_json::to_string_pretty(value).map_err(StoreError::from)
}
-fn print_json<T: Serialize>(value: &T) -> Result<(), StoreError> {
+fn print_json(value: &impl Serialize) -> Result<(), StoreError> {
println!("{}", to_pretty_json(value)?);
Ok(())
}
-fn to_pretty_json<T: Serialize>(value: &T) -> Result<String, StoreError> {
- serde_json::to_string_pretty(value).map_err(StoreError::from)
-}
-
-fn invalid_input(message: impl Into<String>) -> StoreError {
- StoreError::Json(serde_json::Error::io(std::io::Error::new(
- std::io::ErrorKind::InvalidInput,
- message.into(),
- )))
-}
-
-impl From<CliNodeClass> for NodeClass {
- fn from(value: CliNodeClass) -> Self {
- match value {
- CliNodeClass::Contract => Self::Contract,
- CliNodeClass::Hypothesis => Self::Hypothesis,
- CliNodeClass::Run => Self::Run,
- CliNodeClass::Analysis => Self::Analysis,
- CliNodeClass::Decision => Self::Decision,
- CliNodeClass::Source => Self::Source,
- CliNodeClass::Note => Self::Note,
- }
- }
-}
-
impl From<CliMetricUnit> for MetricUnit {
fn from(value: CliMetricUnit) -> Self {
match value {
@@ -1715,33 +1418,32 @@ impl From<CliOptimizationObjective> for OptimizationObjective {
}
}
-impl From<CliExecutionBackend> for ExecutionBackend {
- fn from(value: CliExecutionBackend) -> Self {
+impl From<CliMetricVisibility> for MetricVisibility {
+ fn from(value: CliMetricVisibility) -> Self {
match value {
- CliExecutionBackend::Local => Self::LocalProcess,
- CliExecutionBackend::Worktree => Self::WorktreeProcess,
- CliExecutionBackend::Ssh => Self::SshProcess,
+ CliMetricVisibility::Canonical => Self::Canonical,
+ CliMetricVisibility::Minor => Self::Minor,
+ CliMetricVisibility::Hidden => Self::Hidden,
+ CliMetricVisibility::Archived => Self::Archived,
}
}
}
-impl From<CliMetricSource> for MetricFieldSource {
- fn from(value: CliMetricSource) -> Self {
+impl From<CliMetricScope> for MetricScope {
+ fn from(value: CliMetricScope) -> Self {
match value {
- CliMetricSource::RunMetric => Self::RunMetric,
- CliMetricSource::HypothesisPayload => Self::HypothesisPayload,
- CliMetricSource::RunPayload => Self::RunPayload,
- CliMetricSource::AnalysisPayload => Self::AnalysisPayload,
- CliMetricSource::DecisionPayload => Self::DecisionPayload,
+ CliMetricScope::Live => Self::Live,
+ CliMetricScope::Visible => Self::Visible,
+ CliMetricScope::All => Self::All,
}
}
}
-impl From<CliMetricOrder> for MetricRankOrder {
- fn from(value: CliMetricOrder) -> Self {
+impl From<CliMetricRankOrder> for MetricRankOrder {
+ fn from(value: CliMetricRankOrder) -> Self {
match value {
- CliMetricOrder::Asc => Self::Asc,
- CliMetricOrder::Desc => Self::Desc,
+ CliMetricRankOrder::Asc => Self::Asc,
+ CliMetricRankOrder::Desc => Self::Desc,
}
}
}
@@ -1757,42 +1459,28 @@ impl From<CliFieldValueType> for FieldValueType {
}
}
-impl From<CliDiagnosticSeverity> for DiagnosticSeverity {
- fn from(value: CliDiagnosticSeverity) -> Self {
- match value {
- CliDiagnosticSeverity::Error => Self::Error,
- CliDiagnosticSeverity::Warning => Self::Warning,
- CliDiagnosticSeverity::Info => Self::Info,
- }
- }
-}
-
-impl From<CliFieldPresence> for FieldPresence {
- fn from(value: CliFieldPresence) -> Self {
- match value {
- CliFieldPresence::Required => Self::Required,
- CliFieldPresence::Recommended => Self::Recommended,
- CliFieldPresence::Optional => Self::Optional,
- }
- }
-}
-
-impl From<CliFieldRole> for FieldRole {
- fn from(value: CliFieldRole) -> Self {
+impl From<CliArtifactKind> for ArtifactKind {
+ fn from(value: CliArtifactKind) -> Self {
match value {
- CliFieldRole::Index => Self::Index,
- CliFieldRole::ProjectionGate => Self::ProjectionGate,
- CliFieldRole::RenderOnly => Self::RenderOnly,
- CliFieldRole::Opaque => Self::Opaque,
+ CliArtifactKind::Document => Self::Document,
+ CliArtifactKind::Link => Self::Link,
+ CliArtifactKind::Log => Self::Log,
+ CliArtifactKind::Table => Self::Table,
+ CliArtifactKind::Plot => Self::Plot,
+ CliArtifactKind::Dump => Self::Dump,
+ CliArtifactKind::Binary => Self::Binary,
+ CliArtifactKind::Other => Self::Other,
}
}
}
-impl From<CliInferencePolicy> for InferencePolicy {
- fn from(value: CliInferencePolicy) -> Self {
+impl From<CliExecutionBackend> for ExecutionBackend {
+ fn from(value: CliExecutionBackend) -> Self {
match value {
- CliInferencePolicy::ManualOnly => Self::ManualOnly,
- CliInferencePolicy::ModelMayInfer => Self::ModelMayInfer,
+ CliExecutionBackend::Manual => Self::Manual,
+ CliExecutionBackend::LocalProcess => Self::LocalProcess,
+ CliExecutionBackend::WorktreeProcess => Self::WorktreeProcess,
+ CliExecutionBackend::SshProcess => Self::SshProcess,
}
}
}
@@ -1808,89 +1496,11 @@ impl From<CliFrontierVerdict> for FrontierVerdict {
}
}
-#[cfg(test)]
-mod tests {
- use super::resolve_ui_project_root;
- use std::fs;
-
- use camino::Utf8PathBuf;
- use fidget_spinner_core::NonEmptyText;
- use fidget_spinner_store_sqlite::{
- PROJECT_CONFIG_NAME, ProjectStore, STORE_DIR_NAME, StoreError,
- };
-
- fn temp_project_root(label: &str) -> Utf8PathBuf {
- let mut path = std::env::temp_dir();
- path.push(format!(
- "fidget_spinner_cli_test_{}_{}",
- label,
- uuid::Uuid::now_v7()
- ));
- Utf8PathBuf::from(path.to_string_lossy().into_owned())
- }
-
- #[test]
- fn ui_resolver_accepts_state_root_and_descendants() -> Result<(), StoreError> {
- let project_root = temp_project_root("ui_resolve_state_root");
- let _store = ProjectStore::init(
- &project_root,
- NonEmptyText::new("ui dogfood")?,
- NonEmptyText::new("local.ui")?,
- )?;
- let state_root = project_root.join(STORE_DIR_NAME);
- let config_path = state_root.join(PROJECT_CONFIG_NAME);
-
- assert_eq!(resolve_ui_project_root(&state_root)?, project_root);
- assert_eq!(resolve_ui_project_root(&config_path)?, project_root);
- Ok(())
- }
-
- #[test]
- fn ui_resolver_accepts_unique_descendant_store_from_parent() -> Result<(), StoreError> {
- let parent_root = temp_project_root("ui_resolve_parent");
- let nested_project = parent_root.join("nested/libgrid");
- fs::create_dir_all(nested_project.as_std_path())?;
- let _store = ProjectStore::init(
- &nested_project,
- NonEmptyText::new("nested ui dogfood")?,
- NonEmptyText::new("local.nested.ui")?,
- )?;
-
- assert_eq!(resolve_ui_project_root(&parent_root)?, nested_project);
- Ok(())
- }
-
- #[test]
- fn ui_resolver_rejects_ambiguous_descendant_stores() -> Result<(), StoreError> {
- let parent_root = temp_project_root("ui_resolve_ambiguous");
- let alpha_project = parent_root.join("alpha");
- let beta_project = parent_root.join("beta");
- fs::create_dir_all(alpha_project.as_std_path())?;
- fs::create_dir_all(beta_project.as_std_path())?;
- let _alpha = ProjectStore::init(
- &alpha_project,
- NonEmptyText::new("alpha")?,
- NonEmptyText::new("local.alpha")?,
- )?;
- let _beta = ProjectStore::init(
- &beta_project,
- NonEmptyText::new("beta")?,
- NonEmptyText::new("local.beta")?,
- )?;
-
- let error = match resolve_ui_project_root(&parent_root) {
- Ok(project_root) => {
- return Err(StoreError::Io(std::io::Error::other(format!(
- "expected ambiguous descendant discovery failure, got {project_root}"
- ))));
- }
- Err(error) => error,
- };
- assert!(
- error
- .to_string()
- .contains("multiple descendant project stores")
- );
- Ok(())
+impl From<CliExperimentStatus> for ExperimentStatus {
+ fn from(value: CliExperimentStatus) -> Self {
+ match value {
+ CliExperimentStatus::Open => Self::Open,
+ CliExperimentStatus::Closed => Self::Closed,
+ }
}
}
diff --git a/crates/fidget-spinner-cli/src/mcp/catalog.rs b/crates/fidget-spinner-cli/src/mcp/catalog.rs
index ae3ca78..9b486bc 100644
--- a/crates/fidget-spinner-cli/src/mcp/catalog.rs
+++ b/crates/fidget-spinner-cli/src/mcp/catalog.rs
@@ -46,756 +46,814 @@ impl ToolSpec {
}
}
+const TOOL_SPECS: &[ToolSpec] = &[
+ ToolSpec {
+ name: "project.bind",
+ description: "Bind this MCP session to a project root or nested path inside a project store.",
+ dispatch: DispatchTarget::Host,
+ replay: ReplayContract::NeverReplay,
+ },
+ ToolSpec {
+ name: "project.status",
+ description: "Read coarse project metadata and ledger counts for the bound project.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::Convergent,
+ },
+ ToolSpec {
+ name: "tag.add",
+ description: "Register one repo-local tag with a required description.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::NeverReplay,
+ },
+ ToolSpec {
+ name: "tag.list",
+ description: "List the repo-local tag registry.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::Convergent,
+ },
+ ToolSpec {
+ name: "frontier.create",
+ description: "Create a new frontier scope.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::NeverReplay,
+ },
+ ToolSpec {
+ name: "frontier.list",
+ description: "List frontier scopes in the current project.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::Convergent,
+ },
+ ToolSpec {
+ name: "frontier.read",
+ description: "Read one frontier record, including its brief.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::Convergent,
+ },
+ ToolSpec {
+ name: "frontier.open",
+ description: "Open the bounded frontier overview: brief, active tags, live metrics, active hypotheses, and open experiments.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::Convergent,
+ },
+ ToolSpec {
+ name: "frontier.brief.update",
+ description: "Replace or patch the singleton frontier brief.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::NeverReplay,
+ },
+ ToolSpec {
+ name: "frontier.history",
+ description: "Read the frontier revision history.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::Convergent,
+ },
+ ToolSpec {
+ name: "hypothesis.record",
+ description: "Record one hypothesis. The body must stay a single paragraph.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::NeverReplay,
+ },
+ ToolSpec {
+ name: "hypothesis.list",
+ description: "List hypotheses, optionally narrowed by frontier or tag.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::Convergent,
+ },
+ ToolSpec {
+ name: "hypothesis.read",
+ description: "Read one hypothesis with its local neighborhood, experiments, and artifacts.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::Convergent,
+ },
+ ToolSpec {
+ name: "hypothesis.update",
+ description: "Patch hypothesis title, summary, body, tags, influence parents, or archive state.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::NeverReplay,
+ },
+ ToolSpec {
+ name: "hypothesis.history",
+ description: "Read the revision history for one hypothesis.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::Convergent,
+ },
+ ToolSpec {
+ name: "experiment.open",
+ description: "Open one experiment anchored to exactly one hypothesis.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::NeverReplay,
+ },
+ ToolSpec {
+ name: "experiment.list",
+ description: "List experiments, optionally narrowed by frontier, hypothesis, status, or tags.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::Convergent,
+ },
+ ToolSpec {
+ name: "experiment.read",
+ description: "Read one experiment with its owning hypothesis, local neighborhood, outcome, and artifacts.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::Convergent,
+ },
+ ToolSpec {
+ name: "experiment.update",
+ description: "Patch experiment metadata, influence parents, archive state, or replace the closed outcome wholesale.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::NeverReplay,
+ },
+ ToolSpec {
+ name: "experiment.close",
+ description: "Close one open experiment with typed dimensions, structured metrics, verdict, rationale, and optional analysis.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::NeverReplay,
+ },
+ ToolSpec {
+ name: "experiment.history",
+ description: "Read the revision history for one experiment.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::Convergent,
+ },
+ ToolSpec {
+ name: "artifact.record",
+ description: "Register an external artifact reference and attach it to frontiers, hypotheses, or experiments. Artifact bodies are never read through Spinner.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::NeverReplay,
+ },
+ ToolSpec {
+ name: "artifact.list",
+ description: "List artifact references, optionally narrowed by frontier, kind, or attachment target.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::Convergent,
+ },
+ ToolSpec {
+ name: "artifact.read",
+ description: "Read one artifact reference and its attachment targets.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::Convergent,
+ },
+ ToolSpec {
+ name: "artifact.update",
+ description: "Patch artifact metadata or replace its attachment set.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::NeverReplay,
+ },
+ ToolSpec {
+ name: "artifact.history",
+ description: "Read the revision history for one artifact.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::Convergent,
+ },
+ ToolSpec {
+ name: "metric.define",
+ description: "Register one project-level metric definition.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::NeverReplay,
+ },
+ ToolSpec {
+ name: "metric.keys",
+ description: "List metric keys, defaulting to the live frontier comparison set.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::Convergent,
+ },
+ ToolSpec {
+ name: "metric.best",
+ description: "Rank closed experiments by one metric key with optional frontier, hypothesis, or dimension narrowing.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::Convergent,
+ },
+ ToolSpec {
+ name: "run.dimension.define",
+ description: "Register one typed run-dimension key.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::NeverReplay,
+ },
+ ToolSpec {
+ name: "run.dimension.list",
+ description: "List registered run dimensions.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::Convergent,
+ },
+ ToolSpec {
+ name: "skill.list",
+ description: "List bundled skills shipped with this package.",
+ dispatch: DispatchTarget::Host,
+ replay: ReplayContract::Convergent,
+ },
+ ToolSpec {
+ name: "skill.show",
+ description: "Return one bundled skill text shipped with this package. Defaults to `fidget-spinner` when name is omitted.",
+ dispatch: DispatchTarget::Host,
+ replay: ReplayContract::Convergent,
+ },
+ ToolSpec {
+ name: "system.health",
+ description: "Read MCP host health, session binding, worker generation, and rollout state.",
+ dispatch: DispatchTarget::Host,
+ replay: ReplayContract::Convergent,
+ },
+ ToolSpec {
+ name: "system.telemetry",
+ description: "Read aggregate MCP host telemetry for this session.",
+ dispatch: DispatchTarget::Host,
+ replay: ReplayContract::Convergent,
+ },
+];
+
+const RESOURCE_SPECS: &[ResourceSpec] = &[
+ ResourceSpec {
+ uri: "fidget-spinner://skill/fidget-spinner",
+ dispatch: DispatchTarget::Host,
+ replay: ReplayContract::Convergent,
+ },
+ ResourceSpec {
+ uri: "fidget-spinner://skill/frontier-loop",
+ dispatch: DispatchTarget::Host,
+ replay: ReplayContract::Convergent,
+ },
+];
+
#[must_use]
pub(crate) fn tool_spec(name: &str) -> Option<ToolSpec> {
- match name {
- "project.bind" => Some(ToolSpec {
- name: "project.bind",
- description: "Bind this MCP session to a project root or nested path inside a project store.",
- dispatch: DispatchTarget::Host,
- replay: ReplayContract::NeverReplay,
- }),
- "project.status" => Some(ToolSpec {
- name: "project.status",
- description: "Read local project status, store paths, and git availability for the currently bound project.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::Convergent,
- }),
- "project.schema" => Some(ToolSpec {
- name: "project.schema",
- description: "Read the project-local payload schema and field validation tiers.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::Convergent,
- }),
- "schema.field.upsert" => Some(ToolSpec {
- name: "schema.field.upsert",
- description: "Add or replace one project-local payload schema field definition.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::NeverReplay,
- }),
- "schema.field.remove" => Some(ToolSpec {
- name: "schema.field.remove",
- description: "Remove one project-local payload schema field definition, optionally narrowed by node-class set.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::NeverReplay,
- }),
- "tag.add" => Some(ToolSpec {
- name: "tag.add",
- description: "Register one repo-local tag with a required description. Notes may only reference tags from this registry.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::NeverReplay,
- }),
- "tag.list" => Some(ToolSpec {
- name: "tag.list",
- description: "List repo-local tags available for note and node tagging.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::Convergent,
- }),
- "frontier.list" => Some(ToolSpec {
- name: "frontier.list",
- description: "List frontiers for the current project.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::Convergent,
- }),
- "frontier.status" => Some(ToolSpec {
- name: "frontier.status",
- description: "Read one frontier projection, including open/completed experiment counts and verdict totals.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::Convergent,
- }),
- "frontier.init" => Some(ToolSpec {
- name: "frontier.init",
- description: "Create a new frontier rooted in a contract node.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::NeverReplay,
- }),
- "node.create" => Some(ToolSpec {
- name: "node.create",
- description: "Create a generic DAG node with project payload fields and optional lineage parents.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::NeverReplay,
- }),
- "hypothesis.record" => Some(ToolSpec {
- name: "hypothesis.record",
- description: "Record a core-path hypothesis with low ceremony.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::NeverReplay,
- }),
- "node.list" => Some(ToolSpec {
- name: "node.list",
- description: "List recent nodes. Archived nodes are hidden unless explicitly requested.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::Convergent,
- }),
- "node.read" => Some(ToolSpec {
- name: "node.read",
- description: "Read one node including payload, diagnostics, and hidden annotations.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::Convergent,
- }),
- "node.annotate" => Some(ToolSpec {
- name: "node.annotate",
- description: "Attach a free-form annotation to any node.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::NeverReplay,
- }),
- "node.archive" => Some(ToolSpec {
- name: "node.archive",
- description: "Archive a node so it falls out of default enumeration without being deleted.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::NeverReplay,
- }),
- "note.quick" => Some(ToolSpec {
- name: "note.quick",
- description: "Push a quick off-path note without bureaucratic experiment closure.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::NeverReplay,
- }),
- "source.record" => Some(ToolSpec {
- name: "source.record",
- description: "Record imported sources and documentary context that should live in the DAG without polluting the core path.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::NeverReplay,
- }),
- "metric.define" => Some(ToolSpec {
- name: "metric.define",
- description: "Register one project-level metric definition so experiment ingestion only has to send key/value observations.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::NeverReplay,
- }),
- "run.dimension.define" => Some(ToolSpec {
- name: "run.dimension.define",
- description: "Register one project-level run dimension used to slice metrics across scenarios, budgets, and flags.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::NeverReplay,
- }),
- "run.dimension.list" => Some(ToolSpec {
- name: "run.dimension.list",
- description: "List registered run dimensions together with observed value counts and sample values.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::Convergent,
- }),
- "metric.keys" => Some(ToolSpec {
- name: "metric.keys",
- description: "List rankable metric keys, including registered run metrics and observed payload-derived numeric fields.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::Convergent,
- }),
- "metric.best" => Some(ToolSpec {
- name: "metric.best",
- description: "Rank completed experiments by one numeric key, with optional run-dimension filters.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::Convergent,
- }),
- "metric.migrate" => Some(ToolSpec {
- name: "metric.migrate",
- description: "Re-run the idempotent legacy metric-plane normalization that registers canonical metrics and backfills benchmark_suite dimensions.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::NeverReplay,
- }),
- "experiment.open" => Some(ToolSpec {
- name: "experiment.open",
- description: "Open a stateful experiment against one hypothesis.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::NeverReplay,
- }),
- "experiment.list" => Some(ToolSpec {
- name: "experiment.list",
- description: "List currently open experiments, optionally narrowed to one frontier.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::Convergent,
- }),
- "experiment.read" => Some(ToolSpec {
- name: "experiment.read",
- description: "Read one currently open experiment by id.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::Convergent,
- }),
- "experiment.close" => Some(ToolSpec {
- name: "experiment.close",
- description: "Close one open experiment with typed run dimensions, preregistered metric observations, optional analysis, note, and verdict.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::NeverReplay,
- }),
- "skill.list" => Some(ToolSpec {
- name: "skill.list",
- description: "List bundled skills shipped with this package.",
- dispatch: DispatchTarget::Host,
- replay: ReplayContract::Convergent,
- }),
- "skill.show" => Some(ToolSpec {
- name: "skill.show",
- description: "Return one bundled skill text shipped with this package. Defaults to `fidget-spinner` when name is omitted.",
- dispatch: DispatchTarget::Host,
- replay: ReplayContract::Convergent,
- }),
- "system.health" => Some(ToolSpec {
- name: "system.health",
- description: "Read MCP host health, session binding, worker generation, rollout state, and the last fault.",
- dispatch: DispatchTarget::Host,
- replay: ReplayContract::Convergent,
- }),
- "system.telemetry" => Some(ToolSpec {
- name: "system.telemetry",
- description: "Read aggregate request, retry, restart, and per-operation telemetry for this MCP session.",
- dispatch: DispatchTarget::Host,
- replay: ReplayContract::Convergent,
- }),
- _ => None,
- }
+ TOOL_SPECS.iter().copied().find(|spec| spec.name == name)
}
#[must_use]
pub(crate) fn resource_spec(uri: &str) -> Option<ResourceSpec> {
- match uri {
- "fidget-spinner://project/config" => Some(ResourceSpec {
- uri: "fidget-spinner://project/config",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::Convergent,
- }),
- "fidget-spinner://project/schema" => Some(ResourceSpec {
- uri: "fidget-spinner://project/schema",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::Convergent,
- }),
- "fidget-spinner://skill/fidget-spinner" => Some(ResourceSpec {
- uri: "fidget-spinner://skill/fidget-spinner",
- dispatch: DispatchTarget::Host,
- replay: ReplayContract::Convergent,
- }),
- "fidget-spinner://skill/frontier-loop" => Some(ResourceSpec {
- uri: "fidget-spinner://skill/frontier-loop",
- dispatch: DispatchTarget::Host,
- replay: ReplayContract::Convergent,
- }),
- _ => None,
- }
+ RESOURCE_SPECS.iter().copied().find(|spec| spec.uri == uri)
}
#[must_use]
pub(crate) fn tool_definitions() -> Vec<Value> {
- [
- "project.bind",
- "project.status",
- "project.schema",
- "schema.field.upsert",
- "schema.field.remove",
- "tag.add",
- "tag.list",
- "frontier.list",
- "frontier.status",
- "frontier.init",
- "node.create",
- "hypothesis.record",
- "node.list",
- "node.read",
- "node.annotate",
- "node.archive",
- "note.quick",
- "source.record",
- "metric.define",
- "run.dimension.define",
- "run.dimension.list",
- "metric.keys",
- "metric.best",
- "metric.migrate",
- "experiment.open",
- "experiment.list",
- "experiment.read",
- "experiment.close",
- "skill.list",
- "skill.show",
- "system.health",
- "system.telemetry",
- ]
- .into_iter()
- .filter_map(tool_spec)
- .map(|spec| {
- json!({
- "name": spec.name,
- "description": spec.description,
- "inputSchema": with_common_presentation(input_schema(spec.name)),
- "annotations": spec.annotation_json(),
+ TOOL_SPECS
+ .iter()
+ .copied()
+ .map(|spec| {
+ json!({
+ "name": spec.name,
+ "description": spec.description,
+ "annotations": spec.annotation_json(),
+ "inputSchema": tool_input_schema(spec.name),
+ })
})
- })
- .collect()
+ .collect()
}
#[must_use]
pub(crate) fn list_resources() -> Vec<Value> {
- vec![
- json!({
- "uri": "fidget-spinner://project/config",
- "name": "project-config",
- "description": "Project-local store configuration",
- "mimeType": "application/json"
- }),
- json!({
- "uri": "fidget-spinner://project/schema",
- "name": "project-schema",
- "description": "Project-local payload schema and validation tiers",
- "mimeType": "application/json"
- }),
- json!({
- "uri": "fidget-spinner://skill/fidget-spinner",
- "name": "fidget-spinner-skill",
- "description": "Bundled base Fidget Spinner skill text for this package",
- "mimeType": "text/markdown"
- }),
- json!({
- "uri": "fidget-spinner://skill/frontier-loop",
- "name": "frontier-loop-skill",
- "description": "Bundled frontier-loop specialization skill text for this package",
- "mimeType": "text/markdown"
- }),
- ]
+ RESOURCE_SPECS
+ .iter()
+ .map(|spec| {
+ json!({
+ "uri": spec.uri,
+ "name": spec.uri.rsplit('/').next().unwrap_or(spec.uri),
+ "description": resource_description(spec.uri),
+ })
+ })
+ .collect()
}
-fn input_schema(name: &str) -> Value {
- match name {
- "project.status" | "project.schema" | "tag.list" | "skill.list" | "system.health"
- | "system.telemetry" | "run.dimension.list" | "metric.migrate" => {
- json!({"type":"object","additionalProperties":false})
- }
- "schema.field.upsert" => json!({
- "type": "object",
- "properties": {
- "name": { "type": "string", "description": "Project payload field name." },
- "node_classes": { "type": "array", "items": node_class_schema(), "description": "Optional node-class scope. Omit or pass [] for all classes." },
- "presence": field_presence_schema(),
- "severity": diagnostic_severity_schema(),
- "role": field_role_schema(),
- "inference_policy": inference_policy_schema(),
- "value_type": field_value_type_schema(),
- },
- "required": ["name", "presence", "severity", "role", "inference_policy"],
- "additionalProperties": false
- }),
- "schema.field.remove" => json!({
- "type": "object",
- "properties": {
- "name": { "type": "string", "description": "Project payload field name." },
- "node_classes": { "type": "array", "items": node_class_schema(), "description": "Optional exact node-class scope to remove." }
- },
- "required": ["name"],
- "additionalProperties": false
- }),
- "project.bind" => json!({
- "type": "object",
- "properties": {
- "path": { "type": "string", "description": "Project root or any nested path inside a project with .fidget_spinner state." }
- },
- "required": ["path"],
- "additionalProperties": false
- }),
- "tag.add" => json!({
- "type": "object",
- "properties": {
- "name": { "type": "string", "description": "Lowercase repo-local tag name." },
- "description": { "type": "string", "description": "Human-facing tag description." }
- },
- "required": ["name", "description"],
- "additionalProperties": false
- }),
- "skill.show" => json!({
- "type": "object",
- "properties": {
- "name": { "type": "string", "description": "Bundled skill name. Defaults to `fidget-spinner`." }
- },
- "additionalProperties": false
- }),
- "frontier.list" => json!({"type":"object","additionalProperties":false}),
- "frontier.status" => json!({
- "type": "object",
- "properties": {
- "frontier_id": { "type": "string", "description": "Frontier UUID" }
- },
- "required": ["frontier_id"],
- "additionalProperties": false
- }),
- "frontier.init" => json!({
- "type": "object",
- "properties": {
- "label": { "type": "string" },
- "objective": { "type": "string" },
- "contract_title": { "type": "string" },
- "contract_summary": { "type": "string" },
- "benchmark_suites": { "type": "array", "items": { "type": "string" } },
- "promotion_criteria": { "type": "array", "items": { "type": "string" } },
- "primary_metric": metric_spec_schema(),
- "supporting_metrics": { "type": "array", "items": metric_spec_schema() },
- "seed_summary": { "type": "string" }
- },
- "required": ["label", "objective", "contract_title", "benchmark_suites", "promotion_criteria", "primary_metric"],
- "additionalProperties": false
- }),
- "node.create" => json!({
- "type": "object",
- "properties": {
- "class": node_class_schema(),
- "frontier_id": { "type": "string" },
- "title": { "type": "string" },
- "summary": { "type": "string", "description": "Required for `note` and `source` nodes." },
- "tags": { "type": "array", "items": tag_name_schema(), "description": "Required for `note` nodes; optional for other classes." },
- "payload": { "type": "object", "description": "`note` and `source` nodes require a non-empty string `body` field." },
- "annotations": { "type": "array", "items": annotation_schema() },
- "parents": { "type": "array", "items": { "type": "string" } }
- },
- "required": ["class", "title"],
- "additionalProperties": false
- }),
- "hypothesis.record" => json!({
- "type": "object",
- "properties": {
- "frontier_id": { "type": "string" },
- "title": { "type": "string" },
- "summary": { "type": "string" },
- "body": { "type": "string" },
- "annotations": { "type": "array", "items": annotation_schema() },
- "parents": { "type": "array", "items": { "type": "string" } }
- },
- "required": ["frontier_id", "title", "summary", "body"],
- "additionalProperties": false
- }),
- "node.list" => json!({
- "type": "object",
- "properties": {
- "frontier_id": { "type": "string" },
- "class": node_class_schema(),
- "tags": { "type": "array", "items": tag_name_schema() },
- "include_archived": { "type": "boolean" },
- "limit": { "type": "integer", "minimum": 1, "maximum": 500 }
- },
- "additionalProperties": false
- }),
- "node.read" | "node.archive" => json!({
- "type": "object",
- "properties": {
- "node_id": { "type": "string" }
- },
- "required": ["node_id"],
- "additionalProperties": false
- }),
- "node.annotate" => json!({
- "type": "object",
- "properties": {
- "node_id": { "type": "string" },
- "body": { "type": "string" },
- "label": { "type": "string" },
- "visible": { "type": "boolean" }
- },
- "required": ["node_id", "body"],
- "additionalProperties": false
- }),
- "note.quick" => json!({
- "type": "object",
- "properties": {
- "frontier_id": { "type": "string" },
- "title": { "type": "string" },
- "summary": { "type": "string" },
- "body": { "type": "string" },
- "tags": { "type": "array", "items": tag_name_schema() },
- "annotations": { "type": "array", "items": annotation_schema() },
- "parents": { "type": "array", "items": { "type": "string" } }
- },
- "required": ["title", "summary", "body", "tags"],
- "additionalProperties": false
- }),
- "source.record" => json!({
- "type": "object",
- "properties": {
- "frontier_id": { "type": "string" },
- "title": { "type": "string" },
- "summary": { "type": "string" },
- "body": { "type": "string" },
- "tags": { "type": "array", "items": tag_name_schema() },
- "annotations": { "type": "array", "items": annotation_schema() },
- "parents": { "type": "array", "items": { "type": "string" } }
- },
- "required": ["title", "summary", "body"],
- "additionalProperties": false
- }),
- "metric.define" => json!({
- "type": "object",
- "properties": {
- "key": { "type": "string" },
- "unit": metric_unit_schema(),
- "objective": optimization_objective_schema(),
- "description": { "type": "string" }
- },
- "required": ["key", "unit", "objective"],
- "additionalProperties": false
- }),
- "run.dimension.define" => json!({
- "type": "object",
- "properties": {
- "key": { "type": "string" },
- "value_type": field_value_type_schema(),
- "description": { "type": "string" }
- },
- "required": ["key", "value_type"],
- "additionalProperties": false
- }),
- "metric.keys" => json!({
- "type": "object",
- "properties": {
- "frontier_id": { "type": "string" },
- "source": metric_source_schema(),
- "dimensions": { "type": "object" }
- },
- "additionalProperties": false
- }),
- "metric.best" => json!({
- "type": "object",
- "properties": {
- "key": { "type": "string" },
- "frontier_id": { "type": "string" },
- "source": metric_source_schema(),
- "dimensions": { "type": "object" },
- "order": metric_order_schema(),
- "limit": { "type": "integer", "minimum": 1, "maximum": 500 }
- },
- "required": ["key"],
- "additionalProperties": false
- }),
- "experiment.open" => json!({
- "type": "object",
- "properties": {
- "frontier_id": { "type": "string" },
- "hypothesis_node_id": { "type": "string" },
- "title": { "type": "string" },
- "summary": { "type": "string" }
- },
- "required": ["frontier_id", "hypothesis_node_id", "title"],
- "additionalProperties": false
- }),
- "experiment.list" => json!({
- "type": "object",
- "properties": {
- "frontier_id": { "type": "string" }
- },
- "additionalProperties": false
- }),
- "experiment.read" => json!({
- "type": "object",
- "properties": {
- "experiment_id": { "type": "string" }
- },
- "required": ["experiment_id"],
- "additionalProperties": false
- }),
- "experiment.close" => json!({
- "type": "object",
- "properties": {
- "experiment_id": { "type": "string" },
- "run": run_schema(),
- "primary_metric": metric_value_schema(),
- "supporting_metrics": { "type": "array", "items": metric_value_schema() },
- "note": note_schema(),
- "verdict": verdict_schema(),
- "decision_title": { "type": "string" },
- "decision_rationale": { "type": "string" },
- "analysis": analysis_schema()
- },
- "required": [
- "experiment_id",
- "run",
+fn resource_description(uri: &str) -> &'static str {
+ match uri {
+ "fidget-spinner://skill/fidget-spinner" => "Bundled Fidget Spinner operating doctrine.",
+ "fidget-spinner://skill/frontier-loop" => "Bundled frontier-loop specialization.",
+ _ => "Fidget Spinner resource.",
+ }
+}
+
+fn tool_input_schema(name: &str) -> Value {
+ let schema = match name {
+ "project.bind" => object_schema(
+ &[(
+ "path",
+ string_schema("Project root or any nested path inside it."),
+ )],
+ &["path"],
+ ),
+ "project.status" | "tag.list" | "frontier.list" | "run.dimension.list" | "skill.list"
+ | "system.health" | "system.telemetry" => empty_object_schema(),
+ "tag.add" => object_schema(
+ &[
+ ("name", string_schema("Repo-local tag token.")),
+ (
+ "description",
+ string_schema("Human-facing tag description."),
+ ),
+ ],
+ &["name", "description"],
+ ),
+ "frontier.create" => object_schema(
+ &[
+ ("label", string_schema("Short frontier label.")),
+ ("objective", string_schema("Frontier objective.")),
+ ("slug", string_schema("Optional stable frontier slug.")),
+ ],
+ &["label", "objective"],
+ ),
+ "frontier.read" | "frontier.open" | "frontier.history" => object_schema(
+ &[("frontier", selector_schema("Frontier UUID or slug."))],
+ &["frontier"],
+ ),
+ "frontier.brief.update" => object_schema(
+ &[
+ ("frontier", selector_schema("Frontier UUID or slug.")),
+ (
+ "expected_revision",
+ integer_schema("Optimistic concurrency guard."),
+ ),
+ (
+ "situation",
+ nullable_string_schema("Optional frontier situation text."),
+ ),
+ ("roadmap", roadmap_schema()),
+ (
+ "unknowns",
+ string_array_schema("Ordered frontier unknowns."),
+ ),
+ ],
+ &["frontier"],
+ ),
+ "hypothesis.record" => object_schema(
+ &[
+ ("frontier", selector_schema("Owning frontier UUID or slug.")),
+ ("title", string_schema("Terse hypothesis title.")),
+ ("summary", string_schema("One-line hypothesis summary.")),
+ ("body", string_schema("Single-paragraph hypothesis body.")),
+ ("slug", string_schema("Optional stable hypothesis slug.")),
+ ("tags", string_array_schema("Tag names.")),
+ ("parents", vertex_selector_array_schema()),
+ ],
+ &["frontier", "title", "summary", "body"],
+ ),
+ "hypothesis.list" => object_schema(
+ &[
+ (
+ "frontier",
+ selector_schema("Optional frontier UUID or slug."),
+ ),
+ ("tags", string_array_schema("Require all listed tags.")),
+ (
+ "include_archived",
+ boolean_schema("Include archived hypotheses."),
+ ),
+ ("limit", integer_schema("Optional row cap.")),
+ ],
+ &[],
+ ),
+ "hypothesis.read" | "hypothesis.history" => object_schema(
+ &[("hypothesis", selector_schema("Hypothesis UUID or slug."))],
+ &["hypothesis"],
+ ),
+ "hypothesis.update" => object_schema(
+ &[
+ ("hypothesis", selector_schema("Hypothesis UUID or slug.")),
+ (
+ "expected_revision",
+ integer_schema("Optimistic concurrency guard."),
+ ),
+ ("title", string_schema("Replacement title.")),
+ ("summary", string_schema("Replacement summary.")),
+ ("body", string_schema("Replacement single-paragraph body.")),
+ ("tags", string_array_schema("Replacement tag set.")),
+ ("parents", vertex_selector_array_schema()),
+ ("archived", boolean_schema("Archive state override.")),
+ ],
+ &["hypothesis"],
+ ),
+ "experiment.open" => object_schema(
+ &[
+ (
+ "hypothesis",
+ selector_schema("Owning hypothesis UUID or slug."),
+ ),
+ ("title", string_schema("Experiment title.")),
+ ("summary", string_schema("Optional experiment summary.")),
+ ("slug", string_schema("Optional stable experiment slug.")),
+ ("tags", string_array_schema("Tag names.")),
+ ("parents", vertex_selector_array_schema()),
+ ],
+ &["hypothesis", "title"],
+ ),
+ "experiment.list" => object_schema(
+ &[
+ (
+ "frontier",
+ selector_schema("Optional frontier UUID or slug."),
+ ),
+ (
+ "hypothesis",
+ selector_schema("Optional hypothesis UUID or slug."),
+ ),
+ ("tags", string_array_schema("Require all listed tags.")),
+ (
+ "status",
+ enum_string_schema(&["open", "closed"], "Optional experiment status filter."),
+ ),
+ (
+ "include_archived",
+ boolean_schema("Include archived experiments."),
+ ),
+ ("limit", integer_schema("Optional row cap.")),
+ ],
+ &[],
+ ),
+ "experiment.read" | "experiment.history" => object_schema(
+ &[("experiment", selector_schema("Experiment UUID or slug."))],
+ &["experiment"],
+ ),
+ "experiment.update" => object_schema(
+ &[
+ ("experiment", selector_schema("Experiment UUID or slug.")),
+ (
+ "expected_revision",
+ integer_schema("Optimistic concurrency guard."),
+ ),
+ ("title", string_schema("Replacement title.")),
+ (
+ "summary",
+ nullable_string_schema("Replacement summary or explicit null."),
+ ),
+ ("tags", string_array_schema("Replacement tag set.")),
+ ("parents", vertex_selector_array_schema()),
+ ("archived", boolean_schema("Archive state override.")),
+ ("outcome", experiment_outcome_schema()),
+ ],
+ &["experiment"],
+ ),
+ "experiment.close" => object_schema(
+ &[
+ ("experiment", selector_schema("Experiment UUID or slug.")),
+ (
+ "expected_revision",
+ integer_schema("Optimistic concurrency guard."),
+ ),
+ (
+ "backend",
+ enum_string_schema(
+ &["manual", "local_process", "worktree_process", "ssh_process"],
+ "Execution backend.",
+ ),
+ ),
+ ("command", command_schema()),
+ ("dimensions", run_dimensions_schema()),
+ ("primary_metric", metric_value_schema()),
+ ("supporting_metrics", metric_value_array_schema()),
+ (
+ "verdict",
+ enum_string_schema(
+ &["accepted", "kept", "parked", "rejected"],
+ "Closed verdict.",
+ ),
+ ),
+ ("rationale", string_schema("Decision rationale.")),
+ ("analysis", experiment_analysis_schema()),
+ ],
+ &[
+ "experiment",
+ "backend",
+ "command",
+ "dimensions",
"primary_metric",
- "note",
"verdict",
- "decision_title",
- "decision_rationale"
+ "rationale",
],
- "additionalProperties": false
- }),
- _ => json!({"type":"object","additionalProperties":false}),
- }
+ ),
+ "artifact.record" => object_schema(
+ &[
+ (
+ "kind",
+ enum_string_schema(
+ &[
+ "document", "link", "log", "table", "plot", "dump", "binary", "other",
+ ],
+ "Artifact kind.",
+ ),
+ ),
+ ("label", string_schema("Human-facing artifact label.")),
+ ("summary", string_schema("Optional summary.")),
+ (
+ "locator",
+ string_schema(
+ "Opaque locator or URI. Artifact bodies are never read through Spinner.",
+ ),
+ ),
+ ("media_type", string_schema("Optional media type.")),
+ ("slug", string_schema("Optional stable artifact slug.")),
+ ("attachments", attachment_selector_array_schema()),
+ ],
+ &["kind", "label", "locator"],
+ ),
+ "artifact.list" => object_schema(
+ &[
+ (
+ "frontier",
+ selector_schema("Optional frontier UUID or slug."),
+ ),
+ (
+ "kind",
+ enum_string_schema(
+ &[
+ "document", "link", "log", "table", "plot", "dump", "binary", "other",
+ ],
+ "Optional artifact kind.",
+ ),
+ ),
+ ("attached_to", attachment_selector_schema()),
+ ("limit", integer_schema("Optional row cap.")),
+ ],
+ &[],
+ ),
+ "artifact.read" | "artifact.history" => object_schema(
+ &[("artifact", selector_schema("Artifact UUID or slug."))],
+ &["artifact"],
+ ),
+ "artifact.update" => object_schema(
+ &[
+ ("artifact", selector_schema("Artifact UUID or slug.")),
+ (
+ "expected_revision",
+ integer_schema("Optimistic concurrency guard."),
+ ),
+ (
+ "kind",
+ enum_string_schema(
+ &[
+ "document", "link", "log", "table", "plot", "dump", "binary", "other",
+ ],
+ "Replacement artifact kind.",
+ ),
+ ),
+ ("label", string_schema("Replacement label.")),
+ (
+ "summary",
+ nullable_string_schema("Replacement summary or explicit null."),
+ ),
+ ("locator", string_schema("Replacement locator.")),
+ (
+ "media_type",
+ nullable_string_schema("Replacement media type or explicit null."),
+ ),
+ ("attachments", attachment_selector_array_schema()),
+ ],
+ &["artifact"],
+ ),
+ "metric.define" => object_schema(
+ &[
+ ("key", string_schema("Metric key.")),
+ (
+ "unit",
+ enum_string_schema(
+ &["seconds", "bytes", "count", "ratio", "custom"],
+ "Metric unit.",
+ ),
+ ),
+ (
+ "objective",
+ enum_string_schema(
+ &["minimize", "maximize", "target"],
+ "Optimization objective.",
+ ),
+ ),
+ (
+ "visibility",
+ enum_string_schema(
+ &["canonical", "minor", "hidden", "archived"],
+ "Metric visibility tier.",
+ ),
+ ),
+ ("description", string_schema("Optional description.")),
+ ],
+ &["key", "unit", "objective"],
+ ),
+ "metric.keys" => object_schema(
+ &[
+ (
+ "frontier",
+ selector_schema("Optional frontier UUID or slug."),
+ ),
+ (
+ "scope",
+ enum_string_schema(&["live", "visible", "all"], "Registry slice to enumerate."),
+ ),
+ ],
+ &[],
+ ),
+ "metric.best" => object_schema(
+ &[
+ (
+ "frontier",
+ selector_schema("Optional frontier UUID or slug."),
+ ),
+ (
+ "hypothesis",
+ selector_schema("Optional hypothesis UUID or slug."),
+ ),
+ ("key", string_schema("Metric key.")),
+ ("dimensions", run_dimensions_schema()),
+ (
+ "include_rejected",
+ boolean_schema("Include rejected experiments."),
+ ),
+ ("limit", integer_schema("Optional row cap.")),
+ (
+ "order",
+ enum_string_schema(&["asc", "desc"], "Optional explicit ranking direction."),
+ ),
+ ],
+ &["key"],
+ ),
+ "run.dimension.define" => object_schema(
+ &[
+ ("key", string_schema("Dimension key.")),
+ (
+ "value_type",
+ enum_string_schema(
+ &["string", "numeric", "boolean", "timestamp"],
+ "Dimension value type.",
+ ),
+ ),
+ ("description", string_schema("Optional description.")),
+ ],
+ &["key", "value_type"],
+ ),
+ "skill.show" => object_schema(&[("name", string_schema("Bundled skill name."))], &[]),
+ _ => empty_object_schema(),
+ };
+ with_common_presentation(schema)
}
-fn metric_spec_schema() -> Value {
+fn empty_object_schema() -> Value {
json!({
"type": "object",
- "properties": {
- "key": { "type": "string" },
- "unit": metric_unit_schema(),
- "objective": optimization_objective_schema()
- },
- "required": ["key", "unit", "objective"],
- "additionalProperties": false
+ "properties": {},
+ "additionalProperties": false,
})
}
-fn metric_value_schema() -> Value {
+fn object_schema(properties: &[(&str, Value)], required: &[&str]) -> Value {
+ let mut map = serde_json::Map::new();
+ for (key, value) in properties {
+ let _ = map.insert((*key).to_owned(), value.clone());
+ }
json!({
"type": "object",
- "properties": {
- "key": { "type": "string" },
- "value": { "type": "number" }
- },
- "required": ["key", "value"],
- "additionalProperties": false
+ "properties": Value::Object(map),
+ "required": required,
+ "additionalProperties": false,
})
}
-fn annotation_schema() -> Value {
- json!({
- "type": "object",
- "properties": {
- "body": { "type": "string" },
- "label": { "type": "string" },
- "visible": { "type": "boolean" }
- },
- "required": ["body"],
- "additionalProperties": false
- })
+fn string_schema(description: &str) -> Value {
+ json!({ "type": "string", "description": description })
}
-fn analysis_schema() -> Value {
+fn nullable_string_schema(description: &str) -> Value {
json!({
- "type": "object",
- "properties": {
- "title": { "type": "string" },
- "summary": { "type": "string" },
- "body": { "type": "string" }
- },
- "required": ["title", "summary", "body"],
- "additionalProperties": false
+ "description": description,
+ "oneOf": [
+ { "type": "string" },
+ { "type": "null" }
+ ]
})
}
-fn tag_name_schema() -> Value {
- json!({
- "type": "string",
- "pattern": "^[a-z0-9]+(?:[-_/][a-z0-9]+)*$"
- })
+fn integer_schema(description: &str) -> Value {
+ json!({ "type": "integer", "minimum": 0, "description": description })
}
-fn node_class_schema() -> Value {
- json!({
- "type": "string",
- "enum": ["contract", "hypothesis", "run", "analysis", "decision", "source", "note"]
- })
+fn boolean_schema(description: &str) -> Value {
+ json!({ "type": "boolean", "description": description })
}
-fn metric_unit_schema() -> Value {
- json!({
- "type": "string",
- "enum": ["seconds", "bytes", "count", "ratio", "custom"]
- })
+fn enum_string_schema(values: &[&str], description: &str) -> Value {
+ json!({ "type": "string", "enum": values, "description": description })
}
-fn metric_source_schema() -> Value {
+fn string_array_schema(description: &str) -> Value {
json!({
- "type": "string",
- "enum": [
- "run_metric",
- "hypothesis_payload",
- "run_payload",
- "analysis_payload",
- "decision_payload"
- ]
+ "type": "array",
+ "items": { "type": "string" },
+ "description": description
})
}
-fn metric_order_schema() -> Value {
- json!({
- "type": "string",
- "enum": ["asc", "desc"]
- })
+fn selector_schema(description: &str) -> Value {
+ string_schema(description)
}
-fn field_value_type_schema() -> Value {
+fn vertex_selector_schema() -> Value {
json!({
- "type": "string",
- "enum": ["string", "numeric", "boolean", "timestamp"]
+ "type": "object",
+ "properties": {
+ "kind": { "type": "string", "enum": ["hypothesis", "experiment"] },
+ "selector": { "type": "string" }
+ },
+ "required": ["kind", "selector"],
+ "additionalProperties": false
})
}
-fn diagnostic_severity_schema() -> Value {
+fn attachment_selector_schema() -> Value {
json!({
- "type": "string",
- "enum": ["error", "warning", "info"]
+ "type": "object",
+ "properties": {
+ "kind": { "type": "string", "enum": ["frontier", "hypothesis", "experiment"] },
+ "selector": { "type": "string" }
+ },
+ "required": ["kind", "selector"],
+ "additionalProperties": false
})
}
-fn field_presence_schema() -> Value {
- json!({
- "type": "string",
- "enum": ["required", "recommended", "optional"]
- })
+fn vertex_selector_array_schema() -> Value {
+ json!({ "type": "array", "items": vertex_selector_schema() })
+}
+
+fn attachment_selector_array_schema() -> Value {
+ json!({ "type": "array", "items": attachment_selector_schema() })
}
-fn field_role_schema() -> Value {
+fn roadmap_schema() -> Value {
json!({
- "type": "string",
- "enum": ["index", "projection_gate", "render_only", "opaque"]
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "rank": { "type": "integer", "minimum": 0 },
+ "hypothesis": { "type": "string" },
+ "summary": { "type": "string" }
+ },
+ "required": ["rank", "hypothesis"],
+ "additionalProperties": false
+ }
})
}
-fn inference_policy_schema() -> Value {
+fn command_schema() -> Value {
json!({
- "type": "string",
- "enum": ["manual_only", "model_may_infer"]
+ "type": "object",
+ "properties": {
+ "working_directory": { "type": "string" },
+ "argv": { "type": "array", "items": { "type": "string" } },
+ "env": {
+ "type": "object",
+ "additionalProperties": { "type": "string" }
+ }
+ },
+ "required": ["argv"],
+ "additionalProperties": false
})
}
-fn optimization_objective_schema() -> Value {
+fn metric_value_schema() -> Value {
json!({
- "type": "string",
- "enum": ["minimize", "maximize", "target"]
+ "type": "object",
+ "properties": {
+ "key": { "type": "string" },
+ "value": { "type": "number" }
+ },
+ "required": ["key", "value"],
+ "additionalProperties": false
})
}
-fn verdict_schema() -> Value {
+fn metric_value_array_schema() -> Value {
+ json!({ "type": "array", "items": metric_value_schema() })
+}
+
+fn run_dimensions_schema() -> Value {
json!({
- "type": "string",
- "enum": [
- "accepted",
- "kept",
- "parked",
- "rejected"
- ]
+ "type": "object",
+ "additionalProperties": true,
+ "description": "Exact run-dimension filter or outcome dimension map. Values may be strings, numbers, booleans, or RFC3339 timestamps."
})
}
-fn run_schema() -> Value {
+fn experiment_analysis_schema() -> Value {
json!({
"type": "object",
"properties": {
- "title": { "type": "string" },
"summary": { "type": "string" },
- "backend": {
- "type": "string",
- "enum": ["local_process", "worktree_process", "ssh_process"]
- },
- "dimensions": { "type": "object" },
- "command": {
- "type": "object",
- "properties": {
- "working_directory": { "type": "string" },
- "argv": { "type": "array", "items": { "type": "string" } },
- "env": {
- "type": "object",
- "additionalProperties": { "type": "string" }
- }
- },
- "required": ["argv"],
- "additionalProperties": false
- }
+ "body": { "type": "string" }
},
- "required": ["title", "backend", "dimensions", "command"],
+ "required": ["summary", "body"],
"additionalProperties": false
})
}
-fn note_schema() -> Value {
+fn experiment_outcome_schema() -> Value {
json!({
"type": "object",
"properties": {
- "summary": { "type": "string" },
- "next_hypotheses": { "type": "array", "items": { "type": "string" } }
+ "backend": { "type": "string", "enum": ["manual", "local_process", "worktree_process", "ssh_process"] },
+ "command": command_schema(),
+ "dimensions": run_dimensions_schema(),
+ "primary_metric": metric_value_schema(),
+ "supporting_metrics": metric_value_array_schema(),
+ "verdict": { "type": "string", "enum": ["accepted", "kept", "parked", "rejected"] },
+ "rationale": { "type": "string" },
+ "analysis": experiment_analysis_schema()
},
- "required": ["summary"],
+ "required": ["backend", "command", "dimensions", "primary_metric", "verdict", "rationale"],
"additionalProperties": false
})
}
diff --git a/crates/fidget-spinner-cli/src/mcp/host/runtime.rs b/crates/fidget-spinner-cli/src/mcp/host/runtime.rs
index d57a21e..bf0484a 100644
--- a/crates/fidget-spinner-cli/src/mcp/host/runtime.rs
+++ b/crates/fidget-spinner-cli/src/mcp/host/runtime.rs
@@ -230,7 +230,7 @@ impl HostRuntime {
"name": SERVER_NAME,
"version": env!("CARGO_PKG_VERSION")
},
- "instructions": "The DAG is canonical truth. Frontier state is derived. Bind the session with project.bind before project-local DAG operations when the MCP is running unbound."
+ "instructions": "Bind the session with project.bind before project-local work when the MCP is unbound. Use frontier.open as the only overview surface, then walk hypotheses and experiments deliberately by selector. Artifacts are references only; Spinner does not read artifact bodies."
}))),
"notifications/initialized" => {
if !self.seed_captured() {
@@ -598,8 +598,11 @@ struct ProjectBindStatus {
project_root: String,
state_root: String,
display_name: fidget_spinner_core::NonEmptyText,
- schema: fidget_spinner_core::PayloadSchemaRef,
- git_repo_detected: bool,
+ frontier_count: u64,
+ hypothesis_count: u64,
+ experiment_count: u64,
+ open_experiment_count: u64,
+ artifact_count: u64,
}
struct ResolvedProjectBinding {
@@ -611,6 +614,7 @@ fn resolve_project_binding(
requested_path: PathBuf,
) -> Result<ResolvedProjectBinding, fidget_spinner_store_sqlite::StoreError> {
let store = crate::open_or_init_store_for_binding(&requested_path)?;
+ let project_status = store.status()?;
Ok(ResolvedProjectBinding {
binding: ProjectBinding {
requested_path: requested_path.clone(),
@@ -621,12 +625,11 @@ fn resolve_project_binding(
project_root: store.project_root().to_string(),
state_root: store.state_root().to_string(),
display_name: store.config().display_name.clone(),
- schema: store.schema().schema_ref(),
- git_repo_detected: crate::run_git(
- store.project_root(),
- &["rev-parse", "--show-toplevel"],
- )?
- .is_some(),
+ frontier_count: project_status.frontier_count,
+ hypothesis_count: project_status.hypothesis_count,
+ experiment_count: project_status.experiment_count,
+ open_experiment_count: project_status.open_experiment_count,
+ artifact_count: project_status.artifact_count,
},
})
}
@@ -728,17 +731,20 @@ fn project_bind_output(status: &ProjectBindStatus) -> Result<ToolOutput, FaultRe
let _ = concise.insert("project_root".to_owned(), json!(status.project_root));
let _ = concise.insert("state_root".to_owned(), json!(status.state_root));
let _ = concise.insert("display_name".to_owned(), json!(status.display_name));
+ let _ = concise.insert("frontier_count".to_owned(), json!(status.frontier_count));
let _ = concise.insert(
- "schema".to_owned(),
- json!(format!(
- "{}@{}",
- status.schema.namespace, status.schema.version
- )),
+ "hypothesis_count".to_owned(),
+ json!(status.hypothesis_count),
);
let _ = concise.insert(
- "git_repo_detected".to_owned(),
- json!(status.git_repo_detected),
+ "experiment_count".to_owned(),
+ json!(status.experiment_count),
);
+ let _ = concise.insert(
+ "open_experiment_count".to_owned(),
+ json!(status.open_experiment_count),
+ );
+ let _ = concise.insert("artifact_count".to_owned(), json!(status.artifact_count));
if status.requested_path != status.project_root {
let _ = concise.insert("requested_path".to_owned(), json!(status.requested_path));
}
@@ -749,18 +755,13 @@ fn project_bind_output(status: &ProjectBindStatus) -> Result<ToolOutput, FaultRe
format!("bound project {}", status.display_name),
format!("root: {}", status.project_root),
format!("state: {}", status.state_root),
+ format!("frontiers: {}", status.frontier_count),
+ format!("hypotheses: {}", status.hypothesis_count),
format!(
- "schema: {}@{}",
- status.schema.namespace, status.schema.version
- ),
- format!(
- "git: {}",
- if status.git_repo_detected {
- "detected"
- } else {
- "not detected"
- }
+ "experiments: {} total, {} open",
+ status.experiment_count, status.open_experiment_count
),
+ format!("artifacts: {}", status.artifact_count),
]
.join("\n"),
None,
diff --git a/crates/fidget-spinner-cli/src/mcp/service.rs b/crates/fidget-spinner-cli/src/mcp/service.rs
index f0cca1e..adc29f9 100644
--- a/crates/fidget-spinner-cli/src/mcp/service.rs
+++ b/crates/fidget-spinner-cli/src/mcp/service.rs
@@ -1,20 +1,22 @@
use std::collections::{BTreeMap, BTreeSet};
+use std::fmt::Write as _;
use std::fs;
use camino::{Utf8Path, Utf8PathBuf};
use fidget_spinner_core::{
- AdmissionState, AnnotationVisibility, CommandRecipe, DiagnosticSeverity, ExecutionBackend,
- FieldPresence, FieldRole, FieldValueType, FrontierContract, FrontierNote, FrontierProjection,
- FrontierRecord, FrontierVerdict, InferencePolicy, MetricSpec, MetricUnit, MetricValue,
- NodeAnnotation, NodeClass, NodePayload, NonEmptyText, ProjectFieldSpec, ProjectSchema,
- RunDimensionValue, TagName, TagRecord,
+ ArtifactKind, CommandRecipe, ExecutionBackend, ExperimentAnalysis, ExperimentStatus,
+ FieldValueType, FrontierVerdict, MetricUnit, MetricVisibility, NonEmptyText,
+ OptimizationObjective, RunDimensionValue, Slug, TagName,
};
use fidget_spinner_store_sqlite::{
- CloseExperimentRequest, CreateFrontierRequest, CreateNodeRequest, DefineMetricRequest,
- DefineRunDimensionRequest, EdgeAttachment, EdgeAttachmentDirection, ExperimentAnalysisDraft,
- ExperimentReceipt, ListNodesQuery, MetricBestQuery, MetricFieldSource, MetricKeyQuery,
- MetricKeySummary, MetricRankOrder, NodeSummary, OpenExperimentRequest, OpenExperimentSummary,
- ProjectStore, RemoveSchemaFieldRequest, StoreError, UpsertSchemaFieldRequest,
+ AttachmentSelector, CloseExperimentRequest, CreateArtifactRequest, CreateFrontierRequest,
+ CreateHypothesisRequest, DefineMetricRequest, DefineRunDimensionRequest, EntityHistoryEntry,
+ ExperimentOutcomePatch, FrontierOpenProjection, FrontierRoadmapItemDraft, FrontierSummary,
+ ListArtifactsQuery, ListExperimentsQuery, ListHypothesesQuery, MetricBestEntry,
+ MetricBestQuery, MetricKeySummary, MetricKeysQuery, MetricRankOrder, MetricScope,
+ OpenExperimentRequest, ProjectStatus, ProjectStore, StoreError, TextPatch,
+ UpdateArtifactRequest, UpdateExperimentRequest, UpdateFrontierBriefRequest,
+ UpdateHypothesisRequest, VertexSelector,
};
use serde::Deserialize;
use serde_json::{Map, Value, json};
@@ -42,10 +44,9 @@ impl WorkerService {
WorkerOperation::ReadResource { uri } => format!("resources/read:{uri}"),
};
Self::maybe_inject_transient(&operation_key)?;
-
match operation {
WorkerOperation::CallTool { name, arguments } => self.call_tool(&name, arguments),
- WorkerOperation::ReadResource { uri } => self.read_resource(&uri),
+ WorkerOperation::ReadResource { uri } => Self::read_resource(&uri),
}
}
@@ -53,796 +54,449 @@ impl WorkerService {
let operation = format!("tools/call:{name}");
let (presentation, arguments) =
split_presentation(arguments, &operation, FaultStage::Worker)?;
- match name {
- "project.status" => {
- let status = json!({
- "project_root": self.store.project_root(),
- "state_root": self.store.state_root(),
- "display_name": self.store.config().display_name,
- "schema": self.store.schema().schema_ref(),
- "git_repo_detected": crate::run_git(self.store.project_root(), &["rev-parse", "--show-toplevel"])
- .map_err(store_fault("tools/call:project.status"))?
- .is_some(),
- });
- tool_success(
- project_status_output(&status, self.store.schema()),
- presentation,
- FaultStage::Worker,
- "tools/call:project.status",
- )
+ macro_rules! lift {
+ ($expr:expr) => {
+ with_fault($expr, &operation)?
+ };
+ }
+ let output = match name {
+ "project.status" => project_status_output(&lift!(self.store.status()), &operation)?,
+ "tag.add" => {
+ let args = deserialize::<TagAddArgs>(arguments)?;
+ let tag = lift!(self.store.register_tag(
+ TagName::new(args.name).map_err(store_fault(&operation))?,
+ NonEmptyText::new(args.description).map_err(store_fault(&operation))?,
+ ));
+ tool_output(&tag, FaultStage::Worker, &operation)?
}
- "project.schema" => tool_success(
- project_schema_output(self.store.schema())?,
- presentation,
- FaultStage::Worker,
- "tools/call:project.schema",
- ),
- "schema.field.upsert" => {
- let args = deserialize::<SchemaFieldUpsertToolArgs>(arguments)?;
- let field = self
- .store
- .upsert_schema_field(UpsertSchemaFieldRequest {
- name: NonEmptyText::new(args.name)
- .map_err(store_fault("tools/call:schema.field.upsert"))?,
- node_classes: args
- .node_classes
- .unwrap_or_default()
- .into_iter()
- .map(|class| {
- parse_node_class_name(&class)
- .map_err(store_fault("tools/call:schema.field.upsert"))
- })
- .collect::<Result<_, _>>()?,
- presence: parse_field_presence_name(&args.presence)
- .map_err(store_fault("tools/call:schema.field.upsert"))?,
- severity: parse_diagnostic_severity_name(&args.severity)
- .map_err(store_fault("tools/call:schema.field.upsert"))?,
- role: parse_field_role_name(&args.role)
- .map_err(store_fault("tools/call:schema.field.upsert"))?,
- inference_policy: parse_inference_policy_name(&args.inference_policy)
- .map_err(store_fault("tools/call:schema.field.upsert"))?,
- value_type: args
- .value_type
- .as_deref()
- .map(parse_field_value_type_name)
+ "tag.list" => tag_list_output(&lift!(self.store.list_tags()), &operation)?,
+ "frontier.create" => {
+ let args = deserialize::<FrontierCreateArgs>(arguments)?;
+ let frontier = lift!(
+ self.store.create_frontier(CreateFrontierRequest {
+ label: NonEmptyText::new(args.label).map_err(store_fault(&operation))?,
+ objective: NonEmptyText::new(args.objective)
+ .map_err(store_fault(&operation))?,
+ slug: args
+ .slug
+ .map(Slug::new)
.transpose()
- .map_err(store_fault("tools/call:schema.field.upsert"))?,
+ .map_err(store_fault(&operation))?,
})
- .map_err(store_fault("tools/call:schema.field.upsert"))?;
- tool_success(
- schema_field_upsert_output(self.store.schema(), &field)?,
- presentation,
- FaultStage::Worker,
- "tools/call:schema.field.upsert",
- )
+ );
+ frontier_record_output(&frontier, &operation)?
}
- "schema.field.remove" => {
- let args = deserialize::<SchemaFieldRemoveToolArgs>(arguments)?;
- let removed_count = self
- .store
- .remove_schema_field(RemoveSchemaFieldRequest {
- name: NonEmptyText::new(args.name)
- .map_err(store_fault("tools/call:schema.field.remove"))?,
- node_classes: args
- .node_classes
- .map(|node_classes| {
- node_classes
- .into_iter()
- .map(|class| {
- parse_node_class_name(&class)
- .map_err(store_fault("tools/call:schema.field.remove"))
- })
- .collect::<Result<_, _>>()
- })
- .transpose()?,
- })
- .map_err(store_fault("tools/call:schema.field.remove"))?;
- tool_success(
- schema_field_remove_output(self.store.schema(), removed_count)?,
- presentation,
- FaultStage::Worker,
- "tools/call:schema.field.remove",
- )
+ "frontier.list" => {
+ frontier_list_output(&lift!(self.store.list_frontiers()), &operation)?
}
- "tag.add" => {
- let args = deserialize::<TagAddToolArgs>(arguments)?;
- let tag = self
- .store
- .add_tag(
- TagName::new(args.name).map_err(store_fault("tools/call:tag.add"))?,
- NonEmptyText::new(args.description)
- .map_err(store_fault("tools/call:tag.add"))?,
- )
- .map_err(store_fault("tools/call:tag.add"))?;
- tool_success(
- tag_add_output(&tag)?,
- presentation,
- FaultStage::Worker,
- "tools/call:tag.add",
- )
+ "frontier.read" => {
+ let args = deserialize::<FrontierSelectorArgs>(arguments)?;
+ frontier_record_output(
+ &lift!(self.store.read_frontier(&args.frontier)),
+ &operation,
+ )?
}
- "tag.list" => {
- let tags = self
- .store
- .list_tags()
- .map_err(store_fault("tools/call:tag.list"))?;
- tool_success(
- tag_list_output(tags.as_slice())?,
- presentation,
- FaultStage::Worker,
- "tools/call:tag.list",
- )
+ "frontier.open" => {
+ let args = deserialize::<FrontierSelectorArgs>(arguments)?;
+ frontier_open_output(&lift!(self.store.frontier_open(&args.frontier)), &operation)?
}
- "frontier.list" => {
- let frontiers = self
- .store
- .list_frontiers()
- .map_err(store_fault("tools/call:frontier.list"))?;
- tool_success(
- frontier_list_output(frontiers.as_slice())?,
- presentation,
- FaultStage::Worker,
- "tools/call:frontier.list",
- )
+ "frontier.brief.update" => {
+ let args = deserialize::<FrontierBriefUpdateArgs>(arguments)?;
+ let frontier = lift!(
+ self.store
+ .update_frontier_brief(UpdateFrontierBriefRequest {
+ frontier: args.frontier,
+ expected_revision: args.expected_revision,
+ situation: nullable_text_patch_from_wire(args.situation, &operation)?,
+ roadmap: args
+ .roadmap
+ .map(|items| {
+ items
+ .into_iter()
+ .map(|item| {
+ Ok(FrontierRoadmapItemDraft {
+ rank: item.rank,
+ hypothesis: item.hypothesis,
+ summary: item
+ .summary
+ .map(NonEmptyText::new)
+ .transpose()
+ .map_err(store_fault(&operation))?,
+ })
+ })
+ .collect::<Result<Vec<_>, FaultRecord>>()
+ })
+ .transpose()?,
+ unknowns: args
+ .unknowns
+ .map(|items| {
+ items
+ .into_iter()
+ .map(NonEmptyText::new)
+ .collect::<Result<Vec<_>, _>>()
+ .map_err(store_fault(&operation))
+ })
+ .transpose()?,
+ })
+ );
+ frontier_record_output(&frontier, &operation)?
}
- "frontier.status" => {
- let args = deserialize::<FrontierStatusToolArgs>(arguments)?;
- let projection = self
- .store
- .frontier_projection(
- crate::parse_frontier_id(&args.frontier_id)
- .map_err(store_fault("tools/call:frontier.status"))?,
- )
- .map_err(store_fault("tools/call:frontier.status"))?;
- tool_success(
- frontier_status_output(&projection)?,
- presentation,
- FaultStage::Worker,
- "tools/call:frontier.status",
- )
+ "frontier.history" => {
+ let args = deserialize::<FrontierSelectorArgs>(arguments)?;
+ history_output(
+ &lift!(self.store.frontier_history(&args.frontier)),
+ &operation,
+ )?
}
- "frontier.init" => {
- let args = deserialize::<FrontierInitToolArgs>(arguments)?;
- let projection = self
- .store
- .create_frontier(CreateFrontierRequest {
- label: NonEmptyText::new(args.label)
- .map_err(store_fault("tools/call:frontier.init"))?,
- contract_title: NonEmptyText::new(args.contract_title)
- .map_err(store_fault("tools/call:frontier.init"))?,
- contract_summary: args
- .contract_summary
- .map(NonEmptyText::new)
+ "hypothesis.record" => {
+ let args = deserialize::<HypothesisRecordArgs>(arguments)?;
+ let hypothesis = lift!(
+ self.store.create_hypothesis(CreateHypothesisRequest {
+ frontier: args.frontier,
+ slug: args
+ .slug
+ .map(Slug::new)
.transpose()
- .map_err(store_fault("tools/call:frontier.init"))?,
- contract: FrontierContract {
- objective: NonEmptyText::new(args.objective)
- .map_err(store_fault("tools/call:frontier.init"))?,
- evaluation: fidget_spinner_core::EvaluationProtocol {
- benchmark_suites: crate::to_text_set(args.benchmark_suites)
- .map_err(store_fault("tools/call:frontier.init"))?,
- primary_metric: MetricSpec {
- metric_key: NonEmptyText::new(args.primary_metric.key)
- .map_err(store_fault("tools/call:frontier.init"))?,
- unit: parse_metric_unit_name(&args.primary_metric.unit)
- .map_err(store_fault("tools/call:frontier.init"))?,
- objective: crate::parse_optimization_objective(
- &args.primary_metric.objective,
- )
- .map_err(store_fault("tools/call:frontier.init"))?,
- },
- supporting_metrics: args
- .supporting_metrics
- .into_iter()
- .map(metric_spec_from_wire)
- .collect::<Result<_, _>>()
- .map_err(store_fault("tools/call:frontier.init"))?,
- },
- promotion_criteria: crate::to_text_vec(args.promotion_criteria)
- .map_err(store_fault("tools/call:frontier.init"))?,
- },
+ .map_err(store_fault(&operation))?,
+ title: NonEmptyText::new(args.title).map_err(store_fault(&operation))?,
+ summary: NonEmptyText::new(args.summary)
+ .map_err(store_fault(&operation))?,
+ body: NonEmptyText::new(args.body).map_err(store_fault(&operation))?,
+ tags: tags_to_set(args.tags.unwrap_or_default())
+ .map_err(store_fault(&operation))?,
+ parents: args.parents.unwrap_or_default(),
})
- .map_err(store_fault("tools/call:frontier.init"))?;
- tool_success(
- frontier_created_output(&projection)?,
- presentation,
- FaultStage::Worker,
- "tools/call:frontier.init",
- )
+ );
+ hypothesis_record_output(&hypothesis, &operation)?
+ }
+ "hypothesis.list" => {
+ let args = deserialize::<HypothesisListArgs>(arguments)?;
+ let hypotheses = lift!(
+ self.store.list_hypotheses(ListHypothesesQuery {
+ frontier: args.frontier,
+ tags: tags_to_set(args.tags.unwrap_or_default())
+ .map_err(store_fault(&operation))?,
+ include_archived: args.include_archived.unwrap_or(false),
+ limit: args.limit,
+ })
+ );
+ hypothesis_list_output(&hypotheses, &operation)?
}
- "node.create" => {
- let args = deserialize::<NodeCreateToolArgs>(arguments)?;
- let node = self
- .store
- .add_node(CreateNodeRequest {
- class: parse_node_class_name(&args.class)
- .map_err(store_fault("tools/call:node.create"))?,
- frontier_id: args
- .frontier_id
- .as_deref()
- .map(crate::parse_frontier_id)
+ "hypothesis.read" => {
+ let args = deserialize::<HypothesisSelectorArgs>(arguments)?;
+ hypothesis_detail_output(
+ &lift!(self.store.read_hypothesis(&args.hypothesis)),
+ &operation,
+ )?
+ }
+ "hypothesis.update" => {
+ let args = deserialize::<HypothesisUpdateArgs>(arguments)?;
+ let hypothesis = lift!(
+ self.store.update_hypothesis(UpdateHypothesisRequest {
+ hypothesis: args.hypothesis,
+ expected_revision: args.expected_revision,
+ title: args
+ .title
+ .map(NonEmptyText::new)
.transpose()
- .map_err(store_fault("tools/call:node.create"))?,
- title: NonEmptyText::new(args.title)
- .map_err(store_fault("tools/call:node.create"))?,
+ .map_err(store_fault(&operation))?,
summary: args
.summary
.map(NonEmptyText::new)
.transpose()
- .map_err(store_fault("tools/call:node.create"))?,
+ .map_err(store_fault(&operation))?,
+ body: args
+ .body
+ .map(NonEmptyText::new)
+ .transpose()
+ .map_err(store_fault(&operation))?,
tags: args
.tags
- .map(parse_tag_set)
+ .map(tags_to_set)
.transpose()
- .map_err(store_fault("tools/call:node.create"))?,
- payload: NodePayload::with_schema(
- self.store.schema().schema_ref(),
- args.payload.unwrap_or_default(),
- ),
- annotations: tool_annotations(args.annotations)
- .map_err(store_fault("tools/call:node.create"))?,
- attachments: lineage_attachments(args.parents)
- .map_err(store_fault("tools/call:node.create"))?,
+ .map_err(store_fault(&operation))?,
+ parents: args.parents,
+ archived: args.archived,
})
- .map_err(store_fault("tools/call:node.create"))?;
- tool_success(
- created_node_output("created node", &node, "tools/call:node.create")?,
- presentation,
- FaultStage::Worker,
- "tools/call:node.create",
- )
+ );
+ hypothesis_record_output(&hypothesis, &operation)?
}
- "hypothesis.record" => {
- let args = deserialize::<HypothesisRecordToolArgs>(arguments)?;
- let node = self
- .store
- .add_node(CreateNodeRequest {
- class: NodeClass::Hypothesis,
- frontier_id: Some(
- crate::parse_frontier_id(&args.frontier_id)
- .map_err(store_fault("tools/call:hypothesis.record"))?,
- ),
- title: NonEmptyText::new(args.title)
- .map_err(store_fault("tools/call:hypothesis.record"))?,
- summary: Some(
- NonEmptyText::new(args.summary)
- .map_err(store_fault("tools/call:hypothesis.record"))?,
- ),
- tags: None,
- payload: NodePayload::with_schema(
- self.store.schema().schema_ref(),
- crate::json_object(json!({ "body": args.body }))
- .map_err(store_fault("tools/call:hypothesis.record"))?,
- ),
- annotations: tool_annotations(args.annotations)
- .map_err(store_fault("tools/call:hypothesis.record"))?,
- attachments: lineage_attachments(args.parents)
- .map_err(store_fault("tools/call:hypothesis.record"))?,
- })
- .map_err(store_fault("tools/call:hypothesis.record"))?;
- tool_success(
- created_node_output(
- "recorded hypothesis",
- &node,
- "tools/call:hypothesis.record",
- )?,
- presentation,
- FaultStage::Worker,
- "tools/call:hypothesis.record",
- )
+ "hypothesis.history" => {
+ let args = deserialize::<HypothesisSelectorArgs>(arguments)?;
+ history_output(
+ &lift!(self.store.hypothesis_history(&args.hypothesis)),
+ &operation,
+ )?
}
- "node.list" => {
- let args = deserialize::<NodeListToolArgs>(arguments)?;
- let nodes = self
- .store
- .list_nodes(ListNodesQuery {
- frontier_id: args
- .frontier_id
- .as_deref()
- .map(crate::parse_frontier_id)
+ "experiment.open" => {
+ let args = deserialize::<ExperimentOpenArgs>(arguments)?;
+ let experiment = lift!(
+ self.store.open_experiment(OpenExperimentRequest {
+ hypothesis: args.hypothesis,
+ slug: args
+ .slug
+ .map(Slug::new)
.transpose()
- .map_err(store_fault("tools/call:node.list"))?,
- class: args
- .class
- .as_deref()
- .map(parse_node_class_name)
+ .map_err(store_fault(&operation))?,
+ title: NonEmptyText::new(args.title).map_err(store_fault(&operation))?,
+ summary: args
+ .summary
+ .map(NonEmptyText::new)
.transpose()
- .map_err(store_fault("tools/call:node.list"))?,
- tags: parse_tag_set(args.tags)
- .map_err(store_fault("tools/call:node.list"))?,
- include_archived: args.include_archived,
- limit: args.limit.unwrap_or(20),
+ .map_err(store_fault(&operation))?,
+ tags: tags_to_set(args.tags.unwrap_or_default())
+ .map_err(store_fault(&operation))?,
+ parents: args.parents.unwrap_or_default(),
})
- .map_err(store_fault("tools/call:node.list"))?;
- tool_success(
- node_list_output(nodes.as_slice())?,
- presentation,
- FaultStage::Worker,
- "tools/call:node.list",
- )
- }
- "node.read" => {
- let args = deserialize::<NodeReadToolArgs>(arguments)?;
- let node_id = crate::parse_node_id(&args.node_id)
- .map_err(store_fault("tools/call:node.read"))?;
- let node = self
- .store
- .get_node(node_id)
- .map_err(store_fault("tools/call:node.read"))?
- .ok_or_else(|| {
- FaultRecord::new(
- FaultKind::InvalidInput,
- FaultStage::Store,
- "tools/call:node.read",
- format!("node {node_id} was not found"),
- )
- })?;
- tool_success(
- node_read_output(&node)?,
- presentation,
- FaultStage::Worker,
- "tools/call:node.read",
- )
- }
- "node.annotate" => {
- let args = deserialize::<NodeAnnotateToolArgs>(arguments)?;
- let annotation = NodeAnnotation {
- id: fidget_spinner_core::AnnotationId::fresh(),
- visibility: if args.visible {
- AnnotationVisibility::Visible
- } else {
- AnnotationVisibility::HiddenByDefault
- },
- label: args
- .label
- .map(NonEmptyText::new)
- .transpose()
- .map_err(store_fault("tools/call:node.annotate"))?,
- body: NonEmptyText::new(args.body)
- .map_err(store_fault("tools/call:node.annotate"))?,
- created_at: time::OffsetDateTime::now_utc(),
- };
- self.store
- .annotate_node(
- crate::parse_node_id(&args.node_id)
- .map_err(store_fault("tools/call:node.annotate"))?,
- annotation,
- )
- .map_err(store_fault("tools/call:node.annotate"))?;
- tool_success(
- tool_output(
- &json!({"annotated": args.node_id}),
- FaultStage::Worker,
- "tools/call:node.annotate",
- )?,
- presentation,
- FaultStage::Worker,
- "tools/call:node.annotate",
- )
- }
- "node.archive" => {
- let args = deserialize::<NodeArchiveToolArgs>(arguments)?;
- self.store
- .archive_node(
- crate::parse_node_id(&args.node_id)
- .map_err(store_fault("tools/call:node.archive"))?,
- )
- .map_err(store_fault("tools/call:node.archive"))?;
- tool_success(
- tool_output(
- &json!({"archived": args.node_id}),
- FaultStage::Worker,
- "tools/call:node.archive",
- )?,
- presentation,
- FaultStage::Worker,
- "tools/call:node.archive",
- )
+ );
+ experiment_record_output(&experiment, &operation)?
}
- "note.quick" => {
- let args = deserialize::<QuickNoteToolArgs>(arguments)?;
- let node = self
- .store
- .add_node(CreateNodeRequest {
- class: NodeClass::Note,
- frontier_id: args
- .frontier_id
- .as_deref()
- .map(crate::parse_frontier_id)
- .transpose()
- .map_err(store_fault("tools/call:note.quick"))?,
- title: NonEmptyText::new(args.title)
- .map_err(store_fault("tools/call:note.quick"))?,
- summary: Some(
- NonEmptyText::new(args.summary)
- .map_err(store_fault("tools/call:note.quick"))?,
- ),
- tags: Some(
- parse_tag_set(args.tags)
- .map_err(store_fault("tools/call:note.quick"))?,
- ),
- payload: NodePayload::with_schema(
- self.store.schema().schema_ref(),
- crate::json_object(json!({ "body": args.body }))
- .map_err(store_fault("tools/call:note.quick"))?,
- ),
- annotations: tool_annotations(args.annotations)
- .map_err(store_fault("tools/call:note.quick"))?,
- attachments: lineage_attachments(args.parents)
- .map_err(store_fault("tools/call:note.quick"))?,
+ "experiment.list" => {
+ let args = deserialize::<ExperimentListArgs>(arguments)?;
+ let experiments = lift!(
+ self.store.list_experiments(ListExperimentsQuery {
+ frontier: args.frontier,
+ hypothesis: args.hypothesis,
+ tags: tags_to_set(args.tags.unwrap_or_default())
+ .map_err(store_fault(&operation))?,
+ include_archived: args.include_archived.unwrap_or(false),
+ status: args.status,
+ limit: args.limit,
})
- .map_err(store_fault("tools/call:note.quick"))?;
- tool_success(
- created_node_output("recorded note", &node, "tools/call:note.quick")?,
- presentation,
- FaultStage::Worker,
- "tools/call:note.quick",
- )
+ );
+ experiment_list_output(&experiments, &operation)?
+ }
+ "experiment.read" => {
+ let args = deserialize::<ExperimentSelectorArgs>(arguments)?;
+ experiment_detail_output(
+ &lift!(self.store.read_experiment(&args.experiment)),
+ &operation,
+ )?
}
- "source.record" => {
- let args = deserialize::<SourceRecordToolArgs>(arguments)?;
- let node = self
- .store
- .add_node(CreateNodeRequest {
- class: NodeClass::Source,
- frontier_id: args
- .frontier_id
- .as_deref()
- .map(crate::parse_frontier_id)
+ "experiment.update" => {
+ let args = deserialize::<ExperimentUpdateArgs>(arguments)?;
+ let experiment = lift!(
+ self.store.update_experiment(UpdateExperimentRequest {
+ experiment: args.experiment,
+ expected_revision: args.expected_revision,
+ title: args
+ .title
+ .map(NonEmptyText::new)
.transpose()
- .map_err(store_fault("tools/call:source.record"))?,
- title: NonEmptyText::new(args.title)
- .map_err(store_fault("tools/call:source.record"))?,
- summary: Some(
- NonEmptyText::new(args.summary)
- .map_err(store_fault("tools/call:source.record"))?,
- ),
+ .map_err(store_fault(&operation))?,
+ summary: nullable_text_patch_from_wire(args.summary, &operation)?,
tags: args
.tags
- .map(parse_tag_set)
+ .map(tags_to_set)
.transpose()
- .map_err(store_fault("tools/call:source.record"))?,
- payload: NodePayload::with_schema(
- self.store.schema().schema_ref(),
- crate::json_object(json!({ "body": args.body }))
- .map_err(store_fault("tools/call:source.record"))?,
- ),
- annotations: tool_annotations(args.annotations)
- .map_err(store_fault("tools/call:source.record"))?,
- attachments: lineage_attachments(args.parents)
- .map_err(store_fault("tools/call:source.record"))?,
+ .map_err(store_fault(&operation))?,
+ parents: args.parents,
+ archived: args.archived,
+ outcome: args
+ .outcome
+ .map(|wire| experiment_outcome_patch_from_wire(wire, &operation))
+ .transpose()?,
})
- .map_err(store_fault("tools/call:source.record"))?;
- tool_success(
- created_node_output("recorded source", &node, "tools/call:source.record")?,
- presentation,
- FaultStage::Worker,
- "tools/call:source.record",
- )
+ );
+ experiment_record_output(&experiment, &operation)?
}
- "metric.define" => {
- let args = deserialize::<MetricDefineToolArgs>(arguments)?;
- let metric = self
- .store
- .define_metric(DefineMetricRequest {
- key: NonEmptyText::new(args.key)
- .map_err(store_fault("tools/call:metric.define"))?,
- unit: parse_metric_unit_name(&args.unit)
- .map_err(store_fault("tools/call:metric.define"))?,
- objective: crate::parse_optimization_objective(&args.objective)
- .map_err(store_fault("tools/call:metric.define"))?,
- description: args
- .description
- .map(NonEmptyText::new)
- .transpose()
- .map_err(store_fault("tools/call:metric.define"))?,
+ "experiment.close" => {
+ let args = deserialize::<ExperimentCloseArgs>(arguments)?;
+ let experiment = lift!(
+ self.store.close_experiment(CloseExperimentRequest {
+ experiment: args.experiment,
+ expected_revision: args.expected_revision,
+ backend: args.backend,
+ command: args.command,
+ dimensions: dimension_map_from_wire(args.dimensions)?,
+ primary_metric: metric_value_from_wire(args.primary_metric, &operation)?,
+ supporting_metrics: args
+ .supporting_metrics
+ .unwrap_or_default()
+ .into_iter()
+ .map(|metric| metric_value_from_wire(metric, &operation))
+ .collect::<Result<Vec<_>, _>>()?,
+ verdict: args.verdict,
+ rationale: NonEmptyText::new(args.rationale)
+ .map_err(store_fault(&operation))?,
+ analysis: args
+ .analysis
+ .map(|analysis| experiment_analysis_from_wire(analysis, &operation))
+ .transpose()?,
})
- .map_err(store_fault("tools/call:metric.define"))?;
- tool_success(
- json_created_output(
- "registered metric",
- json!({
- "key": metric.key,
- "unit": metric_unit_name(metric.unit),
- "objective": metric_objective_name(metric.objective),
- "description": metric.description,
- }),
- "tools/call:metric.define",
- )?,
- presentation,
- FaultStage::Worker,
- "tools/call:metric.define",
- )
+ );
+ experiment_record_output(&experiment, &operation)?
}
- "run.dimension.define" => {
- let args = deserialize::<RunDimensionDefineToolArgs>(arguments)?;
- let dimension = self
- .store
- .define_run_dimension(DefineRunDimensionRequest {
- key: NonEmptyText::new(args.key)
- .map_err(store_fault("tools/call:run.dimension.define"))?,
- value_type: parse_field_value_type_name(&args.value_type)
- .map_err(store_fault("tools/call:run.dimension.define"))?,
- description: args
- .description
+ "experiment.history" => {
+ let args = deserialize::<ExperimentSelectorArgs>(arguments)?;
+ history_output(
+ &lift!(self.store.experiment_history(&args.experiment)),
+ &operation,
+ )?
+ }
+ "artifact.record" => {
+ let args = deserialize::<ArtifactRecordArgs>(arguments)?;
+ let artifact = lift!(
+ self.store.create_artifact(CreateArtifactRequest {
+ slug: args
+ .slug
+ .map(Slug::new)
+ .transpose()
+ .map_err(store_fault(&operation))?,
+ kind: args.kind,
+ label: NonEmptyText::new(args.label).map_err(store_fault(&operation))?,
+ summary: args
+ .summary
.map(NonEmptyText::new)
.transpose()
- .map_err(store_fault("tools/call:run.dimension.define"))?,
+ .map_err(store_fault(&operation))?,
+ locator: NonEmptyText::new(args.locator)
+ .map_err(store_fault(&operation))?,
+ media_type: args
+ .media_type
+ .map(NonEmptyText::new)
+ .transpose()
+ .map_err(store_fault(&operation))?,
+ attachments: args.attachments.unwrap_or_default(),
})
- .map_err(store_fault("tools/call:run.dimension.define"))?;
- tool_success(
- json_created_output(
- "registered run dimension",
- json!({
- "key": dimension.key,
- "value_type": dimension.value_type.as_str(),
- "description": dimension.description,
- }),
- "tools/call:run.dimension.define",
- )?,
- presentation,
- FaultStage::Worker,
- "tools/call:run.dimension.define",
- )
+ );
+ artifact_record_output(&artifact, &operation)?
}
- "run.dimension.list" => {
- let items = self
- .store
- .list_run_dimensions()
- .map_err(store_fault("tools/call:run.dimension.list"))?;
- tool_success(
- run_dimension_list_output(items.as_slice())?,
- presentation,
- FaultStage::Worker,
- "tools/call:run.dimension.list",
- )
+ "artifact.list" => {
+ let args = deserialize::<ArtifactListArgs>(arguments)?;
+ let artifacts = lift!(self.store.list_artifacts(ListArtifactsQuery {
+ frontier: args.frontier,
+ kind: args.kind,
+ attached_to: args.attached_to,
+ limit: args.limit,
+ }));
+ artifact_list_output(&artifacts, &operation)?
}
- "metric.keys" => {
- let args = deserialize::<MetricKeysToolArgs>(arguments)?;
- let keys = self
- .store
- .list_metric_keys_filtered(MetricKeyQuery {
- frontier_id: args
- .frontier_id
- .as_deref()
- .map(crate::parse_frontier_id)
- .transpose()
- .map_err(store_fault("tools/call:metric.keys"))?,
- source: args
- .source
- .as_deref()
- .map(parse_metric_source_name)
- .transpose()
- .map_err(store_fault("tools/call:metric.keys"))?,
- dimensions: coerce_tool_dimensions(
- &self.store,
- args.dimensions.unwrap_or_default(),
- "tools/call:metric.keys",
- )?,
- })
- .map_err(store_fault("tools/call:metric.keys"))?;
- tool_success(
- metric_keys_output(keys.as_slice())?,
- presentation,
- FaultStage::Worker,
- "tools/call:metric.keys",
- )
+ "artifact.read" => {
+ let args = deserialize::<ArtifactSelectorArgs>(arguments)?;
+ artifact_detail_output(
+ &lift!(self.store.read_artifact(&args.artifact)),
+ &operation,
+ )?
}
- "metric.best" => {
- let args = deserialize::<MetricBestToolArgs>(arguments)?;
- let items = self
- .store
- .best_metrics(MetricBestQuery {
- key: NonEmptyText::new(args.key)
- .map_err(store_fault("tools/call:metric.best"))?,
- frontier_id: args
- .frontier_id
- .as_deref()
- .map(crate::parse_frontier_id)
- .transpose()
- .map_err(store_fault("tools/call:metric.best"))?,
- source: args
- .source
- .as_deref()
- .map(parse_metric_source_name)
+ "artifact.update" => {
+ let args = deserialize::<ArtifactUpdateArgs>(arguments)?;
+ let artifact = lift!(
+ self.store.update_artifact(UpdateArtifactRequest {
+ artifact: args.artifact,
+ expected_revision: args.expected_revision,
+ kind: args.kind,
+ label: args
+ .label
+ .map(NonEmptyText::new)
.transpose()
- .map_err(store_fault("tools/call:metric.best"))?,
- dimensions: coerce_tool_dimensions(
- &self.store,
- args.dimensions.unwrap_or_default(),
- "tools/call:metric.best",
- )?,
- order: args
- .order
- .as_deref()
- .map(parse_metric_order_name)
+ .map_err(store_fault(&operation))?,
+ summary: nullable_text_patch_from_wire(args.summary, &operation)?,
+ locator: args
+ .locator
+ .map(NonEmptyText::new)
.transpose()
- .map_err(store_fault("tools/call:metric.best"))?,
- limit: args.limit.unwrap_or(10),
+ .map_err(store_fault(&operation))?,
+ media_type: nullable_text_patch_from_wire(args.media_type, &operation)?,
+ attachments: args.attachments,
})
- .map_err(store_fault("tools/call:metric.best"))?;
- tool_success(
- metric_best_output(items.as_slice())?,
- presentation,
- FaultStage::Worker,
- "tools/call:metric.best",
- )
+ );
+ artifact_record_output(&artifact, &operation)?
}
- "metric.migrate" => {
- let report = self
- .store
- .migrate_metric_plane()
- .map_err(store_fault("tools/call:metric.migrate"))?;
- tool_success(
- json_created_output(
- "normalized legacy metric plane",
- json!(report),
- "tools/call:metric.migrate",
- )?,
- presentation,
- FaultStage::Worker,
- "tools/call:metric.migrate",
- )
+ "artifact.history" => {
+ let args = deserialize::<ArtifactSelectorArgs>(arguments)?;
+ history_output(
+ &lift!(self.store.artifact_history(&args.artifact)),
+ &operation,
+ )?
}
- "experiment.open" => {
- let args = deserialize::<ExperimentOpenToolArgs>(arguments)?;
- let item = self
- .store
- .open_experiment(OpenExperimentRequest {
- frontier_id: crate::parse_frontier_id(&args.frontier_id)
- .map_err(store_fault("tools/call:experiment.open"))?,
- hypothesis_node_id: crate::parse_node_id(&args.hypothesis_node_id)
- .map_err(store_fault("tools/call:experiment.open"))?,
- title: NonEmptyText::new(args.title)
- .map_err(store_fault("tools/call:experiment.open"))?,
- summary: args
- .summary
- .map(NonEmptyText::new)
- .transpose()
- .map_err(store_fault("tools/call:experiment.open"))?,
- })
- .map_err(store_fault("tools/call:experiment.open"))?;
- tool_success(
- experiment_open_output(
- &item,
- "tools/call:experiment.open",
- "opened experiment",
- )?,
- presentation,
+ "metric.define" => {
+ let args = deserialize::<MetricDefineArgs>(arguments)?;
+ tool_output(
+ &lift!(
+ self.store.define_metric(DefineMetricRequest {
+ key: NonEmptyText::new(args.key).map_err(store_fault(&operation))?,
+ unit: args.unit,
+ objective: args.objective,
+ visibility: args.visibility.unwrap_or(MetricVisibility::Canonical),
+ description: args
+ .description
+ .map(NonEmptyText::new)
+ .transpose()
+ .map_err(store_fault(&operation))?,
+ })
+ ),
FaultStage::Worker,
- "tools/call:experiment.open",
- )
+ &operation,
+ )?
}
- "experiment.list" => {
- let args = deserialize::<ExperimentListToolArgs>(arguments)?;
- let items = self
- .store
- .list_open_experiments(
- args.frontier_id
- .as_deref()
- .map(crate::parse_frontier_id)
- .transpose()
- .map_err(store_fault("tools/call:experiment.list"))?,
- )
- .map_err(store_fault("tools/call:experiment.list"))?;
- tool_success(
- experiment_list_output(items.as_slice())?,
- presentation,
- FaultStage::Worker,
- "tools/call:experiment.list",
- )
+ "metric.keys" => {
+ let args = deserialize::<MetricKeysArgs>(arguments)?;
+ metric_keys_output(
+ &lift!(self.store.metric_keys(MetricKeysQuery {
+ frontier: args.frontier,
+ scope: args.scope.unwrap_or(MetricScope::Live),
+ })),
+ &operation,
+ )?
}
- "experiment.read" => {
- let args = deserialize::<ExperimentReadToolArgs>(arguments)?;
- let item = self
- .store
- .read_open_experiment(
- crate::parse_experiment_id(&args.experiment_id)
- .map_err(store_fault("tools/call:experiment.read"))?,
- )
- .map_err(store_fault("tools/call:experiment.read"))?;
- tool_success(
- experiment_open_output(&item, "tools/call:experiment.read", "open experiment")?,
- presentation,
- FaultStage::Worker,
- "tools/call:experiment.read",
- )
+ "metric.best" => {
+ let args = deserialize::<MetricBestArgs>(arguments)?;
+ metric_best_output(
+ &lift!(self.store.metric_best(MetricBestQuery {
+ frontier: args.frontier,
+ hypothesis: args.hypothesis,
+ key: NonEmptyText::new(args.key).map_err(store_fault(&operation))?,
+ dimensions: dimension_map_from_wire(args.dimensions)?,
+ include_rejected: args.include_rejected.unwrap_or(false),
+ limit: args.limit,
+ order: args.order,
+ })),
+ &operation,
+ )?
}
- "experiment.close" => {
- let args = deserialize::<ExperimentCloseToolArgs>(arguments)?;
- let receipt = self
- .store
- .close_experiment(CloseExperimentRequest {
- experiment_id: crate::parse_experiment_id(&args.experiment_id)
- .map_err(store_fault("tools/call:experiment.close"))?,
- run_title: NonEmptyText::new(args.run.title)
- .map_err(store_fault("tools/call:experiment.close"))?,
- run_summary: args
- .run
- .summary
- .map(NonEmptyText::new)
- .transpose()
- .map_err(store_fault("tools/call:experiment.close"))?,
- backend: parse_backend_name(&args.run.backend)
- .map_err(store_fault("tools/call:experiment.close"))?,
- dimensions: coerce_tool_dimensions(
- &self.store,
- args.run.dimensions,
- "tools/call:experiment.close",
- )?,
- command: command_recipe_from_wire(
- args.run.command,
- self.store.project_root(),
- )
- .map_err(store_fault("tools/call:experiment.close"))?,
- primary_metric: metric_value_from_wire(args.primary_metric)
- .map_err(store_fault("tools/call:experiment.close"))?,
- supporting_metrics: args
- .supporting_metrics
- .into_iter()
- .map(metric_value_from_wire)
- .collect::<Result<Vec<_>, _>>()
- .map_err(store_fault("tools/call:experiment.close"))?,
- note: FrontierNote {
- summary: NonEmptyText::new(args.note.summary)
- .map_err(store_fault("tools/call:experiment.close"))?,
- next_hypotheses: crate::to_text_vec(args.note.next_hypotheses)
- .map_err(store_fault("tools/call:experiment.close"))?,
- },
- verdict: parse_verdict_name(&args.verdict)
- .map_err(store_fault("tools/call:experiment.close"))?,
- analysis: args
- .analysis
- .map(experiment_analysis_from_wire)
- .transpose()
- .map_err(store_fault("tools/call:experiment.close"))?,
- decision_title: NonEmptyText::new(args.decision_title)
- .map_err(store_fault("tools/call:experiment.close"))?,
- decision_rationale: NonEmptyText::new(args.decision_rationale)
- .map_err(store_fault("tools/call:experiment.close"))?,
- })
- .map_err(store_fault("tools/call:experiment.close"))?;
- tool_success(
- experiment_close_output(&self.store, &receipt)?,
- presentation,
+ "run.dimension.define" => {
+ let args = deserialize::<DimensionDefineArgs>(arguments)?;
+ tool_output(
+ &lift!(
+ self.store.define_run_dimension(DefineRunDimensionRequest {
+ key: NonEmptyText::new(args.key).map_err(store_fault(&operation))?,
+ value_type: args.value_type,
+ description: args
+ .description
+ .map(NonEmptyText::new)
+ .transpose()
+ .map_err(store_fault(&operation))?,
+ })
+ ),
FaultStage::Worker,
- "tools/call:experiment.close",
- )
+ &operation,
+ )?
}
- other => Err(FaultRecord::new(
- FaultKind::InvalidInput,
+ "run.dimension.list" => tool_output(
+ &lift!(self.store.list_run_dimensions()),
FaultStage::Worker,
- format!("tools/call:{other}"),
- format!("unknown tool `{other}`"),
- )),
- }
+ &operation,
+ )?,
+ other => {
+ return Err(FaultRecord::new(
+ FaultKind::InvalidInput,
+ FaultStage::Worker,
+ &operation,
+ format!("unknown worker tool `{other}`"),
+ ));
+ }
+ };
+ tool_success(output, presentation, FaultStage::Worker, &operation)
}
- fn read_resource(&mut self, uri: &str) -> Result<Value, FaultRecord> {
- match uri {
- "fidget-spinner://project/config" => Ok(json!({
- "contents": [{
- "uri": uri,
- "mimeType": "application/json",
- "text": crate::to_pretty_json(self.store.config())
- .map_err(store_fault("resources/read:fidget-spinner://project/config"))?,
- }]
- })),
- "fidget-spinner://project/schema" => Ok(json!({
- "contents": [{
- "uri": uri,
- "mimeType": "application/json",
- "text": crate::to_pretty_json(self.store.schema())
- .map_err(store_fault("resources/read:fidget-spinner://project/schema"))?,
- }]
- })),
- _ => Err(FaultRecord::new(
- FaultKind::InvalidInput,
- FaultStage::Worker,
- format!("resources/read:{uri}"),
- format!("unknown resource `{uri}`"),
- )),
- }
+ fn read_resource(uri: &str) -> Result<Value, FaultRecord> {
+ Err(FaultRecord::new(
+ FaultKind::InvalidInput,
+ FaultStage::Worker,
+ format!("resources/read:{uri}"),
+ format!("unknown worker resource `{uri}`"),
+ ))
}
fn maybe_inject_transient(operation: &str) -> Result<(), FaultRecord> {
@@ -877,6 +531,227 @@ impl WorkerService {
}
}
+#[derive(Debug, Deserialize)]
+struct TagAddArgs {
+ name: String,
+ description: String,
+}
+
+#[derive(Debug, Deserialize)]
+struct FrontierCreateArgs {
+ label: String,
+ objective: String,
+ slug: Option<String>,
+}
+
+#[derive(Debug, Deserialize)]
+struct FrontierSelectorArgs {
+ frontier: String,
+}
+
+#[derive(Debug, Deserialize)]
+struct FrontierBriefUpdateArgs {
+ frontier: String,
+ expected_revision: Option<u64>,
+ situation: Option<NullableStringArg>,
+ roadmap: Option<Vec<FrontierRoadmapItemWire>>,
+ unknowns: Option<Vec<String>>,
+}
+
+#[derive(Debug, Deserialize)]
+struct FrontierRoadmapItemWire {
+ rank: u32,
+ hypothesis: String,
+ summary: Option<String>,
+}
+
+#[derive(Debug, Deserialize)]
+struct HypothesisRecordArgs {
+ frontier: String,
+ title: String,
+ summary: String,
+ body: String,
+ slug: Option<String>,
+ tags: Option<Vec<String>>,
+ parents: Option<Vec<VertexSelector>>,
+}
+
+#[derive(Debug, Deserialize)]
+struct HypothesisListArgs {
+ frontier: Option<String>,
+ tags: Option<Vec<String>>,
+ include_archived: Option<bool>,
+ limit: Option<u32>,
+}
+
+#[derive(Debug, Deserialize)]
+struct HypothesisSelectorArgs {
+ hypothesis: String,
+}
+
+#[derive(Debug, Deserialize)]
+struct HypothesisUpdateArgs {
+ hypothesis: String,
+ expected_revision: Option<u64>,
+ title: Option<String>,
+ summary: Option<String>,
+ body: Option<String>,
+ tags: Option<Vec<String>>,
+ parents: Option<Vec<VertexSelector>>,
+ archived: Option<bool>,
+}
+
+#[derive(Debug, Deserialize)]
+struct ExperimentOpenArgs {
+ hypothesis: String,
+ title: String,
+ summary: Option<String>,
+ slug: Option<String>,
+ tags: Option<Vec<String>>,
+ parents: Option<Vec<VertexSelector>>,
+}
+
+#[derive(Debug, Deserialize)]
+struct ExperimentListArgs {
+ frontier: Option<String>,
+ hypothesis: Option<String>,
+ tags: Option<Vec<String>>,
+ include_archived: Option<bool>,
+ status: Option<ExperimentStatus>,
+ limit: Option<u32>,
+}
+
+#[derive(Debug, Deserialize)]
+struct ExperimentSelectorArgs {
+ experiment: String,
+}
+
+#[derive(Debug, Deserialize)]
+struct ExperimentUpdateArgs {
+ experiment: String,
+ expected_revision: Option<u64>,
+ title: Option<String>,
+ summary: Option<NullableStringArg>,
+ tags: Option<Vec<String>>,
+ parents: Option<Vec<VertexSelector>>,
+ archived: Option<bool>,
+ outcome: Option<ExperimentOutcomeWire>,
+}
+
+#[derive(Debug, Deserialize)]
+struct ExperimentCloseArgs {
+ experiment: String,
+ expected_revision: Option<u64>,
+ backend: ExecutionBackend,
+ command: CommandRecipe,
+ dimensions: Option<Map<String, Value>>,
+ primary_metric: MetricValueWire,
+ supporting_metrics: Option<Vec<MetricValueWire>>,
+ verdict: FrontierVerdict,
+ rationale: String,
+ analysis: Option<ExperimentAnalysisWire>,
+}
+
+#[derive(Debug, Deserialize)]
+struct ExperimentOutcomeWire {
+ backend: ExecutionBackend,
+ command: CommandRecipe,
+ dimensions: Option<Map<String, Value>>,
+ primary_metric: MetricValueWire,
+ supporting_metrics: Option<Vec<MetricValueWire>>,
+ verdict: FrontierVerdict,
+ rationale: String,
+ analysis: Option<ExperimentAnalysisWire>,
+}
+
+#[derive(Debug, Deserialize)]
+struct ExperimentAnalysisWire {
+ summary: String,
+ body: String,
+}
+
+#[derive(Debug, Deserialize)]
+struct MetricValueWire {
+ key: String,
+ value: f64,
+}
+
+#[derive(Debug, Deserialize)]
+struct ArtifactRecordArgs {
+ kind: ArtifactKind,
+ label: String,
+ summary: Option<String>,
+ locator: String,
+ media_type: Option<String>,
+ slug: Option<String>,
+ attachments: Option<Vec<AttachmentSelector>>,
+}
+
+#[derive(Debug, Deserialize)]
+struct ArtifactListArgs {
+ frontier: Option<String>,
+ kind: Option<ArtifactKind>,
+ attached_to: Option<AttachmentSelector>,
+ limit: Option<u32>,
+}
+
+#[derive(Debug, Deserialize)]
+struct ArtifactSelectorArgs {
+ artifact: String,
+}
+
+#[derive(Debug, Deserialize)]
+struct ArtifactUpdateArgs {
+ artifact: String,
+ expected_revision: Option<u64>,
+ kind: Option<ArtifactKind>,
+ label: Option<String>,
+ summary: Option<NullableStringArg>,
+ locator: Option<String>,
+ media_type: Option<NullableStringArg>,
+ attachments: Option<Vec<AttachmentSelector>>,
+}
+
+#[derive(Debug, Deserialize)]
+#[serde(untagged)]
+enum NullableStringArg {
+ Set(String),
+ Clear(()),
+}
+
+#[derive(Debug, Deserialize)]
+struct MetricDefineArgs {
+ key: String,
+ unit: MetricUnit,
+ objective: OptimizationObjective,
+ visibility: Option<MetricVisibility>,
+ description: Option<String>,
+}
+
+#[derive(Debug, Deserialize)]
+struct MetricKeysArgs {
+ frontier: Option<String>,
+ scope: Option<MetricScope>,
+}
+
+#[derive(Debug, Deserialize)]
+struct MetricBestArgs {
+ frontier: Option<String>,
+ hypothesis: Option<String>,
+ key: String,
+ dimensions: Option<Map<String, Value>>,
+ include_rejected: Option<bool>,
+ limit: Option<u32>,
+ order: Option<MetricRankOrder>,
+}
+
+#[derive(Debug, Deserialize)]
+struct DimensionDefineArgs {
+ key: String,
+ value_type: FieldValueType,
+ description: Option<String>,
+}
+
fn deserialize<T: for<'de> Deserialize<'de>>(value: Value) -> Result<T, FaultRecord> {
serde_json::from_value(value).map_err(|error| {
FaultRecord::new(
@@ -888,256 +763,302 @@ fn deserialize<T: for<'de> Deserialize<'de>>(value: Value) -> Result<T, FaultRec
})
}
-fn project_status_output(full: &Value, schema: &ProjectSchema) -> ToolOutput {
- let concise = json!({
- "display_name": full["display_name"],
- "project_root": full["project_root"],
- "state_root": full["state_root"],
- "schema": schema_label(schema),
- "git_repo_detected": full["git_repo_detected"],
- });
- let git = if full["git_repo_detected"].as_bool().unwrap_or(false) {
- "detected"
- } else {
- "not detected"
- };
- ToolOutput::from_values(
- concise,
- full.clone(),
- [
- format!("project {}", value_summary(&full["display_name"])),
- format!("root: {}", value_summary(&full["project_root"])),
- format!("state: {}", value_summary(&full["state_root"])),
- format!("schema: {}", schema_label(schema)),
- format!("git: {git}"),
- ]
- .join("\n"),
- None,
- )
+fn store_fault<E>(operation: &str) -> impl FnOnce(E) -> FaultRecord + '_
+where
+ E: Into<StoreError>,
+{
+ move |error| {
+ let error: StoreError = error.into();
+ let kind = match error {
+ StoreError::MissingProjectStore(_)
+ | StoreError::AmbiguousProjectStoreDiscovery { .. }
+ | StoreError::UnknownTag(_)
+ | StoreError::UnknownMetricDefinition(_)
+ | StoreError::UnknownRunDimension(_)
+ | StoreError::UnknownFrontierSelector(_)
+ | StoreError::UnknownHypothesisSelector(_)
+ | StoreError::UnknownExperimentSelector(_)
+ | StoreError::UnknownArtifactSelector(_)
+ | StoreError::RevisionMismatch { .. }
+ | StoreError::HypothesisBodyMustBeSingleParagraph
+ | StoreError::ExperimentHypothesisRequired
+ | StoreError::ExperimentAlreadyClosed(_)
+ | StoreError::ExperimentStillOpen(_)
+ | StoreError::CrossFrontierInfluence
+ | StoreError::SelfEdge
+ | StoreError::UnknownRoadmapHypothesis(_)
+ | StoreError::ManualExperimentRequiresCommand
+ | StoreError::MetricOrderRequired { .. }
+ | StoreError::UnknownDimensionFilter(_)
+ | StoreError::DuplicateTag(_)
+ | StoreError::DuplicateMetricDefinition(_)
+ | StoreError::DuplicateRunDimension(_)
+ | StoreError::InvalidInput(_) => FaultKind::InvalidInput,
+ StoreError::IncompatibleStoreFormatVersion { .. } => FaultKind::Unavailable,
+ StoreError::Io(_)
+ | StoreError::Sql(_)
+ | StoreError::Json(_)
+ | StoreError::TimeParse(_)
+ | StoreError::TimeFormat(_)
+ | StoreError::Core(_)
+ | StoreError::Uuid(_) => FaultKind::Internal,
+ };
+ FaultRecord::new(kind, FaultStage::Store, operation, error.to_string())
+ }
}
-fn project_schema_output(schema: &ProjectSchema) -> Result<ToolOutput, FaultRecord> {
- let field_previews = schema
- .fields
- .iter()
- .take(8)
- .map(project_schema_field_value)
- .collect::<Vec<_>>();
- let concise = json!({
- "namespace": schema.namespace,
- "version": schema.version,
- "field_count": schema.fields.len(),
- "fields": field_previews,
- "truncated": schema.fields.len() > 8,
- });
- let mut lines = vec![
- format!("schema {}", schema_label(schema)),
- format!("{} field(s)", schema.fields.len()),
- ];
- for field in schema.fields.iter().take(8) {
- lines.push(format!(
- "{} [{}] {} {}",
- field.name,
- if field.node_classes.is_empty() {
- "any".to_owned()
- } else {
- field
- .node_classes
- .iter()
- .map(ToString::to_string)
- .collect::<Vec<_>>()
- .join(",")
- },
- field.presence.as_str(),
- field.role.as_str(),
- ));
- }
- if schema.fields.len() > 8 {
- lines.push(format!("... +{} more field(s)", schema.fields.len() - 8));
+fn with_fault<T, E>(result: Result<T, E>, operation: &str) -> Result<T, FaultRecord>
+where
+ E: Into<StoreError>,
+{
+ result.map_err(store_fault(operation))
+}
+
+fn tags_to_set(tags: Vec<String>) -> Result<BTreeSet<TagName>, StoreError> {
+ tags.into_iter()
+ .map(TagName::new)
+ .collect::<Result<BTreeSet<_>, _>>()
+ .map_err(StoreError::from)
+}
+
+fn metric_value_from_wire(
+ wire: MetricValueWire,
+ operation: &str,
+) -> Result<fidget_spinner_core::MetricValue, FaultRecord> {
+ Ok(fidget_spinner_core::MetricValue {
+ key: NonEmptyText::new(wire.key).map_err(store_fault(operation))?,
+ value: wire.value,
+ })
+}
+
+fn experiment_analysis_from_wire(
+ wire: ExperimentAnalysisWire,
+ operation: &str,
+) -> Result<ExperimentAnalysis, FaultRecord> {
+ Ok(ExperimentAnalysis {
+ summary: NonEmptyText::new(wire.summary).map_err(store_fault(operation))?,
+ body: NonEmptyText::new(wire.body).map_err(store_fault(operation))?,
+ })
+}
+
+fn experiment_outcome_patch_from_wire(
+ wire: ExperimentOutcomeWire,
+ operation: &str,
+) -> Result<ExperimentOutcomePatch, FaultRecord> {
+ Ok(ExperimentOutcomePatch {
+ backend: wire.backend,
+ command: wire.command,
+ dimensions: dimension_map_from_wire(wire.dimensions)?,
+ primary_metric: metric_value_from_wire(wire.primary_metric, operation)?,
+ supporting_metrics: wire
+ .supporting_metrics
+ .unwrap_or_default()
+ .into_iter()
+ .map(|metric| metric_value_from_wire(metric, operation))
+ .collect::<Result<Vec<_>, _>>()?,
+ verdict: wire.verdict,
+ rationale: NonEmptyText::new(wire.rationale).map_err(store_fault(operation))?,
+ analysis: wire
+ .analysis
+ .map(|analysis| experiment_analysis_from_wire(analysis, operation))
+ .transpose()?,
+ })
+}
+
+fn nullable_text_patch_from_wire(
+ patch: Option<NullableStringArg>,
+ operation: &str,
+) -> Result<Option<TextPatch<NonEmptyText>>, FaultRecord> {
+ match patch {
+ None => Ok(None),
+ Some(NullableStringArg::Clear(())) => Ok(Some(TextPatch::Clear)),
+ Some(NullableStringArg::Set(value)) => Ok(Some(TextPatch::Set(
+ NonEmptyText::new(value).map_err(store_fault(operation))?,
+ ))),
}
- detailed_tool_output(
- &concise,
- schema,
- lines.join("\n"),
- None,
- FaultStage::Worker,
- "tools/call:project.schema",
- )
}
-fn schema_field_upsert_output(
- schema: &ProjectSchema,
- field: &ProjectFieldSpec,
-) -> Result<ToolOutput, FaultRecord> {
- let concise = json!({
- "schema": schema.schema_ref(),
- "field": project_schema_field_value(field),
- });
- detailed_tool_output(
- &concise,
- &concise,
- format!(
- "upserted schema field {}\nschema: {}\nclasses: {}\npresence: {}\nseverity: {}\nrole: {}\ninference: {}{}",
- field.name,
- schema_label(schema),
- render_schema_node_classes(&field.node_classes),
- field.presence.as_str(),
- field.severity.as_str(),
- field.role.as_str(),
- field.inference_policy.as_str(),
- field
- .value_type
- .map(|value_type| format!("\nvalue_type: {}", value_type.as_str()))
- .unwrap_or_default(),
- ),
- None,
- FaultStage::Worker,
- "tools/call:schema.field.upsert",
- )
+fn dimension_map_from_wire(
+ dimensions: Option<Map<String, Value>>,
+) -> Result<BTreeMap<NonEmptyText, RunDimensionValue>, FaultRecord> {
+ dimensions
+ .unwrap_or_default()
+ .into_iter()
+ .map(|(key, value)| {
+ Ok((
+ NonEmptyText::new(key).map_err(store_fault("dimension-map"))?,
+ json_value_to_dimension(value)?,
+ ))
+ })
+ .collect()
+}
+
+fn json_value_to_dimension(value: Value) -> Result<RunDimensionValue, FaultRecord> {
+ match value {
+ Value::String(raw) => {
+ if time::OffsetDateTime::parse(&raw, &time::format_description::well_known::Rfc3339)
+ .is_ok()
+ {
+ NonEmptyText::new(raw)
+ .map(RunDimensionValue::Timestamp)
+ .map_err(store_fault("dimension-map"))
+ } else {
+ NonEmptyText::new(raw)
+ .map(RunDimensionValue::String)
+ .map_err(store_fault("dimension-map"))
+ }
+ }
+ Value::Number(number) => number
+ .as_f64()
+ .map(RunDimensionValue::Numeric)
+ .ok_or_else(|| {
+ FaultRecord::new(
+ FaultKind::InvalidInput,
+ FaultStage::Protocol,
+ "dimension-map",
+ "numeric dimension values must fit into f64",
+ )
+ }),
+ Value::Bool(value) => Ok(RunDimensionValue::Boolean(value)),
+ _ => Err(FaultRecord::new(
+ FaultKind::InvalidInput,
+ FaultStage::Protocol,
+ "dimension-map",
+ "dimension values must be string, number, boolean, or RFC3339 timestamp",
+ )),
+ }
}
-fn schema_field_remove_output(
- schema: &ProjectSchema,
- removed_count: u64,
+fn project_status_output(
+ status: &ProjectStatus,
+ operation: &str,
) -> Result<ToolOutput, FaultRecord> {
let concise = json!({
- "schema": schema.schema_ref(),
- "removed_count": removed_count,
+ "display_name": status.display_name,
+ "project_root": status.project_root,
+ "frontier_count": status.frontier_count,
+ "hypothesis_count": status.hypothesis_count,
+ "experiment_count": status.experiment_count,
+ "open_experiment_count": status.open_experiment_count,
+ "artifact_count": status.artifact_count,
});
detailed_tool_output(
&concise,
- &concise,
- format!(
- "removed {} schema field definition(s)\nschema: {}",
- removed_count,
- schema_label(schema),
- ),
+ status,
+ [
+ format!("project {}", status.display_name),
+ format!("root: {}", status.project_root),
+ format!("frontiers: {}", status.frontier_count),
+ format!("hypotheses: {}", status.hypothesis_count),
+ format!(
+ "experiments: {} (open {})",
+ status.experiment_count, status.open_experiment_count
+ ),
+ format!("artifacts: {}", status.artifact_count),
+ ]
+ .join("\n"),
None,
FaultStage::Worker,
- "tools/call:schema.field.remove",
+ operation,
)
}
-fn tag_add_output(tag: &TagRecord) -> Result<ToolOutput, FaultRecord> {
+fn tag_list_output(
+ tags: &[fidget_spinner_core::TagRecord],
+ operation: &str,
+) -> Result<ToolOutput, FaultRecord> {
let concise = json!({
- "name": tag.name,
- "description": tag.description,
+ "count": tags.len(),
+ "tags": tags,
});
detailed_tool_output(
&concise,
- tag,
- format!("registered tag {}\n{}", tag.name, tag.description),
- None,
- FaultStage::Worker,
- "tools/call:tag.add",
- )
-}
-
-fn tag_list_output(tags: &[TagRecord]) -> Result<ToolOutput, FaultRecord> {
- let concise = tags
- .iter()
- .map(|tag| {
- json!({
- "name": tag.name,
- "description": tag.description,
- })
- })
- .collect::<Vec<_>>();
- let mut lines = vec![format!("{} tag(s)", tags.len())];
- lines.extend(
- tags.iter()
- .map(|tag| format!("{}: {}", tag.name, tag.description)),
- );
- detailed_tool_output(
- &concise,
- &tags,
- lines.join("\n"),
- None,
- FaultStage::Worker,
- "tools/call:tag.list",
- )
-}
-
-fn frontier_list_output(frontiers: &[FrontierRecord]) -> Result<ToolOutput, FaultRecord> {
- let concise = frontiers
- .iter()
- .map(|frontier| {
- json!({
- "frontier_id": frontier.id,
- "label": frontier.label,
- "status": format!("{:?}", frontier.status).to_ascii_lowercase(),
- })
- })
- .collect::<Vec<_>>();
- let mut lines = vec![format!("{} frontier(s)", frontiers.len())];
- lines.extend(frontiers.iter().map(|frontier| {
- format!(
- "{} {} {}",
- frontier.id,
- format!("{:?}", frontier.status).to_ascii_lowercase(),
- frontier.label,
- )
- }));
- detailed_tool_output(
&concise,
- &frontiers,
- lines.join("\n"),
+ if tags.is_empty() {
+ "no tags".to_owned()
+ } else {
+ tags.iter()
+ .map(|tag| format!("{} — {}", tag.name, tag.description))
+ .collect::<Vec<_>>()
+ .join("\n")
+ },
None,
FaultStage::Worker,
- "tools/call:frontier.list",
+ operation,
)
}
-fn frontier_status_output(projection: &FrontierProjection) -> Result<ToolOutput, FaultRecord> {
- let concise = frontier_projection_summary_value(projection);
+fn frontier_list_output(
+ frontiers: &[FrontierSummary],
+ operation: &str,
+) -> Result<ToolOutput, FaultRecord> {
+ let concise = json!({ "count": frontiers.len(), "frontiers": frontiers });
detailed_tool_output(
&concise,
- projection,
- frontier_projection_text("frontier", projection),
- None,
- FaultStage::Worker,
- "tools/call:frontier.status",
- )
-}
-
-fn frontier_created_output(projection: &FrontierProjection) -> Result<ToolOutput, FaultRecord> {
- let concise = frontier_projection_summary_value(projection);
- detailed_tool_output(
&concise,
- projection,
- frontier_projection_text("created frontier", projection),
+ if frontiers.is_empty() {
+ "no frontiers".to_owned()
+ } else {
+ frontiers
+ .iter()
+ .map(|frontier| {
+ format!(
+ "{} — {} | active hypotheses {} | open experiments {}",
+ frontier.slug,
+ frontier.objective,
+ frontier.active_hypothesis_count,
+ frontier.open_experiment_count
+ )
+ })
+ .collect::<Vec<_>>()
+ .join("\n")
+ },
None,
FaultStage::Worker,
- "tools/call:frontier.init",
+ operation,
)
}
-fn created_node_output(
- action: &str,
- node: &fidget_spinner_core::DagNode,
- operation: &'static str,
+fn frontier_record_output(
+ frontier: &fidget_spinner_core::FrontierRecord,
+ operation: &str,
) -> Result<ToolOutput, FaultRecord> {
- let concise = node_brief_value(node);
- let mut lines = vec![format!("{action}: {} {}", node.class, node.id)];
- lines.push(format!("title: {}", node.title));
- if let Some(summary) = node.summary.as_ref() {
- lines.push(format!("summary: {summary}"));
- }
- if !node.tags.is_empty() {
- lines.push(format!("tags: {}", format_tags(&node.tags)));
- }
- if let Some(frontier_id) = node.frontier_id {
- lines.push(format!("frontier: {frontier_id}"));
+ let mut lines = vec![format!(
+ "frontier {} — {}",
+ frontier.slug, frontier.objective
+ )];
+ lines.push(format!("status: {}", frontier.status.as_str()));
+ if let Some(situation) = frontier.brief.situation.as_ref() {
+ lines.push(format!("situation: {}", situation));
+ }
+ if !frontier.brief.roadmap.is_empty() {
+ lines.push("roadmap:".to_owned());
+ for item in &frontier.brief.roadmap {
+ lines.push(format!(
+ " {}. {}{}",
+ item.rank,
+ item.hypothesis_id,
+ item.summary
+ .as_ref()
+ .map_or_else(String::new, |summary| format!(" — {summary}"))
+ ));
+ }
}
- if !node.diagnostics.items.is_empty() {
+ if !frontier.brief.unknowns.is_empty() {
lines.push(format!(
- "diagnostics: {}",
- diagnostic_summary_text(&node.diagnostics)
+ "unknowns: {}",
+ frontier
+ .brief
+ .unknowns
+ .iter()
+ .map(ToString::to_string)
+ .collect::<Vec<_>>()
+ .join("; ")
));
}
detailed_tool_output(
- &concise,
- node,
+ &frontier,
+ frontier,
lines.join("\n"),
None,
FaultStage::Worker,
@@ -1145,434 +1066,285 @@ fn created_node_output(
)
}
-fn node_list_output(nodes: &[NodeSummary]) -> Result<ToolOutput, FaultRecord> {
- let concise = nodes.iter().map(node_summary_value).collect::<Vec<_>>();
- let mut lines = vec![format!("{} node(s)", nodes.len())];
- lines.extend(nodes.iter().map(render_node_summary_line));
- detailed_tool_output(
- &concise,
- &nodes,
- lines.join("\n"),
- None,
- FaultStage::Worker,
- "tools/call:node.list",
- )
-}
-
-fn node_read_output(node: &fidget_spinner_core::DagNode) -> Result<ToolOutput, FaultRecord> {
- let visible_annotations = node
- .annotations
- .iter()
- .filter(|annotation| annotation.visibility == AnnotationVisibility::Visible)
- .map(|annotation| {
- let mut value = Map::new();
- if let Some(label) = annotation.label.as_ref() {
- let _ = value.insert("label".to_owned(), json!(label));
- }
- let _ = value.insert("body".to_owned(), json!(annotation.body));
- Value::Object(value)
- })
- .collect::<Vec<_>>();
- let visible_annotation_count = visible_annotations.len();
- let hidden_annotation_count = node
- .annotations
- .iter()
- .filter(|annotation| annotation.visibility == AnnotationVisibility::HiddenByDefault)
- .count();
- let mut concise = Map::new();
- let _ = concise.insert("id".to_owned(), json!(node.id));
- let _ = concise.insert("class".to_owned(), json!(node.class.as_str()));
- let _ = concise.insert("title".to_owned(), json!(node.title));
- if let Some(summary) = node.summary.as_ref() {
- let _ = concise.insert("summary".to_owned(), json!(summary));
- }
- if let Some(frontier_id) = node.frontier_id {
- let _ = concise.insert("frontier_id".to_owned(), json!(frontier_id));
- }
- if !node.tags.is_empty() {
- let _ = concise.insert(
- "tags".to_owned(),
- json!(
- node.tags
- .iter()
- .map(ToString::to_string)
- .collect::<Vec<_>>()
- ),
- );
- }
- if !node.payload.fields.is_empty() {
- let filtered_fields =
- filtered_payload_fields(node.class, &node.payload.fields).collect::<Vec<_>>();
- if !filtered_fields.is_empty() {
- let _ = concise.insert(
- "payload_field_count".to_owned(),
- json!(filtered_fields.len()),
- );
- if is_prose_node(node.class) {
- let _ = concise.insert(
- "payload_fields".to_owned(),
- json!(
- filtered_fields
- .iter()
- .take(6)
- .map(|(name, _)| (*name).clone())
- .collect::<Vec<_>>()
- ),
- );
- } else {
- let payload_preview = payload_preview_value(node.class, &node.payload.fields);
- if let Value::Object(object) = &payload_preview
- && !object.is_empty()
- {
- let _ = concise.insert("payload_preview".to_owned(), payload_preview);
- }
- }
- }
- }
- if !node.diagnostics.items.is_empty() {
- let _ = concise.insert(
- "diagnostics".to_owned(),
- diagnostic_summary_value(&node.diagnostics),
- );
- }
- if visible_annotation_count > 0 {
- let _ = concise.insert(
- "visible_annotations".to_owned(),
- Value::Array(visible_annotations),
- );
- }
- if hidden_annotation_count > 0 {
- let _ = concise.insert(
- "hidden_annotation_count".to_owned(),
- json!(hidden_annotation_count),
- );
- }
-
- let mut lines = vec![format!("{} {} {}", node.class, node.id, node.title)];
- if let Some(summary) = node.summary.as_ref() {
- lines.push(format!("summary: {summary}"));
- }
- if let Some(frontier_id) = node.frontier_id {
- lines.push(format!("frontier: {frontier_id}"));
- }
- if !node.tags.is_empty() {
- lines.push(format!("tags: {}", format_tags(&node.tags)));
+fn frontier_open_output(
+ projection: &FrontierOpenProjection,
+ operation: &str,
+) -> Result<ToolOutput, FaultRecord> {
+ let mut lines = vec![format!(
+ "frontier {} — {}",
+ projection.frontier.slug, projection.frontier.objective
+ )];
+ if let Some(situation) = projection.frontier.brief.situation.as_ref() {
+ lines.push(format!("situation: {}", situation));
+ }
+ if !projection.active_tags.is_empty() {
+ lines.push(format!(
+ "active tags: {}",
+ projection
+ .active_tags
+ .iter()
+ .map(ToString::to_string)
+ .collect::<Vec<_>>()
+ .join(", ")
+ ));
}
- lines.extend(payload_preview_lines(node.class, &node.payload.fields));
- if !node.diagnostics.items.is_empty() {
+ if !projection.active_metric_keys.is_empty() {
lines.push(format!(
- "diagnostics: {}",
- diagnostic_summary_text(&node.diagnostics)
+ "live metrics: {}",
+ projection
+ .active_metric_keys
+ .iter()
+ .map(|metric| metric.key.to_string())
+ .collect::<Vec<_>>()
+ .join(", ")
));
}
- if visible_annotation_count > 0 {
- lines.push(format!("visible annotations: {}", visible_annotation_count));
- for annotation in node
- .annotations
- .iter()
- .filter(|annotation| annotation.visibility == AnnotationVisibility::Visible)
- .take(4)
- {
- let label = annotation
- .label
+ if !projection.active_hypotheses.is_empty() {
+ lines.push("active hypotheses:".to_owned());
+ for state in &projection.active_hypotheses {
+ let status = state
+ .latest_closed_experiment
.as_ref()
- .map(|label| format!("{label}: "))
- .unwrap_or_default();
- lines.push(format!("annotation: {label}{}", annotation.body));
- }
- if visible_annotation_count > 4 {
+ .and_then(|experiment| experiment.verdict)
+ .map_or_else(
+ || "unjudged".to_owned(),
+ |verdict| verdict.as_str().to_owned(),
+ );
lines.push(format!(
- "... +{} more visible annotation(s)",
- visible_annotation_count - 4
+ " {} — {} | open {} | latest {}",
+ state.hypothesis.slug,
+ state.hypothesis.summary,
+ state.open_experiments.len(),
+ status
));
}
}
- if hidden_annotation_count > 0 {
- lines.push(format!("hidden annotations: {hidden_annotation_count}"));
+ if !projection.open_experiments.is_empty() {
+ lines.push("open experiments:".to_owned());
+ for experiment in &projection.open_experiments {
+ lines.push(format!(
+ " {} — {}",
+ experiment.slug,
+ experiment
+ .summary
+ .as_ref()
+ .map_or_else(|| experiment.title.to_string(), ToString::to_string)
+ ));
+ }
}
detailed_tool_output(
- &Value::Object(concise),
- node,
+ projection,
+ projection,
lines.join("\n"),
None,
FaultStage::Worker,
- "tools/call:node.read",
+ operation,
)
}
-fn experiment_close_output(
- store: &ProjectStore,
- receipt: &ExperimentReceipt,
+fn hypothesis_record_output(
+ hypothesis: &fidget_spinner_core::HypothesisRecord,
+ operation: &str,
) -> Result<ToolOutput, FaultRecord> {
- let concise = json!({
- "experiment_id": receipt.experiment.id,
- "frontier_id": receipt.experiment.frontier_id,
- "experiment_title": receipt.experiment.title,
- "verdict": metric_verdict_name(receipt.experiment.verdict),
- "run_id": receipt.run.run_id,
- "hypothesis_node_id": receipt.experiment.hypothesis_node_id,
- "decision_node_id": receipt.decision_node.id,
- "dimensions": run_dimensions_value(&receipt.experiment.result.dimensions),
- "primary_metric": metric_value(store, &receipt.experiment.result.primary_metric)?,
- });
detailed_tool_output(
- &concise,
- receipt,
- [
- format!(
- "closed experiment {} on frontier {}",
- receipt.experiment.id, receipt.experiment.frontier_id
- ),
- format!("title: {}", receipt.experiment.title),
- format!("hypothesis: {}", receipt.experiment.hypothesis_node_id),
- format!(
- "verdict: {}",
- metric_verdict_name(receipt.experiment.verdict)
- ),
- format!(
- "primary metric: {}",
- metric_text(store, &receipt.experiment.result.primary_metric)?
- ),
- format!(
- "dimensions: {}",
- render_dimension_kv(&receipt.experiment.result.dimensions)
- ),
- format!("run: {}", receipt.run.run_id),
- ]
- .join("\n"),
+ hypothesis,
+ hypothesis,
+ format!("hypothesis {} — {}", hypothesis.slug, hypothesis.summary),
None,
FaultStage::Worker,
- "tools/call:experiment.close",
+ operation,
)
}
-fn experiment_open_output(
- item: &OpenExperimentSummary,
- operation: &'static str,
- action: &'static str,
+fn hypothesis_list_output(
+ hypotheses: &[fidget_spinner_store_sqlite::HypothesisSummary],
+ operation: &str,
) -> Result<ToolOutput, FaultRecord> {
- let concise = json!({
- "experiment_id": item.id,
- "frontier_id": item.frontier_id,
- "hypothesis_node_id": item.hypothesis_node_id,
- "title": item.title,
- "summary": item.summary,
- });
+ let concise = json!({ "count": hypotheses.len(), "hypotheses": hypotheses });
detailed_tool_output(
&concise,
- item,
- [
- format!("{action} {}", item.id),
- format!("frontier: {}", item.frontier_id),
- format!("hypothesis: {}", item.hypothesis_node_id),
- format!("title: {}", item.title),
- item.summary
- .as_ref()
- .map(|summary| format!("summary: {summary}"))
- .unwrap_or_else(|| "summary: <none>".to_owned()),
- ]
- .join("\n"),
+ &concise,
+ if hypotheses.is_empty() {
+ "no hypotheses".to_owned()
+ } else {
+ hypotheses
+ .iter()
+ .map(|hypothesis| {
+ let verdict = hypothesis.latest_verdict.map_or_else(
+ || "unjudged".to_owned(),
+ |verdict| verdict.as_str().to_owned(),
+ );
+ format!(
+ "{} — {} | open {} | latest {}",
+ hypothesis.slug,
+ hypothesis.summary,
+ hypothesis.open_experiment_count,
+ verdict
+ )
+ })
+ .collect::<Vec<_>>()
+ .join("\n")
+ },
None,
FaultStage::Worker,
operation,
)
}
-fn experiment_list_output(items: &[OpenExperimentSummary]) -> Result<ToolOutput, FaultRecord> {
- let concise = items
- .iter()
- .map(|item| {
- json!({
- "experiment_id": item.id,
- "frontier_id": item.frontier_id,
- "hypothesis_node_id": item.hypothesis_node_id,
- "title": item.title,
- "summary": item.summary,
- })
- })
- .collect::<Vec<_>>();
- let mut lines = vec![format!("{} open experiment(s)", items.len())];
- lines.extend(items.iter().map(|item| {
+fn hypothesis_detail_output(
+ detail: &fidget_spinner_store_sqlite::HypothesisDetail,
+ operation: &str,
+) -> Result<ToolOutput, FaultRecord> {
+ let mut lines = vec![
format!(
- "{} {} | hypothesis={}",
- item.id, item.title, item.hypothesis_node_id,
- )
- }));
+ "hypothesis {} — {}",
+ detail.record.slug, detail.record.summary
+ ),
+ detail.record.body.to_string(),
+ ];
+ if !detail.record.tags.is_empty() {
+ lines.push(format!(
+ "tags: {}",
+ detail
+ .record
+ .tags
+ .iter()
+ .map(ToString::to_string)
+ .collect::<Vec<_>>()
+ .join(", ")
+ ));
+ }
+ lines.push(format!(
+ "parents: {} | children: {} | open experiments: {} | closed experiments: {} | artifacts: {}",
+ detail.parents.len(),
+ detail.children.len(),
+ detail.open_experiments.len(),
+ detail.closed_experiments.len(),
+ detail.artifacts.len()
+ ));
detailed_tool_output(
- &concise,
- &items,
+ detail,
+ detail,
lines.join("\n"),
None,
FaultStage::Worker,
- "tools/call:experiment.list",
+ operation,
)
}
-fn metric_keys_output(keys: &[MetricKeySummary]) -> Result<ToolOutput, FaultRecord> {
- let concise = keys
- .iter()
- .map(|key| {
- json!({
- "key": key.key,
- "source": key.source.as_str(),
- "experiment_count": key.experiment_count,
- "unit": key.unit.map(metric_unit_name),
- "objective": key.objective.map(metric_objective_name),
- "description": key.description,
- "requires_order": key.requires_order,
- })
- })
- .collect::<Vec<_>>();
- let mut lines = vec![format!("{} metric key(s)", keys.len())];
- lines.extend(keys.iter().map(|key| {
- let mut line = format!(
- "{} [{}] experiments={}",
- key.key,
- key.source.as_str(),
- key.experiment_count
+fn experiment_record_output(
+ experiment: &fidget_spinner_core::ExperimentRecord,
+ operation: &str,
+) -> Result<ToolOutput, FaultRecord> {
+ let mut line = format!("experiment {} — {}", experiment.slug, experiment.title);
+ if let Some(outcome) = experiment.outcome.as_ref() {
+ let _ = write!(
+ line,
+ " | {} {}={}",
+ outcome.verdict.as_str(),
+ outcome.primary_metric.key,
+ outcome.primary_metric.value
);
- if let Some(unit) = key.unit {
- line.push_str(format!(" unit={}", metric_unit_name(unit)).as_str());
- }
- if let Some(objective) = key.objective {
- line.push_str(format!(" objective={}", metric_objective_name(objective)).as_str());
- }
- if let Some(description) = key.description.as_ref() {
- line.push_str(format!(" | {description}").as_str());
- }
- if key.requires_order {
- line.push_str(" order=required");
- }
- line
- }));
+ } else {
+ let _ = write!(line, " | open");
+ }
detailed_tool_output(
- &concise,
- &keys,
- lines.join("\n"),
+ experiment,
+ experiment,
+ line,
None,
FaultStage::Worker,
- "tools/call:metric.keys",
+ operation,
)
}
-fn metric_best_output(
- items: &[fidget_spinner_store_sqlite::MetricBestEntry],
+fn experiment_list_output(
+ experiments: &[fidget_spinner_store_sqlite::ExperimentSummary],
+ operation: &str,
) -> Result<ToolOutput, FaultRecord> {
- let concise = items
- .iter()
- .enumerate()
- .map(|(index, item)| {
- json!({
- "rank": index + 1,
- "key": item.key,
- "source": item.source.as_str(),
- "value": item.value,
- "order": item.order.as_str(),
- "experiment_id": item.experiment_id,
- "experiment_title": item.experiment_title,
- "frontier_id": item.frontier_id,
- "hypothesis_node_id": item.hypothesis_node_id,
- "hypothesis_title": item.hypothesis_title,
- "verdict": metric_verdict_name(item.verdict),
- "run_id": item.run_id,
- "unit": item.unit.map(metric_unit_name),
- "objective": item.objective.map(metric_objective_name),
- "dimensions": run_dimensions_value(&item.dimensions),
- })
- })
- .collect::<Vec<_>>();
- let mut lines = vec![format!("{} ranked experiment(s)", items.len())];
- lines.extend(items.iter().enumerate().map(|(index, item)| {
- format!(
- "{}. {}={} [{}] {} | verdict={} | hypothesis={}",
- index + 1,
- item.key,
- item.value,
- item.source.as_str(),
- item.experiment_title,
- metric_verdict_name(item.verdict),
- item.hypothesis_title,
- )
- }));
- lines.extend(
- items
- .iter()
- .map(|item| format!(" dims: {}", render_dimension_kv(&item.dimensions))),
- );
+ let concise = json!({ "count": experiments.len(), "experiments": experiments });
detailed_tool_output(
&concise,
- &items,
- lines.join("\n"),
+ &concise,
+ if experiments.is_empty() {
+ "no experiments".to_owned()
+ } else {
+ experiments
+ .iter()
+ .map(|experiment| {
+ let status = experiment.verdict.map_or_else(
+ || experiment.status.as_str().to_owned(),
+ |verdict| verdict.as_str().to_owned(),
+ );
+ let metric = experiment
+ .primary_metric
+ .as_ref()
+ .map_or_else(String::new, |metric| {
+ format!(" | {}={}", metric.key, metric.value)
+ });
+ format!(
+ "{} — {} | {}{}",
+ experiment.slug, experiment.title, status, metric
+ )
+ })
+ .collect::<Vec<_>>()
+ .join("\n")
+ },
None,
FaultStage::Worker,
- "tools/call:metric.best",
+ operation,
)
}
-fn run_dimension_list_output(
- items: &[fidget_spinner_store_sqlite::RunDimensionSummary],
+fn experiment_detail_output(
+ detail: &fidget_spinner_store_sqlite::ExperimentDetail,
+ operation: &str,
) -> Result<ToolOutput, FaultRecord> {
- let concise = items
- .iter()
- .map(|item| {
- json!({
- "key": item.key,
- "value_type": item.value_type.as_str(),
- "description": item.description,
- "observed_run_count": item.observed_run_count,
- "distinct_value_count": item.distinct_value_count,
- "sample_values": item.sample_values,
- })
- })
- .collect::<Vec<_>>();
- let mut lines = vec![format!("{} run dimension(s)", items.len())];
- lines.extend(items.iter().map(|item| {
- let mut line = format!(
- "{} [{}] runs={} distinct={}",
- item.key,
- item.value_type.as_str(),
- item.observed_run_count,
- item.distinct_value_count
- );
- if let Some(description) = item.description.as_ref() {
- line.push_str(format!(" | {description}").as_str());
- }
- if !item.sample_values.is_empty() {
- line.push_str(
- format!(
- " | samples={}",
- item.sample_values
- .iter()
- .map(value_summary)
- .collect::<Vec<_>>()
- .join(", ")
- )
- .as_str(),
- );
- }
- line
- }));
+ let mut lines = vec![format!(
+ "experiment {} — {}",
+ detail.record.slug, detail.record.title
+ )];
+ lines.push(format!("hypothesis: {}", detail.owning_hypothesis.slug));
+ lines.push(format!(
+ "status: {}",
+ detail.record.outcome.as_ref().map_or_else(
+ || "open".to_owned(),
+ |outcome| outcome.verdict.as_str().to_owned()
+ )
+ ));
+ if let Some(outcome) = detail.record.outcome.as_ref() {
+ lines.push(format!(
+ "primary metric: {}={}",
+ outcome.primary_metric.key, outcome.primary_metric.value
+ ));
+ lines.push(format!("rationale: {}", outcome.rationale));
+ }
+ lines.push(format!(
+ "parents: {} | children: {} | artifacts: {}",
+ detail.parents.len(),
+ detail.children.len(),
+ detail.artifacts.len()
+ ));
detailed_tool_output(
- &concise,
- &items,
+ detail,
+ detail,
lines.join("\n"),
None,
FaultStage::Worker,
- "tools/call:run.dimension.list",
+ operation,
)
}
-fn json_created_output(
- headline: &str,
- structured: Value,
- operation: &'static str,
+fn artifact_record_output(
+ artifact: &fidget_spinner_core::ArtifactRecord,
+ operation: &str,
) -> Result<ToolOutput, FaultRecord> {
detailed_tool_output(
- &structured,
- &structured,
+ artifact,
+ artifact,
format!(
- "{headline}\n{}",
- crate::to_pretty_json(&structured).map_err(store_fault(operation))?
+ "artifact {} — {} -> {}",
+ artifact.slug, artifact.label, artifact.locator
),
None,
FaultStage::Worker,
@@ -1580,938 +1352,146 @@ fn json_created_output(
)
}
-fn project_schema_field_value(field: &ProjectFieldSpec) -> Value {
- let mut value = Map::new();
- let _ = value.insert("name".to_owned(), json!(field.name));
- if !field.node_classes.is_empty() {
- let _ = value.insert(
- "node_classes".to_owned(),
- json!(
- field
- .node_classes
- .iter()
- .map(ToString::to_string)
- .collect::<Vec<_>>()
- ),
- );
- }
- let _ = value.insert("presence".to_owned(), json!(field.presence.as_str()));
- let _ = value.insert("severity".to_owned(), json!(field.severity.as_str()));
- let _ = value.insert("role".to_owned(), json!(field.role.as_str()));
- let _ = value.insert(
- "inference_policy".to_owned(),
- json!(field.inference_policy.as_str()),
- );
- if let Some(value_type) = field.value_type {
- let _ = value.insert("value_type".to_owned(), json!(value_type.as_str()));
- }
- Value::Object(value)
-}
-
-fn render_schema_node_classes(node_classes: &BTreeSet<NodeClass>) -> String {
- if node_classes.is_empty() {
- return "any".to_owned();
- }
- node_classes
- .iter()
- .map(ToString::to_string)
- .collect::<Vec<_>>()
- .join(", ")
-}
-
-fn frontier_projection_summary_value(projection: &FrontierProjection) -> Value {
- json!({
- "frontier_id": projection.frontier.id,
- "label": projection.frontier.label,
- "status": format!("{:?}", projection.frontier.status).to_ascii_lowercase(),
- "open_experiment_count": projection.open_experiment_count,
- "completed_experiment_count": projection.completed_experiment_count,
- "verdict_counts": projection.verdict_counts,
- })
-}
-
-fn frontier_projection_text(prefix: &str, projection: &FrontierProjection) -> String {
- [
- format!(
- "{prefix} {} {}",
- projection.frontier.id, projection.frontier.label
- ),
- format!(
- "status: {}",
- format!("{:?}", projection.frontier.status).to_ascii_lowercase()
- ),
- format!("open experiments: {}", projection.open_experiment_count),
- format!(
- "completed experiments: {}",
- projection.completed_experiment_count
- ),
- format!(
- "verdicts: accepted={} kept={} parked={} rejected={}",
- projection.verdict_counts.accepted,
- projection.verdict_counts.kept,
- projection.verdict_counts.parked,
- projection.verdict_counts.rejected,
- ),
- ]
- .join("\n")
-}
-
-fn node_summary_value(node: &NodeSummary) -> Value {
- let mut value = Map::new();
- let _ = value.insert("id".to_owned(), json!(node.id));
- let _ = value.insert("class".to_owned(), json!(node.class.as_str()));
- let _ = value.insert("title".to_owned(), json!(node.title));
- if let Some(summary) = node.summary.as_ref() {
- let _ = value.insert("summary".to_owned(), json!(summary));
- }
- if let Some(frontier_id) = node.frontier_id {
- let _ = value.insert("frontier_id".to_owned(), json!(frontier_id));
- }
- if !node.tags.is_empty() {
- let _ = value.insert(
- "tags".to_owned(),
- json!(
- node.tags
- .iter()
- .map(ToString::to_string)
- .collect::<Vec<_>>()
- ),
- );
- }
- if node.archived {
- let _ = value.insert("archived".to_owned(), json!(true));
- }
- if node.diagnostic_count > 0 {
- let _ = value.insert("diagnostic_count".to_owned(), json!(node.diagnostic_count));
- }
- if node.hidden_annotation_count > 0 {
- let _ = value.insert(
- "hidden_annotation_count".to_owned(),
- json!(node.hidden_annotation_count),
- );
- }
- Value::Object(value)
-}
-
-fn node_brief_value(node: &fidget_spinner_core::DagNode) -> Value {
- let mut value = Map::new();
- let _ = value.insert("id".to_owned(), json!(node.id));
- let _ = value.insert("class".to_owned(), json!(node.class.as_str()));
- let _ = value.insert("title".to_owned(), json!(node.title));
- if let Some(summary) = node.summary.as_ref() {
- let _ = value.insert("summary".to_owned(), json!(summary));
- }
- if let Some(frontier_id) = node.frontier_id {
- let _ = value.insert("frontier_id".to_owned(), json!(frontier_id));
- }
- if !node.tags.is_empty() {
- let _ = value.insert(
- "tags".to_owned(),
- json!(
- node.tags
- .iter()
- .map(ToString::to_string)
- .collect::<Vec<_>>()
- ),
- );
- }
- if !node.diagnostics.items.is_empty() {
- let _ = value.insert(
- "diagnostics".to_owned(),
- diagnostic_summary_value(&node.diagnostics),
- );
- }
- Value::Object(value)
-}
-
-fn render_node_summary_line(node: &NodeSummary) -> String {
- let mut line = format!("{} {} {}", node.class, node.id, node.title);
- if let Some(summary) = node.summary.as_ref() {
- line.push_str(format!(" | {summary}").as_str());
- }
- if let Some(frontier_id) = node.frontier_id {
- line.push_str(format!(" | frontier={frontier_id}").as_str());
- }
- if !node.tags.is_empty() {
- line.push_str(format!(" | tags={}", format_tags(&node.tags)).as_str());
- }
- if node.diagnostic_count > 0 {
- line.push_str(format!(" | diag={}", node.diagnostic_count).as_str());
- }
- if node.hidden_annotation_count > 0 {
- line.push_str(format!(" | hidden-ann={}", node.hidden_annotation_count).as_str());
- }
- if node.archived {
- line.push_str(" | archived");
- }
- line
-}
-
-fn diagnostic_summary_value(diagnostics: &fidget_spinner_core::NodeDiagnostics) -> Value {
- let tally = diagnostic_tally(diagnostics);
- json!({
- "admission": match diagnostics.admission {
- AdmissionState::Admitted => "admitted",
- AdmissionState::Rejected => "rejected",
- },
- "count": tally.total,
- "error_count": tally.errors,
- "warning_count": tally.warnings,
- "info_count": tally.infos,
- })
-}
-
-fn diagnostic_summary_text(diagnostics: &fidget_spinner_core::NodeDiagnostics) -> String {
- let tally = diagnostic_tally(diagnostics);
- let mut parts = vec![format!("{}", tally.total)];
- if tally.errors > 0 {
- parts.push(format!("{} error", tally.errors));
- }
- if tally.warnings > 0 {
- parts.push(format!("{} warning", tally.warnings));
- }
- if tally.infos > 0 {
- parts.push(format!("{} info", tally.infos));
- }
- format!(
- "{} ({})",
- match diagnostics.admission {
- AdmissionState::Admitted => "admitted",
- AdmissionState::Rejected => "rejected",
+fn artifact_list_output(
+ artifacts: &[fidget_spinner_store_sqlite::ArtifactSummary],
+ operation: &str,
+) -> Result<ToolOutput, FaultRecord> {
+ let concise = json!({ "count": artifacts.len(), "artifacts": artifacts });
+ detailed_tool_output(
+ &concise,
+ &concise,
+ if artifacts.is_empty() {
+ "no artifacts".to_owned()
+ } else {
+ artifacts
+ .iter()
+ .map(|artifact| {
+ format!(
+ "{} — {} -> {}",
+ artifact.slug, artifact.label, artifact.locator
+ )
+ })
+ .collect::<Vec<_>>()
+ .join("\n")
},
- parts.join(", ")
+ None,
+ FaultStage::Worker,
+ operation,
)
}
-fn diagnostic_tally(diagnostics: &fidget_spinner_core::NodeDiagnostics) -> DiagnosticTally {
- diagnostics
- .items
- .iter()
- .fold(DiagnosticTally::default(), |mut tally, item| {
- tally.total += 1;
- match item.severity {
- DiagnosticSeverity::Error => tally.errors += 1,
- DiagnosticSeverity::Warning => tally.warnings += 1,
- DiagnosticSeverity::Info => tally.infos += 1,
- }
- tally
- })
-}
-
-fn payload_preview_value(class: NodeClass, fields: &Map<String, Value>) -> Value {
- let mut preview = Map::new();
- for (index, (name, value)) in filtered_payload_fields(class, fields).enumerate() {
- if index == 6 {
- let _ = preview.insert(
- "...".to_owned(),
- json!(format!("+{} more field(s)", fields.len() - index)),
- );
- break;
- }
- let _ = preview.insert(name.clone(), payload_value_preview(value));
- }
- Value::Object(preview)
-}
-
-fn payload_preview_lines(class: NodeClass, fields: &Map<String, Value>) -> Vec<String> {
- let filtered = filtered_payload_fields(class, fields).collect::<Vec<_>>();
- if filtered.is_empty() {
- return Vec::new();
- }
- if is_prose_node(class) {
- let preview_names = filtered
- .iter()
- .take(6)
- .map(|(name, _)| (*name).clone())
- .collect::<Vec<_>>();
- let mut lines = vec![format!("payload fields: {}", preview_names.join(", "))];
- if filtered.len() > preview_names.len() {
- lines.push(format!(
- "payload fields: +{} more field(s)",
- filtered.len() - preview_names.len()
- ));
- }
- return lines;
- }
- let mut lines = vec![format!("payload fields: {}", filtered.len())];
- for (index, (name, value)) in filtered.iter().enumerate() {
- if index == 6 {
- lines.push(format!(
- "payload: +{} more field(s)",
- filtered.len() - index
- ));
- break;
- }
- lines.push(format!(
- "payload.{}: {}",
- name,
- value_summary(&payload_value_preview(value))
- ));
+fn artifact_detail_output(
+ detail: &fidget_spinner_store_sqlite::ArtifactDetail,
+ operation: &str,
+) -> Result<ToolOutput, FaultRecord> {
+ let mut lines = vec![format!(
+ "artifact {} — {} -> {}",
+ detail.record.slug, detail.record.label, detail.record.locator
+ )];
+ if !detail.attachments.is_empty() {
+ lines.push(format!("attachments: {}", detail.attachments.len()));
}
- lines
+ detailed_tool_output(
+ detail,
+ detail,
+ lines.join("\n"),
+ None,
+ FaultStage::Worker,
+ operation,
+ )
}
-fn filtered_payload_fields(
- class: NodeClass,
- fields: &Map<String, Value>,
-) -> impl Iterator<Item = (&String, &Value)> + '_ {
- fields.iter().filter(move |(name, _)| {
- !matches!(class, NodeClass::Note | NodeClass::Source) || name.as_str() != "body"
- })
+fn metric_keys_output(
+ keys: &[MetricKeySummary],
+ operation: &str,
+) -> Result<ToolOutput, FaultRecord> {
+ let concise = json!({ "count": keys.len(), "metrics": keys });
+ detailed_tool_output(
+ &concise,
+ &concise,
+ if keys.is_empty() {
+ "no metrics".to_owned()
+ } else {
+ keys.iter()
+ .map(|metric| {
+ format!(
+ "{} [{} {} {}] refs={}",
+ metric.key,
+ metric.unit.as_str(),
+ metric.objective.as_str(),
+ metric.visibility.as_str(),
+ metric.reference_count
+ )
+ })
+ .collect::<Vec<_>>()
+ .join("\n")
+ },
+ None,
+ FaultStage::Worker,
+ operation,
+ )
}
-fn payload_value_preview(value: &Value) -> Value {
- match value {
- Value::Null | Value::Bool(_) | Value::Number(_) => value.clone(),
- Value::String(text) => Value::String(truncated_inline_preview(text, 96)),
- Value::Array(items) => {
- let preview = items
+fn metric_best_output(
+ entries: &[MetricBestEntry],
+ operation: &str,
+) -> Result<ToolOutput, FaultRecord> {
+ let concise = json!({ "count": entries.len(), "entries": entries });
+ detailed_tool_output(
+ &concise,
+ &concise,
+ if entries.is_empty() {
+ "no matching experiments".to_owned()
+ } else {
+ entries
.iter()
- .take(3)
- .map(payload_value_preview)
- .collect::<Vec<_>>();
- if items.len() > 3 {
- json!({
- "items": preview,
- "truncated": true,
- "total_count": items.len(),
+ .enumerate()
+ .map(|(index, entry)| {
+ format!(
+ "{}. {} / {} = {} ({})",
+ index + 1,
+ entry.experiment.slug,
+ entry.hypothesis.slug,
+ entry.value,
+ entry.experiment.verdict.map_or_else(
+ || entry.experiment.status.as_str().to_owned(),
+ |verdict| verdict.as_str().to_owned()
+ )
+ )
})
- } else {
- Value::Array(preview)
- }
- }
- Value::Object(object) => {
- let mut preview = Map::new();
- for (index, (name, nested)) in object.iter().enumerate() {
- if index == 4 {
- let _ = preview.insert(
- "...".to_owned(),
- json!(format!("+{} more field(s)", object.len() - index)),
- );
- break;
- }
- let _ = preview.insert(name.clone(), payload_value_preview(nested));
- }
- Value::Object(preview)
- }
- }
-}
-
-fn is_prose_node(class: NodeClass) -> bool {
- matches!(class, NodeClass::Note | NodeClass::Source)
-}
-
-fn truncated_inline_preview(text: &str, limit: usize) -> String {
- let collapsed = libmcp::collapse_inline_whitespace(text);
- let truncated = libmcp::render::truncate_chars(&collapsed, Some(limit));
- if truncated.truncated {
- format!("{}...", truncated.text)
- } else {
- truncated.text
- }
-}
-
-fn metric_value(store: &ProjectStore, metric: &MetricValue) -> Result<Value, FaultRecord> {
- let definition = metric_definition(store, &metric.key)?;
- Ok(json!({
- "key": metric.key,
- "value": metric.value,
- "unit": metric_unit_name(definition.unit),
- "objective": metric_objective_name(definition.objective),
- }))
-}
-
-fn metric_text(store: &ProjectStore, metric: &MetricValue) -> Result<String, FaultRecord> {
- let definition = metric_definition(store, &metric.key)?;
- Ok(format!(
- "{}={} {} ({})",
- metric.key,
- metric.value,
- metric_unit_name(definition.unit),
- metric_objective_name(definition.objective),
- ))
-}
-
-fn metric_unit_name(unit: MetricUnit) -> &'static str {
- match unit {
- MetricUnit::Seconds => "seconds",
- MetricUnit::Bytes => "bytes",
- MetricUnit::Count => "count",
- MetricUnit::Ratio => "ratio",
- MetricUnit::Custom => "custom",
- }
-}
-
-fn metric_objective_name(objective: fidget_spinner_core::OptimizationObjective) -> &'static str {
- match objective {
- fidget_spinner_core::OptimizationObjective::Minimize => "minimize",
- fidget_spinner_core::OptimizationObjective::Maximize => "maximize",
- fidget_spinner_core::OptimizationObjective::Target => "target",
- }
-}
-
-fn metric_verdict_name(verdict: FrontierVerdict) -> &'static str {
- match verdict {
- FrontierVerdict::Accepted => "accepted",
- FrontierVerdict::Kept => "kept",
- FrontierVerdict::Parked => "parked",
- FrontierVerdict::Rejected => "rejected",
- }
-}
-
-fn run_dimensions_value(dimensions: &BTreeMap<NonEmptyText, RunDimensionValue>) -> Value {
- Value::Object(
- dimensions
- .iter()
- .map(|(key, value)| (key.to_string(), value.as_json()))
- .collect::<Map<String, Value>>(),
+ .collect::<Vec<_>>()
+ .join("\n")
+ },
+ None,
+ FaultStage::Worker,
+ operation,
)
}
-fn render_dimension_kv(dimensions: &BTreeMap<NonEmptyText, RunDimensionValue>) -> String {
- if dimensions.is_empty() {
- return "none".to_owned();
- }
- dimensions
- .iter()
- .map(|(key, value)| format!("{key}={}", value_summary(&value.as_json())))
- .collect::<Vec<_>>()
- .join(", ")
-}
-
-fn format_tags(tags: &BTreeSet<TagName>) -> String {
- tags.iter()
- .map(ToString::to_string)
- .collect::<Vec<_>>()
- .join(", ")
-}
-
-fn schema_label(schema: &ProjectSchema) -> String {
- format!("{}@{}", schema.namespace, schema.version)
-}
-
-fn value_summary(value: &Value) -> String {
- match value {
- Value::Null => "null".to_owned(),
- Value::Bool(flag) => flag.to_string(),
- Value::Number(number) => number.to_string(),
- Value::String(text) => text.clone(),
- Value::Array(items) => format!("{} item(s)", items.len()),
- Value::Object(object) => format!("{} field(s)", object.len()),
- }
-}
-
-#[derive(Default)]
-struct DiagnosticTally {
- total: usize,
- errors: usize,
- warnings: usize,
- infos: usize,
-}
-
-fn store_fault<E>(operation: &'static str) -> impl FnOnce(E) -> FaultRecord
-where
- E: std::fmt::Display,
-{
- move |error| {
- FaultRecord::new(
- classify_fault_kind(&error.to_string()),
- FaultStage::Store,
- operation,
- error.to_string(),
- )
- }
-}
-
-fn classify_fault_kind(message: &str) -> FaultKind {
- if message.contains("was not found")
- || message.contains("invalid")
- || message.contains("unknown")
- || message.contains("empty")
- || message.contains("already exists")
- || message.contains("require an explicit tag list")
- || message.contains("requires a non-empty summary")
- || message.contains("requires a non-empty string payload field `body`")
- || message.contains("requires an explicit order")
- || message.contains("is ambiguous across sources")
- || message.contains("has conflicting semantics")
- || message.contains("conflicts with existing definition")
- {
- FaultKind::InvalidInput
- } else {
- FaultKind::Internal
- }
-}
-
-fn tool_annotations(raw: Vec<WireAnnotation>) -> Result<Vec<NodeAnnotation>, StoreError> {
- raw.into_iter()
- .map(|annotation| {
- Ok(NodeAnnotation {
- id: fidget_spinner_core::AnnotationId::fresh(),
- visibility: if annotation.visible {
- AnnotationVisibility::Visible
- } else {
- AnnotationVisibility::HiddenByDefault
- },
- label: annotation.label.map(NonEmptyText::new).transpose()?,
- body: NonEmptyText::new(annotation.body)?,
- created_at: time::OffsetDateTime::now_utc(),
- })
- })
- .collect()
-}
-
-fn lineage_attachments(parents: Vec<String>) -> Result<Vec<EdgeAttachment>, StoreError> {
- parents
- .into_iter()
- .map(|parent| {
- Ok(EdgeAttachment {
- node_id: crate::parse_node_id(&parent)?,
- kind: fidget_spinner_core::EdgeKind::Lineage,
- direction: EdgeAttachmentDirection::ExistingToNew,
- })
- })
- .collect()
-}
-
-fn parse_tag_set(values: Vec<String>) -> Result<BTreeSet<TagName>, StoreError> {
- values
- .into_iter()
- .map(TagName::new)
- .collect::<Result<BTreeSet<_>, _>>()
- .map_err(StoreError::from)
-}
-
-fn metric_spec_from_wire(raw: WireMetricSpec) -> Result<MetricSpec, StoreError> {
- Ok(MetricSpec {
- metric_key: NonEmptyText::new(raw.key)?,
- unit: parse_metric_unit_name(&raw.unit)?,
- objective: crate::parse_optimization_objective(&raw.objective)?,
- })
-}
-
-fn metric_value_from_wire(raw: WireMetricValue) -> Result<MetricValue, StoreError> {
- Ok(MetricValue {
- key: NonEmptyText::new(raw.key)?,
- value: raw.value,
- })
-}
-
-fn experiment_analysis_from_wire(raw: WireAnalysis) -> Result<ExperimentAnalysisDraft, StoreError> {
- Ok(ExperimentAnalysisDraft {
- title: NonEmptyText::new(raw.title)?,
- summary: NonEmptyText::new(raw.summary)?,
- body: NonEmptyText::new(raw.body)?,
- })
-}
-
-fn metric_definition(store: &ProjectStore, key: &NonEmptyText) -> Result<MetricSpec, FaultRecord> {
- store
- .list_metric_definitions()
- .map_err(store_fault("tools/call:experiment.close"))?
- .into_iter()
- .find(|definition| definition.key == *key)
- .map(|definition| MetricSpec {
- metric_key: definition.key,
- unit: definition.unit,
- objective: definition.objective,
- })
- .ok_or_else(|| {
- FaultRecord::new(
- FaultKind::InvalidInput,
- FaultStage::Store,
- "tools/call:experiment.close",
- format!("metric `{key}` is not registered"),
- )
- })
-}
-
-fn coerce_tool_dimensions(
- store: &ProjectStore,
- raw_dimensions: BTreeMap<String, Value>,
- operation: &'static str,
-) -> Result<BTreeMap<NonEmptyText, RunDimensionValue>, FaultRecord> {
- store
- .coerce_run_dimensions(raw_dimensions)
- .map_err(store_fault(operation))
-}
-
-fn command_recipe_from_wire(
- raw: WireRunCommand,
- project_root: &Utf8Path,
-) -> Result<CommandRecipe, StoreError> {
- let working_directory = raw
- .working_directory
- .map(Utf8PathBuf::from)
- .unwrap_or_else(|| project_root.to_path_buf());
- CommandRecipe::new(
- working_directory,
- crate::to_text_vec(raw.argv)?,
- raw.env.into_iter().collect::<BTreeMap<_, _>>(),
+fn history_output(
+ history: &[EntityHistoryEntry],
+ operation: &str,
+) -> Result<ToolOutput, FaultRecord> {
+ let concise = json!({ "count": history.len(), "history": history });
+ detailed_tool_output(
+ &concise,
+ &concise,
+ if history.is_empty() {
+ "no history".to_owned()
+ } else {
+ history
+ .iter()
+ .map(|entry| {
+ format!(
+ "rev {} {} @ {}",
+ entry.revision, entry.event_kind, entry.occurred_at
+ )
+ })
+ .collect::<Vec<_>>()
+ .join("\n")
+ },
+ None,
+ FaultStage::Worker,
+ operation,
)
- .map_err(StoreError::from)
-}
-
-fn parse_node_class_name(raw: &str) -> Result<NodeClass, StoreError> {
- match raw {
- "contract" => Ok(NodeClass::Contract),
- "hypothesis" => Ok(NodeClass::Hypothesis),
- "run" => Ok(NodeClass::Run),
- "analysis" => Ok(NodeClass::Analysis),
- "decision" => Ok(NodeClass::Decision),
- "source" => Ok(NodeClass::Source),
- "note" => Ok(NodeClass::Note),
- other => Err(crate::invalid_input(format!(
- "unknown node class `{other}`"
- ))),
- }
-}
-
-fn parse_metric_unit_name(raw: &str) -> Result<MetricUnit, StoreError> {
- crate::parse_metric_unit(raw)
-}
-
-fn parse_metric_source_name(raw: &str) -> Result<MetricFieldSource, StoreError> {
- match raw {
- "run_metric" => Ok(MetricFieldSource::RunMetric),
- "hypothesis_payload" => Ok(MetricFieldSource::HypothesisPayload),
- "run_payload" => Ok(MetricFieldSource::RunPayload),
- "analysis_payload" => Ok(MetricFieldSource::AnalysisPayload),
- "decision_payload" => Ok(MetricFieldSource::DecisionPayload),
- other => Err(StoreError::Json(serde_json::Error::io(
- std::io::Error::new(
- std::io::ErrorKind::InvalidInput,
- format!("unknown metric source `{other}`"),
- ),
- ))),
- }
-}
-
-fn parse_metric_order_name(raw: &str) -> Result<MetricRankOrder, StoreError> {
- match raw {
- "asc" => Ok(MetricRankOrder::Asc),
- "desc" => Ok(MetricRankOrder::Desc),
- other => Err(StoreError::Json(serde_json::Error::io(
- std::io::Error::new(
- std::io::ErrorKind::InvalidInput,
- format!("unknown metric order `{other}`"),
- ),
- ))),
- }
-}
-
-fn parse_field_value_type_name(raw: &str) -> Result<FieldValueType, StoreError> {
- match raw {
- "string" => Ok(FieldValueType::String),
- "numeric" => Ok(FieldValueType::Numeric),
- "boolean" => Ok(FieldValueType::Boolean),
- "timestamp" => Ok(FieldValueType::Timestamp),
- other => Err(crate::invalid_input(format!(
- "unknown field value type `{other}`"
- ))),
- }
-}
-
-fn parse_diagnostic_severity_name(raw: &str) -> Result<DiagnosticSeverity, StoreError> {
- match raw {
- "error" => Ok(DiagnosticSeverity::Error),
- "warning" => Ok(DiagnosticSeverity::Warning),
- "info" => Ok(DiagnosticSeverity::Info),
- other => Err(crate::invalid_input(format!(
- "unknown diagnostic severity `{other}`"
- ))),
- }
-}
-
-fn parse_field_presence_name(raw: &str) -> Result<FieldPresence, StoreError> {
- match raw {
- "required" => Ok(FieldPresence::Required),
- "recommended" => Ok(FieldPresence::Recommended),
- "optional" => Ok(FieldPresence::Optional),
- other => Err(crate::invalid_input(format!(
- "unknown field presence `{other}`"
- ))),
- }
-}
-
-fn parse_field_role_name(raw: &str) -> Result<FieldRole, StoreError> {
- match raw {
- "index" => Ok(FieldRole::Index),
- "projection_gate" => Ok(FieldRole::ProjectionGate),
- "render_only" => Ok(FieldRole::RenderOnly),
- "opaque" => Ok(FieldRole::Opaque),
- other => Err(crate::invalid_input(format!(
- "unknown field role `{other}`"
- ))),
- }
-}
-
-fn parse_inference_policy_name(raw: &str) -> Result<InferencePolicy, StoreError> {
- match raw {
- "manual_only" => Ok(InferencePolicy::ManualOnly),
- "model_may_infer" => Ok(InferencePolicy::ModelMayInfer),
- other => Err(crate::invalid_input(format!(
- "unknown inference policy `{other}`"
- ))),
- }
-}
-
-fn parse_backend_name(raw: &str) -> Result<ExecutionBackend, StoreError> {
- match raw {
- "local_process" => Ok(ExecutionBackend::LocalProcess),
- "worktree_process" => Ok(ExecutionBackend::WorktreeProcess),
- "ssh_process" => Ok(ExecutionBackend::SshProcess),
- other => Err(crate::invalid_input(format!("unknown backend `{other}`"))),
- }
-}
-
-fn parse_verdict_name(raw: &str) -> Result<FrontierVerdict, StoreError> {
- match raw {
- "accepted" => Ok(FrontierVerdict::Accepted),
- "kept" => Ok(FrontierVerdict::Kept),
- "parked" => Ok(FrontierVerdict::Parked),
- "rejected" => Ok(FrontierVerdict::Rejected),
- other => Err(crate::invalid_input(format!("unknown verdict `{other}`"))),
- }
-}
-
-#[derive(Debug, Deserialize)]
-struct FrontierStatusToolArgs {
- frontier_id: String,
-}
-
-#[derive(Debug, Deserialize)]
-struct TagAddToolArgs {
- name: String,
- description: String,
-}
-
-#[derive(Debug, Deserialize)]
-struct FrontierInitToolArgs {
- label: String,
- objective: String,
- contract_title: String,
- contract_summary: Option<String>,
- benchmark_suites: Vec<String>,
- promotion_criteria: Vec<String>,
- primary_metric: WireMetricSpec,
- #[serde(default)]
- supporting_metrics: Vec<WireMetricSpec>,
-}
-
-#[derive(Debug, Deserialize)]
-struct NodeCreateToolArgs {
- class: String,
- frontier_id: Option<String>,
- title: String,
- summary: Option<String>,
- tags: Option<Vec<String>>,
- #[serde(default)]
- payload: Option<Map<String, Value>>,
- #[serde(default)]
- annotations: Vec<WireAnnotation>,
- #[serde(default)]
- parents: Vec<String>,
-}
-
-#[derive(Debug, Deserialize)]
-struct HypothesisRecordToolArgs {
- frontier_id: String,
- title: String,
- summary: String,
- body: String,
- #[serde(default)]
- annotations: Vec<WireAnnotation>,
- #[serde(default)]
- parents: Vec<String>,
-}
-
-#[derive(Debug, Deserialize)]
-struct NodeListToolArgs {
- frontier_id: Option<String>,
- class: Option<String>,
- #[serde(default)]
- tags: Vec<String>,
- #[serde(default)]
- include_archived: bool,
- limit: Option<u32>,
-}
-
-#[derive(Debug, Deserialize)]
-struct NodeReadToolArgs {
- node_id: String,
-}
-
-#[derive(Debug, Deserialize)]
-struct NodeAnnotateToolArgs {
- node_id: String,
- body: String,
- label: Option<String>,
- #[serde(default)]
- visible: bool,
-}
-
-#[derive(Debug, Deserialize)]
-struct NodeArchiveToolArgs {
- node_id: String,
-}
-
-#[derive(Debug, Deserialize)]
-struct QuickNoteToolArgs {
- frontier_id: Option<String>,
- title: String,
- summary: String,
- body: String,
- tags: Vec<String>,
- #[serde(default)]
- annotations: Vec<WireAnnotation>,
- #[serde(default)]
- parents: Vec<String>,
-}
-
-#[derive(Debug, Deserialize)]
-struct SourceRecordToolArgs {
- frontier_id: Option<String>,
- title: String,
- summary: String,
- body: String,
- tags: Option<Vec<String>>,
- #[serde(default)]
- annotations: Vec<WireAnnotation>,
- #[serde(default)]
- parents: Vec<String>,
-}
-
-#[derive(Debug, Deserialize)]
-struct SchemaFieldUpsertToolArgs {
- name: String,
- node_classes: Option<Vec<String>>,
- presence: String,
- severity: String,
- role: String,
- inference_policy: String,
- value_type: Option<String>,
-}
-
-#[derive(Debug, Deserialize)]
-struct SchemaFieldRemoveToolArgs {
- name: String,
- node_classes: Option<Vec<String>>,
-}
-
-#[derive(Debug, Deserialize)]
-struct MetricDefineToolArgs {
- key: String,
- unit: String,
- objective: String,
- description: Option<String>,
-}
-
-#[derive(Debug, Deserialize)]
-struct RunDimensionDefineToolArgs {
- key: String,
- value_type: String,
- description: Option<String>,
-}
-
-#[derive(Debug, Deserialize, Default)]
-struct MetricKeysToolArgs {
- frontier_id: Option<String>,
- source: Option<String>,
- dimensions: Option<BTreeMap<String, Value>>,
-}
-
-#[derive(Debug, Deserialize)]
-struct MetricBestToolArgs {
- key: String,
- frontier_id: Option<String>,
- source: Option<String>,
- dimensions: Option<BTreeMap<String, Value>>,
- order: Option<String>,
- limit: Option<u32>,
-}
-
-#[derive(Debug, Deserialize)]
-struct ExperimentOpenToolArgs {
- frontier_id: String,
- hypothesis_node_id: String,
- title: String,
- summary: Option<String>,
-}
-
-#[derive(Debug, Deserialize, Default)]
-struct ExperimentListToolArgs {
- frontier_id: Option<String>,
-}
-
-#[derive(Debug, Deserialize)]
-struct ExperimentReadToolArgs {
- experiment_id: String,
-}
-
-#[derive(Debug, Deserialize)]
-struct ExperimentCloseToolArgs {
- experiment_id: String,
- run: WireRun,
- primary_metric: WireMetricValue,
- #[serde(default)]
- supporting_metrics: Vec<WireMetricValue>,
- note: WireFrontierNote,
- verdict: String,
- decision_title: String,
- decision_rationale: String,
- analysis: Option<WireAnalysis>,
-}
-
-#[derive(Debug, Deserialize)]
-struct WireAnnotation {
- body: String,
- label: Option<String>,
- #[serde(default)]
- visible: bool,
-}
-
-#[derive(Debug, Deserialize)]
-struct WireMetricSpec {
- key: String,
- unit: String,
- objective: String,
-}
-
-#[derive(Debug, Deserialize)]
-struct WireMetricValue {
- key: String,
- value: f64,
-}
-
-#[derive(Debug, Deserialize)]
-struct WireRun {
- title: String,
- summary: Option<String>,
- backend: String,
- #[serde(default)]
- dimensions: BTreeMap<String, Value>,
- command: WireRunCommand,
-}
-
-#[derive(Debug, Deserialize)]
-struct WireAnalysis {
- title: String,
- summary: String,
- body: String,
-}
-
-#[derive(Debug, Deserialize)]
-struct WireRunCommand {
- working_directory: Option<String>,
- argv: Vec<String>,
- #[serde(default)]
- env: BTreeMap<String, String>,
-}
-
-#[derive(Debug, Deserialize)]
-struct WireFrontierNote {
- summary: String,
- #[serde(default)]
- next_hypotheses: Vec<String>,
}
diff --git a/crates/fidget-spinner-cli/src/ui.rs b/crates/fidget-spinner-cli/src/ui.rs
index 29b5058..98cc95d 100644
--- a/crates/fidget-spinner-cli/src/ui.rs
+++ b/crates/fidget-spinner-cli/src/ui.rs
@@ -1,79 +1,113 @@
-use std::collections::BTreeMap;
use std::io;
use std::net::SocketAddr;
use axum::Router;
-use axum::extract::{Query, State};
+use axum::extract::{Path, State};
use axum::http::StatusCode;
use axum::response::{Html, IntoResponse, Response};
use axum::routing::get;
use camino::Utf8PathBuf;
-use fidget_spinner_core::{DagNode, FieldValueType, NodeClass, ProjectSchema, TagName};
-use linkify::{LinkFinder, LinkKind};
+use fidget_spinner_core::{
+ AttachmentTargetRef, ExperimentAnalysis, ExperimentOutcome, ExperimentStatus, FrontierRecord,
+ FrontierVerdict, MetricUnit, RunDimensionValue, Slug, VertexRef,
+};
+use fidget_spinner_store_sqlite::{
+ ExperimentDetail, ExperimentSummary, FrontierOpenProjection, FrontierSummary,
+ HypothesisCurrentState, HypothesisDetail, ProjectStatus, StoreError, VertexSummary,
+};
use maud::{DOCTYPE, Markup, PreEscaped, html};
-use serde::Deserialize;
-use serde_json::Value;
+use percent_encoding::{NON_ALPHANUMERIC, utf8_percent_encode};
use time::OffsetDateTime;
use time::format_description::well_known::Rfc3339;
use time::macros::format_description;
-use crate::{open_store, to_pretty_json};
+use crate::open_store;
#[derive(Clone)]
struct NavigatorState {
project_root: Utf8PathBuf,
- limit: u32,
+ limit: Option<u32>,
}
-#[derive(Debug, Default, Deserialize)]
-struct NavigatorQuery {
- tag: Option<String>,
-}
-
-struct NavigatorEntry {
- node: DagNode,
- frontier_label: Option<String>,
-}
-
-struct TagFacet {
- name: TagName,
- description: String,
- count: usize,
+struct AttachmentDisplay {
+ kind: &'static str,
+ href: String,
+ title: String,
+ summary: Option<String>,
}
pub(crate) fn serve(
project_root: Utf8PathBuf,
bind: SocketAddr,
- limit: u32,
-) -> Result<(), fidget_spinner_store_sqlite::StoreError> {
+ limit: Option<u32>,
+) -> Result<(), StoreError> {
let runtime = tokio::runtime::Builder::new_multi_thread()
.enable_io()
.build()
- .map_err(fidget_spinner_store_sqlite::StoreError::from)?;
+ .map_err(StoreError::from)?;
runtime.block_on(async move {
let state = NavigatorState {
project_root,
limit,
};
let app = Router::new()
- .route("/", get(navigator))
+ .route("/", get(project_home))
+ .route("/frontier/{selector}", get(frontier_detail))
+ .route("/hypothesis/{selector}", get(hypothesis_detail))
+ .route("/experiment/{selector}", get(experiment_detail))
+ .route("/artifact/{selector}", get(artifact_detail))
.with_state(state.clone());
let listener = tokio::net::TcpListener::bind(bind)
.await
- .map_err(fidget_spinner_store_sqlite::StoreError::from)?;
+ .map_err(StoreError::from)?;
println!("navigator: http://{bind}/");
- axum::serve(listener, app).await.map_err(|error| {
- fidget_spinner_store_sqlite::StoreError::Io(io::Error::other(error.to_string()))
- })
+ axum::serve(listener, app)
+ .await
+ .map_err(|error| StoreError::Io(io::Error::other(error.to_string())))
})
}
-async fn navigator(
+async fn project_home(State(state): State<NavigatorState>) -> Response {
+ render_response(render_project_home(state))
+}
+
+async fn frontier_detail(
State(state): State<NavigatorState>,
- Query(query): Query<NavigatorQuery>,
+ Path(selector): Path<String>,
) -> Response {
- match render_navigator(state, query) {
+ render_response(render_frontier_detail(state, selector))
+}
+
+async fn hypothesis_detail(
+ State(state): State<NavigatorState>,
+ Path(selector): Path<String>,
+) -> Response {
+ render_response(render_hypothesis_detail(state, selector))
+}
+
+async fn experiment_detail(
+ State(state): State<NavigatorState>,
+ Path(selector): Path<String>,
+) -> Response {
+ render_response(render_experiment_detail(state, selector))
+}
+
+async fn artifact_detail(
+ State(state): State<NavigatorState>,
+ Path(selector): Path<String>,
+) -> Response {
+ render_response(render_artifact_detail(state, selector))
+}
+
+fn render_response(result: Result<Markup, StoreError>) -> Response {
+ match result {
Ok(markup) => Html(markup.into_string()).into_response(),
+ Err(StoreError::UnknownFrontierSelector(_))
+ | Err(StoreError::UnknownHypothesisSelector(_))
+ | Err(StoreError::UnknownExperimentSelector(_))
+ | Err(StoreError::UnknownArtifactSelector(_)) => {
+ (StatusCode::NOT_FOUND, "not found".to_owned()).into_response()
+ }
Err(error) => (
StatusCode::INTERNAL_SERVER_ERROR,
format!("navigator render failed: {error}"),
@@ -82,565 +116,1226 @@ async fn navigator(
}
}
-fn render_navigator(
- state: NavigatorState,
- query: NavigatorQuery,
-) -> Result<Markup, fidget_spinner_store_sqlite::StoreError> {
/// Builds the home page markup: project status card followed by the frontier grid.
fn render_project_home(state: NavigatorState) -> Result<Markup, StoreError> {
    let store = open_store(state.project_root.as_std_path())?;
    let project_status = store.status()?;
    let frontiers = store.list_frontiers()?;
    let title = format!("{} navigator", project_status.display_name);
    let content = html! {
        (render_project_status(&project_status))
        (render_frontier_grid(&frontiers, state.limit))
    };
    Ok(render_shell(
        &title,
        // Subtitle is the bare project name; the root page has no breadcrumb.
        Some(&project_status.display_name.to_string()),
        None,
        content,
    ))
}
+
/// Builds a frontier detail page: header, brief, active surface, then the
/// active-hypothesis and open-experiment grids, all capped by `state.limit`.
fn render_frontier_detail(state: NavigatorState, selector: String) -> Result<Markup, StoreError> {
    let store = open_store(state.project_root.as_std_path())?;
    let projection = store.frontier_open(&selector)?;
    let title = format!("{} · frontier", projection.frontier.label);
    let subtitle = format!(
        "{} hypotheses active · {} experiments open",
        projection.active_hypotheses.len(),
        projection.open_experiments.len()
    );
    let content = html! {
        (render_frontier_header(&projection.frontier))
        (render_frontier_brief(&projection))
        (render_frontier_active_sets(&projection))
        (render_hypothesis_current_state_grid(
            &projection.active_hypotheses,
            state.limit,
        ))
        (render_open_experiment_grid(
            &projection.open_experiments,
            state.limit,
        ))
    };
    Ok(render_shell(&title, Some(&subtitle), None, content))
}
+
/// Builds a hypothesis detail page: header, prose body, influence network,
/// artifacts, and open/closed experiment sections. Breadcrumbs back to the
/// owning frontier.
fn render_hypothesis_detail(state: NavigatorState, selector: String) -> Result<Markup, StoreError> {
    let store = open_store(state.project_root.as_std_path())?;
    let detail = store.read_hypothesis(&selector)?;
    // Frontier is re-read by id so the breadcrumb can show label + slug link.
    let frontier = store.read_frontier(&detail.record.frontier_id.to_string())?;
    let title = format!("{} · hypothesis", detail.record.title);
    let subtitle = detail.record.summary.to_string();
    let content = html! {
        (render_hypothesis_header(&detail, &frontier))
        (render_prose_block("Body", detail.record.body.as_str()))
        (render_vertex_relation_sections(&detail.parents, &detail.children, state.limit))
        (render_artifact_section(&detail.artifacts, state.limit))
        (render_experiment_section(
            "Open Experiments",
            &detail.open_experiments,
            state.limit,
        ))
        (render_experiment_section(
            "Closed Experiments",
            &detail.closed_experiments,
            state.limit,
        ))
    };
    Ok(render_shell(
        &title,
        Some(&subtitle),
        Some((frontier.label.as_str(), frontier_href(&frontier.slug))),
        content,
    ))
}
+
/// Builds an experiment detail page: header, influence network, artifacts,
/// and the outcome card (or an "open, no outcome" placeholder).
fn render_experiment_detail(state: NavigatorState, selector: String) -> Result<Markup, StoreError> {
    let store = open_store(state.project_root.as_std_path())?;
    let detail = store.read_experiment(&selector)?;
    let frontier = store.read_frontier(&detail.record.frontier_id.to_string())?;
    let title = format!("{} · experiment", detail.record.title);
    // Fall back to the status string when no human-written summary exists.
    let subtitle = detail.record.summary.as_ref().map_or_else(
        || detail.record.status.as_str().to_owned(),
        ToString::to_string,
    );
    let content = html! {
        (render_experiment_header(&detail, &frontier))
        (render_vertex_relation_sections(&detail.parents, &detail.children, state.limit))
        (render_artifact_section(&detail.artifacts, state.limit))
        @if let Some(outcome) = detail.record.outcome.as_ref() {
            (render_experiment_outcome(outcome))
        } @else {
            section.card {
                h2 { "Outcome" }
                p.muted { "Open experiment. No outcome recorded yet." }
            }
        }
    };
    Ok(render_shell(
        &title,
        Some(&subtitle),
        Some((frontier.label.as_str(), frontier_href(&frontier.slug))),
        content,
    ))
}
- Ok(html! {
- (DOCTYPE)
- html {
- head {
- meta charset="utf-8";
- meta name="viewport" content="width=device-width, initial-scale=1";
- title { "Fidget Spinner Navigator" }
- style { (PreEscaped(stylesheet().to_owned())) }
/// Builds an artifact detail page: metadata card plus a list of the records
/// the artifact is attached to. Artifact bodies are never stored — only the
/// locator reference is shown.
fn render_artifact_detail(state: NavigatorState, selector: String) -> Result<Markup, StoreError> {
    let store = open_store(state.project_root.as_std_path())?;
    let detail = store.read_artifact(&selector)?;
    // Resolve each attachment target to a (kind, href, title, summary) chip;
    // any failed lookup aborts the page render.
    let attachments = detail
        .attachments
        .iter()
        .map(|target| resolve_attachment_display(&store, *target))
        .collect::<Result<Vec<_>, StoreError>>()?;
    let title = format!("{} · artifact", detail.record.label);
    // Fall back to the artifact kind when no summary exists.
    let subtitle = detail.record.summary.as_ref().map_or_else(
        || detail.record.kind.as_str().to_owned(),
        ToString::to_string,
    );
    let content = html! {
        section.card {
            h2 { "Artifact" }
            div.kv-grid {
                (render_kv("Kind", detail.record.kind.as_str()))
                (render_kv("Slug", detail.record.slug.as_str()))
                (render_kv("Locator", detail.record.locator.as_str()))
                @if let Some(media_type) = detail.record.media_type.as_ref() {
                    (render_kv("Media type", media_type.as_str()))
                }
                (render_kv("Updated", &format_timestamp(detail.record.updated_at)))
            }
            @if let Some(summary) = detail.record.summary.as_ref() {
                p.prose { (summary) }
            }
            p.muted {
                "Artifact bodies are intentionally out of band. Spinner only preserves references."
            }
        }
        section.card {
            h2 { "Attachments" }
            @if attachments.is_empty() {
                p.muted { "No attachments." }
            } @else {
                div.link-list {
                    @for attachment in &attachments {
                        (render_attachment_chip(attachment))
                    }
                }
            }
        }
    };
    Ok(render_shell(&title, Some(&subtitle), None, content))
}
+
/// Renders the home-page frontier grid: one mini-card per frontier with
/// status chip, objective, counters, and last-update time. Capped by `limit`.
fn render_frontier_grid(frontiers: &[FrontierSummary], limit: Option<u32>) -> Markup {
    html! {
        section.card {
            h2 { "Frontiers" }
            @if frontiers.is_empty() {
                p.muted { "No frontiers yet." }
            } @else {
                div.card-grid {
                    @for frontier in limit_items(frontiers, limit) {
                        article.mini-card {
                            div.card-header {
                                a.title-link href=(frontier_href(&frontier.slug)) { (frontier.label) }
                                span.status-chip class=(frontier_status_class(frontier.status.as_str())) {
                                    (frontier.status.as_str())
                                }
                            }
                            p.prose { (frontier.objective) }
                            div.meta-row {
                                span { (format!("{} active hypotheses", frontier.active_hypothesis_count)) }
                                span { (format!("{} open experiments", frontier.open_experiment_count)) }
                            }
                            div.meta-row.muted {
                                span { "updated " (format_timestamp(frontier.updated_at)) }
                            }
                        }
                    }
                }
            }
        }
    }
}
-fn load_recent_nodes(
- store: &fidget_spinner_store_sqlite::ProjectStore,
- tag: Option<TagName>,
- limit: u32,
-) -> Result<Vec<DagNode>, fidget_spinner_store_sqlite::StoreError> {
- let summaries = store.list_nodes(fidget_spinner_store_sqlite::ListNodesQuery {
- tags: tag.into_iter().collect(),
- limit,
- ..fidget_spinner_store_sqlite::ListNodesQuery::default()
- })?;
- summaries
- .into_iter()
- .map(|summary| {
- store.get_node(summary.id)?.ok_or(
- fidget_spinner_store_sqlite::StoreError::NodeNotFound(summary.id),
- )
- })
- .collect()
-}
-
-fn render_entry(entry: &NavigatorEntry, schema: &ProjectSchema) -> Markup {
- let body = entry.node.payload.field("body").and_then(Value::as_str);
- let mut keys = entry
- .node
- .payload
- .fields
- .keys()
- .filter(|name| name.as_str() != "body")
- .cloned()
- .collect::<Vec<_>>();
- keys.sort_unstable();
/// Renders the project status card: name, a short philosophy blurb, and the
/// headline record counts from `ProjectStatus`.
fn render_project_status(status: &ProjectStatus) -> Markup {
    html! {
        section.card {
            h1 { (status.display_name) }
            p.prose {
                "Austere experimental ledger. Frontier overview is the only sanctioned dump; everything else is deliberate traversal."
            }
            div.kv-grid {
                (render_kv("Project root", status.project_root.as_str()))
                (render_kv("Store format", &status.store_format_version.to_string()))
                (render_kv("Frontiers", &status.frontier_count.to_string()))
                (render_kv("Hypotheses", &status.hypothesis_count.to_string()))
                (render_kv("Experiments", &status.experiment_count.to_string()))
                (render_kv("Open experiments", &status.open_experiment_count.to_string()))
                (render_kv("Artifacts", &status.artifact_count.to_string()))
            }
        }
    }
}
+
/// Renders the frontier page header: label, objective, slug, status chip,
/// and last-update timestamp.
fn render_frontier_header(frontier: &FrontierRecord) -> Markup {
    html! {
        section.card {
            h1 { (frontier.label) }
            p.prose { (frontier.objective) }
            div.meta-row {
                span { "slug " code { (frontier.slug) } }
                span.status-chip class=(frontier_status_class(frontier.status.as_str())) {
                    (frontier.status.as_str())
                }
                span.muted { "updated " (format_timestamp(frontier.updated_at)) }
            }
        }
    }
}
/// Renders the "Frontier Brief" card: the situation prose, the ranked roadmap
/// (each entry linking to its hypothesis), and the explicit unknowns list.
fn render_frontier_brief(projection: &FrontierOpenProjection) -> Markup {
    let frontier = &projection.frontier;
    html! {
        section.card {
            h2 { "Frontier Brief" }
            @if let Some(situation) = frontier.brief.situation.as_ref() {
                div.block {
                    h3 { "Situation" }
                    p.prose { (situation) }
                }
            } @else {
                p.muted { "No situation summary recorded." }
            }
            div.split {
                div.subcard {
                    h3 { "Roadmap" }
                    @if frontier.brief.roadmap.is_empty() {
                        p.muted { "No roadmap ordering recorded." }
                    } @else {
                        ol.roadmap-list {
                            @for item in &frontier.brief.roadmap {
                                // Prefer the live hypothesis title; falls back
                                // to the raw id when the hypothesis is inactive.
                                @let title = hypothesis_title_for_roadmap_item(projection, item.hypothesis_id);
                                li {
                                    a href=(hypothesis_href_from_id(item.hypothesis_id)) {
                                        (format!("{}.", item.rank)) " "
                                        (title)
                                    }
                                    @if let Some(summary) = item.summary.as_ref() {
                                        span.muted { " · " (summary) }
                                    }
                                }
                            }
                        }
                    }
                }
                div.subcard {
                    h3 { "Unknowns" }
                    @if frontier.brief.unknowns.is_empty() {
                        p.muted { "No explicit unknowns." }
                    } @else {
                        ul.simple-list {
                            @for unknown in &frontier.brief.unknowns {
                                li { (unknown) }
                            }
                        }
                    }
                }
            }
        }
    }
}
+
/// Renders the "Active Surface" card: the frontier's active tag chips and a
/// table of live metric keys (key / unit / objective / reference count).
fn render_frontier_active_sets(projection: &FrontierOpenProjection) -> Markup {
    html! {
        section.card {
            h2 { "Active Surface" }
            div.split {
                div.subcard {
                    h3 { "Active Tags" }
                    @if projection.active_tags.is_empty() {
                        p.muted { "No active tags." }
                    } @else {
                        div.chip-row {
                            @for tag in &projection.active_tags {
                                span.tag-chip { (tag) }
                            }
                        }
                    }
                }
                div.subcard {
                    h3 { "Live Metrics" }
                    @if projection.active_metric_keys.is_empty() {
                        p.muted { "No live metrics." }
                    } @else {
                        table.metric-table {
                            thead {
                                tr {
                                    th { "Key" }
                                    th { "Unit" }
                                    th { "Objective" }
                                    th { "Refs" }
                                }
                            }
                            tbody {
                                @for metric in &projection.active_metric_keys {
                                    tr {
                                        td { (metric.key) }
                                        td { (metric.unit.as_str()) }
                                        td { (metric.objective.as_str()) }
                                        td { (metric.reference_count) }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
    }
}
+
/// Renders the "Active Hypotheses" grid: one mini-card per hypothesis with
/// latest verdict chip, tags, open-experiment links, and the latest closed
/// experiment. Capped by `limit`.
fn render_hypothesis_current_state_grid(
    states: &[HypothesisCurrentState],
    limit: Option<u32>,
) -> Markup {
    html! {
        section.card {
            h2 { "Active Hypotheses" }
            @if states.is_empty() {
                p.muted { "No active hypotheses." }
            } @else {
                div.card-grid {
                    @for state in limit_items(states, limit) {
                        article.mini-card {
                            div.card-header {
                                a.title-link href=(hypothesis_href(&state.hypothesis.slug)) {
                                    (state.hypothesis.title)
                                }
                                @if let Some(verdict) = state.hypothesis.latest_verdict {
                                    span.status-chip class=(verdict_class(verdict)) {
                                        (verdict.as_str())
                                    }
                                }
                            }
                            p.prose { (state.hypothesis.summary) }
                            @if !state.hypothesis.tags.is_empty() {
                                div.chip-row {
                                    @for tag in &state.hypothesis.tags {
                                        span.tag-chip { (tag) }
                                    }
                                }
                            }
                            div.meta-row {
                                span { (format!("{} open", state.open_experiments.len())) }
                                @if let Some(latest) = state.latest_closed_experiment.as_ref() {
                                    span {
                                        "latest "
                                        a href=(experiment_href(&latest.slug)) { (latest.title) }
                                    }
                                } @else {
                                    span.muted { "no closed experiments" }
                                }
                            }
                            @if !state.open_experiments.is_empty() {
                                div.related-block {
                                    h3 { "Open" }
                                    div.link-list {
                                        @for experiment in &state.open_experiments {
                                            (render_experiment_link_chip(experiment))
                                        }
                                    }
                                }
                            }
                            @if let Some(latest) = state.latest_closed_experiment.as_ref() {
                                div.related-block {
                                    h3 { "Latest Closed" }
                                    (render_experiment_summary_line(latest))
                                }
                            }
                        }
                    }
                }
            }
        }
    }
}
+
/// Renders the frontier's "Open Experiments" grid, capped by `limit`.
fn render_open_experiment_grid(experiments: &[ExperimentSummary], limit: Option<u32>) -> Markup {
    html! {
        section.card {
            h2 { "Open Experiments" }
            @if experiments.is_empty() {
                p.muted { "No open experiments." }
            } @else {
                div.card-grid {
                    @for experiment in limit_items(experiments, limit) {
                        (render_experiment_card(experiment))
                    }
                }
            }
        }
    }
}
+
/// Renders the hypothesis page header: title, summary, frontier link, slug,
/// an "archived" chip when applicable, update time, and tag chips.
fn render_hypothesis_header(detail: &HypothesisDetail, frontier: &FrontierRecord) -> Markup {
    html! {
        section.card {
            h1 { (detail.record.title) }
            p.prose { (detail.record.summary) }
            div.meta-row {
                span { "frontier " a href=(frontier_href(&frontier.slug)) { (frontier.label) } }
                span { "slug " code { (detail.record.slug) } }
                @if detail.record.archived {
                    span.status-chip.archived { "archived" }
                }
                span.muted { "updated " (format_timestamp(detail.record.updated_at)) }
            }
            @if !detail.record.tags.is_empty() {
                div.chip-row {
                    @for tag in &detail.record.tags {
                        span.tag-chip { (tag) }
                    }
                }
            }
        }
    }
}
+
/// Renders the experiment page header: title, optional summary, links to the
/// owning frontier and hypothesis, status/verdict chips, update time, tags.
fn render_experiment_header(detail: &ExperimentDetail, frontier: &FrontierRecord) -> Markup {
    html! {
        section.card {
            h1 { (detail.record.title) }
            @if let Some(summary) = detail.record.summary.as_ref() {
                p.prose { (summary) }
            }
            div.meta-row {
                span {
                    "frontier "
                    a href=(frontier_href(&frontier.slug)) { (frontier.label) }
                }
                span {
                    "hypothesis "
                    a href=(hypothesis_href(&detail.owning_hypothesis.slug)) {
                        (detail.owning_hypothesis.title)
                    }
                }
                span.status-chip class=(experiment_status_class(detail.record.status)) {
                    (detail.record.status.as_str())
                }
                // Verdict chip only exists once an outcome has been recorded.
                @if let Some(verdict) = detail
                    .record
                    .outcome
                    .as_ref()
                    .map(|outcome| outcome.verdict)
                {
                    span.status-chip class=(verdict_class(verdict)) { (verdict.as_str()) }
                }
                span.muted { "updated " (format_timestamp(detail.record.updated_at)) }
            }
            @if !detail.record.tags.is_empty() {
                div.chip-row {
                    @for tag in &detail.record.tags {
                        span.tag-chip { (tag) }
                    }
                }
            }
        }
    }
}
+
/// Renders a closed experiment's "Outcome" card: verdict/backend/closed-at
/// facts, the command recipe, primary and supporting metric tables, run
/// dimensions, rationale prose, and the optional analysis subcard.
fn render_experiment_outcome(outcome: &ExperimentOutcome) -> Markup {
    html! {
        section.card {
            h2 { "Outcome" }
            div.kv-grid {
                (render_kv("Verdict", outcome.verdict.as_str()))
                (render_kv("Backend", outcome.backend.as_str()))
                (render_kv("Closed", &format_timestamp(outcome.closed_at)))
            }
            (render_command_recipe(&outcome.command))
            // `from_ref` lets the single primary metric reuse the table renderer.
            (render_metric_panel("Primary metric", std::slice::from_ref(&outcome.primary_metric), outcome))
            @if !outcome.supporting_metrics.is_empty() {
                (render_metric_panel("Supporting metrics", &outcome.supporting_metrics, outcome))
            }
            @if !outcome.dimensions.is_empty() {
                section.subcard {
                    h3 { "Dimensions" }
                    table.metric-table {
                        thead { tr { th { "Key" } th { "Value" } } }
                        tbody {
                            @for (key, value) in &outcome.dimensions {
                                tr {
                                    td { (key) }
                                    td { (render_dimension_value(value)) }
                                }
                            }
                        }
                    }
                }
            }
            section.subcard {
                h3 { "Rationale" }
                p.prose { (outcome.rationale) }
            }
            @if let Some(analysis) = outcome.analysis.as_ref() {
                (render_experiment_analysis(analysis))
            }
        }
    }
}
-fn render_field(class: NodeClass, schema: &ProjectSchema, key: &str, value: &Value) -> Markup {
- let value_type = schema
- .field_spec(class, key)
- .and_then(|field| field.value_type);
- let is_plottable = schema
- .field_spec(class, key)
- .is_some_and(|field| field.is_plottable());
/// Renders the analysis subcard: summary prose plus the full analysis body
/// in a code block.
fn render_experiment_analysis(analysis: &ExperimentAnalysis) -> Markup {
    html! {
        section.subcard {
            h3 { "Analysis" }
            p.prose { (analysis.summary) }
            div.code-block {
                (analysis.body)
            }
        }
    }
}
+
/// Renders the command recipe subcard: the space-joined argv, the optional
/// working directory, and an env-var table when any are set.
fn render_command_recipe(command: &fidget_spinner_core::CommandRecipe) -> Markup {
    html! {
        section.subcard {
            h3 { "Command" }
            div.kv-grid {
                (render_kv(
                    "argv",
                    // Joined for display only; no shell-quoting is applied.
                    &command
                        .argv
                        .iter()
                        .map(ToString::to_string)
                        .collect::<Vec<_>>()
                        .join(" "),
                ))
                @if let Some(working_directory) = command.working_directory.as_ref() {
                    (render_kv("cwd", working_directory.as_str()))
                }
            }
            @if !command.env.is_empty() {
                table.metric-table {
                    thead { tr { th { "Env" } th { "Value" } } }
                    tbody {
                        @for (key, value) in &command.env {
                            tr {
                                td { (key) }
                                td { (value) }
                            }
                        }
                    }
                }
            }
        }
    }
}
+
/// Renders a key/value metric table; units are resolved per metric via
/// `metric_unit_for` against the owning outcome.
fn render_metric_panel(
    title: &str,
    metrics: &[fidget_spinner_core::MetricValue],
    outcome: &ExperimentOutcome,
) -> Markup {
    html! {
        section.subcard {
            h3 { (title) }
            table.metric-table {
                thead {
                    tr {
                        th { "Key" }
                        th { "Value" }
                    }
                }
                tbody {
                    @for metric in metrics {
                        tr {
                            td { (metric.key) }
                            td { (format_metric_value(metric.value, metric_unit_for(metric, outcome))) }
                        }
                    }
                }
            }
        }
    }
}
+
+fn metric_unit_for(
+ metric: &fidget_spinner_core::MetricValue,
+ outcome: &ExperimentOutcome,
+) -> MetricUnit {
+ if metric.key == outcome.primary_metric.key {
+ return MetricUnit::Custom;
+ }
+ MetricUnit::Custom
+}
+
/// Renders the "Influence Network" card with side-by-side Parents and
/// Children link lists, each capped by `limit`.
fn render_vertex_relation_sections(
    parents: &[VertexSummary],
    children: &[VertexSummary],
    limit: Option<u32>,
) -> Markup {
    html! {
        section.card {
            h2 { "Influence Network" }
            div.split {
                div.subcard {
                    h3 { "Parents" }
                    @if parents.is_empty() {
                        p.muted { "No parent influences." }
                    } @else {
                        div.link-list {
                            @for parent in limit_items(parents, limit) {
                                (render_vertex_chip(parent))
                            }
                        }
                    }
                }
                div.subcard {
                    h3 { "Children" }
                    @if children.is_empty() {
                        p.muted { "No downstream influences." }
                    } @else {
                        div.link-list {
                            @for child in limit_items(children, limit) {
                                (render_vertex_chip(child))
                            }
                        }
                    }
                }
            }
        }
    }
}
-fn render_string_value(text: &str) -> Markup {
- let finder = LinkFinder::new();
/// Renders the "Artifacts" card: one mini-card per attached artifact with
/// kind chip, optional summary, and locator. Capped by `limit`.
fn render_artifact_section(
    artifacts: &[fidget_spinner_store_sqlite::ArtifactSummary],
    limit: Option<u32>,
) -> Markup {
    html! {
        section.card {
            h2 { "Artifacts" }
            @if artifacts.is_empty() {
                p.muted { "No attached artifacts." }
            } @else {
                div.card-grid {
                    @for artifact in limit_items(artifacts, limit) {
                        article.mini-card {
                            div.card-header {
                                a.title-link href=(artifact_href(&artifact.slug)) { (artifact.label) }
                                span.status-chip.classless { (artifact.kind.as_str()) }
                            }
                            @if let Some(summary) = artifact.summary.as_ref() {
                                p.prose { (summary) }
                            }
                            div.meta-row {
                                span.muted { (artifact.locator) }
                            }
                        }
                    }
                }
            }
        }
    }
}
-fn render_json_value(value: &Value) -> Markup {
- let text = to_pretty_json(value).unwrap_or_else(|_| value.to_string());
/// Renders a titled experiment card grid (used for both open and closed
/// experiment sections), capped by `limit`.
fn render_experiment_section(
    title: &str,
    experiments: &[ExperimentSummary],
    limit: Option<u32>,
) -> Markup {
    html! {
        section.card {
            h2 { (title) }
            @if experiments.is_empty() {
                p.muted { "None." }
            } @else {
                div.card-grid {
                    @for experiment in limit_items(experiments, limit) {
                        (render_experiment_card(experiment))
                    }
                }
            }
        }
    }
}
-fn render_untyped_value(value: &Value) -> Markup {
- match value {
- Value::String(text) => render_string_value(text),
- Value::Number(number) => html! {
- code class="numeric" { (number) }
- },
- Value::Bool(boolean) => html! {
- span class={ "boolean " (if *boolean { "true" } else { "false" }) } {
- (if *boolean { "true" } else { "false" })
/// Renders a single experiment mini-card: title link, status and optional
/// verdict chips, summary, primary-metric pill, tags, and update time.
fn render_experiment_card(experiment: &ExperimentSummary) -> Markup {
    html! {
        article.mini-card {
            div.card-header {
                a.title-link href=(experiment_href(&experiment.slug)) { (experiment.title) }
                span.status-chip class=(experiment_status_class(experiment.status)) {
                    (experiment.status.as_str())
                }
                @if let Some(verdict) = experiment.verdict {
                    span.status-chip class=(verdict_class(verdict)) { (verdict.as_str()) }
                }
            }
            @if let Some(summary) = experiment.summary.as_ref() {
                p.prose { (summary) }
            }
            @if let Some(metric) = experiment.primary_metric.as_ref() {
                div.meta-row {
                    span.metric-pill {
                        (metric.key) ": "
                        (format_metric_value(metric.value, metric.unit))
                    }
                }
            }
            @if !experiment.tags.is_empty() {
                div.chip-row {
                    @for tag in &experiment.tags {
                        span.tag-chip { (tag) }
                    }
                }
            }
            div.meta-row.muted {
                span { "updated " (format_timestamp(experiment.updated_at)) }
            }
        }
    }
}
-fn render_timestamp(timestamp: OffsetDateTime) -> String {
- timestamp
- .format(&format_description!(
- "[year]-[month]-[day] [hour]:[minute]:[second]Z"
- ))
- .unwrap_or_else(|_| timestamp.to_string())
/// Renders a one-line experiment reference: link chip plus, when present,
/// the primary-metric pill.
fn render_experiment_summary_line(experiment: &ExperimentSummary) -> Markup {
    html! {
        div.link-list {
            (render_experiment_link_chip(experiment))
            @if let Some(metric) = experiment.primary_metric.as_ref() {
                span.metric-pill {
                    (metric.key) ": "
                    (format_metric_value(metric.value, metric.unit))
                }
            }
        }
    }
}
-fn render_timestamp_value(raw: &str) -> String {
- OffsetDateTime::parse(raw, &Rfc3339)
- .map(render_timestamp)
- .unwrap_or_else(|_| raw.to_owned())
/// Renders a compact experiment link chip with an optional verdict badge.
fn render_experiment_link_chip(experiment: &ExperimentSummary) -> Markup {
    html! {
        a.link-chip href=(experiment_href(&experiment.slug)) {
            span { (experiment.title) }
            @if let Some(verdict) = experiment.verdict {
                span.status-chip class=(verdict_class(verdict)) { (verdict.as_str()) }
            }
        }
    }
}
-fn stylesheet() -> &'static str {
- r#"
- :root {
- color-scheme: light;
- --bg: #f6f3ec;
- --panel: #fffdf8;
- --line: #d8d1c4;
- --text: #22201a;
- --muted: #746e62;
- --accent: #2d5c4d;
- --accent-soft: #dbe8e2;
- --tag: #ece5d8;
- --warn: #8b5b24;
+fn render_vertex_chip(summary: &VertexSummary) -> Markup {
+ let href = match summary.vertex {
+ VertexRef::Hypothesis(_) => hypothesis_href(&summary.slug),
+ VertexRef::Experiment(_) => experiment_href(&summary.slug),
+ };
+ let kind = match summary.vertex {
+ VertexRef::Hypothesis(_) => "hypothesis",
+ VertexRef::Experiment(_) => "experiment",
+ };
+ html! {
+ a.link-chip href=(href) {
+ span.kind-chip { (kind) }
+ span { (summary.title) }
+ @if let Some(summary_text) = summary.summary.as_ref() {
+ span.muted { " — " (summary_text) }
+ }
+ }
}
+}
- * { box-sizing: border-box; }
-
- body {
- margin: 0;
- background: var(--bg);
- color: var(--text);
- font: 15px/1.5 "Iosevka Web", "IBM Plex Mono", "SFMono-Regular", monospace;
/// Renders a link chip for a resolved attachment target (kind badge, title,
/// optional summary suffix).
fn render_attachment_chip(attachment: &AttachmentDisplay) -> Markup {
    html! {
        a.link-chip href=(&attachment.href) {
            span.kind-chip { (attachment.kind) }
            span { (&attachment.title) }
            @if let Some(summary) = attachment.summary.as_ref() {
                span.muted { " — " (summary) }
            }
        }
    }
}
- a {
- color: var(--accent);
- text-decoration: none;
/// Renders a simple titled prose card.
fn render_prose_block(title: &str, body: &str) -> Markup {
    html! {
        section.card {
            h2 { (title) }
            p.prose { (body) }
        }
    }
}
- a:hover {
- text-decoration: underline;
/// Wraps page content in the shared HTML shell: head with inline styles,
/// a page header carrying an optional breadcrumb ("home / <label>") and
/// optional subtitle, then the content itself.
fn render_shell(
    title: &str,
    subtitle: Option<&str>,
    breadcrumb: Option<(&str, String)>,
    content: Markup,
) -> Markup {
    html! {
        (DOCTYPE)
        html {
            head {
                meta charset="utf-8";
                meta name="viewport" content="width=device-width, initial-scale=1";
                title { (title) }
                // Styles are inlined; the navigator serves no static assets.
                style { (PreEscaped(styles())) }
            }
            body {
                main.shell {
                    header.page-header {
                        div.eyebrow {
                            a href="/" { "home" }
                            @if let Some((label, href)) = breadcrumb {
                                span.sep { "/" }
                                a href=(href) { (label) }
                            }
                        }
                        h1.page-title { (title) }
                        @if let Some(subtitle) = subtitle {
                            p.page-subtitle { (subtitle) }
                        }
                    }
                    (content)
                }
            }
        }
    }
}
- .shell {
- display: grid;
- grid-template-columns: 18rem minmax(0, 1fr);
- min-height: 100vh;
/// Renders one label/value pair for a `kv-grid` card.
fn render_kv(label: &str, value: &str) -> Markup {
    html! {
        div.kv {
            div.kv-label { (label) }
            div.kv-value { (value) }
        }
    }
}
- .rail {
- border-right: 1px solid var(--line);
- padding: 1.25rem 1rem;
- position: sticky;
- top: 0;
- align-self: start;
- height: 100vh;
- overflow: auto;
- background: rgba(255, 253, 248, 0.85);
- backdrop-filter: blur(6px);
/// Stringifies a run-dimension value for the dimensions table.
/// NOTE(review): timestamps use `to_string()` here rather than
/// `format_timestamp`, so they render in the type's default format — confirm
/// this asymmetry is intentional.
fn render_dimension_value(value: &RunDimensionValue) -> String {
    match value {
        RunDimensionValue::String(value) => value.to_string(),
        RunDimensionValue::Numeric(value) => format_float(*value),
        RunDimensionValue::Boolean(value) => value.to_string(),
        RunDimensionValue::Timestamp(value) => value.to_string(),
    }
}
- .project, .feed-meta, .entry-meta, .entry-summary, .tag-description {
- color: var(--muted);
/// Formats a metric value according to its unit: grouped integer for bytes
/// (with a " B" suffix) and counts, fixed 3 decimals for seconds, fixed 4
/// decimals for ratios, and adaptive formatting for custom units.
fn format_metric_value(value: f64, unit: MetricUnit) -> String {
    match unit {
        MetricUnit::Bytes => format!("{} B", format_integerish(value)),
        MetricUnit::Seconds => format!("{value:.3} s"),
        MetricUnit::Count => format_integerish(value),
        MetricUnit::Ratio => format!("{value:.4}"),
        MetricUnit::Custom => format_float(value),
    }
}
- .tag-list {
- display: grid;
- gap: 0.5rem;
+fn format_float(value: f64) -> String {
+ if value.fract() == 0.0 {
+ format_integerish(value)
+ } else {
+ format!("{value:.4}")
}
+}
- .tag-link {
- display: grid;
- grid-template-columns: minmax(0, 1fr) auto;
- gap: 0.2rem 0.75rem;
- padding: 0.55rem 0.7rem;
- border: 1px solid var(--line);
- background: var(--panel);
/// Formats a float as a rounded integer with comma thousands separators,
/// e.g. `1234567.0` -> `"1,234,567"`. The sign of negative inputs is
/// preserved (including values that round to zero, matching `{:.0}`).
fn format_integerish(value: f64) -> String {
    let digits = format!("{:.0}", value.abs());
    let mut out = String::with_capacity(digits.len() + digits.len() / 3 + 1);
    if value.is_sign_negative() {
        out.push('-');
    }
    // Single forward pass: emit a comma whenever the count of digits still
    // to come is a positive multiple of three.
    for (index, digit) in digits.chars().enumerate() {
        if index != 0 && (digits.len() - index) % 3 == 0 {
            out.push(',');
        }
        out.push(digit);
    }
    out
}
+
/// Formats a timestamp as `YYYY-MM-DD HH:MM` (UTC offset not shown), falling
/// back to RFC 3339 and finally to the raw unix timestamp if formatting fails.
fn format_timestamp(value: OffsetDateTime) -> String {
    const TIMESTAMP: &[time::format_description::FormatItem<'static>] =
        format_description!("[year]-[month]-[day] [hour]:[minute]");
    value.format(TIMESTAMP).unwrap_or_else(|_| {
        value
            .format(&Rfc3339)
            .unwrap_or_else(|_| value.unix_timestamp().to_string())
    })
}
+
/// Builds the detail-page URL for a frontier slug (percent-encoded segment).
fn frontier_href(slug: &Slug) -> String {
    format!("/frontier/{}", encode_path_segment(slug.as_str()))
}
+
/// Builds the detail-page URL for a hypothesis slug (percent-encoded segment).
fn hypothesis_href(slug: &Slug) -> String {
    format!("/hypothesis/{}", encode_path_segment(slug.as_str()))
}
- .tag-link.selected {
- border-color: var(--accent);
- background: var(--accent-soft);
/// Builds a hypothesis URL from its id; the detail route resolves ids as
/// well as slugs, so this is used where only the id is at hand (roadmap).
fn hypothesis_href_from_id(id: fidget_spinner_core::HypothesisId) -> String {
    format!("/hypothesis/{}", encode_path_segment(&id.to_string()))
}
+
+fn hypothesis_title_for_roadmap_item(
+ projection: &FrontierOpenProjection,
+ hypothesis_id: fidget_spinner_core::HypothesisId,
+) -> String {
+ projection
+ .active_hypotheses
+ .iter()
+ .find(|state| state.hypothesis.id == hypothesis_id)
+ .map(|state| state.hypothesis.title.to_string())
+ .unwrap_or_else(|| hypothesis_id.to_string())
+}
+
/// Builds the detail-page URL for an experiment slug (percent-encoded segment).
fn experiment_href(slug: &Slug) -> String {
    format!("/experiment/{}", encode_path_segment(slug.as_str()))
}
+
/// Builds the detail-page URL for an artifact slug (percent-encoded segment).
fn artifact_href(slug: &Slug) -> String {
    format!("/artifact/{}", encode_path_segment(slug.as_str()))
}
+
/// Resolves an attachment target reference into display data (kind badge,
/// href, title, optional summary) by re-reading the target record from the
/// store; any failed lookup is propagated so the caller aborts the page.
fn resolve_attachment_display(
    store: &fidget_spinner_store_sqlite::ProjectStore,
    target: AttachmentTargetRef,
) -> Result<AttachmentDisplay, StoreError> {
    match target {
        AttachmentTargetRef::Frontier(id) => {
            let frontier = store.read_frontier(&id.to_string())?;
            Ok(AttachmentDisplay {
                kind: "frontier",
                href: frontier_href(&frontier.slug),
                title: frontier.label.to_string(),
                summary: Some(frontier.objective.to_string()),
            })
        }
        AttachmentTargetRef::Hypothesis(id) => {
            let detail = store.read_hypothesis(&id.to_string())?;
            Ok(AttachmentDisplay {
                kind: "hypothesis",
                href: hypothesis_href(&detail.record.slug),
                title: detail.record.title.to_string(),
                summary: Some(detail.record.summary.to_string()),
            })
        }
        AttachmentTargetRef::Experiment(id) => {
            let detail = store.read_experiment(&id.to_string())?;
            Ok(AttachmentDisplay {
                kind: "experiment",
                href: experiment_href(&detail.record.slug),
                title: detail.record.title.to_string(),
                // Experiment summaries are optional, unlike the other kinds.
                summary: detail.record.summary.as_ref().map(ToString::to_string),
            })
        }
    }
}
- .tag-name {
- font-weight: 700;
- overflow-wrap: anywhere;
/// Percent-encodes a value for use as a single URL path segment.
/// `NON_ALPHANUMERIC` over-encodes (even `-` becomes `%2D`) but is always
/// safe for path segments.
fn encode_path_segment(value: &str) -> String {
    utf8_percent_encode(value, NON_ALPHANUMERIC).to_string()
}
+
+fn frontier_status_class(status: &str) -> &'static str {
+ match status {
+ "exploring" => "status-exploring",
+ "paused" => "status-parked",
+ "archived" => "status-archived",
+ _ => "status-neutral",
}
+}
- .tag-count {
- color: var(--muted);
+fn experiment_status_class(status: ExperimentStatus) -> &'static str {
+ match status {
+ ExperimentStatus::Open => "status-open",
+ ExperimentStatus::Closed => "status-neutral",
}
+}
- .tag-description {
- grid-column: 1 / -1;
- font-size: 0.9rem;
- overflow-wrap: anywhere;
+fn verdict_class(verdict: FrontierVerdict) -> &'static str {
+ match verdict {
+ FrontierVerdict::Accepted => "status-accepted",
+ FrontierVerdict::Kept => "status-kept",
+ FrontierVerdict::Parked => "status-parked",
+ FrontierVerdict::Rejected => "status-rejected",
}
+}
+
+fn limit_items<T>(items: &[T], limit: Option<u32>) -> &[T] {
+ let Some(limit) = limit else {
+ return items;
+ };
+ let Ok(limit) = usize::try_from(limit) else {
+ return items;
+ };
+ let end = items.len().min(limit);
+ &items[..end]
+}
- .feed {
- padding: 1.5rem;
+fn styles() -> &'static str {
+ r#"
+ :root {
+ color-scheme: dark;
+ --bg: #091019;
+ --panel: #0f1823;
+ --panel-2: #131f2d;
+ --border: #1e3850;
+ --text: #d8e6f3;
+ --muted: #87a0b8;
+ --accent: #6dc7ff;
+ --accepted: #7ce38b;
+ --kept: #8de0c0;
+ --parked: #d9c17d;
+ --rejected: #ee7a7a;
+ }
+ * { box-sizing: border-box; }
+ body {
+ margin: 0;
+ background: var(--bg);
+ color: var(--text);
+ font: 15px/1.5 "Iosevka Web", "Iosevka", "JetBrains Mono", monospace;
+ }
+ a {
+ color: var(--accent);
+ text-decoration: none;
+ }
+ a:hover { text-decoration: underline; }
+ .shell {
+ width: min(1500px, 100%);
+ margin: 0 auto;
+ padding: 20px;
display: grid;
- gap: 1rem;
- min-width: 0;
+ gap: 16px;
}
-
- .feed-header {
- padding-bottom: 0.5rem;
- border-bottom: 1px solid var(--line);
+ .page-header {
+ display: grid;
+ gap: 8px;
+ padding: 16px 18px;
+ border: 1px solid var(--border);
+ background: var(--panel);
}
-
- .entry, .empty-state {
+ .eyebrow {
+ display: flex;
+ gap: 10px;
+ color: var(--muted);
+ font-size: 13px;
+ text-transform: uppercase;
+ letter-spacing: 0.05em;
+ }
+ .sep { color: #4d6478; }
+ .page-title {
+ margin: 0;
+ font-size: clamp(22px, 3.8vw, 34px);
+ line-height: 1.1;
+ }
+ .page-subtitle {
+ margin: 0;
+ color: var(--muted);
+ max-width: 90ch;
+ }
+ .card {
+ border: 1px solid var(--border);
background: var(--panel);
- border: 1px solid var(--line);
- padding: 1rem 1.1rem;
+ padding: 16px 18px;
+ display: grid;
+ gap: 12px;
+ }
+ .subcard {
+ border: 1px solid #1a2b3c;
+ background: var(--panel-2);
+ padding: 12px 14px;
+ display: grid;
+ gap: 10px;
min-width: 0;
- overflow: hidden;
}
-
- .entry-header {
+ .block { display: grid; gap: 10px; }
+ .split {
display: grid;
- gap: 0.35rem;
- margin-bottom: 0.75rem;
+ gap: 16px;
+ grid-template-columns: repeat(auto-fit, minmax(320px, 1fr));
}
-
- .entry-title-row {
+ .card-grid {
+ display: grid;
+ gap: 12px;
+ grid-template-columns: repeat(auto-fit, minmax(280px, 1fr));
+ }
+ .mini-card {
+ border: 1px solid #1a2b3c;
+ background: var(--panel-2);
+ padding: 12px 14px;
+ display: grid;
+ gap: 9px;
+ min-width: 0;
+ }
+ .card-header {
display: flex;
+ gap: 10px;
+ align-items: center;
flex-wrap: wrap;
- gap: 0.75rem;
- align-items: baseline;
}
-
- .entry-title {
+ .title-link {
+ font-size: 16px;
+ font-weight: 700;
+ color: #f2f8ff;
+ }
+ h1, h2, h3 {
margin: 0;
- font-size: 1.05rem;
- min-width: 0;
- overflow-wrap: anywhere;
+ line-height: 1.15;
}
-
- .entry-meta {
+ h2 { font-size: 19px; }
+ h3 { font-size: 14px; color: #c9d8e6; }
+ .prose {
+ margin: 0;
+ color: #dce9f6;
+ max-width: 92ch;
+ white-space: pre-wrap;
+ }
+ .muted { color: var(--muted); }
+ .meta-row {
display: flex;
flex-wrap: wrap;
- gap: 0.75rem;
- font-size: 0.9rem;
+ gap: 14px;
+ align-items: center;
+ font-size: 13px;
+ }
+ .kv-grid {
+ display: grid;
+ gap: 10px 14px;
+ grid-template-columns: repeat(auto-fit, minmax(220px, 1fr));
+ }
+ .kv {
+ display: grid;
+ gap: 4px;
min-width: 0;
}
-
- .class-badge, .field-type, .entry-tag {
- display: inline-block;
- padding: 0.08rem 0.4rem;
- border: 1px solid var(--line);
- background: var(--tag);
- font-size: 0.82rem;
+ .kv-label {
+ color: var(--muted);
+ font-size: 12px;
+ text-transform: uppercase;
+ letter-spacing: 0.05em;
}
-
- .field-type.plottable {
- background: var(--accent-soft);
- border-color: var(--accent);
+ .kv-value {
+ overflow-wrap: anywhere;
}
-
- .tag-strip {
- display: inline-flex;
+ .chip-row, .link-list {
+ display: flex;
flex-wrap: wrap;
- gap: 0.35rem;
+ gap: 8px;
}
-
- .entry-body {
- margin-bottom: 0.9rem;
- min-width: 0;
+ .tag-chip, .kind-chip, .status-chip, .metric-pill, .link-chip {
+ border: 1px solid #24425b;
+ background: rgba(109, 199, 255, 0.06);
+ padding: 4px 8px;
+ font-size: 12px;
+ line-height: 1.2;
}
-
- .rich-text p {
- margin: 0 0 0.55rem;
- overflow-wrap: anywhere;
- word-break: break-word;
- max-width: 100%;
+ .link-chip {
+ display: inline-flex;
+ gap: 8px;
+ align-items: center;
}
-
- .rich-text p:last-child {
- margin-bottom: 0;
+ .kind-chip {
+ color: var(--muted);
+ text-transform: uppercase;
+ letter-spacing: 0.05em;
}
-
- .field-list {
- display: grid;
- grid-template-columns: minmax(12rem, 18rem) minmax(0, 1fr);
- gap: 0.55rem 1rem;
- margin: 0;
+ .status-chip {
+ text-transform: uppercase;
+ letter-spacing: 0.05em;
+ font-weight: 700;
+ }
+ .status-accepted { color: var(--accepted); border-color: rgba(124, 227, 139, 0.35); }
+ .status-kept { color: var(--kept); border-color: rgba(141, 224, 192, 0.35); }
+ .status-parked { color: var(--parked); border-color: rgba(217, 193, 125, 0.35); }
+ .status-rejected { color: var(--rejected); border-color: rgba(238, 122, 122, 0.35); }
+ .status-open { color: var(--accent); border-color: rgba(109, 199, 255, 0.35); }
+ .status-exploring { color: var(--accent); border-color: rgba(109, 199, 255, 0.35); }
+ .status-neutral, .classless { color: #a7c0d4; border-color: #2a4358; }
+ .status-archived { color: #7f8da0; border-color: #2b3540; }
+ .metric-table {
width: 100%;
- min-width: 0;
+ border-collapse: collapse;
+ font-size: 13px;
}
-
- .field-list dt {
+ .metric-table th,
+ .metric-table td {
+ padding: 7px 8px;
+ border-top: 1px solid #1b2d3e;
+ text-align: left;
+ vertical-align: top;
+ }
+ .metric-table th {
+ color: var(--muted);
font-weight: 700;
- display: flex;
- flex-wrap: wrap;
- gap: 0.4rem;
- align-items: center;
- overflow-wrap: anywhere;
- min-width: 0;
+ text-transform: uppercase;
+ letter-spacing: 0.05em;
+ font-size: 12px;
}
-
- .field-list dd {
- margin: 0;
- min-width: 0;
+ .related-block {
+ display: grid;
+ gap: 8px;
}
-
- .json-value {
+ .roadmap-list, .simple-list {
margin: 0;
- padding: 0.6rem 0.7rem;
- background: #f3eee4;
- overflow: auto;
+ padding-left: 18px;
+ display: grid;
+ gap: 6px;
+ }
+ .code-block {
white-space: pre-wrap;
overflow-wrap: anywhere;
+ border: 1px solid #1a2b3c;
+ background: #0b131c;
+ padding: 12px 14px;
}
-
- .boolean.true { color: var(--accent); }
- .boolean.false { color: #8a2f2f; }
- .numeric { font-size: 1rem; }
-
- .diagnostics {
- margin-top: 1rem;
- padding-top: 0.8rem;
- border-top: 1px dashed var(--line);
- }
-
- .diagnostics h4 {
- margin: 0 0 0.4rem;
- font-size: 0.9rem;
- text-transform: lowercase;
- }
-
- .diagnostics ul {
- margin: 0;
- padding-left: 1.1rem;
- }
-
- .diag-severity {
- color: var(--warn);
- font-weight: 700;
+ code {
+ font-family: inherit;
+ font-size: 0.95em;
}
-
- @media (max-width: 900px) {
- .shell {
- grid-template-columns: 1fr;
- }
-
- .rail {
- position: static;
- height: auto;
- border-right: 0;
- border-bottom: 1px solid var(--line);
- padding: 1rem 0.85rem;
- }
-
- .field-list {
- grid-template-columns: minmax(0, 1fr);
- }
-
- .feed {
- padding: 1rem;
- }
-
- .entry, .empty-state {
- padding: 0.85rem 0.9rem;
- }
+ @media (max-width: 720px) {
+ .shell { padding: 12px; }
+ .card, .page-header { padding: 14px; }
+ .subcard, .mini-card { padding: 12px; }
+ .card-grid, .split, .kv-grid { grid-template-columns: 1fr; }
}
"#
}
diff --git a/crates/fidget-spinner-cli/tests/mcp_hardening.rs b/crates/fidget-spinner-cli/tests/mcp_hardening.rs
index 21a3d04..fad4937 100644
--- a/crates/fidget-spinner-cli/tests/mcp_hardening.rs
+++ b/crates/fidget-spinner-cli/tests/mcp_hardening.rs
@@ -1,22 +1,21 @@
use axum as _;
+use clap as _;
+use dirs as _;
use std::fs;
use std::io::{self, BufRead, BufReader, Write};
use std::path::PathBuf;
use std::process::{Child, ChildStdin, ChildStdout, Command, Stdio};
use camino::Utf8PathBuf;
-use clap as _;
-use dirs as _;
use fidget_spinner_core::NonEmptyText;
-use fidget_spinner_store_sqlite::{ListNodesQuery, ProjectStore};
+use fidget_spinner_store_sqlite::ProjectStore;
use libmcp as _;
-use linkify as _;
use maud as _;
+use percent_encoding as _;
use serde as _;
use serde_json::{Value, json};
use time as _;
use tokio as _;
-use uuid as _;
type TestResult<T = ()> = Result<T, Box<dyn std::error::Error>>;
@@ -50,7 +49,6 @@ fn init_project(root: &Utf8PathBuf) -> TestResult {
ProjectStore::init(
root,
must(NonEmptyText::new("mcp test project"), "display name")?,
- must(NonEmptyText::new("local.mcp.test"), "namespace")?,
),
"init project store",
)?;
@@ -68,7 +66,7 @@ struct McpHarness {
}
impl McpHarness {
- fn spawn(project_root: Option<&Utf8PathBuf>, envs: &[(&str, String)]) -> TestResult<Self> {
+ fn spawn(project_root: Option<&Utf8PathBuf>) -> TestResult<Self> {
let mut command = Command::new(binary_path());
let _ = command
.arg("mcp")
@@ -79,9 +77,6 @@ impl McpHarness {
if let Some(project_root) = project_root {
let _ = command.arg("--project").arg(project_root.as_str());
}
- for (key, value) in envs {
- let _ = command.env(key, value);
- }
let mut child = must(command.spawn(), "spawn mcp host")?;
let stdin = must_some(child.stdin.take(), "host stdin")?;
let stdout = BufReader::new(must_some(child.stdout.take(), "host stdout")?);
@@ -137,6 +132,13 @@ impl McpHarness {
}))
}
+ fn call_tool_full(&mut self, id: u64, name: &str, arguments: Value) -> TestResult<Value> {
+ let mut arguments = arguments.as_object().cloned().unwrap_or_default();
+ let _ = arguments.insert("render".to_owned(), json!("json"));
+ let _ = arguments.insert("detail".to_owned(), json!("full"));
+ self.call_tool(id, name, Value::Object(arguments))
+ }
+
fn request(&mut self, message: Value) -> TestResult<Value> {
let encoded = must(serde_json::to_string(&message), "request json")?;
must(writeln!(self.stdin, "{encoded}"), "write request")?;
@@ -168,1401 +170,429 @@ fn tool_content(response: &Value) -> &Value {
&response["result"]["structuredContent"]
}
-fn tool_text(response: &Value) -> Option<&str> {
- response["result"]["content"]
- .as_array()
- .and_then(|content| content.first())
- .and_then(|entry| entry["text"].as_str())
-}
-
-fn fault_message(response: &Value) -> Option<&str> {
+fn tool_error_message(response: &Value) -> Option<&str> {
response["result"]["structuredContent"]["message"].as_str()
}
-#[test]
-fn cold_start_exposes_health_and_telemetry() -> TestResult {
- let project_root = temp_project_root("cold_start")?;
- init_project(&project_root)?;
-
- let mut harness = McpHarness::spawn(None, &[])?;
- let initialize = harness.initialize()?;
- assert_eq!(
- initialize["result"]["protocolVersion"].as_str(),
- Some("2025-11-25")
- );
- harness.notify_initialized()?;
-
- let tools = harness.tools_list()?;
- let tool_count = must_some(tools["result"]["tools"].as_array(), "tools array")?.len();
- assert!(tool_count >= 20);
-
- let health = harness.call_tool(3, "system.health", json!({}))?;
- assert_eq!(tool_content(&health)["ready"].as_bool(), Some(true));
- assert_eq!(tool_content(&health)["bound"].as_bool(), Some(false));
-
- let telemetry = harness.call_tool(4, "system.telemetry", json!({}))?;
- assert!(tool_content(&telemetry)["requests"].as_u64().unwrap_or(0) >= 3);
-
- let skills = harness.call_tool(15, "skill.list", json!({}))?;
- let skill_names = must_some(
- tool_content(&skills)["skills"].as_array(),
- "bundled skills array",
- )?
- .iter()
- .filter_map(|skill| skill["name"].as_str())
- .collect::<Vec<_>>();
- assert!(skill_names.contains(&"fidget-spinner"));
- assert!(skill_names.contains(&"frontier-loop"));
-
- let base_skill = harness.call_tool(16, "skill.show", json!({"name": "fidget-spinner"}))?;
+fn assert_tool_ok(response: &Value) {
assert_eq!(
- tool_content(&base_skill)["name"].as_str(),
- Some("fidget-spinner")
+ response["result"]["isError"].as_bool(),
+ Some(false),
+ "tool response unexpectedly errored: {response:#}"
);
- Ok(())
}
-#[test]
-fn tool_output_defaults_to_porcelain_and_supports_json_render() -> TestResult {
- let project_root = temp_project_root("render_modes")?;
- init_project(&project_root)?;
-
- let mut harness = McpHarness::spawn(None, &[])?;
- let _ = harness.initialize()?;
- harness.notify_initialized()?;
- let bind = harness.bind_project(21, &project_root)?;
- assert_eq!(bind["result"]["isError"].as_bool(), Some(false));
-
- let porcelain = harness.call_tool(22, "project.status", json!({}))?;
- let porcelain_text = must_some(tool_text(&porcelain), "porcelain project.status text")?;
- assert!(porcelain_text.contains("root:"));
- assert!(!porcelain_text.contains("\"project_root\":"));
-
- let health = harness.call_tool(23, "system.health", json!({}))?;
- let health_text = must_some(tool_text(&health), "porcelain system.health text")?;
- assert!(health_text.contains("ready | bound"));
- assert!(health_text.contains("binary:"));
-
- let frontier = harness.call_tool(
- 24,
- "frontier.init",
- json!({
- "label": "render frontier",
- "objective": "exercise porcelain output",
- "contract_title": "render contract",
- "benchmark_suites": ["smoke"],
- "promotion_criteria": ["retain key fields in porcelain"],
- "primary_metric": {
- "key": "score",
- "unit": "count",
- "objective": "maximize"
- }
- }),
- )?;
- assert_eq!(frontier["result"]["isError"].as_bool(), Some(false));
-
- let frontier_list = harness.call_tool(25, "frontier.list", json!({}))?;
- let frontier_text = must_some(tool_text(&frontier_list), "porcelain frontier.list text")?;
- assert!(frontier_text.contains("render frontier"));
- assert!(!frontier_text.contains("root_contract_node_id"));
-
- let json_render = harness.call_tool(26, "project.status", json!({"render": "json"}))?;
- let json_text = must_some(tool_text(&json_render), "json project.status text")?;
- assert!(json_text.contains("\"project_root\":"));
- assert!(json_text.trim_start().starts_with('{'));
-
- let json_full = harness.call_tool(
- 27,
- "project.status",
- json!({"render": "json", "detail": "full"}),
- )?;
- let json_full_text = must_some(tool_text(&json_full), "json full project.status text")?;
- assert!(json_full_text.contains("\"schema\": {"));
- Ok(())
-}
-
-#[test]
-fn safe_request_retries_after_worker_crash() -> TestResult {
- let project_root = temp_project_root("crash_retry")?;
- init_project(&project_root)?;
-
- let mut harness = McpHarness::spawn(
- None,
- &[(
- "FIDGET_SPINNER_MCP_TEST_HOST_CRASH_ONCE_KEY",
- "tools/call:project.status".to_owned(),
- )],
- )?;
- let _ = harness.initialize()?;
- harness.notify_initialized()?;
- let bind = harness.bind_project(3, &project_root)?;
- assert_eq!(bind["result"]["isError"].as_bool(), Some(false));
-
- let response = harness.call_tool(5, "project.status", json!({}))?;
- assert_eq!(response["result"]["isError"].as_bool(), Some(false));
-
- let telemetry = harness.call_tool(6, "system.telemetry", json!({}))?;
- assert_eq!(tool_content(&telemetry)["retries"].as_u64(), Some(1));
+fn assert_tool_error(response: &Value) {
assert_eq!(
- tool_content(&telemetry)["worker_restarts"].as_u64(),
- Some(1)
+ response["result"]["isError"].as_bool(),
+ Some(true),
+ "tool response unexpectedly succeeded: {response:#}"
);
- Ok(())
}
-#[test]
-fn safe_request_retries_after_worker_transient_fault() -> TestResult {
- let project_root = temp_project_root("transient_retry")?;
- init_project(&project_root)?;
- let marker = project_root.join("transient_once.marker");
-
- let mut harness = McpHarness::spawn(
- None,
- &[
- (
- "FIDGET_SPINNER_MCP_TEST_WORKER_TRANSIENT_ONCE_KEY",
- "tools/call:project.status".to_owned(),
- ),
- (
- "FIDGET_SPINNER_MCP_TEST_WORKER_TRANSIENT_ONCE_MARKER",
- marker.to_string(),
- ),
- ],
- )?;
- let _ = harness.initialize()?;
- harness.notify_initialized()?;
- let bind = harness.bind_project(12, &project_root)?;
- assert_eq!(bind["result"]["isError"].as_bool(), Some(false));
-
- let response = harness.call_tool(13, "project.status", json!({}))?;
- assert_eq!(response["result"]["isError"].as_bool(), Some(false));
-
- let telemetry = harness.call_tool(14, "system.telemetry", json!({}))?;
- assert_eq!(tool_content(&telemetry)["retries"].as_u64(), Some(1));
- assert_eq!(
- tool_content(&telemetry)["worker_restarts"].as_u64(),
- Some(1)
- );
- Ok(())
-}
-
-#[test]
-fn side_effecting_request_is_not_replayed_after_worker_crash() -> TestResult {
- let project_root = temp_project_root("no_replay")?;
- init_project(&project_root)?;
-
- let mut harness = McpHarness::spawn(
- None,
- &[(
- "FIDGET_SPINNER_MCP_TEST_HOST_CRASH_ONCE_KEY",
- "tools/call:source.record".to_owned(),
- )],
- )?;
- let _ = harness.initialize()?;
- harness.notify_initialized()?;
- let bind = harness.bind_project(6, &project_root)?;
- assert_eq!(bind["result"]["isError"].as_bool(), Some(false));
-
- let response = harness.call_tool(
- 7,
- "source.record",
- json!({
- "title": "should not duplicate",
- "summary": "dedupe check",
- "body": "host crash before worker execution",
- }),
- )?;
- assert_eq!(response["result"]["isError"].as_bool(), Some(true));
-
- let nodes = harness.call_tool(8, "node.list", json!({}))?;
- assert_eq!(
- must_some(tool_content(&nodes).as_array(), "node list")?.len(),
- 0
- );
-
- let telemetry = harness.call_tool(9, "system.telemetry", json!({}))?;
- assert_eq!(tool_content(&telemetry)["retries"].as_u64(), Some(0));
- Ok(())
+fn tool_names(response: &Value) -> Vec<&str> {
+ response["result"]["tools"]
+ .as_array()
+ .into_iter()
+ .flatten()
+ .filter_map(|tool| tool["name"].as_str())
+ .collect()
}
#[test]
-fn forced_rollout_preserves_initialized_state() -> TestResult {
- let project_root = temp_project_root("rollout")?;
+fn cold_start_exposes_bound_surface_and_new_toolset() -> TestResult {
+ let project_root = temp_project_root("cold_start")?;
init_project(&project_root)?;
- let mut harness = McpHarness::spawn(
- None,
- &[(
- "FIDGET_SPINNER_MCP_TEST_FORCE_ROLLOUT_KEY",
- "tools/call:project.status".to_owned(),
- )],
- )?;
- let _ = harness.initialize()?;
- harness.notify_initialized()?;
- let bind = harness.bind_project(9, &project_root)?;
- assert_eq!(bind["result"]["isError"].as_bool(), Some(false));
-
- let first = harness.call_tool(10, "project.status", json!({}))?;
- assert_eq!(first["result"]["isError"].as_bool(), Some(false));
-
- let second = harness.call_tool(11, "project.status", json!({}))?;
- assert_eq!(second["result"]["isError"].as_bool(), Some(false));
-
- let telemetry = harness.call_tool(12, "system.telemetry", json!({}))?;
- assert_eq!(tool_content(&telemetry)["host_rollouts"].as_u64(), Some(1));
- Ok(())
-}
-
-#[test]
-fn unbound_project_tools_fail_with_bind_hint() -> TestResult {
- let mut harness = McpHarness::spawn(None, &[])?;
- let _ = harness.initialize()?;
- harness.notify_initialized()?;
-
- let response = harness.call_tool(20, "project.status", json!({}))?;
- assert_eq!(response["result"]["isError"].as_bool(), Some(true));
- let message = response["result"]["structuredContent"]["message"].as_str();
- assert!(message.is_some_and(|message| message.contains("project.bind")));
- Ok(())
-}
-
-#[test]
-fn bind_bootstraps_empty_project_root() -> TestResult {
- let project_root = temp_project_root("bind_bootstrap")?;
-
- let mut harness = McpHarness::spawn(None, &[])?;
- let _ = harness.initialize()?;
- harness.notify_initialized()?;
-
- let bind = harness.bind_project(28, &project_root)?;
- assert_eq!(bind["result"]["isError"].as_bool(), Some(false));
- assert_eq!(
- tool_content(&bind)["project_root"].as_str(),
- Some(project_root.as_str())
- );
-
- let status = harness.call_tool(29, "project.status", json!({}))?;
- assert_eq!(status["result"]["isError"].as_bool(), Some(false));
- assert_eq!(
- tool_content(&status)["project_root"].as_str(),
- Some(project_root.as_str())
- );
-
- let store = must(ProjectStore::open(&project_root), "open bootstrapped store")?;
- assert_eq!(store.project_root().as_str(), project_root.as_str());
- Ok(())
-}
-
-#[test]
-fn bind_rejects_nonempty_uninitialized_root() -> TestResult {
- let project_root = temp_project_root("bind_nonempty")?;
- must(
- fs::write(project_root.join("README.txt").as_std_path(), "occupied"),
- "seed nonempty directory",
- )?;
-
- let mut harness = McpHarness::spawn(None, &[])?;
- let _ = harness.initialize()?;
- harness.notify_initialized()?;
-
- let bind = harness.bind_project(30, &project_root)?;
- assert_eq!(bind["result"]["isError"].as_bool(), Some(true));
- Ok(())
-}
-
-#[test]
-fn successful_bind_clears_stale_fault_from_health() -> TestResult {
- let bad_root = temp_project_root("bind_fault_bad")?;
- must(
- fs::write(bad_root.join("README.txt").as_std_path(), "occupied"),
- "seed bad bind root",
- )?;
- let good_root = temp_project_root("bind_fault_good")?;
- init_project(&good_root)?;
-
- let mut harness = McpHarness::spawn(None, &[])?;
- let _ = harness.initialize()?;
- harness.notify_initialized()?;
-
- let failed_bind = harness.bind_project(301, &bad_root)?;
- assert_eq!(failed_bind["result"]["isError"].as_bool(), Some(true));
-
- let failed_health = harness.call_tool(302, "system.health", json!({ "detail": "full" }))?;
- assert_eq!(
- tool_content(&failed_health)["last_fault"]["operation"].as_str(),
- Some("tools/call:project.bind")
- );
-
- let good_bind = harness.bind_project(303, &good_root)?;
- assert_eq!(good_bind["result"]["isError"].as_bool(), Some(false));
-
- let recovered_health = harness.call_tool(304, "system.health", json!({}))?;
- assert_eq!(recovered_health["result"]["isError"].as_bool(), Some(false));
- assert!(tool_content(&recovered_health).get("last_fault").is_none());
- assert!(!must_some(tool_text(&recovered_health), "recovered health text")?.contains("fault:"));
-
- let recovered_health_full =
- harness.call_tool(306, "system.health", json!({ "detail": "full" }))?;
- assert_eq!(
- tool_content(&recovered_health_full)["last_fault"],
- Value::Null,
- );
-
- let recovered_telemetry = harness.call_tool(305, "system.telemetry", json!({}))?;
- assert_eq!(
- recovered_telemetry["result"]["isError"].as_bool(),
- Some(false)
- );
+ let mut harness = McpHarness::spawn(None)?;
+ let initialize = harness.initialize()?;
assert_eq!(
- tool_content(&recovered_telemetry)["errors"].as_u64(),
- Some(1)
+ initialize["result"]["protocolVersion"].as_str(),
+ Some("2025-11-25")
);
- assert!(tool_content(&recovered_telemetry)["last_fault"].is_null());
- Ok(())
-}
-
-#[test]
-fn bind_retargets_writes_to_sibling_project_root() -> TestResult {
- let spinner_root = temp_project_root("spinner_root")?;
- let libgrid_root = temp_project_root("libgrid_root")?;
- init_project(&spinner_root)?;
- init_project(&libgrid_root)?;
- let notes_dir = libgrid_root.join("notes");
- must(
- fs::create_dir_all(notes_dir.as_std_path()),
- "create nested notes dir",
- )?;
-
- let mut harness = McpHarness::spawn(Some(&spinner_root), &[])?;
- let _ = harness.initialize()?;
harness.notify_initialized()?;
- let initial_status = harness.call_tool(31, "project.status", json!({}))?;
- assert_eq!(
- tool_content(&initial_status)["project_root"].as_str(),
- Some(spinner_root.as_str())
- );
+ let tools = harness.tools_list()?;
+ let tool_names = tool_names(&tools);
+ assert!(tool_names.contains(&"frontier.open"));
+ assert!(tool_names.contains(&"hypothesis.record"));
+ assert!(tool_names.contains(&"experiment.close"));
+ assert!(tool_names.contains(&"artifact.record"));
+ assert!(!tool_names.contains(&"node.list"));
+ assert!(!tool_names.contains(&"research.record"));
- let rebind = harness.bind_project(32, &notes_dir)?;
- assert_eq!(rebind["result"]["isError"].as_bool(), Some(false));
- assert_eq!(
- tool_content(&rebind)["project_root"].as_str(),
- Some(libgrid_root.as_str())
- );
+ let health = harness.call_tool(3, "system.health", json!({}))?;
+ assert_tool_ok(&health);
+ assert_eq!(tool_content(&health)["bound"].as_bool(), Some(false));
- let status = harness.call_tool(33, "project.status", json!({}))?;
+ let bind = harness.bind_project(4, &project_root)?;
+ assert_tool_ok(&bind);
assert_eq!(
- tool_content(&status)["project_root"].as_str(),
- Some(libgrid_root.as_str())
+ tool_content(&bind)["display_name"].as_str(),
+ Some("mcp test project")
);
- let note = harness.call_tool(
- 34,
- "note.quick",
- json!({
- "title": "libgrid dogfood note",
- "summary": "rebind summary",
- "body": "rebind should redirect writes",
- "tags": [],
- }),
- )?;
- assert_eq!(note["result"]["isError"].as_bool(), Some(false));
-
- let spinner_store = must(ProjectStore::open(&spinner_root), "open spinner store")?;
- let libgrid_store = must(ProjectStore::open(&libgrid_root), "open libgrid store")?;
- assert_eq!(
- must(
- spinner_store.list_nodes(ListNodesQuery::default()),
- "list spinner nodes after rebind"
- )?
- .len(),
- 0
- );
- assert_eq!(
- must(
- libgrid_store.list_nodes(ListNodesQuery::default()),
- "list libgrid nodes after rebind"
- )?
- .len(),
- 1
- );
+ let rebound_health = harness.call_tool(5, "system.health", json!({}))?;
+ assert_tool_ok(&rebound_health);
+ assert_eq!(tool_content(&rebound_health)["bound"].as_bool(), Some(true));
Ok(())
}
#[test]
-fn tag_registry_drives_note_creation_and_lookup() -> TestResult {
- let project_root = temp_project_root("tag_registry")?;
+fn frontier_open_is_the_grounding_surface_for_live_state() -> TestResult {
+ let project_root = temp_project_root("frontier_open")?;
init_project(&project_root)?;
- let mut harness = McpHarness::spawn(None, &[])?;
+ let mut harness = McpHarness::spawn(Some(&project_root))?;
let _ = harness.initialize()?;
harness.notify_initialized()?;
- let bind = harness.bind_project(40, &project_root)?;
- assert_eq!(bind["result"]["isError"].as_bool(), Some(false));
- let missing_tags = harness.call_tool(
- 41,
- "note.quick",
- json!({
- "title": "untagged",
- "summary": "should fail without explicit tags",
- "body": "should fail",
- }),
- )?;
- assert_eq!(missing_tags["result"]["isError"].as_bool(), Some(true));
-
- let tag = harness.call_tool(
- 42,
+ assert_tool_ok(&harness.call_tool(
+ 10,
"tag.add",
+ json!({"name": "root-conquest", "description": "root work"}),
+ )?);
+ assert_tool_ok(&harness.call_tool(
+ 11,
+ "metric.define",
json!({
- "name": "dogfood/mcp",
- "description": "MCP dogfood observations",
+ "key": "nodes_solved",
+ "unit": "count",
+ "objective": "maximize",
+ "visibility": "canonical",
}),
- )?;
- assert_eq!(tag["result"]["isError"].as_bool(), Some(false));
-
- let tag_list = harness.call_tool(43, "tag.list", json!({}))?;
- let tags = must_some(tool_content(&tag_list).as_array(), "tag list")?;
- assert_eq!(tags.len(), 1);
- assert_eq!(tags[0]["name"].as_str(), Some("dogfood/mcp"));
-
- let note = harness.call_tool(
- 44,
- "note.quick",
+ )?);
+ assert_tool_ok(&harness.call_tool(
+ 12,
+ "run.dimension.define",
+ json!({"key": "instance", "value_type": "string"}),
+ )?);
+ assert_tool_ok(&harness.call_tool(
+ 13,
+ "frontier.create",
+ json!({
+ "label": "LP root frontier",
+ "objective": "Drive root cash-out on braid rails",
+ "slug": "lp-root",
+ }),
+ )?);
+ assert_tool_ok(&harness.call_tool(
+ 14,
+ "hypothesis.record",
+ json!({
+ "frontier": "lp-root",
+ "slug": "node-local-loop",
+ "title": "Node-local logical cut loop",
+ "summary": "Push cut cash-out below root.",
+ "body": "Thread node-local logical cuts through native LP reoptimization so the same intervention can cash out below root on parity rails without corrupting root ownership semantics.",
+ "tags": ["root-conquest"],
+ }),
+ )?);
+ assert_tool_ok(&harness.call_tool(
+ 15,
+ "experiment.open",
json!({
- "title": "tagged note",
- "summary": "tagged lookup summary",
- "body": "tagged lookup should work",
- "tags": ["dogfood/mcp"],
+ "hypothesis": "node-local-loop",
+ "slug": "baseline-20s",
+ "title": "Baseline parity 20s",
+ "summary": "Reference rail.",
+ "tags": ["root-conquest"],
}),
- )?;
- assert_eq!(note["result"]["isError"].as_bool(), Some(false));
-
- let filtered = harness.call_tool(45, "node.list", json!({"tags": ["dogfood/mcp"]}))?;
- let nodes = must_some(tool_content(&filtered).as_array(), "filtered nodes")?;
- assert_eq!(nodes.len(), 1);
- assert_eq!(nodes[0]["tags"][0].as_str(), Some("dogfood/mcp"));
- Ok(())
-}
-
-#[test]
-fn source_record_accepts_tags_and_filtering() -> TestResult {
- let project_root = temp_project_root("research_tags")?;
- init_project(&project_root)?;
-
- let mut harness = McpHarness::spawn(None, &[])?;
- let _ = harness.initialize()?;
- harness.notify_initialized()?;
- let bind = harness.bind_project(451, &project_root)?;
- assert_eq!(bind["result"]["isError"].as_bool(), Some(false));
-
- let tag = harness.call_tool(
- 452,
- "tag.add",
+ )?);
+ assert_tool_ok(&harness.call_tool(
+ 16,
+ "experiment.close",
json!({
- "name": "campaign/libgrid",
- "description": "libgrid migration campaign",
+ "experiment": "baseline-20s",
+ "backend": "manual",
+ "command": {"argv": ["baseline-20s"]},
+ "dimensions": {"instance": "4x5-braid"},
+ "primary_metric": {"key": "nodes_solved", "value": 220.0},
+ "verdict": "kept",
+ "rationale": "Baseline retained as the current comparison line for the slice."
}),
- )?;
- assert_eq!(tag["result"]["isError"].as_bool(), Some(false));
-
- let research = harness.call_tool(
- 453,
- "source.record",
+ )?);
+ assert_tool_ok(&harness.call_tool(
+ 17,
+ "experiment.open",
json!({
- "title": "ingest tranche",
- "summary": "Import the next libgrid tranche.",
- "body": "Full import notes live here.",
- "tags": ["campaign/libgrid"],
+ "hypothesis": "node-local-loop",
+ "slug": "loop-20s",
+ "title": "Loop parity 20s",
+ "summary": "Live challenger.",
+ "tags": ["root-conquest"],
+ "parents": [{"kind": "experiment", "selector": "baseline-20s"}],
}),
- )?;
- assert_eq!(research["result"]["isError"].as_bool(), Some(false));
-
- let filtered = harness.call_tool(454, "node.list", json!({"tags": ["campaign/libgrid"]}))?;
- let nodes = must_some(tool_content(&filtered).as_array(), "filtered source nodes")?;
- assert_eq!(nodes.len(), 1);
- assert_eq!(nodes[0]["class"].as_str(), Some("source"));
- assert_eq!(nodes[0]["tags"][0].as_str(), Some("campaign/libgrid"));
- Ok(())
-}
-
-#[test]
-fn prose_tools_reject_invalid_shapes_over_mcp() -> TestResult {
- let project_root = temp_project_root("prose_invalid")?;
- init_project(&project_root)?;
+ )?);
- let mut harness = McpHarness::spawn(None, &[])?;
- let _ = harness.initialize()?;
- harness.notify_initialized()?;
- let bind = harness.bind_project(46, &project_root)?;
- assert_eq!(bind["result"]["isError"].as_bool(), Some(false));
-
- let missing_note_summary = harness.call_tool(
- 47,
- "note.quick",
- json!({
- "title": "untagged",
- "body": "body only",
- "tags": [],
- }),
- )?;
+ let frontier_open =
+ harness.call_tool_full(18, "frontier.open", json!({"frontier": "lp-root"}))?;
+ assert_tool_ok(&frontier_open);
+ let content = tool_content(&frontier_open);
+ assert_eq!(content["frontier"]["slug"].as_str(), Some("lp-root"));
assert_eq!(
- missing_note_summary["result"]["isError"].as_bool(),
- Some(true)
+ must_some(content["active_tags"].as_array(), "active tags array")?
+ .iter()
+ .filter_map(Value::as_str)
+ .collect::<Vec<_>>(),
+ vec!["root-conquest"]
);
assert!(
- fault_message(&missing_note_summary)
- .is_some_and(|message| message.contains("summary") || message.contains("missing field"))
+ must_some(
+ content["active_metric_keys"].as_array(),
+ "active metric keys array"
+ )?
+ .iter()
+ .any(|metric| metric["key"].as_str() == Some("nodes_solved"))
);
-
- let missing_source_summary = harness.call_tool(
- 48,
- "source.record",
- json!({
- "title": "source only",
- "body": "body only",
- }),
+ let active_hypotheses = must_some(
+ content["active_hypotheses"].as_array(),
+ "active hypotheses array",
)?;
+ assert_eq!(active_hypotheses.len(), 1);
assert_eq!(
- missing_source_summary["result"]["isError"].as_bool(),
- Some(true)
- );
- assert!(
- fault_message(&missing_source_summary)
- .is_some_and(|message| message.contains("summary") || message.contains("missing field"))
+ active_hypotheses[0]["hypothesis"]["slug"].as_str(),
+ Some("node-local-loop")
);
-
- let note_without_body = harness.call_tool(
- 49,
- "node.create",
- json!({
- "class": "note",
- "title": "missing body",
- "summary": "triage layer",
- "tags": [],
- "payload": {},
- }),
- )?;
- assert_eq!(note_without_body["result"]["isError"].as_bool(), Some(true));
- assert!(
- fault_message(&note_without_body)
- .is_some_and(|message| message.contains("payload field `body`"))
- );
-
- let source_without_summary = harness.call_tool(
- 50,
- "node.create",
- json!({
- "class": "source",
- "title": "missing summary",
- "payload": { "body": "full research body" },
- }),
- )?;
assert_eq!(
- source_without_summary["result"]["isError"].as_bool(),
- Some(true)
+ active_hypotheses[0]["latest_closed_experiment"]["slug"].as_str(),
+ Some("baseline-20s")
);
- assert!(
- fault_message(&source_without_summary)
- .is_some_and(|message| message.contains("non-empty summary"))
+ assert_eq!(
+ must_some(
+ content["open_experiments"].as_array(),
+ "open experiments array"
+ )?[0]["slug"]
+ .as_str(),
+ Some("loop-20s")
);
+ assert!(content.get("artifacts").is_none());
+ assert!(active_hypotheses[0]["hypothesis"].get("body").is_none());
Ok(())
}
#[test]
-fn concise_note_reads_do_not_leak_body_text() -> TestResult {
- let project_root = temp_project_root("concise_note_read")?;
+fn hypothesis_body_discipline_is_enforced_over_mcp() -> TestResult {
+ let project_root = temp_project_root("single_paragraph")?;
init_project(&project_root)?;
- let mut harness = McpHarness::spawn(None, &[])?;
+ let mut harness = McpHarness::spawn(Some(&project_root))?;
let _ = harness.initialize()?;
harness.notify_initialized()?;
- let bind = harness.bind_project(50, &project_root)?;
- assert_eq!(bind["result"]["isError"].as_bool(), Some(false));
- let note = harness.call_tool(
- 51,
- "note.quick",
+ assert_tool_ok(&harness.call_tool(
+ 20,
+ "frontier.create",
json!({
- "title": "tagged note",
- "summary": "triage layer",
- "body": "full note body should stay out of concise reads",
- "tags": [],
+ "label": "Import frontier",
+ "objective": "Stress hypothesis discipline",
+ "slug": "discipline",
}),
- )?;
- assert_eq!(note["result"]["isError"].as_bool(), Some(false));
- let node_id = must_some(tool_content(&note)["id"].as_str(), "created note id")?.to_owned();
-
- let concise = harness.call_tool(52, "node.read", json!({ "node_id": node_id }))?;
- let concise_structured = tool_content(&concise);
- assert_eq!(concise_structured["summary"].as_str(), Some("triage layer"));
- assert!(concise_structured["payload_preview"].get("body").is_none());
- assert!(
- !must_some(tool_text(&concise), "concise note.read text")?
- .contains("full note body should stay out of concise reads")
- );
+ )?);
- let full = harness.call_tool(
- 53,
- "node.read",
- json!({ "node_id": node_id, "detail": "full" }),
- )?;
- assert_eq!(
- tool_content(&full)["payload"]["fields"]["body"].as_str(),
- Some("full note body should stay out of concise reads")
- );
- Ok(())
-}
-
-#[test]
-fn concise_prose_reads_only_surface_payload_field_names() -> TestResult {
- let project_root = temp_project_root("concise_prose_field_names")?;
- init_project(&project_root)?;
-
- let mut harness = McpHarness::spawn(None, &[])?;
- let _ = harness.initialize()?;
- harness.notify_initialized()?;
- let bind = harness.bind_project(531, &project_root)?;
- assert_eq!(bind["result"]["isError"].as_bool(), Some(false));
-
- let research = harness.call_tool(
- 532,
- "node.create",
+ let response = harness.call_tool(
+ 21,
+ "hypothesis.record",
json!({
- "class": "source",
- "title": "rich import",
- "summary": "triage layer only",
- "payload": {
- "body": "Body stays out of concise output.",
- "source_excerpt": "This imported excerpt is intentionally long and should never reappear in concise node reads as a value preview.",
- "verbatim_snippet": "Another long snippet that belongs in full payload inspection only, not in triage surfaces."
- }
+ "frontier": "discipline",
+ "title": "Paragraph discipline",
+ "summary": "Should reject multi-paragraph bodies.",
+ "body": "first paragraph\n\nsecond paragraph",
}),
)?;
- assert_eq!(research["result"]["isError"].as_bool(), Some(false));
- let node_id =
- must_some(tool_content(&research)["id"].as_str(), "created source id")?.to_owned();
-
- let concise = harness.call_tool(533, "node.read", json!({ "node_id": node_id }))?;
- let concise_structured = tool_content(&concise);
- assert_eq!(concise_structured["payload_field_count"].as_u64(), Some(2));
- let payload_fields = must_some(
- concise_structured["payload_fields"].as_array(),
- "concise prose payload fields",
- )?;
- assert!(
- payload_fields
- .iter()
- .any(|field| field.as_str() == Some("source_excerpt"))
- );
- assert!(concise_structured.get("payload_preview").is_none());
- let concise_text = must_some(tool_text(&concise), "concise prose read text")?;
- assert!(!concise_text.contains("This imported excerpt is intentionally long"));
- assert!(concise_text.contains("payload fields: source_excerpt, verbatim_snippet"));
+ assert_tool_error(&response);
+ assert!(must_some(tool_error_message(&response), "fault message")?.contains("paragraph"));
Ok(())
}
#[test]
-fn node_list_does_not_enumerate_full_prose_bodies() -> TestResult {
- let project_root = temp_project_root("node_list_no_body_leak")?;
+fn artifact_surface_preserves_reference_only() -> TestResult {
+ let project_root = temp_project_root("artifact_reference")?;
init_project(&project_root)?;
- let mut harness = McpHarness::spawn(None, &[])?;
+ let mut harness = McpHarness::spawn(Some(&project_root))?;
let _ = harness.initialize()?;
harness.notify_initialized()?;
- let bind = harness.bind_project(54, &project_root)?;
- assert_eq!(bind["result"]["isError"].as_bool(), Some(false));
- let note = harness.call_tool(
- 55,
- "note.quick",
+ assert_tool_ok(&harness.call_tool(
+ 30,
+ "frontier.create",
json!({
- "title": "tagged note",
- "summary": "triage summary",
- "body": "full note body should never appear in list-like surfaces",
- "tags": [],
+ "label": "Artifacts frontier",
+ "objective": "Keep dumps out of the token hot path",
+ "slug": "artifacts",
}),
- )?;
- assert_eq!(note["result"]["isError"].as_bool(), Some(false));
-
- let listed = harness.call_tool(56, "node.list", json!({ "class": "note" }))?;
- let listed_rows = must_some(tool_content(&listed).as_array(), "listed note rows")?;
- assert_eq!(listed_rows.len(), 1);
- assert_eq!(listed_rows[0]["summary"].as_str(), Some("triage summary"));
- assert!(listed_rows[0].get("body").is_none());
- assert!(
- !must_some(tool_text(&listed), "node.list text")?
- .contains("full note body should never appear in list-like surfaces")
- );
- Ok(())
-}
-
-#[test]
-fn metric_tools_are_listed_for_discovery() -> TestResult {
- let project_root = temp_project_root("metric_tool_list")?;
- init_project(&project_root)?;
-
- let mut harness = McpHarness::spawn(Some(&project_root), &[])?;
- let _ = harness.initialize()?;
- harness.notify_initialized()?;
- let tools = harness.tools_list()?;
- let names = must_some(tools["result"]["tools"].as_array(), "tool list")?
- .iter()
- .filter_map(|tool| tool["name"].as_str())
- .collect::<Vec<_>>();
- assert!(names.contains(&"metric.define"));
- assert!(names.contains(&"metric.keys"));
- assert!(names.contains(&"metric.best"));
- assert!(names.contains(&"metric.migrate"));
- assert!(names.contains(&"run.dimension.define"));
- assert!(names.contains(&"run.dimension.list"));
- assert!(names.contains(&"schema.field.upsert"));
- assert!(names.contains(&"schema.field.remove"));
- Ok(())
-}
-
-#[test]
-fn schema_field_tools_mutate_project_schema() -> TestResult {
- let project_root = temp_project_root("schema_field_tools")?;
- init_project(&project_root)?;
-
- let mut harness = McpHarness::spawn(Some(&project_root), &[])?;
- let _ = harness.initialize()?;
- harness.notify_initialized()?;
-
- let upsert = harness.call_tool(
- 861,
- "schema.field.upsert",
+ )?);
+ assert_tool_ok(&harness.call_tool(
+ 31,
+ "hypothesis.record",
json!({
- "name": "scenario",
- "node_classes": ["hypothesis", "analysis"],
- "presence": "recommended",
- "severity": "warning",
- "role": "projection_gate",
- "inference_policy": "manual_only",
- "value_type": "string"
+ "frontier": "artifacts",
+ "slug": "sourced-hypothesis",
+ "title": "Sourced hypothesis",
+ "summary": "Attach a large external source by reference only.",
+ "body": "Treat large external writeups as artifact references rather than inline context so the ledger stays scientifically austere.",
}),
- )?;
- assert_eq!(upsert["result"]["isError"].as_bool(), Some(false));
- assert_eq!(
- tool_content(&upsert)["field"]["name"].as_str(),
- Some("scenario")
- );
- assert_eq!(
- tool_content(&upsert)["field"]["node_classes"],
- json!(["hypothesis", "analysis"])
- );
-
- let schema = harness.call_tool(862, "project.schema", json!({ "detail": "full" }))?;
- assert_eq!(schema["result"]["isError"].as_bool(), Some(false));
- let fields = must_some(tool_content(&schema)["fields"].as_array(), "schema fields")?;
- assert!(fields.iter().any(|field| {
- field["name"].as_str() == Some("scenario") && field["value_type"].as_str() == Some("string")
- }));
-
- let remove = harness.call_tool(
- 863,
- "schema.field.remove",
+ )?);
+ assert_tool_ok(&harness.call_tool(
+ 32,
+ "artifact.record",
json!({
- "name": "scenario",
- "node_classes": ["hypothesis", "analysis"]
+ "kind": "document",
+ "slug": "lp-review-doc",
+ "label": "LP review tranche",
+ "summary": "External markdown tranche.",
+ "locator": "/tmp/lp-review.md",
+ "attachments": [{"kind": "hypothesis", "selector": "sourced-hypothesis"}],
}),
- )?;
- assert_eq!(remove["result"]["isError"].as_bool(), Some(false));
- assert_eq!(tool_content(&remove)["removed_count"].as_u64(), Some(1));
-
- let schema_after = harness.call_tool(864, "project.schema", json!({ "detail": "full" }))?;
- let fields_after = must_some(
- tool_content(&schema_after)["fields"].as_array(),
- "schema fields after remove",
- )?;
- assert!(
- !fields_after
- .iter()
- .any(|field| field["name"].as_str() == Some("scenario"))
- );
- Ok(())
-}
-
-#[test]
-fn bind_open_backfills_legacy_missing_summary() -> TestResult {
- let project_root = temp_project_root("bind_backfill")?;
- init_project(&project_root)?;
-
- let node_id = {
- let mut store = must(ProjectStore::open(&project_root), "open project store")?;
- let node = must(
- store.add_node(fidget_spinner_store_sqlite::CreateNodeRequest {
- class: fidget_spinner_core::NodeClass::Source,
- frontier_id: None,
- title: must(NonEmptyText::new("legacy source"), "legacy title")?,
- summary: Some(must(
- NonEmptyText::new("temporary summary"),
- "temporary summary",
- )?),
- tags: None,
- payload: fidget_spinner_core::NodePayload::with_schema(
- store.schema().schema_ref(),
- serde_json::from_value(json!({
- "body": "Derived summary first paragraph.\n\nLonger body follows."
- }))
- .map_err(|error| io::Error::other(format!("payload object: {error}")))?,
- ),
- annotations: Vec::new(),
- attachments: Vec::new(),
- }),
- "create legacy source node",
- )?;
- node.id.to_string()
- };
-
- let database_path = project_root.join(".fidget_spinner").join("state.sqlite");
- let clear_output = must(
- Command::new("sqlite3")
- .current_dir(project_root.as_std_path())
- .arg(database_path.as_str())
- .arg(format!(
- "UPDATE nodes SET summary = NULL WHERE id = '{node_id}';"
- ))
- .output(),
- "spawn sqlite3 for direct summary clear",
- )?;
- if !clear_output.status.success() {
- return Err(io::Error::other(format!(
- "sqlite3 summary clear failed: {}",
- String::from_utf8_lossy(&clear_output.stderr)
- ))
- .into());
- }
-
- let mut harness = McpHarness::spawn(None, &[])?;
- let _ = harness.initialize()?;
- harness.notify_initialized()?;
- let bind = harness.bind_project(60, &project_root)?;
- assert_eq!(bind["result"]["isError"].as_bool(), Some(false));
+ )?);
- let read = harness.call_tool(61, "node.read", json!({ "node_id": node_id }))?;
- assert_eq!(read["result"]["isError"].as_bool(), Some(false));
+ let artifact =
+ harness.call_tool_full(33, "artifact.read", json!({"artifact": "lp-review-doc"}))?;
+ assert_tool_ok(&artifact);
+ let content = tool_content(&artifact);
assert_eq!(
- tool_content(&read)["summary"].as_str(),
- Some("Derived summary first paragraph.")
+ content["record"]["locator"].as_str(),
+ Some("/tmp/lp-review.md")
);
-
- let listed = harness.call_tool(62, "node.list", json!({ "class": "source" }))?;
- let items = must_some(tool_content(&listed).as_array(), "source node list")?;
- assert_eq!(items.len(), 1);
+ assert!(content["record"].get("body").is_none());
assert_eq!(
- items[0]["summary"].as_str(),
- Some("Derived summary first paragraph.")
+ must_some(content["attachments"].as_array(), "artifact attachments")?[0]["kind"].as_str(),
+ Some("hypothesis")
);
Ok(())
}
#[test]
-fn metric_tools_rank_closed_experiments_and_enforce_disambiguation() -> TestResult {
- let project_root = temp_project_root("metric_rank_e2e")?;
+fn experiment_close_drives_metric_best_and_analysis() -> TestResult {
+ let project_root = temp_project_root("metric_best")?;
init_project(&project_root)?;
- let mut harness = McpHarness::spawn(Some(&project_root), &[])?;
+ let mut harness = McpHarness::spawn(Some(&project_root))?;
let _ = harness.initialize()?;
harness.notify_initialized()?;
- let frontier = harness.call_tool(
- 70,
- "frontier.init",
- json!({
- "label": "metric frontier",
- "objective": "exercise metric ranking",
- "contract_title": "metric contract",
- "benchmark_suites": ["smoke"],
- "promotion_criteria": ["rank by one key"],
- "primary_metric": {
- "key": "wall_clock_s",
- "unit": "seconds",
- "objective": "minimize"
- }
- }),
- )?;
- assert_eq!(frontier["result"]["isError"].as_bool(), Some(false));
- let frontier_id = must_some(
- tool_content(&frontier)["frontier_id"].as_str(),
- "frontier id",
- )?
- .to_owned();
- let metric_define = harness.call_tool(
- 701,
+ assert_tool_ok(&harness.call_tool(
+ 40,
"metric.define",
json!({
- "key": "wall_clock_s",
- "unit": "seconds",
- "objective": "minimize",
- "description": "elapsed wall time"
+ "key": "nodes_solved",
+ "unit": "count",
+ "objective": "maximize",
+ "visibility": "canonical",
}),
- )?;
- assert_eq!(metric_define["result"]["isError"].as_bool(), Some(false));
-
- let scenario_dimension = harness.call_tool(
- 702,
- "run.dimension.define",
- json!({
- "key": "scenario",
- "value_type": "string",
- "description": "workload family"
- }),
- )?;
- assert_eq!(
- scenario_dimension["result"]["isError"].as_bool(),
- Some(false)
- );
-
- let duration_dimension = harness.call_tool(
- 703,
+ )?);
+ assert_tool_ok(&harness.call_tool(
+ 41,
"run.dimension.define",
+ json!({"key": "instance", "value_type": "string"}),
+ )?);
+ assert_tool_ok(&harness.call_tool(
+ 42,
+ "frontier.create",
json!({
- "key": "duration_s",
- "value_type": "numeric",
- "description": "time budget in seconds"
- }),
- )?;
- assert_eq!(
- duration_dimension["result"]["isError"].as_bool(),
- Some(false)
- );
-
- let dimensions = harness.call_tool(704, "run.dimension.list", json!({}))?;
- assert_eq!(dimensions["result"]["isError"].as_bool(), Some(false));
- let dimension_rows = must_some(tool_content(&dimensions).as_array(), "run dimension rows")?;
- assert!(dimension_rows.iter().any(|row| {
- row["key"].as_str() == Some("benchmark_suite")
- && row["value_type"].as_str() == Some("string")
- }));
- assert!(dimension_rows.iter().any(|row| {
- row["key"].as_str() == Some("scenario")
- && row["description"].as_str() == Some("workload family")
- }));
- assert!(dimension_rows.iter().any(|row| {
- row["key"].as_str() == Some("duration_s") && row["value_type"].as_str() == Some("numeric")
- }));
-
- let first_change = harness.call_tool(
- 71,
- "node.create",
- json!({
- "class": "hypothesis",
- "frontier_id": frontier_id,
- "title": "first change",
- "summary": "first change summary",
- "payload": {
- "body": "first change body",
- "wall_clock_s": 14.0
- }
- }),
- )?;
- assert_eq!(first_change["result"]["isError"].as_bool(), Some(false));
- let first_change_id = must_some(
- tool_content(&first_change)["id"].as_str(),
- "first change id",
- )?;
- let first_experiment = harness.call_tool(
- 711,
- "experiment.open",
- json!({
- "frontier_id": frontier_id,
- "hypothesis_node_id": first_change_id,
- "title": "first experiment",
- "summary": "first experiment summary"
- }),
- )?;
- assert_eq!(first_experiment["result"]["isError"].as_bool(), Some(false));
- let first_experiment_id = must_some(
- tool_content(&first_experiment)["experiment_id"].as_str(),
- "first experiment id",
- )?;
-
- let first_close = harness.call_tool(
- 72,
- "experiment.close",
- json!({
- "experiment_id": first_experiment_id,
- "run": {
- "title": "first run",
- "summary": "first run summary",
- "backend": "worktree_process",
- "dimensions": {
- "benchmark_suite": "smoke",
- "scenario": "belt_4x5",
- "duration_s": 20.0
- },
- "command": {
- "working_directory": project_root.as_str(),
- "argv": ["true"]
- }
- },
- "primary_metric": {
- "key": "wall_clock_s",
- "value": 10.0
- },
- "note": {
- "summary": "first run note"
- },
- "verdict": "kept",
- "decision_title": "first decision",
- "decision_rationale": "keep first candidate around"
+ "label": "Metric frontier",
+ "objective": "Test best-of ranking",
+ "slug": "metric-frontier",
}),
- )?;
- assert_eq!(first_close["result"]["isError"].as_bool(), Some(false));
-
- let second_change = harness.call_tool(
- 73,
- "node.create",
+ )?);
+ assert_tool_ok(&harness.call_tool(
+ 43,
+ "hypothesis.record",
json!({
- "class": "hypothesis",
- "frontier_id": frontier_id,
- "title": "second change",
- "summary": "second change summary",
- "payload": {
- "body": "second change body",
- "wall_clock_s": 7.0
- }
+ "frontier": "metric-frontier",
+ "slug": "reopt-dominance",
+ "title": "Node reopt dominates native LP spend",
+ "summary": "Track node LP wallclock concentration on braid rails.",
+ "body": "Matched LP site traces indicate native LP spend is dominated by node reoptimization on the braid rails, so the next interventions should target node-local LP churn instead of root-only machinery.",
}),
- )?;
- assert_eq!(second_change["result"]["isError"].as_bool(), Some(false));
- let second_change_id = must_some(
- tool_content(&second_change)["id"].as_str(),
- "second change id",
- )?;
- let second_experiment = harness.call_tool(
- 712,
+ )?);
+ assert_tool_ok(&harness.call_tool(
+ 44,
"experiment.open",
json!({
- "frontier_id": frontier_id,
- "hypothesis_node_id": second_change_id,
- "title": "second experiment",
- "summary": "second experiment summary"
+ "hypothesis": "reopt-dominance",
+ "slug": "trace-baseline",
+ "title": "Trace baseline",
+ "summary": "First matched trace.",
}),
- )?;
- assert_eq!(
- second_experiment["result"]["isError"].as_bool(),
- Some(false)
- );
- let second_experiment_id = must_some(
- tool_content(&second_experiment)["experiment_id"].as_str(),
- "second experiment id",
- )?;
-
- let second_close = harness.call_tool(
- 74,
+ )?);
+ assert_tool_ok(&harness.call_tool(
+ 45,
"experiment.close",
json!({
- "experiment_id": second_experiment_id,
- "run": {
- "title": "second run",
- "summary": "second run summary",
- "backend": "worktree_process",
- "dimensions": {
- "benchmark_suite": "smoke",
- "scenario": "belt_4x5",
- "duration_s": 60.0
- },
- "command": {
- "working_directory": project_root.as_str(),
- "argv": ["true"]
- }
- },
- "primary_metric": {
- "key": "wall_clock_s",
- "value": 5.0
- },
- "note": {
- "summary": "second run note"
- },
+ "experiment": "trace-baseline",
+ "backend": "manual",
+ "command": {"argv": ["trace-baseline"]},
+ "dimensions": {"instance": "4x5-braid"},
+ "primary_metric": {"key": "nodes_solved", "value": 217.0},
"verdict": "kept",
- "decision_title": "second decision",
- "decision_rationale": "second candidate looks stronger"
+ "rationale": "Baseline trace is real but not dominant.",
}),
- )?;
- assert_eq!(second_close["result"]["isError"].as_bool(), Some(false));
-
- let second_frontier = harness.call_tool(
- 80,
- "frontier.init",
- json!({
- "label": "metric frontier two",
- "objective": "exercise frontier filtering",
- "contract_title": "metric contract two",
- "benchmark_suites": ["smoke"],
- "promotion_criteria": ["frontier filters should isolate rankings"],
- "primary_metric": {
- "key": "wall_clock_s",
- "unit": "seconds",
- "objective": "minimize"
- }
- }),
- )?;
- assert_eq!(second_frontier["result"]["isError"].as_bool(), Some(false));
- let second_frontier_id = must_some(
- tool_content(&second_frontier)["frontier_id"].as_str(),
- "second frontier id",
- )?
- .to_owned();
-
- let third_change = harness.call_tool(
- 81,
- "node.create",
- json!({
- "class": "hypothesis",
- "frontier_id": second_frontier_id,
- "title": "third change",
- "summary": "third change summary",
- "payload": {
- "body": "third change body",
- "wall_clock_s": 3.0
- }
- }),
- )?;
- assert_eq!(third_change["result"]["isError"].as_bool(), Some(false));
- let third_change_id = must_some(
- tool_content(&third_change)["id"].as_str(),
- "third change id",
- )?;
- let third_experiment = harness.call_tool(
- 811,
+ )?);
+ assert_tool_ok(&harness.call_tool(
+ 46,
"experiment.open",
json!({
- "frontier_id": second_frontier_id,
- "hypothesis_node_id": third_change_id,
- "title": "third experiment",
- "summary": "third experiment summary"
+ "hypothesis": "reopt-dominance",
+ "slug": "trace-node-reopt",
+ "title": "Trace node reopt",
+ "summary": "Matched LP site traces with node focus.",
+ "parents": [{"kind": "experiment", "selector": "trace-baseline"}],
}),
- )?;
- assert_eq!(third_experiment["result"]["isError"].as_bool(), Some(false));
- let third_experiment_id = must_some(
- tool_content(&third_experiment)["experiment_id"].as_str(),
- "third experiment id",
- )?;
-
- let third_close = harness.call_tool(
- 82,
+ )?);
+ assert_tool_ok(&harness.call_tool(
+ 47,
"experiment.close",
json!({
- "experiment_id": third_experiment_id,
- "run": {
- "title": "third run",
- "summary": "third run summary",
- "backend": "worktree_process",
- "dimensions": {
- "benchmark_suite": "smoke",
- "scenario": "belt_4x5_alt",
- "duration_s": 60.0
- },
- "command": {
- "working_directory": project_root.as_str(),
- "argv": ["true"]
- }
- },
- "primary_metric": {
- "key": "wall_clock_s",
- "value": 3.0
- },
- "note": {
- "summary": "third run note"
- },
- "verdict": "kept",
- "decision_title": "third decision",
- "decision_rationale": "third candidate is best overall but not in the first frontier"
- }),
- )?;
- assert_eq!(third_close["result"]["isError"].as_bool(), Some(false));
-
- let keys = harness.call_tool(75, "metric.keys", json!({}))?;
- assert_eq!(keys["result"]["isError"].as_bool(), Some(false));
- let key_rows = must_some(tool_content(&keys).as_array(), "metric keys array")?;
- assert!(key_rows.iter().any(|row| {
- row["key"].as_str() == Some("wall_clock_s") && row["source"].as_str() == Some("run_metric")
- }));
- assert!(key_rows.iter().any(|row| {
- row["key"].as_str() == Some("wall_clock_s")
- && row["source"].as_str() == Some("run_metric")
- && row["description"].as_str() == Some("elapsed wall time")
- && row["requires_order"].as_bool() == Some(false)
- }));
- assert!(key_rows.iter().any(|row| {
- row["key"].as_str() == Some("wall_clock_s")
- && row["source"].as_str() == Some("hypothesis_payload")
- }));
-
- let filtered_keys = harness.call_tool(
- 750,
- "metric.keys",
- json!({
- "source": "run_metric",
- "dimensions": {
- "scenario": "belt_4x5",
- "duration_s": 60.0
+ "experiment": "trace-node-reopt",
+ "backend": "manual",
+ "command": {"argv": ["matched-lp-site-traces"]},
+ "dimensions": {"instance": "4x5-braid"},
+ "primary_metric": {"key": "nodes_solved", "value": 273.0},
+ "verdict": "accepted",
+ "rationale": "Matched LP site traces show node reoptimization as the dominant sink.",
+ "analysis": {
+ "summary": "Node LP work is now the primary native sink.",
+ "body": "The differential traces isolate node reoptimization as the dominant native LP wallclock site on the matched braid rail, which justifies prioritizing node-local LP control work over further root-only tuning."
}
}),
- )?;
- assert_eq!(filtered_keys["result"]["isError"].as_bool(), Some(false));
- let filtered_key_rows = must_some(
- tool_content(&filtered_keys).as_array(),
- "filtered metric keys array",
- )?;
- assert_eq!(filtered_key_rows.len(), 1);
- assert_eq!(filtered_key_rows[0]["key"].as_str(), Some("wall_clock_s"));
- assert_eq!(filtered_key_rows[0]["experiment_count"].as_u64(), Some(1));
-
- let ambiguous = harness.call_tool(76, "metric.best", json!({ "key": "wall_clock_s" }))?;
- assert_eq!(ambiguous["result"]["isError"].as_bool(), Some(true));
- assert!(
- fault_message(&ambiguous)
- .is_some_and(|message| message.contains("ambiguous across sources"))
- );
-
- let run_metric_best = harness.call_tool(
- 77,
- "metric.best",
- json!({
- "key": "wall_clock_s",
- "source": "run_metric",
- "dimensions": {
- "scenario": "belt_4x5",
- "duration_s": 60.0
- },
- "limit": 5
- }),
- )?;
- assert_eq!(run_metric_best["result"]["isError"].as_bool(), Some(false));
- let run_best_rows = must_some(
- tool_content(&run_metric_best).as_array(),
- "run metric best array",
- )?;
- assert_eq!(run_best_rows[0]["value"].as_f64(), Some(5.0));
- assert_eq!(run_best_rows.len(), 1);
- assert_eq!(
- run_best_rows[0]["experiment_title"].as_str(),
- Some("second experiment")
- );
- assert_eq!(run_best_rows[0]["verdict"].as_str(), Some("kept"));
- assert_eq!(
- run_best_rows[0]["dimensions"]["scenario"].as_str(),
- Some("belt_4x5")
- );
- assert_eq!(
- run_best_rows[0]["dimensions"]["duration_s"].as_f64(),
- Some(60.0)
- );
- assert!(
- must_some(tool_text(&run_metric_best), "run metric best text")?.contains("hypothesis=")
- );
- assert!(must_some(tool_text(&run_metric_best), "run metric best text")?.contains("dims:"));
-
- let payload_requires_order = harness.call_tool(
- 78,
- "metric.best",
- json!({
- "key": "wall_clock_s",
- "source": "hypothesis_payload"
- }),
- )?;
- assert_eq!(
- payload_requires_order["result"]["isError"].as_bool(),
- Some(true)
- );
- assert!(
- fault_message(&payload_requires_order)
- .is_some_and(|message| message.contains("explicit order"))
- );
-
- let payload_best = harness.call_tool(
- 79,
- "metric.best",
- json!({
- "key": "wall_clock_s",
- "source": "hypothesis_payload",
- "dimensions": {
- "scenario": "belt_4x5",
- "duration_s": 60.0
- },
- "order": "asc"
- }),
- )?;
- assert_eq!(payload_best["result"]["isError"].as_bool(), Some(false));
- let payload_best_rows = must_some(
- tool_content(&payload_best).as_array(),
- "payload metric best array",
- )?;
- assert_eq!(payload_best_rows[0]["value"].as_f64(), Some(7.0));
- assert_eq!(payload_best_rows.len(), 1);
- assert_eq!(
- payload_best_rows[0]["experiment_title"].as_str(),
- Some("second experiment")
- );
+ )?);
- let filtered_best = harness.call_tool(
- 83,
+ let best = harness.call_tool_full(
+ 48,
"metric.best",
json!({
- "key": "wall_clock_s",
- "source": "run_metric",
- "frontier_id": frontier_id,
- "dimensions": {
- "scenario": "belt_4x5"
- },
- "limit": 5
+ "frontier": "metric-frontier",
+ "hypothesis": "reopt-dominance",
+ "key": "nodes_solved",
}),
)?;
- assert_eq!(filtered_best["result"]["isError"].as_bool(), Some(false));
- let filtered_rows = must_some(
- tool_content(&filtered_best).as_array(),
- "filtered metric best array",
+ assert_tool_ok(&best);
+ let entries = must_some(
+ tool_content(&best)["entries"].as_array(),
+ "metric best entries",
)?;
- assert_eq!(filtered_rows.len(), 2);
assert_eq!(
- filtered_rows[0]["experiment_title"].as_str(),
- Some("second experiment")
- );
- assert!(
- filtered_rows
- .iter()
- .all(|row| row["frontier_id"].as_str() == Some(frontier_id.as_str()))
+ entries[0]["experiment"]["slug"].as_str(),
+ Some("trace-node-reopt")
);
+ assert_eq!(entries[0]["value"].as_f64(), Some(273.0));
- let global_best = harness.call_tool(
- 84,
- "metric.best",
- json!({
- "key": "wall_clock_s",
- "source": "run_metric",
- "limit": 5
- }),
- )?;
- assert_eq!(global_best["result"]["isError"].as_bool(), Some(false));
- let global_rows = must_some(
- tool_content(&global_best).as_array(),
- "global metric best array",
+ let detail = harness.call_tool_full(
+ 49,
+ "experiment.read",
+ json!({"experiment": "trace-node-reopt"}),
)?;
+ assert_tool_ok(&detail);
+ let content = tool_content(&detail);
assert_eq!(
- global_rows[0]["experiment_title"].as_str(),
- Some("third experiment")
- );
- assert_eq!(
- global_rows[0]["frontier_id"].as_str(),
- Some(second_frontier_id.as_str())
- );
-
- let migrate = harness.call_tool(85, "metric.migrate", json!({}))?;
- assert_eq!(migrate["result"]["isError"].as_bool(), Some(false));
- assert_eq!(
- tool_content(&migrate)["inserted_metric_definitions"].as_u64(),
- Some(0)
- );
- assert_eq!(
- tool_content(&migrate)["inserted_dimension_definitions"].as_u64(),
- Some(0)
+ content["record"]["outcome"]["verdict"].as_str(),
+ Some("accepted")
);
assert_eq!(
- tool_content(&migrate)["inserted_dimension_values"].as_u64(),
- Some(0)
+ content["record"]["outcome"]["analysis"]["summary"].as_str(),
+ Some("Node LP work is now the primary native sink.")
);
Ok(())
}