swarm repositories / source
about summary refs log tree commit diff
path: root/crates/fidget-spinner-cli/src/mcp
diff options
context:
space:
mode:
Diffstat (limited to 'crates/fidget-spinner-cli/src/mcp')
-rw-r--r--  crates/fidget-spinner-cli/src/mcp/catalog.rs  1380
-rw-r--r--  crates/fidget-spinner-cli/src/mcp/host/runtime.rs  53
-rw-r--r--  crates/fidget-spinner-cli/src/mcp/service.rs  3448
3 files changed, 1960 insertions, 2921 deletions
diff --git a/crates/fidget-spinner-cli/src/mcp/catalog.rs b/crates/fidget-spinner-cli/src/mcp/catalog.rs
index ae3ca78..9b486bc 100644
--- a/crates/fidget-spinner-cli/src/mcp/catalog.rs
+++ b/crates/fidget-spinner-cli/src/mcp/catalog.rs
@@ -46,756 +46,814 @@ impl ToolSpec {
}
}
+const TOOL_SPECS: &[ToolSpec] = &[
+ ToolSpec {
+ name: "project.bind",
+ description: "Bind this MCP session to a project root or nested path inside a project store.",
+ dispatch: DispatchTarget::Host,
+ replay: ReplayContract::NeverReplay,
+ },
+ ToolSpec {
+ name: "project.status",
+ description: "Read coarse project metadata and ledger counts for the bound project.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::Convergent,
+ },
+ ToolSpec {
+ name: "tag.add",
+ description: "Register one repo-local tag with a required description.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::NeverReplay,
+ },
+ ToolSpec {
+ name: "tag.list",
+ description: "List the repo-local tag registry.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::Convergent,
+ },
+ ToolSpec {
+ name: "frontier.create",
+ description: "Create a new frontier scope.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::NeverReplay,
+ },
+ ToolSpec {
+ name: "frontier.list",
+ description: "List frontier scopes in the current project.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::Convergent,
+ },
+ ToolSpec {
+ name: "frontier.read",
+ description: "Read one frontier record, including its brief.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::Convergent,
+ },
+ ToolSpec {
+ name: "frontier.open",
+ description: "Open the bounded frontier overview: brief, active tags, live metrics, active hypotheses, and open experiments.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::Convergent,
+ },
+ ToolSpec {
+ name: "frontier.brief.update",
+ description: "Replace or patch the singleton frontier brief.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::NeverReplay,
+ },
+ ToolSpec {
+ name: "frontier.history",
+ description: "Read the frontier revision history.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::Convergent,
+ },
+ ToolSpec {
+ name: "hypothesis.record",
+ description: "Record one hypothesis. The body must stay a single paragraph.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::NeverReplay,
+ },
+ ToolSpec {
+ name: "hypothesis.list",
+ description: "List hypotheses, optionally narrowed by frontier or tag.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::Convergent,
+ },
+ ToolSpec {
+ name: "hypothesis.read",
+ description: "Read one hypothesis with its local neighborhood, experiments, and artifacts.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::Convergent,
+ },
+ ToolSpec {
+ name: "hypothesis.update",
+ description: "Patch hypothesis title, summary, body, tags, influence parents, or archive state.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::NeverReplay,
+ },
+ ToolSpec {
+ name: "hypothesis.history",
+ description: "Read the revision history for one hypothesis.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::Convergent,
+ },
+ ToolSpec {
+ name: "experiment.open",
+ description: "Open one experiment anchored to exactly one hypothesis.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::NeverReplay,
+ },
+ ToolSpec {
+ name: "experiment.list",
+ description: "List experiments, optionally narrowed by frontier, hypothesis, status, or tags.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::Convergent,
+ },
+ ToolSpec {
+ name: "experiment.read",
+ description: "Read one experiment with its owning hypothesis, local neighborhood, outcome, and artifacts.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::Convergent,
+ },
+ ToolSpec {
+ name: "experiment.update",
+ description: "Patch experiment metadata, influence parents, archive state, or replace the closed outcome wholesale.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::NeverReplay,
+ },
+ ToolSpec {
+ name: "experiment.close",
+ description: "Close one open experiment with typed dimensions, structured metrics, verdict, rationale, and optional analysis.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::NeverReplay,
+ },
+ ToolSpec {
+ name: "experiment.history",
+ description: "Read the revision history for one experiment.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::Convergent,
+ },
+ ToolSpec {
+ name: "artifact.record",
+ description: "Register an external artifact reference and attach it to frontiers, hypotheses, or experiments. Artifact bodies are never read through Spinner.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::NeverReplay,
+ },
+ ToolSpec {
+ name: "artifact.list",
+ description: "List artifact references, optionally narrowed by frontier, kind, or attachment target.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::Convergent,
+ },
+ ToolSpec {
+ name: "artifact.read",
+ description: "Read one artifact reference and its attachment targets.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::Convergent,
+ },
+ ToolSpec {
+ name: "artifact.update",
+ description: "Patch artifact metadata or replace its attachment set.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::NeverReplay,
+ },
+ ToolSpec {
+ name: "artifact.history",
+ description: "Read the revision history for one artifact.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::Convergent,
+ },
+ ToolSpec {
+ name: "metric.define",
+ description: "Register one project-level metric definition.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::NeverReplay,
+ },
+ ToolSpec {
+ name: "metric.keys",
+ description: "List metric keys, defaulting to the live frontier comparison set.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::Convergent,
+ },
+ ToolSpec {
+ name: "metric.best",
+ description: "Rank closed experiments by one metric key with optional frontier, hypothesis, or dimension narrowing.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::Convergent,
+ },
+ ToolSpec {
+ name: "run.dimension.define",
+ description: "Register one typed run-dimension key.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::NeverReplay,
+ },
+ ToolSpec {
+ name: "run.dimension.list",
+ description: "List registered run dimensions.",
+ dispatch: DispatchTarget::Worker,
+ replay: ReplayContract::Convergent,
+ },
+ ToolSpec {
+ name: "skill.list",
+ description: "List bundled skills shipped with this package.",
+ dispatch: DispatchTarget::Host,
+ replay: ReplayContract::Convergent,
+ },
+ ToolSpec {
+ name: "skill.show",
+ description: "Return one bundled skill text shipped with this package. Defaults to `fidget-spinner` when name is omitted.",
+ dispatch: DispatchTarget::Host,
+ replay: ReplayContract::Convergent,
+ },
+ ToolSpec {
+ name: "system.health",
+ description: "Read MCP host health, session binding, worker generation, and rollout state.",
+ dispatch: DispatchTarget::Host,
+ replay: ReplayContract::Convergent,
+ },
+ ToolSpec {
+ name: "system.telemetry",
+ description: "Read aggregate MCP host telemetry for this session.",
+ dispatch: DispatchTarget::Host,
+ replay: ReplayContract::Convergent,
+ },
+];
+
+const RESOURCE_SPECS: &[ResourceSpec] = &[
+ ResourceSpec {
+ uri: "fidget-spinner://skill/fidget-spinner",
+ dispatch: DispatchTarget::Host,
+ replay: ReplayContract::Convergent,
+ },
+ ResourceSpec {
+ uri: "fidget-spinner://skill/frontier-loop",
+ dispatch: DispatchTarget::Host,
+ replay: ReplayContract::Convergent,
+ },
+];
+
#[must_use]
pub(crate) fn tool_spec(name: &str) -> Option<ToolSpec> {
- match name {
- "project.bind" => Some(ToolSpec {
- name: "project.bind",
- description: "Bind this MCP session to a project root or nested path inside a project store.",
- dispatch: DispatchTarget::Host,
- replay: ReplayContract::NeverReplay,
- }),
- "project.status" => Some(ToolSpec {
- name: "project.status",
- description: "Read local project status, store paths, and git availability for the currently bound project.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::Convergent,
- }),
- "project.schema" => Some(ToolSpec {
- name: "project.schema",
- description: "Read the project-local payload schema and field validation tiers.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::Convergent,
- }),
- "schema.field.upsert" => Some(ToolSpec {
- name: "schema.field.upsert",
- description: "Add or replace one project-local payload schema field definition.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::NeverReplay,
- }),
- "schema.field.remove" => Some(ToolSpec {
- name: "schema.field.remove",
- description: "Remove one project-local payload schema field definition, optionally narrowed by node-class set.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::NeverReplay,
- }),
- "tag.add" => Some(ToolSpec {
- name: "tag.add",
- description: "Register one repo-local tag with a required description. Notes may only reference tags from this registry.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::NeverReplay,
- }),
- "tag.list" => Some(ToolSpec {
- name: "tag.list",
- description: "List repo-local tags available for note and node tagging.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::Convergent,
- }),
- "frontier.list" => Some(ToolSpec {
- name: "frontier.list",
- description: "List frontiers for the current project.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::Convergent,
- }),
- "frontier.status" => Some(ToolSpec {
- name: "frontier.status",
- description: "Read one frontier projection, including open/completed experiment counts and verdict totals.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::Convergent,
- }),
- "frontier.init" => Some(ToolSpec {
- name: "frontier.init",
- description: "Create a new frontier rooted in a contract node.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::NeverReplay,
- }),
- "node.create" => Some(ToolSpec {
- name: "node.create",
- description: "Create a generic DAG node with project payload fields and optional lineage parents.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::NeverReplay,
- }),
- "hypothesis.record" => Some(ToolSpec {
- name: "hypothesis.record",
- description: "Record a core-path hypothesis with low ceremony.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::NeverReplay,
- }),
- "node.list" => Some(ToolSpec {
- name: "node.list",
- description: "List recent nodes. Archived nodes are hidden unless explicitly requested.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::Convergent,
- }),
- "node.read" => Some(ToolSpec {
- name: "node.read",
- description: "Read one node including payload, diagnostics, and hidden annotations.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::Convergent,
- }),
- "node.annotate" => Some(ToolSpec {
- name: "node.annotate",
- description: "Attach a free-form annotation to any node.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::NeverReplay,
- }),
- "node.archive" => Some(ToolSpec {
- name: "node.archive",
- description: "Archive a node so it falls out of default enumeration without being deleted.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::NeverReplay,
- }),
- "note.quick" => Some(ToolSpec {
- name: "note.quick",
- description: "Push a quick off-path note without bureaucratic experiment closure.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::NeverReplay,
- }),
- "source.record" => Some(ToolSpec {
- name: "source.record",
- description: "Record imported sources and documentary context that should live in the DAG without polluting the core path.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::NeverReplay,
- }),
- "metric.define" => Some(ToolSpec {
- name: "metric.define",
- description: "Register one project-level metric definition so experiment ingestion only has to send key/value observations.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::NeverReplay,
- }),
- "run.dimension.define" => Some(ToolSpec {
- name: "run.dimension.define",
- description: "Register one project-level run dimension used to slice metrics across scenarios, budgets, and flags.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::NeverReplay,
- }),
- "run.dimension.list" => Some(ToolSpec {
- name: "run.dimension.list",
- description: "List registered run dimensions together with observed value counts and sample values.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::Convergent,
- }),
- "metric.keys" => Some(ToolSpec {
- name: "metric.keys",
- description: "List rankable metric keys, including registered run metrics and observed payload-derived numeric fields.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::Convergent,
- }),
- "metric.best" => Some(ToolSpec {
- name: "metric.best",
- description: "Rank completed experiments by one numeric key, with optional run-dimension filters.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::Convergent,
- }),
- "metric.migrate" => Some(ToolSpec {
- name: "metric.migrate",
- description: "Re-run the idempotent legacy metric-plane normalization that registers canonical metrics and backfills benchmark_suite dimensions.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::NeverReplay,
- }),
- "experiment.open" => Some(ToolSpec {
- name: "experiment.open",
- description: "Open a stateful experiment against one hypothesis.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::NeverReplay,
- }),
- "experiment.list" => Some(ToolSpec {
- name: "experiment.list",
- description: "List currently open experiments, optionally narrowed to one frontier.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::Convergent,
- }),
- "experiment.read" => Some(ToolSpec {
- name: "experiment.read",
- description: "Read one currently open experiment by id.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::Convergent,
- }),
- "experiment.close" => Some(ToolSpec {
- name: "experiment.close",
- description: "Close one open experiment with typed run dimensions, preregistered metric observations, optional analysis, note, and verdict.",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::NeverReplay,
- }),
- "skill.list" => Some(ToolSpec {
- name: "skill.list",
- description: "List bundled skills shipped with this package.",
- dispatch: DispatchTarget::Host,
- replay: ReplayContract::Convergent,
- }),
- "skill.show" => Some(ToolSpec {
- name: "skill.show",
- description: "Return one bundled skill text shipped with this package. Defaults to `fidget-spinner` when name is omitted.",
- dispatch: DispatchTarget::Host,
- replay: ReplayContract::Convergent,
- }),
- "system.health" => Some(ToolSpec {
- name: "system.health",
- description: "Read MCP host health, session binding, worker generation, rollout state, and the last fault.",
- dispatch: DispatchTarget::Host,
- replay: ReplayContract::Convergent,
- }),
- "system.telemetry" => Some(ToolSpec {
- name: "system.telemetry",
- description: "Read aggregate request, retry, restart, and per-operation telemetry for this MCP session.",
- dispatch: DispatchTarget::Host,
- replay: ReplayContract::Convergent,
- }),
- _ => None,
- }
+ TOOL_SPECS.iter().copied().find(|spec| spec.name == name)
}
#[must_use]
pub(crate) fn resource_spec(uri: &str) -> Option<ResourceSpec> {
- match uri {
- "fidget-spinner://project/config" => Some(ResourceSpec {
- uri: "fidget-spinner://project/config",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::Convergent,
- }),
- "fidget-spinner://project/schema" => Some(ResourceSpec {
- uri: "fidget-spinner://project/schema",
- dispatch: DispatchTarget::Worker,
- replay: ReplayContract::Convergent,
- }),
- "fidget-spinner://skill/fidget-spinner" => Some(ResourceSpec {
- uri: "fidget-spinner://skill/fidget-spinner",
- dispatch: DispatchTarget::Host,
- replay: ReplayContract::Convergent,
- }),
- "fidget-spinner://skill/frontier-loop" => Some(ResourceSpec {
- uri: "fidget-spinner://skill/frontier-loop",
- dispatch: DispatchTarget::Host,
- replay: ReplayContract::Convergent,
- }),
- _ => None,
- }
+ RESOURCE_SPECS.iter().copied().find(|spec| spec.uri == uri)
}
#[must_use]
pub(crate) fn tool_definitions() -> Vec<Value> {
- [
- "project.bind",
- "project.status",
- "project.schema",
- "schema.field.upsert",
- "schema.field.remove",
- "tag.add",
- "tag.list",
- "frontier.list",
- "frontier.status",
- "frontier.init",
- "node.create",
- "hypothesis.record",
- "node.list",
- "node.read",
- "node.annotate",
- "node.archive",
- "note.quick",
- "source.record",
- "metric.define",
- "run.dimension.define",
- "run.dimension.list",
- "metric.keys",
- "metric.best",
- "metric.migrate",
- "experiment.open",
- "experiment.list",
- "experiment.read",
- "experiment.close",
- "skill.list",
- "skill.show",
- "system.health",
- "system.telemetry",
- ]
- .into_iter()
- .filter_map(tool_spec)
- .map(|spec| {
- json!({
- "name": spec.name,
- "description": spec.description,
- "inputSchema": with_common_presentation(input_schema(spec.name)),
- "annotations": spec.annotation_json(),
+ TOOL_SPECS
+ .iter()
+ .copied()
+ .map(|spec| {
+ json!({
+ "name": spec.name,
+ "description": spec.description,
+ "annotations": spec.annotation_json(),
+ "inputSchema": tool_input_schema(spec.name),
+ })
})
- })
- .collect()
+ .collect()
}
#[must_use]
pub(crate) fn list_resources() -> Vec<Value> {
- vec![
- json!({
- "uri": "fidget-spinner://project/config",
- "name": "project-config",
- "description": "Project-local store configuration",
- "mimeType": "application/json"
- }),
- json!({
- "uri": "fidget-spinner://project/schema",
- "name": "project-schema",
- "description": "Project-local payload schema and validation tiers",
- "mimeType": "application/json"
- }),
- json!({
- "uri": "fidget-spinner://skill/fidget-spinner",
- "name": "fidget-spinner-skill",
- "description": "Bundled base Fidget Spinner skill text for this package",
- "mimeType": "text/markdown"
- }),
- json!({
- "uri": "fidget-spinner://skill/frontier-loop",
- "name": "frontier-loop-skill",
- "description": "Bundled frontier-loop specialization skill text for this package",
- "mimeType": "text/markdown"
- }),
- ]
+ RESOURCE_SPECS
+ .iter()
+ .map(|spec| {
+ json!({
+ "uri": spec.uri,
+ "name": spec.uri.rsplit('/').next().unwrap_or(spec.uri),
+ "description": resource_description(spec.uri),
+ })
+ })
+ .collect()
}
-fn input_schema(name: &str) -> Value {
- match name {
- "project.status" | "project.schema" | "tag.list" | "skill.list" | "system.health"
- | "system.telemetry" | "run.dimension.list" | "metric.migrate" => {
- json!({"type":"object","additionalProperties":false})
- }
- "schema.field.upsert" => json!({
- "type": "object",
- "properties": {
- "name": { "type": "string", "description": "Project payload field name." },
- "node_classes": { "type": "array", "items": node_class_schema(), "description": "Optional node-class scope. Omit or pass [] for all classes." },
- "presence": field_presence_schema(),
- "severity": diagnostic_severity_schema(),
- "role": field_role_schema(),
- "inference_policy": inference_policy_schema(),
- "value_type": field_value_type_schema(),
- },
- "required": ["name", "presence", "severity", "role", "inference_policy"],
- "additionalProperties": false
- }),
- "schema.field.remove" => json!({
- "type": "object",
- "properties": {
- "name": { "type": "string", "description": "Project payload field name." },
- "node_classes": { "type": "array", "items": node_class_schema(), "description": "Optional exact node-class scope to remove." }
- },
- "required": ["name"],
- "additionalProperties": false
- }),
- "project.bind" => json!({
- "type": "object",
- "properties": {
- "path": { "type": "string", "description": "Project root or any nested path inside a project with .fidget_spinner state." }
- },
- "required": ["path"],
- "additionalProperties": false
- }),
- "tag.add" => json!({
- "type": "object",
- "properties": {
- "name": { "type": "string", "description": "Lowercase repo-local tag name." },
- "description": { "type": "string", "description": "Human-facing tag description." }
- },
- "required": ["name", "description"],
- "additionalProperties": false
- }),
- "skill.show" => json!({
- "type": "object",
- "properties": {
- "name": { "type": "string", "description": "Bundled skill name. Defaults to `fidget-spinner`." }
- },
- "additionalProperties": false
- }),
- "frontier.list" => json!({"type":"object","additionalProperties":false}),
- "frontier.status" => json!({
- "type": "object",
- "properties": {
- "frontier_id": { "type": "string", "description": "Frontier UUID" }
- },
- "required": ["frontier_id"],
- "additionalProperties": false
- }),
- "frontier.init" => json!({
- "type": "object",
- "properties": {
- "label": { "type": "string" },
- "objective": { "type": "string" },
- "contract_title": { "type": "string" },
- "contract_summary": { "type": "string" },
- "benchmark_suites": { "type": "array", "items": { "type": "string" } },
- "promotion_criteria": { "type": "array", "items": { "type": "string" } },
- "primary_metric": metric_spec_schema(),
- "supporting_metrics": { "type": "array", "items": metric_spec_schema() },
- "seed_summary": { "type": "string" }
- },
- "required": ["label", "objective", "contract_title", "benchmark_suites", "promotion_criteria", "primary_metric"],
- "additionalProperties": false
- }),
- "node.create" => json!({
- "type": "object",
- "properties": {
- "class": node_class_schema(),
- "frontier_id": { "type": "string" },
- "title": { "type": "string" },
- "summary": { "type": "string", "description": "Required for `note` and `source` nodes." },
- "tags": { "type": "array", "items": tag_name_schema(), "description": "Required for `note` nodes; optional for other classes." },
- "payload": { "type": "object", "description": "`note` and `source` nodes require a non-empty string `body` field." },
- "annotations": { "type": "array", "items": annotation_schema() },
- "parents": { "type": "array", "items": { "type": "string" } }
- },
- "required": ["class", "title"],
- "additionalProperties": false
- }),
- "hypothesis.record" => json!({
- "type": "object",
- "properties": {
- "frontier_id": { "type": "string" },
- "title": { "type": "string" },
- "summary": { "type": "string" },
- "body": { "type": "string" },
- "annotations": { "type": "array", "items": annotation_schema() },
- "parents": { "type": "array", "items": { "type": "string" } }
- },
- "required": ["frontier_id", "title", "summary", "body"],
- "additionalProperties": false
- }),
- "node.list" => json!({
- "type": "object",
- "properties": {
- "frontier_id": { "type": "string" },
- "class": node_class_schema(),
- "tags": { "type": "array", "items": tag_name_schema() },
- "include_archived": { "type": "boolean" },
- "limit": { "type": "integer", "minimum": 1, "maximum": 500 }
- },
- "additionalProperties": false
- }),
- "node.read" | "node.archive" => json!({
- "type": "object",
- "properties": {
- "node_id": { "type": "string" }
- },
- "required": ["node_id"],
- "additionalProperties": false
- }),
- "node.annotate" => json!({
- "type": "object",
- "properties": {
- "node_id": { "type": "string" },
- "body": { "type": "string" },
- "label": { "type": "string" },
- "visible": { "type": "boolean" }
- },
- "required": ["node_id", "body"],
- "additionalProperties": false
- }),
- "note.quick" => json!({
- "type": "object",
- "properties": {
- "frontier_id": { "type": "string" },
- "title": { "type": "string" },
- "summary": { "type": "string" },
- "body": { "type": "string" },
- "tags": { "type": "array", "items": tag_name_schema() },
- "annotations": { "type": "array", "items": annotation_schema() },
- "parents": { "type": "array", "items": { "type": "string" } }
- },
- "required": ["title", "summary", "body", "tags"],
- "additionalProperties": false
- }),
- "source.record" => json!({
- "type": "object",
- "properties": {
- "frontier_id": { "type": "string" },
- "title": { "type": "string" },
- "summary": { "type": "string" },
- "body": { "type": "string" },
- "tags": { "type": "array", "items": tag_name_schema() },
- "annotations": { "type": "array", "items": annotation_schema() },
- "parents": { "type": "array", "items": { "type": "string" } }
- },
- "required": ["title", "summary", "body"],
- "additionalProperties": false
- }),
- "metric.define" => json!({
- "type": "object",
- "properties": {
- "key": { "type": "string" },
- "unit": metric_unit_schema(),
- "objective": optimization_objective_schema(),
- "description": { "type": "string" }
- },
- "required": ["key", "unit", "objective"],
- "additionalProperties": false
- }),
- "run.dimension.define" => json!({
- "type": "object",
- "properties": {
- "key": { "type": "string" },
- "value_type": field_value_type_schema(),
- "description": { "type": "string" }
- },
- "required": ["key", "value_type"],
- "additionalProperties": false
- }),
- "metric.keys" => json!({
- "type": "object",
- "properties": {
- "frontier_id": { "type": "string" },
- "source": metric_source_schema(),
- "dimensions": { "type": "object" }
- },
- "additionalProperties": false
- }),
- "metric.best" => json!({
- "type": "object",
- "properties": {
- "key": { "type": "string" },
- "frontier_id": { "type": "string" },
- "source": metric_source_schema(),
- "dimensions": { "type": "object" },
- "order": metric_order_schema(),
- "limit": { "type": "integer", "minimum": 1, "maximum": 500 }
- },
- "required": ["key"],
- "additionalProperties": false
- }),
- "experiment.open" => json!({
- "type": "object",
- "properties": {
- "frontier_id": { "type": "string" },
- "hypothesis_node_id": { "type": "string" },
- "title": { "type": "string" },
- "summary": { "type": "string" }
- },
- "required": ["frontier_id", "hypothesis_node_id", "title"],
- "additionalProperties": false
- }),
- "experiment.list" => json!({
- "type": "object",
- "properties": {
- "frontier_id": { "type": "string" }
- },
- "additionalProperties": false
- }),
- "experiment.read" => json!({
- "type": "object",
- "properties": {
- "experiment_id": { "type": "string" }
- },
- "required": ["experiment_id"],
- "additionalProperties": false
- }),
- "experiment.close" => json!({
- "type": "object",
- "properties": {
- "experiment_id": { "type": "string" },
- "run": run_schema(),
- "primary_metric": metric_value_schema(),
- "supporting_metrics": { "type": "array", "items": metric_value_schema() },
- "note": note_schema(),
- "verdict": verdict_schema(),
- "decision_title": { "type": "string" },
- "decision_rationale": { "type": "string" },
- "analysis": analysis_schema()
- },
- "required": [
- "experiment_id",
- "run",
+fn resource_description(uri: &str) -> &'static str {
+ match uri {
+ "fidget-spinner://skill/fidget-spinner" => "Bundled Fidget Spinner operating doctrine.",
+ "fidget-spinner://skill/frontier-loop" => "Bundled frontier-loop specialization.",
+ _ => "Fidget Spinner resource.",
+ }
+}
+
+fn tool_input_schema(name: &str) -> Value {
+ let schema = match name {
+ "project.bind" => object_schema(
+ &[(
+ "path",
+ string_schema("Project root or any nested path inside it."),
+ )],
+ &["path"],
+ ),
+ "project.status" | "tag.list" | "frontier.list" | "run.dimension.list" | "skill.list"
+ | "system.health" | "system.telemetry" => empty_object_schema(),
+ "tag.add" => object_schema(
+ &[
+ ("name", string_schema("Repo-local tag token.")),
+ (
+ "description",
+ string_schema("Human-facing tag description."),
+ ),
+ ],
+ &["name", "description"],
+ ),
+ "frontier.create" => object_schema(
+ &[
+ ("label", string_schema("Short frontier label.")),
+ ("objective", string_schema("Frontier objective.")),
+ ("slug", string_schema("Optional stable frontier slug.")),
+ ],
+ &["label", "objective"],
+ ),
+ "frontier.read" | "frontier.open" | "frontier.history" => object_schema(
+ &[("frontier", selector_schema("Frontier UUID or slug."))],
+ &["frontier"],
+ ),
+ "frontier.brief.update" => object_schema(
+ &[
+ ("frontier", selector_schema("Frontier UUID or slug.")),
+ (
+ "expected_revision",
+ integer_schema("Optimistic concurrency guard."),
+ ),
+ (
+ "situation",
+ nullable_string_schema("Optional frontier situation text."),
+ ),
+ ("roadmap", roadmap_schema()),
+ (
+ "unknowns",
+ string_array_schema("Ordered frontier unknowns."),
+ ),
+ ],
+ &["frontier"],
+ ),
+ "hypothesis.record" => object_schema(
+ &[
+ ("frontier", selector_schema("Owning frontier UUID or slug.")),
+ ("title", string_schema("Terse hypothesis title.")),
+ ("summary", string_schema("One-line hypothesis summary.")),
+ ("body", string_schema("Single-paragraph hypothesis body.")),
+ ("slug", string_schema("Optional stable hypothesis slug.")),
+ ("tags", string_array_schema("Tag names.")),
+ ("parents", vertex_selector_array_schema()),
+ ],
+ &["frontier", "title", "summary", "body"],
+ ),
+ "hypothesis.list" => object_schema(
+ &[
+ (
+ "frontier",
+ selector_schema("Optional frontier UUID or slug."),
+ ),
+ ("tags", string_array_schema("Require all listed tags.")),
+ (
+ "include_archived",
+ boolean_schema("Include archived hypotheses."),
+ ),
+ ("limit", integer_schema("Optional row cap.")),
+ ],
+ &[],
+ ),
+ "hypothesis.read" | "hypothesis.history" => object_schema(
+ &[("hypothesis", selector_schema("Hypothesis UUID or slug."))],
+ &["hypothesis"],
+ ),
+ "hypothesis.update" => object_schema(
+ &[
+ ("hypothesis", selector_schema("Hypothesis UUID or slug.")),
+ (
+ "expected_revision",
+ integer_schema("Optimistic concurrency guard."),
+ ),
+ ("title", string_schema("Replacement title.")),
+ ("summary", string_schema("Replacement summary.")),
+ ("body", string_schema("Replacement single-paragraph body.")),
+ ("tags", string_array_schema("Replacement tag set.")),
+ ("parents", vertex_selector_array_schema()),
+ ("archived", boolean_schema("Archive state override.")),
+ ],
+ &["hypothesis"],
+ ),
+ "experiment.open" => object_schema(
+ &[
+ (
+ "hypothesis",
+ selector_schema("Owning hypothesis UUID or slug."),
+ ),
+ ("title", string_schema("Experiment title.")),
+ ("summary", string_schema("Optional experiment summary.")),
+ ("slug", string_schema("Optional stable experiment slug.")),
+ ("tags", string_array_schema("Tag names.")),
+ ("parents", vertex_selector_array_schema()),
+ ],
+ &["hypothesis", "title"],
+ ),
+ "experiment.list" => object_schema(
+ &[
+ (
+ "frontier",
+ selector_schema("Optional frontier UUID or slug."),
+ ),
+ (
+ "hypothesis",
+ selector_schema("Optional hypothesis UUID or slug."),
+ ),
+ ("tags", string_array_schema("Require all listed tags.")),
+ (
+ "status",
+ enum_string_schema(&["open", "closed"], "Optional experiment status filter."),
+ ),
+ (
+ "include_archived",
+ boolean_schema("Include archived experiments."),
+ ),
+ ("limit", integer_schema("Optional row cap.")),
+ ],
+ &[],
+ ),
+ "experiment.read" | "experiment.history" => object_schema(
+ &[("experiment", selector_schema("Experiment UUID or slug."))],
+ &["experiment"],
+ ),
+ "experiment.update" => object_schema(
+ &[
+ ("experiment", selector_schema("Experiment UUID or slug.")),
+ (
+ "expected_revision",
+ integer_schema("Optimistic concurrency guard."),
+ ),
+ ("title", string_schema("Replacement title.")),
+ (
+ "summary",
+ nullable_string_schema("Replacement summary or explicit null."),
+ ),
+ ("tags", string_array_schema("Replacement tag set.")),
+ ("parents", vertex_selector_array_schema()),
+ ("archived", boolean_schema("Archive state override.")),
+ ("outcome", experiment_outcome_schema()),
+ ],
+ &["experiment"],
+ ),
+ "experiment.close" => object_schema(
+ &[
+ ("experiment", selector_schema("Experiment UUID or slug.")),
+ (
+ "expected_revision",
+ integer_schema("Optimistic concurrency guard."),
+ ),
+ (
+ "backend",
+ enum_string_schema(
+ &["manual", "local_process", "worktree_process", "ssh_process"],
+ "Execution backend.",
+ ),
+ ),
+ ("command", command_schema()),
+ ("dimensions", run_dimensions_schema()),
+ ("primary_metric", metric_value_schema()),
+ ("supporting_metrics", metric_value_array_schema()),
+ (
+ "verdict",
+ enum_string_schema(
+ &["accepted", "kept", "parked", "rejected"],
+ "Closed verdict.",
+ ),
+ ),
+ ("rationale", string_schema("Decision rationale.")),
+ ("analysis", experiment_analysis_schema()),
+ ],
+ &[
+ "experiment",
+ "backend",
+ "command",
+ "dimensions",
"primary_metric",
- "note",
"verdict",
- "decision_title",
- "decision_rationale"
+ "rationale",
],
- "additionalProperties": false
- }),
- _ => json!({"type":"object","additionalProperties":false}),
- }
+ ),
+ "artifact.record" => object_schema(
+ &[
+ (
+ "kind",
+ enum_string_schema(
+ &[
+ "document", "link", "log", "table", "plot", "dump", "binary", "other",
+ ],
+ "Artifact kind.",
+ ),
+ ),
+ ("label", string_schema("Human-facing artifact label.")),
+ ("summary", string_schema("Optional summary.")),
+ (
+ "locator",
+ string_schema(
+ "Opaque locator or URI. Artifact bodies are never read through Spinner.",
+ ),
+ ),
+ ("media_type", string_schema("Optional media type.")),
+ ("slug", string_schema("Optional stable artifact slug.")),
+ ("attachments", attachment_selector_array_schema()),
+ ],
+ &["kind", "label", "locator"],
+ ),
+ "artifact.list" => object_schema(
+ &[
+ (
+ "frontier",
+ selector_schema("Optional frontier UUID or slug."),
+ ),
+ (
+ "kind",
+ enum_string_schema(
+ &[
+ "document", "link", "log", "table", "plot", "dump", "binary", "other",
+ ],
+ "Optional artifact kind.",
+ ),
+ ),
+ ("attached_to", attachment_selector_schema()),
+ ("limit", integer_schema("Optional row cap.")),
+ ],
+ &[],
+ ),
+ "artifact.read" | "artifact.history" => object_schema(
+ &[("artifact", selector_schema("Artifact UUID or slug."))],
+ &["artifact"],
+ ),
+ "artifact.update" => object_schema(
+ &[
+ ("artifact", selector_schema("Artifact UUID or slug.")),
+ (
+ "expected_revision",
+ integer_schema("Optimistic concurrency guard."),
+ ),
+ (
+ "kind",
+ enum_string_schema(
+ &[
+ "document", "link", "log", "table", "plot", "dump", "binary", "other",
+ ],
+ "Replacement artifact kind.",
+ ),
+ ),
+ ("label", string_schema("Replacement label.")),
+ (
+ "summary",
+ nullable_string_schema("Replacement summary or explicit null."),
+ ),
+ ("locator", string_schema("Replacement locator.")),
+ (
+ "media_type",
+ nullable_string_schema("Replacement media type or explicit null."),
+ ),
+ ("attachments", attachment_selector_array_schema()),
+ ],
+ &["artifact"],
+ ),
+ "metric.define" => object_schema(
+ &[
+ ("key", string_schema("Metric key.")),
+ (
+ "unit",
+ enum_string_schema(
+ &["seconds", "bytes", "count", "ratio", "custom"],
+ "Metric unit.",
+ ),
+ ),
+ (
+ "objective",
+ enum_string_schema(
+ &["minimize", "maximize", "target"],
+ "Optimization objective.",
+ ),
+ ),
+ (
+ "visibility",
+ enum_string_schema(
+ &["canonical", "minor", "hidden", "archived"],
+ "Metric visibility tier.",
+ ),
+ ),
+ ("description", string_schema("Optional description.")),
+ ],
+ &["key", "unit", "objective"],
+ ),
+ "metric.keys" => object_schema(
+ &[
+ (
+ "frontier",
+ selector_schema("Optional frontier UUID or slug."),
+ ),
+ (
+ "scope",
+ enum_string_schema(&["live", "visible", "all"], "Registry slice to enumerate."),
+ ),
+ ],
+ &[],
+ ),
+ "metric.best" => object_schema(
+ &[
+ (
+ "frontier",
+ selector_schema("Optional frontier UUID or slug."),
+ ),
+ (
+ "hypothesis",
+ selector_schema("Optional hypothesis UUID or slug."),
+ ),
+ ("key", string_schema("Metric key.")),
+ ("dimensions", run_dimensions_schema()),
+ (
+ "include_rejected",
+ boolean_schema("Include rejected experiments."),
+ ),
+ ("limit", integer_schema("Optional row cap.")),
+ (
+ "order",
+ enum_string_schema(&["asc", "desc"], "Optional explicit ranking direction."),
+ ),
+ ],
+ &["key"],
+ ),
+ "run.dimension.define" => object_schema(
+ &[
+ ("key", string_schema("Dimension key.")),
+ (
+ "value_type",
+ enum_string_schema(
+ &["string", "numeric", "boolean", "timestamp"],
+ "Dimension value type.",
+ ),
+ ),
+ ("description", string_schema("Optional description.")),
+ ],
+ &["key", "value_type"],
+ ),
+ "skill.show" => object_schema(&[("name", string_schema("Bundled skill name."))], &[]),
+ _ => empty_object_schema(),
+ };
+ with_common_presentation(schema)
}
-fn metric_spec_schema() -> Value {
+fn empty_object_schema() -> Value {
json!({
"type": "object",
- "properties": {
- "key": { "type": "string" },
- "unit": metric_unit_schema(),
- "objective": optimization_objective_schema()
- },
- "required": ["key", "unit", "objective"],
- "additionalProperties": false
+ "properties": {},
+ "additionalProperties": false,
})
}
-fn metric_value_schema() -> Value {
+fn object_schema(properties: &[(&str, Value)], required: &[&str]) -> Value {
+ let mut map = serde_json::Map::new();
+ for (key, value) in properties {
+ let _ = map.insert((*key).to_owned(), value.clone());
+ }
json!({
"type": "object",
- "properties": {
- "key": { "type": "string" },
- "value": { "type": "number" }
- },
- "required": ["key", "value"],
- "additionalProperties": false
+ "properties": Value::Object(map),
+ "required": required,
+ "additionalProperties": false,
})
}
-fn annotation_schema() -> Value {
- json!({
- "type": "object",
- "properties": {
- "body": { "type": "string" },
- "label": { "type": "string" },
- "visible": { "type": "boolean" }
- },
- "required": ["body"],
- "additionalProperties": false
- })
+fn string_schema(description: &str) -> Value {
+ json!({ "type": "string", "description": description })
}
-fn analysis_schema() -> Value {
+fn nullable_string_schema(description: &str) -> Value {
json!({
- "type": "object",
- "properties": {
- "title": { "type": "string" },
- "summary": { "type": "string" },
- "body": { "type": "string" }
- },
- "required": ["title", "summary", "body"],
- "additionalProperties": false
+ "description": description,
+ "oneOf": [
+ { "type": "string" },
+ { "type": "null" }
+ ]
})
}
-fn tag_name_schema() -> Value {
- json!({
- "type": "string",
- "pattern": "^[a-z0-9]+(?:[-_/][a-z0-9]+)*$"
- })
+fn integer_schema(description: &str) -> Value {
+ json!({ "type": "integer", "minimum": 0, "description": description })
}
-fn node_class_schema() -> Value {
- json!({
- "type": "string",
- "enum": ["contract", "hypothesis", "run", "analysis", "decision", "source", "note"]
- })
+fn boolean_schema(description: &str) -> Value {
+ json!({ "type": "boolean", "description": description })
}
-fn metric_unit_schema() -> Value {
- json!({
- "type": "string",
- "enum": ["seconds", "bytes", "count", "ratio", "custom"]
- })
+fn enum_string_schema(values: &[&str], description: &str) -> Value {
+ json!({ "type": "string", "enum": values, "description": description })
}
-fn metric_source_schema() -> Value {
+fn string_array_schema(description: &str) -> Value {
json!({
- "type": "string",
- "enum": [
- "run_metric",
- "hypothesis_payload",
- "run_payload",
- "analysis_payload",
- "decision_payload"
- ]
+ "type": "array",
+ "items": { "type": "string" },
+ "description": description
})
}
-fn metric_order_schema() -> Value {
- json!({
- "type": "string",
- "enum": ["asc", "desc"]
- })
+fn selector_schema(description: &str) -> Value {
+ string_schema(description)
}
-fn field_value_type_schema() -> Value {
+fn vertex_selector_schema() -> Value {
json!({
- "type": "string",
- "enum": ["string", "numeric", "boolean", "timestamp"]
+ "type": "object",
+ "properties": {
+ "kind": { "type": "string", "enum": ["hypothesis", "experiment"] },
+ "selector": { "type": "string" }
+ },
+ "required": ["kind", "selector"],
+ "additionalProperties": false
})
}
-fn diagnostic_severity_schema() -> Value {
+fn attachment_selector_schema() -> Value {
json!({
- "type": "string",
- "enum": ["error", "warning", "info"]
+ "type": "object",
+ "properties": {
+ "kind": { "type": "string", "enum": ["frontier", "hypothesis", "experiment"] },
+ "selector": { "type": "string" }
+ },
+ "required": ["kind", "selector"],
+ "additionalProperties": false
})
}
-fn field_presence_schema() -> Value {
- json!({
- "type": "string",
- "enum": ["required", "recommended", "optional"]
- })
+fn vertex_selector_array_schema() -> Value {
+ json!({ "type": "array", "items": vertex_selector_schema() })
+}
+
+fn attachment_selector_array_schema() -> Value {
+ json!({ "type": "array", "items": attachment_selector_schema() })
}
-fn field_role_schema() -> Value {
+fn roadmap_schema() -> Value {
json!({
- "type": "string",
- "enum": ["index", "projection_gate", "render_only", "opaque"]
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "rank": { "type": "integer", "minimum": 0 },
+ "hypothesis": { "type": "string" },
+ "summary": { "type": "string" }
+ },
+ "required": ["rank", "hypothesis"],
+ "additionalProperties": false
+ }
})
}
-fn inference_policy_schema() -> Value {
+fn command_schema() -> Value {
json!({
- "type": "string",
- "enum": ["manual_only", "model_may_infer"]
+ "type": "object",
+ "properties": {
+ "working_directory": { "type": "string" },
+ "argv": { "type": "array", "items": { "type": "string" } },
+ "env": {
+ "type": "object",
+ "additionalProperties": { "type": "string" }
+ }
+ },
+ "required": ["argv"],
+ "additionalProperties": false
})
}
-fn optimization_objective_schema() -> Value {
+fn metric_value_schema() -> Value {
json!({
- "type": "string",
- "enum": ["minimize", "maximize", "target"]
+ "type": "object",
+ "properties": {
+ "key": { "type": "string" },
+ "value": { "type": "number" }
+ },
+ "required": ["key", "value"],
+ "additionalProperties": false
})
}
-fn verdict_schema() -> Value {
+fn metric_value_array_schema() -> Value {
+ json!({ "type": "array", "items": metric_value_schema() })
+}
+
+fn run_dimensions_schema() -> Value {
json!({
- "type": "string",
- "enum": [
- "accepted",
- "kept",
- "parked",
- "rejected"
- ]
+ "type": "object",
+ "additionalProperties": true,
+ "description": "Exact run-dimension filter or outcome dimension map. Values may be strings, numbers, booleans, or RFC3339 timestamps."
})
}
-fn run_schema() -> Value {
+fn experiment_analysis_schema() -> Value {
json!({
"type": "object",
"properties": {
- "title": { "type": "string" },
"summary": { "type": "string" },
- "backend": {
- "type": "string",
- "enum": ["local_process", "worktree_process", "ssh_process"]
- },
- "dimensions": { "type": "object" },
- "command": {
- "type": "object",
- "properties": {
- "working_directory": { "type": "string" },
- "argv": { "type": "array", "items": { "type": "string" } },
- "env": {
- "type": "object",
- "additionalProperties": { "type": "string" }
- }
- },
- "required": ["argv"],
- "additionalProperties": false
- }
+ "body": { "type": "string" }
},
- "required": ["title", "backend", "dimensions", "command"],
+ "required": ["summary", "body"],
"additionalProperties": false
})
}
-fn note_schema() -> Value {
+fn experiment_outcome_schema() -> Value {
json!({
"type": "object",
"properties": {
- "summary": { "type": "string" },
- "next_hypotheses": { "type": "array", "items": { "type": "string" } }
+ "backend": { "type": "string", "enum": ["manual", "local_process", "worktree_process", "ssh_process"] },
+ "command": command_schema(),
+ "dimensions": run_dimensions_schema(),
+ "primary_metric": metric_value_schema(),
+ "supporting_metrics": metric_value_array_schema(),
+ "verdict": { "type": "string", "enum": ["accepted", "kept", "parked", "rejected"] },
+ "rationale": { "type": "string" },
+ "analysis": experiment_analysis_schema()
},
- "required": ["summary"],
+ "required": ["backend", "command", "dimensions", "primary_metric", "verdict", "rationale"],
"additionalProperties": false
})
}
diff --git a/crates/fidget-spinner-cli/src/mcp/host/runtime.rs b/crates/fidget-spinner-cli/src/mcp/host/runtime.rs
index d57a21e..bf0484a 100644
--- a/crates/fidget-spinner-cli/src/mcp/host/runtime.rs
+++ b/crates/fidget-spinner-cli/src/mcp/host/runtime.rs
@@ -230,7 +230,7 @@ impl HostRuntime {
"name": SERVER_NAME,
"version": env!("CARGO_PKG_VERSION")
},
- "instructions": "The DAG is canonical truth. Frontier state is derived. Bind the session with project.bind before project-local DAG operations when the MCP is running unbound."
+ "instructions": "Bind the session with project.bind before project-local work when the MCP is unbound. Use frontier.open as the only overview surface, then walk hypotheses and experiments deliberately by selector. Artifacts are references only; Spinner does not read artifact bodies."
}))),
"notifications/initialized" => {
if !self.seed_captured() {
@@ -598,8 +598,11 @@ struct ProjectBindStatus {
project_root: String,
state_root: String,
display_name: fidget_spinner_core::NonEmptyText,
- schema: fidget_spinner_core::PayloadSchemaRef,
- git_repo_detected: bool,
+ frontier_count: u64,
+ hypothesis_count: u64,
+ experiment_count: u64,
+ open_experiment_count: u64,
+ artifact_count: u64,
}
struct ResolvedProjectBinding {
@@ -611,6 +614,7 @@ fn resolve_project_binding(
requested_path: PathBuf,
) -> Result<ResolvedProjectBinding, fidget_spinner_store_sqlite::StoreError> {
let store = crate::open_or_init_store_for_binding(&requested_path)?;
+ let project_status = store.status()?;
Ok(ResolvedProjectBinding {
binding: ProjectBinding {
requested_path: requested_path.clone(),
@@ -621,12 +625,11 @@ fn resolve_project_binding(
project_root: store.project_root().to_string(),
state_root: store.state_root().to_string(),
display_name: store.config().display_name.clone(),
- schema: store.schema().schema_ref(),
- git_repo_detected: crate::run_git(
- store.project_root(),
- &["rev-parse", "--show-toplevel"],
- )?
- .is_some(),
+ frontier_count: project_status.frontier_count,
+ hypothesis_count: project_status.hypothesis_count,
+ experiment_count: project_status.experiment_count,
+ open_experiment_count: project_status.open_experiment_count,
+ artifact_count: project_status.artifact_count,
},
})
}
@@ -728,17 +731,20 @@ fn project_bind_output(status: &ProjectBindStatus) -> Result<ToolOutput, FaultRe
let _ = concise.insert("project_root".to_owned(), json!(status.project_root));
let _ = concise.insert("state_root".to_owned(), json!(status.state_root));
let _ = concise.insert("display_name".to_owned(), json!(status.display_name));
+ let _ = concise.insert("frontier_count".to_owned(), json!(status.frontier_count));
let _ = concise.insert(
- "schema".to_owned(),
- json!(format!(
- "{}@{}",
- status.schema.namespace, status.schema.version
- )),
+ "hypothesis_count".to_owned(),
+ json!(status.hypothesis_count),
);
let _ = concise.insert(
- "git_repo_detected".to_owned(),
- json!(status.git_repo_detected),
+ "experiment_count".to_owned(),
+ json!(status.experiment_count),
);
+ let _ = concise.insert(
+ "open_experiment_count".to_owned(),
+ json!(status.open_experiment_count),
+ );
+ let _ = concise.insert("artifact_count".to_owned(), json!(status.artifact_count));
if status.requested_path != status.project_root {
let _ = concise.insert("requested_path".to_owned(), json!(status.requested_path));
}
@@ -749,18 +755,13 @@ fn project_bind_output(status: &ProjectBindStatus) -> Result<ToolOutput, FaultRe
format!("bound project {}", status.display_name),
format!("root: {}", status.project_root),
format!("state: {}", status.state_root),
+ format!("frontiers: {}", status.frontier_count),
+ format!("hypotheses: {}", status.hypothesis_count),
format!(
- "schema: {}@{}",
- status.schema.namespace, status.schema.version
- ),
- format!(
- "git: {}",
- if status.git_repo_detected {
- "detected"
- } else {
- "not detected"
- }
+ "experiments: {} total, {} open",
+ status.experiment_count, status.open_experiment_count
),
+ format!("artifacts: {}", status.artifact_count),
]
.join("\n"),
None,
diff --git a/crates/fidget-spinner-cli/src/mcp/service.rs b/crates/fidget-spinner-cli/src/mcp/service.rs
index f0cca1e..adc29f9 100644
--- a/crates/fidget-spinner-cli/src/mcp/service.rs
+++ b/crates/fidget-spinner-cli/src/mcp/service.rs
@@ -1,20 +1,22 @@
use std::collections::{BTreeMap, BTreeSet};
+use std::fmt::Write as _;
use std::fs;
use camino::{Utf8Path, Utf8PathBuf};
use fidget_spinner_core::{
- AdmissionState, AnnotationVisibility, CommandRecipe, DiagnosticSeverity, ExecutionBackend,
- FieldPresence, FieldRole, FieldValueType, FrontierContract, FrontierNote, FrontierProjection,
- FrontierRecord, FrontierVerdict, InferencePolicy, MetricSpec, MetricUnit, MetricValue,
- NodeAnnotation, NodeClass, NodePayload, NonEmptyText, ProjectFieldSpec, ProjectSchema,
- RunDimensionValue, TagName, TagRecord,
+ ArtifactKind, CommandRecipe, ExecutionBackend, ExperimentAnalysis, ExperimentStatus,
+ FieldValueType, FrontierVerdict, MetricUnit, MetricVisibility, NonEmptyText,
+ OptimizationObjective, RunDimensionValue, Slug, TagName,
};
use fidget_spinner_store_sqlite::{
- CloseExperimentRequest, CreateFrontierRequest, CreateNodeRequest, DefineMetricRequest,
- DefineRunDimensionRequest, EdgeAttachment, EdgeAttachmentDirection, ExperimentAnalysisDraft,
- ExperimentReceipt, ListNodesQuery, MetricBestQuery, MetricFieldSource, MetricKeyQuery,
- MetricKeySummary, MetricRankOrder, NodeSummary, OpenExperimentRequest, OpenExperimentSummary,
- ProjectStore, RemoveSchemaFieldRequest, StoreError, UpsertSchemaFieldRequest,
+ AttachmentSelector, CloseExperimentRequest, CreateArtifactRequest, CreateFrontierRequest,
+ CreateHypothesisRequest, DefineMetricRequest, DefineRunDimensionRequest, EntityHistoryEntry,
+ ExperimentOutcomePatch, FrontierOpenProjection, FrontierRoadmapItemDraft, FrontierSummary,
+ ListArtifactsQuery, ListExperimentsQuery, ListHypothesesQuery, MetricBestEntry,
+ MetricBestQuery, MetricKeySummary, MetricKeysQuery, MetricRankOrder, MetricScope,
+ OpenExperimentRequest, ProjectStatus, ProjectStore, StoreError, TextPatch,
+ UpdateArtifactRequest, UpdateExperimentRequest, UpdateFrontierBriefRequest,
+ UpdateHypothesisRequest, VertexSelector,
};
use serde::Deserialize;
use serde_json::{Map, Value, json};
@@ -42,10 +44,9 @@ impl WorkerService {
WorkerOperation::ReadResource { uri } => format!("resources/read:{uri}"),
};
Self::maybe_inject_transient(&operation_key)?;
-
match operation {
WorkerOperation::CallTool { name, arguments } => self.call_tool(&name, arguments),
- WorkerOperation::ReadResource { uri } => self.read_resource(&uri),
+ WorkerOperation::ReadResource { uri } => Self::read_resource(&uri),
}
}
@@ -53,796 +54,449 @@ impl WorkerService {
let operation = format!("tools/call:{name}");
let (presentation, arguments) =
split_presentation(arguments, &operation, FaultStage::Worker)?;
- match name {
- "project.status" => {
- let status = json!({
- "project_root": self.store.project_root(),
- "state_root": self.store.state_root(),
- "display_name": self.store.config().display_name,
- "schema": self.store.schema().schema_ref(),
- "git_repo_detected": crate::run_git(self.store.project_root(), &["rev-parse", "--show-toplevel"])
- .map_err(store_fault("tools/call:project.status"))?
- .is_some(),
- });
- tool_success(
- project_status_output(&status, self.store.schema()),
- presentation,
- FaultStage::Worker,
- "tools/call:project.status",
- )
+ macro_rules! lift {
+ ($expr:expr) => {
+ with_fault($expr, &operation)?
+ };
+ }
+ let output = match name {
+ "project.status" => project_status_output(&lift!(self.store.status()), &operation)?,
+ "tag.add" => {
+ let args = deserialize::<TagAddArgs>(arguments)?;
+ let tag = lift!(self.store.register_tag(
+ TagName::new(args.name).map_err(store_fault(&operation))?,
+ NonEmptyText::new(args.description).map_err(store_fault(&operation))?,
+ ));
+ tool_output(&tag, FaultStage::Worker, &operation)?
}
- "project.schema" => tool_success(
- project_schema_output(self.store.schema())?,
- presentation,
- FaultStage::Worker,
- "tools/call:project.schema",
- ),
- "schema.field.upsert" => {
- let args = deserialize::<SchemaFieldUpsertToolArgs>(arguments)?;
- let field = self
- .store
- .upsert_schema_field(UpsertSchemaFieldRequest {
- name: NonEmptyText::new(args.name)
- .map_err(store_fault("tools/call:schema.field.upsert"))?,
- node_classes: args
- .node_classes
- .unwrap_or_default()
- .into_iter()
- .map(|class| {
- parse_node_class_name(&class)
- .map_err(store_fault("tools/call:schema.field.upsert"))
- })
- .collect::<Result<_, _>>()?,
- presence: parse_field_presence_name(&args.presence)
- .map_err(store_fault("tools/call:schema.field.upsert"))?,
- severity: parse_diagnostic_severity_name(&args.severity)
- .map_err(store_fault("tools/call:schema.field.upsert"))?,
- role: parse_field_role_name(&args.role)
- .map_err(store_fault("tools/call:schema.field.upsert"))?,
- inference_policy: parse_inference_policy_name(&args.inference_policy)
- .map_err(store_fault("tools/call:schema.field.upsert"))?,
- value_type: args
- .value_type
- .as_deref()
- .map(parse_field_value_type_name)
+ "tag.list" => tag_list_output(&lift!(self.store.list_tags()), &operation)?,
+ "frontier.create" => {
+ let args = deserialize::<FrontierCreateArgs>(arguments)?;
+ let frontier = lift!(
+ self.store.create_frontier(CreateFrontierRequest {
+ label: NonEmptyText::new(args.label).map_err(store_fault(&operation))?,
+ objective: NonEmptyText::new(args.objective)
+ .map_err(store_fault(&operation))?,
+ slug: args
+ .slug
+ .map(Slug::new)
.transpose()
- .map_err(store_fault("tools/call:schema.field.upsert"))?,
+ .map_err(store_fault(&operation))?,
})
- .map_err(store_fault("tools/call:schema.field.upsert"))?;
- tool_success(
- schema_field_upsert_output(self.store.schema(), &field)?,
- presentation,
- FaultStage::Worker,
- "tools/call:schema.field.upsert",
- )
+ );
+ frontier_record_output(&frontier, &operation)?
}
- "schema.field.remove" => {
- let args = deserialize::<SchemaFieldRemoveToolArgs>(arguments)?;
- let removed_count = self
- .store
- .remove_schema_field(RemoveSchemaFieldRequest {
- name: NonEmptyText::new(args.name)
- .map_err(store_fault("tools/call:schema.field.remove"))?,
- node_classes: args
- .node_classes
- .map(|node_classes| {
- node_classes
- .into_iter()
- .map(|class| {
- parse_node_class_name(&class)
- .map_err(store_fault("tools/call:schema.field.remove"))
- })
- .collect::<Result<_, _>>()
- })
- .transpose()?,
- })
- .map_err(store_fault("tools/call:schema.field.remove"))?;
- tool_success(
- schema_field_remove_output(self.store.schema(), removed_count)?,
- presentation,
- FaultStage::Worker,
- "tools/call:schema.field.remove",
- )
+ "frontier.list" => {
+ frontier_list_output(&lift!(self.store.list_frontiers()), &operation)?
}
- "tag.add" => {
- let args = deserialize::<TagAddToolArgs>(arguments)?;
- let tag = self
- .store
- .add_tag(
- TagName::new(args.name).map_err(store_fault("tools/call:tag.add"))?,
- NonEmptyText::new(args.description)
- .map_err(store_fault("tools/call:tag.add"))?,
- )
- .map_err(store_fault("tools/call:tag.add"))?;
- tool_success(
- tag_add_output(&tag)?,
- presentation,
- FaultStage::Worker,
- "tools/call:tag.add",
- )
+ "frontier.read" => {
+ let args = deserialize::<FrontierSelectorArgs>(arguments)?;
+ frontier_record_output(
+ &lift!(self.store.read_frontier(&args.frontier)),
+ &operation,
+ )?
}
- "tag.list" => {
- let tags = self
- .store
- .list_tags()
- .map_err(store_fault("tools/call:tag.list"))?;
- tool_success(
- tag_list_output(tags.as_slice())?,
- presentation,
- FaultStage::Worker,
- "tools/call:tag.list",
- )
+ "frontier.open" => {
+ let args = deserialize::<FrontierSelectorArgs>(arguments)?;
+ frontier_open_output(&lift!(self.store.frontier_open(&args.frontier)), &operation)?
}
- "frontier.list" => {
- let frontiers = self
- .store
- .list_frontiers()
- .map_err(store_fault("tools/call:frontier.list"))?;
- tool_success(
- frontier_list_output(frontiers.as_slice())?,
- presentation,
- FaultStage::Worker,
- "tools/call:frontier.list",
- )
+ "frontier.brief.update" => {
+ let args = deserialize::<FrontierBriefUpdateArgs>(arguments)?;
+ let frontier = lift!(
+ self.store
+ .update_frontier_brief(UpdateFrontierBriefRequest {
+ frontier: args.frontier,
+ expected_revision: args.expected_revision,
+ situation: nullable_text_patch_from_wire(args.situation, &operation)?,
+ roadmap: args
+ .roadmap
+ .map(|items| {
+ items
+ .into_iter()
+ .map(|item| {
+ Ok(FrontierRoadmapItemDraft {
+ rank: item.rank,
+ hypothesis: item.hypothesis,
+ summary: item
+ .summary
+ .map(NonEmptyText::new)
+ .transpose()
+ .map_err(store_fault(&operation))?,
+ })
+ })
+ .collect::<Result<Vec<_>, FaultRecord>>()
+ })
+ .transpose()?,
+ unknowns: args
+ .unknowns
+ .map(|items| {
+ items
+ .into_iter()
+ .map(NonEmptyText::new)
+ .collect::<Result<Vec<_>, _>>()
+ .map_err(store_fault(&operation))
+ })
+ .transpose()?,
+ })
+ );
+ frontier_record_output(&frontier, &operation)?
}
- "frontier.status" => {
- let args = deserialize::<FrontierStatusToolArgs>(arguments)?;
- let projection = self
- .store
- .frontier_projection(
- crate::parse_frontier_id(&args.frontier_id)
- .map_err(store_fault("tools/call:frontier.status"))?,
- )
- .map_err(store_fault("tools/call:frontier.status"))?;
- tool_success(
- frontier_status_output(&projection)?,
- presentation,
- FaultStage::Worker,
- "tools/call:frontier.status",
- )
+ "frontier.history" => {
+ let args = deserialize::<FrontierSelectorArgs>(arguments)?;
+ history_output(
+ &lift!(self.store.frontier_history(&args.frontier)),
+ &operation,
+ )?
}
- "frontier.init" => {
- let args = deserialize::<FrontierInitToolArgs>(arguments)?;
- let projection = self
- .store
- .create_frontier(CreateFrontierRequest {
- label: NonEmptyText::new(args.label)
- .map_err(store_fault("tools/call:frontier.init"))?,
- contract_title: NonEmptyText::new(args.contract_title)
- .map_err(store_fault("tools/call:frontier.init"))?,
- contract_summary: args
- .contract_summary
- .map(NonEmptyText::new)
+ "hypothesis.record" => {
+ let args = deserialize::<HypothesisRecordArgs>(arguments)?;
+ let hypothesis = lift!(
+ self.store.create_hypothesis(CreateHypothesisRequest {
+ frontier: args.frontier,
+ slug: args
+ .slug
+ .map(Slug::new)
.transpose()
- .map_err(store_fault("tools/call:frontier.init"))?,
- contract: FrontierContract {
- objective: NonEmptyText::new(args.objective)
- .map_err(store_fault("tools/call:frontier.init"))?,
- evaluation: fidget_spinner_core::EvaluationProtocol {
- benchmark_suites: crate::to_text_set(args.benchmark_suites)
- .map_err(store_fault("tools/call:frontier.init"))?,
- primary_metric: MetricSpec {
- metric_key: NonEmptyText::new(args.primary_metric.key)
- .map_err(store_fault("tools/call:frontier.init"))?,
- unit: parse_metric_unit_name(&args.primary_metric.unit)
- .map_err(store_fault("tools/call:frontier.init"))?,
- objective: crate::parse_optimization_objective(
- &args.primary_metric.objective,
- )
- .map_err(store_fault("tools/call:frontier.init"))?,
- },
- supporting_metrics: args
- .supporting_metrics
- .into_iter()
- .map(metric_spec_from_wire)
- .collect::<Result<_, _>>()
- .map_err(store_fault("tools/call:frontier.init"))?,
- },
- promotion_criteria: crate::to_text_vec(args.promotion_criteria)
- .map_err(store_fault("tools/call:frontier.init"))?,
- },
+ .map_err(store_fault(&operation))?,
+ title: NonEmptyText::new(args.title).map_err(store_fault(&operation))?,
+ summary: NonEmptyText::new(args.summary)
+ .map_err(store_fault(&operation))?,
+ body: NonEmptyText::new(args.body).map_err(store_fault(&operation))?,
+ tags: tags_to_set(args.tags.unwrap_or_default())
+ .map_err(store_fault(&operation))?,
+ parents: args.parents.unwrap_or_default(),
})
- .map_err(store_fault("tools/call:frontier.init"))?;
- tool_success(
- frontier_created_output(&projection)?,
- presentation,
- FaultStage::Worker,
- "tools/call:frontier.init",
- )
+ );
+ hypothesis_record_output(&hypothesis, &operation)?
+ }
+ "hypothesis.list" => {
+ let args = deserialize::<HypothesisListArgs>(arguments)?;
+ let hypotheses = lift!(
+ self.store.list_hypotheses(ListHypothesesQuery {
+ frontier: args.frontier,
+ tags: tags_to_set(args.tags.unwrap_or_default())
+ .map_err(store_fault(&operation))?,
+ include_archived: args.include_archived.unwrap_or(false),
+ limit: args.limit,
+ })
+ );
+ hypothesis_list_output(&hypotheses, &operation)?
}
- "node.create" => {
- let args = deserialize::<NodeCreateToolArgs>(arguments)?;
- let node = self
- .store
- .add_node(CreateNodeRequest {
- class: parse_node_class_name(&args.class)
- .map_err(store_fault("tools/call:node.create"))?,
- frontier_id: args
- .frontier_id
- .as_deref()
- .map(crate::parse_frontier_id)
+ "hypothesis.read" => {
+ let args = deserialize::<HypothesisSelectorArgs>(arguments)?;
+ hypothesis_detail_output(
+ &lift!(self.store.read_hypothesis(&args.hypothesis)),
+ &operation,
+ )?
+ }
+ "hypothesis.update" => {
+ let args = deserialize::<HypothesisUpdateArgs>(arguments)?;
+ let hypothesis = lift!(
+ self.store.update_hypothesis(UpdateHypothesisRequest {
+ hypothesis: args.hypothesis,
+ expected_revision: args.expected_revision,
+ title: args
+ .title
+ .map(NonEmptyText::new)
.transpose()
- .map_err(store_fault("tools/call:node.create"))?,
- title: NonEmptyText::new(args.title)
- .map_err(store_fault("tools/call:node.create"))?,
+ .map_err(store_fault(&operation))?,
summary: args
.summary
.map(NonEmptyText::new)
.transpose()
- .map_err(store_fault("tools/call:node.create"))?,
+ .map_err(store_fault(&operation))?,
+ body: args
+ .body
+ .map(NonEmptyText::new)
+ .transpose()
+ .map_err(store_fault(&operation))?,
tags: args
.tags
- .map(parse_tag_set)
+ .map(tags_to_set)
.transpose()
- .map_err(store_fault("tools/call:node.create"))?,
- payload: NodePayload::with_schema(
- self.store.schema().schema_ref(),
- args.payload.unwrap_or_default(),
- ),
- annotations: tool_annotations(args.annotations)
- .map_err(store_fault("tools/call:node.create"))?,
- attachments: lineage_attachments(args.parents)
- .map_err(store_fault("tools/call:node.create"))?,
+ .map_err(store_fault(&operation))?,
+ parents: args.parents,
+ archived: args.archived,
})
- .map_err(store_fault("tools/call:node.create"))?;
- tool_success(
- created_node_output("created node", &node, "tools/call:node.create")?,
- presentation,
- FaultStage::Worker,
- "tools/call:node.create",
- )
+ );
+ hypothesis_record_output(&hypothesis, &operation)?
}
- "hypothesis.record" => {
- let args = deserialize::<HypothesisRecordToolArgs>(arguments)?;
- let node = self
- .store
- .add_node(CreateNodeRequest {
- class: NodeClass::Hypothesis,
- frontier_id: Some(
- crate::parse_frontier_id(&args.frontier_id)
- .map_err(store_fault("tools/call:hypothesis.record"))?,
- ),
- title: NonEmptyText::new(args.title)
- .map_err(store_fault("tools/call:hypothesis.record"))?,
- summary: Some(
- NonEmptyText::new(args.summary)
- .map_err(store_fault("tools/call:hypothesis.record"))?,
- ),
- tags: None,
- payload: NodePayload::with_schema(
- self.store.schema().schema_ref(),
- crate::json_object(json!({ "body": args.body }))
- .map_err(store_fault("tools/call:hypothesis.record"))?,
- ),
- annotations: tool_annotations(args.annotations)
- .map_err(store_fault("tools/call:hypothesis.record"))?,
- attachments: lineage_attachments(args.parents)
- .map_err(store_fault("tools/call:hypothesis.record"))?,
- })
- .map_err(store_fault("tools/call:hypothesis.record"))?;
- tool_success(
- created_node_output(
- "recorded hypothesis",
- &node,
- "tools/call:hypothesis.record",
- )?,
- presentation,
- FaultStage::Worker,
- "tools/call:hypothesis.record",
- )
+ "hypothesis.history" => {
+ let args = deserialize::<HypothesisSelectorArgs>(arguments)?;
+ history_output(
+ &lift!(self.store.hypothesis_history(&args.hypothesis)),
+ &operation,
+ )?
}
- "node.list" => {
- let args = deserialize::<NodeListToolArgs>(arguments)?;
- let nodes = self
- .store
- .list_nodes(ListNodesQuery {
- frontier_id: args
- .frontier_id
- .as_deref()
- .map(crate::parse_frontier_id)
+ "experiment.open" => {
+ let args = deserialize::<ExperimentOpenArgs>(arguments)?;
+ let experiment = lift!(
+ self.store.open_experiment(OpenExperimentRequest {
+ hypothesis: args.hypothesis,
+ slug: args
+ .slug
+ .map(Slug::new)
.transpose()
- .map_err(store_fault("tools/call:node.list"))?,
- class: args
- .class
- .as_deref()
- .map(parse_node_class_name)
+ .map_err(store_fault(&operation))?,
+ title: NonEmptyText::new(args.title).map_err(store_fault(&operation))?,
+ summary: args
+ .summary
+ .map(NonEmptyText::new)
.transpose()
- .map_err(store_fault("tools/call:node.list"))?,
- tags: parse_tag_set(args.tags)
- .map_err(store_fault("tools/call:node.list"))?,
- include_archived: args.include_archived,
- limit: args.limit.unwrap_or(20),
+ .map_err(store_fault(&operation))?,
+ tags: tags_to_set(args.tags.unwrap_or_default())
+ .map_err(store_fault(&operation))?,
+ parents: args.parents.unwrap_or_default(),
})
- .map_err(store_fault("tools/call:node.list"))?;
- tool_success(
- node_list_output(nodes.as_slice())?,
- presentation,
- FaultStage::Worker,
- "tools/call:node.list",
- )
- }
- "node.read" => {
- let args = deserialize::<NodeReadToolArgs>(arguments)?;
- let node_id = crate::parse_node_id(&args.node_id)
- .map_err(store_fault("tools/call:node.read"))?;
- let node = self
- .store
- .get_node(node_id)
- .map_err(store_fault("tools/call:node.read"))?
- .ok_or_else(|| {
- FaultRecord::new(
- FaultKind::InvalidInput,
- FaultStage::Store,
- "tools/call:node.read",
- format!("node {node_id} was not found"),
- )
- })?;
- tool_success(
- node_read_output(&node)?,
- presentation,
- FaultStage::Worker,
- "tools/call:node.read",
- )
- }
- "node.annotate" => {
- let args = deserialize::<NodeAnnotateToolArgs>(arguments)?;
- let annotation = NodeAnnotation {
- id: fidget_spinner_core::AnnotationId::fresh(),
- visibility: if args.visible {
- AnnotationVisibility::Visible
- } else {
- AnnotationVisibility::HiddenByDefault
- },
- label: args
- .label
- .map(NonEmptyText::new)
- .transpose()
- .map_err(store_fault("tools/call:node.annotate"))?,
- body: NonEmptyText::new(args.body)
- .map_err(store_fault("tools/call:node.annotate"))?,
- created_at: time::OffsetDateTime::now_utc(),
- };
- self.store
- .annotate_node(
- crate::parse_node_id(&args.node_id)
- .map_err(store_fault("tools/call:node.annotate"))?,
- annotation,
- )
- .map_err(store_fault("tools/call:node.annotate"))?;
- tool_success(
- tool_output(
- &json!({"annotated": args.node_id}),
- FaultStage::Worker,
- "tools/call:node.annotate",
- )?,
- presentation,
- FaultStage::Worker,
- "tools/call:node.annotate",
- )
- }
- "node.archive" => {
- let args = deserialize::<NodeArchiveToolArgs>(arguments)?;
- self.store
- .archive_node(
- crate::parse_node_id(&args.node_id)
- .map_err(store_fault("tools/call:node.archive"))?,
- )
- .map_err(store_fault("tools/call:node.archive"))?;
- tool_success(
- tool_output(
- &json!({"archived": args.node_id}),
- FaultStage::Worker,
- "tools/call:node.archive",
- )?,
- presentation,
- FaultStage::Worker,
- "tools/call:node.archive",
- )
+ );
+ experiment_record_output(&experiment, &operation)?
}
- "note.quick" => {
- let args = deserialize::<QuickNoteToolArgs>(arguments)?;
- let node = self
- .store
- .add_node(CreateNodeRequest {
- class: NodeClass::Note,
- frontier_id: args
- .frontier_id
- .as_deref()
- .map(crate::parse_frontier_id)
- .transpose()
- .map_err(store_fault("tools/call:note.quick"))?,
- title: NonEmptyText::new(args.title)
- .map_err(store_fault("tools/call:note.quick"))?,
- summary: Some(
- NonEmptyText::new(args.summary)
- .map_err(store_fault("tools/call:note.quick"))?,
- ),
- tags: Some(
- parse_tag_set(args.tags)
- .map_err(store_fault("tools/call:note.quick"))?,
- ),
- payload: NodePayload::with_schema(
- self.store.schema().schema_ref(),
- crate::json_object(json!({ "body": args.body }))
- .map_err(store_fault("tools/call:note.quick"))?,
- ),
- annotations: tool_annotations(args.annotations)
- .map_err(store_fault("tools/call:note.quick"))?,
- attachments: lineage_attachments(args.parents)
- .map_err(store_fault("tools/call:note.quick"))?,
+ "experiment.list" => {
+ let args = deserialize::<ExperimentListArgs>(arguments)?;
+ let experiments = lift!(
+ self.store.list_experiments(ListExperimentsQuery {
+ frontier: args.frontier,
+ hypothesis: args.hypothesis,
+ tags: tags_to_set(args.tags.unwrap_or_default())
+ .map_err(store_fault(&operation))?,
+ include_archived: args.include_archived.unwrap_or(false),
+ status: args.status,
+ limit: args.limit,
})
- .map_err(store_fault("tools/call:note.quick"))?;
- tool_success(
- created_node_output("recorded note", &node, "tools/call:note.quick")?,
- presentation,
- FaultStage::Worker,
- "tools/call:note.quick",
- )
+ );
+ experiment_list_output(&experiments, &operation)?
+ }
+ "experiment.read" => {
+ let args = deserialize::<ExperimentSelectorArgs>(arguments)?;
+ experiment_detail_output(
+ &lift!(self.store.read_experiment(&args.experiment)),
+ &operation,
+ )?
}
- "source.record" => {
- let args = deserialize::<SourceRecordToolArgs>(arguments)?;
- let node = self
- .store
- .add_node(CreateNodeRequest {
- class: NodeClass::Source,
- frontier_id: args
- .frontier_id
- .as_deref()
- .map(crate::parse_frontier_id)
+ "experiment.update" => {
+ let args = deserialize::<ExperimentUpdateArgs>(arguments)?;
+ let experiment = lift!(
+ self.store.update_experiment(UpdateExperimentRequest {
+ experiment: args.experiment,
+ expected_revision: args.expected_revision,
+ title: args
+ .title
+ .map(NonEmptyText::new)
.transpose()
- .map_err(store_fault("tools/call:source.record"))?,
- title: NonEmptyText::new(args.title)
- .map_err(store_fault("tools/call:source.record"))?,
- summary: Some(
- NonEmptyText::new(args.summary)
- .map_err(store_fault("tools/call:source.record"))?,
- ),
+ .map_err(store_fault(&operation))?,
+ summary: nullable_text_patch_from_wire(args.summary, &operation)?,
tags: args
.tags
- .map(parse_tag_set)
+ .map(tags_to_set)
.transpose()
- .map_err(store_fault("tools/call:source.record"))?,
- payload: NodePayload::with_schema(
- self.store.schema().schema_ref(),
- crate::json_object(json!({ "body": args.body }))
- .map_err(store_fault("tools/call:source.record"))?,
- ),
- annotations: tool_annotations(args.annotations)
- .map_err(store_fault("tools/call:source.record"))?,
- attachments: lineage_attachments(args.parents)
- .map_err(store_fault("tools/call:source.record"))?,
+ .map_err(store_fault(&operation))?,
+ parents: args.parents,
+ archived: args.archived,
+ outcome: args
+ .outcome
+ .map(|wire| experiment_outcome_patch_from_wire(wire, &operation))
+ .transpose()?,
})
- .map_err(store_fault("tools/call:source.record"))?;
- tool_success(
- created_node_output("recorded source", &node, "tools/call:source.record")?,
- presentation,
- FaultStage::Worker,
- "tools/call:source.record",
- )
+ );
+ experiment_record_output(&experiment, &operation)?
}
- "metric.define" => {
- let args = deserialize::<MetricDefineToolArgs>(arguments)?;
- let metric = self
- .store
- .define_metric(DefineMetricRequest {
- key: NonEmptyText::new(args.key)
- .map_err(store_fault("tools/call:metric.define"))?,
- unit: parse_metric_unit_name(&args.unit)
- .map_err(store_fault("tools/call:metric.define"))?,
- objective: crate::parse_optimization_objective(&args.objective)
- .map_err(store_fault("tools/call:metric.define"))?,
- description: args
- .description
- .map(NonEmptyText::new)
- .transpose()
- .map_err(store_fault("tools/call:metric.define"))?,
+ "experiment.close" => {
+ let args = deserialize::<ExperimentCloseArgs>(arguments)?;
+ let experiment = lift!(
+ self.store.close_experiment(CloseExperimentRequest {
+ experiment: args.experiment,
+ expected_revision: args.expected_revision,
+ backend: args.backend,
+ command: args.command,
+ dimensions: dimension_map_from_wire(args.dimensions)?,
+ primary_metric: metric_value_from_wire(args.primary_metric, &operation)?,
+ supporting_metrics: args
+ .supporting_metrics
+ .unwrap_or_default()
+ .into_iter()
+ .map(|metric| metric_value_from_wire(metric, &operation))
+ .collect::<Result<Vec<_>, _>>()?,
+ verdict: args.verdict,
+ rationale: NonEmptyText::new(args.rationale)
+ .map_err(store_fault(&operation))?,
+ analysis: args
+ .analysis
+ .map(|analysis| experiment_analysis_from_wire(analysis, &operation))
+ .transpose()?,
})
- .map_err(store_fault("tools/call:metric.define"))?;
- tool_success(
- json_created_output(
- "registered metric",
- json!({
- "key": metric.key,
- "unit": metric_unit_name(metric.unit),
- "objective": metric_objective_name(metric.objective),
- "description": metric.description,
- }),
- "tools/call:metric.define",
- )?,
- presentation,
- FaultStage::Worker,
- "tools/call:metric.define",
- )
+ );
+ experiment_record_output(&experiment, &operation)?
}
- "run.dimension.define" => {
- let args = deserialize::<RunDimensionDefineToolArgs>(arguments)?;
- let dimension = self
- .store
- .define_run_dimension(DefineRunDimensionRequest {
- key: NonEmptyText::new(args.key)
- .map_err(store_fault("tools/call:run.dimension.define"))?,
- value_type: parse_field_value_type_name(&args.value_type)
- .map_err(store_fault("tools/call:run.dimension.define"))?,
- description: args
- .description
+ "experiment.history" => {
+ let args = deserialize::<ExperimentSelectorArgs>(arguments)?;
+ history_output(
+ &lift!(self.store.experiment_history(&args.experiment)),
+ &operation,
+ )?
+ }
+ "artifact.record" => {
+ let args = deserialize::<ArtifactRecordArgs>(arguments)?;
+ let artifact = lift!(
+ self.store.create_artifact(CreateArtifactRequest {
+ slug: args
+ .slug
+ .map(Slug::new)
+ .transpose()
+ .map_err(store_fault(&operation))?,
+ kind: args.kind,
+ label: NonEmptyText::new(args.label).map_err(store_fault(&operation))?,
+ summary: args
+ .summary
.map(NonEmptyText::new)
.transpose()
- .map_err(store_fault("tools/call:run.dimension.define"))?,
+ .map_err(store_fault(&operation))?,
+ locator: NonEmptyText::new(args.locator)
+ .map_err(store_fault(&operation))?,
+ media_type: args
+ .media_type
+ .map(NonEmptyText::new)
+ .transpose()
+ .map_err(store_fault(&operation))?,
+ attachments: args.attachments.unwrap_or_default(),
})
- .map_err(store_fault("tools/call:run.dimension.define"))?;
- tool_success(
- json_created_output(
- "registered run dimension",
- json!({
- "key": dimension.key,
- "value_type": dimension.value_type.as_str(),
- "description": dimension.description,
- }),
- "tools/call:run.dimension.define",
- )?,
- presentation,
- FaultStage::Worker,
- "tools/call:run.dimension.define",
- )
+ );
+ artifact_record_output(&artifact, &operation)?
}
- "run.dimension.list" => {
- let items = self
- .store
- .list_run_dimensions()
- .map_err(store_fault("tools/call:run.dimension.list"))?;
- tool_success(
- run_dimension_list_output(items.as_slice())?,
- presentation,
- FaultStage::Worker,
- "tools/call:run.dimension.list",
- )
+ "artifact.list" => {
+ let args = deserialize::<ArtifactListArgs>(arguments)?;
+ let artifacts = lift!(self.store.list_artifacts(ListArtifactsQuery {
+ frontier: args.frontier,
+ kind: args.kind,
+ attached_to: args.attached_to,
+ limit: args.limit,
+ }));
+ artifact_list_output(&artifacts, &operation)?
}
- "metric.keys" => {
- let args = deserialize::<MetricKeysToolArgs>(arguments)?;
- let keys = self
- .store
- .list_metric_keys_filtered(MetricKeyQuery {
- frontier_id: args
- .frontier_id
- .as_deref()
- .map(crate::parse_frontier_id)
- .transpose()
- .map_err(store_fault("tools/call:metric.keys"))?,
- source: args
- .source
- .as_deref()
- .map(parse_metric_source_name)
- .transpose()
- .map_err(store_fault("tools/call:metric.keys"))?,
- dimensions: coerce_tool_dimensions(
- &self.store,
- args.dimensions.unwrap_or_default(),
- "tools/call:metric.keys",
- )?,
- })
- .map_err(store_fault("tools/call:metric.keys"))?;
- tool_success(
- metric_keys_output(keys.as_slice())?,
- presentation,
- FaultStage::Worker,
- "tools/call:metric.keys",
- )
+ "artifact.read" => {
+ let args = deserialize::<ArtifactSelectorArgs>(arguments)?;
+ artifact_detail_output(
+ &lift!(self.store.read_artifact(&args.artifact)),
+ &operation,
+ )?
}
- "metric.best" => {
- let args = deserialize::<MetricBestToolArgs>(arguments)?;
- let items = self
- .store
- .best_metrics(MetricBestQuery {
- key: NonEmptyText::new(args.key)
- .map_err(store_fault("tools/call:metric.best"))?,
- frontier_id: args
- .frontier_id
- .as_deref()
- .map(crate::parse_frontier_id)
- .transpose()
- .map_err(store_fault("tools/call:metric.best"))?,
- source: args
- .source
- .as_deref()
- .map(parse_metric_source_name)
+ "artifact.update" => {
+ let args = deserialize::<ArtifactUpdateArgs>(arguments)?;
+ let artifact = lift!(
+ self.store.update_artifact(UpdateArtifactRequest {
+ artifact: args.artifact,
+ expected_revision: args.expected_revision,
+ kind: args.kind,
+ label: args
+ .label
+ .map(NonEmptyText::new)
.transpose()
- .map_err(store_fault("tools/call:metric.best"))?,
- dimensions: coerce_tool_dimensions(
- &self.store,
- args.dimensions.unwrap_or_default(),
- "tools/call:metric.best",
- )?,
- order: args
- .order
- .as_deref()
- .map(parse_metric_order_name)
+ .map_err(store_fault(&operation))?,
+ summary: nullable_text_patch_from_wire(args.summary, &operation)?,
+ locator: args
+ .locator
+ .map(NonEmptyText::new)
.transpose()
- .map_err(store_fault("tools/call:metric.best"))?,
- limit: args.limit.unwrap_or(10),
+ .map_err(store_fault(&operation))?,
+ media_type: nullable_text_patch_from_wire(args.media_type, &operation)?,
+ attachments: args.attachments,
})
- .map_err(store_fault("tools/call:metric.best"))?;
- tool_success(
- metric_best_output(items.as_slice())?,
- presentation,
- FaultStage::Worker,
- "tools/call:metric.best",
- )
+ );
+ artifact_record_output(&artifact, &operation)?
}
- "metric.migrate" => {
- let report = self
- .store
- .migrate_metric_plane()
- .map_err(store_fault("tools/call:metric.migrate"))?;
- tool_success(
- json_created_output(
- "normalized legacy metric plane",
- json!(report),
- "tools/call:metric.migrate",
- )?,
- presentation,
- FaultStage::Worker,
- "tools/call:metric.migrate",
- )
+ "artifact.history" => {
+ let args = deserialize::<ArtifactSelectorArgs>(arguments)?;
+ history_output(
+ &lift!(self.store.artifact_history(&args.artifact)),
+ &operation,
+ )?
}
- "experiment.open" => {
- let args = deserialize::<ExperimentOpenToolArgs>(arguments)?;
- let item = self
- .store
- .open_experiment(OpenExperimentRequest {
- frontier_id: crate::parse_frontier_id(&args.frontier_id)
- .map_err(store_fault("tools/call:experiment.open"))?,
- hypothesis_node_id: crate::parse_node_id(&args.hypothesis_node_id)
- .map_err(store_fault("tools/call:experiment.open"))?,
- title: NonEmptyText::new(args.title)
- .map_err(store_fault("tools/call:experiment.open"))?,
- summary: args
- .summary
- .map(NonEmptyText::new)
- .transpose()
- .map_err(store_fault("tools/call:experiment.open"))?,
- })
- .map_err(store_fault("tools/call:experiment.open"))?;
- tool_success(
- experiment_open_output(
- &item,
- "tools/call:experiment.open",
- "opened experiment",
- )?,
- presentation,
+ "metric.define" => {
+ let args = deserialize::<MetricDefineArgs>(arguments)?;
+ tool_output(
+ &lift!(
+ self.store.define_metric(DefineMetricRequest {
+ key: NonEmptyText::new(args.key).map_err(store_fault(&operation))?,
+ unit: args.unit,
+ objective: args.objective,
+ visibility: args.visibility.unwrap_or(MetricVisibility::Canonical),
+ description: args
+ .description
+ .map(NonEmptyText::new)
+ .transpose()
+ .map_err(store_fault(&operation))?,
+ })
+ ),
FaultStage::Worker,
- "tools/call:experiment.open",
- )
+ &operation,
+ )?
}
- "experiment.list" => {
- let args = deserialize::<ExperimentListToolArgs>(arguments)?;
- let items = self
- .store
- .list_open_experiments(
- args.frontier_id
- .as_deref()
- .map(crate::parse_frontier_id)
- .transpose()
- .map_err(store_fault("tools/call:experiment.list"))?,
- )
- .map_err(store_fault("tools/call:experiment.list"))?;
- tool_success(
- experiment_list_output(items.as_slice())?,
- presentation,
- FaultStage::Worker,
- "tools/call:experiment.list",
- )
+ "metric.keys" => {
+ let args = deserialize::<MetricKeysArgs>(arguments)?;
+ metric_keys_output(
+ &lift!(self.store.metric_keys(MetricKeysQuery {
+ frontier: args.frontier,
+ scope: args.scope.unwrap_or(MetricScope::Live),
+ })),
+ &operation,
+ )?
}
- "experiment.read" => {
- let args = deserialize::<ExperimentReadToolArgs>(arguments)?;
- let item = self
- .store
- .read_open_experiment(
- crate::parse_experiment_id(&args.experiment_id)
- .map_err(store_fault("tools/call:experiment.read"))?,
- )
- .map_err(store_fault("tools/call:experiment.read"))?;
- tool_success(
- experiment_open_output(&item, "tools/call:experiment.read", "open experiment")?,
- presentation,
- FaultStage::Worker,
- "tools/call:experiment.read",
- )
+ "metric.best" => {
+ let args = deserialize::<MetricBestArgs>(arguments)?;
+ metric_best_output(
+ &lift!(self.store.metric_best(MetricBestQuery {
+ frontier: args.frontier,
+ hypothesis: args.hypothesis,
+ key: NonEmptyText::new(args.key).map_err(store_fault(&operation))?,
+ dimensions: dimension_map_from_wire(args.dimensions)?,
+ include_rejected: args.include_rejected.unwrap_or(false),
+ limit: args.limit,
+ order: args.order,
+ })),
+ &operation,
+ )?
}
- "experiment.close" => {
- let args = deserialize::<ExperimentCloseToolArgs>(arguments)?;
- let receipt = self
- .store
- .close_experiment(CloseExperimentRequest {
- experiment_id: crate::parse_experiment_id(&args.experiment_id)
- .map_err(store_fault("tools/call:experiment.close"))?,
- run_title: NonEmptyText::new(args.run.title)
- .map_err(store_fault("tools/call:experiment.close"))?,
- run_summary: args
- .run
- .summary
- .map(NonEmptyText::new)
- .transpose()
- .map_err(store_fault("tools/call:experiment.close"))?,
- backend: parse_backend_name(&args.run.backend)
- .map_err(store_fault("tools/call:experiment.close"))?,
- dimensions: coerce_tool_dimensions(
- &self.store,
- args.run.dimensions,
- "tools/call:experiment.close",
- )?,
- command: command_recipe_from_wire(
- args.run.command,
- self.store.project_root(),
- )
- .map_err(store_fault("tools/call:experiment.close"))?,
- primary_metric: metric_value_from_wire(args.primary_metric)
- .map_err(store_fault("tools/call:experiment.close"))?,
- supporting_metrics: args
- .supporting_metrics
- .into_iter()
- .map(metric_value_from_wire)
- .collect::<Result<Vec<_>, _>>()
- .map_err(store_fault("tools/call:experiment.close"))?,
- note: FrontierNote {
- summary: NonEmptyText::new(args.note.summary)
- .map_err(store_fault("tools/call:experiment.close"))?,
- next_hypotheses: crate::to_text_vec(args.note.next_hypotheses)
- .map_err(store_fault("tools/call:experiment.close"))?,
- },
- verdict: parse_verdict_name(&args.verdict)
- .map_err(store_fault("tools/call:experiment.close"))?,
- analysis: args
- .analysis
- .map(experiment_analysis_from_wire)
- .transpose()
- .map_err(store_fault("tools/call:experiment.close"))?,
- decision_title: NonEmptyText::new(args.decision_title)
- .map_err(store_fault("tools/call:experiment.close"))?,
- decision_rationale: NonEmptyText::new(args.decision_rationale)
- .map_err(store_fault("tools/call:experiment.close"))?,
- })
- .map_err(store_fault("tools/call:experiment.close"))?;
- tool_success(
- experiment_close_output(&self.store, &receipt)?,
- presentation,
+ "run.dimension.define" => {
+ let args = deserialize::<DimensionDefineArgs>(arguments)?;
+ tool_output(
+ &lift!(
+ self.store.define_run_dimension(DefineRunDimensionRequest {
+ key: NonEmptyText::new(args.key).map_err(store_fault(&operation))?,
+ value_type: args.value_type,
+ description: args
+ .description
+ .map(NonEmptyText::new)
+ .transpose()
+ .map_err(store_fault(&operation))?,
+ })
+ ),
FaultStage::Worker,
- "tools/call:experiment.close",
- )
+ &operation,
+ )?
}
- other => Err(FaultRecord::new(
- FaultKind::InvalidInput,
+ "run.dimension.list" => tool_output(
+ &lift!(self.store.list_run_dimensions()),
FaultStage::Worker,
- format!("tools/call:{other}"),
- format!("unknown tool `{other}`"),
- )),
- }
+ &operation,
+ )?,
+ other => {
+ return Err(FaultRecord::new(
+ FaultKind::InvalidInput,
+ FaultStage::Worker,
+ &operation,
+ format!("unknown worker tool `{other}`"),
+ ));
+ }
+ };
+ tool_success(output, presentation, FaultStage::Worker, &operation)
}
- fn read_resource(&mut self, uri: &str) -> Result<Value, FaultRecord> {
- match uri {
- "fidget-spinner://project/config" => Ok(json!({
- "contents": [{
- "uri": uri,
- "mimeType": "application/json",
- "text": crate::to_pretty_json(self.store.config())
- .map_err(store_fault("resources/read:fidget-spinner://project/config"))?,
- }]
- })),
- "fidget-spinner://project/schema" => Ok(json!({
- "contents": [{
- "uri": uri,
- "mimeType": "application/json",
- "text": crate::to_pretty_json(self.store.schema())
- .map_err(store_fault("resources/read:fidget-spinner://project/schema"))?,
- }]
- })),
- _ => Err(FaultRecord::new(
- FaultKind::InvalidInput,
- FaultStage::Worker,
- format!("resources/read:{uri}"),
- format!("unknown resource `{uri}`"),
- )),
- }
+ fn read_resource(uri: &str) -> Result<Value, FaultRecord> {
+ Err(FaultRecord::new(
+ FaultKind::InvalidInput,
+ FaultStage::Worker,
+ format!("resources/read:{uri}"),
+ format!("unknown worker resource `{uri}`"),
+ ))
}
fn maybe_inject_transient(operation: &str) -> Result<(), FaultRecord> {
@@ -877,6 +531,227 @@ impl WorkerService {
}
}
// Wire-format argument payloads for the worker-side MCP tools. Each struct is
// deserialized from the JSON `arguments` object of a `tools/call` request via
// `deserialize::<T>()`. `Option` fields correspond to optional JSON members;
// `expected_revision` fields carry an optimistic revision guard (presumably
// checked against `StoreError::RevisionMismatch` — confirm in the store layer).

/// Arguments for `tag.add`: a repo-local tag name plus required description.
#[derive(Debug, Deserialize)]
struct TagAddArgs {
    name: String,
    description: String,
}

/// Arguments for `frontier.create`.
#[derive(Debug, Deserialize)]
struct FrontierCreateArgs {
    label: String,
    objective: String,
    slug: Option<String>,
}

/// Selects a single frontier by selector string.
#[derive(Debug, Deserialize)]
struct FrontierSelectorArgs {
    frontier: String,
}

/// Arguments for updating a frontier brief.
#[derive(Debug, Deserialize)]
struct FrontierBriefUpdateArgs {
    frontier: String,
    expected_revision: Option<u64>,
    // `situation` distinguishes "leave unchanged" (absent) from "clear" (null).
    situation: Option<NullableStringArg>,
    roadmap: Option<Vec<FrontierRoadmapItemWire>>,
    unknowns: Option<Vec<String>>,
}

/// One ranked roadmap entry inside a frontier brief update.
#[derive(Debug, Deserialize)]
struct FrontierRoadmapItemWire {
    rank: u32,
    hypothesis: String,
    summary: Option<String>,
}

/// Arguments for `hypothesis.record`.
#[derive(Debug, Deserialize)]
struct HypothesisRecordArgs {
    frontier: String,
    title: String,
    summary: String,
    body: String,
    slug: Option<String>,
    tags: Option<Vec<String>>,
    parents: Option<Vec<VertexSelector>>,
}

/// Filters for listing hypotheses.
#[derive(Debug, Deserialize)]
struct HypothesisListArgs {
    frontier: Option<String>,
    tags: Option<Vec<String>>,
    include_archived: Option<bool>,
    limit: Option<u32>,
}

/// Selects a single hypothesis by selector string.
#[derive(Debug, Deserialize)]
struct HypothesisSelectorArgs {
    hypothesis: String,
}

/// Patch-style arguments for updating a hypothesis; absent fields are left
/// unchanged.
#[derive(Debug, Deserialize)]
struct HypothesisUpdateArgs {
    hypothesis: String,
    expected_revision: Option<u64>,
    title: Option<String>,
    summary: Option<String>,
    body: Option<String>,
    tags: Option<Vec<String>>,
    parents: Option<Vec<VertexSelector>>,
    archived: Option<bool>,
}

/// Arguments for `experiment.open` (see the `experiment.open` tool arm).
#[derive(Debug, Deserialize)]
struct ExperimentOpenArgs {
    hypothesis: String,
    title: String,
    summary: Option<String>,
    slug: Option<String>,
    tags: Option<Vec<String>>,
    parents: Option<Vec<VertexSelector>>,
}

/// Filters for `experiment.list`.
#[derive(Debug, Deserialize)]
struct ExperimentListArgs {
    frontier: Option<String>,
    hypothesis: Option<String>,
    tags: Option<Vec<String>>,
    include_archived: Option<bool>,
    status: Option<ExperimentStatus>,
    limit: Option<u32>,
}

/// Selects a single experiment by selector string.
#[derive(Debug, Deserialize)]
struct ExperimentSelectorArgs {
    experiment: String,
}

/// Patch-style arguments for `experiment.update`; `summary` uses
/// `NullableStringArg` so callers can clear it explicitly.
#[derive(Debug, Deserialize)]
struct ExperimentUpdateArgs {
    experiment: String,
    expected_revision: Option<u64>,
    title: Option<String>,
    summary: Option<NullableStringArg>,
    tags: Option<Vec<String>>,
    parents: Option<Vec<VertexSelector>>,
    archived: Option<bool>,
    outcome: Option<ExperimentOutcomeWire>,
}

/// Arguments for `experiment.close`: the recorded run outcome plus the
/// verdict and rationale.
#[derive(Debug, Deserialize)]
struct ExperimentCloseArgs {
    experiment: String,
    expected_revision: Option<u64>,
    backend: ExecutionBackend,
    command: CommandRecipe,
    dimensions: Option<Map<String, Value>>,
    primary_metric: MetricValueWire,
    supporting_metrics: Option<Vec<MetricValueWire>>,
    verdict: FrontierVerdict,
    rationale: String,
    analysis: Option<ExperimentAnalysisWire>,
}

/// Full outcome payload used when `experiment.update` replaces an outcome;
/// mirrors the closing fields of `ExperimentCloseArgs`.
#[derive(Debug, Deserialize)]
struct ExperimentOutcomeWire {
    backend: ExecutionBackend,
    command: CommandRecipe,
    dimensions: Option<Map<String, Value>>,
    primary_metric: MetricValueWire,
    supporting_metrics: Option<Vec<MetricValueWire>>,
    verdict: FrontierVerdict,
    rationale: String,
    analysis: Option<ExperimentAnalysisWire>,
}

/// Free-form analysis attached to an experiment outcome.
#[derive(Debug, Deserialize)]
struct ExperimentAnalysisWire {
    summary: String,
    body: String,
}

/// A single metric sample: key plus numeric value.
#[derive(Debug, Deserialize)]
struct MetricValueWire {
    key: String,
    value: f64,
}

/// Arguments for `artifact.record`.
#[derive(Debug, Deserialize)]
struct ArtifactRecordArgs {
    kind: ArtifactKind,
    label: String,
    summary: Option<String>,
    locator: String,
    media_type: Option<String>,
    slug: Option<String>,
    attachments: Option<Vec<AttachmentSelector>>,
}

/// Filters for `artifact.list`.
#[derive(Debug, Deserialize)]
struct ArtifactListArgs {
    frontier: Option<String>,
    kind: Option<ArtifactKind>,
    attached_to: Option<AttachmentSelector>,
    limit: Option<u32>,
}

/// Selects a single artifact by selector string.
#[derive(Debug, Deserialize)]
struct ArtifactSelectorArgs {
    artifact: String,
}

/// Patch-style arguments for `artifact.update`; `summary` and `media_type`
/// support explicit clearing via `NullableStringArg`.
#[derive(Debug, Deserialize)]
struct ArtifactUpdateArgs {
    artifact: String,
    expected_revision: Option<u64>,
    kind: Option<ArtifactKind>,
    label: Option<String>,
    summary: Option<NullableStringArg>,
    locator: Option<String>,
    media_type: Option<NullableStringArg>,
    attachments: Option<Vec<AttachmentSelector>>,
}

/// Tri-state string patch input. With `#[serde(untagged)]`, a JSON string
/// deserializes as `Set` (tried first) and JSON `null` as `Clear` (unit
/// accepts null per serde semantics); converted to `TextPatch` by
/// `nullable_text_patch_from_wire`.
#[derive(Debug, Deserialize)]
#[serde(untagged)]
enum NullableStringArg {
    Set(String),
    Clear(()),
}

/// Arguments for `metric.define`; `visibility` defaults to `Canonical`
/// at the call site when absent.
#[derive(Debug, Deserialize)]
struct MetricDefineArgs {
    key: String,
    unit: MetricUnit,
    objective: OptimizationObjective,
    visibility: Option<MetricVisibility>,
    description: Option<String>,
}

/// Arguments for `metric.keys`; `scope` defaults to `Live` at the call site.
#[derive(Debug, Deserialize)]
struct MetricKeysArgs {
    frontier: Option<String>,
    scope: Option<MetricScope>,
}

/// Arguments for `metric.best` ranking queries.
#[derive(Debug, Deserialize)]
struct MetricBestArgs {
    frontier: Option<String>,
    hypothesis: Option<String>,
    key: String,
    dimensions: Option<Map<String, Value>>,
    include_rejected: Option<bool>,
    limit: Option<u32>,
    order: Option<MetricRankOrder>,
}

/// Arguments for `run.dimension.define`.
#[derive(Debug, Deserialize)]
struct DimensionDefineArgs {
    key: String,
    value_type: FieldValueType,
    description: Option<String>,
}
+
fn deserialize<T: for<'de> Deserialize<'de>>(value: Value) -> Result<T, FaultRecord> {
serde_json::from_value(value).map_err(|error| {
FaultRecord::new(
@@ -888,256 +763,302 @@ fn deserialize<T: for<'de> Deserialize<'de>>(value: Value) -> Result<T, FaultRec
})
}
-fn project_status_output(full: &Value, schema: &ProjectSchema) -> ToolOutput {
- let concise = json!({
- "display_name": full["display_name"],
- "project_root": full["project_root"],
- "state_root": full["state_root"],
- "schema": schema_label(schema),
- "git_repo_detected": full["git_repo_detected"],
- });
- let git = if full["git_repo_detected"].as_bool().unwrap_or(false) {
- "detected"
- } else {
- "not detected"
- };
- ToolOutput::from_values(
- concise,
- full.clone(),
- [
- format!("project {}", value_summary(&full["display_name"])),
- format!("root: {}", value_summary(&full["project_root"])),
- format!("state: {}", value_summary(&full["state_root"])),
- format!("schema: {}", schema_label(schema)),
- format!("git: {git}"),
- ]
- .join("\n"),
- None,
- )
/// Build a one-shot closure that converts a store-layer error into a
/// `FaultRecord` attributed to `operation` at `FaultStage::Store`.
///
/// Classification policy:
/// * caller-avoidable failures (unknown selectors, duplicates, revision
///   mismatches, validation errors) -> `FaultKind::InvalidInput`;
/// * a store format-version mismatch -> `FaultKind::Unavailable`;
/// * infrastructure failures (I/O, SQL, JSON, time, core, UUID) ->
///   `FaultKind::Internal`.
///
/// The match is deliberately exhaustive (no `_` arm) so adding a `StoreError`
/// variant forces an explicit classification decision here.
fn store_fault<E>(operation: &str) -> impl FnOnce(E) -> FaultRecord + '_
where
    E: Into<StoreError>,
{
    move |error| {
        let error: StoreError = error.into();
        let kind = match error {
            // Everything the caller could have avoided with better input.
            StoreError::MissingProjectStore(_)
            | StoreError::AmbiguousProjectStoreDiscovery { .. }
            | StoreError::UnknownTag(_)
            | StoreError::UnknownMetricDefinition(_)
            | StoreError::UnknownRunDimension(_)
            | StoreError::UnknownFrontierSelector(_)
            | StoreError::UnknownHypothesisSelector(_)
            | StoreError::UnknownExperimentSelector(_)
            | StoreError::UnknownArtifactSelector(_)
            | StoreError::RevisionMismatch { .. }
            | StoreError::HypothesisBodyMustBeSingleParagraph
            | StoreError::ExperimentHypothesisRequired
            | StoreError::ExperimentAlreadyClosed(_)
            | StoreError::ExperimentStillOpen(_)
            | StoreError::CrossFrontierInfluence
            | StoreError::SelfEdge
            | StoreError::UnknownRoadmapHypothesis(_)
            | StoreError::ManualExperimentRequiresCommand
            | StoreError::MetricOrderRequired { .. }
            | StoreError::UnknownDimensionFilter(_)
            | StoreError::DuplicateTag(_)
            | StoreError::DuplicateMetricDefinition(_)
            | StoreError::DuplicateRunDimension(_)
            | StoreError::InvalidInput(_) => FaultKind::InvalidInput,
            // The store exists but this binary cannot serve its on-disk format.
            StoreError::IncompatibleStoreFormatVersion { .. } => FaultKind::Unavailable,
            // Infrastructure faults: not the caller's doing.
            StoreError::Io(_)
            | StoreError::Sql(_)
            | StoreError::Json(_)
            | StoreError::TimeParse(_)
            | StoreError::TimeFormat(_)
            | StoreError::Core(_)
            | StoreError::Uuid(_) => FaultKind::Internal,
        };
        FaultRecord::new(kind, FaultStage::Store, operation, error.to_string())
    }
}
-fn project_schema_output(schema: &ProjectSchema) -> Result<ToolOutput, FaultRecord> {
- let field_previews = schema
- .fields
- .iter()
- .take(8)
- .map(project_schema_field_value)
- .collect::<Vec<_>>();
- let concise = json!({
- "namespace": schema.namespace,
- "version": schema.version,
- "field_count": schema.fields.len(),
- "fields": field_previews,
- "truncated": schema.fields.len() > 8,
- });
- let mut lines = vec![
- format!("schema {}", schema_label(schema)),
- format!("{} field(s)", schema.fields.len()),
- ];
- for field in schema.fields.iter().take(8) {
- lines.push(format!(
- "{} [{}] {} {}",
- field.name,
- if field.node_classes.is_empty() {
- "any".to_owned()
- } else {
- field
- .node_classes
- .iter()
- .map(ToString::to_string)
- .collect::<Vec<_>>()
- .join(",")
- },
- field.presence.as_str(),
- field.role.as_str(),
- ));
- }
- if schema.fields.len() > 8 {
- lines.push(format!("... +{} more field(s)", schema.fields.len() - 8));
+fn with_fault<T, E>(result: Result<T, E>, operation: &str) -> Result<T, FaultRecord>
+where
+ E: Into<StoreError>,
+{
+ result.map_err(store_fault(operation))
+}
+
+fn tags_to_set(tags: Vec<String>) -> Result<BTreeSet<TagName>, StoreError> {
+ tags.into_iter()
+ .map(TagName::new)
+ .collect::<Result<BTreeSet<_>, _>>()
+ .map_err(StoreError::from)
+}
+
+fn metric_value_from_wire(
+ wire: MetricValueWire,
+ operation: &str,
+) -> Result<fidget_spinner_core::MetricValue, FaultRecord> {
+ Ok(fidget_spinner_core::MetricValue {
+ key: NonEmptyText::new(wire.key).map_err(store_fault(operation))?,
+ value: wire.value,
+ })
+}
+
+fn experiment_analysis_from_wire(
+ wire: ExperimentAnalysisWire,
+ operation: &str,
+) -> Result<ExperimentAnalysis, FaultRecord> {
+ Ok(ExperimentAnalysis {
+ summary: NonEmptyText::new(wire.summary).map_err(store_fault(operation))?,
+ body: NonEmptyText::new(wire.body).map_err(store_fault(operation))?,
+ })
+}
+
+fn experiment_outcome_patch_from_wire(
+ wire: ExperimentOutcomeWire,
+ operation: &str,
+) -> Result<ExperimentOutcomePatch, FaultRecord> {
+ Ok(ExperimentOutcomePatch {
+ backend: wire.backend,
+ command: wire.command,
+ dimensions: dimension_map_from_wire(wire.dimensions)?,
+ primary_metric: metric_value_from_wire(wire.primary_metric, operation)?,
+ supporting_metrics: wire
+ .supporting_metrics
+ .unwrap_or_default()
+ .into_iter()
+ .map(|metric| metric_value_from_wire(metric, operation))
+ .collect::<Result<Vec<_>, _>>()?,
+ verdict: wire.verdict,
+ rationale: NonEmptyText::new(wire.rationale).map_err(store_fault(operation))?,
+ analysis: wire
+ .analysis
+ .map(|analysis| experiment_analysis_from_wire(analysis, operation))
+ .transpose()?,
+ })
+}
+
+fn nullable_text_patch_from_wire(
+ patch: Option<NullableStringArg>,
+ operation: &str,
+) -> Result<Option<TextPatch<NonEmptyText>>, FaultRecord> {
+ match patch {
+ None => Ok(None),
+ Some(NullableStringArg::Clear(())) => Ok(Some(TextPatch::Clear)),
+ Some(NullableStringArg::Set(value)) => Ok(Some(TextPatch::Set(
+ NonEmptyText::new(value).map_err(store_fault(operation))?,
+ ))),
}
- detailed_tool_output(
- &concise,
- schema,
- lines.join("\n"),
- None,
- FaultStage::Worker,
- "tools/call:project.schema",
- )
}
-fn schema_field_upsert_output(
- schema: &ProjectSchema,
- field: &ProjectFieldSpec,
-) -> Result<ToolOutput, FaultRecord> {
- let concise = json!({
- "schema": schema.schema_ref(),
- "field": project_schema_field_value(field),
- });
- detailed_tool_output(
- &concise,
- &concise,
- format!(
- "upserted schema field {}\nschema: {}\nclasses: {}\npresence: {}\nseverity: {}\nrole: {}\ninference: {}{}",
- field.name,
- schema_label(schema),
- render_schema_node_classes(&field.node_classes),
- field.presence.as_str(),
- field.severity.as_str(),
- field.role.as_str(),
- field.inference_policy.as_str(),
- field
- .value_type
- .map(|value_type| format!("\nvalue_type: {}", value_type.as_str()))
- .unwrap_or_default(),
- ),
- None,
- FaultStage::Worker,
- "tools/call:schema.field.upsert",
- )
+fn dimension_map_from_wire(
+ dimensions: Option<Map<String, Value>>,
+) -> Result<BTreeMap<NonEmptyText, RunDimensionValue>, FaultRecord> {
+ dimensions
+ .unwrap_or_default()
+ .into_iter()
+ .map(|(key, value)| {
+ Ok((
+ NonEmptyText::new(key).map_err(store_fault("dimension-map"))?,
+ json_value_to_dimension(value)?,
+ ))
+ })
+ .collect()
+}
+
+fn json_value_to_dimension(value: Value) -> Result<RunDimensionValue, FaultRecord> {
+ match value {
+ Value::String(raw) => {
+ if time::OffsetDateTime::parse(&raw, &time::format_description::well_known::Rfc3339)
+ .is_ok()
+ {
+ NonEmptyText::new(raw)
+ .map(RunDimensionValue::Timestamp)
+ .map_err(store_fault("dimension-map"))
+ } else {
+ NonEmptyText::new(raw)
+ .map(RunDimensionValue::String)
+ .map_err(store_fault("dimension-map"))
+ }
+ }
+ Value::Number(number) => number
+ .as_f64()
+ .map(RunDimensionValue::Numeric)
+ .ok_or_else(|| {
+ FaultRecord::new(
+ FaultKind::InvalidInput,
+ FaultStage::Protocol,
+ "dimension-map",
+ "numeric dimension values must fit into f64",
+ )
+ }),
+ Value::Bool(value) => Ok(RunDimensionValue::Boolean(value)),
+ _ => Err(FaultRecord::new(
+ FaultKind::InvalidInput,
+ FaultStage::Protocol,
+ "dimension-map",
+ "dimension values must be string, number, boolean, or RFC3339 timestamp",
+ )),
+ }
}
-fn schema_field_remove_output(
- schema: &ProjectSchema,
- removed_count: u64,
+fn project_status_output(
+ status: &ProjectStatus,
+ operation: &str,
) -> Result<ToolOutput, FaultRecord> {
let concise = json!({
- "schema": schema.schema_ref(),
- "removed_count": removed_count,
+ "display_name": status.display_name,
+ "project_root": status.project_root,
+ "frontier_count": status.frontier_count,
+ "hypothesis_count": status.hypothesis_count,
+ "experiment_count": status.experiment_count,
+ "open_experiment_count": status.open_experiment_count,
+ "artifact_count": status.artifact_count,
});
detailed_tool_output(
&concise,
- &concise,
- format!(
- "removed {} schema field definition(s)\nschema: {}",
- removed_count,
- schema_label(schema),
- ),
+ status,
+ [
+ format!("project {}", status.display_name),
+ format!("root: {}", status.project_root),
+ format!("frontiers: {}", status.frontier_count),
+ format!("hypotheses: {}", status.hypothesis_count),
+ format!(
+ "experiments: {} (open {})",
+ status.experiment_count, status.open_experiment_count
+ ),
+ format!("artifacts: {}", status.artifact_count),
+ ]
+ .join("\n"),
None,
FaultStage::Worker,
- "tools/call:schema.field.remove",
+ operation,
)
}
-fn tag_add_output(tag: &TagRecord) -> Result<ToolOutput, FaultRecord> {
+fn tag_list_output(
+ tags: &[fidget_spinner_core::TagRecord],
+ operation: &str,
+) -> Result<ToolOutput, FaultRecord> {
let concise = json!({
- "name": tag.name,
- "description": tag.description,
+ "count": tags.len(),
+ "tags": tags,
});
detailed_tool_output(
&concise,
- tag,
- format!("registered tag {}\n{}", tag.name, tag.description),
- None,
- FaultStage::Worker,
- "tools/call:tag.add",
- )
-}
-
-fn tag_list_output(tags: &[TagRecord]) -> Result<ToolOutput, FaultRecord> {
- let concise = tags
- .iter()
- .map(|tag| {
- json!({
- "name": tag.name,
- "description": tag.description,
- })
- })
- .collect::<Vec<_>>();
- let mut lines = vec![format!("{} tag(s)", tags.len())];
- lines.extend(
- tags.iter()
- .map(|tag| format!("{}: {}", tag.name, tag.description)),
- );
- detailed_tool_output(
- &concise,
- &tags,
- lines.join("\n"),
- None,
- FaultStage::Worker,
- "tools/call:tag.list",
- )
-}
-
-fn frontier_list_output(frontiers: &[FrontierRecord]) -> Result<ToolOutput, FaultRecord> {
- let concise = frontiers
- .iter()
- .map(|frontier| {
- json!({
- "frontier_id": frontier.id,
- "label": frontier.label,
- "status": format!("{:?}", frontier.status).to_ascii_lowercase(),
- })
- })
- .collect::<Vec<_>>();
- let mut lines = vec![format!("{} frontier(s)", frontiers.len())];
- lines.extend(frontiers.iter().map(|frontier| {
- format!(
- "{} {} {}",
- frontier.id,
- format!("{:?}", frontier.status).to_ascii_lowercase(),
- frontier.label,
- )
- }));
- detailed_tool_output(
&concise,
- &frontiers,
- lines.join("\n"),
+ if tags.is_empty() {
+ "no tags".to_owned()
+ } else {
+ tags.iter()
+ .map(|tag| format!("{} — {}", tag.name, tag.description))
+ .collect::<Vec<_>>()
+ .join("\n")
+ },
None,
FaultStage::Worker,
- "tools/call:frontier.list",
+ operation,
)
}
-fn frontier_status_output(projection: &FrontierProjection) -> Result<ToolOutput, FaultRecord> {
- let concise = frontier_projection_summary_value(projection);
+fn frontier_list_output(
+ frontiers: &[FrontierSummary],
+ operation: &str,
+) -> Result<ToolOutput, FaultRecord> {
+ let concise = json!({ "count": frontiers.len(), "frontiers": frontiers });
detailed_tool_output(
&concise,
- projection,
- frontier_projection_text("frontier", projection),
- None,
- FaultStage::Worker,
- "tools/call:frontier.status",
- )
-}
-
-fn frontier_created_output(projection: &FrontierProjection) -> Result<ToolOutput, FaultRecord> {
- let concise = frontier_projection_summary_value(projection);
- detailed_tool_output(
&concise,
- projection,
- frontier_projection_text("created frontier", projection),
+ if frontiers.is_empty() {
+ "no frontiers".to_owned()
+ } else {
+ frontiers
+ .iter()
+ .map(|frontier| {
+ format!(
+ "{} — {} | active hypotheses {} | open experiments {}",
+ frontier.slug,
+ frontier.objective,
+ frontier.active_hypothesis_count,
+ frontier.open_experiment_count
+ )
+ })
+ .collect::<Vec<_>>()
+ .join("\n")
+ },
None,
FaultStage::Worker,
- "tools/call:frontier.init",
+ operation,
)
}
-fn created_node_output(
- action: &str,
- node: &fidget_spinner_core::DagNode,
- operation: &'static str,
+fn frontier_record_output(
+ frontier: &fidget_spinner_core::FrontierRecord,
+ operation: &str,
) -> Result<ToolOutput, FaultRecord> {
- let concise = node_brief_value(node);
- let mut lines = vec![format!("{action}: {} {}", node.class, node.id)];
- lines.push(format!("title: {}", node.title));
- if let Some(summary) = node.summary.as_ref() {
- lines.push(format!("summary: {summary}"));
- }
- if !node.tags.is_empty() {
- lines.push(format!("tags: {}", format_tags(&node.tags)));
- }
- if let Some(frontier_id) = node.frontier_id {
- lines.push(format!("frontier: {frontier_id}"));
+ let mut lines = vec![format!(
+ "frontier {} — {}",
+ frontier.slug, frontier.objective
+ )];
+ lines.push(format!("status: {}", frontier.status.as_str()));
+ if let Some(situation) = frontier.brief.situation.as_ref() {
+ lines.push(format!("situation: {}", situation));
+ }
+ if !frontier.brief.roadmap.is_empty() {
+ lines.push("roadmap:".to_owned());
+ for item in &frontier.brief.roadmap {
+ lines.push(format!(
+ " {}. {}{}",
+ item.rank,
+ item.hypothesis_id,
+ item.summary
+ .as_ref()
+ .map_or_else(String::new, |summary| format!(" — {summary}"))
+ ));
+ }
}
- if !node.diagnostics.items.is_empty() {
+ if !frontier.brief.unknowns.is_empty() {
lines.push(format!(
- "diagnostics: {}",
- diagnostic_summary_text(&node.diagnostics)
+ "unknowns: {}",
+ frontier
+ .brief
+ .unknowns
+ .iter()
+ .map(ToString::to_string)
+ .collect::<Vec<_>>()
+ .join("; ")
));
}
detailed_tool_output(
- &concise,
- node,
+ &frontier,
+ frontier,
lines.join("\n"),
None,
FaultStage::Worker,
@@ -1145,434 +1066,285 @@ fn created_node_output(
)
}
-fn node_list_output(nodes: &[NodeSummary]) -> Result<ToolOutput, FaultRecord> {
- let concise = nodes.iter().map(node_summary_value).collect::<Vec<_>>();
- let mut lines = vec![format!("{} node(s)", nodes.len())];
- lines.extend(nodes.iter().map(render_node_summary_line));
- detailed_tool_output(
- &concise,
- &nodes,
- lines.join("\n"),
- None,
- FaultStage::Worker,
- "tools/call:node.list",
- )
-}
-
-fn node_read_output(node: &fidget_spinner_core::DagNode) -> Result<ToolOutput, FaultRecord> {
- let visible_annotations = node
- .annotations
- .iter()
- .filter(|annotation| annotation.visibility == AnnotationVisibility::Visible)
- .map(|annotation| {
- let mut value = Map::new();
- if let Some(label) = annotation.label.as_ref() {
- let _ = value.insert("label".to_owned(), json!(label));
- }
- let _ = value.insert("body".to_owned(), json!(annotation.body));
- Value::Object(value)
- })
- .collect::<Vec<_>>();
- let visible_annotation_count = visible_annotations.len();
- let hidden_annotation_count = node
- .annotations
- .iter()
- .filter(|annotation| annotation.visibility == AnnotationVisibility::HiddenByDefault)
- .count();
- let mut concise = Map::new();
- let _ = concise.insert("id".to_owned(), json!(node.id));
- let _ = concise.insert("class".to_owned(), json!(node.class.as_str()));
- let _ = concise.insert("title".to_owned(), json!(node.title));
- if let Some(summary) = node.summary.as_ref() {
- let _ = concise.insert("summary".to_owned(), json!(summary));
- }
- if let Some(frontier_id) = node.frontier_id {
- let _ = concise.insert("frontier_id".to_owned(), json!(frontier_id));
- }
- if !node.tags.is_empty() {
- let _ = concise.insert(
- "tags".to_owned(),
- json!(
- node.tags
- .iter()
- .map(ToString::to_string)
- .collect::<Vec<_>>()
- ),
- );
- }
- if !node.payload.fields.is_empty() {
- let filtered_fields =
- filtered_payload_fields(node.class, &node.payload.fields).collect::<Vec<_>>();
- if !filtered_fields.is_empty() {
- let _ = concise.insert(
- "payload_field_count".to_owned(),
- json!(filtered_fields.len()),
- );
- if is_prose_node(node.class) {
- let _ = concise.insert(
- "payload_fields".to_owned(),
- json!(
- filtered_fields
- .iter()
- .take(6)
- .map(|(name, _)| (*name).clone())
- .collect::<Vec<_>>()
- ),
- );
- } else {
- let payload_preview = payload_preview_value(node.class, &node.payload.fields);
- if let Value::Object(object) = &payload_preview
- && !object.is_empty()
- {
- let _ = concise.insert("payload_preview".to_owned(), payload_preview);
- }
- }
- }
- }
- if !node.diagnostics.items.is_empty() {
- let _ = concise.insert(
- "diagnostics".to_owned(),
- diagnostic_summary_value(&node.diagnostics),
- );
- }
- if visible_annotation_count > 0 {
- let _ = concise.insert(
- "visible_annotations".to_owned(),
- Value::Array(visible_annotations),
- );
- }
- if hidden_annotation_count > 0 {
- let _ = concise.insert(
- "hidden_annotation_count".to_owned(),
- json!(hidden_annotation_count),
- );
- }
-
- let mut lines = vec![format!("{} {} {}", node.class, node.id, node.title)];
- if let Some(summary) = node.summary.as_ref() {
- lines.push(format!("summary: {summary}"));
- }
- if let Some(frontier_id) = node.frontier_id {
- lines.push(format!("frontier: {frontier_id}"));
- }
- if !node.tags.is_empty() {
- lines.push(format!("tags: {}", format_tags(&node.tags)));
+fn frontier_open_output(
+ projection: &FrontierOpenProjection,
+ operation: &str,
+) -> Result<ToolOutput, FaultRecord> {
+ let mut lines = vec![format!(
+ "frontier {} — {}",
+ projection.frontier.slug, projection.frontier.objective
+ )];
+ if let Some(situation) = projection.frontier.brief.situation.as_ref() {
+ lines.push(format!("situation: {}", situation));
+ }
+ if !projection.active_tags.is_empty() {
+ lines.push(format!(
+ "active tags: {}",
+ projection
+ .active_tags
+ .iter()
+ .map(ToString::to_string)
+ .collect::<Vec<_>>()
+ .join(", ")
+ ));
}
- lines.extend(payload_preview_lines(node.class, &node.payload.fields));
- if !node.diagnostics.items.is_empty() {
+ if !projection.active_metric_keys.is_empty() {
lines.push(format!(
- "diagnostics: {}",
- diagnostic_summary_text(&node.diagnostics)
+ "live metrics: {}",
+ projection
+ .active_metric_keys
+ .iter()
+ .map(|metric| metric.key.to_string())
+ .collect::<Vec<_>>()
+ .join(", ")
));
}
- if visible_annotation_count > 0 {
- lines.push(format!("visible annotations: {}", visible_annotation_count));
- for annotation in node
- .annotations
- .iter()
- .filter(|annotation| annotation.visibility == AnnotationVisibility::Visible)
- .take(4)
- {
- let label = annotation
- .label
+ if !projection.active_hypotheses.is_empty() {
+ lines.push("active hypotheses:".to_owned());
+ for state in &projection.active_hypotheses {
+ let status = state
+ .latest_closed_experiment
.as_ref()
- .map(|label| format!("{label}: "))
- .unwrap_or_default();
- lines.push(format!("annotation: {label}{}", annotation.body));
- }
- if visible_annotation_count > 4 {
+ .and_then(|experiment| experiment.verdict)
+ .map_or_else(
+ || "unjudged".to_owned(),
+ |verdict| verdict.as_str().to_owned(),
+ );
lines.push(format!(
- "... +{} more visible annotation(s)",
- visible_annotation_count - 4
+ " {} — {} | open {} | latest {}",
+ state.hypothesis.slug,
+ state.hypothesis.summary,
+ state.open_experiments.len(),
+ status
));
}
}
- if hidden_annotation_count > 0 {
- lines.push(format!("hidden annotations: {hidden_annotation_count}"));
+ if !projection.open_experiments.is_empty() {
+ lines.push("open experiments:".to_owned());
+ for experiment in &projection.open_experiments {
+ lines.push(format!(
+ " {} — {}",
+ experiment.slug,
+ experiment
+ .summary
+ .as_ref()
+ .map_or_else(|| experiment.title.to_string(), ToString::to_string)
+ ));
+ }
}
detailed_tool_output(
- &Value::Object(concise),
- node,
+ projection,
+ projection,
lines.join("\n"),
None,
FaultStage::Worker,
- "tools/call:node.read",
+ operation,
)
}
-fn experiment_close_output(
- store: &ProjectStore,
- receipt: &ExperimentReceipt,
+fn hypothesis_record_output(
+ hypothesis: &fidget_spinner_core::HypothesisRecord,
+ operation: &str,
) -> Result<ToolOutput, FaultRecord> {
- let concise = json!({
- "experiment_id": receipt.experiment.id,
- "frontier_id": receipt.experiment.frontier_id,
- "experiment_title": receipt.experiment.title,
- "verdict": metric_verdict_name(receipt.experiment.verdict),
- "run_id": receipt.run.run_id,
- "hypothesis_node_id": receipt.experiment.hypothesis_node_id,
- "decision_node_id": receipt.decision_node.id,
- "dimensions": run_dimensions_value(&receipt.experiment.result.dimensions),
- "primary_metric": metric_value(store, &receipt.experiment.result.primary_metric)?,
- });
detailed_tool_output(
- &concise,
- receipt,
- [
- format!(
- "closed experiment {} on frontier {}",
- receipt.experiment.id, receipt.experiment.frontier_id
- ),
- format!("title: {}", receipt.experiment.title),
- format!("hypothesis: {}", receipt.experiment.hypothesis_node_id),
- format!(
- "verdict: {}",
- metric_verdict_name(receipt.experiment.verdict)
- ),
- format!(
- "primary metric: {}",
- metric_text(store, &receipt.experiment.result.primary_metric)?
- ),
- format!(
- "dimensions: {}",
- render_dimension_kv(&receipt.experiment.result.dimensions)
- ),
- format!("run: {}", receipt.run.run_id),
- ]
- .join("\n"),
+ hypothesis,
+ hypothesis,
+ format!("hypothesis {} — {}", hypothesis.slug, hypothesis.summary),
None,
FaultStage::Worker,
- "tools/call:experiment.close",
+ operation,
)
}
-fn experiment_open_output(
- item: &OpenExperimentSummary,
- operation: &'static str,
- action: &'static str,
+fn hypothesis_list_output(
+ hypotheses: &[fidget_spinner_store_sqlite::HypothesisSummary],
+ operation: &str,
) -> Result<ToolOutput, FaultRecord> {
- let concise = json!({
- "experiment_id": item.id,
- "frontier_id": item.frontier_id,
- "hypothesis_node_id": item.hypothesis_node_id,
- "title": item.title,
- "summary": item.summary,
- });
+ let concise = json!({ "count": hypotheses.len(), "hypotheses": hypotheses });
detailed_tool_output(
&concise,
- item,
- [
- format!("{action} {}", item.id),
- format!("frontier: {}", item.frontier_id),
- format!("hypothesis: {}", item.hypothesis_node_id),
- format!("title: {}", item.title),
- item.summary
- .as_ref()
- .map(|summary| format!("summary: {summary}"))
- .unwrap_or_else(|| "summary: <none>".to_owned()),
- ]
- .join("\n"),
+ &concise,
+ if hypotheses.is_empty() {
+ "no hypotheses".to_owned()
+ } else {
+ hypotheses
+ .iter()
+ .map(|hypothesis| {
+ let verdict = hypothesis.latest_verdict.map_or_else(
+ || "unjudged".to_owned(),
+ |verdict| verdict.as_str().to_owned(),
+ );
+ format!(
+ "{} — {} | open {} | latest {}",
+ hypothesis.slug,
+ hypothesis.summary,
+ hypothesis.open_experiment_count,
+ verdict
+ )
+ })
+ .collect::<Vec<_>>()
+ .join("\n")
+ },
None,
FaultStage::Worker,
operation,
)
}
-fn experiment_list_output(items: &[OpenExperimentSummary]) -> Result<ToolOutput, FaultRecord> {
- let concise = items
- .iter()
- .map(|item| {
- json!({
- "experiment_id": item.id,
- "frontier_id": item.frontier_id,
- "hypothesis_node_id": item.hypothesis_node_id,
- "title": item.title,
- "summary": item.summary,
- })
- })
- .collect::<Vec<_>>();
- let mut lines = vec![format!("{} open experiment(s)", items.len())];
- lines.extend(items.iter().map(|item| {
+fn hypothesis_detail_output(
+ detail: &fidget_spinner_store_sqlite::HypothesisDetail,
+ operation: &str,
+) -> Result<ToolOutput, FaultRecord> {
+ let mut lines = vec![
format!(
- "{} {} | hypothesis={}",
- item.id, item.title, item.hypothesis_node_id,
- )
- }));
+ "hypothesis {} — {}",
+ detail.record.slug, detail.record.summary
+ ),
+ detail.record.body.to_string(),
+ ];
+ if !detail.record.tags.is_empty() {
+ lines.push(format!(
+ "tags: {}",
+ detail
+ .record
+ .tags
+ .iter()
+ .map(ToString::to_string)
+ .collect::<Vec<_>>()
+ .join(", ")
+ ));
+ }
+ lines.push(format!(
+ "parents: {} | children: {} | open experiments: {} | closed experiments: {} | artifacts: {}",
+ detail.parents.len(),
+ detail.children.len(),
+ detail.open_experiments.len(),
+ detail.closed_experiments.len(),
+ detail.artifacts.len()
+ ));
detailed_tool_output(
- &concise,
- &items,
+ detail,
+ detail,
lines.join("\n"),
None,
FaultStage::Worker,
- "tools/call:experiment.list",
+ operation,
)
}
-fn metric_keys_output(keys: &[MetricKeySummary]) -> Result<ToolOutput, FaultRecord> {
- let concise = keys
- .iter()
- .map(|key| {
- json!({
- "key": key.key,
- "source": key.source.as_str(),
- "experiment_count": key.experiment_count,
- "unit": key.unit.map(metric_unit_name),
- "objective": key.objective.map(metric_objective_name),
- "description": key.description,
- "requires_order": key.requires_order,
- })
- })
- .collect::<Vec<_>>();
- let mut lines = vec![format!("{} metric key(s)", keys.len())];
- lines.extend(keys.iter().map(|key| {
- let mut line = format!(
- "{} [{}] experiments={}",
- key.key,
- key.source.as_str(),
- key.experiment_count
+fn experiment_record_output(
+ experiment: &fidget_spinner_core::ExperimentRecord,
+ operation: &str,
+) -> Result<ToolOutput, FaultRecord> {
+ let mut line = format!("experiment {} — {}", experiment.slug, experiment.title);
+ if let Some(outcome) = experiment.outcome.as_ref() {
+ let _ = write!(
+ line,
+ " | {} {}={}",
+ outcome.verdict.as_str(),
+ outcome.primary_metric.key,
+ outcome.primary_metric.value
);
- if let Some(unit) = key.unit {
- line.push_str(format!(" unit={}", metric_unit_name(unit)).as_str());
- }
- if let Some(objective) = key.objective {
- line.push_str(format!(" objective={}", metric_objective_name(objective)).as_str());
- }
- if let Some(description) = key.description.as_ref() {
- line.push_str(format!(" | {description}").as_str());
- }
- if key.requires_order {
- line.push_str(" order=required");
- }
- line
- }));
+ } else {
+ let _ = write!(line, " | open");
+ }
detailed_tool_output(
- &concise,
- &keys,
- lines.join("\n"),
+ experiment,
+ experiment,
+ line,
None,
FaultStage::Worker,
- "tools/call:metric.keys",
+ operation,
)
}
-fn metric_best_output(
- items: &[fidget_spinner_store_sqlite::MetricBestEntry],
+fn experiment_list_output(
+ experiments: &[fidget_spinner_store_sqlite::ExperimentSummary],
+ operation: &str,
) -> Result<ToolOutput, FaultRecord> {
- let concise = items
- .iter()
- .enumerate()
- .map(|(index, item)| {
- json!({
- "rank": index + 1,
- "key": item.key,
- "source": item.source.as_str(),
- "value": item.value,
- "order": item.order.as_str(),
- "experiment_id": item.experiment_id,
- "experiment_title": item.experiment_title,
- "frontier_id": item.frontier_id,
- "hypothesis_node_id": item.hypothesis_node_id,
- "hypothesis_title": item.hypothesis_title,
- "verdict": metric_verdict_name(item.verdict),
- "run_id": item.run_id,
- "unit": item.unit.map(metric_unit_name),
- "objective": item.objective.map(metric_objective_name),
- "dimensions": run_dimensions_value(&item.dimensions),
- })
- })
- .collect::<Vec<_>>();
- let mut lines = vec![format!("{} ranked experiment(s)", items.len())];
- lines.extend(items.iter().enumerate().map(|(index, item)| {
- format!(
- "{}. {}={} [{}] {} | verdict={} | hypothesis={}",
- index + 1,
- item.key,
- item.value,
- item.source.as_str(),
- item.experiment_title,
- metric_verdict_name(item.verdict),
- item.hypothesis_title,
- )
- }));
- lines.extend(
- items
- .iter()
- .map(|item| format!(" dims: {}", render_dimension_kv(&item.dimensions))),
- );
+ let concise = json!({ "count": experiments.len(), "experiments": experiments });
detailed_tool_output(
&concise,
- &items,
- lines.join("\n"),
+ &concise,
+ if experiments.is_empty() {
+ "no experiments".to_owned()
+ } else {
+ experiments
+ .iter()
+ .map(|experiment| {
+ let status = experiment.verdict.map_or_else(
+ || experiment.status.as_str().to_owned(),
+ |verdict| verdict.as_str().to_owned(),
+ );
+ let metric = experiment
+ .primary_metric
+ .as_ref()
+ .map_or_else(String::new, |metric| {
+ format!(" | {}={}", metric.key, metric.value)
+ });
+ format!(
+ "{} — {} | {}{}",
+ experiment.slug, experiment.title, status, metric
+ )
+ })
+ .collect::<Vec<_>>()
+ .join("\n")
+ },
None,
FaultStage::Worker,
- "tools/call:metric.best",
+ operation,
)
}
-fn run_dimension_list_output(
- items: &[fidget_spinner_store_sqlite::RunDimensionSummary],
+fn experiment_detail_output(
+ detail: &fidget_spinner_store_sqlite::ExperimentDetail,
+ operation: &str,
) -> Result<ToolOutput, FaultRecord> {
- let concise = items
- .iter()
- .map(|item| {
- json!({
- "key": item.key,
- "value_type": item.value_type.as_str(),
- "description": item.description,
- "observed_run_count": item.observed_run_count,
- "distinct_value_count": item.distinct_value_count,
- "sample_values": item.sample_values,
- })
- })
- .collect::<Vec<_>>();
- let mut lines = vec![format!("{} run dimension(s)", items.len())];
- lines.extend(items.iter().map(|item| {
- let mut line = format!(
- "{} [{}] runs={} distinct={}",
- item.key,
- item.value_type.as_str(),
- item.observed_run_count,
- item.distinct_value_count
- );
- if let Some(description) = item.description.as_ref() {
- line.push_str(format!(" | {description}").as_str());
- }
- if !item.sample_values.is_empty() {
- line.push_str(
- format!(
- " | samples={}",
- item.sample_values
- .iter()
- .map(value_summary)
- .collect::<Vec<_>>()
- .join(", ")
- )
- .as_str(),
- );
- }
- line
- }));
+ let mut lines = vec![format!(
+ "experiment {} — {}",
+ detail.record.slug, detail.record.title
+ )];
+ lines.push(format!("hypothesis: {}", detail.owning_hypothesis.slug));
+ lines.push(format!(
+ "status: {}",
+ detail.record.outcome.as_ref().map_or_else(
+ || "open".to_owned(),
+ |outcome| outcome.verdict.as_str().to_owned()
+ )
+ ));
+ if let Some(outcome) = detail.record.outcome.as_ref() {
+ lines.push(format!(
+ "primary metric: {}={}",
+ outcome.primary_metric.key, outcome.primary_metric.value
+ ));
+ lines.push(format!("rationale: {}", outcome.rationale));
+ }
+ lines.push(format!(
+ "parents: {} | children: {} | artifacts: {}",
+ detail.parents.len(),
+ detail.children.len(),
+ detail.artifacts.len()
+ ));
detailed_tool_output(
- &concise,
- &items,
+ detail,
+ detail,
lines.join("\n"),
None,
FaultStage::Worker,
- "tools/call:run.dimension.list",
+ operation,
)
}
-fn json_created_output(
- headline: &str,
- structured: Value,
- operation: &'static str,
+fn artifact_record_output(
+ artifact: &fidget_spinner_core::ArtifactRecord,
+ operation: &str,
) -> Result<ToolOutput, FaultRecord> {
detailed_tool_output(
- &structured,
- &structured,
+ artifact,
+ artifact,
format!(
- "{headline}\n{}",
- crate::to_pretty_json(&structured).map_err(store_fault(operation))?
+ "artifact {} — {} -> {}",
+ artifact.slug, artifact.label, artifact.locator
),
None,
FaultStage::Worker,
@@ -1580,938 +1352,146 @@ fn json_created_output(
)
}
-fn project_schema_field_value(field: &ProjectFieldSpec) -> Value {
- let mut value = Map::new();
- let _ = value.insert("name".to_owned(), json!(field.name));
- if !field.node_classes.is_empty() {
- let _ = value.insert(
- "node_classes".to_owned(),
- json!(
- field
- .node_classes
- .iter()
- .map(ToString::to_string)
- .collect::<Vec<_>>()
- ),
- );
- }
- let _ = value.insert("presence".to_owned(), json!(field.presence.as_str()));
- let _ = value.insert("severity".to_owned(), json!(field.severity.as_str()));
- let _ = value.insert("role".to_owned(), json!(field.role.as_str()));
- let _ = value.insert(
- "inference_policy".to_owned(),
- json!(field.inference_policy.as_str()),
- );
- if let Some(value_type) = field.value_type {
- let _ = value.insert("value_type".to_owned(), json!(value_type.as_str()));
- }
- Value::Object(value)
-}
-
-fn render_schema_node_classes(node_classes: &BTreeSet<NodeClass>) -> String {
- if node_classes.is_empty() {
- return "any".to_owned();
- }
- node_classes
- .iter()
- .map(ToString::to_string)
- .collect::<Vec<_>>()
- .join(", ")
-}
-
-fn frontier_projection_summary_value(projection: &FrontierProjection) -> Value {
- json!({
- "frontier_id": projection.frontier.id,
- "label": projection.frontier.label,
- "status": format!("{:?}", projection.frontier.status).to_ascii_lowercase(),
- "open_experiment_count": projection.open_experiment_count,
- "completed_experiment_count": projection.completed_experiment_count,
- "verdict_counts": projection.verdict_counts,
- })
-}
-
-fn frontier_projection_text(prefix: &str, projection: &FrontierProjection) -> String {
- [
- format!(
- "{prefix} {} {}",
- projection.frontier.id, projection.frontier.label
- ),
- format!(
- "status: {}",
- format!("{:?}", projection.frontier.status).to_ascii_lowercase()
- ),
- format!("open experiments: {}", projection.open_experiment_count),
- format!(
- "completed experiments: {}",
- projection.completed_experiment_count
- ),
- format!(
- "verdicts: accepted={} kept={} parked={} rejected={}",
- projection.verdict_counts.accepted,
- projection.verdict_counts.kept,
- projection.verdict_counts.parked,
- projection.verdict_counts.rejected,
- ),
- ]
- .join("\n")
-}
-
-fn node_summary_value(node: &NodeSummary) -> Value {
- let mut value = Map::new();
- let _ = value.insert("id".to_owned(), json!(node.id));
- let _ = value.insert("class".to_owned(), json!(node.class.as_str()));
- let _ = value.insert("title".to_owned(), json!(node.title));
- if let Some(summary) = node.summary.as_ref() {
- let _ = value.insert("summary".to_owned(), json!(summary));
- }
- if let Some(frontier_id) = node.frontier_id {
- let _ = value.insert("frontier_id".to_owned(), json!(frontier_id));
- }
- if !node.tags.is_empty() {
- let _ = value.insert(
- "tags".to_owned(),
- json!(
- node.tags
- .iter()
- .map(ToString::to_string)
- .collect::<Vec<_>>()
- ),
- );
- }
- if node.archived {
- let _ = value.insert("archived".to_owned(), json!(true));
- }
- if node.diagnostic_count > 0 {
- let _ = value.insert("diagnostic_count".to_owned(), json!(node.diagnostic_count));
- }
- if node.hidden_annotation_count > 0 {
- let _ = value.insert(
- "hidden_annotation_count".to_owned(),
- json!(node.hidden_annotation_count),
- );
- }
- Value::Object(value)
-}
-
-fn node_brief_value(node: &fidget_spinner_core::DagNode) -> Value {
- let mut value = Map::new();
- let _ = value.insert("id".to_owned(), json!(node.id));
- let _ = value.insert("class".to_owned(), json!(node.class.as_str()));
- let _ = value.insert("title".to_owned(), json!(node.title));
- if let Some(summary) = node.summary.as_ref() {
- let _ = value.insert("summary".to_owned(), json!(summary));
- }
- if let Some(frontier_id) = node.frontier_id {
- let _ = value.insert("frontier_id".to_owned(), json!(frontier_id));
- }
- if !node.tags.is_empty() {
- let _ = value.insert(
- "tags".to_owned(),
- json!(
- node.tags
- .iter()
- .map(ToString::to_string)
- .collect::<Vec<_>>()
- ),
- );
- }
- if !node.diagnostics.items.is_empty() {
- let _ = value.insert(
- "diagnostics".to_owned(),
- diagnostic_summary_value(&node.diagnostics),
- );
- }
- Value::Object(value)
-}
-
-fn render_node_summary_line(node: &NodeSummary) -> String {
- let mut line = format!("{} {} {}", node.class, node.id, node.title);
- if let Some(summary) = node.summary.as_ref() {
- line.push_str(format!(" | {summary}").as_str());
- }
- if let Some(frontier_id) = node.frontier_id {
- line.push_str(format!(" | frontier={frontier_id}").as_str());
- }
- if !node.tags.is_empty() {
- line.push_str(format!(" | tags={}", format_tags(&node.tags)).as_str());
- }
- if node.diagnostic_count > 0 {
- line.push_str(format!(" | diag={}", node.diagnostic_count).as_str());
- }
- if node.hidden_annotation_count > 0 {
- line.push_str(format!(" | hidden-ann={}", node.hidden_annotation_count).as_str());
- }
- if node.archived {
- line.push_str(" | archived");
- }
- line
-}
-
-fn diagnostic_summary_value(diagnostics: &fidget_spinner_core::NodeDiagnostics) -> Value {
- let tally = diagnostic_tally(diagnostics);
- json!({
- "admission": match diagnostics.admission {
- AdmissionState::Admitted => "admitted",
- AdmissionState::Rejected => "rejected",
- },
- "count": tally.total,
- "error_count": tally.errors,
- "warning_count": tally.warnings,
- "info_count": tally.infos,
- })
-}
-
-fn diagnostic_summary_text(diagnostics: &fidget_spinner_core::NodeDiagnostics) -> String {
- let tally = diagnostic_tally(diagnostics);
- let mut parts = vec![format!("{}", tally.total)];
- if tally.errors > 0 {
- parts.push(format!("{} error", tally.errors));
- }
- if tally.warnings > 0 {
- parts.push(format!("{} warning", tally.warnings));
- }
- if tally.infos > 0 {
- parts.push(format!("{} info", tally.infos));
- }
- format!(
- "{} ({})",
- match diagnostics.admission {
- AdmissionState::Admitted => "admitted",
- AdmissionState::Rejected => "rejected",
+fn artifact_list_output(
+ artifacts: &[fidget_spinner_store_sqlite::ArtifactSummary],
+ operation: &str,
+) -> Result<ToolOutput, FaultRecord> {
+ let concise = json!({ "count": artifacts.len(), "artifacts": artifacts });
+ detailed_tool_output(
+ &concise,
+ &concise,
+ if artifacts.is_empty() {
+ "no artifacts".to_owned()
+ } else {
+ artifacts
+ .iter()
+ .map(|artifact| {
+ format!(
+ "{} — {} -> {}",
+ artifact.slug, artifact.label, artifact.locator
+ )
+ })
+ .collect::<Vec<_>>()
+ .join("\n")
},
- parts.join(", ")
+ None,
+ FaultStage::Worker,
+ operation,
)
}
-fn diagnostic_tally(diagnostics: &fidget_spinner_core::NodeDiagnostics) -> DiagnosticTally {
- diagnostics
- .items
- .iter()
- .fold(DiagnosticTally::default(), |mut tally, item| {
- tally.total += 1;
- match item.severity {
- DiagnosticSeverity::Error => tally.errors += 1,
- DiagnosticSeverity::Warning => tally.warnings += 1,
- DiagnosticSeverity::Info => tally.infos += 1,
- }
- tally
- })
-}
-
-fn payload_preview_value(class: NodeClass, fields: &Map<String, Value>) -> Value {
- let mut preview = Map::new();
- for (index, (name, value)) in filtered_payload_fields(class, fields).enumerate() {
- if index == 6 {
- let _ = preview.insert(
- "...".to_owned(),
- json!(format!("+{} more field(s)", fields.len() - index)),
- );
- break;
- }
- let _ = preview.insert(name.clone(), payload_value_preview(value));
- }
- Value::Object(preview)
-}
-
-fn payload_preview_lines(class: NodeClass, fields: &Map<String, Value>) -> Vec<String> {
- let filtered = filtered_payload_fields(class, fields).collect::<Vec<_>>();
- if filtered.is_empty() {
- return Vec::new();
- }
- if is_prose_node(class) {
- let preview_names = filtered
- .iter()
- .take(6)
- .map(|(name, _)| (*name).clone())
- .collect::<Vec<_>>();
- let mut lines = vec![format!("payload fields: {}", preview_names.join(", "))];
- if filtered.len() > preview_names.len() {
- lines.push(format!(
- "payload fields: +{} more field(s)",
- filtered.len() - preview_names.len()
- ));
- }
- return lines;
- }
- let mut lines = vec![format!("payload fields: {}", filtered.len())];
- for (index, (name, value)) in filtered.iter().enumerate() {
- if index == 6 {
- lines.push(format!(
- "payload: +{} more field(s)",
- filtered.len() - index
- ));
- break;
- }
- lines.push(format!(
- "payload.{}: {}",
- name,
- value_summary(&payload_value_preview(value))
- ));
+fn artifact_detail_output(
+ detail: &fidget_spinner_store_sqlite::ArtifactDetail,
+ operation: &str,
+) -> Result<ToolOutput, FaultRecord> {
+ let mut lines = vec![format!(
+ "artifact {} — {} -> {}",
+ detail.record.slug, detail.record.label, detail.record.locator
+ )];
+ if !detail.attachments.is_empty() {
+ lines.push(format!("attachments: {}", detail.attachments.len()));
}
- lines
+ detailed_tool_output(
+ detail,
+ detail,
+ lines.join("\n"),
+ None,
+ FaultStage::Worker,
+ operation,
+ )
}
-fn filtered_payload_fields(
- class: NodeClass,
- fields: &Map<String, Value>,
-) -> impl Iterator<Item = (&String, &Value)> + '_ {
- fields.iter().filter(move |(name, _)| {
- !matches!(class, NodeClass::Note | NodeClass::Source) || name.as_str() != "body"
- })
+fn metric_keys_output(
+ keys: &[MetricKeySummary],
+ operation: &str,
+) -> Result<ToolOutput, FaultRecord> {
+ let concise = json!({ "count": keys.len(), "metrics": keys });
+ detailed_tool_output(
+ &concise,
+ &concise,
+ if keys.is_empty() {
+ "no metrics".to_owned()
+ } else {
+ keys.iter()
+ .map(|metric| {
+ format!(
+ "{} [{} {} {}] refs={}",
+ metric.key,
+ metric.unit.as_str(),
+ metric.objective.as_str(),
+ metric.visibility.as_str(),
+ metric.reference_count
+ )
+ })
+ .collect::<Vec<_>>()
+ .join("\n")
+ },
+ None,
+ FaultStage::Worker,
+ operation,
+ )
}
-fn payload_value_preview(value: &Value) -> Value {
- match value {
- Value::Null | Value::Bool(_) | Value::Number(_) => value.clone(),
- Value::String(text) => Value::String(truncated_inline_preview(text, 96)),
- Value::Array(items) => {
- let preview = items
+fn metric_best_output(
+ entries: &[MetricBestEntry],
+ operation: &str,
+) -> Result<ToolOutput, FaultRecord> {
+ let concise = json!({ "count": entries.len(), "entries": entries });
+ detailed_tool_output(
+ &concise,
+ &concise,
+ if entries.is_empty() {
+ "no matching experiments".to_owned()
+ } else {
+ entries
.iter()
- .take(3)
- .map(payload_value_preview)
- .collect::<Vec<_>>();
- if items.len() > 3 {
- json!({
- "items": preview,
- "truncated": true,
- "total_count": items.len(),
+ .enumerate()
+ .map(|(index, entry)| {
+ format!(
+ "{}. {} / {} = {} ({})",
+ index + 1,
+ entry.experiment.slug,
+ entry.hypothesis.slug,
+ entry.value,
+ entry.experiment.verdict.map_or_else(
+ || entry.experiment.status.as_str().to_owned(),
+ |verdict| verdict.as_str().to_owned()
+ )
+ )
})
- } else {
- Value::Array(preview)
- }
- }
- Value::Object(object) => {
- let mut preview = Map::new();
- for (index, (name, nested)) in object.iter().enumerate() {
- if index == 4 {
- let _ = preview.insert(
- "...".to_owned(),
- json!(format!("+{} more field(s)", object.len() - index)),
- );
- break;
- }
- let _ = preview.insert(name.clone(), payload_value_preview(nested));
- }
- Value::Object(preview)
- }
- }
-}
-
-fn is_prose_node(class: NodeClass) -> bool {
- matches!(class, NodeClass::Note | NodeClass::Source)
-}
-
-fn truncated_inline_preview(text: &str, limit: usize) -> String {
- let collapsed = libmcp::collapse_inline_whitespace(text);
- let truncated = libmcp::render::truncate_chars(&collapsed, Some(limit));
- if truncated.truncated {
- format!("{}...", truncated.text)
- } else {
- truncated.text
- }
-}
-
-fn metric_value(store: &ProjectStore, metric: &MetricValue) -> Result<Value, FaultRecord> {
- let definition = metric_definition(store, &metric.key)?;
- Ok(json!({
- "key": metric.key,
- "value": metric.value,
- "unit": metric_unit_name(definition.unit),
- "objective": metric_objective_name(definition.objective),
- }))
-}
-
-fn metric_text(store: &ProjectStore, metric: &MetricValue) -> Result<String, FaultRecord> {
- let definition = metric_definition(store, &metric.key)?;
- Ok(format!(
- "{}={} {} ({})",
- metric.key,
- metric.value,
- metric_unit_name(definition.unit),
- metric_objective_name(definition.objective),
- ))
-}
-
-fn metric_unit_name(unit: MetricUnit) -> &'static str {
- match unit {
- MetricUnit::Seconds => "seconds",
- MetricUnit::Bytes => "bytes",
- MetricUnit::Count => "count",
- MetricUnit::Ratio => "ratio",
- MetricUnit::Custom => "custom",
- }
-}
-
-fn metric_objective_name(objective: fidget_spinner_core::OptimizationObjective) -> &'static str {
- match objective {
- fidget_spinner_core::OptimizationObjective::Minimize => "minimize",
- fidget_spinner_core::OptimizationObjective::Maximize => "maximize",
- fidget_spinner_core::OptimizationObjective::Target => "target",
- }
-}
-
-fn metric_verdict_name(verdict: FrontierVerdict) -> &'static str {
- match verdict {
- FrontierVerdict::Accepted => "accepted",
- FrontierVerdict::Kept => "kept",
- FrontierVerdict::Parked => "parked",
- FrontierVerdict::Rejected => "rejected",
- }
-}
-
-fn run_dimensions_value(dimensions: &BTreeMap<NonEmptyText, RunDimensionValue>) -> Value {
- Value::Object(
- dimensions
- .iter()
- .map(|(key, value)| (key.to_string(), value.as_json()))
- .collect::<Map<String, Value>>(),
+ .collect::<Vec<_>>()
+ .join("\n")
+ },
+ None,
+ FaultStage::Worker,
+ operation,
)
}
-fn render_dimension_kv(dimensions: &BTreeMap<NonEmptyText, RunDimensionValue>) -> String {
- if dimensions.is_empty() {
- return "none".to_owned();
- }
- dimensions
- .iter()
- .map(|(key, value)| format!("{key}={}", value_summary(&value.as_json())))
- .collect::<Vec<_>>()
- .join(", ")
-}
-
-fn format_tags(tags: &BTreeSet<TagName>) -> String {
- tags.iter()
- .map(ToString::to_string)
- .collect::<Vec<_>>()
- .join(", ")
-}
-
-fn schema_label(schema: &ProjectSchema) -> String {
- format!("{}@{}", schema.namespace, schema.version)
-}
-
-fn value_summary(value: &Value) -> String {
- match value {
- Value::Null => "null".to_owned(),
- Value::Bool(flag) => flag.to_string(),
- Value::Number(number) => number.to_string(),
- Value::String(text) => text.clone(),
- Value::Array(items) => format!("{} item(s)", items.len()),
- Value::Object(object) => format!("{} field(s)", object.len()),
- }
-}
-
-#[derive(Default)]
-struct DiagnosticTally {
- total: usize,
- errors: usize,
- warnings: usize,
- infos: usize,
-}
-
-fn store_fault<E>(operation: &'static str) -> impl FnOnce(E) -> FaultRecord
-where
- E: std::fmt::Display,
-{
- move |error| {
- FaultRecord::new(
- classify_fault_kind(&error.to_string()),
- FaultStage::Store,
- operation,
- error.to_string(),
- )
- }
-}
-
-fn classify_fault_kind(message: &str) -> FaultKind {
- if message.contains("was not found")
- || message.contains("invalid")
- || message.contains("unknown")
- || message.contains("empty")
- || message.contains("already exists")
- || message.contains("require an explicit tag list")
- || message.contains("requires a non-empty summary")
- || message.contains("requires a non-empty string payload field `body`")
- || message.contains("requires an explicit order")
- || message.contains("is ambiguous across sources")
- || message.contains("has conflicting semantics")
- || message.contains("conflicts with existing definition")
- {
- FaultKind::InvalidInput
- } else {
- FaultKind::Internal
- }
-}
-
-fn tool_annotations(raw: Vec<WireAnnotation>) -> Result<Vec<NodeAnnotation>, StoreError> {
- raw.into_iter()
- .map(|annotation| {
- Ok(NodeAnnotation {
- id: fidget_spinner_core::AnnotationId::fresh(),
- visibility: if annotation.visible {
- AnnotationVisibility::Visible
- } else {
- AnnotationVisibility::HiddenByDefault
- },
- label: annotation.label.map(NonEmptyText::new).transpose()?,
- body: NonEmptyText::new(annotation.body)?,
- created_at: time::OffsetDateTime::now_utc(),
- })
- })
- .collect()
-}
-
-fn lineage_attachments(parents: Vec<String>) -> Result<Vec<EdgeAttachment>, StoreError> {
- parents
- .into_iter()
- .map(|parent| {
- Ok(EdgeAttachment {
- node_id: crate::parse_node_id(&parent)?,
- kind: fidget_spinner_core::EdgeKind::Lineage,
- direction: EdgeAttachmentDirection::ExistingToNew,
- })
- })
- .collect()
-}
-
-fn parse_tag_set(values: Vec<String>) -> Result<BTreeSet<TagName>, StoreError> {
- values
- .into_iter()
- .map(TagName::new)
- .collect::<Result<BTreeSet<_>, _>>()
- .map_err(StoreError::from)
-}
-
-fn metric_spec_from_wire(raw: WireMetricSpec) -> Result<MetricSpec, StoreError> {
- Ok(MetricSpec {
- metric_key: NonEmptyText::new(raw.key)?,
- unit: parse_metric_unit_name(&raw.unit)?,
- objective: crate::parse_optimization_objective(&raw.objective)?,
- })
-}
-
-fn metric_value_from_wire(raw: WireMetricValue) -> Result<MetricValue, StoreError> {
- Ok(MetricValue {
- key: NonEmptyText::new(raw.key)?,
- value: raw.value,
- })
-}
-
-fn experiment_analysis_from_wire(raw: WireAnalysis) -> Result<ExperimentAnalysisDraft, StoreError> {
- Ok(ExperimentAnalysisDraft {
- title: NonEmptyText::new(raw.title)?,
- summary: NonEmptyText::new(raw.summary)?,
- body: NonEmptyText::new(raw.body)?,
- })
-}
-
-fn metric_definition(store: &ProjectStore, key: &NonEmptyText) -> Result<MetricSpec, FaultRecord> {
- store
- .list_metric_definitions()
- .map_err(store_fault("tools/call:experiment.close"))?
- .into_iter()
- .find(|definition| definition.key == *key)
- .map(|definition| MetricSpec {
- metric_key: definition.key,
- unit: definition.unit,
- objective: definition.objective,
- })
- .ok_or_else(|| {
- FaultRecord::new(
- FaultKind::InvalidInput,
- FaultStage::Store,
- "tools/call:experiment.close",
- format!("metric `{key}` is not registered"),
- )
- })
-}
-
-fn coerce_tool_dimensions(
- store: &ProjectStore,
- raw_dimensions: BTreeMap<String, Value>,
- operation: &'static str,
-) -> Result<BTreeMap<NonEmptyText, RunDimensionValue>, FaultRecord> {
- store
- .coerce_run_dimensions(raw_dimensions)
- .map_err(store_fault(operation))
-}
-
-fn command_recipe_from_wire(
- raw: WireRunCommand,
- project_root: &Utf8Path,
-) -> Result<CommandRecipe, StoreError> {
- let working_directory = raw
- .working_directory
- .map(Utf8PathBuf::from)
- .unwrap_or_else(|| project_root.to_path_buf());
- CommandRecipe::new(
- working_directory,
- crate::to_text_vec(raw.argv)?,
- raw.env.into_iter().collect::<BTreeMap<_, _>>(),
+fn history_output(
+ history: &[EntityHistoryEntry],
+ operation: &str,
+) -> Result<ToolOutput, FaultRecord> {
+ let concise = json!({ "count": history.len(), "history": history });
+ detailed_tool_output(
+ &concise,
+ &concise,
+ if history.is_empty() {
+ "no history".to_owned()
+ } else {
+ history
+ .iter()
+ .map(|entry| {
+ format!(
+ "rev {} {} @ {}",
+ entry.revision, entry.event_kind, entry.occurred_at
+ )
+ })
+ .collect::<Vec<_>>()
+ .join("\n")
+ },
+ None,
+ FaultStage::Worker,
+ operation,
)
- .map_err(StoreError::from)
-}
-
-fn parse_node_class_name(raw: &str) -> Result<NodeClass, StoreError> {
- match raw {
- "contract" => Ok(NodeClass::Contract),
- "hypothesis" => Ok(NodeClass::Hypothesis),
- "run" => Ok(NodeClass::Run),
- "analysis" => Ok(NodeClass::Analysis),
- "decision" => Ok(NodeClass::Decision),
- "source" => Ok(NodeClass::Source),
- "note" => Ok(NodeClass::Note),
- other => Err(crate::invalid_input(format!(
- "unknown node class `{other}`"
- ))),
- }
-}
-
-fn parse_metric_unit_name(raw: &str) -> Result<MetricUnit, StoreError> {
- crate::parse_metric_unit(raw)
-}
-
-fn parse_metric_source_name(raw: &str) -> Result<MetricFieldSource, StoreError> {
- match raw {
- "run_metric" => Ok(MetricFieldSource::RunMetric),
- "hypothesis_payload" => Ok(MetricFieldSource::HypothesisPayload),
- "run_payload" => Ok(MetricFieldSource::RunPayload),
- "analysis_payload" => Ok(MetricFieldSource::AnalysisPayload),
- "decision_payload" => Ok(MetricFieldSource::DecisionPayload),
- other => Err(StoreError::Json(serde_json::Error::io(
- std::io::Error::new(
- std::io::ErrorKind::InvalidInput,
- format!("unknown metric source `{other}`"),
- ),
- ))),
- }
-}
-
-fn parse_metric_order_name(raw: &str) -> Result<MetricRankOrder, StoreError> {
- match raw {
- "asc" => Ok(MetricRankOrder::Asc),
- "desc" => Ok(MetricRankOrder::Desc),
- other => Err(StoreError::Json(serde_json::Error::io(
- std::io::Error::new(
- std::io::ErrorKind::InvalidInput,
- format!("unknown metric order `{other}`"),
- ),
- ))),
- }
-}
-
-fn parse_field_value_type_name(raw: &str) -> Result<FieldValueType, StoreError> {
- match raw {
- "string" => Ok(FieldValueType::String),
- "numeric" => Ok(FieldValueType::Numeric),
- "boolean" => Ok(FieldValueType::Boolean),
- "timestamp" => Ok(FieldValueType::Timestamp),
- other => Err(crate::invalid_input(format!(
- "unknown field value type `{other}`"
- ))),
- }
-}
-
-fn parse_diagnostic_severity_name(raw: &str) -> Result<DiagnosticSeverity, StoreError> {
- match raw {
- "error" => Ok(DiagnosticSeverity::Error),
- "warning" => Ok(DiagnosticSeverity::Warning),
- "info" => Ok(DiagnosticSeverity::Info),
- other => Err(crate::invalid_input(format!(
- "unknown diagnostic severity `{other}`"
- ))),
- }
-}
-
-fn parse_field_presence_name(raw: &str) -> Result<FieldPresence, StoreError> {
- match raw {
- "required" => Ok(FieldPresence::Required),
- "recommended" => Ok(FieldPresence::Recommended),
- "optional" => Ok(FieldPresence::Optional),
- other => Err(crate::invalid_input(format!(
- "unknown field presence `{other}`"
- ))),
- }
-}
-
-fn parse_field_role_name(raw: &str) -> Result<FieldRole, StoreError> {
- match raw {
- "index" => Ok(FieldRole::Index),
- "projection_gate" => Ok(FieldRole::ProjectionGate),
- "render_only" => Ok(FieldRole::RenderOnly),
- "opaque" => Ok(FieldRole::Opaque),
- other => Err(crate::invalid_input(format!(
- "unknown field role `{other}`"
- ))),
- }
-}
-
-fn parse_inference_policy_name(raw: &str) -> Result<InferencePolicy, StoreError> {
- match raw {
- "manual_only" => Ok(InferencePolicy::ManualOnly),
- "model_may_infer" => Ok(InferencePolicy::ModelMayInfer),
- other => Err(crate::invalid_input(format!(
- "unknown inference policy `{other}`"
- ))),
- }
-}
-
-fn parse_backend_name(raw: &str) -> Result<ExecutionBackend, StoreError> {
- match raw {
- "local_process" => Ok(ExecutionBackend::LocalProcess),
- "worktree_process" => Ok(ExecutionBackend::WorktreeProcess),
- "ssh_process" => Ok(ExecutionBackend::SshProcess),
- other => Err(crate::invalid_input(format!("unknown backend `{other}`"))),
- }
-}
-
-fn parse_verdict_name(raw: &str) -> Result<FrontierVerdict, StoreError> {
- match raw {
- "accepted" => Ok(FrontierVerdict::Accepted),
- "kept" => Ok(FrontierVerdict::Kept),
- "parked" => Ok(FrontierVerdict::Parked),
- "rejected" => Ok(FrontierVerdict::Rejected),
- other => Err(crate::invalid_input(format!("unknown verdict `{other}`"))),
- }
-}
-
-#[derive(Debug, Deserialize)]
-struct FrontierStatusToolArgs {
- frontier_id: String,
-}
-
-#[derive(Debug, Deserialize)]
-struct TagAddToolArgs {
- name: String,
- description: String,
-}
-
-#[derive(Debug, Deserialize)]
-struct FrontierInitToolArgs {
- label: String,
- objective: String,
- contract_title: String,
- contract_summary: Option<String>,
- benchmark_suites: Vec<String>,
- promotion_criteria: Vec<String>,
- primary_metric: WireMetricSpec,
- #[serde(default)]
- supporting_metrics: Vec<WireMetricSpec>,
-}
-
-#[derive(Debug, Deserialize)]
-struct NodeCreateToolArgs {
- class: String,
- frontier_id: Option<String>,
- title: String,
- summary: Option<String>,
- tags: Option<Vec<String>>,
- #[serde(default)]
- payload: Option<Map<String, Value>>,
- #[serde(default)]
- annotations: Vec<WireAnnotation>,
- #[serde(default)]
- parents: Vec<String>,
-}
-
-#[derive(Debug, Deserialize)]
-struct HypothesisRecordToolArgs {
- frontier_id: String,
- title: String,
- summary: String,
- body: String,
- #[serde(default)]
- annotations: Vec<WireAnnotation>,
- #[serde(default)]
- parents: Vec<String>,
-}
-
-#[derive(Debug, Deserialize)]
-struct NodeListToolArgs {
- frontier_id: Option<String>,
- class: Option<String>,
- #[serde(default)]
- tags: Vec<String>,
- #[serde(default)]
- include_archived: bool,
- limit: Option<u32>,
-}
-
-#[derive(Debug, Deserialize)]
-struct NodeReadToolArgs {
- node_id: String,
-}
-
-#[derive(Debug, Deserialize)]
-struct NodeAnnotateToolArgs {
- node_id: String,
- body: String,
- label: Option<String>,
- #[serde(default)]
- visible: bool,
-}
-
-#[derive(Debug, Deserialize)]
-struct NodeArchiveToolArgs {
- node_id: String,
-}
-
-#[derive(Debug, Deserialize)]
-struct QuickNoteToolArgs {
- frontier_id: Option<String>,
- title: String,
- summary: String,
- body: String,
- tags: Vec<String>,
- #[serde(default)]
- annotations: Vec<WireAnnotation>,
- #[serde(default)]
- parents: Vec<String>,
-}
-
-#[derive(Debug, Deserialize)]
-struct SourceRecordToolArgs {
- frontier_id: Option<String>,
- title: String,
- summary: String,
- body: String,
- tags: Option<Vec<String>>,
- #[serde(default)]
- annotations: Vec<WireAnnotation>,
- #[serde(default)]
- parents: Vec<String>,
-}
-
-#[derive(Debug, Deserialize)]
-struct SchemaFieldUpsertToolArgs {
- name: String,
- node_classes: Option<Vec<String>>,
- presence: String,
- severity: String,
- role: String,
- inference_policy: String,
- value_type: Option<String>,
-}
-
-#[derive(Debug, Deserialize)]
-struct SchemaFieldRemoveToolArgs {
- name: String,
- node_classes: Option<Vec<String>>,
-}
-
-#[derive(Debug, Deserialize)]
-struct MetricDefineToolArgs {
- key: String,
- unit: String,
- objective: String,
- description: Option<String>,
-}
-
-#[derive(Debug, Deserialize)]
-struct RunDimensionDefineToolArgs {
- key: String,
- value_type: String,
- description: Option<String>,
-}
-
-#[derive(Debug, Deserialize, Default)]
-struct MetricKeysToolArgs {
- frontier_id: Option<String>,
- source: Option<String>,
- dimensions: Option<BTreeMap<String, Value>>,
-}
-
-#[derive(Debug, Deserialize)]
-struct MetricBestToolArgs {
- key: String,
- frontier_id: Option<String>,
- source: Option<String>,
- dimensions: Option<BTreeMap<String, Value>>,
- order: Option<String>,
- limit: Option<u32>,
-}
-
-#[derive(Debug, Deserialize)]
-struct ExperimentOpenToolArgs {
- frontier_id: String,
- hypothesis_node_id: String,
- title: String,
- summary: Option<String>,
-}
-
-#[derive(Debug, Deserialize, Default)]
-struct ExperimentListToolArgs {
- frontier_id: Option<String>,
-}
-
-#[derive(Debug, Deserialize)]
-struct ExperimentReadToolArgs {
- experiment_id: String,
-}
-
-#[derive(Debug, Deserialize)]
-struct ExperimentCloseToolArgs {
- experiment_id: String,
- run: WireRun,
- primary_metric: WireMetricValue,
- #[serde(default)]
- supporting_metrics: Vec<WireMetricValue>,
- note: WireFrontierNote,
- verdict: String,
- decision_title: String,
- decision_rationale: String,
- analysis: Option<WireAnalysis>,
-}
-
-#[derive(Debug, Deserialize)]
-struct WireAnnotation {
- body: String,
- label: Option<String>,
- #[serde(default)]
- visible: bool,
-}
-
-#[derive(Debug, Deserialize)]
-struct WireMetricSpec {
- key: String,
- unit: String,
- objective: String,
-}
-
-#[derive(Debug, Deserialize)]
-struct WireMetricValue {
- key: String,
- value: f64,
-}
-
-#[derive(Debug, Deserialize)]
-struct WireRun {
- title: String,
- summary: Option<String>,
- backend: String,
- #[serde(default)]
- dimensions: BTreeMap<String, Value>,
- command: WireRunCommand,
-}
-
-#[derive(Debug, Deserialize)]
-struct WireAnalysis {
- title: String,
- summary: String,
- body: String,
-}
-
-#[derive(Debug, Deserialize)]
-struct WireRunCommand {
- working_directory: Option<String>,
- argv: Vec<String>,
- #[serde(default)]
- env: BTreeMap<String, String>,
-}
-
-#[derive(Debug, Deserialize)]
-struct WireFrontierNote {
- summary: String,
- #[serde(default)]
- next_hypotheses: Vec<String>,
}