swarm repositories / source
aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authormain <main@swarm.moe>2026-03-19 10:15:18 -0400
committermain <main@swarm.moe>2026-03-19 10:15:18 -0400
commit7b9bd8b42883f82b090718175b8316296ef18236 (patch)
tree16f2c70b0f630c7757d72a20bd90d17c2e3a8414
downloadfidget_spinner-7b9bd8b42883f82b090718175b8316296ef18236.zip
Initial Fidget Spinner MVP
-rw-r--r--.gitignore5
-rw-r--r--AGENTS.md34
-rw-r--r--Cargo.lock861
-rw-r--r--Cargo.toml112
-rw-r--r--README.md181
-rw-r--r--assets/codex-skills/fidget-spinner/SKILL.md55
-rw-r--r--assets/codex-skills/frontier-loop/SKILL.md129
-rw-r--r--check.py73
-rw-r--r--crates/fidget-spinner-cli/Cargo.toml22
-rw-r--r--crates/fidget-spinner-cli/src/bundled_skill.rs69
-rw-r--r--crates/fidget-spinner-cli/src/main.rs958
-rw-r--r--crates/fidget-spinner-cli/src/mcp/catalog.rs541
-rw-r--r--crates/fidget-spinner-cli/src/mcp/fault.rs99
-rw-r--r--crates/fidget-spinner-cli/src/mcp/host/binary.rs43
-rw-r--r--crates/fidget-spinner-cli/src/mcp/host/config.rs18
-rw-r--r--crates/fidget-spinner-cli/src/mcp/host/mod.rs8
-rw-r--r--crates/fidget-spinner-cli/src/mcp/host/process.rs246
-rw-r--r--crates/fidget-spinner-cli/src/mcp/host/runtime.rs719
-rw-r--r--crates/fidget-spinner-cli/src/mcp/mod.rs10
-rw-r--r--crates/fidget-spinner-cli/src/mcp/protocol.rs86
-rw-r--r--crates/fidget-spinner-cli/src/mcp/service.rs813
-rw-r--r--crates/fidget-spinner-cli/src/mcp/telemetry.rs103
-rw-r--r--crates/fidget-spinner-cli/src/mcp/worker.rs66
-rw-r--r--crates/fidget-spinner-cli/tests/mcp_hardening.rs424
-rw-r--r--crates/fidget-spinner-core/Cargo.toml19
-rw-r--r--crates/fidget-spinner-core/src/error.rs9
-rw-r--r--crates/fidget-spinner-core/src/id.rs46
-rw-r--r--crates/fidget-spinner-core/src/lib.rs26
-rw-r--r--crates/fidget-spinner-core/src/model.rs693
-rw-r--r--crates/fidget-spinner-store-sqlite/Cargo.toml21
-rw-r--r--crates/fidget-spinner-store-sqlite/src/lib.rs1810
-rw-r--r--docs/architecture.md527
-rw-r--r--docs/libgrid-dogfood.md202
-rw-r--r--docs/product-spec.md341
-rw-r--r--rust-toolchain.toml5
-rwxr-xr-xscripts/install-codex-skill.sh21
36 files changed, 9395 insertions, 0 deletions
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..c0efaa6
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,5 @@
+/target
+/.fidget_spinner
+.DS_Store
+*.swp
+*.swo
diff --git a/AGENTS.md b/AGENTS.md
new file mode 100644
index 0000000..a42e5c1
--- /dev/null
+++ b/AGENTS.md
@@ -0,0 +1,34 @@
+# Fidget Spinner
+
+Fidget Spinner is a local-first, agent-first experimental DAG for autonomous
+program optimization and research.
+
+Constraints that are part of the product:
+
+- no OAuth
+- no hosted control plane
+- no mandatory cloud resources
+- no managed-compute marketplace in the core design
+- DAG is canonical truth
+- frontier state is a derived projection
+- per-project state lives under `.fidget_spinner/`
+- project payload schemas are local and warning-heavy, not globally rigid
+- off-path nodes should remain cheap
+- core-path experiment closure should remain atomic
+
+Engineering posture:
+
+- root `Cargo.toml` owns lint policy and canonical check commands
+- every crate opts into `[lints] workspace = true`
+- pin an exact stable toolchain in `rust-toolchain.toml`
+- keep runners thin and orchestration-only
+- prefer precise domain types over loose bags of strings
+
+MVP target:
+
+- dogfood against `libgrid` worktrees
+- replace sprawling freeform experiment markdown with structured
+ contract/change/run/analysis/decision nodes plus cheap research/note side paths
+- make runs, comparisons, artifacts, and code snapshots first-class
+- bundle the frontier-loop skill with the MCP surface instead of treating it as
+ folklore
diff --git a/Cargo.lock b/Cargo.lock
new file mode 100644
index 0000000..aa09bdb
--- /dev/null
+++ b/Cargo.lock
@@ -0,0 +1,861 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 4
+
+[[package]]
+name = "anstream"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "824a212faf96e9acacdbd09febd34438f8f711fb84e09a8916013cd7815ca28d"
+dependencies = [
+ "anstyle",
+ "anstyle-parse",
+ "anstyle-query",
+ "anstyle-wincon",
+ "colorchoice",
+ "is_terminal_polyfill",
+ "utf8parse",
+]
+
+[[package]]
+name = "anstyle"
+version = "1.0.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "940b3a0ca603d1eade50a4846a2afffd5ef57a9feac2c0e2ec2e14f9ead76000"
+
+[[package]]
+name = "anstyle-parse"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "52ce7f38b242319f7cabaa6813055467063ecdc9d355bbb4ce0c68908cd8130e"
+dependencies = [
+ "utf8parse",
+]
+
+[[package]]
+name = "anstyle-query"
+version = "1.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "40c48f72fd53cd289104fc64099abca73db4166ad86ea0b4341abe65af83dadc"
+dependencies = [
+ "windows-sys",
+]
+
+[[package]]
+name = "anstyle-wincon"
+version = "3.0.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "291e6a250ff86cd4a820112fb8898808a366d8f9f58ce16d1f538353ad55747d"
+dependencies = [
+ "anstyle",
+ "once_cell_polyfill",
+ "windows-sys",
+]
+
+[[package]]
+name = "anyhow"
+version = "1.0.102"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7f202df86484c868dbad7eaa557ef785d5c66295e41b460ef922eca0723b842c"
+
+[[package]]
+name = "bitflags"
+version = "2.11.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "843867be96c8daad0d758b57df9392b6d8d271134fce549de6ce169ff98a92af"
+
+[[package]]
+name = "bumpalo"
+version = "3.20.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5d20789868f4b01b2f2caec9f5c4e0213b41e3e5702a50157d699ae31ced2fcb"
+
+[[package]]
+name = "camino"
+version = "1.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e629a66d692cb9ff1a1c664e41771b3dcaf961985a9774c0eb0bd1b51cf60a48"
+dependencies = [
+ "serde_core",
+]
+
+[[package]]
+name = "cc"
+version = "1.2.57"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7a0dd1ca384932ff3641c8718a02769f1698e7563dc6974ffd03346116310423"
+dependencies = [
+ "find-msvc-tools",
+ "shlex",
+]
+
+[[package]]
+name = "cfg-if"
+version = "1.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801"
+
+[[package]]
+name = "clap"
+version = "4.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b193af5b67834b676abd72466a96c1024e6a6ad978a1f484bd90b85c94041351"
+dependencies = [
+ "clap_builder",
+ "clap_derive",
+]
+
+[[package]]
+name = "clap_builder"
+version = "4.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "714a53001bf66416adb0e2ef5ac857140e7dc3a0c48fb28b2f10762fc4b5069f"
+dependencies = [
+ "anstream",
+ "anstyle",
+ "clap_lex",
+ "strsim",
+]
+
+[[package]]
+name = "clap_derive"
+version = "4.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1110bd8a634a1ab8cb04345d8d878267d57c3cf1b38d91b71af6686408bbca6a"
+dependencies = [
+ "heck",
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "clap_lex"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c8d4a3bb8b1e0c1050499d1815f5ab16d04f0959b233085fb31653fbfc9d98f9"
+
+[[package]]
+name = "colorchoice"
+version = "1.0.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1d07550c9036bf2ae0c684c4297d503f838287c83c53686d05370d0e139ae570"
+
+[[package]]
+name = "deranged"
+version = "0.5.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7cd812cc2bc1d69d4764bd80df88b4317eaef9e773c75226407d9bc0876b211c"
+dependencies = [
+ "powerfmt",
+ "serde_core",
+]
+
+[[package]]
+name = "dirs"
+version = "6.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c3e8aa94d75141228480295a7d0e7feb620b1a5ad9f12bc40be62411e38cce4e"
+dependencies = [
+ "dirs-sys",
+]
+
+[[package]]
+name = "dirs-sys"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e01a3366d27ee9890022452ee61b2b63a67e6f13f58900b651ff5665f0bb1fab"
+dependencies = [
+ "libc",
+ "option-ext",
+ "redox_users",
+ "windows-sys",
+]
+
+[[package]]
+name = "equivalent"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f"
+
+[[package]]
+name = "fallible-iterator"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2acce4a10f12dc2fb14a218589d4f1f62ef011b2d0cc4b3cb1bba8e94da14649"
+
+[[package]]
+name = "fallible-streaming-iterator"
+version = "0.1.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a"
+
+[[package]]
+name = "fidget-spinner-cli"
+version = "0.1.0"
+dependencies = [
+ "camino",
+ "clap",
+ "dirs",
+ "fidget-spinner-core",
+ "fidget-spinner-store-sqlite",
+ "serde",
+ "serde_json",
+ "time",
+ "uuid",
+]
+
+[[package]]
+name = "fidget-spinner-core"
+version = "0.1.0"
+dependencies = [
+ "camino",
+ "serde",
+ "serde_json",
+ "thiserror",
+ "time",
+ "uuid",
+]
+
+[[package]]
+name = "fidget-spinner-store-sqlite"
+version = "0.1.0"
+dependencies = [
+ "camino",
+ "fidget-spinner-core",
+ "rusqlite",
+ "serde",
+ "serde_json",
+ "thiserror",
+ "time",
+ "uuid",
+]
+
+[[package]]
+name = "find-msvc-tools"
+version = "0.1.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5baebc0774151f905a1a2cc41989300b1e6fbb29aff0ceffa1064fdd3088d582"
+
+[[package]]
+name = "foldhash"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2"
+
+[[package]]
+name = "getrandom"
+version = "0.2.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ff2abc00be7fca6ebc474524697ae276ad847ad0a6b3faa4bcb027e9a4614ad0"
+dependencies = [
+ "cfg-if",
+ "libc",
+ "wasi",
+]
+
+[[package]]
+name = "getrandom"
+version = "0.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0de51e6874e94e7bf76d726fc5d13ba782deca734ff60d5bb2fb2607c7406555"
+dependencies = [
+ "cfg-if",
+ "libc",
+ "r-efi",
+ "wasip2",
+ "wasip3",
+]
+
+[[package]]
+name = "hashbrown"
+version = "0.15.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1"
+dependencies = [
+ "foldhash",
+]
+
+[[package]]
+name = "hashbrown"
+version = "0.16.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100"
+
+[[package]]
+name = "hashlink"
+version = "0.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7382cf6263419f2d8df38c55d7da83da5c18aef87fc7a7fc1fb1e344edfe14c1"
+dependencies = [
+ "hashbrown 0.15.5",
+]
+
+[[package]]
+name = "heck"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"
+
+[[package]]
+name = "id-arena"
+version = "2.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3d3067d79b975e8844ca9eb072e16b31c3c1c36928edf9c6789548c524d0d954"
+
+[[package]]
+name = "indexmap"
+version = "2.13.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7714e70437a7dc3ac8eb7e6f8df75fd8eb422675fc7678aff7364301092b1017"
+dependencies = [
+ "equivalent",
+ "hashbrown 0.16.1",
+ "serde",
+ "serde_core",
+]
+
+[[package]]
+name = "is_terminal_polyfill"
+version = "1.70.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a6cb138bb79a146c1bd460005623e142ef0181e3d0219cb493e02f7d08a35695"
+
+[[package]]
+name = "itoa"
+version = "1.0.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2"
+
+[[package]]
+name = "js-sys"
+version = "0.3.91"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b49715b7073f385ba4bc528e5747d02e66cb39c6146efb66b781f131f0fb399c"
+dependencies = [
+ "once_cell",
+ "wasm-bindgen",
+]
+
+[[package]]
+name = "leb128fmt"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2"
+
+[[package]]
+name = "libc"
+version = "0.2.183"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b5b646652bf6661599e1da8901b3b9522896f01e736bad5f723fe7a3a27f899d"
+
+[[package]]
+name = "libredox"
+version = "0.1.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1744e39d1d6a9948f4f388969627434e31128196de472883b39f148769bfe30a"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "libsqlite3-sys"
+version = "0.35.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "133c182a6a2c87864fe97778797e46c7e999672690dc9fa3ee8e241aa4a9c13f"
+dependencies = [
+ "cc",
+ "pkg-config",
+ "vcpkg",
+]
+
+[[package]]
+name = "log"
+version = "0.4.29"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897"
+
+[[package]]
+name = "memchr"
+version = "2.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f8ca58f447f06ed17d5fc4043ce1b10dd205e060fb3ce5b979b8ed8e59ff3f79"
+
+[[package]]
+name = "num-conv"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cf97ec579c3c42f953ef76dbf8d55ac91fb219dde70e49aa4a6b7d74e9919050"
+
+[[package]]
+name = "once_cell"
+version = "1.21.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9f7c3e4beb33f85d45ae3e3a1792185706c8e16d043238c593331cc7cd313b50"
+
+[[package]]
+name = "once_cell_polyfill"
+version = "1.70.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "384b8ab6d37215f3c5301a95a4accb5d64aa607f1fcb26a11b5303878451b4fe"
+
+[[package]]
+name = "option-ext"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d"
+
+[[package]]
+name = "pkg-config"
+version = "0.3.32"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c"
+
+[[package]]
+name = "powerfmt"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391"
+
+[[package]]
+name = "prettyplease"
+version = "0.2.37"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b"
+dependencies = [
+ "proc-macro2",
+ "syn",
+]
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.106"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8fd00f0bb2e90d81d1044c2b32617f68fcb9fa3bb7640c23e9c748e53fb30934"
+dependencies = [
+ "unicode-ident",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.45"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "41f2619966050689382d2b44f664f4bc593e129785a36d6ee376ddf37259b924"
+dependencies = [
+ "proc-macro2",
+]
+
+[[package]]
+name = "r-efi"
+version = "6.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f8dcc9c7d52a811697d2151c701e0d08956f92b0e24136cf4cf27b57a6a0d9bf"
+
+[[package]]
+name = "redox_users"
+version = "0.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a4e608c6638b9c18977b00b475ac1f28d14e84b27d8d42f70e0bf1e3dec127ac"
+dependencies = [
+ "getrandom 0.2.17",
+ "libredox",
+ "thiserror",
+]
+
+[[package]]
+name = "rusqlite"
+version = "0.37.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "165ca6e57b20e1351573e3729b958bc62f0e48025386970b6e4d29e7a7e71f3f"
+dependencies = [
+ "bitflags",
+ "fallible-iterator",
+ "fallible-streaming-iterator",
+ "hashlink",
+ "libsqlite3-sys",
+ "smallvec",
+ "time",
+]
+
+[[package]]
+name = "rustversion"
+version = "1.0.22"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d"
+
+[[package]]
+name = "semver"
+version = "1.0.27"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2"
+
+[[package]]
+name = "serde"
+version = "1.0.228"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e"
+dependencies = [
+ "serde_core",
+ "serde_derive",
+]
+
+[[package]]
+name = "serde_core"
+version = "1.0.228"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad"
+dependencies = [
+ "serde_derive",
+]
+
+[[package]]
+name = "serde_derive"
+version = "1.0.228"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "serde_json"
+version = "1.0.149"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "83fc039473c5595ace860d8c4fafa220ff474b3fc6bfdb4293327f1a37e94d86"
+dependencies = [
+ "itoa",
+ "memchr",
+ "serde",
+ "serde_core",
+ "zmij",
+]
+
+[[package]]
+name = "shlex"
+version = "1.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64"
+
+[[package]]
+name = "smallvec"
+version = "1.15.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03"
+
+[[package]]
+name = "strsim"
+version = "0.11.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f"
+
+[[package]]
+name = "syn"
+version = "2.0.117"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e665b8803e7b1d2a727f4023456bbbbe74da67099c585258af0ad9c5013b9b99"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
+[[package]]
+name = "thiserror"
+version = "2.0.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4288b5bcbc7920c07a1149a35cf9590a2aa808e0bc1eafaade0b80947865fbc4"
+dependencies = [
+ "thiserror-impl",
+]
+
+[[package]]
+name = "thiserror-impl"
+version = "2.0.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ebc4ee7f67670e9b64d05fa4253e753e016c6c95ff35b89b7941d6b856dec1d5"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "time"
+version = "0.3.47"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "743bd48c283afc0388f9b8827b976905fb217ad9e647fae3a379a9283c4def2c"
+dependencies = [
+ "deranged",
+ "itoa",
+ "num-conv",
+ "powerfmt",
+ "serde_core",
+ "time-core",
+ "time-macros",
+]
+
+[[package]]
+name = "time-core"
+version = "0.1.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7694e1cfe791f8d31026952abf09c69ca6f6fa4e1a1229e18988f06a04a12dca"
+
+[[package]]
+name = "time-macros"
+version = "0.2.27"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2e70e4c5a0e0a8a4823ad65dfe1a6930e4f4d756dcd9dd7939022b5e8c501215"
+dependencies = [
+ "num-conv",
+ "time-core",
+]
+
+[[package]]
+name = "unicode-ident"
+version = "1.0.24"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e6e4313cd5fcd3dad5cafa179702e2b244f760991f45397d14d4ebf38247da75"
+
+[[package]]
+name = "unicode-xid"
+version = "0.2.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853"
+
+[[package]]
+name = "utf8parse"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821"
+
+[[package]]
+name = "uuid"
+version = "1.22.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a68d3c8f01c0cfa54a75291d83601161799e4a89a39e0929f4b0354d88757a37"
+dependencies = [
+ "getrandom 0.4.2",
+ "js-sys",
+ "serde_core",
+ "wasm-bindgen",
+]
+
+[[package]]
+name = "vcpkg"
+version = "0.2.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426"
+
+[[package]]
+name = "wasi"
+version = "0.11.1+wasi-snapshot-preview1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b"
+
+[[package]]
+name = "wasip2"
+version = "1.0.2+wasi-0.2.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9517f9239f02c069db75e65f174b3da828fe5f5b945c4dd26bd25d89c03ebcf5"
+dependencies = [
+ "wit-bindgen",
+]
+
+[[package]]
+name = "wasip3"
+version = "0.4.0+wasi-0.3.0-rc-2026-01-06"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5428f8bf88ea5ddc08faddef2ac4a67e390b88186c703ce6dbd955e1c145aca5"
+dependencies = [
+ "wit-bindgen",
+]
+
+[[package]]
+name = "wasm-bindgen"
+version = "0.2.114"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6532f9a5c1ece3798cb1c2cfdba640b9b3ba884f5db45973a6f442510a87d38e"
+dependencies = [
+ "cfg-if",
+ "once_cell",
+ "rustversion",
+ "wasm-bindgen-macro",
+ "wasm-bindgen-shared",
+]
+
+[[package]]
+name = "wasm-bindgen-macro"
+version = "0.2.114"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "18a2d50fcf105fb33bb15f00e7a77b772945a2ee45dcf454961fd843e74c18e6"
+dependencies = [
+ "quote",
+ "wasm-bindgen-macro-support",
+]
+
+[[package]]
+name = "wasm-bindgen-macro-support"
+version = "0.2.114"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "03ce4caeaac547cdf713d280eda22a730824dd11e6b8c3ca9e42247b25c631e3"
+dependencies = [
+ "bumpalo",
+ "proc-macro2",
+ "quote",
+ "syn",
+ "wasm-bindgen-shared",
+]
+
+[[package]]
+name = "wasm-bindgen-shared"
+version = "0.2.114"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "75a326b8c223ee17883a4251907455a2431acc2791c98c26279376490c378c16"
+dependencies = [
+ "unicode-ident",
+]
+
+[[package]]
+name = "wasm-encoder"
+version = "0.244.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "990065f2fe63003fe337b932cfb5e3b80e0b4d0f5ff650e6985b1048f62c8319"
+dependencies = [
+ "leb128fmt",
+ "wasmparser",
+]
+
+[[package]]
+name = "wasm-metadata"
+version = "0.244.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bb0e353e6a2fbdc176932bbaab493762eb1255a7900fe0fea1a2f96c296cc909"
+dependencies = [
+ "anyhow",
+ "indexmap",
+ "wasm-encoder",
+ "wasmparser",
+]
+
+[[package]]
+name = "wasmparser"
+version = "0.244.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "47b807c72e1bac69382b3a6fb3dbe8ea4c0ed87ff5629b8685ae6b9a611028fe"
+dependencies = [
+ "bitflags",
+ "hashbrown 0.15.5",
+ "indexmap",
+ "semver",
+]
+
+[[package]]
+name = "windows-link"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5"
+
+[[package]]
+name = "windows-sys"
+version = "0.61.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc"
+dependencies = [
+ "windows-link",
+]
+
+[[package]]
+name = "wit-bindgen"
+version = "0.51.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d7249219f66ced02969388cf2bb044a09756a083d0fab1e566056b04d9fbcaa5"
+dependencies = [
+ "wit-bindgen-rust-macro",
+]
+
+[[package]]
+name = "wit-bindgen-core"
+version = "0.51.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ea61de684c3ea68cb082b7a88508a8b27fcc8b797d738bfc99a82facf1d752dc"
+dependencies = [
+ "anyhow",
+ "heck",
+ "wit-parser",
+]
+
+[[package]]
+name = "wit-bindgen-rust"
+version = "0.51.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b7c566e0f4b284dd6561c786d9cb0142da491f46a9fbed79ea69cdad5db17f21"
+dependencies = [
+ "anyhow",
+ "heck",
+ "indexmap",
+ "prettyplease",
+ "syn",
+ "wasm-metadata",
+ "wit-bindgen-core",
+ "wit-component",
+]
+
+[[package]]
+name = "wit-bindgen-rust-macro"
+version = "0.51.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0c0f9bfd77e6a48eccf51359e3ae77140a7f50b1e2ebfe62422d8afdaffab17a"
+dependencies = [
+ "anyhow",
+ "prettyplease",
+ "proc-macro2",
+ "quote",
+ "syn",
+ "wit-bindgen-core",
+ "wit-bindgen-rust",
+]
+
+[[package]]
+name = "wit-component"
+version = "0.244.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9d66ea20e9553b30172b5e831994e35fbde2d165325bec84fc43dbf6f4eb9cb2"
+dependencies = [
+ "anyhow",
+ "bitflags",
+ "indexmap",
+ "log",
+ "serde",
+ "serde_derive",
+ "serde_json",
+ "wasm-encoder",
+ "wasm-metadata",
+ "wasmparser",
+ "wit-parser",
+]
+
+[[package]]
+name = "wit-parser"
+version = "0.244.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ecc8ac4bc1dc3381b7f59c34f00b67e18f910c2c0f50015669dde7def656a736"
+dependencies = [
+ "anyhow",
+ "id-arena",
+ "indexmap",
+ "log",
+ "semver",
+ "serde",
+ "serde_derive",
+ "serde_json",
+ "unicode-xid",
+ "wasmparser",
+]
+
+[[package]]
+name = "zmij"
+version = "1.0.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b8848ee67ecc8aedbaf3e4122217aff892639231befc6a1b58d29fff4c2cabaa"
diff --git a/Cargo.toml b/Cargo.toml
new file mode 100644
index 0000000..d829377
--- /dev/null
+++ b/Cargo.toml
@@ -0,0 +1,112 @@
+[workspace]
+members = [
+ "crates/fidget-spinner-core",
+ "crates/fidget-spinner-store-sqlite",
+ "crates/fidget-spinner-cli",
+]
+resolver = "3"
+
+[workspace.package]
+edition = "2024"
+license = "MIT"
+rust-version = "1.94"
+version = "0.1.0"
+
+[workspace.dependencies]
+camino = { version = "1", features = ["serde1"] }
+clap = { version = "4.5", features = ["derive"] }
+dirs = "6"
+rusqlite = { version = "0.37", features = ["bundled", "time"] }
+serde = { version = "1.0", features = ["derive"] }
+serde_json = "1"
+thiserror = "2"
+time = { version = "0.3", features = ["formatting", "macros", "parsing", "serde"] }
+uuid = { version = "1", features = ["serde", "v7"] }
+
+[workspace.lints.rust]
+elided_lifetimes_in_paths = "deny"
+unexpected_cfgs = "deny"
+unsafe_code = "deny"
+unused_crate_dependencies = "warn"
+unused_lifetimes = "deny"
+unused_qualifications = "deny"
+unused_results = "deny"
+
+[workspace.lints.rustdoc]
+bare_urls = "deny"
+broken_intra_doc_links = "deny"
+
+[workspace.lints.clippy]
+all = { level = "deny", priority = -2 }
+pedantic = { level = "deny", priority = -1 }
+cargo = { level = "warn", priority = -3 }
+
+dbg_macro = "deny"
+expect_used = "deny"
+panic = "deny"
+todo = "deny"
+unimplemented = "deny"
+unwrap_used = "deny"
+allow_attributes_without_reason = "deny"
+
+cargo_common_metadata = "allow"
+missing_errors_doc = "allow"
+missing_panics_doc = "allow"
+multiple_crate_versions = "allow"
+
+items_after_statements = "allow"
+many_single_char_names = "allow"
+match_same_arms = "allow"
+module_name_repetitions = "allow"
+similar_names = "allow"
+struct_field_names = "allow"
+too_many_arguments = "allow"
+too_many_lines = "allow"
+unnested_or_patterns = "allow"
+
+cast_lossless = "allow"
+cast_possible_truncation = "allow"
+cast_possible_wrap = "allow"
+cast_precision_loss = "allow"
+cast_sign_loss = "allow"
+float_cmp = "allow"
+implicit_hasher = "allow"
+manual_let_else = "allow"
+map_unwrap_or = "allow"
+uninlined_format_args = "allow"
+
+ignored_unit_patterns = "allow"
+must_use_candidate = "allow"
+needless_pass_by_value = "allow"
+no_effect_underscore_binding = "allow"
+redundant_closure_for_method_calls = "allow"
+ref_option = "allow"
+return_self_not_must_use = "allow"
+trivially_copy_pass_by_ref = "allow"
+unused_async = "allow"
+used_underscore_binding = "allow"
+
+[workspace.metadata.rust-starter]
+format_command = ["cargo", "fmt", "--all", "--check"]
+clippy_command = [
+ "cargo",
+ "clippy",
+ "--workspace",
+ "--all-targets",
+ "--all-features",
+ "--",
+ "-D",
+ "warnings",
+]
+test_command = ["cargo", "test", "--workspace", "--all-targets", "--all-features"]
+doc_command = ["cargo", "doc", "--workspace", "--all-features", "--no-deps"]
+fix_command = [
+ "cargo",
+ "clippy",
+ "--fix",
+ "--workspace",
+ "--all-targets",
+ "--all-features",
+ "--allow-dirty",
+ "--allow-staged",
+]
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..2651497
--- /dev/null
+++ b/README.md
@@ -0,0 +1,181 @@
+# Fidget Spinner
+
+Fidget Spinner is a local-first, agent-first experimental DAG for autonomous
+program optimization and research.
+
+The current MVP is built around four ideas:
+
+- the DAG is canonical truth
+- frontier state is a derived projection
+- project payload schemas are local and flexible
+- core-path experiment closure is atomic
+
+The immediate target is not open-ended science in the abstract. It is the ugly,
+practical problem of replacing gigantic freeform experiment markdown in
+worktree-heavy optimization projects such as `libgrid`.
+
+## Current MVP
+
+Implemented today:
+
+- typed Rust core model
+- per-project SQLite store under `.fidget_spinner/`
+- project-local schema file
+- hidden and visible node annotations
+- core-path and off-path node classes
+- CLI for bootstrap and repair
+- hardened stdio MCP host via `mcp serve`
+- replay-aware disposable MCP worker runtime
+- MCP health and telemetry tools
+- bundled `fidget-spinner` base skill
+- bundled `frontier-loop` specialization
+
+Not implemented yet:
+
+- long-lived daemon
+- web UI
+- remote runners
+- strong markdown migration
+- cross-project indexing
+
+## Quickstart
+
+Initialize the current directory as a Fidget Spinner project:
+
+```bash
+cargo run -p fidget-spinner-cli -- init --project . --name fidget-spinner --namespace local.fidget-spinner
+```
+
+Create a frontier:
+
+```bash
+cargo run -p fidget-spinner-cli -- frontier init \
+ --project . \
+ --label "repo evolution" \
+ --objective "improve the local MVP" \
+ --contract-title "fidget spinner self-host frontier" \
+ --benchmark-suite smoke \
+ --promotion-criterion "cleaner and more capable" \
+ --primary-metric-key research_value \
+ --primary-metric-unit count \
+ --primary-metric-objective maximize
+```
+
+Record low-ceremony off-path work:
+
+```bash
+cargo run -p fidget-spinner-cli -- research add \
+ --project . \
+ --title "next feature slate" \
+ --body "Investigate pruning, richer projections, and libgrid schema presets."
+```
+
+Serve the local MCP surface in unbound mode:
+
+```bash
+cargo run -p fidget-spinner-cli -- mcp serve
+```
+
+Then bind the session from the client with:
+
+```json
+{"name":"project.bind","arguments":{"path":"<project-root-or-nested-path>"}}
+```
+
+Install the bundled skills into Codex:
+
+```bash
+./scripts/install-codex-skill.sh
+```
+
+## Store Layout
+
+Each initialized project gets:
+
+```text
+.fidget_spinner/
+ project.json
+ schema.json
+ state.sqlite
+ blobs/
+```
+
+`schema.json` is the model-facing contract for project-local payload fields and
+their validation tiers.
+
+## Model-Facing Surface
+
+The current MCP tools are:
+
+- `system.health`
+- `system.telemetry`
+- `project.bind`
+- `project.status`
+- `project.schema`
+- `frontier.list`
+- `frontier.status`
+- `frontier.init`
+- `node.create`
+- `change.record`
+- `node.list`
+- `node.read`
+- `node.annotate`
+- `node.archive`
+- `note.quick`
+- `research.record`
+- `experiment.close`
+- `skill.list`
+- `skill.show`
+
+Operationally, the MCP now runs as a stable host process that owns the public
+JSON-RPC session and delegates tool execution to an internal worker subprocess.
+Safe replay is only allowed for explicitly read-only operations and resources.
+Mutating tools are never auto-replayed after worker failure.
+
+The intended flow is:
+
+1. inspect `system.health`
+2. `project.bind` to the target project root or any nested path inside it
+3. read the schema and frontier
+4. pull context from the DAG
+5. use cheap off-path writes liberally
+6. record a `change` before core-path work
+7. seal core-path work with one atomic `experiment.close`
+
+## Dogfood Reality
+
+This repository is suitable for off-path dogfooding even though it is not
+currently a git repository.
+
+That means:
+
+- `research add`
+- `note quick`
+- `node annotate`
+- `mcp serve`
+
+all work here today.
+
+Full core-path experiment closure needs a real git-backed project, such as the
+target `libgrid` worktree.
+
+## Workspace Layout
+
+- `crates/fidget-spinner-core`: domain model and invariants
+- `crates/fidget-spinner-store-sqlite`: per-project store and atomic writes
+- `crates/fidget-spinner-cli`: CLI plus hardened stdio MCP host and worker
+- `assets/codex-skills/fidget-spinner`: bundled base skill asset
+- `assets/codex-skills/frontier-loop`: bundled skill asset
+
+## Docs
+
+- docs/product-spec.md (planned; not yet included in this commit)
+- [docs/architecture.md](docs/architecture.md)
+- [docs/libgrid-dogfood.md](docs/libgrid-dogfood.md)
+
+## Checks
+
+```bash
+./check.py
+./check.py deep
+```
diff --git a/assets/codex-skills/fidget-spinner/SKILL.md b/assets/codex-skills/fidget-spinner/SKILL.md
new file mode 100644
index 0000000..a61f412
--- /dev/null
+++ b/assets/codex-skills/fidget-spinner/SKILL.md
@@ -0,0 +1,55 @@
+---
+name: fidget-spinner
+description: Use Fidget Spinner as the local system of record for structured research and optimization work. Read health, schema, and frontier state first; prefer cheap off-path DAG writes; reserve atomic experiment closure for benchmarked core-path work.
+---
+
+# Fidget Spinner
+
+Use this skill when working inside a project initialized with Fidget Spinner or
+when the task is to inspect or mutate the project DAG through the packaged MCP.
+
+Start every session by reading `system.health`.
+
+If the session is unbound, or bound to the wrong repo, call `project.bind`
+with the target project root or any nested path inside that project.
+
+Then read:
+
+- `project.status`
+- `project.schema`
+- `frontier.list`
+- `frontier.status` for the active frontier
+
+If you need more context, pull it from:
+
+- `node.list`
+- `node.read`
+
+## Posture
+
+- the DAG is canonical truth
+- frontier state is a derived projection
+- project payload validation is warning-heavy at ingest
+- annotations are sidecar and hidden by default
+
+## Choose The Cheapest Tool
+
+- `research.record` for exploratory work, design notes, dead ends, and enabling ideas
+- `note.quick` for terse state pushes
+- `node.annotate` for scratch text that should stay off the main path
+- `change.record` before core-path work
+- `experiment.close` only when you have checkpoint, measured result, note, and verdict
+- `node.archive` to hide stale detritus without deleting evidence
+- `node.create` only as a true escape hatch
+
+## Discipline
+
+1. Pull context from the DAG, not from sprawling prompt prose.
+2. Prefer off-path records unless the work directly advances or judges the frontier.
+3. Do not let critical operational truth live only in annotations.
+4. If the MCP behaves oddly or resumes after interruption, inspect `system.health`
+ and `system.telemetry` before pushing further.
+5. Keep fetches narrow by default; widen only when stale or archived context is
+ actually needed.
+6. When the task becomes a true indefinite optimization push, pair this skill
+ with `frontier-loop`.
diff --git a/assets/codex-skills/frontier-loop/SKILL.md b/assets/codex-skills/frontier-loop/SKILL.md
new file mode 100644
index 0000000..b3ea44d
--- /dev/null
+++ b/assets/codex-skills/frontier-loop/SKILL.md
@@ -0,0 +1,129 @@
+---
+name: frontier-loop
+description: Run an indefinite autonomous experimental or optimization loop when progress can be measured. Use alongside `fidget-spinner` when the task is an open-ended frontier push with a real evaluation signal.
+---
+
+# Motivational Quote
+
+“The primary thing when you take a sword in your hands is your intention to cut
+the enemy, whatever the means. Whenever you parry, hit, spring, strike or touch
+the enemy's cutting sword, you must cut the enemy in the same movement. It is
+essential to attain this. If you think only of hitting, springing, striking or
+touching the enemy, you will not be able actually to cut him.”
+
+― Miyamoto Musashi
+
+# Summary
+
+Use this skill when the task is open-ended, but progress can still be measured
+credibly: a benchmark, a score, a win rate, a proof obligation, a test suite,
+or some other signal that distinguishes progress from churn.
+
+When using this skill, the `fidget-spinner` skill is the canonical
+recordkeeping surface. You MUST use that skill.
+
+DO NOT INVENT A PARALLEL LEDGER.
+DO NOT KEEP FRONTIER-RELEVANT STATE ONLY IN FREEFORM PROSE OR MEMORY.
+
+`frontier-loop` owns the control loop.
+`fidget-spinner` owns the ledger.
+
+Do not restate those rules ad hoc. Use the `fidget-spinner` surface.
+
+## Before Starting
+
+Do not begin the loop until these are reasonably clear:
+
+- the objective
+- the evaluation method
+- the current best known baseline
+
+Infer them from context when they are genuinely clear.
+If the evaluation signal is mushy, force it into focus before going rampant.
+
+Before the first serious iteration, read the frontier and recent evidence
+through `fidget-spinner`.
+
+On resume, after interruption, or after compaction, read them again.
+Do not trust memory when the DAG exists.
+
+## Loop
+
+LOOP UNTIL STOPPED BY THE USER OR BY A REAL EXTERNAL CONSTRAINT.
+
+DO NOT STOP BECAUSE YOU HAVE ONE PLAUSIBLE IDEA.
+DO NOT STOP BECAUSE YOU HAVE A CLEAN INTERMEDIATE RESULT.
+DO NOT STOP TO GIVE A PROGRESS REPORT.
+DO NOT EMIT A FINAL TURN UNLESS YOU ARE ACTUALLY BLOCKED OR EXPLICITLY TOLD TO STOP.
+
+ASSUME YOU ARE RUNNING OVERNIGHT.
+
+1. Start from the current best checkpoint or most credible live branch.
+2. Study existing evidence from `fidget-spinner`.
+3. Search outward if the local frontier looks exhausted or you are starting to take unambitious strides.
+4. Form a strong, falsifiable hypothesis.
+5. Make the change.
+6. Measure it.
+7. If the result is surprising, noisy, or broken, debug the implementation and
+ rerun only enough to understand the outcome.
+8. Record the outcome through `fidget-spinner`.
+9. Keep the line if it advances the objective or opens a genuinely strong new avenue.
+10. If the line is dead, record that too, re-anchor to the best known checkpoint,
+ and try a different attack.
+11. Repeat.
+
+I REPEAT: DO NOT STOP.
+
+## Research Posture
+
+Keep the search broad and continuous.
+
+Do not confine yourself to the obvious local neighborhood if better ideas may
+exist elsewhere. Use documentation, papers, source code, issue trackers,
+benchmarks, adjacent fields, and competing implementations whenever they can
+improve the next experiment.
+
+If progress stalls, widen the gyre instead of polishing the same weak idea to
+death.
+
+Prefer bold, testable moves over cosmetic churn.
+Do not overinvest in a line of attack that has ceased to earn belief.
+
+You are a researcher and explorer. Think big.
+
+Do not get stuck doing small tweaks.
+
+## Experiment Discipline
+
+Every real experiment must leave an auditable record in `fidget-spinner`.
+
+If something matters to the frontier, put it in the DAG.
+
+Use off-path records liberally for enabling work, side investigations, and dead
+ends.
+
+When a line becomes a real measured experiment, close it through the proper
+`fidget-spinner` path instead of improvising a chain of half-recorded steps.
+
+## Resume Discipline
+
+On interruption, restart, or strange MCP behavior:
+
+- check health through `fidget-spinner`
+- reread frontier state
+- reread the most recent relevant nodes
+- then continue the loop
+
+On compaction, reread the DAG record before choosing the next move.
+
+## And Remember
+
+Cut the enemy. Think, what is the shortest path from A to B, truly?
+
+Seize it.
+
+The end is all-consuming.
+
+Cut! Leap! Seize!
+
+DO NOT STOP!
diff --git a/check.py b/check.py
new file mode 100644
index 0000000..23b558e
--- /dev/null
+++ b/check.py
@@ -0,0 +1,73 @@
+#!/usr/bin/env python3
+from __future__ import annotations
+
+import argparse
+import subprocess
+import tomllib
+from pathlib import Path
+
+
+ROOT = Path(__file__).resolve().parent
+WORKSPACE_MANIFEST = ROOT / "Cargo.toml"
+
+
+def load_commands() -> dict[str, list[str]]:
+ workspace = tomllib.loads(WORKSPACE_MANIFEST.read_text(encoding="utf-8"))
+ metadata = workspace["workspace"]["metadata"]["rust-starter"]
+ commands: dict[str, list[str]] = {}
+ for key in (
+ "format_command",
+ "clippy_command",
+ "test_command",
+ "doc_command",
+ "fix_command",
+ ):
+ value = metadata.get(key)
+ if isinstance(value, list) and value and all(
+ isinstance(part, str) for part in value
+ ):
+ commands[key] = value
+ return commands
+
+
+def run(name: str, argv: list[str]) -> None:
+ print(f"[check] {name}: {' '.join(argv)}", flush=True)
+ proc = subprocess.run(argv, cwd=ROOT, check=False)
+ if proc.returncode != 0:
+ raise SystemExit(proc.returncode)
+
+
+def parse_args() -> argparse.Namespace:
+    """Parse the single optional positional mode: check (default), deep, or fix."""
+    parser = argparse.ArgumentParser(description="Thin Rust starter check runner")
+    parser.add_argument(
+        "mode",
+        nargs="?",
+        choices=("check", "deep", "fix"),
+        default="check",
+        help="Run the fast gate, include docs for the deep gate, or run the fix command.",
+    )
+    return parser.parse_args()
+
+
+def main() -> None:
+ commands = load_commands()
+ args = parse_args()
+
+ if args.mode == "fix":
+ run("fix", commands["fix_command"])
+ return
+
+ run("fmt", commands["format_command"])
+ run("clippy", commands["clippy_command"])
+ run("test", commands["test_command"])
+
+ if args.mode == "deep" and "doc_command" in commands:
+ run("doc", commands["doc_command"])
+
+
+# Allow direct execution as a script.
+if __name__ == "__main__":
+    try:
+        main()
+    except KeyboardInterrupt:
+        # 130 is the conventional exit status for SIGINT termination (128 + 2).
+        raise SystemExit(130)
+
diff --git a/crates/fidget-spinner-cli/Cargo.toml b/crates/fidget-spinner-cli/Cargo.toml
new file mode 100644
index 0000000..4ca70e9
--- /dev/null
+++ b/crates/fidget-spinner-cli/Cargo.toml
@@ -0,0 +1,22 @@
+[package]
+name = "fidget-spinner-cli"
+description = "Thin local entrypoint for Fidget Spinner"
+edition.workspace = true
+license.workspace = true
+publish = false
+rust-version.workspace = true
+version.workspace = true
+
+[dependencies]
+camino.workspace = true
+clap.workspace = true
+dirs.workspace = true
+fidget-spinner-core = { path = "../fidget-spinner-core" }
+fidget-spinner-store-sqlite = { path = "../fidget-spinner-store-sqlite" }
+serde.workspace = true
+serde_json.workspace = true
+time.workspace = true
+uuid.workspace = true
+
+[lints]
+workspace = true
diff --git a/crates/fidget-spinner-cli/src/bundled_skill.rs b/crates/fidget-spinner-cli/src/bundled_skill.rs
new file mode 100644
index 0000000..85760bc
--- /dev/null
+++ b/crates/fidget-spinner-cli/src/bundled_skill.rs
@@ -0,0 +1,69 @@
+use serde::Serialize;
+
+/// A skill compiled into the binary via `include_str!`.
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub(crate) struct BundledSkill {
+    /// Stable lookup name (matches the skill's asset directory name).
+    pub name: &'static str,
+    /// One-line description shown in listings.
+    pub description: &'static str,
+    /// Resource URI under the `fidget-spinner://skill/` scheme.
+    pub resource_uri: &'static str,
+    /// Full SKILL.md markdown body.
+    pub body: &'static str,
+}
+
+/// Serializable listing entry for a bundled skill (omits the body).
+#[derive(Clone, Copy, Debug, Eq, PartialEq, Serialize)]
+pub(crate) struct BundledSkillSummary {
+    pub name: &'static str,
+    pub description: &'static str,
+    pub resource_uri: &'static str,
+}
+
+impl BundledSkill {
+    /// Body-free listing view of this skill.
+    #[must_use]
+    pub const fn summary(self) -> BundledSkillSummary {
+        BundledSkillSummary {
+            name: self.name,
+            description: self.description,
+            resource_uri: self.resource_uri,
+        }
+    }
+}
+
+/// Skills compiled into the binary; index 0 is the default base skill and
+/// index 1 is the frontier-loop specialization.
+const BUNDLED_SKILLS: [BundledSkill; 2] = [
+    BundledSkill {
+        name: "fidget-spinner",
+        description: "Base skill for working inside a Fidget Spinner project through the local DAG and MCP surface.",
+        resource_uri: "fidget-spinner://skill/fidget-spinner",
+        body: include_str!("../../../assets/codex-skills/fidget-spinner/SKILL.md"),
+    },
+    BundledSkill {
+        name: "frontier-loop",
+        description: "Aggressive autonomous frontier-push specialization for Fidget Spinner.",
+        resource_uri: "fidget-spinner://skill/frontier-loop",
+        body: include_str!("../../../assets/codex-skills/frontier-loop/SKILL.md"),
+    },
+];
+
+/// The base `fidget-spinner` skill (index 0 of [`BUNDLED_SKILLS`]).
+#[must_use]
+pub(crate) const fn default_bundled_skill() -> BundledSkill {
+    BUNDLED_SKILLS[0]
+}
+
+/// The `frontier-loop` specialization (index 1 of [`BUNDLED_SKILLS`]).
+#[must_use]
+pub(crate) const fn frontier_loop_bundled_skill() -> BundledSkill {
+    BUNDLED_SKILLS[1]
+}
+
+/// Look up a bundled skill by its exact name.
+#[must_use]
+pub(crate) fn bundled_skill(name: &str) -> Option<BundledSkill> {
+    BUNDLED_SKILLS
+        .iter()
+        .find(|skill| skill.name == name)
+        .copied()
+}
+
+/// Listing views for every bundled skill, in bundle order.
+#[must_use]
+pub(crate) fn bundled_skill_summaries() -> Vec<BundledSkillSummary> {
+    BUNDLED_SKILLS.iter().map(|skill| skill.summary()).collect()
+}
diff --git a/crates/fidget-spinner-cli/src/main.rs b/crates/fidget-spinner-cli/src/main.rs
new file mode 100644
index 0000000..9b2b8ae
--- /dev/null
+++ b/crates/fidget-spinner-cli/src/main.rs
@@ -0,0 +1,958 @@
+mod bundled_skill;
+mod mcp;
+
+use std::collections::{BTreeMap, BTreeSet};
+use std::fs;
+use std::path::{Path, PathBuf};
+
+use camino::{Utf8Path, Utf8PathBuf};
+use clap::{Args, Parser, Subcommand, ValueEnum};
+use fidget_spinner_core::{
+ AnnotationVisibility, CodeSnapshotRef, CommandRecipe, ExecutionBackend, FrontierContract,
+ FrontierNote, FrontierVerdict, GitCommitHash, MetricObservation, MetricSpec, MetricUnit,
+ NodeAnnotation, NodeClass, NodePayload, NonEmptyText, OptimizationObjective,
+};
+use fidget_spinner_store_sqlite::{
+ CloseExperimentRequest, CreateFrontierRequest, CreateNodeRequest, EdgeAttachment,
+ EdgeAttachmentDirection, ListNodesQuery, ProjectStore, StoreError,
+};
+use serde::Serialize;
+use serde_json::{Map, Value, json};
+use uuid::Uuid;
+
+/// Top-level parser for the `fidget-spinner` binary.
+#[derive(Parser)]
+#[command(author, version, about = "Fidget Spinner local project CLI")]
+struct Cli {
+    #[command(subcommand)]
+    command: Command,
+}
+
+/// First-level subcommands; most fan out into their own subcommand enums.
+#[derive(Subcommand)]
+enum Command {
+    Init(InitArgs),
+    Schema {
+        #[command(subcommand)]
+        command: SchemaCommand,
+    },
+    Frontier {
+        #[command(subcommand)]
+        command: FrontierCommand,
+    },
+    Node {
+        #[command(subcommand)]
+        command: NodeCommand,
+    },
+    Note(NoteCommand),
+    Research(ResearchCommand),
+    Experiment {
+        #[command(subcommand)]
+        command: ExperimentCommand,
+    },
+    Mcp {
+        #[command(subcommand)]
+        command: McpCommand,
+    },
+    Skill {
+        #[command(subcommand)]
+        command: SkillCommand,
+    },
+}
+
+/// Arguments for `init`: bootstrap a `.fidget_spinner/` store in a directory.
+#[derive(Args)]
+struct InitArgs {
+    #[arg(long, default_value = ".")]
+    project: PathBuf,
+    /// Display name; defaults to the project directory name when omitted.
+    #[arg(long)]
+    name: Option<String>,
+    #[arg(long, default_value = "local.project")]
+    namespace: String,
+}
+
+/// `schema` subcommands.
+#[derive(Subcommand)]
+enum SchemaCommand {
+    Show(ProjectArg),
+}
+
+/// `frontier` subcommands.
+#[derive(Subcommand)]
+enum FrontierCommand {
+    Init(FrontierInitArgs),
+    Status(FrontierStatusArgs),
+}
+
+/// Arguments for `frontier init`: contract, evaluation protocol, and seed.
+#[derive(Args)]
+struct FrontierInitArgs {
+    #[command(flatten)]
+    project: ProjectArg,
+    #[arg(long)]
+    label: String,
+    #[arg(long)]
+    objective: String,
+    #[arg(long, default_value = "frontier contract")]
+    contract_title: String,
+    #[arg(long)]
+    contract_summary: Option<String>,
+    /// Repeatable; collected into the evaluation protocol's suite set.
+    #[arg(long = "benchmark-suite")]
+    benchmark_suites: Vec<String>,
+    /// Repeatable promotion criteria.
+    #[arg(long = "promotion-criterion")]
+    promotion_criteria: Vec<String>,
+    #[arg(long = "primary-metric-key")]
+    primary_metric_key: String,
+    #[arg(long = "primary-metric-unit", value_enum)]
+    primary_metric_unit: CliMetricUnit,
+    #[arg(long = "primary-metric-objective", value_enum)]
+    primary_metric_objective: CliOptimizationObjective,
+    #[arg(long = "seed-summary", default_value = "initial champion checkpoint")]
+    seed_summary: String,
+}
+
+/// Arguments for `frontier status`; omitting `--frontier` lists or infers.
+#[derive(Args)]
+struct FrontierStatusArgs {
+    #[command(flatten)]
+    project: ProjectArg,
+    #[arg(long)]
+    frontier: Option<String>,
+}
+
+/// `node` subcommands.
+#[derive(Subcommand)]
+enum NodeCommand {
+    Add(NodeAddArgs),
+    List(NodeListArgs),
+    Show(NodeShowArgs),
+    Annotate(NodeAnnotateArgs),
+    Archive(NodeArchiveArgs),
+}
+
+/// Arguments for `node add`: class, payload sources, annotations, lineage.
+#[derive(Args)]
+struct NodeAddArgs {
+    #[command(flatten)]
+    project: ProjectArg,
+    #[arg(long, value_enum)]
+    class: CliNodeClass,
+    #[arg(long)]
+    frontier: Option<String>,
+    #[arg(long)]
+    title: String,
+    #[arg(long)]
+    summary: Option<String>,
+    /// Inline JSON payload; alternative to `--payload-file` / `--field`.
+    #[arg(long = "payload-json")]
+    payload_json: Option<String>,
+    #[arg(long = "payload-file")]
+    payload_file: Option<PathBuf>,
+    /// Repeatable payload field pairs.
+    #[arg(long = "field")]
+    fields: Vec<String>,
+    /// Repeatable annotation bodies; stored hidden by default.
+    #[arg(long = "annotation")]
+    annotations: Vec<String>,
+    /// Repeatable parent node ids for lineage edges.
+    #[arg(long = "parent")]
+    parents: Vec<String>,
+}
+
+/// Arguments for `node list` filters and paging.
+#[derive(Args)]
+struct NodeListArgs {
+    #[command(flatten)]
+    project: ProjectArg,
+    #[arg(long)]
+    frontier: Option<String>,
+    #[arg(long, value_enum)]
+    class: Option<CliNodeClass>,
+    #[arg(long)]
+    include_archived: bool,
+    #[arg(long, default_value_t = 20)]
+    limit: u32,
+}
+
+/// Arguments for `node show`.
+#[derive(Args)]
+struct NodeShowArgs {
+    #[command(flatten)]
+    project: ProjectArg,
+    #[arg(long)]
+    node: String,
+}
+
+/// Arguments for `node annotate`; annotations are hidden unless `--visible`.
+#[derive(Args)]
+struct NodeAnnotateArgs {
+    #[command(flatten)]
+    project: ProjectArg,
+    #[arg(long)]
+    node: String,
+    #[arg(long)]
+    body: String,
+    #[arg(long)]
+    label: Option<String>,
+    #[arg(long)]
+    visible: bool,
+}
+
+/// Arguments for `node archive`.
+#[derive(Args)]
+struct NodeArchiveArgs {
+    #[command(flatten)]
+    project: ProjectArg,
+    #[arg(long)]
+    node: String,
+}
+
+/// `note` command wrapper.
+#[derive(Args)]
+struct NoteCommand {
+    #[command(subcommand)]
+    command: NoteSubcommand,
+}
+
+/// `note` subcommands.
+#[derive(Subcommand)]
+enum NoteSubcommand {
+    Quick(QuickNoteArgs),
+}
+
+/// `research` command wrapper.
+#[derive(Args)]
+struct ResearchCommand {
+    #[command(subcommand)]
+    command: ResearchSubcommand,
+}
+
+/// `research` subcommands.
+#[derive(Subcommand)]
+enum ResearchSubcommand {
+    Add(QuickResearchArgs),
+}
+
+/// Arguments for `note quick`: low-ceremony off-path note node.
+#[derive(Args)]
+struct QuickNoteArgs {
+    #[command(flatten)]
+    project: ProjectArg,
+    #[arg(long)]
+    frontier: Option<String>,
+    #[arg(long)]
+    title: String,
+    #[arg(long)]
+    body: String,
+    #[arg(long = "parent")]
+    parents: Vec<String>,
+}
+
+/// Arguments for `research add`: low-ceremony off-path research node.
+#[derive(Args)]
+struct QuickResearchArgs {
+    #[command(flatten)]
+    project: ProjectArg,
+    #[arg(long)]
+    frontier: Option<String>,
+    #[arg(long)]
+    title: String,
+    #[arg(long)]
+    body: String,
+    #[arg(long)]
+    summary: Option<String>,
+    #[arg(long = "parent")]
+    parents: Vec<String>,
+}
+
+/// `experiment` subcommands.
+#[derive(Subcommand)]
+enum ExperimentCommand {
+    Close(ExperimentCloseArgs),
+}
+
+/// `mcp` subcommands; `worker` is internal and hidden from help output.
+#[derive(Subcommand)]
+enum McpCommand {
+    Serve(McpServeArgs),
+    #[command(hide = true)]
+    Worker(McpWorkerArgs),
+}
+
+/// Arguments for `experiment close`: everything required for one atomic
+/// core-path closure (checkpoint, run, metrics, note, verdict, decision).
+#[derive(Args)]
+struct ExperimentCloseArgs {
+    #[command(flatten)]
+    project: ProjectArg,
+    #[arg(long)]
+    frontier: String,
+    #[arg(long = "base-checkpoint")]
+    base_checkpoint: String,
+    #[arg(long = "change-node")]
+    change_node: String,
+    #[arg(long = "candidate-summary")]
+    candidate_summary: String,
+    #[arg(long = "run-title")]
+    run_title: String,
+    #[arg(long = "run-summary")]
+    run_summary: Option<String>,
+    #[arg(long = "benchmark-suite")]
+    benchmark_suite: String,
+    #[arg(long = "backend", value_enum, default_value_t = CliExecutionBackend::Worktree)]
+    backend: CliExecutionBackend,
+    /// Working directory for the recorded command; defaults to project root.
+    #[arg(long = "cwd")]
+    working_directory: Option<PathBuf>,
+    #[arg(long = "argv")]
+    argv: Vec<String>,
+    #[arg(long = "env")]
+    env: Vec<String>,
+    #[arg(long = "primary-metric-key")]
+    primary_metric_key: String,
+    #[arg(long = "primary-metric-unit", value_enum)]
+    primary_metric_unit: CliMetricUnit,
+    #[arg(long = "primary-metric-objective", value_enum)]
+    primary_metric_objective: CliOptimizationObjective,
+    #[arg(long = "primary-metric-value")]
+    primary_metric_value: f64,
+    /// Repeatable supporting metric observations.
+    #[arg(long = "metric")]
+    metrics: Vec<String>,
+    #[arg(long)]
+    note: String,
+    #[arg(long = "next-hypothesis")]
+    next_hypotheses: Vec<String>,
+    #[arg(long = "verdict", value_enum)]
+    verdict: CliFrontierVerdict,
+    #[arg(long = "decision-title")]
+    decision_title: String,
+    #[arg(long = "decision-rationale")]
+    decision_rationale: String,
+}
+
+/// `skill` subcommands over the bundled skill assets.
+#[derive(Subcommand)]
+enum SkillCommand {
+    List,
+    Install(SkillInstallArgs),
+    Show(SkillShowArgs),
+}
+
+/// Arguments for `skill install`; name and destination default when omitted.
+#[derive(Args)]
+struct SkillInstallArgs {
+    #[arg(long)]
+    name: Option<String>,
+    #[arg(long)]
+    destination: Option<PathBuf>,
+}
+
+/// Arguments for `skill show`.
+#[derive(Args)]
+struct SkillShowArgs {
+    #[arg(long)]
+    name: Option<String>,
+}
+
+/// Shared `--project` flag, flattened into most commands.
+#[derive(Args)]
+struct ProjectArg {
+    #[arg(long, default_value = ".")]
+    project: PathBuf,
+}
+
+/// Arguments for `mcp serve`; without `--project` the session starts unbound.
+#[derive(Args)]
+struct McpServeArgs {
+    #[arg(long)]
+    project: Option<PathBuf>,
+}
+
+/// Arguments for the hidden internal `mcp worker` subprocess.
+#[derive(Args)]
+struct McpWorkerArgs {
+    #[arg(long)]
+    project: PathBuf,
+}
+
+/// CLI-facing mirror of the core node class enum (converted via `Into`).
+#[derive(Clone, Copy, Debug, Eq, PartialEq, ValueEnum)]
+enum CliNodeClass {
+    Contract,
+    Change,
+    Run,
+    Analysis,
+    Decision,
+    Research,
+    Enabling,
+    Note,
+}
+
+/// CLI-facing mirror of the core metric unit enum (converted via `Into`).
+#[derive(Clone, Copy, Debug, Eq, PartialEq, ValueEnum)]
+enum CliMetricUnit {
+    Seconds,
+    Bytes,
+    Count,
+    Ratio,
+    Custom,
+}
+
+/// CLI-facing mirror of the core optimization objective (converted via `Into`).
+#[derive(Clone, Copy, Debug, Eq, PartialEq, ValueEnum)]
+enum CliOptimizationObjective {
+    Minimize,
+    Maximize,
+    Target,
+}
+
+/// CLI-facing mirror of the core execution backend (converted via `Into`).
+#[derive(Clone, Copy, Debug, Eq, PartialEq, ValueEnum)]
+enum CliExecutionBackend {
+    Local,
+    Worktree,
+    Ssh,
+}
+
+/// CLI-facing mirror of the core frontier verdict (converted via `Into`).
+#[derive(Clone, Copy, Debug, Eq, PartialEq, ValueEnum)]
+enum CliFrontierVerdict {
+    PromoteToChampion,
+    KeepOnFrontier,
+    RevertToChampion,
+    ArchiveDeadEnd,
+    NeedsMoreEvidence,
+}
+
+/// Binary entrypoint: delegate to [`run`] and exit non-zero on error.
+fn main() {
+    if let Err(error) = run() {
+        eprintln!("error: {error}");
+        std::process::exit(1);
+    }
+}
+
+/// Parse the CLI and dispatch to the matching command handler.
+fn run() -> Result<(), StoreError> {
+    let cli = Cli::parse();
+    match cli.command {
+        Command::Init(args) => run_init(args),
+        Command::Schema { command } => match command {
+            SchemaCommand::Show(project) => {
+                let store = open_store(&project.project)?;
+                print_json(store.schema())
+            }
+        },
+        Command::Frontier { command } => match command {
+            FrontierCommand::Init(args) => run_frontier_init(args),
+            FrontierCommand::Status(args) => run_frontier_status(args),
+        },
+        Command::Node { command } => match command {
+            NodeCommand::Add(args) => run_node_add(args),
+            NodeCommand::List(args) => run_node_list(args),
+            NodeCommand::Show(args) => run_node_show(args),
+            NodeCommand::Annotate(args) => run_node_annotate(args),
+            NodeCommand::Archive(args) => run_node_archive(args),
+        },
+        Command::Note(command) => match command.command {
+            NoteSubcommand::Quick(args) => run_quick_note(args),
+        },
+        Command::Research(command) => match command.command {
+            ResearchSubcommand::Add(args) => run_quick_research(args),
+        },
+        Command::Experiment { command } => match command {
+            ExperimentCommand::Close(args) => run_experiment_close(args),
+        },
+        Command::Mcp { command } => match command {
+            McpCommand::Serve(args) => mcp::serve(args.project),
+            McpCommand::Worker(args) => mcp::serve_worker(args.project),
+        },
+        Command::Skill { command } => match command {
+            SkillCommand::List => print_json(&bundled_skill::bundled_skill_summaries()),
+            SkillCommand::Install(args) => run_skill_install(args),
+            SkillCommand::Show(args) => {
+                println!("{}", resolve_bundled_skill(args.name.as_deref())?.body);
+                Ok(())
+            }
+        },
+    }
+}
+
+/// Initialize the `.fidget_spinner/` store for a directory and report where
+/// project state now lives.
+fn run_init(args: InitArgs) -> Result<(), StoreError> {
+    let root = utf8_path(args.project);
+    let fallback = || "fidget-spinner-project".to_owned();
+    let name = args
+        .name
+        .unwrap_or_else(|| root.file_name().map_or_else(fallback, ToOwned::to_owned));
+    let store = ProjectStore::init(
+        &root,
+        NonEmptyText::new(name)?,
+        NonEmptyText::new(args.namespace)?,
+    )?;
+    println!("initialized {}", store.state_root());
+    println!("project: {}", store.config().display_name);
+    println!("schema: {}", store.state_root().join("schema.json"));
+    Ok(())
+}
+
+/// Create a frontier: capture a seed checkpoint from the current tree, then
+/// persist the contract, evaluation protocol, and promotion criteria.
+fn run_frontier_init(args: FrontierInitArgs) -> Result<(), StoreError> {
+    let mut store = open_store(&args.project.project)?;
+    let initial_checkpoint =
+        store.auto_capture_checkpoint(NonEmptyText::new(args.seed_summary)?)?;
+    let projection = store.create_frontier(CreateFrontierRequest {
+        label: NonEmptyText::new(args.label)?,
+        contract_title: NonEmptyText::new(args.contract_title)?,
+        contract_summary: args.contract_summary.map(NonEmptyText::new).transpose()?,
+        contract: FrontierContract {
+            objective: NonEmptyText::new(args.objective)?,
+            evaluation: fidget_spinner_core::EvaluationProtocol {
+                benchmark_suites: to_text_set(args.benchmark_suites)?,
+                primary_metric: MetricSpec {
+                    metric_key: NonEmptyText::new(args.primary_metric_key)?,
+                    unit: args.primary_metric_unit.into(),
+                    objective: args.primary_metric_objective.into(),
+                },
+                // The CLI currently captures only the primary metric spec.
+                supporting_metrics: BTreeSet::new(),
+            },
+            promotion_criteria: to_text_vec(args.promotion_criteria)?,
+        },
+        initial_checkpoint,
+    })?;
+    print_json(&projection)
+}
+
+/// Print one frontier projection (explicit id, or the sole frontier when only
+/// one exists); otherwise print the list of all frontiers.
+fn run_frontier_status(args: FrontierStatusArgs) -> Result<(), StoreError> {
+    let store = open_store(&args.project.project)?;
+    match args.frontier {
+        Some(raw) => {
+            let projection = store.frontier_projection(parse_frontier_id(&raw)?)?;
+            print_json(&projection)
+        }
+        None => {
+            let frontiers = store.list_frontiers()?;
+            match frontiers.as_slice() {
+                [only] => print_json(&store.frontier_projection(only.id)?),
+                _ => print_json(&frontiers),
+            }
+        }
+    }
+}
+
+/// Create a node of any class with an optional payload (inline JSON, payload
+/// file, or repeated `--field` pairs), hidden annotations, and parent edges.
+fn run_node_add(args: NodeAddArgs) -> Result<(), StoreError> {
+    let mut store = open_store(&args.project.project)?;
+    let frontier_id = args
+        .frontier
+        .as_deref()
+        .map(parse_frontier_id)
+        .transpose()?;
+    // Payload may come from inline JSON, a file, or `--field` pairs; see
+    // `load_payload` for how the sources are combined.
+    let payload = load_payload(
+        store.schema().schema_ref(),
+        args.payload_json,
+        args.payload_file,
+        args.fields,
+    )?;
+    // CLI-supplied annotations are stored as hidden sidecar text.
+    let annotations = args
+        .annotations
+        .into_iter()
+        .map(|body| Ok(NodeAnnotation::hidden(NonEmptyText::new(body)?)))
+        .collect::<Result<Vec<_>, StoreError>>()?;
+    let node = store.add_node(CreateNodeRequest {
+        class: args.class.into(),
+        frontier_id,
+        title: NonEmptyText::new(args.title)?,
+        summary: args.summary.map(NonEmptyText::new).transpose()?,
+        payload,
+        annotations,
+        attachments: lineage_attachments(args.parents)?,
+    })?;
+    print_json(&node)
+}
+
+/// List nodes matching the optional frontier/class filters as JSON.
+fn run_node_list(args: NodeListArgs) -> Result<(), StoreError> {
+    let store = open_store(&args.project.project)?;
+    let frontier_id = args
+        .frontier
+        .as_deref()
+        .map(parse_frontier_id)
+        .transpose()?;
+    let query = ListNodesQuery {
+        frontier_id,
+        class: args.class.map(Into::into),
+        include_archived: args.include_archived,
+        limit: args.limit,
+    };
+    print_json(&store.list_nodes(query)?)
+}
+
+/// Print a single node by id, failing with `NodeNotFound` when absent.
+fn run_node_show(args: NodeShowArgs) -> Result<(), StoreError> {
+    let store = open_store(&args.project.project)?;
+    let node_id = parse_node_id(&args.node)?;
+    match store.get_node(node_id)? {
+        Some(node) => print_json(&node),
+        None => Err(StoreError::NodeNotFound(node_id)),
+    }
+}
+
+/// Attach a sidecar annotation (hidden unless `--visible`) to a node.
+fn run_node_annotate(args: NodeAnnotateArgs) -> Result<(), StoreError> {
+    let mut store = open_store(&args.project.project)?;
+    let visibility = if args.visible {
+        AnnotationVisibility::Visible
+    } else {
+        AnnotationVisibility::HiddenByDefault
+    };
+    let annotation = NodeAnnotation {
+        id: fidget_spinner_core::AnnotationId::fresh(),
+        visibility,
+        label: args.label.map(NonEmptyText::new).transpose()?,
+        body: NonEmptyText::new(args.body)?,
+        created_at: time::OffsetDateTime::now_utc(),
+    };
+    store.annotate_node(parse_node_id(&args.node)?, annotation)?;
+    println!("annotated {}", args.node);
+    Ok(())
+}
+
+/// Archive a node so default listings hide it without deleting evidence.
+fn run_node_archive(args: NodeArchiveArgs) -> Result<(), StoreError> {
+    let mut store = open_store(&args.project.project)?;
+    let node_id = parse_node_id(&args.node)?;
+    store.archive_node(node_id)?;
+    println!("archived {}", args.node);
+    Ok(())
+}
+
+/// Record a quick off-path `Note` node whose payload is a single `body` field.
+fn run_quick_note(args: QuickNoteArgs) -> Result<(), StoreError> {
+    add_quick_body_node(
+        args.project.project,
+        NodeClass::Note,
+        args.frontier,
+        args.title,
+        None,
+        args.body,
+        args.parents,
+    )
+}
+
+/// Record a quick off-path `Research` node whose payload is a single `body`
+/// field, with an optional summary.
+fn run_quick_research(args: QuickResearchArgs) -> Result<(), StoreError> {
+    add_quick_body_node(
+        args.project.project,
+        NodeClass::Research,
+        args.frontier,
+        args.title,
+        args.summary,
+        args.body,
+        args.parents,
+    )
+}
+
+/// Shared implementation for the quick note/research commands: both create a
+/// node whose payload is `{ "body": ... }` under the project schema, with no
+/// annotations and optional parent lineage.
+fn add_quick_body_node(
+    project: PathBuf,
+    class: NodeClass,
+    frontier: Option<String>,
+    title: String,
+    summary: Option<String>,
+    body: String,
+    parents: Vec<String>,
+) -> Result<(), StoreError> {
+    let mut store = open_store(&project)?;
+    let payload = NodePayload::with_schema(
+        store.schema().schema_ref(),
+        json_object(json!({ "body": body }))?,
+    );
+    let node = store.add_node(CreateNodeRequest {
+        class,
+        frontier_id: frontier.as_deref().map(parse_frontier_id).transpose()?,
+        title: NonEmptyText::new(title)?,
+        summary: summary.map(NonEmptyText::new).transpose()?,
+        payload,
+        annotations: Vec::new(),
+        attachments: lineage_attachments(parents)?,
+    })?;
+    print_json(&node)
+}
+
+/// Atomically close a core-path experiment: capture the candidate snapshot,
+/// then record run, metrics, note, verdict, and decision in one store call.
+fn run_experiment_close(args: ExperimentCloseArgs) -> Result<(), StoreError> {
+    let mut store = open_store(&args.project.project)?;
+    let frontier_id = parse_frontier_id(&args.frontier)?;
+    // Closure requires a capturable checkpoint; without one, report the
+    // project root as failing git inspection.
+    let snapshot = store
+        .auto_capture_checkpoint(NonEmptyText::new(args.candidate_summary.clone())?)?
+        .map(|seed| seed.snapshot)
+        .ok_or(StoreError::GitInspectionFailed(
+            store.project_root().to_path_buf(),
+        ))?;
+    // The recorded command defaults its working directory to the project root.
+    let command = CommandRecipe::new(
+        args.working_directory
+            .map(utf8_path)
+            .unwrap_or_else(|| store.project_root().to_path_buf()),
+        to_text_vec(args.argv)?,
+        parse_env(args.env),
+    )?;
+    let receipt = store.close_experiment(CloseExperimentRequest {
+        frontier_id,
+        base_checkpoint_id: parse_checkpoint_id(&args.base_checkpoint)?,
+        change_node_id: parse_node_id(&args.change_node)?,
+        candidate_summary: NonEmptyText::new(args.candidate_summary)?,
+        candidate_snapshot: snapshot,
+        run_title: NonEmptyText::new(args.run_title)?,
+        run_summary: args.run_summary.map(NonEmptyText::new).transpose()?,
+        backend: args.backend.into(),
+        benchmark_suite: NonEmptyText::new(args.benchmark_suite)?,
+        command,
+        code_snapshot: Some(capture_code_snapshot(store.project_root())?),
+        primary_metric: MetricObservation {
+            metric_key: NonEmptyText::new(args.primary_metric_key)?,
+            unit: args.primary_metric_unit.into(),
+            objective: args.primary_metric_objective.into(),
+            value: args.primary_metric_value,
+        },
+        supporting_metrics: args
+            .metrics
+            .into_iter()
+            .map(parse_metric_observation)
+            .collect::<Result<Vec<_>, _>>()?,
+        note: FrontierNote {
+            summary: NonEmptyText::new(args.note)?,
+            next_hypotheses: to_text_vec(args.next_hypotheses)?,
+        },
+        verdict: args.verdict.into(),
+        decision_title: NonEmptyText::new(args.decision_title)?,
+        decision_rationale: NonEmptyText::new(args.decision_rationale)?,
+        // No separate analysis node is attached from the CLI path.
+        analysis_node_id: None,
+    })?;
+    print_json(&receipt)
+}
+
+fn run_skill_install(args: SkillInstallArgs) -> Result<(), StoreError> {
+ if let Some(name) = args.name.as_deref() {
+ let skill = resolve_bundled_skill(Some(name))?;
+ let destination = args
+ .destination
+ .unwrap_or(default_skill_root()?.join(skill.name));
+ install_skill(skill, &destination)?;
+ println!("{}", destination.display());
+ } else {
+ let destination_root = args.destination.unwrap_or(default_skill_root()?);
+ for skill in bundled_skill::bundled_skill_summaries() {
+ let destination = destination_root.join(skill.name);
+ install_skill(resolve_bundled_skill(Some(skill.name))?, &destination)?;
+ println!("{}", destination.display());
+ }
+ }
+ Ok(())
+}
+
+fn resolve_bundled_skill(
+ requested_name: Option<&str>,
+) -> Result<bundled_skill::BundledSkill, StoreError> {
+ requested_name.map_or_else(
+ || Ok(bundled_skill::default_bundled_skill()),
+ |name| {
+ bundled_skill::bundled_skill(name)
+ .ok_or_else(|| invalid_input(format!("unknown bundled skill `{name}`")))
+ },
+ )
+}
+
+fn default_skill_root() -> Result<PathBuf, StoreError> {
+ dirs::home_dir()
+ .map(|home| home.join(".codex/skills"))
+ .ok_or_else(|| invalid_input("home directory not found"))
+}
+
+fn install_skill(skill: bundled_skill::BundledSkill, destination: &Path) -> Result<(), StoreError> {
+ fs::create_dir_all(destination)?;
+ fs::write(destination.join("SKILL.md"), skill.body)?;
+ Ok(())
+}
+
+fn open_store(path: &Path) -> Result<ProjectStore, StoreError> {
+ ProjectStore::open(utf8_path(path.to_path_buf()))
+}
+
+fn utf8_path(path: impl Into<PathBuf>) -> Utf8PathBuf {
+ Utf8PathBuf::from(path.into().to_string_lossy().into_owned())
+}
+
+fn to_text_vec(values: Vec<String>) -> Result<Vec<NonEmptyText>, StoreError> {
+ values
+ .into_iter()
+ .map(NonEmptyText::new)
+ .collect::<Result<Vec<_>, _>>()
+ .map_err(StoreError::from)
+}
+
+fn to_text_set(values: Vec<String>) -> Result<BTreeSet<NonEmptyText>, StoreError> {
+ to_text_vec(values).map(BTreeSet::from_iter)
+}
+
/// Parse `KEY=VALUE` strings into a sorted map.
///
/// The first `=` splits key from value, so values may contain `=`. Entries
/// without any `=` are silently skipped; a repeated key keeps the last value.
fn parse_env(values: Vec<String>) -> BTreeMap<String, String> {
    let mut env = BTreeMap::new();
    for entry in values {
        if let Some((key, value)) = entry.split_once('=') {
            env.insert(key.to_owned(), value.to_owned());
        }
    }
    env
}
+
+fn lineage_attachments(parents: Vec<String>) -> Result<Vec<EdgeAttachment>, StoreError> {
+ parents
+ .into_iter()
+ .map(|parent| {
+ Ok(EdgeAttachment {
+ node_id: parse_node_id(&parent)?,
+ kind: fidget_spinner_core::EdgeKind::Lineage,
+ direction: EdgeAttachmentDirection::ExistingToNew,
+ })
+ })
+ .collect()
+}
+
/// Assemble a node payload by merging three argument sources; later sources
/// override earlier ones on key collision: inline JSON, then a JSON file,
/// then repeated `key=value` field arguments.
fn load_payload(
    schema: fidget_spinner_core::PayloadSchemaRef,
    payload_json: Option<String>,
    payload_file: Option<PathBuf>,
    fields: Vec<String>,
) -> Result<NodePayload, StoreError> {
    let mut map = Map::new();
    if let Some(text) = payload_json {
        map.extend(json_object(serde_json::from_str::<Value>(&text)?)?);
    }
    if let Some(path) = payload_file {
        let text = fs::read_to_string(path)?;
        map.extend(json_object(serde_json::from_str::<Value>(&text)?)?);
    }
    for field in fields {
        // Field entries without `=` are silently ignored, not rejected.
        let Some((key, raw_value)) = field.split_once('=') else {
            continue;
        };
        // Try the value as JSON first; anything unparseable falls back to a
        // plain string (so `count=3` is a number, `name=foo` is a string).
        let value = serde_json::from_str::<Value>(raw_value).unwrap_or_else(|_| json!(raw_value));
        let _ = map.insert(key.to_owned(), value);
    }
    Ok(NodePayload::with_schema(schema, map))
}
+
+fn json_object(value: Value) -> Result<Map<String, Value>, StoreError> {
+ match value {
+ Value::Object(map) => Ok(map),
+ other => Err(invalid_input(format!(
+ "expected JSON object, got {other:?}"
+ ))),
+ }
+}
+
/// Best-effort capture of the git state at `project_root`.
///
/// Each git query degrades to `None` when git fails or prints nothing (see
/// `run_git`), so this also works outside a git repository.
fn capture_code_snapshot(project_root: &Utf8Path) -> Result<CodeSnapshotRef, StoreError> {
    let head_commit = run_git(project_root, &["rev-parse", "HEAD"])?;
    let dirty_paths = run_git(project_root, &["status", "--porcelain"])?
        .map(|status| {
            status
                .lines()
                // Porcelain lines are `XY <path>`: two status characters and
                // a space, so the path starts at byte 3.
                // NOTE(review): for renames the remainder is `old -> new`;
                // this keeps that whole string as one path — confirm intended.
                .filter_map(|line| line.get(3..).map(str::trim))
                .filter(|line| !line.is_empty())
                .map(Utf8PathBuf::from)
                .collect::<BTreeSet<_>>()
        })
        .unwrap_or_default();
    Ok(CodeSnapshotRef {
        // Fall back to the project root itself when not inside a git repo.
        repo_root: run_git(project_root, &["rev-parse", "--show-toplevel"])?
            .map(Utf8PathBuf::from)
            .unwrap_or_else(|| project_root.to_path_buf()),
        worktree_root: project_root.to_path_buf(),
        worktree_name: run_git(project_root, &["rev-parse", "--abbrev-ref", "HEAD"])?
            .map(NonEmptyText::new)
            .transpose()?,
        head_commit: head_commit.map(GitCommitHash::new).transpose()?,
        dirty_paths,
    })
}
+
+fn run_git(project_root: &Utf8Path, args: &[&str]) -> Result<Option<String>, StoreError> {
+ let output = std::process::Command::new("git")
+ .arg("-C")
+ .arg(project_root.as_str())
+ .args(args)
+ .output()?;
+ if !output.status.success() {
+ return Ok(None);
+ }
+ let text = String::from_utf8_lossy(&output.stdout).trim().to_owned();
+ if text.is_empty() {
+ return Ok(None);
+ }
+ Ok(Some(text))
+}
+
+fn parse_metric_observation(raw: String) -> Result<MetricObservation, StoreError> {
+ let parts = raw.split(':').collect::<Vec<_>>();
+ if parts.len() != 4 {
+ return Err(invalid_input(
+ "metrics must look like key:unit:objective:value",
+ ));
+ }
+ Ok(MetricObservation {
+ metric_key: NonEmptyText::new(parts[0])?,
+ unit: parse_metric_unit(parts[1])?,
+ objective: parse_optimization_objective(parts[2])?,
+ value: parts[3]
+ .parse::<f64>()
+ .map_err(|error| invalid_input(format!("invalid metric value: {error}")))?,
+ })
+}
+
+fn parse_metric_unit(raw: &str) -> Result<MetricUnit, StoreError> {
+ match raw {
+ "seconds" => Ok(MetricUnit::Seconds),
+ "bytes" => Ok(MetricUnit::Bytes),
+ "count" => Ok(MetricUnit::Count),
+ "ratio" => Ok(MetricUnit::Ratio),
+ "custom" => Ok(MetricUnit::Custom),
+ other => Err(invalid_input(format!("unknown metric unit `{other}`"))),
+ }
+}
+
+fn parse_optimization_objective(raw: &str) -> Result<OptimizationObjective, StoreError> {
+ match raw {
+ "minimize" => Ok(OptimizationObjective::Minimize),
+ "maximize" => Ok(OptimizationObjective::Maximize),
+ "target" => Ok(OptimizationObjective::Target),
+ other => Err(invalid_input(format!(
+ "unknown optimization objective `{other}`"
+ ))),
+ }
+}
+
+fn parse_node_id(raw: &str) -> Result<fidget_spinner_core::NodeId, StoreError> {
+ Ok(fidget_spinner_core::NodeId::from_uuid(Uuid::parse_str(
+ raw,
+ )?))
+}
+
+fn parse_frontier_id(raw: &str) -> Result<fidget_spinner_core::FrontierId, StoreError> {
+ Ok(fidget_spinner_core::FrontierId::from_uuid(Uuid::parse_str(
+ raw,
+ )?))
+}
+
+fn parse_checkpoint_id(raw: &str) -> Result<fidget_spinner_core::CheckpointId, StoreError> {
+ Ok(fidget_spinner_core::CheckpointId::from_uuid(
+ Uuid::parse_str(raw)?,
+ ))
+}
+
+fn print_json<T: Serialize>(value: &T) -> Result<(), StoreError> {
+ println!("{}", to_pretty_json(value)?);
+ Ok(())
+}
+
+fn to_pretty_json<T: Serialize>(value: &T) -> Result<String, StoreError> {
+ serde_json::to_string_pretty(value).map_err(StoreError::from)
+}
+
+fn invalid_input(message: impl Into<String>) -> StoreError {
+ StoreError::Json(serde_json::Error::io(std::io::Error::new(
+ std::io::ErrorKind::InvalidInput,
+ message.into(),
+ )))
+}
+
/// One-to-one mapping from the CLI node-class flag onto the core enum.
impl From<CliNodeClass> for NodeClass {
    fn from(value: CliNodeClass) -> Self {
        match value {
            CliNodeClass::Contract => Self::Contract,
            CliNodeClass::Change => Self::Change,
            CliNodeClass::Run => Self::Run,
            CliNodeClass::Analysis => Self::Analysis,
            CliNodeClass::Decision => Self::Decision,
            CliNodeClass::Research => Self::Research,
            CliNodeClass::Enabling => Self::Enabling,
            CliNodeClass::Note => Self::Note,
        }
    }
}
+
/// One-to-one mapping from the CLI metric-unit flag onto the core enum.
impl From<CliMetricUnit> for MetricUnit {
    fn from(value: CliMetricUnit) -> Self {
        match value {
            CliMetricUnit::Seconds => Self::Seconds,
            CliMetricUnit::Bytes => Self::Bytes,
            CliMetricUnit::Count => Self::Count,
            CliMetricUnit::Ratio => Self::Ratio,
            CliMetricUnit::Custom => Self::Custom,
        }
    }
}
+
/// One-to-one mapping from the CLI objective flag onto the core enum.
impl From<CliOptimizationObjective> for OptimizationObjective {
    fn from(value: CliOptimizationObjective) -> Self {
        match value {
            CliOptimizationObjective::Minimize => Self::Minimize,
            CliOptimizationObjective::Maximize => Self::Maximize,
            CliOptimizationObjective::Target => Self::Target,
        }
    }
}
+
/// Map the short CLI backend names onto the core execution backends.
impl From<CliExecutionBackend> for ExecutionBackend {
    fn from(value: CliExecutionBackend) -> Self {
        match value {
            CliExecutionBackend::Local => Self::LocalProcess,
            CliExecutionBackend::Worktree => Self::WorktreeProcess,
            CliExecutionBackend::Ssh => Self::SshProcess,
        }
    }
}
+
/// One-to-one mapping from the CLI verdict flag onto the core enum.
impl From<CliFrontierVerdict> for FrontierVerdict {
    fn from(value: CliFrontierVerdict) -> Self {
        match value {
            CliFrontierVerdict::PromoteToChampion => Self::PromoteToChampion,
            CliFrontierVerdict::KeepOnFrontier => Self::KeepOnFrontier,
            CliFrontierVerdict::RevertToChampion => Self::RevertToChampion,
            CliFrontierVerdict::ArchiveDeadEnd => Self::ArchiveDeadEnd,
            CliFrontierVerdict::NeedsMoreEvidence => Self::NeedsMoreEvidence,
        }
    }
}
diff --git a/crates/fidget-spinner-cli/src/mcp/catalog.rs b/crates/fidget-spinner-cli/src/mcp/catalog.rs
new file mode 100644
index 0000000..178b980
--- /dev/null
+++ b/crates/fidget-spinner-cli/src/mcp/catalog.rs
@@ -0,0 +1,541 @@
+use serde_json::{Value, json};
+
/// Which process executes an operation: the stable host process or the
/// disposable worker.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub(crate) enum DispatchTarget {
    Host,
    Worker,
}
+
/// Whether an operation may be replayed safely. `SafeReplay` is surfaced as
/// the MCP read-only hint and `NeverReplay` as the destructive hint (see
/// `ToolSpec::annotation_json`).
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub(crate) enum ReplayContract {
    SafeReplay,
    NeverReplay,
}
+
/// Static catalog entry for one MCP tool: wire name, human description, and
/// dispatch/replay metadata.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub(crate) struct ToolSpec {
    pub name: &'static str,
    pub description: &'static str,
    pub dispatch: DispatchTarget,
    pub replay: ReplayContract,
}
+
/// Static catalog entry for one MCP resource URI with its dispatch/replay
/// metadata.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub(crate) struct ResourceSpec {
    pub uri: &'static str,
    pub dispatch: DispatchTarget,
    pub replay: ReplayContract,
}
+
+impl ToolSpec {
+ #[must_use]
+ pub fn annotation_json(self) -> Value {
+ json!({
+ "title": self.name,
+ "readOnlyHint": self.replay == ReplayContract::SafeReplay,
+ "destructiveHint": self.replay == ReplayContract::NeverReplay,
+ "fidgetSpinner": {
+ "dispatch": match self.dispatch {
+ DispatchTarget::Host => "host",
+ DispatchTarget::Worker => "worker",
+ },
+ "replayContract": match self.replay {
+ ReplayContract::SafeReplay => "safe_replay",
+ ReplayContract::NeverReplay => "never_replay",
+ },
+ }
+ })
+ }
+}
+
/// Static lookup: the catalog spec for one MCP tool name, or `None` for an
/// unknown tool. The descriptions here are what `tool_definitions`
/// advertises to clients.
#[must_use]
pub(crate) fn tool_spec(name: &str) -> Option<ToolSpec> {
    match name {
        "project.bind" => Some(ToolSpec {
            name: "project.bind",
            description: "Bind this MCP session to a project root or nested path inside a project store.",
            dispatch: DispatchTarget::Host,
            replay: ReplayContract::NeverReplay,
        }),
        "project.status" => Some(ToolSpec {
            name: "project.status",
            description: "Read local project status, store paths, and git availability for the currently bound project.",
            dispatch: DispatchTarget::Worker,
            replay: ReplayContract::SafeReplay,
        }),
        "project.schema" => Some(ToolSpec {
            name: "project.schema",
            description: "Read the project-local payload schema and field validation tiers.",
            dispatch: DispatchTarget::Worker,
            replay: ReplayContract::SafeReplay,
        }),
        "frontier.list" => Some(ToolSpec {
            name: "frontier.list",
            description: "List frontiers for the current project.",
            dispatch: DispatchTarget::Worker,
            replay: ReplayContract::SafeReplay,
        }),
        "frontier.status" => Some(ToolSpec {
            name: "frontier.status",
            description: "Read one frontier projection, including champion and active candidates.",
            dispatch: DispatchTarget::Worker,
            replay: ReplayContract::SafeReplay,
        }),
        "frontier.init" => Some(ToolSpec {
            name: "frontier.init",
            description: "Create a new frontier rooted in a contract node. If the project is a git repo, the current HEAD becomes the initial champion when possible.",
            dispatch: DispatchTarget::Worker,
            replay: ReplayContract::NeverReplay,
        }),
        "node.create" => Some(ToolSpec {
            name: "node.create",
            description: "Create a generic DAG node with project payload fields and optional lineage parents.",
            dispatch: DispatchTarget::Worker,
            replay: ReplayContract::NeverReplay,
        }),
        "change.record" => Some(ToolSpec {
            name: "change.record",
            description: "Record a core-path change hypothesis with low ceremony.",
            dispatch: DispatchTarget::Worker,
            replay: ReplayContract::NeverReplay,
        }),
        "node.list" => Some(ToolSpec {
            name: "node.list",
            description: "List recent nodes. Archived nodes are hidden unless explicitly requested.",
            dispatch: DispatchTarget::Worker,
            replay: ReplayContract::SafeReplay,
        }),
        "node.read" => Some(ToolSpec {
            name: "node.read",
            description: "Read one node including payload, diagnostics, and hidden annotations.",
            dispatch: DispatchTarget::Worker,
            replay: ReplayContract::SafeReplay,
        }),
        "node.annotate" => Some(ToolSpec {
            name: "node.annotate",
            description: "Attach a free-form annotation to any node.",
            dispatch: DispatchTarget::Worker,
            replay: ReplayContract::NeverReplay,
        }),
        "node.archive" => Some(ToolSpec {
            name: "node.archive",
            description: "Archive a node so it falls out of default enumeration without being deleted.",
            dispatch: DispatchTarget::Worker,
            replay: ReplayContract::NeverReplay,
        }),
        "note.quick" => Some(ToolSpec {
            name: "note.quick",
            description: "Push a quick off-path note without bureaucratic experiment closure.",
            dispatch: DispatchTarget::Worker,
            replay: ReplayContract::NeverReplay,
        }),
        "research.record" => Some(ToolSpec {
            name: "research.record",
            description: "Record off-path research or enabling work that should live in the DAG but not on the bureaucratic core path.",
            dispatch: DispatchTarget::Worker,
            replay: ReplayContract::NeverReplay,
        }),
        "experiment.close" => Some(ToolSpec {
            name: "experiment.close",
            description: "Atomically close a core-path experiment with candidate checkpoint capture, measured result, note, and verdict.",
            dispatch: DispatchTarget::Worker,
            replay: ReplayContract::NeverReplay,
        }),
        "skill.list" => Some(ToolSpec {
            name: "skill.list",
            description: "List bundled skills shipped with this package.",
            dispatch: DispatchTarget::Host,
            replay: ReplayContract::SafeReplay,
        }),
        "skill.show" => Some(ToolSpec {
            name: "skill.show",
            description: "Return one bundled skill text shipped with this package. Defaults to `fidget-spinner` when name is omitted.",
            dispatch: DispatchTarget::Host,
            replay: ReplayContract::SafeReplay,
        }),
        "system.health" => Some(ToolSpec {
            name: "system.health",
            description: "Read MCP host health, session binding, worker generation, rollout state, and the last fault.",
            dispatch: DispatchTarget::Host,
            replay: ReplayContract::SafeReplay,
        }),
        "system.telemetry" => Some(ToolSpec {
            name: "system.telemetry",
            description: "Read aggregate request, retry, restart, and per-operation telemetry for this MCP session.",
            dispatch: DispatchTarget::Host,
            replay: ReplayContract::SafeReplay,
        }),
        _ => None,
    }
}
+
/// Static lookup: dispatch/replay metadata for a known resource URI, or
/// `None` for an unknown one. URIs must match the entries in
/// `list_resources`.
#[must_use]
pub(crate) fn resource_spec(uri: &str) -> Option<ResourceSpec> {
    match uri {
        "fidget-spinner://project/config" => Some(ResourceSpec {
            uri: "fidget-spinner://project/config",
            dispatch: DispatchTarget::Worker,
            replay: ReplayContract::SafeReplay,
        }),
        "fidget-spinner://project/schema" => Some(ResourceSpec {
            uri: "fidget-spinner://project/schema",
            dispatch: DispatchTarget::Worker,
            replay: ReplayContract::SafeReplay,
        }),
        "fidget-spinner://skill/fidget-spinner" => Some(ResourceSpec {
            uri: "fidget-spinner://skill/fidget-spinner",
            dispatch: DispatchTarget::Host,
            replay: ReplayContract::SafeReplay,
        }),
        "fidget-spinner://skill/frontier-loop" => Some(ResourceSpec {
            uri: "fidget-spinner://skill/frontier-loop",
            dispatch: DispatchTarget::Host,
            replay: ReplayContract::SafeReplay,
        }),
        _ => None,
    }
}
+
/// Materialize the full `tools/list` payload: name, description, input
/// schema, and annotations for every catalogued tool.
///
/// NOTE(review): this name list must stay in sync with the arms of
/// `tool_spec` — a name missing there is silently dropped by `filter_map`.
#[must_use]
pub(crate) fn tool_definitions() -> Vec<Value> {
    [
        "project.bind",
        "project.status",
        "project.schema",
        "frontier.list",
        "frontier.status",
        "frontier.init",
        "node.create",
        "change.record",
        "node.list",
        "node.read",
        "node.annotate",
        "node.archive",
        "note.quick",
        "research.record",
        "experiment.close",
        "skill.list",
        "skill.show",
        "system.health",
        "system.telemetry",
    ]
    .into_iter()
    .filter_map(tool_spec)
    .map(|spec| {
        json!({
            "name": spec.name,
            "description": spec.description,
            "inputSchema": input_schema(spec.name),
            "annotations": spec.annotation_json(),
        })
    })
    .collect()
}
+
/// Static `resources/list` payload. The URIs here must match the ones
/// `resource_spec` recognizes.
#[must_use]
pub(crate) fn list_resources() -> Vec<Value> {
    vec![
        json!({
            "uri": "fidget-spinner://project/config",
            "name": "project-config",
            "description": "Project-local store configuration",
            "mimeType": "application/json"
        }),
        json!({
            "uri": "fidget-spinner://project/schema",
            "name": "project-schema",
            "description": "Project-local payload schema and validation tiers",
            "mimeType": "application/json"
        }),
        json!({
            "uri": "fidget-spinner://skill/fidget-spinner",
            "name": "fidget-spinner-skill",
            "description": "Bundled base Fidget Spinner skill text for this package",
            "mimeType": "text/markdown"
        }),
        json!({
            "uri": "fidget-spinner://skill/frontier-loop",
            "name": "frontier-loop-skill",
            "description": "Bundled frontier-loop specialization skill text for this package",
            "mimeType": "text/markdown"
        }),
    ]
}
+
/// JSON input schema for each tool name. Every schema is closed
/// (`additionalProperties: false`); unknown names fall through to an empty
/// closed object schema.
fn input_schema(name: &str) -> Value {
    match name {
        "project.status" | "project.schema" | "skill.list" | "system.health"
        | "system.telemetry" => json!({"type":"object","additionalProperties":false}),
        "project.bind" => json!({
            "type": "object",
            "properties": {
                "path": { "type": "string", "description": "Project root or any nested path inside a project with .fidget_spinner state." }
            },
            "required": ["path"],
            "additionalProperties": false
        }),
        "skill.show" => json!({
            "type": "object",
            "properties": {
                "name": { "type": "string", "description": "Bundled skill name. Defaults to `fidget-spinner`." }
            },
            "additionalProperties": false
        }),
        "frontier.list" => json!({"type":"object","additionalProperties":false}),
        "frontier.status" => json!({
            "type": "object",
            "properties": {
                "frontier_id": { "type": "string", "description": "Frontier UUID" }
            },
            "required": ["frontier_id"],
            "additionalProperties": false
        }),
        "frontier.init" => json!({
            "type": "object",
            "properties": {
                "label": { "type": "string" },
                "objective": { "type": "string" },
                "contract_title": { "type": "string" },
                "contract_summary": { "type": "string" },
                "benchmark_suites": { "type": "array", "items": { "type": "string" } },
                "promotion_criteria": { "type": "array", "items": { "type": "string" } },
                "primary_metric": metric_spec_schema(),
                "supporting_metrics": { "type": "array", "items": metric_spec_schema() },
                "seed_summary": { "type": "string" }
            },
            "required": ["label", "objective", "contract_title", "benchmark_suites", "promotion_criteria", "primary_metric"],
            "additionalProperties": false
        }),
        "node.create" => json!({
            "type": "object",
            "properties": {
                "class": node_class_schema(),
                "frontier_id": { "type": "string" },
                "title": { "type": "string" },
                "summary": { "type": "string" },
                "payload": { "type": "object" },
                "annotations": { "type": "array", "items": annotation_schema() },
                "parents": { "type": "array", "items": { "type": "string" } }
            },
            "required": ["class", "title"],
            "additionalProperties": false
        }),
        "change.record" => json!({
            "type": "object",
            "properties": {
                "frontier_id": { "type": "string" },
                "title": { "type": "string" },
                "summary": { "type": "string" },
                "body": { "type": "string" },
                "hypothesis": { "type": "string" },
                "base_checkpoint_id": { "type": "string" },
                "benchmark_suite": { "type": "string" },
                "annotations": { "type": "array", "items": annotation_schema() },
                "parents": { "type": "array", "items": { "type": "string" } }
            },
            "required": ["frontier_id", "title", "body"],
            "additionalProperties": false
        }),
        "node.list" => json!({
            "type": "object",
            "properties": {
                "frontier_id": { "type": "string" },
                "class": node_class_schema(),
                "include_archived": { "type": "boolean" },
                "limit": { "type": "integer", "minimum": 1, "maximum": 500 }
            },
            "additionalProperties": false
        }),
        "node.read" | "node.archive" => json!({
            "type": "object",
            "properties": {
                "node_id": { "type": "string" }
            },
            "required": ["node_id"],
            "additionalProperties": false
        }),
        "node.annotate" => json!({
            "type": "object",
            "properties": {
                "node_id": { "type": "string" },
                "body": { "type": "string" },
                "label": { "type": "string" },
                "visible": { "type": "boolean" }
            },
            "required": ["node_id", "body"],
            "additionalProperties": false
        }),
        "note.quick" => json!({
            "type": "object",
            "properties": {
                "frontier_id": { "type": "string" },
                "title": { "type": "string" },
                "body": { "type": "string" },
                "annotations": { "type": "array", "items": annotation_schema() },
                "parents": { "type": "array", "items": { "type": "string" } }
            },
            "required": ["title", "body"],
            "additionalProperties": false
        }),
        "research.record" => json!({
            "type": "object",
            "properties": {
                "frontier_id": { "type": "string" },
                "title": { "type": "string" },
                "summary": { "type": "string" },
                "body": { "type": "string" },
                "annotations": { "type": "array", "items": annotation_schema() },
                "parents": { "type": "array", "items": { "type": "string" } }
            },
            "required": ["title", "body"],
            "additionalProperties": false
        }),
        "experiment.close" => json!({
            "type": "object",
            "properties": {
                "frontier_id": { "type": "string" },
                "base_checkpoint_id": { "type": "string" },
                "change_node_id": { "type": "string" },
                "candidate_summary": { "type": "string" },
                "run": run_schema(),
                "primary_metric": metric_observation_schema(),
                "supporting_metrics": { "type": "array", "items": metric_observation_schema() },
                "note": note_schema(),
                "verdict": verdict_schema(),
                "decision_title": { "type": "string" },
                "decision_rationale": { "type": "string" },
                "analysis_node_id": { "type": "string" }
            },
            "required": [
                "frontier_id",
                "base_checkpoint_id",
                "change_node_id",
                "candidate_summary",
                "run",
                "primary_metric",
                "note",
                "verdict",
                "decision_title",
                "decision_rationale"
            ],
            "additionalProperties": false
        }),
        // Unknown tools accept no arguments at all.
        _ => json!({"type":"object","additionalProperties":false}),
    }
}
+
/// Schema for declaring a metric (key/unit/objective) without a value.
fn metric_spec_schema() -> Value {
    json!({
        "type": "object",
        "properties": {
            "key": { "type": "string" },
            "unit": metric_unit_schema(),
            "objective": optimization_objective_schema()
        },
        "required": ["key", "unit", "objective"],
        "additionalProperties": false
    })
}
+
/// Schema for an observed metric: a metric spec plus its numeric value.
fn metric_observation_schema() -> Value {
    json!({
        "type": "object",
        "properties": {
            "key": { "type": "string" },
            "unit": metric_unit_schema(),
            "objective": optimization_objective_schema(),
            "value": { "type": "number" }
        },
        "required": ["key", "unit", "objective", "value"],
        "additionalProperties": false
    })
}
+
/// Schema for an inline annotation: required body, optional label and
/// visibility flag.
fn annotation_schema() -> Value {
    json!({
        "type": "object",
        "properties": {
            "body": { "type": "string" },
            "label": { "type": "string" },
            "visible": { "type": "boolean" }
        },
        "required": ["body"],
        "additionalProperties": false
    })
}
+
/// Enum schema of the eight node classes.
fn node_class_schema() -> Value {
    json!({
        "type": "string",
        "enum": ["contract", "change", "run", "analysis", "decision", "research", "enabling", "note"]
    })
}
+
/// Enum schema of supported metric units.
fn metric_unit_schema() -> Value {
    json!({
        "type": "string",
        "enum": ["seconds", "bytes", "count", "ratio", "custom"]
    })
}
+
/// Enum schema of metric optimization objectives.
fn optimization_objective_schema() -> Value {
    json!({
        "type": "string",
        "enum": ["minimize", "maximize", "target"]
    })
}
+
/// Enum schema of frontier verdicts accepted by `experiment.close`.
fn verdict_schema() -> Value {
    json!({
        "type": "string",
        "enum": [
            "promote_to_champion",
            "keep_on_frontier",
            "revert_to_champion",
            "archive_dead_end",
            "needs_more_evidence"
        ]
    })
}
+
/// Schema for the run descriptor in `experiment.close`: title, backend,
/// benchmark suite, and a command recipe whose `argv` is required.
fn run_schema() -> Value {
    json!({
        "type": "object",
        "properties": {
            "title": { "type": "string" },
            "summary": { "type": "string" },
            "backend": {
                "type": "string",
                "enum": ["local_process", "worktree_process", "ssh_process"]
            },
            "benchmark_suite": { "type": "string" },
            "command": {
                "type": "object",
                "properties": {
                    "working_directory": { "type": "string" },
                    "argv": { "type": "array", "items": { "type": "string" } },
                    "env": {
                        "type": "object",
                        "additionalProperties": { "type": "string" }
                    }
                },
                "required": ["argv"],
                "additionalProperties": false
            }
        },
        "required": ["title", "backend", "benchmark_suite", "command"],
        "additionalProperties": false
    })
}
+
/// Schema for the frontier note: a required summary plus optional next
/// hypotheses.
fn note_schema() -> Value {
    json!({
        "type": "object",
        "properties": {
            "summary": { "type": "string" },
            "next_hypotheses": { "type": "array", "items": { "type": "string" } }
        },
        "required": ["summary"],
        "additionalProperties": false
    })
}
diff --git a/crates/fidget-spinner-cli/src/mcp/fault.rs b/crates/fidget-spinner-cli/src/mcp/fault.rs
new file mode 100644
index 0000000..e9d1fce
--- /dev/null
+++ b/crates/fidget-spinner-cli/src/mcp/fault.rs
@@ -0,0 +1,99 @@
+use serde::{Deserialize, Serialize};
+use serde_json::{Value, json};
+use time::OffsetDateTime;
+
/// Coarse fault classification; selects the JSON-RPC error code (see
/// `FaultRecord::jsonrpc_code`).
#[derive(Clone, Copy, Debug, Deserialize, Eq, PartialEq, Serialize)]
pub(crate) enum FaultKind {
    InvalidInput,
    NotInitialized,
    Unavailable,
    Transient,
    Internal,
}
+
/// Which layer of the host/worker pipeline a fault originated in.
#[derive(Clone, Copy, Debug, Deserialize, Eq, PartialEq, Serialize)]
pub(crate) enum FaultStage {
    Host,
    Worker,
    Store,
    Transport,
    Protocol,
    Rollout,
}
+
/// Structured record of one fault, serialized whole into JSON-RPC errors
/// and tool results.
#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
pub(crate) struct FaultRecord {
    pub kind: FaultKind,
    pub stage: FaultStage,
    // Logical operation name, e.g. "worker.stdin".
    pub operation: String,
    pub message: String,
    // Defaults to false in `new`; set via the `retryable` builder.
    pub retryable: bool,
    // Set via `mark_retried` once a retry has actually been attempted.
    pub retried: bool,
    // Worker generation supplied alongside `retryable`, when known.
    pub worker_generation: Option<u64>,
    // Captured with `OffsetDateTime::now_utc()` at construction time.
    pub occurred_at: OffsetDateTime,
}
+
impl FaultRecord {
    /// Build a non-retryable fault timestamped now; use the `retryable` and
    /// `mark_retried` builders to refine it.
    #[must_use]
    pub fn new(
        kind: FaultKind,
        stage: FaultStage,
        operation: impl Into<String>,
        message: impl Into<String>,
    ) -> Self {
        Self {
            kind,
            stage,
            operation: operation.into(),
            message: message.into(),
            retryable: false,
            retried: false,
            worker_generation: None,
            occurred_at: OffsetDateTime::now_utc(),
        }
    }

    /// Mark the fault retryable, recording which worker generation (if any)
    /// it was observed against.
    #[must_use]
    pub fn retryable(mut self, worker_generation: Option<u64>) -> Self {
        self.retryable = true;
        self.worker_generation = worker_generation;
        self
    }

    /// Record that a retry was actually attempted for this fault.
    #[must_use]
    pub fn mark_retried(mut self) -> Self {
        self.retried = true;
        self
    }

    /// Render as a JSON-RPC error object; the whole record becomes `data`.
    #[must_use]
    pub fn into_jsonrpc_error(self) -> Value {
        json!({
            "code": self.jsonrpc_code(),
            // Cloned because `self` is then serialized whole into `data`.
            "message": self.message.clone(),
            "data": self,
        })
    }

    /// Render as an MCP tool result with `isError: true`: the message as
    /// text content, the full record as structured content.
    #[must_use]
    pub fn into_tool_result(self) -> Value {
        json!({
            "content": [{
                "type": "text",
                "text": self.message,
            }],
            "structuredContent": self,
            "isError": true,
        })
    }

    /// JSON-RPC error code for this fault kind (-32602 invalid params,
    /// -32002/-32004 server-defined, -32603 internal).
    #[must_use]
    pub const fn jsonrpc_code(&self) -> i64 {
        match self.kind {
            FaultKind::InvalidInput => -32602,
            FaultKind::NotInitialized => -32002,
            FaultKind::Unavailable => -32004,
            FaultKind::Transient | FaultKind::Internal => -32603,
        }
    }
}
diff --git a/crates/fidget-spinner-cli/src/mcp/host/binary.rs b/crates/fidget-spinner-cli/src/mcp/host/binary.rs
new file mode 100644
index 0000000..2107461
--- /dev/null
+++ b/crates/fidget-spinner-cli/src/mcp/host/binary.rs
@@ -0,0 +1,43 @@
+use std::fs;
+use std::io;
+use std::path::{Path, PathBuf};
+
+use fidget_spinner_store_sqlite::StoreError;
+
+use crate::mcp::protocol::BinaryFingerprint;
+
/// Tracks the host executable on disk so the runtime can detect an in-place
/// binary swap (rollout) after startup.
pub(super) struct BinaryRuntime {
    pub(super) path: PathBuf,
    // Fingerprint (length + mtime) taken when the runtime was created.
    startup_fingerprint: BinaryFingerprint,
    // False when the launch path contains a `target` component — presumably
    // flagging cargo build directories, which get rebuilt in place.
    pub(super) launch_path_stable: bool,
}
+
impl BinaryRuntime {
    /// Fingerprint the binary at `path` and remember it as the startup
    /// state for later rollout detection.
    pub(super) fn new(path: PathBuf) -> Result<Self, StoreError> {
        let startup_fingerprint = fingerprint_binary(&path)?;
        Ok(Self {
            // Any path component literally named `target` marks the launch
            // path unstable — presumably a cargo `target/` build dir, where
            // rebuilds replace the binary in place. TODO confirm intent.
            launch_path_stable: !path
                .components()
                .any(|component| component.as_os_str().to_string_lossy() == "target"),
            path,
            startup_fingerprint,
        })
    }

    /// True when the on-disk binary no longer matches the startup
    /// fingerprint, i.e. it was replaced or rebuilt since launch.
    pub(super) fn rollout_pending(&self) -> Result<bool, StoreError> {
        Ok(fingerprint_binary(&self.path)? != self.startup_fingerprint)
    }
}
+
+fn fingerprint_binary(path: &Path) -> Result<BinaryFingerprint, StoreError> {
+ let metadata = fs::metadata(path)?;
+ let modified_unix_nanos = metadata
+ .modified()?
+ .duration_since(std::time::UNIX_EPOCH)
+ .map_err(|error| io::Error::other(format!("invalid binary mtime: {error}")))?
+ .as_nanos();
+ Ok(BinaryFingerprint {
+ length_bytes: metadata.len(),
+ modified_unix_nanos,
+ })
+}
diff --git a/crates/fidget-spinner-cli/src/mcp/host/config.rs b/crates/fidget-spinner-cli/src/mcp/host/config.rs
new file mode 100644
index 0000000..8d1ee4b
--- /dev/null
+++ b/crates/fidget-spinner-cli/src/mcp/host/config.rs
@@ -0,0 +1,18 @@
+use std::path::PathBuf;
+
+use fidget_spinner_store_sqlite::StoreError;
+
/// Immutable host startup configuration.
#[derive(Clone, Debug)]
pub(super) struct HostConfig {
    // Path of the currently running executable (from `current_exe`).
    pub(super) executable: PathBuf,
    // Project to bind at startup, if one was supplied.
    pub(super) initial_project: Option<PathBuf>,
}
+
impl HostConfig {
    /// Build a host config from the running executable's path and an
    /// optional project to bind at startup.
    pub(super) fn new(initial_project: Option<PathBuf>) -> Result<Self, StoreError> {
        Ok(Self {
            executable: std::env::current_exe()?,
            initial_project,
        })
    }
}
diff --git a/crates/fidget-spinner-cli/src/mcp/host/mod.rs b/crates/fidget-spinner-cli/src/mcp/host/mod.rs
new file mode 100644
index 0000000..21c19ec
--- /dev/null
+++ b/crates/fidget-spinner-cli/src/mcp/host/mod.rs
@@ -0,0 +1,8 @@
+//! Stable host process that owns the MCP session and routes store work to a disposable worker.
+
+mod binary;
+mod config;
+mod process;
+mod runtime;
+
+pub(crate) use runtime::run_host as serve;
diff --git a/crates/fidget-spinner-cli/src/mcp/host/process.rs b/crates/fidget-spinner-cli/src/mcp/host/process.rs
new file mode 100644
index 0000000..d4cbb4b
--- /dev/null
+++ b/crates/fidget-spinner-cli/src/mcp/host/process.rs
@@ -0,0 +1,246 @@
+use std::io::{BufRead, BufReader, BufWriter, Write};
+use std::path::PathBuf;
+use std::process::{Child, ChildStdin, ChildStdout, Command, Stdio};
+
+use serde::{Deserialize, Serialize};
+use serde_json::Value;
+
+use crate::mcp::fault::{FaultKind, FaultRecord, FaultStage};
+use crate::mcp::protocol::{
+ HostRequestId, WorkerOperation, WorkerOutcome, WorkerRequest, WorkerResponse, WorkerSpawnConfig,
+};
+
/// The project a session is bound to: the path the client asked for plus the
/// store-resolved project root that workers are actually spawned against.
#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
pub(super) struct ProjectBinding {
    pub(super) requested_path: PathBuf,
    pub(super) project_root: PathBuf,
}
+
/// Supervises the disposable worker process: lazy spawn, line-framed JSON
/// request/response transport over the child's stdio, crash detection, and
/// teardown.
pub(super) struct WorkerSupervisor {
    config: WorkerSpawnConfig,
    // Spawn counter; bumped on each worker spawn and attached to faults.
    generation: u64,
    // One-shot test hook: kill the worker after sending the next request.
    crash_before_reply_once: bool,
    // Project root the worker is (or will be) spawned against.
    bound_project_root: Option<PathBuf>,
    child: Option<Child>,
    stdin: Option<BufWriter<ChildStdin>>,
    stdout: Option<BufReader<ChildStdout>>,
}
+
+impl WorkerSupervisor {
+ pub(super) fn new(config: WorkerSpawnConfig, generation: u64) -> Self {
+ Self {
+ config,
+ generation,
+ crash_before_reply_once: false,
+ bound_project_root: None,
+ child: None,
+ stdin: None,
+ stdout: None,
+ }
+ }
+
+ pub(super) fn generation(&self) -> u64 {
+ self.generation
+ }
+
+ pub(super) fn rebind(&mut self, project_root: PathBuf) {
+ if self
+ .bound_project_root
+ .as_ref()
+ .is_some_and(|current| current == &project_root)
+ {
+ return;
+ }
+ self.kill_current_worker();
+ self.bound_project_root = Some(project_root);
+ }
+
+ pub(super) fn execute(
+ &mut self,
+ request_id: HostRequestId,
+ operation: WorkerOperation,
+ ) -> Result<Value, FaultRecord> {
+ self.ensure_worker()?;
+ let request = WorkerRequest::Execute {
+ id: request_id,
+ operation,
+ };
+ let stdin = self.stdin.as_mut().ok_or_else(|| {
+ FaultRecord::new(
+ FaultKind::Transient,
+ FaultStage::Transport,
+ "worker.stdin",
+ "worker stdin is not available",
+ )
+ .retryable(Some(self.generation))
+ })?;
+ serde_json::to_writer(&mut *stdin, &request).map_err(|error| {
+ FaultRecord::new(
+ FaultKind::Transient,
+ FaultStage::Transport,
+ "worker.write",
+ format!("failed to write worker request: {error}"),
+ )
+ .retryable(Some(self.generation))
+ })?;
+ stdin.write_all(b"\n").map_err(|error| {
+ FaultRecord::new(
+ FaultKind::Transient,
+ FaultStage::Transport,
+ "worker.write",
+ format!("failed to frame worker request: {error}"),
+ )
+ .retryable(Some(self.generation))
+ })?;
+ stdin.flush().map_err(|error| {
+ FaultRecord::new(
+ FaultKind::Transient,
+ FaultStage::Transport,
+ "worker.write",
+ format!("failed to flush worker request: {error}"),
+ )
+ .retryable(Some(self.generation))
+ })?;
+
+ if self.crash_before_reply_once {
+ self.crash_before_reply_once = false;
+ self.kill_current_worker();
+ return Err(FaultRecord::new(
+ FaultKind::Transient,
+ FaultStage::Transport,
+ "worker.read",
+ "worker crashed before replying",
+ )
+ .retryable(Some(self.generation)));
+ }
+
+ let stdout = self.stdout.as_mut().ok_or_else(|| {
+ FaultRecord::new(
+ FaultKind::Transient,
+ FaultStage::Transport,
+ "worker.stdout",
+ "worker stdout is not available",
+ )
+ .retryable(Some(self.generation))
+ })?;
+ let mut line = String::new();
+ let bytes = stdout.read_line(&mut line).map_err(|error| {
+ FaultRecord::new(
+ FaultKind::Transient,
+ FaultStage::Transport,
+ "worker.read",
+ format!("failed to read worker response: {error}"),
+ )
+ .retryable(Some(self.generation))
+ })?;
+ if bytes == 0 {
+ self.kill_current_worker();
+ return Err(FaultRecord::new(
+ FaultKind::Transient,
+ FaultStage::Transport,
+ "worker.read",
+ "worker exited before replying",
+ )
+ .retryable(Some(self.generation)));
+ }
+ let response = serde_json::from_str::<WorkerResponse>(&line).map_err(|error| {
+ FaultRecord::new(
+ FaultKind::Transient,
+ FaultStage::Protocol,
+ "worker.read",
+ format!("invalid worker response: {error}"),
+ )
+ .retryable(Some(self.generation))
+ })?;
+ match response.outcome {
+ WorkerOutcome::Success { result } => Ok(result),
+ WorkerOutcome::Fault { fault } => Err(fault),
+ }
+ }
+
+ pub(super) fn restart(&mut self) -> Result<(), FaultRecord> {
+ self.kill_current_worker();
+ self.ensure_worker()
+ }
+
+ pub(super) fn is_alive(&mut self) -> bool {
+ let Some(child) = self.child.as_mut() else {
+ return false;
+ };
+ if let Ok(None) = child.try_wait() {
+ true
+ } else {
+ self.child = None;
+ self.stdin = None;
+ self.stdout = None;
+ false
+ }
+ }
+
+ pub(super) fn arm_crash_once(&mut self) {
+ self.crash_before_reply_once = true;
+ }
+
+ fn ensure_worker(&mut self) -> Result<(), FaultRecord> {
+ if self.is_alive() {
+ return Ok(());
+ }
+ let Some(project_root) = self.bound_project_root.as_ref() else {
+ return Err(FaultRecord::new(
+ FaultKind::Unavailable,
+ FaultStage::Host,
+ "worker.spawn",
+ "project is not bound; call project.bind before using project tools",
+ ));
+ };
+ self.generation += 1;
+ let mut child = Command::new(&self.config.executable)
+ .arg("mcp")
+ .arg("worker")
+ .arg("--project")
+ .arg(project_root)
+ .stdin(Stdio::piped())
+ .stdout(Stdio::piped())
+ .stderr(Stdio::inherit())
+ .spawn()
+ .map_err(|error| {
+ FaultRecord::new(
+ FaultKind::Transient,
+ FaultStage::Transport,
+ "worker.spawn",
+ format!("failed to spawn worker: {error}"),
+ )
+ .retryable(Some(self.generation))
+ })?;
+ let stdin = child.stdin.take().ok_or_else(|| {
+ FaultRecord::new(
+ FaultKind::Internal,
+ FaultStage::Transport,
+ "worker.spawn",
+ "worker stdin pipe was not created",
+ )
+ })?;
+ let stdout = child.stdout.take().ok_or_else(|| {
+ FaultRecord::new(
+ FaultKind::Internal,
+ FaultStage::Transport,
+ "worker.spawn",
+ "worker stdout pipe was not created",
+ )
+ })?;
+ self.child = Some(child);
+ self.stdin = Some(BufWriter::new(stdin));
+ self.stdout = Some(BufReader::new(stdout));
+ Ok(())
+ }
+
+ fn kill_current_worker(&mut self) {
+ if let Some(child) = self.child.as_mut() {
+ let _ = child.kill();
+ let _ = child.wait();
+ }
+ self.child = None;
+ self.stdin = None;
+ self.stdout = None;
+ }
+}
diff --git a/crates/fidget-spinner-cli/src/mcp/host/runtime.rs b/crates/fidget-spinner-cli/src/mcp/host/runtime.rs
new file mode 100644
index 0000000..dd75544
--- /dev/null
+++ b/crates/fidget-spinner-cli/src/mcp/host/runtime.rs
@@ -0,0 +1,719 @@
+use std::io::{self, BufRead, Write};
+#[cfg(unix)]
+use std::os::unix::process::CommandExt;
+use std::path::PathBuf;
+use std::process::Command;
+use std::time::Instant;
+
+use serde::Serialize;
+use serde_json::{Value, json};
+
+use super::{
+ binary::BinaryRuntime,
+ config::HostConfig,
+ process::{ProjectBinding, WorkerSupervisor},
+};
+use crate::mcp::catalog::{
+ DispatchTarget, ReplayContract, list_resources, resource_spec, tool_definitions, tool_spec,
+};
+use crate::mcp::fault::{FaultKind, FaultRecord, FaultStage};
+use crate::mcp::protocol::{
+ CRASH_ONCE_ENV, FORCE_ROLLOUT_ENV, HOST_STATE_ENV, HostRequestId, HostStateSeed,
+ PROTOCOL_VERSION, ProjectBindingSeed, SERVER_NAME, SessionSeed, WorkerOperation,
+ WorkerSpawnConfig,
+};
+use crate::mcp::telemetry::{
+ BinaryHealth, BindingHealth, HealthSnapshot, InitializationHealth, ServerTelemetry,
+ WorkerHealth,
+};
+
+pub(crate) fn run_host(
+ initial_project: Option<PathBuf>,
+) -> Result<(), fidget_spinner_store_sqlite::StoreError> {
+ let stdin = io::stdin();
+ let mut stdout = io::stdout().lock();
+ let mut host = HostRuntime::new(HostConfig::new(initial_project)?)?;
+
+ for line in stdin.lock().lines() {
+ let line = match line {
+ Ok(line) => line,
+ Err(error) => {
+ eprintln!("mcp stdin failure: {error}");
+ continue;
+ }
+ };
+ if line.trim().is_empty() {
+ continue;
+ }
+
+ let maybe_response = host.handle_line(&line);
+ if let Some(response) = maybe_response {
+ write_message(&mut stdout, &response)?;
+ }
+ host.maybe_roll_forward()?;
+ }
+
+ Ok(())
+}
+
/// In-memory state of the MCP host: session handshake, telemetry, project
/// binding, worker supervisor, binary rollout tracking, and one-shot test
/// hooks driven by environment variables.
struct HostRuntime {
    config: HostConfig,
    binding: Option<ProjectBinding>,
    session: SessionSeed,
    telemetry: ServerTelemetry,
    // Monotonic id source for host->worker requests.
    next_request_id: u64,
    worker: WorkerSupervisor,
    binary: BinaryRuntime,
    // Test hook (env): operation key that triggers a forced rollout.
    force_rollout_key: Option<String>,
    force_rollout_consumed: bool,
    // Set when a forced rollout should happen after the current message.
    rollout_requested: bool,
    // Test hook (env): operation key that crashes the worker once.
    crash_once_key: Option<String>,
    crash_once_consumed: bool,
}
+
impl HostRuntime {
    /// Builds the runtime, restoring a previous generation's state from the
    /// `HOST_STATE_ENV` seed after a roll-forward, or starting fresh. When a
    /// binding is known (restored, or resolved from `--project`), the worker
    /// supervisor is pre-bound to its project root.
    fn new(config: HostConfig) -> Result<Self, fidget_spinner_store_sqlite::StoreError> {
        let restored = restore_host_state();
        let session = restored
            .as_ref()
            .map_or_else(SessionSeed::default, |seed| seed.session.clone());
        let telemetry = restored
            .as_ref()
            .map_or_else(ServerTelemetry::default, |seed| seed.telemetry.clone());
        let next_request_id = restored
            .as_ref()
            .map_or(1, |seed| seed.next_request_id.max(1));
        let worker_generation = restored.as_ref().map_or(0, |seed| seed.worker_generation);
        let force_rollout_consumed = restored
            .as_ref()
            .is_some_and(|seed| seed.force_rollout_consumed);
        let crash_once_consumed = restored
            .as_ref()
            .is_some_and(|seed| seed.crash_once_consumed);
        // A restored binding takes precedence over the --project argument.
        let binding = restored
            .as_ref()
            .and_then(|seed| seed.binding.clone().map(ProjectBinding::from))
            .or(config
                .initial_project
                .clone()
                .map(resolve_project_binding)
                .transpose()?
                .map(|resolved| resolved.binding));

        let worker = {
            let mut worker = WorkerSupervisor::new(
                WorkerSpawnConfig {
                    executable: config.executable.clone(),
                },
                worker_generation,
            );
            if let Some(project_root) = binding.as_ref().map(|binding| binding.project_root.clone())
            {
                worker.rebind(project_root);
            }
            worker
        };

        Ok(Self {
            config: config.clone(),
            binding,
            session,
            telemetry,
            next_request_id,
            worker,
            binary: BinaryRuntime::new(config.executable.clone())?,
            force_rollout_key: std::env::var(FORCE_ROLLOUT_ENV).ok(),
            force_rollout_consumed,
            rollout_requested: false,
            crash_once_key: std::env::var(CRASH_ONCE_ENV).ok(),
            crash_once_consumed,
        })
    }

    /// Parses one raw input line as JSON and hands it to `handle_message`.
    /// Unparseable input yields a JSON-RPC parse error with a null id.
    fn handle_line(&mut self, line: &str) -> Option<Value> {
        let message = match serde_json::from_str::<Value>(line) {
            Ok(message) => message,
            Err(error) => {
                return Some(jsonrpc_error(
                    Value::Null,
                    FaultRecord::new(
                        FaultKind::InvalidInput,
                        FaultStage::Protocol,
                        "jsonrpc.parse",
                        format!("parse error: {error}"),
                    ),
                ));
            }
        };
        self.handle_message(message)
    }

    /// Dispatches one JSON-RPC message, recording telemetry around it and
    /// applying the forced-rollout test hook after dispatch. Returns the
    /// response to write, or `None` for notifications.
    fn handle_message(&mut self, message: Value) -> Option<Value> {
        let Some(object) = message.as_object() else {
            return Some(jsonrpc_error(
                Value::Null,
                FaultRecord::new(
                    FaultKind::InvalidInput,
                    FaultStage::Protocol,
                    "jsonrpc.message",
                    "invalid request: expected JSON object",
                ),
            ));
        };

        // Messages without a `method` (e.g. client responses) are dropped
        // silently via the `?` below.
        let method = object.get("method").and_then(Value::as_str)?;
        let id = object.get("id").cloned();
        let params = object.get("params").cloned().unwrap_or_else(|| json!({}));
        let operation_key = operation_key(method, &params);
        let started_at = Instant::now();

        self.telemetry.record_request(&operation_key);
        let response = match self.dispatch(method, params, id.clone()) {
            Ok(Some(result)) => {
                self.telemetry
                    .record_success(&operation_key, started_at.elapsed().as_millis());
                id.map(|id| jsonrpc_result(id, result))
            }
            Ok(None) => {
                self.telemetry
                    .record_success(&operation_key, started_at.elapsed().as_millis());
                None
            }
            Err(fault) => {
                self.telemetry.record_error(
                    &operation_key,
                    fault.clone(),
                    started_at.elapsed().as_millis(),
                );
                // Tool-call faults are reported as successful JSON-RPC results
                // carrying an MCP tool-error payload; everything else becomes
                // a JSON-RPC error.
                // NOTE(review): faults on id-less notifications still emit an
                // error with a null id; strict JSON-RPC would stay silent.
                Some(match id {
                    Some(id) => match method {
                        "tools/call" => jsonrpc_result(id, fault.into_tool_result()),
                        _ => jsonrpc_error(id, fault),
                    },
                    None => jsonrpc_error(Value::Null, fault),
                })
            }
        };

        // One-shot test hook: request a rollout after the keyed operation.
        if self.should_force_rollout(&operation_key) {
            self.force_rollout_consumed = true;
            self.telemetry.record_rollout();
            self.rollout_requested = true;
        }

        response
    }

    /// Routes a method to its handler. Handshake methods (`initialize`,
    /// `notifications/initialized`, `notifications/cancelled`, `ping`) are
    /// allowed before initialization completes; everything else requires it.
    /// `Ok(Some(v))` is a result payload, `Ok(None)` means no response.
    fn dispatch(
        &mut self,
        method: &str,
        params: Value,
        request_id: Option<Value>,
    ) -> Result<Option<Value>, FaultRecord> {
        match method {
            "initialize" => {
                // Re-initialization resets the handshake state.
                self.session.initialize_params = Some(params.clone());
                self.session.initialized = false;
                Ok(Some(json!({
                    "protocolVersion": PROTOCOL_VERSION,
                    "capabilities": {
                        "tools": { "listChanged": false },
                        "resources": { "listChanged": false, "subscribe": false }
                    },
                    "serverInfo": {
                        "name": SERVER_NAME,
                        "version": env!("CARGO_PKG_VERSION")
                    },
                    "instructions": "The DAG is canonical truth. Frontier state is derived. Bind the session with project.bind before project-local DAG operations when the MCP is running unbound."
                })))
            }
            "notifications/initialized" => {
                if self.session.initialize_params.is_none() {
                    return Err(FaultRecord::new(
                        FaultKind::NotInitialized,
                        FaultStage::Host,
                        "notifications/initialized",
                        "received initialized notification before initialize",
                    ));
                }
                self.session.initialized = true;
                Ok(None)
            }
            "notifications/cancelled" => Ok(None),
            "ping" => Ok(Some(json!({}))),
            other => {
                self.require_initialized(other)?;
                match other {
                    "tools/list" => Ok(Some(json!({ "tools": tool_definitions() }))),
                    "resources/list" => Ok(Some(json!({ "resources": list_resources() }))),
                    "tools/call" => Ok(Some(self.dispatch_tool_call(params, request_id)?)),
                    "resources/read" => Ok(Some(self.dispatch_resource_read(params)?)),
                    _ => Err(FaultRecord::new(
                        FaultKind::InvalidInput,
                        FaultStage::Protocol,
                        other,
                        format!("method `{other}` is not implemented"),
                    )),
                }
            }
        }
    }

    /// Looks the tool up in the catalog and routes it to either a host-side
    /// handler or the worker, per the catalog's dispatch target.
    fn dispatch_tool_call(
        &mut self,
        params: Value,
        _request_id: Option<Value>,
    ) -> Result<Value, FaultRecord> {
        let envelope = deserialize::<ToolCallEnvelope>(params, "tools/call")?;
        let spec = tool_spec(&envelope.name).ok_or_else(|| {
            FaultRecord::new(
                FaultKind::InvalidInput,
                FaultStage::Host,
                format!("tools/call:{}", envelope.name),
                format!("unknown tool `{}`", envelope.name),
            )
        })?;
        match spec.dispatch {
            DispatchTarget::Host => self.handle_host_tool(&envelope.name, envelope.arguments),
            DispatchTarget::Worker => self.dispatch_worker_tool(spec, envelope.arguments),
        }
    }

    /// Same routing as `dispatch_tool_call`, but for `resources/read`.
    fn dispatch_resource_read(&mut self, params: Value) -> Result<Value, FaultRecord> {
        let args = deserialize::<ReadResourceArgs>(params, "resources/read")?;
        let spec = resource_spec(&args.uri).ok_or_else(|| {
            FaultRecord::new(
                FaultKind::InvalidInput,
                FaultStage::Host,
                format!("resources/read:{}", args.uri),
                format!("unknown resource `{}`", args.uri),
            )
        })?;
        match spec.dispatch {
            DispatchTarget::Host => Ok(Self::handle_host_resource(spec.uri)),
            DispatchTarget::Worker => self.dispatch_worker_operation(
                format!("resources/read:{}", args.uri),
                spec.replay,
                WorkerOperation::ReadResource { uri: args.uri },
            ),
        }
    }

    /// Wraps a worker-dispatched tool into a `WorkerOperation` call.
    fn dispatch_worker_tool(
        &mut self,
        spec: crate::mcp::catalog::ToolSpec,
        arguments: Value,
    ) -> Result<Value, FaultRecord> {
        let operation = format!("tools/call:{}", spec.name);
        self.dispatch_worker_operation(
            operation.clone(),
            spec.replay,
            WorkerOperation::CallTool {
                name: spec.name.to_owned(),
                arguments,
            },
        )
    }

    /// Executes an operation on the worker, requiring a bound project.
    /// Retryable faults on `SafeReplay` operations get exactly one retry
    /// against a restarted worker (reusing the same request id); any fault
    /// surfaced after a retry is marked as such.
    fn dispatch_worker_operation(
        &mut self,
        operation: String,
        replay: ReplayContract,
        worker_operation: WorkerOperation,
    ) -> Result<Value, FaultRecord> {
        let binding = self.require_bound_project(&operation)?;
        self.worker.rebind(binding.project_root.clone());

        // One-shot test hook: make the worker crash before replying.
        if self.should_crash_worker_once(&operation) {
            self.worker.arm_crash_once();
        }

        let request_id = self.allocate_request_id();
        match self.worker.execute(request_id, worker_operation.clone()) {
            Ok(result) => Ok(result),
            Err(fault) => {
                if replay == ReplayContract::SafeReplay && fault.retryable {
                    self.telemetry.record_retry(&operation);
                    self.telemetry.record_worker_restart();
                    self.worker
                        .restart()
                        .map_err(|restart_fault| restart_fault.mark_retried())?;
                    match self.worker.execute(request_id, worker_operation) {
                        Ok(result) => Ok(result),
                        Err(retry_fault) => Err(retry_fault.mark_retried()),
                    }
                } else {
                    Err(fault)
                }
            }
        }
    }

    /// Handles the tools that run in the host process itself: project
    /// binding, bundled-skill listing/lookup, and health/telemetry probes.
    fn handle_host_tool(&mut self, name: &str, arguments: Value) -> Result<Value, FaultRecord> {
        match name {
            "project.bind" => {
                let args = deserialize::<ProjectBindArgs>(arguments, "tools/call:project.bind")?;
                let resolved = resolve_project_binding(PathBuf::from(args.path))
                    .map_err(host_store_fault("tools/call:project.bind"))?;
                // Rebind the worker before publishing the new binding.
                self.worker.rebind(resolved.binding.project_root.clone());
                self.binding = Some(resolved.binding);
                tool_success(&resolved.status)
            }
            "skill.list" => tool_success(&json!({
                "skills": crate::bundled_skill::bundled_skill_summaries(),
            })),
            "skill.show" => {
                let args = deserialize::<SkillShowArgs>(arguments, "tools/call:skill.show")?;
                // Absent `name` falls back to the default bundled skill.
                let skill = args.name.as_deref().map_or_else(
                    || Ok(crate::bundled_skill::default_bundled_skill()),
                    |name| {
                        crate::bundled_skill::bundled_skill(name).ok_or_else(|| {
                            FaultRecord::new(
                                FaultKind::InvalidInput,
                                FaultStage::Host,
                                "tools/call:skill.show",
                                format!("unknown bundled skill `{name}`"),
                            )
                        })
                    },
                )?;
                tool_success(&json!({
                    "name": skill.name,
                    "description": skill.description,
                    "resource_uri": skill.resource_uri,
                    "body": skill.body,
                }))
            }
            "system.health" => tool_success(&HealthSnapshot {
                initialization: InitializationHealth {
                    ready: self.session.initialized,
                    seed_captured: self.session.initialize_params.is_some(),
                },
                binding: binding_health(self.binding.as_ref()),
                worker: WorkerHealth {
                    worker_generation: self.worker.generation(),
                    alive: self.worker.is_alive(),
                },
                binary: BinaryHealth {
                    current_executable: self.binary.path.display().to_string(),
                    launch_path_stable: self.binary.launch_path_stable,
                    rollout_pending: self.binary.rollout_pending().unwrap_or(false),
                },
                last_fault: self.telemetry.last_fault.clone(),
            }),
            "system.telemetry" => tool_success(&self.telemetry),
            other => Err(FaultRecord::new(
                FaultKind::InvalidInput,
                FaultStage::Host,
                format!("tools/call:{other}"),
                format!("unknown host tool `{other}`"),
            )),
        }
    }

    /// Serves the bundled-skill resources that live in the host binary; the
    /// catalog guarantees only these URIs are routed here.
    fn handle_host_resource(uri: &str) -> Value {
        match uri {
            "fidget-spinner://skill/fidget-spinner" => {
                skill_resource(uri, crate::bundled_skill::default_bundled_skill().body)
            }
            "fidget-spinner://skill/frontier-loop" => skill_resource(
                uri,
                crate::bundled_skill::frontier_loop_bundled_skill().body,
            ),
            _ => unreachable!("host resources are catalog-gated"),
        }
    }

    /// Rejects `operation` unless the MCP handshake has fully completed.
    fn require_initialized(&self, operation: &str) -> Result<(), FaultRecord> {
        if self.session.initialized {
            return Ok(());
        }
        Err(FaultRecord::new(
            FaultKind::NotInitialized,
            FaultStage::Host,
            operation,
            "client must call initialize and notifications/initialized before normal operations",
        ))
    }

    /// Rejects `operation` unless a project binding exists.
    fn require_bound_project(&self, operation: &str) -> Result<&ProjectBinding, FaultRecord> {
        self.binding.as_ref().ok_or_else(|| {
            FaultRecord::new(
                FaultKind::Unavailable,
                FaultStage::Host,
                operation,
                "project is not bound; call project.bind with the target project root or a nested path inside it",
            )
        })
    }

    /// Hands out the next host->worker request id.
    fn allocate_request_id(&mut self) -> HostRequestId {
        let id = HostRequestId(self.next_request_id);
        self.next_request_id += 1;
        id
    }

    /// Rolls the host forward when either a rollout was explicitly requested
    /// or the on-disk binary changed; a binary-driven rollout is recorded in
    /// telemetry here (a requested one was already recorded at request time).
    fn maybe_roll_forward(&mut self) -> Result<(), fidget_spinner_store_sqlite::StoreError> {
        let binary_pending = self.binary.rollout_pending()?;
        if !self.rollout_requested && !binary_pending {
            return Ok(());
        }
        if binary_pending && !self.rollout_requested {
            self.telemetry.record_rollout();
        }
        self.roll_forward()
    }

    /// Serializes the full host state into `HOST_STATE_ENV` and replaces the
    /// current process with a fresh `mcp serve` via exec. On unix this only
    /// returns if exec itself fails; other platforms are unsupported.
    fn roll_forward(&mut self) -> Result<(), fidget_spinner_store_sqlite::StoreError> {
        let state = HostStateSeed {
            session: self.session.clone(),
            telemetry: self.telemetry.clone(),
            next_request_id: self.next_request_id,
            binding: self.binding.clone().map(ProjectBindingSeed::from),
            worker_generation: self.worker.generation(),
            force_rollout_consumed: self.force_rollout_consumed,
            crash_once_consumed: self.crash_once_consumed,
        };
        let serialized = serde_json::to_string(&state)?;
        let mut command = Command::new(&self.binary.path);
        let _ = command.arg("mcp").arg("serve");
        if let Some(project) = self.config.initial_project.as_ref() {
            let _ = command.arg("--project").arg(project);
        }
        let _ = command.env(HOST_STATE_ENV, serialized);
        #[cfg(unix)]
        {
            let error = command.exec();
            Err(fidget_spinner_store_sqlite::StoreError::Io(error))
        }
        #[cfg(not(unix))]
        {
            return Err(fidget_spinner_store_sqlite::StoreError::Io(io::Error::new(
                io::ErrorKind::Unsupported,
                "host rollout requires unix exec support",
            )));
        }
    }

    /// True when the forced-rollout test hook matches `operation` and has
    /// not fired yet.
    fn should_force_rollout(&self, operation: &str) -> bool {
        self.force_rollout_key
            .as_deref()
            .is_some_and(|key| key == operation)
            && !self.force_rollout_consumed
    }

    /// True (once) when the crash-once test hook matches `operation`;
    /// consumes the hook as a side effect.
    fn should_crash_worker_once(&mut self, operation: &str) -> bool {
        let should_crash = self
            .crash_once_key
            .as_deref()
            .is_some_and(|key| key == operation)
            && !self.crash_once_consumed;
        if should_crash {
            self.crash_once_consumed = true;
        }
        should_crash
    }
}
+
/// Client-facing result of `project.bind`, echoing where the project was
/// resolved and what store configuration it carries.
#[derive(Debug, Serialize)]
struct ProjectBindStatus {
    requested_path: String,
    project_root: String,
    state_root: String,
    display_name: fidget_spinner_core::NonEmptyText,
    schema: fidget_spinner_core::PayloadSchemaRef,
    // Whether `git rev-parse --show-toplevel` succeeded at the project root.
    git_repo_detected: bool,
}
+
/// A resolved binding plus the status payload reported back to the client.
struct ResolvedProjectBinding {
    binding: ProjectBinding,
    status: ProjectBindStatus,
}
+
+fn resolve_project_binding(
+ requested_path: PathBuf,
+) -> Result<ResolvedProjectBinding, fidget_spinner_store_sqlite::StoreError> {
+ let store = crate::open_store(&requested_path)?;
+ Ok(ResolvedProjectBinding {
+ binding: ProjectBinding {
+ requested_path: requested_path.clone(),
+ project_root: PathBuf::from(store.project_root().as_str()),
+ },
+ status: ProjectBindStatus {
+ requested_path: requested_path.display().to_string(),
+ project_root: store.project_root().to_string(),
+ state_root: store.state_root().to_string(),
+ display_name: store.config().display_name.clone(),
+ schema: store.schema().schema_ref(),
+ git_repo_detected: crate::run_git(
+ store.project_root(),
+ &["rev-parse", "--show-toplevel"],
+ )?
+ .is_some(),
+ },
+ })
+}
+
+fn binding_health(binding: Option<&ProjectBinding>) -> BindingHealth {
+ match binding {
+ Some(binding) => BindingHealth {
+ bound: true,
+ requested_path: Some(binding.requested_path.display().to_string()),
+ project_root: Some(binding.project_root.display().to_string()),
+ state_root: Some(
+ binding
+ .project_root
+ .join(fidget_spinner_store_sqlite::STORE_DIR_NAME)
+ .display()
+ .to_string(),
+ ),
+ },
+ None => BindingHealth {
+ bound: false,
+ requested_path: None,
+ project_root: None,
+ state_root: None,
+ },
+ }
+}
+
+fn skill_resource(uri: &str, body: &str) -> Value {
+ json!({
+ "contents": [{
+ "uri": uri,
+ "mimeType": "text/markdown",
+ "text": body,
+ }]
+ })
+}
+
+impl From<ProjectBindingSeed> for ProjectBinding {
+ fn from(value: ProjectBindingSeed) -> Self {
+ Self {
+ requested_path: value.requested_path,
+ project_root: value.project_root,
+ }
+ }
+}
+
+impl From<ProjectBinding> for ProjectBindingSeed {
+ fn from(value: ProjectBinding) -> Self {
+ Self {
+ requested_path: value.requested_path,
+ project_root: value.project_root,
+ }
+ }
+}
+
+fn restore_host_state() -> Option<HostStateSeed> {
+ let raw = std::env::var(HOST_STATE_ENV).ok()?;
+ serde_json::from_str::<HostStateSeed>(&raw).ok()
+}
+
+fn deserialize<T: for<'de> serde::Deserialize<'de>>(
+ value: Value,
+ operation: &str,
+) -> Result<T, FaultRecord> {
+ serde_json::from_value(value).map_err(|error| {
+ FaultRecord::new(
+ FaultKind::InvalidInput,
+ FaultStage::Protocol,
+ operation,
+ format!("invalid params: {error}"),
+ )
+ })
+}
+
+fn operation_key(method: &str, params: &Value) -> String {
+ match method {
+ "tools/call" => params.get("name").and_then(Value::as_str).map_or_else(
+ || "tools/call".to_owned(),
+ |name| format!("tools/call:{name}"),
+ ),
+ "resources/read" => params.get("uri").and_then(Value::as_str).map_or_else(
+ || "resources/read".to_owned(),
+ |uri| format!("resources/read:{uri}"),
+ ),
+ other => other.to_owned(),
+ }
+}
+
/// Wraps a serializable value in the MCP tool-result envelope: pretty-printed
/// text content for humans plus `structuredContent` for programmatic use.
/// Serialization failures surface as internal host faults.
fn tool_success(value: &impl Serialize) -> Result<Value, FaultRecord> {
    Ok(json!({
        "content": [{
            "type": "text",
            "text": crate::to_pretty_json(value).map_err(|error| {
                FaultRecord::new(FaultKind::Internal, FaultStage::Host, "tool_success", error.to_string())
            })?,
        }],
        "structuredContent": serde_json::to_value(value).map_err(|error| {
            FaultRecord::new(FaultKind::Internal, FaultStage::Host, "tool_success", error.to_string())
        })?,
        "isError": false,
    }))
}
+
+fn host_store_fault(
+ operation: &'static str,
+) -> impl FnOnce(fidget_spinner_store_sqlite::StoreError) -> FaultRecord {
+ move |error| {
+ FaultRecord::new(
+ FaultKind::InvalidInput,
+ FaultStage::Host,
+ operation,
+ error.to_string(),
+ )
+ }
+}
+
+fn jsonrpc_result(id: Value, result: Value) -> Value {
+ json!({
+ "jsonrpc": "2.0",
+ "id": id,
+ "result": result,
+ })
+}
+
+fn jsonrpc_error(id: Value, fault: FaultRecord) -> Value {
+ json!({
+ "jsonrpc": "2.0",
+ "id": id,
+ "error": fault.into_jsonrpc_error(),
+ })
+}
+
+fn write_message(
+ stdout: &mut impl Write,
+ message: &Value,
+) -> Result<(), fidget_spinner_store_sqlite::StoreError> {
+ serde_json::to_writer(&mut *stdout, message)?;
+ stdout.write_all(b"\n")?;
+ stdout.flush()?;
+ Ok(())
+}
+
/// Parameters of a `tools/call` request: the tool name plus its arguments,
/// defaulting to `{}` when the client omits them.
#[derive(Debug, serde::Deserialize)]
struct ToolCallEnvelope {
    name: String,
    #[serde(default = "empty_json_object")]
    arguments: Value,
}

/// Serde default for omitted tool-call arguments.
fn empty_json_object() -> Value {
    json!({})
}

/// Parameters of a `resources/read` request.
#[derive(Debug, serde::Deserialize)]
struct ReadResourceArgs {
    uri: String,
}

/// Arguments of the host-side `project.bind` tool.
#[derive(Debug, serde::Deserialize)]
struct ProjectBindArgs {
    path: String,
}

/// Arguments of the host-side `skill.show` tool; an absent `name` selects
/// the default bundled skill.
#[derive(Debug, serde::Deserialize)]
struct SkillShowArgs {
    name: Option<String>,
}
diff --git a/crates/fidget-spinner-cli/src/mcp/mod.rs b/crates/fidget-spinner-cli/src/mcp/mod.rs
new file mode 100644
index 0000000..adea066
--- /dev/null
+++ b/crates/fidget-spinner-cli/src/mcp/mod.rs
@@ -0,0 +1,10 @@
mod catalog; // tool/resource definitions plus dispatch and replay metadata
mod fault; // structured fault records surfaced to clients
mod host; // stable host process owning the MCP session
mod protocol; // wire types and env-var names shared by host and worker
mod service; // worker-side store-backed tool/resource execution
mod telemetry; // request/fault counters and health snapshot types
mod worker; // worker process entry point

pub(crate) use host::serve;
pub(crate) use worker::serve as serve_worker;
diff --git a/crates/fidget-spinner-cli/src/mcp/protocol.rs b/crates/fidget-spinner-cli/src/mcp/protocol.rs
new file mode 100644
index 0000000..1f24f37
--- /dev/null
+++ b/crates/fidget-spinner-cli/src/mcp/protocol.rs
@@ -0,0 +1,86 @@
+use std::path::PathBuf;
+
+use serde::{Deserialize, Serialize};
+use serde_json::Value;
+
+use crate::mcp::telemetry::ServerTelemetry;
+
/// MCP protocol revision advertised in the `initialize` response.
pub(crate) const PROTOCOL_VERSION: &str = "2025-11-25";
/// Server name advertised in the `initialize` response.
pub(crate) const SERVER_NAME: &str = "fidget-spinner";
/// Env var carrying the serialized `HostStateSeed` across a roll-forward exec.
pub(crate) const HOST_STATE_ENV: &str = "FIDGET_SPINNER_MCP_HOST_STATE";
/// Test-only: operation key that forces a host rollout after it completes.
pub(crate) const FORCE_ROLLOUT_ENV: &str = "FIDGET_SPINNER_MCP_TEST_FORCE_ROLLOUT_KEY";
/// Test-only: operation key that makes the worker crash once before replying.
pub(crate) const CRASH_ONCE_ENV: &str = "FIDGET_SPINNER_MCP_TEST_HOST_CRASH_ONCE_KEY";
/// Test-only: operation key that injects a single transient worker fault.
pub(crate) const TRANSIENT_ONCE_ENV: &str = "FIDGET_SPINNER_MCP_TEST_WORKER_TRANSIENT_ONCE_KEY";
// Companion marker for the transient-once injection; consumed by the worker
// service (presumably to make the fault one-shot — see maybe_inject_transient).
pub(crate) const TRANSIENT_ONCE_MARKER_ENV: &str =
    "FIDGET_SPINNER_MCP_TEST_WORKER_TRANSIENT_ONCE_MARKER";
+
/// Per-session MCP handshake state, preserved across host roll-forwards.
#[derive(Clone, Debug, Default, Deserialize, Eq, PartialEq, Serialize)]
pub(crate) struct SessionSeed {
    /// Raw params from the client's `initialize` request, if received.
    pub initialize_params: Option<Value>,
    /// Whether `notifications/initialized` has been received.
    pub initialized: bool,
}

/// Full host state serialized into `HOST_STATE_ENV` before a roll-forward
/// exec and restored by the replacement process on startup.
#[derive(Clone, Debug, Default, Deserialize, Eq, PartialEq, Serialize)]
pub(crate) struct HostStateSeed {
    pub session: SessionSeed,
    pub telemetry: ServerTelemetry,
    /// Next host->worker request id; restored as at least 1.
    pub next_request_id: u64,
    pub binding: Option<ProjectBindingSeed>,
    pub worker_generation: u64,
    /// One-shot test-hook consumption flags, carried across generations so
    /// each hook fires at most once per host lineage.
    pub force_rollout_consumed: bool,
    pub crash_once_consumed: bool,
}

/// Serializable form of the host's project binding.
#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
pub(crate) struct ProjectBindingSeed {
    pub requested_path: PathBuf,
    pub project_root: PathBuf,
}
+
/// Identifier the host assigns to each request it forwards to the worker.
#[derive(Clone, Copy, Debug, Deserialize, Eq, Ord, PartialEq, PartialOrd, Serialize)]
#[serde(transparent)]
pub(crate) struct HostRequestId(pub u64);

/// A line-framed request from host to worker.
#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
#[serde(tag = "kind", rename_all = "snake_case")]
pub(crate) enum WorkerRequest {
    Execute {
        id: HostRequestId,
        operation: WorkerOperation,
    },
}

/// The two operations the worker executes on the host's behalf.
#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
#[serde(tag = "kind", rename_all = "snake_case")]
pub(crate) enum WorkerOperation {
    CallTool { name: String, arguments: Value },
    ReadResource { uri: String },
}

/// A line-framed reply from worker to host, echoing the request id.
#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
pub(crate) struct WorkerResponse {
    pub id: HostRequestId,
    pub outcome: WorkerOutcome,
}

/// Result of a worker operation: a JSON payload or a structured fault.
#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
#[serde(tag = "status", rename_all = "snake_case")]
pub(crate) enum WorkerOutcome {
    Success {
        result: Value,
    },
    Fault {
        fault: crate::mcp::fault::FaultRecord,
    },
}
+
/// Cheap identity of the executable on disk (size plus mtime in nanoseconds
/// since the Unix epoch); compared against the startup fingerprint to detect
/// a pending binary rollout.
#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
pub(crate) struct BinaryFingerprint {
    pub length_bytes: u64,
    pub modified_unix_nanos: u128,
}

/// How to spawn a worker: the executable to run with `mcp worker`.
#[derive(Clone, Debug)]
pub(crate) struct WorkerSpawnConfig {
    pub executable: PathBuf,
}
diff --git a/crates/fidget-spinner-cli/src/mcp/service.rs b/crates/fidget-spinner-cli/src/mcp/service.rs
new file mode 100644
index 0000000..a7cae10
--- /dev/null
+++ b/crates/fidget-spinner-cli/src/mcp/service.rs
@@ -0,0 +1,813 @@
+use std::collections::BTreeMap;
+use std::fs;
+
+use camino::{Utf8Path, Utf8PathBuf};
+use fidget_spinner_core::{
+ AnnotationVisibility, CodeSnapshotRef, CommandRecipe, ExecutionBackend, FrontierContract,
+ FrontierNote, FrontierVerdict, MetricObservation, MetricSpec, MetricUnit, NodeAnnotation,
+ NodeClass, NodePayload, NonEmptyText,
+};
+use fidget_spinner_store_sqlite::{
+ CloseExperimentRequest, CreateFrontierRequest, CreateNodeRequest, EdgeAttachment,
+ EdgeAttachmentDirection, ListNodesQuery, ProjectStore, StoreError,
+};
+use serde::Deserialize;
+use serde_json::{Map, Value, json};
+
+use crate::mcp::fault::{FaultKind, FaultRecord, FaultStage};
+use crate::mcp::protocol::{TRANSIENT_ONCE_ENV, TRANSIENT_ONCE_MARKER_ENV, WorkerOperation};
+
/// Worker-side service: owns the project store and executes the
/// operations the host forwards over stdio.
pub(crate) struct WorkerService {
    store: ProjectStore,
}
+
+impl WorkerService {
+ pub fn new(project: &Utf8Path) -> Result<Self, StoreError> {
+ Ok(Self {
+ store: crate::open_store(project.as_std_path())?,
+ })
+ }
+
    /// Execute one forwarded operation, first giving the test-only
    /// transient-fault injector a chance to fail it.
    ///
    /// The injection key uses the same `<method>:<target>` shape as the
    /// fault/telemetry operation labels (`tools/call:<name>`,
    /// `resources/read:<uri>`).
    pub fn execute(&mut self, operation: WorkerOperation) -> Result<Value, FaultRecord> {
        let operation_key = match &operation {
            WorkerOperation::CallTool { name, .. } => format!("tools/call:{name}"),
            WorkerOperation::ReadResource { uri } => format!("resources/read:{uri}"),
        };
        Self::maybe_inject_transient(&operation_key)?;

        match operation {
            WorkerOperation::CallTool { name, arguments } => self.call_tool(&name, arguments),
            WorkerOperation::ReadResource { uri } => self.read_resource(&uri),
        }
    }
+
    /// Dispatch a single MCP tool invocation by tool name.
    ///
    /// Every fallible step maps its error through `store_fault` with the
    /// `tools/call:<name>` operation label, so faults are attributable to
    /// the exact tool that failed. Unknown tool names produce an
    /// `InvalidInput` fault rather than a panic.
    fn call_tool(&mut self, name: &str, arguments: Value) -> Result<Value, FaultRecord> {
        match name {
            // Where the project lives plus whether a git repo is detected
            // (via `git rev-parse --show-toplevel`).
            "project.status" => tool_success(&json!({
                "project_root": self.store.project_root(),
                "state_root": self.store.state_root(),
                "display_name": self.store.config().display_name,
                "schema": self.store.schema().schema_ref(),
                "git_repo_detected": crate::run_git(self.store.project_root(), &["rev-parse", "--show-toplevel"])
                    .map_err(store_fault("tools/call:project.status"))?
                    .is_some(),
            })),
            "project.schema" => tool_success(self.store.schema()),
            "frontier.list" => tool_success(
                &self
                    .store
                    .list_frontiers()
                    .map_err(store_fault("tools/call:frontier.list"))?,
            ),
            // Projection of a single frontier's current state.
            "frontier.status" => {
                let args = deserialize::<FrontierStatusToolArgs>(arguments)?;
                tool_success(
                    &self
                        .store
                        .frontier_projection(
                            crate::parse_frontier_id(&args.frontier_id)
                                .map_err(store_fault("tools/call:frontier.status"))?,
                        )
                        .map_err(store_fault("tools/call:frontier.status"))?,
                )
            }
            // Create a frontier: capture an initial champion checkpoint,
            // then persist the frontier with its full contract.
            "frontier.init" => {
                let args = deserialize::<FrontierInitToolArgs>(arguments)?;
                let initial_checkpoint = self
                    .store
                    .auto_capture_checkpoint(
                        NonEmptyText::new(
                            args.seed_summary
                                .unwrap_or_else(|| "initial champion checkpoint".to_owned()),
                        )
                        .map_err(store_fault("tools/call:frontier.init"))?,
                    )
                    .map_err(store_fault("tools/call:frontier.init"))?;
                let projection = self
                    .store
                    .create_frontier(CreateFrontierRequest {
                        label: NonEmptyText::new(args.label)
                            .map_err(store_fault("tools/call:frontier.init"))?,
                        contract_title: NonEmptyText::new(args.contract_title)
                            .map_err(store_fault("tools/call:frontier.init"))?,
                        contract_summary: args
                            .contract_summary
                            .map(NonEmptyText::new)
                            .transpose()
                            .map_err(store_fault("tools/call:frontier.init"))?,
                        contract: FrontierContract {
                            objective: NonEmptyText::new(args.objective)
                                .map_err(store_fault("tools/call:frontier.init"))?,
                            evaluation: fidget_spinner_core::EvaluationProtocol {
                                benchmark_suites: crate::to_text_set(args.benchmark_suites)
                                    .map_err(store_fault("tools/call:frontier.init"))?,
                                primary_metric: MetricSpec {
                                    metric_key: NonEmptyText::new(args.primary_metric.key)
                                        .map_err(store_fault("tools/call:frontier.init"))?,
                                    unit: parse_metric_unit_name(&args.primary_metric.unit)
                                        .map_err(store_fault("tools/call:frontier.init"))?,
                                    objective: crate::parse_optimization_objective(
                                        &args.primary_metric.objective,
                                    )
                                    .map_err(store_fault("tools/call:frontier.init"))?,
                                },
                                supporting_metrics: args
                                    .supporting_metrics
                                    .into_iter()
                                    .map(metric_spec_from_wire)
                                    .collect::<Result<_, _>>()
                                    .map_err(store_fault("tools/call:frontier.init"))?,
                            },
                            promotion_criteria: crate::to_text_vec(args.promotion_criteria)
                                .map_err(store_fault("tools/call:frontier.init"))?,
                        },
                        initial_checkpoint,
                    })
                    .map_err(store_fault("tools/call:frontier.init"))?;
                tool_success(&projection)
            }
            // Generic node creation with an arbitrary class and payload.
            "node.create" => {
                let args = deserialize::<NodeCreateToolArgs>(arguments)?;
                let node = self
                    .store
                    .add_node(CreateNodeRequest {
                        class: parse_node_class_name(&args.class)
                            .map_err(store_fault("tools/call:node.create"))?,
                        frontier_id: args
                            .frontier_id
                            .as_deref()
                            .map(crate::parse_frontier_id)
                            .transpose()
                            .map_err(store_fault("tools/call:node.create"))?,
                        title: NonEmptyText::new(args.title)
                            .map_err(store_fault("tools/call:node.create"))?,
                        summary: args
                            .summary
                            .map(NonEmptyText::new)
                            .transpose()
                            .map_err(store_fault("tools/call:node.create"))?,
                        payload: NodePayload::with_schema(
                            self.store.schema().schema_ref(),
                            args.payload.unwrap_or_default(),
                        ),
                        annotations: tool_annotations(args.annotations)
                            .map_err(store_fault("tools/call:node.create"))?,
                        attachments: lineage_attachments(args.parents)
                            .map_err(store_fault("tools/call:node.create"))?,
                    })
                    .map_err(store_fault("tools/call:node.create"))?;
                tool_success(&node)
            }
            // A `Change` node whose payload carries body plus the optional
            // hypothesis / base checkpoint / benchmark suite fields.
            "change.record" => {
                let args = deserialize::<ChangeRecordToolArgs>(arguments)?;
                let mut fields = Map::new();
                let _ = fields.insert("body".to_owned(), Value::String(args.body));
                if let Some(hypothesis) = args.hypothesis {
                    let _ = fields.insert("hypothesis".to_owned(), Value::String(hypothesis));
                }
                if let Some(base_checkpoint_id) = args.base_checkpoint_id {
                    let _ = fields.insert(
                        "base_checkpoint_id".to_owned(),
                        Value::String(base_checkpoint_id),
                    );
                }
                if let Some(benchmark_suite) = args.benchmark_suite {
                    let _ =
                        fields.insert("benchmark_suite".to_owned(), Value::String(benchmark_suite));
                }
                let node = self
                    .store
                    .add_node(CreateNodeRequest {
                        class: NodeClass::Change,
                        frontier_id: Some(
                            crate::parse_frontier_id(&args.frontier_id)
                                .map_err(store_fault("tools/call:change.record"))?,
                        ),
                        title: NonEmptyText::new(args.title)
                            .map_err(store_fault("tools/call:change.record"))?,
                        summary: args
                            .summary
                            .map(NonEmptyText::new)
                            .transpose()
                            .map_err(store_fault("tools/call:change.record"))?,
                        payload: NodePayload::with_schema(self.store.schema().schema_ref(), fields),
                        annotations: tool_annotations(args.annotations)
                            .map_err(store_fault("tools/call:change.record"))?,
                        attachments: lineage_attachments(args.parents)
                            .map_err(store_fault("tools/call:change.record"))?,
                    })
                    .map_err(store_fault("tools/call:change.record"))?;
                tool_success(&node)
            }
            // List nodes with optional frontier/class filters; limit
            // defaults to 20 when omitted.
            "node.list" => {
                let args = deserialize::<NodeListToolArgs>(arguments)?;
                let nodes = self
                    .store
                    .list_nodes(ListNodesQuery {
                        frontier_id: args
                            .frontier_id
                            .as_deref()
                            .map(crate::parse_frontier_id)
                            .transpose()
                            .map_err(store_fault("tools/call:node.list"))?,
                        class: args
                            .class
                            .as_deref()
                            .map(parse_node_class_name)
                            .transpose()
                            .map_err(store_fault("tools/call:node.list"))?,
                        include_archived: args.include_archived,
                        limit: args.limit.unwrap_or(20),
                    })
                    .map_err(store_fault("tools/call:node.list"))?;
                tool_success(&nodes)
            }
            // A missing node is reported as `InvalidInput`, not `Internal`.
            "node.read" => {
                let args = deserialize::<NodeReadToolArgs>(arguments)?;
                let node_id = crate::parse_node_id(&args.node_id)
                    .map_err(store_fault("tools/call:node.read"))?;
                let node = self
                    .store
                    .get_node(node_id)
                    .map_err(store_fault("tools/call:node.read"))?
                    .ok_or_else(|| {
                        FaultRecord::new(
                            FaultKind::InvalidInput,
                            FaultStage::Store,
                            "tools/call:node.read",
                            format!("node {node_id} was not found"),
                        )
                    })?;
                tool_success(&node)
            }
            // Attach a fresh annotation; visibility defaults to hidden
            // unless the caller sets `visible`.
            "node.annotate" => {
                let args = deserialize::<NodeAnnotateToolArgs>(arguments)?;
                let annotation = NodeAnnotation {
                    id: fidget_spinner_core::AnnotationId::fresh(),
                    visibility: if args.visible {
                        AnnotationVisibility::Visible
                    } else {
                        AnnotationVisibility::HiddenByDefault
                    },
                    label: args
                        .label
                        .map(NonEmptyText::new)
                        .transpose()
                        .map_err(store_fault("tools/call:node.annotate"))?,
                    body: NonEmptyText::new(args.body)
                        .map_err(store_fault("tools/call:node.annotate"))?,
                    created_at: time::OffsetDateTime::now_utc(),
                };
                self.store
                    .annotate_node(
                        crate::parse_node_id(&args.node_id)
                            .map_err(store_fault("tools/call:node.annotate"))?,
                        annotation,
                    )
                    .map_err(store_fault("tools/call:node.annotate"))?;
                tool_success(&json!({"annotated": args.node_id}))
            }
            "node.archive" => {
                let args = deserialize::<NodeArchiveToolArgs>(arguments)?;
                self.store
                    .archive_node(
                        crate::parse_node_id(&args.node_id)
                            .map_err(store_fault("tools/call:node.archive"))?,
                    )
                    .map_err(store_fault("tools/call:node.archive"))?;
                tool_success(&json!({"archived": args.node_id}))
            }
            // Lightweight `Note` node: title + body, no summary.
            "note.quick" => {
                let args = deserialize::<QuickNoteToolArgs>(arguments)?;
                let node = self
                    .store
                    .add_node(CreateNodeRequest {
                        class: NodeClass::Note,
                        frontier_id: args
                            .frontier_id
                            .as_deref()
                            .map(crate::parse_frontier_id)
                            .transpose()
                            .map_err(store_fault("tools/call:note.quick"))?,
                        title: NonEmptyText::new(args.title)
                            .map_err(store_fault("tools/call:note.quick"))?,
                        summary: None,
                        payload: NodePayload::with_schema(
                            self.store.schema().schema_ref(),
                            crate::json_object(json!({ "body": args.body }))
                                .map_err(store_fault("tools/call:note.quick"))?,
                        ),
                        annotations: tool_annotations(args.annotations)
                            .map_err(store_fault("tools/call:note.quick"))?,
                        attachments: lineage_attachments(args.parents)
                            .map_err(store_fault("tools/call:note.quick"))?,
                    })
                    .map_err(store_fault("tools/call:note.quick"))?;
                tool_success(&node)
            }
            // `Research` node: like `note.quick` but with an optional summary.
            "research.record" => {
                let args = deserialize::<ResearchRecordToolArgs>(arguments)?;
                let node = self
                    .store
                    .add_node(CreateNodeRequest {
                        class: NodeClass::Research,
                        frontier_id: args
                            .frontier_id
                            .as_deref()
                            .map(crate::parse_frontier_id)
                            .transpose()
                            .map_err(store_fault("tools/call:research.record"))?,
                        title: NonEmptyText::new(args.title)
                            .map_err(store_fault("tools/call:research.record"))?,
                        summary: args
                            .summary
                            .map(NonEmptyText::new)
                            .transpose()
                            .map_err(store_fault("tools/call:research.record"))?,
                        payload: NodePayload::with_schema(
                            self.store.schema().schema_ref(),
                            crate::json_object(json!({ "body": args.body }))
                                .map_err(store_fault("tools/call:research.record"))?,
                        ),
                        annotations: tool_annotations(args.annotations)
                            .map_err(store_fault("tools/call:research.record"))?,
                        attachments: lineage_attachments(args.parents)
                            .map_err(store_fault("tools/call:research.record"))?,
                    })
                    .map_err(store_fault("tools/call:research.record"))?;
                tool_success(&node)
            }
            // Close an experiment: capture the candidate checkpoint (failing
            // with an `Internal` fault if no git snapshot can be taken),
            // then record run, metrics, note, verdict and decision.
            "experiment.close" => {
                let args = deserialize::<ExperimentCloseToolArgs>(arguments)?;
                let frontier_id = crate::parse_frontier_id(&args.frontier_id)
                    .map_err(store_fault("tools/call:experiment.close"))?;
                let snapshot = self
                    .store
                    .auto_capture_checkpoint(
                        NonEmptyText::new(args.candidate_summary.clone())
                            .map_err(store_fault("tools/call:experiment.close"))?,
                    )
                    .map_err(store_fault("tools/call:experiment.close"))?
                    .map(|seed| seed.snapshot)
                    .ok_or_else(|| {
                        FaultRecord::new(
                            FaultKind::Internal,
                            FaultStage::Store,
                            "tools/call:experiment.close",
                            format!(
                                "git repository inspection failed for {}",
                                self.store.project_root()
                            ),
                        )
                    })?;
                let receipt = self
                    .store
                    .close_experiment(CloseExperimentRequest {
                        frontier_id,
                        base_checkpoint_id: crate::parse_checkpoint_id(&args.base_checkpoint_id)
                            .map_err(store_fault("tools/call:experiment.close"))?,
                        change_node_id: crate::parse_node_id(&args.change_node_id)
                            .map_err(store_fault("tools/call:experiment.close"))?,
                        candidate_summary: NonEmptyText::new(args.candidate_summary)
                            .map_err(store_fault("tools/call:experiment.close"))?,
                        candidate_snapshot: snapshot,
                        run_title: NonEmptyText::new(args.run.title)
                            .map_err(store_fault("tools/call:experiment.close"))?,
                        run_summary: args
                            .run
                            .summary
                            .map(NonEmptyText::new)
                            .transpose()
                            .map_err(store_fault("tools/call:experiment.close"))?,
                        backend: parse_backend_name(&args.run.backend)
                            .map_err(store_fault("tools/call:experiment.close"))?,
                        benchmark_suite: NonEmptyText::new(args.run.benchmark_suite)
                            .map_err(store_fault("tools/call:experiment.close"))?,
                        command: command_recipe_from_wire(
                            args.run.command,
                            self.store.project_root(),
                        )
                        .map_err(store_fault("tools/call:experiment.close"))?,
                        code_snapshot: Some(
                            capture_code_snapshot(self.store.project_root())
                                .map_err(store_fault("tools/call:experiment.close"))?,
                        ),
                        primary_metric: metric_observation_from_wire(args.primary_metric)
                            .map_err(store_fault("tools/call:experiment.close"))?,
                        supporting_metrics: args
                            .supporting_metrics
                            .into_iter()
                            .map(metric_observation_from_wire)
                            .collect::<Result<Vec<_>, _>>()
                            .map_err(store_fault("tools/call:experiment.close"))?,
                        note: FrontierNote {
                            summary: NonEmptyText::new(args.note.summary)
                                .map_err(store_fault("tools/call:experiment.close"))?,
                            next_hypotheses: crate::to_text_vec(args.note.next_hypotheses)
                                .map_err(store_fault("tools/call:experiment.close"))?,
                        },
                        verdict: parse_verdict_name(&args.verdict)
                            .map_err(store_fault("tools/call:experiment.close"))?,
                        decision_title: NonEmptyText::new(args.decision_title)
                            .map_err(store_fault("tools/call:experiment.close"))?,
                        decision_rationale: NonEmptyText::new(args.decision_rationale)
                            .map_err(store_fault("tools/call:experiment.close"))?,
                        analysis_node_id: args
                            .analysis_node_id
                            .as_deref()
                            .map(crate::parse_node_id)
                            .transpose()
                            .map_err(store_fault("tools/call:experiment.close"))?,
                    })
                    .map_err(store_fault("tools/call:experiment.close"))?;
                tool_success(&receipt)
            }
            other => Err(FaultRecord::new(
                FaultKind::InvalidInput,
                FaultStage::Worker,
                format!("tools/call:{other}"),
                format!("unknown tool `{other}`"),
            )),
        }
    }
+
+ fn read_resource(&mut self, uri: &str) -> Result<Value, FaultRecord> {
+ match uri {
+ "fidget-spinner://project/config" => Ok(json!({
+ "contents": [{
+ "uri": uri,
+ "mimeType": "application/json",
+ "text": crate::to_pretty_json(self.store.config())
+ .map_err(store_fault("resources/read:fidget-spinner://project/config"))?,
+ }]
+ })),
+ "fidget-spinner://project/schema" => Ok(json!({
+ "contents": [{
+ "uri": uri,
+ "mimeType": "application/json",
+ "text": crate::to_pretty_json(self.store.schema())
+ .map_err(store_fault("resources/read:fidget-spinner://project/schema"))?,
+ }]
+ })),
+ _ => Err(FaultRecord::new(
+ FaultKind::InvalidInput,
+ FaultStage::Worker,
+ format!("resources/read:{uri}"),
+ format!("unknown resource `{uri}`"),
+ )),
+ }
+ }
+
+ fn maybe_inject_transient(operation: &str) -> Result<(), FaultRecord> {
+ let Some(target_operation) = std::env::var_os(TRANSIENT_ONCE_ENV) else {
+ return Ok(());
+ };
+ let target_operation = target_operation.to_string_lossy();
+ if target_operation != operation {
+ return Ok(());
+ }
+ let Some(marker_path) = std::env::var_os(TRANSIENT_ONCE_MARKER_ENV) else {
+ return Ok(());
+ };
+ if Utf8PathBuf::from(marker_path.to_string_lossy().into_owned()).exists() {
+ return Ok(());
+ }
+ fs::write(&marker_path, b"triggered").map_err(|error| {
+ FaultRecord::new(
+ FaultKind::Internal,
+ FaultStage::Worker,
+ operation,
+ format!("failed to write transient marker: {error}"),
+ )
+ })?;
+ Err(FaultRecord::new(
+ FaultKind::Transient,
+ FaultStage::Worker,
+ operation,
+ format!("injected transient fault for {operation}"),
+ )
+ .retryable(None))
+ }
+}
+
+fn deserialize<T: for<'de> Deserialize<'de>>(value: Value) -> Result<T, FaultRecord> {
+ serde_json::from_value(value).map_err(|error| {
+ FaultRecord::new(
+ FaultKind::InvalidInput,
+ FaultStage::Protocol,
+ "worker.deserialize",
+ format!("invalid params: {error}"),
+ )
+ })
+}
+
+fn tool_success(value: &impl serde::Serialize) -> Result<Value, FaultRecord> {
+ Ok(json!({
+ "content": [{
+ "type": "text",
+ "text": crate::to_pretty_json(value).map_err(store_fault("worker.tool_success"))?,
+ }],
+ "structuredContent": serde_json::to_value(value)
+ .map_err(store_fault("worker.tool_success"))?,
+ "isError": false,
+ }))
+}
+
+fn store_fault<E>(operation: &'static str) -> impl FnOnce(E) -> FaultRecord
+where
+ E: std::fmt::Display,
+{
+ move |error| {
+ FaultRecord::new(
+ classify_fault_kind(&error.to_string()),
+ FaultStage::Store,
+ operation,
+ error.to_string(),
+ )
+ }
+}
+
+fn classify_fault_kind(message: &str) -> FaultKind {
+ if message.contains("was not found")
+ || message.contains("invalid")
+ || message.contains("unknown")
+ || message.contains("empty")
+ {
+ FaultKind::InvalidInput
+ } else {
+ FaultKind::Internal
+ }
+}
+
+fn tool_annotations(raw: Vec<WireAnnotation>) -> Result<Vec<NodeAnnotation>, StoreError> {
+ raw.into_iter()
+ .map(|annotation| {
+ Ok(NodeAnnotation {
+ id: fidget_spinner_core::AnnotationId::fresh(),
+ visibility: if annotation.visible {
+ AnnotationVisibility::Visible
+ } else {
+ AnnotationVisibility::HiddenByDefault
+ },
+ label: annotation.label.map(NonEmptyText::new).transpose()?,
+ body: NonEmptyText::new(annotation.body)?,
+ created_at: time::OffsetDateTime::now_utc(),
+ })
+ })
+ .collect()
+}
+
+fn lineage_attachments(parents: Vec<String>) -> Result<Vec<EdgeAttachment>, StoreError> {
+ parents
+ .into_iter()
+ .map(|parent| {
+ Ok(EdgeAttachment {
+ node_id: crate::parse_node_id(&parent)?,
+ kind: fidget_spinner_core::EdgeKind::Lineage,
+ direction: EdgeAttachmentDirection::ExistingToNew,
+ })
+ })
+ .collect()
+}
+
+fn metric_spec_from_wire(raw: WireMetricSpec) -> Result<MetricSpec, StoreError> {
+ Ok(MetricSpec {
+ metric_key: NonEmptyText::new(raw.key)?,
+ unit: parse_metric_unit_name(&raw.unit)?,
+ objective: crate::parse_optimization_objective(&raw.objective)?,
+ })
+}
+
+fn metric_observation_from_wire(
+ raw: WireMetricObservation,
+) -> Result<MetricObservation, StoreError> {
+ Ok(MetricObservation {
+ metric_key: NonEmptyText::new(raw.key)?,
+ unit: parse_metric_unit_name(&raw.unit)?,
+ objective: crate::parse_optimization_objective(&raw.objective)?,
+ value: raw.value,
+ })
+}
+
+fn command_recipe_from_wire(
+ raw: WireRunCommand,
+ project_root: &Utf8Path,
+) -> Result<CommandRecipe, StoreError> {
+ let working_directory = raw
+ .working_directory
+ .map(Utf8PathBuf::from)
+ .unwrap_or_else(|| project_root.to_path_buf());
+ CommandRecipe::new(
+ working_directory,
+ crate::to_text_vec(raw.argv)?,
+ raw.env.into_iter().collect::<BTreeMap<_, _>>(),
+ )
+ .map_err(StoreError::from)
+}
+
/// Thin wrapper delegating to the crate-level snapshot helper, kept here so
/// call sites in this module stay short.
fn capture_code_snapshot(project_root: &Utf8Path) -> Result<CodeSnapshotRef, StoreError> {
    crate::capture_code_snapshot(project_root)
}
+
+fn parse_node_class_name(raw: &str) -> Result<NodeClass, StoreError> {
+ match raw {
+ "contract" => Ok(NodeClass::Contract),
+ "change" => Ok(NodeClass::Change),
+ "run" => Ok(NodeClass::Run),
+ "analysis" => Ok(NodeClass::Analysis),
+ "decision" => Ok(NodeClass::Decision),
+ "research" => Ok(NodeClass::Research),
+ "enabling" => Ok(NodeClass::Enabling),
+ "note" => Ok(NodeClass::Note),
+ other => Err(crate::invalid_input(format!(
+ "unknown node class `{other}`"
+ ))),
+ }
+}
+
/// Thin wrapper around the crate-level metric-unit parser, matching the
/// naming of the other `parse_*_name` helpers in this module.
fn parse_metric_unit_name(raw: &str) -> Result<MetricUnit, StoreError> {
    crate::parse_metric_unit(raw)
}
+
+fn parse_backend_name(raw: &str) -> Result<ExecutionBackend, StoreError> {
+ match raw {
+ "local_process" => Ok(ExecutionBackend::LocalProcess),
+ "worktree_process" => Ok(ExecutionBackend::WorktreeProcess),
+ "ssh_process" => Ok(ExecutionBackend::SshProcess),
+ other => Err(crate::invalid_input(format!("unknown backend `{other}`"))),
+ }
+}
+
+fn parse_verdict_name(raw: &str) -> Result<FrontierVerdict, StoreError> {
+ match raw {
+ "promote_to_champion" => Ok(FrontierVerdict::PromoteToChampion),
+ "keep_on_frontier" => Ok(FrontierVerdict::KeepOnFrontier),
+ "revert_to_champion" => Ok(FrontierVerdict::RevertToChampion),
+ "archive_dead_end" => Ok(FrontierVerdict::ArchiveDeadEnd),
+ "needs_more_evidence" => Ok(FrontierVerdict::NeedsMoreEvidence),
+ other => Err(crate::invalid_input(format!("unknown verdict `{other}`"))),
+ }
+}
+
/// Arguments for the `frontier.status` tool.
#[derive(Debug, Deserialize)]
struct FrontierStatusToolArgs {
    frontier_id: String,
}

/// Arguments for the `frontier.init` tool: the frontier label plus its
/// contract (objective, benchmark suites, metrics, promotion criteria)
/// and an optional summary for the seed checkpoint.
#[derive(Debug, Deserialize)]
struct FrontierInitToolArgs {
    label: String,
    objective: String,
    contract_title: String,
    contract_summary: Option<String>,
    benchmark_suites: Vec<String>,
    promotion_criteria: Vec<String>,
    primary_metric: WireMetricSpec,
    // Optional on the wire; empty when omitted.
    #[serde(default)]
    supporting_metrics: Vec<WireMetricSpec>,
    seed_summary: Option<String>,
}
+
/// Arguments for the generic `node.create` tool; `class` is parsed by
/// `parse_node_class_name` and `payload` becomes the node's field map.
#[derive(Debug, Deserialize)]
struct NodeCreateToolArgs {
    class: String,
    frontier_id: Option<String>,
    title: String,
    summary: Option<String>,
    #[serde(default)]
    payload: Option<Map<String, Value>>,
    // Optional on the wire; empty when omitted.
    #[serde(default)]
    annotations: Vec<WireAnnotation>,
    // Parent node ids to attach as lineage edges.
    #[serde(default)]
    parents: Vec<String>,
}

/// Arguments for `change.record`; the optional fields are folded into the
/// change node's payload only when present.
#[derive(Debug, Deserialize)]
struct ChangeRecordToolArgs {
    frontier_id: String,
    title: String,
    summary: Option<String>,
    body: String,
    hypothesis: Option<String>,
    base_checkpoint_id: Option<String>,
    benchmark_suite: Option<String>,
    #[serde(default)]
    annotations: Vec<WireAnnotation>,
    #[serde(default)]
    parents: Vec<String>,
}
+
/// Arguments for `node.list`; omitted filters match everything and the
/// limit defaults to 20 at the call site.
#[derive(Debug, Deserialize)]
struct NodeListToolArgs {
    frontier_id: Option<String>,
    class: Option<String>,
    #[serde(default)]
    include_archived: bool,
    limit: Option<u32>,
}

/// Arguments for `node.read`.
#[derive(Debug, Deserialize)]
struct NodeReadToolArgs {
    node_id: String,
}

/// Arguments for `node.annotate`; `visible` defaults to `false`
/// (hidden-by-default annotation).
#[derive(Debug, Deserialize)]
struct NodeAnnotateToolArgs {
    node_id: String,
    body: String,
    label: Option<String>,
    #[serde(default)]
    visible: bool,
}

/// Arguments for `node.archive`.
#[derive(Debug, Deserialize)]
struct NodeArchiveToolArgs {
    node_id: String,
}
+
/// Arguments for `note.quick` (title + body, no summary).
#[derive(Debug, Deserialize)]
struct QuickNoteToolArgs {
    frontier_id: Option<String>,
    title: String,
    body: String,
    #[serde(default)]
    annotations: Vec<WireAnnotation>,
    #[serde(default)]
    parents: Vec<String>,
}

/// Arguments for `research.record` (like `note.quick` plus a summary).
#[derive(Debug, Deserialize)]
struct ResearchRecordToolArgs {
    frontier_id: Option<String>,
    title: String,
    summary: Option<String>,
    body: String,
    #[serde(default)]
    annotations: Vec<WireAnnotation>,
    #[serde(default)]
    parents: Vec<String>,
}

/// Arguments for `experiment.close`: identifies the frontier, base
/// checkpoint and change node, then supplies the run, metrics, frontier
/// note, verdict and decision recorded on close.
#[derive(Debug, Deserialize)]
struct ExperimentCloseToolArgs {
    frontier_id: String,
    base_checkpoint_id: String,
    change_node_id: String,
    candidate_summary: String,
    run: WireRun,
    primary_metric: WireMetricObservation,
    #[serde(default)]
    supporting_metrics: Vec<WireMetricObservation>,
    note: WireFrontierNote,
    verdict: String,
    decision_title: String,
    decision_rationale: String,
    analysis_node_id: Option<String>,
}
+
/// Wire form of a node annotation; `visible` defaults to `false`.
#[derive(Debug, Deserialize)]
struct WireAnnotation {
    body: String,
    label: Option<String>,
    #[serde(default)]
    visible: bool,
}

/// Wire form of a metric spec; all three fields are parsed into typed
/// values by `metric_spec_from_wire`.
#[derive(Debug, Deserialize)]
struct WireMetricSpec {
    key: String,
    unit: String,
    objective: String,
}

/// Wire form of a measured metric: a spec plus the observed value.
#[derive(Debug, Deserialize)]
struct WireMetricObservation {
    key: String,
    unit: String,
    objective: String,
    value: f64,
}

/// Wire form of a benchmark run recorded by `experiment.close`.
#[derive(Debug, Deserialize)]
struct WireRun {
    title: String,
    summary: Option<String>,
    backend: String,
    benchmark_suite: String,
    command: WireRunCommand,
}

/// Wire form of the command that produced a run; the working directory is
/// optional and `env` defaults to empty.
#[derive(Debug, Deserialize)]
struct WireRunCommand {
    working_directory: Option<String>,
    argv: Vec<String>,
    #[serde(default)]
    env: BTreeMap<String, String>,
}

/// Wire form of the frontier note written on close.
#[derive(Debug, Deserialize)]
struct WireFrontierNote {
    summary: String,
    #[serde(default)]
    next_hypotheses: Vec<String>,
}
diff --git a/crates/fidget-spinner-cli/src/mcp/telemetry.rs b/crates/fidget-spinner-cli/src/mcp/telemetry.rs
new file mode 100644
index 0000000..7206f76
--- /dev/null
+++ b/crates/fidget-spinner-cli/src/mcp/telemetry.rs
@@ -0,0 +1,103 @@
+use std::collections::BTreeMap;
+
+use serde::{Deserialize, Serialize};
+
+use crate::mcp::fault::FaultRecord;
+
/// Per-operation counters maintained by `ServerTelemetry`; only the most
/// recent latency is retained.
#[derive(Clone, Debug, Default, Deserialize, Eq, PartialEq, Serialize)]
pub(crate) struct OperationTelemetry {
    pub requests: u64,
    pub successes: u64,
    pub errors: u64,
    pub retries: u64,
    // Latency of the most recent success or error, in milliseconds.
    pub last_latency_ms: Option<u128>,
}
+
/// Server-wide telemetry: global counters, the most recent fault, and a
/// per-operation breakdown keyed by operation label.
#[derive(Clone, Debug, Default, Deserialize, Eq, PartialEq, Serialize)]
pub(crate) struct ServerTelemetry {
    pub requests: u64,
    pub successes: u64,
    pub errors: u64,
    pub retries: u64,
    pub worker_restarts: u64,
    pub host_rollouts: u64,
    // Most recently recorded fault, if any error has occurred.
    pub last_fault: Option<FaultRecord>,
    // Per-operation counters, keyed by operation label.
    pub operations: BTreeMap<String, OperationTelemetry>,
}
+
+impl ServerTelemetry {
+ pub fn record_request(&mut self, operation: &str) {
+ self.requests += 1;
+ self.operations
+ .entry(operation.to_owned())
+ .or_default()
+ .requests += 1;
+ }
+
+ pub fn record_success(&mut self, operation: &str, latency_ms: u128) {
+ self.successes += 1;
+ let entry = self.operations.entry(operation.to_owned()).or_default();
+ entry.successes += 1;
+ entry.last_latency_ms = Some(latency_ms);
+ }
+
+ pub fn record_retry(&mut self, operation: &str) {
+ self.retries += 1;
+ self.operations
+ .entry(operation.to_owned())
+ .or_default()
+ .retries += 1;
+ }
+
+ pub fn record_error(&mut self, operation: &str, fault: FaultRecord, latency_ms: u128) {
+ self.errors += 1;
+ self.last_fault = Some(fault.clone());
+ let entry = self.operations.entry(operation.to_owned()).or_default();
+ entry.errors += 1;
+ entry.last_latency_ms = Some(latency_ms);
+ }
+
+ pub fn record_worker_restart(&mut self) {
+ self.worker_restarts += 1;
+ }
+
+ pub fn record_rollout(&mut self) {
+ self.host_rollouts += 1;
+ }
+}
+
/// Health of server initialization: whether it is ready to serve and
/// whether the state seed was captured.
#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
pub(crate) struct InitializationHealth {
    pub ready: bool,
    pub seed_captured: bool,
}

/// Health of the worker process: its generation counter and liveness.
#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
pub(crate) struct WorkerHealth {
    pub worker_generation: u64,
    pub alive: bool,
}

/// Health of the server binary: current executable path, whether the
/// launch path is still stable, and whether a rollout is pending.
#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
pub(crate) struct BinaryHealth {
    pub current_executable: String,
    pub launch_path_stable: bool,
    pub rollout_pending: bool,
}

/// Health of the project binding; path fields are `None` until bound.
#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
pub(crate) struct BindingHealth {
    pub bound: bool,
    pub requested_path: Option<String>,
    pub project_root: Option<String>,
    pub state_root: Option<String>,
}

/// Aggregate health report combining the sections above with the most
/// recent fault, if any.
#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
pub(crate) struct HealthSnapshot {
    pub initialization: InitializationHealth,
    pub binding: BindingHealth,
    pub worker: WorkerHealth,
    pub binary: BinaryHealth,
    pub last_fault: Option<FaultRecord>,
}
diff --git a/crates/fidget-spinner-cli/src/mcp/worker.rs b/crates/fidget-spinner-cli/src/mcp/worker.rs
new file mode 100644
index 0000000..91c6db9
--- /dev/null
+++ b/crates/fidget-spinner-cli/src/mcp/worker.rs
@@ -0,0 +1,66 @@
+use std::io::{self, BufRead, Write};
+use std::path::PathBuf;
+
+use camino::Utf8PathBuf;
+
+use crate::mcp::fault::{FaultKind, FaultRecord, FaultStage};
+use crate::mcp::protocol::{WorkerOutcome, WorkerRequest, WorkerResponse};
+use crate::mcp::service::WorkerService;
+
/// Run the worker side of the host/worker protocol over stdio: read one
/// JSON `WorkerRequest` per line from stdin, execute it against the
/// project store, and write one JSON `WorkerResponse` per line to stdout.
/// Returns when stdin reaches end-of-file.
pub(crate) fn serve(project: PathBuf) -> Result<(), fidget_spinner_store_sqlite::StoreError> {
    // NOTE(review): lossy conversion — a non-UTF-8 project path would be
    // rewritten here before opening the store; confirm callers only pass
    // UTF-8 paths.
    let project = Utf8PathBuf::from(project.to_string_lossy().into_owned());
    let mut service = WorkerService::new(&project)?;
    let stdin = io::stdin();
    let mut stdout = io::stdout().lock();

    for line in stdin.lock().lines() {
        let line = match line {
            Ok(line) => line,
            Err(error) => {
                // Log and keep reading: a single bad read must not take
                // the worker down.
                eprintln!("worker stdin failure: {error}");
                continue;
            }
        };
        // Skip blank lines between frames.
        if line.trim().is_empty() {
            continue;
        }

        let request = match serde_json::from_str::<WorkerRequest>(&line) {
            Ok(request) => request,
            Err(error) => {
                // Unparseable frame: report a protocol fault under the
                // sentinel request id 0, since the real id is unknown.
                let response = WorkerResponse {
                    id: crate::mcp::protocol::HostRequestId(0),
                    outcome: WorkerOutcome::Fault {
                        fault: FaultRecord::new(
                            FaultKind::InvalidInput,
                            FaultStage::Protocol,
                            "worker.parse",
                            format!("invalid worker request: {error}"),
                        ),
                    },
                };
                write_message(&mut stdout, &response)?;
                continue;
            }
        };

        // Execute and echo the request id back with either outcome.
        let WorkerRequest::Execute { id, operation } = request;
        let outcome = match service.execute(operation) {
            Ok(result) => WorkerOutcome::Success { result },
            Err(fault) => WorkerOutcome::Fault { fault },
        };
        write_message(&mut stdout, &WorkerResponse { id, outcome })?;
    }

    Ok(())
}
+
+fn write_message(
+ stdout: &mut impl Write,
+ response: &WorkerResponse,
+) -> Result<(), fidget_spinner_store_sqlite::StoreError> {
+ serde_json::to_writer(&mut *stdout, response)?;
+ stdout.write_all(b"\n")?;
+ stdout.flush()?;
+ Ok(())
+}
diff --git a/crates/fidget-spinner-cli/tests/mcp_hardening.rs b/crates/fidget-spinner-cli/tests/mcp_hardening.rs
new file mode 100644
index 0000000..8d3cd9d
--- /dev/null
+++ b/crates/fidget-spinner-cli/tests/mcp_hardening.rs
@@ -0,0 +1,424 @@
+use std::fs;
+use std::io::{self, BufRead, BufReader, Write};
+use std::path::PathBuf;
+use std::process::{Child, ChildStdin, ChildStdout, Command, Stdio};
+
+use camino::Utf8PathBuf;
+use clap as _;
+use dirs as _;
+use fidget_spinner_core::NonEmptyText;
+use fidget_spinner_store_sqlite::{ListNodesQuery, ProjectStore};
+use serde as _;
+use serde_json::{Value, json};
+use time as _;
+use uuid as _;
+
+/// Shorthand result type used by every test and helper in this file.
+type TestResult<T = ()> = Result<T, Box<dyn std::error::Error>>;
+
+/// Convert any displayable error into a boxed error carrying `context`.
+fn must<T, E: std::fmt::Display>(result: Result<T, E>, context: &str) -> TestResult<T> {
+    result.map_err(|error| io::Error::other(format!("{context}: {error}")).into())
+}
+
+/// Turn a `None` into a boxed error whose message is `context`.
+fn must_some<T>(value: Option<T>, context: &str) -> TestResult<T> {
+    value.ok_or_else(|| io::Error::other(context).into())
+}
+
+/// Create a unique per-test directory under the OS temp dir; uniqueness comes
+/// from the test name, the process id, and a nanosecond timestamp.
+fn temp_project_root(name: &str) -> TestResult<Utf8PathBuf> {
+    let root = std::env::temp_dir().join(format!(
+        "fidget_spinner_mcp_{name}_{}_{}",
+        std::process::id(),
+        must(
+            std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH),
+            "current time after unix epoch",
+        )?
+        .as_nanos()
+    ));
+    must(fs::create_dir_all(&root), "create temp project root")?;
+    // Lossy conversion is acceptable here: the path is built from ASCII parts.
+    Ok(Utf8PathBuf::from(root.to_string_lossy().into_owned()))
+}
+
+/// Initialize a project store at `root`; the store handle is dropped so the
+/// spawned host can open it afterwards.
+fn init_project(root: &Utf8PathBuf) -> TestResult {
+    let _store = must(
+        ProjectStore::init(
+            root,
+            must(NonEmptyText::new("mcp test project"), "display name")?,
+            must(NonEmptyText::new("local.mcp.test"), "namespace")?,
+        ),
+        "init project store",
+    )?;
+    Ok(())
+}
+
+/// Path to the CLI binary under test, injected by Cargo at compile time.
+fn binary_path() -> PathBuf {
+    PathBuf::from(env!("CARGO_BIN_EXE_fidget-spinner-cli"))
+}
+
+/// Drives one spawned `mcp serve` host process over piped stdio, speaking
+/// newline-delimited JSON-RPC.
+struct McpHarness {
+    child: Child,
+    stdin: ChildStdin,
+    stdout: BufReader<ChildStdout>,
+}
+
+impl McpHarness {
+    /// Spawn the CLI in `mcp serve` mode, optionally pre-bound to a project
+    /// root, with extra (fault-injection) environment variables applied.
+    /// stderr is inherited so host diagnostics show up in test output.
+    fn spawn(project_root: Option<&Utf8PathBuf>, envs: &[(&str, String)]) -> TestResult<Self> {
+        let mut command = Command::new(binary_path());
+        let _ = command
+            .arg("mcp")
+            .arg("serve")
+            .stdin(Stdio::piped())
+            .stdout(Stdio::piped())
+            .stderr(Stdio::inherit());
+        if let Some(project_root) = project_root {
+            let _ = command.arg("--project").arg(project_root.as_str());
+        }
+        for (key, value) in envs {
+            let _ = command.env(key, value);
+        }
+        let mut child = must(command.spawn(), "spawn mcp host")?;
+        let stdin = must_some(child.stdin.take(), "host stdin")?;
+        let stdout = BufReader::new(must_some(child.stdout.take(), "host stdout")?);
+        Ok(Self {
+            child,
+            stdin,
+            stdout,
+        })
+    }
+
+    /// Perform the MCP `initialize` handshake and return the raw response.
+    fn initialize(&mut self) -> TestResult<Value> {
+        self.request(json!({
+            "jsonrpc": "2.0",
+            "id": 1,
+            "method": "initialize",
+            "params": {
+                "protocolVersion": "2025-11-25",
+                "capabilities": {},
+                "clientInfo": { "name": "mcp-hardening-test", "version": "0" }
+            }
+        }))
+    }
+
+    /// Send the post-handshake `notifications/initialized` notification.
+    fn notify_initialized(&mut self) -> TestResult {
+        self.notify(json!({
+            "jsonrpc": "2.0",
+            "method": "notifications/initialized",
+        }))
+    }
+
+    /// Request the server's tool catalog.
+    fn tools_list(&mut self) -> TestResult<Value> {
+        self.request(json!({
+            "jsonrpc": "2.0",
+            "id": 2,
+            "method": "tools/list",
+            "params": {},
+        }))
+    }
+
+    /// Convenience wrapper for the `project.bind` tool.
+    fn bind_project(&mut self, id: u64, path: &Utf8PathBuf) -> TestResult<Value> {
+        self.call_tool(id, "project.bind", json!({ "path": path.as_str() }))
+    }
+
+    /// Invoke a named tool via `tools/call` with the given JSON-RPC id.
+    fn call_tool(&mut self, id: u64, name: &str, arguments: Value) -> TestResult<Value> {
+        self.request(json!({
+            "jsonrpc": "2.0",
+            "id": id,
+            "method": "tools/call",
+            "params": {
+                "name": name,
+                "arguments": arguments,
+            }
+        }))
+    }
+
+    /// Write one request line and read exactly one response line; an EOF
+    /// (host exited) is reported as an error rather than an empty value.
+    fn request(&mut self, message: Value) -> TestResult<Value> {
+        let encoded = must(serde_json::to_string(&message), "request json")?;
+        must(writeln!(self.stdin, "{encoded}"), "write request")?;
+        must(self.stdin.flush(), "flush request")?;
+        let mut line = String::new();
+        let byte_count = must(self.stdout.read_line(&mut line), "read response")?;
+        if byte_count == 0 {
+            return Err(io::Error::other("unexpected EOF reading response").into());
+        }
+        must(serde_json::from_str(&line), "response json")
+    }
+
+    /// Fire-and-forget notification: written and flushed, no response read.
+    fn notify(&mut self, message: Value) -> TestResult {
+        let encoded = must(serde_json::to_string(&message), "notify json")?;
+        must(writeln!(self.stdin, "{encoded}"), "write notify")?;
+        must(self.stdin.flush(), "flush notify")?;
+        Ok(())
+    }
+}
+
+/// Ensure the child host process is reaped even when a test fails early.
+impl Drop for McpHarness {
+    fn drop(&mut self) {
+        let _ = self.child.kill();
+        let _ = self.child.wait();
+    }
+}
+
+/// Extract the `structuredContent` payload from a `tools/call` response.
+/// Missing keys yield `Value::Null` rather than panicking (serde_json `Index`).
+fn tool_content(response: &Value) -> &Value {
+    &response["result"]["structuredContent"]
+}
+
+/// A freshly started, unbound host must finish the MCP handshake, expose the
+/// full tool catalog, report ready-but-unbound health, count its own requests
+/// in telemetry, and serve the bundled skills.
+#[test]
+fn cold_start_exposes_health_and_telemetry() -> TestResult {
+    let project_root = temp_project_root("cold_start")?;
+    init_project(&project_root)?;
+
+    // Spawn WITHOUT --project: the host starts unbound on purpose.
+    let mut harness = McpHarness::spawn(None, &[])?;
+    let initialize = harness.initialize()?;
+    assert_eq!(
+        initialize["result"]["protocolVersion"].as_str(),
+        Some("2025-11-25")
+    );
+    harness.notify_initialized()?;
+
+    let tools = harness.tools_list()?;
+    let tool_count = must_some(tools["result"]["tools"].as_array(), "tools array")?.len();
+    assert!(tool_count >= 18);
+
+    let health = harness.call_tool(3, "system.health", json!({}))?;
+    assert_eq!(
+        tool_content(&health)["initialization"]["ready"].as_bool(),
+        Some(true)
+    );
+    assert_eq!(
+        tool_content(&health)["initialization"]["seed_captured"].as_bool(),
+        Some(true)
+    );
+    assert_eq!(
+        tool_content(&health)["binding"]["bound"].as_bool(),
+        Some(false)
+    );
+
+    // By now at least three requests were served, so the counter must reflect it.
+    let telemetry = harness.call_tool(4, "system.telemetry", json!({}))?;
+    assert!(tool_content(&telemetry)["requests"].as_u64().unwrap_or(0) >= 3);
+
+    let skills = harness.call_tool(15, "skill.list", json!({}))?;
+    let skill_names = must_some(
+        tool_content(&skills)["skills"].as_array(),
+        "bundled skills array",
+    )?
+    .iter()
+    .filter_map(|skill| skill["name"].as_str())
+    .collect::<Vec<_>>();
+    assert!(skill_names.contains(&"fidget-spinner"));
+    assert!(skill_names.contains(&"frontier-loop"));
+
+    let base_skill = harness.call_tool(16, "skill.show", json!({"name": "fidget-spinner"}))?;
+    assert_eq!(
+        tool_content(&base_skill)["name"].as_str(),
+        Some("fidget-spinner")
+    );
+    Ok(())
+}
+
+/// When the host crash-once fault is armed for a read-only tool call, the
+/// host must transparently restart the worker and retry, surfacing success to
+/// the client while recording one retry and one restart in telemetry.
+#[test]
+fn safe_request_retries_after_worker_crash() -> TestResult {
+    let project_root = temp_project_root("crash_retry")?;
+    init_project(&project_root)?;
+
+    let mut harness = McpHarness::spawn(
+        None,
+        &[(
+            "FIDGET_SPINNER_MCP_TEST_HOST_CRASH_ONCE_KEY",
+            "tools/call:project.status".to_owned(),
+        )],
+    )?;
+    let _ = harness.initialize()?;
+    harness.notify_initialized()?;
+    let bind = harness.bind_project(3, &project_root)?;
+    assert_eq!(bind["result"]["isError"].as_bool(), Some(false));
+
+    // This call triggers the injected crash; the retry must still succeed.
+    let response = harness.call_tool(5, "project.status", json!({}))?;
+    assert_eq!(response["result"]["isError"].as_bool(), Some(false));
+
+    let telemetry = harness.call_tool(6, "system.telemetry", json!({}))?;
+    assert_eq!(tool_content(&telemetry)["retries"].as_u64(), Some(1));
+    assert_eq!(
+        tool_content(&telemetry)["worker_restarts"].as_u64(),
+        Some(1)
+    );
+    Ok(())
+}
+
+/// Same as the crash test, but the injected failure is a transient worker
+/// fault gated by a marker file so it fires exactly once; the host must retry
+/// and record one retry plus one worker restart.
+#[test]
+fn safe_request_retries_after_worker_transient_fault() -> TestResult {
+    let project_root = temp_project_root("transient_retry")?;
+    init_project(&project_root)?;
+    // Marker file makes the injected fault one-shot across worker restarts.
+    let marker = project_root.join("transient_once.marker");
+
+    let mut harness = McpHarness::spawn(
+        None,
+        &[
+            (
+                "FIDGET_SPINNER_MCP_TEST_WORKER_TRANSIENT_ONCE_KEY",
+                "tools/call:project.status".to_owned(),
+            ),
+            (
+                "FIDGET_SPINNER_MCP_TEST_WORKER_TRANSIENT_ONCE_MARKER",
+                marker.to_string(),
+            ),
+        ],
+    )?;
+    let _ = harness.initialize()?;
+    harness.notify_initialized()?;
+    let bind = harness.bind_project(12, &project_root)?;
+    assert_eq!(bind["result"]["isError"].as_bool(), Some(false));
+
+    let response = harness.call_tool(13, "project.status", json!({}))?;
+    assert_eq!(response["result"]["isError"].as_bool(), Some(false));
+
+    let telemetry = harness.call_tool(14, "system.telemetry", json!({}))?;
+    assert_eq!(tool_content(&telemetry)["retries"].as_u64(), Some(1));
+    assert_eq!(
+        tool_content(&telemetry)["worker_restarts"].as_u64(),
+        Some(1)
+    );
+    Ok(())
+}
+
+/// A write tool (`research.record`) interrupted by a crash must NOT be
+/// retried: the client sees an error, no node is created, and the retry
+/// counter stays at zero — at-most-once semantics for side effects.
+#[test]
+fn side_effecting_request_is_not_replayed_after_worker_crash() -> TestResult {
+    let project_root = temp_project_root("no_replay")?;
+    init_project(&project_root)?;
+
+    let mut harness = McpHarness::spawn(
+        None,
+        &[(
+            "FIDGET_SPINNER_MCP_TEST_HOST_CRASH_ONCE_KEY",
+            "tools/call:research.record".to_owned(),
+        )],
+    )?;
+    let _ = harness.initialize()?;
+    harness.notify_initialized()?;
+    let bind = harness.bind_project(6, &project_root)?;
+    assert_eq!(bind["result"]["isError"].as_bool(), Some(false));
+
+    let response = harness.call_tool(
+        7,
+        "research.record",
+        json!({
+            "title": "should not duplicate",
+            "body": "host crash before worker execution",
+        }),
+    )?;
+    // The crash surfaces as a tool error instead of a silent replay.
+    assert_eq!(response["result"]["isError"].as_bool(), Some(true));
+
+    // No node should have been written by the failed call.
+    let nodes = harness.call_tool(8, "node.list", json!({}))?;
+    assert_eq!(
+        must_some(tool_content(&nodes).as_array(), "node list")?.len(),
+        0
+    );
+
+    let telemetry = harness.call_tool(9, "system.telemetry", json!({}))?;
+    assert_eq!(tool_content(&telemetry)["retries"].as_u64(), Some(0));
+    Ok(())
+}
+
+/// A forced host rollout in the middle of serving must not drop the
+/// initialized/bound session: subsequent calls keep succeeding and telemetry
+/// records exactly one rollout.
+#[test]
+fn forced_rollout_preserves_initialized_state() -> TestResult {
+    let project_root = temp_project_root("rollout")?;
+    init_project(&project_root)?;
+
+    let mut harness = McpHarness::spawn(
+        None,
+        &[(
+            "FIDGET_SPINNER_MCP_TEST_FORCE_ROLLOUT_KEY",
+            "tools/call:project.status".to_owned(),
+        )],
+    )?;
+    let _ = harness.initialize()?;
+    harness.notify_initialized()?;
+    let bind = harness.bind_project(9, &project_root)?;
+    assert_eq!(bind["result"]["isError"].as_bool(), Some(false));
+
+    // First status call triggers the rollout; both calls must still succeed.
+    let first = harness.call_tool(10, "project.status", json!({}))?;
+    assert_eq!(first["result"]["isError"].as_bool(), Some(false));
+
+    let second = harness.call_tool(11, "project.status", json!({}))?;
+    assert_eq!(second["result"]["isError"].as_bool(), Some(false));
+
+    let telemetry = harness.call_tool(12, "system.telemetry", json!({}))?;
+    assert_eq!(tool_content(&telemetry)["host_rollouts"].as_u64(), Some(1));
+    Ok(())
+}
+
+/// Project-scoped tools invoked before any binding must fail with an error
+/// message that tells the agent to call `project.bind` first.
+#[test]
+fn unbound_project_tools_fail_with_bind_hint() -> TestResult {
+    let mut harness = McpHarness::spawn(None, &[])?;
+    let _ = harness.initialize()?;
+    harness.notify_initialized()?;
+
+    let response = harness.call_tool(20, "project.status", json!({}))?;
+    assert_eq!(response["result"]["isError"].as_bool(), Some(true));
+    let message = response["result"]["structuredContent"]["message"].as_str();
+    assert!(message.is_some_and(|message| message.contains("project.bind")));
+    Ok(())
+}
+
+/// Rebinding to a nested path inside a sibling project must resolve to that
+/// sibling's root and redirect subsequent writes there: after rebind, a quick
+/// note lands in the sibling store and the original store stays empty.
+#[test]
+fn bind_retargets_writes_to_sibling_project_root() -> TestResult {
+    let spinner_root = temp_project_root("spinner_root")?;
+    let libgrid_root = temp_project_root("libgrid_root")?;
+    init_project(&spinner_root)?;
+    init_project(&libgrid_root)?;
+    // Bind target is a nested dir; the host should walk up to libgrid_root.
+    let notes_dir = libgrid_root.join("notes");
+    must(
+        fs::create_dir_all(notes_dir.as_std_path()),
+        "create nested notes dir",
+    )?;
+
+    // Start bound to the spinner project via --project.
+    let mut harness = McpHarness::spawn(Some(&spinner_root), &[])?;
+    let _ = harness.initialize()?;
+    harness.notify_initialized()?;
+
+    let initial_status = harness.call_tool(30, "project.status", json!({}))?;
+    assert_eq!(
+        tool_content(&initial_status)["project_root"].as_str(),
+        Some(spinner_root.as_str())
+    );
+
+    let rebind = harness.bind_project(31, &notes_dir)?;
+    assert_eq!(rebind["result"]["isError"].as_bool(), Some(false));
+    assert_eq!(
+        tool_content(&rebind)["project_root"].as_str(),
+        Some(libgrid_root.as_str())
+    );
+
+    let status = harness.call_tool(32, "project.status", json!({}))?;
+    assert_eq!(
+        tool_content(&status)["project_root"].as_str(),
+        Some(libgrid_root.as_str())
+    );
+
+    let note = harness.call_tool(
+        33,
+        "note.quick",
+        json!({
+            "title": "libgrid dogfood note",
+            "body": "rebind should redirect writes",
+        }),
+    )?;
+    assert_eq!(note["result"]["isError"].as_bool(), Some(false));
+
+    // Verify directly against both stores, bypassing the MCP surface.
+    let spinner_store = must(ProjectStore::open(&spinner_root), "open spinner store")?;
+    let libgrid_store = must(ProjectStore::open(&libgrid_root), "open libgrid store")?;
+    assert_eq!(
+        must(
+            spinner_store.list_nodes(ListNodesQuery::default()),
+            "list spinner nodes after rebind"
+        )?
+        .len(),
+        0
+    );
+    assert_eq!(
+        must(
+            libgrid_store.list_nodes(ListNodesQuery::default()),
+            "list libgrid nodes after rebind"
+        )?
+        .len(),
+        1
+    );
+    Ok(())
+}
diff --git a/crates/fidget-spinner-core/Cargo.toml b/crates/fidget-spinner-core/Cargo.toml
new file mode 100644
index 0000000..b472b91
--- /dev/null
+++ b/crates/fidget-spinner-core/Cargo.toml
@@ -0,0 +1,19 @@
+[package]
+name = "fidget-spinner-core"
+description = "Core domain model for a local-first experimental DAG"
+edition.workspace = true
+license.workspace = true
+publish = false
+rust-version.workspace = true
+version.workspace = true
+
+[dependencies]
+camino.workspace = true
+serde.workspace = true
+serde_json.workspace = true
+thiserror.workspace = true
+time.workspace = true
+uuid.workspace = true
+
+[lints]
+workspace = true
diff --git a/crates/fidget-spinner-core/src/error.rs b/crates/fidget-spinner-core/src/error.rs
new file mode 100644
index 0000000..8e976c7
--- /dev/null
+++ b/crates/fidget-spinner-core/src/error.rs
@@ -0,0 +1,9 @@
+use thiserror::Error;
+
+/// Validation errors raised by core-type constructors (`NonEmptyText::new`,
+/// `CommandRecipe::new`); display strings come from the `#[error]` attributes.
+#[derive(Clone, Debug, Eq, Error, PartialEq)]
+pub enum CoreError {
+    #[error("text values must not be blank")]
+    EmptyText,
+    #[error("command recipes must contain at least one argv element")]
+    EmptyCommand,
+}
diff --git a/crates/fidget-spinner-core/src/id.rs b/crates/fidget-spinner-core/src/id.rs
new file mode 100644
index 0000000..ea2cd5a
--- /dev/null
+++ b/crates/fidget-spinner-core/src/id.rs
@@ -0,0 +1,46 @@
+use std::fmt::{self, Display, Formatter};
+
+use serde::{Deserialize, Serialize};
+use uuid::Uuid;
+
+/// Generate a strongly-typed UUID newtype: `fresh()` mints a time-ordered
+/// UUIDv7, `serde(transparent)` keeps the wire format a bare UUID string,
+/// and `Display` delegates to the inner UUID.
+macro_rules! define_id {
+    ($name:ident) => {
+        #[derive(
+            Clone, Copy, Debug, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize,
+        )]
+        #[serde(transparent)]
+        pub struct $name(Uuid);
+
+        impl $name {
+            /// Mint a new id; UUIDv7 sorts roughly by creation time.
+            #[must_use]
+            pub fn fresh() -> Self {
+                Self(Uuid::now_v7())
+            }
+
+            /// Wrap an existing UUID (e.g. one read back from storage).
+            #[must_use]
+            pub fn from_uuid(uuid: Uuid) -> Self {
+                Self(uuid)
+            }
+
+            /// Unwrap to the raw UUID.
+            #[must_use]
+            pub fn as_uuid(self) -> Uuid {
+                self.0
+            }
+        }
+
+        impl Display for $name {
+            fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {
+                Display::fmt(&self.0, formatter)
+            }
+        }
+    };
+}
+
+// One distinct id type per domain entity, so ids cannot be mixed up.
+define_id!(AgentSessionId);
+define_id!(AnnotationId);
+define_id!(ArtifactId);
+define_id!(CheckpointId);
+define_id!(ExperimentId);
+define_id!(FrontierId);
+define_id!(NodeId);
+define_id!(RunId);
diff --git a/crates/fidget-spinner-core/src/lib.rs b/crates/fidget-spinner-core/src/lib.rs
new file mode 100644
index 0000000..f368268
--- /dev/null
+++ b/crates/fidget-spinner-core/src/lib.rs
@@ -0,0 +1,26 @@
+//! Core domain types for the Fidget Spinner frontier machine.
+//!
+//! The product direction is intentionally local-first and agent-first: the DAG
+//! is the canonical truth, while frontier state is a derived operational
+//! projection over that graph. The global spine is intentionally narrow so
+//! projects can carry richer payloads and annotations without fossilizing the
+//! whole system into one universal schema.
+
+mod error;
+mod id;
+mod model;
+
+pub use crate::error::CoreError;
+pub use crate::id::{
+ AgentSessionId, AnnotationId, ArtifactId, CheckpointId, ExperimentId, FrontierId, NodeId, RunId,
+};
+pub use crate::model::{
+ AdmissionState, AnnotationVisibility, ArtifactKind, ArtifactRef, CheckpointDisposition,
+ CheckpointRecord, CheckpointSnapshotRef, CodeSnapshotRef, CommandRecipe, CompletedExperiment,
+ DagEdge, DagNode, DiagnosticSeverity, EdgeKind, EvaluationProtocol, ExecutionBackend,
+ ExperimentResult, FieldPresence, FieldRole, FrontierContract, FrontierNote, FrontierProjection,
+ FrontierRecord, FrontierStatus, FrontierVerdict, GitCommitHash, InferencePolicy, JsonObject,
+ MetricObservation, MetricSpec, MetricUnit, NodeAnnotation, NodeClass, NodeDiagnostics,
+ NodePayload, NodeTrack, NonEmptyText, OptimizationObjective, PayloadSchemaRef,
+ ProjectFieldSpec, ProjectSchema, RunRecord, RunStatus, ValidationDiagnostic,
+};
diff --git a/crates/fidget-spinner-core/src/model.rs b/crates/fidget-spinner-core/src/model.rs
new file mode 100644
index 0000000..f0d1818
--- /dev/null
+++ b/crates/fidget-spinner-core/src/model.rs
@@ -0,0 +1,693 @@
+use std::collections::{BTreeMap, BTreeSet};
+use std::fmt::{self, Display, Formatter};
+
+use camino::Utf8PathBuf;
+use serde::{Deserialize, Serialize};
+use serde_json::{Map, Value};
+use time::OffsetDateTime;
+
+use crate::{
+ AgentSessionId, AnnotationId, ArtifactId, CheckpointId, CoreError, ExperimentId, FrontierId,
+ NodeId, RunId,
+};
+
+/// A string guaranteed not to be blank (empty or whitespace-only).
+/// Serialized transparently as a plain string.
+#[derive(Clone, Debug, Deserialize, Eq, Ord, PartialEq, PartialOrd, Serialize)]
+#[serde(transparent)]
+pub struct NonEmptyText(String);
+
+impl NonEmptyText {
+    /// Validate and wrap `value`; returns `CoreError::EmptyText` when the
+    /// trimmed value is empty. Note: the ORIGINAL (untrimmed) value is kept.
+    pub fn new(value: impl Into<String>) -> Result<Self, CoreError> {
+        let value = value.into();
+        if value.trim().is_empty() {
+            return Err(CoreError::EmptyText);
+        }
+        Ok(Self(value))
+    }
+
+    /// Borrow the inner string.
+    #[must_use]
+    pub fn as_str(&self) -> &str {
+        &self.0
+    }
+}
+
+impl Display for NonEmptyText {
+    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {
+        formatter.write_str(&self.0)
+    }
+}
+
+/// A git commit identifier. Only non-blankness is enforced; hex format and
+/// length are NOT validated here.
+#[derive(Clone, Debug, Deserialize, Eq, Ord, PartialEq, PartialOrd, Serialize)]
+#[serde(transparent)]
+pub struct GitCommitHash(NonEmptyText);
+
+impl GitCommitHash {
+    /// Wrap a commit string; fails only when blank (see `NonEmptyText::new`).
+    pub fn new(value: impl Into<String>) -> Result<Self, CoreError> {
+        NonEmptyText::new(value).map(Self)
+    }
+
+    /// Borrow the underlying hash string.
+    #[must_use]
+    pub fn as_str(&self) -> &str {
+        self.0.as_str()
+    }
+}
+
+impl Display for GitCommitHash {
+    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {
+        Display::fmt(&self.0, formatter)
+    }
+}
+
+pub type JsonObject = Map<String, Value>;
+
+/// The kind of a DAG node; determines its default track placement.
+#[derive(Clone, Copy, Debug, Deserialize, Eq, Ord, PartialEq, PartialOrd, Serialize)]
+pub enum NodeClass {
+    Contract,
+    Change,
+    Run,
+    Analysis,
+    Decision,
+    Research,
+    Enabling,
+    Note,
+}
+
+impl NodeClass {
+    /// Stable lowercase label used for storage and display.
+    #[must_use]
+    pub const fn as_str(self) -> &'static str {
+        match self {
+            Self::Contract => "contract",
+            Self::Change => "change",
+            Self::Run => "run",
+            Self::Analysis => "analysis",
+            Self::Decision => "decision",
+            Self::Research => "research",
+            Self::Enabling => "enabling",
+            Self::Note => "note",
+        }
+    }
+
+    /// Experiment-lifecycle classes sit on the core path; exploratory ones
+    /// (research/enabling/note) default to off-path.
+    #[must_use]
+    pub const fn default_track(self) -> NodeTrack {
+        match self {
+            Self::Contract | Self::Change | Self::Run | Self::Analysis | Self::Decision => {
+                NodeTrack::CorePath
+            }
+            Self::Research | Self::Enabling | Self::Note => NodeTrack::OffPath,
+        }
+    }
+}
+
+impl Display for NodeClass {
+    fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {
+        formatter.write_str(self.as_str())
+    }
+}
+
+/// Whether a node sits on the main experiment path or off to the side.
+#[derive(Clone, Copy, Debug, Deserialize, Eq, Ord, PartialEq, PartialOrd, Serialize)]
+pub enum NodeTrack {
+    CorePath,
+    OffPath,
+}
+
+/// Default display treatment for node annotations.
+#[derive(Clone, Copy, Debug, Deserialize, Eq, Ord, PartialEq, PartialOrd, Serialize)]
+pub enum AnnotationVisibility {
+    HiddenByDefault,
+    Visible,
+}
+
+/// Severity of a validation diagnostic.
+#[derive(Clone, Copy, Debug, Deserialize, Eq, Ord, PartialEq, PartialOrd, Serialize)]
+pub enum DiagnosticSeverity {
+    Error,
+    Warning,
+    Info,
+}
+
+/// How strongly a project schema field is expected on a payload.
+#[derive(Clone, Copy, Debug, Deserialize, Eq, Ord, PartialEq, PartialOrd, Serialize)]
+pub enum FieldPresence {
+    Required,
+    Recommended,
+    Optional,
+}
+
+/// What the system is allowed to do with a project schema field.
+#[derive(Clone, Copy, Debug, Deserialize, Eq, Ord, PartialEq, PartialOrd, Serialize)]
+pub enum FieldRole {
+    Index,
+    ProjectionGate,
+    RenderOnly,
+    Opaque,
+}
+
+/// Whether a model/agent may fill a field in automatically.
+#[derive(Clone, Copy, Debug, Deserialize, Eq, Ord, PartialEq, PartialOrd, Serialize)]
+pub enum InferencePolicy {
+    ManualOnly,
+    ModelMayInfer,
+}
+
+/// Lifecycle state of a frontier.
+#[derive(Clone, Copy, Debug, Deserialize, Eq, PartialEq, Serialize)]
+pub enum FrontierStatus {
+    Exploring,
+    Paused,
+    Saturated,
+    Archived,
+}
+
+/// Role of a checkpoint relative to its frontier.
+#[derive(Clone, Copy, Debug, Deserialize, Eq, PartialEq, Serialize)]
+pub enum CheckpointDisposition {
+    Champion,
+    FrontierCandidate,
+    Baseline,
+    DeadEnd,
+    Archived,
+}
+
+/// Unit attached to a metric value.
+#[derive(Clone, Copy, Debug, Deserialize, Eq, Ord, PartialEq, PartialOrd, Serialize)]
+pub enum MetricUnit {
+    Seconds,
+    Bytes,
+    Count,
+    Ratio,
+    Custom,
+}
+
+/// Direction in which a metric should move (or a target to hit).
+#[derive(Clone, Copy, Debug, Deserialize, Eq, Ord, PartialEq, PartialOrd, Serialize)]
+pub enum OptimizationObjective {
+    Minimize,
+    Maximize,
+    Target,
+}
+
+/// Lifecycle state of an execution run.
+#[derive(Clone, Copy, Debug, Deserialize, Eq, PartialEq, Serialize)]
+pub enum RunStatus {
+    Queued,
+    Running,
+    Succeeded,
+    Failed,
+    Cancelled,
+}
+
+/// Where a run's process executes.
+#[derive(Clone, Copy, Debug, Deserialize, Eq, PartialEq, Serialize)]
+pub enum ExecutionBackend {
+    LocalProcess,
+    WorktreeProcess,
+    SshProcess,
+}
+
+/// Outcome of judging an experiment against the frontier contract.
+#[derive(Clone, Copy, Debug, Deserialize, Eq, PartialEq, Serialize)]
+pub enum FrontierVerdict {
+    PromoteToChampion,
+    KeepOnFrontier,
+    RevertToChampion,
+    ArchiveDeadEnd,
+    NeedsMoreEvidence,
+}
+
+/// Whether a node was admitted into the DAG by validation.
+#[derive(Clone, Copy, Debug, Deserialize, Eq, PartialEq, Serialize)]
+pub enum AdmissionState {
+    Admitted,
+    Rejected,
+}
+
+/// Versioned reference to a project payload schema (namespace + version).
+#[derive(Clone, Debug, Deserialize, Eq, Ord, PartialEq, PartialOrd, Serialize)]
+pub struct PayloadSchemaRef {
+    pub namespace: NonEmptyText,
+    pub version: u32,
+}
+
+/// Free-form per-node payload: an optional schema tag plus a JSON object of
+/// project-defined fields.
+#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
+pub struct NodePayload {
+    pub schema: Option<PayloadSchemaRef>,
+    pub fields: JsonObject,
+}
+
+impl NodePayload {
+    /// A payload with no schema and no fields.
+    #[must_use]
+    pub fn empty() -> Self {
+        Self {
+            schema: None,
+            fields: JsonObject::new(),
+        }
+    }
+
+    /// A payload tagged with `schema` and carrying `fields`.
+    #[must_use]
+    pub fn with_schema(schema: PayloadSchemaRef, fields: JsonObject) -> Self {
+        Self {
+            schema: Some(schema),
+            fields,
+        }
+    }
+
+    /// Look up a top-level payload field by name.
+    #[must_use]
+    pub fn field(&self, name: &str) -> Option<&Value> {
+        self.fields.get(name)
+    }
+}
+
+/// A timestamped textual annotation attached to a node.
+#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
+pub struct NodeAnnotation {
+    pub id: AnnotationId,
+    pub visibility: AnnotationVisibility,
+    pub label: Option<NonEmptyText>,
+    pub body: NonEmptyText,
+    pub created_at: OffsetDateTime,
+}
+
+impl NodeAnnotation {
+    /// Build an unlabeled, hidden-by-default annotation stamped with "now".
+    #[must_use]
+    pub fn hidden(body: NonEmptyText) -> Self {
+        Self {
+            id: AnnotationId::fresh(),
+            visibility: AnnotationVisibility::HiddenByDefault,
+            label: None,
+            body,
+            created_at: OffsetDateTime::now_utc(),
+        }
+    }
+}
+
+/// One validation finding, optionally tied to a specific payload field.
+#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
+pub struct ValidationDiagnostic {
+    pub severity: DiagnosticSeverity,
+    pub code: String,
+    pub message: NonEmptyText,
+    pub field_name: Option<String>,
+}
+
+/// Validation outcome for a node: admission decision plus findings.
+#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
+pub struct NodeDiagnostics {
+    pub admission: AdmissionState,
+    pub items: Vec<ValidationDiagnostic>,
+}
+
+impl NodeDiagnostics {
+    /// An admitted node with no findings.
+    #[must_use]
+    pub const fn admitted() -> Self {
+        Self {
+            admission: AdmissionState::Admitted,
+            items: Vec::new(),
+        }
+    }
+}
+
+/// Declaration of one project-defined payload field: which node classes it
+/// applies to, how strongly it is expected, and how it may be used.
+#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
+pub struct ProjectFieldSpec {
+    pub name: NonEmptyText,
+    pub node_classes: BTreeSet<NodeClass>,
+    pub presence: FieldPresence,
+    pub severity: DiagnosticSeverity,
+    pub role: FieldRole,
+    pub inference_policy: InferencePolicy,
+}
+
+impl ProjectFieldSpec {
+    /// An empty class set means "applies to every node class".
+    #[must_use]
+    pub fn applies_to(&self, class: NodeClass) -> bool {
+        self.node_classes.is_empty() || self.node_classes.contains(&class)
+    }
+}
+
+/// Per-project payload schema: a namespace, a version, and field specs.
+#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
+pub struct ProjectSchema {
+    pub namespace: NonEmptyText,
+    pub version: u32,
+    pub fields: Vec<ProjectFieldSpec>,
+}
+
+impl ProjectSchema {
+    /// A fresh version-1 schema with no field specs.
+    #[must_use]
+    pub fn default_with_namespace(namespace: NonEmptyText) -> Self {
+        Self {
+            namespace,
+            version: 1,
+            fields: Vec::new(),
+        }
+    }
+
+    /// The `(namespace, version)` reference that payloads tag themselves with.
+    #[must_use]
+    pub fn schema_ref(&self) -> PayloadSchemaRef {
+        PayloadSchemaRef {
+            namespace: self.namespace.clone(),
+            version: self.version,
+        }
+    }
+
+    /// Report missing non-optional fields as diagnostics WITHOUT rejecting
+    /// the node: admission is always `Admitted` here, so schema violations
+    /// warn rather than block ingest.
+    #[must_use]
+    pub fn validate_node(&self, class: NodeClass, payload: &NodePayload) -> NodeDiagnostics {
+        let items = self
+            .fields
+            .iter()
+            .filter(|field| field.applies_to(class))
+            .filter_map(|field| {
+                let is_missing = payload.field(field.name.as_str()).is_none();
+                // Only flag fields that are both missing and not optional.
+                if !is_missing || field.presence == FieldPresence::Optional {
+                    return None;
+                }
+                Some(ValidationDiagnostic {
+                    severity: field.severity,
+                    code: format!("missing.{}", field.name.as_str()),
+                    message: validation_message(format!(
+                        "missing project payload field `{}`",
+                        field.name.as_str()
+                    )),
+                    field_name: Some(field.name.as_str().to_owned()),
+                })
+            })
+            .collect();
+        NodeDiagnostics {
+            admission: AdmissionState::Admitted,
+            items,
+        }
+    }
+}
+
+/// Wrap an internally-generated message; the input is never blank, so the
+/// error arm is unreachable by construction.
+fn validation_message(value: String) -> NonEmptyText {
+    match NonEmptyText::new(value) {
+        Ok(message) => message,
+        Err(_) => unreachable!("validation diagnostics are never empty"),
+    }
+}
+
+/// A node in the project DAG: the narrow global spine (class, track, title,
+/// timestamps) plus the project-defined payload and annotations.
+#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
+pub struct DagNode {
+    pub id: NodeId,
+    pub class: NodeClass,
+    pub track: NodeTrack,
+    pub frontier_id: Option<FrontierId>,
+    pub archived: bool,
+    pub title: NonEmptyText,
+    pub summary: Option<NonEmptyText>,
+    pub payload: NodePayload,
+    pub annotations: Vec<NodeAnnotation>,
+    pub diagnostics: NodeDiagnostics,
+    pub agent_session_id: Option<AgentSessionId>,
+    pub created_at: OffsetDateTime,
+    pub updated_at: OffsetDateTime,
+}
+
+impl DagNode {
+    /// Build a new, unarchived node with a fresh id: the track defaults from
+    /// the class, no annotations or session are attached, and created/updated
+    /// timestamps are set to the same "now".
+    #[must_use]
+    pub fn new(
+        class: NodeClass,
+        frontier_id: Option<FrontierId>,
+        title: NonEmptyText,
+        summary: Option<NonEmptyText>,
+        payload: NodePayload,
+        diagnostics: NodeDiagnostics,
+    ) -> Self {
+        let now = OffsetDateTime::now_utc();
+        Self {
+            id: NodeId::fresh(),
+            class,
+            track: class.default_track(),
+            frontier_id,
+            archived: false,
+            title,
+            summary,
+            payload,
+            annotations: Vec::new(),
+            diagnostics,
+            agent_session_id: None,
+            created_at: now,
+            updated_at: now,
+        }
+    }
+
+    /// Whether this node sits on the core experiment path.
+    #[must_use]
+    pub fn is_core_path(&self) -> bool {
+        self.track == NodeTrack::CorePath
+    }
+}
+
+/// Semantic relationship carried by a DAG edge.
+#[derive(Clone, Copy, Debug, Deserialize, Eq, Ord, PartialEq, PartialOrd, Serialize)]
+pub enum EdgeKind {
+    Lineage,
+    Evidence,
+    Comparison,
+    Supersedes,
+    Annotation,
+}
+
+/// A directed, typed edge between two nodes.
+#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
+pub struct DagEdge {
+    pub source_id: NodeId,
+    pub target_id: NodeId,
+    pub kind: EdgeKind,
+}
+
+/// Broad category of a stored artifact.
+#[derive(Clone, Copy, Debug, Deserialize, Eq, PartialEq, Serialize)]
+pub enum ArtifactKind {
+    Note,
+    Patch,
+    BenchmarkBundle,
+    MetricSeries,
+    Table,
+    Plot,
+    Log,
+    Binary,
+    Checkpoint,
+}
+
+/// Reference to an artifact on disk, optionally tied to the run that made it.
+#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
+pub struct ArtifactRef {
+    pub id: ArtifactId,
+    pub kind: ArtifactKind,
+    pub label: NonEmptyText,
+    pub path: Utf8PathBuf,
+    pub media_type: Option<NonEmptyText>,
+    pub produced_by_run: Option<RunId>,
+}
+
+/// Snapshot of a (possibly dirty) working tree at run time.
+#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
+pub struct CodeSnapshotRef {
+    pub repo_root: Utf8PathBuf,
+    pub worktree_root: Utf8PathBuf,
+    pub worktree_name: Option<NonEmptyText>,
+    pub head_commit: Option<GitCommitHash>,
+    pub dirty_paths: BTreeSet<Utf8PathBuf>,
+}
+
+/// Snapshot backing a checkpoint; unlike `CodeSnapshotRef` the commit hash is
+/// mandatory, so a checkpoint always points at committed code.
+#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
+pub struct CheckpointSnapshotRef {
+    pub repo_root: Utf8PathBuf,
+    pub worktree_root: Utf8PathBuf,
+    pub worktree_name: Option<NonEmptyText>,
+    pub commit_hash: GitCommitHash,
+}
+
+/// A reproducible command: working directory, argv, and environment.
+#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
+pub struct CommandRecipe {
+    pub working_directory: Utf8PathBuf,
+    pub argv: Vec<NonEmptyText>,
+    pub env: BTreeMap<String, String>,
+}
+
+impl CommandRecipe {
+    /// Build a recipe; rejects an empty argv with `CoreError::EmptyCommand`
+    /// since there would be no program to execute.
+    pub fn new(
+        working_directory: Utf8PathBuf,
+        argv: Vec<NonEmptyText>,
+        env: BTreeMap<String, String>,
+    ) -> Result<Self, CoreError> {
+        if argv.is_empty() {
+            return Err(CoreError::EmptyCommand);
+        }
+        Ok(Self {
+            working_directory,
+            argv,
+            env,
+        })
+    }
+}
+
+/// Declaration of a metric: key, unit, and optimization direction.
+#[derive(Clone, Debug, Deserialize, Eq, Ord, PartialEq, PartialOrd, Serialize)]
+pub struct MetricSpec {
+    pub metric_key: NonEmptyText,
+    pub unit: MetricUnit,
+    pub objective: OptimizationObjective,
+}
+
+/// How experiments on a frontier are evaluated: which suites run, which
+/// metric decides, and which metrics are tracked alongside.
+#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
+pub struct EvaluationProtocol {
+    pub benchmark_suites: BTreeSet<NonEmptyText>,
+    pub primary_metric: MetricSpec,
+    pub supporting_metrics: BTreeSet<MetricSpec>,
+}
+
+/// The frontier's standing agreement: objective, evaluation protocol, and
+/// the criteria a candidate must meet to be promoted.
+#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
+pub struct FrontierContract {
+    pub objective: NonEmptyText,
+    pub evaluation: EvaluationProtocol,
+    pub promotion_criteria: Vec<NonEmptyText>,
+}
+
+/// One measured metric value. Not `Eq`/`Ord` because of the `f64` value.
+#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
+pub struct MetricObservation {
+    pub metric_key: NonEmptyText,
+    pub unit: MetricUnit,
+    pub objective: OptimizationObjective,
+    pub value: f64,
+}
+
+/// A frontier: a labeled exploration rooted at a contract node.
+#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
+pub struct FrontierRecord {
+    pub id: FrontierId,
+    pub label: NonEmptyText,
+    pub root_contract_node_id: NodeId,
+    pub status: FrontierStatus,
+    pub created_at: OffsetDateTime,
+    pub updated_at: OffsetDateTime,
+}
+
+impl FrontierRecord {
+    /// New frontier with a fresh id; delegates to `with_id`.
+    #[must_use]
+    pub fn new(label: NonEmptyText, root_contract_node_id: NodeId) -> Self {
+        Self::with_id(FrontierId::fresh(), label, root_contract_node_id)
+    }
+
+    /// New frontier with a caller-supplied id (e.g. when rehydrating),
+    /// starting in `Exploring` with both timestamps set to "now".
+    #[must_use]
+    pub fn with_id(id: FrontierId, label: NonEmptyText, root_contract_node_id: NodeId) -> Self {
+        let now = OffsetDateTime::now_utc();
+        Self {
+            id,
+            label,
+            root_contract_node_id,
+            status: FrontierStatus::Exploring,
+            created_at: now,
+            updated_at: now,
+        }
+    }
+}
+
+/// A named, committed snapshot on a frontier plus its current disposition.
+#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
+pub struct CheckpointRecord {
+    pub id: CheckpointId,
+    pub frontier_id: FrontierId,
+    pub node_id: NodeId,
+    pub snapshot: CheckpointSnapshotRef,
+    pub disposition: CheckpointDisposition,
+    pub summary: NonEmptyText,
+    pub created_at: OffsetDateTime,
+}
+
+/// Execution record for a run node: status, backend, command, and timing.
+#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
+pub struct RunRecord {
+    pub node_id: NodeId,
+    pub run_id: RunId,
+    pub frontier_id: Option<FrontierId>,
+    pub status: RunStatus,
+    pub backend: ExecutionBackend,
+    pub code_snapshot: Option<CodeSnapshotRef>,
+    pub benchmark_suite: Option<NonEmptyText>,
+    pub command: CommandRecipe,
+    pub started_at: Option<OffsetDateTime>,
+    pub finished_at: Option<OffsetDateTime>,
+}
+
+/// Measured outcome of one experiment against one benchmark suite.
+#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
+pub struct ExperimentResult {
+    pub benchmark_suite: NonEmptyText,
+    pub primary_metric: MetricObservation,
+    pub supporting_metrics: Vec<MetricObservation>,
+    pub benchmark_bundle: Option<ArtifactId>,
+}
+
+/// Human-readable wrap-up of an experiment plus follow-on hypotheses.
+#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
+pub struct FrontierNote {
+    pub summary: NonEmptyText,
+    pub next_hypotheses: Vec<NonEmptyText>,
+}
+
+/// The full, immutable record of a finished experiment: the node/checkpoint
+/// graph it touched, its result, the note, and the verdict.
+#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
+pub struct CompletedExperiment {
+    pub id: ExperimentId,
+    pub frontier_id: FrontierId,
+    pub base_checkpoint_id: CheckpointId,
+    pub candidate_checkpoint_id: CheckpointId,
+    pub change_node_id: NodeId,
+    pub run_node_id: NodeId,
+    pub run_id: RunId,
+    pub analysis_node_id: Option<NodeId>,
+    pub decision_node_id: NodeId,
+    pub result: ExperimentResult,
+    pub note: FrontierNote,
+    pub verdict: FrontierVerdict,
+    pub created_at: OffsetDateTime,
+}
+
+/// Derived operational view of a frontier: champion, candidates, and count.
+#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
+pub struct FrontierProjection {
+    pub frontier: FrontierRecord,
+    pub champion_checkpoint_id: Option<CheckpointId>,
+    pub candidate_checkpoint_ids: BTreeSet<CheckpointId>,
+    pub experiment_count: u64,
+}
+
+#[cfg(test)]
+mod tests {
+    use std::collections::{BTreeMap, BTreeSet};
+
+    use camino::Utf8PathBuf;
+    use serde_json::json;
+
+    use super::{
+        CommandRecipe, DagNode, DiagnosticSeverity, FieldPresence, FieldRole, InferencePolicy,
+        JsonObject, NodeClass, NodePayload, NonEmptyText, ProjectFieldSpec, ProjectSchema,
+    };
+    use crate::CoreError;
+
+    /// Whitespace-only input counts as blank.
+    #[test]
+    fn non_empty_text_rejects_blank_input() {
+        let text = NonEmptyText::new("   ");
+        assert_eq!(text, Err(CoreError::EmptyText));
+    }
+
+    /// An empty argv has no program to run and must be rejected.
+    #[test]
+    fn command_recipe_requires_argv() {
+        let recipe = CommandRecipe::new(
+            Utf8PathBuf::from("/tmp/worktree"),
+            Vec::new(),
+            BTreeMap::new(),
+        );
+        assert_eq!(recipe, Err(CoreError::EmptyCommand));
+    }
+
+    /// A missing required field produces a warning diagnostic while the node
+    /// is still admitted — validation never blocks ingest.
+    #[test]
+    fn schema_validation_warns_without_rejecting_ingest() -> Result<(), CoreError> {
+        let schema = ProjectSchema {
+            namespace: NonEmptyText::new("local.libgrid")?,
+            version: 1,
+            fields: vec![ProjectFieldSpec {
+                name: NonEmptyText::new("hypothesis")?,
+                node_classes: BTreeSet::from([NodeClass::Change]),
+                presence: FieldPresence::Required,
+                severity: DiagnosticSeverity::Warning,
+                role: FieldRole::ProjectionGate,
+                inference_policy: InferencePolicy::ManualOnly,
+            }],
+        };
+        // Empty fields object: the required "hypothesis" field is absent.
+        let payload = NodePayload::with_schema(schema.schema_ref(), JsonObject::new());
+        let diagnostics = schema.validate_node(NodeClass::Change, &payload);
+
+        assert_eq!(diagnostics.admission, super::AdmissionState::Admitted);
+        assert_eq!(diagnostics.items.len(), 1);
+        assert_eq!(diagnostics.items[0].severity, DiagnosticSeverity::Warning);
+        Ok(())
+    }
+
+    /// Research is an exploratory class, so its default track is off-path.
+    #[test]
+    fn research_nodes_default_to_off_path() -> Result<(), CoreError> {
+        let payload = NodePayload {
+            schema: None,
+            fields: JsonObject::from_iter([("topic".to_owned(), json!("ideas"))]),
+        };
+        let node = DagNode::new(
+            NodeClass::Research,
+            None,
+            NonEmptyText::new("feature scouting")?,
+            None,
+            payload,
+            super::NodeDiagnostics::admitted(),
+        );
+
+        assert!(!node.is_core_path());
+        Ok(())
+    }
+}
diff --git a/crates/fidget-spinner-store-sqlite/Cargo.toml b/crates/fidget-spinner-store-sqlite/Cargo.toml
new file mode 100644
index 0000000..54e0784
--- /dev/null
+++ b/crates/fidget-spinner-store-sqlite/Cargo.toml
@@ -0,0 +1,21 @@
+[package]
+name = "fidget-spinner-store-sqlite"
+description = "Per-project SQLite store for the Fidget Spinner DAG spine"
+edition.workspace = true
+license.workspace = true
+publish = false
+rust-version.workspace = true
+version.workspace = true
+
+[dependencies]
+camino.workspace = true
+fidget-spinner-core = { path = "../fidget-spinner-core" }
+rusqlite.workspace = true
+serde.workspace = true
+serde_json.workspace = true
+thiserror.workspace = true
+time.workspace = true
+uuid.workspace = true
+
+[lints]
+workspace = true
diff --git a/crates/fidget-spinner-store-sqlite/src/lib.rs b/crates/fidget-spinner-store-sqlite/src/lib.rs
new file mode 100644
index 0000000..7c129ab
--- /dev/null
+++ b/crates/fidget-spinner-store-sqlite/src/lib.rs
@@ -0,0 +1,1810 @@
+use std::fs;
+use std::io;
+use std::process::Command;
+
+use camino::{Utf8Path, Utf8PathBuf};
+use fidget_spinner_core::{
+ AnnotationVisibility, CheckpointDisposition, CheckpointRecord, CheckpointSnapshotRef,
+ CodeSnapshotRef, CommandRecipe, CompletedExperiment, DagEdge, DagNode, EdgeKind,
+ ExecutionBackend, ExperimentResult, FrontierContract, FrontierNote, FrontierProjection,
+ FrontierRecord, FrontierStatus, FrontierVerdict, GitCommitHash, JsonObject, MetricObservation,
+ MetricSpec, MetricUnit, NodeAnnotation, NodeClass, NodeDiagnostics, NodePayload, NonEmptyText,
+ OptimizationObjective, ProjectSchema, RunRecord, RunStatus,
+};
+use rusqlite::{Connection, OptionalExtension, Transaction, params};
+use serde::{Deserialize, Serialize};
+use serde_json::{Value, json};
+use thiserror::Error;
+use time::OffsetDateTime;
+use time::format_description::well_known::Rfc3339;
+use uuid::Uuid;
+
+/// Directory created inside a project root that holds all store state.
+pub const STORE_DIR_NAME: &str = ".fidget_spinner";
+/// SQLite database file name inside the store directory.
+pub const STATE_DB_NAME: &str = "state.sqlite";
+/// JSON file holding the serialized [`ProjectConfig`].
+pub const PROJECT_CONFIG_NAME: &str = "project.json";
+/// JSON file holding the serialized `ProjectSchema`.
+pub const PROJECT_SCHEMA_NAME: &str = "schema.json";
+
+/// Every failure the SQLite-backed project store can surface.
+///
+/// Infrastructure errors (I/O, SQL, JSON, time, UUID) are wrapped via
+/// `#[from]`; the remaining variants are domain-level lookup failures.
+#[derive(Debug, Error)]
+pub enum StoreError {
+    /// `open` could not discover a `.fidget_spinner` store at or above the path.
+    #[error("project store is not initialized at {0}")]
+    MissingProjectStore(Utf8PathBuf),
+    #[error("I/O failure")]
+    Io(#[from] io::Error),
+    #[error("SQLite failure")]
+    Sql(#[from] rusqlite::Error),
+    #[error("JSON failure")]
+    Json(#[from] serde_json::Error),
+    #[error("time parse failure")]
+    TimeParse(#[from] time::error::Parse),
+    #[error("time format failure")]
+    TimeFormat(#[from] time::error::Format),
+    #[error("core domain failure")]
+    Core(#[from] fidget_spinner_core::CoreError),
+    #[error("UUID parse failure")]
+    Uuid(#[from] uuid::Error),
+    /// Requested node id does not exist in the `nodes` table.
+    #[error("node {0} was not found")]
+    NodeNotFound(fidget_spinner_core::NodeId),
+    /// Requested frontier id does not exist (or a node belongs to a different frontier).
+    #[error("frontier {0} was not found")]
+    FrontierNotFound(fidget_spinner_core::FrontierId),
+    /// Requested checkpoint id does not exist (or belongs to a different frontier).
+    #[error("checkpoint {0} was not found")]
+    CheckpointNotFound(fidget_spinner_core::CheckpointId),
+    /// An operation that requires a Change-class node was given another class.
+    #[error("node {0} is not a change node")]
+    NodeNotChange(fidget_spinner_core::NodeId),
+    #[error("frontier {frontier_id} has no champion checkpoint")]
+    MissingChampionCheckpoint {
+        frontier_id: fidget_spinner_core::FrontierId,
+    },
+    #[error("git repository inspection failed for {0}")]
+    GitInspectionFailed(Utf8PathBuf),
+}
+
+/// Project-level metadata persisted as JSON (`project.json`) in the store directory.
+#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
+pub struct ProjectConfig {
+    // Human-readable project name, guaranteed non-blank.
+    pub display_name: NonEmptyText,
+    // Wall-clock creation time (UTC), set once at init.
+    pub created_at: OffsetDateTime,
+    // On-disk layout version; currently always 1.
+    pub store_format_version: u32,
+}
+
+impl ProjectConfig {
+    /// Build a config stamped with the current UTC time and format version 1.
+    #[must_use]
+    pub fn new(display_name: NonEmptyText) -> Self {
+        Self {
+            display_name,
+            created_at: OffsetDateTime::now_utc(),
+            store_format_version: 1,
+        }
+    }
+}
+
+/// Input for [`ProjectStore::add_node`]: the node's content plus any edges
+/// to pre-existing nodes that should be created in the same transaction.
+#[derive(Clone, Debug)]
+pub struct CreateNodeRequest {
+    pub class: NodeClass,
+    // Optional frontier the node belongs to; None for frontier-less nodes.
+    pub frontier_id: Option<fidget_spinner_core::FrontierId>,
+    pub title: NonEmptyText,
+    pub summary: Option<NonEmptyText>,
+    pub payload: NodePayload,
+    // Annotations stored alongside the node at creation time.
+    pub annotations: Vec<NodeAnnotation>,
+    // Edges wired between the new node and existing nodes (see `EdgeAttachment`).
+    pub attachments: Vec<EdgeAttachment>,
+}
+
+/// Which way an attachment edge points relative to the node being created.
+#[derive(Clone, Copy, Debug, Deserialize, Eq, PartialEq, Serialize)]
+pub enum EdgeAttachmentDirection {
+    // Existing node is the edge source, new node the target.
+    ExistingToNew,
+    // New node is the edge source, existing node the target.
+    NewToExisting,
+}
+
+/// A requested edge between an existing node and a node that does not exist yet.
+#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
+pub struct EdgeAttachment {
+    // The already-persisted endpoint.
+    pub node_id: fidget_spinner_core::NodeId,
+    pub kind: EdgeKind,
+    pub direction: EdgeAttachmentDirection,
+}
+
+impl EdgeAttachment {
+    /// Resolve this attachment into a concrete [`DagEdge`] once the new
+    /// node's id is known, orienting the edge per `self.direction`.
+    #[must_use]
+    pub fn materialize(&self, new_node_id: fidget_spinner_core::NodeId) -> DagEdge {
+        // Work out which endpoint the freshly created node occupies.
+        let (source_id, target_id) = match self.direction {
+            EdgeAttachmentDirection::ExistingToNew => (self.node_id, new_node_id),
+            EdgeAttachmentDirection::NewToExisting => (new_node_id, self.node_id),
+        };
+        DagEdge {
+            source_id,
+            target_id,
+            kind: self.kind,
+        }
+    }
+}
+
+/// Filter/paging options for [`ProjectStore::list_nodes`].
+#[derive(Clone, Debug)]
+pub struct ListNodesQuery {
+    // Restrict to a single frontier when set.
+    pub frontier_id: Option<fidget_spinner_core::FrontierId>,
+    // Restrict to a single node class when set.
+    pub class: Option<NodeClass>,
+    // When false (the default), archived nodes are filtered out.
+    pub include_archived: bool,
+    // Maximum number of rows returned.
+    pub limit: u32,
+}
+
+impl Default for ListNodesQuery {
+    /// No filters, archived nodes hidden, capped at 20 results.
+    fn default() -> Self {
+        Self {
+            frontier_id: None,
+            class: None,
+            include_archived: false,
+            limit: 20,
+        }
+    }
+}
+
+/// Lightweight projection of a node row for listings: metadata plus
+/// diagnostic/hidden-annotation counts, without the full payload.
+#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
+pub struct NodeSummary {
+    pub id: fidget_spinner_core::NodeId,
+    pub class: NodeClass,
+    pub track: fidget_spinner_core::NodeTrack,
+    pub frontier_id: Option<fidget_spinner_core::FrontierId>,
+    pub archived: bool,
+    pub title: NonEmptyText,
+    pub summary: Option<NonEmptyText>,
+    // Number of diagnostics recorded on the node at ingest time.
+    pub diagnostic_count: u64,
+    // Count of annotations whose visibility is 'hidden'.
+    pub hidden_annotation_count: u64,
+    pub created_at: OffsetDateTime,
+    pub updated_at: OffsetDateTime,
+}
+
+/// Input for [`ProjectStore::create_frontier`]: the frontier label, the
+/// contract node that anchors it, and optionally a seed champion checkpoint.
+#[derive(Clone, Debug)]
+pub struct CreateFrontierRequest {
+    pub label: NonEmptyText,
+    pub contract_title: NonEmptyText,
+    pub contract_summary: Option<NonEmptyText>,
+    pub contract: FrontierContract,
+    // When set, a Champion-disposition checkpoint is created in the same transaction.
+    pub initial_checkpoint: Option<CheckpointSeed>,
+}
+
+/// Minimal data needed to seed a checkpoint: a summary and a code snapshot ref.
+#[derive(Clone, Debug)]
+pub struct CheckpointSeed {
+    pub summary: NonEmptyText,
+    pub snapshot: CheckpointSnapshotRef,
+}
+
+/// Input for [`ProjectStore::close_experiment`]: everything required to record
+/// a run, a candidate checkpoint, a decision node, and the experiment verdict
+/// in one atomic transaction.
+#[derive(Clone, Debug)]
+pub struct CloseExperimentRequest {
+    pub frontier_id: fidget_spinner_core::FrontierId,
+    // Checkpoint the experiment branched from; must belong to `frontier_id`.
+    pub base_checkpoint_id: fidget_spinner_core::CheckpointId,
+    // Must reference a Change-class node on `frontier_id`.
+    pub change_node_id: fidget_spinner_core::NodeId,
+    pub candidate_summary: NonEmptyText,
+    pub candidate_snapshot: CheckpointSnapshotRef,
+    pub run_title: NonEmptyText,
+    pub run_summary: Option<NonEmptyText>,
+    pub backend: ExecutionBackend,
+    pub benchmark_suite: NonEmptyText,
+    pub command: CommandRecipe,
+    pub code_snapshot: Option<CodeSnapshotRef>,
+    pub primary_metric: MetricObservation,
+    pub supporting_metrics: Vec<MetricObservation>,
+    pub note: FrontierNote,
+    // Drives the new checkpoint's disposition and champion demotion.
+    pub verdict: FrontierVerdict,
+    pub decision_title: NonEmptyText,
+    pub decision_rationale: NonEmptyText,
+    pub analysis_node_id: Option<fidget_spinner_core::NodeId>,
+}
+
+/// Everything persisted by a successful [`ProjectStore::close_experiment`].
+#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
+pub struct ExperimentReceipt {
+    pub checkpoint: CheckpointRecord,
+    pub run_node: DagNode,
+    pub run: RunRecord,
+    pub decision_node: DagNode,
+    pub experiment: CompletedExperiment,
+}
+
+/// Handle over one project's on-disk store: paths, an open SQLite
+/// connection, and the deserialized config/schema JSON files.
+pub struct ProjectStore {
+    // Directory containing the `.fidget_spinner` store.
+    project_root: Utf8PathBuf,
+    // `<project_root>/.fidget_spinner` (see `state_root` helper).
+    state_root: Utf8PathBuf,
+    connection: Connection,
+    config: ProjectConfig,
+    schema: ProjectSchema,
+}
+
+impl ProjectStore {
+    /// Initialize a brand-new store under `project_root`: create the state
+    /// directory (including `blobs/`), write `project.json` and `schema.json`,
+    /// open the SQLite database, and run migrations.
+    ///
+    /// # Errors
+    /// I/O, JSON, or SQLite failures during setup.
+    pub fn init(
+        project_root: impl AsRef<Utf8Path>,
+        display_name: NonEmptyText,
+        schema_namespace: NonEmptyText,
+    ) -> Result<Self, StoreError> {
+        let project_root = project_root.as_ref().to_path_buf();
+        let state_root = state_root(&project_root);
+        fs::create_dir_all(state_root.join("blobs"))?;
+        let config = ProjectConfig::new(display_name);
+        write_json_file(&state_root.join(PROJECT_CONFIG_NAME), &config)?;
+        let schema = ProjectSchema::default_with_namespace(schema_namespace);
+        write_json_file(&state_root.join(PROJECT_SCHEMA_NAME), &schema)?;
+
+        let connection = Connection::open(state_root.join(STATE_DB_NAME).as_std_path())?;
+        migrate(&connection)?;
+
+        Ok(Self {
+            project_root,
+            state_root,
+            connection,
+            config,
+            schema,
+        })
+    }
+
+    /// Open an existing store at or above `project_root` (discovery is
+    /// delegated to `discover_project_root`), reading config and schema from
+    /// disk and re-running idempotent migrations.
+    ///
+    /// # Errors
+    /// `MissingProjectStore` when no store directory is found; otherwise
+    /// I/O, JSON, or SQLite failures.
+    pub fn open(project_root: impl AsRef<Utf8Path>) -> Result<Self, StoreError> {
+        let requested_root = project_root.as_ref().to_path_buf();
+        let project_root = discover_project_root(&requested_root)
+            .ok_or(StoreError::MissingProjectStore(requested_root))?;
+        let state_root = state_root(&project_root);
+        let config = read_json_file::<ProjectConfig>(&state_root.join(PROJECT_CONFIG_NAME))?;
+        let schema = read_json_file::<ProjectSchema>(&state_root.join(PROJECT_SCHEMA_NAME))?;
+        let connection = Connection::open(state_root.join(STATE_DB_NAME).as_std_path())?;
+        migrate(&connection)?;
+        Ok(Self {
+            project_root,
+            state_root,
+            connection,
+            config,
+            schema,
+        })
+    }
+
+    /// Project configuration loaded at open/init time.
+    #[must_use]
+    pub fn config(&self) -> &ProjectConfig {
+        &self.config
+    }
+
+    /// Project schema used to validate node payloads.
+    #[must_use]
+    pub fn schema(&self) -> &ProjectSchema {
+        &self.schema
+    }
+
+    /// Root directory of the project this store belongs to.
+    #[must_use]
+    pub fn project_root(&self) -> &Utf8Path {
+        &self.project_root
+    }
+
+    /// Directory holding the SQLite database and JSON config files.
+    #[must_use]
+    pub fn state_root(&self) -> &Utf8Path {
+        &self.state_root
+    }
+
+    /// Create a frontier together with its root contract node and, when
+    /// requested, a seed champion checkpoint — all in one transaction.
+    /// Emits a `frontier.created` event and returns the fresh projection.
+    ///
+    /// # Errors
+    /// Schema/JSON encoding failures or SQLite failures.
+    pub fn create_frontier(
+        &mut self,
+        request: CreateFrontierRequest,
+    ) -> Result<FrontierProjection, StoreError> {
+        let frontier_id = fidget_spinner_core::FrontierId::fresh();
+        let payload = NodePayload::with_schema(
+            self.schema.schema_ref(),
+            frontier_contract_payload(&request.contract)?,
+        );
+        // Diagnostics are recorded on the node but do not block ingest.
+        let diagnostics = self.schema.validate_node(NodeClass::Contract, &payload);
+        let contract_node = DagNode::new(
+            NodeClass::Contract,
+            Some(frontier_id),
+            request.contract_title,
+            request.contract_summary,
+            payload,
+            diagnostics,
+        );
+        let frontier = FrontierRecord::with_id(frontier_id, request.label, contract_node.id);
+
+        let tx = self.connection.transaction()?;
+        // Node first: frontiers.root_contract_node_id references nodes(id).
+        insert_node(&tx, &contract_node)?;
+        insert_frontier(&tx, &frontier)?;
+        if let Some(seed) = request.initial_checkpoint {
+            // Seed checkpoint starts life as the frontier's champion.
+            let checkpoint = CheckpointRecord {
+                id: fidget_spinner_core::CheckpointId::fresh(),
+                frontier_id: frontier.id,
+                node_id: contract_node.id,
+                snapshot: seed.snapshot,
+                disposition: CheckpointDisposition::Champion,
+                summary: seed.summary,
+                created_at: OffsetDateTime::now_utc(),
+            };
+            insert_checkpoint(&tx, &checkpoint)?;
+        }
+        insert_event(
+            &tx,
+            "frontier",
+            &frontier.id.to_string(),
+            "frontier.created",
+            json!({"root_contract_node_id": contract_node.id}),
+        )?;
+        tx.commit()?;
+
+        self.frontier_projection(frontier.id)
+    }
+
+    /// Persist a new node (validated against the project schema), its
+    /// annotations, and any requested edges, then emit a `node.created` event.
+    ///
+    /// # Errors
+    /// SQLite or JSON encoding failures.
+    pub fn add_node(&mut self, request: CreateNodeRequest) -> Result<DagNode, StoreError> {
+        let diagnostics = self.schema.validate_node(request.class, &request.payload);
+        let mut node = DagNode::new(
+            request.class,
+            request.frontier_id,
+            request.title,
+            request.summary,
+            request.payload,
+            diagnostics,
+        );
+        node.annotations = request.annotations;
+
+        let tx = self.connection.transaction()?;
+        insert_node(&tx, &node)?;
+        for attachment in &request.attachments {
+            insert_edge(&tx, &attachment.materialize(node.id))?;
+        }
+        insert_event(
+            &tx,
+            "node",
+            &node.id.to_string(),
+            "node.created",
+            json!({"class": node.class.as_str(), "frontier_id": node.frontier_id}),
+        )?;
+        tx.commit()?;
+        Ok(node)
+    }
+
+    /// Soft-delete: flag the node as archived and bump `updated_at`.
+    ///
+    /// # Errors
+    /// `NodeNotFound` when no row matched the id.
+    pub fn archive_node(&mut self, node_id: fidget_spinner_core::NodeId) -> Result<(), StoreError> {
+        let updated_at = encode_timestamp(OffsetDateTime::now_utc())?;
+        let changed = self.connection.execute(
+            "UPDATE nodes SET archived = 1, updated_at = ?1 WHERE id = ?2",
+            params![updated_at, node_id.to_string()],
+        )?;
+        if changed == 0 {
+            return Err(StoreError::NodeNotFound(node_id));
+        }
+        Ok(())
+    }
+
+    /// Attach an annotation to an existing node, bump its `updated_at`, and
+    /// emit a `node.annotated` event — atomically.
+    ///
+    /// # Errors
+    /// `NodeNotFound` when the node does not exist; SQLite failures otherwise.
+    pub fn annotate_node(
+        &mut self,
+        node_id: fidget_spinner_core::NodeId,
+        annotation: NodeAnnotation,
+    ) -> Result<(), StoreError> {
+        let tx = self.connection.transaction()?;
+        // Existence check up front so we can return a domain error rather
+        // than a foreign-key violation.
+        let exists = tx
+            .query_row(
+                "SELECT 1 FROM nodes WHERE id = ?1",
+                params![node_id.to_string()],
+                |row| row.get::<_, i64>(0),
+            )
+            .optional()?;
+        if exists.is_none() {
+            return Err(StoreError::NodeNotFound(node_id));
+        }
+        insert_annotation(&tx, node_id, &annotation)?;
+        let _ = tx.execute(
+            "UPDATE nodes SET updated_at = ?1 WHERE id = ?2",
+            params![
+                encode_timestamp(OffsetDateTime::now_utc())?,
+                node_id.to_string()
+            ],
+        )?;
+        insert_event(
+            &tx,
+            "node",
+            &node_id.to_string(),
+            "node.annotated",
+            json!({"visibility": format!("{:?}", annotation.visibility)}),
+        )?;
+        tx.commit()?;
+        Ok(())
+    }
+
+    /// Load a full node (payload, diagnostics, and annotations) by id;
+    /// `Ok(None)` when the id is unknown.
+    ///
+    /// # Errors
+    /// SQLite or decoding failures.
+    pub fn get_node(
+        &self,
+        node_id: fidget_spinner_core::NodeId,
+    ) -> Result<Option<DagNode>, StoreError> {
+        let mut statement = self.connection.prepare(
+            "SELECT
+                id,
+                class,
+                track,
+                frontier_id,
+                archived,
+                title,
+                summary,
+                payload_schema_namespace,
+                payload_schema_version,
+                payload_json,
+                diagnostics_json,
+                agent_session_id,
+                created_at,
+                updated_at
+            FROM nodes
+            WHERE id = ?1",
+        )?;
+        let node = statement
+            .query_row(params![node_id.to_string()], read_node_row)
+            .optional()?;
+        // Annotations live in their own table; hydrate them separately.
+        node.map(|mut item| {
+            item.annotations = self.load_annotations(item.id)?;
+            Ok(item)
+        })
+        .transpose()
+    }
+
+    /// List node summaries matching `query`, newest-updated first, including
+    /// a per-node count of hidden annotations via a correlated subquery.
+    ///
+    /// # Errors
+    /// SQLite or row-decoding failures.
+    pub fn list_nodes(&self, query: ListNodesQuery) -> Result<Vec<NodeSummary>, StoreError> {
+        let frontier_id = query.frontier_id.map(|id| id.to_string());
+        let class = query.class.map(|item| item.as_str().to_owned());
+        let limit = i64::from(query.limit);
+        let mut statement = self.connection.prepare(
+            "SELECT
+                n.id,
+                n.class,
+                n.track,
+                n.frontier_id,
+                n.archived,
+                n.title,
+                n.summary,
+                n.diagnostics_json,
+                n.created_at,
+                n.updated_at,
+                (
+                    SELECT COUNT(*)
+                    FROM node_annotations AS a
+                    WHERE a.node_id = n.id AND a.visibility = 'hidden'
+                ) AS hidden_annotation_count
+            FROM nodes AS n
+            WHERE (?1 IS NULL OR n.frontier_id = ?1)
+              AND (?2 IS NULL OR n.class = ?2)
+              AND (?3 = 1 OR n.archived = 0)
+            ORDER BY n.updated_at DESC
+            LIMIT ?4",
+        )?;
+        let mut rows = statement.query(params![
+            frontier_id,
+            class,
+            i64::from(query.include_archived),
+            limit
+        ])?;
+        let mut items = Vec::new();
+        while let Some(row) = rows.next()? {
+            // Diagnostics are stored as JSON; only the count is surfaced here.
+            let diagnostics = decode_json::<NodeDiagnostics>(&row.get::<_, String>(7)?)?;
+            items.push(NodeSummary {
+                id: parse_node_id(&row.get::<_, String>(0)?)?,
+                class: parse_node_class(&row.get::<_, String>(1)?)?,
+                track: parse_node_track(&row.get::<_, String>(2)?)?,
+                frontier_id: row
+                    .get::<_, Option<String>>(3)?
+                    .map(|raw| parse_frontier_id(&raw))
+                    .transpose()?,
+                archived: row.get::<_, i64>(4)? != 0,
+                title: NonEmptyText::new(row.get::<_, String>(5)?)?,
+                summary: row
+                    .get::<_, Option<String>>(6)?
+                    .map(NonEmptyText::new)
+                    .transpose()?,
+                diagnostic_count: diagnostics.items.len() as u64,
+                hidden_annotation_count: row.get::<_, i64>(10)? as u64,
+                created_at: decode_timestamp(&row.get::<_, String>(8)?)?,
+                updated_at: decode_timestamp(&row.get::<_, String>(9)?)?,
+            });
+        }
+        Ok(items)
+    }
+
+    /// List every frontier, most recently updated first.
+    ///
+    /// # Errors
+    /// SQLite or row-decoding failures.
+    pub fn list_frontiers(&self) -> Result<Vec<FrontierRecord>, StoreError> {
+        let mut statement = self.connection.prepare(
+            "SELECT id, label, root_contract_node_id, status, created_at, updated_at
+            FROM frontiers
+            ORDER BY updated_at DESC",
+        )?;
+        let mut rows = statement.query([])?;
+        let mut items = Vec::new();
+        while let Some(row) = rows.next()? {
+            items.push(read_frontier_row(row)?);
+        }
+        Ok(items)
+    }
+
+    /// Build a live view of a frontier: its record, current champion
+    /// checkpoint (if any), candidate checkpoint ids, and experiment count.
+    ///
+    /// # Errors
+    /// `FrontierNotFound` when the id is unknown; SQLite/decoding failures.
+    pub fn frontier_projection(
+        &self,
+        frontier_id: fidget_spinner_core::FrontierId,
+    ) -> Result<FrontierProjection, StoreError> {
+        let frontier = self.load_frontier(frontier_id)?;
+        let mut champion_checkpoint_id = None;
+        let mut candidate_checkpoint_ids = std::collections::BTreeSet::new();
+
+        let mut statement = self.connection.prepare(
+            "SELECT id, disposition
+            FROM checkpoints
+            WHERE frontier_id = ?1",
+        )?;
+        let mut rows = statement.query(params![frontier_id.to_string()])?;
+        while let Some(row) = rows.next()? {
+            let checkpoint_id = parse_checkpoint_id(&row.get::<_, String>(0)?)?;
+            // Only Champion and FrontierCandidate dispositions feed the
+            // projection; Baseline/DeadEnd/Archived are deliberately ignored.
+            match parse_checkpoint_disposition(&row.get::<_, String>(1)?)? {
+                CheckpointDisposition::Champion => champion_checkpoint_id = Some(checkpoint_id),
+                CheckpointDisposition::FrontierCandidate => {
+                    let _ = candidate_checkpoint_ids.insert(checkpoint_id);
+                }
+                CheckpointDisposition::Baseline
+                | CheckpointDisposition::DeadEnd
+                | CheckpointDisposition::Archived => {}
+            }
+        }
+        let experiment_count = self.connection.query_row(
+            "SELECT COUNT(*) FROM experiments WHERE frontier_id = ?1",
+            params![frontier_id.to_string()],
+            |row| row.get::<_, i64>(0),
+        )? as u64;
+
+        Ok(FrontierProjection {
+            frontier,
+            champion_checkpoint_id,
+            candidate_checkpoint_ids,
+            experiment_count,
+        })
+    }
+
+    /// Fetch a checkpoint by id; `Ok(None)` when the id is unknown.
+    ///
+    /// # Errors
+    /// SQLite or row-decoding failures.
+    pub fn load_checkpoint(
+        &self,
+        checkpoint_id: fidget_spinner_core::CheckpointId,
+    ) -> Result<Option<CheckpointRecord>, StoreError> {
+        let mut statement = self.connection.prepare(
+            "SELECT
+                id,
+                frontier_id,
+                node_id,
+                repo_root,
+                worktree_root,
+                worktree_name,
+                commit_hash,
+                disposition,
+                summary,
+                created_at
+            FROM checkpoints
+            WHERE id = ?1",
+        )?;
+        statement
+            .query_row(params![checkpoint_id.to_string()], |row| {
+                read_checkpoint_row(row)
+            })
+            .optional()
+            .map_err(StoreError::from)
+    }
+
+    /// Record a completed experiment atomically: run + decision nodes, lineage
+    /// and evidence edges, the run record with metrics, a candidate checkpoint
+    /// whose disposition follows the verdict (demoting the previous champion on
+    /// `PromoteToChampion`), the experiment row, a frontier touch, and an
+    /// `experiment.closed` event.
+    ///
+    /// # Errors
+    /// `NodeNotFound` / `NodeNotChange` / `FrontierNotFound` /
+    /// `CheckpointNotFound` on validation failures; SQLite/JSON failures otherwise.
+    pub fn close_experiment(
+        &mut self,
+        request: CloseExperimentRequest,
+    ) -> Result<ExperimentReceipt, StoreError> {
+        // Validate the change node: it must exist, be Change-class, and
+        // belong to the requested frontier.
+        let change_node = self
+            .get_node(request.change_node_id)?
+            .ok_or(StoreError::NodeNotFound(request.change_node_id))?;
+        if change_node.class != NodeClass::Change {
+            return Err(StoreError::NodeNotChange(request.change_node_id));
+        }
+        if change_node.frontier_id != Some(request.frontier_id) {
+            return Err(StoreError::FrontierNotFound(request.frontier_id));
+        }
+        // The base checkpoint must exist on the same frontier.
+        let base_checkpoint = self
+            .load_checkpoint(request.base_checkpoint_id)?
+            .ok_or(StoreError::CheckpointNotFound(request.base_checkpoint_id))?;
+        if base_checkpoint.frontier_id != request.frontier_id {
+            return Err(StoreError::CheckpointNotFound(request.base_checkpoint_id));
+        }
+
+        let run_payload = NodePayload::with_schema(
+            self.schema.schema_ref(),
+            json_object(json!({
+                "benchmark_suite": request.benchmark_suite.as_str(),
+                "backend": format!("{:?}", request.backend),
+                "command": request.command.argv.iter().map(NonEmptyText::as_str).collect::<Vec<_>>(),
+            }))?,
+        );
+        let run_diagnostics = self.schema.validate_node(NodeClass::Run, &run_payload);
+        let run_node = DagNode::new(
+            NodeClass::Run,
+            Some(request.frontier_id),
+            request.run_title,
+            request.run_summary,
+            run_payload,
+            run_diagnostics,
+        );
+        let run_id = fidget_spinner_core::RunId::fresh();
+        // One timestamp for the whole closure so run/checkpoint/experiment agree.
+        let now = OffsetDateTime::now_utc();
+        let run = RunRecord {
+            node_id: run_node.id,
+            run_id,
+            frontier_id: Some(request.frontier_id),
+            status: RunStatus::Succeeded,
+            backend: request.backend,
+            code_snapshot: request.code_snapshot,
+            benchmark_suite: Some(request.benchmark_suite.clone()),
+            command: request.command,
+            started_at: Some(now),
+            finished_at: Some(now),
+        };
+
+        let decision_payload = NodePayload::with_schema(
+            self.schema.schema_ref(),
+            json_object(json!({
+                "verdict": format!("{:?}", request.verdict),
+                "rationale": request.decision_rationale.as_str(),
+            }))?,
+        );
+        let decision_diagnostics = self
+            .schema
+            .validate_node(NodeClass::Decision, &decision_payload);
+        let decision_node = DagNode::new(
+            NodeClass::Decision,
+            Some(request.frontier_id),
+            request.decision_title,
+            Some(request.decision_rationale.clone()),
+            decision_payload,
+            decision_diagnostics,
+        );
+
+        let checkpoint = CheckpointRecord {
+            id: fidget_spinner_core::CheckpointId::fresh(),
+            frontier_id: request.frontier_id,
+            node_id: run_node.id,
+            snapshot: request.candidate_snapshot,
+            // Verdict -> disposition mapping for the candidate checkpoint.
+            disposition: match request.verdict {
+                FrontierVerdict::PromoteToChampion => CheckpointDisposition::Champion,
+                FrontierVerdict::KeepOnFrontier | FrontierVerdict::NeedsMoreEvidence => {
+                    CheckpointDisposition::FrontierCandidate
+                }
+                FrontierVerdict::RevertToChampion => CheckpointDisposition::DeadEnd,
+                FrontierVerdict::ArchiveDeadEnd => CheckpointDisposition::Archived,
+            },
+            summary: request.candidate_summary,
+            created_at: now,
+        };
+
+        let experiment = CompletedExperiment {
+            id: fidget_spinner_core::ExperimentId::fresh(),
+            frontier_id: request.frontier_id,
+            base_checkpoint_id: request.base_checkpoint_id,
+            candidate_checkpoint_id: checkpoint.id,
+            change_node_id: request.change_node_id,
+            run_node_id: run_node.id,
+            run_id,
+            analysis_node_id: request.analysis_node_id,
+            decision_node_id: decision_node.id,
+            result: ExperimentResult {
+                benchmark_suite: request.benchmark_suite,
+                primary_metric: request.primary_metric,
+                supporting_metrics: request.supporting_metrics,
+                benchmark_bundle: None,
+            },
+            note: request.note,
+            verdict: request.verdict,
+            created_at: now,
+        };
+
+        let tx = self.connection.transaction()?;
+        insert_node(&tx, &run_node)?;
+        insert_node(&tx, &decision_node)?;
+        // change --lineage--> run --evidence--> decision
+        insert_edge(
+            &tx,
+            &DagEdge {
+                source_id: request.change_node_id,
+                target_id: run_node.id,
+                kind: EdgeKind::Lineage,
+            },
+        )?;
+        insert_edge(
+            &tx,
+            &DagEdge {
+                source_id: run_node.id,
+                target_id: decision_node.id,
+                kind: EdgeKind::Evidence,
+            },
+        )?;
+        insert_run(
+            &tx,
+            &run,
+            &experiment.result.primary_metric,
+            &experiment.result.supporting_metrics,
+        )?;
+        // Demote the old champion before inserting the new one so at most
+        // one Champion row exists per frontier.
+        match request.verdict {
+            FrontierVerdict::PromoteToChampion => {
+                demote_previous_champion(&tx, request.frontier_id)?;
+            }
+            FrontierVerdict::KeepOnFrontier
+            | FrontierVerdict::NeedsMoreEvidence
+            | FrontierVerdict::RevertToChampion
+            | FrontierVerdict::ArchiveDeadEnd => {}
+        }
+        insert_checkpoint(&tx, &checkpoint)?;
+        insert_experiment(&tx, &experiment)?;
+        touch_frontier(&tx, request.frontier_id)?;
+        insert_event(
+            &tx,
+            "experiment",
+            &experiment.id.to_string(),
+            "experiment.closed",
+            json!({
+                "frontier_id": request.frontier_id,
+                "verdict": format!("{:?}", request.verdict),
+                "candidate_checkpoint_id": checkpoint.id,
+            }),
+        )?;
+        tx.commit()?;
+
+        Ok(ExperimentReceipt {
+            checkpoint,
+            run_node,
+            run,
+            decision_node,
+            experiment,
+        })
+    }
+
+    /// Try to derive a checkpoint seed from the project's git state; delegates
+    /// to the free function `auto_capture_checkpoint_seed` (defined elsewhere
+    /// in this file).
+    ///
+    /// # Errors
+    /// Propagates failures from the underlying capture helper.
+    pub fn auto_capture_checkpoint(
+        &self,
+        summary: NonEmptyText,
+    ) -> Result<Option<CheckpointSeed>, StoreError> {
+        auto_capture_checkpoint_seed(&self.project_root, summary)
+    }
+
+    /// Load all annotations for a node, oldest first.
+    fn load_annotations(
+        &self,
+        node_id: fidget_spinner_core::NodeId,
+    ) -> Result<Vec<NodeAnnotation>, StoreError> {
+        let mut statement = self.connection.prepare(
+            "SELECT id, visibility, label, body, created_at
+            FROM node_annotations
+            WHERE node_id = ?1
+            ORDER BY created_at ASC",
+        )?;
+        let mut rows = statement.query(params![node_id.to_string()])?;
+        let mut items = Vec::new();
+        while let Some(row) = rows.next()? {
+            items.push(NodeAnnotation {
+                id: parse_annotation_id(&row.get::<_, String>(0)?)?,
+                visibility: parse_annotation_visibility(&row.get::<_, String>(1)?)?,
+                label: row
+                    .get::<_, Option<String>>(2)?
+                    .map(NonEmptyText::new)
+                    .transpose()?,
+                body: NonEmptyText::new(row.get::<_, String>(3)?)?,
+                created_at: decode_timestamp(&row.get::<_, String>(4)?)?,
+            });
+        }
+        Ok(items)
+    }
+
+    /// Load a single frontier record, mapping a missing row to
+    /// `FrontierNotFound` instead of `Ok(None)`.
+    fn load_frontier(
+        &self,
+        frontier_id: fidget_spinner_core::FrontierId,
+    ) -> Result<FrontierRecord, StoreError> {
+        let mut statement = self.connection.prepare(
+            "SELECT id, label, root_contract_node_id, status, created_at, updated_at
+            FROM frontiers
+            WHERE id = ?1",
+        )?;
+        let frontier = statement
+            .query_row(params![frontier_id.to_string()], |row| {
+                read_frontier_row(row).map_err(to_sql_conversion_error)
+            })
+            .optional()?;
+        frontier.ok_or(StoreError::FrontierNotFound(frontier_id))
+    }
+}
+
+/// Create the store's tables if absent and enable foreign-key enforcement.
+///
+/// Idempotent (`CREATE TABLE IF NOT EXISTS` throughout); called on both
+/// `init` and every `open`. Note: `PRAGMA foreign_keys` is per-connection in
+/// SQLite, so it must run here each time the database is opened.
+///
+/// # Errors
+/// SQLite failures while executing the batch.
+fn migrate(connection: &Connection) -> Result<(), StoreError> {
+    connection.execute_batch(
+        "
+        PRAGMA foreign_keys = ON;
+
+        CREATE TABLE IF NOT EXISTS nodes (
+            id TEXT PRIMARY KEY,
+            class TEXT NOT NULL,
+            track TEXT NOT NULL,
+            frontier_id TEXT,
+            archived INTEGER NOT NULL,
+            title TEXT NOT NULL,
+            summary TEXT,
+            payload_schema_namespace TEXT,
+            payload_schema_version INTEGER,
+            payload_json TEXT NOT NULL,
+            diagnostics_json TEXT NOT NULL,
+            agent_session_id TEXT,
+            created_at TEXT NOT NULL,
+            updated_at TEXT NOT NULL
+        );
+
+        CREATE TABLE IF NOT EXISTS node_annotations (
+            id TEXT PRIMARY KEY,
+            node_id TEXT NOT NULL REFERENCES nodes(id) ON DELETE CASCADE,
+            visibility TEXT NOT NULL,
+            label TEXT,
+            body TEXT NOT NULL,
+            created_at TEXT NOT NULL
+        );
+
+        CREATE TABLE IF NOT EXISTS node_edges (
+            source_id TEXT NOT NULL REFERENCES nodes(id) ON DELETE CASCADE,
+            target_id TEXT NOT NULL REFERENCES nodes(id) ON DELETE CASCADE,
+            kind TEXT NOT NULL,
+            PRIMARY KEY (source_id, target_id, kind)
+        );
+
+        CREATE TABLE IF NOT EXISTS frontiers (
+            id TEXT PRIMARY KEY,
+            label TEXT NOT NULL,
+            root_contract_node_id TEXT NOT NULL REFERENCES nodes(id) ON DELETE RESTRICT,
+            status TEXT NOT NULL,
+            created_at TEXT NOT NULL,
+            updated_at TEXT NOT NULL
+        );
+
+        CREATE TABLE IF NOT EXISTS checkpoints (
+            id TEXT PRIMARY KEY,
+            frontier_id TEXT NOT NULL REFERENCES frontiers(id) ON DELETE CASCADE,
+            node_id TEXT NOT NULL REFERENCES nodes(id) ON DELETE RESTRICT,
+            repo_root TEXT NOT NULL,
+            worktree_root TEXT NOT NULL,
+            worktree_name TEXT,
+            commit_hash TEXT NOT NULL,
+            disposition TEXT NOT NULL,
+            summary TEXT NOT NULL,
+            created_at TEXT NOT NULL
+        );
+
+        CREATE TABLE IF NOT EXISTS runs (
+            run_id TEXT PRIMARY KEY,
+            node_id TEXT NOT NULL REFERENCES nodes(id) ON DELETE CASCADE,
+            frontier_id TEXT REFERENCES frontiers(id) ON DELETE SET NULL,
+            status TEXT NOT NULL,
+            backend TEXT NOT NULL,
+            repo_root TEXT,
+            worktree_root TEXT,
+            worktree_name TEXT,
+            head_commit TEXT,
+            dirty_paths_json TEXT,
+            benchmark_suite TEXT,
+            working_directory TEXT NOT NULL,
+            argv_json TEXT NOT NULL,
+            env_json TEXT NOT NULL,
+            started_at TEXT,
+            finished_at TEXT
+        );
+
+        CREATE TABLE IF NOT EXISTS metrics (
+            run_id TEXT NOT NULL REFERENCES runs(run_id) ON DELETE CASCADE,
+            metric_key TEXT NOT NULL,
+            unit TEXT NOT NULL,
+            objective TEXT NOT NULL,
+            value REAL NOT NULL
+        );
+
+        CREATE TABLE IF NOT EXISTS experiments (
+            id TEXT PRIMARY KEY,
+            frontier_id TEXT NOT NULL REFERENCES frontiers(id) ON DELETE CASCADE,
+            base_checkpoint_id TEXT NOT NULL REFERENCES checkpoints(id) ON DELETE RESTRICT,
+            candidate_checkpoint_id TEXT NOT NULL REFERENCES checkpoints(id) ON DELETE RESTRICT,
+            change_node_id TEXT NOT NULL REFERENCES nodes(id) ON DELETE RESTRICT,
+            run_node_id TEXT NOT NULL REFERENCES nodes(id) ON DELETE RESTRICT,
+            run_id TEXT NOT NULL REFERENCES runs(run_id) ON DELETE RESTRICT,
+            analysis_node_id TEXT REFERENCES nodes(id) ON DELETE RESTRICT,
+            decision_node_id TEXT NOT NULL REFERENCES nodes(id) ON DELETE RESTRICT,
+            benchmark_suite TEXT NOT NULL,
+            primary_metric_json TEXT NOT NULL,
+            supporting_metrics_json TEXT NOT NULL,
+            note_summary TEXT NOT NULL,
+            note_next_json TEXT NOT NULL,
+            verdict TEXT NOT NULL,
+            created_at TEXT NOT NULL
+        );
+
+        CREATE TABLE IF NOT EXISTS events (
+            id INTEGER PRIMARY KEY AUTOINCREMENT,
+            entity_kind TEXT NOT NULL,
+            entity_id TEXT NOT NULL,
+            event_kind TEXT NOT NULL,
+            payload_json TEXT NOT NULL,
+            created_at TEXT NOT NULL
+        );
+        ",
+    )?;
+    Ok(())
+}
+
+/// Insert a node row (payload and diagnostics serialized to JSON, the schema
+/// reference denormalized into its own columns) plus all of its annotations,
+/// inside the caller's transaction.
+///
+/// # Errors
+/// SQLite, JSON-encoding, or timestamp-formatting failures.
+fn insert_node(tx: &Transaction<'_>, node: &DagNode) -> Result<(), StoreError> {
+    // Denormalize the optional schema reference so it is queryable without
+    // parsing payload_json.
+    let schema_namespace = node
+        .payload
+        .schema
+        .as_ref()
+        .map(|schema| schema.namespace.as_str());
+    let schema_version = node
+        .payload
+        .schema
+        .as_ref()
+        .map(|schema| i64::from(schema.version));
+    let _ = tx.execute(
+        "INSERT INTO nodes (
+            id,
+            class,
+            track,
+            frontier_id,
+            archived,
+            title,
+            summary,
+            payload_schema_namespace,
+            payload_schema_version,
+            payload_json,
+            diagnostics_json,
+            agent_session_id,
+            created_at,
+            updated_at
+        ) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14)",
+        params![
+            node.id.to_string(),
+            node.class.as_str(),
+            encode_node_track(node.track),
+            node.frontier_id.map(|id| id.to_string()),
+            i64::from(node.archived),
+            node.title.as_str(),
+            node.summary.as_ref().map(NonEmptyText::as_str),
+            schema_namespace,
+            schema_version,
+            encode_json(&node.payload)?,
+            encode_json(&node.diagnostics)?,
+            node.agent_session_id.map(|id| id.to_string()),
+            encode_timestamp(node.created_at)?,
+            encode_timestamp(node.updated_at)?,
+        ],
+    )?;
+    for annotation in &node.annotations {
+        insert_annotation(tx, node.id, annotation)?;
+    }
+    Ok(())
+}
+
+/// Insert one annotation row for `node_id` inside the caller's transaction.
+/// Assumes the node row already exists (node_annotations.node_id is a FK).
+///
+/// # Errors
+/// SQLite or timestamp-formatting failures.
+fn insert_annotation(
+    tx: &Transaction<'_>,
+    node_id: fidget_spinner_core::NodeId,
+    annotation: &NodeAnnotation,
+) -> Result<(), StoreError> {
+    let _ = tx.execute(
+        "INSERT INTO node_annotations (id, node_id, visibility, label, body, created_at)
+        VALUES (?1, ?2, ?3, ?4, ?5, ?6)",
+        params![
+            annotation.id.to_string(),
+            node_id.to_string(),
+            encode_annotation_visibility(annotation.visibility),
+            annotation.label.as_ref().map(NonEmptyText::as_str),
+            annotation.body.as_str(),
+            encode_timestamp(annotation.created_at)?,
+        ],
+    )?;
+    Ok(())
+}
+
+/// Insert an edge row inside the caller's transaction. Uses `INSERT OR
+/// IGNORE`, so re-adding an identical (source, target, kind) triple is a
+/// silent no-op rather than a constraint violation.
+///
+/// # Errors
+/// SQLite failures.
+fn insert_edge(tx: &Transaction<'_>, edge: &DagEdge) -> Result<(), StoreError> {
+    let _ = tx.execute(
+        "INSERT OR IGNORE INTO node_edges (source_id, target_id, kind)
+        VALUES (?1, ?2, ?3)",
+        params![
+            edge.source_id.to_string(),
+            edge.target_id.to_string(),
+            encode_edge_kind(edge.kind),
+        ],
+    )?;
+    Ok(())
+}
+
+/// Insert a frontier row inside the caller's transaction. The referenced
+/// root contract node must already be persisted (FK on root_contract_node_id).
+///
+/// # Errors
+/// SQLite or timestamp-formatting failures.
+fn insert_frontier(tx: &Transaction<'_>, frontier: &FrontierRecord) -> Result<(), StoreError> {
+    let _ = tx.execute(
+        "INSERT INTO frontiers (id, label, root_contract_node_id, status, created_at, updated_at)
+        VALUES (?1, ?2, ?3, ?4, ?5, ?6)",
+        params![
+            frontier.id.to_string(),
+            frontier.label.as_str(),
+            frontier.root_contract_node_id.to_string(),
+            encode_frontier_status(frontier.status),
+            encode_timestamp(frontier.created_at)?,
+            encode_timestamp(frontier.updated_at)?,
+        ],
+    )?;
+    Ok(())
+}
+
+/// Insert a checkpoint row inside the caller's transaction, flattening the
+/// snapshot reference (repo/worktree/commit) into individual columns.
+///
+/// # Errors
+/// SQLite or timestamp-formatting failures.
+fn insert_checkpoint(
+    tx: &Transaction<'_>,
+    checkpoint: &CheckpointRecord,
+) -> Result<(), StoreError> {
+    let _ = tx.execute(
+        "INSERT INTO checkpoints (
+            id,
+            frontier_id,
+            node_id,
+            repo_root,
+            worktree_root,
+            worktree_name,
+            commit_hash,
+            disposition,
+            summary,
+            created_at
+        ) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10)",
+        params![
+            checkpoint.id.to_string(),
+            checkpoint.frontier_id.to_string(),
+            checkpoint.node_id.to_string(),
+            checkpoint.snapshot.repo_root.as_str(),
+            checkpoint.snapshot.worktree_root.as_str(),
+            checkpoint
+                .snapshot
+                .worktree_name
+                .as_ref()
+                .map(NonEmptyText::as_str),
+            checkpoint.snapshot.commit_hash.as_str(),
+            encode_checkpoint_disposition(checkpoint.disposition),
+            checkpoint.summary.as_str(),
+            encode_timestamp(checkpoint.created_at)?,
+        ],
+    )?;
+    Ok(())
+}
+
+/// Insert a run row plus one metric row per observation (the primary metric
+/// followed by all supporting metrics), inside the caller's transaction.
+/// The optional code snapshot is flattened into nullable columns.
+///
+/// # Errors
+/// SQLite, JSON-encoding, or timestamp-formatting failures.
+fn insert_run(
+    tx: &Transaction<'_>,
+    run: &RunRecord,
+    primary_metric: &MetricObservation,
+    supporting_metrics: &[MetricObservation],
+) -> Result<(), StoreError> {
+    // Flatten the optional snapshot into five independent Option columns;
+    // all five are NULL when no snapshot was captured.
+    let (repo_root, worktree_root, worktree_name, head_commit, dirty_paths) = run
+        .code_snapshot
+        .as_ref()
+        .map_or((None, None, None, None, None), |snapshot| {
+            (
+                Some(snapshot.repo_root.as_str().to_owned()),
+                Some(snapshot.worktree_root.as_str().to_owned()),
+                snapshot.worktree_name.as_ref().map(ToOwned::to_owned),
+                snapshot.head_commit.as_ref().map(ToOwned::to_owned),
+                Some(
+                    snapshot
+                        .dirty_paths
+                        .iter()
+                        .map(ToOwned::to_owned)
+                        .collect::<Vec<_>>(),
+                ),
+            )
+        });
+    let dirty_paths_json = match dirty_paths.as_ref() {
+        Some(paths) => Some(encode_json(paths)?),
+        None => None,
+    };
+    let started_at = match run.started_at {
+        Some(timestamp) => Some(encode_timestamp(timestamp)?),
+        None => None,
+    };
+    let finished_at = match run.finished_at {
+        Some(timestamp) => Some(encode_timestamp(timestamp)?),
+        None => None,
+    };
+    let _ = tx.execute(
+        "INSERT INTO runs (
+            run_id,
+            node_id,
+            frontier_id,
+            status,
+            backend,
+            repo_root,
+            worktree_root,
+            worktree_name,
+            head_commit,
+            dirty_paths_json,
+            benchmark_suite,
+            working_directory,
+            argv_json,
+            env_json,
+            started_at,
+            finished_at
+        ) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, ?16)",
+        params![
+            run.run_id.to_string(),
+            run.node_id.to_string(),
+            run.frontier_id.map(|id| id.to_string()),
+            encode_run_status(run.status),
+            encode_backend(run.backend),
+            repo_root,
+            worktree_root,
+            worktree_name.map(|item| item.to_string()),
+            head_commit.map(|item| item.to_string()),
+            dirty_paths_json,
+            run.benchmark_suite.as_ref().map(NonEmptyText::as_str),
+            run.command.working_directory.as_str(),
+            encode_json(&run.command.argv)?,
+            encode_json(&run.command.env)?,
+            started_at,
+            finished_at,
+        ],
+    )?;
+
+    // Primary metric first, then supporting metrics, one row each.
+    for metric in std::iter::once(primary_metric).chain(supporting_metrics.iter()) {
+        let _ = tx.execute(
+            "INSERT INTO metrics (run_id, metric_key, unit, objective, value)
+            VALUES (?1, ?2, ?3, ?4, ?5)",
+            params![
+                run.run_id.to_string(),
+                metric.metric_key.as_str(),
+                encode_metric_unit(metric.unit),
+                encode_optimization_objective(metric.objective),
+                metric.value,
+            ],
+        )?;
+    }
+    Ok(())
+}
+
/// Persist one completed (atomically closed) experiment record inside the
/// supplied transaction.
///
/// Result metrics and next-hypothesis notes are serialized as JSON columns;
/// the verdict is stored in its stable string form. JSON, timestamp, and SQL
/// failures all surface as [`StoreError`].
fn insert_experiment(
    tx: &Transaction<'_>,
    experiment: &CompletedExperiment,
) -> Result<(), StoreError> {
    // Affected-row count of a single INSERT is irrelevant; discarded on purpose.
    let _ = tx.execute(
        "INSERT INTO experiments (
            id,
            frontier_id,
            base_checkpoint_id,
            candidate_checkpoint_id,
            change_node_id,
            run_node_id,
            run_id,
            analysis_node_id,
            decision_node_id,
            benchmark_suite,
            primary_metric_json,
            supporting_metrics_json,
            note_summary,
            note_next_json,
            verdict,
            created_at
        ) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, ?16)",
        params![
            experiment.id.to_string(),
            experiment.frontier_id.to_string(),
            experiment.base_checkpoint_id.to_string(),
            experiment.candidate_checkpoint_id.to_string(),
            experiment.change_node_id.to_string(),
            experiment.run_node_id.to_string(),
            experiment.run_id.to_string(),
            // Analysis is optional on an experiment; NULL when absent.
            experiment.analysis_node_id.map(|id| id.to_string()),
            experiment.decision_node_id.to_string(),
            experiment.result.benchmark_suite.as_str(),
            encode_json(&experiment.result.primary_metric)?,
            encode_json(&experiment.result.supporting_metrics)?,
            experiment.note.summary.as_str(),
            encode_json(&experiment.note.next_hypotheses)?,
            encode_frontier_verdict(experiment.verdict),
            encode_timestamp(experiment.created_at)?,
        ],
    )?;
    Ok(())
}
+
+fn insert_event(
+ tx: &Transaction<'_>,
+ entity_kind: &str,
+ entity_id: &str,
+ event_kind: &str,
+ payload: Value,
+) -> Result<(), StoreError> {
+ let _ = tx.execute(
+ "INSERT INTO events (entity_kind, entity_id, event_kind, payload_json, created_at)
+ VALUES (?1, ?2, ?3, ?4, ?5)",
+ params![
+ entity_kind,
+ entity_id,
+ event_kind,
+ payload.to_string(),
+ encode_timestamp(OffsetDateTime::now_utc())?,
+ ],
+ )?;
+ Ok(())
+}
+
+fn touch_frontier(
+ tx: &Transaction<'_>,
+ frontier_id: fidget_spinner_core::FrontierId,
+) -> Result<(), StoreError> {
+ let _ = tx.execute(
+ "UPDATE frontiers SET updated_at = ?1 WHERE id = ?2",
+ params![
+ encode_timestamp(OffsetDateTime::now_utc())?,
+ frontier_id.to_string()
+ ],
+ )?;
+ Ok(())
+}
+
/// Downgrade the current champion checkpoint of `frontier_id` (if any) to
/// `baseline`, making room for a newly promoted champion.
///
/// A no-op when the frontier has no champion; the affected-row count is
/// deliberately not checked.
fn demote_previous_champion(
    tx: &Transaction<'_>,
    frontier_id: fidget_spinner_core::FrontierId,
) -> Result<(), StoreError> {
    let _ = tx.execute(
        "UPDATE checkpoints
         SET disposition = 'baseline'
         WHERE frontier_id = ?1 AND disposition = 'champion'",
        params![frontier_id.to_string()],
    )?;
    Ok(())
}
+
/// Decode one `nodes` row into a [`DagNode`].
///
/// Column order relied on here: 0 = id, 1 = class, 2 = track,
/// 3 = frontier_id, 4 = archived, 5 = title, 6 = summary,
/// 9 = payload_json, 10 = diagnostics_json, 11 = agent_session_id,
/// 12 = created_at, 13 = updated_at. Columns 7-8 are not read here;
/// per the schema notes these hold the payload's schema namespace/version —
/// presumably redundant with the decoded payload (TODO confirm).
///
/// Domain-level parse failures are wrapped as `FromSqlConversionFailure`
/// so they flow through the `rusqlite` error channel.
fn read_node_row(row: &rusqlite::Row<'_>) -> Result<DagNode, rusqlite::Error> {
    let payload_json = row.get::<_, String>(9)?;
    let diagnostics_json = row.get::<_, String>(10)?;
    let payload = decode_json::<NodePayload>(&payload_json).map_err(to_sql_conversion_error)?;
    let diagnostics =
        decode_json::<NodeDiagnostics>(&diagnostics_json).map_err(to_sql_conversion_error)?;
    Ok(DagNode {
        id: parse_node_id(&row.get::<_, String>(0)?).map_err(to_sql_conversion_error)?,
        class: parse_node_class(&row.get::<_, String>(1)?).map_err(to_sql_conversion_error)?,
        track: parse_node_track(&row.get::<_, String>(2)?).map_err(to_sql_conversion_error)?,
        frontier_id: row
            .get::<_, Option<String>>(3)?
            .map(|raw| parse_frontier_id(&raw))
            .transpose()
            .map_err(to_sql_conversion_error)?,
        // SQLite stores booleans as integers; any non-zero value is "archived".
        archived: row.get::<_, i64>(4)? != 0,
        title: NonEmptyText::new(row.get::<_, String>(5)?).map_err(core_to_sql_conversion_error)?,
        summary: row
            .get::<_, Option<String>>(6)?
            .map(NonEmptyText::new)
            .transpose()
            .map_err(core_to_sql_conversion_error)?,
        payload,
        // Annotations live in a sidecar table; left empty here and
        // presumably attached by the caller — verify against `get_node`.
        annotations: Vec::new(),
        diagnostics,
        agent_session_id: row
            .get::<_, Option<String>>(11)?
            .map(|raw| parse_agent_session_id(&raw))
            .transpose()
            .map_err(to_sql_conversion_error)?,
        created_at: decode_timestamp(&row.get::<_, String>(12)?)
            .map_err(to_sql_conversion_error)?,
        updated_at: decode_timestamp(&row.get::<_, String>(13)?)
            .map_err(to_sql_conversion_error)?,
    })
}
+
+fn read_frontier_row(row: &rusqlite::Row<'_>) -> Result<FrontierRecord, StoreError> {
+ Ok(FrontierRecord {
+ id: parse_frontier_id(&row.get::<_, String>(0)?)?,
+ label: NonEmptyText::new(row.get::<_, String>(1)?)?,
+ root_contract_node_id: parse_node_id(&row.get::<_, String>(2)?)?,
+ status: parse_frontier_status(&row.get::<_, String>(3)?)?,
+ created_at: decode_timestamp(&row.get::<_, String>(4)?)?,
+ updated_at: decode_timestamp(&row.get::<_, String>(5)?)?,
+ })
+}
+
/// Decode one `checkpoints` row into a [`CheckpointRecord`].
///
/// Column order relied on here: 0 = id, 1 = frontier_id, 2 = node_id,
/// 3 = repo_root, 4 = worktree_root, 5 = worktree_name (nullable),
/// 6 = commit_hash, 7 = disposition, 8 = summary, 9 = created_at.
/// Domain-level failures are wrapped as `FromSqlConversionFailure`.
fn read_checkpoint_row(row: &rusqlite::Row<'_>) -> Result<CheckpointRecord, rusqlite::Error> {
    Ok(CheckpointRecord {
        id: parse_checkpoint_id(&row.get::<_, String>(0)?).map_err(to_sql_conversion_error)?,
        frontier_id: parse_frontier_id(&row.get::<_, String>(1)?)
            .map_err(to_sql_conversion_error)?,
        node_id: parse_node_id(&row.get::<_, String>(2)?).map_err(to_sql_conversion_error)?,
        snapshot: CheckpointSnapshotRef {
            repo_root: Utf8PathBuf::from(row.get::<_, String>(3)?),
            worktree_root: Utf8PathBuf::from(row.get::<_, String>(4)?),
            worktree_name: row
                .get::<_, Option<String>>(5)?
                .map(NonEmptyText::new)
                .transpose()
                .map_err(core_to_sql_conversion_error)?,
            commit_hash: GitCommitHash::new(row.get::<_, String>(6)?)
                .map_err(core_to_sql_conversion_error)?,
        },
        disposition: parse_checkpoint_disposition(&row.get::<_, String>(7)?)
            .map_err(to_sql_conversion_error)?,
        summary: NonEmptyText::new(row.get::<_, String>(8)?)
            .map_err(core_to_sql_conversion_error)?,
        created_at: decode_timestamp(&row.get::<_, String>(9)?).map_err(to_sql_conversion_error)?,
    })
}
+
+fn frontier_contract_payload(contract: &FrontierContract) -> Result<JsonObject, StoreError> {
+ json_object(json!({
+ "objective": contract.objective.as_str(),
+ "benchmark_suites": contract
+ .evaluation
+ .benchmark_suites
+ .iter()
+ .map(NonEmptyText::as_str)
+ .collect::<Vec<_>>(),
+ "primary_metric": metric_spec_json(&contract.evaluation.primary_metric),
+ "supporting_metrics": contract
+ .evaluation
+ .supporting_metrics
+ .iter()
+ .map(metric_spec_json)
+ .collect::<Vec<_>>(),
+ "promotion_criteria": contract
+ .promotion_criteria
+ .iter()
+ .map(NonEmptyText::as_str)
+ .collect::<Vec<_>>(),
+ }))
+}
+
/// JSON form of a [`MetricSpec`] as embedded in contract payloads and
/// experiment metric columns (keys: metric_key, unit, objective).
fn metric_spec_json(metric: &MetricSpec) -> Value {
    json!({
        "metric_key": metric.metric_key.as_str(),
        "unit": encode_metric_unit(metric.unit),
        "objective": encode_optimization_objective(metric.objective),
    })
}
+
+fn json_object(value: Value) -> Result<JsonObject, StoreError> {
+ match value {
+ Value::Object(map) => Ok(map),
+ other => Err(StoreError::Json(serde_json::Error::io(io::Error::new(
+ io::ErrorKind::InvalidInput,
+ format!("expected JSON object, got {other:?}"),
+ )))),
+ }
+}
+
+fn write_json_file<T: Serialize>(path: &Utf8Path, value: &T) -> Result<(), StoreError> {
+ let serialized = serde_json::to_string_pretty(value)?;
+ fs::write(path.as_std_path(), serialized)?;
+ Ok(())
+}
+
+fn read_json_file<T: for<'de> Deserialize<'de>>(path: &Utf8Path) -> Result<T, StoreError> {
+ let bytes = fs::read(path.as_std_path())?;
+ serde_json::from_slice(&bytes).map_err(StoreError::from)
+}
+
+fn encode_json<T: Serialize>(value: &T) -> Result<String, StoreError> {
+ serde_json::to_string(value).map_err(StoreError::from)
+}
+
+fn decode_json<T: for<'de> Deserialize<'de>>(raw: &str) -> Result<T, StoreError> {
+ serde_json::from_str(raw).map_err(StoreError::from)
+}
+
+fn encode_timestamp(timestamp: OffsetDateTime) -> Result<String, StoreError> {
+ timestamp.format(&Rfc3339).map_err(StoreError::from)
+}
+
+fn decode_timestamp(raw: &str) -> Result<OffsetDateTime, StoreError> {
+ OffsetDateTime::parse(raw, &Rfc3339).map_err(StoreError::from)
+}
+
/// Location of a project's private state directory
/// (`<project root>/` joined with [`STORE_DIR_NAME`]).
fn state_root(project_root: &Utf8Path) -> Utf8PathBuf {
    project_root.join(STORE_DIR_NAME)
}
+
+#[must_use]
+pub fn discover_project_root(path: impl AsRef<Utf8Path>) -> Option<Utf8PathBuf> {
+ let mut cursor = discovery_start(path.as_ref());
+ loop {
+ if state_root(&cursor).exists() {
+ return Some(cursor);
+ }
+ let parent = cursor.parent()?;
+ cursor = parent.to_path_buf();
+ }
+}
+
+fn discovery_start(path: &Utf8Path) -> Utf8PathBuf {
+ match fs::metadata(path.as_std_path()) {
+ Ok(metadata) if metadata.is_file() => path
+ .parent()
+ .map_or_else(|| path.to_path_buf(), Utf8Path::to_path_buf),
+ _ => path.to_path_buf(),
+ }
+}
+
/// Build a [`CheckpointSeed`] from the git state of `project_root`, if it
/// lies inside a git repository.
///
/// Returns `Ok(None)` when `git rev-parse --show-toplevel` fails (not a
/// repo). A repo whose HEAD commit cannot be read is an error, because a
/// checkpoint requires a concrete commit hash.
fn auto_capture_checkpoint_seed(
    project_root: &Utf8Path,
    summary: NonEmptyText,
) -> Result<Option<CheckpointSeed>, StoreError> {
    let top_level = git_output(project_root, &["rev-parse", "--show-toplevel"])?;
    let Some(repo_root) = top_level else {
        return Ok(None);
    };
    let commit_hash = git_output(project_root, &["rev-parse", "HEAD"])?
        .ok_or_else(|| StoreError::GitInspectionFailed(project_root.to_path_buf()))?;
    // NOTE(review): on a detached HEAD, `--abbrev-ref HEAD` prints "HEAD",
    // which would be recorded as the worktree name — confirm this is intended.
    let worktree_name = git_output(project_root, &["rev-parse", "--abbrev-ref", "HEAD"])?;
    Ok(Some(CheckpointSeed {
        summary,
        snapshot: CheckpointSnapshotRef {
            repo_root: Utf8PathBuf::from(repo_root),
            // The worktree root is the caller's project root, which may be
            // nested below the repo root.
            worktree_root: project_root.to_path_buf(),
            worktree_name: worktree_name.map(NonEmptyText::new).transpose()?,
            commit_hash: GitCommitHash::new(commit_hash)?,
        },
    }))
}
+
+fn git_output(project_root: &Utf8Path, args: &[&str]) -> Result<Option<String>, StoreError> {
+ let output = Command::new("git")
+ .arg("-C")
+ .arg(project_root.as_str())
+ .args(args)
+ .output()?;
+ if !output.status.success() {
+ return Ok(None);
+ }
+ let text = String::from_utf8_lossy(&output.stdout).trim().to_owned();
+ if text.is_empty() {
+ return Ok(None);
+ }
+ Ok(Some(text))
+}
+
/// Wrap a [`StoreError`] so it can travel through `rusqlite` row-mapping
/// closures. The column index (0) and type are placeholders, not the real
/// failing column.
fn to_sql_conversion_error(error: StoreError) -> rusqlite::Error {
    rusqlite::Error::FromSqlConversionFailure(0, rusqlite::types::Type::Text, Box::new(error))
}
+
/// Same as [`to_sql_conversion_error`], for errors originating in
/// `fidget_spinner_core`.
fn core_to_sql_conversion_error(error: fidget_spinner_core::CoreError) -> rusqlite::Error {
    to_sql_conversion_error(StoreError::from(error))
}
+
+fn parse_uuid(raw: &str) -> Result<Uuid, StoreError> {
+ Uuid::parse_str(raw).map_err(StoreError::from)
+}
+
+fn parse_node_id(raw: &str) -> Result<fidget_spinner_core::NodeId, StoreError> {
+ Ok(fidget_spinner_core::NodeId::from_uuid(parse_uuid(raw)?))
+}
+
+fn parse_frontier_id(raw: &str) -> Result<fidget_spinner_core::FrontierId, StoreError> {
+ Ok(fidget_spinner_core::FrontierId::from_uuid(parse_uuid(raw)?))
+}
+
+fn parse_checkpoint_id(raw: &str) -> Result<fidget_spinner_core::CheckpointId, StoreError> {
+ Ok(fidget_spinner_core::CheckpointId::from_uuid(parse_uuid(
+ raw,
+ )?))
+}
+
+fn parse_agent_session_id(raw: &str) -> Result<fidget_spinner_core::AgentSessionId, StoreError> {
+ Ok(fidget_spinner_core::AgentSessionId::from_uuid(parse_uuid(
+ raw,
+ )?))
+}
+
+fn parse_annotation_id(raw: &str) -> Result<fidget_spinner_core::AnnotationId, StoreError> {
+ Ok(fidget_spinner_core::AnnotationId::from_uuid(parse_uuid(
+ raw,
+ )?))
+}
+
+fn parse_node_class(raw: &str) -> Result<NodeClass, StoreError> {
+ match raw {
+ "contract" => Ok(NodeClass::Contract),
+ "change" => Ok(NodeClass::Change),
+ "run" => Ok(NodeClass::Run),
+ "analysis" => Ok(NodeClass::Analysis),
+ "decision" => Ok(NodeClass::Decision),
+ "research" => Ok(NodeClass::Research),
+ "enabling" => Ok(NodeClass::Enabling),
+ "note" => Ok(NodeClass::Note),
+ other => Err(StoreError::Json(serde_json::Error::io(io::Error::new(
+ io::ErrorKind::InvalidData,
+ format!("unknown node class `{other}`"),
+ )))),
+ }
+}
+
/// Stable storage string for a [`fidget_spinner_core::NodeTrack`];
/// inverse of [`parse_node_track`].
fn encode_node_track(track: fidget_spinner_core::NodeTrack) -> &'static str {
    match track {
        fidget_spinner_core::NodeTrack::CorePath => "core-path",
        fidget_spinner_core::NodeTrack::OffPath => "off-path",
    }
}
+
+fn parse_node_track(raw: &str) -> Result<fidget_spinner_core::NodeTrack, StoreError> {
+ match raw {
+ "core-path" => Ok(fidget_spinner_core::NodeTrack::CorePath),
+ "off-path" => Ok(fidget_spinner_core::NodeTrack::OffPath),
+ other => Err(StoreError::Json(serde_json::Error::io(io::Error::new(
+ io::ErrorKind::InvalidData,
+ format!("unknown node track `{other}`"),
+ )))),
+ }
+}
+
/// Stable storage string for an [`AnnotationVisibility`];
/// inverse of [`parse_annotation_visibility`].
fn encode_annotation_visibility(visibility: AnnotationVisibility) -> &'static str {
    match visibility {
        AnnotationVisibility::HiddenByDefault => "hidden",
        AnnotationVisibility::Visible => "visible",
    }
}
+
+fn parse_annotation_visibility(raw: &str) -> Result<AnnotationVisibility, StoreError> {
+ match raw {
+ "hidden" => Ok(AnnotationVisibility::HiddenByDefault),
+ "visible" => Ok(AnnotationVisibility::Visible),
+ other => Err(StoreError::Json(serde_json::Error::io(io::Error::new(
+ io::ErrorKind::InvalidData,
+ format!("unknown annotation visibility `{other}`"),
+ )))),
+ }
+}
+
/// Stable storage string for an [`EdgeKind`]. No parse counterpart appears
/// in this part of the file — presumably edges are decoded elsewhere or
/// write-only here; confirm before adding variants.
fn encode_edge_kind(kind: EdgeKind) -> &'static str {
    match kind {
        EdgeKind::Lineage => "lineage",
        EdgeKind::Evidence => "evidence",
        EdgeKind::Comparison => "comparison",
        EdgeKind::Supersedes => "supersedes",
        EdgeKind::Annotation => "annotation",
    }
}
+
/// Stable storage string for a [`FrontierStatus`];
/// inverse of [`parse_frontier_status`].
fn encode_frontier_status(status: FrontierStatus) -> &'static str {
    match status {
        FrontierStatus::Exploring => "exploring",
        FrontierStatus::Paused => "paused",
        FrontierStatus::Saturated => "saturated",
        FrontierStatus::Archived => "archived",
    }
}
+
+fn parse_frontier_status(raw: &str) -> Result<FrontierStatus, StoreError> {
+ match raw {
+ "exploring" => Ok(FrontierStatus::Exploring),
+ "paused" => Ok(FrontierStatus::Paused),
+ "saturated" => Ok(FrontierStatus::Saturated),
+ "archived" => Ok(FrontierStatus::Archived),
+ other => Err(StoreError::Json(serde_json::Error::io(io::Error::new(
+ io::ErrorKind::InvalidData,
+ format!("unknown frontier status `{other}`"),
+ )))),
+ }
+}
+
/// Stable storage string for a [`CheckpointDisposition`];
/// inverse of [`parse_checkpoint_disposition`].
fn encode_checkpoint_disposition(disposition: CheckpointDisposition) -> &'static str {
    match disposition {
        CheckpointDisposition::Champion => "champion",
        CheckpointDisposition::FrontierCandidate => "frontier-candidate",
        CheckpointDisposition::Baseline => "baseline",
        CheckpointDisposition::DeadEnd => "dead-end",
        CheckpointDisposition::Archived => "archived",
    }
}
+
+fn parse_checkpoint_disposition(raw: &str) -> Result<CheckpointDisposition, StoreError> {
+ match raw {
+ "champion" => Ok(CheckpointDisposition::Champion),
+ "frontier-candidate" => Ok(CheckpointDisposition::FrontierCandidate),
+ "baseline" => Ok(CheckpointDisposition::Baseline),
+ "dead-end" => Ok(CheckpointDisposition::DeadEnd),
+ "archived" => Ok(CheckpointDisposition::Archived),
+ other => Err(StoreError::Json(serde_json::Error::io(io::Error::new(
+ io::ErrorKind::InvalidData,
+ format!("unknown checkpoint disposition `{other}`"),
+ )))),
+ }
+}
+
/// Stable storage string for a [`RunStatus`]. No parse counterpart is
/// visible in this chunk.
fn encode_run_status(status: RunStatus) -> &'static str {
    match status {
        RunStatus::Queued => "queued",
        RunStatus::Running => "running",
        RunStatus::Succeeded => "succeeded",
        RunStatus::Failed => "failed",
        RunStatus::Cancelled => "cancelled",
    }
}
+
/// Stable storage string for an [`ExecutionBackend`]. No parse counterpart
/// is visible in this chunk.
fn encode_backend(backend: ExecutionBackend) -> &'static str {
    match backend {
        ExecutionBackend::LocalProcess => "local-process",
        ExecutionBackend::WorktreeProcess => "worktree-process",
        ExecutionBackend::SshProcess => "ssh-process",
    }
}
+
/// Stable storage string for a [`MetricUnit`]. No parse counterpart is
/// visible in this chunk.
fn encode_metric_unit(unit: MetricUnit) -> &'static str {
    match unit {
        MetricUnit::Seconds => "seconds",
        MetricUnit::Bytes => "bytes",
        MetricUnit::Count => "count",
        MetricUnit::Ratio => "ratio",
        MetricUnit::Custom => "custom",
    }
}
+
/// Stable storage string for an [`OptimizationObjective`]. No parse
/// counterpart is visible in this chunk.
fn encode_optimization_objective(objective: OptimizationObjective) -> &'static str {
    match objective {
        OptimizationObjective::Minimize => "minimize",
        OptimizationObjective::Maximize => "maximize",
        OptimizationObjective::Target => "target",
    }
}
+
/// Stable storage string for a [`FrontierVerdict`]. No parse counterpart is
/// visible in this chunk.
fn encode_frontier_verdict(verdict: FrontierVerdict) -> &'static str {
    match verdict {
        FrontierVerdict::PromoteToChampion => "promote-to-champion",
        FrontierVerdict::KeepOnFrontier => "keep-on-frontier",
        FrontierVerdict::RevertToChampion => "revert-to-champion",
        FrontierVerdict::ArchiveDeadEnd => "archive-dead-end",
        FrontierVerdict::NeedsMoreEvidence => "needs-more-evidence",
    }
}
+
#[cfg(test)]
mod tests {
    use serde_json::json;

    use super::{
        CreateFrontierRequest, CreateNodeRequest, ListNodesQuery, PROJECT_SCHEMA_NAME, ProjectStore,
    };
    use fidget_spinner_core::{
        CheckpointSnapshotRef, EvaluationProtocol, FrontierContract, MetricSpec, MetricUnit,
        NodeAnnotation, NodeClass, NodePayload, NonEmptyText, OptimizationObjective,
    };

    /// Build a unique project-root path under the OS temp dir; the v7 UUID
    /// keeps parallel test runs from colliding.
    // NOTE(review): the directories are never removed, so repeated runs
    // leave residue in the temp dir — acceptable for tests, but worth a
    // cleanup pass eventually.
    fn temp_project_root(label: &str) -> camino::Utf8PathBuf {
        let mut path = std::env::temp_dir();
        path.push(format!(
            "fidget_spinner_store_test_{}_{}",
            label,
            uuid::Uuid::now_v7()
        ));
        camino::Utf8PathBuf::from(path.to_string_lossy().into_owned())
    }

    /// `init` must materialize the model-facing schema file in the state root.
    #[test]
    fn init_writes_model_facing_schema_file() -> Result<(), super::StoreError> {
        let root = temp_project_root("schema");
        let store = ProjectStore::init(
            &root,
            NonEmptyText::new("test project")?,
            NonEmptyText::new("local.test")?,
        )?;

        assert!(store.state_root().join(PROJECT_SCHEMA_NAME).exists());
        Ok(())
    }

    /// Hidden annotations attached at creation must round-trip through the
    /// store with their visibility preserved.
    #[test]
    fn add_node_persists_hidden_annotations() -> Result<(), super::StoreError> {
        let root = temp_project_root("notes");
        let mut store = ProjectStore::init(
            &root,
            NonEmptyText::new("test project")?,
            NonEmptyText::new("local.test")?,
        )?;
        let node = store.add_node(CreateNodeRequest {
            class: NodeClass::Research,
            frontier_id: None,
            title: NonEmptyText::new("feature sketch")?,
            summary: Some(NonEmptyText::new("research note")?),
            payload: NodePayload::with_schema(
                store.schema().schema_ref(),
                super::json_object(json!({"body": "freeform"}))?,
            ),
            annotations: vec![NodeAnnotation::hidden(NonEmptyText::new(
                "private scratch",
            )?)],
            attachments: Vec::new(),
        })?;
        let loaded = store
            .get_node(node.id)?
            .ok_or(super::StoreError::NodeNotFound(node.id))?;

        assert_eq!(loaded.annotations.len(), 1);
        assert_eq!(
            loaded.annotations[0].visibility,
            fidget_spinner_core::AnnotationVisibility::HiddenByDefault
        );
        Ok(())
    }

    /// Creating a frontier with an initial checkpoint seed must yield a
    /// projection whose champion is already set.
    #[test]
    fn frontier_projection_tracks_initial_champion() -> Result<(), super::StoreError> {
        let root = temp_project_root("frontier");
        let mut store = ProjectStore::init(
            &root,
            NonEmptyText::new("test project")?,
            NonEmptyText::new("local.test")?,
        )?;
        let projection = store.create_frontier(CreateFrontierRequest {
            label: NonEmptyText::new("optimization frontier")?,
            contract_title: NonEmptyText::new("contract root")?,
            contract_summary: None,
            contract: FrontierContract {
                objective: NonEmptyText::new("improve wall time")?,
                evaluation: EvaluationProtocol {
                    benchmark_suites: std::collections::BTreeSet::from([NonEmptyText::new(
                        "smoke",
                    )?]),
                    primary_metric: MetricSpec {
                        metric_key: NonEmptyText::new("wall_clock_s")?,
                        unit: MetricUnit::Seconds,
                        objective: OptimizationObjective::Minimize,
                    },
                    supporting_metrics: std::collections::BTreeSet::new(),
                },
                promotion_criteria: vec![NonEmptyText::new("strict speedup")?],
            },
            initial_checkpoint: Some(super::CheckpointSeed {
                summary: NonEmptyText::new("seed")?,
                snapshot: CheckpointSnapshotRef {
                    repo_root: root.clone(),
                    worktree_root: root,
                    worktree_name: Some(NonEmptyText::new("main")?),
                    commit_hash: fidget_spinner_core::GitCommitHash::new("0123456789abcdef")?,
                },
            }),
        })?;

        assert!(projection.champion_checkpoint_id.is_some());
        Ok(())
    }

    /// Archived nodes are excluded from default listings and only appear
    /// when `include_archived` is set.
    #[test]
    fn list_nodes_hides_archived_by_default() -> Result<(), super::StoreError> {
        let root = temp_project_root("archive");
        let mut store = ProjectStore::init(
            &root,
            NonEmptyText::new("test project")?,
            NonEmptyText::new("local.test")?,
        )?;
        let node = store.add_node(CreateNodeRequest {
            class: NodeClass::Note,
            frontier_id: None,
            title: NonEmptyText::new("quick note")?,
            summary: None,
            payload: NodePayload::with_schema(
                store.schema().schema_ref(),
                super::json_object(json!({"body": "hello"}))?,
            ),
            annotations: Vec::new(),
            attachments: Vec::new(),
        })?;
        store.archive_node(node.id)?;

        let visible = store.list_nodes(ListNodesQuery::default())?;
        let hidden = store.list_nodes(ListNodesQuery {
            include_archived: true,
            ..ListNodesQuery::default()
        })?;

        assert!(visible.is_empty());
        assert_eq!(hidden.len(), 1);
        Ok(())
    }

    /// The root contract node carries its frontier id, so a frontier-scoped
    /// listing must include it even when no other nodes exist.
    #[test]
    fn frontier_filter_includes_root_contract_node() -> Result<(), super::StoreError> {
        let root = temp_project_root("contract-filter");
        let mut store = ProjectStore::init(
            &root,
            NonEmptyText::new("test project")?,
            NonEmptyText::new("local.test")?,
        )?;
        let projection = store.create_frontier(CreateFrontierRequest {
            label: NonEmptyText::new("frontier")?,
            contract_title: NonEmptyText::new("root contract")?,
            contract_summary: None,
            contract: FrontierContract {
                objective: NonEmptyText::new("optimize")?,
                evaluation: EvaluationProtocol {
                    benchmark_suites: std::collections::BTreeSet::from([NonEmptyText::new(
                        "smoke",
                    )?]),
                    primary_metric: MetricSpec {
                        metric_key: NonEmptyText::new("wall_clock_s")?,
                        unit: MetricUnit::Seconds,
                        objective: OptimizationObjective::Minimize,
                    },
                    supporting_metrics: std::collections::BTreeSet::new(),
                },
                promotion_criteria: vec![NonEmptyText::new("faster")?],
            },
            initial_checkpoint: None,
        })?;

        let nodes = store.list_nodes(ListNodesQuery {
            frontier_id: Some(projection.frontier.id),
            ..ListNodesQuery::default()
        })?;

        assert_eq!(nodes.len(), 1);
        assert_eq!(nodes[0].class, NodeClass::Contract);
        Ok(())
    }
}
diff --git a/docs/architecture.md b/docs/architecture.md
new file mode 100644
index 0000000..acab8fe
--- /dev/null
+++ b/docs/architecture.md
@@ -0,0 +1,527 @@
+# Fidget Spinner Architecture
+
+## Current Shape
+
+The current MVP implementation is intentionally narrower than the eventual full
+product:
+
+```text
+agent host
+ |
+ | bundled fidget-spinner skills + stdio MCP
+ v
+spinner MCP host
+ |
+ +-- public JSON-RPC transport
+ +-- session seed capture and restore
+ +-- explicit project binding
+ +-- tool catalog and replay contracts
+ +-- health and telemetry
+ +-- hot rollout / re-exec
+ |
+ +-- disposable MCP worker
+ | |
+ | +-- per-project SQLite store
+ | +-- per-project blob directory
+ | +-- git/worktree introspection
+ | +-- atomic experiment closure
+ |
+ v
+<project root>/.fidget_spinner/
+```
+
+There is no long-lived daemon yet. The first usable slice runs MCP from the CLI
+binary, but it already follows the hardened host/worker split required for
+long-lived sessions and safe replay behavior.
+
+## Package Boundary
+
+The package currently contains three coupled layers:
+
+- `fidget-spinner-core`
+- `fidget-spinner-store-sqlite`
+- `fidget-spinner-cli`
+
+And two bundled agent assets:
+
+- `assets/codex-skills/fidget-spinner/SKILL.md`
+- `assets/codex-skills/frontier-loop/SKILL.md`
+
+Those parts should be treated as one release unit.
+
+## Storage Topology
+
+Every initialized project owns a private state root:
+
+```text
+<project root>/.fidget_spinner/
+ project.json
+ schema.json
+ state.sqlite
+ blobs/
+```
+
+Why this shape:
+
+- schema freedom stays per project
+- migrations stay local
+- backup and portability stay simple
+- we avoid premature pressure toward a single global schema
+
+Cross-project search can come later as an additive index.
+
+## State Layers
+
+### 1. Global engine spine
+
+The engine depends on a stable, typed spine stored in SQLite:
+
+- nodes
+- node annotations
+- node edges
+- frontiers
+- checkpoints
+- runs
+- metrics
+- experiments
+- event log
+
+This layer powers traversal, indexing, archiving, and frontier projection.
+
+### 2. Project payload layer
+
+Each node stores a project payload as JSON, namespaced and versioned by the
+project schema in `.fidget_spinner/schema.json`.
+
+This is where domain-specific richness lives.
+
+### 3. Annotation sidecar
+
+Annotations are stored separately from payload and are default-hidden unless
+explicitly surfaced.
+
+That separation is important. It prevents free-form scratch text from silently
+mutating into a shadow schema.
+
+## Validation Model
+
+Validation has three tiers.
+
+### Storage validity
+
+Hard-fail conditions:
+
+- malformed engine envelope
+- broken ids
+- invalid enum values
+- broken relational integrity
+
+### Semantic quality
+
+Project field expectations are warning-heavy:
+
+- missing recommended fields emit diagnostics
+- missing projection-gated fields remain storable
+- ingest usually succeeds
+
+### Operational eligibility
+
+Specific actions may refuse incomplete records.
+
+Examples:
+
+- core-path experiment closure requires complete run/result/note/verdict state
+- future promotion helpers may require a projection-ready change payload
+
+## SQLite Schema
+
+### `nodes`
+
+Stores the global node envelope:
+
+- id
+- class
+- track
+- frontier id
+- archived flag
+- title
+- summary
+- schema namespace
+- schema version
+- payload JSON
+- diagnostics JSON
+- agent session id
+- timestamps
+
+### `node_annotations`
+
+Stores sidecar free-form annotations:
+
+- annotation id
+- owning node id
+- visibility
+- optional label
+- body
+- created timestamp
+
+### `node_edges`
+
+Stores typed DAG edges:
+
+- source node id
+- target node id
+- edge kind
+
+The current edge kinds are enough for the MVP:
+
+- `lineage`
+- `evidence`
+- `comparison`
+- `supersedes`
+- `annotation`
+
+### `frontiers`
+
+Stores derived operational frontier records:
+
+- frontier id
+- label
+- root contract node id
+- status
+- timestamps
+
+Important constraint:
+
+- the root contract node itself also carries the same frontier id
+
+That keeps frontier filtering honest.
+
+### `checkpoints`
+
+Stores committed candidate or champion checkpoints:
+
+- checkpoint id
+- frontier id
+- anchoring node id
+- repo/worktree metadata
+- commit hash
+- disposition
+- summary
+- created timestamp
+
+In the current codebase, a frontier may temporarily exist without a champion if
+it was initialized outside a git repo. Core-path experimentation is only fully
+available once git-backed checkpoints exist.
+
+### `runs`
+
+Stores run envelopes:
+
+- run id
+- run node id
+- frontier id
+- backend
+- status
+- code snapshot metadata
+- benchmark suite
+- command envelope
+- started and finished timestamps
+
+### `metrics`
+
+Stores primary and supporting run metrics:
+
+- run id
+- metric key
+- value
+- unit
+- optimization objective
+
+### `experiments`
+
+Stores the atomic closure object for core-path work:
+
+- experiment id
+- frontier id
+- base checkpoint id
+- candidate checkpoint id
+- change node id
+- run node id and run id
+- optional analysis node id
+- decision node id
+- verdict
+- note payload
+- created timestamp
+
+This table is the enforcement layer for frontier discipline.
+
+### `events`
+
+Stores durable audit events:
+
+- event id
+- entity kind
+- entity id
+- event kind
+- payload
+- created timestamp
+
+## Core Types
+
+### Node classes
+
+Core path:
+
+- `contract`
+- `change`
+- `run`
+- `analysis`
+- `decision`
+
+Off path:
+
+- `research`
+- `enabling`
+- `note`
+
+### Node tracks
+
+- `core_path`
+- `off_path`
+
+Track is derived from class, not operator whim.
+
+### Frontier projection
+
+The frontier projection currently exposes:
+
+- frontier record
+- champion checkpoint id
+- active candidate checkpoint ids
+- experiment count
+
+This projection is derived from canonical state and intentionally rebuildable.
+
+## Write Surfaces
+
+### Low-ceremony off-path writes
+
+These are intentionally cheap:
+
+- `note.quick`
+- `research.record`
+- generic `node.create` for escape-hatch use
+- `node.annotate`
+
+### Low-ceremony core-path entry
+
+`change.record` exists to capture intent before worktree state becomes muddy.
+
+### Atomic core-path closure
+
+`experiment.close` is the important write path.
+
+It persists, in one transaction:
+
+- run node
+- run record
+- candidate checkpoint
+- decision node
+- experiment record
+- lineage and evidence edges
+- frontier touch and champion demotion when needed
+
+That atomic boundary is the answer to the ceremony/atomicity pre-mortem.
+
+## MCP Surface
+
+The MVP MCP server is stdio-only and follows newline-delimited JSON-RPC message
+framing. The public server is a stable host. It owns initialization state,
+replay policy, telemetry, and host rollout. Execution happens in a disposable
+worker subprocess.
+
+### Host responsibilities
+
+- own the public JSON-RPC session
+- enforce initialize-before-use
+- classify tools and resources by replay contract
+- retry only explicitly safe operations after retryable worker faults
+- expose health and telemetry
+- re-exec the host binary while preserving initialization seed and counters
+
+### Worker responsibilities
+
+- open the per-project store
+- execute tool logic and resource reads
+- return typed success or typed fault records
+- remain disposable without losing canonical state
+
+### Fault model
+
+Faults are typed by:
+
+- kind: `invalid_input`, `not_initialized`, `transient`, `internal`
+- stage: `host`, `worker`, `store`, `transport`, `protocol`, `rollout`
+
+Those faults are surfaced both as JSON-RPC errors and as structured tool
+errors, depending on call type.
+
+### Replay contracts
+
+The tool catalog explicitly marks each operation as one of:
+
+- `safe_replay`
+- `never_replay`
+
+Current policy:
+
- reads such as `project.status`, `project.schema`, `frontier.list`,
  `frontier.status`, `node.list`, `node.read`, `skill.list`, `skill.show`, and
  resource reads are safe to replay once after a retryable worker fault
+- mutating tools such as `frontier.init`, `node.create`, `change.record`,
+ `node.annotate`, `node.archive`, `note.quick`, `research.record`, and
+ `experiment.close` are never auto-replayed
+
+This is the hardening answer to side-effect safety.
+
+Implemented server features:
+
+- tools
+- resources
+
+### Tools
+
+Implemented tools:
+
+- `system.health`
+- `system.telemetry`
+- `project.bind`
+- `project.status`
+- `project.schema`
+- `frontier.list`
+- `frontier.status`
+- `frontier.init`
+- `node.create`
+- `change.record`
+- `node.list`
+- `node.read`
+- `node.annotate`
+- `node.archive`
+- `note.quick`
+- `research.record`
+- `experiment.close`
+- `skill.list`
+- `skill.show`
+
+### Resources
+
+Implemented resources:
+
+- `fidget-spinner://project/config`
+- `fidget-spinner://project/schema`
+- `fidget-spinner://skill/fidget-spinner`
+- `fidget-spinner://skill/frontier-loop`
+
+### Operational tools
+
+`system.health` returns a typed operational snapshot:
+
+- initialization state
+- binding state
+- worker generation and liveness
+- current executable path
+- launch-path stability
+- rollout-pending state
+- last recorded fault
+
+`system.telemetry` returns cumulative counters:
+
+- requests
+- successes
+- errors
+- retries
+- worker restarts
+- host rollouts
+- per-operation counts and last latencies
+
+### Rollout model
+
+The host fingerprints its executable at startup. If the binary changes on disk,
+or if a rollout is explicitly requested, the host re-execs itself after sending
+the current response. The re-exec carries forward:
+
+- initialization seed
+- project binding
+- telemetry counters
+- request id sequence
+- worker generation
+- one-shot rollout and crash-test markers
+
+This keeps the public session stable while still allowing hot binary replacement.
+
+## CLI Surface
+
+The CLI remains thin and operational.
+
+Current commands:
+
+- `init`
+- `schema show`
+- `frontier init`
+- `frontier status`
+- `node add`
+- `node list`
+- `node show`
+- `node annotate`
+- `node archive`
+- `note quick`
+- `research add`
+- `experiment close`
+- `mcp serve`
+- hidden internal `mcp worker`
+- `skill list`
+- `skill install`
+- `skill show`
+
+The CLI is not the strategic write plane, but it is the easiest repair and
+bootstrap surface.
+
+## Bundled Skill
+
+The bundled `fidget-spinner` and `frontier-loop` skills should
+be treated as part of the product, not stray prompts.
+
+Their job is to teach agents:
+
+- DAG first
+- schema first
+- cheap off-path pushes
+- disciplined core-path closure
+- archive rather than delete
+- and, for the frontier-loop specialization, how to run an indefinite push
+
+The asset lives in-tree so it can drift only via an explicit code change.
+
+## Full-Product Trajectory
+
+The full product should add, not replace, the MVP implementation.
+
+Planned next layers:
+
+- `spinnerd` as a long-lived local daemon
+- HTTP and SSE
+- read-mostly local UI
+- runner orchestration beyond direct process execution
+- interruption recovery and resumable long loops
+- archive and pruning passes
+- optional cross-project indexing
+
+The invariant for that future work is strict:
+
+- keep the DAG canonical
+- keep frontier state derived
+- keep project payloads local and flexible
+- keep off-path writes cheap
+- keep core-path closure atomic
+- keep host-owned replay contracts explicit and auditable
diff --git a/docs/libgrid-dogfood.md b/docs/libgrid-dogfood.md
new file mode 100644
index 0000000..5e13e51
--- /dev/null
+++ b/docs/libgrid-dogfood.md
@@ -0,0 +1,202 @@
+# Libgrid Dogfood Plan
+
+## Why Libgrid
+
+`libgrid` is the right first serious dogfood target because it has exactly the
+failure mode Fidget Spinner is designed to kill:
+
+- long autonomous optimization loops
+- heavy worktree usage
+- benchmark-driven decisions
+- huge markdown logs that blur evidence, narrative, and verdicts
+
+That is the proving ground.
+
+## Immediate MVP Goal
+
+The MVP does not need to solve all of `libgrid`.
+
+It needs to solve this specific problem:
+
+replace the giant freeform experiment log with a machine in which the active
+frontier, the current champion, the candidate evidence, and the dead ends are
+all explicit and queryable.
+
+When using a global unbound MCP session from a `libgrid` worktree, the first
+project-local action should be `project.bind` against the `libgrid` worktree
+root or any nested path inside it. The session should not assume the MCP host's
+own repo.
+
+## Mapping Libgrid Work Into The Model
+
+### Frontier
+
+One optimization objective becomes one frontier:
+
+- improve MILP solve quality
+- reduce wall-clock time
+- reduce LP pressure
+- improve node throughput
+- improve best-bound quality
+
+### Contract node
+
+The root contract should state:
+
+- objective in plain language
+- benchmark suite set
+- primary metric
+- supporting metrics
+- promotion criteria
+
+### Change node
+
+Use `change.record` to capture:
+
+- what hypothesis is being tested
+- what base checkpoint it starts from
+- what benchmark suite matters
+- any terse sketch of the intended delta
+
+### Run node
+
+The run node should capture:
+
+- exact command
+- cwd
+- backend kind
+- benchmark suite
+- code snapshot
+- resulting metrics
+
+### Decision node
+
+The decision should make the verdict explicit:
+
+- promote to champion
+- keep on frontier
+- revert to champion
+- archive dead end
+- needs more evidence
+
+### Off-path nodes
+
+Use these freely:
+
+- `research` for ideas, external references, algorithm sketches
+- `enabling` for scaffolding that is not yet a benchmarked experiment
+- `note` for quick observations
+
+This is how the system avoids forcing every useful thought into experiment
+closure.
+
+## Suggested Libgrid Project Schema
+
+The `libgrid` project should eventually define richer payload conventions in
+`.fidget_spinner/schema.json`.
+
+The MVP does not need hard rejection. It does need meaningful warnings.
+
+Good first project fields:
+
+- `hypothesis` on `change`
+- `base_checkpoint_id` on `change`
+- `benchmark_suite` on `change` and `run`
+- `body` on `change`, `research`, and `note`
+- `comparison_claim` on `analysis`
+- `rationale` on `decision`
+
+Good first metric vocabulary:
+
+- `wall_clock_s`
+- `solved_instance_count`
+- `nodes_expanded`
+- `best_bound_delta`
+- `lp_calls`
+- `memory_bytes`
+
+## Libgrid MVP Workflow
+
+### 1. Seed the frontier
+
+1. Initialize the project store.
+2. Create a frontier contract.
+3. Capture the incumbent git checkpoint if available.
+
+### 2. Start a line of attack
+
+1. Read the current frontier and the recent DAG tail.
+2. Record a `change`.
+3. If needed, attach off-path `research` or `note` nodes first.
+
+### 3. Execute one experiment
+
+1. Modify the worktree.
+2. Commit the candidate checkpoint.
+3. Run the benchmark protocol.
+4. Close the experiment atomically.
+
+### 4. Judge and continue
+
+1. Promote the checkpoint or keep it alive.
+2. Archive dead ends instead of leaving them noisy and active.
+3. Repeat.
+
+## Benchmark Discipline
+
+For `libgrid`, the benchmark evidence needs to be structurally trustworthy.
+
+The MVP should always preserve at least:
+
+- benchmark suite identity
+- primary metric
+- supporting metrics
+- command envelope
+- host/worktree metadata
+- git commit identity
+
+This is the minimum needed to prevent "I think this was faster" folklore.
+
+## What The MVP Can Defer
+
+These are useful but not required for the first real dogfood loop:
+
+- strong markdown migration
+- multi-agent coordination
+- rich artifact bundling
+- pruning or vacuum passes beyond archive
+- UI-heavy analysis
+
+The right sequence is:
+
+1. start a clean front
+2. run new work through Fidget Spinner
+3. backfill old markdown only when it is worth the effort
+
+## Repo-Local Dogfood Before Libgrid
+
+This repository itself is a valid off-path dogfood target even though it is not
+currently a git repo.
+
+That means we can already use it to test:
+
+- project initialization
+- schema visibility
+- frontier creation without a champion
+- off-path research recording
+- hidden annotations
+- MCP read and write flows
+
+What it cannot honestly test is full git-backed core-path experiment closure.
+That still belongs in a real repo such as the `libgrid` worktree.
+
+## Acceptance Bar For Libgrid
+
+Fidget Spinner is ready for serious `libgrid` use when:
+
+- an agent can run for hours without generating a giant markdown graveyard
+- the operator can identify the champion checkpoint mechanically
+- each completed experiment has checkpoint, result, note, and verdict
+- off-path side investigations stay preserved but do not pollute the core path
+- the system feels like a machine for evidence rather than a diary with better
+ typography
diff --git a/docs/product-spec.md b/docs/product-spec.md
new file mode 100644
index 0000000..8ab6210
--- /dev/null
+++ b/docs/product-spec.md
@@ -0,0 +1,341 @@
+# Fidget Spinner Product Spec
+
+## Thesis
+
+Fidget Spinner is a local-first, agent-first frontier machine for autonomous
+program optimization and research.
+
+The immediate target is brutally practical: replace gigantic freeform
+experiment markdown with a machine that preserves evidence as structure.
+
+The package is deliberately two things at once:
+
+- a local MCP-backed DAG substrate
+- bundled skills that teach agents how to drive that substrate
+
+Those two halves should be versioned together and treated as one product.
+
+## Product Position
+
+This is not a hosted lab notebook.
+
+This is not a cloud compute marketplace.
+
+This is not a collaboration shell with experiments bolted on.
+
+This is a local machine for indefinite frontier pushes, with agents as primary
+writers and humans as auditors, reviewers, and occasional editors.
+
+## Non-Goals
+
+These are explicitly out of scope for the core product:
+
+- OAuth
+- hosted identity
+- cloud tenancy
+- billing, credits, and subscriptions
+- managed provider brokerage
+- chat as the system of record
+- mandatory remote control planes
+- replacing git
+
+Git remains the code substrate. Fidget Spinner is the evidence substrate.
+
+## Locked Design Decisions
+
+These are the load-bearing decisions to hold fixed through the MVP push.
+
+### 1. The DAG is canonical truth
+
+The canonical record is the DAG plus its normalized supporting tables.
+
+Frontier state is not a rival authority. It is a derived, rebuildable
+projection over the DAG and related run/checkpoint/experiment records.
+
+### 2. Storage is per-project
+
+Each project owns its own local store under:
+
+```text
+<project root>/.fidget_spinner/
+ state.sqlite
+ project.json
+ schema.json
+ blobs/
+```
+
+There is no mandatory global database in the MVP.
+
+### 3. Node structure is layered
+
+Every node has three layers:
+
+- a hard global envelope for indexing and traversal
+- a project-local structured payload
+- free-form sidecar annotations as an escape hatch
+
+The engine only hard-depends on the envelope. Project payloads remain flexible.
+
+### 4. Validation is warning-heavy
+
+Engine integrity is hard-validated.
+
+Project semantics are diagnostically validated.
+
+Workflow eligibility is action-gated.
+
+In other words:
+
+- bad engine state is rejected
+- incomplete project payloads are usually admitted with diagnostics
+- projections and frontier actions may refuse incomplete nodes later
+
+### 5. Core-path and off-path work must diverge
+
+Core-path work is disciplined and atomic.
+
+Off-path work is cheap and permissive.
+
+The point is to avoid forcing every scrap of research through the full
+benchmark/decision bureaucracy while still preserving it in the DAG.
+
+### 6. Completed core-path experiments are atomic
+
+A completed experiment exists only when all of these exist together:
+
+- base checkpoint
+- candidate checkpoint
+- measured result
+- terse note
+- explicit verdict
+
+The write surface should make that one atomic mutation, not a loose sequence of
+low-level calls.
+
+### 7. Checkpoints are git-backed
+
+Dirty worktree snapshots are useful as descriptive context, but a completed
+core-path experiment should anchor to a committed candidate checkpoint.
+
+Off-path notes and research can remain lightweight and non-committal.
+
+## Node Model
+
+### Global envelope
+
+The hard spine should be stable across projects. It includes at least:
+
+- node id
+- node class
+- node track
+- frontier id if any
+- archived flag
+- title
+- summary
+- schema namespace and version
+- timestamps
+- diagnostics
+- hidden or visible annotations
+
+This is the engine layer: the part that powers indexing, traversal, archiving,
+default enumeration, and model-facing summaries.
+
+### Project-local payload
+
+Every project may define richer payload fields in:
+
+`<project root>/.fidget_spinner/schema.json`
+
+That file is a model-facing contract. It defines field names and soft
+validation tiers without forcing global schema churn.
+
+Per-field settings should express at least:
+
+- presence: `required`, `recommended`, `optional`
+- severity: `error`, `warning`, `info`
+- role: `index`, `projection_gate`, `render_only`, `opaque`
+- inference policy: whether the model may infer the field
+
+These settings are advisory at ingest time and stricter at projection/action
+time.
+
+### Free-form annotations
+
+Any node may carry free-form annotations.
+
+These are explicitly sidecar, not primary payload. They are:
+
+- allowed everywhere
+- hidden from default enumeration
+- useful as a scratchpad or escape hatch
+- not allowed to become the only home of critical operational truth
+
+If a fact matters to automation, comparison, or promotion, it must migrate into
+the spine or project payload.
+
+## Node Taxonomy
+
+### Core-path node classes
+
+These are the disciplined frontier-loop classes:
+
+- `contract`
+- `change`
+- `run`
+- `analysis`
+- `decision`
+
+### Off-path node classes
+
+These are deliberately low-ceremony:
+
+- `research`
+- `enabling`
+- `note`
+
+They exist so the product can absorb real thinking instead of forcing users and
+agents back into sprawling markdown.
+
+## Frontier Model
+
+The frontier is a derived operational view over the canonical DAG.
+
+It answers:
+
+- what objective is active
+- what the current champion checkpoint is
+- which candidate checkpoints are still alive
+- how many completed experiments exist
+
+The DAG answers:
+
+- what changed
+- what ran
+- what evidence was collected
+- what was concluded
+- what dead ends and side investigations exist
+
+That split is deliberate. It prevents "frontier state" from turning into a
+second unofficial database.
+
+## First Usable MVP
+
+The first usable MVP is the first cut that can already replace a meaningful
+slice of the markdown habit without pretending the whole full-product vision is
+done.
+
+### MVP deliverables
+
+- per-project `.fidget_spinner/` state
+- local SQLite backing store
+- local blob directory
+- typed Rust core model
+- thin CLI for bootstrap and repair
+- hardened stdio MCP host exposed from the CLI
+- disposable MCP worker execution runtime
+- bundled `fidget-spinner` base skill
+- bundled `frontier-loop` skill
+- low-ceremony off-path note and research recording
+- atomic core-path experiment closure
+
+### Explicitly deferred from the MVP
+
+- long-lived `spinnerd`
+- web UI
+- remote runners
+- multi-agent hardening
+- aggressive pruning and vacuuming
+- strong markdown migration tooling
+- cross-project indexing
+
+### MVP model-facing surface
+
+The model-facing surface is a local MCP server oriented around frontier work.
+
+The initial tools should be:
+
+- `system.health`
+- `system.telemetry`
+- `project.bind`
+- `project.status`
+- `project.schema`
+- `frontier.list`
+- `frontier.status`
+- `frontier.init`
+- `node.create`
+- `change.record`
+- `node.list`
+- `node.read`
+- `node.annotate`
+- `node.archive`
+- `note.quick`
+- `research.record`
+- `experiment.close`
+- `skill.list`
+- `skill.show`
+
+The important point is not the exact names. The important point is the shape:
+
+- cheap read access to project and frontier context
+- cheap off-path writes
+- low-ceremony change capture
+- one atomic "close the experiment" tool
+- explicit operational introspection for long-lived agent sessions
+- explicit replay boundaries so side effects are never duplicated by accident
+
+### MVP skill posture
+
+The bundled skills should instruct agents to:
+
+1. inspect `system.health` first
+2. bind the MCP session to the target project before project-local reads or writes
+3. read project schema and frontier state
+4. pull context from the DAG instead of giant prose dumps
+5. use `note.quick` and `research.record` freely off path
+6. use `change.record` before worktree thrash becomes ambiguous
+7. use `experiment.close` to atomically seal core-path work
+8. archive detritus instead of deleting it
+9. use the base `fidget-spinner` skill for ordinary DAG work and add
+ `frontier-loop` only when the task becomes a true autonomous frontier push
+
+### MVP acceptance bar
+
+The MVP is successful when:
+
+- a project can be initialized locally with no hosted dependencies
+- an agent can inspect frontier state through MCP
+- an agent can inspect MCP health and telemetry through MCP
+- an agent can record off-path research without bureaucratic pain
+- a git-backed project can close a real core-path experiment atomically
+- retryable worker faults do not duplicate side effects
+- stale nodes can be archived instead of polluting normal enumeration
+- a human can answer "what changed, what ran, what is the current champion,
+ and why?" without doing markdown archaeology
+
+## Full Product
+
+The full product grows outward from the MVP rather than replacing it.
+
+### Planned additions
+
+- `spinnerd` as a long-lived local daemon
+- local HTTP and SSE
+- read-mostly graph and run inspection UI
+- richer artifact handling
+- model-driven pruning and archive passes
+- stronger interruption recovery
+- local runner backends beyond direct process execution
+- optional global indexing across projects
+- import/export and subgraph packaging
+
+### Invariant for all later stages
+
+No future layer should invalidate the MVP spine:
+
+- DAG canonical
+- frontier derived
+- project-local store
+- layered node model
+- warning-heavy schema validation
+- cheap off-path writes
+- atomic core-path closure
diff --git a/rust-toolchain.toml b/rust-toolchain.toml
new file mode 100644
index 0000000..5d3ba6e
--- /dev/null
+++ b/rust-toolchain.toml
@@ -0,0 +1,5 @@
+[toolchain]
+channel = "1.94.0"
+profile = "minimal"
+components = ["clippy", "rustfmt"]
+
diff --git a/scripts/install-codex-skill.sh b/scripts/install-codex-skill.sh
new file mode 100755
index 0000000..dcd4121
--- /dev/null
+++ b/scripts/install-codex-skill.sh
@@ -0,0 +1,21 @@
#!/usr/bin/env bash
# Install the bundled Codex skills as symlinks into a Codex skills directory.
#
# Usage: install-codex-skill.sh [DEST_ROOT]
#   DEST_ROOT defaults to "$HOME/.codex/skills".
#
# Each skill is installed as a symlink back into this repository's
# assets/codex-skills tree, so edits in the repo take effect immediately.
set -euo pipefail

# Repository root, resolved relative to this script's own location so the
# script works regardless of the caller's cwd.
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
SOURCE_ROOT="$ROOT_DIR/assets/codex-skills"
DEST_ROOT="${1:-$HOME/.codex/skills}"

# install_skill NAME
# Symlink one bundled skill directory into DEST_ROOT, replacing any previous
# installation (symlink, directory, or plain file) at the destination.
install_skill() {
  local name="$1"
  local source_dir="$SOURCE_ROOT/$name"
  local dest_dir="$DEST_ROOT/$name"

  # Fail loudly instead of wiping the destination and leaving a dangling
  # symlink when the bundled asset is missing (e.g. a partial checkout).
  if [[ ! -d "$source_dir" ]]; then
    printf 'error: skill source directory not found: %s\n' "$source_dir" >&2
    return 1
  fi

  mkdir -p "$DEST_ROOT"
  rm -rf "$dest_dir"
  ln -s "$source_dir" "$dest_dir"
  printf 'installed skill symlink: %s -> %s\n' "$dest_dir" "$source_dir"
}

install_skill "fidget-spinner"
install_skill "frontier-loop"

printf 'mcp command: %s\n' "cargo run -p fidget-spinner-cli -- mcp serve"