mod bundled_skill; mod mcp; mod ui; use std::collections::{BTreeMap, BTreeSet}; use std::fs; use std::io; use std::net::SocketAddr; use std::path::{Path, PathBuf}; use camino::{Utf8Path, Utf8PathBuf}; use clap::{Args, Parser, Subcommand, ValueEnum}; use fidget_spinner_core::{ ArtifactKind, CommandRecipe, ExecutionBackend, ExperimentAnalysis, ExperimentStatus, FieldValueType, FrontierVerdict, MetricUnit, MetricVisibility, NonEmptyText, OptimizationObjective, RunDimensionValue, Slug, TagName, }; use fidget_spinner_store_sqlite::{ AttachmentSelector, CloseExperimentRequest, CreateArtifactRequest, CreateFrontierRequest, CreateHypothesisRequest, DefineMetricRequest, DefineRunDimensionRequest, ExperimentOutcomePatch, FrontierRoadmapItemDraft, ListArtifactsQuery, ListExperimentsQuery, ListHypothesesQuery, MetricBestQuery, MetricKeysQuery, MetricRankOrder, MetricScope, OpenExperimentRequest, ProjectStore, STORE_DIR_NAME, StoreError, TextPatch, UpdateArtifactRequest, UpdateExperimentRequest, UpdateFrontierBriefRequest, UpdateHypothesisRequest, VertexSelector, }; use serde::Serialize; use serde_json::Value; #[derive(Parser)] #[command( author, version, about = "Fidget Spinner CLI, MCP server, and local navigator" )] struct Cli { #[command(subcommand)] command: Command, } #[derive(Subcommand)] enum Command { /// Initialize a project-local `.fidget_spinner/` store. Init(InitArgs), /// Inspect project metadata and coarse counts. Project { #[command(subcommand)] command: ProjectCommand, }, /// Manage the repo-local tag registry. Tag { #[command(subcommand)] command: TagCommand, }, /// Create and inspect frontier scopes. Frontier { #[command(subcommand)] command: FrontierCommand, }, /// Record and inspect hypotheses. Hypothesis { #[command(subcommand)] command: HypothesisCommand, }, /// Open, inspect, update, and close experiments. Experiment { #[command(subcommand)] command: ExperimentCommand, }, /// Register external references and attach them to the ledger. 
Artifact { #[command(subcommand)] command: ArtifactCommand, }, /// Manage project-level metric definitions and rankings. Metric { #[command(subcommand)] command: MetricCommand, }, /// Define the typed dimension vocabulary used to slice experiments. Dimension { #[command(subcommand)] command: DimensionCommand, }, /// Serve the hardened stdio MCP endpoint. Mcp { #[command(subcommand)] command: McpCommand, }, /// Serve the local navigator. Ui { #[command(subcommand)] command: UiCommand, }, /// Inspect or install bundled Codex skills. Skill { #[command(subcommand)] command: SkillCommand, }, } #[derive(Args)] struct InitArgs { #[arg(long, default_value = ".")] project: PathBuf, #[arg(long)] name: Option, } #[derive(Subcommand)] enum ProjectCommand { Status(ProjectArg), } #[derive(Subcommand)] enum TagCommand { Add(TagAddArgs), List(ProjectArg), } #[derive(Subcommand)] enum FrontierCommand { Create(FrontierCreateArgs), List(ProjectArg), Read(FrontierSelectorArgs), Open(FrontierSelectorArgs), UpdateBrief(FrontierBriefUpdateArgs), History(FrontierSelectorArgs), } #[derive(Subcommand)] enum HypothesisCommand { Record(HypothesisRecordArgs), List(HypothesisListArgs), Read(HypothesisSelectorArgs), Update(HypothesisUpdateArgs), History(HypothesisSelectorArgs), } #[derive(Subcommand)] enum ExperimentCommand { Open(ExperimentOpenArgs), List(ExperimentListArgs), Read(ExperimentSelectorArgs), Update(ExperimentUpdateArgs), Close(ExperimentCloseArgs), History(ExperimentSelectorArgs), } #[derive(Subcommand)] enum ArtifactCommand { Record(ArtifactRecordArgs), List(ArtifactListArgs), Read(ArtifactSelectorArgs), Update(ArtifactUpdateArgs), History(ArtifactSelectorArgs), } #[derive(Subcommand)] enum MetricCommand { Define(MetricDefineArgs), Keys(MetricKeysArgs), Best(MetricBestArgs), } #[derive(Subcommand)] enum DimensionCommand { Define(DimensionDefineArgs), List(ProjectArg), } #[derive(Subcommand)] enum McpCommand { Serve(McpServeArgs), Worker(McpWorkerArgs), } #[derive(Subcommand)] 
enum UiCommand {
    Serve(UiServeArgs),
}

#[derive(Subcommand)]
enum SkillCommand {
    List,
    Install(SkillInstallArgs),
    Show(SkillShowArgs),
}

// Shared `--project` selector; defaults to the current directory.
#[derive(Args, Clone)]
struct ProjectArg {
    #[arg(long, default_value = ".")]
    project: PathBuf,
}

#[derive(Args)]
struct TagAddArgs {
    #[command(flatten)]
    project: ProjectArg,
    #[arg(long)]
    name: String,
    #[arg(long)]
    description: String,
}

#[derive(Args)]
struct FrontierCreateArgs {
    #[command(flatten)]
    project: ProjectArg,
    #[arg(long)]
    label: String,
    #[arg(long)]
    objective: String,
    #[arg(long)]
    slug: Option<String>,
}

#[derive(Args)]
struct FrontierSelectorArgs {
    #[command(flatten)]
    project: ProjectArg,
    #[arg(long)]
    frontier: String,
}

#[derive(Args)]
struct FrontierBriefUpdateArgs {
    #[command(flatten)]
    project: ProjectArg,
    #[arg(long)]
    frontier: String,
    // Optimistic-concurrency guard; update fails when the stored revision differs.
    #[arg(long)]
    expected_revision: Option<u64>,
    #[arg(long)]
    situation: Option<String>,
    #[arg(long)]
    clear_situation: bool,
    #[arg(long = "unknown")]
    unknowns: Vec<String>,
    // Each item is `rank:hypothesis[:summary]`; parsed by `parse_roadmap_item`.
    #[arg(long = "roadmap")]
    roadmap: Vec<String>,
}

#[derive(Args)]
struct HypothesisRecordArgs {
    #[command(flatten)]
    project: ProjectArg,
    #[arg(long)]
    frontier: String,
    #[arg(long)]
    title: String,
    #[arg(long)]
    summary: String,
    #[arg(long)]
    body: String,
    #[arg(long)]
    slug: Option<String>,
    #[arg(long = "tag")]
    tags: Vec<String>,
    // `hypothesis:...` or `experiment:...` selectors; see parse_vertex_selectors.
    #[arg(long = "parent")]
    parents: Vec<String>,
}

#[derive(Args)]
struct HypothesisListArgs {
    #[command(flatten)]
    project: ProjectArg,
    #[arg(long)]
    frontier: Option<String>,
    #[arg(long = "tag")]
    tags: Vec<String>,
    #[arg(long)]
    include_archived: bool,
    #[arg(long)]
    limit: Option<usize>,
}

#[derive(Args)]
struct HypothesisSelectorArgs {
    #[command(flatten)]
    project: ProjectArg,
    #[arg(long)]
    hypothesis: String,
}

#[derive(Args)]
struct HypothesisUpdateArgs {
    #[command(flatten)]
    project: ProjectArg,
    #[arg(long)]
    hypothesis: String,
    #[arg(long)]
    expected_revision: Option<u64>,
    #[arg(long)]
    title: Option<String>,
    #[arg(long)]
    summary: Option<String>,
    #[arg(long)]
    body: Option<String>,
    // Tags are only applied when `--replace-tags` is also passed.
    #[arg(long = "tag")]
    tags: Vec<String>,
    #[arg(long = "replace-tags")]
    replace_tags: bool,
    #[arg(long = "parent")]
    parents: Vec<String>,
    #[arg(long = "replace-parents")]
    replace_parents: bool,
    #[arg(long, value_enum)]
    state: Option<CliArchivePatch>,
}

#[derive(Args)]
struct ExperimentOpenArgs {
    #[command(flatten)]
    project: ProjectArg,
    #[arg(long)]
    hypothesis: String,
    #[arg(long)]
    title: String,
    #[arg(long)]
    summary: Option<String>,
    #[arg(long)]
    slug: Option<String>,
    #[arg(long = "tag")]
    tags: Vec<String>,
    #[arg(long = "parent")]
    parents: Vec<String>,
}

#[derive(Args)]
struct ExperimentListArgs {
    #[command(flatten)]
    project: ProjectArg,
    #[arg(long)]
    frontier: Option<String>,
    #[arg(long)]
    hypothesis: Option<String>,
    #[arg(long, value_enum)]
    status: Option<CliExperimentStatus>,
    #[arg(long = "tag")]
    tags: Vec<String>,
    #[arg(long)]
    include_archived: bool,
    #[arg(long)]
    limit: Option<usize>,
}

#[derive(Args)]
struct ExperimentSelectorArgs {
    #[command(flatten)]
    project: ProjectArg,
    #[arg(long)]
    experiment: String,
}

#[derive(Args)]
struct ExperimentUpdateArgs {
    #[command(flatten)]
    project: ProjectArg,
    #[arg(long)]
    experiment: String,
    #[arg(long)]
    expected_revision: Option<u64>,
    #[arg(long)]
    title: Option<String>,
    #[arg(long)]
    summary: Option<String>,
    #[arg(long)]
    clear_summary: bool,
    #[arg(long = "tag")]
    tags: Vec<String>,
    #[arg(long = "replace-tags")]
    replace_tags: bool,
    #[arg(long = "parent")]
    parents: Vec<String>,
    #[arg(long = "replace-parents")]
    replace_parents: bool,
    #[arg(long, value_enum)]
    state: Option<CliArchivePatch>,
    // Mutually exclusive outcome sources; validated in load_optional_json.
    #[arg(long = "outcome-json")]
    outcome_json: Option<String>,
    #[arg(long = "outcome-file")]
    outcome_file: Option<PathBuf>,
}

#[derive(Args)]
struct ExperimentCloseArgs {
    #[command(flatten)]
    project: ProjectArg,
    #[arg(long)]
    experiment: String,
    #[arg(long)]
    expected_revision: Option<u64>,
    #[arg(long, value_enum)]
    backend: CliExecutionBackend,
    #[arg(long = "argv")]
    argv: Vec<String>,
    #[arg(long)]
    working_directory: Option<PathBuf>,
    // `KEY=VALUE` pairs; entries without `=` are silently dropped by parse_env.
    #[arg(long = "env")]
    env: Vec<String>,
    // `key=value` pairs; see parse_dimension_assignments.
    #[arg(long = "dimension")]
    dimensions: Vec<String>,
    #[arg(long = "primary-metric")]
    primary_metric: String,
    #[arg(long = "metric")]
    supporting_metrics: Vec<String>,
    #[arg(long, value_enum)]
    verdict: CliFrontierVerdict,
    #[arg(long)]
    rationale: String,
    // Must be provided together or not at all (checked in run_experiment_close).
    #[arg(long)]
    analysis_summary: Option<String>,
    #[arg(long)]
    analysis_body: Option<String>,
}

#[derive(Args)]
struct
ArtifactRecordArgs {
    #[command(flatten)]
    project: ProjectArg,
    #[arg(long)]
    kind: CliArtifactKind,
    #[arg(long)]
    label: String,
    #[arg(long)]
    summary: Option<String>,
    // External reference (path, URL, ...); stored verbatim.
    #[arg(long)]
    locator: String,
    #[arg(long)]
    media_type: Option<String>,
    #[arg(long)]
    slug: Option<String>,
    // `frontier:...`, `hypothesis:...`, or `experiment:...` selectors.
    #[arg(long = "attach")]
    attachments: Vec<String>,
}

#[derive(Args)]
struct ArtifactListArgs {
    #[command(flatten)]
    project: ProjectArg,
    #[arg(long)]
    frontier: Option<String>,
    #[arg(long)]
    kind: Option<CliArtifactKind>,
    #[arg(long)]
    attached_to: Option<String>,
    #[arg(long)]
    limit: Option<usize>,
}

#[derive(Args)]
struct ArtifactSelectorArgs {
    #[command(flatten)]
    project: ProjectArg,
    #[arg(long)]
    artifact: String,
}

#[derive(Args)]
struct ArtifactUpdateArgs {
    #[command(flatten)]
    project: ProjectArg,
    #[arg(long)]
    artifact: String,
    #[arg(long)]
    expected_revision: Option<u64>,
    #[arg(long)]
    kind: Option<CliArtifactKind>,
    #[arg(long)]
    label: Option<String>,
    #[arg(long)]
    summary: Option<String>,
    #[arg(long)]
    clear_summary: bool,
    #[arg(long)]
    locator: Option<String>,
    #[arg(long)]
    media_type: Option<String>,
    #[arg(long)]
    clear_media_type: bool,
    // Attachments are only applied when `--replace-attachments` is also passed.
    #[arg(long = "attach")]
    attachments: Vec<String>,
    #[arg(long = "replace-attachments")]
    replace_attachments: bool,
}

#[derive(Args)]
struct MetricDefineArgs {
    #[command(flatten)]
    project: ProjectArg,
    #[arg(long)]
    key: String,
    #[arg(long, value_enum)]
    unit: CliMetricUnit,
    #[arg(long, value_enum)]
    objective: CliOptimizationObjective,
    #[arg(long, value_enum, default_value_t = CliMetricVisibility::Canonical)]
    visibility: CliMetricVisibility,
    #[arg(long)]
    description: Option<String>,
}

#[derive(Args)]
struct MetricKeysArgs {
    #[command(flatten)]
    project: ProjectArg,
    #[arg(long)]
    frontier: Option<String>,
    #[arg(long, value_enum, default_value_t = CliMetricScope::Live)]
    scope: CliMetricScope,
}

#[derive(Args)]
struct MetricBestArgs {
    #[command(flatten)]
    project: ProjectArg,
    #[arg(long)]
    frontier: Option<String>,
    #[arg(long)]
    hypothesis: Option<String>,
    #[arg(long)]
    key: String,
    #[arg(long = "dimension")]
    dimensions: Vec<String>,
    #[arg(long)]
    include_rejected: bool,
    #[arg(long)]
    limit: Option<usize>,
    #[arg(long, value_enum)]
    order: Option<CliMetricRankOrder>,
}

#[derive(Args)]
struct DimensionDefineArgs {
    #[command(flatten)]
    project: ProjectArg,
    #[arg(long)]
    key: String,
    #[arg(long, value_enum)]
    value_type: CliFieldValueType,
    #[arg(long)]
    description: Option<String>,
}

#[derive(Args)]
struct McpServeArgs {
    #[arg(long)]
    project: Option<PathBuf>,
}

#[derive(Args)]
struct McpWorkerArgs {
    #[arg(long, default_value = ".")]
    project: PathBuf,
}

#[derive(Args)]
struct UiServeArgs {
    #[arg(long, default_value = ".")]
    path: PathBuf,
    #[arg(long, default_value = "127.0.0.1:8913")]
    bind: SocketAddr,
    #[arg(long)]
    limit: Option<usize>,
}

#[derive(Args)]
struct SkillInstallArgs {
    #[arg(long)]
    name: Option<String>,
    #[arg(long)]
    destination: Option<PathBuf>,
}

#[derive(Args)]
struct SkillShowArgs {
    #[arg(long)]
    name: Option<String>,
}

// CLI-facing mirrors of the core enums; each has a `From` impl converting to
// its `fidget_spinner_core` / store counterpart further down the file.
#[derive(Clone, Copy, Debug, Eq, PartialEq, ValueEnum)]
enum CliMetricUnit {
    Seconds,
    Bytes,
    Count,
    Ratio,
    Custom,
}

#[derive(Clone, Copy, Debug, Eq, PartialEq, ValueEnum)]
enum CliOptimizationObjective {
    Minimize,
    Maximize,
    Target,
}

#[derive(Clone, Copy, Debug, Eq, PartialEq, ValueEnum)]
enum CliMetricVisibility {
    Canonical,
    Minor,
    Hidden,
    Archived,
}

#[derive(Clone, Copy, Debug, Eq, PartialEq, ValueEnum)]
enum CliMetricScope {
    Live,
    Visible,
    All,
}

#[derive(Clone, Copy, Debug, Eq, PartialEq, ValueEnum)]
enum CliMetricRankOrder {
    Asc,
    Desc,
}

#[derive(Clone, Copy, Debug, Eq, PartialEq, ValueEnum)]
enum CliFieldValueType {
    String,
    Numeric,
    Boolean,
    Timestamp,
}

#[derive(Clone, Copy, Debug, Eq, PartialEq, ValueEnum)]
enum CliArtifactKind {
    Document,
    Link,
    Log,
    Table,
    Plot,
    Dump,
    Binary,
    Other,
}

#[derive(Clone, Copy, Debug, Eq, PartialEq, ValueEnum)]
enum CliExecutionBackend {
    Manual,
    LocalProcess,
    WorktreeProcess,
    SshProcess,
}

#[derive(Clone, Copy, Debug, Eq, PartialEq, ValueEnum)]
enum CliFrontierVerdict {
    Accepted,
    Kept,
    Parked,
    Rejected,
}

#[derive(Clone, Copy, Debug, Eq, PartialEq, ValueEnum)]
enum CliExperimentStatus {
    Open,
    Closed,
}

// Tri-state archive flag: `Archive` => archived=true, `Restore` => archived=false.
#[derive(Clone, Copy, Debug, Eq, PartialEq, ValueEnum)]
enum CliArchivePatch {
    Archive,
    Restore,
}

fn main() -> Result<(), StoreError>
{
    let cli = Cli::parse();
    // One match arm per top-level command; simple reads are dispatched inline,
    // anything that needs argument massaging goes through a `run_*` helper.
    match cli.command {
        Command::Init(args) => run_init(args),
        Command::Project { command } => match command {
            ProjectCommand::Status(args) => print_json(&open_store(&args.project)?.status()?),
        },
        Command::Tag { command } => match command {
            TagCommand::Add(args) => run_tag_add(args),
            TagCommand::List(args) => print_json(&open_store(&args.project)?.list_tags()?),
        },
        Command::Frontier { command } => match command {
            FrontierCommand::Create(args) => run_frontier_create(args),
            FrontierCommand::List(args) => {
                print_json(&open_store(&args.project)?.list_frontiers()?)
            }
            FrontierCommand::Read(args) => {
                print_json(&open_store(&args.project.project)?.read_frontier(&args.frontier)?)
            }
            FrontierCommand::Open(args) => {
                print_json(&open_store(&args.project.project)?.frontier_open(&args.frontier)?)
            }
            FrontierCommand::UpdateBrief(args) => run_frontier_brief_update(args),
            FrontierCommand::History(args) => {
                print_json(&open_store(&args.project.project)?.frontier_history(&args.frontier)?)
            }
        },
        Command::Hypothesis { command } => match command {
            HypothesisCommand::Record(args) => run_hypothesis_record(args),
            HypothesisCommand::List(args) => run_hypothesis_list(args),
            HypothesisCommand::Read(args) => {
                print_json(&open_store(&args.project.project)?.read_hypothesis(&args.hypothesis)?)
            }
            HypothesisCommand::Update(args) => run_hypothesis_update(args),
            HypothesisCommand::History(args) => print_json(
                &open_store(&args.project.project)?.hypothesis_history(&args.hypothesis)?,
            ),
        },
        Command::Experiment { command } => match command {
            ExperimentCommand::Open(args) => run_experiment_open(args),
            ExperimentCommand::List(args) => run_experiment_list(args),
            ExperimentCommand::Read(args) => {
                print_json(&open_store(&args.project.project)?.read_experiment(&args.experiment)?)
            }
            ExperimentCommand::Update(args) => run_experiment_update(args),
            ExperimentCommand::Close(args) => run_experiment_close(args),
            ExperimentCommand::History(args) => print_json(
                &open_store(&args.project.project)?.experiment_history(&args.experiment)?,
            ),
        },
        Command::Artifact { command } => match command {
            ArtifactCommand::Record(args) => run_artifact_record(args),
            ArtifactCommand::List(args) => run_artifact_list(args),
            ArtifactCommand::Read(args) => {
                print_json(&open_store(&args.project.project)?.read_artifact(&args.artifact)?)
            }
            ArtifactCommand::Update(args) => run_artifact_update(args),
            ArtifactCommand::History(args) => {
                print_json(&open_store(&args.project.project)?.artifact_history(&args.artifact)?)
            }
        },
        Command::Metric { command } => match command {
            MetricCommand::Define(args) => run_metric_define(args),
            MetricCommand::Keys(args) => run_metric_keys(args),
            MetricCommand::Best(args) => run_metric_best(args),
        },
        Command::Dimension { command } => match command {
            DimensionCommand::Define(args) => run_dimension_define(args),
            DimensionCommand::List(args) => {
                print_json(&open_store(&args.project)?.list_run_dimensions()?)
            }
        },
        Command::Mcp { command } => match command {
            McpCommand::Serve(args) => mcp::serve(args.project),
            McpCommand::Worker(args) => mcp::serve_worker(args.project),
        },
        Command::Ui { command } => match command {
            UiCommand::Serve(args) => run_ui_serve(args),
        },
        Command::Skill { command } => match command {
            SkillCommand::List => print_json(&bundled_skill::bundled_skill_summaries()),
            SkillCommand::Install(args) => run_skill_install(args),
            SkillCommand::Show(args) => {
                println!("{}", resolve_bundled_skill(args.name.as_deref())?.body);
                Ok(())
            }
        },
    }
}

// Initialize a store under `--project`, defaulting the display name to the
// directory name when `--name` is omitted, then print the fresh status.
fn run_init(args: InitArgs) -> Result<(), StoreError> {
    let project_root = utf8_path(args.project);
    let store = ProjectStore::init(
        &project_root,
        args.name
            .map(NonEmptyText::new)
            .transpose()?
            .unwrap_or(default_display_name_for_root(&project_root)?),
    )?;
    print_json(&store.status()?)
}

/// Register a tag in the project-wide registry and echo the stored record.
fn run_tag_add(args: TagAddArgs) -> Result<(), StoreError> {
    let mut store = open_store(&args.project.project)?;
    print_json(&store.register_tag(
        TagName::new(args.name)?,
        NonEmptyText::new(args.description)?,
    )?)
}

/// Create a frontier scope; the slug is auto-derived when not given.
fn run_frontier_create(args: FrontierCreateArgs) -> Result<(), StoreError> {
    let mut store = open_store(&args.project.project)?;
    print_json(&store.create_frontier(CreateFrontierRequest {
        label: NonEmptyText::new(args.label)?,
        objective: NonEmptyText::new(args.objective)?,
        slug: args.slug.map(Slug::new).transpose()?,
    })?)
}

/// Patch a frontier brief. Empty `--roadmap`/`--unknown` lists mean
/// "leave unchanged" (`None`), not "clear".
fn run_frontier_brief_update(args: FrontierBriefUpdateArgs) -> Result<(), StoreError> {
    let mut store = open_store(&args.project.project)?;
    let roadmap = if args.roadmap.is_empty() {
        None
    } else {
        Some(
            args.roadmap
                .into_iter()
                .map(parse_roadmap_item)
                .collect::<Result<Vec<_>, _>>()?,
        )
    };
    let unknowns = if args.unknowns.is_empty() {
        None
    } else {
        Some(to_non_empty_texts(args.unknowns)?)
    };
    print_json(&store.update_frontier_brief(UpdateFrontierBriefRequest {
        frontier: args.frontier,
        expected_revision: args.expected_revision,
        situation: cli_text_patch(args.situation, args.clear_situation)?,
        roadmap,
        unknowns,
    })?)
}

/// Record a new hypothesis under a frontier.
fn run_hypothesis_record(args: HypothesisRecordArgs) -> Result<(), StoreError> {
    let mut store = open_store(&args.project.project)?;
    print_json(&store.create_hypothesis(CreateHypothesisRequest {
        frontier: args.frontier,
        slug: args.slug.map(Slug::new).transpose()?,
        title: NonEmptyText::new(args.title)?,
        summary: NonEmptyText::new(args.summary)?,
        body: NonEmptyText::new(args.body)?,
        tags: parse_tag_set(args.tags)?,
        parents: parse_vertex_selectors(args.parents)?,
    })?)
}

/// List hypotheses, optionally filtered by frontier and tags.
fn run_hypothesis_list(args: HypothesisListArgs) -> Result<(), StoreError> {
    let store = open_store(&args.project.project)?;
    print_json(&store.list_hypotheses(ListHypothesesQuery {
        frontier: args.frontier,
        tags: parse_tag_set(args.tags)?,
        include_archived: args.include_archived,
        limit: args.limit,
    })?)
}

/// Patch a hypothesis. Tags/parents are only replaced when the matching
/// `--replace-*` flag is passed; otherwise they are left untouched.
fn run_hypothesis_update(args: HypothesisUpdateArgs) -> Result<(), StoreError> {
    let mut store = open_store(&args.project.project)?;
    let tags = if args.replace_tags {
        Some(parse_tag_set(args.tags)?)
    } else {
        None
    };
    let parents = if args.replace_parents {
        Some(parse_vertex_selectors(args.parents)?)
    } else {
        None
    };
    print_json(&store.update_hypothesis(UpdateHypothesisRequest {
        hypothesis: args.hypothesis,
        expected_revision: args.expected_revision,
        title: args.title.map(NonEmptyText::new).transpose()?,
        summary: args.summary.map(NonEmptyText::new).transpose()?,
        body: args.body.map(NonEmptyText::new).transpose()?,
        tags,
        parents,
        archived: archive_patch(args.state),
    })?)
}

/// Open an experiment against an existing hypothesis.
fn run_experiment_open(args: ExperimentOpenArgs) -> Result<(), StoreError> {
    let mut store = open_store(&args.project.project)?;
    print_json(&store.open_experiment(OpenExperimentRequest {
        hypothesis: args.hypothesis,
        slug: args.slug.map(Slug::new).transpose()?,
        title: NonEmptyText::new(args.title)?,
        summary: args.summary.map(NonEmptyText::new).transpose()?,
        tags: parse_tag_set(args.tags)?,
        parents: parse_vertex_selectors(args.parents)?,
    })?)
}

/// List experiments filtered by frontier/hypothesis/status/tags.
fn run_experiment_list(args: ExperimentListArgs) -> Result<(), StoreError> {
    let store = open_store(&args.project.project)?;
    print_json(&store.list_experiments(ListExperimentsQuery {
        frontier: args.frontier,
        hypothesis: args.hypothesis,
        tags: parse_tag_set(args.tags)?,
        include_archived: args.include_archived,
        status: args.status.map(Into::into),
        limit: args.limit,
    })?)
}

/// Patch an experiment, including an optional outcome document supplied as
/// inline JSON or a JSON file (mutually exclusive).
fn run_experiment_update(args: ExperimentUpdateArgs) -> Result<(), StoreError> {
    let mut store = open_store(&args.project.project)?;
    // `ExperimentOutcomePatch` is the only deserialize target for this flag.
    let outcome =
        load_optional_json::<ExperimentOutcomePatch>(args.outcome_json, args.outcome_file)?;
    print_json(&store.update_experiment(UpdateExperimentRequest {
        experiment: args.experiment,
        expected_revision: args.expected_revision,
        title: args.title.map(NonEmptyText::new).transpose()?,
        summary: cli_text_patch(args.summary, args.clear_summary)?,
        tags: if args.replace_tags {
            Some(parse_tag_set(args.tags)?)
        } else {
            None
        },
        parents: if args.replace_parents {
            Some(parse_vertex_selectors(args.parents)?)
        } else {
            None
        },
        archived: archive_patch(args.state),
        outcome,
    })?)
}

/// Close an experiment: record the command recipe, dimensions, metrics,
/// verdict, and (optionally) a written analysis.
fn run_experiment_close(args: ExperimentCloseArgs) -> Result<(), StoreError> {
    let mut store = open_store(&args.project.project)?;
    // Analysis is all-or-nothing: summary and body must arrive together.
    let analysis = match (args.analysis_summary, args.analysis_body) {
        (Some(summary), Some(body)) => Some(ExperimentAnalysis {
            summary: NonEmptyText::new(summary)?,
            body: NonEmptyText::new(body)?,
        }),
        (None, None) => None,
        _ => {
            return Err(invalid_input(
                "analysis requires both --analysis-summary and --analysis-body",
            ));
        }
    };
    print_json(
        &store.close_experiment(CloseExperimentRequest {
            experiment: args.experiment,
            expected_revision: args.expected_revision,
            backend: args.backend.into(),
            command: CommandRecipe::new(
                args.working_directory.map(utf8_path),
                to_non_empty_texts(args.argv)?,
                parse_env(args.env),
            )?,
            dimensions: parse_dimension_assignments(args.dimensions)?,
            primary_metric: parse_metric_value_assignment(&args.primary_metric)?,
            supporting_metrics: args
                .supporting_metrics
                .into_iter()
                .map(|raw| parse_metric_value_assignment(&raw))
                .collect::<Result<Vec<_>, _>>()?,
            verdict: args.verdict.into(),
            rationale: NonEmptyText::new(args.rationale)?,
            analysis,
        })?,
    )
}

/// Register an external artifact and attach it to ledger entries.
fn run_artifact_record(args: ArtifactRecordArgs) -> Result<(), StoreError> {
    let mut store = open_store(&args.project.project)?;
    print_json(&store.create_artifact(CreateArtifactRequest {
        slug: args.slug.map(Slug::new).transpose()?,
        kind: args.kind.into(),
        label: NonEmptyText::new(args.label)?,
        summary: args.summary.map(NonEmptyText::new).transpose()?,
        locator: NonEmptyText::new(args.locator)?,
        media_type: args.media_type.map(NonEmptyText::new).transpose()?,
        attachments: parse_attachment_selectors(args.attachments)?,
    })?)
}

/// List artifacts, optionally filtered by frontier, kind, or attachment target.
fn run_artifact_list(args: ArtifactListArgs) -> Result<(), StoreError> {
    let store = open_store(&args.project.project)?;
    print_json(
        &store.list_artifacts(ListArtifactsQuery {
            frontier: args.frontier,
            kind: args.kind.map(Into::into),
            attached_to: args
                .attached_to
                .as_deref()
                .map(parse_attachment_selector)
                .transpose()?,
            limit: args.limit,
        })?,
    )
}

/// Patch an artifact; attachments are replaced only with `--replace-attachments`.
fn run_artifact_update(args: ArtifactUpdateArgs) -> Result<(), StoreError> {
    let mut store = open_store(&args.project.project)?;
    print_json(&store.update_artifact(UpdateArtifactRequest {
        artifact: args.artifact,
        expected_revision: args.expected_revision,
        kind: args.kind.map(Into::into),
        label: args.label.map(NonEmptyText::new).transpose()?,
        summary: cli_text_patch(args.summary, args.clear_summary)?,
        locator: args.locator.map(NonEmptyText::new).transpose()?,
        media_type: cli_text_patch(args.media_type, args.clear_media_type)?,
        attachments: if args.replace_attachments {
            Some(parse_attachment_selectors(args.attachments)?)
        } else {
            None
        },
    })?)
}

/// Define (or redefine) a project-level metric.
fn run_metric_define(args: MetricDefineArgs) -> Result<(), StoreError> {
    let mut store = open_store(&args.project.project)?;
    print_json(&store.define_metric(DefineMetricRequest {
        key: NonEmptyText::new(args.key)?,
        unit: args.unit.into(),
        objective: args.objective.into(),
        visibility: args.visibility.into(),
        description: args.description.map(NonEmptyText::new).transpose()?,
    })?)
}

/// List metric keys visible under the requested scope.
fn run_metric_keys(args: MetricKeysArgs) -> Result<(), StoreError> {
    let store = open_store(&args.project.project)?;
    print_json(&store.metric_keys(MetricKeysQuery {
        frontier: args.frontier,
        scope: args.scope.into(),
    })?)
}

/// Rank experiments by a metric key, optionally sliced by dimensions.
fn run_metric_best(args: MetricBestArgs) -> Result<(), StoreError> {
    let store = open_store(&args.project.project)?;
    print_json(&store.metric_best(MetricBestQuery {
        frontier: args.frontier,
        hypothesis: args.hypothesis,
        key: NonEmptyText::new(args.key)?,
        dimensions: parse_dimension_assignments(args.dimensions)?,
        include_rejected: args.include_rejected,
        limit: args.limit,
        order: args.order.map(Into::into),
    })?)
}

/// Define a typed run dimension in the project vocabulary.
fn run_dimension_define(args: DimensionDefineArgs) -> Result<(), StoreError> {
    let mut store = open_store(&args.project.project)?;
    print_json(&store.define_run_dimension(DefineRunDimensionRequest {
        key: NonEmptyText::new(args.key)?,
        value_type: args.value_type.into(),
        description: args.description.map(NonEmptyText::new).transpose()?,
    })?)
}

/// Install one named bundled skill, or all of them when `--name` is omitted.
/// Prints each installation directory as it is written.
fn run_skill_install(args: SkillInstallArgs) -> Result<(), StoreError> {
    if let Some(name) = args.name.as_deref() {
        let skill = resolve_bundled_skill(Some(name))?;
        let destination = args
            .destination
            .unwrap_or(default_skill_root()?.join(skill.name));
        install_skill(skill, &destination)?;
        println!("{}", destination.display());
    } else {
        let destination_root = args.destination.unwrap_or(default_skill_root()?);
        for skill in bundled_skill::bundled_skill_summaries() {
            let destination = destination_root.join(skill.name);
            install_skill(resolve_bundled_skill(Some(skill.name))?, &destination)?;
            println!("{}", destination.display());
        }
    }
    Ok(())
}

/// Resolve the project root for `--path` (including descendant discovery)
/// and start the local navigator.
fn run_ui_serve(args: UiServeArgs) -> Result<(), StoreError> {
    let project_root = resolve_ui_project_root(&utf8_path(args.path))?;
    ui::serve(project_root, args.bind, args.limit)
}

/// Look up a bundled skill by name, or the default skill when `None`.
fn resolve_bundled_skill(
    requested_name: Option<&str>,
) -> Result<bundled_skill::BundledSkill, StoreError> {
    requested_name.map_or_else(
        || Ok(bundled_skill::default_bundled_skill()),
        |name| {
            bundled_skill::bundled_skill(name)
                .ok_or_else(|| invalid_input(format!("unknown bundled skill `{name}`")))
        },
    )
}

/// Default install root for Codex skills: `~/.codex/skills`.
fn default_skill_root() -> Result<PathBuf, StoreError> {
    dirs::home_dir()
        .map(|home| home.join(".codex/skills"))
        .ok_or_else(|| invalid_input("home directory not found"))
}

/// Write `SKILL.md` for a bundled skill into `destination`, creating it.
fn install_skill(
    skill: bundled_skill::BundledSkill,
    destination: &Path,
) -> Result<(), StoreError> {
    fs::create_dir_all(destination)?;
    fs::write(destination.join("SKILL.md"), skill.body)?;
    Ok(())
}

/// Open an existing store rooted at `path`.
pub(crate) fn open_store(path: &Path) -> Result<ProjectStore, StoreError> {
    ProjectStore::open(utf8_path(path.to_path_buf()))
}

/// Find the project root for the UI: first walk up from `path`, then fall
/// back to scanning descendants; ambiguity (several stores below) is an error.
pub(crate) fn resolve_ui_project_root(path: &Utf8Path) -> Result<Utf8PathBuf, StoreError> {
    if let Some(project_root) = fidget_spinner_store_sqlite::discover_project_root(path) {
        return Ok(project_root);
    }
    let candidates = discover_descendant_project_roots(path)?;
    match candidates.len() {
        0 => Err(StoreError::MissingProjectStore(path.to_path_buf())),
        1 => candidates
            .into_iter()
            .next()
            .ok_or_else(|| StoreError::MissingProjectStore(path.to_path_buf())),
        _ => Err(StoreError::AmbiguousProjectStoreDiscovery {
            path: path.to_path_buf(),
            candidates: candidates
                .iter()
                .map(|candidate| candidate.as_str())
                .collect::<Vec<_>>()
                .join(", "),
        }),
    }
}

/// Open a store for an MCP binding, bootstrapping a fresh one only when the
/// resolved root is an empty (or absent) directory.
pub(crate) fn open_or_init_store_for_binding(path: &Path) -> Result<ProjectStore, StoreError> {
    let requested_root = utf8_path(path.to_path_buf());
    match ProjectStore::open(requested_root.clone()) {
        Ok(store) => Ok(store),
        Err(StoreError::MissingProjectStore(_)) => {
            let project_root = binding_bootstrap_root(&requested_root)?;
            // Refuse to silently create a store inside a non-empty directory.
            if !is_empty_directory(&project_root)? {
                return Err(StoreError::MissingProjectStore(requested_root));
            }
            ProjectStore::init(&project_root, default_display_name_for_root(&project_root)?)
} Err(error) => Err(error), } } pub(crate) fn utf8_path(path: impl Into) -> Utf8PathBuf { Utf8PathBuf::from(path.into().to_string_lossy().into_owned()) } fn binding_bootstrap_root(path: &Utf8Path) -> Result { match fs::metadata(path.as_std_path()) { Ok(metadata) if metadata.is_file() => Ok(path .parent() .map_or_else(|| path.to_path_buf(), Utf8Path::to_path_buf)), Ok(_) => Ok(path.to_path_buf()), Err(error) if error.kind() == io::ErrorKind::NotFound => Ok(path.to_path_buf()), Err(error) => Err(StoreError::from(error)), } } fn is_empty_directory(path: &Utf8Path) -> Result { match fs::metadata(path.as_std_path()) { Ok(metadata) if metadata.is_dir() => { let mut entries = fs::read_dir(path.as_std_path())?; Ok(entries.next().transpose()?.is_none()) } Ok(_) => Ok(false), Err(error) if error.kind() == io::ErrorKind::NotFound => Ok(false), Err(error) => Err(StoreError::from(error)), } } fn discover_descendant_project_roots(path: &Utf8Path) -> Result, StoreError> { let start = binding_bootstrap_root(path)?; let mut candidates = BTreeSet::new(); collect_descendant_project_roots(&start, &mut candidates)?; Ok(candidates) } fn collect_descendant_project_roots( path: &Utf8Path, candidates: &mut BTreeSet, ) -> Result<(), StoreError> { let metadata = match fs::metadata(path.as_std_path()) { Ok(metadata) => metadata, Err(error) if error.kind() == io::ErrorKind::NotFound => return Ok(()), Err(error) => return Err(StoreError::from(error)), }; if metadata.is_file() { return Ok(()); } if path.file_name() == Some(STORE_DIR_NAME) { if let Some(project_root) = path.parent() { let _ = candidates.insert(project_root.to_path_buf()); } return Ok(()); } for entry in fs::read_dir(path.as_std_path())? 
{ let entry = entry?; let entry_type = entry.file_type()?; if !entry_type.is_dir() { continue; } let child = utf8_path(entry.path()); if child.file_name() == Some(STORE_DIR_NAME) { let _ = candidates.insert(path.to_path_buf()); continue; } collect_descendant_project_roots(&child, candidates)?; } Ok(()) } fn default_display_name_for_root(project_root: &Utf8Path) -> Result { NonEmptyText::new( project_root .file_name() .map_or_else(|| "fidget-spinner-project".to_owned(), ToOwned::to_owned), ) .map_err(StoreError::from) } fn parse_tag_set(values: Vec) -> Result, StoreError> { values .into_iter() .map(TagName::new) .collect::, _>>() .map_err(StoreError::from) } pub(crate) fn parse_vertex_selectors( values: Vec, ) -> Result, StoreError> { values .into_iter() .map(|raw| { let (kind, selector) = raw .split_once(':') .ok_or_else(|| invalid_input("expected parent selector in the form `hypothesis:` or `experiment:`"))?; match kind { "hypothesis" => Ok(VertexSelector::Hypothesis(selector.to_owned())), "experiment" => Ok(VertexSelector::Experiment(selector.to_owned())), _ => Err(invalid_input(format!("unknown parent kind `{kind}`"))), } }) .collect() } pub(crate) fn parse_attachment_selectors( values: Vec, ) -> Result, StoreError> { values .into_iter() .map(|raw| parse_attachment_selector(&raw)) .collect() } pub(crate) fn parse_attachment_selector(raw: &str) -> Result { let (kind, selector) = raw .split_once(':') .ok_or_else(|| invalid_input("expected attachment selector in the form `frontier:`, `hypothesis:`, or `experiment:`"))?; match kind { "frontier" => Ok(AttachmentSelector::Frontier(selector.to_owned())), "hypothesis" => Ok(AttachmentSelector::Hypothesis(selector.to_owned())), "experiment" => Ok(AttachmentSelector::Experiment(selector.to_owned())), _ => Err(invalid_input(format!("unknown attachment kind `{kind}`"))), } } fn parse_roadmap_item(raw: String) -> Result { let mut parts = raw.splitn(3, ':'); let rank = parts .next() .ok_or_else(|| invalid_input("roadmap items 
must look like `rank:hypothesis[:summary]`"))? .parse::() .map_err(|error| invalid_input(format!("invalid roadmap rank: {error}")))?; let hypothesis = parts .next() .ok_or_else(|| invalid_input("roadmap items must include a hypothesis selector"))? .to_owned(); let summary = parts .next() .map(NonEmptyText::new) .transpose() .map_err(StoreError::from)?; Ok(FrontierRoadmapItemDraft { rank, hypothesis, summary, }) } pub(crate) fn parse_env(values: Vec) -> BTreeMap { values .into_iter() .filter_map(|entry| { let (key, value) = entry.split_once('=')?; Some((key.to_owned(), value.to_owned())) }) .collect() } fn parse_metric_value_assignment( raw: &str, ) -> Result { let (key, value) = raw .split_once('=') .ok_or_else(|| invalid_input("expected metric assignment in the form `key=value`"))?; let value = value .parse::() .map_err(|error| invalid_input(format!("invalid metric value `{value}`: {error}")))?; Ok(fidget_spinner_core::MetricValue { key: NonEmptyText::new(key.to_owned())?, value, }) } pub(crate) fn parse_dimension_assignments( values: Vec, ) -> Result, StoreError> { values .into_iter() .map(|entry| { let (key, raw_value) = entry.split_once('=').ok_or_else(|| { invalid_input("expected dimension assignment in the form `key=value`") })?; let json_value = serde_json::from_str::(raw_value) .unwrap_or_else(|_| Value::String(raw_value.to_owned())); Ok(( NonEmptyText::new(key.to_owned())?, json_to_dimension_value(json_value)?, )) }) .collect() } fn json_to_dimension_value(value: Value) -> Result { match value { Value::String(raw) => { if time::OffsetDateTime::parse(&raw, &time::format_description::well_known::Rfc3339) .is_ok() { Ok(RunDimensionValue::Timestamp(NonEmptyText::new(raw)?)) } else { Ok(RunDimensionValue::String(NonEmptyText::new(raw)?)) } } Value::Number(number) => number .as_f64() .map(RunDimensionValue::Numeric) .ok_or_else(|| invalid_input("numeric dimension values must fit into f64")), Value::Bool(value) => Ok(RunDimensionValue::Boolean(value)), _ => 
Err(invalid_input( "dimension values must be string, number, boolean, or RFC3339 timestamp", )), } } fn to_non_empty_texts(values: Vec) -> Result, StoreError> { values .into_iter() .map(NonEmptyText::new) .collect::, _>>() .map_err(StoreError::from) } fn load_optional_json serde::Deserialize<'de>>( inline: Option, file: Option, ) -> Result, StoreError> { match (inline, file) { (Some(raw), None) => serde_json::from_str(&raw) .map(Some) .map_err(StoreError::from), (None, Some(path)) => serde_json::from_slice(&fs::read(path)?) .map(Some) .map_err(StoreError::from), (None, None) => Ok(None), (Some(_), Some(_)) => Err(invalid_input( "use only one of --outcome-json or --outcome-file", )), } } const fn archive_patch(state: Option) -> Option { match state { None => None, Some(CliArchivePatch::Archive) => Some(true), Some(CliArchivePatch::Restore) => Some(false), } } fn cli_text_patch( value: Option, clear: bool, ) -> Result>, StoreError> { if clear { if value.is_some() { return Err(invalid_input("cannot set and clear the same field")); } return Ok(Some(TextPatch::Clear)); } value .map(NonEmptyText::new) .transpose() .map(|value| value.map(TextPatch::Set)) .map_err(StoreError::from) } fn invalid_input(message: impl Into) -> StoreError { StoreError::InvalidInput(message.into()) } pub(crate) fn to_pretty_json(value: &impl Serialize) -> Result { serde_json::to_string_pretty(value).map_err(StoreError::from) } fn print_json(value: &impl Serialize) -> Result<(), StoreError> { println!("{}", to_pretty_json(value)?); Ok(()) } impl From for MetricUnit { fn from(value: CliMetricUnit) -> Self { match value { CliMetricUnit::Seconds => Self::Seconds, CliMetricUnit::Bytes => Self::Bytes, CliMetricUnit::Count => Self::Count, CliMetricUnit::Ratio => Self::Ratio, CliMetricUnit::Custom => Self::Custom, } } } impl From for OptimizationObjective { fn from(value: CliOptimizationObjective) -> Self { match value { CliOptimizationObjective::Minimize => Self::Minimize, 
CliOptimizationObjective::Maximize => Self::Maximize, CliOptimizationObjective::Target => Self::Target, } } } impl From for MetricVisibility { fn from(value: CliMetricVisibility) -> Self { match value { CliMetricVisibility::Canonical => Self::Canonical, CliMetricVisibility::Minor => Self::Minor, CliMetricVisibility::Hidden => Self::Hidden, CliMetricVisibility::Archived => Self::Archived, } } } impl From for MetricScope { fn from(value: CliMetricScope) -> Self { match value { CliMetricScope::Live => Self::Live, CliMetricScope::Visible => Self::Visible, CliMetricScope::All => Self::All, } } } impl From for MetricRankOrder { fn from(value: CliMetricRankOrder) -> Self { match value { CliMetricRankOrder::Asc => Self::Asc, CliMetricRankOrder::Desc => Self::Desc, } } } impl From for FieldValueType { fn from(value: CliFieldValueType) -> Self { match value { CliFieldValueType::String => Self::String, CliFieldValueType::Numeric => Self::Numeric, CliFieldValueType::Boolean => Self::Boolean, CliFieldValueType::Timestamp => Self::Timestamp, } } } impl From for ArtifactKind { fn from(value: CliArtifactKind) -> Self { match value { CliArtifactKind::Document => Self::Document, CliArtifactKind::Link => Self::Link, CliArtifactKind::Log => Self::Log, CliArtifactKind::Table => Self::Table, CliArtifactKind::Plot => Self::Plot, CliArtifactKind::Dump => Self::Dump, CliArtifactKind::Binary => Self::Binary, CliArtifactKind::Other => Self::Other, } } } impl From for ExecutionBackend { fn from(value: CliExecutionBackend) -> Self { match value { CliExecutionBackend::Manual => Self::Manual, CliExecutionBackend::LocalProcess => Self::LocalProcess, CliExecutionBackend::WorktreeProcess => Self::WorktreeProcess, CliExecutionBackend::SshProcess => Self::SshProcess, } } } impl From for FrontierVerdict { fn from(value: CliFrontierVerdict) -> Self { match value { CliFrontierVerdict::Accepted => Self::Accepted, CliFrontierVerdict::Kept => Self::Kept, CliFrontierVerdict::Parked => Self::Parked, 
CliFrontierVerdict::Rejected => Self::Rejected, } } } impl From for ExperimentStatus { fn from(value: CliExperimentStatus) -> Self { match value { CliExperimentStatus::Open => Self::Open, CliExperimentStatus::Closed => Self::Closed, } } }