diff options
Diffstat (limited to 'crates/fidget-spinner-cli/src')
-rw-r--r--   crates/fidget-spinner-cli/src/main.rs        |  80
-rw-r--r--   crates/fidget-spinner-cli/src/mcp/catalog.rs |  24
-rw-r--r--   crates/fidget-spinner-cli/src/mcp/service.rs | 124
3 files changed, 62 insertions, 166 deletions
diff --git a/crates/fidget-spinner-cli/src/main.rs b/crates/fidget-spinner-cli/src/main.rs index 491e30d..f56e751 100644 --- a/crates/fidget-spinner-cli/src/main.rs +++ b/crates/fidget-spinner-cli/src/main.rs @@ -10,10 +10,10 @@ use std::path::{Path, PathBuf}; use camino::{Utf8Path, Utf8PathBuf}; use clap::{Args, Parser, Subcommand, ValueEnum}; use fidget_spinner_core::{ - AnnotationVisibility, CodeSnapshotRef, CommandRecipe, DiagnosticSeverity, ExecutionBackend, - FieldPresence, FieldRole, FieldValueType, FrontierContract, FrontierNote, FrontierVerdict, - GitCommitHash, InferencePolicy, MetricSpec, MetricUnit, MetricValue, NodeAnnotation, NodeClass, - NodePayload, NonEmptyText, OptimizationObjective, ProjectFieldSpec, TagName, + AnnotationVisibility, CommandRecipe, DiagnosticSeverity, ExecutionBackend, FieldPresence, + FieldRole, FieldValueType, FrontierContract, FrontierNote, FrontierVerdict, InferencePolicy, + MetricSpec, MetricUnit, MetricValue, NodeAnnotation, NodeClass, NodePayload, NonEmptyText, + OptimizationObjective, ProjectFieldSpec, TagName, }; use fidget_spinner_store_sqlite::{ CloseExperimentRequest, CreateFrontierRequest, CreateNodeRequest, DefineMetricRequest, @@ -152,8 +152,6 @@ struct FrontierInitArgs { primary_metric_unit: CliMetricUnit, #[arg(long = "primary-metric-objective", value_enum)] primary_metric_objective: CliOptimizationObjective, - #[arg(long = "seed-summary", default_value = "initial champion checkpoint")] - seed_summary: String, } #[derive(Args)] @@ -490,11 +488,11 @@ struct MetricBestArgs { #[derive(Subcommand)] enum ExperimentCommand { - /// Open a stateful experiment against one hypothesis and base checkpoint. + /// Open a stateful experiment against one hypothesis. Open(ExperimentOpenArgs), /// List open experiments, optionally narrowed to one frontier. List(ExperimentListArgs), - /// Close a core-path experiment with checkpoint, run, note, and verdict. + /// Close a core-path experiment with run data, note, and verdict. 
Close(Box<ExperimentCloseArgs>), } @@ -518,8 +516,6 @@ struct ExperimentCloseArgs { project: ProjectArg, #[arg(long = "experiment")] experiment_id: String, - #[arg(long = "candidate-summary")] - candidate_summary: String, #[arg(long = "run-title")] run_title: String, #[arg(long = "run-summary")] @@ -567,8 +563,6 @@ struct ExperimentOpenArgs { project: ProjectArg, #[arg(long)] frontier: String, - #[arg(long = "base-checkpoint")] - base_checkpoint: String, #[arg(long = "hypothesis-node")] hypothesis_node: String, #[arg(long)] @@ -733,11 +727,10 @@ enum CliInferencePolicy { #[derive(Clone, Copy, Debug, Eq, PartialEq, ValueEnum)] enum CliFrontierVerdict { - PromoteToChampion, - KeepOnFrontier, - RevertToChampion, - ArchiveDeadEnd, - NeedsMoreEvidence, + Accepted, + Kept, + Parked, + Rejected, } fn main() { @@ -834,8 +827,6 @@ fn run_init(args: InitArgs) -> Result<(), StoreError> { fn run_frontier_init(args: FrontierInitArgs) -> Result<(), StoreError> { let mut store = open_store(&args.project.project)?; - let initial_checkpoint = - store.auto_capture_checkpoint(NonEmptyText::new(args.seed_summary)?)?; let projection = store.create_frontier(CreateFrontierRequest { label: NonEmptyText::new(args.label)?, contract_title: NonEmptyText::new(args.contract_title)?, @@ -853,7 +844,6 @@ fn run_frontier_init(args: FrontierInitArgs) -> Result<(), StoreError> { }, promotion_criteria: to_text_vec(args.promotion_criteria)?, }, - initial_checkpoint, })?; print_json(&projection) } @@ -1131,7 +1121,6 @@ fn run_experiment_open(args: ExperimentOpenArgs) -> Result<(), StoreError> { let summary = args.summary.map(NonEmptyText::new).transpose()?; let experiment = store.open_experiment(OpenExperimentRequest { frontier_id: parse_frontier_id(&args.frontier)?, - base_checkpoint_id: parse_checkpoint_id(&args.base_checkpoint)?, hypothesis_node_id: parse_node_id(&args.hypothesis_node)?, title: NonEmptyText::new(args.title)?, summary, @@ -1151,12 +1140,6 @@ fn run_experiment_list(args: 
ExperimentListArgs) -> Result<(), StoreError> { fn run_experiment_close(args: ExperimentCloseArgs) -> Result<(), StoreError> { let mut store = open_store(&args.project.project)?; - let snapshot = store - .auto_capture_checkpoint(NonEmptyText::new(args.candidate_summary.clone())?)? - .map(|seed| seed.snapshot) - .ok_or(StoreError::GitInspectionFailed( - store.project_root().to_path_buf(), - ))?; let command = CommandRecipe::new( args.working_directory .map(utf8_path) @@ -1186,14 +1169,11 @@ fn run_experiment_close(args: ExperimentCloseArgs) -> Result<(), StoreError> { }; let receipt = store.close_experiment(CloseExperimentRequest { experiment_id: parse_experiment_id(&args.experiment_id)?, - candidate_summary: NonEmptyText::new(args.candidate_summary)?, - candidate_snapshot: snapshot, run_title: NonEmptyText::new(args.run_title)?, run_summary: args.run_summary.map(NonEmptyText::new).transpose()?, backend: args.backend.into(), dimensions: coerce_cli_dimension_filters(&store, args.dimensions)?, command, - code_snapshot: Some(capture_code_snapshot(store.project_root())?), primary_metric: parse_metric_value(args.primary_metric)?, supporting_metrics: args .metrics @@ -1539,31 +1519,6 @@ fn parse_node_class_set(classes: Vec<CliNodeClass>) -> BTreeSet<NodeClass> { classes.into_iter().map(Into::into).collect() } -fn capture_code_snapshot(project_root: &Utf8Path) -> Result<CodeSnapshotRef, StoreError> { - let head_commit = run_git(project_root, &["rev-parse", "HEAD"])?; - let dirty_paths = run_git(project_root, &["status", "--porcelain"])? - .map(|status| { - status - .lines() - .filter_map(|line| line.get(3..).map(str::trim)) - .filter(|line| !line.is_empty()) - .map(Utf8PathBuf::from) - .collect::<BTreeSet<_>>() - }) - .unwrap_or_default(); - Ok(CodeSnapshotRef { - repo_root: run_git(project_root, &["rev-parse", "--show-toplevel"])? 
- .map(Utf8PathBuf::from) - .unwrap_or_else(|| project_root.to_path_buf()), - worktree_root: project_root.to_path_buf(), - worktree_name: run_git(project_root, &["rev-parse", "--abbrev-ref", "HEAD"])? - .map(NonEmptyText::new) - .transpose()?, - head_commit: head_commit.map(GitCommitHash::new).transpose()?, - dirty_paths, - }) -} - fn run_git(project_root: &Utf8Path, args: &[&str]) -> Result<Option<String>, StoreError> { let output = std::process::Command::new("git") .arg("-C") @@ -1702,12 +1657,6 @@ fn parse_frontier_id(raw: &str) -> Result<fidget_spinner_core::FrontierId, Store )?)) } -fn parse_checkpoint_id(raw: &str) -> Result<fidget_spinner_core::CheckpointId, StoreError> { - Ok(fidget_spinner_core::CheckpointId::from_uuid( - Uuid::parse_str(raw)?, - )) -} - fn parse_experiment_id(raw: &str) -> Result<fidget_spinner_core::ExperimentId, StoreError> { Ok(fidget_spinner_core::ExperimentId::from_uuid( Uuid::parse_str(raw)?, @@ -1851,11 +1800,10 @@ impl From<CliInferencePolicy> for InferencePolicy { impl From<CliFrontierVerdict> for FrontierVerdict { fn from(value: CliFrontierVerdict) -> Self { match value { - CliFrontierVerdict::PromoteToChampion => Self::PromoteToChampion, - CliFrontierVerdict::KeepOnFrontier => Self::KeepOnFrontier, - CliFrontierVerdict::RevertToChampion => Self::RevertToChampion, - CliFrontierVerdict::ArchiveDeadEnd => Self::ArchiveDeadEnd, - CliFrontierVerdict::NeedsMoreEvidence => Self::NeedsMoreEvidence, + CliFrontierVerdict::Accepted => Self::Accepted, + CliFrontierVerdict::Kept => Self::Kept, + CliFrontierVerdict::Parked => Self::Parked, + CliFrontierVerdict::Rejected => Self::Rejected, } } } diff --git a/crates/fidget-spinner-cli/src/mcp/catalog.rs b/crates/fidget-spinner-cli/src/mcp/catalog.rs index 3b8abcc..ae3ca78 100644 --- a/crates/fidget-spinner-cli/src/mcp/catalog.rs +++ b/crates/fidget-spinner-cli/src/mcp/catalog.rs @@ -99,13 +99,13 @@ pub(crate) fn tool_spec(name: &str) -> Option<ToolSpec> { }), "frontier.status" => Some(ToolSpec 
{ name: "frontier.status", - description: "Read one frontier projection, including champion and active candidates.", + description: "Read one frontier projection, including open/completed experiment counts and verdict totals.", dispatch: DispatchTarget::Worker, replay: ReplayContract::Convergent, }), "frontier.init" => Some(ToolSpec { name: "frontier.init", - description: "Create a new frontier rooted in a contract node. If the project is a git repo, the current HEAD becomes the initial champion when possible.", + description: "Create a new frontier rooted in a contract node.", dispatch: DispatchTarget::Worker, replay: ReplayContract::NeverReplay, }), @@ -183,7 +183,7 @@ pub(crate) fn tool_spec(name: &str) -> Option<ToolSpec> { }), "metric.best" => Some(ToolSpec { name: "metric.best", - description: "Rank completed experiments by one numeric key, with optional run-dimension filters and candidate commit surfacing.", + description: "Rank completed experiments by one numeric key, with optional run-dimension filters.", dispatch: DispatchTarget::Worker, replay: ReplayContract::Convergent, }), @@ -195,7 +195,7 @@ pub(crate) fn tool_spec(name: &str) -> Option<ToolSpec> { }), "experiment.open" => Some(ToolSpec { name: "experiment.open", - description: "Open a stateful experiment against one hypothesis and one base checkpoint.", + description: "Open a stateful experiment against one hypothesis.", dispatch: DispatchTarget::Worker, replay: ReplayContract::NeverReplay, }), @@ -213,7 +213,7 @@ pub(crate) fn tool_spec(name: &str) -> Option<ToolSpec> { }), "experiment.close" => Some(ToolSpec { name: "experiment.close", - description: "Close one open experiment with typed run dimensions, preregistered metric observations, candidate checkpoint capture, optional analysis, note, and verdict.", + description: "Close one open experiment with typed run dimensions, preregistered metric observations, optional analysis, note, and verdict.", dispatch: DispatchTarget::Worker, replay: 
ReplayContract::NeverReplay, }), @@ -562,12 +562,11 @@ fn input_schema(name: &str) -> Value { "type": "object", "properties": { "frontier_id": { "type": "string" }, - "base_checkpoint_id": { "type": "string" }, "hypothesis_node_id": { "type": "string" }, "title": { "type": "string" }, "summary": { "type": "string" } }, - "required": ["frontier_id", "base_checkpoint_id", "hypothesis_node_id", "title"], + "required": ["frontier_id", "hypothesis_node_id", "title"], "additionalProperties": false }), "experiment.list" => json!({ @@ -589,7 +588,6 @@ fn input_schema(name: &str) -> Value { "type": "object", "properties": { "experiment_id": { "type": "string" }, - "candidate_summary": { "type": "string" }, "run": run_schema(), "primary_metric": metric_value_schema(), "supporting_metrics": { "type": "array", "items": metric_value_schema() }, @@ -601,7 +599,6 @@ fn input_schema(name: &str) -> Value { }, "required": [ "experiment_id", - "candidate_summary", "run", "primary_metric", "note", @@ -753,11 +750,10 @@ fn verdict_schema() -> Value { json!({ "type": "string", "enum": [ - "promote_to_champion", - "keep_on_frontier", - "revert_to_champion", - "archive_dead_end", - "needs_more_evidence" + "accepted", + "kept", + "parked", + "rejected" ] }) } diff --git a/crates/fidget-spinner-cli/src/mcp/service.rs b/crates/fidget-spinner-cli/src/mcp/service.rs index 05f2382..f0cca1e 100644 --- a/crates/fidget-spinner-cli/src/mcp/service.rs +++ b/crates/fidget-spinner-cli/src/mcp/service.rs @@ -3,11 +3,11 @@ use std::fs; use camino::{Utf8Path, Utf8PathBuf}; use fidget_spinner_core::{ - AdmissionState, AnnotationVisibility, CodeSnapshotRef, CommandRecipe, DiagnosticSeverity, - ExecutionBackend, FieldPresence, FieldRole, FieldValueType, FrontierContract, FrontierNote, - FrontierProjection, FrontierRecord, FrontierVerdict, InferencePolicy, MetricSpec, MetricUnit, - MetricValue, NodeAnnotation, NodeClass, NodePayload, NonEmptyText, ProjectFieldSpec, - ProjectSchema, RunDimensionValue, 
TagName, TagRecord, + AdmissionState, AnnotationVisibility, CommandRecipe, DiagnosticSeverity, ExecutionBackend, + FieldPresence, FieldRole, FieldValueType, FrontierContract, FrontierNote, FrontierProjection, + FrontierRecord, FrontierVerdict, InferencePolicy, MetricSpec, MetricUnit, MetricValue, + NodeAnnotation, NodeClass, NodePayload, NonEmptyText, ProjectFieldSpec, ProjectSchema, + RunDimensionValue, TagName, TagRecord, }; use fidget_spinner_store_sqlite::{ CloseExperimentRequest, CreateFrontierRequest, CreateNodeRequest, DefineMetricRequest, @@ -203,16 +203,6 @@ impl WorkerService { } "frontier.init" => { let args = deserialize::<FrontierInitToolArgs>(arguments)?; - let initial_checkpoint = self - .store - .auto_capture_checkpoint( - NonEmptyText::new( - args.seed_summary - .unwrap_or_else(|| "initial champion checkpoint".to_owned()), - ) - .map_err(store_fault("tools/call:frontier.init"))?, - ) - .map_err(store_fault("tools/call:frontier.init"))?; let projection = self .store .create_frontier(CreateFrontierRequest { @@ -251,7 +241,6 @@ impl WorkerService { promotion_criteria: crate::to_text_vec(args.promotion_criteria) .map_err(store_fault("tools/call:frontier.init"))?, }, - initial_checkpoint, }) .map_err(store_fault("tools/call:frontier.init"))?; tool_success( @@ -702,8 +691,6 @@ impl WorkerService { .open_experiment(OpenExperimentRequest { frontier_id: crate::parse_frontier_id(&args.frontier_id) .map_err(store_fault("tools/call:experiment.open"))?, - base_checkpoint_id: crate::parse_checkpoint_id(&args.base_checkpoint_id) - .map_err(store_fault("tools/call:experiment.open"))?, hypothesis_node_id: crate::parse_node_id(&args.hypothesis_node_id) .map_err(store_fault("tools/call:experiment.open"))?, title: NonEmptyText::new(args.title) @@ -763,33 +750,11 @@ impl WorkerService { } "experiment.close" => { let args = deserialize::<ExperimentCloseToolArgs>(arguments)?; - let snapshot = self - .store - .auto_capture_checkpoint( - 
NonEmptyText::new(args.candidate_summary.clone()) - .map_err(store_fault("tools/call:experiment.close"))?, - ) - .map_err(store_fault("tools/call:experiment.close"))? - .map(|seed| seed.snapshot) - .ok_or_else(|| { - FaultRecord::new( - FaultKind::Internal, - FaultStage::Store, - "tools/call:experiment.close", - format!( - "git repository inspection failed for {}", - self.store.project_root() - ), - ) - })?; let receipt = self .store .close_experiment(CloseExperimentRequest { experiment_id: crate::parse_experiment_id(&args.experiment_id) .map_err(store_fault("tools/call:experiment.close"))?, - candidate_summary: NonEmptyText::new(args.candidate_summary) - .map_err(store_fault("tools/call:experiment.close"))?, - candidate_snapshot: snapshot, run_title: NonEmptyText::new(args.run.title) .map_err(store_fault("tools/call:experiment.close"))?, run_summary: args @@ -810,10 +775,6 @@ impl WorkerService { self.store.project_root(), ) .map_err(store_fault("tools/call:experiment.close"))?, - code_snapshot: Some( - capture_code_snapshot(self.store.project_root()) - .map_err(store_fault("tools/call:experiment.close"))?, - ), primary_metric: metric_value_from_wire(args.primary_metric) .map_err(store_fault("tools/call:experiment.close"))?, supporting_metrics: args @@ -1346,8 +1307,8 @@ fn experiment_close_output( let concise = json!({ "experiment_id": receipt.experiment.id, "frontier_id": receipt.experiment.frontier_id, - "candidate_checkpoint_id": receipt.experiment.candidate_checkpoint_id, - "verdict": format!("{:?}", receipt.experiment.verdict).to_ascii_lowercase(), + "experiment_title": receipt.experiment.title, + "verdict": metric_verdict_name(receipt.experiment.verdict), "run_id": receipt.run.run_id, "hypothesis_node_id": receipt.experiment.hypothesis_node_id, "decision_node_id": receipt.decision_node.id, @@ -1362,11 +1323,11 @@ fn experiment_close_output( "closed experiment {} on frontier {}", receipt.experiment.id, receipt.experiment.frontier_id ), + format!("title: {}", 
receipt.experiment.title), format!("hypothesis: {}", receipt.experiment.hypothesis_node_id), - format!("candidate: {}", receipt.experiment.candidate_checkpoint_id), format!( "verdict: {}", - format!("{:?}", receipt.experiment.verdict).to_ascii_lowercase() + metric_verdict_name(receipt.experiment.verdict) ), format!( "primary metric: {}", @@ -1393,7 +1354,6 @@ fn experiment_open_output( let concise = json!({ "experiment_id": item.id, "frontier_id": item.frontier_id, - "base_checkpoint_id": item.base_checkpoint_id, "hypothesis_node_id": item.hypothesis_node_id, "title": item.title, "summary": item.summary, @@ -1405,7 +1365,6 @@ fn experiment_open_output( format!("{action} {}", item.id), format!("frontier: {}", item.frontier_id), format!("hypothesis: {}", item.hypothesis_node_id), - format!("base checkpoint: {}", item.base_checkpoint_id), format!("title: {}", item.title), item.summary .as_ref() @@ -1426,7 +1385,6 @@ fn experiment_list_output(items: &[OpenExperimentSummary]) -> Result<ToolOutput, json!({ "experiment_id": item.id, "frontier_id": item.frontier_id, - "base_checkpoint_id": item.base_checkpoint_id, "hypothesis_node_id": item.hypothesis_node_id, "title": item.title, "summary": item.summary, @@ -1436,8 +1394,8 @@ fn experiment_list_output(items: &[OpenExperimentSummary]) -> Result<ToolOutput, let mut lines = vec![format!("{} open experiment(s)", items.len())]; lines.extend(items.iter().map(|item| { format!( - "{} {} | hypothesis={} | checkpoint={}", - item.id, item.title, item.hypothesis_node_id, item.base_checkpoint_id, + "{} {} | hypothesis={}", + item.id, item.title, item.hypothesis_node_id, ) })); detailed_tool_output( @@ -1511,12 +1469,11 @@ fn metric_best_output( "value": item.value, "order": item.order.as_str(), "experiment_id": item.experiment_id, + "experiment_title": item.experiment_title, "frontier_id": item.frontier_id, "hypothesis_node_id": item.hypothesis_node_id, "hypothesis_title": item.hypothesis_title, "verdict": 
metric_verdict_name(item.verdict), - "candidate_checkpoint_id": item.candidate_checkpoint_id, - "candidate_commit_hash": item.candidate_commit_hash, "run_id": item.run_id, "unit": item.unit.map(metric_unit_name), "objective": item.objective.map(metric_objective_name), @@ -1527,15 +1484,14 @@ fn metric_best_output( let mut lines = vec![format!("{} ranked experiment(s)", items.len())]; lines.extend(items.iter().enumerate().map(|(index, item)| { format!( - "{}. {}={} [{}] {} | verdict={} | commit={} | checkpoint={}", + "{}. {}={} [{}] {} | verdict={} | hypothesis={}", index + 1, item.key, item.value, item.source.as_str(), - item.hypothesis_title, + item.experiment_title, metric_verdict_name(item.verdict), - item.candidate_commit_hash, - item.candidate_checkpoint_id, + item.hypothesis_title, ) })); lines.extend( @@ -1668,17 +1624,13 @@ fn frontier_projection_summary_value(projection: &FrontierProjection) -> Value { "frontier_id": projection.frontier.id, "label": projection.frontier.label, "status": format!("{:?}", projection.frontier.status).to_ascii_lowercase(), - "champion_checkpoint_id": projection.champion_checkpoint_id, - "candidate_checkpoint_ids": projection.candidate_checkpoint_ids, - "experiment_count": projection.experiment_count, + "open_experiment_count": projection.open_experiment_count, + "completed_experiment_count": projection.completed_experiment_count, + "verdict_counts": projection.verdict_counts, }) } fn frontier_projection_text(prefix: &str, projection: &FrontierProjection) -> String { - let champion = projection - .champion_checkpoint_id - .map(|value| value.to_string()) - .unwrap_or_else(|| "none".to_owned()); [ format!( "{prefix} {} {}", @@ -1688,9 +1640,18 @@ fn frontier_projection_text(prefix: &str, projection: &FrontierProjection) -> St "status: {}", format!("{:?}", projection.frontier.status).to_ascii_lowercase() ), - format!("champion: {champion}"), - format!("candidates: {}", projection.candidate_checkpoint_ids.len()), - 
format!("experiments: {}", projection.experiment_count), + format!("open experiments: {}", projection.open_experiment_count), + format!( + "completed experiments: {}", + projection.completed_experiment_count + ), + format!( + "verdicts: accepted={} kept={} parked={} rejected={}", + projection.verdict_counts.accepted, + projection.verdict_counts.kept, + projection.verdict_counts.parked, + projection.verdict_counts.rejected, + ), ] .join("\n") } @@ -1991,11 +1952,10 @@ fn metric_objective_name(objective: fidget_spinner_core::OptimizationObjective) fn metric_verdict_name(verdict: FrontierVerdict) -> &'static str { match verdict { - FrontierVerdict::PromoteToChampion => "promote_to_champion", - FrontierVerdict::KeepOnFrontier => "keep_on_frontier", - FrontierVerdict::RevertToChampion => "revert_to_champion", - FrontierVerdict::ArchiveDeadEnd => "archive_dead_end", - FrontierVerdict::NeedsMoreEvidence => "needs_more_evidence", + FrontierVerdict::Accepted => "accepted", + FrontierVerdict::Kept => "kept", + FrontierVerdict::Parked => "parked", + FrontierVerdict::Rejected => "rejected", } } @@ -2192,10 +2152,6 @@ fn command_recipe_from_wire( .map_err(StoreError::from) } -fn capture_code_snapshot(project_root: &Utf8Path) -> Result<CodeSnapshotRef, StoreError> { - crate::capture_code_snapshot(project_root) -} - fn parse_node_class_name(raw: &str) -> Result<NodeClass, StoreError> { match raw { "contract" => Ok(NodeClass::Contract), @@ -2311,11 +2267,10 @@ fn parse_backend_name(raw: &str) -> Result<ExecutionBackend, StoreError> { fn parse_verdict_name(raw: &str) -> Result<FrontierVerdict, StoreError> { match raw { - "promote_to_champion" => Ok(FrontierVerdict::PromoteToChampion), - "keep_on_frontier" => Ok(FrontierVerdict::KeepOnFrontier), - "revert_to_champion" => Ok(FrontierVerdict::RevertToChampion), - "archive_dead_end" => Ok(FrontierVerdict::ArchiveDeadEnd), - "needs_more_evidence" => Ok(FrontierVerdict::NeedsMoreEvidence), + "accepted" => Ok(FrontierVerdict::Accepted), + 
"kept" => Ok(FrontierVerdict::Kept), + "parked" => Ok(FrontierVerdict::Parked), + "rejected" => Ok(FrontierVerdict::Rejected), other => Err(crate::invalid_input(format!("unknown verdict `{other}`"))), } } @@ -2342,7 +2297,6 @@ struct FrontierInitToolArgs { primary_metric: WireMetricSpec, #[serde(default)] supporting_metrics: Vec<WireMetricSpec>, - seed_summary: Option<String>, } #[derive(Debug, Deserialize)] @@ -2480,7 +2434,6 @@ struct MetricBestToolArgs { #[derive(Debug, Deserialize)] struct ExperimentOpenToolArgs { frontier_id: String, - base_checkpoint_id: String, hypothesis_node_id: String, title: String, summary: Option<String>, @@ -2499,7 +2452,6 @@ struct ExperimentReadToolArgs { #[derive(Debug, Deserialize)] struct ExperimentCloseToolArgs { experiment_id: String, - candidate_summary: String, run: WireRun, primary_metric: WireMetricValue, #[serde(default)] |