path: root/crates/fidget-spinner-cli/src/main.rs
author    main <main@swarm.moe>  2026-03-19 22:28:01 -0400
committer main <main@swarm.moe>  2026-03-19 22:28:01 -0400
commit    f706910944ee8abe7b27a248596f7705059969d9 (patch)
tree      6a071e88b59146e10117f562fd28496bb821fc65  /crates/fidget-spinner-cli/src/main.rs
parent    352fb5f089e74bf47b60c6221594b9c22defe251 (diff)
download  fidget_spinner-f706910944ee8abe7b27a248596f7705059969d9.zip
Polish MCP ingest and schema surfaces
Diffstat (limited to 'crates/fidget-spinner-cli/src/main.rs')
-rw-r--r--  crates/fidget-spinner-cli/src/main.rs  533
1 file changed, 491 insertions, 42 deletions
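
This commit replaces the colon-separated metric flags with `key=value` pairs for both metrics and run dimensions. As a hedged, test-style sketch of the new parsing helpers introduced in the diff below (illustrative values only; this block is not part of the commit and assumes the helpers compile as shown):

#[cfg(test)]
mod key_value_format_sketch {
    // Sketch only: exercises `parse_metric_value` and
    // `parse_dimension_assignments` as defined in this diff.
    use super::*;

    #[test]
    fn metrics_and_dimensions_are_key_value_pairs() {
        // `--primary-metric` / `--metric` now take `key=value`.
        let metric = parse_metric_value("tokens_per_second=1417.5".to_owned()).unwrap();
        assert_eq!(metric.value, 1417.5);

        // The old `key:unit:objective:value` spelling is rejected.
        assert!(parse_metric_value("tokens_per_second:count:maximize:1417.5".to_owned()).is_err());

        // Repeated `--dimension` flags are parsed into a key -> raw-value map.
        let dims = parse_dimension_assignments(vec!["batch_size=32".to_owned()]).unwrap();
        assert_eq!(dims.get("batch_size").map(String::as_str), Some("32"));
    }
}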
diff --git a/crates/fidget-spinner-cli/src/main.rs b/crates/fidget-spinner-cli/src/main.rs
index fe4cb5f..3ad9534 100644
--- a/crates/fidget-spinner-cli/src/main.rs
+++ b/crates/fidget-spinner-cli/src/main.rs
@@ -10,13 +10,16 @@ use std::path::{Path, PathBuf};
use camino::{Utf8Path, Utf8PathBuf};
use clap::{Args, Parser, Subcommand, ValueEnum};
use fidget_spinner_core::{
- AnnotationVisibility, CodeSnapshotRef, CommandRecipe, ExecutionBackend, FrontierContract,
- FrontierNote, FrontierVerdict, GitCommitHash, MetricObservation, MetricSpec, MetricUnit,
- NodeAnnotation, NodeClass, NodePayload, NonEmptyText, OptimizationObjective, TagName,
+ AnnotationVisibility, CodeSnapshotRef, CommandRecipe, DiagnosticSeverity, ExecutionBackend,
+ FieldPresence, FieldRole, FieldValueType, FrontierContract, FrontierNote, FrontierVerdict,
+ GitCommitHash, InferencePolicy, MetricSpec, MetricUnit, MetricValue, NodeAnnotation, NodeClass,
+ NodePayload, NonEmptyText, OptimizationObjective, ProjectFieldSpec, TagName,
};
use fidget_spinner_store_sqlite::{
- CloseExperimentRequest, CreateFrontierRequest, CreateNodeRequest, EdgeAttachment,
- EdgeAttachmentDirection, ListNodesQuery, ProjectStore, StoreError,
+ CloseExperimentRequest, CreateFrontierRequest, CreateNodeRequest, DefineMetricRequest,
+ DefineRunDimensionRequest, EdgeAttachment, EdgeAttachmentDirection, ListNodesQuery,
+ MetricBestQuery, MetricFieldSource, MetricKeyQuery, MetricRankOrder, ProjectStore,
+ RemoveSchemaFieldRequest, StoreError, UpsertSchemaFieldRequest,
};
use serde::Serialize;
use serde_json::{Map, Value, json};
@@ -61,6 +64,16 @@ enum Command {
},
/// Record off-path research and enabling work.
Research(ResearchCommand),
+ /// Inspect rankable metrics across closed experiments.
+ Metric {
+ #[command(subcommand)]
+ command: MetricCommand,
+ },
+ /// Define and inspect run dimensions used to slice experiment metrics.
+ Dimension {
+ #[command(subcommand)]
+ command: DimensionCommand,
+ },
/// Close a core-path experiment atomically.
Experiment {
#[command(subcommand)]
@@ -100,6 +113,10 @@ struct InitArgs {
enum SchemaCommand {
/// Show the current project schema as JSON.
Show(ProjectArg),
+ /// Add or replace one project schema field definition.
+ UpsertField(SchemaFieldUpsertArgs),
+ /// Remove one project schema field definition.
+ RemoveField(SchemaFieldRemoveArgs),
}
#[derive(Subcommand)]
@@ -169,8 +186,10 @@ struct NodeAddArgs {
#[arg(long)]
title: String,
#[arg(long)]
+ /// Required for `note` and `research` nodes.
summary: Option<String>,
#[arg(long = "payload-json")]
+ /// JSON object payload. `note` and `research` nodes require a non-empty `body` string.
payload_json: Option<String>,
#[arg(long = "payload-file")]
payload_file: Option<PathBuf>,
@@ -270,6 +289,74 @@ enum ResearchSubcommand {
Add(QuickResearchArgs),
}
+#[derive(Subcommand)]
+enum MetricCommand {
+ /// Register a project-level metric definition.
+ Define(MetricDefineArgs),
+ /// List rankable numeric keys observed in completed experiments.
+ Keys(MetricKeysArgs),
+ /// Rank completed experiments by one numeric key.
+ Best(MetricBestArgs),
+ /// Re-run the idempotent legacy metric-plane normalization.
+ Migrate(ProjectArg),
+}
+
+#[derive(Subcommand)]
+enum DimensionCommand {
+ /// Register a project-level run dimension definition.
+ Define(DimensionDefineArgs),
+ /// List run dimensions and sample values observed in completed runs.
+ List(ProjectArg),
+}
+
+#[derive(Args)]
+struct MetricDefineArgs {
+ #[command(flatten)]
+ project: ProjectArg,
+ /// Metric key used in experiment closure and ranking.
+ #[arg(long)]
+ key: String,
+ /// Canonical unit for this metric key.
+ #[arg(long, value_enum)]
+ unit: CliMetricUnit,
+ /// Optimization direction for this metric key.
+ #[arg(long, value_enum)]
+ objective: CliOptimizationObjective,
+ /// Optional human description shown in metric listings.
+ #[arg(long)]
+ description: Option<String>,
+}
+
+#[derive(Args)]
+struct MetricKeysArgs {
+ #[command(flatten)]
+ project: ProjectArg,
+ /// Restrict results to one frontier.
+ #[arg(long)]
+ frontier: Option<String>,
+ /// Restrict results to one metric source.
+ #[arg(long, value_enum)]
+ source: Option<CliMetricSource>,
+ /// Exact run-dimension filter in the form `key=value`.
+ #[arg(long = "dimension")]
+ dimensions: Vec<String>,
+}
+
+#[derive(Args)]
+struct DimensionDefineArgs {
+ #[command(flatten)]
+ project: ProjectArg,
+ /// Run-dimension key used to slice experiments.
+ #[arg(long)]
+ key: String,
+ /// Canonical value type for this run dimension.
+ #[arg(long = "type", value_enum)]
+ value_type: CliFieldValueType,
+ /// Optional human description shown in dimension listings.
+ #[arg(long)]
+ description: Option<String>,
+}
+
#[derive(Args)]
struct QuickNoteArgs {
#[command(flatten)]
@@ -279,6 +366,8 @@ struct QuickNoteArgs {
#[arg(long)]
title: String,
#[arg(long)]
+ summary: String,
+ #[arg(long)]
body: String,
#[command(flatten)]
tag_selection: ExplicitTagSelectionArgs,
@@ -305,13 +394,69 @@ struct QuickResearchArgs {
#[arg(long)]
title: String,
#[arg(long)]
- body: String,
+ summary: String,
#[arg(long)]
- summary: Option<String>,
+ body: String,
+ #[command(flatten)]
+ tag_selection: ExplicitTagSelectionArgs,
#[arg(long = "parent")]
parents: Vec<String>,
}
+#[derive(Args)]
+struct SchemaFieldUpsertArgs {
+ #[command(flatten)]
+ project: ProjectArg,
+ #[arg(long)]
+ name: String,
+ #[arg(long = "class", value_enum)]
+ classes: Vec<CliNodeClass>,
+ #[arg(long, value_enum)]
+ presence: CliFieldPresence,
+ #[arg(long, value_enum)]
+ severity: CliDiagnosticSeverity,
+ #[arg(long, value_enum)]
+ role: CliFieldRole,
+ #[arg(long = "inference", value_enum)]
+ inference_policy: CliInferencePolicy,
+ #[arg(long = "type", value_enum)]
+ value_type: Option<CliFieldValueType>,
+}
+
+#[derive(Args)]
+struct SchemaFieldRemoveArgs {
+ #[command(flatten)]
+ project: ProjectArg,
+ #[arg(long)]
+ name: String,
+ #[arg(long = "class", value_enum)]
+ classes: Vec<CliNodeClass>,
+}
+
+#[derive(Args)]
+struct MetricBestArgs {
+ #[command(flatten)]
+ project: ProjectArg,
+ /// Metric key to rank on.
+ #[arg(long)]
+ key: String,
+ /// Restrict results to one frontier.
+ #[arg(long)]
+ frontier: Option<String>,
+ /// Restrict results to one metric source.
+ #[arg(long, value_enum)]
+ source: Option<CliMetricSource>,
+ /// Explicit ordering for sources whose objective cannot be inferred.
+ #[arg(long, value_enum)]
+ order: Option<CliMetricOrder>,
+ /// Exact run-dimension filter in the form `key=value`.
+ #[arg(long = "dimension")]
+ dimensions: Vec<String>,
+ /// Maximum number of ranked experiments to return.
+ #[arg(long, default_value_t = 10)]
+ limit: u32,
+}
+
#[derive(Subcommand)]
enum ExperimentCommand {
/// Close a core-path experiment with checkpoint, run, note, and verdict.
@@ -348,24 +493,23 @@ struct ExperimentCloseArgs {
run_title: String,
#[arg(long = "run-summary")]
run_summary: Option<String>,
- #[arg(long = "benchmark-suite")]
- benchmark_suite: String,
+ /// Repeat for each run dimension as `key=value`.
+ #[arg(long = "dimension")]
+ dimensions: Vec<String>,
#[arg(long = "backend", value_enum, default_value_t = CliExecutionBackend::Worktree)]
backend: CliExecutionBackend,
#[arg(long = "cwd")]
working_directory: Option<PathBuf>,
+ /// Repeat for each argv token passed to the recorded command.
#[arg(long = "argv")]
argv: Vec<String>,
+ /// Repeat for each environment override as `KEY=VALUE`.
#[arg(long = "env")]
env: Vec<String>,
- #[arg(long = "primary-metric-key")]
- primary_metric_key: String,
- #[arg(long = "primary-metric-unit", value_enum)]
- primary_metric_unit: CliMetricUnit,
- #[arg(long = "primary-metric-objective", value_enum)]
- primary_metric_objective: CliOptimizationObjective,
- #[arg(long = "primary-metric-value")]
- primary_metric_value: f64,
+ /// Primary metric in the form `key=value`; key must be preregistered.
+ #[arg(long = "primary-metric")]
+ primary_metric: String,
+ /// Supporting metric in the form `key=value`; repeat as needed.
#[arg(long = "metric")]
metrics: Vec<String>,
#[arg(long)]
@@ -475,6 +619,57 @@ enum CliExecutionBackend {
}
#[derive(Clone, Copy, Debug, Eq, PartialEq, ValueEnum)]
+enum CliMetricSource {
+ RunMetric,
+ ChangePayload,
+ RunPayload,
+ AnalysisPayload,
+ DecisionPayload,
+}
+
+#[derive(Clone, Copy, Debug, Eq, PartialEq, ValueEnum)]
+enum CliMetricOrder {
+ Asc,
+ Desc,
+}
+
+#[derive(Clone, Copy, Debug, Eq, PartialEq, ValueEnum)]
+enum CliFieldValueType {
+ String,
+ Numeric,
+ Boolean,
+ Timestamp,
+}
+
+#[derive(Clone, Copy, Debug, Eq, PartialEq, ValueEnum)]
+enum CliDiagnosticSeverity {
+ Error,
+ Warning,
+ Info,
+}
+
+#[derive(Clone, Copy, Debug, Eq, PartialEq, ValueEnum)]
+enum CliFieldPresence {
+ Required,
+ Recommended,
+ Optional,
+}
+
+#[derive(Clone, Copy, Debug, Eq, PartialEq, ValueEnum)]
+enum CliFieldRole {
+ Index,
+ ProjectionGate,
+ RenderOnly,
+ Opaque,
+}
+
+#[derive(Clone, Copy, Debug, Eq, PartialEq, ValueEnum)]
+enum CliInferencePolicy {
+ ManualOnly,
+ ModelMayInfer,
+}
+
+#[derive(Clone, Copy, Debug, Eq, PartialEq, ValueEnum)]
enum CliFrontierVerdict {
PromoteToChampion,
KeepOnFrontier,
@@ -499,6 +694,8 @@ fn run() -> Result<(), StoreError> {
let store = open_store(&project.project)?;
print_json(store.schema())
}
+ SchemaCommand::UpsertField(args) => run_schema_field_upsert(args),
+ SchemaCommand::RemoveField(args) => run_schema_field_remove(args),
},
Command::Frontier { command } => match command {
FrontierCommand::Init(args) => run_frontier_init(args),
@@ -521,6 +718,16 @@ fn run() -> Result<(), StoreError> {
Command::Research(command) => match command.command {
ResearchSubcommand::Add(args) => run_quick_research(args),
},
+ Command::Metric { command } => match command {
+ MetricCommand::Define(args) => run_metric_define(args),
+ MetricCommand::Keys(args) => run_metric_keys(args),
+ MetricCommand::Best(args) => run_metric_best(args),
+ MetricCommand::Migrate(project) => run_metric_migrate(project),
+ },
+ Command::Dimension { command } => match command {
+ DimensionCommand::Define(args) => run_dimension_define(args),
+ DimensionCommand::List(project) => run_dimension_list(project),
+ },
Command::Experiment { command } => match command {
ExperimentCommand::Close(args) => run_experiment_close(args),
},
@@ -597,27 +804,58 @@ fn run_frontier_status(args: FrontierStatusArgs) -> Result<(), StoreError> {
print_json(&frontiers)
}
+fn run_schema_field_upsert(args: SchemaFieldUpsertArgs) -> Result<(), StoreError> {
+ let mut store = open_store(&args.project.project)?;
+ let field = store.upsert_schema_field(UpsertSchemaFieldRequest {
+ name: NonEmptyText::new(args.name)?,
+ node_classes: parse_node_class_set(args.classes),
+ presence: args.presence.into(),
+ severity: args.severity.into(),
+ role: args.role.into(),
+ inference_policy: args.inference_policy.into(),
+ value_type: args.value_type.map(Into::into),
+ })?;
+ print_json(&json!({
+ "schema": store.schema().schema_ref(),
+ "field": schema_field_json(&field),
+ }))
+}
+
+fn run_schema_field_remove(args: SchemaFieldRemoveArgs) -> Result<(), StoreError> {
+ let mut store = open_store(&args.project.project)?;
+ let removed_count = store.remove_schema_field(RemoveSchemaFieldRequest {
+ name: NonEmptyText::new(args.name)?,
+ node_classes: (!args.classes.is_empty()).then(|| parse_node_class_set(args.classes)),
+ })?;
+ print_json(&json!({
+ "schema": store.schema().schema_ref(),
+ "removed_count": removed_count,
+ }))
+}
+
fn run_node_add(args: NodeAddArgs) -> Result<(), StoreError> {
let mut store = open_store(&args.project.project)?;
+ let class: NodeClass = args.class.into();
let frontier_id = args
.frontier
.as_deref()
.map(parse_frontier_id)
.transpose()?;
- let tags = optional_cli_tags(args.tag_selection, args.class == CliNodeClass::Note)?;
+ let tags = optional_cli_tags(args.tag_selection, class == NodeClass::Note)?;
let payload = load_payload(
store.schema().schema_ref(),
args.payload_json,
args.payload_file,
args.fields,
)?;
+ validate_cli_prose_payload(class, args.summary.as_deref(), &payload)?;
let annotations = args
.annotations
.into_iter()
.map(|body| Ok(NodeAnnotation::hidden(NonEmptyText::new(body)?)))
.collect::<Result<Vec<_>, StoreError>>()?;
let node = store.add_node(CreateNodeRequest {
- class: args.class.into(),
+ class,
frontier_id,
title: NonEmptyText::new(args.title)?,
summary: args.summary.map(NonEmptyText::new).transpose()?,
@@ -693,7 +931,7 @@ fn run_quick_note(args: QuickNoteArgs) -> Result<(), StoreError> {
.map(parse_frontier_id)
.transpose()?,
title: NonEmptyText::new(args.title)?,
- summary: None,
+ summary: Some(NonEmptyText::new(args.summary)?),
tags: Some(explicit_cli_tags(args.tag_selection)?),
payload,
annotations: Vec::new(),
@@ -730,8 +968,8 @@ fn run_quick_research(args: QuickResearchArgs) -> Result<(), StoreError> {
.map(parse_frontier_id)
.transpose()?,
title: NonEmptyText::new(args.title)?,
- summary: args.summary.map(NonEmptyText::new).transpose()?,
- tags: None,
+ summary: Some(NonEmptyText::new(args.summary)?),
+ tags: optional_cli_tags(args.tag_selection, false)?,
payload,
annotations: Vec::new(),
attachments: lineage_attachments(args.parents)?,
@@ -739,6 +977,69 @@ fn run_quick_research(args: QuickResearchArgs) -> Result<(), StoreError> {
print_json(&node)
}
+fn run_metric_define(args: MetricDefineArgs) -> Result<(), StoreError> {
+ let mut store = open_store(&args.project.project)?;
+ let record = store.define_metric(DefineMetricRequest {
+ key: NonEmptyText::new(args.key)?,
+ unit: args.unit.into(),
+ objective: args.objective.into(),
+ description: args.description.map(NonEmptyText::new).transpose()?,
+ })?;
+ print_json(&record)
+}
+
+fn run_metric_keys(args: MetricKeysArgs) -> Result<(), StoreError> {
+ let store = open_store(&args.project.project)?;
+ print_json(
+ &store.list_metric_keys_filtered(MetricKeyQuery {
+ frontier_id: args
+ .frontier
+ .as_deref()
+ .map(parse_frontier_id)
+ .transpose()?,
+ source: args.source.map(Into::into),
+ dimensions: coerce_cli_dimension_filters(&store, args.dimensions)?,
+ })?,
+ )
+}
+
+fn run_metric_best(args: MetricBestArgs) -> Result<(), StoreError> {
+ let store = open_store(&args.project.project)?;
+ let entries = store.best_metrics(MetricBestQuery {
+ key: NonEmptyText::new(args.key)?,
+ frontier_id: args
+ .frontier
+ .as_deref()
+ .map(parse_frontier_id)
+ .transpose()?,
+ source: args.source.map(Into::into),
+ dimensions: coerce_cli_dimension_filters(&store, args.dimensions)?,
+ order: args.order.map(Into::into),
+ limit: args.limit,
+ })?;
+ print_json(&entries)
+}
+
+fn run_metric_migrate(args: ProjectArg) -> Result<(), StoreError> {
+ let mut store = open_store(&args.project)?;
+ print_json(&store.migrate_metric_plane()?)
+}
+
+fn run_dimension_define(args: DimensionDefineArgs) -> Result<(), StoreError> {
+ let mut store = open_store(&args.project.project)?;
+ let record = store.define_run_dimension(DefineRunDimensionRequest {
+ key: NonEmptyText::new(args.key)?,
+ value_type: args.value_type.into(),
+ description: args.description.map(NonEmptyText::new).transpose()?,
+ })?;
+ print_json(&record)
+}
+
+fn run_dimension_list(args: ProjectArg) -> Result<(), StoreError> {
+ let store = open_store(&args.project)?;
+ print_json(&store.list_run_dimensions()?)
+}
+
fn run_experiment_close(args: ExperimentCloseArgs) -> Result<(), StoreError> {
let mut store = open_store(&args.project.project)?;
let frontier_id = parse_frontier_id(&args.frontier)?;
@@ -764,19 +1065,14 @@ fn run_experiment_close(args: ExperimentCloseArgs) -> Result<(), StoreError> {
run_title: NonEmptyText::new(args.run_title)?,
run_summary: args.run_summary.map(NonEmptyText::new).transpose()?,
backend: args.backend.into(),
- benchmark_suite: NonEmptyText::new(args.benchmark_suite)?,
+ dimensions: coerce_cli_dimension_filters(&store, args.dimensions)?,
command,
code_snapshot: Some(capture_code_snapshot(store.project_root())?),
- primary_metric: MetricObservation {
- metric_key: NonEmptyText::new(args.primary_metric_key)?,
- unit: args.primary_metric_unit.into(),
- objective: args.primary_metric_objective.into(),
- value: args.primary_metric_value,
- },
+ primary_metric: parse_metric_value(args.primary_metric)?,
supporting_metrics: args
.metrics
.into_iter()
- .map(parse_metric_observation)
+ .map(parse_metric_value)
.collect::<Result<Vec<_>, _>>()?,
note: FrontierNote {
summary: NonEmptyText::new(args.note)?,
@@ -1011,6 +1307,23 @@ fn load_payload(
Ok(NodePayload::with_schema(schema, map))
}
+fn validate_cli_prose_payload(
+ class: NodeClass,
+ summary: Option<&str>,
+ payload: &NodePayload,
+) -> Result<(), StoreError> {
+ if !matches!(class, NodeClass::Note | NodeClass::Research) {
+ return Ok(());
+ }
+ if summary.is_none() {
+ return Err(StoreError::ProseSummaryRequired(class));
+ }
+ match payload.field("body") {
+ Some(Value::String(body)) if !body.trim().is_empty() => Ok(()),
+ _ => Err(StoreError::ProseBodyRequired(class)),
+ }
+}
+
fn json_object(value: Value) -> Result<Map<String, Value>, StoreError> {
match value {
Value::Object(map) => Ok(map),
@@ -1020,6 +1333,22 @@ fn json_object(value: Value) -> Result<Map<String, Value>, StoreError> {
}
}
+fn schema_field_json(field: &ProjectFieldSpec) -> Value {
+ json!({
+ "name": field.name,
+ "node_classes": field.node_classes.iter().map(ToString::to_string).collect::<Vec<_>>(),
+ "presence": field.presence.as_str(),
+ "severity": field.severity.as_str(),
+ "role": field.role.as_str(),
+ "inference_policy": field.inference_policy.as_str(),
+ "value_type": field.value_type.map(FieldValueType::as_str),
+ })
+}
+
+fn parse_node_class_set(classes: Vec<CliNodeClass>) -> BTreeSet<NodeClass> {
+ classes.into_iter().map(Into::into).collect()
+}
+
fn capture_code_snapshot(project_root: &Utf8Path) -> Result<CodeSnapshotRef, StoreError> {
let head_commit = run_git(project_root, &["rev-parse", "HEAD"])?;
let dirty_paths = run_git(project_root, &["status", "--porcelain"])?
@@ -1084,23 +1413,71 @@ fn maybe_print_gitignore_hint(project_root: &Utf8Path) -> Result<(), StoreError>
}
}
-fn parse_metric_observation(raw: String) -> Result<MetricObservation, StoreError> {
- let parts = raw.split(':').collect::<Vec<_>>();
- if parts.len() != 4 {
- return Err(invalid_input(
- "metrics must look like key:unit:objective:value",
- ));
- }
- Ok(MetricObservation {
- metric_key: NonEmptyText::new(parts[0])?,
- unit: parse_metric_unit(parts[1])?,
- objective: parse_optimization_objective(parts[2])?,
- value: parts[3]
+fn parse_metric_value(raw: String) -> Result<MetricValue, StoreError> {
+ let Some((key, value)) = raw.split_once('=') else {
+ return Err(invalid_input("metrics must look like key=value"));
+ };
+ Ok(MetricValue {
+ key: NonEmptyText::new(key)?,
+ value: value
.parse::<f64>()
.map_err(|error| invalid_input(format!("invalid metric value: {error}")))?,
})
}
+fn coerce_cli_dimension_filters(
+ store: &ProjectStore,
+ raw_dimensions: Vec<String>,
+) -> Result<BTreeMap<NonEmptyText, fidget_spinner_core::RunDimensionValue>, StoreError> {
+ let definitions = store
+ .list_run_dimensions()?
+ .into_iter()
+ .map(|summary| (summary.key.to_string(), summary.value_type))
+ .collect::<BTreeMap<_, _>>();
+ let raw_dimensions = parse_dimension_assignments(raw_dimensions)?
+ .into_iter()
+ .map(|(key, raw_value)| {
+ let Some(value_type) = definitions.get(&key) else {
+ return Err(invalid_input(format!(
+ "unknown run dimension `{key}`; register it first"
+ )));
+ };
+ Ok((key, parse_cli_dimension_value(*value_type, &raw_value)?))
+ })
+ .collect::<Result<BTreeMap<_, _>, StoreError>>()?;
+ store.coerce_run_dimensions(raw_dimensions)
+}
+
+fn parse_dimension_assignments(
+ raw_dimensions: Vec<String>,
+) -> Result<BTreeMap<String, String>, StoreError> {
+ raw_dimensions
+ .into_iter()
+ .map(|raw| {
+ let Some((key, value)) = raw.split_once('=') else {
+ return Err(invalid_input("dimensions must look like key=value"));
+ };
+ Ok((key.to_owned(), value.to_owned()))
+ })
+ .collect()
+}
+
+fn parse_cli_dimension_value(value_type: FieldValueType, raw: &str) -> Result<Value, StoreError> {
+ match value_type {
+ FieldValueType::String | FieldValueType::Timestamp => Ok(Value::String(raw.to_owned())),
+ FieldValueType::Numeric => Ok(json!(raw.parse::<f64>().map_err(|error| {
+ invalid_input(format!("invalid numeric dimension value: {error}"))
+ })?)),
+ FieldValueType::Boolean => match raw {
+ "true" => Ok(Value::Bool(true)),
+ "false" => Ok(Value::Bool(false)),
+ other => Err(invalid_input(format!(
+ "invalid boolean dimension value `{other}`"
+ ))),
+ },
+ }
+}
+
fn parse_metric_unit(raw: &str) -> Result<MetricUnit, StoreError> {
match raw {
"seconds" => Ok(MetricUnit::Seconds),
@@ -1204,6 +1581,78 @@ impl From<CliExecutionBackend> for ExecutionBackend {
}
}
+impl From<CliMetricSource> for MetricFieldSource {
+ fn from(value: CliMetricSource) -> Self {
+ match value {
+ CliMetricSource::RunMetric => Self::RunMetric,
+ CliMetricSource::ChangePayload => Self::ChangePayload,
+ CliMetricSource::RunPayload => Self::RunPayload,
+ CliMetricSource::AnalysisPayload => Self::AnalysisPayload,
+ CliMetricSource::DecisionPayload => Self::DecisionPayload,
+ }
+ }
+}
+
+impl From<CliMetricOrder> for MetricRankOrder {
+ fn from(value: CliMetricOrder) -> Self {
+ match value {
+ CliMetricOrder::Asc => Self::Asc,
+ CliMetricOrder::Desc => Self::Desc,
+ }
+ }
+}
+
+impl From<CliFieldValueType> for FieldValueType {
+ fn from(value: CliFieldValueType) -> Self {
+ match value {
+ CliFieldValueType::String => Self::String,
+ CliFieldValueType::Numeric => Self::Numeric,
+ CliFieldValueType::Boolean => Self::Boolean,
+ CliFieldValueType::Timestamp => Self::Timestamp,
+ }
+ }
+}
+
+impl From<CliDiagnosticSeverity> for DiagnosticSeverity {
+ fn from(value: CliDiagnosticSeverity) -> Self {
+ match value {
+ CliDiagnosticSeverity::Error => Self::Error,
+ CliDiagnosticSeverity::Warning => Self::Warning,
+ CliDiagnosticSeverity::Info => Self::Info,
+ }
+ }
+}
+
+impl From<CliFieldPresence> for FieldPresence {
+ fn from(value: CliFieldPresence) -> Self {
+ match value {
+ CliFieldPresence::Required => Self::Required,
+ CliFieldPresence::Recommended => Self::Recommended,
+ CliFieldPresence::Optional => Self::Optional,
+ }
+ }
+}
+
+impl From<CliFieldRole> for FieldRole {
+ fn from(value: CliFieldRole) -> Self {
+ match value {
+ CliFieldRole::Index => Self::Index,
+ CliFieldRole::ProjectionGate => Self::ProjectionGate,
+ CliFieldRole::RenderOnly => Self::RenderOnly,
+ CliFieldRole::Opaque => Self::Opaque,
+ }
+ }
+}
+
+impl From<CliInferencePolicy> for InferencePolicy {
+ fn from(value: CliInferencePolicy) -> Self {
+ match value {
+ CliInferencePolicy::ManualOnly => Self::ManualOnly,
+ CliInferencePolicy::ModelMayInfer => Self::ModelMayInfer,
+ }
+ }
+}
+
impl From<CliFrontierVerdict> for FrontierVerdict {
fn from(value: CliFrontierVerdict) -> Self {
match value {
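
For the run-dimension coercion added above, a hedged sketch of the value typing (again illustrative and not part of the commit; it assumes `parse_cli_dimension_value` and the imported `FieldValueType` compile as shown):

#[cfg(test)]
mod dimension_coercion_sketch {
    // Sketch only: exercises `parse_cli_dimension_value` from this diff.
    use super::*;

    #[test]
    fn dimension_values_follow_their_declared_type() {
        // Numeric dimensions become JSON numbers.
        assert_eq!(
            parse_cli_dimension_value(FieldValueType::Numeric, "32").unwrap(),
            serde_json::json!(32.0)
        );
        // Booleans accept only the literals `true` and `false`.
        assert_eq!(
            parse_cli_dimension_value(FieldValueType::Boolean, "true").unwrap(),
            Value::Bool(true)
        );
        assert!(parse_cli_dimension_value(FieldValueType::Boolean, "yes").is_err());
        // Strings and timestamps pass through unchanged.
        assert_eq!(
            parse_cli_dimension_value(FieldValueType::Timestamp, "2026-03-19T22:28:01-04:00").unwrap(),
            Value::String("2026-03-19T22:28:01-04:00".to_owned())
        );
    }
}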