swarm repositories / source
summaryrefslogtreecommitdiff
path: root/crates
diff options
context:
space:
mode:
authormain <main@swarm.moe>2026-03-19 15:49:41 -0400
committermain <main@swarm.moe>2026-03-19 15:49:41 -0400
commitfa1bd32800b65aab31ea732dd240261b4047522c (patch)
tree2fd08af6f36b8beb3c7c941990becc1a0a091d62 /crates
downloadadequate-rust-mcp-1.0.0.zip
Release adequate-rust-mcp 1.0.0v1.0.0
Diffstat (limited to 'crates')
-rw-r--r--crates/adequate-rust-mcp/.gitignore1
-rw-r--r--crates/adequate-rust-mcp/Cargo.toml33
-rw-r--r--crates/adequate-rust-mcp/src/host/binary.rs54
-rw-r--r--crates/adequate-rust-mcp/src/host/config.rs195
-rw-r--r--crates/adequate-rust-mcp/src/host/mod.rs12
-rw-r--r--crates/adequate-rust-mcp/src/host/process.rs253
-rw-r--r--crates/adequate-rust-mcp/src/host/protocol.rs61
-rw-r--r--crates/adequate-rust-mcp/src/host/runtime.rs696
-rw-r--r--crates/adequate-rust-mcp/src/host/telemetry.rs97
-rw-r--r--crates/adequate-rust-mcp/src/host/tests.rs338
-rw-r--r--crates/adequate-rust-mcp/src/main.rs88
-rw-r--r--crates/adequate-rust-mcp/src/worker/clippy.rs196
-rw-r--r--crates/adequate-rust-mcp/src/worker/diagnostics.rs486
-rw-r--r--crates/adequate-rust-mcp/src/worker/errors.rs150
-rw-r--r--crates/adequate-rust-mcp/src/worker/input.rs452
-rw-r--r--crates/adequate-rust-mcp/src/worker/mod.rs73
-rw-r--r--crates/adequate-rust-mcp/src/worker/porcelain.rs488
-rw-r--r--crates/adequate-rust-mcp/src/worker/schema.rs496
-rw-r--r--crates/adequate-rust-mcp/src/worker/server.rs744
-rw-r--r--crates/adequate-rust-mcp/src/worker/telemetry.rs152
-rw-r--r--crates/adequate-rust-mcp/src/worker/tests.rs809
-rw-r--r--crates/adequate-rust-mcp/src/worker/workspace.rs313
-rw-r--r--crates/adequate-rust-mcp/tests/diagnostics_warmup_retry.rs403
-rw-r--r--crates/adequate-rust-mcp/tests/e2e_gauntlet.rs926
-rw-r--r--crates/adequate-rust-mcp/tests/host_inflight_replay.rs657
-rw-r--r--crates/adequate-rust-mcp/tests/worktree_workspace_rebind.rs383
-rw-r--r--crates/ra-mcp-domain/.gitignore1
-rw-r--r--crates/ra-mcp-domain/Cargo.toml21
-rw-r--r--crates/ra-mcp-domain/src/fault.rs129
-rw-r--r--crates/ra-mcp-domain/src/lib.rs5
-rw-r--r--crates/ra-mcp-domain/src/lifecycle.rs259
-rw-r--r--crates/ra-mcp-domain/src/types.rs460
-rw-r--r--crates/ra-mcp-engine/.gitignore1
-rw-r--r--crates/ra-mcp-engine/Cargo.toml28
-rw-r--r--crates/ra-mcp-engine/src/bin/fake-rust-analyzer.rs467
-rw-r--r--crates/ra-mcp-engine/src/config.rs79
-rw-r--r--crates/ra-mcp-engine/src/error.rs77
-rw-r--r--crates/ra-mcp-engine/src/lib.rs20
-rw-r--r--crates/ra-mcp-engine/src/lsp_transport.rs717
-rw-r--r--crates/ra-mcp-engine/src/supervisor.rs1257
-rw-r--r--crates/ra-mcp-engine/tests/engine_recovery.rs353
41 files changed, 12430 insertions, 0 deletions
diff --git a/crates/adequate-rust-mcp/.gitignore b/crates/adequate-rust-mcp/.gitignore
new file mode 100644
index 0000000..ea8c4bf
--- /dev/null
+++ b/crates/adequate-rust-mcp/.gitignore
@@ -0,0 +1 @@
+/target
diff --git a/crates/adequate-rust-mcp/Cargo.toml b/crates/adequate-rust-mcp/Cargo.toml
new file mode 100644
index 0000000..9702887
--- /dev/null
+++ b/crates/adequate-rust-mcp/Cargo.toml
@@ -0,0 +1,33 @@
+[package]
+name = "adequate-rust-mcp"
+categories.workspace = true
+description = "Durable host/worker MCP server for rust-analyzer with replay-aware recovery and porcelain-first tool output."
+edition.workspace = true
+keywords.workspace = true
+license.workspace = true
+readme.workspace = true
+repository.workspace = true
+rust-version.workspace = true
+version.workspace = true
+
+[dependencies]
+libmcp.workspace = true
+ra-mcp-domain = { path = "../ra-mcp-domain" }
+ra-mcp-engine = { path = "../ra-mcp-engine" }
+notify.workspace = true
+rmcp.workspace = true
+schemars.workspace = true
+serde.workspace = true
+serde_json.workspace = true
+tokio.workspace = true
+toml.workspace = true
+tracing.workspace = true
+tracing-subscriber.workspace = true
+url.workspace = true
+
+[dev-dependencies]
+serial_test.workspace = true
+tempfile.workspace = true
+
+[lints]
+workspace = true
diff --git a/crates/adequate-rust-mcp/src/host/binary.rs b/crates/adequate-rust-mcp/src/host/binary.rs
new file mode 100644
index 0000000..0e35d64
--- /dev/null
+++ b/crates/adequate-rust-mcp/src/host/binary.rs
@@ -0,0 +1,54 @@
+use notify::{Event, EventKind};
+use std::{
+ io,
+ path::Path,
+ time::{Duration, SystemTime},
+};
+
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub(super) struct BinaryFingerprint {
+ length: u64,
+ modified_nanos_since_epoch: u128,
+}
+
+impl BinaryFingerprint {
+ pub(super) fn capture(path: &Path) -> io::Result<Self> {
+ let metadata = std::fs::metadata(path)?;
+ let modified = metadata.modified().unwrap_or(SystemTime::UNIX_EPOCH);
+ let nanos = modified
+ .duration_since(SystemTime::UNIX_EPOCH)
+ .unwrap_or(Duration::ZERO)
+ .as_nanos();
+ Ok(Self {
+ length: metadata.len(),
+ modified_nanos_since_epoch: nanos,
+ })
+ }
+}
+
+pub(super) fn should_consider_watch_event(event: &Event) -> bool {
+ matches!(
+ event.kind,
+ EventKind::Create(_)
+ | EventKind::Modify(_)
+ | EventKind::Remove(_)
+ | EventKind::Any
+ | EventKind::Other
+ )
+}
+
+pub(super) fn event_targets_binary(
+ event: &Event,
+ worker_binary: &Path,
+ worker_binary_canonical: Option<&Path>,
+) -> bool {
+ if event.paths.is_empty() {
+ return false;
+ }
+ event.paths.iter().any(|path| {
+ path == worker_binary
+ || std::fs::canonicalize(path).ok().is_some_and(|candidate| {
+ worker_binary_canonical.is_some_and(|known| known == candidate)
+ })
+ })
+}
diff --git a/crates/adequate-rust-mcp/src/host/config.rs b/crates/adequate-rust-mcp/src/host/config.rs
new file mode 100644
index 0000000..d00c60e
--- /dev/null
+++ b/crates/adequate-rust-mcp/src/host/config.rs
@@ -0,0 +1,195 @@
+use std::{cmp::min, io, path::PathBuf, time::Duration};
+
// Default bound on frames queued while no worker can accept traffic.
const DEFAULT_QUEUE_CAPACITY: usize = 512;
// Cadence of the host maintenance tick (reload debounce + respawn checks).
pub(super) const EVENT_TICK_INTERVAL: Duration = Duration::from_millis(50);
// Quiet period after a watcher hit before the binary is re-fingerprinted.
const DEFAULT_RELOAD_DEBOUNCE: Duration = Duration::from_millis(250);
// Bounds of the exponential worker-respawn backoff.
const DEFAULT_RESPAWN_FLOOR: Duration = Duration::from_millis(100);
const DEFAULT_RESPAWN_CEILING: Duration = Duration::from_secs(3);
// Replay budget handed to the session kernel (`ReplayBudget::max_attempts`).
const DEFAULT_MAX_REPLAY_ATTEMPTS: u8 = 8;
// Snapshot cadence — presumably one snapshot per N telemetry records;
// confirm against the telemetry module.
const DEFAULT_TELEMETRY_SNAPSHOT_EVERY: u64 = 100;
+
/// Immutable host-process settings, resolved once at startup from
/// `ADEQUATE_MCP_*` environment variables (see `from_env`).
#[derive(Debug, Clone)]
pub(super) struct HostConfig {
    // Worker executable to spawn and watch for rebuilds.
    pub(super) worker_binary: PathBuf,
    pub(super) workspace_root: PathBuf,
    // JSONL telemetry log destination (under the XDG state home).
    pub(super) telemetry_path: PathBuf,
    pub(super) telemetry_snapshot_every: u64,
    // Max frames held while no worker can accept traffic.
    pub(super) queue_capacity: usize,
    // Quiet period between a watcher hit and the fingerprint re-check.
    pub(super) reload_debounce: Duration,
    // Exponential respawn backoff bounds.
    pub(super) respawn_floor: Duration,
    pub(super) respawn_ceiling: Duration,
    pub(super) max_replay_attempts: u8,
}
+
+impl HostConfig {
+ pub(super) fn from_env() -> io::Result<Self> {
+ let worker_binary = read_worker_binary_from_env()?;
+ let workspace_root = read_workspace_root_from_env()?;
+ let telemetry_path = resolve_telemetry_jsonl_path()?;
+ let telemetry_snapshot_every = read_u64_env(
+ "ADEQUATE_MCP_TELEMETRY_SNAPSHOT_EVERY",
+ DEFAULT_TELEMETRY_SNAPSHOT_EVERY,
+ );
+ let queue_capacity =
+ read_usize_env("ADEQUATE_MCP_HOST_QUEUE_CAPACITY", DEFAULT_QUEUE_CAPACITY);
+ let reload_debounce = read_duration_env(
+ "ADEQUATE_MCP_HOST_RELOAD_DEBOUNCE_MS",
+ DEFAULT_RELOAD_DEBOUNCE,
+ );
+ let respawn_floor =
+ read_duration_env("ADEQUATE_MCP_HOST_RESPAWN_FLOOR_MS", DEFAULT_RESPAWN_FLOOR);
+ let respawn_ceiling = read_duration_env(
+ "ADEQUATE_MCP_HOST_RESPAWN_CEILING_MS",
+ DEFAULT_RESPAWN_CEILING,
+ );
+ let max_replay_attempts = read_u8_env(
+ "ADEQUATE_MCP_HOST_MAX_REPLAY_ATTEMPTS",
+ DEFAULT_MAX_REPLAY_ATTEMPTS,
+ );
+ Ok(Self {
+ worker_binary,
+ workspace_root,
+ telemetry_path,
+ telemetry_snapshot_every,
+ queue_capacity,
+ reload_debounce,
+ respawn_floor,
+ respawn_ceiling,
+ max_replay_attempts,
+ })
+ }
+}
+
+#[derive(Debug, Clone)]
+pub(super) struct RespawnBackoff {
+ floor: Duration,
+ ceiling: Duration,
+ next_delay: Duration,
+}
+
+impl RespawnBackoff {
+ pub(super) fn new(floor: Duration, ceiling: Duration) -> Self {
+ Self {
+ floor,
+ ceiling,
+ next_delay: floor,
+ }
+ }
+
+ pub(super) fn reset(&mut self) {
+ self.next_delay = self.floor;
+ }
+
+ pub(super) fn consume_delay(&mut self) -> Duration {
+ let current = self.next_delay;
+ let doubled = current.checked_mul(2).unwrap_or(self.ceiling);
+ self.next_delay = min(doubled, self.ceiling);
+ current
+ }
+}
+
/// Worker binary path: `ADEQUATE_MCP_WORKER_BINARY` when set to a
/// non-empty value, otherwise this very executable (the worker is spawned
/// from the same binary with `--worker`).
fn read_worker_binary_from_env() -> io::Result<PathBuf> {
    std::env::var_os("ADEQUATE_MCP_WORKER_BINARY")
        .filter(|value| !value.is_empty())
        .map_or_else(std::env::current_exe, |value| Ok(PathBuf::from(value)))
}
+
/// Workspace root: `ADEQUATE_MCP_WORKSPACE_ROOT` when set to a non-empty
/// value, otherwise the host's current working directory.
fn read_workspace_root_from_env() -> io::Result<PathBuf> {
    std::env::var_os("ADEQUATE_MCP_WORKSPACE_ROOT")
        .filter(|value| !value.is_empty())
        .map_or_else(std::env::current_dir, |value| Ok(PathBuf::from(value)))
}
+
+fn resolve_telemetry_jsonl_path() -> io::Result<PathBuf> {
+ let state_home = telemetry_state_home(
+ std::env::var_os("XDG_STATE_HOME").map(PathBuf::from),
+ std::env::var_os("HOME").map(PathBuf::from),
+ );
+ let Some(state_home) = state_home else {
+ return Err(io::Error::new(
+ io::ErrorKind::NotFound,
+ "cannot resolve XDG state home for telemetry log",
+ ));
+ };
+ Ok(state_home.join("adequate-rust-mcp").join("telemetry.jsonl"))
+}
+
/// Pick the state directory: an explicit `$XDG_STATE_HOME` wins;
/// otherwise derive `<home>/.local/state`; `None` when neither input is
/// present.
fn telemetry_state_home(xdg_state_home: Option<PathBuf>, home: Option<PathBuf>) -> Option<PathBuf> {
    match (xdg_state_home, home) {
        (Some(explicit), _) => Some(explicit),
        (None, Some(home)) => Some(home.join(".local").join("state")),
        (None, None) => None,
    }
}
+
/// Millisecond duration from env var `name`; `default` when the variable
/// is unset, non-unicode, or not a base-10 `u64`.
fn read_duration_env(name: &str, default: Duration) -> Duration {
    std::env::var(name)
        .ok()
        .and_then(|raw| raw.parse::<u64>().ok())
        .map_or(default, Duration::from_millis)
}
+
/// `usize` from env var `name`; `default` when unset, non-unicode, or
/// unparseable.
fn read_usize_env(name: &str, default: usize) -> usize {
    std::env::var(name)
        .ok()
        .and_then(|raw| raw.parse::<usize>().ok())
        .unwrap_or(default)
}
+
/// `u8` from env var `name`; `default` when unset, non-unicode, or
/// unparseable (including out-of-range values such as 300).
fn read_u8_env(name: &str, default: u8) -> u8 {
    std::env::var(name)
        .ok()
        .and_then(|raw| raw.parse::<u8>().ok())
        .unwrap_or(default)
}
+
/// `u64` from env var `name`; `default` when unset, non-unicode, or
/// unparseable.
fn read_u64_env(name: &str, default: u64) -> u64 {
    std::env::var(name)
        .ok()
        .and_then(|raw| raw.parse::<u64>().ok())
        .unwrap_or(default)
}
+
#[cfg(test)]
mod tests {
    use super::telemetry_state_home;
    use std::path::PathBuf;

    // Explicit XDG_STATE_HOME wins over the HOME-derived fallback.
    #[test]
    fn telemetry_path_prefers_xdg_state_home() {
        let resolved = telemetry_state_home(
            Some(PathBuf::from("/xdg-state")),
            Some(PathBuf::from("/home/main")),
        );
        assert_eq!(resolved, Some(PathBuf::from("/xdg-state")));
    }

    // Without XDG_STATE_HOME, the state home is <home>/.local/state.
    #[test]
    fn telemetry_path_falls_back_to_home_local_state() {
        let expected = PathBuf::from("/home/main").join(".local").join("state");
        let resolved = telemetry_state_home(None, Some(PathBuf::from("/home/main")));
        assert_eq!(resolved, Some(expected));
    }
}
diff --git a/crates/adequate-rust-mcp/src/host/mod.rs b/crates/adequate-rust-mcp/src/host/mod.rs
new file mode 100644
index 0000000..b5209a6
--- /dev/null
+++ b/crates/adequate-rust-mcp/src/host/mod.rs
@@ -0,0 +1,12 @@
//! Stable host process that proxies MCP frames to a hot-swappable worker process.

// Fingerprinting and watch-event filtering for the worker binary on disk.
mod binary;
// Environment-driven settings plus the respawn backoff policy.
mod config;
// Worker child-process lifecycle, reader tasks, and host event plumbing.
mod process;
// Frame classification helpers layered over `libmcp`.
mod protocol;
// The event loop that owns the session kernel and drives everything above.
mod runtime;
// Tool-outcome telemetry logging.
mod telemetry;
#[cfg(test)]
mod tests;

pub(crate) use runtime::run_host;
diff --git a/crates/adequate-rust-mcp/src/host/process.rs b/crates/adequate-rust-mcp/src/host/process.rs
new file mode 100644
index 0000000..60296df
--- /dev/null
+++ b/crates/adequate-rust-mcp/src/host/process.rs
@@ -0,0 +1,253 @@
+use notify::{Event, RecommendedWatcher, RecursiveMode, Watcher};
+use std::{io, path::Path, process::Stdio};
+use tokio::{
+ io::BufReader,
+ process::{Child, ChildStdin, ChildStdout, Command},
+ sync::mpsc,
+ task::JoinHandle,
+};
+use tracing::debug;
+
+use super::protocol::{FrameReadOutcome, FramedMessage, RequestId, read_frame};
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
+pub(super) struct WorkerGeneration(u64);
+
+impl WorkerGeneration {
+ pub(super) const fn get(self) -> u64 {
+ self.0
+ }
+}
+
+#[derive(Debug, Default)]
+pub(super) struct GenerationCounter {
+ current: u64,
+}
+
+impl GenerationCounter {
+ pub(super) fn next(&mut self) -> WorkerGeneration {
+ self.current = self.current.saturating_add(1);
+ WorkerGeneration(self.current)
+ }
+}
+
// Whether the current worker can take live client traffic yet.
#[derive(Debug, Clone)]
pub(super) enum ActiveWorkerReadiness {
    // Handshake complete (or never needed): client frames flow through.
    Ready,
    // The host re-sent the recorded `initialize` request to a hot-swapped
    // worker and is waiting for the matching response before forwarding
    // anything else; the saved `initialized` notification (if any) is
    // replayed right after the response arrives.
    ReplayingInitialize {
        request_id: RequestId,
        initialized_notification: Option<Vec<u8>>,
    },
}

// The currently live worker process together with its readiness state.
#[derive(Debug)]
pub(super) struct ActiveWorker {
    pub(super) process: WorkerProcess,
    pub(super) readiness: ActiveWorkerReadiness,
}
+
+impl ActiveWorker {
+ pub(super) fn generation(&self) -> WorkerGeneration {
+ self.process.generation
+ }
+
+ pub(super) fn is_ready(&self) -> bool {
+ matches!(self.readiness, ActiveWorkerReadiness::Ready)
+ }
+}
+
// A spawned worker child plus the host-side handles that talk to it.
#[derive(Debug)]
pub(super) struct WorkerProcess {
    // Generation stamped on every event produced by this process.
    generation: WorkerGeneration,
    child: Child,
    // Host writes framed requests here.
    pub(super) stdin: ChildStdin,
    // Task pumping the child's stdout into the host event channel.
    reader_task: JoinHandle<()>,
}
+
+impl WorkerProcess {
+ pub(super) async fn terminate(mut self) {
+ self.reader_task.abort();
+ if self.child.id().is_some() {
+ let kill_result = self.child.kill().await;
+ if let Err(error) = kill_result {
+ debug!(
+ generation = self.generation.get(),
+ "failed to kill worker process cleanly: {error}"
+ );
+ }
+ }
+ let wait_result = self.child.wait().await;
+ if let Err(error) = wait_result {
+ debug!(
+ generation = self.generation.get(),
+ "failed to wait for worker process: {error}"
+ );
+ }
+ }
+}
+
// Everything the host event loop can be woken by, funneled through one
// unbounded channel so the runtime can `select!` on a single receiver.
#[derive(Debug)]
pub(super) enum HostEvent {
    // A parsed frame arrived from the MCP client on host stdin.
    ClientFrame(FramedMessage),
    // Client closed stdin (EOF).
    ClientClosed,
    // Reading or parsing client input failed.
    ClientFault(io::Error),
    // A parsed frame arrived from the worker's stdout.
    WorkerFrame {
        generation: WorkerGeneration,
        frame: FramedMessage,
    },
    // Worker stdout failed, hit EOF, or produced an unparseable frame.
    WorkerFault {
        generation: WorkerGeneration,
        error: io::Error,
    },
    // Filesystem notification from the binary watcher.
    Watcher(notify::Result<Event>),
}
+
+pub(super) fn spawn_binary_watcher(
+ worker_binary: &Path,
+ event_tx: mpsc::UnboundedSender<HostEvent>,
+) -> io::Result<RecommendedWatcher> {
+ let parent = worker_binary.parent().ok_or_else(|| {
+ io::Error::new(
+ io::ErrorKind::InvalidInput,
+ format!(
+ "worker binary path has no parent directory: {}",
+ worker_binary.display()
+ ),
+ )
+ })?;
+ let callback_tx = event_tx.clone();
+ let watcher_result = notify::recommended_watcher(move |event| {
+ let sent = callback_tx.send(HostEvent::Watcher(event));
+ if let Err(error) = sent {
+ drop(error);
+ }
+ });
+ let mut watcher = watcher_result.map_err(|error| io::Error::other(error.to_string()))?;
+ let watch_result = watcher.watch(parent, RecursiveMode::NonRecursive);
+ watch_result.map_err(|error| io::Error::other(error.to_string()))?;
+ Ok(watcher)
+}
+
+pub(super) async fn spawn_worker_process(
+ worker_binary: &Path,
+ generation: WorkerGeneration,
+ event_tx: mpsc::UnboundedSender<HostEvent>,
+) -> io::Result<WorkerProcess> {
+ let mut command = Command::new(worker_binary);
+ let _configured = command
+ .arg("--worker")
+ .stdin(Stdio::piped())
+ .stdout(Stdio::piped())
+ .stderr(Stdio::inherit());
+
+ let spawn = command.spawn();
+ let mut child = spawn?;
+
+ let stdin = child.stdin.take().ok_or_else(|| {
+ io::Error::new(
+ io::ErrorKind::BrokenPipe,
+ "worker process is missing stdin pipe",
+ )
+ })?;
+ let stdout = child.stdout.take().ok_or_else(|| {
+ io::Error::new(
+ io::ErrorKind::BrokenPipe,
+ "worker process is missing stdout pipe",
+ )
+ })?;
+ let reader_task = spawn_worker_reader(stdout, generation, event_tx);
+
+ Ok(WorkerProcess {
+ generation,
+ child,
+ stdin,
+ reader_task,
+ })
+}
+
+pub(super) fn spawn_client_reader(event_tx: mpsc::UnboundedSender<HostEvent>) -> JoinHandle<()> {
+ tokio::spawn(async move {
+ let stdin = tokio::io::stdin();
+ let mut reader = BufReader::new(stdin);
+ loop {
+ match read_frame(&mut reader).await {
+ Ok(FrameReadOutcome::Frame(payload)) => match FramedMessage::parse(payload) {
+ Ok(frame) => {
+ let sent = event_tx.send(HostEvent::ClientFrame(frame));
+ if let Err(error) = sent {
+ drop(error);
+ break;
+ }
+ }
+ Err(error) => {
+ let sent = event_tx.send(HostEvent::ClientFault(error));
+ if let Err(send_error) = sent {
+ drop(send_error);
+ }
+ break;
+ }
+ },
+ Ok(FrameReadOutcome::EndOfStream) => {
+ let sent = event_tx.send(HostEvent::ClientClosed);
+ if let Err(error) = sent {
+ drop(error);
+ }
+ break;
+ }
+ Err(error) => {
+ let sent = event_tx.send(HostEvent::ClientFault(error));
+ if let Err(send_error) = sent {
+ drop(send_error);
+ }
+ break;
+ }
+ }
+ }
+ })
+}
+
+fn spawn_worker_reader(
+ stdout: ChildStdout,
+ generation: WorkerGeneration,
+ event_tx: mpsc::UnboundedSender<HostEvent>,
+) -> JoinHandle<()> {
+ tokio::spawn(async move {
+ let mut reader = BufReader::new(stdout);
+ loop {
+ match read_frame(&mut reader).await {
+ Ok(FrameReadOutcome::Frame(payload)) => match FramedMessage::parse(payload) {
+ Ok(frame) => {
+ let sent = event_tx.send(HostEvent::WorkerFrame { generation, frame });
+ if let Err(error) = sent {
+ drop(error);
+ break;
+ }
+ }
+ Err(error) => {
+ let sent = event_tx.send(HostEvent::WorkerFault { generation, error });
+ if let Err(send_error) = sent {
+ drop(send_error);
+ }
+ break;
+ }
+ },
+ Ok(FrameReadOutcome::EndOfStream) => {
+ let error =
+ io::Error::new(io::ErrorKind::UnexpectedEof, "worker stdout reached EOF");
+ let sent = event_tx.send(HostEvent::WorkerFault { generation, error });
+ if let Err(send_error) = sent {
+ drop(send_error);
+ }
+ break;
+ }
+ Err(error) => {
+ let sent = event_tx.send(HostEvent::WorkerFault { generation, error });
+ if let Err(send_error) = sent {
+ drop(send_error);
+ }
+ break;
+ }
+ }
+ }
+ })
+}
diff --git a/crates/adequate-rust-mcp/src/host/protocol.rs b/crates/adequate-rust-mcp/src/host/protocol.rs
new file mode 100644
index 0000000..43742b0
--- /dev/null
+++ b/crates/adequate-rust-mcp/src/host/protocol.rs
@@ -0,0 +1,61 @@
+use libmcp::{ReplayContract, normalize_ascii_token};
+use serde_json::Value;
+
+pub(super) use libmcp::ReplayContract as EffectReplayContract;
+pub(super) use libmcp::{
+ FrameReadOutcome, FramedMessage, RequestId, RpcEnvelopeKind, parse_tool_call_meta, read_frame,
+ write_frame,
+};
+
+pub(super) fn classify_replay_contract(
+ frame: &FramedMessage,
+ rpc_method: &str,
+) -> EffectReplayContract {
+ if rpc_method != "tools/call" {
+ return ReplayContract::Convergent;
+ }
+
+ let tool_name = frame
+ .value
+ .get("params")
+ .and_then(Value::as_object)
+ .and_then(|params| params.get("name"))
+ .and_then(Value::as_str);
+ let Some(tool_name) = tool_name else {
+ return ReplayContract::ProbeRequired;
+ };
+
+ match normalize_ascii_token(tool_name).as_str() {
+ "hover" | "definition" | "references" | "renamesymbol" | "diagnostics"
+ | "clippydiagnostics" | "healthsnapshot" | "telemetrysnapshot" => {
+ ReplayContract::Convergent
+ }
+ "fixeverything" => ReplayContract::ProbeRequired,
+ "advancedlsprequest" => classify_advanced_lsp_replay_contract(frame),
+ _ => ReplayContract::ProbeRequired,
+ }
+}
+
+fn classify_advanced_lsp_replay_contract(frame: &FramedMessage) -> EffectReplayContract {
+ let method = frame
+ .value
+ .get("params")
+ .and_then(Value::as_object)
+ .and_then(|params| params.get("arguments"))
+ .and_then(Value::as_object)
+ .and_then(|arguments| {
+ arguments
+ .get("method")
+ .or_else(|| arguments.get("lsp_method"))
+ .or_else(|| arguments.get("lspMethod"))
+ })
+ .and_then(Value::as_str);
+ let Some(method) = method else {
+ return ReplayContract::ProbeRequired;
+ };
+
+ match normalize_ascii_token(method).as_str() {
+ "workspaceexecutecommand" => ReplayContract::ProbeRequired,
+ _ => ReplayContract::Convergent,
+ }
+}
diff --git a/crates/adequate-rust-mcp/src/host/runtime.rs b/crates/adequate-rust-mcp/src/host/runtime.rs
new file mode 100644
index 0000000..34b45cf
--- /dev/null
+++ b/crates/adequate-rust-mcp/src/host/runtime.rs
@@ -0,0 +1,696 @@
+use libmcp::{
+ HostRejection, HostSessionKernel, HostSessionKernelSnapshot, PendingRequest, ReplayBudget,
+ RestoredHostSessionKernel, load_snapshot_file_from_env, remove_snapshot_file,
+ write_snapshot_file,
+};
+use notify::{Event, RecommendedWatcher};
+use serde_json::{Value, json};
+#[cfg(unix)]
+use std::os::unix::process::CommandExt;
+use std::{
+ fs, io,
+ path::{Path, PathBuf},
+ process::Command,
+};
+use tokio::{sync::mpsc, task::JoinHandle, time::Instant};
+use tracing::{Level, debug, info, warn};
+
+use super::{
+ binary::{BinaryFingerprint, event_targets_binary, should_consider_watch_event},
+ config::{EVENT_TICK_INTERVAL, HostConfig, RespawnBackoff},
+ process::{
+ ActiveWorker, ActiveWorkerReadiness, GenerationCounter, HostEvent, WorkerGeneration,
+ spawn_binary_watcher, spawn_client_reader, spawn_worker_process,
+ },
+ protocol::{
+ FramedMessage, RequestId, RpcEnvelopeKind, classify_replay_contract, parse_tool_call_meta,
+ write_frame,
+ },
+ telemetry::{
+ TelemetryLog, ToolOutcome, classify_tool_outcome_from_response, duration_millis_u64,
+ host_tool_error_detail,
+ },
+};
+
// Loop verdict from `handle_event`: keep serving or tear everything down.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum HostControl {
    Continue,
    Exit,
}
+
/// Env var through which a re-exec'ed host finds its serialized session
/// snapshot.
const HOST_REEXEC_STATE_ENV: &str = "ADEQUATE_MCP_HOST_REEXEC_STATE";

/// Replace the current host process image with `binary`, pointing the new
/// process at the session snapshot in `state_path` via the env var above.
///
/// On success this never returns; the returned `Err` is therefore always
/// an `exec` failure.
#[cfg(unix)]
fn reexec_host_process(binary: &Path, state_path: &Path) -> io::Result<()> {
    // `exec` only returns on failure, so reaching the `Err` is the error path.
    Err(Command::new(binary)
        .env(HOST_REEXEC_STATE_ENV, state_path)
        .exec())
}

/// Non-unix stub: self-rollout via `exec` is unavailable here, so callers
/// fall back to a worker-only restart.
#[cfg(not(unix))]
fn reexec_host_process(_binary: &Path, _state_path: &Path) -> io::Result<()> {
    Err(io::Error::new(
        io::ErrorKind::Unsupported,
        "host self-reexec requires unix process exec support",
    ))
}
+
// All mutable state owned by the host event loop.
struct HostRuntime {
    config: HostConfig,
    telemetry: TelemetryLog,
    // Single funnel for client/worker/watcher events (see `HostEvent`).
    events: mpsc::UnboundedReceiver<HostEvent>,
    // Cloned into every reader task and watcher callback.
    event_tx: mpsc::UnboundedSender<HostEvent>,
    client_reader_task: JoinHandle<()>,
    // Held to keep the filesystem watcher alive for the host's lifetime.
    watcher: RecommendedWatcher,
    client_stdout: tokio::io::Stdout,
    // The live worker, if one is currently spawned.
    worker: Option<ActiveWorker>,
    // Handshake capture, pending-request tracking, and replay queue.
    session_kernel: HostSessionKernel,
    generation_counter: GenerationCounter,
    respawn_backoff: RespawnBackoff,
    // When set, a respawn attempt is due at/after this instant.
    respawn_due: Option<Instant>,
    // Set by watcher events; debounced before the fingerprint re-check.
    reload_requested_at: Option<Instant>,
    // Last observed fingerprint of the worker binary (None if unreadable).
    binary_fingerprint: Option<BinaryFingerprint>,
    worker_binary_canonical: Option<PathBuf>,
}
+
+impl HostRuntime {
    /// Main event loop: multiplex a periodic maintenance tick (debounced
    /// reload checks, scheduled respawns) with the host event channel,
    /// until the client disconnects/faults or the channel closes.
    async fn run(&mut self) -> io::Result<()> {
        let mut ticker = tokio::time::interval(EVENT_TICK_INTERVAL);
        loop {
            tokio::select! {
                _ = ticker.tick() => {
                    self.on_tick().await?;
                }
                maybe_event = self.events.recv() => {
                    let Some(event) = maybe_event else {
                        break;
                    };
                    let control = self.handle_event(event).await?;
                    if control == HostControl::Exit {
                        break;
                    }
                }
            }
        }

        self.shutdown().await;
        Ok(())
    }

    /// Best-effort teardown: flush the telemetry snapshot, stop the client
    /// reader, and terminate any live worker.
    async fn shutdown(&mut self) {
        let snapshot = self.telemetry.write_hot_paths_snapshot();
        if let Err(error) = snapshot {
            warn!("telemetry snapshot flush failed: {error}");
        }
        self.client_reader_task.abort();
        let worker = self.worker.take();
        if let Some(worker) = worker {
            worker.process.terminate().await;
        }
        // The watcher lives in `self`; this just documents that it must
        // stay alive until the runtime itself is dropped.
        let _keep_watcher_alive = &self.watcher;
    }

    /// Dispatch one event; only client EOF/faults ask the loop to exit.
    async fn handle_event(&mut self, event: HostEvent) -> io::Result<HostControl> {
        match event {
            HostEvent::ClientFrame(frame) => {
                self.on_client_frame(frame).await?;
                Ok(HostControl::Continue)
            }
            HostEvent::ClientClosed => Ok(HostControl::Exit),
            HostEvent::ClientFault(error) => {
                warn!("client transport fault: {error}");
                Ok(HostControl::Exit)
            }
            HostEvent::WorkerFrame { generation, frame } => {
                self.on_worker_frame(generation, frame).await?;
                Ok(HostControl::Continue)
            }
            HostEvent::WorkerFault { generation, error } => {
                self.on_worker_fault(generation, error).await?;
                Ok(HostControl::Continue)
            }
            HostEvent::Watcher(event) => {
                self.on_watcher_event(event);
                Ok(HostControl::Continue)
            }
        }
    }
+
    /// Periodic maintenance: run the debounced reload check once the
    /// watcher-requested quiet period has elapsed, and perform a due
    /// respawn (spawn failures re-arm with exponential backoff).
    async fn on_tick(&mut self) -> io::Result<()> {
        if let Some(requested_at) = self.reload_requested_at {
            let elapsed = Instant::now().saturating_duration_since(requested_at);
            if elapsed >= self.config.reload_debounce {
                self.execute_reload_if_binary_changed().await?;
            }
        }

        if let Some(respawn_due) = self.respawn_due
            && Instant::now() >= respawn_due
        {
            self.respawn_due = None;
            // A worker may have been spawned by another path in the
            // meantime; never spawn a second one.
            if self.worker.is_none() {
                let spawn = self.spawn_worker().await;
                if let Err(error) = spawn {
                    warn!("worker spawn failed: {error}");
                    self.schedule_respawn_with_backoff();
                }
            }
        }
        Ok(())
    }

    // Arm the next respawn after the current backoff delay (doubling it
    // for the attempt after that).
    fn schedule_respawn_with_backoff(&mut self) {
        let delay = self.respawn_backoff.consume_delay();
        self.respawn_due = Some(Instant::now() + delay);
    }

    // Arm a respawn for the next tick, bypassing backoff.
    fn schedule_immediate_spawn(&mut self) {
        self.respawn_due = Some(Instant::now());
    }

    /// Kill the worker, requeue its in-flight requests for replay, and
    /// schedule a fresh spawn without backoff — used for deliberate
    /// restarts (e.g. a failed initialize replay) rather than crashes.
    async fn restart_worker_immediately(&mut self) -> io::Result<()> {
        self.terminate_worker().await;
        self.requeue_pending_for_replay().await?;
        self.schedule_immediate_spawn();
        Ok(())
    }

    // Terminate and forget the live worker, if any.
    async fn terminate_worker(&mut self) {
        let worker = self.worker.take();
        if let Some(worker) = worker {
            worker.process.terminate().await;
        }
    }
+
    /// Spawn the next worker generation. If the session kernel holds a
    /// recorded `initialize` handshake, re-send it and enter the
    /// `ReplayingInitialize` state (client traffic stays queued until the
    /// worker answers); otherwise the worker is ready at once and any
    /// queued backlog is drained immediately.
    async fn spawn_worker(&mut self) -> io::Result<()> {
        let generation = self.generation_counter.next();
        let process = spawn_worker_process(
            &self.config.worker_binary,
            generation,
            self.event_tx.clone(),
        )
        .await?;
        let mut worker = ActiveWorker {
            process,
            readiness: ActiveWorkerReadiness::Ready,
        };

        let replay_seed = self.session_kernel.replay_seed();
        if let Some(seed) = replay_seed {
            let replay_write =
                write_frame(&mut worker.process.stdin, &seed.initialize_request.payload).await;
            if let Err(error) = replay_write {
                // A worker we cannot even seed is useless; reap it and fail.
                worker.process.terminate().await;
                return Err(error);
            }
            worker.readiness = ActiveWorkerReadiness::ReplayingInitialize {
                request_id: seed.initialize_request.id,
                initialized_notification: seed.initialized_notification,
            };
            info!(
                generation = generation.get(),
                "spawned worker and started initialize replay"
            );
        } else {
            info!(generation = generation.get(), "spawned worker");
        }

        self.worker = Some(worker);
        self.respawn_backoff.reset();
        // Refresh binary identity so the next watcher event is diffed
        // against the binary we actually launched.
        self.worker_binary_canonical = fs::canonicalize(&self.config.worker_binary).ok();
        self.binary_fingerprint = BinaryFingerprint::capture(&self.config.worker_binary).ok();

        if self.worker_accepts_client_traffic() {
            self.drain_queue().await?;
        }
        Ok(())
    }

    // True when a worker exists and has finished any initialize replay.
    fn worker_accepts_client_traffic(&self) -> bool {
        self.worker.as_ref().is_some_and(ActiveWorker::is_ready)
    }

    fn active_generation(&self) -> Option<WorkerGeneration> {
        self.worker.as_ref().map(ActiveWorker::generation)
    }

    // Guards against events from already-replaced worker processes.
    fn is_active_generation(&self, generation: WorkerGeneration) -> bool {
        self.active_generation() == Some(generation)
    }
+
    /// Record a reload request when a watch event plausibly touches the
    /// worker binary. Watcher faults also request a reload — re-checking
    /// the fingerprint is cheap and safer than missing a rebuild.
    fn on_watcher_event(&mut self, event: notify::Result<Event>) {
        match event {
            Ok(event) => {
                if should_consider_watch_event(&event)
                    && event_targets_binary(
                        &event,
                        &self.config.worker_binary,
                        self.worker_binary_canonical.as_deref(),
                    )
                {
                    self.reload_requested_at = Some(Instant::now());
                }
            }
            Err(error) => {
                warn!("watcher callback fault: {error}");
                self.reload_requested_at = Some(Instant::now());
            }
        }
    }

    /// Debounced reload check: re-fingerprint the binary and, only if it
    /// actually changed, attempt a full host self-rollout (logging and
    /// falling back to a worker-only restart if that fails). A
    /// missing/unreadable binary keeps the request pending — it is likely
    /// mid-rebuild.
    async fn execute_reload_if_binary_changed(&mut self) -> io::Result<()> {
        let fingerprint = BinaryFingerprint::capture(&self.config.worker_binary);
        match fingerprint {
            Ok(fingerprint) => {
                let changed = self.binary_fingerprint.as_ref() != Some(&fingerprint);
                if changed {
                    self.binary_fingerprint = Some(fingerprint);
                    self.worker_binary_canonical =
                        fs::canonicalize(&self.config.worker_binary).ok();
                    self.reload_requested_at = None;
                    info!("detected worker binary update, rolling host forward");
                    let rollout = self.roll_host_forward().await;
                    if let Err(error) = rollout {
                        warn!(
                            "host self-rollout failed, falling back to worker-only restart: {error}"
                        );
                    }
                } else {
                    // Spurious watcher hit; clear the request.
                    self.reload_requested_at = None;
                }
            }
            Err(error) if error.kind() == io::ErrorKind::NotFound => {
                debug!("worker binary temporarily missing during reload check");
                self.reload_requested_at = Some(Instant::now());
            }
            Err(error) => {
                warn!("unable to fingerprint worker binary for reload: {error}");
                self.reload_requested_at = Some(Instant::now());
            }
        }
        Ok(())
    }

    /// Replace the whole host process with the freshly built binary:
    /// terminate the worker, requeue in-flight work, persist a session
    /// snapshot to disk, and `exec` the new binary pointed at it.
    ///
    /// On success this never returns. On failure the snapshot file is
    /// removed and an immediate worker spawn is scheduled so the old host
    /// keeps serving.
    async fn roll_host_forward(&mut self) -> io::Result<()> {
        self.terminate_worker().await;
        self.requeue_pending_for_replay().await?;
        let snapshot = self.session_kernel.snapshot();
        let state_path = write_snapshot_file("adequate-rust-mcp-host-reexec", &snapshot)?;
        let reexec = reexec_host_process(&self.config.worker_binary, &state_path);
        // Only reached when `exec` failed; drop the now-unused snapshot.
        if let Err(error) = remove_snapshot_file(&state_path) {
            warn!("failed to clean up host reexec snapshot after failed rollout: {error}");
        }
        self.schedule_immediate_spawn();
        reexec
    }
+
    /// Entry point for every client frame: let the session kernel observe
    /// it first, then either forward it to a ready worker or queue it for
    /// later draining/replay.
    async fn on_client_frame(&mut self, frame: FramedMessage) -> io::Result<()> {
        self.session_kernel.observe_client_frame(&frame);
        if self.worker_accepts_client_traffic() {
            let forward = self.forward_client_frame(frame).await;
            if let Err(error) = forward {
                self.handle_worker_failure(error, "failed forwarding client frame")
                    .await?;
            }
            return Ok(());
        }
        self.enqueue_client_frame(frame).await
    }

    /// Queue a frame while no ready worker exists; a kernel rejection
    /// (e.g. the queue-capacity bound) is answered immediately with an
    /// error response. Queuing also kicks off a spawn when none is live
    /// or scheduled.
    async fn enqueue_client_frame(&mut self, frame: FramedMessage) -> io::Result<()> {
        if let Err(reason) = self
            .session_kernel
            .queue_client_frame(frame.clone(), self.config.queue_capacity)
        {
            self.respond_rejected_frame(&frame, reason).await?;
            return Ok(());
        }
        if self.worker.is_none() && self.respawn_due.is_none() {
            self.schedule_immediate_spawn();
        }
        Ok(())
    }

    /// Answer a rejected frame with an error response — but only if it
    /// was a request (notifications carry no id to answer).
    async fn respond_rejected_frame(
        &mut self,
        frame: &FramedMessage,
        reason: HostRejection,
    ) -> io::Result<()> {
        if let RpcEnvelopeKind::Request { id, .. } = frame.classify() {
            self.emit_error_response(&id, reason).await?;
            self.record_host_error_for_rejected_frame(&id, frame, reason, 0);
        }
        Ok(())
    }

    /// Forward one frame to the worker; requests are first recorded with
    /// their replay contract so they can be re-driven after a crash.
    async fn forward_client_frame(&mut self, frame: FramedMessage) -> io::Result<()> {
        if let RpcEnvelopeKind::Request { ref method, .. } = frame.classify() {
            let replay_contract = classify_replay_contract(&frame, method.as_str());
            self.session_kernel
                .record_forwarded_request(&frame, replay_contract);
        }

        self.write_to_worker(&frame.payload).await
    }

    /// Flush queued client frames to the worker until the queue empties or
    /// the worker stops accepting traffic (e.g. it died mid-drain).
    async fn drain_queue(&mut self) -> io::Result<()> {
        loop {
            if !self.worker_accepts_client_traffic() {
                break;
            }
            let frame = self.session_kernel.pop_queued_frame();
            let Some(frame) = frame else {
                break;
            };
            let forward = self.forward_client_frame(frame).await;
            if let Err(error) = forward {
                self.handle_worker_failure(error, "failed while draining client queue")
                    .await?;
                break;
            }
        }
        Ok(())
    }
+
    /// Handles one frame read from the active worker's stdout.
    ///
    /// Frames from superseded worker generations are dropped. The
    /// host-initiated `initialize` replay response gets special handling: on
    /// error the worker is restarted immediately; on success the buffered
    /// `initialized` notification is re-sent, the worker is marked ready, and
    /// the queued client traffic is drained. Ordinary responses are matched
    /// against the pending-request table, recorded in telemetry, and forwarded
    /// to the client; responses with unknown ids are dropped. Worker-initiated
    /// requests/notifications are forwarded only while the worker accepts
    /// client traffic.
    async fn on_worker_frame(
        &mut self,
        generation: WorkerGeneration,
        frame: FramedMessage,
    ) -> io::Result<()> {
        if !self.is_active_generation(generation) {
            // Stale generation: a newer worker owns the session now.
            return Ok(());
        }

        match frame.classify() {
            RpcEnvelopeKind::Response { id, has_error } => {
                if self.is_replay_initialize_response(&id) {
                    if has_error {
                        warn!(
                            generation = generation.get(),
                            "initialize replay failed on hot-swapped worker"
                        );
                        self.restart_worker_immediately().await?;
                        return Ok(());
                    }

                    // NOTE(review): presumably the replayed id can coincide
                    // with a still-pending client request; complete and
                    // forward it if so — confirm against the session kernel.
                    if let Some(completed) = self.session_kernel.take_completed_request(&id) {
                        self.record_tool_completion_from_response(
                            &id,
                            &completed.request,
                            &frame,
                            has_error,
                            completed.replay_attempts,
                        );
                        self.write_to_client(&frame.payload).await?;
                    }

                    let replay_initialized = self.take_replay_initialized_notification();
                    if let Some(payload) = replay_initialized {
                        let write = self.write_to_worker(&payload).await;
                        if let Err(error) = write {
                            self.handle_worker_failure(
                                error,
                                "failed replaying initialized notification",
                            )
                            .await?;
                            return Ok(());
                        }
                    }

                    // Handshake complete: open the floodgates and drain.
                    self.mark_worker_ready();
                    self.drain_queue().await?;
                    return Ok(());
                }

                if let Some(completed) = self.session_kernel.take_completed_request(&id) {
                    self.record_tool_completion_from_response(
                        &id,
                        &completed.request,
                        &frame,
                        has_error,
                        completed.replay_attempts,
                    );
                    self.write_to_client(&frame.payload).await?;
                } else {
                    debug!(
                        generation = generation.get(),
                        "dropping response with unknown id from worker"
                    );
                }
            }
            RpcEnvelopeKind::Request { .. }
            | RpcEnvelopeKind::Notification { .. }
            | RpcEnvelopeKind::Unknown => {
                if self.worker_accepts_client_traffic() {
                    self.write_to_client(&frame.payload).await?;
                }
            }
        }
        Ok(())
    }
+
+ fn is_replay_initialize_response(&self, response_id: &RequestId) -> bool {
+ let readiness = self.worker.as_ref().map(|worker| &worker.readiness);
+ match readiness {
+ Some(ActiveWorkerReadiness::ReplayingInitialize { request_id, .. }) => {
+ request_id == response_id
+ }
+ Some(ActiveWorkerReadiness::Ready) | None => false,
+ }
+ }
+
+ fn take_replay_initialized_notification(&mut self) -> Option<Vec<u8>> {
+ let readiness = self.worker.as_mut().map(|worker| &mut worker.readiness);
+ match readiness {
+ Some(ActiveWorkerReadiness::ReplayingInitialize {
+ initialized_notification,
+ ..
+ }) => initialized_notification.take(),
+ Some(ActiveWorkerReadiness::Ready) | None => None,
+ }
+ }
+
+ fn mark_worker_ready(&mut self) {
+ if let Some(worker) = self.worker.as_mut() {
+ worker.readiness = ActiveWorkerReadiness::Ready;
+ }
+ }
+
    /// Reacts to a transport-level fault reported for a worker generation.
    ///
    /// Faults from superseded generations are ignored; faults from the active
    /// generation are routed through the common failure handler.
    async fn on_worker_fault(
        &mut self,
        generation: WorkerGeneration,
        error: io::Error,
    ) -> io::Result<()> {
        if !self.is_active_generation(generation) {
            return Ok(());
        }
        self.handle_worker_failure(error, "worker transport fault")
            .await
    }
+
    /// Common recovery path for any worker failure: log, terminate the worker
    /// process, requeue (or reject) in-flight requests for replay, and
    /// schedule a respawn with backoff.
    async fn handle_worker_failure(&mut self, error: io::Error, context: &str) -> io::Result<()> {
        warn!("{context}: {error}");
        self.terminate_worker().await;
        self.requeue_pending_for_replay().await?;
        self.schedule_respawn_with_backoff();
        Ok(())
    }
+
    /// Moves pending requests back onto the replay queue within the configured
    /// replay budget (max attempts and queue capacity).
    ///
    /// Requests the kernel rejects are answered with a JSON-RPC error to the
    /// client and recorded in telemetry, so no caller is left waiting on a
    /// dead worker.
    async fn requeue_pending_for_replay(&mut self) -> io::Result<()> {
        let outcome = self
            .session_kernel
            .requeue_pending_for_replay(ReplayBudget {
                max_attempts: self.config.max_replay_attempts,
                queue_capacity: self.config.queue_capacity,
            });
        for rejected in outcome.rejected {
            warn!(
                request = ?rejected.request_id,
                method = rejected.request.method,
                reason = rejected.reason.message(),
                "recovery dropped pending request"
            );
            self.emit_error_response(&rejected.request_id, rejected.reason)
                .await?;
            self.record_host_error_for_pending_request(
                &rejected.request_id,
                &rejected.request,
                rejected.reason,
                rejected.next_attempt,
            );
        }
        Ok(())
    }
+
    /// Records telemetry for a tool call completed by a worker response.
    ///
    /// Requests without tool-call metadata (non-tool traffic) are skipped.
    /// Latency is measured from `pending.started_at`; telemetry write failures
    /// are logged but never fail the session.
    fn record_tool_completion_from_response(
        &mut self,
        request_id: &RequestId,
        pending: &PendingRequest,
        response: &FramedMessage,
        has_rpc_error: bool,
        replay_attempts: u8,
    ) {
        let tool_meta = pending.tool_call_meta.as_ref();
        let Some(tool_meta) = tool_meta else {
            return;
        };
        let latency_ms = duration_millis_u64(pending.started_at.elapsed());
        let (outcome, error) = classify_tool_outcome_from_response(response, has_rpc_error);
        let write = self.telemetry.record_tool_completion(
            request_id,
            tool_meta,
            latency_ms,
            replay_attempts,
            outcome,
            error,
        );
        if let Err(error) = write {
            warn!("telemetry write failed: {error}");
        }
    }
+
    /// Records telemetry for a pending tool call the host itself rejected
    /// (e.g. replay budget exhausted during recovery).
    ///
    /// Mirrors `record_tool_completion_from_response` but stamps the outcome
    /// as a host error. Requests without tool-call metadata are skipped;
    /// telemetry write failures are logged and swallowed.
    fn record_host_error_for_pending_request(
        &mut self,
        request_id: &RequestId,
        pending: &PendingRequest,
        reason: HostRejection,
        replay_attempts: u8,
    ) {
        let tool_meta = pending.tool_call_meta.as_ref();
        let Some(tool_meta) = tool_meta else {
            return;
        };
        let latency_ms = duration_millis_u64(pending.started_at.elapsed());
        let write = self.telemetry.record_tool_completion(
            request_id,
            tool_meta,
            latency_ms,
            replay_attempts,
            ToolOutcome::Error,
            host_tool_error_detail(reason),
        );
        if let Err(error) = write {
            warn!("telemetry write failed: {error}");
        }
    }
+
    /// Records telemetry for a client frame rejected before it was ever
    /// forwarded to a worker (so no latency is attributable; 0 is reported).
    ///
    /// The tool metadata is re-parsed from the raw frame; frames that are not
    /// tool calls are skipped. Telemetry write failures are logged and
    /// swallowed.
    fn record_host_error_for_rejected_frame(
        &mut self,
        request_id: &RequestId,
        frame: &FramedMessage,
        reason: HostRejection,
        replay_attempts: u8,
    ) {
        let method = frame
            .value
            .get("method")
            .and_then(Value::as_str)
            .unwrap_or_default();
        let tool_meta = parse_tool_call_meta(frame, method);
        let Some(tool_meta) = tool_meta else {
            return;
        };
        let write = self.telemetry.record_tool_completion(
            request_id,
            &tool_meta,
            0,
            replay_attempts,
            ToolOutcome::Error,
            host_tool_error_detail(reason),
        );
        if let Err(error) = write {
            warn!("telemetry write failed: {error}");
        }
    }
+
    /// Sends a host-synthesized JSON-RPC error response for `request_id`
    /// directly to the client, using the rejection's code and message.
    ///
    /// Serialization failure is surfaced as `InvalidData` (it would indicate a
    /// bug in the payload construction, not client input).
    async fn emit_error_response(
        &mut self,
        request_id: &RequestId,
        reason: HostRejection,
    ) -> io::Result<()> {
        let payload = json!({
            "jsonrpc": "2.0",
            "id": request_id.to_json_value(),
            "error": {
                "code": reason.code(),
                "message": reason.message(),
            },
        });
        let bytes = serde_json::to_vec(&payload).map_err(|error| {
            io::Error::new(
                io::ErrorKind::InvalidData,
                format!("failed to serialize host error response: {error}"),
            )
        })?;
        self.write_to_client(&bytes).await
    }
+
    /// Writes one framed payload to the host's stdout (the MCP client side).
    async fn write_to_client(&mut self, payload: &[u8]) -> io::Result<()> {
        write_frame(&mut self.client_stdout, payload).await
    }
+
+ async fn write_to_worker(&mut self, payload: &[u8]) -> io::Result<()> {
+ let worker = self.worker.as_mut().ok_or_else(|| {
+ io::Error::new(
+ io::ErrorKind::BrokenPipe,
+ "worker unavailable while writing",
+ )
+ })?;
+ write_frame(&mut worker.process.stdin, payload).await
+ }
+}
+
+fn init_tracing() {
+ let init_result = tracing_subscriber::fmt()
+ .with_max_level(Level::INFO)
+ .with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
+ .with_writer(io::stderr)
+ .try_init();
+ if let Err(error) = init_result {
+ eprintln!("host tracing init skipped: {error}");
+ }
+}
+
/// Runs the stable host mode that survives worker failures and hot-swaps worker binaries.
///
/// Startup sequence: initialize tracing, load configuration from the
/// environment, optionally restore session state handed over by a previous
/// host process (self-rollout), open the telemetry log, spawn the client
/// stdin reader and worker-binary watcher tasks, then run the runtime event
/// loop until it exits.
pub(crate) async fn run_host() -> Result<(), Box<dyn std::error::Error>> {
    init_tracing();
    let config = HostConfig::from_env()?;
    // Session state left behind by a prior host process, if any.
    let restored_state =
        load_snapshot_file_from_env::<HostSessionKernelSnapshot>(HOST_REEXEC_STATE_ENV)?
            .map(HostSessionKernelSnapshot::restore)
            .transpose()?;
    if restored_state.is_some() {
        info!("restoring host state after self-rollout");
    }
    info!(telemetry_path = %config.telemetry_path.display(), "starting host telemetry");
    let telemetry = TelemetryLog::new(
        config.telemetry_path.as_path(),
        config.workspace_root.as_path(),
        config.telemetry_snapshot_every,
    )?;
    let (event_tx, event_rx) = mpsc::unbounded_channel::<HostEvent>();
    let client_reader_task = spawn_client_reader(event_tx.clone());
    let watcher = spawn_binary_watcher(&config.worker_binary, event_tx.clone())?;
    let restored_state = restored_state.unwrap_or_else(RestoredHostSessionKernel::cold);

    let mut runtime = HostRuntime {
        telemetry,
        // Canonical path / fingerprint are best-effort: a missing binary at
        // startup just means the watcher decides later.
        worker_binary_canonical: fs::canonicalize(&config.worker_binary).ok(),
        binary_fingerprint: BinaryFingerprint::capture(&config.worker_binary).ok(),
        respawn_backoff: RespawnBackoff::new(config.respawn_floor, config.respawn_ceiling),
        config,
        events: event_rx,
        event_tx,
        client_reader_task,
        watcher,
        client_stdout: tokio::io::stdout(),
        worker: None,
        session_kernel: HostSessionKernel::from_restored(restored_state),
        generation_counter: GenerationCounter::default(),
        // A respawn due "now" makes the loop start the first worker immediately.
        respawn_due: Some(Instant::now()),
        reload_requested_at: None,
    };
    runtime.run().await?;
    Ok(())
}
diff --git a/crates/adequate-rust-mcp/src/host/telemetry.rs b/crates/adequate-rust-mcp/src/host/telemetry.rs
new file mode 100644
index 0000000..fcd3bf2
--- /dev/null
+++ b/crates/adequate-rust-mcp/src/host/telemetry.rs
@@ -0,0 +1,97 @@
+use serde_json::Value;
+
+use super::protocol::FramedMessage;
+
+pub(super) use libmcp::{HostRejection, TelemetryLog, ToolErrorDetail, ToolOutcome};
+
+pub(super) fn host_tool_error_detail(reason: HostRejection) -> ToolErrorDetail {
+ ToolErrorDetail {
+ code: Some(reason.code()),
+ kind: Some("host_error".to_owned()),
+ message: Some(reason.message().to_owned()),
+ }
+}
+
/// Classifies a worker response into a telemetry outcome plus error detail.
///
/// Precedence: a JSON-RPC `error` object wins (its `data.kind` tag is
/// preferred, falling back to a mapping of the error code); otherwise a
/// `result.isError == true` tool payload is reported as an error with the
/// first content entry's text as the message; otherwise the call is `Ok`.
pub(super) fn classify_tool_outcome_from_response(
    response: &FramedMessage,
    has_rpc_error: bool,
) -> (ToolOutcome, ToolErrorDetail) {
    if has_rpc_error {
        let error = response.value.get("error").and_then(Value::as_object);
        let code = error
            .and_then(|error| error.get("code"))
            .and_then(Value::as_i64);
        let message = error
            .and_then(|error| error.get("message"))
            .and_then(Value::as_str)
            .map(str::to_owned);
        // Structured `data.kind` beats the generic code-derived kind.
        let kind = error
            .and_then(extract_jsonrpc_error_kind_from_data)
            .or_else(|| jsonrpc_error_kind(code));
        return (
            ToolOutcome::Error,
            ToolErrorDetail {
                code,
                kind,
                message,
            },
        );
    }

    let result = response.value.get("result");
    let is_tool_error = result
        .and_then(|result| result.get("isError"))
        .and_then(Value::as_bool)
        .unwrap_or(false);
    if is_tool_error {
        let message = result.and_then(extract_tool_error_message);
        return (
            ToolOutcome::Error,
            ToolErrorDetail {
                code: None,
                kind: Some("tool_error_payload".to_owned()),
                message,
            },
        );
    }

    (ToolOutcome::Ok, ToolErrorDetail::default())
}
+
+fn extract_jsonrpc_error_kind_from_data(error: &serde_json::Map<String, Value>) -> Option<String> {
+ error
+ .get("data")
+ .and_then(Value::as_object)
+ .and_then(|data| data.get("kind"))
+ .and_then(Value::as_str)
+ .map(str::to_owned)
+}
+
/// Maps a JSON-RPC error code to a stable kind label.
///
/// Exact well-known codes (including the custom replay/queue codes) take
/// precedence over the generic `-32099..=-32000` "server_error" band; any
/// other code is labelled "jsonrpc_error". `None` in, `None` out.
fn jsonrpc_error_kind(code: Option<i64>) -> Option<String> {
    const EXACT: [(i64, &str); 7] = [
        (-32700, "parse_error"),
        (-32600, "invalid_request"),
        (-32601, "method_not_found"),
        (-32602, "invalid_params"),
        (-32603, "internal_error"),
        (-32095, "replay_budget_exhausted"),
        (-32097, "queue_overflow"),
    ];
    let code = code?;
    let label = EXACT
        .iter()
        .find(|(known, _)| *known == code)
        .map(|(_, label)| *label)
        .unwrap_or_else(|| {
            if (-32099..=-32000).contains(&code) {
                "server_error"
            } else {
                "jsonrpc_error"
            }
        });
    Some(label.to_owned())
}
+
+fn extract_tool_error_message(result: &Value) -> Option<String> {
+ let content = result.get("content").and_then(Value::as_array)?;
+ content
+ .first()
+ .and_then(|entry| entry.get("text"))
+ .and_then(Value::as_str)
+ .map(str::to_owned)
+}
+
+pub(super) fn duration_millis_u64(duration: std::time::Duration) -> u64 {
+ u64::try_from(duration.as_millis()).unwrap_or(u64::MAX)
+}
diff --git a/crates/adequate-rust-mcp/src/host/tests.rs b/crates/adequate-rust-mcp/src/host/tests.rs
new file mode 100644
index 0000000..ded0997
--- /dev/null
+++ b/crates/adequate-rust-mcp/src/host/tests.rs
@@ -0,0 +1,338 @@
+use super::{
+ binary::{BinaryFingerprint, event_targets_binary},
+ config::RespawnBackoff,
+ protocol::{
+ EffectReplayContract, FrameReadOutcome, FramedMessage, RequestId, RpcEnvelopeKind,
+ classify_replay_contract, parse_tool_call_meta, read_frame, write_frame,
+ },
+ telemetry::{ToolOutcome, classify_tool_outcome_from_response},
+};
+use notify::{Event, EventKind};
+use serde_json::{Value, json};
+use std::time::Duration;
+use tokio::io::{BufReader, duplex};
+
+fn encode_json(value: &Value) -> Vec<u8> {
+ let serialized = serde_json::to_vec(value);
+ assert!(
+ serialized.is_ok(),
+ "expected JSON serialization to succeed in test fixture"
+ );
+ serialized.unwrap_or_default()
+}
+
/// Numeric and string JSON-RPC ids must survive a parse → serialize roundtrip
/// with their original JSON representation intact.
#[test]
fn request_id_roundtrip_preserves_number_and_text() {
    let numeric = RequestId::from_json_value(&json!(42));
    assert!(numeric.is_some(), "expected numeric request id to parse");
    let Some(numeric) = numeric else {
        return;
    };
    // Numbers are stored as their textual form internally.
    assert!(matches!(numeric, RequestId::Number(ref value) if value == "42"));
    assert_eq!(numeric.to_json_value(), json!(42));

    let textual = RequestId::from_json_value(&json!("abc"));
    assert!(textual.is_some(), "expected string request id to parse");
    let Some(textual) = textual else {
        return;
    };
    assert!(matches!(textual, RequestId::Text(ref value) if value == "abc"));
    assert_eq!(textual.to_json_value(), json!("abc"));
}
+
/// `FramedMessage::classify` must tell requests (id + method), responses
/// (id, no method) and notifications (method, no id) apart.
#[test]
fn framed_message_classification_distinguishes_request_response_notification() {
    let request = FramedMessage::parse(encode_json(
        &json!({"jsonrpc":"2.0","id":1,"method":"initialize","params":{}}),
    ));
    assert!(request.is_ok(), "request fixture failed to parse");
    let Ok(request) = request else {
        return;
    };
    assert!(matches!(
        request.classify(),
        RpcEnvelopeKind::Request { ref method, .. } if method == "initialize"
    ));

    let response =
        FramedMessage::parse(encode_json(&json!({"jsonrpc":"2.0","id":"x","result":{}})));
    assert!(response.is_ok(), "response fixture failed to parse");
    let Ok(response) = response else {
        return;
    };
    assert!(matches!(
        response.classify(),
        RpcEnvelopeKind::Response {
            has_error: false,
            ..
        }
    ));

    let notification = FramedMessage::parse(encode_json(
        &json!({"jsonrpc":"2.0","method":"initialized","params":{}}),
    ));
    assert!(notification.is_ok(), "notification fixture failed to parse");
    let Ok(notification) = notification else {
        return;
    };
    assert!(matches!(
        notification.classify(),
        RpcEnvelopeKind::Notification { ref method } if method == "initialized"
    ));
}
+
/// A payload written with `write_frame` must come back byte-identical from
/// `read_frame` over an in-memory duplex stream.
#[tokio::test]
async fn frame_codec_round_trip_preserves_payload() {
    let (mut writer, reader) = duplex(512);
    let payload = encode_json(&json!({"jsonrpc":"2.0","id":9,"result":{"ok":true}}));
    let payload_for_assert = payload.clone();

    // Write concurrently so a small duplex buffer cannot deadlock the test.
    let write_handle = tokio::spawn(async move { write_frame(&mut writer, &payload).await });
    let mut framed_reader = BufReader::new(reader);
    let read = read_frame(&mut framed_reader).await;
    assert!(read.is_ok(), "read_frame failed");
    let Ok(read) = read else {
        return;
    };
    assert!(matches!(read, FrameReadOutcome::Frame(ref frame) if frame == &payload_for_assert));

    let joined = write_handle.await;
    assert!(joined.is_ok(), "writer task join failed");
    let Ok(joined) = joined else {
        return;
    };
    assert!(joined.is_ok(), "write_frame failed");
}
+
/// Rewriting a file's contents must produce a different `BinaryFingerprint`,
/// since the host uses fingerprints to detect worker-binary hot swaps.
#[test]
fn binary_fingerprint_changes_after_content_update() {
    let temp = tempfile::NamedTempFile::new();
    assert!(temp.is_ok());
    let temp = match temp {
        Ok(file) => file,
        Err(_) => return,
    };
    let path = temp.path().to_path_buf();

    let write_one = std::fs::write(&path, b"alpha");
    assert!(write_one.is_ok());

    let first = BinaryFingerprint::capture(&path);
    assert!(first.is_ok());
    let first = match first {
        Ok(value) => value,
        Err(_) => return,
    };

    // Small sleep so a timestamp-based component (if any) can also differ.
    std::thread::sleep(Duration::from_millis(3));
    let write_two = std::fs::write(&path, b"beta-gamma");
    assert!(write_two.is_ok());

    let second = BinaryFingerprint::capture(&path);
    assert!(
        second.is_ok(),
        "expected second fingerprint capture to succeed"
    );
    let Ok(second) = second else {
        return;
    };
    assert_ne!(first, second);
}
+
/// The respawn delay doubles from the floor, caps at the ceiling, and resets
/// back to the floor after `reset()`.
#[test]
fn respawn_backoff_doubles_and_caps() {
    let floor = Duration::from_millis(10);
    let ceiling = Duration::from_millis(40);
    let mut backoff = RespawnBackoff::new(floor, ceiling);
    assert_eq!(backoff.consume_delay(), Duration::from_millis(10));
    assert_eq!(backoff.consume_delay(), Duration::from_millis(20));
    assert_eq!(backoff.consume_delay(), Duration::from_millis(40));
    assert_eq!(backoff.consume_delay(), Duration::from_millis(40));
    backoff.reset();
    assert_eq!(backoff.consume_delay(), Duration::from_millis(10));
}
+
/// A modify event whose path list contains the watched binary's exact path
/// must be recognized as targeting the binary.
#[test]
fn watcher_event_target_match_handles_exact_path() {
    let temp = tempfile::NamedTempFile::new();
    assert!(temp.is_ok());
    let temp = match temp {
        Ok(file) => file,
        Err(_) => return,
    };
    let path = temp.path().to_path_buf();
    let mut event = Event::new(EventKind::Modify(notify::event::ModifyKind::Any));
    event.paths.push(path.clone());
    assert!(event_targets_binary(&event, &path, Some(path.as_path())));
}
+
/// A watcher event that carries no paths must never be treated as targeting
/// the binary, even when the watched path itself exists.
#[test]
fn watcher_event_target_ignores_empty_path_payload() {
    let temp = tempfile::NamedTempFile::new();
    assert!(temp.is_ok());
    let temp = match temp {
        Ok(file) => file,
        Err(_) => return,
    };
    let path = temp.path().to_path_buf();
    let event = Event::new(EventKind::Other);
    assert!(!event_targets_binary(&event, &path, Some(path.as_path())));
}
+
/// Read-only tools such as `hover` must classify as `Convergent` so the host
/// can replay them freely after a worker crash.
#[test]
fn replay_contract_marks_safe_tools_convergent() {
    let frame = FramedMessage::parse(encode_json(&json!({
        "jsonrpc":"2.0",
        "id": 1,
        "method": "tools/call",
        "params": {
            "name": "hover",
            "arguments": { "file_path": "/tmp/lib.rs", "line": 1, "column": 1 }
        }
    })));
    assert!(frame.is_ok());
    let frame = match frame {
        Ok(value) => value,
        Err(_) => return,
    };
    let replay = classify_replay_contract(&frame, "tools/call");
    assert_eq!(replay, EffectReplayContract::Convergent);
}
+
/// `clippy_diagnostics` re-runs are idempotent, so the contract must be
/// `Convergent` (safe to replay).
#[test]
fn replay_contract_marks_clippy_diagnostics_convergent() {
    let frame = FramedMessage::parse(encode_json(&json!({
        "jsonrpc":"2.0",
        "id": 1,
        "method": "tools/call",
        "params": {
            "name": "clippy_diagnostics",
            "arguments": { "file_path": "/tmp/lib.rs" }
        }
    })));
    assert!(frame.is_ok());
    let frame = match frame {
        Ok(value) => value,
        Err(_) => return,
    };
    let replay = classify_replay_contract(&frame, "tools/call");
    assert_eq!(replay, EffectReplayContract::Convergent);
}
+
/// `workspace/executeCommand` via `advanced_lsp_request` can have side
/// effects, so blind replay is not allowed: the contract is `ProbeRequired`.
#[test]
fn replay_contract_marks_execute_command_probe_required() {
    let frame = FramedMessage::parse(encode_json(&json!({
        "jsonrpc":"2.0",
        "id": 1,
        "method": "tools/call",
        "params": {
            "name": "advanced_lsp_request",
            "arguments": { "method": "workspace/executeCommand", "params": {} }
        }
    })));
    assert!(frame.is_ok());
    let frame = match frame {
        Ok(value) => value,
        Err(_) => return,
    };
    let replay = classify_replay_contract(&frame, "tools/call");
    assert_eq!(replay, EffectReplayContract::ProbeRequired);
}
+
/// `fix_everything` mutates the workspace, so its replay contract must be
/// `ProbeRequired` rather than a blind re-send.
#[test]
fn replay_contract_marks_fix_everything_probe_required() {
    let frame = FramedMessage::parse(encode_json(&json!({
        "jsonrpc":"2.0",
        "id": 1,
        "method": "tools/call",
        "params": {
            "name": "fix_everything",
            "arguments": {}
        }
    })));
    assert!(frame.is_ok());
    let frame = match frame {
        Ok(value) => value,
        Err(_) => return,
    };
    let replay = classify_replay_contract(&frame, "tools/call");
    assert_eq!(replay, EffectReplayContract::ProbeRequired);
}
+
/// Tool-call metadata extraction must handle `arguments.params` delivered as
/// a JSON *string*: the nested document URI should still surface as the
/// telemetry path hint, and the LSP method should be captured.
#[test]
fn parse_tool_call_meta_extracts_path_hint_from_stringified_params() {
    let frame = FramedMessage::parse(encode_json(&json!({
        "jsonrpc":"2.0",
        "id": 1,
        "method": "tools/call",
        "params": {
            "name": "advanced_lsp_request",
            "arguments": {
                "method": "textDocument/definition",
                "params": "{\"textDocument\":{\"uri\":\"file:///tmp/lib.rs\"},\"position\":{\"line\":6,\"character\":16}}"
            }
        }
    })));
    assert!(frame.is_ok());
    let frame = match frame {
        Ok(value) => value,
        Err(_) => return,
    };

    let meta = parse_tool_call_meta(&frame, "tools/call");
    assert!(meta.is_some());
    let meta = match meta {
        Some(value) => value,
        None => return,
    };
    assert_eq!(meta.lsp_method.as_deref(), Some("textDocument/definition"));
    assert_eq!(meta.path_hint.as_deref(), Some("/tmp/lib.rs"));
}
+
/// When a JSON-RPC error carries a structured `data.kind`, telemetry must use
/// it instead of the generic mapping derived from the error code.
#[test]
fn response_error_kind_prefers_jsonrpc_data_kind() {
    let frame = FramedMessage::parse(encode_json(&json!({
        "jsonrpc":"2.0",
        "id": 7,
        "error": {
            "code": -32603,
            "message": "internal error",
            "data": {
                "kind": "porcelain_fallback",
                "hint": "retry once"
            }
        }
    })));
    assert!(frame.is_ok());
    let frame = match frame {
        Ok(value) => value,
        Err(_) => return,
    };

    let (outcome, detail) = classify_tool_outcome_from_response(&frame, true);
    assert!(matches!(outcome, ToolOutcome::Error));
    assert_eq!(detail.code, Some(-32603));
    assert_eq!(detail.kind.as_deref(), Some("porcelain_fallback"));
}
+
/// Without a `data.kind` tag, the error kind must fall back to the mapping
/// derived from the standard JSON-RPC error code.
#[test]
fn response_error_kind_falls_back_to_jsonrpc_code_mapping() {
    let frame = FramedMessage::parse(encode_json(&json!({
        "jsonrpc":"2.0",
        "id": 8,
        "error": {
            "code": -32602,
            "message": "invalid params"
        }
    })));
    assert!(frame.is_ok());
    let frame = match frame {
        Ok(value) => value,
        Err(_) => return,
    };

    let (outcome, detail) = classify_tool_outcome_from_response(&frame, true);
    assert!(matches!(outcome, ToolOutcome::Error));
    assert_eq!(detail.code, Some(-32602));
    assert_eq!(detail.kind.as_deref(), Some("invalid_params"));
}
diff --git a/crates/adequate-rust-mcp/src/main.rs b/crates/adequate-rust-mcp/src/main.rs
new file mode 100644
index 0000000..e42806b
--- /dev/null
+++ b/crates/adequate-rust-mcp/src/main.rs
@@ -0,0 +1,88 @@
+//! Process-mode dispatch for adequate-rust-mcp.
+
+mod host;
+mod worker;
+
+#[cfg(test)]
+use serial_test as _;
+use std::{ffi::OsString, io};
+
/// How the binary was asked to run, decided from the command-line arguments.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum LaunchMode {
    // No arguments: run the supervising host process.
    Host,
    // `--worker`: run a disposable worker child spawned by the host.
    Worker,
    // `--help` / `-h`: print usage and exit.
    Help,
}
+
+fn parse_launch_mode(args: impl IntoIterator<Item = OsString>) -> Result<LaunchMode, String> {
+ let collected = args.into_iter().collect::<Vec<_>>();
+ match collected.as_slice() {
+ [] => Ok(LaunchMode::Host),
+ [flag] if flag == "--worker" => Ok(LaunchMode::Worker),
+ [flag] if flag == "--help" || flag == "-h" => Ok(LaunchMode::Help),
+ _ => Err("invalid arguments: use no args for host mode or --worker for worker mode".into()),
+ }
+}
+
/// Prints the `--help` usage text to stdout.
fn print_usage() {
    println!("adequate-rust-mcp");
    println!();
    println!("USAGE:");
    println!("  adequate-rust-mcp");
    println!("  adequate-rust-mcp --worker");
    println!("  adequate-rust-mcp --help");
    println!();
    println!("MODES:");
    println!("  host      stable public MCP endpoint that supervises worker generations");
    println!("  --worker  disposable worker process spawned by the host");
}
+
/// Entry point: parse the launch mode from argv (program name skipped) and
/// dispatch to host mode, worker mode, or the usage printer. Invalid
/// arguments are converted to an `InvalidInput` I/O error so the process
/// exits with a failure status and a readable message.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mode = parse_launch_mode(std::env::args_os().skip(1))
        .map_err(|message| io::Error::new(io::ErrorKind::InvalidInput, message))?;
    match mode {
        LaunchMode::Host => host::run_host().await,
        LaunchMode::Worker => worker::run_worker().await,
        LaunchMode::Help => {
            print_usage();
            Ok(())
        }
    }
}
+
/// Unit tests for the argv → `LaunchMode` mapping.
#[cfg(test)]
mod tests {
    use super::{LaunchMode, parse_launch_mode};
    use std::ffi::OsString;

    #[test]
    fn parse_launch_mode_defaults_to_host() {
        let mode = parse_launch_mode(Vec::<OsString>::new());
        assert!(matches!(mode, Ok(LaunchMode::Host)));
    }

    #[test]
    fn parse_launch_mode_accepts_worker_flag() {
        let mode = parse_launch_mode(vec![OsString::from("--worker")]);
        assert!(matches!(mode, Ok(LaunchMode::Worker)));
    }

    #[test]
    fn parse_launch_mode_accepts_help_flag() {
        let mode = parse_launch_mode(vec![OsString::from("--help")]);
        assert!(matches!(mode, Ok(LaunchMode::Help)));
    }

    #[test]
    fn parse_launch_mode_rejects_unknown_flags() {
        let mode = parse_launch_mode(vec![OsString::from("--wat")]);
        assert!(mode.is_err());
    }

    #[test]
    fn parse_launch_mode_rejects_multiple_args() {
        let mode = parse_launch_mode(vec![OsString::from("--worker"), OsString::from("--extra")]);
        assert!(mode.is_err());
    }
}
diff --git a/crates/adequate-rust-mcp/src/worker/clippy.rs b/crates/adequate-rust-mcp/src/worker/clippy.rs
new file mode 100644
index 0000000..89b9b3a
--- /dev/null
+++ b/crates/adequate-rust-mcp/src/worker/clippy.rs
@@ -0,0 +1,196 @@
+use super::{
+ CargoJsonMessageWire, DiagnosticEntry, DiagnosticLevel, DiagnosticsReport, PorcelainErrorKind,
+ RustcSpanWire, SourceFilePath, SourcePoint, SourceRange, porcelain_internal_error,
+ resolve_clippy_command_spec, resolve_workspace_root_path, run_workspace_command,
+};
+use ra_mcp_domain::types::{OneIndexedColumn, OneIndexedLine};
+use rmcp::ErrorData as McpError;
+use std::{
+ collections::HashMap,
+ fs,
+ path::{Path, PathBuf},
+};
+
/// Runs clippy for the workspace and returns diagnostics scoped to the
/// requested files.
///
/// The workspace root and clippy invocation come from the resolver helpers.
/// A non-success exit status is only treated as a hard error when no
/// diagnostic matched the requested files — when diagnostics did match, the
/// failing status is assumed to be caused by those diagnostics themselves.
pub(super) async fn collect_clippy_diagnostics(
    file_paths: Vec<SourceFilePath>,
) -> Result<DiagnosticsReport, McpError> {
    let workspace_root = resolve_workspace_root_path()?;
    let clippy_command = resolve_clippy_command_spec(workspace_root.as_path())?;
    let rendered_command = clippy_command.rendered();
    let output = run_workspace_command(
        workspace_root.as_path(),
        &clippy_command,
        rendered_command.as_str(),
    )
    .await?;
    let diagnostics = parse_clippy_json_stream(
        output.standard_output.as_str(),
        file_paths.as_slice(),
        workspace_root.as_path(),
    );
    if !output.status.success() && diagnostics.is_empty() {
        return Err(porcelain_internal_error(
            format!(
                "clippy command failed without matching diagnostics for requested files: {}",
                output.standard_error.trim()
            ),
            PorcelainErrorKind::ToolRuntimeFailure,
            Some("run check.py locally to inspect full clippy output"),
            false,
        ));
    }
    Ok(DiagnosticsReport { diagnostics })
}
+
+pub(super) fn parse_clippy_json_stream(
+ stdout: &str,
+ target_file_paths: &[SourceFilePath],
+ workspace_root: &Path,
+) -> Vec<DiagnosticEntry> {
+ let mut target_by_normalized = HashMap::<PathBuf, SourceFilePath>::new();
+ let mut target_order = Vec::<PathBuf>::new();
+ for target_path in target_file_paths {
+ let normalized = normalize_path_for_match(target_path.as_path());
+ target_order.push(normalized.clone());
+ let _existing = target_by_normalized
+ .entry(normalized)
+ .or_insert_with(|| target_path.clone());
+ }
+ let mut diagnostics_by_path = HashMap::<PathBuf, Vec<DiagnosticEntry>>::new();
+ for line in stdout.lines() {
+ let Ok(payload) = serde_json::from_str::<CargoJsonMessageWire>(line) else {
+ continue;
+ };
+ if payload.reason != "compiler-message" {
+ continue;
+ }
+ let Some(message) = payload.message else {
+ continue;
+ };
+ let Some((primary, matched_path, matched_target_path)) = select_matching_target_span(
+ message.spans.as_slice(),
+ &target_by_normalized,
+ workspace_root,
+ ) else {
+ continue;
+ };
+ let Some(range) = clippy_span_to_source_range(matched_target_path, primary) else {
+ continue;
+ };
+ diagnostics_by_path
+ .entry(matched_path)
+ .or_default()
+ .push(DiagnosticEntry {
+ range,
+ level: clippy_level_to_diagnostic_level(message.level.as_str()),
+ code: message.code.map(|code| code.code),
+ message: message.message,
+ });
+ }
+ for diagnostics in diagnostics_by_path.values_mut() {
+ sort_diagnostics(diagnostics);
+ diagnostics.dedup();
+ }
+ let mut fused = Vec::new();
+ for normalized_target in target_order {
+ if let Some(diagnostics) = diagnostics_by_path.get(&normalized_target) {
+ fused.extend(diagnostics.iter().cloned());
+ }
+ }
+ fused
+}
+
/// Sorts diagnostics in place: severity first (errors before warnings before
/// info before hints), then start and end position, then code and message as
/// final tie-breakers so fully-equal entries become adjacent for `dedup`.
pub(super) fn sort_diagnostics(diagnostics: &mut [DiagnosticEntry]) {
    diagnostics.sort_by(|left, right| {
        let left_rank = diagnostic_level_sort_rank(left.level);
        let right_rank = diagnostic_level_sort_rank(right.level);
        let left_start = left.range.start();
        let right_start = right.range.start();
        let left_end = left.range.end();
        let right_end = right.range.end();
        left_rank
            .cmp(&right_rank)
            .then_with(|| left_start.line().get().cmp(&right_start.line().get()))
            .then_with(|| left_start.column().get().cmp(&right_start.column().get()))
            .then_with(|| left_end.line().get().cmp(&right_end.line().get()))
            .then_with(|| left_end.column().get().cmp(&right_end.column().get()))
            .then_with(|| left.code.cmp(&right.code))
            .then_with(|| left.message.cmp(&right.message))
    });
}
+
/// Finds a span in `spans` whose file resolves to one of the requested
/// targets, preferring primary spans.
///
/// Returns the matched span, the normalized path it resolved to, and the
/// originally requested path. The second scan over all spans only runs when
/// no primary span matched a target, so non-primary spans act as a fallback.
pub(super) fn select_matching_target_span<'a>(
    spans: &'a [RustcSpanWire],
    target_by_normalized: &'a HashMap<PathBuf, SourceFilePath>,
    workspace_root: &Path,
) -> Option<(&'a RustcSpanWire, PathBuf, &'a SourceFilePath)> {
    spans
        .iter()
        .find_map(|span| {
            if !span.is_primary {
                return None;
            }
            let normalized = normalize_span_file_path(span, workspace_root);
            let target = target_by_normalized.get(normalized.as_path())?;
            Some((span, normalized, target))
        })
        .or_else(|| {
            spans.iter().find_map(|span| {
                let normalized = normalize_span_file_path(span, workspace_root);
                let target = target_by_normalized.get(normalized.as_path())?;
                Some((span, normalized, target))
            })
        })
}
+
+pub(super) fn normalize_span_file_path(span: &RustcSpanWire, workspace_root: &Path) -> PathBuf {
+ let raw = PathBuf::from(span.file_name.as_str());
+ let absolute = if raw.is_absolute() {
+ raw
+ } else {
+ workspace_root.join(raw)
+ };
+ normalize_path_for_match(absolute.as_path())
+}
+
+pub(super) fn normalize_path_for_match(path: &Path) -> PathBuf {
+ fs::canonicalize(path).unwrap_or_else(|_| path.to_path_buf())
+}
+
/// Converts a rustc span into a validated `SourceRange` for `file_path`.
///
/// Clippy occasionally reports zero or inverted coordinates: lines and
/// columns are clamped to at least 1, the end line to at least the start
/// line, and — on single-line spans — the end column to at least the start
/// column. Returns `None` when the clamped values still fail the domain
/// type's validation.
pub(super) fn clippy_span_to_source_range(
    file_path: &SourceFilePath,
    span: &RustcSpanWire,
) -> Option<SourceRange> {
    let start_line = OneIndexedLine::try_new(span.line_start.max(1)).ok()?;
    let start_column = OneIndexedColumn::try_new(span.column_start.max(1)).ok()?;
    let end_line = OneIndexedLine::try_new(span.line_end.max(span.line_start).max(1)).ok()?;
    let mut normalized_end_column = span.column_end.max(1);
    if end_line == start_line {
        normalized_end_column = normalized_end_column.max(start_column.get());
    }
    let end_column = OneIndexedColumn::try_new(normalized_end_column).ok()?;
    SourceRange::try_new(
        file_path.clone(),
        SourcePoint::new(start_line, start_column),
        SourcePoint::new(end_line, end_column),
    )
    .ok()
}
+
+pub(super) fn clippy_level_to_diagnostic_level(level: &str) -> DiagnosticLevel {
+ match level {
+ "error" => DiagnosticLevel::Error,
+ "warning" => DiagnosticLevel::Warning,
+ "help" => DiagnosticLevel::Hint,
+ _ => DiagnosticLevel::Information,
+ }
+}
+
+const fn diagnostic_level_sort_rank(level: DiagnosticLevel) -> u8 {
+ match level {
+ DiagnosticLevel::Error => 0,
+ DiagnosticLevel::Warning => 1,
+ DiagnosticLevel::Information => 2,
+ DiagnosticLevel::Hint => 3,
+ }
+}
diff --git a/crates/adequate-rust-mcp/src/worker/diagnostics.rs b/crates/adequate-rust-mcp/src/worker/diagnostics.rs
new file mode 100644
index 0000000..034b534
--- /dev/null
+++ b/crates/adequate-rust-mcp/src/worker/diagnostics.rs
@@ -0,0 +1,486 @@
+use super::{
+ CompactDiagnosticOutput, Content, DiagnosticEntry, DiagnosticLevel, DiagnosticLevelOutput,
+ DiagnosticOutput, DiagnosticsCountsOutput, DiagnosticsJsonOutput, DiagnosticsModeInput,
+ DiagnosticsModeOutput, DiagnosticsPathStyleInput, DiagnosticsRenderInput, DiagnosticsReport,
+ Fault, FaultOutput, FlattenedDiagnostics, HealthOutput, HealthStateOutput, HoverOutput,
+ HoverPayload, LifecycleSnapshot, LocationOutput, PorcelainErrorKind, RangeOutput, RenameOutput,
+ RenameReport, SourceFilePath, SourceLocation, SourcePoint, SourceRange,
+ porcelain_internal_error, resolve_workspace_root_path,
+};
+use libmcp::collapse_inline_whitespace;
+use rmcp::{ErrorData as McpError, model::CallToolResult};
+use std::{
+ path::{Path, PathBuf},
+ sync::Arc,
+ time::Duration,
+};
+
/// Rendering knobs shared across the diagnostics tool surface.
#[derive(Debug, Clone, Copy, Default)]
pub(super) struct DiagnosticsRenderConfig {
    // How much detail to emit: summary, compact, or full.
    pub(super) mode: DiagnosticsModeInput,
    // Output flavor: porcelain text or structured JSON.
    pub(super) render: DiagnosticsRenderInput,
    // Compact-mode cap on rendered diagnostics (None = unlimited).
    pub(super) max_items: Option<usize>,
    // Compact-mode cap on message length in chars (None = unlimited).
    pub(super) max_message_chars: Option<usize>,
    // Whether file paths render absolute or workspace-relative.
    pub(super) path_style: DiagnosticsPathStyleInput,
}
+
+impl DiagnosticsRenderConfig {
+ pub(super) fn without_compact_limits(self) -> Self {
+ Self {
+ max_items: None,
+ max_message_chars: None,
+ ..self
+ }
+ }
+}
+
+pub(super) fn render_diagnostic_path(
+ file_path: &SourceFilePath,
+ style: DiagnosticsPathStyleInput,
+ workspace_root: Option<&Path>,
+) -> String {
+ let absolute = file_path.as_path();
+ match style {
+ DiagnosticsPathStyleInput::Absolute => absolute.display().to_string(),
+ DiagnosticsPathStyleInput::Relative => workspace_root
+ .and_then(|root| absolute.strip_prefix(root).ok())
+ .map(|relative| relative.display().to_string())
+ .filter(|relative| !relative.is_empty())
+ .unwrap_or_else(|| absolute.display().to_string()),
+ }
+}
+
+pub(super) fn maybe_truncate_message(raw: String, max_message_chars: Option<usize>) -> String {
+ let Some(limit) = max_message_chars else {
+ return raw;
+ };
+ let message_len = raw.chars().count();
+ if message_len <= limit {
+ return raw;
+ }
+ if limit == 0 {
+ return String::new();
+ }
+ if limit <= 3 {
+ return raw.chars().take(limit).collect();
+ }
+ let mut truncated = raw
+ .chars()
+ .take(limit.saturating_sub(3))
+ .collect::<String>();
+ truncated.push_str("...");
+ truncated
+}
+
+pub(super) fn usize_to_u64_saturating(value: usize) -> u64 {
+ u64::try_from(value).unwrap_or(u64::MAX)
+}
+
+pub(super) fn elapsed_millis_saturating(elapsed: Duration) -> u64 {
+ let millis = elapsed.as_millis();
+ if millis > u128::from(u64::MAX) {
+ u64::MAX
+ } else {
+ millis as u64
+ }
+}
+
/// Total number of diagnostics as `u64`, saturating on (theoretical)
/// overflow; thin wrapper so callers share one conversion rule.
pub(super) fn diagnostics_total_count(entries: &[DiagnosticEntry]) -> u64 {
    usize_to_u64_saturating(entries.len())
}
+
+impl DiagnosticsCountsOutput {
+ pub(super) fn from_entries(entries: &[DiagnosticEntry]) -> Self {
+ let (mut e, mut w, mut i, mut h) = (0_u64, 0_u64, 0_u64, 0_u64);
+ for DiagnosticEntry { level, .. } in entries {
+ match level {
+ DiagnosticLevel::Error => e = e.saturating_add(1),
+ DiagnosticLevel::Warning => w = w.saturating_add(1),
+ DiagnosticLevel::Information => i = i.saturating_add(1),
+ DiagnosticLevel::Hint => h = h.saturating_add(1),
+ }
+ }
+ Self {
+ error_count: e,
+ warning_count: w,
+ information_count: i,
+ hint_count: h,
+ total_count: diagnostics_total_count(entries),
+ }
+ }
+}
+
// Echoes the requested mode back in the output enum (1:1 mapping).
impl From<DiagnosticsModeInput> for DiagnosticsModeOutput {
    fn from(value: DiagnosticsModeInput) -> Self {
        match value {
            DiagnosticsModeInput::Compact => Self::Compact,
            DiagnosticsModeInput::Full => Self::Full,
            DiagnosticsModeInput::Summary => Self::Summary,
        }
    }
}
+
+impl CompactDiagnosticOutput {
+ pub(super) fn from_entry(
+ value: DiagnosticEntry,
+ render_config: DiagnosticsRenderConfig,
+ workspace_root: Option<&Path>,
+ ) -> Self {
+ let DiagnosticEntry {
+ range,
+ level,
+ code,
+ message,
+ } = value;
+ let file_path = range.file_path().clone();
+ let start = range.start();
+ let end = range.end();
+ Self {
+ severity: DiagnosticLevelOutput::from(level),
+ file_path: render_diagnostic_path(&file_path, render_config.path_style, workspace_root),
+ start_line: start.line().get(),
+ start_column: start.column().get(),
+ end_line: end.line().get(),
+ end_column: end.column().get(),
+ code,
+ message: maybe_truncate_message(message, render_config.max_message_chars),
+ }
+ }
+}
+
+pub(super) fn flatten_diagnostics(
+ diagnostics: Vec<DiagnosticEntry>,
+ render_config: DiagnosticsRenderConfig,
+) -> FlattenedDiagnostics {
+ let counts = DiagnosticsCountsOutput::from_entries(diagnostics.as_slice());
+ let total_count = diagnostics.len();
+ let visible_count = render_config
+ .max_items
+ .map_or(total_count, |limit| limit.min(total_count));
+ let overflow_count = total_count.saturating_sub(visible_count);
+ let workspace_root = diagnostics_workspace_root(render_config.path_style);
+ let items = diagnostics
+ .into_iter()
+ .take(visible_count)
+ .map(|entry| {
+ CompactDiagnosticOutput::from_entry(entry, render_config, workspace_root.as_deref())
+ })
+ .collect();
+ FlattenedDiagnostics {
+ counts,
+ items,
+ overflow_count,
+ }
+}
+
+impl DiagnosticsJsonOutput {
+ pub(super) fn from_report(
+ report: DiagnosticsReport,
+ render_config: DiagnosticsRenderConfig,
+ ) -> Self {
+ let DiagnosticsReport { diagnostics } = report;
+ let mode = DiagnosticsModeOutput::from(render_config.mode);
+ match render_config.mode {
+ DiagnosticsModeInput::Summary => Self {
+ mode,
+ counts: DiagnosticsCountsOutput::from_entries(diagnostics.as_slice()),
+ truncated: false,
+ overflow_count: 0,
+ items: Vec::new(),
+ diagnostics: None,
+ },
+ DiagnosticsModeInput::Full => Self {
+ mode,
+ counts: DiagnosticsCountsOutput::from_entries(diagnostics.as_slice()),
+ truncated: false,
+ overflow_count: 0,
+ items: Vec::new(),
+ diagnostics: Some(
+ diagnostics
+ .into_iter()
+ .map(DiagnosticOutput::from)
+ .collect(),
+ ),
+ },
+ DiagnosticsModeInput::Compact => {
+ let FlattenedDiagnostics {
+ counts,
+ items,
+ overflow_count,
+ } = flatten_diagnostics(diagnostics, render_config);
+ Self {
+ mode,
+ counts,
+ truncated: overflow_count > 0,
+ overflow_count: usize_to_u64_saturating(overflow_count),
+ items,
+ diagnostics: None,
+ }
+ }
+ }
+ }
+}
+
+pub(super) fn diagnostics_json_output_schema() -> Arc<rmcp::model::JsonObject> {
+ rmcp::handler::server::tool::schema_for_output::<DiagnosticsJsonOutput>().unwrap_or_else(
+ |error| {
+ tracing::error!(
+ ?error,
+ "failed to build diagnostics JSON output schema; falling back to empty schema"
+ );
+ Arc::default()
+ },
+ )
+}
+
+pub(super) fn diagnostics_call_tool_result(
+ report: DiagnosticsReport,
+ render_config: DiagnosticsRenderConfig,
+) -> Result<CallToolResult, McpError> {
+ match render_config.render {
+ DiagnosticsRenderInput::Porcelain => Ok(CallToolResult::success(vec![Content::text(
+ render_diagnostics_porcelain(report, render_config),
+ )])),
+ DiagnosticsRenderInput::Json => {
+ let output = DiagnosticsJsonOutput::from_report(report, render_config);
+ let value = serde_json::to_value(output).map_err(|error| {
+ porcelain_internal_error(
+ format!("failed to serialize diagnostics JSON output: {error}"),
+ PorcelainErrorKind::InternalFailure,
+ Some("retry once; if it persists, inspect worker logs"),
+ false,
+ )
+ })?;
+ Ok(CallToolResult::structured(value))
+ }
+ }
+}
+
+pub(super) fn render_diagnostics_porcelain(
+ report: DiagnosticsReport,
+ render_config: DiagnosticsRenderConfig,
+) -> String {
+ let DiagnosticsReport { diagnostics } = report;
+ match render_config.mode {
+ DiagnosticsModeInput::Summary => porcelain_counts_line(
+ DiagnosticsCountsOutput::from_entries(diagnostics.as_slice()),
+ ),
+ DiagnosticsModeInput::Compact | DiagnosticsModeInput::Full => {
+ let flattening_config = match render_config.mode {
+ DiagnosticsModeInput::Compact => render_config,
+ DiagnosticsModeInput::Full => render_config.without_compact_limits(),
+ DiagnosticsModeInput::Summary => unreachable!(),
+ };
+ let FlattenedDiagnostics {
+ counts,
+ items,
+ overflow_count,
+ } = flatten_diagnostics(diagnostics, flattening_config);
+ let mut lines = vec![porcelain_counts_line(counts)];
+ if items.is_empty() {
+ return lines.join("\n");
+ }
+ lines.push(String::new());
+ lines.extend(render_porcelain_diagnostic_groups(items));
+ if overflow_count > 0 {
+ lines.push(String::new());
+ lines.push(format!(
+ "... {overflow_count} more diagnostics omitted; rerun with a larger max_items or render=json"
+ ));
+ }
+ lines.join("\n")
+ }
+ }
+}
+
+pub(super) fn diagnostics_workspace_root(path_style: DiagnosticsPathStyleInput) -> Option<PathBuf> {
+ matches!(path_style, DiagnosticsPathStyleInput::Relative)
+ .then(resolve_workspace_root_path)
+ .and_then(Result::ok)
+}
+
+pub(super) fn render_porcelain_diagnostic_groups(
+ entries: Vec<CompactDiagnosticOutput>,
+) -> Vec<String> {
+ let mut groups = Vec::<(String, Vec<CompactDiagnosticOutput>)>::new();
+ for entry in entries {
+ if let Some((group_file_path, group_entries)) = groups.last_mut()
+ && *group_file_path == entry.file_path
+ {
+ group_entries.push(entry);
+ continue;
+ }
+ groups.push((entry.file_path.clone(), vec![entry]));
+ }
+ let single_file = groups.len() == 1;
+ let mut lines = Vec::new();
+ for (index, (file_path, group_entries)) in groups.into_iter().enumerate() {
+ if !single_file {
+ if index > 0 {
+ lines.push(String::new());
+ }
+ lines.push(file_path);
+ }
+ for entry in group_entries {
+ lines.push(render_porcelain_diagnostic_line(entry));
+ }
+ }
+ lines
+}
+
+pub(super) fn render_porcelain_diagnostic_line(entry: CompactDiagnosticOutput) -> String {
+ let CompactDiagnosticOutput {
+ severity,
+ file_path: _file_path,
+ start_line,
+ start_column,
+ end_line,
+ end_column,
+ code,
+ message,
+ } = entry;
+ let code = code.map_or(String::new(), |code| format!(" [{code}]"));
+ let message = porcelain_message(message.as_str());
+ format!(
+ "{} {}:{}-{}:{}{} {}",
+ diagnostic_level_label(severity),
+ start_line,
+ start_column,
+ end_line,
+ end_column,
+ code,
+ message
+ )
+}
+
+pub(super) fn porcelain_counts_line(counts: DiagnosticsCountsOutput) -> String {
+ let DiagnosticsCountsOutput {
+ error_count,
+ warning_count,
+ information_count,
+ hint_count,
+ total_count,
+ } = counts;
+ format!(
+ "{error_count} errors, {warning_count} warnings, {information_count} information, {hint_count} hints ({total_count} total)"
+ )
+}
+
/// Lowercase porcelain label for a severity (kept short and grep-friendly).
pub(super) fn diagnostic_level_label(level: DiagnosticLevelOutput) -> &'static str {
    match level {
        DiagnosticLevelOutput::Error => "error",
        DiagnosticLevelOutput::Warning => "warning",
        DiagnosticLevelOutput::Information => "information",
        DiagnosticLevelOutput::Hint => "hint",
    }
}
+
/// Normalizes a diagnostic message for single-line porcelain output.
/// Delegates to libmcp; presumably collapses runs of inline whitespace —
/// exact semantics live in `libmcp::collapse_inline_whitespace`.
pub(super) fn porcelain_message(raw: &str) -> String {
    collapse_inline_whitespace(raw)
}
+
/// Builds a wire `LocationOutput` from a file path plus a 1-indexed point,
/// rendering the path in its absolute form.
pub(super) fn location_output(file_path: &SourceFilePath, point: SourcePoint) -> LocationOutput {
    LocationOutput {
        file_path: file_path.as_path().display().to_string(),
        line: point.line().get(),
        column: point.column().get(),
    }
}
+
// Straight conversion; reuses `location_output` for shared rendering rules.
impl From<SourceLocation> for LocationOutput {
    fn from(value: SourceLocation) -> Self {
        location_output(value.file_path(), value.point())
    }
}
+
+impl From<SourceRange> for RangeOutput {
+ fn from(value: SourceRange) -> Self {
+ Self {
+ file_path: value.file_path().as_path().display().to_string(),
+ start: location_output(value.file_path(), value.start()),
+ end: location_output(value.file_path(), value.end()),
+ }
+ }
+}
+
// Engine hover payload → wire hover output. `uncertain_empty` and `note`
// default to false/None here (presumably populated by retry handling
// elsewhere — confirm against the server tool paths).
impl From<HoverPayload> for HoverOutput {
    fn from(value: HoverPayload) -> Self {
        Self {
            rendered: value.rendered,
            range: value.range.map(RangeOutput::from),
            uncertain_empty: false,
            note: None,
        }
    }
}
+
// Internal severity → wire severity (1:1 mapping).
impl From<DiagnosticLevel> for DiagnosticLevelOutput {
    fn from(value: DiagnosticLevel) -> Self {
        match value {
            DiagnosticLevel::Error => Self::Error,
            DiagnosticLevel::Warning => Self::Warning,
            DiagnosticLevel::Information => Self::Information,
            DiagnosticLevel::Hint => Self::Hint,
        }
    }
}
+
+impl From<DiagnosticEntry> for DiagnosticOutput {
+ fn from(value: DiagnosticEntry) -> Self {
+ Self {
+ range: RangeOutput::from(value.range),
+ level: DiagnosticLevelOutput::from(value.level),
+ code: value.code,
+ message: value.message,
+ }
+ }
+}
+
// Rename report → wire output (straight field copy).
impl From<RenameReport> for RenameOutput {
    fn from(value: RenameReport) -> Self {
        Self {
            files_touched: value.files_touched,
            edits_applied: value.edits_applied,
        }
    }
}
+
// Fault → wire output. Class/code are rendered via Debug formatting, so
// the wire strings track enum variant names — NOTE(review): renaming a
// variant changes the emitted string; confirm clients treat it as opaque.
impl From<Fault> for FaultOutput {
    fn from(value: Fault) -> Self {
        Self {
            class: format!("{:?}", value.class),
            code: format!("{:?}", value.code),
            detail: value.detail.message,
        }
    }
}
+
+impl From<LifecycleSnapshot> for HealthOutput {
+ fn from(value: LifecycleSnapshot) -> Self {
+ match value {
+ LifecycleSnapshot::Cold { generation } => Self {
+ state: HealthStateOutput::Cold,
+ generation: generation.get(),
+ last_fault: None,
+ },
+ LifecycleSnapshot::Starting { generation } => Self {
+ state: HealthStateOutput::Starting,
+ generation: generation.get(),
+ last_fault: None,
+ },
+ LifecycleSnapshot::Ready { generation } => Self {
+ state: HealthStateOutput::Ready,
+ generation: generation.get(),
+ last_fault: None,
+ },
+ LifecycleSnapshot::Recovering {
+ generation,
+ last_fault,
+ } => Self {
+ state: HealthStateOutput::Recovering,
+ generation: generation.get(),
+ last_fault: Some(FaultOutput::from(last_fault)),
+ },
+ }
+ }
+}
diff --git a/crates/adequate-rust-mcp/src/worker/errors.rs b/crates/adequate-rust-mcp/src/worker/errors.rs
new file mode 100644
index 0000000..e51dc62
--- /dev/null
+++ b/crates/adequate-rust-mcp/src/worker/errors.rs
@@ -0,0 +1,150 @@
+use super::{EngineError, McpError};
+use serde::Serialize;
+use serde_json::{Value, json};
+
/// Machine-readable error categories embedded in MCP error payloads so
/// clients can branch on `kind` instead of parsing messages.
#[derive(Debug, Clone, Copy, Serialize)]
#[serde(rename_all = "snake_case")]
pub(super) enum PorcelainErrorKind {
    // Caller-supplied arguments were malformed.
    InvalidInput,
    // Requested path does not exist on disk.
    PathNotFound,
    // Requested path exists but is not a regular file.
    PathNotFile,
    // Requested line/column lies outside the file contents.
    PositionOutOfRange,
    // Transient condition; the caller may retry.
    TransientRetryable,
    // Underlying tool failed at runtime (not used in this module's mappings).
    ToolRuntimeFailure,
    // Unexpected internal failure in this server.
    InternalFailure,
    // Catch-all when no more specific kind applies.
    PorcelainFallback,
}
+
+pub(super) fn porcelain_error_data(
+ kind: PorcelainErrorKind,
+ hint: Option<&'static str>,
+ retryable: bool,
+) -> Value {
+ let mut data = json!({
+ "kind": kind,
+ });
+ if let Some(hint) = hint {
+ data["hint"] = json!(hint);
+ }
+ if retryable {
+ data["retryable"] = json!(true);
+ }
+ data
+}
+
/// Builds an `invalid_params` MCP error with porcelain metadata; invalid
/// input is never marked retryable.
pub(super) fn porcelain_invalid_params(
    message: impl Into<String>,
    kind: PorcelainErrorKind,
    hint: Option<&'static str>,
) -> McpError {
    McpError::invalid_params(
        message.into(),
        Some(porcelain_error_data(kind, hint, false)),
    )
}
+
/// Builds an `internal_error` MCP error with porcelain metadata;
/// `retryable` tells the caller whether trying again can help.
pub(super) fn porcelain_internal_error(
    message: impl Into<String>,
    kind: PorcelainErrorKind,
    hint: Option<&'static str>,
    retryable: bool,
) -> McpError {
    McpError::internal_error(
        message.into(),
        Some(porcelain_error_data(kind, hint, retryable)),
    )
}
+
+pub(super) fn is_position_out_of_range_lsp_error(message: &str) -> bool {
+ let normalized = message.to_ascii_lowercase();
+ normalized.contains("invalid offset")
+ || normalized.contains("line index length")
+ || normalized.contains("linecol {")
+ || normalized.contains("position is out of range")
+}
+
+pub(super) fn is_transient_lsp_error(code: i64, message: &str) -> bool {
+ let normalized = message.to_ascii_lowercase();
+ code == -32801
+ || code == -32802
+ || normalized.contains("content modified")
+ || normalized.contains("document changed")
+ || normalized.contains("server cancelled")
+ || normalized.contains("request cancelled")
+ || normalized.contains("request canceled")
+}
+
/// Translates an engine-level failure into an MCP error carrying porcelain
/// metadata (kind / hint / retryable).
///
/// Caller mistakes (bad paths, bad payloads, out-of-range positions) map to
/// `invalid_params`; infrastructure failures map to `internal_error`, with
/// `retryable: true` only for transport faults and transient LSP errors.
pub(super) fn map_engine_error(error: EngineError) -> McpError {
    match error {
        // I/O while resolving/reading the requested source file.
        EngineError::Io(io_error) => match io_error.kind() {
            std::io::ErrorKind::NotFound => porcelain_invalid_params(
                "requested source file does not exist",
                PorcelainErrorKind::PathNotFound,
                Some("verify the file path or URI"),
            ),
            std::io::ErrorKind::InvalidInput => porcelain_invalid_params(
                "invalid source file input",
                PorcelainErrorKind::InvalidInput,
                Some("use an absolute path, file URI, or workspace-relative path"),
            ),
            // Any other I/O kind: opaque, non-retryable fallback.
            _ => porcelain_internal_error(
                "source file operation failed",
                PorcelainErrorKind::PorcelainFallback,
                Some("retry once; if it persists, inspect worker logs"),
                false,
            ),
        },
        EngineError::Invariant(_) => porcelain_internal_error(
            "internal invariant check failed while handling request",
            PorcelainErrorKind::InternalFailure,
            Some("retry once; if it persists, inspect worker logs"),
            false,
        ),
        EngineError::InvalidPayload { .. } => porcelain_invalid_params(
            "received unexpected response payload from rust-analyzer",
            PorcelainErrorKind::InvalidInput,
            Some("retry once; if it persists, refresh rust-analyzer state"),
        ),
        EngineError::InvalidFileUrl => porcelain_invalid_params(
            "source path cannot be represented as a local file URL",
            PorcelainErrorKind::InvalidInput,
            Some("use a local file path or file:// URI"),
        ),
        // Transport/process faults are retryable once the worker recovers.
        EngineError::Fault(_) => porcelain_internal_error(
            "rust-analyzer worker reported a transport/process fault",
            PorcelainErrorKind::TransientRetryable,
            Some("retry after worker recovery"),
            true,
        ),
        // LSP-level errors are classified by code/message heuristics.
        EngineError::LspResponse(error) => {
            let code = error.code;
            let message = error.message.as_str();
            if is_position_out_of_range_lsp_error(message) {
                porcelain_invalid_params(
                    "requested line/column is outside the file bounds",
                    PorcelainErrorKind::PositionOutOfRange,
                    Some("pick a position inside the current file contents"),
                )
            } else if is_transient_lsp_error(code, message) {
                porcelain_internal_error(
                    "request could not complete because rust-analyzer cancelled or invalidated it",
                    PorcelainErrorKind::TransientRetryable,
                    Some("retry after rust-analyzer settles"),
                    true,
                )
            } else {
                porcelain_internal_error(
                    format!("rust-analyzer request failed (code={code})"),
                    PorcelainErrorKind::PorcelainFallback,
                    Some("retry once; if it persists, inspect worker logs"),
                    false,
                )
            }
        }
        EngineError::InvalidRequest { .. } => porcelain_invalid_params(
            "failed to encode request payload for rust-analyzer",
            PorcelainErrorKind::InvalidInput,
            Some("inspect tool arguments and retry"),
        ),
    }
}
diff --git a/crates/adequate-rust-mcp/src/worker/input.rs b/crates/adequate-rust-mcp/src/worker/input.rs
new file mode 100644
index 0000000..93245a1
--- /dev/null
+++ b/crates/adequate-rust-mcp/src/worker/input.rs
@@ -0,0 +1,452 @@
+use super::{
+ AbsolutePathInput, AdvancedLspMethod, DiagnosticFileTargetsRaw, DiagnosticsInput,
+ DiagnosticsRenderConfig, OneIndexedInput, OneIndexedRawInput, PorcelainErrorKind,
+ SnapshotRenderInput, SourceFilePath, SourcePoint, SourcePosition, SymbolPositionInput,
+ SymbolQueryInput, porcelain_invalid_params, resolve_workspace_root_path,
+};
+use libmcp::{
+ PathNormalizeError, normalize_ascii_token as normalize_user_method_token, normalize_local_path,
+ parse_human_unsigned_u64, saturating_u64_to_usize,
+};
+use ra_mcp_domain::types::{OneIndexedColumn, OneIndexedLine};
+use rmcp::ErrorData as McpError;
+use serde::{Deserialize, Deserializer};
+use serde_json::Value;
+
impl AbsolutePathInput {
    /// Normalizes the raw user-supplied path (absolute path, file:// URI, or
    /// workspace-relative path) into a validated `SourceFilePath`.
    ///
    /// Relative inputs are resolved against the workspace root; every
    /// normalization failure becomes a porcelain `invalid_params` error with
    /// an actionable hint.
    pub(super) fn into_source_file_path(self) -> Result<SourceFilePath, McpError> {
        let raw = self.0.trim();
        if raw.is_empty() {
            return Err(porcelain_invalid_params(
                "source file path must be non-empty",
                PorcelainErrorKind::InvalidInput,
                Some("pass file_path as an absolute path, file URI, or workspace-relative path"),
            ));
        }

        let workspace_root = resolve_workspace_root_path()?;
        let absolute_path = normalize_local_path(raw, Some(workspace_root.as_path())).map_err(
            // Each normalization failure gets its own message/hint pair.
            |error| match error {
                PathNormalizeError::Empty => porcelain_invalid_params(
                    "source file path must be non-empty",
                    PorcelainErrorKind::InvalidInput,
                    Some(
                        "pass file_path as an absolute path, file URI, or workspace-relative path",
                    ),
                ),
                PathNormalizeError::InvalidFileUri => porcelain_invalid_params(
                    "file URI is invalid",
                    PorcelainErrorKind::InvalidInput,
                    Some("ensure file_path is a valid file:// URI"),
                ),
                PathNormalizeError::NonLocalFileUri => porcelain_invalid_params(
                    "file URI must resolve to a local path",
                    PorcelainErrorKind::InvalidInput,
                    Some("use a local file:// URI"),
                ),
            },
        )?;

        // Final domain-type validation of the normalized absolute path.
        SourceFilePath::try_new(absolute_path).map_err(|_| {
            porcelain_invalid_params(
                "source file path is invalid",
                PorcelainErrorKind::InvalidInput,
                Some("use a normalized local source file path"),
            )
        })
    }
}
+
+impl OneIndexedInput {
+ pub(super) const fn normalized_for_one_indexed(self) -> u64 {
+ if self.0 == 0 { 1 } else { self.0 }
+ }
+}
+
impl<'de> Deserialize<'de> for OneIndexedInput {
    /// Accepts a 1-indexed position as an unsigned integer, an
    /// integer-valued float (e.g. `3.0`), or a numeric string; anything
    /// else is rejected with a descriptive error.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let raw = OneIndexedRawInput::deserialize(deserializer)?;
        let parsed = match raw {
            OneIndexedRawInput::Unsigned(value) => Some(value),
            OneIndexedRawInput::Float(value) => {
                // Only finite, non-negative, integer-valued floats qualify.
                if value.is_finite() && value >= 0.0 && value.fract() == 0.0 {
                    // NOTE: u64::MAX as f64 rounds up to 2^64; Rust's `as`
                    // saturates, so the boundary still lands on u64::MAX.
                    let max = u64::MAX as f64;
                    if value <= max {
                        Some(value as u64)
                    } else {
                        None
                    }
                } else {
                    None
                }
            }
            OneIndexedRawInput::Text(text) => parse_human_unsigned_u64(text.as_str()),
        };

        parsed.map(Self).ok_or_else(|| {
            serde::de::Error::custom(
                "expected non-negative integer index (number, integer-like float, or numeric string)",
            )
        })
    }
}
+
impl SymbolPositionInput {
    /// Converts raw (file, line, column) input into a validated
    /// `SourcePosition`, clamping 0 to 1 before the domain bounds checks.
    pub(super) fn into_source_position(self) -> Result<SourcePosition, McpError> {
        let line =
            OneIndexedLine::try_new(self.line.normalized_for_one_indexed()).map_err(|_| {
                porcelain_invalid_params(
                    "line index is out of supported bounds",
                    PorcelainErrorKind::InvalidInput,
                    Some("line must be >= 1"),
                )
            })?;
        let column =
            OneIndexedColumn::try_new(self.column.normalized_for_one_indexed()).map_err(|_| {
                porcelain_invalid_params(
                    "column index is out of supported bounds",
                    PorcelainErrorKind::InvalidInput,
                    Some("column must be >= 1"),
                )
            })?;
        Ok(SourcePosition::new(
            self.file_path.into_source_file_path()?,
            SourcePoint::new(line, column),
        ))
    }
}
+
impl SymbolQueryInput {
    /// Splits the raw query into a validated source position plus the
    /// shared render configuration built from the user's render options.
    pub(super) fn into_request(
        self,
    ) -> Result<(SourcePosition, super::CommonRenderConfig), McpError> {
        let Self {
            position,
            render,
            path_style,
        } = self;
        let position = position.into_source_position()?;
        Ok((
            position,
            super::CommonRenderConfig::from_user_input(render, path_style),
        ))
    }
}
+
impl SnapshotRenderInput {
    // Chosen render flavor; porcelain text is the default when omitted.
    pub(super) fn render(self) -> super::CommonRenderInput {
        self.render.unwrap_or(super::CommonRenderInput::Porcelain)
    }
}
+
impl DiagnosticsInput {
    /// Validates a diagnostics request: requires at least one target file,
    /// normalizes every path, verifies each path exists and is a file, and
    /// fills render-config defaults for any omitted option.
    pub(super) fn into_request(
        self,
    ) -> Result<(Vec<SourceFilePath>, DiagnosticsRenderConfig), McpError> {
        let Self {
            file_paths,
            mode,
            render,
            max_items,
            max_message_chars,
            path_style,
        } = self;
        if file_paths.is_empty() {
            return Err(porcelain_invalid_params(
                "at least one file path is required",
                PorcelainErrorKind::InvalidInput,
                Some("pass file_path or file_paths"),
            ));
        }
        // Normalize first (fails fast on malformed input), then check disk.
        let file_paths = file_paths
            .into_iter()
            .map(AbsolutePathInput::into_source_file_path)
            .collect::<Result<Vec<_>, McpError>>()?;
        validate_requested_file_paths(file_paths.as_slice())?;
        let defaults = DiagnosticsRenderConfig::default();
        let render_config = DiagnosticsRenderConfig {
            mode: mode.unwrap_or(defaults.mode),
            render: render.unwrap_or(defaults.render),
            max_items: max_items.map(saturating_u64_to_usize),
            max_message_chars: max_message_chars.map(saturating_u64_to_usize),
            path_style,
        };
        Ok((file_paths, render_config))
    }
}
+
impl AdvancedLspMethod {
    /// Every supported method, in one place, so user input can be matched
    /// against canonical names. Keep in sync with `canonical_input_name`
    /// and `as_lsp_method` (the array length is checked by the type).
    const ALL: [Self; 45] = [
        Self::Hover,
        Self::Definition,
        Self::References,
        Self::Declaration,
        Self::TypeDefinition,
        Self::Implementation,
        Self::Completion,
        Self::CompletionResolve,
        Self::SignatureHelp,
        Self::DocumentSymbol,
        Self::WorkspaceSymbol,
        Self::WorkspaceSymbolResolve,
        Self::PrepareRename,
        Self::Rename,
        Self::CodeAction,
        Self::CodeActionResolve,
        Self::CodeLens,
        Self::CodeLensResolve,
        Self::ExecuteCommand,
        Self::Formatting,
        Self::RangeFormatting,
        Self::OnTypeFormatting,
        Self::DocumentHighlight,
        Self::DocumentLink,
        Self::DocumentLinkResolve,
        Self::DocumentColor,
        Self::ColorPresentation,
        Self::LinkedEditingRange,
        Self::InlayHint,
        Self::InlayHintResolve,
        Self::FoldingRange,
        Self::SelectionRange,
        Self::DocumentDiagnostic,
        Self::WorkspaceDiagnostic,
        Self::SemanticTokensFull,
        Self::SemanticTokensFullDelta,
        Self::SemanticTokensRange,
        Self::Moniker,
        Self::InlineValue,
        Self::TypeHierarchyPrepare,
        Self::TypeHierarchySupertypes,
        Self::TypeHierarchySubtypes,
        Self::CallHierarchyPrepare,
        Self::CallHierarchyIncomingCalls,
        Self::CallHierarchyOutgoingCalls,
    ];

    /// Canonical snake_case name exposed to tool users.
    const fn canonical_input_name(&self) -> &'static str {
        match self {
            Self::Hover => "hover",
            Self::Definition => "definition",
            Self::References => "references",
            Self::Declaration => "declaration",
            Self::TypeDefinition => "type_definition",
            Self::Implementation => "implementation",
            Self::Completion => "completion",
            Self::CompletionResolve => "completion_resolve",
            Self::SignatureHelp => "signature_help",
            Self::DocumentSymbol => "document_symbol",
            Self::WorkspaceSymbol => "workspace_symbol",
            Self::WorkspaceSymbolResolve => "workspace_symbol_resolve",
            Self::PrepareRename => "prepare_rename",
            Self::Rename => "rename",
            Self::CodeAction => "code_action",
            Self::CodeActionResolve => "code_action_resolve",
            Self::CodeLens => "code_lens",
            Self::CodeLensResolve => "code_lens_resolve",
            Self::ExecuteCommand => "execute_command",
            Self::Formatting => "formatting",
            Self::RangeFormatting => "range_formatting",
            Self::OnTypeFormatting => "on_type_formatting",
            Self::DocumentHighlight => "document_highlight",
            Self::DocumentLink => "document_link",
            Self::DocumentLinkResolve => "document_link_resolve",
            Self::DocumentColor => "document_color",
            Self::ColorPresentation => "color_presentation",
            Self::LinkedEditingRange => "linked_editing_range",
            Self::InlayHint => "inlay_hint",
            Self::InlayHintResolve => "inlay_hint_resolve",
            Self::FoldingRange => "folding_range",
            Self::SelectionRange => "selection_range",
            Self::DocumentDiagnostic => "document_diagnostic",
            Self::WorkspaceDiagnostic => "workspace_diagnostic",
            Self::SemanticTokensFull => "semantic_tokens_full",
            Self::SemanticTokensFullDelta => "semantic_tokens_full_delta",
            Self::SemanticTokensRange => "semantic_tokens_range",
            Self::Moniker => "moniker",
            Self::InlineValue => "inline_value",
            Self::TypeHierarchyPrepare => "type_hierarchy_prepare",
            Self::TypeHierarchySupertypes => "type_hierarchy_supertypes",
            Self::TypeHierarchySubtypes => "type_hierarchy_subtypes",
            Self::CallHierarchyPrepare => "call_hierarchy_prepare",
            Self::CallHierarchyIncomingCalls => "call_hierarchy_incoming_calls",
            Self::CallHierarchyOutgoingCalls => "call_hierarchy_outgoing_calls",
        }
    }

    /// Resolves lenient user input to a method: both the candidate names
    /// and the input pass through the same token normalizer, so
    /// snake_case, camelCase, kebab-case, and full LSP paths all match.
    fn from_user_input(raw: &str) -> Option<Self> {
        let normalized = normalize_user_method_token(raw);
        if normalized.is_empty() {
            return None;
        }
        Self::ALL.into_iter().find(|candidate| {
            normalized == normalize_user_method_token(candidate.canonical_input_name())
                || normalized == normalize_user_method_token(candidate.as_lsp_method())
        })
    }

    /// Wire-level LSP method path sent to rust-analyzer.
    pub(super) const fn as_lsp_method(&self) -> &'static str {
        match self {
            Self::Hover => "textDocument/hover",
            Self::Definition => "textDocument/definition",
            Self::References => "textDocument/references",
            Self::Declaration => "textDocument/declaration",
            Self::TypeDefinition => "textDocument/typeDefinition",
            Self::Implementation => "textDocument/implementation",
            Self::Completion => "textDocument/completion",
            Self::CompletionResolve => "completionItem/resolve",
            Self::SignatureHelp => "textDocument/signatureHelp",
            Self::DocumentSymbol => "textDocument/documentSymbol",
            Self::WorkspaceSymbol => "workspace/symbol",
            Self::WorkspaceSymbolResolve => "workspaceSymbol/resolve",
            Self::PrepareRename => "textDocument/prepareRename",
            Self::Rename => "textDocument/rename",
            Self::CodeAction => "textDocument/codeAction",
            Self::CodeActionResolve => "codeAction/resolve",
            Self::CodeLens => "textDocument/codeLens",
            Self::CodeLensResolve => "codeLens/resolve",
            Self::ExecuteCommand => "workspace/executeCommand",
            Self::Formatting => "textDocument/formatting",
            Self::RangeFormatting => "textDocument/rangeFormatting",
            Self::OnTypeFormatting => "textDocument/onTypeFormatting",
            Self::DocumentHighlight => "textDocument/documentHighlight",
            Self::DocumentLink => "textDocument/documentLink",
            Self::DocumentLinkResolve => "documentLink/resolve",
            Self::DocumentColor => "textDocument/documentColor",
            Self::ColorPresentation => "textDocument/colorPresentation",
            Self::LinkedEditingRange => "textDocument/linkedEditingRange",
            Self::InlayHint => "textDocument/inlayHint",
            Self::InlayHintResolve => "inlayHint/resolve",
            Self::FoldingRange => "textDocument/foldingRange",
            Self::SelectionRange => "textDocument/selectionRange",
            Self::DocumentDiagnostic => "textDocument/diagnostic",
            Self::WorkspaceDiagnostic => "workspace/diagnostic",
            Self::SemanticTokensFull => "textDocument/semanticTokens/full",
            Self::SemanticTokensFullDelta => "textDocument/semanticTokens/full/delta",
            Self::SemanticTokensRange => "textDocument/semanticTokens/range",
            Self::Moniker => "textDocument/moniker",
            Self::InlineValue => "textDocument/inlineValue",
            Self::TypeHierarchyPrepare => "textDocument/prepareTypeHierarchy",
            Self::TypeHierarchySupertypes => "typeHierarchy/supertypes",
            Self::TypeHierarchySubtypes => "typeHierarchy/subtypes",
            Self::CallHierarchyPrepare => "textDocument/prepareCallHierarchy",
            Self::CallHierarchyIncomingCalls => "callHierarchy/incomingCalls",
            Self::CallHierarchyOutgoingCalls => "callHierarchy/outgoingCalls",
        }
    }
}
+
+impl<'de> Deserialize<'de> for AdvancedLspMethod {
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where
+ D: Deserializer<'de>,
+ {
+ let raw = String::deserialize(deserializer)?;
+ Self::from_user_input(raw.as_str()).ok_or_else(|| {
+ serde::de::Error::custom(format!(
+ "unsupported advanced method `{raw}`; use tools/list for canonical method names (snake_case, camelCase, kebab-case, and full LSP paths are accepted)"
+ ))
+ })
+ }
+}
+
/// Deserializes advanced-LSP params, additionally accepting a JSON
/// object/array double-encoded as a string (see
/// `normalize_advanced_lsp_params`).
pub(super) fn deserialize_advanced_lsp_params<'de, D>(deserializer: D) -> Result<Value, D::Error>
where
    D: Deserializer<'de>,
{
    let raw = Value::deserialize(deserializer)?;
    normalize_advanced_lsp_params(raw).map_err(serde::de::Error::custom)
}
+
+pub(super) fn deserialize_diagnostic_file_targets<'de, D>(
+ deserializer: D,
+) -> Result<Vec<AbsolutePathInput>, D::Error>
+where
+ D: Deserializer<'de>,
+{
+ let raw = Option::<DiagnosticFileTargetsRaw>::deserialize(deserializer)?;
+ let parsed = match raw {
+ None => Vec::new(),
+ Some(DiagnosticFileTargetsRaw::Single(path)) => vec![AbsolutePathInput(path)],
+ Some(DiagnosticFileTargetsRaw::Many(paths)) => {
+ paths.into_iter().map(AbsolutePathInput).collect::<Vec<_>>()
+ }
+ };
+ Ok(parsed)
+}
+
+pub(super) fn deserialize_optional_human_unsigned_u64<'de, D>(
+ deserializer: D,
+) -> Result<Option<u64>, D::Error>
+where
+ D: Deserializer<'de>,
+{
+ let raw = Option::<OneIndexedRawInput>::deserialize(deserializer)?;
+ let parsed = raw.map(|value| match value {
+ OneIndexedRawInput::Unsigned(number) => Some(number),
+ OneIndexedRawInput::Float(number) => {
+ if number.is_finite() && number >= 0.0 && number.fract() == 0.0 {
+ let max = u64::MAX as f64;
+ if number <= max {
+ Some(number as u64)
+ } else {
+ None
+ }
+ } else {
+ None
+ }
+ }
+ OneIndexedRawInput::Text(text) => parse_human_unsigned_u64(text.as_str()),
+ });
+ if parsed.is_some() && parsed.flatten().is_none() {
+ return Err(serde::de::Error::custom(
+ "expected non-negative integer limit (number, integer-like float, or numeric string)",
+ ));
+ }
+ Ok(parsed.flatten())
+}
+
+pub(super) fn validate_requested_file_paths(file_paths: &[SourceFilePath]) -> Result<(), McpError> {
+ for file_path in file_paths {
+ let raw_path = file_path.as_path();
+ if !raw_path.exists() {
+ return Err(porcelain_invalid_params(
+ format!("requested file path does not exist: {}", raw_path.display()),
+ PorcelainErrorKind::PathNotFound,
+ Some("verify the requested file path"),
+ ));
+ }
+ if !raw_path.is_file() {
+ return Err(porcelain_invalid_params(
+ format!("requested path is not a file: {}", raw_path.display()),
+ PorcelainErrorKind::PathNotFile,
+ Some("diagnostics require source file paths"),
+ ));
+ }
+ }
+ Ok(())
+}
+
+pub(super) fn normalize_advanced_lsp_params(raw: Value) -> Result<Value, String> {
+ match raw {
+ Value::String(text) => {
+ let trimmed = text.trim();
+ if trimmed.is_empty() {
+ return Ok(Value::String(text));
+ }
+ let starts_like_json = trimmed
+ .as_bytes()
+ .first()
+ .is_some_and(|first| matches!(*first, b'{' | b'['));
+ if !starts_like_json {
+ return Ok(Value::String(text));
+ }
+ serde_json::from_str::<Value>(trimmed)
+ .map_err(|error| format!("invalid JSON payload string: {error}"))
+ }
+ other => Ok(other),
+ }
+}
diff --git a/crates/adequate-rust-mcp/src/worker/mod.rs b/crates/adequate-rust-mcp/src/worker/mod.rs
new file mode 100644
index 0000000..ea3c2ab
--- /dev/null
+++ b/crates/adequate-rust-mcp/src/worker/mod.rs
@@ -0,0 +1,73 @@
+//! Worker MCP server exposing robust rust-analyzer tools.
+
+mod clippy;
+mod diagnostics;
+mod errors;
+mod input;
+mod porcelain;
+mod schema;
+mod server;
+mod telemetry;
+#[cfg(test)]
+mod tests;
+mod workspace;
+
+pub(crate) use server::run_worker;
+
+use clippy::collect_clippy_diagnostics;
+#[cfg(test)]
+use clippy::parse_clippy_json_stream;
+use diagnostics::{
+ DiagnosticsRenderConfig, diagnostics_call_tool_result, diagnostics_json_output_schema,
+ elapsed_millis_saturating,
+};
+use errors::{
+ PorcelainErrorKind, map_engine_error, porcelain_internal_error, porcelain_invalid_params,
+};
+use input::{
+ deserialize_advanced_lsp_params, deserialize_diagnostic_file_targets,
+ deserialize_optional_human_unsigned_u64,
+};
+use porcelain::{
+ CommonRenderConfig, EMPTY_RESULT_MAX_RETRIES, EMPTY_RESULT_RETRY_DELAY_MS,
+ call_tool_result_with_render, hover_output_from_payload, json_output_schema,
+ locations_output_from_sources, render_definition_porcelain, render_health_porcelain,
+ render_hover_porcelain, render_references_porcelain, render_telemetry_porcelain,
+ should_retry_empty_symbol_result,
+};
+use schema::{
+ AbsolutePathInput, AdvancedLspMethod, AdvancedLspRequestInput, AdvancedLspResponseOutput,
+ CargoJsonMessageWire, CommonRenderInput, CompactDiagnosticOutput, DiagnosticFileTargetsRaw,
+ DiagnosticLevelOutput, DiagnosticOutput, DiagnosticsCountsOutput, DiagnosticsInput,
+ DiagnosticsJsonOutput, DiagnosticsModeInput, DiagnosticsModeOutput, DiagnosticsPathStyleInput,
+ DiagnosticsRenderInput, FaultOutput, FixEverythingInput, FixEverythingOutput, FixStepOutput,
+ FlattenedDiagnostics, HealthOutput, HealthStateOutput, HoverOutput, LocationOutput,
+ LocationsOutput, MethodTelemetryOutput, OneIndexedInput, OneIndexedRawInput, PathStyleInput,
+ RangeOutput, RenameInput, RenameOutput, RustcSpanWire, SnapshotRenderInput,
+ SymbolPositionInput, SymbolQueryInput, TelemetryOutput, TelemetryTotalsOutput,
+};
+use telemetry::ToolTelemetryState;
+#[cfg(test)]
+use workspace::{
+ read_workspace_tool_command, read_workspace_tool_metadata, resolve_workspace_fix_command_specs,
+};
+use workspace::{
+ resolve_clippy_command_spec, resolve_workspace_root_path, run_workspace_command,
+ run_workspace_fix_everything,
+};
+
+use ra_mcp_domain::{
+ fault::Fault,
+ lifecycle::LifecycleSnapshot,
+ types::{
+ SourceFilePath, SourceLocation, SourcePoint, SourcePosition, SourceRange, WorkspaceRoot,
+ },
+};
+use ra_mcp_engine::{
+ DiagnosticEntry, DiagnosticLevel, DiagnosticsReport, Engine, EngineConfig, EngineError,
+ HoverPayload, MethodTelemetrySnapshot, RenameReport, TelemetrySnapshot, TelemetryTotals,
+};
+use rmcp::{
+ ErrorData as McpError, Json, ServerHandler, ServiceExt,
+ model::{Content, ServerInfo},
+};
diff --git a/crates/adequate-rust-mcp/src/worker/porcelain.rs b/crates/adequate-rust-mcp/src/worker/porcelain.rs
new file mode 100644
index 0000000..9efbd15
--- /dev/null
+++ b/crates/adequate-rust-mcp/src/worker/porcelain.rs
@@ -0,0 +1,488 @@
+use super::{
+ CommonRenderInput, FaultOutput, HealthOutput, HoverOutput, HoverPayload, LocationOutput,
+ LocationsOutput, McpError, PathStyleInput, PorcelainErrorKind, RangeOutput, SourceFilePath,
+ SourceLocation, SourcePoint, SourceRange, TelemetryOutput, porcelain_internal_error,
+ resolve_workspace_root_path,
+};
+use libmcp::{RenderMode, collapse_inline_whitespace};
+use rmcp::model::{CallToolResult, Content};
+use schemars::JsonSchema;
+use serde::Serialize;
+use std::{
+ fs,
+ path::{Path, PathBuf},
+ sync::Arc,
+};
+
// Maximum number of locations listed in porcelain output before eliding.
const PORCELAIN_LOCATION_LIMIT: usize = 8;
// Maximum characters kept from a source-line snippet in porcelain output.
const PORCELAIN_SNIPPET_CHAR_LIMIT: usize = 120;
// Window after engine start during which an empty result is treated as
// possibly caused by incomplete indexing. NOTE(review): 5s assumed adequate
// for typical warm-up — confirm against engine startup telemetry.
pub(super) const EMPTY_RESULT_WARMUP_WINDOW_MS: u64 = 5_000;
// Delay between retries of an empty result inside the warm-up window.
pub(super) const EMPTY_RESULT_RETRY_DELAY_MS: u64 = 500;
// Upper bound on warm-up retries for a single request.
pub(super) const EMPTY_RESULT_MAX_RETRIES: u8 = 4;
// Canned caveat appended to results that may be incomplete during indexing.
pub(super) const UNCERTAIN_EMPTY_NOTE: &str =
    "result may be incomplete during indexing; retry in a few seconds";

// Render configuration shared with the host-side libmcp helpers.
pub(super) type CommonRenderConfig = libmcp::RenderConfig;
+
+pub(super) fn json_output_schema<T>() -> Arc<rmcp::model::JsonObject>
+where
+ T: JsonSchema + 'static,
+{
+ rmcp::handler::server::tool::schema_for_output::<T>().unwrap_or_else(|error| {
+ tracing::error!(
+ ?error,
+ "failed to build JSON output schema; falling back to empty schema"
+ );
+ Arc::default()
+ })
+}
+
+pub(super) fn call_tool_result_with_render<T>(
+ value: T,
+ render: CommonRenderInput,
+ porcelain: String,
+) -> Result<CallToolResult, McpError>
+where
+ T: Serialize,
+{
+ match render {
+ RenderMode::Porcelain => Ok(CallToolResult::success(vec![Content::text(porcelain)])),
+ RenderMode::Json => {
+ let value = serde_json::to_value(value).map_err(|error| {
+ porcelain_internal_error(
+ format!("failed to serialize JSON output: {error}"),
+ PorcelainErrorKind::InternalFailure,
+ Some("retry once; if it persists, inspect worker logs"),
+ false,
+ )
+ })?;
+ Ok(CallToolResult::structured(value))
+ }
+ }
+}
+
+pub(super) fn should_retry_empty_symbol_result(uptime_ms: u64) -> bool {
+ uptime_ms < EMPTY_RESULT_WARMUP_WINDOW_MS
+}
+
+pub(super) fn uncertain_empty_note(uncertain_empty: bool) -> Option<String> {
+ uncertain_empty.then(|| UNCERTAIN_EMPTY_NOTE.to_owned())
+}
+
+pub(super) fn locations_output_from_sources(
+ locations: Vec<SourceLocation>,
+ render_config: CommonRenderConfig,
+ uncertain_empty: bool,
+) -> LocationsOutput {
+ LocationsOutput {
+ locations: locations
+ .into_iter()
+ .map(|value| location_output(value.file_path(), value.point(), render_config))
+ .collect(),
+ uncertain_empty,
+ note: uncertain_empty_note(uncertain_empty),
+ }
+}
+
+pub(super) fn hover_output_from_payload(
+ payload: HoverPayload,
+ render_config: CommonRenderConfig,
+ uncertain_empty: bool,
+) -> HoverOutput {
+ HoverOutput {
+ rendered: payload.rendered,
+ range: payload
+ .range
+ .map(|value| range_output(value, render_config)),
+ uncertain_empty,
+ note: uncertain_empty_note(uncertain_empty),
+ }
+}
+
+pub(super) fn render_hover_porcelain(
+ payload: &HoverPayload,
+ render_config: CommonRenderConfig,
+ uncertain_empty: bool,
+) -> String {
+ let summary = payload
+ .rendered
+ .as_deref()
+ .and_then(hover_summary_line)
+ .unwrap_or_else(|| "no hover information".to_owned());
+ let mut lines = vec![summary];
+ if let Some(range) = payload.range.as_ref() {
+ lines.push(render_range_brief(range, render_config));
+ }
+ if uncertain_empty {
+ lines.push(format!("note: {UNCERTAIN_EMPTY_NOTE}"));
+ }
+ lines.join("\n")
+}
+
/// Render a porcelain listing for go-to-definition results.
///
/// Thin wrapper over the shared location-list renderer with the "definition"
/// noun (the renderer pluralizes it based on the count).
pub(super) fn render_definition_porcelain(
    locations: &[SourceLocation],
    render_config: CommonRenderConfig,
    uncertain_empty: bool,
) -> String {
    render_location_list_porcelain("definition", locations, render_config, uncertain_empty)
}
+
/// Render a porcelain listing for find-references results.
///
/// Thin wrapper over the shared location-list renderer with the "reference"
/// noun (the renderer pluralizes it based on the count).
pub(super) fn render_references_porcelain(
    locations: &[SourceLocation],
    render_config: CommonRenderConfig,
    uncertain_empty: bool,
) -> String {
    render_location_list_porcelain("reference", locations, render_config, uncertain_empty)
}
+
+pub(super) fn render_health_porcelain(output: &HealthOutput) -> String {
+ let HealthOutput {
+ state,
+ generation,
+ last_fault,
+ } = output;
+ let mut line = format!("{} gen={generation}", render_health_state(state.clone()));
+ if let Some(last_fault) = last_fault.as_ref() {
+ line.push_str(format!(" last_fault={}", render_fault_brief(last_fault)).as_str());
+ }
+ line
+}
+
+pub(super) fn render_telemetry_porcelain(output: &TelemetryOutput) -> String {
+ let TelemetryOutput {
+ uptime_ms,
+ state,
+ generation,
+ consecutive_failures,
+ restart_count,
+ totals,
+ methods,
+ last_fault,
+ } = output;
+ let mut lines = vec![
+ format!(
+ "{} gen={} uptime={} restarts={} failures={}",
+ render_health_state(state.clone()),
+ generation,
+ render_duration_ms(*uptime_ms),
+ restart_count,
+ consecutive_failures
+ ),
+ format!(
+ "totals requests={} ok={} response_errors={} transport_faults={} retries={}",
+ totals.request_count,
+ totals.success_count,
+ totals.response_error_count,
+ totals.transport_fault_count,
+ totals.retry_count
+ ),
+ ];
+ let mut slowest = methods.clone();
+ slowest.sort_by(|left, right| {
+ right
+ .avg_latency_ms
+ .cmp(&left.avg_latency_ms)
+ .then_with(|| right.request_count.cmp(&left.request_count))
+ .then_with(|| left.method.cmp(&right.method))
+ });
+ let visible = slowest
+ .into_iter()
+ .filter(|method| method.request_count > 0)
+ .take(3)
+ .collect::<Vec<_>>();
+ if !visible.is_empty() {
+ lines.push("slowest".to_owned());
+ lines.extend(visible.into_iter().map(|method| {
+ format!(
+ "- {} avg={} max={} n={}",
+ method.method,
+ render_duration_ms(method.avg_latency_ms),
+ render_duration_ms(method.max_latency_ms),
+ method.request_count
+ )
+ }));
+ }
+ if let Some(last_fault) = last_fault.as_ref() {
+ lines.push(format!("last_fault {}", render_fault_brief(last_fault)));
+ }
+ lines.join("\n")
+}
+
/// Build the wire representation of a single (file, line, column) location,
/// rendering the path in the configured style. Line/column are one-indexed
/// (`.get()` unwraps the NonZero-style wrappers).
pub(super) fn location_output(
    file_path: &SourceFilePath,
    point: SourcePoint,
    render_config: CommonRenderConfig,
) -> LocationOutput {
    LocationOutput {
        file_path: render_source_path(file_path, render_config.path_style),
        line: point.line().get(),
        column: point.column().get(),
    }
}
+
/// Build the wire representation of a source range. The file path is rendered
/// once at the range level and repeated inside both endpoint locations.
pub(super) fn range_output(value: SourceRange, render_config: CommonRenderConfig) -> RangeOutput {
    RangeOutput {
        file_path: render_source_path(value.file_path(), render_config.path_style),
        start: location_output(value.file_path(), value.start(), render_config),
        end: location_output(value.file_path(), value.end(), render_config),
    }
}
+
/// Shared porcelain renderer for location lists (definitions, references).
///
/// Output shape: a "<count> <noun>[s]" header, a blank line, then up to
/// `PORCELAIN_LOCATION_LIMIT` entries sorted workspace-first. When every
/// visible entry is in one file the per-entry path is omitted; otherwise
/// entries are grouped under per-file headers. A "... +N more" trailer is
/// appended when entries were elided.
fn render_location_list_porcelain(
    noun: &str,
    locations: &[SourceLocation],
    render_config: CommonRenderConfig,
    uncertain_empty: bool,
) -> String {
    let count = locations.len();
    let header = format!("{count} {}{}", noun, if count == 1 { "" } else { "s" });
    if count == 0 {
        // Empty result: header only, plus the warm-up caveat when flagged.
        if uncertain_empty {
            return format!("{header}\nnote: {UNCERTAIN_EMPTY_NOTE}");
        }
        return header;
    }

    // Sort workspace files first, then by relative path, then by position.
    let workspace_root = common_workspace_root(render_config.path_style);
    let mut sorted = locations.to_vec();
    sorted.sort_by(|left, right| {
        location_sort_key(left, workspace_root.as_deref())
            .cmp(&location_sort_key(right, workspace_root.as_deref()))
    });

    let visible = sorted
        .into_iter()
        .take(PORCELAIN_LOCATION_LIMIT)
        .collect::<Vec<_>>();
    let omitted = count.saturating_sub(visible.len());

    // Single-file mode drops the path from each entry line.
    let single_file = visible
        .first()
        .map(|first| {
            visible
                .iter()
                .all(|location| location.file_path() == first.file_path())
        })
        .unwrap_or(false);

    // The empty string produces the blank line between header and entries.
    let mut lines = vec![header, String::new()];
    if single_file {
        lines.extend(
            visible.into_iter().map(|location| {
                format!("- {}", render_location_entry(location, render_config, true))
            }),
        );
    } else {
        // Multi-file mode: emit a file header whenever the file changes
        // (entries are already sorted, so each file appears exactly once).
        let mut current_file: Option<String> = None;
        for location in visible {
            let file = render_source_path(location.file_path(), render_config.path_style);
            if current_file.as_deref() != Some(file.as_str()) {
                if current_file.is_some() {
                    lines.push(String::new());
                }
                lines.push(file.clone());
                current_file = Some(file);
            }
            lines.push(format!(
                "- {}",
                render_location_entry(location, render_config, false)
            ));
        }
    }
    if omitted > 0 {
        lines.push(String::new());
        lines.push(format!("... +{omitted} more"));
    }
    lines.join("\n")
}
+
+fn render_location_entry(
+ location: SourceLocation,
+ render_config: CommonRenderConfig,
+ omit_path: bool,
+) -> String {
+ let point = location.point();
+ let prefix = if omit_path {
+ format!("{}:{}", point.line().get(), point.column().get())
+ } else {
+ format!(
+ "{}:{}:{}",
+ render_source_path(location.file_path(), render_config.path_style),
+ point.line().get(),
+ point.column().get()
+ )
+ };
+ let snippet = source_line_snippet(location.file_path(), point.line().get())
+ .map(truncate_compact)
+ .unwrap_or_else(|| render_source_path(location.file_path(), render_config.path_style));
+ format!("{prefix} {snippet}")
+}
+
+fn render_range_brief(range: &SourceRange, render_config: CommonRenderConfig) -> String {
+ format!(
+ "{}:{}:{}-{}:{}",
+ render_source_path(range.file_path(), render_config.path_style),
+ range.start().line().get(),
+ range.start().column().get(),
+ range.end().line().get(),
+ range.end().column().get()
+ )
+}
+
/// Map a health state to its lowercase porcelain token. Exhaustive on
/// purpose: adding a variant must force an update here.
fn render_health_state(state: super::HealthStateOutput) -> &'static str {
    match state {
        super::HealthStateOutput::Cold => "cold",
        super::HealthStateOutput::Starting => "starting",
        super::HealthStateOutput::Ready => "ready",
        super::HealthStateOutput::Recovering => "recovering",
    }
}
+
+fn render_fault_brief(fault: &FaultOutput) -> String {
+ format!(
+ "{}:{} {}",
+ fault.class.to_ascii_lowercase(),
+ fault.code.to_ascii_lowercase(),
+ squash_whitespace(fault.detail.as_str())
+ )
+}
+
/// Render a millisecond duration in the coarsest readable unit:
/// "<n>ms" below one second, "<s>.<mmm>s" below one minute, else "<m>m<ss>s".
fn render_duration_ms(duration_ms: u64) -> String {
    const MILLIS_PER_SECOND: u64 = 1_000;
    const SECONDS_PER_MINUTE: u64 = 60;
    if duration_ms < MILLIS_PER_SECOND {
        format!("{duration_ms}ms")
    } else {
        let seconds = duration_ms / MILLIS_PER_SECOND;
        let millis = duration_ms % MILLIS_PER_SECOND;
        if seconds < SECONDS_PER_MINUTE {
            format!("{seconds}.{millis:03}s")
        } else {
            format!(
                "{}m{:02}s",
                seconds / SECONDS_PER_MINUTE,
                seconds % SECONDS_PER_MINUTE
            )
        }
    }
}
+
+fn hover_summary_line(rendered: &str) -> Option<String> {
+ let lines = rendered
+ .lines()
+ .map(str::trim)
+ .filter(|line| !line.is_empty())
+ .filter(|line| !matches!(*line, "```" | "```rust" | "---"))
+ .collect::<Vec<_>>();
+ if lines.is_empty() {
+ return None;
+ }
+ let best = lines
+ .iter()
+ .copied()
+ .find(|line| looks_like_signature(line))
+ .unwrap_or(lines[0]);
+ Some(truncate_compact(best.to_owned()))
+}
+
/// Heuristic: does this hover line look like a declaration/signature?
/// True when it contains a Rust item keyword or a `name: Type` pattern.
fn looks_like_signature(line: &str) -> bool {
    if line.contains(": ") {
        return true;
    }
    const KEYWORDS: [&str; 10] = [
        "fn ", "struct ", "enum ", "trait ", "type ", "const ", "let ", "impl ", "mod ", "pub ",
    ];
    KEYWORDS.iter().any(|keyword| line.contains(keyword))
}
+
+fn source_line_snippet(file_path: &SourceFilePath, line: u64) -> Option<String> {
+ let line_index = usize::try_from(line.saturating_sub(1)).ok()?;
+ let text = fs::read_to_string(file_path.as_path()).ok()?;
+ let raw = text.lines().nth(line_index)?;
+ let squashed = squash_whitespace(raw);
+ if squashed.is_empty() {
+ return None;
+ }
+ Some(squashed)
+}
+
+fn truncate_compact(raw: String) -> String {
+ let char_count = raw.chars().count();
+ if char_count <= PORCELAIN_SNIPPET_CHAR_LIMIT {
+ return raw;
+ }
+ raw.chars()
+ .take(PORCELAIN_SNIPPET_CHAR_LIMIT.saturating_sub(3))
+ .chain("...".chars())
+ .collect()
+}
+
/// Collapse runs of whitespace to single spaces (delegates to the shared
/// libmcp helper so porcelain output matches host-side rendering).
fn squash_whitespace(raw: &str) -> String {
    collapse_inline_whitespace(raw)
}
+
+fn common_workspace_root(path_style: PathStyleInput) -> Option<PathBuf> {
+ matches!(path_style, PathStyleInput::Relative)
+ .then(resolve_workspace_root_path)
+ .and_then(Result::ok)
+}
+
+fn location_sort_key(
+ location: &SourceLocation,
+ workspace_root: Option<&Path>,
+) -> (u8, String, u64, u64) {
+ let file_path = location.file_path();
+ let rendered =
+ render_source_path_with_root(file_path, PathStyleInput::Relative, workspace_root);
+ (
+ path_priority(file_path.as_path(), workspace_root),
+ rendered,
+ location.point().line().get(),
+ location.point().column().get(),
+ )
+}
+
/// Render a path in the given style, resolving the workspace root on demand
/// (only the relative style actually needs it; see `common_workspace_root`).
fn render_source_path(file_path: &SourceFilePath, style: PathStyleInput) -> String {
    let workspace_root = common_workspace_root(style);
    render_source_path_with_root(file_path, style, workspace_root.as_deref())
}
+
+fn render_source_path_with_root(
+ file_path: &SourceFilePath,
+ style: PathStyleInput,
+ workspace_root: Option<&Path>,
+) -> String {
+ let absolute = file_path.as_path();
+ match style {
+ PathStyleInput::Absolute => absolute.display().to_string(),
+ PathStyleInput::Relative => workspace_root
+ .and_then(|root| absolute.strip_prefix(root).ok())
+ .map(|relative| relative.display().to_string())
+ .filter(|relative| !relative.is_empty())
+ .unwrap_or_else(|| shorten_external_path(absolute)),
+ }
+}
+
/// Priority bucket used to order location lists:
/// 0 = inside the workspace, 1 = cargo registry dependency,
/// 2 = rust toolchain/source, 3 = anything else.
fn path_priority(path: &Path, workspace_root: Option<&Path>) -> u8 {
    // `starts_with` is component-wise, matching the old strip_prefix check.
    let inside_workspace = workspace_root.map_or(false, |root| path.starts_with(root));
    if inside_workspace {
        return 0;
    }
    let rendered = path.display().to_string();
    if rendered.contains("/.cargo/registry/src/") {
        1
    } else if rendered.contains("/rust/library/") || rendered.contains("/rustc/") {
        2
    } else {
        3
    }
}
+
/// Shorten well-known external path prefixes (cargo registry, rust toolchain
/// sources) to bracketed aliases; other paths pass through unchanged.
/// Marker order matters: the cargo check runs before the rust/library and
/// rustc checks, matching the original precedence.
fn shorten_external_path(path: &Path) -> String {
    let rendered = path.display().to_string();
    for (marker, label) in [
        ("/.cargo/registry/src/", "[cargo]/"),
        ("/rust/library/", "[rust]/library/"),
        ("/rustc/", "[rustc]/"),
    ] {
        if let Some((_, tail)) = rendered.split_once(marker) {
            return format!("{label}{tail}");
        }
    }
    rendered
}
diff --git a/crates/adequate-rust-mcp/src/worker/schema.rs b/crates/adequate-rust-mcp/src/worker/schema.rs
new file mode 100644
index 0000000..7ea8067
--- /dev/null
+++ b/crates/adequate-rust-mcp/src/worker/schema.rs
@@ -0,0 +1,496 @@
+use super::{
+ deserialize_advanced_lsp_params, deserialize_diagnostic_file_targets,
+ deserialize_optional_human_unsigned_u64,
+};
+use libmcp::{PathStyle, RenderMode};
+use schemars::JsonSchema;
+use serde::{Deserialize, Serialize};
+use serde_json::Value;
+
// Newtype for path-like inputs; normalization/validation of the string
// happens later in the input layer, so any string is accepted here.
// (Plain `//` comments are used throughout this file on purpose: schemars
// would turn `///` doc comments into schema descriptions, changing output.)
#[derive(Debug, Clone, Deserialize, JsonSchema)]
#[serde(transparent)]
pub(super) struct AbsolutePathInput(pub(super) String);

// One-indexed line/column wrapper. NOTE(review): no Deserialize derive here;
// presumably a manual impl (via OneIndexedRawInput) lives in input.rs so that
// floats and numeric strings are accepted — confirm.
#[derive(Debug, Clone, JsonSchema)]
#[serde(transparent)]
pub(super) struct OneIndexedInput(pub(super) u64);

// A (file, line, column) triple identifying a symbol position, with generous
// field-name aliases for client compatibility.
#[derive(Debug, Clone, Deserialize, JsonSchema)]
pub(super) struct SymbolPositionInput {
    #[schemars(
        description = "Absolute path, file URI, or project-relative path. Also accepts aliases: filePath, path, uri."
    )]
    #[serde(alias = "filePath", alias = "path", alias = "uri")]
    pub(super) file_path: AbsolutePathInput,
    #[schemars(description = "One-indexed line. Also accepts aliases: lineNumber, line_number.")]
    #[serde(alias = "lineNumber", alias = "line_number")]
    pub(super) line: OneIndexedInput,
    #[schemars(description = "One-indexed column. Also accepts aliases: character, char, col.")]
    #[serde(alias = "character", alias = "char", alias = "col")]
    pub(super) column: OneIndexedInput,
}

// Position plus optional render/path-style options, shared by hover,
// definition, and references tools.
// NOTE(review): serde documents that deny_unknown_fields is not supported in
// combination with #[serde(flatten)] — verify unknown-field rejection
// actually takes effect here.
#[derive(Debug, Clone, Deserialize, JsonSchema)]
#[serde(deny_unknown_fields)]
pub(super) struct SymbolQueryInput {
    #[serde(flatten)]
    pub(super) position: SymbolPositionInput,
    #[schemars(
        description = "Output rendering. Values: porcelain or json. Defaults to porcelain. Also accepts aliases: output, output_format, outputFormat, render_mode, renderMode.",
        extend("examples" = ["porcelain", "json"])
    )]
    #[serde(
        default,
        alias = "output",
        alias = "output_format",
        alias = "outputFormat",
        alias = "render_mode",
        alias = "renderMode"
    )]
    pub(super) render: Option<CommonRenderInput>,
    #[schemars(
        description = "Path rendering style. Values: absolute or relative. Defaults to relative in porcelain mode and absolute in json mode. Also accepts aliases: pathStyle, path_style and value alias rel.",
        extend("examples" = ["absolute", "relative"])
    )]
    #[serde(default, alias = "pathStyle", alias = "path_style")]
    pub(super) path_style: Option<PathStyleInput>,
}
+
// Input for the diagnostics tool: zero or more target files plus detail,
// rendering, and truncation options. An empty file_paths list means
// "whole workspace" (behavior decided by the caller of this schema).
#[derive(Debug, Clone, Deserialize, JsonSchema)]
#[serde(deny_unknown_fields)]
pub(super) struct DiagnosticsInput {
    #[schemars(
        description = "One or many target files. Also accepts aliases: file_path, filePath, path, uri, filePaths, files, paths, uris."
    )]
    #[serde(
        default,
        alias = "file_path",
        alias = "filePath",
        alias = "path",
        alias = "uri",
        alias = "file_paths",
        alias = "filePaths",
        alias = "paths",
        alias = "uris",
        alias = "files",
        deserialize_with = "deserialize_diagnostic_file_targets"
    )]
    pub(super) file_paths: Vec<AbsolutePathInput>,
    #[schemars(
        description = "Detail mode. Values: compact, full, summary. Accepts full aliases: raw, verbose, counts.",
        extend("examples" = ["compact", "full", "summary"])
    )]
    #[serde(default)]
    pub(super) mode: Option<DiagnosticsModeInput>,
    #[schemars(
        description = "Output rendering. Values: porcelain or json. Defaults to porcelain. Also accepts aliases: output, output_format, outputFormat, render_mode, renderMode.",
        extend("examples" = ["porcelain", "json"])
    )]
    #[serde(
        default,
        alias = "output",
        alias = "output_format",
        alias = "outputFormat",
        alias = "render_mode",
        alias = "renderMode"
    )]
    pub(super) render: Option<DiagnosticsRenderInput>,
    #[schemars(
        description = "Optional compact item limit. Also accepts aliases: limit, maxItems."
    )]
    // Custom deserializer accepts numbers, integer-like floats, and numeric
    // strings (see deserialize_optional_human_unsigned_u64 in input.rs).
    #[serde(
        default,
        alias = "limit",
        alias = "maxItems",
        deserialize_with = "deserialize_optional_human_unsigned_u64"
    )]
    pub(super) max_items: Option<u64>,
    #[schemars(
        description = "Optional per-message character limit in compact mode. Also accepts alias: maxMessageChars."
    )]
    #[serde(
        default,
        alias = "maxMessageChars",
        alias = "max_message_chars",
        deserialize_with = "deserialize_optional_human_unsigned_u64"
    )]
    pub(super) max_message_chars: Option<u64>,
    #[schemars(
        description = "Path rendering style. Values: absolute or relative. Also accepts aliases: pathStyle, path_style and value alias rel.",
        extend("examples" = ["absolute", "relative"])
    )]
    #[serde(default, alias = "pathStyle", alias = "path_style")]
    pub(super) path_style: DiagnosticsPathStyleInput,
}

// Detail level for diagnostics output; defaults to compact.
#[derive(Debug, Clone, Copy, Deserialize, JsonSchema, Default)]
#[serde(rename_all = "snake_case")]
pub(super) enum DiagnosticsModeInput {
    #[default]
    Compact,
    #[serde(alias = "raw", alias = "verbose")]
    Full,
    #[serde(alias = "counts")]
    Summary,
}

// Render-mode aliases shared with libmcp (porcelain/json).
pub(super) type DiagnosticsRenderInput = RenderMode;
pub(super) type CommonRenderInput = RenderMode;

// Diagnostics-specific path style; unlike PathStyleInput this defaults to
// absolute rather than being optional.
#[derive(Debug, Clone, Copy, Deserialize, JsonSchema, Default)]
#[serde(rename_all = "snake_case")]
pub(super) enum DiagnosticsPathStyleInput {
    #[default]
    Absolute,
    #[serde(alias = "rel")]
    Relative,
}

// Path style shared with the host-side libmcp helpers.
pub(super) type PathStyleInput = PathStyle;
+
// Input for the rename tool: a symbol position plus the replacement name.
// Unlike SymbolQueryInput this does not reject unknown fields.
#[derive(Debug, Clone, Deserialize, JsonSchema)]
pub(super) struct RenameInput {
    #[schemars(
        description = "Absolute path, file URI, or project-relative path. Also accepts aliases: filePath, path, uri."
    )]
    #[serde(alias = "filePath", alias = "path", alias = "uri")]
    pub(super) file_path: AbsolutePathInput,
    #[schemars(description = "One-indexed line. Also accepts aliases: lineNumber, line_number.")]
    #[serde(alias = "lineNumber", alias = "line_number")]
    pub(super) line: OneIndexedInput,
    #[schemars(description = "One-indexed column. Also accepts aliases: character, char, col.")]
    #[serde(alias = "character", alias = "char", alias = "col")]
    pub(super) column: OneIndexedInput,
    #[schemars(
        description = "Replacement symbol name. Also accepts aliases: newName, name, rename_to, renameTo."
    )]
    #[serde(
        alias = "newName",
        alias = "name",
        alias = "rename_to",
        alias = "renameTo"
    )]
    pub(super) new_name: String,
}

// Input for the raw advanced-LSP passthrough tool: a whitelisted method plus
// an arbitrary params payload (string payloads that look like JSON are
// decoded by deserialize_advanced_lsp_params).
#[derive(Debug, Clone, Deserialize, JsonSchema)]
pub(super) struct AdvancedLspRequestInput {
    #[schemars(
        description = "Method accepts snake_case, camelCase, kebab-case, or full LSP method paths for supported operations."
    )]
    #[serde(alias = "lsp_method", alias = "lspMethod")]
    pub(super) method: AdvancedLspMethod,
    #[schemars(description = "LSP params payload. Also accepts aliases: arguments, payload.")]
    #[serde(
        alias = "arguments",
        alias = "payload",
        deserialize_with = "deserialize_advanced_lsp_params"
    )]
    pub(super) params: Value,
}

// The fix-everything tool takes no arguments; an empty object (or nothing)
// deserializes successfully.
#[derive(Debug, Clone, Deserialize, JsonSchema, Default)]
pub(super) struct FixEverythingInput {}

// Render-only input shared by the health and telemetry snapshot tools.
#[derive(Debug, Clone, Deserialize, JsonSchema, Default)]
#[serde(deny_unknown_fields)]
pub(super) struct SnapshotRenderInput {
    #[schemars(
        description = "Output rendering. Values: porcelain or json. Defaults to porcelain. Also accepts aliases: output, output_format, outputFormat, render_mode, renderMode.",
        extend("examples" = ["porcelain", "json"])
    )]
    #[serde(
        default,
        alias = "output",
        alias = "output_format",
        alias = "outputFormat",
        alias = "render_mode",
        alias = "renderMode"
    )]
    pub(super) render: Option<CommonRenderInput>,
}
+
// Whitelist of LSP methods reachable through the advanced passthrough tool.
// NOTE(review): no Deserialize derive despite the #[serde(rename_all)]
// attribute — presumably a manual Deserialize impl in input.rs handles the
// camelCase/kebab-case/full-path spellings; confirm.
#[derive(Debug, Clone, Copy, PartialEq, Eq, JsonSchema)]
#[serde(rename_all = "snake_case")]
#[schemars(
    description = "Canonical snake_case method names. Input also accepts camelCase, kebab-case, and full LSP method paths."
)]
pub(super) enum AdvancedLspMethod {
    Hover,
    Definition,
    References,
    Declaration,
    TypeDefinition,
    Implementation,
    Completion,
    CompletionResolve,
    SignatureHelp,
    DocumentSymbol,
    WorkspaceSymbol,
    WorkspaceSymbolResolve,
    PrepareRename,
    Rename,
    CodeAction,
    CodeActionResolve,
    CodeLens,
    CodeLensResolve,
    ExecuteCommand,
    Formatting,
    RangeFormatting,
    OnTypeFormatting,
    DocumentHighlight,
    DocumentLink,
    DocumentLinkResolve,
    DocumentColor,
    ColorPresentation,
    LinkedEditingRange,
    InlayHint,
    InlayHintResolve,
    FoldingRange,
    SelectionRange,
    DocumentDiagnostic,
    WorkspaceDiagnostic,
    SemanticTokensFull,
    SemanticTokensFullDelta,
    SemanticTokensRange,
    Moniker,
    InlineValue,
    TypeHierarchyPrepare,
    TypeHierarchySupertypes,
    TypeHierarchySubtypes,
    CallHierarchyPrepare,
    CallHierarchyIncomingCalls,
    CallHierarchyOutgoingCalls,
}
+
// Wire representation of a single location; line/column are one-indexed and
// file_path is already rendered in the requested style.
#[derive(Debug, Clone, Serialize, JsonSchema)]
pub(super) struct LocationOutput {
    pub(super) file_path: String,
    pub(super) line: u64,
    pub(super) column: u64,
}

// Location-list result with the warm-up caveat flag and optional note.
#[derive(Debug, Clone, Serialize, JsonSchema)]
pub(super) struct LocationsOutput {
    pub(super) locations: Vec<LocationOutput>,
    pub(super) uncertain_empty: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) note: Option<String>,
}

// Wire representation of a range; the path is repeated at range level and in
// both endpoints.
#[derive(Debug, Clone, Serialize, JsonSchema)]
pub(super) struct RangeOutput {
    pub(super) file_path: String,
    pub(super) start: LocationOutput,
    pub(super) end: LocationOutput,
}

// Hover result: the rendered markdown (if any) and optional hover range.
#[derive(Debug, Clone, Serialize, JsonSchema)]
pub(super) struct HoverOutput {
    pub(super) rendered: Option<String>,
    pub(super) range: Option<RangeOutput>,
    pub(super) uncertain_empty: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) note: Option<String>,
}

// JSON diagnostics result. `items` carries compact entries; `diagnostics`
// carries full entries and is omitted outside full mode.
#[derive(Debug, Clone, Serialize, JsonSchema)]
pub(super) struct DiagnosticsJsonOutput {
    pub(super) mode: DiagnosticsModeOutput,
    pub(super) counts: DiagnosticsCountsOutput,
    pub(super) truncated: bool,
    pub(super) overflow_count: u64,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub(super) items: Vec<CompactDiagnosticOutput>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) diagnostics: Option<Vec<DiagnosticOutput>>,
}

// Echo of the detail mode the diagnostics were produced in.
#[derive(Debug, Clone, Copy, Serialize, JsonSchema)]
#[serde(rename_all = "snake_case")]
pub(super) enum DiagnosticsModeOutput {
    Compact,
    Full,
    Summary,
}

// Per-severity counts; total_count covers all severities.
#[derive(Debug, Clone, Copy, Serialize, JsonSchema)]
pub(super) struct DiagnosticsCountsOutput {
    pub(super) error_count: u64,
    pub(super) warning_count: u64,
    pub(super) information_count: u64,
    pub(super) hint_count: u64,
    pub(super) total_count: u64,
}

// Flattened single-diagnostic entry used in compact mode (one-indexed
// positions, message possibly truncated upstream).
#[derive(Debug, Clone, Serialize, JsonSchema)]
pub(super) struct CompactDiagnosticOutput {
    pub(super) severity: DiagnosticLevelOutput,
    pub(super) file_path: String,
    pub(super) start_line: u64,
    pub(super) start_column: u64,
    pub(super) end_line: u64,
    pub(super) end_column: u64,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) code: Option<String>,
    pub(super) message: String,
}

// Full-mode diagnostic entry with a structured range.
#[derive(Debug, Clone, Serialize, JsonSchema)]
pub(super) struct DiagnosticOutput {
    pub(super) range: RangeOutput,
    pub(super) level: DiagnosticLevelOutput,
    pub(super) code: Option<String>,
    pub(super) message: String,
}

// Internal (non-serialized) aggregate produced while flattening diagnostics:
// counts, the kept compact items, and how many entries overflowed the limit.
#[derive(Debug, Clone)]
pub(super) struct FlattenedDiagnostics {
    pub(super) counts: DiagnosticsCountsOutput,
    pub(super) items: Vec<CompactDiagnosticOutput>,
    pub(super) overflow_count: usize,
}

// Severity levels, serialized in snake_case.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, JsonSchema)]
#[serde(rename_all = "snake_case")]
pub(super) enum DiagnosticLevelOutput {
    Error,
    Warning,
    Information,
    Hint,
}

// Rename result: how many files and edits the workspace edit touched.
#[derive(Debug, Clone, Serialize, JsonSchema)]
pub(super) struct RenameOutput {
    pub(super) files_touched: u64,
    pub(super) edits_applied: u64,
}

// Raw result payload of an advanced-LSP passthrough call.
#[derive(Debug, Clone, Serialize, JsonSchema)]
pub(super) struct AdvancedLspResponseOutput {
    pub(super) result: Value,
}
+
// Result of the fix-everything pipeline: overall success plus one entry per
// executed step.
#[derive(Debug, Clone, Serialize, JsonSchema)]
pub(super) struct FixEverythingOutput {
    pub(super) success: bool,
    pub(super) workspace_root: String,
    pub(super) steps: Vec<FixStepOutput>,
}

// One fix step: the command that ran, its outcome, and output excerpts
// (omitted when empty).
#[derive(Debug, Clone, Serialize, JsonSchema)]
pub(super) struct FixStepOutput {
    pub(super) step_name: String,
    pub(super) command: Vec<String>,
    pub(super) success: bool,
    // None when the process was terminated by a signal rather than exiting.
    pub(super) exit_code: Option<i32>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) standard_output_excerpt: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) standard_error_excerpt: Option<String>,
}

// Health snapshot: lifecycle state, restart generation, and the most recent
// fault (if any).
#[derive(Debug, Clone, Serialize, JsonSchema)]
pub(super) struct HealthOutput {
    pub(super) state: HealthStateOutput,
    pub(super) generation: u64,
    pub(super) last_fault: Option<FaultOutput>,
}

// Engine lifecycle states, serialized in snake_case.
#[derive(Debug, Clone, Serialize, JsonSchema)]
#[serde(rename_all = "snake_case")]
pub(super) enum HealthStateOutput {
    Cold,
    Starting,
    Ready,
    Recovering,
}

// Wire form of an engine fault: classification, code, and free-form detail.
#[derive(Debug, Clone, Serialize, JsonSchema)]
pub(super) struct FaultOutput {
    pub(super) class: String,
    pub(super) code: String,
    pub(super) detail: String,
}

// Full telemetry snapshot: health fields plus aggregate and per-method
// request statistics.
#[derive(Debug, Clone, Serialize, JsonSchema)]
pub(super) struct TelemetryOutput {
    pub(super) uptime_ms: u64,
    pub(super) state: HealthStateOutput,
    pub(super) generation: u64,
    pub(super) consecutive_failures: u32,
    pub(super) restart_count: u64,
    pub(super) totals: TelemetryTotalsOutput,
    pub(super) methods: Vec<MethodTelemetryOutput>,
    pub(super) last_fault: Option<FaultOutput>,
}

// Aggregate request counters across all methods.
#[derive(Debug, Clone, Serialize, JsonSchema)]
pub(super) struct TelemetryTotalsOutput {
    pub(super) request_count: u64,
    pub(super) success_count: u64,
    pub(super) response_error_count: u64,
    pub(super) transport_fault_count: u64,
    pub(super) retry_count: u64,
}

// Per-method request counters and latency statistics (milliseconds).
#[derive(Debug, Clone, Serialize, JsonSchema)]
pub(super) struct MethodTelemetryOutput {
    pub(super) method: String,
    pub(super) request_count: u64,
    pub(super) success_count: u64,
    pub(super) response_error_count: u64,
    pub(super) transport_fault_count: u64,
    pub(super) retry_count: u64,
    pub(super) last_latency_ms: Option<u64>,
    pub(super) max_latency_ms: u64,
    pub(super) avg_latency_ms: u64,
    pub(super) last_error: Option<String>,
}

// Untagged raw form of a one-indexed value; variant order matters — integers
// are tried before floats, strings last.
#[derive(Debug, Deserialize)]
#[serde(untagged)]
pub(super) enum OneIndexedRawInput {
    Unsigned(u64),
    Float(f64),
    Text(String),
}

// Untagged raw form of the diagnostics file-target field: a single path or a
// list of paths.
#[derive(Debug, Deserialize)]
#[serde(untagged)]
pub(super) enum DiagnosticFileTargetsRaw {
    Single(String),
    Many(Vec<String>),
}

// Minimal view of a `cargo ... --message-format=json` line: the reason tag
// plus the embedded rustc diagnostic when present.
#[derive(Debug, Deserialize)]
pub(super) struct CargoJsonMessageWire {
    pub(super) reason: String,
    #[serde(default)]
    pub(super) message: Option<RustcDiagnosticWire>,
}

// Minimal view of a rustc JSON diagnostic.
#[derive(Debug, Deserialize)]
pub(super) struct RustcDiagnosticWire {
    pub(super) message: String,
    pub(super) level: String,
    #[serde(default)]
    pub(super) code: Option<RustcCodeWire>,
    #[serde(default)]
    pub(super) spans: Vec<RustcSpanWire>,
}

// Lint/error code wrapper as emitted by rustc JSON.
#[derive(Debug, Deserialize)]
pub(super) struct RustcCodeWire {
    pub(super) code: String,
}

// One rustc span: one-indexed line/column bounds plus the primary flag.
#[derive(Debug, Deserialize)]
pub(super) struct RustcSpanWire {
    pub(super) file_name: String,
    pub(super) line_start: u64,
    pub(super) line_end: u64,
    pub(super) column_start: u64,
    pub(super) column_end: u64,
    pub(super) is_primary: bool,
}
diff --git a/crates/adequate-rust-mcp/src/worker/server.rs b/crates/adequate-rust-mcp/src/worker/server.rs
new file mode 100644
index 0000000..322f37a
--- /dev/null
+++ b/crates/adequate-rust-mcp/src/worker/server.rs
@@ -0,0 +1,744 @@
+use super::{
+ AdvancedLspRequestInput, AdvancedLspResponseOutput, DiagnosticsInput, EMPTY_RESULT_MAX_RETRIES,
+ EMPTY_RESULT_RETRY_DELAY_MS, Engine, EngineConfig, FixEverythingInput, FixEverythingOutput,
+ HealthOutput, HoverOutput, HoverPayload, Json, LocationsOutput, McpError,
+ MethodTelemetryOutput, PorcelainErrorKind, RenameInput, RenameOutput, ServerHandler,
+ ServerInfo, ServiceExt, SnapshotRenderInput, SymbolPositionInput, SymbolQueryInput,
+ TelemetryOutput, ToolTelemetryState, WorkspaceRoot, call_tool_result_with_render,
+ collect_clippy_diagnostics, diagnostics_call_tool_result, diagnostics_json_output_schema,
+ elapsed_millis_saturating, hover_output_from_payload, json_output_schema,
+ locations_output_from_sources, map_engine_error, porcelain_internal_error,
+ porcelain_invalid_params, render_definition_porcelain, render_health_porcelain,
+ render_hover_porcelain, render_references_porcelain, render_telemetry_porcelain,
+ run_workspace_fix_everything, should_retry_empty_symbol_result,
+};
+use ra_mcp_engine::BackoffPolicy;
+use rmcp::model::CallToolResult;
+use rmcp::{tool, tool_handler, tool_router, transport::stdio};
+use serde::Deserialize;
+use serde_json::Value;
+use std::{
+ collections::HashMap,
+ future::Future,
+ path::{Path, PathBuf},
+ pin::Pin,
+ process::Command,
+ str::FromStr,
+ sync::{Arc, Mutex as StdMutex},
+ time::{Duration, Instant},
+};
+use tokio::{sync::Mutex as AsyncMutex, time::sleep};
+use tracing::Level;
+
+// Boxed future alias for symbol queries so closures over different Engine
+// methods (hover/definition/references) can share one generic retry driver.
+type SymbolQueryFuture<'a, T> =
+    Pin<Box<dyn Future<Output = Result<T, ra_mcp_engine::EngineError>> + Send + 'a>>;
+
+/// Lazily-populated map of workspace roots to rust-analyzer engines.
+/// Cloning is cheap: all state is behind `Arc`s, so clones share the map.
+#[derive(Clone)]
+struct EngineRegistry {
+    // config used as a template when spawning an engine for a new root
+    template_config: EngineConfig,
+    // most recently used root; used by tools that have no file-path hint
+    active_workspace_root: Arc<AsyncMutex<WorkspaceRoot>>,
+    engines: Arc<AsyncMutex<HashMap<WorkspaceRoot, Arc<Engine>>>>,
+}
+
+impl EngineRegistry {
+    // Seeds the registry with one engine for the configured workspace root.
+    fn new(config: EngineConfig) -> Self {
+        let workspace_root = config.workspace_root.clone();
+        let engine = Arc::new(Engine::new(config.clone()));
+        let mut engines = HashMap::new();
+        let previous = engines.insert(workspace_root.clone(), engine);
+        debug_assert!(previous.is_none());
+        Self {
+            template_config: config,
+            active_workspace_root: Arc::new(AsyncMutex::new(workspace_root)),
+            engines: Arc::new(AsyncMutex::new(engines)),
+        }
+    }
+
+    // Engine for the last-used workspace root (fallback when no path hint).
+    async fn active_engine(&self) -> Result<Arc<Engine>, McpError> {
+        let workspace_root = self.active_workspace_root.lock().await.clone();
+        self.engine_for_workspace_root(workspace_root).await
+    }
+
+    async fn engine_for_position(
+        &self,
+        position: &super::SourcePosition,
+    ) -> Result<Arc<Engine>, McpError> {
+        self.engine_for_file_path(position.file_path()).await
+    }
+
+    // Resolves the owning workspace for `file_path`, then returns its engine.
+    async fn engine_for_file_path(
+        &self,
+        file_path: &super::SourceFilePath,
+    ) -> Result<Arc<Engine>, McpError> {
+        let workspace_root = self.workspace_root_for_file(file_path).await?;
+        self.engine_for_workspace_root(workspace_root).await
+    }
+
+    // Prefers the deepest cached root containing the file; falls through to
+    // discovery when nothing is cached or the file sits under `.worktrees/`
+    // (worktree files must be re-resolved to their linked workspace).
+    async fn workspace_root_for_file(
+        &self,
+        file_path: &super::SourceFilePath,
+    ) -> Result<WorkspaceRoot, McpError> {
+        let cached = {
+            // lock scope kept tight: discovery below may shell out to cargo/git
+            let engines = self.engines.lock().await;
+            best_cached_workspace_root(engines.keys(), file_path.as_path())
+        };
+        if let Some(root) = cached.as_ref()
+            && !needs_workspace_root_discovery(file_path.as_path(), root.as_path())
+        {
+            return Ok(root.clone());
+        }
+        Ok(discover_workspace_root(
+            file_path.as_path(),
+            &self.template_config.workspace_root,
+        ))
+    }
+
+    // Gets-or-creates the engine for `workspace_root` and marks it active.
+    async fn engine_for_workspace_root(
+        &self,
+        workspace_root: WorkspaceRoot,
+    ) -> Result<Arc<Engine>, McpError> {
+        let engine = {
+            let mut engines = self.engines.lock().await;
+            if let Some(existing) = engines.get(&workspace_root) {
+                existing.clone()
+            } else {
+                // clone the template config, retargeted at the new root
+                let mut config = self.template_config.clone();
+                config.workspace_root = workspace_root.clone();
+                let created = Arc::new(Engine::new(config));
+                let previous = engines.insert(workspace_root.clone(), created.clone());
+                debug_assert!(previous.is_none());
+                created
+            }
+        };
+        *self.active_workspace_root.lock().await = workspace_root;
+        Ok(engine)
+    }
+}
+
+/// A planned diagnostics request: one file paired with the engine that owns
+/// its workspace, so a mixed-workspace batch can be executed per engine.
+#[derive(Clone)]
+struct DiagnosticQuery {
+    engine: Arc<Engine>,
+    file_path: super::SourceFilePath,
+}
+
+/// MCP server handler for worker mode. Holds the engine registry, tool-level
+/// telemetry (behind a std mutex; only locked briefly, never across .await),
+/// and the rmcp-generated tool router.
+#[derive(Clone)]
+struct AdequateRustMcpServer {
+    engines: EngineRegistry,
+    tool_telemetry: Arc<StdMutex<ToolTelemetryState>>,
+    tool_router: rmcp::handler::server::tool::ToolRouter<Self>,
+}
+
+// Tool implementations. `#[tool_router]` generates `Self::tool_router()` from
+// the `#[tool(...)]`-annotated methods below.
+#[tool_router]
+impl AdequateRustMcpServer {
+    fn new(config: EngineConfig) -> Self {
+        Self {
+            engines: EngineRegistry::new(config),
+            tool_telemetry: Arc::new(StdMutex::new(ToolTelemetryState::default())),
+            tool_router: Self::tool_router(),
+        }
+    }
+
+    // Runs `mutate` under the telemetry lock; a poisoned lock is recovered
+    // rather than propagated, since telemetry is best-effort.
+    fn with_tool_telemetry<R>(&self, mutate: impl FnOnce(&mut ToolTelemetryState) -> R) -> R {
+        let mut guard = match self.tool_telemetry.lock() {
+            Ok(state) => state,
+            Err(poisoned) => poisoned.into_inner(),
+        };
+        mutate(&mut guard)
+    }
+
+    // Pairs each requested file with the engine owning its workspace.
+    async fn plan_diagnostic_queries(
+        &self,
+        file_paths: Vec<super::SourceFilePath>,
+    ) -> Result<Vec<DiagnosticQuery>, McpError> {
+        let mut queries = Vec::with_capacity(file_paths.len());
+        for file_path in file_paths {
+            let engine = self.engines.engine_for_file_path(&file_path).await?;
+            queries.push(DiagnosticQuery { engine, file_path });
+        }
+        Ok(queries)
+    }
+
+    fn record_clippy_telemetry_success(&self, elapsed: Duration) {
+        self.with_tool_telemetry(|state| {
+            state
+                .clippy_diagnostics
+                .record_success(elapsed_millis_saturating(elapsed));
+        });
+    }
+
+    fn record_clippy_telemetry_error(&self, elapsed: Duration, error: String) {
+        self.with_tool_telemetry(|state| {
+            state
+                .clippy_diagnostics
+                .record_error(elapsed_millis_saturating(elapsed), error);
+        });
+    }
+
+    fn clippy_telemetry_snapshot(&self) -> Option<MethodTelemetryOutput> {
+        self.with_tool_telemetry(|state| {
+            state
+                .clippy_diagnostics
+                .to_snapshot("tool/clippy_diagnostics")
+        })
+    }
+
+    fn record_fix_everything_telemetry_success(&self, elapsed: Duration) {
+        self.with_tool_telemetry(|state| {
+            state
+                .fix_everything
+                .record_success(elapsed_millis_saturating(elapsed));
+        });
+    }
+
+    fn record_fix_everything_telemetry_error(&self, elapsed: Duration, error: String) {
+        self.with_tool_telemetry(|state| {
+            state
+                .fix_everything
+                .record_error(elapsed_millis_saturating(elapsed), error);
+        });
+    }
+
+    fn fix_everything_telemetry_snapshot(&self) -> Option<MethodTelemetryOutput> {
+        self.with_tool_telemetry(|state| state.fix_everything.to_snapshot("tool/fix_everything"))
+    }
+
+    // Collects diagnostics for all files; while an engine is still warming up
+    // and every diagnostic looks like a spurious "unlinked file", retries the
+    // whole batch up to EMPTY_RESULT_MAX_RETRIES with a fixed delay.
+    async fn collect_diagnostics_with_warmup_retry(
+        &self,
+        file_paths: Vec<super::SourceFilePath>,
+    ) -> Result<super::DiagnosticsReport, McpError> {
+        let queries = self.plan_diagnostic_queries(file_paths).await?;
+        let mut retries = 0_u8;
+        loop {
+            let mut aggregated = super::DiagnosticsReport {
+                diagnostics: Vec::new(),
+            };
+            // retry only if EVERY query still looks warmup-suspect
+            let mut should_retry_all = !queries.is_empty();
+            for query in &queries {
+                let report = query
+                    .engine
+                    .diagnostics(query.file_path.clone())
+                    .await
+                    .map_err(map_engine_error)?;
+                let uptime_ms = query.engine.telemetry_snapshot().await.uptime_ms;
+                if !should_retry_unlinked_diagnostics(&report, uptime_ms) {
+                    should_retry_all = false;
+                }
+                aggregated.diagnostics.extend(report.diagnostics);
+            }
+            if !should_retry_all || retries >= EMPTY_RESULT_MAX_RETRIES {
+                return Ok(aggregated);
+            }
+            retries = retries.saturating_add(1);
+            sleep(Duration::from_millis(EMPTY_RESULT_RETRY_DELAY_MS)).await;
+        }
+    }
+
+    // Generic retry driver for symbol queries (hover/definition/references).
+    // Empty results during engine warmup are retried; returns the result plus
+    // an "uncertain empty" flag telling the renderer the emptiness may be due
+    // to warmup rather than a genuine absence.
+    async fn resolve_symbol_query<T>(
+        &self,
+        position: super::SourcePosition,
+        is_empty: impl Fn(&T) -> bool,
+        fetch: impl for<'a> Fn(&'a Engine, super::SourcePosition) -> SymbolQueryFuture<'a, T>,
+    ) -> Result<(T, bool), McpError> {
+        let engine = self.engines.engine_for_position(&position).await?;
+        let mut retries = 0_u8;
+        let mut retried_during_warmup = false;
+
+        loop {
+            let result = fetch(engine.as_ref(), position.clone())
+                .await
+                .map_err(map_engine_error)?;
+            if !is_empty(&result) {
+                return Ok((result, false));
+            }
+
+            let still_warming =
+                should_retry_empty_symbol_result(engine.telemetry_snapshot().await.uptime_ms);
+            if !still_warming || retries >= EMPTY_RESULT_MAX_RETRIES {
+                return Ok((result, retried_during_warmup || still_warming));
+            }
+
+            retried_during_warmup = true;
+            retries = retries.saturating_add(1);
+            sleep(Duration::from_millis(EMPTY_RESULT_RETRY_DELAY_MS)).await;
+        }
+    }
+
+    // MCP tool: hover info at a position, with warmup-aware empty handling.
+    #[tool(
+        description = "Read hover information at a source position. Defaults to render=porcelain; use render=json for structured output.",
+        output_schema = json_output_schema::<HoverOutput>()
+    )]
+    async fn hover(
+        &self,
+        params: rmcp::handler::server::wrapper::Parameters<SymbolQueryInput>,
+    ) -> Result<CallToolResult, McpError> {
+        let (position, render_config) = params.0.into_request()?;
+        let (payload, uncertain_empty) = self
+            .resolve_symbol_query(
+                position,
+                // hover counts as empty only when both text and range are absent
+                |hover: &HoverPayload| hover.rendered.is_none() && hover.range.is_none(),
+                |engine, position| Box::pin(engine.hover(position)),
+            )
+            .await?;
+        let output = hover_output_from_payload(payload.clone(), render_config, uncertain_empty);
+        let porcelain = render_hover_porcelain(&payload, render_config, uncertain_empty);
+        call_tool_result_with_render(output, render_config.render, porcelain)
+    }
+
+    // MCP tool: go-to-definition locations.
+    #[tool(
+        description = "Resolve symbol definition locations at a source position. Defaults to render=porcelain; use render=json for structured output.",
+        output_schema = json_output_schema::<LocationsOutput>()
+    )]
+    async fn definition(
+        &self,
+        params: rmcp::handler::server::wrapper::Parameters<SymbolQueryInput>,
+    ) -> Result<CallToolResult, McpError> {
+        let (position, render_config) = params.0.into_request()?;
+        let (locations, uncertain_empty) = self
+            .resolve_symbol_query(position, Vec::is_empty, |engine, position| {
+                Box::pin(engine.definition(position))
+            })
+            .await?;
+        let output =
+            locations_output_from_sources(locations.clone(), render_config, uncertain_empty);
+        let porcelain = render_definition_porcelain(&locations, render_config, uncertain_empty);
+        call_tool_result_with_render(output, render_config.render, porcelain)
+    }
+
+    // MCP tool: find-all-references.
+    #[tool(
+        description = "Find all symbol references at a source position. Defaults to render=porcelain; use render=json for structured output.",
+        output_schema = json_output_schema::<LocationsOutput>()
+    )]
+    async fn references(
+        &self,
+        params: rmcp::handler::server::wrapper::Parameters<SymbolQueryInput>,
+    ) -> Result<CallToolResult, McpError> {
+        let (position, render_config) = params.0.into_request()?;
+        let (locations, uncertain_empty) = self
+            .resolve_symbol_query(position, Vec::is_empty, |engine, position| {
+                Box::pin(engine.references(position))
+            })
+            .await?;
+        let output =
+            locations_output_from_sources(locations.clone(), render_config, uncertain_empty);
+        let porcelain = render_references_porcelain(&locations, render_config, uncertain_empty);
+        call_tool_result_with_render(output, render_config.render, porcelain)
+    }
+
+    // MCP tool: rename a symbol (no warmup retry — rename is a mutation).
+    #[tool(description = "Rename a symbol at a source position.")]
+    async fn rename_symbol(
+        &self,
+        params: rmcp::handler::server::wrapper::Parameters<RenameInput>,
+    ) -> Result<Json<RenameOutput>, McpError> {
+        let RenameInput {
+            file_path,
+            line,
+            column,
+            new_name,
+        } = params.0;
+        let position = SymbolPositionInput {
+            file_path,
+            line,
+            column,
+        }
+        .into_source_position()?;
+        let engine = self.engines.engine_for_position(&position).await?;
+        let report = engine
+            .rename_symbol(position, new_name)
+            .await
+            .map_err(map_engine_error)?;
+        Ok(Json(RenameOutput::from(report)))
+    }
+
+    // MCP tool: rust-analyzer diagnostics with warmup retry.
+    #[tool(
+        description = "Collect diagnostics for one or more files. Defaults to mode=compact, render=porcelain; use render=json for structured output.",
+        output_schema = diagnostics_json_output_schema()
+    )]
+    async fn diagnostics(
+        &self,
+        params: rmcp::handler::server::wrapper::Parameters<DiagnosticsInput>,
+    ) -> Result<CallToolResult, McpError> {
+        let (file_paths, render_config) = params.0.into_request()?;
+        let report = self
+            .collect_diagnostics_with_warmup_retry(file_paths)
+            .await?;
+        diagnostics_call_tool_result(report, render_config)
+    }
+
+    // MCP tool: cargo clippy diagnostics; success AND failure paths both feed
+    // the tool telemetry (including input-parse failures).
+    #[tool(
+        description = "Run cargo clippy for one or more files and return diagnostics using workspace-configured strictness. Defaults to mode=compact, render=porcelain; use render=json for structured output.",
+        output_schema = diagnostics_json_output_schema()
+    )]
+    async fn clippy_diagnostics(
+        &self,
+        params: rmcp::handler::server::wrapper::Parameters<DiagnosticsInput>,
+    ) -> Result<CallToolResult, McpError> {
+        let started_at = Instant::now();
+        let (file_paths, render_config) = match params.0.into_request() {
+            Ok(parsed) => parsed,
+            Err(error) => {
+                self.record_clippy_telemetry_error(started_at.elapsed(), error.to_string());
+                return Err(error);
+            }
+        };
+        let report = collect_clippy_diagnostics(file_paths).await;
+        let elapsed = started_at.elapsed();
+        match report {
+            Ok(report) => {
+                self.record_clippy_telemetry_success(elapsed);
+                diagnostics_call_tool_result(report, render_config)
+            }
+            Err(error) => {
+                self.record_clippy_telemetry_error(elapsed, error.to_string());
+                Err(error)
+            }
+        }
+    }
+
+    // MCP tool: workspace-wide autofix (fmt + clippy --fix). A returned
+    // output with success=false is still Ok at the MCP layer but recorded as
+    // a telemetry error naming the first failed step.
+    #[tool(
+        description = "ONE-STOP: apply workspace autofixes (`cargo fmt --all` + `cargo clippy --fix`) using workspace-configured strictness."
+    )]
+    async fn fix_everything(
+        &self,
+        _params: rmcp::handler::server::wrapper::Parameters<FixEverythingInput>,
+    ) -> Result<Json<FixEverythingOutput>, McpError> {
+        let started_at = Instant::now();
+        let output = run_workspace_fix_everything().await;
+        let elapsed = started_at.elapsed();
+        match output {
+            Ok(output) => {
+                if output.success {
+                    self.record_fix_everything_telemetry_success(elapsed);
+                } else {
+                    let failed_step = output
+                        .steps
+                        .iter()
+                        .find(|step| !step.success)
+                        .map(|step| step.step_name.clone())
+                        .unwrap_or_else(|| "unknown".to_owned());
+                    self.record_fix_everything_telemetry_error(
+                        elapsed,
+                        format!("workspace fix reported failure in step `{failed_step}`"),
+                    );
+                }
+                Ok(Json(output))
+            }
+            Err(error) => {
+                self.record_fix_everything_telemetry_error(elapsed, error.to_string());
+                Err(error)
+            }
+        }
+    }
+
+    // MCP tool: raw LSP passthrough. Routes to the engine owning the file
+    // hinted anywhere in the params (uri/path fields), else the active engine.
+    #[tool(
+        description = "Invoke advanced rust-analyzer LSP requests (completion, code actions, prepare-rename, formatting, symbols, semantic tokens, call hierarchy, etc.)."
+    )]
+    async fn advanced_lsp_request(
+        &self,
+        params: rmcp::handler::server::wrapper::Parameters<AdvancedLspRequestInput>,
+    ) -> Result<Json<AdvancedLspResponseOutput>, McpError> {
+        let AdvancedLspRequestInput {
+            method,
+            params: lsp_params,
+        } = params.0;
+        let engine = if let Some(file_path) = source_file_path_hint_from_value(&lsp_params) {
+            self.engines.engine_for_file_path(&file_path).await?
+        } else {
+            self.engines.active_engine().await?
+        };
+        let response = engine
+            .raw_lsp_request(method.as_lsp_method(), lsp_params)
+            .await
+            .map_err(map_engine_error)?;
+        Ok(Json(AdvancedLspResponseOutput { result: response }))
+    }
+
+    // MCP tool: lifecycle/health snapshot of the active engine.
+    #[tool(
+        description = "Return process lifecycle and latest fault state. Defaults to render=porcelain; use render=json for structured output.",
+        output_schema = json_output_schema::<HealthOutput>()
+    )]
+    async fn health_snapshot(
+        &self,
+        params: rmcp::handler::server::wrapper::Parameters<SnapshotRenderInput>,
+    ) -> Result<CallToolResult, McpError> {
+        let render = params.0.render();
+        let snapshot = self
+            .engines
+            .active_engine()
+            .await?
+            .lifecycle_snapshot()
+            .await;
+        let output = HealthOutput::from(snapshot);
+        let porcelain = render_health_porcelain(&output);
+        call_tool_result_with_render(output, render, porcelain)
+    }
+
+    // MCP tool: engine telemetry merged with tool-level counters.
+    #[tool(
+        description = "Return aggregate request/recovery telemetry for this worker process. Defaults to render=porcelain; use render=json for structured output.",
+        output_schema = json_output_schema::<TelemetryOutput>()
+    )]
+    async fn telemetry_snapshot(
+        &self,
+        params: rmcp::handler::server::wrapper::Parameters<SnapshotRenderInput>,
+    ) -> Result<CallToolResult, McpError> {
+        let render = params.0.render();
+        let snapshot = self
+            .engines
+            .active_engine()
+            .await?
+            .telemetry_snapshot()
+            .await;
+        let mut tool_snapshots = Vec::new();
+        if let Some(clippy_tool) = self.clippy_telemetry_snapshot() {
+            tool_snapshots.push(clippy_tool);
+        }
+        if let Some(fix_tool) = self.fix_everything_telemetry_snapshot() {
+            tool_snapshots.push(fix_tool);
+        }
+        let output = TelemetryOutput::from_snapshots(snapshot, tool_snapshots);
+        let porcelain = render_telemetry_porcelain(&output);
+        call_tool_result_with_render(output, render, porcelain)
+    }
+}
+
+// ServerHandler wiring: `#[tool_handler]` dispatches tool calls through the
+// router; get_info advertises the tools capability and static identity.
+#[tool_handler]
+impl ServerHandler for AdequateRustMcpServer {
+    fn get_info(&self) -> ServerInfo {
+        ServerInfo {
+            instructions: Some(
+                "Robust rust-analyzer MCP server with aggressive self-healing and restart semantics."
+                    .into(),
+            ),
+            capabilities: rmcp::model::ServerCapabilities::builder()
+                .enable_tools()
+                .build(),
+            server_info: rmcp::model::Implementation {
+                name: "adequate-rust-mcp".into(),
+                version: env!("CARGO_PKG_VERSION").into(),
+                ..Default::default()
+            },
+            ..Default::default()
+        }
+    }
+}
+
+/// Reads a millisecond duration from the environment variable `name`.
+/// Falls back to `default` when the variable is unset, not valid unicode,
+/// or does not parse as a `u64`.
+fn read_duration_env(name: &str, default: Duration) -> Duration {
+    match std::env::var(name) {
+        Ok(raw) => u64::from_str(&raw)
+            .map(Duration::from_millis)
+            .unwrap_or(default),
+        Err(_) => default,
+    }
+}
+
+/// Determines the workspace root: `ADEQUATE_MCP_WORKSPACE_ROOT` if set,
+/// otherwise the current working directory. Returns a porcelain-shaped
+/// MCP error (with a remediation hint) when neither yields a valid root.
+fn read_workspace_root() -> Result<WorkspaceRoot, McpError> {
+    let from_env = std::env::var("ADEQUATE_MCP_WORKSPACE_ROOT")
+        .ok()
+        .map(PathBuf::from);
+    let raw_root = match from_env {
+        Some(path) => path,
+        None => std::env::current_dir().map_err(|_| {
+            porcelain_internal_error(
+                "failed to determine current working directory",
+                PorcelainErrorKind::InternalFailure,
+                Some("set ADEQUATE_MCP_WORKSPACE_ROOT explicitly"),
+                false,
+            )
+        })?,
+    };
+    WorkspaceRoot::try_new(raw_root).map_err(|_| {
+        porcelain_invalid_params(
+            "workspace root is invalid",
+            PorcelainErrorKind::InvalidInput,
+            Some("set ADEQUATE_MCP_WORKSPACE_ROOT to an existing directory"),
+        )
+    })
+}
+
+/// Assembles the engine configuration from environment variables:
+/// workspace root, rust-analyzer binary path (`ADEQUATE_MCP_RA_BINARY`,
+/// defaulting to `rust-analyzer` on PATH), and timeout/backoff knobs
+/// (all in milliseconds). Invalid values surface as invalid-params errors.
+fn build_engine_config() -> Result<EngineConfig, McpError> {
+    let workspace_root = read_workspace_root()?;
+    let binary = std::env::var("ADEQUATE_MCP_RA_BINARY")
+        .ok()
+        .filter(|value| !value.is_empty())
+        .map(PathBuf::from)
+        .unwrap_or_else(|| PathBuf::from("rust-analyzer"));
+    let startup_timeout =
+        read_duration_env("ADEQUATE_MCP_STARTUP_TIMEOUT_MS", Duration::from_secs(20));
+    let request_timeout =
+        read_duration_env("ADEQUATE_MCP_REQUEST_TIMEOUT_MS", Duration::from_secs(12));
+    let floor = read_duration_env("ADEQUATE_MCP_BACKOFF_FLOOR_MS", Duration::from_millis(50));
+    let ceiling = read_duration_env("ADEQUATE_MCP_BACKOFF_CEILING_MS", Duration::from_secs(2));
+    let backoff_policy = BackoffPolicy::try_new(floor, ceiling).map_err(|_| {
+        porcelain_invalid_params(
+            "backoff timing configuration is invalid",
+            PorcelainErrorKind::InvalidInput,
+            Some("ensure backoff floor is <= ceiling and both are positive"),
+        )
+    })?;
+    EngineConfig::try_new(
+        workspace_root,
+        binary,
+        Vec::new(),
+        Vec::new(),
+        startup_timeout,
+        request_timeout,
+        backoff_policy,
+    )
+    .map_err(|_| {
+        porcelain_invalid_params(
+            "engine configuration is invalid",
+            PorcelainErrorKind::InvalidInput,
+            Some("verify workspace root and timeout/backoff environment values"),
+        )
+    })
+}
+
+/// Runs the worker-mode MCP server: initializes stderr tracing (tolerating a
+/// pre-existing global subscriber), builds the engine config from the
+/// environment, and serves MCP over stdio until the client disconnects.
+pub(crate) async fn run_worker() -> Result<(), Box<dyn std::error::Error>> {
+    // NOTE(review): with_env_filter installs an EnvFilter layer; when RUST_LOG
+    // is set it, not with_max_level(INFO), governs filtering — confirm intended.
+    let init_result = tracing_subscriber::fmt()
+        .with_max_level(Level::INFO)
+        .with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
+        .with_writer(std::io::stderr)
+        .try_init();
+    if let Err(error) = init_result {
+        // another subscriber is already installed; log to stderr and continue
+        eprintln!("worker tracing init skipped: {error}");
+    }
+
+    let config = build_engine_config()?;
+    let server = AdequateRustMcpServer::new(config);
+    let service = server.serve(stdio()).await?;
+    let quit_reason = service.waiting().await?;
+    tracing::info!("server terminated: {quit_reason:?}");
+    Ok(())
+}
+
+/// Minimal decode target for `cargo metadata` output: only the
+/// `workspace_root` field is needed; everything else is ignored.
+#[derive(Debug, Deserialize)]
+struct CargoMetadataWorkspaceRoot {
+    workspace_root: PathBuf,
+}
+
+/// Returns the deepest (most specific) cached workspace root that is a path
+/// prefix of `file_path`, or `None` when no cached root contains the file.
+fn best_cached_workspace_root<'a>(
+    roots: impl Iterator<Item = &'a WorkspaceRoot>,
+    file_path: &Path,
+) -> Option<WorkspaceRoot> {
+    let mut best: Option<&'a WorkspaceRoot> = None;
+    for root in roots {
+        if !file_path.starts_with(root.as_path()) {
+            continue;
+        }
+        let depth = root.as_path().components().count();
+        // keep the candidate with the most path components (deepest prefix)
+        let beats_best = best
+            .map(|current| depth > current.as_path().components().count())
+            .unwrap_or(true);
+        if beats_best {
+            best = Some(root);
+        }
+    }
+    best.cloned()
+}
+
+/// Finds the workspace root owning `file_path`. Fast path: the fallback root
+/// already contains the file and the file is not under `.worktrees/`.
+/// Otherwise asks `cargo metadata`, then `git rev-parse --show-toplevel`,
+/// and finally falls back to the provided root. Never fails.
+fn discover_workspace_root(file_path: &Path, fallback_root: &WorkspaceRoot) -> WorkspaceRoot {
+    if file_path.starts_with(fallback_root.as_path())
+        && !needs_workspace_root_discovery(file_path, fallback_root.as_path())
+    {
+        return fallback_root.clone();
+    }
+    let Some(start_dir) = file_path.parent() else {
+        return fallback_root.clone();
+    };
+    cargo_metadata_workspace_root(start_dir, file_path)
+        .or_else(|| git_workspace_root(start_dir, file_path))
+        .unwrap_or_else(|| fallback_root.clone())
+}
+
+/// True when `file_path` lives directly under the root's `.worktrees/`
+/// directory — such files belong to a linked worktree, so the cached root
+/// must not be reused and a fresh workspace discovery is required.
+fn needs_workspace_root_discovery(file_path: &Path, workspace_root: &Path) -> bool {
+    let Ok(relative) = file_path.strip_prefix(workspace_root) else {
+        // file is outside the root entirely; nothing to re-discover here
+        return false;
+    };
+    match relative.components().next() {
+        Some(first) => first.as_os_str() == ".worktrees",
+        None => false,
+    }
+}
+
+/// Asks `cargo metadata --no-deps` (run in `start_dir`) for the workspace
+/// root, returning it only when it actually contains `file_path`.
+/// Any spawn/exit/parse failure yields `None` (best-effort discovery).
+/// NOTE(review): this blocks on a child process; called from async paths —
+/// confirm the latency is acceptable or move to spawn_blocking.
+fn cargo_metadata_workspace_root(start_dir: &Path, file_path: &Path) -> Option<WorkspaceRoot> {
+    let output = Command::new("cargo")
+        .arg("metadata")
+        .arg("--format-version")
+        .arg("1")
+        .arg("--no-deps")
+        .current_dir(start_dir)
+        .output()
+        .ok()?;
+    if !output.status.success() {
+        return None;
+    }
+    let parsed = serde_json::from_slice::<CargoMetadataWorkspaceRoot>(&output.stdout).ok()?;
+    workspace_root_candidate(parsed.workspace_root, file_path)
+}
+
+/// Fallback discovery: asks `git rev-parse --show-toplevel` (run in
+/// `start_dir`) for the repository top level, returning it only when it
+/// contains `file_path`. Any failure yields `None`.
+fn git_workspace_root(start_dir: &Path, file_path: &Path) -> Option<WorkspaceRoot> {
+    let output = Command::new("git")
+        .arg("rev-parse")
+        .arg("--show-toplevel")
+        .current_dir(start_dir)
+        .output()
+        .ok()?;
+    if !output.status.success() {
+        return None;
+    }
+    // git prints the path followed by a newline; trim before use
+    let raw_root = String::from_utf8(output.stdout).ok()?;
+    workspace_root_candidate(PathBuf::from(raw_root.trim()), file_path)
+}
+
+/// Validates a discovered root path: it must construct a `WorkspaceRoot`
+/// and be a path prefix of `file_path`; otherwise `None`.
+fn workspace_root_candidate(candidate: PathBuf, file_path: &Path) -> Option<WorkspaceRoot> {
+    let workspace_root = WorkspaceRoot::try_new(candidate).ok()?;
+    file_path
+        .starts_with(workspace_root.as_path())
+        .then_some(workspace_root)
+}
+
+/// Recursively scans arbitrary LSP params JSON for a usable file path hint.
+/// Objects are checked for well-known keys first (`uri`, `file_path`,
+/// `filePath`, `path`), then all nested values; arrays are scanned in order.
+/// Returns the first absolute path or file:// URI that parses.
+fn source_file_path_hint_from_value(value: &Value) -> Option<super::SourceFilePath> {
+    match value {
+        Value::String(raw) => source_file_path_hint_from_str(raw),
+        Value::Object(object) => {
+            for key in ["uri", "file_path", "filePath", "path"] {
+                let raw = object.get(key).and_then(Value::as_str);
+                if let Some(raw) = raw
+                    && let Some(file_path) = source_file_path_hint_from_str(raw)
+                {
+                    return Some(file_path);
+                }
+            }
+            // no known key matched; recurse into every nested value
+            object.values().find_map(source_file_path_hint_from_value)
+        }
+        Value::Array(items) => items.iter().find_map(source_file_path_hint_from_value),
+        Value::Null | Value::Bool(_) | Value::Number(_) => None,
+    }
+}
+
+/// Parses a single string as a file-path hint: a `file://` URI is converted
+/// via the url crate; a plain string is accepted only when absolute
+/// (relative strings are too ambiguous to route on).
+fn source_file_path_hint_from_str(raw: &str) -> Option<super::SourceFilePath> {
+    let trimmed = raw.trim();
+    if trimmed.starts_with("file://") {
+        let file_url = url::Url::parse(trimmed).ok()?;
+        let path = file_url.to_file_path().ok()?;
+        return super::SourceFilePath::try_new(path).ok();
+    }
+    let path = PathBuf::from(trimmed);
+    path.is_absolute()
+        .then(|| super::SourceFilePath::try_new(path).ok())
+        .flatten()
+}
+
+/// Retry heuristic for the diagnostics warmup loop: retry only while the
+/// engine is still within its warmup window AND the report is non-empty AND
+/// every diagnostic looks like the transient "unlinked file" noise
+/// rust-analyzer emits before crate-graph indexing completes.
+fn should_retry_unlinked_diagnostics(report: &super::DiagnosticsReport, uptime_ms: u64) -> bool {
+    should_retry_empty_symbol_result(uptime_ms)
+        && !report.diagnostics.is_empty()
+        && report
+            .diagnostics
+            .iter()
+            .all(diagnostic_looks_like_unlinked_file)
+}
+
+/// Heuristically classifies a diagnostic as rust-analyzer's "unlinked file"
+/// warning, matching either the diagnostic code (both spellings) or several
+/// known message phrasings (case-insensitive) used across r-a versions.
+fn diagnostic_looks_like_unlinked_file(diagnostic: &super::DiagnosticEntry) -> bool {
+    diagnostic.code.as_deref().is_some_and(|code| {
+        code.eq_ignore_ascii_case("unlinked-file") || code.eq_ignore_ascii_case("unlinked_file")
+    }) || {
+        let message = diagnostic.message.to_ascii_lowercase();
+        message.contains("not part of any crate")
+            || message.contains("not part of a crate")
+            || message.contains("not included in any crates")
+            || message.contains("not included in the crate graph")
+            || message.contains("not included in crate graph")
+            || message.contains("can't offer ide services")
+    }
+}
diff --git a/crates/adequate-rust-mcp/src/worker/telemetry.rs b/crates/adequate-rust-mcp/src/worker/telemetry.rs
new file mode 100644
index 0000000..92c7371
--- /dev/null
+++ b/crates/adequate-rust-mcp/src/worker/telemetry.rs
@@ -0,0 +1,152 @@
+use super::{
+ FaultOutput, HealthStateOutput, LifecycleSnapshot, MethodTelemetryOutput,
+ MethodTelemetrySnapshot, TelemetryOutput, TelemetrySnapshot, TelemetryTotals,
+ TelemetryTotalsOutput,
+};
+
+/// Worker-side telemetry for tools that bypass the engine (and therefore the
+/// engine's own per-method counters): clippy and fix_everything.
+#[derive(Debug, Default)]
+pub(super) struct ToolTelemetryState {
+    pub(super) clippy_diagnostics: MethodTelemetryAccumulator,
+    pub(super) fix_everything: MethodTelemetryAccumulator,
+}
+
+/// Running counters for one tool. `transport_fault_count` and `retry_count`
+/// are present for output parity with engine methods but are never
+/// incremented by the visible record_* paths.
+#[derive(Debug, Clone, Default)]
+pub(super) struct MethodTelemetryAccumulator {
+    pub(super) request_count: u64,
+    pub(super) success_count: u64,
+    pub(super) response_error_count: u64,
+    pub(super) transport_fault_count: u64,
+    pub(super) retry_count: u64,
+    // u128 so long-lived workers cannot overflow the latency sum
+    pub(super) total_latency_ms: u128,
+    pub(super) last_latency_ms: Option<u64>,
+    pub(super) max_latency_ms: u64,
+    pub(super) last_error: Option<String>,
+}
+
+impl MethodTelemetryAccumulator {
+    /// Records a successful call: bumps request/success counters, folds the
+    /// latency into total/last/max, and clears any previously stored error.
+    pub(super) fn record_success(&mut self, latency_ms: u64) {
+        self.request_count = self.request_count.saturating_add(1);
+        self.success_count = self.success_count.saturating_add(1);
+        self.total_latency_ms = self.total_latency_ms.saturating_add(u128::from(latency_ms));
+        self.last_latency_ms = Some(latency_ms);
+        self.max_latency_ms = self.max_latency_ms.max(latency_ms);
+        self.last_error = None;
+    }
+
+    /// Records a failed call: bumps request/error counters, folds the latency,
+    /// and stores `error` as the most recent failure message.
+    pub(super) fn record_error(&mut self, latency_ms: u64, error: String) {
+        self.request_count = self.request_count.saturating_add(1);
+        self.response_error_count = self.response_error_count.saturating_add(1);
+        self.total_latency_ms = self.total_latency_ms.saturating_add(u128::from(latency_ms));
+        self.last_latency_ms = Some(latency_ms);
+        self.max_latency_ms = self.max_latency_ms.max(latency_ms);
+        self.last_error = Some(error);
+    }
+
+    /// Converts the counters into an output row labelled `method`, or `None`
+    /// when no calls were recorded (so idle tools are omitted from output).
+    pub(super) fn to_snapshot(&self, method: &str) -> Option<MethodTelemetryOutput> {
+        if self.request_count == 0 {
+            return None;
+        }
+        // request_count > 0 is guaranteed by the early return above, so the
+        // division is safe; the previous version re-checked request_count == 0
+        // here, which was an unreachable branch.
+        let average = self.total_latency_ms / u128::from(self.request_count);
+        let avg_latency_ms = u64::try_from(average).unwrap_or(u64::MAX);
+        Some(MethodTelemetryOutput {
+            method: method.to_owned(),
+            request_count: self.request_count,
+            success_count: self.success_count,
+            response_error_count: self.response_error_count,
+            transport_fault_count: self.transport_fault_count,
+            retry_count: self.retry_count,
+            last_latency_ms: self.last_latency_ms,
+            max_latency_ms: self.max_latency_ms,
+            avg_latency_ms,
+            last_error: self.last_error.clone(),
+        })
+    }
+}
+
+/// Field-for-field conversion from the engine's totals into the wire/output
+/// shape; counters are copied unchanged.
+impl From<TelemetryTotals> for TelemetryTotalsOutput {
+    fn from(value: TelemetryTotals) -> Self {
+        Self {
+            request_count: value.request_count,
+            success_count: value.success_count,
+            response_error_count: value.response_error_count,
+            transport_fault_count: value.transport_fault_count,
+            retry_count: value.retry_count,
+        }
+    }
+}
+
+/// Field-for-field conversion from an engine per-method snapshot into the
+/// wire/output shape; values are copied unchanged.
+impl From<MethodTelemetrySnapshot> for MethodTelemetryOutput {
+    fn from(value: MethodTelemetrySnapshot) -> Self {
+        Self {
+            method: value.method,
+            request_count: value.request_count,
+            success_count: value.success_count,
+            response_error_count: value.response_error_count,
+            transport_fault_count: value.transport_fault_count,
+            retry_count: value.retry_count,
+            last_latency_ms: value.last_latency_ms,
+            max_latency_ms: value.max_latency_ms,
+            avg_latency_ms: value.avg_latency_ms,
+            last_error: value.last_error,
+        }
+    }
+}
+
+impl TelemetryOutput {
+    /// Merges an engine telemetry snapshot with tool-level rows: tool counters
+    /// are folded into the totals (saturating) and appended as method rows,
+    /// then all rows are sorted by method name for stable output.
+    pub(super) fn from_snapshots(
+        value: TelemetrySnapshot,
+        extra_tools: Vec<MethodTelemetryOutput>,
+    ) -> Self {
+        let TelemetrySnapshot {
+            uptime_ms,
+            lifecycle,
+            consecutive_failures,
+            restart_count,
+            totals,
+            methods,
+            last_fault,
+        } = value;
+        // flatten the lifecycle enum into a (state, generation) pair
+        let (state, generation) = match lifecycle {
+            LifecycleSnapshot::Cold { generation } => (HealthStateOutput::Cold, generation.get()),
+            LifecycleSnapshot::Starting { generation } => {
+                (HealthStateOutput::Starting, generation.get())
+            }
+            LifecycleSnapshot::Ready { generation } => (HealthStateOutput::Ready, generation.get()),
+            LifecycleSnapshot::Recovering { generation, .. } => {
+                (HealthStateOutput::Recovering, generation.get())
+            }
+        };
+        let mut totals = TelemetryTotalsOutput::from(totals);
+        let mut methods = methods
+            .into_iter()
+            .map(MethodTelemetryOutput::from)
+            .collect::<Vec<_>>();
+        for tool in extra_tools {
+            totals.request_count = totals.request_count.saturating_add(tool.request_count);
+            totals.success_count = totals.success_count.saturating_add(tool.success_count);
+            totals.response_error_count = totals
+                .response_error_count
+                .saturating_add(tool.response_error_count);
+            totals.transport_fault_count = totals
+                .transport_fault_count
+                .saturating_add(tool.transport_fault_count);
+            totals.retry_count = totals.retry_count.saturating_add(tool.retry_count);
+            methods.push(tool);
+        }
+        methods.sort_by(|left, right| left.method.cmp(&right.method));
+        Self {
+            uptime_ms,
+            state,
+            generation,
+            consecutive_failures,
+            restart_count,
+            totals,
+            methods,
+            last_fault: last_fault.map(FaultOutput::from),
+        }
+    }
+}
diff --git a/crates/adequate-rust-mcp/src/worker/tests.rs b/crates/adequate-rust-mcp/src/worker/tests.rs
new file mode 100644
index 0000000..1ccd18a
--- /dev/null
+++ b/crates/adequate-rust-mcp/src/worker/tests.rs
@@ -0,0 +1,809 @@
+use super::{
+ AbsolutePathInput, AdvancedLspMethod, AdvancedLspRequestInput, CommonRenderConfig,
+ CommonRenderInput, DiagnosticsInput, DiagnosticsJsonOutput, DiagnosticsModeInput,
+ DiagnosticsPathStyleInput, DiagnosticsRenderConfig, DiagnosticsRenderInput, HoverPayload,
+ OneIndexedInput, PathStyleInput, SnapshotRenderInput, SymbolQueryInput,
+ parse_clippy_json_stream, read_workspace_tool_command, read_workspace_tool_metadata,
+ render_definition_porcelain, render_hover_porcelain, resolve_workspace_fix_command_specs,
+};
+use ra_mcp_domain::types::{
+ OneIndexedColumn, OneIndexedLine, SourceFilePath, SourcePoint, SourceRange,
+};
+use ra_mcp_engine::{DiagnosticEntry, DiagnosticLevel, DiagnosticsReport};
+use serde_json::json;
+use std::collections::HashSet;
+use std::path::PathBuf;
+use url::Url;
+
#[test]
fn advanced_lsp_method_mapping_is_unique() {
    // Every supported variant, listed explicitly. If a new variant is added to
    // the enum but not to this list, the uniqueness check below cannot cover it.
    let methods = vec![
        AdvancedLspMethod::Hover,
        AdvancedLspMethod::Definition,
        AdvancedLspMethod::References,
        AdvancedLspMethod::Declaration,
        AdvancedLspMethod::TypeDefinition,
        AdvancedLspMethod::Implementation,
        AdvancedLspMethod::Completion,
        AdvancedLspMethod::CompletionResolve,
        AdvancedLspMethod::SignatureHelp,
        AdvancedLspMethod::DocumentSymbol,
        AdvancedLspMethod::WorkspaceSymbol,
        AdvancedLspMethod::WorkspaceSymbolResolve,
        AdvancedLspMethod::PrepareRename,
        AdvancedLspMethod::Rename,
        AdvancedLspMethod::CodeAction,
        AdvancedLspMethod::CodeActionResolve,
        AdvancedLspMethod::CodeLens,
        AdvancedLspMethod::CodeLensResolve,
        AdvancedLspMethod::ExecuteCommand,
        AdvancedLspMethod::Formatting,
        AdvancedLspMethod::RangeFormatting,
        AdvancedLspMethod::OnTypeFormatting,
        AdvancedLspMethod::DocumentHighlight,
        AdvancedLspMethod::DocumentLink,
        AdvancedLspMethod::DocumentLinkResolve,
        AdvancedLspMethod::DocumentColor,
        AdvancedLspMethod::ColorPresentation,
        AdvancedLspMethod::LinkedEditingRange,
        AdvancedLspMethod::InlayHint,
        AdvancedLspMethod::InlayHintResolve,
        AdvancedLspMethod::FoldingRange,
        AdvancedLspMethod::SelectionRange,
        AdvancedLspMethod::DocumentDiagnostic,
        AdvancedLspMethod::WorkspaceDiagnostic,
        AdvancedLspMethod::SemanticTokensFull,
        AdvancedLspMethod::SemanticTokensFullDelta,
        AdvancedLspMethod::SemanticTokensRange,
        AdvancedLspMethod::Moniker,
        AdvancedLspMethod::InlineValue,
        AdvancedLspMethod::TypeHierarchyPrepare,
        AdvancedLspMethod::TypeHierarchySupertypes,
        AdvancedLspMethod::TypeHierarchySubtypes,
        AdvancedLspMethod::CallHierarchyPrepare,
        AdvancedLspMethod::CallHierarchyIncomingCalls,
        AdvancedLspMethod::CallHierarchyOutgoingCalls,
    ];

    // Each variant must map to a distinct raw LSP method string; a duplicate
    // would make two tools dispatch the same wire request.
    let mapped = methods
        .iter()
        .map(AdvancedLspMethod::as_lsp_method)
        .collect::<Vec<_>>();
    let unique = mapped.iter().copied().collect::<HashSet<_>>();
    assert_eq!(mapped.len(), unique.len());
    // Spot-check exact spelling of a few well-known method names.
    assert!(mapped.contains(&"textDocument/prepareRename"));
    assert!(mapped.contains(&"workspace/executeCommand"));
    assert!(mapped.contains(&"textDocument/completion"));
    assert!(mapped.contains(&"textDocument/codeAction"));
    assert!(mapped.contains(&"textDocument/references"));
    assert!(mapped.contains(&"textDocument/rename"));
    assert!(mapped.contains(&"textDocument/diagnostic"));
    assert!(mapped.contains(&"workspace/diagnostic"));
    assert!(mapped.contains(&"completionItem/resolve"));
    assert!(mapped.contains(&"textDocument/prepareTypeHierarchy"));
}
+
#[test]
fn advanced_lsp_method_deserialization_accepts_alias_shapes() {
    // The same method must deserialize from camelCase, kebab-case, and the
    // fully qualified LSP method string.
    let shapes = [
        json!("prepareRename"),
        json!("prepare-rename"),
        json!("textDocument/prepareRename"),
    ];
    for shape in shapes {
        let parsed = serde_json::from_value::<AdvancedLspMethod>(shape);
        assert!(parsed.is_ok());
        assert_eq!(
            parsed.unwrap_or(AdvancedLspMethod::Hover),
            AdvancedLspMethod::PrepareRename
        );
    }
}
+
#[test]
fn advanced_lsp_request_input_unpacks_json_encoded_string_payload() {
    // A `params` value delivered as a JSON-encoded string must be decoded into
    // the structured payload before dispatch.
    let raw = json!({
        "method": "workspace/symbol",
        "params": "{\"query\":\"resolve_writable_directive_roles\"}"
    });
    let parsed = serde_json::from_value::<AdvancedLspRequestInput>(raw);
    assert!(parsed.is_ok());
    let Ok(parsed) = parsed else { return };
    assert_eq!(parsed.method, AdvancedLspMethod::WorkspaceSymbol);
    assert_eq!(
        parsed.params,
        json!({"query": "resolve_writable_directive_roles"})
    );
}
+
#[test]
fn advanced_lsp_request_input_rejects_malformed_json_payload_string() {
    // A string payload that is not valid JSON must fail deserialization rather
    // than being passed through verbatim.
    let raw = json!({
        "method": "workspace/symbol",
        "params": "{\"query\":}"
    });
    assert!(serde_json::from_value::<AdvancedLspRequestInput>(raw).is_err());
}
+
#[test]
fn one_indexed_input_deserialization_accepts_loose_numeric_forms() {
    // Numeric strings, float-typed integers, and zero all deserialize; zero is
    // clamped up to the smallest valid one-indexed value.
    let cases = [(json!("17"), 17), (json!(12.0), 12), (json!(0), 1)];
    for (raw, expected) in cases {
        let parsed = serde_json::from_value::<OneIndexedInput>(raw);
        assert!(parsed.is_ok());
        assert_eq!(
            parsed
                .unwrap_or(OneIndexedInput(1))
                .normalized_for_one_indexed(),
            expected
        );
    }
}
+
#[test]
fn symbol_query_input_defaults_to_porcelain_relative() {
    // With no explicit render settings, a symbol query must default to
    // porcelain output with workspace-relative paths.
    let temp = tempfile::TempDir::new();
    assert!(temp.is_ok());
    let Ok(temp) = temp else { return };
    let file_path = temp.path().join("symbol_query.rs");
    assert!(std::fs::write(file_path.as_path(), "fn main() {}\n").is_ok());

    let raw = json!({
        "file_path": file_path.display().to_string(),
        "line": 1,
        "column": 1
    });
    let parsed = serde_json::from_value::<SymbolQueryInput>(raw);
    assert!(parsed.is_ok());
    let Ok(parsed) = parsed else { return };
    let requested = parsed.into_request();
    assert!(requested.is_ok());
    let Ok((position, render_config)) = requested else { return };
    assert_eq!(position.file_path().as_path(), file_path.as_path());
    assert_eq!(position.line().get(), 1);
    assert_eq!(position.column().get(), 1);
    assert!(matches!(render_config.render, CommonRenderInput::Porcelain));
    assert!(matches!(render_config.path_style, PathStyleInput::Relative));
}
+
#[test]
fn snapshot_render_input_defaults_to_porcelain() {
    // An empty input object must fall back to porcelain rendering.
    let parsed = serde_json::from_value::<SnapshotRenderInput>(json!({}));
    assert!(parsed.is_ok());
    let Ok(parsed) = parsed else { return };
    assert!(matches!(parsed.render(), CommonRenderInput::Porcelain));
}
+
#[test]
fn diagnostics_input_defaults_to_compact_porcelain_absolute_without_limits() {
    // Only `file_path` supplied: mode/render stay unset (resolved downstream),
    // path style defaults to absolute, and no item/message limits apply.
    let raw = json!({
        "file_path": "/tmp/diagnostics_defaults.rs"
    });
    let parsed = serde_json::from_value::<DiagnosticsInput>(raw);
    assert!(parsed.is_ok());
    let Ok(parsed) = parsed else { return };
    assert!(parsed.mode.is_none());
    assert!(parsed.render.is_none());
    assert!(matches!(
        parsed.path_style,
        DiagnosticsPathStyleInput::Absolute
    ));
    assert_eq!(parsed.file_paths.len(), 1);
    assert!(parsed.max_items.is_none());
    assert!(parsed.max_message_chars.is_none());
}
+
#[test]
fn diagnostics_input_rejects_legacy_format_field() {
    // The legacy `format` field was replaced by `render`; inputs still using it
    // must be rejected rather than silently ignored.
    let legacy = json!({
        "file_path": "/tmp/diagnostics_defaults.rs",
        "format": "json"
    });
    assert!(serde_json::from_value::<DiagnosticsInput>(legacy).is_err());
}
+
#[test]
fn transient_lsp_error_recognizes_server_cancelled() {
    // -32802 (ServerCancelled) during a workspace reload must be retryable.
    let transient = super::errors::is_transient_lsp_error(
        -32802,
        "server cancelled request during workspace reload",
    );
    assert!(transient);
}
+
#[test]
fn diagnostics_input_accepts_file_list_and_preserves_order() {
    // Files passed via `file_paths` must survive parsing and request conversion
    // in exactly the order the caller supplied.
    let temp = tempfile::TempDir::new();
    assert!(temp.is_ok());
    let Ok(temp) = temp else { return };
    let first_path = temp.path().join("first.rs");
    let second_path = temp.path().join("second.rs");
    assert!(std::fs::write(first_path.as_path(), "fn first() {}\n").is_ok());
    assert!(std::fs::write(second_path.as_path(), "fn second() {}\n").is_ok());

    let raw = json!({
        "file_paths": [
            first_path.display().to_string(),
            second_path.display().to_string()
        ]
    });
    let parsed = serde_json::from_value::<DiagnosticsInput>(raw);
    assert!(parsed.is_ok());
    let Ok(parsed) = parsed else { return };
    let requested = parsed.into_request();
    assert!(requested.is_ok());
    let Ok((file_paths, _render_config)) = requested else { return };
    assert_eq!(file_paths.len(), 2);
    assert_eq!(file_paths[0].as_path(), first_path.as_path());
    assert_eq!(file_paths[1].as_path(), second_path.as_path());
}
+
#[test]
fn diagnostics_input_rejects_missing_file_paths() {
    // Parsing succeeds (the shape is valid); converting to a request must fail
    // because the file does not exist on disk.
    let parsed = serde_json::from_value::<DiagnosticsInput>(json!({
        "file_paths": ["/tmp/does_not_exist_adequate_mcp.rs"]
    }));
    assert!(parsed.is_ok());
    let Ok(parsed) = parsed else { return };
    assert!(parsed.into_request().is_err());
}
+
#[test]
fn render_definition_porcelain_marks_uncertain_empty_results() {
    // An empty result rendered while the server may still be indexing must
    // carry the retry note so callers do not treat it as authoritative.
    let render_config = CommonRenderConfig::from_user_input(
        Some(CommonRenderInput::Porcelain),
        Some(PathStyleInput::Relative),
    );
    let rendered = render_definition_porcelain(&[], render_config, true);
    assert_eq!(
        rendered,
        "0 definitions\nnote: result may be incomplete during indexing; retry in a few seconds"
    );
}
+
#[test]
fn render_hover_porcelain_prefers_signature_and_appends_uncertain_note() {
    // Porcelain hover output should surface the signature stripped of its
    // markdown fences and append the indexing caveat when uncertain.
    let payload = HoverPayload {
        rendered: Some(
            "```rust\nfn parse_launch_mode(args: impl IntoIterator<Item = OsString>) -> Result<LaunchMode, String>\n```\n\nParses the launch mode."
                .to_owned(),
        ),
        range: None,
    };
    let render_config = CommonRenderConfig::from_user_input(
        Some(CommonRenderInput::Porcelain),
        Some(PathStyleInput::Relative),
    );
    let rendered = render_hover_porcelain(&payload, render_config, true);
    assert!(rendered.contains("fn parse_launch_mode("));
    assert!(rendered.contains("note: result may be incomplete during indexing"));
    assert!(!rendered.contains("```"));
}
+
#[test]
fn diagnostics_compact_projection_preserves_full_information_without_limits() {
    // Compact mode without limits must be a lossless projection of full mode:
    // same item count, every per-item field equal to its full counterpart.
    let file_path = SourceFilePath::try_new(PathBuf::from("/tmp/diag_projection.rs"));
    assert!(file_path.is_ok());
    let file_path = match file_path {
        Ok(value) => value,
        Err(_) => return,
    };
    // Single-line range builder; `expect` is acceptable here because the
    // literals below are known-valid one-indexed positions.
    let range = |line: u64, start_column: u64, end_column: u64| {
        SourceRange::try_new(
            file_path.clone(),
            SourcePoint::new(
                OneIndexedLine::try_new(line).expect("valid test line"),
                OneIndexedColumn::try_new(start_column).expect("valid test column"),
            ),
            SourcePoint::new(
                OneIndexedLine::try_new(line).expect("valid test line"),
                OneIndexedColumn::try_new(end_column).expect("valid test column"),
            ),
        )
        .expect("valid test range")
    };
    let first_range = range(3, 7, 12);
    let second_range = range(8, 2, 9);
    let report = DiagnosticsReport {
        diagnostics: vec![
            DiagnosticEntry {
                range: first_range,
                level: DiagnosticLevel::Error,
                code: Some("E0382".to_owned()),
                message: "borrow of moved value".to_owned(),
            },
            DiagnosticEntry {
                range: second_range,
                level: DiagnosticLevel::Warning,
                code: None,
                message: "unused variable".to_owned(),
            },
        ],
    };

    // Render the same report through both modes with no truncation limits.
    let full = DiagnosticsJsonOutput::from_report(
        report.clone(),
        DiagnosticsRenderConfig {
            mode: DiagnosticsModeInput::Full,
            render: DiagnosticsRenderInput::Json,
            max_items: None,
            max_message_chars: None,
            path_style: DiagnosticsPathStyleInput::Absolute,
        },
    );
    let compact = DiagnosticsJsonOutput::from_report(
        report,
        DiagnosticsRenderConfig {
            mode: DiagnosticsModeInput::Compact,
            render: DiagnosticsRenderInput::Json,
            max_items: None,
            max_message_chars: None,
            path_style: DiagnosticsPathStyleInput::Absolute,
        },
    );

    // Compact mode drops the verbose `diagnostics` array but keeps all counts.
    assert!(compact.diagnostics.is_none());
    assert_eq!(compact.overflow_count, 0);
    assert!(!compact.truncated);
    assert_eq!(compact.counts.total_count, 2);
    assert_eq!(compact.counts.error_count, 1);
    assert_eq!(compact.counts.warning_count, 1);
    assert_eq!(compact.items.len(), 2);

    // Field-by-field parity between full entries and compact items.
    let full_items = full.diagnostics.unwrap_or_default();
    assert_eq!(full_items.len(), compact.items.len());
    for (full_item, compact_item) in full_items.iter().zip(compact.items.iter()) {
        let expected_severity = full_item.level;
        let start = &full_item.range.start;
        let end = &full_item.range.end;
        assert_eq!(compact_item.severity, expected_severity);
        assert_eq!(compact_item.file_path, full_item.range.file_path);
        assert_eq!(compact_item.start_line, start.line);
        assert_eq!(compact_item.start_column, start.column);
        assert_eq!(compact_item.end_line, end.line);
        assert_eq!(compact_item.end_column, end.column);
        assert_eq!(compact_item.code, full_item.code);
        assert_eq!(compact_item.message, full_item.message);
    }
}
+
#[test]
fn diagnostics_compact_limits_apply_truncation_metadata() {
    // With max_items=2 and max_message_chars=8, the third entry overflows and
    // each surviving 9-char message is cut to five chars plus "...".
    let file_path = SourceFilePath::try_new(PathBuf::from("/tmp/diag_limits.rs"));
    assert!(file_path.is_ok());
    let file_path = match file_path {
        Ok(value) => value,
        Err(_) => return,
    };
    // Fallible range builder: returns None instead of panicking on bad input.
    let make_range = |line: u64, col_start: u64, col_end: u64| -> Option<SourceRange> {
        let start = SourcePoint::new(
            OneIndexedLine::try_new(line).ok()?,
            OneIndexedColumn::try_new(col_start).ok()?,
        );
        let end = SourcePoint::new(
            OneIndexedLine::try_new(line).ok()?,
            OneIndexedColumn::try_new(col_end).ok()?,
        );
        SourceRange::try_new(file_path.clone(), start, end).ok()
    };
    let first = make_range(1, 1, 5);
    let second = make_range(2, 1, 6);
    let third = make_range(3, 1, 7);
    assert!(first.is_some() && second.is_some() && third.is_some());
    let report = DiagnosticsReport {
        diagnostics: vec![
            DiagnosticEntry {
                range: first.unwrap_or_else(|| unreachable!()),
                level: DiagnosticLevel::Error,
                code: None,
                message: "123456789".to_owned(),
            },
            DiagnosticEntry {
                range: second.unwrap_or_else(|| unreachable!()),
                level: DiagnosticLevel::Warning,
                code: None,
                message: "abcdefghi".to_owned(),
            },
            DiagnosticEntry {
                range: third.unwrap_or_else(|| unreachable!()),
                level: DiagnosticLevel::Hint,
                code: None,
                message: "should_not_be_visible".to_owned(),
            },
        ],
    };
    let compact = DiagnosticsJsonOutput::from_report(
        report,
        DiagnosticsRenderConfig {
            mode: DiagnosticsModeInput::Compact,
            render: DiagnosticsRenderInput::Json,
            max_items: Some(2),
            max_message_chars: Some(8),
            path_style: DiagnosticsPathStyleInput::Absolute,
        },
    );
    // Two items kept, one overflowed; messages truncated to fit 8 chars total.
    assert!(compact.truncated);
    assert_eq!(compact.overflow_count, 1);
    assert_eq!(compact.items.len(), 2);
    assert_eq!(compact.items[0].message, "12345...");
    assert_eq!(compact.items[1].message, "abcde...");
}
+
#[test]
fn manifest_clippy_command_override_is_read() {
    // A clippy command override declared under
    // [workspace.metadata.adequate-rust-mcp] must be read back verbatim, split
    // into program and argument vector.
    let temp = tempfile::TempDir::new();
    assert!(temp.is_ok());
    let temp = match temp {
        Ok(value) => value,
        Err(_) => return,
    };
    let manifest = temp.path().join("Cargo.toml");
    let write = std::fs::write(
        manifest.as_path(),
        r#"
[workspace]
members = []

[workspace.metadata.adequate-rust-mcp]
clippy_command = ["cargo", "clippy", "--workspace", "--message-format=json", "--", "-Dwarnings"]
"#,
    );
    assert!(write.is_ok());
    let metadata = read_workspace_tool_metadata(temp.path());
    assert!(metadata.is_ok());
    let metadata = match metadata {
        Ok(value) => value,
        Err(_) => return,
    };
    // Both snake_case and camelCase alias keys are accepted for the setting.
    let parsed = read_workspace_tool_command(
        metadata.as_ref(),
        "clippy_command",
        &["clippy_command", "clippyCommand"],
    );
    assert!(parsed.is_ok());
    let parsed = parsed.ok().flatten();
    assert!(parsed.is_some());
    let parsed = parsed.unwrap_or_else(|| unreachable!());
    assert_eq!(parsed.program, "cargo");
    assert_eq!(
        parsed.args,
        vec![
            "clippy",
            "--workspace",
            "--message-format=json",
            "--",
            "-Dwarnings"
        ]
    );
}
+
#[test]
fn manifest_fix_and_format_command_overrides_are_read() {
    // format/fix command overrides under [workspace.metadata.adequate-rust-mcp]
    // must replace the built-in defaults verbatim.
    let temp = tempfile::TempDir::new();
    assert!(temp.is_ok());
    let Ok(temp) = temp else { return };
    let manifest_text = r#"
[workspace]
members = []

[workspace.metadata.adequate-rust-mcp]
format_command = ["cargo", "fmt", "--all", "--check"]
fix_command = ["cargo", "clippy", "--fix", "--workspace", "--all-targets"]
"#;
    let manifest = temp.path().join("Cargo.toml");
    assert!(std::fs::write(manifest.as_path(), manifest_text).is_ok());

    let parsed = resolve_workspace_fix_command_specs(temp.path());
    assert!(parsed.is_ok());
    let Ok((format_command, fix_command)) = parsed else { return };
    assert_eq!(format_command.program, "cargo");
    assert_eq!(format_command.args, vec!["fmt", "--all", "--check"]);
    assert_eq!(fix_command.program, "cargo");
    assert_eq!(
        fix_command.args,
        vec!["clippy", "--fix", "--workspace", "--all-targets"]
    );
}
+
#[test]
fn parse_clippy_json_stream_filters_to_target_file() {
    // The cargo JSON stream contains messages for the whole workspace; only
    // diagnostics whose primary span hits a requested file must be kept.
    let temp = tempfile::TempDir::new();
    assert!(temp.is_ok());
    let temp = match temp {
        Ok(value) => value,
        Err(_) => return,
    };
    let src_dir = temp.path().join("src");
    let create_dir = std::fs::create_dir_all(src_dir.as_path());
    assert!(create_dir.is_ok());
    let main_file = src_dir.join("main.rs");
    let lib_file = src_dir.join("lib.rs");
    let write_main = std::fs::write(main_file.as_path(), "fn main() {}\n");
    let write_lib = std::fs::write(lib_file.as_path(), "pub fn helper() {}\n");
    assert!(write_main.is_ok() && write_lib.is_ok());
    let target_path = SourceFilePath::try_new(main_file.clone());
    assert!(target_path.is_ok());
    let target_path = match target_path {
        Ok(value) => value,
        Err(_) => return,
    };

    // Message whose primary span is in the requested file (src/main.rs).
    let target_message = json!({
        "reason": "compiler-message",
        "message": {
            "message": "manual implementation of Option::map",
            "level": "warning",
            "code": { "code": "clippy::manual_map" },
            "spans": [
                {
                    "file_name": "src/main.rs",
                    "line_start": 1,
                    "line_end": 1,
                    "column_start": 1,
                    "column_end": 5,
                    "is_primary": true
                }
            ]
        }
    })
    .to_string();
    // Message for a different file; must be filtered out.
    let other_message = json!({
        "reason": "compiler-message",
        "message": {
            "message": "unused function",
            "level": "warning",
            "code": { "code": "dead_code" },
            "spans": [
                {
                    "file_name": "src/lib.rs",
                    "line_start": 1,
                    "line_end": 1,
                    "column_start": 1,
                    "column_end": 3,
                    "is_primary": true
                }
            ]
        }
    })
    .to_string();

    let stream = format!("{target_message}\n{other_message}\n");
    let diagnostics = parse_clippy_json_stream(stream.as_str(), &[target_path], temp.path());
    assert_eq!(diagnostics.len(), 1);
    let diagnostic = diagnostics.first().cloned();
    assert!(diagnostic.is_some());
    let diagnostic = diagnostic.unwrap_or_else(|| unreachable!());
    assert_eq!(diagnostic.code.unwrap_or_default(), "clippy::manual_map");
    assert_eq!(diagnostic.message, "manual implementation of Option::map");
}
+
#[test]
fn parse_clippy_json_stream_emits_in_requested_file_order() {
    // Output must be grouped by the caller's requested file order, not by the
    // order messages happen to appear in the cargo stream.
    let temp = tempfile::TempDir::new();
    assert!(temp.is_ok());
    let temp = match temp {
        Ok(value) => value,
        Err(_) => return,
    };
    let src_dir = temp.path().join("src");
    let create_dir = std::fs::create_dir_all(src_dir.as_path());
    assert!(create_dir.is_ok());
    let main_file = src_dir.join("main.rs");
    let lib_file = src_dir.join("lib.rs");
    let write_main = std::fs::write(main_file.as_path(), "fn main() {}\n");
    let write_lib = std::fs::write(lib_file.as_path(), "pub fn helper() {}\n");
    assert!(write_main.is_ok() && write_lib.is_ok());

    let main_target = SourceFilePath::try_new(main_file.clone());
    let lib_target = SourceFilePath::try_new(lib_file.clone());
    assert!(main_target.is_ok() && lib_target.is_ok());
    let main_target = match main_target {
        Ok(value) => value,
        Err(_) => return,
    };
    let lib_target = match lib_target {
        Ok(value) => value,
        Err(_) => return,
    };

    // Stream order is main first, lib second.
    let main_message = json!({
        "reason": "compiler-message",
        "message": {
            "message": "main warning",
            "level": "warning",
            "code": { "code": "clippy::main" },
            "spans": [
                {
                    "file_name": "src/main.rs",
                    "line_start": 1,
                    "line_end": 1,
                    "column_start": 1,
                    "column_end": 4,
                    "is_primary": true
                }
            ]
        }
    })
    .to_string();
    let lib_message = json!({
        "reason": "compiler-message",
        "message": {
            "message": "lib warning",
            "level": "warning",
            "code": { "code": "clippy::lib" },
            "spans": [
                {
                    "file_name": "src/lib.rs",
                    "line_start": 1,
                    "line_end": 1,
                    "column_start": 1,
                    "column_end": 4,
                    "is_primary": true
                }
            ]
        }
    })
    .to_string();

    let stream = format!("{main_message}\n{lib_message}\n");
    // Requested order is lib first, main second — output must follow it.
    let requested = vec![lib_target, main_target];
    let diagnostics = parse_clippy_json_stream(stream.as_str(), requested.as_slice(), temp.path());
    assert_eq!(diagnostics.len(), 2);
    assert_eq!(
        diagnostics[0].code.clone().unwrap_or_default(),
        "clippy::lib"
    );
    assert_eq!(
        diagnostics[1].code.clone().unwrap_or_default(),
        "clippy::main"
    );
}
+
#[test]
fn parse_clippy_json_stream_deduplicates_identical_messages() {
    // Cargo can emit the same diagnostic more than once (e.g. per target);
    // identical messages must collapse to a single entry.
    let temp = tempfile::TempDir::new();
    assert!(temp.is_ok());
    let temp = match temp {
        Ok(value) => value,
        Err(_) => return,
    };
    let src_dir = temp.path().join("src");
    let create_dir = std::fs::create_dir_all(src_dir.as_path());
    assert!(create_dir.is_ok());
    let main_file = src_dir.join("main.rs");
    let write_main = std::fs::write(main_file.as_path(), "fn main() {}\n");
    assert!(write_main.is_ok());

    let target_path = SourceFilePath::try_new(main_file.clone());
    assert!(target_path.is_ok());
    let target_path = match target_path {
        Ok(value) => value,
        Err(_) => return,
    };

    let message = json!({
        "reason": "compiler-message",
        "message": {
            "message": "duplicate warning",
            "level": "warning",
            "code": { "code": "clippy::duplicate" },
            "spans": [
                {
                    "file_name": "src/main.rs",
                    "line_start": 1,
                    "line_end": 1,
                    "column_start": 1,
                    "column_end": 4,
                    "is_primary": true
                }
            ]
        }
    })
    .to_string();

    // Feed the exact same message twice; expect one surviving diagnostic.
    let stream = format!("{message}\n{message}\n");
    let diagnostics = parse_clippy_json_stream(stream.as_str(), &[target_path], temp.path());
    assert_eq!(diagnostics.len(), 1);
    assert_eq!(
        diagnostics[0].code.clone().unwrap_or_default(),
        "clippy::duplicate"
    );
}
+
#[test]
fn absolute_path_input_normalizes_file_uris_and_relative_paths() {
    // `AbsolutePathInput` accepts both file:// URIs and paths relative to the
    // process working directory, normalizing each to an absolute source path.
    let temp = tempfile::TempDir::new();
    assert!(temp.is_ok());
    let temp = match temp {
        Ok(value) => value,
        Err(_) => return,
    };
    let workspace_file = temp.path().join("mod.rs");
    let uri = Url::from_file_path(&workspace_file);
    assert!(uri.is_ok());
    let uri = match uri {
        Ok(value) => value,
        Err(_) => return,
    };

    let from_uri = AbsolutePathInput(uri.to_string()).into_source_file_path();
    assert!(from_uri.is_ok());

    let previous_dir = std::env::current_dir();
    assert!(previous_dir.is_ok());
    let previous_dir = match previous_dir {
        Ok(value) => value,
        Err(_) => return,
    };

    let set_into_temp = std::env::set_current_dir(temp.path());
    assert!(set_into_temp.is_ok());
    let from_relative = AbsolutePathInput("mod.rs".to_owned()).into_source_file_path();
    // Restore the working directory BEFORE asserting: a failing assert panics,
    // and leaving the process chdir'd into a TempDir (deleted on drop) would
    // poison every later test that resolves relative paths.
    let _result = std::env::set_current_dir(previous_dir);
    assert!(from_relative.is_ok());
    // NOTE(review): this test mutates the process-global cwd; consider the
    // #[serial] attribute used elsewhere if cwd-sensitive tests are added.
}
diff --git a/crates/adequate-rust-mcp/src/worker/workspace.rs b/crates/adequate-rust-mcp/src/worker/workspace.rs
new file mode 100644
index 0000000..ef7ed47
--- /dev/null
+++ b/crates/adequate-rust-mcp/src/worker/workspace.rs
@@ -0,0 +1,313 @@
+use super::{
+ FixEverythingOutput, FixStepOutput, PorcelainErrorKind, porcelain_internal_error,
+ porcelain_invalid_params,
+};
+use rmcp::ErrorData as McpError;
+use std::{
+ fs,
+ path::{Path, PathBuf},
+};
+use tokio::process::Command;
+
+pub(super) const COMMAND_OUTPUT_EXCERPT_CHAR_LIMIT: usize = 1_500;
+
+#[derive(Debug, Clone)]
+pub(super) struct WorkspaceCommandSpec {
+ pub(super) program: String,
+ pub(super) args: Vec<String>,
+}
+
+#[derive(Debug, Clone)]
+pub(super) struct WorkspaceCommandRunOutput {
+ pub(super) status: std::process::ExitStatus,
+ pub(super) standard_output: String,
+ pub(super) standard_error: String,
+}
+
+impl WorkspaceCommandSpec {
+ pub(super) fn rendered(&self) -> String {
+ let mut parts = Vec::with_capacity(self.args.len().saturating_add(1));
+ parts.push(self.program.as_str());
+ parts.extend(self.args.iter().map(String::as_str));
+ parts.join(" ")
+ }
+
+ pub(super) fn into_argv(self) -> Vec<String> {
+ let Self { program, args } = self;
+ let mut command = Vec::with_capacity(args.len().saturating_add(1));
+ command.push(program);
+ command.extend(args);
+ command
+ }
+}
+
+pub(super) async fn run_workspace_fix_everything() -> Result<FixEverythingOutput, McpError> {
+ let workspace_root = resolve_workspace_root_path()?;
+ let (format_command, fix_command) =
+ resolve_workspace_fix_command_specs(workspace_root.as_path())?;
+ let format_step =
+ run_workspace_fix_step(workspace_root.as_path(), "format_workspace", format_command)
+ .await?;
+ let fix_step = run_workspace_fix_step(
+ workspace_root.as_path(),
+ "clippy_fix_workspace",
+ fix_command,
+ )
+ .await?;
+ let steps = vec![format_step, fix_step];
+ let success = steps.iter().all(|step| step.success);
+ Ok(FixEverythingOutput {
+ success,
+ workspace_root: workspace_root.display().to_string(),
+ steps,
+ })
+}
+
+pub(super) async fn run_workspace_fix_step(
+ workspace_root: &Path,
+ step_name: &str,
+ command_spec: WorkspaceCommandSpec,
+) -> Result<FixStepOutput, McpError> {
+ let rendered_command = command_spec.rendered();
+ let output =
+ run_workspace_command(workspace_root, &command_spec, rendered_command.as_str()).await?;
+ Ok(FixStepOutput {
+ step_name: step_name.to_owned(),
+ command: command_spec.into_argv(),
+ success: output.status.success(),
+ exit_code: output.status.code(),
+ standard_output_excerpt: command_output_excerpt(output.standard_output.as_str()),
+ standard_error_excerpt: command_output_excerpt(output.standard_error.as_str()),
+ })
+}
+
+pub(super) fn resolve_workspace_root_path() -> Result<PathBuf, McpError> {
+ let from_env = std::env::var("ADEQUATE_MCP_WORKSPACE_ROOT")
+ .ok()
+ .map(PathBuf::from);
+ let raw_root = match from_env {
+ Some(path) => path,
+ None => std::env::current_dir().map_err(|_| {
+ porcelain_internal_error(
+ "failed to determine current working directory",
+ PorcelainErrorKind::InternalFailure,
+ Some("set ADEQUATE_MCP_WORKSPACE_ROOT explicitly"),
+ false,
+ )
+ })?,
+ };
+ let normalized = fs::canonicalize(&raw_root).unwrap_or(raw_root);
+ if normalized.is_dir() {
+ Ok(normalized)
+ } else {
+ Err(porcelain_invalid_params(
+ "workspace root must be a directory",
+ PorcelainErrorKind::InvalidInput,
+ Some("set ADEQUATE_MCP_WORKSPACE_ROOT to a project directory"),
+ ))
+ }
+}
+
+pub(super) fn default_clippy_command_spec() -> WorkspaceCommandSpec {
+ WorkspaceCommandSpec {
+ program: "cargo".to_owned(),
+ args: vec![
+ "clippy".to_owned(),
+ "--workspace".to_owned(),
+ "--all-targets".to_owned(),
+ "--all-features".to_owned(),
+ "--message-format=json".to_owned(),
+ ],
+ }
+}
+
+pub(super) fn default_format_command_spec() -> WorkspaceCommandSpec {
+ WorkspaceCommandSpec {
+ program: "cargo".to_owned(),
+ args: vec!["fmt".to_owned(), "--all".to_owned()],
+ }
+}
+
+pub(super) fn default_fix_command_spec() -> WorkspaceCommandSpec {
+ WorkspaceCommandSpec {
+ program: "cargo".to_owned(),
+ args: vec![
+ "clippy".to_owned(),
+ "--fix".to_owned(),
+ "--workspace".to_owned(),
+ "--all-targets".to_owned(),
+ "--all-features".to_owned(),
+ "--allow-dirty".to_owned(),
+ "--allow-staged".to_owned(),
+ ],
+ }
+}
+
+pub(super) fn resolve_clippy_command_spec(
+ workspace_root: &Path,
+) -> Result<WorkspaceCommandSpec, McpError> {
+ let metadata = read_workspace_tool_metadata(workspace_root)?;
+ let command = read_workspace_tool_command(
+ metadata.as_ref(),
+ "clippy_command",
+ &["clippy_command", "clippyCommand"],
+ )?;
+ Ok(command.unwrap_or_else(default_clippy_command_spec))
+}
+
+pub(super) fn resolve_workspace_fix_command_specs(
+ workspace_root: &Path,
+) -> Result<(WorkspaceCommandSpec, WorkspaceCommandSpec), McpError> {
+ let metadata = read_workspace_tool_metadata(workspace_root)?;
+ let format_command = read_workspace_tool_command(
+ metadata.as_ref(),
+ "format_command",
+ &[
+ "format_command",
+ "formatCommand",
+ "fmt_command",
+ "fmtCommand",
+ ],
+ )?
+ .unwrap_or_else(default_format_command_spec);
+ let fix_command = read_workspace_tool_command(
+ metadata.as_ref(),
+ "fix_command",
+ &[
+ "fix_command",
+ "fixCommand",
+ "fix_everything_command",
+ "fixEverythingCommand",
+ ],
+ )?
+ .unwrap_or_else(default_fix_command_spec);
+ Ok((format_command, fix_command))
+}
+
/// Reads the `[workspace.metadata.adequate-rust-mcp]` table (or its
/// `adequate_rust_mcp` underscore alias) from the workspace `Cargo.toml`.
///
/// Returns `Ok(None)` when the manifest parses but carries no such table;
/// errors only for unreadable or syntactically invalid manifests.
pub(super) fn read_workspace_tool_metadata(
    workspace_root: &Path,
) -> Result<Option<toml::map::Map<String, toml::Value>>, McpError> {
    let manifest_path = workspace_root.join("Cargo.toml");
    let manifest_text = fs::read_to_string(manifest_path.as_path()).map_err(|_| {
        porcelain_internal_error(
            "failed reading workspace Cargo.toml for tool configuration",
            PorcelainErrorKind::ToolRuntimeFailure,
            Some("ensure workspace root points to a Cargo workspace"),
            false,
        )
    })?;
    let manifest = manifest_text.parse::<toml::Value>().map_err(|_| {
        porcelain_internal_error(
            "failed parsing workspace Cargo.toml for tool configuration",
            PorcelainErrorKind::ToolRuntimeFailure,
            Some("fix Cargo.toml syntax under [workspace.metadata]"),
            false,
        )
    })?;
    // Navigate workspace -> metadata -> adequate-rust-mcp; every hop is
    // optional, so any missing level simply yields None.
    let metadata = manifest
        .get("workspace")
        .and_then(toml::Value::as_table)
        .and_then(|workspace| workspace.get("metadata"))
        .and_then(toml::Value::as_table)
        .and_then(|metadata| {
            metadata
                .get("adequate-rust-mcp")
                .or_else(|| metadata.get("adequate_rust_mcp"))
        })
        .and_then(toml::Value::as_table)
        .cloned();
    Ok(metadata)
}
+
+pub(super) fn read_workspace_tool_command(
+ metadata: Option<&toml::map::Map<String, toml::Value>>,
+ setting_name: &'static str,
+ key_aliases: &[&str],
+) -> Result<Option<WorkspaceCommandSpec>, McpError> {
+ let Some(metadata) = metadata else {
+ return Ok(None);
+ };
+ let value = key_aliases.iter().find_map(|key| metadata.get(*key));
+ let Some(value) = value else {
+ return Ok(None);
+ };
+ let command_items = value.as_array().ok_or_else(|| {
+ porcelain_invalid_params(
+ format!("workspace metadata {setting_name} must be an array of strings"),
+ PorcelainErrorKind::InvalidInput,
+ Some("set command metadata to an array like [\"cargo\", \"...\"]"),
+ )
+ })?;
+ let mut command = command_items
+ .iter()
+ .map(toml::Value::as_str)
+ .collect::<Option<Vec<_>>>()
+ .ok_or_else(|| {
+ porcelain_invalid_params(
+ format!("workspace metadata {setting_name} must be an array of strings"),
+ PorcelainErrorKind::InvalidInput,
+ Some("set command metadata to an array like [\"cargo\", \"...\"]"),
+ )
+ })?
+ .into_iter()
+ .map(str::to_owned)
+ .collect::<Vec<_>>();
+ if command.is_empty() || command[0].trim().is_empty() {
+ return Err(porcelain_invalid_params(
+ format!("workspace metadata {setting_name} must include a binary name"),
+ PorcelainErrorKind::InvalidInput,
+ Some("first entry in the command array must be executable name"),
+ ));
+ }
+ let program = command.remove(0);
+ Ok(Some(WorkspaceCommandSpec {
+ program,
+ args: command,
+ }))
+}
+
+pub(super) async fn run_workspace_command(
+ workspace_root: &Path,
+ command_spec: &WorkspaceCommandSpec,
+ rendered_command: &str,
+) -> Result<WorkspaceCommandRunOutput, McpError> {
+ let mut command = Command::new(command_spec.program.as_str());
+ let _configured_command = command
+ .args(command_spec.args.as_slice())
+ .current_dir(workspace_root)
+ .env("CARGO_TERM_COLOR", "never");
+ let output = command.output().await.map_err(|_| {
+ porcelain_internal_error(
+ format!("failed to spawn workspace command `{rendered_command}`"),
+ PorcelainErrorKind::ToolRuntimeFailure,
+ Some("ensure required toolchain binaries are installed and available in PATH"),
+ false,
+ )
+ })?;
+ Ok(WorkspaceCommandRunOutput {
+ status: output.status,
+ standard_output: String::from_utf8_lossy(output.stdout.as_slice()).into_owned(),
+ standard_error: String::from_utf8_lossy(output.stderr.as_slice()).into_owned(),
+ })
+}
+
+pub(super) fn command_output_excerpt(output: &str) -> Option<String> {
+ let trimmed = output.trim();
+ if trimmed.is_empty() {
+ return None;
+ }
+ let total_chars = trimmed.chars().count();
+ if total_chars <= COMMAND_OUTPUT_EXCERPT_CHAR_LIMIT {
+ return Some(trimmed.to_owned());
+ }
+ let keep_from_char = total_chars.saturating_sub(COMMAND_OUTPUT_EXCERPT_CHAR_LIMIT);
+ let keep_from_byte = trimmed
+ .char_indices()
+ .nth(keep_from_char)
+ .map_or(0, |(index, _)| index);
+ Some(format!(
+ "[...truncated {} chars...] {}",
+ keep_from_char,
+ &trimmed[keep_from_byte..]
+ ))
+}
diff --git a/crates/adequate-rust-mcp/tests/diagnostics_warmup_retry.rs b/crates/adequate-rust-mcp/tests/diagnostics_warmup_retry.rs
new file mode 100644
index 0000000..b27c34f
--- /dev/null
+++ b/crates/adequate-rust-mcp/tests/diagnostics_warmup_retry.rs
@@ -0,0 +1,403 @@
+//! Integration test for transient unlinked-file diagnostics during warm-up.
+
+use notify as _;
+use ra_mcp_domain as _;
+use ra_mcp_engine as _;
+use rmcp as _;
+use schemars as _;
+use serde as _;
+use serde_json::{Value, json};
+use serial_test::serial;
+use std::{
+ error::Error,
+ fs, io,
+ path::{Path, PathBuf},
+ process::Stdio,
+ time::Duration,
+};
+use tempfile::TempDir;
+use tokio::{
+ io::{AsyncBufReadExt, AsyncWriteExt, BufReader, Lines},
+ process::{Child, ChildStdin, ChildStdout, Command},
+};
+use toml as _;
+use tracing as _;
+use tracing_subscriber as _;
+use url as _;
+
+const RESPONSE_TIMEOUT: Duration = Duration::from_secs(20);
+
/// Warm-up retry path: the fake rust-analyzer is configured (via a generated
/// wrapper script) to serve two transient diagnostic rounds before the stable
/// result; the worker's `diagnostics` tool is expected to retry through them
/// and surface the settled "fake diagnostic" payload.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
#[serial]
async fn diagnostics_retries_transient_unlinked_file_reports() -> Result<(), Box<dyn Error>> {
    let fixture = Fixture::new()?;
    let worker_binary = resolve_worker_binary()?;
    let fake_ra_binary = resolve_fake_ra_binary()?;
    // The fake analyzer is a separately built helper binary; skip (rather
    // than fail) when it has not been compiled alongside the tests.
    if !fake_ra_binary.exists() {
        eprintln!(
            "skipping warmup diagnostics test: fake-rust-analyzer missing at {}",
            fake_ra_binary.display()
        );
        return Ok(());
    }

    let fake_wrapper = fixture.path().join("fake-ra-warmup.sh");
    write_fake_ra_wrapper(
        fake_wrapper.as_path(),
        fake_ra_binary.as_path(),
        FakeRaBehavior {
            diagnostic_warmup_count: 2,
            diagnostic_cancel_count: 0,
        },
    )?;

    let mut harness = WorkerHarness::spawn(
        worker_binary.as_path(),
        fixture.path(),
        fake_wrapper.as_path(),
    )
    .await?;
    harness.initialize().await?;

    let diagnostics = harness
        .call_tool(
            "diagnostics",
            json!({
                "file_path": fixture.source_file().display().to_string(),
                "render": "json",
                "mode": "full"
            }),
        )
        .await?;
    let items = diagnostics
        .get("diagnostics")
        .and_then(Value::as_array)
        .ok_or_else(|| io::Error::other("diagnostics payload missing diagnostics array"))?;
    // Exactly the one settled diagnostic must come back, with no code attached.
    assert_eq!(items.len(), 1);
    let message = items[0]
        .get("message")
        .and_then(Value::as_str)
        .ok_or_else(|| io::Error::other("diagnostic missing message"))?;
    assert_eq!(message, "fake diagnostic");
    let code = items[0].get("code").cloned().unwrap_or(Value::Null);
    assert_eq!(code, Value::Null);

    harness.shutdown().await;
    Ok(())
}
+
/// Cancellation retry path: the fake rust-analyzer is configured to cancel
/// one diagnostics request before serving the stable result; the worker must
/// retry and still deliver the settled "fake diagnostic" payload.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
#[serial]
async fn diagnostics_retries_server_cancelled_response() -> Result<(), Box<dyn Error>> {
    let fixture = Fixture::new()?;
    let worker_binary = resolve_worker_binary()?;
    let fake_ra_binary = resolve_fake_ra_binary()?;
    // Skip (rather than fail) when the fake analyzer helper is not built.
    if !fake_ra_binary.exists() {
        eprintln!(
            "skipping cancelled diagnostics test: fake-rust-analyzer missing at {}",
            fake_ra_binary.display()
        );
        return Ok(());
    }

    let fake_wrapper = fixture.path().join("fake-ra-cancel.sh");
    write_fake_ra_wrapper(
        fake_wrapper.as_path(),
        fake_ra_binary.as_path(),
        FakeRaBehavior {
            diagnostic_warmup_count: 0,
            diagnostic_cancel_count: 1,
        },
    )?;

    let mut harness = WorkerHarness::spawn(
        worker_binary.as_path(),
        fixture.path(),
        fake_wrapper.as_path(),
    )
    .await?;
    harness.initialize().await?;

    let diagnostics = harness
        .call_tool(
            "diagnostics",
            json!({
                "file_path": fixture.source_file().display().to_string(),
                "render": "json",
                "mode": "full"
            }),
        )
        .await?;
    let items = diagnostics
        .get("diagnostics")
        .and_then(Value::as_array)
        .ok_or_else(|| io::Error::other("diagnostics payload missing diagnostics array"))?;
    assert_eq!(items.len(), 1);
    let message = items[0]
        .get("message")
        .and_then(Value::as_str)
        .ok_or_else(|| io::Error::other("diagnostic missing message"))?;
    assert_eq!(message, "fake diagnostic");

    harness.shutdown().await;
    Ok(())
}
+
/// Throwaway single-crate cargo package used as the analysis target.
struct Fixture {
    /// Owns the workspace directory; deleted when the fixture is dropped.
    temp_dir: TempDir,
    /// Absolute path to `src/lib.rs` inside `temp_dir`.
    source_file: PathBuf,
}
+
+impl Fixture {
+ fn new() -> Result<Self, Box<dyn Error>> {
+ let temp_dir = tempfile::tempdir()?;
+ let src = temp_dir.path().join("src");
+ fs::create_dir_all(&src)?;
+ fs::write(
+ temp_dir.path().join("Cargo.toml"),
+ "[package]\nname = \"diagnostics_warmup_fixture\"\nversion = \"0.0.0\"\nedition = \"2024\"\n",
+ )?;
+ let source_file = src.join("lib.rs");
+ fs::write(&source_file, "pub fn compute() -> i32 { 1 }\n")?;
+ Ok(Self {
+ temp_dir,
+ source_file,
+ })
+ }
+
+ fn path(&self) -> &Path {
+ self.temp_dir.path()
+ }
+
+ fn source_file(&self) -> &Path {
+ self.source_file.as_path()
+ }
+}
+
/// Line-delimited JSON-RPC client wrapped around a spawned `--worker` process.
struct WorkerHarness {
    /// Worker process handle; killed on drop as a safety net.
    child: Child,
    /// Worker's stdin, used to write one JSON message per line.
    stdin: ChildStdin,
    /// Worker's stdout exposed as a line stream.
    stdout: Lines<BufReader<ChildStdout>>,
    /// Next JSON-RPC request id to allocate (monotonically increasing).
    next_id: u64,
}
+
impl Drop for WorkerHarness {
    /// Best-effort kill so a panicking test does not leak the worker process.
    fn drop(&mut self) {
        let _ = self.child.start_kill();
    }
}
+
impl WorkerHarness {
    /// Spawns the worker binary in `--worker` mode rooted at `workspace_root`,
    /// pointing it at `fake_ra_binary` via environment variables. stdin and
    /// stdout are piped for JSON-RPC traffic; stderr is discarded.
    async fn spawn(
        worker_binary: &Path,
        workspace_root: &Path,
        fake_ra_binary: &Path,
    ) -> Result<Self, Box<dyn Error>> {
        let mut child = Command::new(worker_binary)
            .arg("--worker")
            .env("ADEQUATE_MCP_WORKSPACE_ROOT", workspace_root)
            .env("ADEQUATE_MCP_RA_BINARY", fake_ra_binary)
            .stdin(Stdio::piped())
            .stdout(Stdio::piped())
            .stderr(Stdio::null())
            .spawn()?;

        let stdin = child
            .stdin
            .take()
            .ok_or_else(|| io::Error::other("worker stdin unavailable"))?;
        let stdout = child
            .stdout
            .take()
            .ok_or_else(|| io::Error::other("worker stdout unavailable"))?;
        Ok(Self {
            child,
            stdin,
            stdout: BufReader::new(stdout).lines(),
            next_id: 1,
        })
    }

    /// Performs the MCP handshake: an `initialize` request followed by the
    /// `notifications/initialized` notification.
    async fn initialize(&mut self) -> Result<(), Box<dyn Error>> {
        let _ = self
            .request(
                "initialize",
                json!({
                    "protocolVersion": "2025-11-25",
                    "capabilities": {},
                    "clientInfo": {
                        "name": "diagnostics-warmup-test",
                        "version": "1.0.0"
                    }
                }),
            )
            .await?;
        self.notify("notifications/initialized", json!({})).await?;
        Ok(())
    }

    /// Invokes `tools/call` for `tool_name`; fails when the tool reports
    /// `isError`, otherwise returns `structuredContent` (Null when absent).
    async fn call_tool(
        &mut self,
        tool_name: &str,
        arguments: Value,
    ) -> Result<Value, Box<dyn Error>> {
        let response = self
            .request(
                "tools/call",
                json!({
                    "name": tool_name,
                    "arguments": arguments,
                }),
            )
            .await?;
        let result = response
            .get("result")
            .ok_or_else(|| io::Error::other("tool response missing result"))?;
        let is_error = result
            .get("isError")
            .and_then(Value::as_bool)
            .unwrap_or(false);
        if is_error {
            return Err(Box::new(io::Error::other(format!(
                "tool `{tool_name}` returned error payload: {result}"
            ))));
        }
        Ok(result
            .get("structuredContent")
            .cloned()
            .unwrap_or(Value::Null))
    }

    /// Sends a JSON-RPC request with a freshly allocated id and waits for the
    /// matching response envelope.
    async fn request(&mut self, method: &str, params: Value) -> Result<Value, Box<dyn Error>> {
        let id = self.next_id;
        self.next_id = self.next_id.saturating_add(1);
        let payload = json!({
            "jsonrpc": "2.0",
            "id": id,
            "method": method,
            "params": params,
        });
        self.write_message(&payload).await?;
        self.read_response(id).await
    }

    /// Sends a JSON-RPC notification (no id, no response expected).
    async fn notify(&mut self, method: &str, params: Value) -> Result<(), Box<dyn Error>> {
        let payload = json!({
            "jsonrpc": "2.0",
            "method": method,
            "params": params,
        });
        self.write_message(&payload).await
    }

    /// Writes one newline-delimited JSON message and flushes stdin.
    async fn write_message(&mut self, message: &Value) -> Result<(), Box<dyn Error>> {
        let serialized = serde_json::to_vec(message)?;
        self.stdin.write_all(&serialized).await?;
        self.stdin.write_all(b"\n").await?;
        self.stdin.flush().await?;
        Ok(())
    }

    /// Reads stdout lines until the response carrying `request_id` arrives or
    /// `RESPONSE_TIMEOUT` elapses. Non-JSON lines are skipped; messages with
    /// a different (or missing) id are discarded — this harness issues
    /// requests strictly sequentially, so nothing of interest is lost.
    async fn read_response(&mut self, request_id: u64) -> Result<Value, Box<dyn Error>> {
        let deadline = tokio::time::Instant::now() + RESPONSE_TIMEOUT;
        loop {
            if tokio::time::Instant::now() >= deadline {
                return Err(Box::new(io::Error::new(
                    io::ErrorKind::TimedOut,
                    format!("timed out waiting for response id {request_id}"),
                )));
            }
            // Bound each read by the remaining budget so the overall wait
            // never exceeds RESPONSE_TIMEOUT.
            let remaining = deadline.saturating_duration_since(tokio::time::Instant::now());
            let next_line = tokio::time::timeout(remaining, self.stdout.next_line()).await;
            let line = match next_line {
                Ok(Ok(Some(line))) => line,
                Ok(Ok(None)) => {
                    return Err(Box::new(io::Error::new(
                        io::ErrorKind::UnexpectedEof,
                        "worker stdout closed while awaiting response",
                    )));
                }
                Ok(Err(error)) => return Err(Box::new(error)),
                Err(_) => {
                    return Err(Box::new(io::Error::new(
                        io::ErrorKind::TimedOut,
                        format!("timed out waiting for response id {request_id}"),
                    )));
                }
            };
            let message = match serde_json::from_str::<Value>(&line) {
                Ok(message) => message,
                Err(_) => continue,
            };
            if message.get("id").and_then(Value::as_u64) == Some(request_id) {
                return Ok(message);
            }
        }
    }

    /// Force-kills the worker and reaps it to avoid leaving zombies behind.
    async fn shutdown(&mut self) {
        let _ = self.child.kill().await;
        let _ = self.child.wait().await;
    }
}
+
/// Failure-injection knobs forwarded to the fake rust-analyzer wrapper script.
#[derive(Clone, Copy)]
struct FakeRaBehavior {
    // Forwarded as `--diagnostic-warmup-count`: number of transient warm-up
    // diagnostic rounds the fake analyzer serves before settling.
    diagnostic_warmup_count: u8,
    // Forwarded as `--diagnostic-cancel-count`: number of diagnostics
    // requests the fake analyzer cancels before settling.
    diagnostic_cancel_count: u8,
}
+
+fn write_fake_ra_wrapper(
+ script_path: &Path,
+ fake_ra_binary: &Path,
+ behavior: FakeRaBehavior,
+) -> io::Result<()> {
+ let script = format!(
+ "#!/usr/bin/env bash\nexec \"{}\" --mode stable --diagnostic-warmup-count {} --diagnostic-cancel-count {}\n",
+ fake_ra_binary.display(),
+ behavior.diagnostic_warmup_count,
+ behavior.diagnostic_cancel_count
+ );
+ fs::write(script_path, script)?;
+ #[cfg(unix)]
+ {
+ use std::os::unix::fs::PermissionsExt;
+
+ let permissions = fs::Permissions::from_mode(0o755);
+ fs::set_permissions(script_path, permissions)?;
+ }
+ Ok(())
+}
+
/// Locates the worker binary under test: prefer the path Cargo exports for
/// integration tests, otherwise walk up from the test executable (which lives
/// in `target/<profile>/deps`) into the profile directory.
fn resolve_worker_binary() -> Result<PathBuf, Box<dyn Error>> {
    if let Ok(path) = std::env::var("CARGO_BIN_EXE_adequate-rust-mcp") {
        return Ok(PathBuf::from(path));
    }

    let test_executable = std::env::current_exe()?;
    let deps_directory = test_executable
        .parent()
        .ok_or_else(|| io::Error::other("failed to find integration test deps directory"))?;
    let profile_directory = deps_directory
        .parent()
        .ok_or_else(|| io::Error::other("failed to resolve target debug directory"))?;
    Ok(profile_directory.join("adequate-rust-mcp"))
}
+
/// Locates the fake rust-analyzer helper binary. Checks both hyphenated and
/// underscored `CARGO_BIN_EXE_*` spellings, then falls back to walking up
/// from the test executable into the target profile directory.
fn resolve_fake_ra_binary() -> Result<PathBuf, Box<dyn Error>> {
    for env_key in ["CARGO_BIN_EXE_fake-rust-analyzer", "CARGO_BIN_EXE_fake_rust_analyzer"] {
        if let Ok(path) = std::env::var(env_key) {
            return Ok(PathBuf::from(path));
        }
    }
    let test_executable = std::env::current_exe()?;
    let deps_directory = test_executable
        .parent()
        .ok_or_else(|| io::Error::other("failed to resolve integration test deps directory"))?;
    let profile_directory = deps_directory
        .parent()
        .ok_or_else(|| io::Error::other("failed to resolve target debug directory"))?;
    Ok(profile_directory.join("fake-rust-analyzer"))
}
+use libmcp as _;
diff --git a/crates/adequate-rust-mcp/tests/e2e_gauntlet.rs b/crates/adequate-rust-mcp/tests/e2e_gauntlet.rs
new file mode 100644
index 0000000..a5a2861
--- /dev/null
+++ b/crates/adequate-rust-mcp/tests/e2e_gauntlet.rs
@@ -0,0 +1,926 @@
+//! Optional live-fire end-to-end MCP gauntlet against a real rust-analyzer process.
+//!
+//! This test is gated behind `ADEQUATE_MCP_ENABLE_E2E=1` because it is intentionally
+//! heavyweight and depends on a local rust-analyzer binary.
+
+use notify as _;
+use ra_mcp_domain as _;
+use ra_mcp_engine as _;
+use rmcp as _;
+use schemars as _;
+use serde as _;
+use serde_json::{Value, json};
+use serial_test::serial;
+use std::{
+ collections::HashMap,
+ error::Error,
+ fs, io,
+ path::{Path, PathBuf},
+ process::Stdio,
+ time::{Duration, Instant},
+};
+use tempfile::TempDir;
+use tokio::{
+ io::{AsyncBufReadExt, AsyncWriteExt, BufReader, Lines},
+ process::{Child, ChildStdin, ChildStdout, Command},
+};
+use toml as _;
+use tracing as _;
+use tracing_subscriber as _;
+use url::Url;
+
+const E2E_ENABLE_ENV: &str = "ADEQUATE_MCP_ENABLE_E2E";
+const RA_BINARY_ENV: &str = "ADEQUATE_MCP_RA_BINARY";
+const RESPONSE_TIMEOUT: Duration = Duration::from_secs(45);
+
/// Full live-fire gauntlet against a real rust-analyzer: verifies the tool
/// catalog and schemas, then walks every major tool (advanced LSP passthrough,
/// rename, hover/definition/references, diagnostics, clippy, health and
/// telemetry) through both default (text) and `render: "json"` paths.
///
/// Skips silently unless `ADEQUATE_MCP_ENABLE_E2E` is set AND a working
/// rust-analyzer binary is reachable.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
#[serial]
async fn live_fire_gauntlet_reaches_known_state() -> Result<(), Box<dyn Error>> {
    if std::env::var_os(E2E_ENABLE_ENV).is_none() {
        eprintln!("skipping e2e gauntlet: set {E2E_ENABLE_ENV}=1 to enable");
        return Ok(());
    }

    let ra_binary = std::env::var(RA_BINARY_ENV).unwrap_or_else(|_| "rust-analyzer".to_owned());
    if !rust_analyzer_available(ra_binary.as_str()).await {
        eprintln!("skipping e2e gauntlet: rust-analyzer binary unavailable at `{ra_binary}`");
        return Ok(());
    }

    let fixture = Fixture::new()?;
    let file_uri = fixture.main_uri()?;
    let file_path = fixture.main_path().display().to_string();

    let mut harness = McpHarness::spawn(fixture.workspace_root(), ra_binary.as_str()).await?;
    harness.initialize().await?;

    // --- Tool catalog: all expected tools must be advertised. ---
    let tools = harness.tools_list().await?;
    assert!(tools.iter().any(|tool| tool == "advanced_lsp_request"));
    assert!(tools.iter().any(|tool| tool == "rename_symbol"));
    assert!(tools.iter().any(|tool| tool == "hover"));
    assert!(tools.iter().any(|tool| tool == "references"));
    assert!(tools.iter().any(|tool| tool == "clippy_diagnostics"));
    assert!(tools.iter().any(|tool| tool == "fix_everything"));

    // --- Schema shape: diagnostics uses `render`/`mode`, never legacy `format`. ---
    let diagnostics_tool = harness.tool_spec("diagnostics").await?;
    let diagnostics_properties = diagnostics_tool
        .get("inputSchema")
        .and_then(|schema| schema.get("properties"))
        .and_then(Value::as_object)
        .ok_or_else(|| io::Error::other("diagnostics input schema missing properties"))?;
    assert!(diagnostics_properties.contains_key("mode"));
    assert!(diagnostics_properties.contains_key("render"));
    assert!(!diagnostics_properties.contains_key("format"));
    let diagnostics_render_schema = diagnostics_properties
        .get("render")
        .ok_or_else(|| io::Error::other("diagnostics render schema missing"))?
        .to_string();
    assert!(diagnostics_render_schema.contains("\"porcelain\""));
    assert!(diagnostics_render_schema.contains("\"json\""));
    assert!(diagnostics_tool.get("outputSchema").is_some());

    let clippy_tool = harness.tool_spec("clippy_diagnostics").await?;
    let clippy_properties = clippy_tool
        .get("inputSchema")
        .and_then(|schema| schema.get("properties"))
        .and_then(Value::as_object)
        .ok_or_else(|| io::Error::other("clippy input schema missing properties"))?;
    assert!(clippy_properties.contains_key("render"));
    assert!(!clippy_properties.contains_key("format"));

    // Navigation tools share `render`/`path_style` inputs and an output schema.
    for tool_name in ["hover", "definition", "references"] {
        let tool = harness.tool_spec(tool_name).await?;
        let properties = tool
            .get("inputSchema")
            .and_then(|schema| schema.get("properties"))
            .and_then(Value::as_object)
            .ok_or_else(|| {
                io::Error::other(format!("{tool_name} input schema missing properties"))
            })?;
        assert!(properties.contains_key("render"));
        assert!(properties.contains_key("path_style"));
        assert!(tool.get("outputSchema").is_some());
    }

    for tool_name in ["health_snapshot", "telemetry_snapshot"] {
        let tool = harness.tool_spec(tool_name).await?;
        let properties = tool
            .get("inputSchema")
            .and_then(|schema| schema.get("properties"))
            .and_then(Value::as_object)
            .ok_or_else(|| {
                io::Error::other(format!("{tool_name} input schema missing properties"))
            })?;
        assert!(properties.contains_key("render"));
        assert!(tool.get("outputSchema").is_some());
    }

    // Warm symbol index before semantic operations.
    let symbols = harness
        .call_tool_retrying_transient(
            "advanced_lsp_request",
            json!({
                "method": "documentSymbol",
                "arguments": {
                    "textDocument": { "uri": file_uri }
                }
            }),
        )
        .await?;
    assert!(symbols.is_array() || symbols.is_object());

    // `params` may arrive as a pre-stringified JSON blob; the worker must
    // still accept it.
    let definition_via_stringified_params = harness
        .call_tool_retrying_transient(
            "advanced_lsp_request",
            json!({
                "method": "textDocument/definition",
                "params": format!(
                    "{{\"textDocument\":{{\"uri\":\"{file_uri}\"}},\"position\":{{\"line\":6,\"character\":16}}}}"
                )
            }),
        )
        .await?;
    assert!(
        definition_via_stringified_params.is_array()
            || definition_via_stringified_params.is_object()
    );

    for query in ["seed", "compute", "value"] {
        let workspace_symbols = harness
            .call_tool_retrying_transient(
                "advanced_lsp_request",
                json!({
                    "method": "workspaceSymbol",
                    "params": {
                        "query": query
                    }
                }),
            )
            .await?;
        assert!(workspace_symbols.is_array() || workspace_symbols.is_object());
    }

    let prepare = harness
        .call_tool_retrying_transient(
            "advanced_lsp_request",
            json!({
                "method": "prepareRename",
                "arguments": {
                    "textDocument": { "uri": file_uri },
                    "position": { "line": 5, "character": 8 }
                }
            }),
        )
        .await?;
    assert!(!prepare.is_null());

    // Exercise alias-heavy input normalization for rename: camelCase keys,
    // URI instead of path, stringified line number.
    let rename = harness
        .call_tool_retrying_transient(
            "rename_symbol",
            json!({
                "filePath": file_uri,
                "line": "6",
                "character": 9,
                "newName": "seed_value"
            }),
        )
        .await?;
    let edits_applied = rename
        .get("edits_applied")
        .and_then(Value::as_u64)
        .unwrap_or(0);
    let files_touched = rename
        .get("files_touched")
        .and_then(Value::as_u64)
        .unwrap_or(0);
    assert!(edits_applied >= 2);
    assert!(files_touched >= 1);

    // --- hover: default render must be plain text, not JSON. ---
    let hover_default = harness
        .call_tool_response_retrying_transient(
            "hover",
            json!({
                "file_path": file_path,
                "line": 7,
                "column": 30
            }),
        )
        .await?;
    assert!(hover_default.get("structuredContent").is_none());
    let hover_default_text = hover_default
        .get("content")
        .and_then(Value::as_array)
        .and_then(|items| items.first())
        .and_then(|entry| entry.get("text"))
        .and_then(Value::as_str)
        .unwrap_or("");
    assert!(hover_default_text.contains("fn compute_seed("));
    assert!(!hover_default_text.trim_start().starts_with('{'));

    let hover = harness
        .call_tool_retrying_transient(
            "hover",
            json!({
                "file_path": file_path,
                "line": 7,
                "column": 30,
                "render": "json"
            }),
        )
        .await?;
    assert!(hover.get("rendered").is_some());

    // --- definition: default text render vs explicit JSON render. ---
    let definition_default = harness
        .call_tool_response_retrying_transient(
            "definition",
            json!({
                "file_path": file_path,
                "line": 7,
                "column": 30
            }),
        )
        .await?;
    assert!(definition_default.get("structuredContent").is_none());
    let definition_default_text = definition_default
        .get("content")
        .and_then(Value::as_array)
        .and_then(|items| items.first())
        .and_then(|entry| entry.get("text"))
        .and_then(Value::as_str)
        .unwrap_or("");
    assert!(definition_default_text.contains("1 definition"));
    assert!(!definition_default_text.trim_start().starts_with('{'));

    let definition = harness
        .call_tool_retrying_transient(
            "definition",
            json!({
                "file_path": file_path,
                "line": 7,
                "column": 30,
                "render": "json"
            }),
        )
        .await?;
    let definition_count = definition
        .get("locations")
        .and_then(Value::as_array)
        .map_or(0, Vec::len);
    assert!(definition_count >= 1);

    // --- references: default text render vs explicit JSON render. ---
    let references_default = harness
        .call_tool_response_retrying_transient(
            "references",
            json!({
                "file_path": file_path,
                "line": 7,
                "column": 30
            }),
        )
        .await?;
    assert!(references_default.get("structuredContent").is_none());
    let references_default_text = references_default
        .get("content")
        .and_then(Value::as_array)
        .and_then(|items| items.first())
        .and_then(|entry| entry.get("text"))
        .and_then(Value::as_str)
        .unwrap_or("");
    assert!(references_default_text.contains("references"));
    assert!(!references_default_text.trim_start().starts_with('{'));

    let references = harness
        .call_tool_retrying_transient(
            "references",
            json!({
                "file_path": file_path,
                "line": 7,
                "column": 30,
                "render": "json"
            }),
        )
        .await?;
    let reference_count = references
        .get("locations")
        .and_then(Value::as_array)
        .map_or(0, Vec::len);
    assert!(reference_count >= 2);

    // --- diagnostics: default text, then compact JSON vs full JSON. ---
    let diagnostics_default = harness
        .call_tool_response_retrying_transient("diagnostics", json!({ "file_path": file_path }))
        .await?;
    assert!(diagnostics_default.get("structuredContent").is_none());
    let diagnostics_default_text = diagnostics_default
        .get("content")
        .and_then(Value::as_array)
        .and_then(|items| items.first())
        .and_then(|entry| entry.get("text"))
        .and_then(Value::as_str)
        .unwrap_or("");
    assert!(diagnostics_default_text.contains("total"));
    assert!(!diagnostics_default_text.trim_start().starts_with('{'));

    let diagnostics = harness
        .call_tool_retrying_transient(
            "diagnostics",
            json!({ "file_path": file_path, "render": "json" }),
        )
        .await?;
    let compact_mode = diagnostics
        .get("mode")
        .and_then(Value::as_str)
        .unwrap_or("");
    assert_eq!(compact_mode, "compact");
    let compact_items = diagnostics
        .get("items")
        .and_then(Value::as_array)
        .map_or(0, Vec::len);
    let compact_total = diagnostics
        .get("counts")
        .and_then(Value::as_object)
        .and_then(|counts| counts.get("total_count"))
        .and_then(Value::as_u64)
        .unwrap_or(0);
    assert_eq!(compact_total, compact_items as u64);

    let diagnostics_full = harness
        .call_tool_retrying_transient(
            "diagnostics",
            json!({ "file_path": file_path, "mode": "full", "render": "json" }),
        )
        .await?;
    let full_mode = diagnostics_full
        .get("mode")
        .and_then(Value::as_str)
        .unwrap_or("");
    assert_eq!(full_mode, "full");
    let full_items = diagnostics_full
        .get("diagnostics")
        .and_then(Value::as_array)
        .cloned()
        .unwrap_or_default();
    assert_eq!(compact_items, full_items.len());

    // Cross-check: every compact entry must agree field-by-field with its
    // positional counterpart in the full rendering.
    let compact_entries = diagnostics
        .get("items")
        .and_then(Value::as_array)
        .cloned()
        .unwrap_or_default();
    assert_eq!(compact_entries.len(), full_items.len());
    for (compact_entry, full_entry) in compact_entries.iter().zip(full_items.iter()) {
        let expected_severity = full_entry
            .get("level")
            .and_then(Value::as_str)
            .unwrap_or("information");
        let full_range = full_entry
            .get("range")
            .cloned()
            .unwrap_or_else(|| json!({}));
        assert_eq!(
            compact_entry
                .get("severity")
                .and_then(Value::as_str)
                .unwrap_or(""),
            expected_severity
        );
        assert_eq!(
            compact_entry
                .get("file_path")
                .and_then(Value::as_str)
                .unwrap_or(""),
            full_range
                .get("file_path")
                .and_then(Value::as_str)
                .unwrap_or("")
        );
        assert_eq!(
            compact_entry
                .get("start_line")
                .and_then(Value::as_u64)
                .unwrap_or(0),
            full_range
                .get("start")
                .and_then(Value::as_object)
                .and_then(|start| start.get("line"))
                .and_then(Value::as_u64)
                .unwrap_or(0)
        );
        assert_eq!(
            compact_entry
                .get("start_column")
                .and_then(Value::as_u64)
                .unwrap_or(0),
            full_range
                .get("start")
                .and_then(Value::as_object)
                .and_then(|start| start.get("column"))
                .and_then(Value::as_u64)
                .unwrap_or(0)
        );
        assert_eq!(
            compact_entry
                .get("end_line")
                .and_then(Value::as_u64)
                .unwrap_or(0),
            full_range
                .get("end")
                .and_then(Value::as_object)
                .and_then(|end| end.get("line"))
                .and_then(Value::as_u64)
                .unwrap_or(0)
        );
        assert_eq!(
            compact_entry
                .get("end_column")
                .and_then(Value::as_u64)
                .unwrap_or(0),
            full_range
                .get("end")
                .and_then(Value::as_object)
                .and_then(|end| end.get("column"))
                .and_then(Value::as_u64)
                .unwrap_or(0)
        );
        assert_eq!(
            compact_entry.get("code").cloned().unwrap_or(Value::Null),
            full_entry.get("code").cloned().unwrap_or(Value::Null)
        );
        assert_eq!(
            compact_entry
                .get("message")
                .and_then(Value::as_str)
                .unwrap_or(""),
            full_entry
                .get("message")
                .and_then(Value::as_str)
                .unwrap_or("")
        );
    }

    // Batch mode: the same file twice yields exactly twice the diagnostics.
    let diagnostics_batch = harness
        .call_tool_retrying_transient(
            "diagnostics",
            json!({ "file_paths": [file_path, file_path], "mode": "full", "render": "json" }),
        )
        .await?;
    let diagnostics_batch_items = diagnostics_batch
        .get("diagnostics")
        .and_then(Value::as_array)
        .map_or(0, Vec::len);
    assert_eq!(diagnostics_batch_items, full_items.len().saturating_mul(2));

    // --- clippy: default text, compact JSON, full JSON, count agreement. ---
    let clippy_default = harness
        .call_tool_response("clippy_diagnostics", json!({ "file_path": file_path }))
        .await?;
    assert!(clippy_default.get("structuredContent").is_none());
    let clippy_default_text = clippy_default
        .get("content")
        .and_then(Value::as_array)
        .and_then(|items| items.first())
        .and_then(|entry| entry.get("text"))
        .and_then(Value::as_str)
        .unwrap_or("");
    assert!(clippy_default_text.contains("total"));
    assert!(!clippy_default_text.trim_start().starts_with('{'));

    let clippy_compact = harness
        .call_tool_retrying_transient(
            "clippy_diagnostics",
            json!({ "file_path": file_path, "render": "json" }),
        )
        .await?;
    assert_eq!(
        clippy_compact
            .get("mode")
            .and_then(Value::as_str)
            .unwrap_or(""),
        "compact"
    );
    assert!(clippy_compact.get("counts").is_some());

    let clippy_full = harness
        .call_tool_retrying_transient(
            "clippy_diagnostics",
            json!({ "file_path": file_path, "mode": "full", "render": "json" }),
        )
        .await?;
    assert_eq!(
        clippy_full
            .get("mode")
            .and_then(Value::as_str)
            .unwrap_or(""),
        "full"
    );
    let clippy_total = clippy_compact
        .get("counts")
        .and_then(Value::as_object)
        .and_then(|counts| counts.get("total_count"))
        .and_then(Value::as_u64)
        .unwrap_or(0);
    let clippy_full_items = clippy_full
        .get("diagnostics")
        .and_then(Value::as_array)
        .map_or(0, Vec::len);
    assert_eq!(clippy_total as usize, clippy_full_items);
    // The fixture deliberately contains an unused variable, so clippy must
    // report at least one lint.
    assert!(clippy_total >= 1);

    // --- health / telemetry snapshots: text default, JSON on request. ---
    let health_default = harness
        .call_tool_response_retrying_transient("health_snapshot", json!({}))
        .await?;
    assert!(health_default.get("structuredContent").is_none());
    let health_default_text = health_default
        .get("content")
        .and_then(Value::as_array)
        .and_then(|items| items.first())
        .and_then(|entry| entry.get("text"))
        .and_then(Value::as_str)
        .unwrap_or("");
    assert!(health_default_text.contains("gen="));
    assert!(!health_default_text.trim_start().starts_with('{'));

    let health = harness
        .call_tool_retrying_transient("health_snapshot", json!({ "render": "json" }))
        .await?;
    let generation = health
        .get("generation")
        .and_then(Value::as_u64)
        .unwrap_or(0);
    assert!(generation >= 1);

    let telemetry_default = harness
        .call_tool_response_retrying_transient("telemetry_snapshot", json!({}))
        .await?;
    assert!(telemetry_default.get("structuredContent").is_none());
    let telemetry_default_text = telemetry_default
        .get("content")
        .and_then(Value::as_array)
        .and_then(|items| items.first())
        .and_then(|entry| entry.get("text"))
        .and_then(Value::as_str)
        .unwrap_or("");
    assert!(telemetry_default_text.contains("totals requests="));
    assert!(!telemetry_default_text.trim_start().starts_with('{'));

    let telemetry = harness
        .call_tool_retrying_transient("telemetry_snapshot", json!({ "render": "json" }))
        .await?;
    assert!(telemetry.get("totals").is_some());

    harness.shutdown().await;
    Ok(())
}
+
/// Throwaway cargo binary package used as the live-fire analysis target.
struct Fixture {
    /// Owns the workspace directory; deleted when the fixture is dropped.
    temp_dir: TempDir,
    /// Absolute path to `src/main.rs` inside `temp_dir`.
    main_path: PathBuf,
}
+
+impl Fixture {
+ fn new() -> Result<Self, Box<dyn Error>> {
+ let temp_dir = tempfile::tempdir()?;
+ let src_dir = temp_dir.path().join("src");
+ fs::create_dir_all(&src_dir)?;
+
+ fs::write(
+ temp_dir.path().join("Cargo.toml"),
+ "[package]\nname = \"gauntlet_fixture\"\nversion = \"0.0.0\"\nedition = \"2024\"\n",
+ )?;
+ let main_path = src_dir.join("main.rs");
+ fs::write(
+ &main_path,
+ "fn compute_seed(seed: i32) -> i32 {\n seed + 1\n}\n\nfn main() {\n let seed = 41;\n let value = compute_seed(seed);\n println!(\"{value}\");\n let unused = 0;\n}\n",
+ )?;
+
+ Ok(Self {
+ temp_dir,
+ main_path,
+ })
+ }
+
+ fn workspace_root(&self) -> &Path {
+ self.temp_dir.path()
+ }
+
+ fn main_path(&self) -> &Path {
+ self.main_path.as_path()
+ }
+
+ fn main_uri(&self) -> Result<String, Box<dyn Error>> {
+ let url = Url::from_file_path(&self.main_path)
+ .map_err(|()| io::Error::other("fixture main.rs is not representable as file URI"))?;
+ Ok(url.to_string())
+ }
+}
+
/// Line-delimited JSON-RPC client around a spawned worker, for the live-fire
/// gauntlet. Unlike the simpler harnesses, it buffers out-of-order responses.
struct McpHarness {
    /// Worker process handle; killed on drop as a safety net.
    child: Child,
    /// Worker's stdin, used to write one JSON message per line.
    stdin: ChildStdin,
    /// Worker's stdout exposed as a line stream.
    stdout: Lines<BufReader<ChildStdout>>,
    /// Responses that arrived while awaiting a different request id; replayed
    /// by `read_response` on the next matching call.
    buffered_responses: HashMap<u64, Value>,
    /// Next JSON-RPC request id to allocate (monotonically increasing).
    next_id: u64,
}
+
impl Drop for McpHarness {
    /// Best-effort kill so a panicking test does not leak the worker process.
    fn drop(&mut self) {
        let _result = self.child.start_kill();
    }
}
+
impl McpHarness {
    /// Resolves the worker binary and spawns it in `--worker` mode rooted at
    /// `workspace_root`, pointed at the given rust-analyzer binary. stdin and
    /// stdout are piped for JSON-RPC traffic; stderr is discarded.
    async fn spawn(workspace_root: &Path, ra_binary: &str) -> Result<Self, Box<dyn Error>> {
        let worker_binary = resolve_worker_binary()?;
        let mut child = Command::new(worker_binary)
            .arg("--worker")
            .env("ADEQUATE_MCP_WORKSPACE_ROOT", workspace_root)
            .env("ADEQUATE_MCP_RA_BINARY", ra_binary)
            .stdin(Stdio::piped())
            .stdout(Stdio::piped())
            .stderr(Stdio::null())
            .spawn()?;

        let stdin = child
            .stdin
            .take()
            .ok_or_else(|| io::Error::other("worker stdin unavailable"))?;
        let stdout = child
            .stdout
            .take()
            .ok_or_else(|| io::Error::other("worker stdout unavailable"))?;
        Ok(Self {
            child,
            stdin,
            stdout: BufReader::new(stdout).lines(),
            buffered_responses: HashMap::new(),
            next_id: 1,
        })
    }

    /// Performs the MCP handshake: an `initialize` request followed by the
    /// `notifications/initialized` notification.
    async fn initialize(&mut self) -> Result<(), Box<dyn Error>> {
        let initialize_payload = json!({
            "protocolVersion": "2025-11-25",
            "capabilities": {},
            "clientInfo": {
                "name": "adequate-e2e-gauntlet",
                "version": "1.0.0"
            }
        });
        let _result = self.request("initialize", initialize_payload).await?;
        self.notify("notifications/initialized", json!({})).await?;
        Ok(())
    }

    /// Returns just the advertised tool names from `tools/list`.
    async fn tools_list(&mut self) -> Result<Vec<String>, Box<dyn Error>> {
        let tools = self.tools_catalog().await?;

        let names = tools
            .iter()
            .filter_map(|tool| tool.get("name").and_then(Value::as_str))
            .map(str::to_owned)
            .collect::<Vec<_>>();
        Ok(names)
    }

    /// Returns the full `tools/list` record for `tool_name`, erroring when
    /// the tool is not advertised.
    async fn tool_spec(&mut self, tool_name: &str) -> Result<Value, Box<dyn Error>> {
        let tools = self.tools_catalog().await?;
        tools
            .into_iter()
            .find(|tool| tool.get("name").and_then(Value::as_str) == Some(tool_name))
            .ok_or_else(|| io::Error::other(format!("tools/list missing `{tool_name}`")).into())
    }

    /// Fetches the raw `tools` array from `tools/list`.
    async fn tools_catalog(&mut self) -> Result<Vec<Value>, Box<dyn Error>> {
        let response = self.request("tools/list", json!({})).await?;
        let tools = response
            .get("tools")
            .and_then(Value::as_array)
            .ok_or_else(|| io::Error::other("tools/list missing tools array"))?;
        Ok(tools.clone())
    }

    /// Calls a tool and extracts its payload: prefers `structuredContent`,
    /// falls back to parsing the first content text as JSON, then to the raw
    /// text, and finally to `Null` when no content is present.
    async fn call_tool(
        &mut self,
        tool_name: &str,
        arguments: Value,
    ) -> Result<Value, Box<dyn Error>> {
        let response = self.call_tool_response(tool_name, arguments).await?;

        if let Some(content) = response.get("structuredContent") {
            return Ok(content.clone());
        }

        let text = response
            .get("content")
            .and_then(Value::as_array)
            .and_then(|items| items.first())
            .and_then(|entry| entry.get("text"))
            .and_then(Value::as_str);
        let Some(text) = text else {
            return Ok(Value::Null);
        };
        if let Ok(parsed) = serde_json::from_str::<Value>(text) {
            return Ok(parsed);
        }
        Ok(Value::String(text.to_owned()))
    }

    /// Calls `tools/call` and returns the raw tool result, converting an
    /// `isError` payload into an `Err`.
    async fn call_tool_response(
        &mut self,
        tool_name: &str,
        arguments: Value,
    ) -> Result<Value, Box<dyn Error>> {
        let response = self
            .request(
                "tools/call",
                json!({
                    "name": tool_name,
                    "arguments": arguments,
                }),
            )
            .await?;

        let is_error = response
            .get("isError")
            .and_then(Value::as_bool)
            .unwrap_or(false);
        if is_error {
            return Err(Box::new(io::Error::other(format!(
                "tool `{tool_name}` returned error payload: {response}"
            ))));
        }
        Ok(response)
    }

    /// Like `call_tool_response`, but retries up to 4 attempts (250 ms apart)
    /// while the failure looks transient (see `is_transient_tool_error`).
    async fn call_tool_response_retrying_transient(
        &mut self,
        tool_name: &str,
        arguments: Value,
    ) -> Result<Value, Box<dyn Error>> {
        let mut attempt = 0_u8;
        loop {
            attempt = attempt.saturating_add(1);
            let result = self.call_tool_response(tool_name, arguments.clone()).await;
            match result {
                Ok(response) => return Ok(response),
                Err(error) => {
                    let message = error.to_string();
                    let is_transient = is_transient_tool_error(message.as_str());
                    if !is_transient || attempt >= 4 {
                        return Err(error);
                    }
                    tokio::time::sleep(Duration::from_millis(250)).await;
                }
            }
        }
    }

    /// Like `call_tool`, but retries up to 4 attempts (250 ms apart) while
    /// the failure looks transient (see `is_transient_tool_error`).
    async fn call_tool_retrying_transient(
        &mut self,
        tool_name: &str,
        arguments: Value,
    ) -> Result<Value, Box<dyn Error>> {
        let mut attempt = 0_u8;
        loop {
            attempt = attempt.saturating_add(1);
            let result = self.call_tool(tool_name, arguments.clone()).await;
            match result {
                Ok(payload) => return Ok(payload),
                Err(error) => {
                    let message = error.to_string();
                    let is_transient = is_transient_tool_error(message.as_str());
                    if !is_transient || attempt >= 4 {
                        return Err(error);
                    }
                    tokio::time::sleep(Duration::from_millis(250)).await;
                }
            }
        }
    }

    /// Sends a JSON-RPC notification (no id, no response expected).
    async fn notify(&mut self, method: &str, params: Value) -> Result<(), Box<dyn Error>> {
        let payload = json!({
            "jsonrpc": "2.0",
            "method": method,
            "params": params,
        });
        let serialized = serde_json::to_vec(&payload)?;
        self.stdin.write_all(&serialized).await?;
        self.stdin.write_all(b"\n").await?;
        self.stdin.flush().await?;
        Ok(())
    }

    /// Sends a JSON-RPC request with a fresh id, waits for the matching
    /// response, and unwraps its `result` (turning `error` into `Err`).
    async fn request(&mut self, method: &str, params: Value) -> Result<Value, Box<dyn Error>> {
        let request_id = self.next_id;
        self.next_id = self.next_id.saturating_add(1);

        let payload = json!({
            "jsonrpc": "2.0",
            "id": request_id,
            "method": method,
            "params": params,
        });
        let serialized = serde_json::to_vec(&payload)?;
        self.stdin.write_all(&serialized).await?;
        self.stdin.write_all(b"\n").await?;
        self.stdin.flush().await?;

        let response = self.read_response(request_id).await?;
        if let Some(error) = response.get("error") {
            return Err(Box::new(io::Error::other(format!(
                "json-rpc error for method `{method}`: {error}"
            ))));
        }
        let result = response
            .get("result")
            .cloned()
            .ok_or_else(|| io::Error::other("json-rpc response missing result"))?;
        Ok(result)
    }

    /// Reads stdout until the response with `request_id` arrives or
    /// `RESPONSE_TIMEOUT` elapses. Responses for other ids are buffered for
    /// later calls rather than dropped; non-JSON and id-less lines are
    /// skipped.
    async fn read_response(&mut self, request_id: u64) -> Result<Value, Box<dyn Error>> {
        // The response may already have been read while waiting for an
        // earlier id.
        if let Some(buffered) = self.buffered_responses.remove(&request_id) {
            return Ok(buffered);
        }

        let deadline = Instant::now() + RESPONSE_TIMEOUT;
        loop {
            if Instant::now() >= deadline {
                return Err(Box::new(io::Error::new(
                    io::ErrorKind::TimedOut,
                    format!("timed out waiting for response id {request_id}"),
                )));
            }
            // Bound each read by the remaining budget so the overall wait
            // never exceeds RESPONSE_TIMEOUT.
            let remaining = deadline.saturating_duration_since(Instant::now());
            let next_line = tokio::time::timeout(remaining, self.stdout.next_line()).await;
            let line = match next_line {
                Ok(Ok(Some(line))) => line,
                Ok(Ok(None)) => {
                    return Err(Box::new(io::Error::new(
                        io::ErrorKind::UnexpectedEof,
                        "worker stdout closed while awaiting response",
                    )));
                }
                Ok(Err(error)) => return Err(Box::new(error)),
                Err(_elapsed) => {
                    return Err(Box::new(io::Error::new(
                        io::ErrorKind::TimedOut,
                        format!("timed out waiting for response id {request_id}"),
                    )));
                }
            };

            let parsed = serde_json::from_str::<Value>(&line);
            let Ok(message) = parsed else {
                continue;
            };
            let response_id = message.get("id").and_then(Value::as_u64);
            let Some(response_id) = response_id else {
                continue;
            };

            if response_id == request_id {
                return Ok(message);
            }
            let _existing = self.buffered_responses.insert(response_id, message);
        }
    }

    /// Force-kills the worker and reaps it to avoid leaving zombies behind.
    async fn shutdown(&mut self) {
        let _kill_result = self.child.kill().await;
        let _wait_result = self.child.wait().await;
    }
}
+
/// Heuristic classifier for retryable tool failures: matches the porcelain
/// `transient_retryable` error kind plus content-modified style messages
/// (LSP error code -32801), which rust-analyzer emits while re-indexing.
fn is_transient_tool_error(message: &str) -> bool {
    const TRANSIENT_MARKERS: [&str; 4] = [
        "\"kind\":\"transient_retryable\"",
        "code=-32801",
        "content modified",
        "document changed",
    ];
    TRANSIENT_MARKERS
        .iter()
        .any(|marker| message.contains(marker))
}
+
/// Locates the worker binary under test: prefer the path Cargo exports for
/// integration tests, otherwise walk up from the test executable (which lives
/// in `target/<profile>/deps`) into the profile directory.
fn resolve_worker_binary() -> Result<PathBuf, Box<dyn Error>> {
    if let Ok(path) = std::env::var("CARGO_BIN_EXE_adequate-rust-mcp") {
        return Ok(PathBuf::from(path));
    }

    let test_executable = std::env::current_exe()?;
    let deps_directory = test_executable
        .parent()
        .ok_or_else(|| io::Error::other("failed to find integration test deps directory"))?;
    let profile_directory = deps_directory
        .parent()
        .ok_or_else(|| io::Error::other("failed to resolve target debug directory"))?;
    Ok(profile_directory.join("adequate-rust-mcp"))
}
+
+async fn rust_analyzer_available(binary: &str) -> bool {
+ let status = Command::new(binary)
+ .arg("--version")
+ .stdout(Stdio::null())
+ .stderr(Stdio::null())
+ .status()
+ .await;
+ matches!(status, Ok(status) if status.success())
+}
+use libmcp as _;
diff --git a/crates/adequate-rust-mcp/tests/host_inflight_replay.rs b/crates/adequate-rust-mcp/tests/host_inflight_replay.rs
new file mode 100644
index 0000000..17088d6
--- /dev/null
+++ b/crates/adequate-rust-mcp/tests/host_inflight_replay.rs
@@ -0,0 +1,657 @@
+//! Host-level replay test: in-flight requests must survive worker hot-swap.
+
+use notify as _;
+use ra_mcp_domain as _;
+use ra_mcp_engine as _;
+use rmcp as _;
+use schemars as _;
+use serde as _;
+use serde_json::{Value, json};
+use serial_test::serial;
+use std::{
+ collections::HashMap,
+ error::Error,
+ fs, io,
+ path::{Path, PathBuf},
+ process::Stdio,
+ time::{Duration, Instant},
+};
+use tempfile::TempDir;
+use tokio::{
+ io::{AsyncBufReadExt, AsyncWriteExt, BufReader, Lines},
+ process::{Child, ChildStdin, ChildStdout, Command},
+};
+use toml as _;
+use tracing as _;
+use tracing_subscriber as _;
+use url as _;
+
+const RESPONSE_TIMEOUT: Duration = Duration::from_secs(20);
+
+// End-to-end: a hover request that is in flight when the worker binary is
+// hot-swapped must still complete successfully (the host replays it against
+// the new worker) and must leave telemetry behind.
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
+#[serial]
+async fn host_replays_inflight_request_during_hot_swap() -> Result<(), Box<dyn Error>> {
+    let fixture = Fixture::new()?;
+    let host_binary = resolve_host_binary()?;
+    let fake_ra_binary = resolve_fake_ra_binary()?;
+    // Skip (not fail) when the fake rust-analyzer helper was not built.
+    if !fake_ra_binary.exists() {
+        eprintln!(
+            "skipping host replay test: fake-rust-analyzer missing at {}",
+            fake_ra_binary.display()
+        );
+        return Ok(());
+    }
+
+    // Stage the worker binary inside the fixture so it can be swapped later.
+    let worker_binary = fixture.path().join("adequate-rust-mcp-worker");
+    replace_worker_binary(host_binary.as_path(), worker_binary.as_path())?;
+
+    let fake_wrapper = fixture.path().join("fake-ra-wrapper.sh");
+    let telemetry_state_home = fixture.path().join("state-home");
+    let telemetry_path = telemetry_state_home
+        .join("adequate-rust-mcp")
+        .join("telemetry.jsonl");
+    // Delay hover responses (1.2s) so the request is still in flight when the
+    // worker binary is replaced underneath the host.
+    let wrapper_config = FakeRaWrapperConfig {
+        hover_delay_ms: Some(1200),
+        execute_command_delay_ms: None,
+        execute_command_log_path: None,
+    };
+    write_fake_ra_wrapper(
+        fake_wrapper.as_path(),
+        fake_ra_binary.as_path(),
+        &wrapper_config,
+    )?;
+
+    let mut harness = HostHarness::spawn(
+        host_binary.as_path(),
+        worker_binary.as_path(),
+        fixture.path(),
+        fake_wrapper.as_path(),
+        telemetry_state_home.as_path(),
+    )
+    .await?;
+    harness.initialize().await?;
+
+    let hover_id = harness.next_request_id();
+    harness
+        .send_request_with_id(
+            hover_id,
+            "tools/call",
+            json!({
+                "name": "hover",
+                "arguments": {
+                    "file_path": fixture.source_file().display().to_string(),
+                    "line": 1,
+                    "column": 1,
+                    "render": "json"
+                }
+            }),
+        )
+        .await?;
+
+    // Give the host time to dispatch the hover, then force a hot-swap while
+    // the (deliberately slow) response is still pending.
+    tokio::time::sleep(Duration::from_millis(120)).await;
+    replace_worker_binary(host_binary.as_path(), worker_binary.as_path())?;
+
+    let response = harness.read_response(hover_id).await?;
+    assert!(
+        response.get("error").is_none(),
+        "expected replayed success, got error response: {response}",
+    );
+
+    let result = response
+        .get("result")
+        .ok_or_else(|| io::Error::other("missing result in response"))?;
+    let is_error = result
+        .get("isError")
+        .and_then(Value::as_bool)
+        .unwrap_or(false);
+    assert!(!is_error, "tool response marked as error: {result}");
+
+    let rendered = result
+        .get("structuredContent")
+        .and_then(|payload| payload.get("rendered"))
+        .and_then(Value::as_str);
+    assert_eq!(rendered, Some("hover::ok"));
+
+    harness.shutdown().await;
+
+    // The host must have recorded both the tool call and at least one
+    // hot-paths snapshot (snapshot cadence is forced to 1 in spawn()).
+    let telemetry_events = read_jsonl_events(telemetry_path.as_path())?;
+    assert!(
+        telemetry_events
+            .iter()
+            .any(|event| event.get("event").and_then(Value::as_str) == Some("tool_call")),
+        "expected tool_call telemetry event",
+    );
+    assert!(
+        telemetry_events
+            .iter()
+            .any(|event| event.get("event").and_then(Value::as_str) == Some("hot_paths_snapshot")),
+        "expected hot_paths_snapshot telemetry event",
+    );
+    Ok(())
+}
+
+// End-to-end: a side-effecting execute_command request interrupted by a
+// hot-swap is replayed with at-least-once semantics, so the observable effect
+// is allowed (and here required) to occur more than once.
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
+#[serial]
+async fn host_replays_probe_required_request_during_hot_swap() -> Result<(), Box<dyn Error>> {
+    let fixture = Fixture::new()?;
+    let host_binary = resolve_host_binary()?;
+    let fake_ra_binary = resolve_fake_ra_binary()?;
+    // Skip (not fail) when the fake rust-analyzer helper was not built.
+    if !fake_ra_binary.exists() {
+        eprintln!(
+            "skipping host probe-required replay test: fake-rust-analyzer missing at {}",
+            fake_ra_binary.display()
+        );
+        return Ok(());
+    }
+
+    let worker_binary = fixture.path().join("adequate-rust-mcp-worker");
+    replace_worker_binary(host_binary.as_path(), worker_binary.as_path())?;
+
+    // The fake RA appends one line to this log per executed command, which is
+    // how the test counts duplicated side effects after replay.
+    let effect_log_path = fixture.path().join("execute-command-effects.log");
+    let telemetry_state_home = fixture.path().join("state-home");
+    let telemetry_path = telemetry_state_home
+        .join("adequate-rust-mcp")
+        .join("telemetry.jsonl");
+    let fake_wrapper = fixture.path().join("fake-ra-wrapper-probe.sh");
+    // Delay execute_command (1.2s) so the request is still pending at swap time.
+    let wrapper_config = FakeRaWrapperConfig {
+        hover_delay_ms: None,
+        execute_command_delay_ms: Some(1200),
+        execute_command_log_path: Some(effect_log_path.clone()),
+    };
+    write_fake_ra_wrapper(
+        fake_wrapper.as_path(),
+        fake_ra_binary.as_path(),
+        &wrapper_config,
+    )?;
+
+    let mut harness = HostHarness::spawn(
+        host_binary.as_path(),
+        worker_binary.as_path(),
+        fixture.path(),
+        fake_wrapper.as_path(),
+        telemetry_state_home.as_path(),
+    )
+    .await?;
+    harness.initialize().await?;
+
+    let request_id = harness.next_request_id();
+    harness
+        .send_request_with_id(
+            request_id,
+            "tools/call",
+            json!({
+                "name": "advanced_lsp_request",
+                "arguments": {
+                    "method": "execute_command",
+                    "arguments": {
+                        "command": "probe.required.synthetic",
+                        "arguments": []
+                    }
+                }
+            }),
+        )
+        .await?;
+
+    // Let the command start executing, then hot-swap the worker mid-request.
+    tokio::time::sleep(Duration::from_millis(300)).await;
+    replace_worker_binary(host_binary.as_path(), worker_binary.as_path())?;
+
+    let response = harness.read_response(request_id).await?;
+    assert!(
+        response.get("error").is_none(),
+        "expected replayed success for probe-required request, got: {response}",
+    );
+    let result = response
+        .get("result")
+        .ok_or_else(|| io::Error::other("missing result in response"))?;
+    let is_error = result
+        .get("isError")
+        .and_then(Value::as_bool)
+        .unwrap_or(false);
+    assert!(!is_error, "tool response marked as error: {result}");
+
+    // At-least-once replay under a forced restart must run the effect twice.
+    let effect_log = fs::read_to_string(effect_log_path)?;
+    let effect_count = effect_log.lines().count();
+    assert!(
+        effect_count >= 2,
+        "expected at-least-once replay to duplicate probe-required effect under forced restart",
+    );
+
+    harness.shutdown().await;
+
+    let telemetry_events = read_jsonl_events(telemetry_path.as_path())?;
+    let probe_tool_event = telemetry_events.iter().find(|event| {
+        event.get("event").and_then(Value::as_str) == Some("tool_call")
+            && event.get("tool_name").and_then(Value::as_str) == Some("advanced_lsp_request")
+    });
+    assert!(
+        probe_tool_event.is_some(),
+        "expected probe-required tool telemetry"
+    );
+    // Unreachable None branch in practice: the assert above already fires.
+    let probe_tool_event = match probe_tool_event {
+        Some(value) => value,
+        None => return Ok(()),
+    };
+    let replay_attempts = probe_tool_event
+        .get("replay_attempts")
+        .and_then(Value::as_u64)
+        .unwrap_or(0);
+    assert!(replay_attempts >= 1);
+
+    Ok(())
+}
+
+// End-to-end: an invalid-argument tool call must produce a JSON-RPC error AND
+// telemetry that attributes the failure to the offending source path.
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
+#[serial]
+async fn host_logs_wrong_arg_failures_with_path_telemetry() -> Result<(), Box<dyn Error>> {
+    let fixture = Fixture::new()?;
+    let host_binary = resolve_host_binary()?;
+    let fake_ra_binary = resolve_fake_ra_binary()?;
+    // Skip (not fail) when the fake rust-analyzer helper was not built.
+    if !fake_ra_binary.exists() {
+        eprintln!(
+            "skipping host telemetry failure test: fake-rust-analyzer missing at {}",
+            fake_ra_binary.display()
+        );
+        return Ok(());
+    }
+
+    let worker_binary = fixture.path().join("adequate-rust-mcp-worker");
+    replace_worker_binary(host_binary.as_path(), worker_binary.as_path())?;
+
+    let fake_wrapper = fixture.path().join("fake-ra-wrapper-telemetry.sh");
+    let telemetry_state_home = fixture.path().join("state-home");
+    let telemetry_path = telemetry_state_home
+        .join("adequate-rust-mcp")
+        .join("telemetry.jsonl");
+    // No artificial delays here — only the validation failure is under test.
+    let wrapper_config = FakeRaWrapperConfig {
+        hover_delay_ms: None,
+        execute_command_delay_ms: None,
+        execute_command_log_path: None,
+    };
+    write_fake_ra_wrapper(
+        fake_wrapper.as_path(),
+        fake_ra_binary.as_path(),
+        &wrapper_config,
+    )?;
+
+    let mut harness = HostHarness::spawn(
+        host_binary.as_path(),
+        worker_binary.as_path(),
+        fixture.path(),
+        fake_wrapper.as_path(),
+        telemetry_state_home.as_path(),
+    )
+    .await?;
+    harness.initialize().await?;
+
+    // `line` should be an integer; the string forces an invalid-params error.
+    let invalid_request_id = harness.next_request_id();
+    harness
+        .send_request_with_id(
+            invalid_request_id,
+            "tools/call",
+            json!({
+                "name": "hover",
+                "arguments": {
+                    "file_path": fixture.source_file().display().to_string(),
+                    "line": "definitely-not-a-number",
+                    "column": 1
+                }
+            }),
+        )
+        .await?;
+
+    let response = harness.read_response(invalid_request_id).await?;
+    assert!(
+        response.get("error").is_some(),
+        "expected invalid params error response, got: {response}",
+    );
+
+    harness.shutdown().await;
+
+    // The failed call must be visible in telemetry with outcome=error, the
+    // offending path, and a non-empty error message.
+    let source_path = fixture.source_file().display().to_string();
+    let telemetry_events = read_jsonl_events(telemetry_path.as_path())?;
+    let tool_event = telemetry_events.iter().find(|event| {
+        event.get("event").and_then(Value::as_str) == Some("tool_call")
+            && event.get("request_id") == Some(&json!(invalid_request_id))
+    });
+    assert!(
+        tool_event.is_some(),
+        "expected telemetry tool_call for invalid request",
+    );
+    // Unreachable None branch in practice: the assert above already fires.
+    let tool_event = match tool_event {
+        Some(value) => value,
+        None => return Ok(()),
+    };
+    assert_eq!(
+        tool_event.get("outcome").and_then(Value::as_str),
+        Some("error")
+    );
+    assert_eq!(
+        tool_event.get("path_hint").and_then(Value::as_str),
+        Some(source_path.as_str()),
+    );
+    assert!(
+        tool_event
+            .get("error_message")
+            .and_then(Value::as_str)
+            .is_some_and(|message| !message.trim().is_empty()),
+        "expected non-empty error message for invalid args telemetry",
+    );
+
+    // The errored path must also surface in at least one hot-paths snapshot
+    // with a non-zero error count.
+    let snapshot_contains_path = telemetry_events
+        .iter()
+        .filter(|event| event.get("event").and_then(Value::as_str) == Some("hot_paths_snapshot"))
+        .flat_map(|event| {
+            event
+                .get("hottest_paths")
+                .and_then(Value::as_array)
+                .cloned()
+                .unwrap_or_default()
+        })
+        .any(|path_line| {
+            path_line.get("path").and_then(Value::as_str) == Some(source_path.as_str())
+                && path_line
+                    .get("error_count")
+                    .and_then(Value::as_u64)
+                    .is_some_and(|count| count >= 1)
+        });
+    assert!(
+        snapshot_contains_path,
+        "expected hot path snapshots to include errored source path",
+    );
+
+    Ok(())
+}
+
+// Minimal on-disk cargo crate used as the workspace under test.
+struct Fixture {
+    // Owns the temp directory; dropping it removes the whole fixture.
+    temp_dir: TempDir,
+    // Absolute path to the fixture's src/lib.rs.
+    source_file: PathBuf,
+}
+
+impl Fixture {
+    // Creates a one-file crate (Cargo.toml + src/lib.rs) in a fresh temp dir.
+    fn new() -> Result<Self, Box<dyn Error>> {
+        let temp_dir = tempfile::tempdir()?;
+        let src = temp_dir.path().join("src");
+        fs::create_dir_all(&src)?;
+        fs::write(
+            temp_dir.path().join("Cargo.toml"),
+            "[package]\nname = \"host_replay_fixture\"\nversion = \"0.0.0\"\nedition = \"2024\"\n",
+        )?;
+        let source_file = src.join("lib.rs");
+        fs::write(&source_file, "pub fn compute() -> i32 { 1 }\n")?;
+        Ok(Self {
+            temp_dir,
+            source_file,
+        })
+    }
+
+    // Root of the temp workspace.
+    fn path(&self) -> &Path {
+        self.temp_dir.path()
+    }
+
+    // Absolute path to the fixture's src/lib.rs.
+    fn source_file(&self) -> &Path {
+        self.source_file.as_path()
+    }
+}
+
+// Drives the host binary over stdio JSON-RPC (one JSON value per line).
+struct HostHarness {
+    // The spawned host process.
+    child: Child,
+    // Host's stdin: requests/notifications are written here.
+    stdin: ChildStdin,
+    // Host's stdout, consumed line by line.
+    stdout: Lines<BufReader<ChildStdout>>,
+    // Responses read while waiting for a different id, keyed by response id.
+    buffered_responses: HashMap<u64, Value>,
+    // Next JSON-RPC request id to hand out.
+    next_id: u64,
+}
+
+impl Drop for HostHarness {
+    fn drop(&mut self) {
+        // Best-effort kill so a panicking test does not leak the host process.
+        let _ = self.child.start_kill();
+    }
+}
+
+impl HostHarness {
+    // Launches the host wired to the staged worker binary and the fake
+    // rust-analyzer wrapper. The env knobs force aggressive reload/respawn
+    // timing and per-call telemetry snapshots so hot-swap behavior is
+    // observable within test timeouts. stderr is discarded.
+    async fn spawn(
+        host_binary: &Path,
+        worker_binary: &Path,
+        workspace_root: &Path,
+        fake_ra_binary: &Path,
+        telemetry_state_home: &Path,
+    ) -> Result<Self, Box<dyn Error>> {
+        let mut child = Command::new(host_binary)
+            .env("ADEQUATE_MCP_WORKER_BINARY", worker_binary)
+            .env("ADEQUATE_MCP_WORKSPACE_ROOT", workspace_root)
+            .env("ADEQUATE_MCP_RA_BINARY", fake_ra_binary)
+            .env("XDG_STATE_HOME", telemetry_state_home)
+            .env("ADEQUATE_MCP_TELEMETRY_SNAPSHOT_EVERY", "1")
+            .env("ADEQUATE_MCP_HOST_RELOAD_DEBOUNCE_MS", "0")
+            .env("ADEQUATE_MCP_HOST_RESPAWN_FLOOR_MS", "10")
+            .env("ADEQUATE_MCP_HOST_RESPAWN_CEILING_MS", "20")
+            .env("ADEQUATE_MCP_HOST_MAX_REPLAY_ATTEMPTS", "8")
+            .stdin(Stdio::piped())
+            .stdout(Stdio::piped())
+            .stderr(Stdio::null())
+            .spawn()?;
+
+        let stdin = child
+            .stdin
+            .take()
+            .ok_or_else(|| io::Error::other("host stdin unavailable"))?;
+        let stdout = child
+            .stdout
+            .take()
+            .ok_or_else(|| io::Error::other("host stdout unavailable"))?;
+        Ok(Self {
+            child,
+            stdin,
+            stdout: BufReader::new(stdout).lines(),
+            buffered_responses: HashMap::new(),
+            next_id: 1,
+        })
+    }
+
+    // MCP handshake: `initialize` request followed by the `initialized`
+    // notification; the initialize result itself is not inspected.
+    async fn initialize(&mut self) -> Result<(), Box<dyn Error>> {
+        let _result = self
+            .request(
+                "initialize",
+                json!({
+                    "protocolVersion": "2025-11-25",
+                    "capabilities": {},
+                    "clientInfo": {
+                        "name": "host-replay-test",
+                        "version": "1.0.0"
+                    }
+                }),
+            )
+            .await?;
+        self.notify("notifications/initialized", json!({})).await?;
+        Ok(())
+    }
+
+    // Hands out monotonically increasing ids (saturating, never reused).
+    fn next_request_id(&mut self) -> u64 {
+        let id = self.next_id;
+        self.next_id = self.next_id.saturating_add(1);
+        id
+    }
+
+    // Fire-and-forget request write; pair with read_response(id) to await it.
+    async fn send_request_with_id(
+        &mut self,
+        id: u64,
+        method: &str,
+        params: Value,
+    ) -> Result<(), Box<dyn Error>> {
+        let payload = json!({
+            "jsonrpc": "2.0",
+            "id": id,
+            "method": method,
+            "params": params,
+        });
+        self.write_message(&payload).await
+    }
+
+    // Convenience: send a request and block until its response arrives.
+    async fn request(&mut self, method: &str, params: Value) -> Result<Value, Box<dyn Error>> {
+        let id = self.next_request_id();
+        self.send_request_with_id(id, method, params).await?;
+        self.read_response(id).await
+    }
+
+    // JSON-RPC notification (no id, no response expected).
+    async fn notify(&mut self, method: &str, params: Value) -> Result<(), Box<dyn Error>> {
+        let payload = json!({
+            "jsonrpc": "2.0",
+            "method": method,
+            "params": params,
+        });
+        self.write_message(&payload).await
+    }
+
+    // Writes one newline-delimited JSON message and flushes immediately.
+    async fn write_message(&mut self, message: &Value) -> Result<(), Box<dyn Error>> {
+        let serialized = serde_json::to_vec(message)?;
+        self.stdin.write_all(&serialized).await?;
+        self.stdin.write_all(b"\n").await?;
+        self.stdin.flush().await?;
+        Ok(())
+    }
+
+    // Waits (bounded by RESPONSE_TIMEOUT) for the response matching
+    // `request_id`. Responses for other ids are stashed in
+    // `buffered_responses` so out-of-order delivery is tolerated; lines that
+    // are not JSON or carry no numeric id (e.g. notifications) are skipped.
+    async fn read_response(&mut self, request_id: u64) -> Result<Value, Box<dyn Error>> {
+        if let Some(buffered) = self.buffered_responses.remove(&request_id) {
+            return Ok(buffered);
+        }
+        let deadline = Instant::now() + RESPONSE_TIMEOUT;
+        loop {
+            if Instant::now() >= deadline {
+                return Err(Box::new(io::Error::new(
+                    io::ErrorKind::TimedOut,
+                    format!("timed out waiting for response id {request_id}"),
+                )));
+            }
+            // Re-derive the per-read timeout from the fixed overall deadline.
+            let remaining = deadline.saturating_duration_since(Instant::now());
+            let next_line = tokio::time::timeout(remaining, self.stdout.next_line()).await;
+            let line = match next_line {
+                Ok(Ok(Some(line))) => line,
+                Ok(Ok(None)) => {
+                    return Err(Box::new(io::Error::new(
+                        io::ErrorKind::UnexpectedEof,
+                        "host stdout closed while awaiting response",
+                    )));
+                }
+                Ok(Err(error)) => return Err(Box::new(error)),
+                Err(_) => {
+                    return Err(Box::new(io::Error::new(
+                        io::ErrorKind::TimedOut,
+                        format!("timed out waiting for response id {request_id}"),
+                    )));
+                }
+            };
+
+            let parsed = serde_json::from_str::<Value>(&line);
+            let Ok(message) = parsed else {
+                continue;
+            };
+            let response_id = message.get("id").and_then(Value::as_u64);
+            let Some(response_id) = response_id else {
+                continue;
+            };
+            if response_id == request_id {
+                return Ok(message);
+            }
+            let _previous = self.buffered_responses.insert(response_id, message);
+        }
+    }
+
+    // Kill then reap the host; both results intentionally ignored.
+    async fn shutdown(&mut self) {
+        let _ = self.child.kill().await;
+        let _ = self.child.wait().await;
+    }
+}
+
+// Installs (or hot-swaps) the worker binary: copy to a `.next` staging file
+// beside the target, mark it executable on unix, then atomically rename over
+// the target so the host never observes a partially written binary.
+fn replace_worker_binary(source_binary: &Path, target_binary: &Path) -> io::Result<()> {
+    let staged = target_binary.with_extension("next");
+    fs::copy(source_binary, &staged)?;
+    #[cfg(unix)]
+    {
+        use std::os::unix::fs::PermissionsExt;
+
+        fs::set_permissions(&staged, fs::Permissions::from_mode(0o755))?;
+    }
+    fs::rename(&staged, target_binary)
+}
+
+// Flags to forward to the fake rust-analyzer via the generated shell shim
+// (see write_fake_ra_wrapper).
+#[derive(Debug, Clone)]
+struct FakeRaWrapperConfig {
+    // Forwarded as `--hover-delay-ms <n>` when set.
+    hover_delay_ms: Option<u64>,
+    // Forwarded as `--execute-command-delay-ms <n>` when set.
+    execute_command_delay_ms: Option<u64>,
+    // Forwarded as `--execute-command-log "<path>"` when set; the tests read
+    // this file back to count command side effects.
+    execute_command_log_path: Option<PathBuf>,
+}
+
+// Writes an executable bash shim that exec's the fake rust-analyzer with the
+// flags implied by `config` (always `--mode stable`, plus optional delays and
+// an optional effect-log path).
+fn write_fake_ra_wrapper(
+    script_path: &Path,
+    fake_ra_binary: &Path,
+    config: &FakeRaWrapperConfig,
+) -> io::Result<()> {
+    let mut flags = String::from("--mode stable");
+    if let Some(delay_ms) = config.hover_delay_ms {
+        flags.push_str(&format!(" --hover-delay-ms {delay_ms}"));
+    }
+    if let Some(delay_ms) = config.execute_command_delay_ms {
+        flags.push_str(&format!(" --execute-command-delay-ms {delay_ms}"));
+    }
+    if let Some(path) = config.execute_command_log_path.as_ref() {
+        flags.push_str(&format!(" --execute-command-log \"{}\"", path.display()));
+    }
+
+    let script = format!(
+        "#!/usr/bin/env bash\nexec \"{}\" {}\n",
+        fake_ra_binary.display(),
+        flags
+    );
+    fs::write(script_path, script)?;
+    #[cfg(unix)]
+    {
+        use std::os::unix::fs::PermissionsExt;
+
+        fs::set_permissions(script_path, fs::Permissions::from_mode(0o755))?;
+    }
+    Ok(())
+}
+
+// Locates the host binary: the Cargo-exported path when available, otherwise
+// target/debug/adequate-rust-mcp derived from the test executable's location
+// (target/debug/deps/<test-bin>).
+fn resolve_host_binary() -> Result<PathBuf, Box<dyn Error>> {
+    if let Ok(exported) = std::env::var("CARGO_BIN_EXE_adequate-rust-mcp") {
+        return Ok(PathBuf::from(exported));
+    }
+    let test_binary = std::env::current_exe()?;
+    let deps_dir = test_binary
+        .parent()
+        .ok_or_else(|| io::Error::other("failed to resolve integration test deps directory"))?;
+    let debug_dir = deps_dir
+        .parent()
+        .ok_or_else(|| io::Error::other("failed to resolve target debug directory"))?;
+    Ok(debug_dir.join("adequate-rust-mcp"))
+}
+
+// Locates the fake rust-analyzer helper binary. Cargo may export the path
+// under either a hyphenated or underscored env var depending on the bin
+// target name, so both spellings are accepted before falling back to
+// target/debug next to the test executable.
+fn resolve_fake_ra_binary() -> Result<PathBuf, Box<dyn Error>> {
+    for exported in [
+        "CARGO_BIN_EXE_fake-rust-analyzer",
+        "CARGO_BIN_EXE_fake_rust_analyzer",
+    ] {
+        if let Ok(path) = std::env::var(exported) {
+            return Ok(PathBuf::from(path));
+        }
+    }
+    let test_binary = std::env::current_exe()?;
+    let deps_dir = test_binary
+        .parent()
+        .ok_or_else(|| io::Error::other("failed to resolve integration test deps directory"))?;
+    let debug_dir = deps_dir
+        .parent()
+        .ok_or_else(|| io::Error::other("failed to resolve target debug directory"))?;
+    Ok(debug_dir.join("fake-rust-analyzer"))
+}
+
+// Reads a JSONL telemetry file: one JSON value per non-blank line. Fails on
+// the first malformed line.
+fn read_jsonl_events(path: &Path) -> Result<Vec<Value>, Box<dyn Error>> {
+    let raw = fs::read_to_string(path)?;
+    let mut events = Vec::new();
+    for line in raw.lines() {
+        if line.trim().is_empty() {
+            continue;
+        }
+        events.push(serde_json::from_str::<Value>(line)?);
+    }
+    Ok(events)
+}
+use libmcp as _;
diff --git a/crates/adequate-rust-mcp/tests/worktree_workspace_rebind.rs b/crates/adequate-rust-mcp/tests/worktree_workspace_rebind.rs
new file mode 100644
index 0000000..ca61753
--- /dev/null
+++ b/crates/adequate-rust-mcp/tests/worktree_workspace_rebind.rs
@@ -0,0 +1,383 @@
+//! Integration test for rebinding worker engines to sibling git worktrees.
+
+use notify as _;
+use ra_mcp_domain as _;
+use ra_mcp_engine as _;
+use rmcp as _;
+use schemars as _;
+use serde as _;
+use serde_json::{Value, json};
+use serial_test::serial;
+use std::{
+ error::Error,
+ fs, io,
+ path::{Path, PathBuf},
+ process::Stdio,
+ time::{Duration, Instant},
+};
+use tempfile::TempDir;
+use tokio::{
+ io::{AsyncBufReadExt, AsyncWriteExt, BufReader, Lines},
+ process::{Child, ChildStdin, ChildStdout, Command},
+};
+use toml as _;
+use tracing as _;
+use tracing_subscriber as _;
+use url as _;
+
+const RESPONSE_TIMEOUT: Duration = Duration::from_secs(20);
+
+// End-to-end: the worker is rooted at the main repo, but a diagnostics request
+// targets a file inside a *sibling* git worktree. With the fake RA in
+// strict-root-match mode, a successful diagnostic proves the engine rebound
+// its workspace root to the worktree rather than the original repo.
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
+#[serial]
+async fn diagnostics_rebind_to_sibling_git_worktree_root() -> Result<(), Box<dyn Error>> {
+    let fixture = Fixture::new()?;
+    let worker_binary = resolve_worker_binary()?;
+    let fake_ra_binary = resolve_fake_ra_binary()?;
+    // Skip (not fail) when the fake rust-analyzer helper was not built.
+    if !fake_ra_binary.exists() {
+        eprintln!(
+            "skipping worktree rebind test: fake-rust-analyzer missing at {}",
+            fake_ra_binary.display()
+        );
+        return Ok(());
+    }
+
+    let fake_wrapper = fixture.base_dir().join("fake-ra-worktree.sh");
+    write_fake_ra_wrapper(fake_wrapper.as_path(), fake_ra_binary.as_path())?;
+
+    // Note: the worker is spawned with the *repo* root, not the worktree root.
+    let mut harness = WorkerHarness::spawn(
+        worker_binary.as_path(),
+        fixture.repo_root(),
+        fake_wrapper.as_path(),
+    )
+    .await?;
+    harness.initialize().await?;
+
+    let diagnostics = harness
+        .call_tool(
+            "diagnostics",
+            json!({
+                "file_path": fixture.worktree_source().display().to_string(),
+                "render": "json",
+                "mode": "full",
+                "path_style": "absolute"
+            }),
+        )
+        .await?;
+    let items = diagnostics
+        .get("diagnostics")
+        .and_then(Value::as_array)
+        .ok_or_else(|| io::Error::other("diagnostics payload missing diagnostics array"))?;
+    assert_eq!(items.len(), 1);
+    let message = items[0]
+        .get("message")
+        .and_then(Value::as_str)
+        .ok_or_else(|| io::Error::other("diagnostic missing message"))?;
+    assert_eq!(message, "fake diagnostic");
+    let code = items[0].get("code").cloned().unwrap_or(Value::Null);
+    assert_eq!(code, Value::Null);
+
+    harness.shutdown().await;
+    Ok(())
+}
+
+// A git repository plus a sibling worktree, both holding the same one-file crate.
+struct Fixture {
+    // Owns the temp directory containing both the repo and the worktree.
+    temp_dir: TempDir,
+    // The primary repository checkout.
+    repo_root: PathBuf,
+    // The sibling worktree created via `git worktree add`.
+    worktree_root: PathBuf,
+    // src/lib.rs inside the worktree (the file the test queries).
+    worktree_source: PathBuf,
+}
+
+impl Fixture {
+    // Builds the repo, commits the fixture crate, then adds a sibling
+    // worktree on a fresh branch ("worktree-rebind-test").
+    fn new() -> Result<Self, Box<dyn Error>> {
+        let temp_dir = tempfile::tempdir()?;
+        let base_dir = temp_dir.path().to_path_buf();
+        let repo_root = base_dir.join("repo");
+        fs::create_dir_all(repo_root.join("src"))?;
+        fs::write(repo_root.join("Cargo.toml"), fixture_manifest())?;
+        fs::write(repo_root.join("src/lib.rs"), fixture_source())?;
+
+        // Identity config is required for the commit below to succeed in CI.
+        run_git(["init", "-q"], repo_root.as_path())?;
+        run_git(
+            ["config", "user.email", "adequate@example.com"],
+            repo_root.as_path(),
+        )?;
+        run_git(["config", "user.name", "adequate"], repo_root.as_path())?;
+        run_git(["add", "."], repo_root.as_path())?;
+        run_git(["commit", "-qm", "fixture"], repo_root.as_path())?;
+
+        let worktree_root = base_dir.join("repo-worktree");
+        run_git(
+            [
+                "worktree",
+                "add",
+                "-q",
+                worktree_root.to_str().ok_or_else(|| {
+                    io::Error::other("worktree path is not valid UTF-8 for git invocation")
+                })?,
+                "-b",
+                "worktree-rebind-test",
+            ],
+            repo_root.as_path(),
+        )?;
+        let worktree_source = worktree_root.join("src/lib.rs");
+
+        Ok(Self {
+            temp_dir,
+            repo_root,
+            worktree_root,
+            worktree_source,
+        })
+    }
+
+    // Parent directory holding both the repo and the worktree.
+    fn base_dir(&self) -> &Path {
+        self.temp_dir.path()
+    }
+
+    // The primary repository checkout (worker's configured workspace root).
+    fn repo_root(&self) -> &Path {
+        self.repo_root.as_path()
+    }
+
+    // src/lib.rs inside the sibling worktree.
+    fn worktree_source(&self) -> &Path {
+        self.worktree_source.as_path()
+    }
+
+    // Kept for symmetry with the other accessors; unused by the current test.
+    #[allow(dead_code)]
+    fn worktree_root(&self) -> &Path {
+        self.worktree_root.as_path()
+    }
+}
+
+// Drives the worker binary directly over stdio JSON-RPC (one JSON per line).
+struct WorkerHarness {
+    // The spawned worker process.
+    child: Child,
+    // Worker's stdin: requests/notifications are written here.
+    stdin: ChildStdin,
+    // Worker's stdout, consumed line by line.
+    stdout: Lines<BufReader<ChildStdout>>,
+    // Next JSON-RPC request id to hand out.
+    next_id: u64,
+}
+
+impl Drop for WorkerHarness {
+    fn drop(&mut self) {
+        // Best-effort kill so a panicking test does not leak the worker process.
+        let _ = self.child.start_kill();
+    }
+}
+
+impl WorkerHarness {
+    // Spawns the crate binary in worker mode (`--worker`) against the given
+    // workspace root and rust-analyzer shim. stderr is discarded.
+    async fn spawn(
+        worker_binary: &Path,
+        workspace_root: &Path,
+        fake_ra_binary: &Path,
+    ) -> Result<Self, Box<dyn Error>> {
+        let mut child = Command::new(worker_binary)
+            .arg("--worker")
+            .env("ADEQUATE_MCP_WORKSPACE_ROOT", workspace_root)
+            .env("ADEQUATE_MCP_RA_BINARY", fake_ra_binary)
+            .stdin(Stdio::piped())
+            .stdout(Stdio::piped())
+            .stderr(Stdio::null())
+            .spawn()?;
+
+        let stdin = child
+            .stdin
+            .take()
+            .ok_or_else(|| io::Error::other("worker stdin unavailable"))?;
+        let stdout = child
+            .stdout
+            .take()
+            .ok_or_else(|| io::Error::other("worker stdout unavailable"))?;
+        Ok(Self {
+            child,
+            stdin,
+            stdout: BufReader::new(stdout).lines(),
+            next_id: 1,
+        })
+    }
+
+    // MCP handshake: `initialize` request followed by the `initialized`
+    // notification; the initialize result itself is not inspected.
+    async fn initialize(&mut self) -> Result<(), Box<dyn Error>> {
+        let _ = self
+            .request(
+                "initialize",
+                json!({
+                    "protocolVersion": "2025-11-25",
+                    "capabilities": {},
+                    "clientInfo": {
+                        "name": "worktree-rebind-test",
+                        "version": "1.0.0"
+                    }
+                }),
+            )
+            .await?;
+        self.notify("notifications/initialized", json!({})).await?;
+        Ok(())
+    }
+
+    // Invokes a tool via tools/call and returns its structuredContent.
+    // A result with isError=true is converted into an Err.
+    async fn call_tool(
+        &mut self,
+        tool_name: &str,
+        arguments: Value,
+    ) -> Result<Value, Box<dyn Error>> {
+        let response = self
+            .request(
+                "tools/call",
+                json!({
+                    "name": tool_name,
+                    "arguments": arguments,
+                }),
+            )
+            .await?;
+        let result = response
+            .get("result")
+            .ok_or_else(|| io::Error::other("tool response missing result"))?;
+        let is_error = result
+            .get("isError")
+            .and_then(Value::as_bool)
+            .unwrap_or(false);
+        if is_error {
+            return Err(Box::new(io::Error::other(format!(
+                "tool `{tool_name}` returned error payload: {result}"
+            ))));
+        }
+        Ok(result
+            .get("structuredContent")
+            .cloned()
+            .unwrap_or(Value::Null))
+    }
+
+    // Sends a request with a fresh id and blocks until its response arrives.
+    async fn request(&mut self, method: &str, params: Value) -> Result<Value, Box<dyn Error>> {
+        let id = self.next_id;
+        self.next_id = self.next_id.saturating_add(1);
+        let payload = json!({
+            "jsonrpc": "2.0",
+            "id": id,
+            "method": method,
+            "params": params,
+        });
+        self.write_message(&payload).await?;
+        self.read_response(id).await
+    }
+
+    // JSON-RPC notification (no id, no response expected).
+    async fn notify(&mut self, method: &str, params: Value) -> Result<(), Box<dyn Error>> {
+        let payload = json!({
+            "jsonrpc": "2.0",
+            "method": method,
+            "params": params,
+        });
+        self.write_message(&payload).await
+    }
+
+    // Writes one newline-delimited JSON message and flushes immediately.
+    async fn write_message(&mut self, message: &Value) -> Result<(), Box<dyn Error>> {
+        let serialized = serde_json::to_vec(message)?;
+        self.stdin.write_all(&serialized).await?;
+        self.stdin.write_all(b"\n").await?;
+        self.stdin.flush().await?;
+        Ok(())
+    }
+
+    // Waits (bounded by RESPONSE_TIMEOUT) for the response matching
+    // `request_id`; messages with other ids (or no id) are discarded.
+    // NOTE(review): unlike the host harness, a non-JSON stdout line is a hard
+    // error here — worker stdout is presumably JSON-only; confirm.
+    async fn read_response(&mut self, request_id: u64) -> Result<Value, Box<dyn Error>> {
+        let deadline = Instant::now() + RESPONSE_TIMEOUT;
+        loop {
+            if Instant::now() >= deadline {
+                return Err(Box::new(io::Error::new(
+                    io::ErrorKind::TimedOut,
+                    format!("timed out waiting for response id {request_id}"),
+                )));
+            }
+            let remaining = deadline.saturating_duration_since(Instant::now());
+            let next_line = tokio::time::timeout(remaining, self.stdout.next_line()).await;
+            let line = match next_line {
+                Ok(Ok(Some(line))) => line,
+                Ok(Ok(None)) => {
+                    return Err(Box::new(io::Error::new(
+                        io::ErrorKind::UnexpectedEof,
+                        "worker stdout closed while awaiting response",
+                    )));
+                }
+                Ok(Err(error)) => return Err(Box::new(error)),
+                Err(_) => {
+                    return Err(Box::new(io::Error::new(
+                        io::ErrorKind::TimedOut,
+                        format!("timed out waiting for response id {request_id}"),
+                    )));
+                }
+            };
+
+            let message = serde_json::from_str::<Value>(&line)?;
+            if message.get("id").and_then(Value::as_u64) == Some(request_id) {
+                return Ok(message);
+            }
+        }
+    }
+
+    // Kill then reap the worker; both results intentionally ignored.
+    async fn shutdown(&mut self) {
+        let _ = self.child.kill().await;
+        let _ = self.child.wait().await;
+    }
+}
+
+// Cargo.toml contents for the fixture crate committed into the repo.
+fn fixture_manifest() -> &'static str {
+    "[package]\nname = \"worktree_rebind_fixture\"\nversion = \"0.0.0\"\nedition = \"2024\"\n"
+}
+
+// src/lib.rs contents for the fixture crate.
+fn fixture_source() -> &'static str {
+    "pub fn compute() -> i32 { 1 }\n"
+}
+
+fn run_git<const N: usize>(args: [&str; N], cwd: &Path) -> Result<(), Box<dyn Error>> {
+ let status = std::process::Command::new("git")
+ .args(args)
+ .current_dir(cwd)
+ .status()?;
+ if !status.success() {
+ return Err(Box::new(io::Error::other(format!(
+ "git command failed in {}",
+ cwd.display()
+ ))));
+ }
+ Ok(())
+}
+
+// Writes an executable bash shim that exec's the fake rust-analyzer in
+// strict-root-match mode, so a workspace/worktree root mismatch fails loudly.
+fn write_fake_ra_wrapper(script_path: &Path, fake_ra_binary: &Path) -> io::Result<()> {
+    let shim = format!(
+        "#!/usr/bin/env bash\nexec \"{}\" --mode stable --strict-root-match\n",
+        fake_ra_binary.display()
+    );
+    fs::write(script_path, shim)?;
+    #[cfg(unix)]
+    {
+        use std::os::unix::fs::PermissionsExt;
+
+        fs::set_permissions(script_path, fs::Permissions::from_mode(0o755))?;
+    }
+    Ok(())
+}
+
+// Locates the crate binary used as the worker: the Cargo-exported path when
+// available, otherwise target/debug/adequate-rust-mcp derived from the test
+// executable's location (target/debug/deps/<test-bin>).
+fn resolve_worker_binary() -> Result<PathBuf, Box<dyn Error>> {
+    if let Ok(exported) = std::env::var("CARGO_BIN_EXE_adequate-rust-mcp") {
+        return Ok(PathBuf::from(exported));
+    }
+    let test_binary = std::env::current_exe()?;
+    let deps_dir = test_binary
+        .parent()
+        .ok_or_else(|| io::Error::other("failed to resolve integration test deps directory"))?;
+    let debug_dir = deps_dir
+        .parent()
+        .ok_or_else(|| io::Error::other("failed to resolve target debug directory"))?;
+    Ok(debug_dir.join("adequate-rust-mcp"))
+}
+
+// Locates the fake rust-analyzer helper; accepts both env-var spellings Cargo
+// may export, then falls back to target/debug next to the test executable.
+fn resolve_fake_ra_binary() -> Result<PathBuf, Box<dyn Error>> {
+    for exported in [
+        "CARGO_BIN_EXE_fake-rust-analyzer",
+        "CARGO_BIN_EXE_fake_rust_analyzer",
+    ] {
+        if let Ok(path) = std::env::var(exported) {
+            return Ok(PathBuf::from(path));
+        }
+    }
+    let test_binary = std::env::current_exe()?;
+    let deps_dir = test_binary
+        .parent()
+        .ok_or_else(|| io::Error::other("failed to resolve integration test deps directory"))?;
+    let debug_dir = deps_dir
+        .parent()
+        .ok_or_else(|| io::Error::other("failed to resolve target debug directory"))?;
+    Ok(debug_dir.join("fake-rust-analyzer"))
+}
+use libmcp as _;
diff --git a/crates/ra-mcp-domain/.gitignore b/crates/ra-mcp-domain/.gitignore
new file mode 100644
index 0000000..ea8c4bf
--- /dev/null
+++ b/crates/ra-mcp-domain/.gitignore
@@ -0,0 +1 @@
+/target
diff --git a/crates/ra-mcp-domain/Cargo.toml b/crates/ra-mcp-domain/Cargo.toml
new file mode 100644
index 0000000..eca022e
--- /dev/null
+++ b/crates/ra-mcp-domain/Cargo.toml
@@ -0,0 +1,21 @@
+[package]
+name = "ra-mcp-domain"
+categories.workspace = true
+description = "Typed lifecycle, request, and fault algebra for the adequate-rust-mcp stack."
+edition.workspace = true
+keywords.workspace = true
+license.workspace = true
+readme.workspace = true
+repository.workspace = true
+rust-version.workspace = true
+version.workspace = true
+
+[dependencies]
+serde.workspace = true
+thiserror.workspace = true
+
+[dev-dependencies]
+assert_matches.workspace = true
+
+[lints]
+workspace = true
diff --git a/crates/ra-mcp-domain/src/fault.rs b/crates/ra-mcp-domain/src/fault.rs
new file mode 100644
index 0000000..6d404ab
--- /dev/null
+++ b/crates/ra-mcp-domain/src/fault.rs
@@ -0,0 +1,129 @@
+//! Fault taxonomy and recovery guidance.
+
+use crate::types::Generation;
+use serde::{Deserialize, Serialize};
+use thiserror::Error;
+
+/// Logical fault class.
+///
+/// Classes group [`FaultCode`]s for recovery-policy decisions; the mapping to
+/// a [`RecoveryDirective`] lives in `Fault::directive`.
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
+pub enum FaultClass {
+    /// Underlying I/O or transport channel failure.
+    Transport,
+    /// Child process startup/liveness/exiting failures.
+    Process,
+    /// Malformed or unexpected protocol payloads.
+    Protocol,
+    /// Deadline exceeded.
+    Timeout,
+    /// Internal resource budget exhaustion.
+    Resource,
+}
+
+/// Fine-grained fault code.
+///
+/// Interpreted together with its [`FaultClass`]; the (class, code) pair
+/// drives `Fault::directive`.
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
+pub enum FaultCode {
+    /// Pipe write failed with `EPIPE`.
+    BrokenPipe,
+    /// Pipe reached EOF.
+    UnexpectedEof,
+    /// Child process exited unexpectedly.
+    ChildExited,
+    /// Child process failed to spawn.
+    SpawnFailed,
+    /// Startup sequence exceeded deadline.
+    StartupTimedOut,
+    /// Request exceeded deadline.
+    RequestTimedOut,
+    /// Received an invalid protocol frame.
+    InvalidFrame,
+    /// Received invalid JSON.
+    InvalidJson,
+    /// Response could not be correlated with a pending request.
+    UnknownResponseId,
+}
+
+/// Recovery strategy for a fault, as chosen by `Fault::directive`.
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
+pub enum RecoveryDirective {
+    /// Retry the request on the same process.
+    RetryInPlace,
+    /// Restart the worker process and retry once.
+    RestartAndReplay,
+    /// Fail-fast and bubble to the caller.
+    AbortRequest,
+}
+
+/// Structured fault event.
+///
+/// Carries enough context (generation, class, code, detail) to choose a
+/// [`RecoveryDirective`] via [`Fault::directive`].
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
+pub struct Fault {
+    /// Generation in which this fault happened.
+    pub generation: Generation,
+    /// Broad fault class.
+    pub class: FaultClass,
+    /// Specific fault code.
+    pub code: FaultCode,
+    /// Caller-facing context.
+    pub detail: FaultDetail,
+}
+
+impl Fault {
+    /// Constructs a new fault.
+    #[must_use]
+    pub fn new(
+        generation: Generation,
+        class: FaultClass,
+        code: FaultCode,
+        detail: FaultDetail,
+    ) -> Self {
+        Self {
+            generation,
+            class,
+            code,
+            detail,
+        }
+    }
+
+    /// Returns the default recovery directive for this fault.
+    ///
+    /// Transport pipe failures, process spawn/exit failures, and timeouts are
+    /// treated as survivable: restart the worker and replay. An uncorrelated
+    /// response id is retried in place. Every other (class, code) pairing —
+    /// including malformed protocol payloads and resource exhaustion — aborts
+    /// the request.
+    #[must_use]
+    pub fn directive(&self) -> RecoveryDirective {
+        match (self.class, self.code) {
+            (FaultClass::Transport, FaultCode::BrokenPipe | FaultCode::UnexpectedEof)
+            | (FaultClass::Process, FaultCode::ChildExited | FaultCode::SpawnFailed)
+            | (FaultClass::Timeout, FaultCode::StartupTimedOut | FaultCode::RequestTimedOut) => {
+                RecoveryDirective::RestartAndReplay
+            }
+            (FaultClass::Protocol, FaultCode::UnknownResponseId) => {
+                RecoveryDirective::RetryInPlace
+            }
+            _ => RecoveryDirective::AbortRequest,
+        }
+    }
+}
+
+/// Typed detail payload for a fault.
+///
+/// Currently just a free-form message; kept as a struct so more fields can be
+/// added without breaking the serialized shape's callers.
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
+pub struct FaultDetail {
+    /// Human-consumable context.
+    pub message: String,
+}
+
+impl FaultDetail {
+    /// Creates a new detail payload from anything string-convertible.
+    #[must_use]
+    pub fn new(message: impl Into<String>) -> Self {
+        let message = message.into();
+        Self { message }
+    }
+}
+
+/// Domain fault conversion error.
+///
+/// Opaque wrapper around a description of why a foreign error could not be
+/// mapped into a [`Fault`].
+#[derive(Debug, Error)]
+#[error("fault conversion failure: {0}")]
+pub struct FaultConversionError(String);
diff --git a/crates/ra-mcp-domain/src/lib.rs b/crates/ra-mcp-domain/src/lib.rs
new file mode 100644
index 0000000..90285f0
--- /dev/null
+++ b/crates/ra-mcp-domain/src/lib.rs
@@ -0,0 +1,5 @@
+//! Domain model for the Adequate Rust MCP server.
+
+pub mod fault;
+pub mod lifecycle;
+pub mod types;
diff --git a/crates/ra-mcp-domain/src/lifecycle.rs b/crates/ra-mcp-domain/src/lifecycle.rs
new file mode 100644
index 0000000..91007ac
--- /dev/null
+++ b/crates/ra-mcp-domain/src/lifecycle.rs
@@ -0,0 +1,259 @@
+//! Typestate machine for worker lifecycle.
+
+use crate::{
+ fault::Fault,
+ types::{Generation, InvariantViolation},
+};
+use serde::{Deserialize, Serialize};
+
/// A worker in cold state (no process). Zero-sized typestate marker used as
/// the `S` parameter of [`Lifecycle`].
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Cold;

/// A worker in startup handshake. Zero-sized typestate marker used as the
/// `S` parameter of [`Lifecycle`].
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Starting;

/// A healthy worker ready to serve requests. Zero-sized typestate marker
/// used as the `S` parameter of [`Lifecycle`].
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Ready;
+
+/// A worker currently recovering from failure.
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct Recovering {
+ last_fault: Fault,
+}
+
+impl Recovering {
+ /// Constructs recovering state from the most recent fault.
+ #[must_use]
+ pub fn new(last_fault: Fault) -> Self {
+ Self { last_fault }
+ }
+
+ /// Returns the most recent fault.
+ #[must_use]
+ pub fn last_fault(&self) -> &Fault {
+ &self.last_fault
+ }
+}
+
/// Lifecycle state with a typestate payload.
///
/// `S` is one of the marker states ([`Cold`], [`Starting`], [`Ready`],
/// [`Recovering`]); the legal transitions are methods on the concrete
/// `Lifecycle<S>` impls, so an illegal transition simply does not compile.
#[derive(Debug, Clone)]
pub struct Lifecycle<S> {
    generation: Generation,
    state: S,
}
+
+impl Lifecycle<Cold> {
+ /// Constructs a cold lifecycle.
+ #[must_use]
+ pub fn cold() -> Self {
+ Self {
+ generation: Generation::genesis(),
+ state: Cold,
+ }
+ }
+
+ /// Begins startup sequence.
+ #[must_use]
+ pub fn ignite(self) -> Lifecycle<Starting> {
+ Lifecycle {
+ generation: self.generation,
+ state: Starting,
+ }
+ }
+}
+
+impl Lifecycle<Starting> {
+ /// Marks startup as successful.
+ #[must_use]
+ pub fn arm(self) -> Lifecycle<Ready> {
+ Lifecycle {
+ generation: self.generation,
+ state: Ready,
+ }
+ }
+
+ /// Marks startup as failed and enters recovery.
+ #[must_use]
+ pub fn fracture(self, fault: Fault) -> Lifecycle<Recovering> {
+ Lifecycle {
+ generation: self.generation,
+ state: Recovering::new(fault),
+ }
+ }
+}
+
+impl Lifecycle<Ready> {
+ /// Moves from ready to recovering after a fault.
+ #[must_use]
+ pub fn fracture(self, fault: Fault) -> Lifecycle<Recovering> {
+ Lifecycle {
+ generation: self.generation,
+ state: Recovering::new(fault),
+ }
+ }
+}
+
+impl Lifecycle<Recovering> {
+ /// Advances generation and retries startup.
+ #[must_use]
+ pub fn respawn(self) -> Lifecycle<Starting> {
+ Lifecycle {
+ generation: self.generation.next(),
+ state: Starting,
+ }
+ }
+
+ /// Returns the most recent fault.
+ #[must_use]
+ pub fn last_fault(&self) -> &Fault {
+ self.state.last_fault()
+ }
+}
+
+impl<S> Lifecycle<S> {
+ /// Returns the active generation.
+ #[must_use]
+ pub fn generation(&self) -> Generation {
+ self.generation
+ }
+
+ /// Returns the typestate payload.
+ #[must_use]
+ pub fn state(&self) -> &S {
+ &self.state
+ }
+}
+
/// Serializable lifecycle snapshot for diagnostics.
///
/// This is the erased, data-only mirror of [`Lifecycle`]'s typestates,
/// produced by `DynamicLifecycle::snapshot`. With no serde attributes on
/// the enum it uses serde's default externally-tagged representation.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum LifecycleSnapshot {
    /// No worker is currently running.
    Cold {
        /// Current generation counter.
        generation: Generation,
    },
    /// Worker startup is in progress.
    Starting {
        /// Current generation counter.
        generation: Generation,
    },
    /// Worker is ready for requests.
    Ready {
        /// Current generation counter.
        generation: Generation,
    },
    /// Worker is recovering after a fault.
    Recovering {
        /// Current generation counter.
        generation: Generation,
        /// Most recent fault.
        last_fault: Fault,
    },
}
+
/// Dynamically typed lifecycle state for runtime storage.
///
/// Wraps each `Lifecycle<S>` typestate in one enum so a runtime can hold
/// "whatever state the worker is in" in a single field, while the typestate
/// methods still enforce legal transitions inside each variant.
#[derive(Debug, Clone)]
pub enum DynamicLifecycle {
    /// Cold typestate wrapper.
    Cold(Lifecycle<Cold>),
    /// Starting typestate wrapper.
    Starting(Lifecycle<Starting>),
    /// Ready typestate wrapper.
    Ready(Lifecycle<Ready>),
    /// Recovering typestate wrapper.
    Recovering(Lifecycle<Recovering>),
}
+
+impl DynamicLifecycle {
+ /// Creates a cold dynamic lifecycle.
+ #[must_use]
+ pub fn cold() -> Self {
+ Self::Cold(Lifecycle::cold())
+ }
+
+ /// Returns the serializable snapshot.
+ #[must_use]
+ pub fn snapshot(&self) -> LifecycleSnapshot {
+ match self {
+ Self::Cold(state) => LifecycleSnapshot::Cold {
+ generation: state.generation(),
+ },
+ Self::Starting(state) => LifecycleSnapshot::Starting {
+ generation: state.generation(),
+ },
+ Self::Ready(state) => LifecycleSnapshot::Ready {
+ generation: state.generation(),
+ },
+ Self::Recovering(state) => LifecycleSnapshot::Recovering {
+ generation: state.generation(),
+ last_fault: state.last_fault().clone(),
+ },
+ }
+ }
+
+ /// Enters startup from cold or recovering.
+ pub fn begin_startup(self) -> Result<Self, InvariantViolation> {
+ match self {
+ Self::Cold(state) => Ok(Self::Starting(state.ignite())),
+ Self::Recovering(state) => Ok(Self::Starting(state.respawn())),
+ Self::Starting(_) | Self::Ready(_) => Err(InvariantViolation::new(
+ "invalid lifecycle transition to starting",
+ )),
+ }
+ }
+
+ /// Marks startup as complete.
+ pub fn complete_startup(self) -> Result<Self, InvariantViolation> {
+ match self {
+ Self::Starting(state) => Ok(Self::Ready(state.arm())),
+ _ => Err(InvariantViolation::new(
+ "invalid lifecycle transition to ready",
+ )),
+ }
+ }
+
+ /// Records a fault and enters recovering state.
+ pub fn fracture(self, fault: Fault) -> Result<Self, InvariantViolation> {
+ match self {
+ Self::Starting(state) => Ok(Self::Recovering(state.fracture(fault))),
+ Self::Ready(state) => Ok(Self::Recovering(state.fracture(fault))),
+ Self::Recovering(state) => Ok(Self::Recovering(Lifecycle {
+ generation: state.generation(),
+ state: Recovering::new(fault),
+ })),
+ Self::Cold(_) => Err(InvariantViolation::new("cannot fracture cold lifecycle")),
+ }
+ }
+}
+
#[cfg(test)]
mod tests {
    use super::{DynamicLifecycle, Lifecycle, LifecycleSnapshot};
    use crate::fault::{Fault, FaultClass, FaultCode, FaultDetail};

    /// Walks cold -> starting -> ready -> recovering -> starting and checks
    /// that the respawn step (and only that step) bumps the generation.
    #[test]
    fn typestate_chain_advances_generation_on_recovery() {
        let cold = Lifecycle::cold();
        let starting = cold.ignite();
        let ready = starting.arm();
        let ready_generation = ready.generation();
        let fault = Fault::new(
            ready_generation,
            FaultClass::Transport,
            FaultCode::BrokenPipe,
            FaultDetail::new("broken pipe"),
        );
        let recovering = ready.fracture(fault);
        let restarted = recovering.respawn();
        assert!(restarted.generation() > ready_generation);
    }

    /// `snapshot` has no failure path; even the trivial cold state maps to
    /// a snapshot variant.
    #[test]
    fn dynamic_snapshot_of_recovering_is_infallible() {
        let cold = DynamicLifecycle::cold();
        assert!(matches!(cold.snapshot(), LifecycleSnapshot::Cold { .. }));
    }
}
diff --git a/crates/ra-mcp-domain/src/types.rs b/crates/ra-mcp-domain/src/types.rs
new file mode 100644
index 0000000..db709d6
--- /dev/null
+++ b/crates/ra-mcp-domain/src/types.rs
@@ -0,0 +1,460 @@
+//! Fundamental domain types.
+
+use serde::{Deserialize, Deserializer, Serialize};
+use std::{
+ num::NonZeroU64,
+ path::{Path, PathBuf},
+};
+use thiserror::Error;
+
+/// A value that failed a domain-level invariant.
+#[derive(Debug, Clone, PartialEq, Eq, Error)]
+#[error("domain invariant violated: {detail}")]
+pub struct InvariantViolation {
+ detail: &'static str,
+}
+
+impl InvariantViolation {
+ /// Creates a new invariant violation.
+ #[must_use]
+ pub fn new(detail: &'static str) -> Self {
+ Self { detail }
+ }
+}
+
+/// Process generation for a rust-analyzer worker.
+#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
+#[serde(transparent)]
+pub struct Generation(NonZeroU64);
+
+impl Generation {
+ /// Returns the first generation.
+ #[must_use]
+ pub fn genesis() -> Self {
+ Self(NonZeroU64::MIN)
+ }
+
+ /// Returns the inner integer value.
+ #[must_use]
+ pub fn get(self) -> u64 {
+ self.0.get()
+ }
+
+ /// Advances to the next generation.
+ #[must_use]
+ pub fn next(self) -> Self {
+ let next = self.get().saturating_add(1);
+ let non_zero = NonZeroU64::new(next).map_or(NonZeroU64::MAX, |value| value);
+ Self(non_zero)
+ }
+}
+
+/// A non-empty absolute workspace root.
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+pub struct WorkspaceRoot(PathBuf);
+
+impl WorkspaceRoot {
+ /// Constructs a validated workspace root.
+ pub fn try_new(path: PathBuf) -> Result<Self, InvariantViolation> {
+ if !path.is_absolute() {
+ return Err(InvariantViolation::new("workspace root must be absolute"));
+ }
+ if path.as_os_str().is_empty() {
+ return Err(InvariantViolation::new("workspace root must be non-empty"));
+ }
+ Ok(Self(path))
+ }
+
+ /// Returns the root path.
+ #[must_use]
+ pub fn as_path(&self) -> &Path {
+ self.0.as_path()
+ }
+}
+
+/// A non-empty absolute source file path.
+#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize)]
+#[serde(transparent)]
+pub struct SourceFilePath(PathBuf);
+
+impl SourceFilePath {
+ /// Constructs a validated source file path.
+ pub fn try_new(path: PathBuf) -> Result<Self, InvariantViolation> {
+ if !path.is_absolute() {
+ return Err(InvariantViolation::new("source file path must be absolute"));
+ }
+ if path.as_os_str().is_empty() {
+ return Err(InvariantViolation::new(
+ "source file path must be non-empty",
+ ));
+ }
+ Ok(Self(path))
+ }
+
+ /// Returns the underlying path.
+ #[must_use]
+ pub fn as_path(&self) -> &Path {
+ self.0.as_path()
+ }
+}
+
+impl<'de> Deserialize<'de> for SourceFilePath {
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where
+ D: Deserializer<'de>,
+ {
+ let path = PathBuf::deserialize(deserializer)?;
+ Self::try_new(path).map_err(serde::de::Error::custom)
+ }
+}
+
+/// One-indexed source line number.
+#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
+#[serde(transparent)]
+pub struct OneIndexedLine(NonZeroU64);
+
+impl OneIndexedLine {
+ /// Constructs a one-indexed line.
+ pub fn try_new(raw: u64) -> Result<Self, InvariantViolation> {
+ let line = NonZeroU64::new(raw).ok_or(InvariantViolation::new("line must be >= 1"))?;
+ Ok(Self(line))
+ }
+
+ /// Returns the one-indexed value.
+ #[must_use]
+ pub fn get(self) -> u64 {
+ self.0.get()
+ }
+
+ /// Returns the corresponding zero-indexed value for LSP.
+ #[must_use]
+ pub fn to_zero_indexed(self) -> u32 {
+ let raw = self.get().saturating_sub(1);
+ u32::try_from(raw).unwrap_or(u32::MAX)
+ }
+}
+
+/// One-indexed source column number.
+#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
+#[serde(transparent)]
+pub struct OneIndexedColumn(NonZeroU64);
+
+impl OneIndexedColumn {
+ /// Constructs a one-indexed column.
+ pub fn try_new(raw: u64) -> Result<Self, InvariantViolation> {
+ let column = NonZeroU64::new(raw).ok_or(InvariantViolation::new("column must be >= 1"))?;
+ Ok(Self(column))
+ }
+
+ /// Returns the one-indexed value.
+ #[must_use]
+ pub fn get(self) -> u64 {
+ self.0.get()
+ }
+
+ /// Returns the corresponding zero-indexed value for LSP.
+ #[must_use]
+ pub fn to_zero_indexed(self) -> u32 {
+ let raw = self.get().saturating_sub(1);
+ u32::try_from(raw).unwrap_or(u32::MAX)
+ }
+}
+
+/// A file-local source point.
+#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
+pub struct SourcePoint {
+ line: OneIndexedLine,
+ column: OneIndexedColumn,
+}
+
+impl SourcePoint {
+ /// Constructs a file-local source point.
+ #[must_use]
+ pub fn new(line: OneIndexedLine, column: OneIndexedColumn) -> Self {
+ Self { line, column }
+ }
+
+ /// Returns the line component.
+ #[must_use]
+ pub fn line(self) -> OneIndexedLine {
+ self.line
+ }
+
+ /// Returns the column component.
+ #[must_use]
+ pub fn column(self) -> OneIndexedColumn {
+ self.column
+ }
+}
+
+/// Request position in a source file.
+#[derive(Debug, Clone, PartialEq, Eq, Serialize)]
+pub struct SourcePosition {
+ file_path: SourceFilePath,
+ #[serde(flatten)]
+ point: SourcePoint,
+}
+
+impl SourcePosition {
+ /// Constructs a request position.
+ #[must_use]
+ pub fn new(file_path: SourceFilePath, point: SourcePoint) -> Self {
+ Self { file_path, point }
+ }
+
+ /// Returns the source file path.
+ #[must_use]
+ pub fn file_path(&self) -> &SourceFilePath {
+ &self.file_path
+ }
+
+ /// Returns the file-local point.
+ #[must_use]
+ pub fn point(&self) -> SourcePoint {
+ self.point
+ }
+
+ /// Returns the one-indexed line.
+ #[must_use]
+ pub fn line(&self) -> OneIndexedLine {
+ self.point.line()
+ }
+
+ /// Returns the one-indexed column.
+ #[must_use]
+ pub fn column(&self) -> OneIndexedColumn {
+ self.point.column()
+ }
+}
+
+#[derive(Debug, Clone, Deserialize)]
+struct SourcePositionWire {
+ file_path: SourceFilePath,
+ #[serde(flatten)]
+ point: SourcePoint,
+}
+
+impl<'de> Deserialize<'de> for SourcePosition {
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where
+ D: Deserializer<'de>,
+ {
+ let SourcePositionWire { file_path, point } =
+ SourcePositionWire::deserialize(deserializer)?;
+ Ok(Self::new(file_path, point))
+ }
+}
+
+/// A concrete source location.
+#[derive(Debug, Clone, PartialEq, Eq, Serialize)]
+pub struct SourceLocation {
+ file_path: SourceFilePath,
+ #[serde(flatten)]
+ point: SourcePoint,
+}
+
+impl SourceLocation {
+ /// Constructs a source location.
+ #[must_use]
+ pub fn new(file_path: SourceFilePath, point: SourcePoint) -> Self {
+ Self { file_path, point }
+ }
+
+ /// Returns the source file path.
+ #[must_use]
+ pub fn file_path(&self) -> &SourceFilePath {
+ &self.file_path
+ }
+
+ /// Returns the file-local point.
+ #[must_use]
+ pub fn point(&self) -> SourcePoint {
+ self.point
+ }
+
+ /// Returns the one-indexed line.
+ #[must_use]
+ pub fn line(&self) -> OneIndexedLine {
+ self.point.line()
+ }
+
+ /// Returns the one-indexed column.
+ #[must_use]
+ pub fn column(&self) -> OneIndexedColumn {
+ self.point.column()
+ }
+}
+
+#[derive(Debug, Clone, Deserialize)]
+struct SourceLocationWire {
+ file_path: SourceFilePath,
+ #[serde(flatten)]
+ point: SourcePoint,
+}
+
+impl<'de> Deserialize<'de> for SourceLocation {
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where
+ D: Deserializer<'de>,
+ {
+ let SourceLocationWire { file_path, point } =
+ SourceLocationWire::deserialize(deserializer)?;
+ Ok(Self::new(file_path, point))
+ }
+}
+
+/// A source range in a specific file.
+#[derive(Debug, Clone, PartialEq, Eq, Serialize)]
+pub struct SourceRange {
+ file_path: SourceFilePath,
+ start: SourcePoint,
+ end: SourcePoint,
+}
+
+impl SourceRange {
+ /// Constructs a validated source range.
+ pub fn try_new(
+ file_path: SourceFilePath,
+ start: SourcePoint,
+ end: SourcePoint,
+ ) -> Result<Self, InvariantViolation> {
+ if end < start {
+ return Err(InvariantViolation::new(
+ "source range end must not precede start",
+ ));
+ }
+ Ok(Self {
+ file_path,
+ start,
+ end,
+ })
+ }
+
+ /// Returns the source file path.
+ #[must_use]
+ pub fn file_path(&self) -> &SourceFilePath {
+ &self.file_path
+ }
+
+ /// Returns the start point.
+ #[must_use]
+ pub fn start(&self) -> SourcePoint {
+ self.start
+ }
+
+ /// Returns the end point.
+ #[must_use]
+ pub fn end(&self) -> SourcePoint {
+ self.end
+ }
+}
+
+#[derive(Debug, Clone, Deserialize)]
+struct SourceRangeWire {
+ file_path: SourceFilePath,
+ start: SourcePoint,
+ end: SourcePoint,
+}
+
+impl<'de> Deserialize<'de> for SourceRange {
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where
+ D: Deserializer<'de>,
+ {
+ let SourceRangeWire {
+ file_path,
+ start,
+ end,
+ } = SourceRangeWire::deserialize(deserializer)?;
+ Self::try_new(file_path, start, end).map_err(serde::de::Error::custom)
+ }
+}
+
/// A monotonically increasing request sequence.
///
/// Values are one-based and saturate at `u64::MAX` rather than wrapping,
/// so ordering comparisons between sequence numbers stay meaningful.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct RequestSequence(NonZeroU64);

impl RequestSequence {
    /// Starts a fresh sequence at 1.
    #[must_use]
    pub fn genesis() -> Self {
        Self(NonZeroU64::MIN)
    }

    /// Returns the current integer value.
    #[must_use]
    pub fn get(self) -> u64 {
        self.0.get()
    }

    /// Consumes and returns the next sequence, saturating at `u64::MAX`.
    #[must_use]
    pub fn next(self) -> Self {
        // Adding 1 to a non-zero value can only overflow at u64::MAX; pin
        // to the maximum there instead of wrapping back to an earlier
        // sequence number.
        Self(self.0.checked_add(1).unwrap_or(NonZeroU64::MAX))
    }
}
+
#[cfg(test)]
mod tests {
    use super::{
        Generation, InvariantViolation, OneIndexedColumn, OneIndexedLine, RequestSequence,
        SourceFilePath, SourcePoint, SourceRange,
    };
    use assert_matches::assert_matches;
    use std::{num::NonZeroU64, path::PathBuf};

    #[test]
    fn generation_advances_monotonically() {
        let first = Generation::genesis();
        let second = first.next();
        let third = second.next();
        assert!(first < second);
        assert!(second < third);
    }

    /// Direct tuple construction (`Generation(NonZeroU64::MAX)`) is possible
    /// here because this test module lives alongside the type and can see
    /// its private field.
    #[test]
    fn generation_saturates_at_maximum() {
        let max = Generation(NonZeroU64::MAX);
        assert_eq!(max.next(), max);
    }

    #[test]
    fn line_must_be_one_or_greater() {
        assert_matches!(OneIndexedLine::try_new(0), Err(InvariantViolation { .. }));
    }

    #[test]
    fn column_must_be_one_or_greater() {
        assert_matches!(OneIndexedColumn::try_new(0), Err(InvariantViolation { .. }));
    }

    /// NOTE(review): the `unwrap_or` fallbacks and the match-with-return
    /// below avoid panicking calls entirely — presumably to satisfy the
    /// workspace lint configuration; confirm before simplifying to
    /// `unwrap`/`expect`.
    #[test]
    fn source_range_rejects_reversed_points() {
        let file_path = SourceFilePath::try_new(PathBuf::from("/tmp/range.rs"));
        assert!(file_path.is_ok());
        let file_path = match file_path {
            Ok(value) => value,
            Err(_) => return,
        };
        // start (4,3) is after end (2,1), so try_new must reject the range.
        let start = SourcePoint::new(
            OneIndexedLine::try_new(4).unwrap_or(OneIndexedLine(NonZeroU64::MIN)),
            OneIndexedColumn::try_new(3).unwrap_or(OneIndexedColumn(NonZeroU64::MIN)),
        );
        let end = SourcePoint::new(
            OneIndexedLine::try_new(2).unwrap_or(OneIndexedLine(NonZeroU64::MIN)),
            OneIndexedColumn::try_new(1).unwrap_or(OneIndexedColumn(NonZeroU64::MIN)),
        );
        assert_matches!(
            SourceRange::try_new(file_path, start, end),
            Err(InvariantViolation { .. })
        );
    }

    #[test]
    fn request_sequence_saturates_at_maximum() {
        let max = RequestSequence(NonZeroU64::MAX);
        assert_eq!(max.next().get(), u64::MAX);
    }
}
diff --git a/crates/ra-mcp-engine/.gitignore b/crates/ra-mcp-engine/.gitignore
new file mode 100644
index 0000000..ea8c4bf
--- /dev/null
+++ b/crates/ra-mcp-engine/.gitignore
@@ -0,0 +1 @@
+/target
diff --git a/crates/ra-mcp-engine/Cargo.toml b/crates/ra-mcp-engine/Cargo.toml
new file mode 100644
index 0000000..d5d870d
--- /dev/null
+++ b/crates/ra-mcp-engine/Cargo.toml
@@ -0,0 +1,28 @@
+[package]
+name = "ra-mcp-engine"
+categories.workspace = true
+description = "Resilient rust-analyzer transport and worker-supervision engine used by adequate-rust-mcp."
+edition.workspace = true
+keywords.workspace = true
+license.workspace = true
+readme.workspace = true
+repository.workspace = true
+rust-version.workspace = true
+version.workspace = true
+
+[dependencies]
+lsp-types.workspace = true
+ra-mcp-domain = { path = "../ra-mcp-domain" }
+serde.workspace = true
+serde_json.workspace = true
+thiserror.workspace = true
+tokio.workspace = true
+tracing.workspace = true
+url.workspace = true
+
+[dev-dependencies]
+serial_test.workspace = true
+tempfile.workspace = true
+
+[lints]
+workspace = true
diff --git a/crates/ra-mcp-engine/src/bin/fake-rust-analyzer.rs b/crates/ra-mcp-engine/src/bin/fake-rust-analyzer.rs
new file mode 100644
index 0000000..c64b68b
--- /dev/null
+++ b/crates/ra-mcp-engine/src/bin/fake-rust-analyzer.rs
@@ -0,0 +1,467 @@
+//! Fault-injectable fake rust-analyzer used by integration tests.
+
+use lsp_types as _;
+use ra_mcp_domain as _;
+use ra_mcp_engine as _;
+use serde as _;
+use serde_json::{Value, json};
+#[cfg(test)]
+use serial_test as _;
+use std::{
+ fs,
+ io::{self, BufRead, BufReader, Read, Write},
+ path::{Path, PathBuf},
+ time::Duration,
+};
+#[cfg(test)]
+use tempfile as _;
+use thiserror as _;
+use tokio as _;
+use tracing as _;
+use url as _;
+
/// Behavior selected via the `--mode` command-line flag.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum Mode {
    // Answer every request normally.
    Stable,
    // Exit cleanly the first time a hover request arrives; the crash-marker
    // file (see `should_crash`) makes this fire at most once per marker.
    CrashOnFirstHover,
}
+
+fn main() -> Result<(), Box<dyn std::error::Error>> {
+ run().map_err(|error| Box::new(error) as Box<dyn std::error::Error>)
+}
+
/// Reads LSP frames from stdin and answers them until EOF.
///
/// All fault-injection behavior is driven by command-line flags parsed at
/// the top of this function; unknown flags and unparseable values are
/// silently ignored, leaving the "behave normally" defaults in place.
fn run() -> io::Result<()> {
    // Fault-injection knobs, all defaulting to normal behavior.
    let mut mode = Mode::Stable;
    let mut marker = None::<PathBuf>;
    let mut hover_delay = Duration::ZERO;
    let mut execute_command_delay = Duration::ZERO;
    let mut execute_command_log = None::<PathBuf>;
    let mut diagnostic_warmup_count = 0_u8;
    let mut diagnostic_cancel_count = 0_u8;
    let mut strict_root_match = false;
    // Captured later from the `initialize` request's rootUri; consulted by
    // --strict-root-match to detect out-of-workspace documents.
    let mut workspace_root = None::<PathBuf>;
    let mut args = std::env::args().skip(1);
    // Hand-rolled flag parser: each value-taking flag consumes the next
    // argument; a missing or malformed value leaves the default untouched.
    loop {
        let argument = args.next();
        let Some(argument) = argument else {
            break;
        };
        match argument.as_str() {
            "--mode" => {
                if let Some(value) = args.next() {
                    mode = parse_mode(&value).unwrap_or(Mode::Stable);
                }
            }
            "--crash-marker" => {
                if let Some(value) = args.next() {
                    marker = Some(PathBuf::from(value));
                }
            }
            "--hover-delay-ms" => {
                if let Some(value) = args.next() {
                    let parsed = value.parse::<u64>().ok();
                    if let Some(delay_ms) = parsed {
                        hover_delay = Duration::from_millis(delay_ms);
                    }
                }
            }
            "--execute-command-delay-ms" => {
                if let Some(value) = args.next() {
                    let parsed = value.parse::<u64>().ok();
                    if let Some(delay_ms) = parsed {
                        execute_command_delay = Duration::from_millis(delay_ms);
                    }
                }
            }
            "--execute-command-log" => {
                if let Some(value) = args.next() {
                    execute_command_log = Some(PathBuf::from(value));
                }
            }
            "--diagnostic-warmup-count" => {
                if let Some(value) = args.next() {
                    let parsed = value.parse::<u8>().ok();
                    if let Some(count) = parsed {
                        diagnostic_warmup_count = count;
                    }
                }
            }
            "--diagnostic-cancel-count" => {
                if let Some(value) = args.next() {
                    let parsed = value.parse::<u8>().ok();
                    if let Some(count) = parsed {
                        diagnostic_cancel_count = count;
                    }
                }
            }
            "--strict-root-match" => {
                strict_root_match = true;
            }
            // Unknown flags are deliberately ignored.
            _ => {}
        }
    }

    let stdin = io::stdin();
    let stdout = io::stdout();
    let mut reader = BufReader::new(stdin.lock());
    let mut writer = stdout.lock();

    // Serve frames until the client closes stdin; a clean EOF between
    // frames ends the loop, any other I/O error propagates.
    loop {
        let frame = match read_frame(&mut reader) {
            Ok(frame) => frame,
            Err(error) if error.kind() == io::ErrorKind::UnexpectedEof => break,
            Err(error) => return Err(error),
        };
        let message: Value = serde_json::from_slice(&frame)
            .map_err(|error| io::Error::new(io::ErrorKind::InvalidData, error.to_string()))?;
        // Messages without a "method" field (e.g. responses) are ignored.
        if let Some(method) = message.get("method").and_then(Value::as_str) {
            // `initialized` is a notification that needs no reply.
            if method == "initialized" {
                continue;
            }

            // Any other message without an id is a notification: skip it.
            let request_id = message.get("id").cloned();
            let Some(request_id) = request_id else {
                continue;
            };
            // Remember the workspace root announced at initialize time.
            if method == "initialize" {
                workspace_root = initialized_workspace_root(&message);
            }

            // Injected fault: exit cleanly on the first hover. The marker
            // file limits this to a single crash across restarts.
            if mode == Mode::CrashOnFirstHover
                && method == "textDocument/hover"
                && should_crash(&marker)?
            {
                std::process::exit(0);
            }
            // Injected latency on hover requests.
            if method == "textDocument/hover" && !hover_delay.is_zero() {
                std::thread::sleep(hover_delay);
            }
            if method == "workspace/executeCommand" {
                // Log the command before the optional delay so the side
                // effect is observable even if the response arrives late.
                if let Some(path) = execute_command_log.as_ref() {
                    log_execute_command_effect(path, &message)?;
                }
                if !execute_command_delay.is_zero() {
                    std::thread::sleep(execute_command_delay);
                }
            }

            // Response selection, in priority order: strict-root mismatch,
            // injected diagnostic cancellations, injected warmup responses,
            // then the normal happy-path response.
            let response = if strict_root_match
                && request_targets_outside_workspace(&message, workspace_root.as_deref())
            {
                strict_root_mismatch_response(method, request_id, &message)
            } else if method == "textDocument/diagnostic" && diagnostic_cancel_count > 0 {
                diagnostic_cancel_count = diagnostic_cancel_count.saturating_sub(1);
                server_cancelled_response(request_id)
            } else if method == "textDocument/diagnostic" && diagnostic_warmup_count > 0 {
                diagnostic_warmup_count = diagnostic_warmup_count.saturating_sub(1);
                warmup_unlinked_diagnostic_response(request_id)
            } else {
                make_response(method, request_id, &message)
            };
            write_frame(&mut writer, &response)?;
        }
    }

    Ok(())
}
+
+fn parse_mode(raw: &str) -> Option<Mode> {
+ match raw {
+ "stable" => Some(Mode::Stable),
+ "crash_on_first_hover" => Some(Mode::CrashOnFirstHover),
+ _ => None,
+ }
+}
+
/// Decides whether the injected crash should fire.
///
/// With no marker configured every call says "crash". With a marker, the
/// first call writes the marker file and says "crash"; once the marker
/// exists, later calls (e.g. after a restart) say "don't crash".
fn should_crash(marker: &Option<PathBuf>) -> io::Result<bool> {
    match marker {
        None => Ok(true),
        Some(path) if path.exists() => Ok(false),
        Some(path) => {
            fs::write(path, b"crashed")?;
            Ok(true)
        }
    }
}
+
+fn log_execute_command_effect(path: &PathBuf, request: &Value) -> io::Result<()> {
+ let command = request
+ .get("params")
+ .and_then(|params| params.get("command"))
+ .and_then(Value::as_str)
+ .unwrap_or("<missing-command>");
+ let mut file = fs::OpenOptions::new()
+ .create(true)
+ .append(true)
+ .open(path)?;
+ writeln!(file, "{command}")?;
+ Ok(())
+}
+
+fn initialized_workspace_root(request: &Value) -> Option<PathBuf> {
+ let root_uri = request
+ .get("params")
+ .and_then(|params| params.get("rootUri"))
+ .and_then(Value::as_str)?;
+ let root_url = url::Url::parse(root_uri).ok()?;
+ root_url.to_file_path().ok()
+}
+
+fn request_targets_outside_workspace(request: &Value, workspace_root: Option<&Path>) -> bool {
+ let Some(workspace_root) = workspace_root else {
+ return false;
+ };
+ let file_path = request_document_path(request);
+ let Some(file_path) = file_path else {
+ return false;
+ };
+ !file_path.starts_with(workspace_root)
+}
+
+fn request_document_path(request: &Value) -> Option<PathBuf> {
+ let uri = request
+ .get("params")
+ .and_then(|params| params.get("textDocument"))
+ .and_then(|doc| doc.get("uri"))
+ .and_then(Value::as_str)?;
+ let url = url::Url::parse(uri).ok()?;
+ url.to_file_path().ok()
+}
+
+fn strict_root_mismatch_response(method: &str, request_id: Value, request: &Value) -> Value {
+ match method {
+ "textDocument/hover" => json!({
+ "jsonrpc": "2.0",
+ "id": request_id,
+ "result": Value::Null
+ }),
+ "textDocument/definition" => json!({
+ "jsonrpc": "2.0",
+ "id": request_id,
+ "result": Value::Null
+ }),
+ "textDocument/references" => json!({
+ "jsonrpc": "2.0",
+ "id": request_id,
+ "result": Value::Null
+ }),
+ "textDocument/rename" => {
+ let uri = request
+ .get("params")
+ .and_then(|params| params.get("textDocument"))
+ .and_then(|doc| doc.get("uri"))
+ .and_then(Value::as_str)
+ .unwrap_or("file:///tmp/fallback.rs")
+ .to_owned();
+ json!({
+ "jsonrpc": "2.0",
+ "id": request_id,
+ "result": {
+ "changes": {
+ uri: []
+ }
+ }
+ })
+ }
+ "textDocument/diagnostic" => warmup_unlinked_diagnostic_response(request_id),
+ _ => make_response(method, request_id, request),
+ }
+}
+
+fn make_response(method: &str, request_id: Value, request: &Value) -> Value {
+ match method {
+ "initialize" => json!({
+ "jsonrpc": "2.0",
+ "id": request_id,
+ "result": {
+ "capabilities": {}
+ }
+ }),
+ "textDocument/hover" => json!({
+ "jsonrpc": "2.0",
+ "id": request_id,
+ "result": {
+ "contents": {
+ "kind": "markdown",
+ "value": "hover::ok"
+ }
+ }
+ }),
+ "textDocument/definition" => {
+ let uri = request
+ .get("params")
+ .and_then(|params| params.get("textDocument"))
+ .and_then(|doc| doc.get("uri"))
+ .cloned()
+ .unwrap_or(Value::String("file:///tmp/fallback.rs".to_owned()));
+ json!({
+ "jsonrpc": "2.0",
+ "id": request_id,
+ "result": [{
+ "uri": uri,
+ "range": {
+ "start": { "line": 2, "character": 3 },
+ "end": { "line": 2, "character": 8 }
+ }
+ }]
+ })
+ }
+ "textDocument/references" => {
+ let uri = request
+ .get("params")
+ .and_then(|params| params.get("textDocument"))
+ .and_then(|doc| doc.get("uri"))
+ .cloned()
+ .unwrap_or(Value::String("file:///tmp/fallback.rs".to_owned()));
+ json!({
+ "jsonrpc": "2.0",
+ "id": request_id,
+ "result": [{
+ "uri": uri,
+ "range": {
+ "start": { "line": 4, "character": 1 },
+ "end": { "line": 4, "character": 5 }
+ }
+ }]
+ })
+ }
+ "textDocument/rename" => {
+ let uri = request
+ .get("params")
+ .and_then(|params| params.get("textDocument"))
+ .and_then(|doc| doc.get("uri"))
+ .and_then(Value::as_str)
+ .unwrap_or("file:///tmp/fallback.rs")
+ .to_owned();
+ json!({
+ "jsonrpc": "2.0",
+ "id": request_id,
+ "result": {
+ "changes": {
+ uri: [
+ {
+ "range": {
+ "start": { "line": 1, "character": 1 },
+ "end": { "line": 1, "character": 4 }
+ },
+ "newText": "renamed_symbol"
+ }
+ ]
+ }
+ }
+ })
+ }
+ "textDocument/diagnostic" => json!({
+ "jsonrpc": "2.0",
+ "id": request_id,
+ "result": {
+ "kind": "full",
+ "items": [{
+ "range": {
+ "start": { "line": 0, "character": 0 },
+ "end": { "line": 0, "character": 3 }
+ },
+ "severity": 1,
+ "message": "fake diagnostic"
+ }]
+ }
+ }),
+ "workspace/executeCommand" => {
+ let command = request
+ .get("params")
+ .and_then(|params| params.get("command"))
+ .cloned()
+ .unwrap_or(Value::Null);
+ json!({
+ "jsonrpc": "2.0",
+ "id": request_id,
+ "result": {
+ "ack": "ok",
+ "command": command
+ }
+ })
+ }
+ _ => json!({
+ "jsonrpc": "2.0",
+ "id": request_id,
+ "error": {
+ "code": -32601,
+ "message": format!("method not found: {method}")
+ }
+ }),
+ }
+}
+
/// Canned "unlinked-file" diagnostic used to simulate a server whose
/// workspace has not finished loading.
///
/// NOTE(review): the code/message appear to mirror rust-analyzer's real
/// unlinked-file warning so host-side warmup-retry heuristics can key on
/// them — confirm against the host's retry logic before editing the text.
fn warmup_unlinked_diagnostic_response(request_id: Value) -> Value {
    json!({
        "jsonrpc": "2.0",
        "id": request_id,
        "result": {
            "kind": "full",
            "items": [{
                "range": {
                    "start": { "line": 0, "character": 0 },
                    "end": { "line": 0, "character": 0 }
                },
                "severity": 2,
                "code": "unlinked-file",
                "message": "This file is not part of any crate, so rust-analyzer can't offer IDE services."
            }]
        }
    })
}
+
/// JSON-RPC error response with code -32802 (LSP's `ServerCancelled`),
/// simulating the server cancelling a request during a workspace reload.
fn server_cancelled_response(request_id: Value) -> Value {
    json!({
        "jsonrpc": "2.0",
        "id": request_id,
        "error": {
            "code": -32802,
            "message": "server cancelled request during workspace reload"
        }
    })
}
+
/// Reads one `Content-Length`-framed message payload from the reader.
///
/// Header names are matched case-insensitively, following the HTTP header
/// conventions the LSP base protocol uses. EOF before any header line is
/// reported as `UnexpectedEof` (the caller treats EOF between frames as a
/// clean shutdown); a missing or malformed `Content-Length` is
/// `InvalidData`.
fn read_frame(reader: &mut BufReader<impl Read>) -> io::Result<Vec<u8>> {
    let mut content_length = None::<usize>;
    loop {
        let mut line = String::new();
        let bytes = reader.read_line(&mut line)?;
        if bytes == 0 {
            return Err(io::Error::new(
                io::ErrorKind::UnexpectedEof,
                "EOF while reading headers",
            ));
        }
        // A blank line terminates the header section.
        if line == "\r\n" || line == "\n" {
            break;
        }
        let trimmed = line.trim_end_matches(['\r', '\n']);
        // Split into name/value at the first colon and compare the name
        // ignoring ASCII case, so "content-length:" is accepted too.
        if let Some((name, raw_length)) = trimmed.split_once(':') {
            if name.eq_ignore_ascii_case("Content-Length") {
                let parsed = raw_length.trim().parse::<usize>().map_err(|error| {
                    io::Error::new(
                        io::ErrorKind::InvalidData,
                        format!("invalid Content-Length header: {error}"),
                    )
                })?;
                content_length = Some(parsed);
            }
        }
    }

    let length = content_length.ok_or_else(|| {
        io::Error::new(io::ErrorKind::InvalidData, "missing Content-Length header")
    })?;
    let mut payload = vec![0_u8; length];
    reader.read_exact(&mut payload)?;
    Ok(payload)
}
+
+fn write_frame(writer: &mut impl Write, payload: &Value) -> io::Result<()> {
+ let serialized = serde_json::to_vec(payload)
+ .map_err(|error| io::Error::new(io::ErrorKind::InvalidData, error.to_string()))?;
+ let header = format!("Content-Length: {}\r\n\r\n", serialized.len());
+ writer.write_all(header.as_bytes())?;
+ writer.write_all(&serialized)?;
+ writer.flush()?;
+ Ok(())
+}
diff --git a/crates/ra-mcp-engine/src/config.rs b/crates/ra-mcp-engine/src/config.rs
new file mode 100644
index 0000000..8d116d5
--- /dev/null
+++ b/crates/ra-mcp-engine/src/config.rs
@@ -0,0 +1,79 @@
+use ra_mcp_domain::types::{InvariantViolation, WorkspaceRoot};
+use std::{path::PathBuf, time::Duration};
+
+/// Exponential backoff policy for worker restart attempts.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct BackoffPolicy {
+ /// Minimum delay between restart attempts.
+ pub floor: Duration,
+ /// Maximum delay between restart attempts.
+ pub ceiling: Duration,
+}
+
+impl BackoffPolicy {
+ /// Builds a validated backoff policy.
+ pub fn try_new(floor: Duration, ceiling: Duration) -> Result<Self, InvariantViolation> {
+ if floor.is_zero() {
+ return Err(InvariantViolation::new("backoff floor must be non-zero"));
+ }
+ if ceiling < floor {
+ return Err(InvariantViolation::new(
+ "backoff ceiling must be greater than or equal to floor",
+ ));
+ }
+ Ok(Self { floor, ceiling })
+ }
+}
+
+/// Runtime engine configuration.
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct EngineConfig {
+ /// Absolute workspace root used for rust-analyzer process cwd/root URI.
+ pub workspace_root: WorkspaceRoot,
+ /// rust-analyzer executable path.
+ pub rust_analyzer_binary: PathBuf,
+ /// Additional rust-analyzer process arguments.
+ pub rust_analyzer_args: Vec<String>,
+ /// Additional rust-analyzer process environment variables.
+ pub rust_analyzer_env: Vec<(String, String)>,
+ /// Startup handshake timeout.
+ pub startup_timeout: Duration,
+ /// Timeout for ordinary requests.
+ pub request_timeout: Duration,
+ /// Restart backoff policy.
+ pub backoff_policy: BackoffPolicy,
+}
+
+impl EngineConfig {
+ /// Builds validated engine configuration.
+ pub fn try_new(
+ workspace_root: WorkspaceRoot,
+ rust_analyzer_binary: PathBuf,
+ rust_analyzer_args: Vec<String>,
+ rust_analyzer_env: Vec<(String, String)>,
+ startup_timeout: Duration,
+ request_timeout: Duration,
+ backoff_policy: BackoffPolicy,
+ ) -> Result<Self, InvariantViolation> {
+ if rust_analyzer_binary.as_os_str().is_empty() {
+ return Err(InvariantViolation::new(
+ "rust-analyzer binary path must be non-empty",
+ ));
+ }
+ if startup_timeout.is_zero() {
+ return Err(InvariantViolation::new("startup timeout must be non-zero"));
+ }
+ if request_timeout.is_zero() {
+ return Err(InvariantViolation::new("request timeout must be non-zero"));
+ }
+ Ok(Self {
+ workspace_root,
+ rust_analyzer_binary,
+ rust_analyzer_args,
+ rust_analyzer_env,
+ startup_timeout,
+ request_timeout,
+ backoff_policy,
+ })
+ }
+}
diff --git a/crates/ra-mcp-engine/src/error.rs b/crates/ra-mcp-engine/src/error.rs
new file mode 100644
index 0000000..f40e1ae
--- /dev/null
+++ b/crates/ra-mcp-engine/src/error.rs
@@ -0,0 +1,77 @@
+use crate::lsp_transport::RpcErrorPayload;
+use ra_mcp_domain::{fault::Fault, types::InvariantViolation};
+use serde_json::Value;
+use thiserror::Error;
+
/// Engine result type.
pub type EngineResult<T> = Result<T, EngineError>;

/// Structured rust-analyzer response error.
///
/// Carries the raw JSON-RPC `error` object fields so callers can inspect
/// the code/message/data the server returned.
#[derive(Debug, Clone, Error)]
#[error("lsp response error: code={code}, message={message}")]
pub struct LspResponseError {
    /// LSP JSON-RPC error code.
    pub code: i64,
    /// LSP JSON-RPC error message.
    pub message: String,
    /// Optional JSON-RPC error data payload.
    pub data: Option<Value>,
}

/// Engine failure type.
#[derive(Debug, Error)]
pub enum EngineError {
    /// I/O failure while syncing source documents.
    #[error("io error: {0}")]
    Io(#[from] std::io::Error),
    /// Domain invariant was violated.
    // `transparent` forwards Display/source to the inner violation.
    #[error(transparent)]
    Invariant(#[from] InvariantViolation),
    /// Transport/process/protocol fault.
    // No `#[from]`: a manual From impl exists below this block.
    #[error("engine fault: {0:?}")]
    Fault(Fault),
    /// rust-analyzer returned a JSON-RPC error object.
    #[error(transparent)]
    LspResponse(#[from] LspResponseError),
    /// Response payload could not be deserialized into expected type.
    #[error("invalid lsp payload for method {method}: {message}")]
    InvalidPayload {
        /// Request method.
        method: &'static str,
        /// Decoder error detail.
        message: String,
    },
    /// Request params could not be serialized into JSON.
    #[error("invalid lsp request payload for method {method}: {message}")]
    InvalidRequest {
        /// Request method.
        method: &'static str,
        /// Encoder error detail.
        message: String,
    },
    /// Path to URL conversion failed.
    #[error("path cannot be represented as file URL")]
    InvalidFileUrl,
}
+
+impl From<Fault> for EngineError {
+ fn from(value: Fault) -> Self {
+ Self::Fault(value)
+ }
+}
+
+impl From<RpcErrorPayload> for LspResponseError {
+ fn from(value: RpcErrorPayload) -> Self {
+ Self {
+ code: value.code,
+ message: value.message,
+ data: value.data,
+ }
+ }
+}
+
+impl From<RpcErrorPayload> for EngineError {
+ fn from(value: RpcErrorPayload) -> Self {
+ Self::LspResponse(value.into())
+ }
+}
diff --git a/crates/ra-mcp-engine/src/lib.rs b/crates/ra-mcp-engine/src/lib.rs
new file mode 100644
index 0000000..3d36a5b
--- /dev/null
+++ b/crates/ra-mcp-engine/src/lib.rs
@@ -0,0 +1,20 @@
#![recursion_limit = "512"]

//! Resilient rust-analyzer execution engine and typed LSP façade.

// Anchor test-only crates with `use … as _` — presumably to keep an
// unused-dependency lint quiet; NOTE(review): confirm the lint in CI config.
#[cfg(test)]
use serial_test as _;
#[cfg(test)]
use tempfile as _;

mod config;
mod error;
mod lsp_transport;
mod supervisor;

// Public façade: configuration, errors, and the supervisor's result types.
pub use config::{BackoffPolicy, EngineConfig};
pub use error::{EngineError, EngineResult};
pub use supervisor::{
    DiagnosticEntry, DiagnosticLevel, DiagnosticsReport, Engine, HoverPayload,
    MethodTelemetrySnapshot, RenameReport, TelemetrySnapshot, TelemetryTotals,
};
diff --git a/crates/ra-mcp-engine/src/lsp_transport.rs b/crates/ra-mcp-engine/src/lsp_transport.rs
new file mode 100644
index 0000000..c47d4f2
--- /dev/null
+++ b/crates/ra-mcp-engine/src/lsp_transport.rs
@@ -0,0 +1,717 @@
+use crate::config::EngineConfig;
+use ra_mcp_domain::{
+ fault::{Fault, FaultClass, FaultCode, FaultDetail},
+ types::Generation,
+};
+use serde::{Deserialize, Serialize};
+use serde_json::{Value, json};
+use std::{
+ collections::HashMap,
+ io,
+ process::Stdio,
+ sync::{
+ Arc,
+ atomic::{AtomicU64, Ordering},
+ },
+ time::Duration,
+};
+use tokio::{
+ io::{AsyncBufReadExt, AsyncReadExt, AsyncWriteExt, BufReader},
+ process::{Child, ChildStdin, ChildStdout, Command},
+ sync::{Mutex, oneshot, watch},
+ task::JoinHandle,
+};
+use tracing::{debug, warn};
+use url::Url;
+
/// Live connection to one spawned rust-analyzer process.
///
/// Every field is shared behind `Arc`, so clones of the handle drive the
/// same child process, stdin writer, and pending-request table.
#[derive(Debug, Clone)]
pub(crate) struct WorkerHandle {
    /// Restart generation this worker belongs to; stamped into faults.
    generation: Generation,
    /// Child process handle, used by `terminate` to kill/await the worker.
    child: Arc<Mutex<Child>>,
    /// Serialized writer over the child's stdin for outgoing frames.
    writer: Arc<Mutex<ChildStdin>>,
    /// In-flight requests awaiting responses, keyed by JSON-RPC id.
    pending: Arc<Mutex<HashMap<u64, oneshot::Sender<PendingOutcome>>>>,
    /// Monotonic source of JSON-RPC request ids (starts at 1).
    next_request_id: Arc<AtomicU64>,
    /// Observes the fault that terminated the stdout reader loop, if any.
    terminal_fault_rx: watch::Receiver<Option<Fault>>,
    /// Background task draining framed messages from the child's stdout.
    reader_task: Arc<Mutex<Option<JoinHandle<()>>>>,
    /// Background task mirroring the child's stderr into logs.
    stderr_task: Arc<Mutex<Option<JoinHandle<()>>>>,
}
+
/// Resolution of one pending request, delivered over its oneshot channel.
#[derive(Debug)]
enum PendingOutcome {
    /// Successful `result` payload from the response frame.
    Result(Value),
    /// JSON-RPC `error` object returned by the server.
    ResponseError(RpcErrorPayload),
    /// Transport-level failure that tore down the reader loop.
    TransportFault(Fault),
}

/// Wire shape of a JSON-RPC `error` object.
#[derive(Debug, Clone, Deserialize)]
pub(crate) struct RpcErrorPayload {
    /// JSON-RPC error code.
    pub(crate) code: i64,
    /// Human-readable error message.
    pub(crate) message: String,
    /// Optional structured error data.
    pub(crate) data: Option<Value>,
}

/// Failure modes of [`WorkerHandle::send_request`].
#[derive(Debug)]
pub(crate) enum WorkerRequestError {
    /// Transport, timeout, or protocol fault.
    Fault(Fault),
    /// The server answered with a JSON-RPC error object.
    Response(RpcErrorPayload),
}
+
impl WorkerHandle {
    /// Returns the fault that terminated the reader loop, if one has been
    /// published on the watch channel.
    pub(crate) fn terminal_fault(&self) -> Option<Fault> {
        self.terminal_fault_rx.borrow().clone()
    }

    /// Writes a JSON-RPC notification frame (no id, no response expected).
    pub(crate) async fn send_notification(
        &self,
        method: &'static str,
        params: &impl Serialize,
    ) -> Result<(), Fault> {
        let payload = json!({
            "jsonrpc": "2.0",
            "method": method,
            "params": params,
        });
        let mut writer = self.writer.lock().await;
        write_frame(&mut writer, &payload).await.map_err(|error| {
            classify_io_fault(
                self.generation,
                FaultClass::Transport,
                "failed to write notification",
                error,
            )
        })
    }

    /// Sends a JSON-RPC request and awaits its response up to `timeout`.
    ///
    /// The oneshot sender is registered in `pending` BEFORE the frame is
    /// written, so a fast response cannot race past its receiver. On write
    /// failure or timeout the entry is removed again so the map cannot
    /// accumulate dead senders.
    pub(crate) async fn send_request(
        &self,
        method: &'static str,
        params: &impl Serialize,
        timeout: Duration,
    ) -> Result<Value, WorkerRequestError> {
        let request_id = self.next_request_id.fetch_add(1, Ordering::Relaxed);
        let (sender, receiver) = oneshot::channel::<PendingOutcome>();
        {
            let mut pending = self.pending.lock().await;
            let previous = pending.insert(request_id, sender);
            // Ids are unique per worker, so a collision should not happen;
            // dropping any stale sender closes its receiver defensively.
            if let Some(previous_sender) = previous {
                drop(previous_sender);
            }
        }

        let payload = json!({
            "jsonrpc": "2.0",
            "id": request_id,
            "method": method,
            "params": params,
        });

        // Hold the writer lock only for the duration of the write, never
        // across the response await below.
        let write_result = {
            let mut writer = self.writer.lock().await;
            write_frame(&mut writer, &payload).await
        };

        if let Err(error) = write_result {
            // Un-register the request: no frame went out, so no response
            // will ever arrive for this id.
            let mut pending = self.pending.lock().await;
            let removed = pending.remove(&request_id);
            if let Some(sender) = removed {
                drop(sender);
            }
            return Err(WorkerRequestError::Fault(classify_io_fault(
                self.generation,
                FaultClass::Transport,
                "failed to write request",
                error,
            )));
        }

        match tokio::time::timeout(timeout, receiver).await {
            Ok(Ok(PendingOutcome::Result(value))) => Ok(value),
            Ok(Ok(PendingOutcome::ResponseError(error))) => {
                Err(WorkerRequestError::Response(error))
            }
            Ok(Ok(PendingOutcome::TransportFault(fault))) => Err(WorkerRequestError::Fault(fault)),
            // Sender dropped without a value: reader loop died mid-flight.
            Ok(Err(_closed)) => Err(WorkerRequestError::Fault(Fault::new(
                self.generation,
                FaultClass::Transport,
                FaultCode::UnexpectedEof,
                FaultDetail::new("response channel closed before result"),
            ))),
            Err(_elapsed) => {
                // Timed out: remove the entry so a late response is treated
                // as unknown rather than delivered to a dead receiver.
                let mut pending = self.pending.lock().await;
                let removed = pending.remove(&request_id);
                if let Some(sender) = removed {
                    drop(sender);
                }
                Err(WorkerRequestError::Fault(Fault::new(
                    self.generation,
                    FaultClass::Timeout,
                    FaultCode::RequestTimedOut,
                    FaultDetail::new(format!("request timed out for method {method}")),
                )))
            }
        }
    }

    /// Kills and reaps the child process, then aborts both background
    /// pump tasks. Kill/wait failures are logged at debug and ignored.
    pub(crate) async fn terminate(&self) {
        let mut child = self.child.lock().await;
        // `id()` is Some only while the process has not been reaped yet.
        if child.id().is_some()
            && let Err(error) = child.kill().await
        {
            debug!(
                generation = self.generation.get(),
                "failed to kill rust-analyzer process cleanly: {error}"
            );
        }
        if let Err(error) = child.wait().await {
            debug!(
                generation = self.generation.get(),
                "failed to wait rust-analyzer process cleanly: {error}"
            );
        }

        if let Some(task) = self.reader_task.lock().await.take() {
            task.abort();
        }
        if let Some(task) = self.stderr_task.lock().await.take() {
            task.abort();
        }
    }
}
+
/// Spawns one rust-analyzer process and performs the LSP startup handshake.
///
/// Steps: spawn with piped stdio in the workspace root, take all three
/// pipes, start the stdout/stderr pump tasks, send `initialize` (bounded
/// by the startup timeout) and the `initialized` notification. On any
/// handshake failure the process is terminated before the fault is
/// returned, so no orphan worker is left behind.
pub(crate) async fn spawn_worker(
    config: &EngineConfig,
    generation: Generation,
) -> Result<WorkerHandle, Fault> {
    let mut command = Command::new(&config.rust_analyzer_binary);
    // `let _… =` bindings discard the `&mut Command` returned by each
    // builder call — presumably a house style; NOTE(review): confirm.
    let _args = command.args(&config.rust_analyzer_args);
    let _envs = command.envs(config.rust_analyzer_env.iter().cloned());
    let _configured = command
        .current_dir(config.workspace_root.as_path())
        .stdin(Stdio::piped())
        .stdout(Stdio::piped())
        .stderr(Stdio::piped());

    let mut child = command.spawn().map_err(|error| {
        classify_io_fault(
            generation,
            FaultClass::Process,
            "failed to spawn rust-analyzer",
            error,
        )
    })?;

    // All three pipes were requested above, so their absence is a bug in
    // the spawn configuration, reported as a SpawnFailed fault.
    let stdin = child.stdin.take().ok_or_else(|| {
        Fault::new(
            generation,
            FaultClass::Process,
            FaultCode::SpawnFailed,
            FaultDetail::new("missing stdin pipe from rust-analyzer process"),
        )
    })?;
    let stdout = child.stdout.take().ok_or_else(|| {
        Fault::new(
            generation,
            FaultClass::Process,
            FaultCode::SpawnFailed,
            FaultDetail::new("missing stdout pipe from rust-analyzer process"),
        )
    })?;
    let stderr = child.stderr.take().ok_or_else(|| {
        Fault::new(
            generation,
            FaultClass::Process,
            FaultCode::SpawnFailed,
            FaultDetail::new("missing stderr pipe from rust-analyzer process"),
        )
    })?;

    let child = Arc::new(Mutex::new(child));
    let writer = Arc::new(Mutex::new(stdin));
    let pending = Arc::new(Mutex::new(
        HashMap::<u64, oneshot::Sender<PendingOutcome>>::new(),
    ));
    let next_request_id = Arc::new(AtomicU64::new(1));
    let (terminal_fault_tx, terminal_fault_rx) = watch::channel(None::<Fault>);

    // Stdout pump: dispatches response frames to pending requests and
    // publishes the terminal fault when the stream dies.
    let reader_task = {
        let pending = Arc::clone(&pending);
        let terminal_fault_tx = terminal_fault_tx.clone();
        tokio::spawn(async move {
            read_stdout_loop(generation, stdout, pending, terminal_fault_tx).await;
        })
    };

    // Stderr pump: mirrors diagnostics into debug logs.
    let stderr_task = tokio::spawn(async move {
        stream_stderr(generation, stderr).await;
    });

    let handle = WorkerHandle {
        generation,
        child,
        writer,
        pending,
        next_request_id,
        terminal_fault_rx,
        reader_task: Arc::new(Mutex::new(Some(reader_task))),
        stderr_task: Arc::new(Mutex::new(Some(stderr_task))),
    };

    // LSP handshake: `initialize` request, then `initialized` notification.
    let initialize_params = build_initialize_params(config)?;
    let startup = handle
        .send_request("initialize", &initialize_params, config.startup_timeout)
        .await;
    if let Err(error) = startup {
        handle.terminate().await;
        return Err(map_worker_request_error(generation, error));
    }

    let initialized_params = json!({});
    let initialized_result = handle
        .send_notification("initialized", &initialized_params)
        .await
        .map_err(|fault| {
            handle_fault_notification(generation, "initialized notification failed", fault)
        });
    if let Err(fault) = initialized_result {
        handle.terminate().await;
        return Err(fault);
    }

    Ok(handle)
}
+
+fn map_worker_request_error(generation: Generation, error: WorkerRequestError) -> Fault {
+ match error {
+ WorkerRequestError::Fault(fault) => fault,
+ WorkerRequestError::Response(response) => Fault::new(
+ generation,
+ FaultClass::Protocol,
+ FaultCode::InvalidFrame,
+ FaultDetail::new(format!(
+ "initialize returned LSP error {}: {}",
+ response.code, response.message
+ )),
+ ),
+ }
+}
+
+fn handle_fault_notification(generation: Generation, context: &'static str, fault: Fault) -> Fault {
+ let detail = FaultDetail::new(format!("{context}: {}", fault.detail.message));
+ Fault::new(generation, fault.class, fault.code, detail)
+}
+
/// Assembles LSP `initialize` params: root URI, one workspace folder,
/// static client capabilities, and client info.
///
/// The folder name falls back to "workspace" when the root path has no
/// UTF-8 file name. NOTE(review): the URI-conversion fault is stamped with
/// the genesis generation rather than the caller's — confirm intended.
fn build_initialize_params(config: &EngineConfig) -> Result<Value, Fault> {
    let root_uri = Url::from_directory_path(config.workspace_root.as_path()).map_err(|()| {
        Fault::new(
            Generation::genesis(),
            FaultClass::Protocol,
            FaultCode::InvalidFrame,
            FaultDetail::new("workspace root cannot be represented as file URI"),
        )
    })?;
    let folder_name = config
        .workspace_root
        .as_path()
        .file_name()
        .and_then(|value| value.to_str())
        .unwrap_or("workspace")
        .to_owned();
    let root_uri_string = root_uri.to_string();
    Ok(json!({
        "processId": std::process::id(),
        "rootUri": root_uri_string.clone(),
        "capabilities": build_client_capabilities(),
        "workspaceFolders": [{
            "uri": root_uri_string,
            "name": folder_name,
        }],
        "trace": "off",
        "clientInfo": {
            "name": "adequate-rust-mcp",
            "version": env!("CARGO_PKG_VERSION"),
        }
    }))
}
+
/// Static LSP client-capability advertisement sent in `initialize`.
///
/// Dynamic registration is declined for every feature; the client accepts
/// markdown and plaintext content, supports document-change workspace
/// edits, hierarchical document symbols, and utf-8/utf-16 position
/// encodings. `symbol_kind_values` enumerates SymbolKind values 1..=26.
fn build_client_capabilities() -> Value {
    let symbol_kind_values = (1_u32..=26).collect::<Vec<_>>();
    json!({
        "workspace": {
            "applyEdit": true,
            "workspaceEdit": {
                "documentChanges": true,
                "resourceOperations": ["create", "rename", "delete"],
            },
            "symbol": {
                "dynamicRegistration": false,
                "resolveSupport": {
                    "properties": ["location.range", "containerName"],
                },
            },
            "diagnostics": {
                "refreshSupport": true,
            },
            "executeCommand": {
                "dynamicRegistration": false,
            },
            "workspaceFolders": true,
            "configuration": true,
        },
        "textDocument": {
            "synchronization": {
                "dynamicRegistration": false,
                "willSave": false,
                "didSave": true,
                "willSaveWaitUntil": false,
            },
            "hover": {
                "dynamicRegistration": false,
                "contentFormat": ["markdown", "plaintext"],
            },
            "definition": {
                "dynamicRegistration": false,
                "linkSupport": true,
            },
            "declaration": {
                "dynamicRegistration": false,
                "linkSupport": true,
            },
            "typeDefinition": {
                "dynamicRegistration": false,
                "linkSupport": true,
            },
            "implementation": {
                "dynamicRegistration": false,
                "linkSupport": true,
            },
            "references": {
                "dynamicRegistration": false,
            },
            "documentHighlight": {
                "dynamicRegistration": false,
            },
            "documentSymbol": {
                "dynamicRegistration": false,
                "hierarchicalDocumentSymbolSupport": true,
                "symbolKind": {
                    "valueSet": symbol_kind_values,
                },
            },
            "completion": {
                "dynamicRegistration": false,
                "contextSupport": true,
                "completionItem": {
                    "snippetSupport": true,
                    "documentationFormat": ["markdown", "plaintext"],
                    "resolveSupport": {
                        "properties": ["documentation", "detail", "additionalTextEdits"],
                    },
                },
            },
            "signatureHelp": {
                "dynamicRegistration": false,
            },
            "codeAction": {
                "dynamicRegistration": false,
                "isPreferredSupport": true,
                "codeActionLiteralSupport": {
                    "codeActionKind": {
                        "valueSet": [
                            "",
                            "quickfix",
                            "refactor",
                            "refactor.extract",
                            "refactor.inline",
                            "refactor.rewrite",
                            "source",
                            "source.organizeImports",
                        ],
                    },
                },
            },
            "codeLens": {
                "dynamicRegistration": false,
            },
            "documentLink": {
                "dynamicRegistration": false,
                "tooltipSupport": true,
            },
            "colorProvider": {
                "dynamicRegistration": false,
            },
            "linkedEditingRange": {
                "dynamicRegistration": false,
            },
            "rename": {
                "dynamicRegistration": false,
                "prepareSupport": true,
            },
            "typeHierarchy": {
                "dynamicRegistration": false,
            },
            "inlineValue": {
                "dynamicRegistration": false,
            },
            "moniker": {
                "dynamicRegistration": false,
            },
            "diagnostic": {
                "dynamicRegistration": false,
            },
            "documentFormatting": {
                "dynamicRegistration": false,
            },
            "documentRangeFormatting": {
                "dynamicRegistration": false,
            },
            "documentOnTypeFormatting": {
                "dynamicRegistration": false,
            },
            "foldingRange": {
                "dynamicRegistration": false,
            },
            "selectionRange": {
                "dynamicRegistration": false,
            },
            "inlayHint": {
                "dynamicRegistration": false,
                "resolveSupport": {
                    "properties": ["tooltip", "textEdits", "label.tooltip", "label.location", "label.command"],
                },
            },
            "semanticTokens": {
                "dynamicRegistration": false,
                "requests": {
                    "full": {
                        "delta": true,
                    },
                    "range": true,
                },
                "tokenTypes": [
                    "namespace", "type", "class", "enum", "interface", "struct", "typeParameter",
                    "parameter", "variable", "property", "enumMember", "event", "function",
                    "method", "macro", "keyword", "modifier", "comment", "string", "number",
                    "regexp", "operator",
                ],
                "tokenModifiers": [
                    "declaration", "definition", "readonly", "static", "deprecated", "abstract",
                    "async", "modification", "documentation", "defaultLibrary",
                ],
                "formats": ["relative"],
                "multilineTokenSupport": true,
                "overlappingTokenSupport": true,
            },
            "publishDiagnostics": {
                "relatedInformation": true,
            },
        },
        "window": {
            "workDoneProgress": true,
        },
        "general": {
            "positionEncodings": ["utf-8", "utf-16"],
        },
    })
}
+
+async fn stream_stderr(generation: Generation, stderr: tokio::process::ChildStderr) {
+ let mut reader = BufReader::new(stderr).lines();
+ loop {
+ match reader.next_line().await {
+ Ok(Some(line)) => {
+ debug!(
+ generation = generation.get(),
+ "rust-analyzer stderr: {line}"
+ );
+ }
+ Ok(None) => break,
+ Err(error) => {
+ debug!(
+ generation = generation.get(),
+ "rust-analyzer stderr stream failed: {error}"
+ );
+ break;
+ }
+ }
+ }
+}
+
/// Pumps framed JSON-RPC messages off the worker's stdout.
///
/// Runs until a read or dispatch failure, then publishes that failure as
/// the terminal fault (failing all pending requests) and exits. A clean
/// EOF surfaces here as an `UnexpectedEof` read error.
async fn read_stdout_loop(
    generation: Generation,
    stdout: ChildStdout,
    pending: Arc<Mutex<HashMap<u64, oneshot::Sender<PendingOutcome>>>>,
    terminal_fault_tx: watch::Sender<Option<Fault>>,
) {
    let mut reader = BufReader::new(stdout);
    loop {
        match read_frame(&mut reader).await {
            Ok(frame) => {
                // Only malformed frames make dispatch fail; unknown ids and
                // notifications are tolerated inside dispatch_frame.
                if let Err(fault) = dispatch_frame(generation, &pending, &frame).await {
                    emit_terminal_fault(&terminal_fault_tx, &pending, fault).await;
                    break;
                }
            }
            Err(error) => {
                let fault = classify_io_fault(
                    generation,
                    FaultClass::Transport,
                    "failed to read frame",
                    error,
                );
                emit_terminal_fault(&terminal_fault_tx, &pending, fault).await;
                break;
            }
        }
    }
}
+
+async fn emit_terminal_fault(
+ terminal_fault_tx: &watch::Sender<Option<Fault>>,
+ pending: &Arc<Mutex<HashMap<u64, oneshot::Sender<PendingOutcome>>>>,
+ fault: Fault,
+) {
+ if let Err(error) = terminal_fault_tx.send(Some(fault.clone())) {
+ warn!("failed to publish terminal fault: {error}");
+ }
+ let mut pending_guard = pending.lock().await;
+ for sender in pending_guard.drain().map(|(_id, sender)| sender) {
+ if let Err(outcome) = sender.send(PendingOutcome::TransportFault(fault.clone())) {
+ drop(outcome);
+ }
+ }
+}
+
/// Routes one decoded JSON-RPC frame to its pending request, if any.
///
/// Frames without a numeric `id` (server notifications and server→client
/// requests) are silently accepted; responses for unknown ids (e.g.
/// already timed out) are logged and dropped. Returns `Err` only for
/// malformed frames, which tears down the transport.
async fn dispatch_frame(
    generation: Generation,
    pending: &Arc<Mutex<HashMap<u64, oneshot::Sender<PendingOutcome>>>>,
    frame: &[u8],
) -> Result<(), Fault> {
    let value: Value = serde_json::from_slice(frame).map_err(|error| {
        Fault::new(
            generation,
            FaultClass::Protocol,
            FaultCode::InvalidJson,
            FaultDetail::new(format!("failed to deserialize JSON-RPC frame: {error}")),
        )
    })?;

    let response_id = value.get("id").and_then(Value::as_u64);
    let Some(response_id) = response_id else {
        return Ok(());
    };

    let mut pending_guard = pending.lock().await;
    let Some(sender) = pending_guard.remove(&response_id) else {
        warn!(
            generation = generation.get(),
            response_id, "received response for unknown request id"
        );
        return Ok(());
    };
    // Release the map lock before delivering the outcome.
    drop(pending_guard);

    // A `"result": null` still yields Some(Value::Null) here, so empty
    // results (e.g. hover misses) resolve normally.
    if let Some(result) = value.get("result") {
        if let Err(outcome) = sender.send(PendingOutcome::Result(result.clone())) {
            drop(outcome);
        }
        return Ok(());
    }

    if let Some(error_value) = value.get("error") {
        let error: RpcErrorPayload =
            serde_json::from_value(error_value.clone()).map_err(|error| {
                Fault::new(
                    generation,
                    FaultClass::Protocol,
                    FaultCode::InvalidJson,
                    FaultDetail::new(format!(
                        "failed to deserialize JSON-RPC error payload: {error}"
                    )),
                )
            })?;
        if let Err(outcome) = sender.send(PendingOutcome::ResponseError(error)) {
            drop(outcome);
        }
        return Ok(());
    }

    // A response must carry either `result` or `error` per JSON-RPC 2.0.
    Err(Fault::new(
        generation,
        FaultClass::Protocol,
        FaultCode::InvalidFrame,
        FaultDetail::new("response frame missing both result and error"),
    ))
}
+
+async fn read_frame(reader: &mut BufReader<ChildStdout>) -> Result<Vec<u8>, io::Error> {
+ let mut content_length = None::<usize>;
+ loop {
+ let mut header_line = String::new();
+ let bytes_read = reader.read_line(&mut header_line).await?;
+ if bytes_read == 0 {
+ return Err(io::Error::new(
+ io::ErrorKind::UnexpectedEof,
+ "EOF while reading headers",
+ ));
+ }
+
+ if header_line == "\r\n" || header_line == "\n" {
+ break;
+ }
+
+ let trimmed = header_line.trim_end_matches(['\r', '\n']);
+ if let Some(length) = trimmed.strip_prefix("Content-Length:") {
+ let parsed = length.trim().parse::<usize>().map_err(|parse_error| {
+ io::Error::new(
+ io::ErrorKind::InvalidData,
+ format!("invalid Content-Length header: {parse_error}"),
+ )
+ })?;
+ content_length = Some(parsed);
+ }
+ }
+
+ let length = content_length.ok_or_else(|| {
+ io::Error::new(io::ErrorKind::InvalidData, "missing Content-Length header")
+ })?;
+
+ let mut payload = vec![0_u8; length];
+ let _bytes_read = reader.read_exact(&mut payload).await?;
+ Ok(payload)
+}
+
+async fn write_frame(writer: &mut ChildStdin, value: &Value) -> Result<(), io::Error> {
+ let payload = serde_json::to_vec(value).map_err(|error| {
+ io::Error::new(
+ io::ErrorKind::InvalidData,
+ format!("failed to serialize JSON-RPC payload: {error}"),
+ )
+ })?;
+ let header = format!("Content-Length: {}\r\n\r\n", payload.len());
+ writer.write_all(header.as_bytes()).await?;
+ writer.write_all(&payload).await?;
+ writer.flush().await?;
+ Ok(())
+}
+
+fn classify_io_fault(
+ generation: Generation,
+ class: FaultClass,
+ context: &'static str,
+ error: io::Error,
+) -> Fault {
+ let code = match error.kind() {
+ io::ErrorKind::BrokenPipe => FaultCode::BrokenPipe,
+ io::ErrorKind::UnexpectedEof => FaultCode::UnexpectedEof,
+ _ => match class {
+ FaultClass::Process => FaultCode::SpawnFailed,
+ _ => FaultCode::InvalidFrame,
+ },
+ };
+ Fault::new(
+ generation,
+ class,
+ code,
+ FaultDetail::new(format!("{context}: {error}")),
+ )
+}
diff --git a/crates/ra-mcp-engine/src/supervisor.rs b/crates/ra-mcp-engine/src/supervisor.rs
new file mode 100644
index 0000000..f0c7ea6
--- /dev/null
+++ b/crates/ra-mcp-engine/src/supervisor.rs
@@ -0,0 +1,1257 @@
+use crate::{
+ config::EngineConfig,
+ error::{EngineError, EngineResult},
+ lsp_transport::{WorkerHandle, WorkerRequestError, spawn_worker},
+};
+use lsp_types::{
+ DiagnosticSeverity, GotoDefinitionResponse, Hover, HoverContents, Location, LocationLink,
+ MarkedString, Position, Range, Uri, WorkspaceEdit,
+};
+use ra_mcp_domain::{
+ fault::{Fault, RecoveryDirective},
+ lifecycle::{DynamicLifecycle, LifecycleSnapshot},
+ types::{
+ InvariantViolation, OneIndexedColumn, OneIndexedLine, SourceFilePath, SourceLocation,
+ SourcePoint, SourcePosition, SourceRange,
+ },
+};
+use serde::{Deserialize, Serialize, de::DeserializeOwned};
+use serde_json::Value;
+use std::{
+ cmp::min,
+ collections::HashMap,
+ fs,
+ sync::Arc,
+ time::{Duration, Instant, SystemTime},
+};
+use tokio::{sync::Mutex, time::sleep};
+use tracing::{debug, warn};
+use url::Url;
+
// MCP-facing result types: serde-serializable mirrors of LSP results,
// expressed with the domain's source-position types.

/// Hover response payload.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct HoverPayload {
    /// Rendered markdown/text content, if available.
    pub rendered: Option<String>,
    /// Symbol range, if rust-analyzer provided one.
    pub range: Option<SourceRange>,
}

/// Diagnostic severity level.
// NOTE(review): presumably mapped from LSP `DiagnosticSeverity` by the
// supervisor; confirm handling of absent/unknown severities there.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum DiagnosticLevel {
    /// Error severity.
    Error,
    /// Warning severity.
    Warning,
    /// Informational severity.
    Information,
    /// Hint severity.
    Hint,
}

/// One diagnostic record.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct DiagnosticEntry {
    /// Affected range.
    pub range: SourceRange,
    /// Severity.
    pub level: DiagnosticLevel,
    /// Optional diagnostic code.
    pub code: Option<String>,
    /// User-facing diagnostic message.
    pub message: String,
}

/// Diagnostics report for a single file.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct DiagnosticsReport {
    /// Entries returned by rust-analyzer.
    pub diagnostics: Vec<DiagnosticEntry>,
}

/// Summary of rename operation impact.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct RenameReport {
    /// Number of files touched by the edit.
    pub files_touched: u64,
    /// Number of text edits in total.
    pub edits_applied: u64,
}
+
// Telemetry snapshots: immutable, serializable copies of the supervisor's
// internal counters, safe to hand to callers.

/// Aggregate runtime telemetry snapshot for engine behavior.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct TelemetrySnapshot {
    /// Process uptime in milliseconds.
    pub uptime_ms: u64,
    /// Current lifecycle snapshot.
    pub lifecycle: LifecycleSnapshot,
    /// Number of consecutive failures currently tracked by supervisor.
    pub consecutive_failures: u32,
    /// Number of worker restarts performed.
    pub restart_count: u64,
    /// Global counters across all requests.
    pub totals: TelemetryTotals,
    /// Per-method counters and latency aggregates.
    pub methods: Vec<MethodTelemetrySnapshot>,
    /// Last fault that triggered worker restart, if any.
    pub last_fault: Option<Fault>,
}

/// Total request/fault counters.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct TelemetryTotals {
    /// Total request attempts issued to rust-analyzer.
    pub request_count: u64,
    /// Successful request attempts.
    pub success_count: u64,
    /// LSP response error attempts.
    pub response_error_count: u64,
    /// Transport/protocol fault attempts.
    pub transport_fault_count: u64,
    /// Retry attempts performed.
    pub retry_count: u64,
}

/// Per-method telemetry aggregate.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct MethodTelemetrySnapshot {
    /// LSP method name.
    pub method: String,
    /// Total request attempts for this method.
    pub request_count: u64,
    /// Successful attempts.
    pub success_count: u64,
    /// LSP response error attempts.
    pub response_error_count: u64,
    /// Transport/protocol fault attempts.
    pub transport_fault_count: u64,
    /// Retry attempts for this method.
    pub retry_count: u64,
    /// Last observed attempt latency in milliseconds.
    pub last_latency_ms: Option<u64>,
    /// Maximum observed attempt latency in milliseconds.
    pub max_latency_ms: u64,
    /// Average attempt latency in milliseconds.
    pub avg_latency_ms: u64,
    /// Last error detail for this method, if any.
    pub last_error: Option<String>,
}
+
/// Known request kinds, used to pick method strings and retry policies.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
enum RequestMethod {
    Hover,
    Definition,
    References,
    Rename,
    DocumentDiagnostic,
    /// Escape hatch for any other LSP method, identified by its name.
    Raw(&'static str),
}

impl RequestMethod {
    /// Maps the variant to its LSP wire method name.
    const fn as_lsp_method(self) -> &'static str {
        match self {
            Self::Hover => "textDocument/hover",
            Self::Definition => "textDocument/definition",
            Self::References => "textDocument/references",
            Self::Rename => "textDocument/rename",
            Self::DocumentDiagnostic => "textDocument/diagnostic",
            Self::Raw(method) => method,
        }
    }

    /// Decides whether a response error warrants a retry, and after what
    /// delay. `None` means the error is surfaced to the caller as-is.
    fn retry_delay(self, payload: &crate::lsp_transport::RpcErrorPayload) -> Option<Duration> {
        // Case 1: generic transient errors (content-modified/cancelled).
        if self.supports_transient_response_retry()
            && is_transient_response_error(payload.code, payload.message.as_str())
        {
            return Some(self.transient_response_retry_delay());
        }
        // Case 2: InvalidParams (-32602) with "No references found at
        // position" — NOTE(review): presumably emitted while rust-analyzer
        // is still indexing the workspace; confirm against rust-analyzer.
        let retryable_method = matches!(
            self.as_lsp_method(),
            "textDocument/rename"
                | "textDocument/prepareRename"
                | "textDocument/definition"
                | "textDocument/references"
        );
        if !retryable_method
            || payload.code != -32602
            || !payload.message.contains("No references found at position")
        {
            return None;
        }
        // Rename is destructive, so wait longer before retrying it.
        match self.as_lsp_method() {
            "textDocument/rename" | "textDocument/prepareRename" => {
                Some(Duration::from_millis(1500))
            }
            _ => Some(Duration::from_millis(250)),
        }
    }

    /// Methods eligible for the generic transient-error retry path.
    const fn supports_transient_response_retry(self) -> bool {
        matches!(
            self,
            Self::Hover
                | Self::Definition
                | Self::References
                | Self::Rename
                | Self::DocumentDiagnostic
        )
    }

    /// Per-method delay used for the transient-error retry path.
    // Raw(_) never reaches here: supports_transient_response_retry is
    // false for it, so its 0ms delay is a defensive placeholder.
    fn transient_response_retry_delay(self) -> Duration {
        match self {
            Self::DocumentDiagnostic => Duration::from_millis(250),
            Self::Rename => Duration::from_millis(350),
            Self::Hover | Self::Definition | Self::References => Duration::from_millis(150),
            Self::Raw(_) => Duration::from_millis(0),
        }
    }
}
+
/// Reports whether an LSP response error is transient, i.e. the request
/// should be retried after a short delay rather than surfaced.
///
/// Matches the LSP `ContentModified` (-32801) and `ServerCancelled`
/// (-32802) codes, plus well-known staleness/cancellation message texts
/// compared case-insensitively.
fn is_transient_response_error(code: i64, message: &str) -> bool {
    if code == -32801 || code == -32802 {
        return true;
    }
    let lowered = message.to_ascii_lowercase();
    [
        "content modified",
        "document changed",
        "server cancelled",
        "request cancelled",
        "request canceled",
    ]
    .iter()
    .any(|needle| lowered.contains(needle))
}
+
+#[derive(Debug, Clone, Serialize)]
+struct TextDocumentIdentifierWire {
+ uri: String,
+}
+
+#[derive(Debug, Clone, Copy, Serialize)]
+struct PositionWire {
+ line: u32,
+ character: u32,
+}
+
+impl From<SourcePoint> for PositionWire {
+ fn from(value: SourcePoint) -> Self {
+ Self {
+ line: value.line().to_zero_indexed(),
+ character: value.column().to_zero_indexed(),
+ }
+ }
+}
+
+#[derive(Debug, Clone, Serialize)]
+struct TextDocumentPositionParamsWire {
+ #[serde(rename = "textDocument")]
+ text_document: TextDocumentIdentifierWire,
+ position: PositionWire,
+}
+
+#[derive(Debug, Clone, Serialize)]
+struct ReferencesContextWire {
+ #[serde(rename = "includeDeclaration")]
+ include_declaration: bool,
+}
+
+#[derive(Debug, Clone, Serialize)]
+struct ReferencesParamsWire {
+ #[serde(rename = "textDocument")]
+ text_document: TextDocumentIdentifierWire,
+ position: PositionWire,
+ context: ReferencesContextWire,
+}
+
+#[derive(Debug, Clone, Serialize)]
+struct RenameParamsWire {
+ #[serde(rename = "textDocument")]
+ text_document: TextDocumentIdentifierWire,
+ position: PositionWire,
+ #[serde(rename = "newName")]
+ new_name: String,
+}
+
+#[derive(Debug, Clone, Serialize)]
+struct DocumentDiagnosticParamsWire {
+ #[serde(rename = "textDocument")]
+ text_document: TextDocumentIdentifierWire,
+}
+
+#[derive(Debug, Clone, Serialize)]
+struct VersionedTextDocumentIdentifierWire {
+ uri: String,
+ version: i32,
+}
+
+#[derive(Debug, Clone, Serialize)]
+struct TextDocumentContentChangeEventWire {
+ text: String,
+}
+
+/// Wire form of LSP `DidChangeTextDocumentParams`.
+// camelCase rename covers "textDocument" and "contentChanges".
+#[derive(Debug, Clone, Serialize)]
+#[serde(rename_all = "camelCase")]
+struct DidChangeTextDocumentParamsWire {
+    text_document: VersionedTextDocumentIdentifierWire,
+    content_changes: Vec<TextDocumentContentChangeEventWire>,
+}
+
+/// Wire form of LSP `TextDocumentItem`, sent on `didOpen`.
+// camelCase rename maps `language_id` -> "languageId"; `uri`, `version`
+// and `text` are unchanged by the rename.
+#[derive(Debug, Clone, Serialize)]
+#[serde(rename_all = "camelCase")]
+struct TextDocumentItemWire {
+    uri: String,
+    // Always "rust" in this engine (see `Supervisor::synchronize_document`).
+    language_id: &'static str,
+    version: i32,
+    text: String,
+}
+
+/// Wire form of LSP `DidOpenTextDocumentParams`.
+#[derive(Debug, Clone, Serialize)]
+#[serde(rename_all = "camelCase")]
+struct DidOpenTextDocumentParamsWire {
+    text_document: TextDocumentItemWire,
+}
+
+/// Resilient engine façade.
+///
+/// Cheap to clone: all clones share one supervisor behind an async mutex.
+#[derive(Clone)]
+pub struct Engine {
+    // Single point of serialization for worker lifecycle, document sync
+    // and telemetry bookkeeping.
+    supervisor: Arc<Mutex<Supervisor>>,
+}
+
+/// Mutable engine state guarded by the `Engine` mutex: lifecycle machine,
+/// worker handle, per-document sync state, and telemetry counters.
+struct Supervisor {
+    config: EngineConfig,
+    lifecycle: DynamicLifecycle,
+    // `None` until a worker is spawned, and again after one is recycled.
+    worker: Option<WorkerHandle>,
+    // Reset to zero on success; drives the exponential restart backoff.
+    consecutive_failures: u32,
+    // Documents already pushed to the worker via `didOpen`, keyed by path.
+    open_documents: HashMap<SourceFilePath, OpenDocumentState>,
+    telemetry: TelemetryState,
+}
+
+/// Sync state for a document the worker currently has open.
+#[derive(Debug, Clone, PartialEq, Eq)]
+struct OpenDocumentState {
+    // LSP document version; incremented on every `didChange`.
+    version: i32,
+    // On-disk fingerprint last pushed to the worker.
+    fingerprint: SourceFileFingerprint,
+}
+
+/// Cheap change detector: byte length plus mtime in nanoseconds since epoch.
+// NOTE(review): mtime resolution is filesystem-dependent, so equal
+// fingerprints do not strictly guarantee identical contents — confirm this
+// is an acceptable trade-off for document sync.
+#[derive(Debug, Clone, PartialEq, Eq)]
+struct SourceFileFingerprint {
+    byte_len: u64,
+    modified_nanos_since_epoch: u128,
+}
+
+/// Aggregated request/fault telemetry accumulated since engine construction.
+#[derive(Debug)]
+struct TelemetryState {
+    // Anchor for the uptime reported in snapshots.
+    started_at: Instant,
+    totals: TelemetryTotalsState,
+    // Per-method counters, keyed by the static LSP method name.
+    methods: HashMap<&'static str, MethodTelemetryState>,
+    restart_count: u64,
+    last_fault: Option<Fault>,
+}
+
+/// Engine-wide request counters; all increments saturate rather than wrap.
+#[derive(Debug, Default)]
+struct TelemetryTotalsState {
+    request_count: u64,
+    success_count: u64,
+    response_error_count: u64,
+    transport_fault_count: u64,
+    retry_count: u64,
+}
+
+/// Per-method request counters and latency aggregates.
+#[derive(Debug, Default)]
+struct MethodTelemetryState {
+    request_count: u64,
+    success_count: u64,
+    response_error_count: u64,
+    transport_fault_count: u64,
+    retry_count: u64,
+    // Sum of all observed latencies; u128 so the running sum cannot overflow.
+    total_latency_ms: u128,
+    last_latency_ms: Option<u64>,
+    max_latency_ms: u64,
+    // Cleared on success, set on every failure (see `TelemetryState`).
+    last_error: Option<String>,
+}
+
+impl Engine {
+    /// Creates a new engine.
+    ///
+    /// No worker is spawned here; the first request lazily starts one via
+    /// `Supervisor::ensure_worker`.
+    #[must_use]
+    pub fn new(config: EngineConfig) -> Self {
+        Self {
+            supervisor: Arc::new(Mutex::new(Supervisor::new(config))),
+        }
+    }
+
+    /// Returns current lifecycle snapshot.
+    pub async fn lifecycle_snapshot(&self) -> LifecycleSnapshot {
+        let supervisor = self.supervisor.lock().await;
+        supervisor.snapshot()
+    }
+
+    /// Returns aggregate request/fault telemetry snapshot.
+    pub async fn telemetry_snapshot(&self) -> TelemetrySnapshot {
+        let supervisor = self.supervisor.lock().await;
+        supervisor.telemetry_snapshot()
+    }
+
+    /// Executes hover request.
+    ///
+    /// A `null` hover response maps to an empty payload rather than an error.
+    pub async fn hover(&self, position: SourcePosition) -> EngineResult<HoverPayload> {
+        let document_hint = Some(position.file_path().clone());
+        let request = text_document_position_params(&position)?;
+        let hover = self
+            .issue_typed_request::<_, Option<Hover>>(RequestMethod::Hover, &request, document_hint)
+            .await?;
+        // Render contents and convert the optional range; an invalid range
+        // surfaces as an error rather than being silently dropped.
+        let payload = hover
+            .map(|hover| -> Result<HoverPayload, EngineError> {
+                let range = hover
+                    .range
+                    .map(|range| range_to_source_range(position.file_path(), range))
+                    .transpose()?;
+                Ok(HoverPayload {
+                    rendered: Some(render_hover_contents(hover.contents)),
+                    range,
+                })
+            })
+            .transpose()?
+            .unwrap_or(HoverPayload {
+                rendered: None,
+                range: None,
+            });
+        Ok(payload)
+    }
+
+    /// Executes definition request.
+    ///
+    /// Normalizes the three LSP response shapes (scalar location, location
+    /// array, location links) into a flat list of `SourceLocation`s.
+    pub async fn definition(&self, position: SourcePosition) -> EngineResult<Vec<SourceLocation>> {
+        let document_hint = Some(position.file_path().clone());
+        let request = text_document_position_params(&position)?;
+        let parsed = self
+            .issue_typed_request::<_, Option<GotoDefinitionResponse>>(
+                RequestMethod::Definition,
+                &request,
+                document_hint,
+            )
+            .await?;
+        let locations = match parsed {
+            None => Vec::new(),
+            Some(GotoDefinitionResponse::Scalar(location)) => {
+                vec![source_location_from_lsp_location(location)?]
+            }
+            Some(GotoDefinitionResponse::Array(locations)) => locations
+                .into_iter()
+                .map(source_location_from_lsp_location)
+                .collect::<Result<Vec<_>, _>>()?,
+            Some(GotoDefinitionResponse::Link(links)) => links
+                .into_iter()
+                .map(source_location_from_lsp_link)
+                .collect::<Result<Vec<_>, _>>()?,
+        };
+        Ok(locations)
+    }
+
+    /// Executes references request.
+    ///
+    /// The declaration site is always included (`includeDeclaration: true`).
+    pub async fn references(&self, position: SourcePosition) -> EngineResult<Vec<SourceLocation>> {
+        let request = ReferencesParamsWire {
+            text_document: text_document_identifier(position.file_path())?,
+            position: PositionWire::from(position.point()),
+            context: ReferencesContextWire {
+                include_declaration: true,
+            },
+        };
+        let parsed = self
+            .issue_typed_request::<_, Option<Vec<Location>>>(
+                RequestMethod::References,
+                &request,
+                Some(position.file_path().clone()),
+            )
+            .await?;
+        parsed
+            .unwrap_or_default()
+            .into_iter()
+            .map(source_location_from_lsp_location)
+            .collect::<Result<Vec<_>, _>>()
+    }
+
+    /// Executes rename request.
+    ///
+    /// Returns a summary (files touched / edits applied) of the workspace
+    /// edit proposed by the server; the edit is not applied here.
+    pub async fn rename_symbol(
+        &self,
+        position: SourcePosition,
+        new_name: String,
+    ) -> EngineResult<RenameReport> {
+        let request = RenameParamsWire {
+            text_document: text_document_identifier(position.file_path())?,
+            position: PositionWire::from(position.point()),
+            new_name,
+        };
+        let edit = self
+            .issue_typed_request::<_, WorkspaceEdit>(
+                RequestMethod::Rename,
+                &request,
+                Some(position.file_path().clone()),
+            )
+            .await?;
+        Ok(summarize_workspace_edit(edit))
+    }
+
+    /// Executes document diagnostics request.
+    pub async fn diagnostics(&self, file_path: SourceFilePath) -> EngineResult<DiagnosticsReport> {
+        let request = DocumentDiagnosticParamsWire {
+            text_document: text_document_identifier(&file_path)?,
+        };
+        let response = self
+            .issue_request(
+                RequestMethod::DocumentDiagnostic,
+                &request,
+                Some(file_path.clone()),
+            )
+            .await?;
+        parse_diagnostics_report(&file_path, response)
+    }
+
+    /// Executes an arbitrary typed LSP request and returns raw JSON payload.
+    ///
+    /// If `params` contains a `textDocument.uri`, that document is synced to
+    /// the worker first, same as for the typed request methods.
+    pub async fn raw_lsp_request(
+        &self,
+        method: &'static str,
+        params: Value,
+    ) -> EngineResult<Value> {
+        let document_hint = source_file_path_hint_from_request_params(&params)?;
+        self.issue_request(RequestMethod::Raw(method), &params, document_hint)
+            .await
+    }
+
+    // Issues a request and deserializes the JSON result into `R`; a shape
+    // mismatch becomes `EngineError::InvalidPayload` tagged with the method.
+    async fn issue_typed_request<P, R>(
+        &self,
+        method: RequestMethod,
+        params: &P,
+        document_hint: Option<SourceFilePath>,
+    ) -> EngineResult<R>
+    where
+        P: Serialize,
+        R: DeserializeOwned,
+    {
+        let response = self.issue_request(method, params, document_hint).await?;
+        serde_json::from_value::<R>(response).map_err(|error| EngineError::InvalidPayload {
+            method: method.as_lsp_method(),
+            message: error.to_string(),
+        })
+    }
+
+    // Core request loop: acquires a (possibly fresh) worker, syncs the hinted
+    // document, sends the request outside the supervisor lock, and retries at
+    // most once on transient response errors or recoverable faults.
+    async fn issue_request<P>(
+        &self,
+        method: RequestMethod,
+        params: &P,
+        document_hint: Option<SourceFilePath>,
+    ) -> EngineResult<Value>
+    where
+        P: Serialize,
+    {
+        // One retry at most: the first attempt plus a single replay.
+        let max_attempts = 2_u8;
+        let mut attempt = 0_u8;
+        while attempt < max_attempts {
+            attempt = attempt.saturating_add(1);
+            // Hold the supervisor lock only for worker acquisition and
+            // document sync; the request itself is awaited without the lock
+            // so other callers are not blocked behind slow requests.
+            let (worker, request_timeout) = {
+                let mut supervisor = self.supervisor.lock().await;
+                let worker = supervisor.ensure_worker().await?;
+                if let Some(file_path) = document_hint.as_ref() {
+                    supervisor.synchronize_document(&worker, file_path).await?;
+                }
+                (worker, supervisor.request_timeout())
+            };
+
+            let attempt_started_at = Instant::now();
+            let result = worker
+                .send_request(method.as_lsp_method(), params, request_timeout)
+                .await;
+            let latency = attempt_started_at.elapsed();
+            match result {
+                Ok(value) => {
+                    let mut supervisor = self.supervisor.lock().await;
+                    supervisor.record_success(method.as_lsp_method(), latency);
+                    return Ok(value);
+                }
+                Err(WorkerRequestError::Response(payload)) => {
+                    // The method's own policy decides whether this response
+                    // error is retryable and how long to wait before replay.
+                    let retry_delay = (attempt < max_attempts)
+                        .then(|| method.retry_delay(&payload))
+                        .flatten();
+                    let should_retry = retry_delay.is_some();
+                    {
+                        let mut supervisor = self.supervisor.lock().await;
+                        supervisor.record_response_error(
+                            method.as_lsp_method(),
+                            latency,
+                            payload.code,
+                            format_lsp_response_error_detail(&payload),
+                            should_retry,
+                        );
+                    }
+
+                    if let Some(retry_delay) = retry_delay {
+                        debug!(
+                            attempt,
+                            method = method.as_lsp_method(),
+                            code = payload.code,
+                            delay_ms = retry_delay.as_millis(),
+                            "retrying request after transient lsp response error"
+                        );
+                        sleep(retry_delay).await;
+                        continue;
+                    }
+                    return Err(EngineError::from(payload));
+                }
+                Err(WorkerRequestError::Fault(fault)) => {
+                    // The fault's directive decides between replaying on the
+                    // same worker, restarting it first, or giving up.
+                    let directive = fault.directive();
+                    let will_retry = matches!(
+                        directive,
+                        RecoveryDirective::RetryInPlace | RecoveryDirective::RestartAndReplay
+                    ) && attempt < max_attempts;
+                    {
+                        let mut supervisor = self.supervisor.lock().await;
+                        supervisor.record_transport_fault(
+                            method.as_lsp_method(),
+                            latency,
+                            fault.detail.message.clone(),
+                            will_retry,
+                        );
+                    }
+
+                    match directive {
+                        RecoveryDirective::RetryInPlace => {
+                            // Keep the current worker; the loop replays the
+                            // request unless attempts are exhausted.
+                            debug!(
+                                attempt,
+                                method = method.as_lsp_method(),
+                                "retrying request in-place after fault"
+                            );
+                            if attempt >= max_attempts {
+                                return Err(EngineError::Fault(fault));
+                            }
+                        }
+                        RecoveryDirective::RestartAndReplay => {
+                            // Recycle the worker (with backoff) before the
+                            // loop spawns a fresh one for the replay.
+                            let mut supervisor = self.supervisor.lock().await;
+                            supervisor.record_fault(fault.clone()).await?;
+                            if attempt >= max_attempts {
+                                return Err(EngineError::Fault(fault));
+                            }
+                            debug!(
+                                attempt,
+                                method = method.as_lsp_method(),
+                                "restarting worker and replaying request"
+                            );
+                        }
+                        RecoveryDirective::AbortRequest => {
+                            // Terminal: recycle the worker and surface the fault.
+                            let mut supervisor = self.supervisor.lock().await;
+                            supervisor.record_fault(fault.clone()).await?;
+                            return Err(EngineError::Fault(fault));
+                        }
+                    }
+                }
+            }
+        }
+        // Defensive fallback: every branch above returns on the final
+        // attempt, so this is only reachable if that invariant breaks.
+        Err(EngineError::Fault(Fault::new(
+            self.lifecycle_generation().await,
+            ra_mcp_domain::fault::FaultClass::Resource,
+            ra_mcp_domain::fault::FaultCode::RequestTimedOut,
+            ra_mcp_domain::fault::FaultDetail::new(format!(
+                "exhausted retries for method {}",
+                method.as_lsp_method()
+            )),
+        )))
+    }
+
+    // Current lifecycle generation, used to tag synthesized faults.
+    async fn lifecycle_generation(&self) -> ra_mcp_domain::types::Generation {
+        let supervisor = self.supervisor.lock().await;
+        supervisor.generation()
+    }
+}
+
+impl TelemetryState {
+    /// Creates an empty telemetry ledger anchored at the current instant.
+    fn new() -> Self {
+        Self {
+            started_at: Instant::now(),
+            totals: TelemetryTotalsState::default(),
+            methods: HashMap::new(),
+            restart_count: 0,
+            last_fault: None,
+        }
+    }
+
+    /// Records a successful request and clears the method's last error.
+    fn record_success(&mut self, method: &'static str, latency: Duration) {
+        self.totals.request_count = self.totals.request_count.saturating_add(1);
+        self.totals.success_count = self.totals.success_count.saturating_add(1);
+        let entry = self.methods.entry(method).or_default();
+        entry.request_count = entry.request_count.saturating_add(1);
+        entry.success_count = entry.success_count.saturating_add(1);
+        entry.record_latency(latency);
+        entry.last_error = None;
+    }
+
+    /// Records an LSP response error for `method`.
+    fn record_response_error(
+        &mut self,
+        method: &'static str,
+        latency: Duration,
+        detail: String,
+        retry_performed: bool,
+    ) {
+        self.record_failure(method, latency, detail, retry_performed, false);
+    }
+
+    /// Records a transport-level fault for `method`.
+    fn record_transport_fault(
+        &mut self,
+        method: &'static str,
+        latency: Duration,
+        detail: String,
+        retry_performed: bool,
+    ) {
+        self.record_failure(method, latency, detail, retry_performed, true);
+    }
+
+    // Shared bookkeeping for failed requests. `transport` selects which
+    // failure counter (transport fault vs response error) is bumped in both
+    // the engine-wide totals and the per-method entry; everything else is
+    // identical between the two failure kinds.
+    fn record_failure(
+        &mut self,
+        method: &'static str,
+        latency: Duration,
+        detail: String,
+        retry_performed: bool,
+        transport: bool,
+    ) {
+        self.totals.request_count = self.totals.request_count.saturating_add(1);
+        if transport {
+            self.totals.transport_fault_count = self.totals.transport_fault_count.saturating_add(1);
+        } else {
+            self.totals.response_error_count = self.totals.response_error_count.saturating_add(1);
+        }
+        if retry_performed {
+            self.totals.retry_count = self.totals.retry_count.saturating_add(1);
+        }
+
+        let entry = self.methods.entry(method).or_default();
+        entry.request_count = entry.request_count.saturating_add(1);
+        if transport {
+            entry.transport_fault_count = entry.transport_fault_count.saturating_add(1);
+        } else {
+            entry.response_error_count = entry.response_error_count.saturating_add(1);
+        }
+        if retry_performed {
+            entry.retry_count = entry.retry_count.saturating_add(1);
+        }
+        entry.record_latency(latency);
+        entry.last_error = Some(detail);
+    }
+
+    /// Counts a worker restart and remembers the fault that caused it.
+    fn record_restart(&mut self, fault: Fault) {
+        self.restart_count = self.restart_count.saturating_add(1);
+        self.last_fault = Some(fault);
+    }
+
+    /// Builds an immutable snapshot; per-method rows are sorted by method
+    /// name for stable output.
+    fn snapshot(
+        &self,
+        lifecycle: LifecycleSnapshot,
+        consecutive_failures: u32,
+    ) -> TelemetrySnapshot {
+        let mut methods = self
+            .methods
+            .iter()
+            .map(|(method, entry)| MethodTelemetrySnapshot {
+                method: (*method).to_owned(),
+                request_count: entry.request_count,
+                success_count: entry.success_count,
+                response_error_count: entry.response_error_count,
+                transport_fault_count: entry.transport_fault_count,
+                retry_count: entry.retry_count,
+                last_latency_ms: entry.last_latency_ms,
+                max_latency_ms: entry.max_latency_ms,
+                avg_latency_ms: entry.average_latency_ms(),
+                last_error: entry.last_error.clone(),
+            })
+            .collect::<Vec<_>>();
+        methods.sort_by(|left, right| left.method.cmp(&right.method));
+
+        let uptime_ms = duration_millis_u64(self.started_at.elapsed());
+        TelemetrySnapshot {
+            uptime_ms,
+            lifecycle,
+            consecutive_failures,
+            restart_count: self.restart_count,
+            totals: TelemetryTotals {
+                request_count: self.totals.request_count,
+                success_count: self.totals.success_count,
+                response_error_count: self.totals.response_error_count,
+                transport_fault_count: self.totals.transport_fault_count,
+                retry_count: self.totals.retry_count,
+            },
+            methods,
+            last_fault: self.last_fault.clone(),
+        }
+    }
+}
+
+impl MethodTelemetryState {
+    /// Folds one observed latency into the running aggregates.
+    fn record_latency(&mut self, latency: Duration) {
+        let observed_ms = duration_millis_u64(latency);
+        self.last_latency_ms = Some(observed_ms);
+        if observed_ms > self.max_latency_ms {
+            self.max_latency_ms = observed_ms;
+        }
+        self.total_latency_ms = self.total_latency_ms.saturating_add(u128::from(observed_ms));
+    }
+
+    /// Mean latency across all recorded requests, saturating at `u64::MAX`.
+    /// Zero when nothing has been recorded yet.
+    fn average_latency_ms(&self) -> u64 {
+        match self.request_count {
+            0 => 0,
+            count => {
+                let mean = self.total_latency_ms / u128::from(count);
+                u64::try_from(mean).unwrap_or(u64::MAX)
+            }
+        }
+    }
+}
+
+/// Converts a duration to whole milliseconds, saturating at `u64::MAX`
+/// instead of truncating.
+fn duration_millis_u64(duration: Duration) -> u64 {
+    u64::try_from(duration.as_millis()).unwrap_or(u64::MAX)
+}
+
+impl Supervisor {
+    // Starts cold with no worker; the first request triggers `spawn_worker`.
+    fn new(config: EngineConfig) -> Self {
+        Self {
+            config,
+            lifecycle: DynamicLifecycle::cold(),
+            worker: None,
+            consecutive_failures: 0,
+            open_documents: HashMap::new(),
+            telemetry: TelemetryState::new(),
+        }
+    }
+
+    fn request_timeout(&self) -> Duration {
+        self.config.request_timeout
+    }
+
+    // Makes sure the worker's view of `file_path` matches disk before a
+    // request against it: first sight sends `didOpen` with the full text;
+    // a changed fingerprint sends a full-text `didChange` with a bumped
+    // version; an unchanged fingerprint is a no-op.
+    async fn synchronize_document(
+        &mut self,
+        worker: &WorkerHandle,
+        file_path: &SourceFilePath,
+    ) -> EngineResult<()> {
+        let fingerprint = capture_source_file_fingerprint(file_path)?;
+        if let Some(existing) = self.open_documents.get_mut(file_path) {
+            if existing.fingerprint == fingerprint {
+                return Ok(());
+            }
+            let text = fs::read_to_string(file_path.as_path())?;
+            let next_version = existing.version.saturating_add(1);
+            let params = DidChangeTextDocumentParamsWire {
+                text_document: VersionedTextDocumentIdentifierWire {
+                    uri: file_uri_string_from_source_path(file_path)?,
+                    version: next_version,
+                },
+                content_changes: vec![TextDocumentContentChangeEventWire { text }],
+            };
+            worker
+                .send_notification("textDocument/didChange", &params)
+                .await
+                .map_err(EngineError::from)?;
+            // Local state is updated only after the notification succeeds.
+            existing.version = next_version;
+            existing.fingerprint = fingerprint;
+            return Ok(());
+        }
+
+        let text = fs::read_to_string(file_path.as_path())?;
+        let params = DidOpenTextDocumentParamsWire {
+            text_document: TextDocumentItemWire {
+                uri: file_uri_string_from_source_path(file_path)?,
+                language_id: "rust",
+                version: 1,
+                text,
+            },
+        };
+        worker
+            .send_notification("textDocument/didOpen", &params)
+            .await
+            .map_err(EngineError::from)?;
+        let _previous = self.open_documents.insert(
+            file_path.clone(),
+            OpenDocumentState {
+                version: 1,
+                fingerprint,
+            },
+        );
+        Ok(())
+    }
+
+    fn snapshot(&self) -> LifecycleSnapshot {
+        self.lifecycle.snapshot()
+    }
+
+    fn telemetry_snapshot(&self) -> TelemetrySnapshot {
+        let lifecycle = self.snapshot();
+        self.telemetry
+            .snapshot(lifecycle, self.consecutive_failures)
+    }
+
+    // Every lifecycle state carries a generation; extract it uniformly.
+    fn generation(&self) -> ra_mcp_domain::types::Generation {
+        let snapshot = self.snapshot();
+        match snapshot {
+            LifecycleSnapshot::Cold { generation }
+            | LifecycleSnapshot::Starting { generation }
+            | LifecycleSnapshot::Ready { generation }
+            | LifecycleSnapshot::Recovering { generation, .. } => generation,
+        }
+    }
+
+    // Returns the live worker, recycling it first if it has flagged a
+    // terminal fault; spawns a fresh one when none is available.
+    async fn ensure_worker(&mut self) -> EngineResult<WorkerHandle> {
+        if let Some(worker) = self.worker.clone() {
+            if let Some(fault) = worker.terminal_fault() {
+                warn!(
+                    generation = fault.generation.get(),
+                    "worker marked terminal, recycling"
+                );
+                self.record_fault(fault).await?;
+            } else {
+                return Ok(worker);
+            }
+        }
+        self.spawn_worker().await
+    }
+
+    // Drives the lifecycle through startup and spawns the worker process.
+    // Success resets the failure counter and forgets previously opened
+    // documents (the new worker knows none of them); failure records the
+    // fault (including backoff) and surfaces it.
+    async fn spawn_worker(&mut self) -> EngineResult<WorkerHandle> {
+        self.lifecycle = self.lifecycle.clone().begin_startup()?;
+        let generation = self.generation();
+        let started = spawn_worker(&self.config, generation).await;
+        match started {
+            Ok(worker) => {
+                self.lifecycle = self.lifecycle.clone().complete_startup()?;
+                self.worker = Some(worker.clone());
+                self.consecutive_failures = 0;
+                self.open_documents.clear();
+                Ok(worker)
+            }
+            Err(fault) => {
+                self.record_fault(fault.clone()).await?;
+                Err(EngineError::Fault(fault))
+            }
+        }
+    }
+
+    // Central fault handler: fractures the lifecycle, bumps failure/restart
+    // counters, tears down the worker and its document state, then sleeps
+    // the backoff delay. NOTE(review): the sleep happens while the caller
+    // still holds the supervisor lock, so all requests serialize behind the
+    // backoff during recovery — confirm this is intended.
+    async fn record_fault(&mut self, fault: Fault) -> EngineResult<()> {
+        self.lifecycle = fracture_or_force_recovery(self.lifecycle.clone(), fault.clone())?;
+        self.consecutive_failures = self.consecutive_failures.saturating_add(1);
+        self.telemetry.record_restart(fault.clone());
+
+        if let Some(worker) = self.worker.take() {
+            worker.terminate().await;
+        }
+        self.open_documents.clear();
+
+        let delay = self.next_backoff_delay();
+        debug!(
+            failures = self.consecutive_failures,
+            delay_ms = delay.as_millis(),
+            "applying restart backoff delay"
+        );
+        sleep(delay).await;
+        Ok(())
+    }
+
+    // Success resets the streak that drives the backoff exponent.
+    fn record_success(&mut self, method: &'static str, latency: Duration) {
+        self.consecutive_failures = 0;
+        self.telemetry.record_success(method, latency);
+    }
+
+    fn record_response_error(
+        &mut self,
+        method: &'static str,
+        latency: Duration,
+        code: i64,
+        message: String,
+        retry_performed: bool,
+    ) {
+        let detail = format!("code={code} message={message}");
+        self.telemetry
+            .record_response_error(method, latency, detail, retry_performed);
+    }
+
+    fn record_transport_fault(
+        &mut self,
+        method: &'static str,
+        latency: Duration,
+        detail: String,
+        retry_performed: bool,
+    ) {
+        self.telemetry
+            .record_transport_fault(method, latency, detail, retry_performed);
+    }
+
+    // Exponential backoff: floor * 2^(failures - 1), saturating, capped at
+    // the configured ceiling.
+    fn next_backoff_delay(&self) -> Duration {
+        let exponent = self.consecutive_failures.saturating_sub(1);
+        let multiplier = if exponent >= 31 {
+            u32::MAX
+        } else {
+            1_u32 << exponent
+        };
+        let scaled = self.config.backoff_policy.floor.saturating_mul(multiplier);
+        min(scaled, self.config.backoff_policy.ceiling)
+    }
+}
+
+/// Moves the lifecycle into its fractured state for `fault`.
+///
+/// If the current state rejects the direct transition, the lifecycle is
+/// pushed through a fresh startup first and fractured from there.
+fn fracture_or_force_recovery(
+    lifecycle: DynamicLifecycle,
+    fault: Fault,
+) -> EngineResult<DynamicLifecycle> {
+    if let Ok(next) = lifecycle.clone().fracture(fault.clone()) {
+        return Ok(next);
+    }
+    let started = lifecycle.begin_startup()?;
+    started.fracture(fault).map_err(EngineError::from)
+}
+
+/// Builds the wire identifier for `file_path` as a `file://` URI.
+fn text_document_identifier(
+    file_path: &SourceFilePath,
+) -> EngineResult<TextDocumentIdentifierWire> {
+    let uri = file_uri_string_from_source_path(file_path)?;
+    Ok(TextDocumentIdentifierWire { uri })
+}
+
+/// Builds LSP `TextDocumentPositionParams` from a domain position.
+fn text_document_position_params(
+    position: &SourcePosition,
+) -> EngineResult<TextDocumentPositionParamsWire> {
+    let text_document = text_document_identifier(position.file_path())?;
+    let position = PositionWire::from(position.point());
+    Ok(TextDocumentPositionParamsWire {
+        text_document,
+        position,
+    })
+}
+
+fn format_lsp_response_error_detail(payload: &crate::lsp_transport::RpcErrorPayload) -> String {
+ let crate::lsp_transport::RpcErrorPayload {
+ code,
+ message,
+ data,
+ } = payload;
+ match data {
+ Some(data) => format!("code={code} message={message} data={data}"),
+ None => format!("code={code} message={message}"),
+ }
+}
+
+/// Converts a source file path into a `file://` URI string.
+fn file_uri_string_from_source_path(file_path: &SourceFilePath) -> EngineResult<String> {
+    Url::from_file_path(file_path.as_path())
+        .map(|file_url| file_url.to_string())
+        .map_err(|()| EngineError::InvalidFileUrl)
+}
+
+/// Extracts `textDocument.uri` from raw request params, if present, and
+/// converts it to a source file path.
+///
+/// Missing or non-string URIs yield `Ok(None)`; a present URI that fails
+/// conversion is an error.
+fn source_file_path_hint_from_request_params(
+    params: &Value,
+) -> EngineResult<Option<SourceFilePath>> {
+    let maybe_uri = params
+        .get("textDocument")
+        .and_then(Value::as_object)
+        .and_then(|document| document.get("uri"))
+        .and_then(Value::as_str);
+    match maybe_uri {
+        Some(uri) => source_file_path_from_file_uri_str(uri).map(Some),
+        None => Ok(None),
+    }
+}
+
+/// Parses a `file://` URI string into a validated source file path.
+fn source_file_path_from_file_uri_str(uri: &str) -> EngineResult<SourceFilePath> {
+    let parsed = Url::parse(uri).map_err(|_error| EngineError::InvalidFileUrl)?;
+    let local_path = parsed
+        .to_file_path()
+        .map_err(|()| EngineError::InvalidFileUrl)?;
+    SourceFilePath::try_new(local_path).map_err(EngineError::from)
+}
+
+/// Captures a cheap change fingerprint for `file_path`: byte length plus
+/// mtime in nanoseconds since the Unix epoch.
+///
+/// An unavailable or pre-epoch mtime degrades to zero instead of failing.
+fn capture_source_file_fingerprint(
+    file_path: &SourceFilePath,
+) -> EngineResult<SourceFileFingerprint> {
+    let metadata = fs::metadata(file_path.as_path())?;
+    let modified_nanos_since_epoch = metadata
+        .modified()
+        .ok()
+        .and_then(|modified| modified.duration_since(SystemTime::UNIX_EPOCH).ok())
+        .map_or(0, |elapsed| elapsed.as_nanos());
+    Ok(SourceFileFingerprint {
+        byte_len: metadata.len(),
+        modified_nanos_since_epoch,
+    })
+}
+
+/// Converts a location link, using the start of its target selection range.
+fn source_location_from_lsp_link(link: LocationLink) -> EngineResult<SourceLocation> {
+    source_location_from_uri_and_position(link.target_uri, link.target_selection_range.start)
+}
+
+/// Converts a plain LSP location, using the start of its range.
+fn source_location_from_lsp_location(location: Location) -> EngineResult<SourceLocation> {
+    let Location { uri, range } = location;
+    source_location_from_uri_and_position(uri, range.start)
+}
+
+/// Converts a `file://` URI plus zero-indexed LSP position into a
+/// one-indexed domain source location.
+fn source_location_from_uri_and_position(
+    uri: Uri,
+    position: Position,
+) -> EngineResult<SourceLocation> {
+    let parsed = Url::parse(uri.as_str()).map_err(|_error| EngineError::InvalidFileUrl)?;
+    let local_path = parsed
+        .to_file_path()
+        .map_err(|()| EngineError::InvalidFileUrl)?;
+    let file_path = SourceFilePath::try_new(local_path)?;
+    // LSP is zero-indexed; the domain types are one-indexed.
+    let line = OneIndexedLine::try_new(u64::from(position.line).saturating_add(1))?;
+    let column = OneIndexedColumn::try_new(u64::from(position.character).saturating_add(1))?;
+    Ok(SourceLocation::new(file_path, SourcePoint::new(line, column)))
+}
+
+/// Converts a zero-indexed LSP range into a one-indexed domain range
+/// anchored at `file_path`.
+fn range_to_source_range(
+    file_path: &SourceFilePath,
+    range: Range,
+) -> Result<SourceRange, InvariantViolation> {
+    // Both endpoints convert identically: shift from zero- to one-indexed.
+    let to_point = |position: Position| -> Result<SourcePoint, InvariantViolation> {
+        Ok(SourcePoint::new(
+            OneIndexedLine::try_new(u64::from(position.line).saturating_add(1))?,
+            OneIndexedColumn::try_new(u64::from(position.character).saturating_add(1))?,
+        ))
+    };
+    SourceRange::try_new(file_path.clone(), to_point(range.start)?, to_point(range.end)?)
+}
+
+/// Flattens hover contents into a single string; array items are joined
+/// with newlines.
+fn render_hover_contents(contents: HoverContents) -> String {
+    match contents {
+        HoverContents::Scalar(marked) => marked_string_to_string(marked),
+        HoverContents::Array(items) => {
+            let rendered: Vec<String> = items.into_iter().map(marked_string_to_string).collect();
+            rendered.join("\n")
+        }
+        HoverContents::Markup(markup) => markup.value,
+    }
+}
+
+/// Renders a marked string: plain text passes through, language-tagged
+/// strings become fenced code blocks.
+fn marked_string_to_string(marked: MarkedString) -> String {
+    match marked {
+        MarkedString::String(text) => text,
+        MarkedString::LanguageString(block) => {
+            format!("```{}\n{}\n```", block.language, block.value)
+        }
+    }
+}
+
+/// Summarizes a server-proposed workspace edit into touched-file and
+/// applied-edit counts, covering both the legacy `changes` map and the
+/// newer `documentChanges` forms.
+fn summarize_workspace_edit(edit: WorkspaceEdit) -> RenameReport {
+    // URI -> edit count for that document; all counts saturate.
+    let mut touched = HashMap::<String, u64>::new();
+    let mut edits_applied = 0_u64;
+
+    if let Some(changes) = edit.changes {
+        // Map keys are unique URIs, so plain insert cannot lose counts here.
+        for (uri, edits) in changes {
+            let edit_count = u64::try_from(edits.len()).unwrap_or(u64::MAX);
+            let _previous = touched.insert(uri.as_str().to_owned(), edit_count);
+            edits_applied = edits_applied.saturating_add(edit_count);
+        }
+    }
+
+    if let Some(document_changes) = edit.document_changes {
+        match document_changes {
+            lsp_types::DocumentChanges::Edits(edits) => {
+                for document_edit in edits {
+                    let uri = document_edit.text_document.uri;
+                    let edit_count = u64::try_from(document_edit.edits.len()).unwrap_or(u64::MAX);
+                    let _entry = touched
+                        .entry(uri.as_str().to_owned())
+                        .and_modify(|count| *count = count.saturating_add(edit_count))
+                        .or_insert(edit_count);
+                    edits_applied = edits_applied.saturating_add(edit_count);
+                }
+            }
+            lsp_types::DocumentChanges::Operations(operations) => {
+                // Every operation counts once up front…
+                edits_applied = edits_applied
+                    .saturating_add(u64::try_from(operations.len()).unwrap_or(u64::MAX));
+                for operation in operations {
+                    match operation {
+                        // Resource ops register the affected URI with zero edits.
+                        lsp_types::DocumentChangeOperation::Op(operation) => match operation {
+                            lsp_types::ResourceOp::Create(create) => {
+                                let _entry =
+                                    touched.entry(create.uri.as_str().to_owned()).or_insert(0);
+                            }
+                            lsp_types::ResourceOp::Rename(rename) => {
+                                let _entry = touched
+                                    .entry(rename.new_uri.as_str().to_owned())
+                                    .or_insert(0);
+                            }
+                            lsp_types::ResourceOp::Delete(delete) => {
+                                let _entry =
+                                    touched.entry(delete.uri.as_str().to_owned()).or_insert(0);
+                            }
+                        },
+                        // …and an Edit operation additionally contributes its
+                        // per-edit count, so it counts as 1 + N in
+                        // edits_applied. NOTE(review): confirm this double
+                        // counting is intended rather than N alone.
+                        lsp_types::DocumentChangeOperation::Edit(edit) => {
+                            let edit_count = u64::try_from(edit.edits.len()).unwrap_or(u64::MAX);
+                            let _entry = touched
+                                .entry(edit.text_document.uri.as_str().to_owned())
+                                .and_modify(|count| *count = count.saturating_add(edit_count))
+                                .or_insert(edit_count);
+                            edits_applied = edits_applied.saturating_add(edit_count);
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    RenameReport {
+        files_touched: u64::try_from(touched.len()).unwrap_or(u64::MAX),
+        edits_applied,
+    }
+}
+
+/// Subset of the LSP document diagnostic report this engine consumes,
+/// discriminated by the `kind` field.
+#[derive(Debug, Deserialize)]
+#[serde(tag = "kind", rename_all = "lowercase")]
+enum DiagnosticReportWire {
+    // `kind: "full"` — complete diagnostic list for the document.
+    Full { items: Vec<DiagnosticWire> },
+    // `kind: "unchanged"` — treated as an empty report by the parser.
+    Unchanged {},
+}
+
+/// Incoming diagnostic fields this engine cares about; everything else in
+/// the LSP diagnostic object is ignored during deserialization.
+#[derive(Debug, Deserialize)]
+struct DiagnosticWire {
+    range: Range,
+    // Absent severity is defaulted to INFORMATION by the parser.
+    severity: Option<DiagnosticSeverity>,
+    // May be a string or a number on the wire; stringified later.
+    code: Option<Value>,
+    message: String,
+}
+
+/// Parses a raw diagnostic response for `file_path` into the domain report.
+///
+/// An `unchanged` report maps to an empty diagnostics list; a malformed
+/// payload becomes `EngineError::InvalidPayload`.
+fn parse_diagnostics_report(
+    file_path: &SourceFilePath,
+    value: Value,
+) -> EngineResult<DiagnosticsReport> {
+    let parsed = serde_json::from_value::<DiagnosticReportWire>(value).map_err(|error| {
+        EngineError::InvalidPayload {
+            method: "textDocument/diagnostic",
+            message: error.to_string(),
+        }
+    })?;
+    match parsed {
+        DiagnosticReportWire::Unchanged {} => Ok(DiagnosticsReport {
+            diagnostics: Vec::new(),
+        }),
+        DiagnosticReportWire::Full { items } => {
+            let diagnostics = items
+                .into_iter()
+                .map(|item| {
+                    let range = range_to_source_range(file_path, item.range)?;
+                    // Missing or unrecognized severities degrade to Information.
+                    let level = match item.severity.unwrap_or(DiagnosticSeverity::INFORMATION) {
+                        DiagnosticSeverity::ERROR => DiagnosticLevel::Error,
+                        DiagnosticSeverity::WARNING => DiagnosticLevel::Warning,
+                        DiagnosticSeverity::INFORMATION => DiagnosticLevel::Information,
+                        DiagnosticSeverity::HINT => DiagnosticLevel::Hint,
+                        _ => DiagnosticLevel::Information,
+                    };
+                    // Diagnostic codes may arrive as strings or numbers.
+                    let code = item.code.map(|value| match value {
+                        Value::String(message) => message,
+                        Value::Number(number) => number.to_string(),
+                        other => other.to_string(),
+                    });
+                    Ok(DiagnosticEntry {
+                        range,
+                        level,
+                        code,
+                        message: item.message,
+                    })
+                })
+                .collect::<Result<Vec<_>, InvariantViolation>>()?;
+            Ok(DiagnosticsReport { diagnostics })
+        }
+    }
+}
diff --git a/crates/ra-mcp-engine/tests/engine_recovery.rs b/crates/ra-mcp-engine/tests/engine_recovery.rs
new file mode 100644
index 0000000..a7f2db8
--- /dev/null
+++ b/crates/ra-mcp-engine/tests/engine_recovery.rs
@@ -0,0 +1,353 @@
+//! Integration tests for engine restart and transport recovery.
+
+use lsp_types as _;
+use ra_mcp_domain::{
+ lifecycle::LifecycleSnapshot,
+ types::{
+ OneIndexedColumn, OneIndexedLine, SourceFilePath, SourcePoint, SourcePosition,
+ WorkspaceRoot,
+ },
+};
+use ra_mcp_engine::{BackoffPolicy, Engine, EngineConfig, EngineError};
+use serde as _;
+use serde_json::{self, json};
+use serial_test::serial;
+use std::{error::Error, fs, path::PathBuf, time::Duration};
+use tempfile::TempDir;
+use thiserror as _;
+use tracing as _;
+use url as _;
+
+/// Smoke test: a stable fake server must answer hover, definition,
+/// references, rename, and diagnostics, leaving the engine lifecycle in
+/// the `Ready` state.
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
+#[serial]
+async fn stable_fake_server_handles_core_requests() -> Result<(), Box<dyn Error>> {
+    let fixture = make_fixture()?;
+    let config = make_engine_config(&fixture, vec!["--mode".into(), "stable".into()])?;
+    let engine = Engine::new(config);
+    let position = fixture.position()?;
+
+    let hover = engine.hover(position.clone()).await?;
+    assert_eq!(hover.rendered.as_deref(), Some("hover::ok"));
+
+    // The fake server returns a single definition site at line 3, column 4.
+    let definitions = engine.definition(position.clone()).await?;
+    assert_eq!(definitions.len(), 1);
+    assert_eq!(definitions[0].line().get(), 3);
+    assert_eq!(definitions[0].column().get(), 4);
+
+    let references = engine.references(position.clone()).await?;
+    assert_eq!(references.len(), 1);
+
+    let rename = engine
+        .rename_symbol(position.clone(), "renamed".to_owned())
+        .await?;
+    assert!(rename.files_touched >= 1);
+    assert!(rename.edits_applied >= 1);
+
+    let diagnostics = engine.diagnostics(fixture.source_file_path()?).await?;
+    assert_eq!(diagnostics.diagnostics.len(), 1);
+
+    // Every request succeeded, so the lifecycle must report `Ready`.
+    let snapshot = engine.lifecycle_snapshot().await;
+    assert!(matches!(snapshot, LifecycleSnapshot::Ready { .. }));
+
+    Ok(())
+}
+
+/// Four successful requests (one per method) must be reflected one-to-one
+/// in the aggregate and per-method telemetry, with no response errors,
+/// transport faults, retries, or restarts recorded.
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
+#[serial]
+async fn stable_fake_server_reports_success_telemetry() -> Result<(), Box<dyn Error>> {
+    let fixture = make_fixture()?;
+    let config = make_engine_config(&fixture, vec!["--mode".into(), "stable".into()])?;
+    let engine = Engine::new(config);
+    let position = fixture.position()?;
+
+    // Exactly one request per method; the totals below depend on this count.
+    let _hover = engine.hover(position.clone()).await?;
+    let _definition = engine.definition(position.clone()).await?;
+    let _references = engine.references(position.clone()).await?;
+    let _diagnostics = engine.diagnostics(fixture.source_file_path()?).await?;
+
+    let telemetry = engine.telemetry_snapshot().await;
+    assert_eq!(telemetry.totals.request_count, 4);
+    assert_eq!(telemetry.totals.success_count, 4);
+    assert_eq!(telemetry.totals.response_error_count, 0);
+    assert_eq!(telemetry.totals.transport_fault_count, 0);
+    assert_eq!(telemetry.totals.retry_count, 0);
+    assert_eq!(telemetry.restart_count, 0);
+    assert!(telemetry.last_fault.is_none());
+    assert_eq!(telemetry.consecutive_failures, 0);
+
+    // Each method saw one request, one success, and no faults or retries.
+    assert_method_counts(
+        telemetry.methods.as_slice(),
+        "textDocument/hover",
+        MethodExpectation::new(1, 1, 0, 0, 0),
+    );
+    assert_method_counts(
+        telemetry.methods.as_slice(),
+        "textDocument/definition",
+        MethodExpectation::new(1, 1, 0, 0, 0),
+    );
+    assert_method_counts(
+        telemetry.methods.as_slice(),
+        "textDocument/references",
+        MethodExpectation::new(1, 1, 0, 0, 0),
+    );
+    assert_method_counts(
+        telemetry.methods.as_slice(),
+        "textDocument/diagnostic",
+        MethodExpectation::new(1, 1, 0, 0, 0),
+    );
+
+    Ok(())
+}
+
+/// A single server-side cancellation of the first diagnostics request must
+/// be retried transparently: the caller still receives diagnostics, while
+/// telemetry records one response error and one retry (no restart).
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
+#[serial]
+async fn diagnostics_retry_server_cancelled_response() -> Result<(), Box<dyn Error>> {
+    let fixture = make_fixture()?;
+    // `--diagnostic-cancel-count 1` makes the fake server cancel exactly the
+    // first diagnostics request before behaving normally.
+    let config = make_engine_config(
+        &fixture,
+        vec![
+            "--mode".into(),
+            "stable".into(),
+            "--diagnostic-cancel-count".into(),
+            "1".into(),
+        ],
+    )?;
+    let engine = Engine::new(config);
+
+    let diagnostics = engine.diagnostics(fixture.source_file_path()?).await?;
+    assert_eq!(diagnostics.diagnostics.len(), 1);
+
+    // Two requests total: the cancelled original plus one successful retry.
+    let telemetry = engine.telemetry_snapshot().await;
+    assert_eq!(telemetry.totals.request_count, 2);
+    assert_eq!(telemetry.totals.success_count, 1);
+    assert_eq!(telemetry.totals.response_error_count, 1);
+    assert_eq!(telemetry.totals.transport_fault_count, 0);
+    assert_eq!(telemetry.totals.retry_count, 1);
+    assert_eq!(telemetry.restart_count, 0);
+    assert_eq!(telemetry.consecutive_failures, 0);
+    assert_method_counts(
+        telemetry.methods.as_slice(),
+        "textDocument/diagnostic",
+        MethodExpectation::new(2, 1, 1, 0, 1),
+    );
+
+    Ok(())
+}
+
+/// When the server crashes on the first hover, the engine must restart it
+/// and replay the request, so the caller still gets a successful hover on
+/// a later process generation.
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
+#[serial]
+async fn engine_recovers_after_first_hover_crash() -> Result<(), Box<dyn Error>> {
+    let fixture = make_fixture()?;
+    let marker = fixture.path().join("crash-marker");
+    let args = vec![
+        "--mode".into(),
+        "crash_on_first_hover".into(),
+        "--crash-marker".into(),
+        marker.display().to_string(),
+    ];
+    let config = make_engine_config(&fixture, args)?;
+    let engine = Engine::new(config);
+
+    let hover = engine.hover(fixture.position()?).await?;
+    assert_eq!(hover.rendered.as_deref(), Some("hover::ok"));
+    // The marker file proves the crashing code path actually executed.
+    assert!(marker.exists());
+
+    // A generation of at least 2 proves the server process was restarted.
+    let snapshot = engine.lifecycle_snapshot().await;
+    let generation = if let LifecycleSnapshot::Ready { generation } = snapshot {
+        generation.get()
+    } else {
+        return Err("expected ready lifecycle snapshot after successful recovery".into());
+    };
+    assert!(generation >= 2);
+
+    Ok(())
+}
+
+/// The crash-and-recover path must be fully telemetered: one transport
+/// fault, one retry, one restart, a recorded last fault, and zero
+/// consecutive failures once the replayed hover succeeds.
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
+#[serial]
+async fn crash_recovery_records_transport_fault_retry_and_restart() -> Result<(), Box<dyn Error>> {
+    let fixture = make_fixture()?;
+    let marker = fixture.path().join("crash-marker");
+    let args = vec![
+        "--mode".into(),
+        "crash_on_first_hover".into(),
+        "--crash-marker".into(),
+        marker.display().to_string(),
+    ];
+    let config = make_engine_config(&fixture, args)?;
+    let engine = Engine::new(config);
+
+    let hover = engine.hover(fixture.position()?).await?;
+    assert_eq!(hover.rendered.as_deref(), Some("hover::ok"));
+
+    // Two hover requests total: the one that crashed the server and the
+    // successful replay after restart.
+    let telemetry = engine.telemetry_snapshot().await;
+    assert_eq!(telemetry.totals.request_count, 2);
+    assert_eq!(telemetry.totals.success_count, 1);
+    assert_eq!(telemetry.totals.response_error_count, 0);
+    assert_eq!(telemetry.totals.transport_fault_count, 1);
+    assert_eq!(telemetry.totals.retry_count, 1);
+    assert_eq!(telemetry.restart_count, 1);
+    assert_eq!(telemetry.consecutive_failures, 0);
+    assert!(telemetry.last_fault.is_some());
+    assert_method_counts(
+        telemetry.methods.as_slice(),
+        "textDocument/hover",
+        MethodExpectation::new(2, 1, 0, 1, 1),
+    );
+
+    Ok(())
+}
+
+/// A request for an unsupported method must surface as
+/// `EngineError::LspResponse` and be counted as a response error without
+/// triggering any retries or restarts.
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
+#[serial]
+async fn response_error_requests_are_telemetered() -> Result<(), Box<dyn Error>> {
+    let fixture = make_fixture()?;
+    let config = make_engine_config(&fixture, vec!["--mode".into(), "stable".into()])?;
+    let engine = Engine::new(config);
+
+    // The fake server does not implement `textDocument/notReal`, so this
+    // must come back as an LSP response error rather than a transport fault.
+    let invalid = engine
+        .raw_lsp_request("textDocument/notReal", json!({}))
+        .await;
+    match invalid {
+        Err(EngineError::LspResponse { .. }) => {}
+        other => return Err(format!("expected LSP response error, got {other:?}").into()),
+    }
+
+    let telemetry = engine.telemetry_snapshot().await;
+    assert_eq!(telemetry.totals.request_count, 1);
+    assert_eq!(telemetry.totals.success_count, 0);
+    assert_eq!(telemetry.totals.response_error_count, 1);
+    assert_eq!(telemetry.totals.transport_fault_count, 0);
+    assert_eq!(telemetry.totals.retry_count, 0);
+    assert_eq!(telemetry.restart_count, 0);
+    assert_method_counts(
+        telemetry.methods.as_slice(),
+        "textDocument/notReal",
+        MethodExpectation::new(1, 0, 1, 0, 0),
+    );
+
+    Ok(())
+}
+
+/// Expected per-method telemetry counters, consumed by `assert_method_counts`.
+#[derive(Debug, Clone, Copy)]
+struct MethodExpectation {
+    request_count: u64,
+    success_count: u64,
+    response_error_count: u64,
+    transport_fault_count: u64,
+    retry_count: u64,
+}
+
+impl MethodExpectation {
+    /// Build an expectation from counters in declaration order:
+    /// requests, successes, response errors, transport faults, retries.
+    const fn new(
+        requests: u64,
+        successes: u64,
+        response_errors: u64,
+        transport_faults: u64,
+        retries: u64,
+    ) -> Self {
+        MethodExpectation {
+            request_count: requests,
+            success_count: successes,
+            response_error_count: response_errors,
+            transport_fault_count: transport_faults,
+            retry_count: retries,
+        }
+    }
+}
+
+/// Assert that the per-method telemetry entry for `method` matches `expected`.
+///
+/// # Panics
+///
+/// Panics when no telemetry entry exists for `method` or when any counter
+/// differs from the expectation.
+fn assert_method_counts(
+    methods: &[ra_mcp_engine::MethodTelemetrySnapshot],
+    method: &str,
+    expected: MethodExpectation,
+) {
+    // `let else` replaces the previous assert!-then-re-match sequence: one
+    // lookup, one failure path, and no unreachable early return.
+    let Some(entry) = methods.iter().find(|entry| entry.method == method) else {
+        panic!("expected telemetry entry for method `{method}`");
+    };
+    assert_eq!(entry.request_count, expected.request_count);
+    assert_eq!(entry.success_count, expected.success_count);
+    assert_eq!(entry.response_error_count, expected.response_error_count);
+    assert_eq!(entry.transport_fault_count, expected.transport_fault_count);
+    assert_eq!(entry.retry_count, expected.retry_count);
+}
+
+/// Temporary cargo workspace used as the engine's workspace root for a test.
+struct Fixture {
+    temp_dir: TempDir,
+}
+
+impl Fixture {
+    /// Root directory of the temporary workspace.
+    fn path(&self) -> &std::path::Path {
+        self.temp_dir.path()
+    }
+
+    /// Typed path to `src/lib.rs` inside the fixture workspace.
+    fn source_file_path(&self) -> Result<SourceFilePath, Box<dyn Error>> {
+        let lib_rs = self.path().join("src").join("lib.rs");
+        SourceFilePath::try_new(lib_rs).map_err(|error| error.to_string().into())
+    }
+
+    /// Position at line 1, column 1 of `src/lib.rs`.
+    fn position(&self) -> Result<SourcePosition, Box<dyn Error>> {
+        let point = SourcePoint::new(
+            OneIndexedLine::try_new(1).map_err(|error| error.to_string())?,
+            OneIndexedColumn::try_new(1).map_err(|error| error.to_string())?,
+        );
+        Ok(SourcePosition::new(self.source_file_path()?, point))
+    }
+}
+
+/// Create a minimal cargo package (`Cargo.toml` plus `src/lib.rs`) inside a
+/// fresh temporary directory and wrap it in a [`Fixture`].
+fn make_fixture() -> Result<Fixture, Box<dyn Error>> {
+    let temp_dir = tempfile::tempdir()?;
+    let root = temp_dir.path();
+    fs::create_dir_all(root.join("src"))?;
+    fs::write(
+        root.join("Cargo.toml"),
+        "[package]\nname = \"fixture\"\nversion = \"0.0.0\"\nedition = \"2024\"\n",
+    )?;
+    fs::write(root.join("src").join("lib.rs"), "pub fn touch() -> i32 { 1 }\n")?;
+    Ok(Fixture { temp_dir })
+}
+
+/// Build an [`EngineConfig`] pointing at the fake rust-analyzer binary with
+/// short (2 s) timeouts and a fast 5–20 ms restart backoff suited to tests.
+fn make_engine_config(
+    fixture: &Fixture,
+    args: Vec<String>,
+) -> Result<EngineConfig, Box<dyn Error>> {
+    let root = WorkspaceRoot::try_new(fixture.path().to_path_buf())
+        .map_err(|error| error.to_string())?;
+    let server_binary = fake_rust_analyzer_binary()?;
+    let restart_backoff =
+        BackoffPolicy::try_new(Duration::from_millis(5), Duration::from_millis(20))
+            .map_err(|error| error.to_string())?;
+    EngineConfig::try_new(
+        root,
+        server_binary,
+        args,
+        Vec::new(),
+        Duration::from_secs(2),
+        Duration::from_secs(2),
+        restart_backoff,
+    )
+    .map_err(|error| error.to_string().into())
+}
+
+/// Locate the `fake-rust-analyzer` helper binary.
+///
+/// Prefers the `CARGO_BIN_EXE_*` environment variables cargo sets for
+/// integration tests (both dash and underscore spellings), falling back to a
+/// sibling of the test executable in the target debug directory.
+///
+/// # Errors
+///
+/// Fails when the current executable's path cannot be resolved far enough to
+/// reach the target debug directory.
+fn fake_rust_analyzer_binary() -> Result<PathBuf, Box<dyn Error>> {
+    for var in [
+        "CARGO_BIN_EXE_fake-rust-analyzer",
+        "CARGO_BIN_EXE_fake_rust_analyzer",
+    ] {
+        if let Ok(path) = std::env::var(var) {
+            return Ok(PathBuf::from(path));
+        }
+    }
+    let current = std::env::current_exe()?;
+    let deps_dir = current
+        .parent()
+        .ok_or_else(|| "failed to resolve test binary parent".to_owned())?;
+    let debug_dir = deps_dir
+        .parent()
+        .ok_or_else(|| "failed to resolve target debug directory".to_owned())?;
+    // Append the platform executable suffix (".exe" on Windows, "" elsewhere)
+    // so the fallback path also resolves on Windows hosts.
+    let binary_name = format!("fake-rust-analyzer{}", std::env::consts::EXE_SUFFIX);
+    Ok(debug_dir.join(binary_name))
+}