Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
19 changes: 18 additions & 1 deletion application/apps/indexer/mcp/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -4,9 +4,26 @@ version = "0.1.0"
edition = "2024"

# Inherit lint configuration from the workspace; the key must appear exactly once
# (duplicate keys in a TOML table are rejected by Cargo).
[lints]
workspace = true

[dependencies]
serde.workspace = true
anyhow.workspace = true
axum = { version = "0.7", features = ["macros"] }
# MCP SDK: both client and server roles, with streamable-HTTP transports
# (reqwest-based client transport plus server-side session support).
rmcp = { version = "0.11", features = [
"client",
"macros",
"reqwest",
"schemars",
"server",
"transport-io",
"transport-streamable-http-client-reqwest",
"transport-streamable-http-server",
"transport-streamable-http-server-session",
] }
schemars = "1.1"
tokio.workspace = true
tokio-util.workspace = true
log.workspace = true
# NOTE(review): pinned here instead of inheriting from workspace.dependencies
# like the other crates — confirm this is intentional.
reqwest = "0.12.25"
thiserror.workspace = true
59 changes: 59 additions & 0 deletions application/apps/indexer/mcp/src/client/conversation.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,59 @@
// Simple model for storing the state of a chat conversation inside the MCP client

use rmcp::{
model::Content,
serde_json::{Map, Value},
};

/// A single message in a chat conversation, tagged by direction.
#[derive(Debug)]
pub enum ChatMessage {
    /// Message sent from the MCP client to the LLM.
    ClientToLlm(ClientToLlm),
    /// Message sent from the LLM back to the MCP client.
    LlmToClient(LlmToClient),
}

/// Messages sent from the MCP client to the LLM.
#[derive(Debug)]
pub enum ClientToLlm {
    /// Initial instructions used to set up the conversation.
    SystemPrompt { content: String },
    /// A prompt entered by the user.
    UserPrompt { content: String },
    /// Content returned by an MCP tool invocation, fed back to the LLM.
    ToolResult { content: Vec<Content> },
}

/// Messages sent from the LLM to the MCP client.
#[derive(Debug)]
pub enum LlmToClient {
    /// Request to invoke an MCP tool with optional JSON arguments.
    ToolCall {
        tool_name: String,
        arguments: Option<Map<String, Value>>,
    },
    /// Out-of-band system/status message from the LLM.
    System {
        message: String,
    },
    /// Final textual response for the current prompt.
    FinalResponse {
        content: String,
    },
}
/// State of a single chat conversation: the ordered list of exchanged messages.
pub struct Conversation {
    // Messages in the order they were exchanged; `new` seeds the first entry
    // with the system prompt.
    chat_messages: Vec<ChatMessage>,
    // TODO:[MCP] keep track of steps?
    // TODO:[MCP] conversation ID=?
}

impl Conversation {
    /// Create a new conversation whose first message is the given system prompt.
    ///
    /// # Arguments
    /// * `system_prompt`: The system prompt to initialize the conversation with
    pub fn new(system_prompt: String) -> Self {
        let initial = ChatMessage::ClientToLlm(ClientToLlm::SystemPrompt {
            content: system_prompt,
        });
        Self {
            chat_messages: vec![initial],
        }
    }

    /// Read-only view of all messages exchanged so far, oldest first.
    pub fn chat_messages(&self) -> &[ChatMessage] {
        self.chat_messages.as_slice()
    }

    /// Append a message to the end of the conversation.
    pub fn add_chat_message(&mut self, message: ChatMessage) {
        self.chat_messages.push(message);
    }
}
81 changes: 81 additions & 0 deletions application/apps/indexer/mcp/src/client/llm/mock.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,81 @@
// A mock LLM client for testing purposes
// It simulates LLM behavior without making actual API calls / without an HTTP client
// The logic for processing messages is as follows:
// - User prompts received from the chipmunk core will cause the mock LLM to emit a ToolCall message:
// LlmToClient::ToolCall ("apply_search_filter") with the prompt content as filters
// - ToolResult messages will be answered with a FinalResponse message containing the tool result

use log::warn;
use rmcp::serde_json::{self, json};
use tokio::time::{Duration, sleep};

use crate::client::conversation::LlmToClient;
use crate::{
client::{
conversation::{ChatMessage, ClientToLlm, Conversation},
llm::LlmClient,
},
types::{McpError, SearchFilter},
};
pub struct MockLlmClient;

// Abstraction of LLM clients using the LlmClient trait.
impl LlmClient for MockLlmClient {
    /// Process a conversation by taking appropriate action based on the last message and return a LLM response.
    /// Delegates to the inherent `MockLlmClient::respond`; the fully-qualified call
    /// makes the inherent (non-trait) method the unambiguous target.
    async fn respond(&self, conversation: &Conversation) -> Result<LlmToClient, McpError> {
        MockLlmClient::respond(self, conversation).await
    }
}

impl MockLlmClient {
    /// Inspect the last message of the conversation and produce a simulated
    /// LLM response with hardcoded logic:
    /// - a user prompt yields a `ToolCall` to `apply_search_filter` with the
    ///   prompt text as a case-insensitive plain-text filter;
    /// - a tool result yields a `FinalResponse` echoing the result;
    /// - a system prompt or anything else is rejected with `McpError::Generic`.
    pub async fn respond(&self, conversation: &Conversation) -> Result<LlmToClient, McpError> {
        let last = conversation.chat_messages().last();
        warn!("🟢 Mock LLM client processing message: {:?}", last);

        // Only client-to-LLM messages are handled; everything else is an error.
        let Some(ChatMessage::ClientToLlm(message)) = last else {
            return Err(McpError::Generic {
                message: "Mock LLM client received unsupported request".into(),
            });
        };

        match message {
            ClientToLlm::SystemPrompt { .. } => Err(McpError::Generic {
                message: "Mock LLM client received a system prompt; nothing to do".into(),
            }),
            ClientToLlm::UserPrompt { content } => {
                // Simulate LLM reasoning duration.
                warn!("⏰ Mock LLM client waits 5s ...");
                sleep(Duration::from_secs(5)).await;

                // Turn the raw prompt into a single non-regex, case-insensitive filter.
                let filter = SearchFilter {
                    value: content.clone(),
                    is_regex: false,
                    ignore_case: true,
                    is_word: false,
                };
                let arguments = json!({ "filters": vec![filter] }).as_object().cloned();

                // Answer with a tool-call request.
                Ok(LlmToClient::ToolCall {
                    tool_name: "apply_search_filter".into(),
                    arguments,
                })
            }
            ClientToLlm::ToolResult { content } => {
                // Simulate LLM reasoning duration.
                warn!("⏰ Mock LLM client waits 5s ...");
                sleep(Duration::from_secs(5)).await;

                // Close the round by echoing the tool result back.
                Ok(LlmToClient::FinalResponse {
                    content: format!("Final LLM Response: Tool result {:?}", content),
                })
            }
        }
    }
}
61 changes: 61 additions & 0 deletions application/apps/indexer/mcp/src/client/llm/mod.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,61 @@
// LLM client abstraction layer
use crate::{
client::conversation::{Conversation, LlmToClient},
types::McpError,
};

pub mod mock;

/// Configuration used for creation of different LLM client
/// Configuration used for creation of different LLM clients.
#[derive(Debug, Clone)]
pub enum LlmConfig {
    /// Select the mock client (no network access; canned responses for testing).
    Mock,
    // Other LLM configurations can be added here. They may need other parameters like:
    // - API keys
    // - Model names
    // - temperature settings
    // - feature flags, etc.
    // E.g.:
    // OpenAi { api_key: String, model: String },
}

// LLM client trait that all LLM client implementations must provide.
// Note: `async fn` in a public trait triggers the `async_fn_in_trait` lint because
// auto trait bounds (e.g. `Send`) cannot be specified on the returned future.
// We suppress the warning for now as all LLM clients are internal to the MCP client module.
// Alternatively we would need to use the async-trait crate.
#[allow(async_fn_in_trait)]
pub trait LlmClient {
    /// Process a conversation by taking appropriate action based on the last message and return a LLM response.
    async fn respond(&self, conversation: &Conversation) -> Result<LlmToClient, McpError>;
}

/// LLM client abstraction wrapper providing a facade over different LLM client implementations.
pub struct Llm<C: LlmClient> {
    // The wrapped concrete client implementation.
    client: C,
}

// LLM client abstraction wrapper implementation
impl<C: LlmClient> Llm<C> {
pub fn new(client: C) -> Self {
Self { client }
}

/// Forward processing of a conversation to the underlying LLM client implementation
pub async fn process(&self, conversation: &Conversation) -> Result<LlmToClient, McpError> {
self.client.respond(conversation).await
}
}

// Creation of the mock-backed facade from configuration.
// TODO:[MCP] Can this be moved to the client modules? Via trait?
impl Llm<mock::MockLlmClient> {
    /// Build the facade for the client variant selected by `config`.
    pub fn from_config(config: LlmConfig) -> Self {
        match config {
            LlmConfig::Mock => Self::new(mock::MockLlmClient),
        }
    }
}
7 changes: 5 additions & 2 deletions application/apps/indexer/mcp/src/client/messages.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2,10 +2,13 @@
#[derive(Debug)]
pub enum McpClientToChipmunk {
    /// Free-text response from the MCP client, forwarded to the chipmunk core.
    Response { response: String },
    // TODO:[MCP] add other message types as needed.
}

/// Messages from chipmunk to the MCP client
#[derive(Debug, Clone)]
pub enum McpChipmunkToClient {
    /// A prompt entered by the user, to be forwarded to the LLM.
    UserPrompt { prompt: String },
    // TODO:[MCP] add other message types as needed. E.g.:
    // SystemPrompt { prompt: String },
}
Loading