This commit is contained in:
eweeman
2026-01-14 16:57:36 -08:00
parent 2544e891f8
commit 4a6e2b898f
14 changed files with 343 additions and 136 deletions

View File

@@ -1,96 +1,111 @@
import json
import logging
from typing import Dict, Any
from slack_sdk import WebClient
from slack_sdk.errors import SlackApiError
from llm.local_llm_client import chat_completion
logger = logging.getLogger(__name__)
# Prompt prepended to every LLM call. It pins a strict output contract:
# the model must reply with a bare JSON object {"tool": ..., "arguments": ...}
# when it wants a tool invoked (parsed by try_parse_tool_call below), and with
# plain text otherwise. The literal below is runtime behavior — do not edit
# its wording without updating the parser's expectations.
SYSTEM_PROMPT = """
You are a Slack assistant.
If a tool is required, respond ONLY in valid JSON:
{
"tool": "tool_name",
"arguments": { ... }
}
If no tool is required, respond normally with text.
"""
def try_parse_tool_call(text):
    """
    Attempt to interpret an LLM reply as a tool-call request.

    The system prompt instructs the model to answer with bare JSON of the
    form {"tool": "...", "arguments": {...}} when a tool is needed; any
    other reply is plain text for the user.

    Args:
        text: Raw LLM output. Expected to be a string, but None / non-string
            values are tolerated (e.g. when the LLM client failed upstream).

    Returns:
        The parsed dict when it is a JSON object containing both "tool" and
        "arguments" keys; otherwise None (plain text, invalid JSON, JSON of
        the wrong shape, or non-string input).
    """
    try:
        data = json.loads(text)
    # TypeError covers None / non-string input, which json.loads rejects;
    # the original only caught JSONDecodeError and would crash on None.
    except (json.JSONDecodeError, TypeError):
        return None
    if isinstance(data, dict) and "tool" in data and "arguments" in data:
        return data
    return None
# Try to import LLM client, but don't fail if it's not available.
# LLM_AVAILABLE gates the response-generation path in process_mention;
# chat_completion is set to None so `if LLM_AVAILABLE and chat_completion`
# checks stay safe in echo mode.
# NOTE(review): an unconditional `from llm.local_llm_client import chat_completion`
# also appears in the import header near the top of this file; if that module is
# missing, the earlier import raises ImportError before this fallback can run,
# defeating the guard. The earlier line looks like diff residue — TODO confirm
# and remove it.
try:
    from llm.local_llm_client import chat_completion
    LLM_AVAILABLE = True
except ImportError:
    logger.warning("LLM client not available, using simple echo mode")
    LLM_AVAILABLE = False
    chat_completion = None
def process_mention(
    event_data: dict,
    slack_client: WebClient,
    vector_store: Any,
    bot_profile: Any,
    tool_registry: Dict[str, Any]
) -> None:
    """
    Process messages that mention the bot.

    Reconstructed from a corrupted diff region that interleaved the pre- and
    post-commit versions of this function; this is the coherent post-commit
    version. NOTE(review): the old tool-dispatch path (try_parse_tool_call +
    tool_registry execution) from the pre-commit version is no longer wired
    in here — confirm whether tool handling was intentionally dropped.

    Args:
        event_data: Slack event payload; the "event" key holds the message.
        slack_client: Authenticated Slack WebClient used to post replies.
        vector_store: Object exposing search_similar(text, limit) for RAG.
        bot_profile: Config object read via getattr (BOT_IDENTIFIER,
            SYSTEM_PROMPT, ENABLE_RAG_INSERT).
        tool_registry: Mapping of tool names to tool entries (currently
            unused in this version — see NOTE above).

    Returns:
        None. Side effect: posts a message to the originating channel.
    """
    event = event_data.get("event", {})
    channel = event.get("channel")
    user = event.get("user")
    text = event.get("text", "")
    ts = event.get("ts")  # Message timestamp (currently unused; kept for parity)
    logger.info(f"Processing mention from {user} in {channel}")

    # Remove the bot's own <@mention> token so the LLM sees clean user text.
    from config import BOT_USER_ID
    clean_text = text.replace(f"<@{BOT_USER_ID}>", "").strip()

    # Get bot configuration (bot_name is currently unused; kept for parity).
    bot_name = getattr(bot_profile, "BOT_IDENTIFIER", "Bot")
    system_prompt = getattr(bot_profile, "SYSTEM_PROMPT", "You are a helpful assistant.")

    try:
        # ---- RAG context (optional) ----
        # NOTE(review): flag name is ENABLE_RAG_INSERT although it gates
        # retrieval — confirm this is the intended setting.
        rag_enabled = getattr(bot_profile, "ENABLE_RAG_INSERT", False)
        context = ""
        if rag_enabled:
            try:
                # Search for similar messages to ground the reply.
                similar = vector_store.search_similar(clean_text, limit=3)
                if similar:
                    context = "\nRelevant context:\n" + "\n".join(similar)
            except AttributeError:
                logger.warning("RAG retrieval failed: search_similar not implemented")
            except Exception as e:
                # Retrieval is best-effort; never block the reply on it.
                logger.error(f"RAG retrieval error: {e}")

        # ---- Generate response ----
        if LLM_AVAILABLE and chat_completion:
            try:
                messages = [
                    {"role": "system", "content": system_prompt},
                    {"role": "user", "content": clean_text}
                ]
                if context:
                    # Inject retrieved context as a second system message.
                    messages.insert(1, {"role": "system", "content": f"Context: {context}"})
                llm_response = chat_completion(messages)
                response_text = llm_response.get("content", "Sorry, I couldn't generate a response.")
            except Exception as e:
                # Degrade to echo mode on any LLM failure.
                logger.error(f"LLM error: {e}", exc_info=True)
                response_text = f"You said: {clean_text}"
        else:
            # Simple echo response when LLM not available.
            response_text = f"You said: {clean_text}"
            if context:
                response_text += f"\n{context}"

        # Send message to channel (NOT as a thread reply).
        slack_client.chat_postMessage(
            channel=channel,
            text=response_text
        )
        logger.info(f"Sent response to {channel}")
    except SlackApiError as e:
        logger.error(f"Slack API error: {e.response['error']}", exc_info=True)
        try:
            slack_client.chat_postMessage(
                channel=channel,
                text="Sorry, I encountered a Slack API error."
            )
        except Exception:
            # Best-effort apology; nothing more we can do if Slack is down.
            pass
    except Exception as e:
        logger.error(f"Error processing mention: {e}", exc_info=True)
        try:
            slack_client.chat_postMessage(
                channel=channel,
                text="⚠️ Sorry, I encountered an internal error."
            )
        except Exception:
            # Swallow secondary failures; the primary error is already logged.
            pass