import json
import logging

from local_llm_client import chat_completion

# Module-level logger (rather than the root logging.* calls) so handlers
# and levels can be configured per-module by the embedding application.
logger = logging.getLogger(__name__)

SYSTEM_PROMPT = """
You are a Slack assistant.
If a tool is required, respond ONLY in valid JSON:
{ "tool": "tool_name", "arguments": { ... } }
If no tool is required, respond normally with text.
"""


def try_parse_tool_call(text):
    """Return the parsed tool-call dict if *text* is a valid tool-call JSON.

    A valid tool call is a JSON object with a "tool" key and an "arguments"
    key whose value is itself an object — it must be splattable as **kwargs
    into the tool function, so a non-dict "arguments" (e.g. a JSON array)
    is rejected here instead of raising TypeError at call time.

    Returns None for plain-text responses, malformed JSON, or JSON that
    does not match the tool-call shape.
    """
    try:
        data = json.loads(text)
    except json.JSONDecodeError:
        return None
    if (
        isinstance(data, dict)
        and "tool" in data
        and isinstance(data.get("arguments"), dict)
    ):
        return data
    return None


def _retrieve_context(query, vector_store, bot_profile):
    """Best-effort RAG retrieval: return newline-joined context bullets.

    Retrieval failures are logged and swallowed deliberately so the bot can
    still answer without context. Returns "" when retrieval is disabled on
    the profile (ENABLE_RAG_RETRIEVAL, default True) or produces nothing.
    """
    if not getattr(bot_profile, "ENABLE_RAG_RETRIEVAL", True):
        return ""
    try:
        results = vector_store.search_similar(query, limit=5)
        return "\n".join(f"- {r['text']}" for r in results)
    except Exception as e:
        # Best-effort by design: degrade to an empty context, don't crash.
        logger.warning("RAG retrieval failed: %s", e)
        return ""


def _execute_tool(tool_call, tool_registry):
    """Run the tool named in *tool_call* and return user-facing reply text.

    Unknown tools and tool failures (including non-JSON-serializable
    results) are reported as text rather than raised, so the Slack reply
    always goes out.
    """
    tool_name = tool_call["tool"]
    args = tool_call["arguments"]
    tool_entry = tool_registry.get(tool_name)
    if not tool_entry:
        return f"⚠️ Unknown tool: `{tool_name}`"
    try:
        result = tool_entry["function"](**args)
        return json.dumps(result, indent=2)
    except Exception as e:
        # exc_info=True preserves the traceback for operators; the user
        # only sees a generic failure message.
        logger.error("Tool execution failed: %s", e, exc_info=True)
        return "⚠️ Tool execution failed."


def process_mention(
    event_data,
    slack_client,
    vector_store,  # qdrant_functions
    bot_profile,
    tool_registry
):
    """Handle a Slack mention event end-to-end.

    Steps: unpack the event, retrieve optional RAG context, ask the LLM,
    dispatch a tool call if the LLM requested one, and post the reply in
    the originating thread.

    Args:
        event_data: Slack event payload; event_data["event"] must carry
            "channel", "text", and "ts" (assumes an app_mention-style
            event — TODO confirm against the caller).
        slack_client: client exposing chat_postMessage(channel, text,
            thread_ts).
        vector_store: similarity search backend exposing
            search_similar(text, limit).
        bot_profile: config object; ENABLE_RAG_RETRIEVAL toggles retrieval.
        tool_registry: mapping of tool name -> {"function": callable}.
    """
    event = event_data["event"]
    channel = event["channel"]
    text = event["text"]
    ts = event["ts"]
    # Reply in the existing thread if there is one, otherwise start a
    # thread under the mentioning message itself.
    thread_ts = event.get("thread_ts", ts)

    context_text = _retrieve_context(text, vector_store, bot_profile)

    messages = [
        {"role": "system", "content": SYSTEM_PROMPT},
        {
            "role": "user",
            "content": f"""
Context:
{context_text}

User message:
{text}
"""
        }
    ]
    llm_output = chat_completion(messages)

    # The LLM either emits a JSON tool call (handled here) or plain text
    # (relayed verbatim).
    tool_call = try_parse_tool_call(llm_output)
    if tool_call:
        response_text = _execute_tool(tool_call, tool_registry)
    else:
        response_text = llm_output

    slack_client.chat_postMessage(
        channel=channel,
        text=response_text,
        thread_ts=thread_ts
    )