# NOTE(review): scrape residue from a git commit-diff web view — not part of the module.
# progress
# This commit is contained in:
# @@ -1,27 +1,84 @@
|
||||
import requests
|
||||
import logging
|
||||
import os
|
||||
from typing import Dict, Any
|
||||
from slack_sdk import WebClient
|
||||
from slack_sdk.errors import SlackApiError
|
||||
|
||||
# Base URL of the local LLM chat-completions endpoint; None if the
# LOCAL_LLM_ENDPOINT environment variable is unset.
LLM_ENDPOINT = os.getenv("LOCAL_LLM_ENDPOINT")
# Model identifier sent with each request; defaults to "llama3".
MODEL_NAME = os.getenv("LOCAL_LLM_MODEL", "llama3")
# Module-level logger, named after this module per logging convention.
logger = logging.getLogger(__name__)
|
||||
def chat_completion(messages, temperature=0.3, max_tokens=1024):
    """
    Call the local LLM chat-completions endpoint and return the reply text.

    Args:
        messages: List of chat messages in OpenAI-style format
            (dicts with "role" and "content" keys).
        temperature: Sampling temperature passed through to the model.
        max_tokens: Maximum number of tokens to generate.

    Returns:
        The assistant message content on success, or a user-facing
        fallback string if the endpoint is unreachable or errors.
    """
    payload = {
        "model": MODEL_NAME,
        "messages": messages,
        "temperature": temperature,
        "max_tokens": max_tokens,
    }
    try:
        # LLM_ENDPOINT comes from the environment; a generous timeout is
        # needed because local models can be slow to generate.
        resp = requests.post(
            LLM_ENDPOINT,
            json=payload,
            timeout=60,
        )
        resp.raise_for_status()
        return resp.json()["choices"][0]["message"]["content"]
    except Exception as e:
        # Best-effort fallback: callers get a displayable error string
        # rather than an exception. Use the module logger for consistency.
        logger.error(f"Local LLM call failed: {e}", exc_info=True)
        return "⚠️ Local LLM is currently unavailable."
||||
def process_mention(
    event_data: dict,
    slack_client: WebClient,
    vector_store: Any,
    bot_profile: Any,
    tool_registry: Dict[str, Any]
) -> None:
    """
    Process messages that mention the bot.

    Strips the bot mention from the message text, optionally augments the
    reply with similar messages from the vector store (when the profile
    enables RAG), and posts a response back to the originating channel
    (not as a thread reply).

    Args:
        event_data: Raw Slack event payload; the message lives under "event".
        slack_client: Authenticated Slack WebClient used to post replies.
        vector_store: Store exposing search_similar(text, limit) for RAG.
        bot_profile: Config object read via getattr (BOT_IDENTIFIER,
            ENABLE_RAG_INSERT).
        tool_registry: Mapping of available tools (currently unused here).
    """
    event = event_data.get("event", {})
    channel = event.get("channel")
    user = event.get("user")
    text = event.get("text", "")
    ts = event.get("ts")  # This is the message timestamp

    logger.info(f"Processing mention from {user} in {channel}")

    # Remove bot mention from text. Imported locally to avoid a circular
    # import at module load time — TODO confirm that is why it is deferred.
    from config import BOT_USER_ID
    clean_text = text.replace(f"<@{BOT_USER_ID}>", "").strip()

    # Get bot configuration
    bot_name = getattr(bot_profile, "BOT_IDENTIFIER", "Bot")

    try:
        # Try to get RAG context if enabled
        rag_enabled = getattr(bot_profile, "ENABLE_RAG_INSERT", False)
        context = ""

        if rag_enabled:
            try:
                # Search for similar messages; RAG failure is non-fatal —
                # we still answer, just without extra context.
                similar = vector_store.search_similar(clean_text, limit=3)
                if similar:
                    context = "\nRelevant context:\n" + "\n".join(similar)
            except AttributeError:
                logger.warning("RAG retrieval failed: search_similar not implemented")
            except Exception as e:
                logger.error(f"RAG retrieval error: {e}")

        # TODO: Integrate with your LLM here
        # For now, simple echo response
        response_text = f"You said: {clean_text}"

        if context:
            response_text += f"\n{context}"

        # Send message to channel (NOT as a thread reply)
        slack_client.chat_postMessage(
            channel=channel,
            text=response_text
        )

        logger.info(f"Sent response to {channel}")

    except SlackApiError as e:
        logger.error(f"Slack API error: {e.response['error']}", exc_info=True)
        try:
            slack_client.chat_postMessage(
                channel=channel,
                text="Sorry, I encountered a Slack API error."
            )
        except Exception:
            # Posting the apology is best-effort; never raise from here.
            pass

    except Exception as e:
        logger.error(f"Error processing mention: {e}", exc_info=True)
        try:
            slack_client.chat_postMessage(
                channel=channel,
                text="⚠️ Sorry, I encountered an internal error."
            )
        except Exception:
            # Posting the apology is best-effort; never raise from here.
            pass
|
||||
# NOTE(review): scrape residue from the commit-diff web view — not part of the module.
# Reference in New Issue
# Block a user