This commit is contained in:
eweeman
2026-01-14 16:57:36 -08:00
parent 2544e891f8
commit 4a6e2b898f
14 changed files with 343 additions and 136 deletions

2
.env
View File

@@ -20,7 +20,7 @@ EMBEDDING_MODEL=all-MiniLM-L6-v2
#########################
# Local LLM (Remote Machine)
#########################
LOCAL_LLM_ENDPOINT=https://api.chat.pathcore.org/v1/chat/completions
LOCAL_LLM_ENDPOINT=http://172.168.10.10:11434/v1/chat/completions
LOCAL_LLM_MODEL=mistral
LOCAL_LLM_TIMEOUT=60

9
bots/deafult_bot.py Normal file
View File

@@ -0,0 +1,9 @@
# Bot profile: the fallback configuration used for channels that have no
# dedicated bot module (channel_map.json maps channel IDs to these identifiers).
BOT_IDENTIFIER = "default_bot"

# System prompt sent as the first message of every LLM conversation for this bot.
SYSTEM_PROMPT = """You are a helpful AI assistant in Slack.
You answer questions clearly and concisely.
You are friendly and professional."""

ENABLED_TOOL_NAMES = []  # Add tool names here as you create them
ENABLE_RAG_INSERT = False  # Set to True to enable vector storage

9
channel_cache.json Normal file
View File

@@ -0,0 +1,9 @@
{
"C08L3JJDSDV": {
"id": "C08L3JJDSDV",
"name": "project-ai",
"is_private": false,
"is_dm": false,
"cached_at": "2026-01-14T15:55:02.290619"
}
}

View File

@@ -5,5 +5,6 @@
"C0DQ40MH8": "sales",
"C2RGSA4GL": "billing",
"C0DUFQ4BB": "wireless",
"C09KNPDT481": "abot_scripting_bot"
"C09KNPDT481": "abot_scripting_bot",
"C08L3JJDSDV": "default_bot"
}

View File

@@ -1,4 +1,4 @@
BOT_USER_ID = ""
BOT_USER_ID = "U08LF3N25QE"
CONVERSATION_TIMEOUT_MINUTES = 15
MAX_HISTORY_LENGTH = 10
MAX_MESSAGE_LENGTH = 4000

View File

@@ -1,27 +1,84 @@
import requests
import logging
import os
from typing import Dict, Any
from slack_sdk import WebClient
from slack_sdk.errors import SlackApiError
LLM_ENDPOINT = os.getenv("LOCAL_LLM_ENDPOINT")
MODEL_NAME = os.getenv("LOCAL_LLM_MODEL", "llama3")
logger = logging.getLogger(__name__)
def chat_completion(messages, temperature=0.3, max_tokens=1024):
payload = {
"model": MODEL_NAME,
"messages": messages,
"temperature": temperature,
"max_tokens": max_tokens
}
def process_mention(
event_data: dict,
slack_client: WebClient,
vector_store: Any,
bot_profile: Any,
tool_registry: Dict[str, Any]
) -> None:
"""
Process messages that mention the bot.
"""
event = event_data.get("event", {})
channel = event.get("channel")
user = event.get("user")
text = event.get("text", "")
ts = event.get("ts") # This is the message timestamp
logger.info(f"Processing mention from {user} in {channel}")
# Remove bot mention from text
from config import BOT_USER_ID
clean_text = text.replace(f"<@{BOT_USER_ID}>", "").strip()
# Get bot configuration
bot_name = getattr(bot_profile, "BOT_IDENTIFIER", "Bot")
try:
resp = requests.post(
LLM_ENDPOINT,
json=payload,
timeout=60
# Try to get RAG context if enabled
rag_enabled = getattr(bot_profile, "ENABLE_RAG_INSERT", False)
context = ""
if rag_enabled:
try:
# Search for similar messages
similar = vector_store.search_similar(clean_text, limit=3)
if similar:
context = "\nRelevant context:\n" + "\n".join(similar)
except AttributeError:
logger.warning("RAG retrieval failed: search_similar not implemented")
except Exception as e:
logger.error(f"RAG retrieval error: {e}")
# TODO: Integrate with your LLM here
# For now, simple echo response
response_text = f"You said: {clean_text}"
if context:
response_text += f"\n{context}"
# Send message to channel (NOT as a thread reply)
slack_client.chat_postMessage(
channel=channel,
text=response_text
)
resp.raise_for_status()
return resp.json()["choices"][0]["message"]["content"]
logger.info(f"Sent response to {channel}")
except SlackApiError as e:
logger.error(f"Slack API error: {e.response['error']}", exc_info=True)
try:
slack_client.chat_postMessage(
channel=channel,
text="Sorry, I encountered a Slack API error."
)
except:
pass
except Exception as e:
logging.error(f"Local LLM call failed: {e}", exc_info=True)
return "⚠️ Local LLM is currently unavailable."
logger.error(f"Error processing mention: {e}", exc_info=True)
try:
slack_client.chat_postMessage(
channel=channel,
text="⚠️ Sorry, I encountered an internal error."
)
except:
pass

View File

@@ -10,6 +10,7 @@ from typing import Dict, Any
from dotenv import load_dotenv
from flask import Flask, jsonify
from slackeventsapi import SlackEventAdapter
from slack_sdk import WebClient
import slack
import json
from hot_reload import ReloadableRegistry, start_hot_reload
@@ -128,7 +129,7 @@ if not SIGNING_SECRET or not SLACK_TOKEN:
sys.exit("Missing Slack credentials")
slack_event_adapter = SlackEventAdapter(SIGNING_SECRET, "/slack/events", app)
slack_client = slack.WebClient(token=SLACK_TOKEN)
slack_client = WebClient(token=SLACK_TOKEN)
# --------------------------------------------------
# Deduplication

View File

@@ -1,96 +1,111 @@
import json
import logging
from typing import Dict, Any
from slack_sdk import WebClient
from slack_sdk.errors import SlackApiError
from llm.local_llm_client import chat_completion
logger = logging.getLogger(__name__)
SYSTEM_PROMPT = """
You are a Slack assistant.
If a tool is required, respond ONLY in valid JSON:
{
"tool": "tool_name",
"arguments": { ... }
}
If no tool is required, respond normally with text.
"""
def try_parse_tool_call(text):
    """Return the parsed tool-call dict if *text* is a JSON object with
    both a "tool" and an "arguments" key; otherwise return None.

    Non-JSON text and JSON values of any other shape are treated as a
    plain-text LLM reply, not a tool call.
    """
    try:
        parsed = json.loads(text)
    except json.JSONDecodeError:
        return None
    looks_like_tool_call = (
        isinstance(parsed, dict) and "tool" in parsed and "arguments" in parsed
    )
    return parsed if looks_like_tool_call else None
# Try to import LLM client, but don't fail if it's not available
try:
from llm.local_llm_client import chat_completion
LLM_AVAILABLE = True
except ImportError:
logger.warning("LLM client not available, using simple echo mode")
LLM_AVAILABLE = False
chat_completion = None
def process_mention(
event_data,
slack_client,
vector_store, # qdrant_functions
bot_profile,
tool_registry
):
event = event_data["event"]
channel = event["channel"]
user = event["user"]
text = event["text"]
ts = event["ts"]
thread_ts = event.get("thread_ts", ts)
# ---- RAG retrieval (if enabled) ----
context_blocks = []
if getattr(bot_profile, "ENABLE_RAG_RETRIEVAL", True):
try:
results = vector_store.search_similar(text, limit=5)
for r in results:
context_blocks.append(f"- {r['text']}")
except Exception as e:
logging.warning(f"RAG retrieval failed: {e}")
context_text = "\n".join(context_blocks)
messages = [
{"role": "system", "content": SYSTEM_PROMPT},
{
"role": "user",
"content": f"""
Context:
{context_text}
User message:
{text}
"""
}
]
llm_output = chat_completion(messages)
# ---- Tool handling ----
tool_call = try_parse_tool_call(llm_output)
if tool_call:
tool_name = tool_call["tool"]
args = tool_call["arguments"]
tool_entry = tool_registry.get(tool_name)
if not tool_entry:
response_text = f"⚠️ Unknown tool: `{tool_name}`"
else:
event_data: dict,
slack_client: WebClient,
vector_store: Any,
bot_profile: Any,
tool_registry: Dict[str, Any]
) -> None:
"""
Process messages that mention the bot.
"""
event = event_data.get("event", {})
channel = event.get("channel")
user = event.get("user")
text = event.get("text", "")
ts = event.get("ts") # This is the message timestamp
logger.info(f"Processing mention from {user} in {channel}")
# Remove bot mention from text
from config import BOT_USER_ID
clean_text = text.replace(f"<@{BOT_USER_ID}>", "").strip()
# Get bot configuration
bot_name = getattr(bot_profile, "BOT_IDENTIFIER", "Bot")
system_prompt = getattr(bot_profile, "SYSTEM_PROMPT", "You are a helpful assistant.")
try:
# Try to get RAG context if enabled
rag_enabled = getattr(bot_profile, "ENABLE_RAG_INSERT", False)
context = ""
if rag_enabled:
try:
result = tool_entry["function"](**args)
response_text = json.dumps(result, indent=2)
# Search for similar messages
similar = vector_store.search_similar(clean_text, limit=3)
if similar:
context = "\nRelevant context:\n" + "\n".join(similar)
except AttributeError:
logger.warning("RAG retrieval failed: search_similar not implemented")
except Exception as e:
logging.error(f"Tool execution failed: {e}", exc_info=True)
response_text = "⚠️ Tool execution failed."
else:
response_text = llm_output
slack_client.chat_postMessage(
channel=channel,
text=response_text,
thread_ts=thread_ts
)
logger.error(f"RAG retrieval error: {e}")
# Generate response
if LLM_AVAILABLE and chat_completion:
try:
# Use LLM to generate response
messages = [
{"role": "system", "content": system_prompt},
{"role": "user", "content": clean_text}
]
if context:
messages.insert(1, {"role": "system", "content": f"Context: {context}"})
llm_response = chat_completion(messages)
response_text = llm_response.get("content", "Sorry, I couldn't generate a response.")
except Exception as e:
logger.error(f"LLM error: {e}", exc_info=True)
response_text = f"You said: {clean_text}"
else:
# Simple echo response when LLM not available
response_text = f"You said: {clean_text}"
if context:
response_text += f"\n{context}"
# Send message to channel (NOT as a thread reply)
slack_client.chat_postMessage(
channel=channel,
text=response_text
)
logger.info(f"Sent response to {channel}")
except SlackApiError as e:
logger.error(f"Slack API error: {e.response['error']}", exc_info=True)
try:
slack_client.chat_postMessage(
channel=channel,
text="Sorry, I encountered a Slack API error."
)
except:
pass
except Exception as e:
logger.error(f"Error processing mention: {e}", exc_info=True)
try:
slack_client.chat_postMessage(
channel=channel,
text="⚠️ Sorry, I encountered an internal error."
)
except:
pass

View File

@@ -1,29 +1,24 @@
# qdrant_functions.py
# rag/qdrant_functions.py
import logging
from qdrant_client import QdrantClient
from sentence_transformers import SentenceTransformer
import uuid
import os
from typing import Any, List
QDRANT_HOST = os.getenv("QDRANT_HOST", "localhost")
QDRANT_PORT = int(os.getenv("QDRANT_PORT", 6333))
QDRANT_COLLECTION = os.getenv("QDRANT_COLLECTION", "abot-slack")
logger = logging.getLogger(__name__)
client = QdrantClient(host=QDRANT_HOST, port=QDRANT_PORT)
embedding_model = SentenceTransformer("all-MiniLM-L6-v2")
VECTOR_SIZE = 384
def embed_and_store_slack_message(
    slack_client: Any,
    channel: str,
    user: str,
    text: str,
    ts: str,
    bot_user_id: str
) -> None:
    """Embed and store a Slack message in the vector database.

    NOTE(review): currently a stub — it only logs the call and discards the
    message. No embedding or Qdrant upsert is performed yet; implement before
    relying on ENABLE_RAG_INSERT.
    """
    logger.info(f"RAG insert for message in {channel}: {text[:50]}...")
    pass
def ensure_collection():
    """Create the Qdrant collection if it does not already exist (idempotent)."""
    # get_collections() lists every collection; compare by name only.
    collections = [c.name for c in client.get_collections().collections]
    if QDRANT_COLLECTION not in collections:
        client.create_collection(
            collection_name=QDRANT_COLLECTION,
            vectors_config={
                "size": VECTOR_SIZE,
                "distance": "Cosine"
            }
        )
        # NOTE(review): uses the root logging module rather than the module
        # logger used elsewhere in this file — consider unifying.
        logging.info(f"Created Qdrant collection {QDRANT_COLLECTION}")

# Module-import side effect: guarantee the collection exists before any
# insert/search call is made.
ensure_collection()
def search_similar(query: str, limit: int = 5) -> List[str]:
    """Search for similar messages in the vector database.

    NOTE(review): stub — logs the query and always returns an empty list;
    no vector search is performed yet, so RAG context is effectively disabled.
    """
    logger.info(f"RAG search for: {query[:50]}...")
    return []  # Return empty list for now

View File

@@ -1,5 +1,5 @@
flask
slack-sdk
slack_sdk
slackeventsapi
python-dotenv

3
robots.txt Normal file
View File

@@ -0,0 +1,3 @@
# robots.txt
User-agent: *
Disallow: /

38
scripts/list_channels.py Normal file
View File

@@ -0,0 +1,38 @@
"""List the workspace's Slack channels and suggest a channel_map.json.

Requires SLACK_TOKEN in the environment (or a .env file).
"""
import os
import json
from dotenv import load_dotenv
from slack_sdk import WebClient

load_dotenv()

token = os.getenv("SLACK_TOKEN")
if not token:
    # Fail fast with a clear message instead of an opaque Slack auth error later.
    raise SystemExit("❌ SLACK_TOKEN not found in environment (.env)")

client = WebClient(token=token)

# Get all channels the bot can see (public + private).
response = client.conversations_list(types="public_channel,private_channel")

if response["ok"]:
    channels = response["channels"]
    print("Available channels:\n")
    print(f"{'Channel Name':<30} {'Channel ID':<15} {'Type':<10}")
    print("-" * 60)

    channel_map = {}
    for channel in channels:
        channel_type = "Private" if channel.get('is_private') else "Public"
        print(f"{channel['name']:<30} {channel['id']:<15} {channel_type:<10}")
        # Every channel defaults to the fallback bot; edit the mapping by hand.
        channel_map[channel['id']] = "default_bot"

    print("\n" + "=" * 60)
    print("\nSuggested channel_map.json:\n")
    print(json.dumps(channel_map, indent=2))

    # Optionally save it
    save = input("\nSave this to channel_map.json? (y/n): ")
    if save.lower() == 'y':
        with open('channel_map.json', 'w') as f:
            json.dump(channel_map, f, indent=2)
        print("✓ Saved to channel_map.json")
else:
    # BUG FIX: a not-ok response previously ended the script silently.
    print(f"❌ Failed to list channels: {response.get('error', 'unknown error')}")

View File

@@ -0,0 +1,55 @@
import os
from dotenv import load_dotenv
from slack_sdk import WebClient
from pathlib import Path
load_dotenv()
def test_slack_connection():
    """Test basic Slack connection and bot setup.

    Returns:
        bool: True when authentication and channel listing both succeed,
        False otherwise. A human-readable report is printed along the way.
    """
    token = os.getenv("SLACK_TOKEN")
    if not token:
        print("❌ SLACK_TOKEN not found in .env file")
        return False
    print(f"✓ SLACK_TOKEN found: {token[:10]}...")
    try:
        client = WebClient(token=token)

        # Test authentication
        auth_response = client.auth_test()
        if auth_response["ok"]:
            print(f"✓ Bot authenticated successfully!")
            print(f"  Bot User ID: {auth_response['user_id']}")
            print(f"  Bot Username: {auth_response['user']}")
            print(f"  Team: {auth_response['team']}")

            # Test listing channels
            channels_response = client.conversations_list(types="public_channel,private_channel")
            if channels_response["ok"]:
                channels = channels_response["channels"]
                print(f"\n✓ Found {len(channels)} channels:")
                for channel in channels[:5]:  # Show first 5
                    print(f"  - {channel['name']} (ID: {channel['id']})")
                if len(channels) > 5:
                    print(f"  ... and {len(channels) - 5} more")
                return True
            # BUG FIX: previously fell through here and implicitly returned
            # None with no message when channel listing was not ok.
            print(f"❌ Failed to list channels: {channels_response.get('error', 'unknown error')}")
            return False
        else:
            print(f"❌ Authentication failed: {auth_response}")
            return False
    except Exception as e:
        # Broad catch is deliberate: this is a diagnostic script and any
        # failure (network, auth, SDK) should be reported, not raised.
        print(f"❌ Error connecting to Slack: {e}")
        return False


if __name__ == "__main__":
    print("Testing Slack Connection...\n")
    test_slack_connection()

24
user_cache.json Normal file
View File

@@ -0,0 +1,24 @@
{
"U03TRF48XLY": {
"id": "U03TRF48XLY",
"name": "edwin.weeman",
"real_name": "edwin.weeman",
"display_name": "",
"email": "",
"title": "",
"first_name": "edwin.weeman",
"last_name": "",
"cached_at": "2026-01-14T15:55:02.449664"
},
"U04JA29H6P3": {
"id": "U04JA29H6P3",
"name": "ryangvenecia",
"real_name": "Ryan Venecia",
"display_name": "",
"email": "",
"title": "",
"first_name": "Ryan",
"last_name": "Venecia",
"cached_at": "2026-01-14T16:06:03.750498"
}
}