This commit is contained in:
iklobato
2025-02-20 14:20:51 -03:00
parent e4543b56d7
commit 928c2f23c8

38
app.py
View File

@@ -1,50 +1,62 @@
import asyncio
import logging import logging
import os import os
import yaml import yaml
import asyncio
from browser_use import Agent from browser_use import Agent
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_community.document_loaders import DirectoryLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import FAISS
from langchain.memory import ConversationBufferMemory
from dotenv import load_dotenv from dotenv import load_dotenv
from langchain.memory import ConversationBufferMemory
from langchain_community.document_loaders import DirectoryLoader
from langchain_community.vectorstores import FAISS
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
# Pull environment variables (e.g. OPENAI_API_KEY) from a local .env file.
load_dotenv()

# Timestamped, level-tagged log lines for the whole process.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
)

# Project settings: documents_dir, deepseek model/credentials, bot_disclaimer.
with open('config.yaml') as f:
    config = yaml.safe_load(f)
def load_documents():
    """Load every ``*.txt`` file under the configured documents directory
    and split the contents into overlapping chunks ready for embedding."""
    raw_docs = DirectoryLoader(config['documents_dir'], glob="**/*.txt").load()
    splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
    return splitter.split_documents(raw_docs)
def setup_rag():
    """Build a FAISS vector index over the knowledge base and expose it
    as a retriever.

    NOTE(review): every call re-loads and re-embeds the whole corpus —
    the caller invokes this once per query, so consider caching for
    anything beyond a small document set.
    """
    index = FAISS.from_documents(load_documents(), OpenAIEmbeddings())
    return index.as_retriever()
# Conversation history shared across all replies for the process lifetime.
memory = ConversationBufferMemory()
def generate_response(query):
    """Answer *query* via RAG over the local knowledge base.

    Retrieves the most relevant chunks, folds in the running conversation
    history, asks the configured DeepSeek-compatible chat model, and
    returns the reply prefixed with the configured disclaimer.

    Args:
        query: The user's question as a plain string.

    Returns:
        ``"<bot_disclaimer>\\n\\n<model reply>"``.
    """
    # setup_rag() rebuilds (and re-embeds) the index on every call — see
    # the note on setup_rag(); acceptable for small corpora only.
    retriever = setup_rag()
    relevant_docs = retriever.get_relevant_documents(query)
    context = "\n\n".join(doc.page_content for doc in relevant_docs)

    # Read history BEFORE recording this turn, so the prompt does not
    # contain the current question twice.
    history = memory.load_memory_variables({})

    llm = ChatOpenAI(
        model=config['deepseek']['model'],
        api_key=config['deepseek']['api_key'],
    )
    response = llm.invoke(
        f"Context: {context}\n\nHistory: {history}\n\nQuestion: {query}"
    )

    # Bug fix: the original saved an empty output ("") before the model
    # ran, so the bot's actual replies never entered conversation memory.
    # Record the real reply for this turn instead.
    memory.save_context({"input": query}, {"output": response.content})

    return f"{config['bot_disclaimer']}\n\n{response.content}"
async def process_messages(): async def process_messages():
agent = Agent( agent = Agent(
llm=ChatOpenAI(model="gpt-4o"), llm=ChatOpenAI(model="gpt-4o"),
task="Log in to LinkedIn, go to messages, check for new messages, and reply based on the provided knowledge base." task="Log in to LinkedIn, go to messages, check for new messages, and reply based on the provided knowledge base.",
) )
while True: while True:
try: try:
@@ -55,7 +67,7 @@ async def process_messages():
logging.error(f"Error: {str(e)}") logging.error(f"Error: {str(e)}")
time.sleep(300) time.sleep(300)
# Script entry point: start the LinkedIn message-processing loop.
if __name__ == "__main__":
    logging.info("Starting bot...")
    asyncio.run(process_messages())