diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..505a3b1
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,10 @@
+# Python-generated files
+__pycache__/
+*.py[oc]
+build/
+dist/
+wheels/
+*.egg-info
+
+# Virtual environments
+.venv
diff --git a/.python-version b/.python-version
new file mode 100644
index 0000000..cc1923a
--- /dev/null
+++ b/.python-version
@@ -0,0 +1 @@
+3.8
diff --git a/README.md b/README.md
index decb07a..2f22bcb 100644
--- a/README.md
+++ b/README.md
@@ -24,13 +24,13 @@ Automatically respond to LinkedIn messages from recruiters using AI. This bot le
 
 1. **Clone the Repository**:
    ```bash
-   git clone https://github.com/your-username/automatic-linkedin-answer-ai.git
+   git clone https://github.com/iklobato/automatic-linkedin-answer-ai.git
    cd automatic-linkedin-answer-ai
    ```
 
 2. **Install Dependencies**:
    ```bash
-   pip install -r requirements.txt
+   uv pip install -r requirements.txt
    ```
 
 3. **Set Up Environment Variables**:
diff --git a/app.py b/app.py
new file mode 100644
index 0000000..4deab58
--- /dev/null
+++ b/app.py
@@ -0,0 +1,66 @@
+import logging
+import yaml
+import asyncio
+from browser_use import Agent
+from langchain_openai import ChatOpenAI, OpenAIEmbeddings
+from langchain_community.document_loaders import DirectoryLoader
+from langchain_text_splitters import RecursiveCharacterTextSplitter
+from langchain_community.vectorstores import FAISS
+from langchain.memory import ConversationBufferMemory
+from dotenv import load_dotenv
+
+load_dotenv()
+logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
+
+with open('config.yaml') as f:
+    config = yaml.safe_load(f)
+
+def load_documents():
+    loader = DirectoryLoader(config['documents_dir'], glob="**/*.txt")
+    docs = loader.load()
+    text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
+    return text_splitter.split_documents(docs)
+
+def setup_rag():
+    docs = load_documents()
+    vectorstore = FAISS.from_documents(docs, OpenAIEmbeddings())
+    return vectorstore.as_retriever()
+
+memory = ConversationBufferMemory()
+
+def generate_response(query):
+    # Rebuilds the FAISS index on every call; acceptable for small document sets
+    retriever = setup_rag()
+    relevant_docs = retriever.get_relevant_documents(query)
+    context = "\n\n".join([doc.page_content for doc in relevant_docs])
+
+    memory.save_context({"input": query}, {"output": ""})
+    history = memory.load_memory_variables({})
+
+    # DeepSeek serves an OpenAI-compatible API, so ChatOpenAI needs its base URL
+    llm = ChatOpenAI(
+        model=config['deepseek']['model'],
+        api_key=config['deepseek']['api_key'],
+        base_url="https://api.deepseek.com",
+    )
+    response = llm.invoke(f"Context: {context}\n\nHistory: {history}\n\nQuestion: {query}")
+    return f"{config['bot_disclaimer']}\n\n{response.content}"
+
+async def process_messages():
+    agent = Agent(
+        llm=ChatOpenAI(model="gpt-4o"),
+        task="Log in to LinkedIn, go to messages, check for new messages, and reply based on the provided knowledge base."
+    )
+    while True:
+        try:
+            result = await agent.run()
+            logging.info(f"Agent result: {result}")
+            await asyncio.sleep(60)  # non-blocking wait before polling again
+        except Exception as e:
+            logging.error(f"Error: {str(e)}")
+            await asyncio.sleep(300)  # back off longer after an error
+
+if __name__ == "__main__":
+    logging.info("Starting bot...")
+    asyncio.run(process_messages())
+
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000..7b6cb40
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,7 @@
+[project]
+name = "automatic-linkedin-answer-ai"
+version = "0.1.0"
+description = "Add your description here"
+readme = "README.md"
+requires-python = ">=3.8"
+dependencies = []
diff --git a/uv.lock b/uv.lock
new file mode 100644
index 0000000..6cd9fef
--- /dev/null
+++ b/uv.lock
@@ -0,0 +1,8 @@
+version = 1
+revision = 1
+requires-python = ">=3.8"
+
+[[package]]
+name = "automatic-linkedin-answer-ai"
+version = "0.1.0"
+source = { virtual = "." }
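Note: app.py reads `config.yaml` at startup, but this diff does not add that file. Below is a minimal sketch of what it could look like, inferred from the keys the code accesses (`documents_dir`, `deepseek.model`, `deepseek.api_key`, `bot_disclaimer`); every value shown is a placeholder, not part of this change:

```yaml
# Hypothetical config.yaml: key names taken from app.py, values are placeholders
documents_dir: ./documents           # folder of *.txt files used as the RAG knowledge base
deepseek:
  model: deepseek-chat               # placeholder model name
  api_key: your-deepseek-api-key     # keep real keys out of version control
bot_disclaimer: "This reply was generated automatically by a bot."
```

The `.env` file from the README's environment-variables step should also define `OPENAI_API_KEY`, since `load_dotenv()` is what supplies credentials for the `gpt-4o` browsing agent and the embeddings. With both files in place and the dependencies installed, the bot can be started with `python app.py` (or `uv run app.py`).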