init
This commit is contained in:
10
.gitignore
vendored
Normal file
10
.gitignore
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
# Python-generated files
|
||||
__pycache__/
|
||||
*.py[oc]
|
||||
build/
|
||||
dist/
|
||||
wheels/
|
||||
*.egg-info
|
||||
|
||||
# Virtual environments
|
||||
.venv
|
||||
1
.python-version
Normal file
1
.python-version
Normal file
@@ -0,0 +1 @@
|
||||
3.8
|
||||
@@ -24,13 +24,13 @@ Automatically respond to LinkedIn messages from recruiters using AI. This bot le
|
||||
|
||||
1. **Clone the Repository**:
|
||||
```bash
|
||||
git clone https://github.com/your-username/automatic-linkedin-answer-ai.git
|
||||
git clone https://github.com/iklobato/automatic-linkedin-answer-ai.git
|
||||
cd automatic-linkedin-answer-ai
|
||||
```
|
||||
|
||||
2. **Install Dependencies**:
|
||||
```bash
|
||||
pip install -r requirements.txt
|
||||
uv install
|
||||
```
|
||||
|
||||
3. **Set Up Environment Variables**:
|
||||
|
||||
61
app.py
Normal file
61
app.py
Normal file
@@ -0,0 +1,61 @@
|
||||
import logging
|
||||
import os
|
||||
import yaml
|
||||
import asyncio
|
||||
from browser_use import Agent
|
||||
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
|
||||
from langchain_community.document_loaders import DirectoryLoader
|
||||
from langchain_text_splitters import RecursiveCharacterTextSplitter
|
||||
from langchain_community.vectorstores import FAISS
|
||||
from langchain.memory import ConversationBufferMemory
|
||||
from dotenv import load_dotenv
|
||||
|
||||
# Pull environment variables (e.g. OPENAI_API_KEY) from a local .env file.
load_dotenv()
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# Module-level configuration, read once at import time. Keys used elsewhere
# in this file: 'documents_dir', 'deepseek' (with 'model' and 'api_key'),
# and 'bot_disclaimer'. Raises FileNotFoundError if config.yaml is absent.
with open('config.yaml') as f:
    config = yaml.safe_load(f)
|
||||
|
||||
def load_documents():
    """Load every ``.txt`` file under the configured documents directory
    and split the contents into overlapping chunks suitable for embedding.

    Returns:
        list: Document chunks (chunk_size=1000, chunk_overlap=200).
    """
    raw_docs = DirectoryLoader(config['documents_dir'], glob="**/*.txt").load()
    splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
    return splitter.split_documents(raw_docs)
|
||||
|
||||
# Cached retriever so the FAISS index is built (and the corpus embedded via
# the OpenAI embeddings API) only once per process, not once per query.
_retriever = None


def setup_rag():
    """Return a retriever over the knowledge-base FAISS index.

    The index is built lazily on first call and cached for the lifetime of
    the process. The original implementation rebuilt the vector store — and
    re-embedded every document through the paid embeddings API — on every
    single call, which `generate_response` makes once per incoming message.

    Returns:
        A LangChain retriever backed by a FAISS vector store.
    """
    global _retriever
    if _retriever is None:
        docs = load_documents()
        _retriever = FAISS.from_documents(docs, OpenAIEmbeddings()).as_retriever()
    return _retriever
|
||||
|
||||
# Module-level conversation memory shared across all generate_response calls;
# holds the running input/output history injected into each LLM prompt.
memory = ConversationBufferMemory()
|
||||
|
||||
def generate_response(query):
    """Answer *query* using RAG context plus the running conversation memory.

    Args:
        query: The incoming message text to answer.

    Returns:
        str: The configured bot disclaimer followed by the LLM's answer.
    """
    retriever = setup_rag()
    relevant_docs = retriever.get_relevant_documents(query)
    context = "\n\n".join(doc.page_content for doc in relevant_docs)

    # Load prior turns BEFORE recording the current one, so the prompt's
    # history contains completed exchanges only.
    history = memory.load_memory_variables({})

    llm = ChatOpenAI(model=config['deepseek']['model'], api_key=config['deepseek']['api_key'])
    response = llm.invoke(f"Context: {context}\n\nHistory: {history}\n\nQuestion: {query}")
    answer = response.content

    # Bug fix: the original saved {"output": ""} before the LLM ran, so the
    # conversation memory never contained any actual answer. Record the real
    # exchange now that we have it.
    memory.save_context({"input": query}, {"output": answer})

    return f"{config['bot_disclaimer']}\n\n{answer}"
|
||||
|
||||
async def process_messages():
    """Run the browser agent in a loop: check LinkedIn messages and reply.

    Polls every 60 seconds on success; backs off to 300 seconds after an
    error. Runs until cancelled.

    Bug fixes vs. original:
      * ``time.sleep`` was used without importing ``time`` (NameError on
        the first iteration).
      * A blocking sleep inside ``async def`` would stall the event loop;
        ``await asyncio.sleep`` yields control instead.
    """
    agent = Agent(
        llm=ChatOpenAI(model="gpt-4o"),
        task="Log in to LinkedIn, go to messages, check for new messages, and reply based on the provided knowledge base."
    )
    while True:
        try:
            result = await agent.run()
            logging.info(f"Agent result: {result}")
            await asyncio.sleep(60)
        except Exception as e:
            logging.error(f"Error: {str(e)}")
            await asyncio.sleep(300)
|
||||
|
||||
# Script entry point: announce startup, then drive the async polling loop
# until the process is interrupted.
if __name__ == "__main__":
    logging.info("Starting bot...")
    asyncio.run(process_messages())
|
||||
|
||||
7
pyproject.toml
Normal file
7
pyproject.toml
Normal file
@@ -0,0 +1,7 @@
|
||||
[project]
|
||||
name = "automatic-linkedin-answer-ai"
|
||||
version = "0.1.0"
|
||||
description = "Add your description here"
|
||||
readme = "README.md"
|
||||
requires-python = ">=3.8"
|
||||
dependencies = []
|
||||
Reference in New Issue
Block a user