import os
import re
import tempfile
import unicodedata
from urllib.parse import urljoin

import requests
from bs4 import BeautifulSoup
from newspaper import Article
from pydantic import BaseModel


class CrawlWebsite(BaseModel):
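    """Crawl a website and collect the readable text from its pages.

    ``depth`` controls how many link levels are followed from the start URL
    (taken from the ``CRAWL_DEPTH`` environment variable, defaulting to 1).
    """
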
    url: str
    js: bool = False
    depth: int = int(os.getenv("CRAWL_DEPTH", "1"))
    max_pages: int = 100
    max_time: int = 60

    def _crawl(self, url):
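        """Fetch the raw HTML for ``url``; return ``None`` on a non-200 response."""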
        try:
            response = requests.get(url)
            if response.status_code == 200:
                return response.text
            else:
                return None
        except Exception as e:
            print(e)
            raise

    def extract_content(self, url):
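        """Extract the readable article text from ``url`` with newspaper.

        Returns ``None`` if the page cannot be downloaded or parsed.
        """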
        article = Article(url)

        try:
            article.download()
            article.parse()
        except Exception as e:
            print(f"Error downloading or parsing article: {e}")
            return None
        return article.text

    def _process_recursive(self, url, depth, visited_urls):
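        """Depth-first crawl: collect this page's text, then follow same-domain links.

        ``visited_urls`` is shared across the recursion so each page is fetched once.
        """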
        if depth == 0 or url in visited_urls:
            return ""

        visited_urls.add(url)

        # extract_content() returns None on failure; fall back to an empty
        # string so the concatenation below cannot raise a TypeError.
        content = self.extract_content(url) or ""
        raw_html = self._crawl(url)

        if not raw_html:
            return content

        soup = BeautifulSoup(raw_html, "html.parser")
        links = [a["href"] for a in soup.find_all("a", href=True)]
        for link in links:
            full_url = urljoin(url, link)
            # Ensure we're staying on the same domain
            if self.url in full_url:
                content += self._process_recursive(full_url, depth - 1, visited_urls)

        return content

    def process(self):
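        """Crawl ``self.url`` recursively and write the collected text to a temporary file.

        Returns a ``(temp_file_path, file_name)`` tuple.
        """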
        # Extract and combine content recursively
        visited_urls = set()
        extracted_content = self._process_recursive(self.url, self.depth, visited_urls)

        # Write the combined text to a temporary file
        file_name = slugify(self.url) + ".txt"
        temp_file_path = os.path.join(tempfile.gettempdir(), file_name)
        with open(temp_file_path, "w") as temp_file:
            temp_file.write(extracted_content)

        return temp_file_path, file_name

    def checkGithub(self):
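        """Return True when the target URL points at github.com."""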
        return "github.com" in self.url


def slugify(text):
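    """Turn a URL (or any text) into a filesystem-safe, ASCII-only slug."""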
    text = unicodedata.normalize("NFKD", text).encode("ascii", "ignore").decode("utf-8")
    text = re.sub(r"[^\w\s-]", "", text).strip().lower()
    text = re.sub(r"[-\s]+", "-", text)
    return text
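

if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): crawl an example
    # site one level deep and report where the extracted text was written.
    # The URL below is a placeholder; network access is required.
    crawler = CrawlWebsite(url="https://example.com", depth=1)
    file_path, file_name = crawler.process()
    print(f"Saved crawled content to {file_path} ({file_name})")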