#!/usr/bin/env python3
"""Darknet search tool — queries .onion search engines via Tor SOCKS5"""
import requests
import sys
import json
from bs4 import BeautifulSoup

TOR_PROXY = {"http": "socks5h://127.0.0.1:9050", "https": "socks5h://127.0.0.1:9050"}
TIMEOUT = 45

def search_ahmia(query, max_results=10):
    """Search Ahmia (curated .onion index) through the Tor SOCKS5 proxy.

    Args:
        query: Raw search string; URL-quoted before the request.
        max_results: Cap on the number of result dicts returned.

    Returns:
        List of dicts with keys engine/title/url/description. On any
        failure, a single-element list containing an "error" entry so the
        caller can report which engine failed.
    """
    results = []
    try:
        url = f"https://ahmia.fi/search/?q={requests.utils.quote(query)}"
        r = requests.get(url, proxies=TOR_PROXY, timeout=TIMEOUT)
        # Surface HTTP errors (403/500/...) as an error entry instead of
        # silently parsing an error page into an empty result list.
        r.raise_for_status()
        soup = BeautifulSoup(r.text, "html.parser")
        for item in soup.select("li.result")[:max_results]:
            title_el = item.select_one("h4")
            link_el = item.select_one("a")
            desc_el = item.select_one("p")
            if title_el and link_el:
                results.append({
                    "engine": "ahmia",
                    "title": title_el.get_text(strip=True),
                    "url": link_el.get("href", ""),
                    "description": desc_el.get_text(strip=True) if desc_el else ""
                })
    except Exception as e:
        results.append({"engine": "ahmia", "error": str(e)})
    return results

def search_torch(query, max_results=10):
    """Torch - largest .onion search engine (no filtering)"""
    hits = []
    try:
        resp = requests.get(
            f"http://xmh57jrknzkhv6y3ls3ubitzfqnkrwxhopf5aygthi7d6rplyvk3noyd.onion/cgi-bin/omega/omega?P={requests.utils.quote(query)}",
            proxies=TOR_PROXY, timeout=TIMEOUT)
        page = BeautifulSoup(resp.text, "html.parser")
        visited = set()
        for anchor in page.find_all("a"):
            target = anchor.get("href", "")
            label = anchor.get_text(strip=True)
            # Keep only unseen .onion links that carry a meaningful label.
            if ".onion" not in target or target in visited:
                continue
            if not label or len(label) <= 5:
                continue
            visited.add(target)
            hits.append({
                "engine": "torch",
                "title": label[:200],
                "url": target,
                "description": ""
            })
            if len(hits) >= max_results:
                break
    except Exception as e:
        hits.append({"engine": "torch", "error": str(e)})
    return hits

def search_haystack(query, max_results=10):
    """Haystack - .onion search engine"""
    found = []
    try:
        endpoint = f"http://haystak5njsmn2hqkewecpaxetahtwhsbsa64jom2k22z5afxhnpxfid.onion/?q={requests.utils.quote(query)}"
        resp = requests.get(endpoint, proxies=TOR_PROXY, timeout=TIMEOUT)
        page = BeautifulSoup(resp.text, "html.parser")
        for node in page.select(".result")[:max_results]:
            anchor = node.select_one("a")
            if not anchor:
                continue
            snippet = node.select_one(".description, .snippet, p")
            found.append({
                "engine": "haystack",
                "title": anchor.get_text(strip=True),
                "url": anchor.get("href", ""),
                "description": snippet.get_text(strip=True) if snippet else ""
            })
    except Exception as e:
        found.append({"engine": "haystack", "error": str(e)})
    return found

def search_tordex(query, max_results=10):
    """TorDex - large .onion search engine, no filtering"""
    found = []
    try:
        resp = requests.get(
            f"http://tordexpmg4xy32rfp4ovnz7zq5ujoejwq2u26uxxtkscgo5u3losmeid.onion/search?query={requests.utils.quote(query)}",
            proxies=TOR_PROXY, timeout=TIMEOUT)
        page = BeautifulSoup(resp.text, "html.parser")
        known = set()
        # Selector matches both container nodes and bare anchors.
        for node in page.select(".result, .search-result, h5 a, .result-title a"):
            target, label = node.get("href", ""), node.get_text(strip=True)
            if not target:
                # Container element: descend to its first anchor instead.
                inner = node.select_one("a")
                if inner:
                    target, label = inner.get("href", ""), inner.get_text(strip=True)
            if ".onion" not in target or target in known or not label:
                continue
            known.add(target)
            sibling = node.find_next_sibling()
            found.append({
                "engine": "tordex",
                "title": label[:200],
                "url": target,
                "description": sibling.get_text(strip=True)[:300] if sibling else ""
            })
            if len(found) >= max_results:
                break
    except Exception as e:
        found.append({"engine": "tordex", "error": str(e)})
    return found

def search_bobby(query, max_results=10):
    """Search the Bobby dark-web engine via Tor.

    Filters candidate elements BEFORE applying the max_results cap (the
    selector also matches generic ``tr``/``li`` rows, so slicing first let
    nav/junk rows consume the quota and drop real hits), deduplicates by
    href, and skips anchors with no href or no visible text — consistent
    with search_torch/search_tordex.

    Args:
        query: Raw search string; URL-quoted before the request.
        max_results: Cap on the number of result dicts returned.

    Returns:
        List of engine/title/url/description dicts, or a single-element
        list with an "error" entry on failure.
    """
    results = []
    try:
        url = f"http://bobby64o755x3gqz3kkfjerjgbvut5ootafolixzwpfx2xms24ob3oad.onion/search?q={requests.utils.quote(query)}&t=0"
        r = requests.get(url, proxies=TOR_PROXY, timeout=TIMEOUT)
        soup = BeautifulSoup(r.text, "html.parser")
        seen = set()
        for item in soup.select(".result, .search-result, tr, li"):
            # Prefer a direct .onion link; fall back to any anchor.
            link = item.select_one("a[href*='.onion']") or item.select_one("a")
            if not link:
                continue
            href = link.get("href", "")
            title = link.get_text(strip=True)
            if not href or not title or href in seen:
                continue
            seen.add(href)
            results.append({
                "engine": "bobby",
                "title": title,
                "url": href,
                "description": ""
            })
            if len(results) >= max_results:
                break
    except Exception as e:
        results.append({"engine": "bobby", "error": str(e)})
    return results

def search_duckduckgo_onion(query, max_results=10):
    """DuckDuckGo .onion - anonymous clearnet search"""
    out = []
    try:
        resp = requests.get(
            f"https://duckduckgogg42xjoc72x3sjasowoarfbgcmvfimaftt6twagswzczad.onion/?q={requests.utils.quote(query)}",
            proxies=TOR_PROXY, timeout=TIMEOUT)
        page = BeautifulSoup(resp.text, "html.parser")
        for node in page.select(".result, .web-result")[:max_results]:
            heading = node.select_one("a.result__a, h2 a")
            if not heading:
                continue
            snippet = node.select_one(".result__snippet, .result__body")
            out.append({
                "engine": "duckduckgo_onion",
                "title": heading.get_text(strip=True),
                "url": heading.get("href", ""),
                "description": snippet.get_text(strip=True) if snippet else ""
            })
    except Exception as e:
        out.append({"engine": "duckduckgo_onion", "error": str(e)})
    return out

def search_searxng(query, max_results=10):
    """SearXNG local (via Tor proxy)"""
    out = []
    try:
        # Local instance — reached directly, no SOCKS proxy needed.
        resp = requests.get("http://127.0.0.1:8890/search",
                            params={"q": query, "format": "json"},
                            timeout=TIMEOUT)
        payload = resp.json()
        out.extend(
            {
                "engine": "searxng",
                "title": entry.get("title", ""),
                "url": entry.get("url", ""),
                "description": entry.get("content", ""),
            }
            for entry in payload.get("results", [])[:max_results]
        )
    except Exception as e:
        out.append({"engine": "searxng", "error": str(e)})
    return out

def fetch_onion(url):
    """Fetch content from a .onion or any URL via Tor"""
    try:
        resp = requests.get(url, proxies=TOR_PROXY, timeout=TIMEOUT)
        page = BeautifulSoup(resp.text, "html.parser")
        # Drop non-content elements before extracting text.
        for junk in page(["script", "style", "nav", "footer", "header"]):
            junk.decompose()
        # Cap output so huge pages don't flood the caller.
        return page.get_text(separator="\n", strip=True)[:8000]
    except Exception as e:
        return f"Error fetching {url}: {e}"


def check_reputation(url):
    """Check .onion site reputation across multiple sources.

    Runs six independent best-effort checks (each failure is recorded, never
    fatal) and aggregates their verdicts into a summary string.

    Args:
        url: The site URL (or bare onion address) to investigate.

    Returns:
        Dict with keys "url", "onion_address", "checks" (list of per-source
        dicts with "source"/"result"/"verdict") and "summary".
    """
    import re
    # Extract v3 (56-char) or legacy v2 (16-char) onion address from URL.
    match = re.search(r'([a-z2-7]{56}\.onion|[a-z2-7]{16}\.onion)', url)
    onion_addr = match.group(1) if match else url
    short_addr = onion_addr[:16]

    report = {"url": url, "onion_address": onion_addr, "checks": []}

    # 1. Check Ahmia index.
    try:
        # Route through Tor like every other request in this file — the
        # original hit ahmia.fi over clearnet, leaking the investigated
        # address to the local network/DNS.
        r = requests.get(f"https://ahmia.fi/search/?q={onion_addr}",
                         proxies=TOR_PROXY, timeout=TIMEOUT)
        soup = BeautifulSoup(r.text, "html.parser")
        results = soup.select("li.result")
        indexed = len(results) > 0
        report["checks"].append({
            "source": "Ahmia.fi",
            "result": f"Indexed ({len(results)} results)" if indexed else "NOT indexed",
            "verdict": "neutral" if indexed else "suspicious"
        })
    except Exception as e:
        report["checks"].append({"source": "Ahmia.fi", "result": f"Error: {e}", "verdict": "unknown"})

    # 2. Search for scam reports.
    try:
        scam_query = f"{short_addr} scam"
        scam_results = search_torch(scam_query, max_results=5)
        valid = [r for r in scam_results if "error" not in r]
        scam_mentions = [r for r in valid if "scam" in r.get("title", "").lower() or "scam" in r.get("description", "").lower()]
        report["checks"].append({
            "source": "Torch (scam search)",
            "result": f"{len(scam_mentions)} scam mentions found" if scam_mentions else "No scam mentions",
            "verdict": "dangerous" if scam_mentions else "neutral"
        })
    except Exception as e:
        report["checks"].append({"source": "Torch (scam)", "result": f"Error: {e}", "verdict": "unknown"})

    # 3. Search for reviews.
    try:
        review_query = f"{short_addr} review"
        review_results = search_tordex(review_query, max_results=5)
        valid = [r for r in review_results if "error" not in r]
        report["checks"].append({
            "source": "Tordex (reviews)",
            "result": f"{len(valid)} review mentions found" if valid else "No reviews found",
            "details": [{"title": r["title"], "url": r["url"]} for r in valid[:3]],
            "verdict": "positive" if valid else "suspicious"
        })
    except Exception as e:
        report["checks"].append({"source": "Tordex (reviews)", "result": f"Error: {e}", "verdict": "unknown"})

    # 4. Check SCAM DIRECTORY listing.
    try:
        scam_dir_url = f"http://2s3knlqncblhwq5ljuwnly2j6xfz5wjmv3atjq7m2k3w7mxtsuq2x6qd.onion/?s={short_addr}"
        r = requests.get(scam_dir_url, proxies=TOR_PROXY, timeout=TIMEOUT)
        # Substring match on the raw HTML — crude but sufficient here.
        found = onion_addr in r.text or short_addr in r.text
        report["checks"].append({
            "source": "SCAM DIRECTORY (.onion)",
            "result": "LISTED as scam!" if found else "Not listed",
            "verdict": "dangerous" if found else "neutral"
        })
    except Exception as e:
        report["checks"].append({"source": "SCAM DIRECTORY", "result": f"Error: {e}", "verdict": "unknown"})

    # 5. Check Deepweb Reviews.
    try:
        reviews_url = f"http://bwdbtsypbk7tzofrzsf52j6axqngk7a2fxscl4kmbkrv2iech6pxhhad.onion/?s={short_addr}"
        r = requests.get(reviews_url, proxies=TOR_PROXY, timeout=TIMEOUT)
        found = onion_addr in r.text or short_addr in r.text
        report["checks"].append({
            "source": "Deepweb Reviews (.onion)",
            "result": "Has reviews" if found else "No reviews found",
            "verdict": "positive" if found else "neutral"
        })
    except Exception as e:
        report["checks"].append({"source": "Deepweb Reviews", "result": f"Error: {e}", "verdict": "unknown"})

    # 6. On-site trust-indicator analysis.
    try:
        r = requests.get(url, proxies=TOR_PROXY, timeout=TIMEOUT)
        soup = BeautifulSoup(r.text, "html.parser")
        text = soup.get_text(strip=True).lower()
        has_pgp = bool(soup.find(string=re.compile(r'-----BEGIN PGP', re.I)))
        has_canary = "warrant canary" in text
        has_escrow = "escrow" in text
        # e.g. "© 2019-" style footer year range.
        site_age_claim = re.search(r'©?\s*(20\d{2})\s*[-–]', soup.get_text())

        analysis = []
        if has_pgp:
            analysis.append("PGP key found (good)")
        else:
            analysis.append("No PGP key (suspicious)")
        if has_canary:
            analysis.append("Warrant canary present (good)")
        if has_escrow:
            analysis.append("Claims escrow support")
        if site_age_claim:
            analysis.append(f"Claims to exist since {site_age_claim.group(1)}")

        report["checks"].append({
            "source": "Site analysis",
            "result": "; ".join(analysis) if analysis else "No trust indicators found",
            "verdict": "positive" if has_pgp else "suspicious"
        })
    except Exception as e:
        report["checks"].append({"source": "Site analysis", "result": f"Error: {e}", "verdict": "unknown"})

    # Aggregate verdicts into a human-readable summary.
    verdicts = [c["verdict"] for c in report["checks"]]
    dangerous = verdicts.count("dangerous")
    suspicious = verdicts.count("suspicious")
    positive = verdicts.count("positive")

    if dangerous > 0:
        report["summary"] = "HIGH RISK — scam indicators found"
    elif suspicious >= 3:
        report["summary"] = "SUSPICIOUS — multiple warning signs"
    elif suspicious >= 2:
        report["summary"] = "LOW TRUST — limited reputation data"
    elif positive >= 2:
        report["summary"] = "MODERATE TRUST — some positive indicators"
    else:
        report["summary"] = "UNKNOWN — insufficient data"

    return report

if __name__ == "__main__":
    if len(sys.argv) < 2:
        # Usage now lists all three supported modes (--reputation was
        # implemented but undocumented).
        print("Usage: darknet-search.py <query> | --fetch <url> | --reputation <url>")
        sys.exit(1)

    if sys.argv[1] == "--reputation" and len(sys.argv) > 2:
        report = check_reputation(sys.argv[2])
        print(json.dumps(report, indent=2, ensure_ascii=False))
        sys.exit(0)

    if sys.argv[1] == "--fetch" and len(sys.argv) > 2:
        print(fetch_onion(sys.argv[2]))
    else:
        query = " ".join(sys.argv[1:])
        print(f"=== Darknet Search: {query} ===\n")

        all_results = []
        # .onion search engines
        all_results.extend(search_torch(query))
        all_results.extend(search_haystack(query))
        all_results.extend(search_ahmia(query))
        all_results.extend(search_tordex(query))
        all_results.extend(search_bobby(query))
        all_results.extend(search_duckduckgo_onion(query))
        # clearnet via Tor
        all_results.extend(search_searxng(query))

        # Separate real hits from per-engine error entries.
        valid = [r for r in all_results if "error" not in r]
        errors = [r for r in all_results if "error" in r]

        print(f"Found {len(valid)} results from {len(set(r['engine'] for r in valid))} engines")
        if errors:
            print(f"({len(errors)} engines had errors)\n")
        print(json.dumps(valid, indent=2, ensure_ascii=False))
        if errors:
            print("\n--- Errors ---")
            print(json.dumps(errors, indent=2, ensure_ascii=False))