Repository restructuring
136 tools/scanner/crawl_urls.py (executable file)
@@ -0,0 +1,136 @@
#!/usr/bin/env python3

import argparse
import queue
import re
import urllib.parse

import requests
import urllib3
from bs4 import BeautifulSoup

# Certificate errors are deliberately ignored below, so silence the warning
# urllib3 would otherwise print for every request.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

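# Breadth-first crawler: starting from one URL it follows same-domain links,
# sorting everything it finds into in-scope pages, static resources and
# out-of-scope (third-party) URLs.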
class Crawler:

    def __init__(self, url):
        self.url = url

        parts = urllib.parse.urlparse(url)
        if not parts.scheme and not parts.netloc and parts.path:
            # A bare host name like "example.com" ends up in .path;
            # normalize it to "http://example.com".
            self.domain = parts.path
            self.url = parts._replace(scheme="http", netloc=parts.path, path="").geturl()
            self.scheme = "http"
        else:
            self.domain = parts.netloc
            self.scheme = parts.scheme if parts.scheme else "http"

        self.user_agent = "WebCrawler/1.0"
        self.cookies = {}
        self.proxy = None

        # Crawl state
        self.queue = queue.Queue()
        self.visited = set()
        self.out_of_scope = set()
        self.resources = set()
        self.pages = set()

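    # One GET request with the configured User-Agent, cookies and proxy.
    # TLS verification stays off because scan targets often use
    # self-signed certificates.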
    def request(self, url):
        headers = {"User-Agent": self.user_agent}
        kwargs = {"verify": False, "cookies": self.cookies, "headers": headers}
        if self.proxy:
            # requests expects the key "proxies", not "proxy"
            kwargs["proxies"] = {
                "http": self.proxy,
                "https": self.proxy
            }

        print("requesting:", url)
        return requests.get(url, **kwargs)

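    # Work through the queue breadth-first: fetch a page, classify every
    # URL found on it, and enqueue in-scope HTML pages for further crawling.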
    def start(self):
        self.queue.put(self.url)
        while not self.queue.empty():
            url = self.queue.get()
            if url in self.visited:
                continue

            self.visited.add(url)
            res = self.request(url)
            content_type = res.headers.get("Content-Type", "")
            # Only parse HTML; "text/html; charset=utf-8" must match too.
            if "text/html" not in [part.strip() for part in content_type.lower().split(";")]:
                continue

            for link in self.collect_urls(res.text):
                # Resolve relative links against the page they came from.
                absolute = urllib.parse.urljoin(url, link)
                parts = urllib.parse.urlparse(absolute)
                if parts.netloc and parts.netloc != self.domain:
                    self.out_of_scope.add(absolute)
                else:
                    resources_ext = ["jpg", "jpeg", "gif", "png", "css", "js", "svg", "ico"]
                    if parts.path.rsplit(".", 1)[-1].lower() in resources_ext:
                        self.resources.add(absolute)
                    else:
                        self.pages.add(absolute)
                        self.queue.put(parts._replace(fragment="").geturl())

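    # Pull candidate URLs out of the usual link-carrying attributes of
    # a/link/script/img/form tags.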
    @staticmethod
    def collect_urls(page):
        if not isinstance(page, BeautifulSoup):
            page = BeautifulSoup(page, "html.parser")

        urls = set()
        attrs = ["src", "href", "action"]
        tags = ["a", "link", "script", "img", "form"]

        for tag in tags:
            for e in page.find_all(tag):
                for attr in attrs:
                    if e.has_attr(attr):
                        urls.add(e[attr])

        return urls


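# CLI entry point: parse the options, validate and decode any cookies,
# run the crawl and print the classified URLs.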
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("url", help="The target URL to scan, e.g. http://example.com:8080/dir/")
    parser.add_argument("--proxy", help="Proxy to connect through")  # TODO
    parser.add_argument("--user-agent", help="User-Agent to use")
    parser.add_argument("--cookie", help="Cookies to send", action='append', default=[])
    parser.add_argument('--verbose', '-v', help="Verbose output", action='store_true')

    args = parser.parse_args()

    crawler = Crawler(args.url)
    if args.user_agent:
        crawler.user_agent = args.user_agent
    if args.proxy:
        crawler.proxy = args.proxy

    # Cookies arrive as "key=value"; both halves may be URL-encoded.
    cookie_pattern = re.compile("^([a-zA-Z0-9.%/+_-]+)=([a-zA-Z0-9.%/+_-]*)$")
    for cookie in args.cookie:
        m = cookie_pattern.match(cookie)
        if not m:
            print("[-] Cookie does not match pattern:", cookie)
            print("[-] You might need to URL-encode it")
            exit(1)
        key, value = urllib.parse.unquote(m[1]), urllib.parse.unquote(m[2])
        crawler.cookies[key] = value

    crawler.start()

    results = {
        "Pages": crawler.pages,
        "Resources": crawler.resources,
        "Out of Scope": crawler.out_of_scope
    }

    for name, values in results.items():
        print(f"=== {name} ===")
        print("\n".join(sorted(values)))
33 tools/scanner/first_scan.sh (executable file)
@@ -0,0 +1,33 @@
#!/usr/bin/bash

if [ $# -lt 1 ]; then
    echo "Usage: $0 <host>"
    exit 1
fi

if [ "$EUID" -ne 0 ]; then
    echo "[-] Script requires root permissions (e.g. for the nmap scan)"
    exit 1
fi

IP_ADDRESS=$1

echo "[+] Checking online status…"
if ! ping -c1 -W1 -q "${IP_ADDRESS}" &>/dev/null; then
    echo "[-] Target not reachable"
    exit 1
fi

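# Full TCP port sweep; keep only the "<port>/tcp" lines, strip everything
# after the "/", and join the ports into a comma-separated list for -p.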
echo "[+] Scanning for open ports…"
|
||||
PORTS=$(nmap -p- -T4 ${IP_ADDRESS} | grep ^[0-9] | cut -d '/' -f 1 | tr '\n' ',' | sed s/,$//)
|
||||
if [ -z "${PORTS}" ]; then
|
||||
echo "[-] No open ports found"
|
||||
exit
|
||||
fi
|
||||
|
||||
echo "[+] Open ports: ${PORTS}"
|
||||
echo "[+] Performing service scans…"
|
||||
nmap -A "${IP_ADDRESS}" -p$PORTS -T4 -v
|
||||
13 tools/scanner/gobuster.sh (executable file)
@@ -0,0 +1,13 @@
#!/usr/bin/bash

if [ $# -lt 1 ]; then
    echo "Usage: $0 <host>"
    exit 1
fi

HOST=$1
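# Directory brute force; -b hides 403/404 responses, -k skips TLS
# verification, and any extra arguments are passed straight to gobuster.
# set -x echoes the final command line before it runs.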
(set -x; gobuster dir \
    --url="${HOST}" \
    --wordlist="/usr/share/wordlists/SecLists/Discovery/Web-Content/raft-large-words-lowercase.txt" \
    -b "403,404" -k \
    "${@:2}")
33 tools/scanner/phpinfo-analyzer.py (executable file)
@@ -0,0 +1,33 @@
#!/usr/bin/env python3

import sys

import requests
from bs4 import BeautifulSoup


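# phpinfo() renders configuration as three-column tables: a directive name
# followed by its "Local Value" and "Master Value". Rows where the two
# values differ point at per-directory or runtime overrides worth a look.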
def analyze(soup):
    for table in soup.find_all("table"):
        thead = table.find("tr", {"class": "h"})
        if not thead or len(thead.find_all("th")) != 3:
            continue

        for tr in table.find_all("tr"):
            tds = tr.find_all("td")
            if len(tds) != 3:
                continue

            label, local, master = tds
            if local.text != master.text:
                print(f"[+] {label.text} differs. local={local.text} master={master.text}")


if __name__ == "__main__":
    if len(sys.argv) < 2:
        print(f"Usage: {sys.argv[0]} <url>")
    else:
        url = sys.argv[1]
        res = requests.get(url)
        if res.status_code != 200:
            print("[-] Server returned:", res.status_code, res.reason)
        else:
            soup = BeautifulSoup(res.text, "html.parser")
            analyze(soup)
57 tools/scanner/subdomainFuzz.sh (executable file)
@@ -0,0 +1,57 @@
#!/bin/bash

if [ $# -lt 1 ]; then
    echo "Usage: $0 <domain> [ip-address]"
    exit 1
fi

DOMAIN=$1
PROTOCOL="http"

if [[ $DOMAIN = https://* ]]; then
    PROTOCOL="https"
fi

# Strip the scheme and any trailing path, leaving the bare host name.
DOMAIN=$(echo "$DOMAIN" | sed -e 's|^[^/]*//||' -e 's|/.*$||')

if [ $# -lt 2 ]; then
    echo "[ ] Resolving IP-Address…"
    if ! output=$(resolveip "$DOMAIN" 2>&1); then
        echo "[-] ${output}"
        exit 1
    fi
    IP_ADDRESS=$(echo "$output" | head -n 1 | awk '{print $NF}')
    echo "[+] IP-Address: ${IP_ADDRESS}"
else
    IP_ADDRESS=$2
    echo "[+] Using IP-Address: ${IP_ADDRESS}"
fi

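# curl's --resolve needs a host:port pair; append the protocol's default
# port unless the caller already supplied one.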
function sni () {
    local protocol=$1
    local sni=$2
    # The pattern must stay unquoted: a quoted right-hand side of =~
    # is matched as a literal string, not as a regex.
    if ! [[ "$sni" =~ :[0-9]+$ ]]; then
        if [[ $protocol == "https" ]]; then
            sni="$sni:443"
        else
            sni="$sni:80"
        fi
    fi

    echo "$sni"
}

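# Baseline measurement: fetch the default site by IP, by a random
# (certainly non-existent) subdomain and by the real domain, and record the
# response sizes. ffuf then filters out any hit matching those sizes, so
# only subdomains that serve different content remain.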
echo "[ ] Retrieving default site…"
|
||||
rnd=$(uuidgen)
|
||||
sni=$(sni ${PROTOCOL} ${rnd}.${DOMAIN})
|
||||
charcountIpAddress=$(curl -s "${PROTOCOL}://${IP_ADDRESS}" -k -m 5 | wc -m)
|
||||
charcountNonExistent=$(curl -s "${PROTOCOL}://${rnd}.${DOMAIN}" --resolve "${sni}:${IP_ADDRESS}" -k -m 5 | wc -m)
|
||||
charcountDomain=$(curl -s "${PROTOCOL}://${DOMAIN}" -k -m 5 | wc -m)
|
||||
echo "[+] Chars: ${charcountDomain}, ${charcountIpAddress}, ${charcountNonExistent}"
|
||||
echo "[ ] Fuzzing…"
|
||||
|
||||
(set -x; ffuf --fs ${charcountDomain},${charcountIpAddress},${charcountNonExistent} --fc 400 --mc all \
|
||||
-w /usr/share/wordlists/SecLists/Discovery/DNS/subdomains-top1million-110000.txt \
|
||||
-u "${PROTOCOL}://${DOMAIN}" -H "Host: FUZZ.${DOMAIN}" "${@:2}")
|
||||