# NOTE: the module header was not captured in this snippet; the imports below
# are reconstructed (an assumption). Project-level helpers (c, fetch_emails,
# get_emails_from_file, target_factory, gzip_worker, worker_url,
# get_urls_from_file, fetch_urls, original_sigint_handler) are assumed to be
# imported from the surrounding package.
import glob
import os
import signal
import stat
import subprocess
import time
from itertools import repeat, takewhile
from multiprocessing import Pool
from time import sleep

import requests


def breachcomp_check(targets, breachcomp_path):
    # https://gist.github.com/scottlinux/9a3b11257ac575e4f71de811322ce6b3
    try:
        query_bin = os.path.join(breachcomp_path, "query.sh")
        # Make query.sh executable by OR-ing the owner-execute bit into its mode
        st = os.stat(query_bin)
        os.chmod(query_bin, st.st_mode | stat.S_IEXEC)
        for t in targets:
            c.info_news(f"Looking up {t.email} in BreachCompilation")
            procfd = subprocess.run([query_bin, t.email], stdout=subprocess.PIPE)
            try:
                # cp437 maps all 256 byte values, so decoding dirty dump
                # data is far less likely to raise than utf-8
                output = procfd.stdout.decode("cp437")
            except Exception:
                c.bad_news(f"Could not decode bytes for {t.email} results")
                output = procfd.stdout
                # print(output[:85], "[...]")
                print(output)
                continue
            if len(output) != 0:
                split_output = output.split("\n")
                for line in split_output:
                    # query.sh emits email:password pairs, one per line
                    if ":" in line:
                        t.pwned += 1
                        t.data.append(("BC_PASS", line.split(":")[1]))
                        c.good_news(f"Found BreachCompilation entry: {line}")
        # NOTE: the snippet was cut mid-call above; the log message, return
        # and error handling below are reconstructed (assumptions)
        return targets
    except Exception as e:
        c.bad_news("BreachCompilation check failed")
        print(e)
        return targets
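
# Hedged usage sketch (not from the original file): targets must expose
# .email, .pwned and .data; _FakeTarget is a hypothetical stand-in for the
# project's real target class, and the path below is made up.
#
#   class _FakeTarget:
#       def __init__(self, email):
#           self.email = email
#           self.pwned = 0
#           self.data = []
#
#   hits = breachcomp_check([_FakeTarget("jane@example.com")],
#                           "/data/BreachCompilation")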
# NOTE: the original header of this routine was cut from the snippet; the
# name, signature and closing return below are reconstructed (assumptions)
# from how the body uses `user_args`.
def run_targets(user_args):
    """
    Prints results, saves to csv if requested in user inputs
    """
    if not user_args.user_targets:
        c.bad_news("Missing Target")
        exit(1)
    targets = []
    start_time = time.time()
    c.good_news("Targets:")
    # Find targets in user input or file
    for arg in user_args.user_targets:
        user_stdin_target = fetch_emails(arg, user_args)
        if user_stdin_target:
            targets.extend(user_stdin_target)
        elif os.path.isfile(arg):
            c.info_news("Reading from file " + arg)
            targets.extend(get_emails_from_file(arg, user_args))
        else:
            c.bad_news("No targets found in user input")
            exit(1)
    c.info_news("Removing duplicates")
    targets = list(set(targets))
    # Launch
    breached_targets = target_factory(targets, user_args)
    # These checks are not done inside the factory, which iterates over each
    # target individually; the functions below scan files line by line and
    # check every target against each line
    if user_args.bc_path:
        breached_targets = breachcomp_check(breached_targets, user_args.bc_path)
    return breached_targets
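
# Hedged usage sketch (not from the original file), assuming an argparse-style
# namespace carrying only the fields this snippet reads:
#
#   from argparse import Namespace
#   run_targets(Namespace(user_targets=["jane@example.com"], bc_path=None))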
# NOTE: this snippet also begins mid-function; the name, signature, docstring
# and pool creation below are reconstructed (assumptions).
def local_gzip_search(files_to_parse, target_list):
    """
    Fans the gzip'd files out to a multiprocessing pool, one worker per file.
    Returns the combined list of hits.
    """
    pool = Pool()
    found_list = []
    # Restore the original SIGINT handler in the parent so Ctrl-C lands in
    # the try/except below instead of being inherited by the workers
    signal.signal(signal.SIGINT, original_sigint_handler)
    # Launch
    try:
        async_results = [
            pool.apply_async(gzip_worker, args=(f, target_list))
            for i, f in enumerate(files_to_parse)
        ]
        for r in async_results:
            # get() blocks until the worker finishes; the second call with a
            # 60s timeout returns the already-cached result
            if r.get() is not None:
                found_list.extend(r.get(60))
    except KeyboardInterrupt:
        c.bad_news("Caught KeyboardInterrupt, terminating workers")
        pool.terminate()
    else:
        c.info_news("Terminating worker pool")
        pool.close()
        pool.join()
    return found_list
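
# Hedged aside (not from the original file): the usual form of this recipe
# ignores SIGINT before Pool() forks so that the workers inherit SIG_IGN,
# then restores the saved handler in the parent; that is presumably where
# `original_sigint_handler` comes from:
#
#   original_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
#   pool = Pool(processes=4)
#   signal.signal(signal.SIGINT, original_sigint_handler)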
def find_files(to_parse, pattern=""):
    """
    Returns list of files from the to_parse filepath.
    Supports globbing (*) in filepaths.
    Can check for patterns such as 'gz'.
    """
    allfiles = []
    if "*" in to_parse:
        glob_result = glob.glob(to_parse)
        for g in glob_result:
            allfiles.append(g)
            c.info_news("Using file {}".format(g))
    if os.path.isfile(to_parse):
        if pattern in to_parse:
            c.info_news("Using file {}".format(to_parse))
            allfiles.append(to_parse)
    elif os.path.isdir(to_parse):
        for root, _, filenames in os.walk(to_parse):
            for filename in filenames:
                if pattern in filename:
                    c.info_news("Using file {}".format(os.path.join(root, filename)))
                    allfiles.append(os.path.join(root, filename))
    return allfiles
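
# Hedged usage sketch (not from the original file; paths are made up):
#
#   find_files("/leaks/*.gz")            # expand a glob
#   find_files("/leaks", pattern="gz")   # walk a directory tree, keep matches
#   find_files("/leaks/dump.txt")        # single file; default pattern "" always matches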
def raw_in_count(filename):
    """
    StackOverflow trick to rapidly count lines in big files.
    Returns total line number.
    """
    c.info_news("Identifying total line number...")
    with open(filename, "rb") as f:
        # Pull raw 1 MiB chunks with no decoding; takewhile() stops at the
        # empty bytes object returned on EOF
        bufgen = takewhile(lambda x: x, (f.raw.read(1024 * 1024) for _ in repeat(None)))
        return sum(buf.count(b"\n") for buf in bufgen)
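
# Hedged note (not from the original file): only newline bytes are counted,
# so huge dumps never pay for text decoding, e.g.:
#
#   total_lines = raw_in_count("/leaks/dump.txt")  # hypothetical path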
def print_results(results, hide=False):
    for t in results:
        print()
        c.print_res_header(t.target)
        for i in range(len(t.data)):
            sleep(0.001)
            if len(t.data) == 1:
                print()
                c.info_news("No results found")
                continue
            if len(t.data[i]) >= 2:  # Contains header + body data
                if hide:
                    # Mask secrets: keep the first 4 chars of passwords, drop
                    # the last 5 chars of local results
                    if "PASS" in t.data[i][0]:
                        c.print_result(
                            t.target, t.data[i][1][:4] + "********", t.data[i][0]
                        )
                        continue
                    if "LOCAL" in t.data[i][0]:
                        c.print_result(
                            t.target, t.data[i][1][:-5] + "********", t.data[i][0]
                        )
                        continue
                if "HIBP" in t.data[i][0]:
                    c.print_result(t.target, t.data[i][1], t.data[i][0])
                if "HUNTER_PUB" in t.data[i][0]:
                    # NOTE: the snippet was cut here; this branch is completed
                    # by analogy with the HIBP branch above (an assumption)
                    c.print_result(t.target, t.data[i][1], t.data[i][0])
def target_urls(user_args):
    """
    For each user input given with --url, check whether it is a file.
    If it is, parse each of its lines with the email regexp; otherwise parse the input itself.
    Parses the HTML pages behind the URLs for email patterns.
    Returns list of email targets
    """
    try:
        c.info_news("Starting URL fetch")
        urls = []
        emails = []
        for arg in user_args.user_urls:
            if os.path.isfile(arg):
                e = get_urls_from_file(arg)
            else:
                e = fetch_urls(arg)
            if e is None:
                continue
            else:
                urls.extend(e)
        for url in urls:
            e = worker_url(url)
            # e = get_emails_from_file(tmpfile, user_args)
            if e is None:
                # NOTE: the snippet was cut at this line; the loop closure,
                # return and error handling below are reconstructed (assumptions)
                continue
            emails.extend(e)
        return emails
    except Exception as ex:
        c.bad_news("URL fetch failed")
        print(ex)
        return []
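
# Hedged usage sketch (not from the original file): mixing a literal URL with
# a file of URLs, via an argparse-style namespace:
#
#   from argparse import Namespace
#   emails = target_urls(Namespace(user_urls=["https://example.com", "urls.txt"]))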
def check_scylla_online():
    """
    Checks if scylla.sh is online
    """
    try:
        # Suppress the SSL warning on the UI (verify=False below)
        requests.packages.urllib3.disable_warnings()
        resp = requests.head(url="https://scylla.sh", verify=False)
        if resp.status_code == 200:
            c.good_news("scylla.sh is up")
            return True
        return False
    except Exception:
        c.info_news("scylla.sh is down, skipping")
        return False