Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ $ python3 -m pip install -r requirements.txt
```console
$ python3 sherlock --help
usage: sherlock [-h] [--version] [--verbose] [--folderoutput FOLDEROUTPUT]
[--output OUTPUT] [--tor] [--unique-tor] [--csv]
[--output OUTPUT] [--tor] [--unique-tor] [--csv] [--txt]
[--site SITE_NAME] [--proxy PROXY_URL] [--json JSON_FILE]
[--timeout TIMEOUT] [--print-all] [--print-found] [--no-color]
[--browse] [--local] [--nsfw]
Expand Down Expand Up @@ -71,6 +71,7 @@ optional arguments:
--csv Create Comma-Separated Values (CSV) File.
--xlsx Create the standard file for the modern Microsoft Excel
                        spreadsheet (xlsx).
  --txt                 Create .txt file.
--site SITE_NAME Limit analysis to just the listed sites. Add multiple options to
specify more than one site.
--proxy PROXY_URL, -p PROXY_URL
Expand Down
34 changes: 22 additions & 12 deletions sherlock/sherlock.py
Original file line number Diff line number Diff line change
Expand Up @@ -517,8 +517,12 @@ def main():
)
parser.add_argument("--xlsx",
action="store_true", dest="xlsx", default=False,
help="Create the standard file for the modern Microsoft Excel spreadsheet (xslx)."
)
help="Create the standard file for the modern Microsoft Excel spreadsheet (xslx).")

parser.add_argument("--txt",
action="store_true", dest="txt", default=False,
help="Create .txt file.")

parser.add_argument("--site",
action="append", metavar="SITE_NAME",
dest="site_list", default=None,
Expand Down Expand Up @@ -694,15 +698,19 @@ def main():
else:
result_file = f"{username}.txt"

with open(result_file, "w", encoding="utf-8") as file:
exists_counter = 0
for website_name in results:
dictionary = results[website_name]
if dictionary.get("status").status == QueryStatus.CLAIMED:
exists_counter += 1
file.write(dictionary["url_user"] + "\n")
file.write(
f"Total Websites Username Detected On : {exists_counter}\n")
if args.txt:
try:
with open(result_file, "w", encoding="utf-8") as file:
exists_counter = 0
for website_name in results:
dictionary = results[website_name]
if dictionary.get("status").status == QueryStatus.CLAIMED:
exists_counter += 1
file.write(dictionary["url_user"] + "\n")
file.write(
f"Total Websites Username Detected On : {exists_counter}\n")
except IOError as e:
print("An error occured. File could not be made or written.", e)

if args.csv:
result_file = f"{username}.csv"
Expand Down Expand Up @@ -763,7 +771,9 @@ def main():
exists.append(str(results[site]["status"].status))
http_status.append(results[site]["http_status"])

DataFrame = pd.DataFrame({"username": usernames, "name": names, "url_main": url_main, "url_user": url_user, "exists": exists, "http_status": http_status, "response_time_s": response_time_s})
DataFrame = pd.DataFrame(
{"username": usernames, "name": names, "url_main": url_main, "url_user": url_user, "exists": exists,
"http_status": http_status, "response_time_s": response_time_s})
DataFrame.to_excel(f'{username}.xlsx', sheet_name='sheet1', index=False)

print()
Expand Down