diff --git a/VERSION b/VERSION index d532fd93..249319ed 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.5.22 +1.5.23 diff --git a/engines/nmap/Dockerfile b/engines/nmap/Dockerfile index 6d0d5e96..2b1836e4 100644 --- a/engines/nmap/Dockerfile +++ b/engines/nmap/Dockerfile @@ -1,5 +1,5 @@ FROM alpine:3.16.3 -LABEL Name="Nmap\ \(Patrowl engine\)" Version="1.4.49" +LABEL Name="Nmap\ \(Patrowl engine\)" Version="1.5.0" # Set the working directory RUN mkdir -p /opt/patrowl-engines/nmap diff --git a/engines/nmap/VERSION b/engines/nmap/VERSION index ed32bf2a..bc80560f 100644 --- a/engines/nmap/VERSION +++ b/engines/nmap/VERSION @@ -1 +1 @@ -1.4.49 +1.5.0 diff --git a/engines/nmap/__init__.py b/engines/nmap/__init__.py index b11a9d2d..2a8a55bd 100644 --- a/engines/nmap/__init__.py +++ b/engines/nmap/__init__.py @@ -2,7 +2,7 @@ # -*- coding: utf-8 -*- __title__ = 'patrowl_engine_nmap' -__version__ = '1.4.49' +__version__ = '1.5.0' __author__ = 'Nicolas MATTIOCCO' __license__ = 'AGPLv3' __copyright__ = 'Copyright (C) 2018-2022 Nicolas Mattiocco - @MaKyOtOx' diff --git a/engines/nmap/engine-nmap.py b/engines/nmap/engine-nmap.py index 4bf14791..b06fce92 100644 --- a/engines/nmap/engine-nmap.py +++ b/engines/nmap/engine-nmap.py @@ -20,71 +20,165 @@ import xml.etree.ElementTree as ET import banner +# Own library imports +from PatrowlEnginesUtils.PatrowlEngine import _json_serial +from PatrowlEnginesUtils.PatrowlEngine import PatrowlEngine +from PatrowlEnginesUtils.PatrowlEngineExceptions import PatrowlEngineExceptions + +from requests.packages.urllib3.exceptions import InsecureRequestWarning + app = Flask(__name__) APP_DEBUG = os.environ.get("DEBUG", "").lower() in ["true", "1", "yes", "y", "on"] +APP_MAXSCANS = int(os.environ.get("APP_MAXSCANS", 5)) + APP_HOST = "0.0.0.0" APP_PORT = 5001 -APP_MAXSCANS = int(os.environ.get("APP_MAXSCANS", 5)) +APP_ENGINE_NAME = "nmap" APP_SCAN_TIMEOUT_DEFAULT = int(os.environ.get("APP_SCAN_TIMEOUT_DEFAULT", 7200)) BASE_DIR = os.path.dirname(os.path.realpath(__file__)) this = sys.modules[__name__] -this.scanner = {} -this.scan_id = 1 -this.scans = {} - -# Generic functions -def _json_serial(obj): - if isinstance(obj, datetime.datetime): - serial = obj.isoformat() - return serial - raise TypeError("Type not serializable") +engine = PatrowlEngine( + app=app, base_dir=BASE_DIR, name=APP_ENGINE_NAME, max_scans=APP_MAXSCANS +) +this.engine = engine # Route actions @app.route("/") def default(): """Handle default route.""" - return redirect(url_for("index")) + return engine.default() @app.route("/engines/nmap/") def index(): - """Handle index route.""" - return jsonify({"page": "index"}) + """Return index page.""" + return engine.index() + + +@app.route("/engines/nmap/liveness") +def liveness(): + """Return liveness page.""" + return engine.liveness() + + +@app.route("/engines/nmap/readiness") +def readiness(): + """Return readiness page.""" + return engine.readiness() + + +@app.route("/engines/nmap/info") +def info(): + """Get info on running engine.""" + return engine.info() + + +@app.route("/engines/nmap/clean") +def clean(): + """Clean all scans.""" + reloadconfig() + return engine.clean() + + +@app.route("/engines/nmap/clean/") +def clean_scan(scan_id): + """Clean scan identified by id.""" + return engine.clean_scan(scan_id) + + +def _engine_is_busy(): + """Returns if engine is busy scanning.""" + return engine._engine_is_busy() + + +@app.route("/engines/nmap/status") +def status(): + """Get status on engine and all scans.""" + return engine.get_status() + + 
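The engine-nmap.py changes above swap the module-level `this.scanner` / `this.scans` globals for a shared `PatrowlEngine` object and turn most routes into one-line delegations. The `PatrowlEnginesUtils` internals are not part of this diff, so the following is only a minimal sketch of that delegation pattern; every name in it is illustrative, not the real API:

```python
from flask import Flask, jsonify

app = Flask(__name__)


class MiniEngine:
    """Toy stand-in for PatrowlEngine: central state plus generic handlers."""

    def __init__(self, app, name, max_scans):
        self.app = app
        self.name = name
        self.max_scans = max_scans
        self.scanner = {"status": "INIT"}   # filled from <name>.json by loadconfig()
        self.scans = {}                     # scan_id -> scan state

    def index(self):
        return jsonify({"page": "index"})

    def liveness(self):
        # Liveness only says the process answers HTTP, not that it can scan.
        return "OK", 200

    def readiness(self):
        # Ready only when the config is loaded and a scan slot is free.
        if self.scanner.get("status") != "READY" or len(self.scans) >= self.max_scans:
            return "BUSY", 503
        return "OK", 200


engine = MiniEngine(app=app, name="nmap", max_scans=5)


@app.route("/engines/nmap/liveness")
def liveness():
    # The route stays in the engine module; the behaviour lives on the shared object.
    return engine.liveness()
```

Centralizing the state on one object is what lets `clean`, `status` and `stop` behave uniformly across engines instead of each module re-implementing them against its own globals.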
+@app.route("/engines/nuclei/getreport/") +def getreport(scan_id): + """Get report on finished scans.""" + return engine.getreport(scan_id) def loadconfig(): """Load configuration from local file.""" + res = {"page": "loadconfig"} conf_file = f"{BASE_DIR}/nmap.json" if os.path.exists(conf_file): json_data = open(conf_file) - this.scanner = json.load(json_data) - this.scanner["status"] = "READY" + engine.scanner = json.load(json_data) else: - this.scanner["status"] = "ERROR" + engine.scanner["status"] = "ERROR" return {"status": "ERROR", "reason": "config file not found."} - if not os.path.isfile(this.scanner["path"]): - this.scanner["status"] = "ERROR" + + if not os.path.isfile(engine.scanner["path"]): + engine.scanner["status"] = "ERROR" return {"status": "ERROR", "reason": "path to nmap binary not found."} version_filename = f"{BASE_DIR}/VERSION" if os.path.exists(version_filename): version_file = open(version_filename, "r") - this.scanner["version"] = version_file.read().rstrip("\n") + engine.scanner["version"] = version_file.read().rstrip("\n") version_file.close() + engine.scanner["status"] = "READY" + res.update( + { + "status": "success", + "message": "Config file loaded.", + "config": engine.scanner, + } + ) + return res + @app.route("/engines/nmap/reloadconfig") def reloadconfig(): """Reload configuration route.""" res = {"page": "reloadconfig"} loadconfig() - res.update({"config": this.scanner}) + res.update({"config": engine.scanner}) return jsonify(res) +@app.errorhandler(404) +def page_not_found(e): + """Page not found.""" + return engine.page_not_found() + + +@app.route("/engines/nmap/test") +def test(): + """Return test page.""" + return engine.test() + + +@app.route("/engines/nmap/status/") +def status_scan(scan_id): + """Get status on scan identified by id.""" + return engine.status_scan(scan_id) + + +# Stop all scans +@app.route("/engines/nmap/stopscans") +def stop(): + """Stop all scans.""" + return engine.stop_scan() + + +@app.route("/engines/nmap/stop/") +def stop_scan(scan_id): + """Stop scan identified by id.""" + return engine.stop_scan(scan_id) + + +########################## @app.route("/engines/nmap/startscan", methods=["POST"]) def start(): res = {"page": "startscan"} @@ -102,13 +196,13 @@ def start(): # update scanner status status() - if this.scanner["status"] != "READY": + if engine.scanner["status"] != "READY": res.update( { "status": "refused", "details": { "reason": "scanner not ready", - "status": this.scanner["status"], + "status": engine.scanner["status"], }, } ) @@ -126,7 +220,7 @@ def start(): return jsonify(res), 500 scan_id = str(data["scan_id"]) - if data["scan_id"] in this.scans.keys(): + if data["scan_id"] in engine.scans.keys(): res.update( { "status": "refused", @@ -137,40 +231,42 @@ def start(): ) return jsonify(res), 503 - if type(data["options"]) is str: - data["options"] = json.loads(data["options"]) + options = {} + if isinstance(data["options"], str): + options = json.loads(data["options"]) scan = { "assets": data["assets"], - "futures": [], - "threads": [], + "threads": {}, "proc": None, - "options": data["options"], + "position": data.get("position", 0), + "root_scan_id": data.get("root_scan_id", 0), + "options": options, "scan_id": scan_id, "status": "STARTED", "issues_available": False, "started_at": int(time.time() * 1000), "nb_findings": 0, } + engine.scans.update({scan_id: scan}) - this.scans.update({scan_id: scan}) - th = threading.Thread(target=_scan_thread, args=(scan_id,)) + app.logger.debug("Launching thread for asset list") + 
th = threading.Thread( + target=_scan_thread, + kwargs={"scan_id": scan_id, "thread_id": 0}, + ) th.start() - this.scans[scan_id]["threads"].append(th) - - # th = this.pool.submit(_scan_thread, args=(scan_id,)) - # this.scans[scan_id]['futures'].append(th) + # engine.scans[scan_id]["threads"].append(th) res.update({"status": "accepted", "details": {"scan_id": scan["scan_id"]}}) - return jsonify(res) -def _scan_thread(scan_id): +def _scan_thread(scan_id, thread_id): hosts = [] - for asset in this.scans[scan_id]["assets"]: - if asset["datatype"] not in this.scanner["allowed_asset_types"]: + for asset in engine.scans[scan_id]["assets"]: + if asset["datatype"] not in engine.scanner["allowed_asset_types"]: return jsonify( { "status": "refused", @@ -199,23 +295,23 @@ def _scan_thread(scan_id): app.logger.debug("asset: %s", item) # Sanitize args : - options = this.scans[scan_id]["options"] + options = engine.scans[scan_id]["options"] ports = None if "ports" in options: ports = ",".join(options["ports"]) - # del this.scans[scan_id]['options']['ports'] + # del engine.scans[scan_id]['options']['ports'] app.logger.debug("options: %s", options) log_path = f"{BASE_DIR}/logs/{scan_id}.error" - cmd = f"{this.scanner['path']} -vvv -oX {BASE_DIR}/results/nmap_{scan_id}.xml" + cmd = f"{engine.scanner['path']} -vvv -oX {BASE_DIR}/results/nmap_{scan_id}.xml" # Check options for opt_key in options.keys(): if ( - opt_key in this.scanner["options"] + opt_key in engine.scanner["options"] and options.get(opt_key) and opt_key not in [ @@ -227,7 +323,7 @@ def _scan_thread(scan_id): "host_file_path", ] ): - cmd += " {}".format(this.scanner["options"][opt_key]["value"]) + cmd += " {}".format(engine.scanner["options"][opt_key]["value"]) if ( opt_key == "ports" and ports is not None ): # /!\ @todo / Security issue: Sanitize parameters here @@ -279,308 +375,91 @@ def _scan_thread(scan_id): cmd_sec = split(cmd) - this.scans[scan_id]["proc_cmd"] = "not set!!" + engine.scans[scan_id]["proc_cmd"] = "not set!!" 
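    # NOTE: `split` here is presumably shlex.split: it tokenizes the assembled
    # nmap command into an argv list so Popen can run it with shell=False,
    # which keeps user-supplied option values from being interpreted by a shell.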
with open(log_path, "w"): - this.scans[scan_id]["proc"] = subprocess.Popen( + proc = subprocess.Popen( cmd_sec, shell=False, # stdout=open("/dev/null", "w"), stderr=stderr stdout=open("/dev/null", "w"), stderr=open("/dev/null", "w"), ) - this.scans[scan_id]["proc_cmd"] = cmd - - proc = this.scans[scan_id]["proc"] + engine.scans[scan_id]["proc"] = proc + thread_info = { + "thread_id": thread_id, + "proc": proc, + "cmd": cmd, + "thread": threading.current_thread(), + "status": "RUNNING", + "asset": engine.scans[scan_id]["assets"], + } + engine.scans[scan_id]["threads"].update({thread_id: thread_info}) + engine.scans[scan_id]["status"] = "SCANNING" + engine.scans[scan_id]["proc_cmd"] = cmd - # Define max timeout - max_timeout = APP_SCAN_TIMEOUT_DEFAULT - timeout = time.time() + max_timeout + app.logger.debug( + f"##### RUNNING 1 scan on thread {thread_id}, for scan {scan_id}, scans length is {len(engine.scans)} #####" + ) + print( + f"##### RUNNING 1 scan on thread {thread_id}, for scan {scan_id}, scans length is {len(engine.scans)} #####" + ) + # # Define max timeout + # max_timeout = APP_SCAN_TIMEOUT_DEFAULT + # timeout = time.time() + max_timeout # while time.time() < timeout: - # if hasattr(proc, 'pid') and psutil.pid_exists(proc.pid) and psutil.Process(proc.pid).status() in ["sleeping", "running"]: + # if ( + # hasattr(proc, "pid") + # and psutil.pid_exists(proc.pid) + # and psutil.Process(proc.pid).status() in ["sleeping", "running"] + # ): # # Scan is still in progress # time.sleep(3) # # print(f'scan {scan_id} still running...') # else: # # Scan is finished # # print(f'scan {scan_id} is finished !') - - # # Check if the report is available (exists && scan finished) - # report_filename = f"{BASE_DIR}/results/nmap_{scan_id}.xml" - # if not os.path.exists(report_filename): - # return False - - # issues, raw_hosts = _parse_report(report_filename, scan_id) - - # # Check if banner grabbing is requested - # if "banner" in options.keys() and options["banner"] in [True, 1, "true", "1", "y", "yes", "on"]: - # extra_issues = get_service_banner(scan_id, raw_hosts) - # issues.extend(extra_issues) - - # this.scans[scan_id]["issues"] = deepcopy(issues) - # this.scans[scan_id]["issues_available"] = True - # this.scans[scan_id]["status"] = "FINISHED" # break - # return True - while time.time() < timeout: - if ( - hasattr(proc, "pid") - and psutil.pid_exists(proc.pid) - and psutil.Process(proc.pid).status() in ["sleeping", "running"] - ): - # Scan is still in progress - time.sleep(3) - # print(f'scan {scan_id} still running...') - else: - # Scan is finished - # print(f'scan {scan_id} is finished !') - break - - time.sleep(1) # wait for creating report file (could be long) - - # Check if the report is available (exists && scan finished) - report_filename = f"{BASE_DIR}/results/nmap_{scan_id}.xml" - if not os.path.exists(report_filename): - # this.scans[scan_id]["status"] = "FINISHED" # ERROR ? 
- # this.scans[scan_id]["issues_available"] = True - this.scans[scan_id]["status"] = "ERROR" - this.scans[scan_id]["issues_available"] = False - return False - - try: - issues, raw_hosts = _parse_report(report_filename, scan_id) - - # Check if banner grabbing is requested - if "banner" in options.keys() and options["banner"] in [ - True, - 1, - "true", - "1", - "y", - "yes", - "on", - ]: - extra_issues = get_service_banner(scan_id, raw_hosts) - issues.extend(extra_issues) - - this.scans[scan_id]["issues"] = deepcopy(issues) - except Exception as e: - app.logger.info(e) - # traceback.print_exception(*sys.exc_info()) - this.scans[scan_id]["status"] = "ERROR" - this.scans[scan_id]["issues_available"] = False - this.scans[scan_id]["issues_available"] = True - this.scans[scan_id]["status"] = "FINISHED" + # time.sleep(1) # wait for creating report file (could be long) + + # # Check if the report is available (exists && scan finished) + # report_filename = f"{BASE_DIR}/results/nmap_{scan_id}.xml" + # if not os.path.exists(report_filename): + # # engine.scans[scan_id]["status"] = "FINISHED" # ERROR ? + # # engine.scans[scan_id]["issues_available"] = True + # engine.scans[scan_id]["status"] = "ERROR" + # engine.scans[scan_id]["issues_available"] = False + # return False + + # try: + # issues, summary, raw_hosts = _parse_report(report_filename, scan_id) + + # # Check if banner grabbing is requested + # if "banner" in options.keys() and options["banner"] in [ + # True, + # 1, + # "true", + # "1", + # "y", + # "yes", + # "on", + # ]: + # extra_issues = get_service_banner(scan_id, raw_hosts) + # issues.extend(extra_issues) + + # engine.scans[scan_id]["issues"] = deepcopy(issues) + # except Exception as e: + # app.logger.info(e) + # # traceback.print_exception(*sys.exc_info()) + # engine.scans[scan_id]["status"] = "ERROR" + # engine.scans[scan_id]["issues_available"] = False + # engine.scans[scan_id]["issues_available"] = True + # engine.scans[scan_id]["status"] = "FINISHED" return True -@app.route("/engines/nmap/clean") -def clean(): - res = {"page": "clean"} - - stop() - this.scans.clear() - loadconfig() - res.update({"status": "SUCCESS"}) - return jsonify(res) - - -@app.route("/engines/nmap/clean/") -def clean_scan(scan_id): - res = {"page": "clean_scan"} - res.update({"scan_id": scan_id}) - - if scan_id not in this.scans.keys(): - res.update({"status": "error", "reason": f"scan_id '{scan_id}' not found"}) - return jsonify(res) - - stop_scan(scan_id) - this.scans.pop(scan_id) - res.update({"status": "removed"}) - return jsonify(res) - - -# Stop all scans -@app.route("/engines/nmap/stopscans") -def stop(): - res = {"page": "stopscans"} - - for scan_id in this.scans.keys(): - stop_scan(scan_id) - - res.update({"status": "SUCCESS"}) - - return jsonify(res) - - -@app.route("/engines/nmap/stop/") -def stop_scan(scan_id): - res = {"page": "stopscan"} - - if scan_id not in this.scans.keys(): - res.update({"status": "error", "reason": f"scan_id '{scan_id}' not found"}) - return jsonify(res) - - # Stop the nmap cmd - proc = this.scans[scan_id]["proc"] - if hasattr(proc, "pid"): - if psutil.pid_exists(proc.pid): - psutil.Process(proc.pid).terminate() - res.update( - { - "status": "TERMINATED", - "details": { - "pid": proc.pid, - "cmd": this.scans[scan_id]["proc_cmd"], - "scan_id": scan_id, - }, - } - ) - - # Stop the thread '_scan_thread' - for th in this.scans[scan_id]["threads"]: - th.join() - - this.scans[scan_id]["status"] = "STOPPED" - # this.scans[scan_id]["finished_at"] = int(time.time() * 1000) - 
return jsonify(res) - - -@app.route("/engines/nmap/status/") -def scan_status(scan_id): - res = {"page": "status", "status": "SCANNING"} - if scan_id not in this.scans.keys(): - res.update({"status": "error", "reason": f"scan_id '{scan_id}' not found"}) - return jsonify(res), 404 - - if this.scans[scan_id]["status"] == "ERROR": - res.update({"status": "error", "reason": "todo"}) - return jsonify(res), 503 - - # Fix when a scan is started but the thread has not been created yet - if this.scans[scan_id]["status"] == "STARTED": - res.update({"status": "SCANNING"}) - - proc = this.scans[scan_id]["proc"] - if not hasattr(proc, "pid"): - res.update({"status": "ERROR", "reason": "No PID found"}) - return jsonify(res), 503 - - # if not psutil.pid_exists(proc.pid): - if ( - not psutil.pid_exists(proc.pid) - and this.scans[scan_id]["issues_available"] is True - ): - res.update({"status": "FINISHED"}) - this.scans[scan_id]["status"] = "FINISHED" - # print(f"scan_status/scan '{scan_id}' is finished") - - elif ( - not psutil.pid_exists(proc.pid) - and this.scans[scan_id]["issues_available"] is False - and this.scans[scan_id]["status"] == "ERROR" - ): - res.update({"status": "ERROR"}) - # print(f"scan_status/scan '{scan_id}' is finished") - - elif psutil.pid_exists(proc.pid) and psutil.Process(proc.pid).status() in [ - "sleeping", - "running", - ]: - res.update( - { - "status": "SCANNING", - "info": {"pid": proc.pid, "cmd": this.scans[scan_id]["proc_cmd"]}, - } - ) - # print(f"scan_status/scan '{scan_id}' is still SCANNING") - elif ( - psutil.pid_exists(proc.pid) - and psutil.Process(proc.pid).status() == "zombie" - and this.scans[scan_id]["issues_available"] is True - ): - res.update({"status": "FINISHED"}) - this.scans[scan_id]["status"] = "FINISHED" - psutil.Process(proc.pid).terminate() - - # print(scan_id, res['status'], psutil.pid_exists(proc.pid), hasattr(proc, "pid"), this.scans[scan_id]["issues_available"], psutil.Process(proc.pid).status()) - return jsonify(res) - - -def _engine_is_busy(): - """Returns if engine is busy scanning.""" - scans_count = 0 - # for scan_id, scan_infos in this.scans: - for scan_id in this.scans.keys(): - # do not use scan_status as it updates all scans - # TODO rewrite function later - if this.scans[scan_id]["status"] in ["SCANNING", "STARTED"]: - scans_count += 1 - if scans_count >= APP_MAXSCANS: - return True - return False - - -@app.route("/engines/nmap/status") -def status(): - res = {"page": "status"} - this.scanner["status"] = "READY" - - # display info on the scanner - res.update({"scanner": this.scanner}) - - # display the status of scans performed - scans = {} - for scan in this.scans.keys(): - scan_status(scan) - scans.update( - { - scan: { - "status": this.scans[scan]["status"], - "options": this.scans[scan]["options"], - "nb_findings": this.scans[scan]["nb_findings"], - } - } - ) - res.update({"scans": scans}) - - if _engine_is_busy() is True: - this.scanner["status"] = "BUSY" - - if not os.path.exists(f"{BASE_DIR}/nmap.json"): - app.logger.error("nmap.json config file not found") - this.scanner["status"] = "ERROR" - - if "path" in this.scanner: - if not os.path.isfile(this.scanner["path"]): - app.logger.error("NMAP engine not found (%s)", this.scanner["path"]) - this.scanner["status"] = "ERROR" - - res.update({"status": this.scanner["status"]}) - return jsonify(res) - - -@app.route("/engines/nmap/info") -def info(): - scans = {} - for scan in this.scans.keys(): - scan_status(scan) - scans.update( - { - scan: { - "status": this.scans[scan]["status"], - 
"options": this.scans[scan]["options"], - "nb_findings": this.scans[scan]["nb_findings"], - } - } - ) - - res = {"page": "info", "engine_config": this.scanner, "scans": scans} - return jsonify(res) - - def get_service_banner(scan_id, raw_hosts): ts = int(time.time() * 1000) res = [] @@ -631,9 +510,9 @@ def _add_issue( risk={}, raw=[], ): - this.scans[scan_id]["nb_findings"] = this.scans[scan_id]["nb_findings"] + 1 + engine.scans[scan_id]["nb_findings"] = engine.scans[scan_id]["nb_findings"] + 1 issue = { - "issue_id": this.scans[scan_id]["nb_findings"], + "issue_id": engine.scans[scan_id]["nb_findings"], "severity": severity, "confidence": confidence, "target": target, @@ -656,14 +535,16 @@ def _add_issue( def _parse_report(filename, scan_id): """Parse the nmap report.""" - res = [] + issues = [] target = {} raw_hosts = {} + nb_vulns = {"info": 0, "low": 0, "medium": 0, "high": 0, "critical": 0} + try: tree = ET.parse(filename) except Exception: # No Element found in XML file - return res, raw_hosts + return issues, raw_hosts if tree.find("taskbegin") is not None: ts = tree.find("taskbegin").get("time") @@ -671,11 +552,11 @@ def _parse_report(filename, scan_id): ts = tree.getroot().get("start") unresolved_domains = set() - for a in this.scans[scan_id]["assets"]: + for a in engine.scans[scan_id]["assets"]: if a["datatype"] == "domain": unresolved_domains.add(a["value"]) down_ips = set() - for a in this.scans[scan_id]["assets"]: + for a in engine.scans[scan_id]["assets"]: if a["datatype"] == "ip": down_ips.add(a["value"]) @@ -698,7 +579,7 @@ def _parse_report(filename, scan_id): addr_list.append(addr) # Check if it was extracted from URLs. If yes: add them - for a in this.scans[scan_id]["assets"]: + for a in engine.scans[scan_id]["assets"]: if a["datatype"] == "url" and urlparse(a["value"]).netloc in addr_list: addr_list.append(a["value"]) @@ -712,7 +593,7 @@ def _parse_report(filename, scan_id): for hostnames in host.findall("hostnames"): for hostname in list(hostnames): ip_address = str(host.find("address").get("addr")) - res.append( + issues.append( deepcopy( _add_issue( scan_id, @@ -756,7 +637,7 @@ def _parse_report(filename, scan_id): os_cpe = osclass.find("cpe") if os_cpe is not None: os_data["cpe"].append(os_cpe.text) - res.append( + issues.append( deepcopy( _add_issue( scan_id, @@ -861,11 +742,11 @@ def _parse_report(filename, scan_id): script_output = "" - #Get Result from Port Script. + # Get Result from Port Script. 
for port_script in port.findall("script"): - script_output += port_script.get("output")+"\n" - port_data.update({"script_output":script_output}) - res.append( + script_output += port_script.get("output") + "\n" + port_data.update({"script_output": script_output}) + issues.append( deepcopy( _add_issue( scan_id, @@ -887,7 +768,7 @@ def _parse_report(filename, scan_id): if port_state not in ["filtered", "closed"]: openports = True - res.append( + issues.append( deepcopy( _add_issue( scan_id, @@ -905,10 +786,10 @@ def _parse_report(filename, scan_id): if ( not openports - and "ports" in this.scans[scan_id]["options"].keys() - and this.scans[scan_id]["options"]["ports"][0] in ["-", "1-65535"] + and "ports" in engine.scans[scan_id]["options"].keys() + and engine.scans[scan_id]["options"]["ports"][0] in ["-", "1-65535"] ): # only if all ports were scanned you can add the finding - res.append( + issues.append( deepcopy( _add_issue( scan_id, @@ -924,7 +805,7 @@ def _parse_report(filename, scan_id): # get host status status = host.find("status").get("state") if openports: # There are open ports so it must be up - res.append( + issues.append( deepcopy( _add_issue( scan_id, @@ -936,12 +817,12 @@ def _parse_report(filename, scan_id): ) ) ) - # elif status and status == "up" and "no_ping" in this.scans[scan_id]["options"].keys() and this.scans[scan_id]["options"]["no_ping"] == '0': #if no_ping (-Pn) is used all hosts are always up even if they are not + # elif status and status == "up" and "no_ping" in engine.scans[scan_id]["options"].keys() and engine.scans[scan_id]["options"]["no_ping"] == '0': #if no_ping (-Pn) is used all hosts are always up even if they are not elif ( status and status == "up" ): # if no_ping (-Pn) is used all hosts are always up even if they are not - # if "no_ping" in this.scans[scan_id]["options"].keys() and this.scans[scan_id]["options"]["no_ping"] == '0': - res.append( + # if "no_ping" in engine.scans[scan_id]["options"].keys() and engine.scans[scan_id]["options"]["no_ping"] == '0': + issues.append( deepcopy( _add_issue( scan_id, @@ -954,7 +835,7 @@ def _parse_report(filename, scan_id): ) ) if status and status == "down": - res.append( + issues.append( deepcopy( _add_issue( scan_id, @@ -976,7 +857,7 @@ def _parse_report(filename, scan_id): if host.find("hostscript") is not None: for script in host.find("hostscript"): script_output = script.get("output") - res.append( + issues.append( deepcopy( _add_issue( scan_id, @@ -991,13 +872,13 @@ def _parse_report(filename, scan_id): ) ) - if "script_output_fields" in this.scans[scan_id]["options"].keys(): + if "script_output_fields" in engine.scans[scan_id]["options"].keys(): for elem in script.findall("elem"): if ( elem.get("key") - in this.scans[scan_id]["options"]["script_output_fields"] + in engine.scans[scan_id]["options"]["script_output_fields"] ): - res.append( + issues.append( deepcopy( _add_issue( scan_id, @@ -1019,7 +900,7 @@ def _parse_report(filename, scan_id): "addr": [unresolved_domain], "addr_type": "tcp", } - res.append( + issues.append( deepcopy( _add_issue( scan_id, @@ -1035,18 +916,18 @@ def _parse_report(filename, scan_id): ) ) if ( - "ports" in this.scans[scan_id]["options"].keys() - and this.scans[scan_id]["options"]["ports"][0] in ["-", "1-65535"] + "ports" in engine.scans[scan_id]["options"].keys() + and engine.scans[scan_id]["options"]["ports"][0] in ["-", "1-65535"] ) or ( - "fast_scan" in this.scans[scan_id]["options"].keys() - and this.scans[scan_id]["options"]["fast_scan"] + "fast_scan" in 
engine.scans[scan_id]["options"].keys() + and engine.scans[scan_id]["options"]["fast_scan"] ): for down_ip in down_ips: target = { "addr": [down_ip], "addr_type": "tcp", } - res.append( + issues.append( deepcopy( _add_issue( scan_id, @@ -1060,7 +941,16 @@ def _parse_report(filename, scan_id): ) ) - return res, raw_hosts + summary = { + "nb_issues": len(issues), + "nb_info": 0, + "nb_low": 0, + "nb_medium": 0, + "nb_high": 0, + "nb_critical": 0, + "engine_name": "nmap", + } + return issues, summary, raw_hosts def _get_cpe_link(cpe): @@ -1091,24 +981,22 @@ def _get_vulners_findings(findings): def getfindings(scan_id): """Get findings from engine.""" res = {"page": "getfindings", "scan_id": scan_id} - if not scan_id.isdecimal(): - res.update({"status": "error", "reason": "scan_id must be numeric digits only"}) - return jsonify(res) - if scan_id not in this.scans.keys(): - res.update({"status": "error", "reason": f"scan_id '{scan_id}' not found"}) - return jsonify(res) - - proc = this.scans[scan_id]["proc"] + if scan_id not in engine.scans.keys(): + raise PatrowlEngineExceptions(1002, "scan_id '{}' not found".format(scan_id)) + + # check if the scan is finished (thread as well) + status_res = engine.status_scan(scan_id) + if engine.scans[scan_id]["status"] != "FINISHED": + raise PatrowlEngineExceptions( + 1003, + "scan_id '{}' not finished (status={})".format( + scan_id, status_res["status"] + ), + ) - # check if the scan is finished - status() - if ( - hasattr(proc, "pid") - and psutil.pid_exists(proc.pid) - and psutil.Process(proc.pid).status() in ["sleeping", "running"] - ): - res.update({"status": "error", "reason": "Scan in progress"}) - return jsonify(res) + issues = [] + summary = {} + scan = {"scan_id": scan_id} # check if the report is available (exists && scan finished) report_filename = f"{BASE_DIR}/results/nmap_{scan_id}.xml" @@ -1116,20 +1004,35 @@ def getfindings(scan_id): res.update({"status": "error", "reason": "Report file not available"}) return jsonify(res) - if "issues" not in this.scans[scan_id].keys(): - res.update({"status": "error", "reason": "Issues not available yet"}) - return jsonify(res) + issues, _, raw_hosts = _parse_report(report_filename, scan_id) + + # Check if banner grabbing is requested + options = engine.scans[scan_id]["options"] + if "banner" in options and options["banner"] in [ + True, + 1, + "true", + "1", + "y", + "yes", + "on", + ]: + extra_issues = get_service_banner(scan_id, raw_hosts) + issues.extend(extra_issues) + + nb_vulns = {"info": 0, "low": 0, "medium": 0, "high": 0, "critical": 0} + for issue in issues: + nb_vulns[issue["severity"]] += 1 - issues = this.scans[scan_id]["issues"] - scan = {"scan_id": scan_id} summary = { "nb_issues": len(issues), - "nb_info": len(issues), - "nb_low": 0, - "nb_medium": 0, - "nb_high": 0, + "nb_info": nb_vulns["info"], + "nb_low": nb_vulns["low"], + "nb_medium": nb_vulns["medium"], + "nb_high": nb_vulns["high"], + "nb_critical": nb_vulns["critical"], "engine_name": "nmap", - "engine_version": this.scanner["version"], + "engine_version": engine.scanner["version"], } # Store the findings in a file @@ -1145,64 +1048,15 @@ def getfindings(scan_id): if os.path.exists(hosts_filename): os.remove(hosts_filename) - res.update( - {"scan": scan, "summary": summary, "issues": issues, "status": "success"} - ) - return jsonify(res) - - -@app.route("/engines/nmap/getreport/") -def getreport(scan_id): - if scan_id not in this.scans.keys(): - return jsonify({"status": "ERROR", "reason": f"scan_id '{scan_id}' not found"}) - # 
remove the scan from the active scan list
-    clean_scan(scan_id)
+    engine.clean_scan(scan_id)

-    filepath = f"{BASE_DIR}/results/nmap_{scan_id}.json"
-    if not os.path.exists(filepath):
-        return jsonify(
-            {
-                "status": "ERROR",
-                "reason": f"report file for scan_id '{scan_id}' not found",
-            }
-        )
-
-    return send_from_directory(
-        f"{BASE_DIR}/results",
-        f"nmap_{scan_id}.json",
-        mimetype="application/json",
-        download_name=f"nmap_{scan_id}.json",
-        as_attachment=True,
-    )
-
-
-@app.route("/engines/nmap/test")
-def test():
-    res = "<h2>Test Page (DEBUG):</h2>"
-    for rule in app.url_map.iter_rules():
-        options = {}
-        for arg in rule.arguments:
-            options[arg] = "[{0}]".format(arg)
-
-        methods = ",".join(rule.methods)
-        url = url_for(rule.endpoint, **options)
-        res += urllib.request.url2pathname(
-            "{0:50s} {1:20s} <a href='{2}'>{2}</a><br/>".format(
-                rule.endpoint, methods, url
-            )
-        )
-
-    return res
-
-
-@app.errorhandler(404)
-def page_not_found(e):
-    return jsonify({"page": "not found"})
+    res.update({"summary": summary, "issues": issues, "status": "success"})
+    return jsonify(res)


-@app.before_first_request
-def main():
+with app.app_context():
+    """First function called."""
     # if os.getuid() != 0: #run with root because of docker env vars scope
     #     app.logger.error("Start the NMAP engine using root privileges !")
     #     sys.exit(-1)
diff --git a/engines/nmap/nmap.json.sample b/engines/nmap/nmap.json.sample
index 0bea308f..690f8edd 100644
--- a/engines/nmap/nmap.json.sample
+++ b/engines/nmap/nmap.json.sample
@@ -1,6 +1,6 @@
 {
     "name": "Nmap",
-    "version": "1.4.49",
+    "version": "1.5.0",
     "description": "Network Scanner",
     "path": "/usr/bin/nmap",
     "allowed_asset_types": ["ip", "domain", "fqdn", "url", "ip-range", "ip-subnet"],
diff --git a/engines/owl_dns/Dockerfile b/engines/owl_dns/Dockerfile
index eb05516e..675a859d 100644
--- a/engines/owl_dns/Dockerfile
+++ b/engines/owl_dns/Dockerfile
@@ -1,10 +1,11 @@
 FROM alpine:3.16.3
-LABEL Name="Patrowl\ DNS\ \(Patrowl engine\)" Version="1.5.8"
+LABEL Name="Patrowl\ DNS\ \(Patrowl engine\)" Version="1.5.9"

 # Install dependencies
 RUN apk add --update --no-cache \
     python3 python3-dev py3-pip \
     git \
+    build-base linux-headers \
   && rm -rf /var/cache/apk/*

 # Create the target repo
@@ -12,19 +13,6 @@ RUN mkdir -p /opt/patrowl-engines/owl_dns
 RUN mkdir -p /opt/patrowl-engines/owl_dns/results
 RUN mkdir -p /opt/patrowl-engines/owl_dns/external-libs

-# Set the working directory to /opt/
-WORKDIR /opt/patrowl-engines/owl_dns
-
-# Copy the current directory contents into the container at /
-COPY __init__.py .
-COPY engine-owl_dns.py .
-COPY owl_dns.json.sample owl_dns.json
-COPY requirements.txt .
-COPY README.md .
-COPY VERSION .
-COPY etc/ etc/
-COPY modules/ modules/
-
 WORKDIR /opt/patrowl-engines/owl_dns/external-libs
 RUN git clone https://github.com/Patrowl/Sublist3r
 WORKDIR /opt/patrowl-engines/owl_dns/external-libs/Sublist3r
@@ -32,13 +20,25 @@ RUN pip3 install --trusted-host pypi.python.org -r requirements.txt
 WORKDIR /opt/patrowl-engines/owl_dns/external-libs
 RUN git clone https://github.com/elceef/dnstwist

+# Set the working directory to /opt/
+WORKDIR /opt/patrowl-engines/owl_dns
+
 # Install python modules for engine
-WORKDIR /opt/patrowl-engines/owl_dns/
+COPY requirements.txt .
 RUN pip3 install --upgrade pip
 RUN pip3 install --trusted-host pypi.python.org -r requirements.txt

+# Copy the current directory contents into the container at /
+COPY __init__.py .
+COPY engine_owl_dns.py .
+COPY owl_dns.json.sample owl_dns.json
+COPY README.md .
+COPY VERSION .
+COPY etc/ etc/
+COPY modules/ modules/
+
 # TCP port exposed by the container (NAT)
 EXPOSE 5006

 # Run app.py when the container launches
-CMD ["gunicorn", "engine-owl_dns:app", "-b", "0.0.0.0:5006", "--access-logfile", "-", "--threads", "10"]
+CMD ["gunicorn", "engine_owl_dns:app", "-b", "0.0.0.0:5006", "--access-logfile", "-", "--threads", "10"]
diff --git a/engines/owl_dns/Dockerfile.ubuntu b/engines/owl_dns/Dockerfile.ubuntu
index dcfe0e29..a5c0b592 100644
--- a/engines/owl_dns/Dockerfile.ubuntu
+++ b/engines/owl_dns/Dockerfile.ubuntu
@@ -19,7 +19,7 @@ WORKDIR /opt/patrowl-engines/owl_dns/

 # Copy the current directory contents into the container at /
 COPY __init__.py .
-COPY engine-owl_dns.py .
+COPY engine_owl_dns.py .
 COPY owl_dns.json.sample owl_dns.json
 COPY requirements.txt .
 COPY README.md .
@@ -48,4 +48,4 @@ RUN pip3 install --trusted-host pypi.python.org -r requirements.txt EXPOSE 5006 # Run app.py when the container launches -CMD ["gunicorn", "engine-owl_dns:app", "-b", "0.0.0.0:5006", "--access-logfile", "-", "--threads", "10"] +CMD ["gunicorn", "engine_owl_dns:app", "-b", "0.0.0.0:5006", "--access-logfile", "-", "--threads", "10"] diff --git a/engines/owl_dns/VERSION b/engines/owl_dns/VERSION index 1cc9c180..2b26b8d2 100644 --- a/engines/owl_dns/VERSION +++ b/engines/owl_dns/VERSION @@ -1 +1 @@ -1.5.8 +1.5.9 diff --git a/engines/owl_dns/__init__.py b/engines/owl_dns/__init__.py index 8b177675..762a2e71 100644 --- a/engines/owl_dns/__init__.py +++ b/engines/owl_dns/__init__.py @@ -1,8 +1,12 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- +import os +import sys + +sys.path.append(os.path.dirname(os.path.realpath(__file__))) __title__ = "patrowl_engine_owl_dns" -__version__ = "1.5.8" +__version__ = "1.5.9" __author__ = "Nicolas MATTIOCCO" __license__ = "AGPLv3" __copyright__ = "Copyright (C) 2018-2024 Nicolas Mattiocco - @MaKyOtOx" diff --git a/engines/owl_dns/engine-owl_dns.py b/engines/owl_dns/engine_owl_dns.py similarity index 68% rename from engines/owl_dns/engine-owl_dns.py rename to engines/owl_dns/engine_owl_dns.py index 63d805b5..eae62f95 100644 --- a/engines/owl_dns/engine-owl_dns.py +++ b/engines/owl_dns/engine_owl_dns.py @@ -1,42 +1,71 @@ #!/usr/bin/python3 # -*- coding: utf-8 -*- -import os, sys, json, time, urllib, hashlib, threading -import datetime, copy, dns.resolver, socket, optparse, random, string -from flask import Flask, request, jsonify, redirect, url_for, send_from_directory +import os +import sys +import json +import time +import hashlib +import threading +import datetime +import copy +import dns.resolver +import socket +import optparse +import random +import string import validators import requests import whois from ipwhois import IPWhois -from modules.dnstwist import dnstwist +from modules.dnstwist import DnsTwist from modules.dkimsignatures import dkimlist +import re +import urllib from concurrent.futures import ThreadPoolExecutor + +from flask import Flask, jsonify, redirect, request, send_from_directory, url_for from netaddr import IPAddress, IPNetwork from netaddr.core import AddrFormatError -import re + +from etc.issues import spf_issues + +# Own library imports +# sys.path.append("./PatrowlEnginesUtils/") +from PatrowlEnginesUtils.PatrowlEngine import _json_serial +from PatrowlEnginesUtils.PatrowlEngine import PatrowlEngine app = Flask(__name__) APP_DEBUG = os.environ.get("DEBUG", "").lower() in ["true", "1", "yes", "y", "on"] +APP_MAXSCANS = int(os.environ.get("APP_MAXSCANS", 10)) + APP_HOST = "0.0.0.0" APP_PORT = 5006 -APP_MAXSCANS = int(os.environ.get("APP_MAXSCANS", 5)) +APP_ENGINE_NAME = "owl_dns" + APP_TIMEOUT = int(os.environ.get("APP_TIMEOUT", 3600)) APP_WF_MAX_PAGE = int(os.environ.get("APP_WF_MAX_PAGE", 10)) BASE_DIR = os.path.dirname(os.path.realpath(__file__)) this = sys.modules[__name__] -this.scanner = {} -this.scans = {} -this.scan_lock = threading.RLock() -this.wf_apitokens = [] -this.resolver = dns.resolver.Resolver() -this.resolver.lifetime = this.resolver.timeout = 5.0 +ENGINE_TIME_OUT = 5.0 + +engine = PatrowlEngine( + app=app, base_dir=BASE_DIR, name=APP_ENGINE_NAME, max_scans=APP_MAXSCANS +) +this.engine = engine -list_nameservers = os.environ.get("NAMESERVER", "8.8.8.8,8.8.4.4").split(",") -this.resolver.nameservers = list_nameservers -this.pool = ThreadPoolExecutor(5) +engine.metadata["scan_lock"] = threading.RLock() 
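+# NOTE: this shared RLock is the one the findings writers should hold while
+# mutating engine.scans[scan_id]["findings"]. Several helpers below instead
+# create a fresh local RLock per call; a lock no other thread can see
+# serializes nothing, so those updates are effectively unsynchronized.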
+engine.metadata["wf_apitokens"] = [] +engine.metadata["resolver"] = dns.resolver.Resolver() +engine.metadata["resolver"].timeout = ENGINE_TIME_OUT +engine.metadata["resolver"].lifetime = ENGINE_TIME_OUT +list_nameservers = os.environ.get("NAMESERVER", "8.8.8.8,8.8.4.4").split(",") +engine.metadata["resolver"].nameservers = list_nameservers +# +engine.metadata["pool"] = ThreadPoolExecutor(5) def get_random_string(n=32): @@ -45,38 +74,61 @@ def get_random_string(n=32): @app.route("/") def default(): - return redirect(url_for("index")) + return engine.default() @app.route("/engines/owl_dns/") def index(): - return jsonify({"page": "index"}) + return engine.index() + + +@app.route("/engines/owl_dns/liveness") +def liveness(): + """Return liveness page.""" + return engine.liveness() + + +@app.route("/engines/owl_dns/readiness") +def readiness(): + """Return readiness page.""" + return engine.readiness() + + +@app.route("/engines/owl_dns/info") +def info(): + """Get info on running engine.""" + return engine.info() + + +def _engine_is_busy(): + """Returns if engine is busy scanning.""" + return engine._engine_is_busy() def _loadconfig(): conf_file = f"{BASE_DIR}/owl_dns.json" if os.path.exists(conf_file): json_data = open(conf_file) - this.scanner = json.load(json_data) - this.scanner["status"] = "READY" - sys.path.append(this.scanner["sublist3r_bin_path"]) + engine.scanner = json.load(json_data) + engine.scanner["status"] = "READY" + sys.path.append(engine.scanner["sublist3r_bin_path"]) globals()["sublist3r"] = __import__("sublist3r") - dnstwist(this.scanner["dnstwist_bin_path"]) + DnsTwist(engine.scanner["dnstwist_bin_path"]) else: app.logger.error(f"Error: config file '{conf_file}' not found") return {"status": "error", "reason": "config file not found"} - if not os.path.isfile(this.scanner["seg_path"]): - this.scanner["status"] = "ERROR" + if not os.path.isfile(engine.scanner["seg_path"]): + engine.scanner["status"] = "ERROR" app.logger.error("Error: path to Secure Email Gateway providers not found") return { "status": "ERROR", "reason": "path to Secure Email Gateway providers not found.", } - if not os.path.isfile(this.scanner["external_ip_ranges_path"]): - this.scanner["status"] = "ERROR" + if not os.path.isfile(engine.scanner["external_ip_ranges_path"]): + engine.scanner["status"] = "ERROR" app.logger.error( "Error: path to external IP ranges (CDN, WAF, Cloud) not found" ) @@ -85,15 +137,15 @@ def _loadconfig(): "reason": "path to external IP ranges (CDN, WAF, Cloud) not found.", } - this.wf_apitokens = [] - for apikey in this.scanner["whoisfreak_api_tokens"]: - this.wf_apitokens.append(apikey) - del this.scanner["whoisfreak_api_tokens"] + engine.metadata["wf_apitokens"] = [] + for apikey in engine.scanner["whoisfreak_api_tokens"]: + engine.metadata["wf_apitokens"].append(apikey) + del engine.scanner["whoisfreak_api_tokens"] version_filename = f"{BASE_DIR}/VERSION" if os.path.exists(version_filename): version_file = open(version_filename, "r") - this.scanner["version"] = version_file.read().rstrip("\n") + engine.scanner["version"] = version_file.read().rstrip("\n") version_file.close() @@ -101,7 +153,7 @@ def _loadconfig(): def reloadconfig(): res = {"page": "reloadconfig"} _loadconfig() - res.update({"config": this.scanner}) + res.update({"config": engine.scanner}) return jsonify(res) @@ -111,7 +163,7 @@ def start_scan(): res = {"page": "startscan"} # check the scanner is ready to start a new scan - if len(this.scans) == APP_MAXSCANS * 2: + if len(engine.scans) == APP_MAXSCANS * 2: 
res.update( { "status": "error", @@ -134,7 +186,7 @@ def start_scan(): scan_id = str(data["scan_id"]) - this.scans.update( + engine.scans.update( { scan_id: { "status": "STARTED", @@ -145,24 +197,24 @@ def start_scan(): ) status() - if this.scanner["status"] != "READY": + if engine.scanner["status"] != "READY": res.update( { "status": "refused", "details": { "reason": "scanner not ready", - "status": this.scanner["status"], + "status": engine.scanner["status"], }, } ) - this.scans.update( + engine.scans.update( { scan_id: { "status": "ERROR", } } ) - this.scans.pop(scan_id, None) + engine.scans.pop(scan_id, None) return jsonify(res), 503 # Sanitize args : @@ -171,20 +223,22 @@ def start_scan(): "threads": [], "futures": [], "dnstwist": {}, - "options": data["options"], + "position": data.get("position", 0), + "root_scan_id": data.get("root_scan_id", 0), + "options": data.get("options", {}), "scan_id": scan_id, "status": "STARTED", "started_at": int(time.time() * 1000), "findings": {}, } - this.scans.update({scan_id: scan}) + engine.scans.update({scan_id: scan}) if "do_whois" in scan["options"].keys() and data["options"]["do_whois"]: for asset in data["assets"]: if asset["datatype"] in ["domain", "ip", "fqdn"]: - th = this.pool.submit(_get_whois, scan_id, asset["value"]) - this.scans[scan_id]["futures"].append(th) + th = engine.metadata["pool"].submit(_get_whois, scan_id, asset["value"]) + engine.scans[scan_id]["futures"].append(th) if ( "do_advanced_whois" in scan["options"].keys() @@ -192,8 +246,8 @@ def start_scan(): ): for asset in data["assets"]: if asset["datatype"] == "domain": - th = this.pool.submit(_get_whois, scan_id, asset["value"]) - this.scans[scan_id]["futures"].append(th) + th = engine.metadata["pool"].submit(_get_whois, scan_id, asset["value"]) + engine.scans[scan_id]["futures"].append(th) # subdomains enumeration using search engines, VT and public PassiveDNS API if ( @@ -202,8 +256,10 @@ def start_scan(): ): for asset in data["assets"]: if asset["datatype"] == "domain": - th = this.pool.submit(_subdomain_enum, scan_id, asset["value"]) - this.scans[scan_id]["futures"].append(th) + th = engine.metadata["pool"].submit( + _subdomain_enum, scan_id, asset["value"] + ) + engine.scans[scan_id]["futures"].append(th) if ( "do_subdomains_resolve" in scan["options"].keys() @@ -211,29 +267,35 @@ def start_scan(): ): for asset in data["assets"]: if asset["datatype"] == "domain": - th = this.pool.submit(_dns_resolve, scan_id, asset["value"], True) - this.scans[scan_id]["futures"].append(th) + th = engine.metadata["pool"].submit( + _dns_resolve, scan_id, asset["value"], True + ) + engine.scans[scan_id]["futures"].append(th) if "do_dns_resolve" in scan["options"].keys() and data["options"]["do_dns_resolve"]: for asset in data["assets"]: if asset["datatype"] == "domain": - th = this.pool.submit(_dns_resolve, scan_id, asset["value"], False) - this.scans[scan_id]["futures"].append(th) + th = engine.metadata["pool"].submit( + _dns_resolve, scan_id, asset["value"], False + ) + engine.scans[scan_id]["futures"].append(th) if "do_seg_check" in scan["options"].keys() and data["options"]["do_seg_check"]: for asset in data["assets"]: if asset["datatype"] in ["domain", "fqdn"]: - th = this.pool.submit(_do_seg_check, scan_id, asset["value"]) - this.scans[scan_id]["futures"].append(th) + th = engine.metadata["pool"].submit( + _do_seg_check, scan_id, asset["value"] + ) + engine.scans[scan_id]["futures"].append(th) if "do_spf_check" in scan["options"].keys() and data["options"]["do_spf_check"]: for asset 
in data["assets"]: if asset["datatype"] == "domain": th = threading.Thread( - target=_perform_spf_check, args=(scan_id, asset["value"]) + target=_do_spf_check, args=(scan_id, asset["value"]) ) th.start() - this.scans[scan_id]["threads"].append(th) + engine.scans[scan_id]["threads"].append(th) if "do_dkim_check" in scan["options"].keys() and data["options"]["do_dkim_check"]: for asset in data["assets"]: @@ -242,7 +304,7 @@ def start_scan(): target=_do_dkim_check, args=(scan_id, asset["value"]) ) th.start() - this.scans[scan_id]["threads"].append(th) + engine.scans[scan_id]["threads"].append(th) if "do_dmarc_check" in scan["options"].keys() and data["options"]["do_dmarc_check"]: for asset in data["assets"]: @@ -251,7 +313,7 @@ def start_scan(): target=_do_dmarc_check, args=(scan_id, asset["value"]) ) th.start() - this.scans[scan_id]["threads"].append(th) + engine.scans[scan_id]["threads"].append(th) if ( "do_subdomain_bruteforce" in scan["options"].keys() @@ -259,76 +321,66 @@ def start_scan(): ): for asset in data["assets"]: if asset["datatype"] == "domain": - th = this.pool.submit(_subdomain_bruteforce, scan_id, asset["value"]) - this.scans[scan_id]["futures"].append(th) + th = engine.metadata["pool"].submit( + _subdomain_bruteforce, scan_id, asset["value"] + ) + engine.scans[scan_id]["futures"].append(th) if "do_reverse_dns" in scan["options"].keys() and data["options"]["do_reverse_dns"]: for asset in data["assets"]: if asset["datatype"] == "ip": - th = this.pool.submit(_reverse_dns, scan_id, asset["value"]) - this.scans[scan_id]["futures"].append(th) + th = engine.metadata["pool"].submit( + _reverse_dns, scan_id, asset["value"] + ) + engine.scans[scan_id]["futures"].append(th) if ( "do_dnstwist_subdomain_search" in scan["options"].keys() and data["options"]["do_dnstwist_subdomain_search"] ): - # Check if extra TLD should be tested - tld = False - if ( - "dnstwist_check_tld" in scan["options"].keys() - and data["options"]["dnstwist_check_tld"] - ): - tld = this.scanner["dnstwist_common_tlds"] - check_ssdeep = False - if ( - "dnstwist_check_ssdeep" in scan["options"].keys() - and data["options"]["dnstwist_check_ssdeep"] - ): - check_ssdeep = True - check_geoip = False - if ( - "dnstwist_check_geoip" in scan["options"].keys() - and data["options"]["dnstwist_check_geoip"] - ): - check_geoip = True - check_mx = False - if ( - "dnstwist_check_mx" in scan["options"].keys() - and data["options"]["dnstwist_check_mx"] - ): - check_mx = True - check_whois = False - if ( - "dnstwist_check_whois" in scan["options"].keys() - and data["options"]["dnstwist_check_whois"] - ): - check_whois = True - check_banners = False - if ( - "dnstwist_check_banners" in scan["options"].keys() - and data["options"]["dnstwist_check_banners"] - ): - check_banners = True + options = { + "tld": False, + "check_ssdeep": False, + "check_geoip": False, + "check_mx": False, + "check_whois": False, + "check_banners": False, + } + + # Dictionary to map option names to their variables and initial values + options_mapping = { + "dnstwist_check_tld": ("tld", engine.scanner["dnstwist_common_tlds"]), + "dnstwist_check_ssdeep": ("check_ssdeep", True), + "dnstwist_check_geoip": ("check_geoip", True), + "dnstwist_check_mx": ("check_mx", True), + "dnstwist_check_whois": ("check_whois", True), + "dnstwist_check_banners": ("check_banners", True), + } + + # Check if options should be tested + for option, (var_name, value) in options_mapping.items(): + if option in scan["options"].keys() and data["options"].get(option): + options[var_name] = 
value + timeout = APP_TIMEOUT if "max_timeout" in scan["options"].keys() and data["options"]["max_timeout"]: timeout = data["options"]["max_timeout"] for asset in data["assets"]: if asset["datatype"] == "domain": - th = this.pool.submit( - dnstwist.search_subdomains, - scan_id, + th = engine.metadata["pool"].submit( + DnsTwist.search_subdomains, asset["value"], - tld, - check_ssdeep, - check_geoip, - check_mx, - check_whois, - check_banners, + options["tld"], + options["check_ssdeep"], + options["check_geoip"], + options["check_mx"], + options["check_whois"], + options["check_banners"], timeout, ) - this.scans[scan_id]["dnstwist"][asset["value"]] = {} - this.scans[scan_id]["futures"].append(th) + engine.scans[scan_id]["dnstwist"][asset["value"]] = {} + engine.scans[scan_id]["futures"].append(th) if ( "do_reverse_whois" in scan["options"].keys() @@ -336,42 +388,42 @@ def start_scan(): ): for asset in data["assets"]: if asset["datatype"] in ["domain", "fqdn", "keyword", "email"]: - th = this.pool.submit( + th = engine.metadata["pool"].submit( _reverse_whois, scan_id, asset["value"], asset["datatype"] ) - this.scans[scan_id]["futures"].append(th) + engine.scans[scan_id]["futures"].append(th) if "do_cdn_check" in scan["options"].keys() and data["options"]["do_cdn_check"]: for asset in data["assets"]: if asset["datatype"] in ["ip", "domain", "fqdn"]: - th = this.pool.submit( + th = engine.metadata["pool"].submit( _cdn_check, scan_id, asset["value"], asset["datatype"] ) - this.scans[scan_id]["futures"].append(th) + engine.scans[scan_id]["futures"].append(th) if "do_waf_check" in scan["options"].keys() and data["options"]["do_waf_check"]: for asset in data["assets"]: if asset["datatype"] == "ip": - th = this.pool.submit( + th = engine.metadata["pool"].submit( _waf_check, scan_id, asset["value"], asset["datatype"] ) - this.scans[scan_id]["futures"].append(th) + engine.scans[scan_id]["futures"].append(th) if "do_cloud_check" in scan["options"].keys() and data["options"]["do_cloud_check"]: for asset in data["assets"]: if asset["datatype"] == "ip": - th = this.pool.submit( + th = engine.metadata["pool"].submit( _cloud_check, scan_id, asset["value"], asset["datatype"] ) - this.scans[scan_id]["futures"].append(th) + engine.scans[scan_id]["futures"].append(th) if "do_saas_check" in scan["options"].keys() and data["options"]["do_saas_check"]: for asset in data["assets"]: if asset["datatype"] == "ip": - th = this.pool.submit( + th = engine.metadata["pool"].submit( _saas_check, scan_id, asset["value"], asset["datatype"] ) - this.scans[scan_id]["futures"].append(th) + engine.scans[scan_id]["futures"].append(th) res.update({"status": "accepted", "details": {"scan_id": scan["scan_id"]}}) @@ -445,12 +497,14 @@ def _get_wf_domains(wf_url: str, max_pages: int): def _reverse_whois(scan_id, asset, datatype): res = {} domains = [] - if len(this.wf_apitokens) == 0: + if len(engine.metadata["wf_apitokens"]) == 0: # No whoisfreak API Token available return res # Select an API KEY - apikey = this.wf_apitokens[random.randint(0, len(this.wf_apitokens) - 1)] + apikey = engine.metadata["wf_apitokens"][ + random.randint(0, len(engine.metadata["wf_apitokens"]) - 1) + ] # Check the asset is a valid domain name or IP Address if datatype in ["domain", "fqdn"]: @@ -501,11 +555,11 @@ def _reverse_whois(scan_id, asset, datatype): # Limit max pages to rationalize credit usage max_pages = APP_WF_MAX_PAGE if ( - "reverse_whois_max_pages" in this.scans[scan_id]["options"].keys() - and 
isinstance(this.scans[scan_id]["options"]["reverse_whois_max_pages"], int) - and this.scans[scan_id]["options"]["reverse_whois_max_pages"] > 0 + "reverse_whois_max_pages" in engine.scans[scan_id]["options"].keys() + and isinstance(engine.scans[scan_id]["options"]["reverse_whois_max_pages"], int) + and engine.scans[scan_id]["options"]["reverse_whois_max_pages"] > 0 ): - max_pages = this.scans[scan_id]["options"]["reverse_whois_max_pages"] + max_pages = engine.scans[scan_id]["options"]["reverse_whois_max_pages"] try: for wf_type in wf_types: @@ -525,10 +579,10 @@ def _reverse_whois(scan_id, asset, datatype): scan_lock = threading.RLock() with scan_lock: - if "reverse_whois" not in this.scans[scan_id]["findings"].keys(): - this.scans[scan_id]["findings"]["reverse_whois"] = {} + if "reverse_whois" not in engine.scans[scan_id]["findings"].keys(): + engine.scans[scan_id]["findings"]["reverse_whois"] = {} if bool(res): - this.scans[scan_id]["findings"]["reverse_whois"].update(res) + engine.scans[scan_id]["findings"]["reverse_whois"].update(res) return res @@ -546,7 +600,7 @@ def is_ipaddr_in_subnet(ip: str, subnet: str) -> bool: def _check_ip(ip: str, record_types: list = []) -> dict: """Check IP from CDN, WAF, Cloud, SaaS providers public records.""" - with open(this.scanner["external_ip_ranges_path"]) as all_data_file: + with open(engine.scanner["external_ip_ranges_path"]) as all_data_file: all_data = json.loads(all_data_file.read()) all_data_types = all_data.keys() # ["cdn", "waf", "cloud", "parking", "saas"] @@ -596,10 +650,10 @@ def _cdn_check(scan_id: str, asset: str, datatype: str) -> dict: scan_lock = threading.RLock() with scan_lock: - if "cdn_check" not in this.scans[scan_id]["findings"].keys(): - this.scans[scan_id]["findings"]["cdn_check"] = {} + if "cdn_check" not in engine.scans[scan_id]["findings"].keys(): + engine.scans[scan_id]["findings"]["cdn_check"] = {} if bool(res): - this.scans[scan_id]["findings"]["cdn_check"].update({asset: res}) + engine.scans[scan_id]["findings"]["cdn_check"].update({asset: res}) return res @@ -615,10 +669,10 @@ def _waf_check(scan_id: str, asset: str, datatype: str) -> dict: scan_lock = threading.RLock() with scan_lock: - if "waf_check" not in this.scans[scan_id]["findings"].keys(): - this.scans[scan_id]["findings"]["waf_check"] = {} + if "waf_check" not in engine.scans[scan_id]["findings"].keys(): + engine.scans[scan_id]["findings"]["waf_check"] = {} if bool(res): - this.scans[scan_id]["findings"]["waf_check"].update({asset: res}) + engine.scans[scan_id]["findings"]["waf_check"].update({asset: res}) return res @@ -634,10 +688,10 @@ def _cloud_check(scan_id: str, asset: str, datatype: str) -> dict: scan_lock = threading.RLock() with scan_lock: - if "cloud_check" not in this.scans[scan_id]["findings"].keys(): - this.scans[scan_id]["findings"]["cloud_check"] = {} + if "cloud_check" not in engine.scans[scan_id]["findings"].keys(): + engine.scans[scan_id]["findings"]["cloud_check"] = {} if bool(res): - this.scans[scan_id]["findings"]["cloud_check"].update({asset: res}) + engine.scans[scan_id]["findings"]["cloud_check"].update({asset: res}) return res @@ -653,24 +707,24 @@ def _saas_check(scan_id: str, asset: str, datatype: str) -> dict: scan_lock = threading.RLock() with scan_lock: - if "saas_check" not in this.scans[scan_id]["findings"].keys(): - this.scans[scan_id]["findings"]["saas_check"] = {} + if "saas_check" not in engine.scans[scan_id]["findings"].keys(): + engine.scans[scan_id]["findings"]["saas_check"] = {} if bool(res): - 
this.scans[scan_id]["findings"]["saas_check"].update({asset: res}) + engine.scans[scan_id]["findings"]["saas_check"].update({asset: res}) return res def _do_seg_check(scan_id, asset_value): seg_dict = [] - dns_records = __dns_resolve_asset(asset_value, "MX") + dns_records = _dns_resolve_asset(asset_value, "MX") has_seg = False if len(dns_records) == 0: # seg_dict = {"status": "failed", "reason": f"no MX records found for asset '{asset_value}'"} return - with open(this.scanner["seg_path"]) as seg_providers_file: + with open(engine.scanner["seg_path"]) as seg_providers_file: seg_providers = json.loads(seg_providers_file.read())["seg"] for dns_record in dns_records: @@ -684,44 +738,30 @@ def _do_seg_check(scan_id, asset_value): scan_lock = threading.RLock() with scan_lock: - if "seg_dict" not in this.scans[scan_id]["findings"].keys(): - this.scans[scan_id]["findings"]["seg_dict"] = {} - this.scans[scan_id]["findings"]["seg_dict_dns_records"] = {} + if "seg_dict" not in engine.scans[scan_id]["findings"].keys(): + engine.scans[scan_id]["findings"]["seg_dict"] = {} + engine.scans[scan_id]["findings"]["seg_dict_dns_records"] = {} - if asset_value not in this.scans[scan_id]["findings"].keys(): - this.scans[scan_id]["findings"]["seg_dict"][asset_value] = {} - this.scans[scan_id]["findings"]["seg_dict_dns_records"][asset_value] = {} + if asset_value not in engine.scans[scan_id]["findings"].keys(): + engine.scans[scan_id]["findings"]["seg_dict"][asset_value] = {} + engine.scans[scan_id]["findings"]["seg_dict_dns_records"][asset_value] = {} if has_seg is True: - this.scans[scan_id]["findings"]["seg_dict"][asset_value] = copy.deepcopy( + engine.scans[scan_id]["findings"]["seg_dict"][asset_value] = copy.deepcopy( seg_dict ) - this.scans[scan_id]["findings"]["seg_dict_dns_records"][asset_value] = ( + engine.scans[scan_id]["findings"]["seg_dict_dns_records"][asset_value] = ( copy.deepcopy(dns_records) ) else: - this.scans[scan_id]["findings"]["no_seg"] = { + engine.scans[scan_id]["findings"]["no_seg"] = { asset_value: "MX records found but no Secure Email Gateway set" } -def _recursive_spf_lookups(spf_line): - spf_lookups = 0 - for word in spf_line.split(" "): - if "include:" in word: - url = word.replace("include:", "") - spf_lookups += 1 - dns_resolve = __dns_resolve_asset(url, "TXT") - for record in dns_resolve: - for value in record["values"]: - if "spf" in value: - spf_lookups += _recursive_spf_lookups(value) - return spf_lookups - - def _do_dmarc_check(scan_id, asset_value): dmarc_dict = {"no_dmarc_record": "info"} - dns_records = __dns_resolve_asset(asset_value, "TXT") + dns_records = _dns_resolve_asset(asset_value, "TXT") for record in dns_records: for value in record["values"]: if "DMARC" in value: @@ -736,9 +776,9 @@ def _do_dmarc_check(scan_id, asset_value): if num < 100: dmarc_dict["dmarc_partial_coverage"] = "medium" - with this.scan_lock: - this.scans[scan_id]["findings"]["dmarc_dict"] = {asset_value: dmarc_dict} - this.scans[scan_id]["findings"]["dmarc_dict_dns_records"] = { + with engine.metadata["scan_lock"]: + engine.scans[scan_id]["findings"]["dmarc_dict"] = {asset_value: dmarc_dict} + engine.scans[scan_id]["findings"]["dmarc_dict_dns_records"] = { asset_value: dns_records } @@ -749,7 +789,7 @@ def _do_dkim_check(scan_id, asset_value): dkim_found_list = {} for selector in dkimlist: dkim_record = selector + "._domainkey." 
+ asset_value - dns_records = __dns_resolve_asset(dkim_record) + dns_records = _dns_resolve_asset(dkim_record) if len(dns_records) > 0: found_dkim = True for dns_record in dns_records: @@ -760,85 +800,289 @@ def _do_dkim_check(scan_id, asset_value): else: dkim_dict["dkim"] = dkim_found_list - with this.scan_lock: - this.scans[scan_id]["findings"]["dkim_dict"] = {asset_value: dkim_dict} - this.scans[scan_id]["findings"]["dkim_dict_dns_records"] = { + with engine.metadata["scan_lock"]: + engine.scans[scan_id]["findings"]["dkim_dict"] = {asset_value: dkim_dict} + engine.scans[scan_id]["findings"]["dkim_dict_dns_records"] = { asset_value: dns_records } -def _perform_spf_check(scan_id, asset_value): - dns_records = __dns_resolve_asset(asset_value, "TXT") - spf_dict = {"no_spf_found": "high", "spf_lookups": 0, "title_prefix": "No SPF"} +def _do_spf_check(scan_id: int, asset_value: str) -> None: + """Check SPF record lookup""" + dns_txt_records = _dns_resolve_asset(asset_value, "TXT") + answers = dns_txt_records[0].get("answers") if dns_txt_records else [] + # Parses SPF records + parsed_spf_record, issues = _parse_spf_record(answers) - for record in dns_records: - for value in record["values"]: - if "spf" in value: - spf_dict.pop("no_spf_found") - spf_lookups = _recursive_spf_lookups(value) - spf_dict["spf_lookups"] = spf_lookups - if spf_lookups > 10: - spf_dict["spf_too_many_lookups"] = "medium" - spf_dict["title_prefix"] = "Too many lookups" - if "+all" in value: - spf_dict["+all_spf_found"] = "very high" - spf_dict["title_prefix"] = "All SPF" - elif "~all" in value: - spf_dict["~all_spf_found"] = "medium" - spf_dict["title_prefix"] = "All SPF" - elif "?all" in value: - spf_dict["no_spf_all_or_?all"] = "high" - spf_dict["title_prefix"] = "No SPF or ALL" - elif "-all" in value: - spf_dict["-all_spf_found?all"] = "info" - spf_dict["title_prefix"] = "All SPF" - elif "all" not in value: - spf_dict["no_spf_all_or_?all"] = "high" - spf_dict["title_prefix"] = "No SPF or ALL" - - with this.scan_lock: - this.scans[scan_id]["findings"]["spf_dict"] = {asset_value: spf_dict} - this.scans[scan_id]["findings"]["spf_dict_dns_records"] = { - asset_value: dns_records + # Issue: DEPRECATED_SPF_RECORD + dns_spf_records = _dns_resolve_asset(asset_value, "SPF") + if dns_spf_records: + issues.append(spf_issues.DEPRECATED_SPF_RECORD) + + # Issue: DNS_LOOKUP_LIMIT + dns_lookup_limit = 10 + try: + dns_lookup_count, spf_lookup_records = get_lookup_count_and_spf_records( + domain=asset_value + ) + except RecursionError as error: + app.logger.info( + f"RecursionError on {asset_value} with get_lookup_count_and_spf_records: {error}" + ) + issues.append( + dict( + spf_issues.DNS_LOOKUP_LIMIT, + extra_info=f"More than {sys.getrecursionlimit()} DNS lookups are required to validate SPF record.", + ) + ) + else: + if dns_lookup_count > dns_lookup_limit: + issues.append( + dict( + spf_issues.DNS_LOOKUP_LIMIT, + value=spf_lookup_records[0] if spf_lookup_records else "", + extra_info=f"{dns_lookup_count} DNS lookups are required to validate SPF record.", + ) + ) + + with engine.metadata["scan_lock"]: + engine.scans[scan_id]["findings"].setdefault("spf_issues", {}) + engine.scans[scan_id]["findings"]["spf_issues"][asset_value] = { + "issues": issues, + "parsed_spf_record": parsed_spf_record, } - return spf_dict + + +def _parse_spf_record(dns_records: list[str]) -> tuple[list, list]: + # Basic mechanisms, they contribute to the language framework. + # They do not specify a particular type of authorization scheme. 
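+ # Note: parsed_spf_record is appended to and returned below, but it is never
+ # initialized anywhere in this hunk; define it here to avoid a NameError.
+ parsed_spf_record = []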
+ basic_mechanisms = ["all", "include"] + # Designated sender mechanisms, they are used to designate a set of addresses as being permitted or + # not permitted to use the for sending mail. + designed_sender_mechanisms = ["a", "mx", "ptr", "ip4", "ip6", "exists"] + + spf_record_count = 0 + issues = [] + + for dns_record in dns_records: + value = dns_record.removeprefix('"').removesuffix('"').replace('" "', "") + # Check the version + if "v=spf1" not in value.lower(): + continue + spf_record_count += 1 + + # Issue: MALFORMED_SPF_RECORD + if value[0] == " ": + issues.append( + dict( + spf_issues.MALFORMED_SPF_RECORD, + value=value, + extra_info="There is an extra space before the start of the string.", + ) + ) + value = value.lstrip(" ") + # Check for extra spaces after the end of the string + if value[-1] == " ": + issues.append( + dict( + spf_issues.MALFORMED_SPF_RECORD, + value=value, + extra_info="There is an extra space after the end of the string.", + ) + ) + value = value.rstrip(" ") + # Check for quoted TXT record + if value[0] == '"' or value[-1] == '"': + issues.append( + dict( + spf_issues.MALFORMED_SPF_RECORD, + value=value, + extra_info="The SPF record is surrounded quotation marks.", + ) + ) + value = value.strip('"') + + # Issue: DIRECTIVES_AFTER_ALL + directives_after_all = re.search(r"[-~?+]?all (.+)", value) + if directives_after_all: + issues.append( + dict( + spf_issues.DIRECTIVES_AFTER_ALL, + value=value, + extra_info=f'These directives after "all" are ignored: {directives_after_all.group(1)}.', + ) + ) + + # Issue: STRING_TOO_LONG + maximum_string_length = 255 + for character_string in dns_record.strip('"').split('" "'): + if len(character_string) > maximum_string_length: + issues.append( + dict( + spf_issues.STRING_TOO_LONG, + value=value, + extra_info=f"This part is {len(character_string)} characters long, " + f"and therefore too long: {character_string}.", + ) + ) + continue + + # List of directives + spf_directives = value.split() + spf_directives.pop(0) # version is not a directive, remove it from directives + + # Issue: MISS_SPF_RECORD_TERMINATION + if not re.search(r"[-~?+]?(all|redirect=)", spf_directives[-1].lower()): + issues.append(dict(spf_issues.MISS_SPF_RECORD_TERMINATION, value=value)) + + for spf_directive in spf_directives: + directive_qualifier = "+" # qualifier is optional, and defaults to "+" + directive_value = "" + if "=" in spf_directive: # Modifiers, and not mechanisms + directive_type, directive_value = spf_directive.split("=") + directive_type = directive_type.lower() + directive_value = directive_value.lower() + parsed_spf_record.append([directive_qualifier, directive_type, directive_value]) + # Unrecognized modifiers MUST be ignored + continue + if ":" in spf_directive: # Mechanisms with value + directive_type, directive_value = spf_directive.split(":") + directive_type = directive_type.lower() + directive_value = directive_value.lower() + else: # Mechanisms without value + directive_type = spf_directive.lower() + if directive_type.startswith(("-", "~", "?", "+")): + directive_qualifier = directive_type[0] + directive_type = directive_type[1:].lower() + + if directive_type not in basic_mechanisms + designed_sender_mechanisms: + issues.append( + dict( + spf_issues.MALFORMED_SPF_RECORD, + value=value, + extra_info=f"'{directive_type}' is an illegal term.", + ) + ) + + if directive_type == "ptr": + issues.append(dict(spf_issues.PRESENCE_OF_PTR, value=value)) + elif directive_type == "all" and directive_qualifier in ["?", "+"]: + 
issues.append(dict(spf_issues.PERMISSIVE_SPF_RECORD, value=value)) + + parsed_spf_record.append([directive_qualifier, directive_type, directive_value]) + + # Issue: NO_SPF_RECORD + if spf_record_count == 0: + issues.append( + dict( + spf_issues.NO_SPF_RECORD, + extra_info=( + f"Other DNS TXT records are: {', '.join(dns_records)}." + if dns_records + else "There is no DNS TXT record." + ), + ) + ) + # Issue: MULTIPLE_SPF_RECORDS + elif spf_record_count > 1: + issues.append( + dict( + spf_issues.MULTIPLE_SPF_RECORDS, + extra_info=f"Other DNS TXT records are: {', '.join(dns_records)}.", + ) + ) + + return parsed_spf_record, issues + + +def get_lookup_count_and_spf_records(domain: str) -> tuple[int, list[tuple[str, str]]]: + """Count the numbers of DNS queries during SPF evaluation and retrieve the SPF records + + The following terms cause DNS queries: the "include", "a", "mx", "ptr", and "exists" mechanisms, and the "redirect" + modifier. SPF implementations MUST limit the total number of those terms to 10 during SPF evaluation, to avoid + an unreasonable load on the DNS. + + :param domain: A domain name + :return: Number of DNS queries during SPF evaluation, and the list of SPF records queried + """ + dns_records = _dns_resolve_asset(domain, "TXT") + if not dns_records: + return 0, [] + + spf_records = list( + filter( + lambda dns_record: dns_record.lower().startswith("v=spf1"), + dns_records[0].get("values"), + ) + ) + if not spf_records: + return 0, [] + + spf_record = spf_records[0] + lookup_domains = re.findall( + r"\b[+\-~?]?(?:include:|redirect=)(\S+)\b", spf_record, re.IGNORECASE + ) + other_terms_count = len( + re.findall(r"\b[+\-~?]?(a|mx|ptr|exists):?\b", spf_record, re.IGNORECASE) + ) + if not lookup_domains: + return other_terms_count, [(domain, spf_record)] + + dns_lookup_count = len(lookup_domains) + other_terms_count + spf_lookup_records = [(domain, spf_record)] + for lookup_domain in lookup_domains: + domain_dns_lookup_count, domain_spf_lookup_records = ( + get_lookup_count_and_spf_records(lookup_domain) + ) + dns_lookup_count += domain_dns_lookup_count + spf_lookup_records.extend(domain_spf_lookup_records) + + return dns_lookup_count, spf_lookup_records def _dns_resolve(scan_id, asset, check_subdomains=False): scan_lock = threading.RLock() with scan_lock: - if "dns_resolve" not in this.scans[scan_id]["findings"].keys(): - this.scans[scan_id]["findings"]["dns_resolve"] = {} - this.scans[scan_id]["findings"]["dns_resolve"][asset] = {} - this.scans[scan_id]["findings"]["dns_resolve"][asset] = copy.deepcopy( - __dns_resolve_asset(asset) + if "dns_resolve" not in engine.scans[scan_id]["findings"].keys(): + engine.scans[scan_id]["findings"]["dns_resolve"] = {} + engine.scans[scan_id]["findings"]["dns_resolve"][asset] = {} + engine.scans[scan_id]["findings"]["dns_resolve"][asset] = copy.deepcopy( + _dns_resolve_asset(asset) ) - return this.scans[scan_id]["findings"]["dns_resolve"][asset] + return engine.scans[scan_id]["findings"]["dns_resolve"][asset] -def __dns_resolve_asset(asset, type_of_record=False): +def _dns_resolve_asset( + asset: str, type_of_record: str = None +) -> list[dict[str, str | list[str]]]: sub_res = [] - try: - record_types = ["CNAME", "A", "AAAA", "MX", "NS", "TXT", "SOA", "SRV"] - if type_of_record: - record_types = [type_of_record] - for record_type in record_types: - try: - answers = this.resolver.query(asset, record_type) - sub_res.append( - { - "record_type": record_type, - "values": [str(rdata) for rdata in answers], - } - ) - except 
dns.resolver.NoAnswer: - pass - except dns.resolver.Timeout: - pass - except Exception: - pass - except dns.resolver.NXDOMAIN: - pass + record_types = ["CNAME", "A", "AAAA", "MX", "NS", "TXT", "SOA", "SRV"] + if type_of_record: + record_types = [type_of_record] + for record_type in record_types: + try: + answers = engine.metadata["resolver"].query(asset, record_type) + except dns.resolver.NoAnswer: + pass + except dns.resolver.Timeout: + pass + except dns.resolver.NXDOMAIN: + pass + except Exception as e: + app.logger.error( + f"DNS resolve raises an exception for asset '{asset}': {e}" + ) + else: + sub_res.append( + { + "record_type": record_type, + "values": [ + str(rdata).strip('"').replace('" "', " ") for rdata in answers + ], + "answers": [str(rdata) for rdata in answers], + } + ) + return sub_res @@ -850,7 +1094,9 @@ def _reverse_dns(scan_id, asset): return res try: - answers = this.resolver.query(dns.reversename.from_address(asset), "PTR") + answers = engine.metadata["resolver"].query( + dns.reversename.from_address(asset), "PTR" + ) res.update({asset: [str(rdata) for rdata in answers]}) except dns.resolver.NoAnswer: pass @@ -861,10 +1107,10 @@ def _reverse_dns(scan_id, asset): scan_lock = threading.RLock() with scan_lock: - if "reverse_dns" not in this.scans[scan_id]["findings"].keys(): - this.scans[scan_id]["findings"]["reverse_dns"] = {} + if "reverse_dns" not in engine.scans[scan_id]["findings"].keys(): + engine.scans[scan_id]["findings"]["reverse_dns"] = {} if bool(res): - this.scans[scan_id]["findings"]["reverse_dns"].update(res) + engine.scans[scan_id]["findings"]["reverse_dns"].update(res) return res @@ -910,10 +1156,10 @@ def _get_whois(scan_id, asset): scan_lock = threading.RLock() with scan_lock: - if "whois" not in this.scans[scan_id]["findings"].keys(): - this.scans[scan_id]["findings"]["whois"] = {} + if "whois" not in engine.scans[scan_id]["findings"].keys(): + engine.scans[scan_id]["findings"]["whois"] = {} if bool(res): - this.scans[scan_id]["findings"]["whois"].update(res) + engine.scans[scan_id]["findings"]["whois"].update(res) return res @@ -1041,30 +1287,30 @@ def _subdomain_bruteforce(scan_id, asset): # Check wildcard domain w_domain = "{}.{}".format(get_random_string(), asset) - if len(__dns_resolve_asset(w_domain)) > 0: + if len(_dns_resolve_asset(w_domain)) > 0: return res valid_sudoms = [] for sub in SUB_LIST: subdom = ".".join((sub, asset)) - results = __dns_resolve_asset(subdom) + results = _dns_resolve_asset(subdom) if len(results) > 0: valid_sudoms.append(subdom) # add the subdomain in scan['findings']['subdomains_list'] if not exists - # if 'do_subdomains_resolve' in this.scans[scan_id]['options'].keys() and this.scans[scan_id]['options']['do_subdomains_resolve']: + # if 'do_subdomains_resolve' in engine.scans[scan_id]['options'].keys() and engine.scans[scan_id]['options']['do_subdomains_resolve']: # print("passe la") - # if 'subdomains_list' in this.scans[scan_id]['findings'].keys(): - # if asset in this.scans[scan_id]['findings']['subdomains_list']: - # if subdom not in this.scans[scan_id]['findings']['subdomains_list'][asset]: - # this.scans[scan_id]['findings']['subdomains_list'][asset].extend(valid_sudoms) + # if 'subdomains_list' in engine.scans[scan_id]['findings'].keys(): + # if asset in engine.scans[scan_id]['findings']['subdomains_list']: + # if subdom not in engine.scans[scan_id]['findings']['subdomains_list'][asset]: + # engine.scans[scan_id]['findings']['subdomains_list'][asset].extend(valid_sudoms) # else: - # 
this.scans[scan_id]['findings']['subdomains_list'][asset] = valid_sudoms + # engine.scans[scan_id]['findings']['subdomains_list'][asset] = valid_sudoms # else: - # this.scans[scan_id]['findings']['subdomains_list'] = {} - # this.scans[scan_id]['findings']['subdomains_list'][asset] = valid_sudoms - # @todo: mutex on this.scans[scan_id]['findings']['subdomains_resolve'] + # engine.scans[scan_id]['findings']['subdomains_list'] = {} + # engine.scans[scan_id]['findings']['subdomains_list'][asset] = valid_sudoms + # @todo: mutex on engine.scans[scan_id]['findings']['subdomains_resolve'] return res @@ -1089,38 +1335,38 @@ def _subdomain_enum(scan_id, asset): res.update({asset: sub_res}) - if "subdomains_list" in this.scans[scan_id]["findings"].keys(): - if asset in this.scans[scan_id]["findings"]["subdomains_list"]: + if "subdomains_list" in engine.scans[scan_id]["findings"].keys(): + if asset in engine.scans[scan_id]["findings"]["subdomains_list"]: for subdom in sub_res: if ( subdom - not in this.scans[scan_id]["findings"]["subdomains_list"][asset] + not in engine.scans[scan_id]["findings"]["subdomains_list"][asset] ): - this.scans[scan_id]["findings"]["subdomains_list"][asset].extend( + engine.scans[scan_id]["findings"]["subdomains_list"][asset].extend( sub_res ) else: - this.scans[scan_id]["findings"]["subdomains_list"][asset] = list(sub_res) + engine.scans[scan_id]["findings"]["subdomains_list"][asset] = list(sub_res) else: - this.scans[scan_id]["findings"]["subdomains_list"] = {} - this.scans[scan_id]["findings"]["subdomains_list"][asset] = list(sub_res) + engine.scans[scan_id]["findings"]["subdomains_list"] = {} + engine.scans[scan_id]["findings"]["subdomains_list"][asset] = list(sub_res) if ( - "do_subdomains_resolve" in this.scans[scan_id]["options"].keys() - and this.scans[scan_id]["options"]["do_subdomains_resolve"] + "do_subdomains_resolve" in engine.scans[scan_id]["options"].keys() + and engine.scans[scan_id]["options"]["do_subdomains_resolve"] ): res_subdomains = {} for s in sub_res: - data = __dns_resolve_asset(s) + data = _dns_resolve_asset(s) if len(data) > 0: res_subdomains.update({s: data}) - # with this.scan_lock: - if "subdomains_resolve" not in this.scans[scan_id]["findings"].keys(): - this.scans[scan_id]["findings"]["subdomains_resolve"] = {} - if asset not in this.scans[scan_id]["findings"]["subdomains_resolve"].keys(): - this.scans[scan_id]["findings"]["subdomains_resolve"][asset] = {} - this.scans[scan_id]["findings"]["subdomains_resolve"][asset].update( + # with engine.scan_lock: + if "subdomains_resolve" not in engine.scans[scan_id]["findings"].keys(): + engine.scans[scan_id]["findings"]["subdomains_resolve"] = {} + if asset not in engine.scans[scan_id]["findings"]["subdomains_resolve"].keys(): + engine.scans[scan_id]["findings"]["subdomains_resolve"][asset] = {} + engine.scans[scan_id]["findings"]["subdomains_resolve"][asset].update( res_subdomains ) @@ -1129,35 +1375,36 @@ def _subdomain_enum(scan_id, asset): @app.route("/engines/owl_dns/stop/") def stop_scan(scan_id): + # leaving engine because of futures field in status_scan res = {"page": "stop"} - if scan_id not in this.scans.keys(): + if scan_id not in engine.scans.keys(): res.update( {"status": "error", "reason": "scan_id '{}' not found".format(scan_id)} ) return jsonify(res) - scan_status(scan_id) - if this.scans[scan_id]["status"] != "SCANNING": + status_scan(scan_id) + if engine.scans[scan_id]["status"] != "SCANNING": res.update( { "status": "error", "reason": "scan '{}' is not running (status={})".format( - 
scan_id, this.scans[scan_id]["status"] + scan_id, engine.scans[scan_id]["status"] ), } ) return jsonify(res) - for t in this.scans[scan_id]["threads"]: + for t in engine.scans[scan_id]["threads"]: try: t.join() - this.scans[scan_id]["threads"].remove(t) + engine.scans[scan_id]["threads"].remove(t) except Exception: pass - this.scans[scan_id]["status"] = "STOPPED" - this.scans[scan_id]["finished_at"] = int(time.time() * 1000) + engine.scans[scan_id]["status"] = "STOPPED" + engine.scans[scan_id]["finished_at"] = int(time.time() * 1000) res.update({"status": "success"}) return jsonify(res) @@ -1167,7 +1414,7 @@ def stop_scan(scan_id): @app.route("/engines/owl_dns/stopscans", methods=["GET"]) def stop(): res = {"page": "stopscans"} - for scan_id in this.scans.keys(): + for scan_id in engine.scans.keys(): stop_scan(scan_id) res.update({"status": "SUCCESS"}) @@ -1176,138 +1423,136 @@ def stop(): @app.route("/engines/owl_dns/clean") def clean(): - res = {"page": "clean"} - stop() - this.scans.clear() - # _loadconfig() - res.update({"status": "SUCCESS"}) - return jsonify(res) + """Clean all scans.""" + reloadconfig() + return engine.clean() @app.route("/engines/owl_dns/clean/") def clean_scan(scan_id): - res = {"page": "clean_scan"} - res.update({"scan_id": scan_id}) - - if scan_id not in this.scans.keys(): - res.update( - {"status": "error", "reason": "scan_id '{}' not found".format(scan_id)} - ) - return jsonify(res) - - # Terminate thread if any - for t in this.scans[scan_id]["threads"]: - try: - t.join() - this.scans[scan_id]["threads"].remove(t) - except Exception as e: - print(e) - pass - - # Remove Scan for current scans - this.scans.pop(scan_id) - res.update({"status": "removed"}) - return jsonify(res) + """Clean scan identified by id.""" + return engine.clean_scan(scan_id) @app.route("/engines/owl_dns/status/") -def scan_status(scan_id): - if scan_id not in this.scans.keys(): +def status_scan(scan_id): + if scan_id not in engine.scans.keys(): return jsonify( {"status": "ERROR", "details": "scan_id '{}' not found".format(scan_id)} ) all_threads_finished = True - if "threads" in this.scans[scan_id]: - for t in this.scans[scan_id]["threads"]: + if "threads" in engine.scans[scan_id]: + for t in engine.scans[scan_id]["threads"]: if t.is_alive(): - this.scans[scan_id]["status"] = "SCANNING" + engine.scans[scan_id]["status"] = "SCANNING" all_threads_finished = False break else: # Terminate thread t.join() - this.scans[scan_id]["threads"].remove(t) + engine.scans[scan_id]["threads"].remove(t) - if "futures" in this.scans[scan_id]: - for f in this.scans[scan_id]["futures"]: + if "futures" in engine.scans[scan_id]: + for f in engine.scans[scan_id]["futures"]: if not f.done(): - this.scans[scan_id]["status"] = "SCANNING" + engine.scans[scan_id]["status"] = "SCANNING" all_threads_finished = False break else: - # try: - # dnstwist_asset, dnstwist_results = f.result() - # this.scans[scan_id]['dnstwist'][dnstwist_asset] = dnstwist_results - # except Exception: - # pass - this.scans[scan_id]["futures"].remove(f) - - if "threads" not in this.scans[scan_id] and "futures" not in this.scans[scan_id]: - this.scans[scan_id]["status"] = "STARTED" + try: + dnstwist_asset, dnstwist_results = f.result() + engine.scans[scan_id]["dnstwist"][dnstwist_asset] = dnstwist_results + except Exception: + pass + engine.scans[scan_id]["futures"].remove(f) + + if ( + "threads" not in engine.scans[scan_id] + and "futures" not in engine.scans[scan_id] + ): + engine.scans[scan_id]["status"] = "STARTED" all_threads_finished = 
False try: if ( all_threads_finished - and len(this.scans[scan_id]["threads"]) == 0 - and len(this.scans[scan_id]["futures"]) == 0 + and len(engine.scans[scan_id]["threads"]) == 0 + and len(engine.scans[scan_id]["futures"]) == 0 ): - this.scans[scan_id]["status"] = "FINISHED" - this.scans[scan_id]["finished_at"] = int(time.time() * 1000) + engine.scans[scan_id]["status"] = "FINISHED" + engine.scans[scan_id]["finished_at"] = int(time.time() * 1000) except Exception: pass - return jsonify({"status": this.scans[scan_id]["status"]}) + return jsonify({"status": engine.scans[scan_id]["status"]}) + + +@app.route("/engines/owl_dns/fullstatus") +def get_full_status(self): + """Return engine status with all assets on scans.""" + return _status_owl_dns(True) @app.route("/engines/owl_dns/status") def status(): + return _status_owl_dns() + + +def _status_owl_dns(full_status=False): + """Get the status of the engine and all its scans.""" + # FIXME ARS-280 this is c/c because of weird use of threadpool and futures field res = {"page": "status"} + engine.scanner["status"] = "READY" + status_code = 200 - if len(this.scans) == APP_MAXSCANS * 2: - this.scanner["status"] = "BUSY" - else: - this.scanner["status"] = "READY" + # display info on the scanner + res.update({"scanner": engine.scanner}) - scans = [] - for scan_id in this.scans.keys(): - scan_status(scan_id) - scans.append( - { - scan_id: { - "status": this.scans[scan_id]["status"], - "started_at": this.scans[scan_id]["started_at"], - "assets": this.scans[scan_id]["assets"], + # display the status of scans performed + scans = {} + all_scans = list(engine.scans.keys()).copy() + for scan in all_scans: + try: + engine.status_scan(scan) + scans.update( + { + scan: { + "status": engine.scans[scan]["status"], + "options": engine.scans[scan]["options"], + "nb_findings": engine.scans[scan]["nb_findings"], + "nb_assets": len(engine.scans[scan]["assets"]), + "position": engine.scans[scan]["position"], + "root_scan_id": engine.scans[scan]["root_scan_id"], + } } - } - ) - - res.update( - { - "nb_scans": len(this.scans), - "status": this.scanner["status"], - "scanner": this.scanner, - "scans": scans, - } - ) + ) + if full_status: + scans[scan].update({"assets": engine.scans[scan]["assets"]}) + except Exception: + pass + res.update({"scans": scans}) - return jsonify(res) + if engine._engine_is_busy() is True: + engine.scanner["status"] = "BUSY" + conf_file = engine.base_dir + "/" + engine.name + ".json" + if not os.path.exists(conf_file): + engine.scanner["status"] = "ERROR" -@app.route("/engines/owl_dns/info") -def info(): - status() - return jsonify({"page": "info", "engine_config": this.scanner}) + res.update({"status": engine.scanner["status"]}) + if engine.scanner["status"] == "ERROR": + status_code = 500 + return jsonify(res), status_code def _parse_results(scan_id): issues = [] summary = {} - # scan = this.scans[scan_id] - scan = copy.deepcopy(this.scans[scan_id]) + # scan = engine.scans[scan_id] + scan = copy.deepcopy(engine.scans[scan_id]) nb_vulns = { "info": 0, "low": 0, @@ -1318,12 +1563,11 @@ def _parse_results(scan_id): ts = int(time.time() * 1000) # dnstwist - # print(this.scans[scan_id]['dnstwist'].keys()) - if "dnstwist" in this.scans[scan_id].keys(): - for asset in this.scans[scan_id]["dnstwist"].keys(): + if "dnstwist" in engine.scans[scan_id].keys(): + for asset in engine.scans[scan_id]["dnstwist"].keys(): try: - dnstwist_issues = dnstwist.parse_results( - ts, asset, this.scans[scan_id]["dnstwist"][asset] + dnstwist_issues = 
DnsTwist.parse_results( + ts, asset, engine.scans[scan_id]["dnstwist"][asset] ) except KeyError: app.logger.error("dnstwist: missing result (domain-name)") @@ -1333,13 +1577,13 @@ def _parse_results(scan_id): issues.append(dnstwist_issue) # dns resolve - if "dns_resolve" in this.scans[scan_id]["findings"].keys(): - for asset in this.scans[scan_id]["findings"]["dns_resolve"].keys(): + if "dns_resolve" in engine.scans[scan_id]["findings"].keys(): + for asset in engine.scans[scan_id]["findings"]["dns_resolve"].keys(): dns_resolve_str = "" - dns_records = this.scans[scan_id]["findings"]["dns_resolve"][asset] + dns_records = engine.scans[scan_id]["findings"]["dns_resolve"][asset] # print(asset, dns_records) # for key, value in sorted(scan['findings']['dns_resolve'].items(), key=lambda x:x[1], reverse=True): - # for key, value in sorted(this.scans[scan_id]['findings']['dns_resolve'][asset].items(), key=lambda x:x[1], reverse=True): + # for key, value in sorted(engine.scans[scan_id]['findings']['dns_resolve'][asset].items(), key=lambda x:x[1], reverse=True): # for value in dns_records: # for record in value: # entry = "Record type '{}': {}".format(record['record_type'], ", ".join(record['values'])) @@ -1422,33 +1666,38 @@ def _parse_results(scan_id): } ) - if "spf_dict" in scan["findings"].keys(): - for asset in scan["findings"]["spf_dict"].keys(): - spf_check = scan["findings"]["spf_dict"][asset] - spf_check_dns_records = scan["findings"]["spf_dict_dns_records"][asset] - spf_hash = hashlib.sha1( - str(spf_check_dns_records).encode("utf-8") - ).hexdigest()[:6] - spf_check.pop("spf_lookups") - title_prefix = spf_check.pop("title_prefix") + if "spf_issues" in scan["findings"]: + for asset_value in scan["findings"]["spf_issues"]: + issues_from_spf_check = scan["findings"]["spf_issues"][asset_value][ + "issues" + ] + parsed_spf_record = scan["findings"]["spf_issues"][asset_value]["parsed_spf_record"] + + for spf_issue in issues_from_spf_check: + description = spf_issue.get("description") + if spf_issue.get("value"): + description += f"\n\nThe SPF record is: {spf_issue['value']}" + if spf_issue.get("extra_info"): + description += f"\n\n{spf_issue['extra_info']}" - for c in spf_check: - h = str(c) + str(spf_check_dns_records) - spf_hash = hashlib.sha1(h.encode("utf-8")).hexdigest()[:6] issues.append( { "issue_id": len(issues) + 1, - "severity": spf_check[c], - "confidence": "certain", - "target": {"addr": [asset], "protocol": "domain"}, - "title": "{} found for '{}' (HASH: {})".format( - title_prefix, asset, spf_hash - ), - "description": "{}\n".format(c), - "solution": "n/a", + "severity": spf_issue.get("severity", "info"), + "confidence": spf_issue.get("confidence", "certain"), + "target": {"addr": [asset_value], "protocol": "domain"}, + "title": spf_issue.get("title"), + "description": description, + "solution": spf_issue.get("solution"), "metadata": {"tags": ["domains", "spf"]}, "type": "spf_check", - "raw": scan["findings"]["spf_dict"][asset], + "raw": { + "description": spf_issue.get("description"), + "solution": spf_issue.get("solution"), + "parsed": parsed_spf_record, + "value": spf_issue.get("value"), + "extra_info": spf_issue.get("extra_info"), + }, "timestamp": ts, } ) @@ -2129,7 +2378,7 @@ def _parse_results(scan_id): "nb_high": nb_vulns["high"], "nb_critical": nb_vulns["critical"], "engine_name": "owl_dns", - "engine_version": this.scanner["version"], + "engine_version": engine.scanner["version"], } return issues, summary @@ -2144,18 +2393,18 @@ def getfindings(scan_id): ) # check if the 
scan_id exists - if scan_id not in this.scans.keys(): + if scan_id not in engine.scans.keys(): res.update({"status": "error", "reason": f"scan_id '{scan_id}' not found"}) return jsonify(res) # check if the scan is finished # status() - scan_status(scan_id) - if this.scans[scan_id]["status"] != "FINISHED": + status_scan(scan_id) + if engine.scans[scan_id]["status"] != "FINISHED": res.update( { "status": "error", - "reason": f"scan_id '{scan_id}' not finished (status={this.scans[scan_id]['status']})", + "reason": f"scan_id '{scan_id}' not finished (status={engine.scans[scan_id]['status']})", } ) return jsonify(res) @@ -2182,59 +2431,20 @@ def getfindings(scan_id): @app.route("/engines/owl_dns/getreport/") def getreport(scan_id): - if not scan_id.isdecimal(): - return jsonify( - {"status": "error", "reason": "scan_id must be numeric digits only"} - ) - filepath = f"{BASE_DIR}/results/owl_dns_{scan_id}.json" - - if not os.path.exists(filepath): - return jsonify( - { - "status": "error", - "reason": f"report file for scan_id '{scan_id}' not found", - } - ) - - return send_from_directory(f"{BASE_DIR}/results/", "owl_dns_{scan_id}.json") - - -def _json_serial(obj): - """ - JSON serializer for objects not serializable by default json code - Used for datetime serialization when the results are written in file - """ - if isinstance(obj, datetime.datetime) or isinstance(obj, datetime.date): - serial = obj.isoformat() - return serial - raise TypeError("Type not serializable") + """Get report on finished scans.""" + return engine.getreport(scan_id) @app.route("/engines/owl_dns/test") def test(): - if not APP_DEBUG: - return jsonify({"page": "test"}) - - res = "
<h2>Test Page (DEBUG):</h2>
" - for rule in app.url_map.iter_rules(): - options = {} - for arg in rule.arguments: - options[arg] = "[{0}]".format(arg) - - methods = ",".join(rule.methods) - url = url_for(rule.endpoint, **options) - res += urllib.request.url2pathname( - "{0:50s} {1:20s} {2}
".format( - rule.endpoint, methods, url - ) - ) - - return res + """Return test page.""" + return engine.test() @app.errorhandler(404) def page_not_found(e): - return jsonify({"page": "not found"}) + """Page not found.""" + return engine.page_not_found() @app.before_first_request diff --git a/engines/owl_dns/etc/issues/spf_issues.py b/engines/owl_dns/etc/issues/spf_issues.py new file mode 100644 index 00000000..a0a17f87 --- /dev/null +++ b/engines/owl_dns/etc/issues/spf_issues.py @@ -0,0 +1,131 @@ +# fmt: off +NO_SPF_RECORD = { + "severity": "low", + "confidence": "certain", + "title": "No SPF record", + "description": "An SPF (Sender Policy Framework) record defines the mail servers and domains that are allowed to " + "send email on behalf of your domain. It also tells receiving servers what to do with messages " + "after they're checked.", + "solution": "List which servers are allowed to send email on behalf of your domain, and add an SPF record on that " + "domain. If your domain doesn't send mail, this SPF record must be added: v=spf1 -all, or at least " + "v=spf1 ~all", +} + +# RFC 7208, Section 3.2 +MULTIPLE_SPF_RECORDS = { + "severity": "low", + "confidence": "certain", + "title": "Multiple SPF records", + "description": "A domain name must not have multiple records that would cause an authorization check to select " + "more than one record (see RFC 7208, Section 3.2).", + "solution": "Keep only one SPF record and delete the others: you should always update your SPF record, rather than " + "creating a new record in addition to the existing one.", +} + +# RFC 7208, Section 3.3 +STRING_TOO_LONG = { + "severity": "low", + "confidence": "certain", + "title": "String longer than 255 characters", + "description": "A TXT record string cannot be longer than 255 characters (see RFC 7208, Section 3.3).", + "solution": "A single TXT record can be composed of more than one string, which are useful in constructing " + "records that would exceed the 255-octet maximum length of a character-string within a single TXT " + "record.", +} + +# RFC 7208, Section 4.6.4 +DNS_LOOKUP_LIMIT = { + "severity": "low", + "confidence": "certain", + "title": "High number of DNS lookup", + "description": "The following terms cause DNS queries: the INCLUDE, A, MX, PTR, and EXISTS mechanisms, " + "and the REDIRECTS modifier. SPF implementations limits the total number of those terms to 10 " + "during SPF evaluation, to avoid unreasonable load on the DNS.", + "solution": "Review and adjust if necessary." +} + +# TODO: RFC 7208, Section 4.6.4 +# ARS-437 +# In addition for MX mechanism, the evaluation of each "MX" record MUST NOT result in querying more than 10 address +# records -- either "A" or "AAAA" resource records. + +# TODO: RFC 7208, Section 4.6.4 +# ARS-437 +# In addition for PTR mechanism, the evaluation of each "PTR" record MUST NOT result in querying more than 10 address +# records -- either "A" or "AAAA" resource records. + +# TODO: RFC 7208, Section 4.6.4 +# ARS-437 +# SPF implementations SHOULD limit "void lookups" to two (DNS queries return either a positive answer (RCODE 0) with an +# answer count of 0, or a "Name Error" (RCODE) answer. 
+
+# RFC 7208, Section 5.1
+DIRECTIVES_AFTER_ALL = { + "severity": "low", + "confidence": "certain", + "title": "Directives after ALL not allowed", + "description": "Mechanisms after ALL will never be tested and are ignored by mail servers (see RFC 7208, " + "Section 5.1).", + "solution": "Be sure to insert all desired mechanisms before the ~all stipulation, or the ensuing text will be " + "disregarded.", +}
+
+# RFC 7208, Section 5.5
+PRESENCE_OF_PTR = { + "severity": "low", + "confidence": "certain", + "title": "Mechanism PTR not recommended", + "description": "Use of PTR is discouraged because it is slow and not as reliable as other mechanisms in cases " + "of DNS errors, and it places a large burden on the .arpa name servers (see RFC 7208, " + "Section 5.5). Besides, several receivers may ignore the SPF record when this mechanism is used.", + "solution": "Alternative mechanisms should be used instead. If used, proper PTR records have to be in place for " + "the domain's hosts, and the PTR mechanism should be one of the last mechanisms checked.", +}
+
+# RFC 7208, Section 14.1
+DEPRECATED_SPF_RECORD = { + "severity": "low", + "confidence": "certain", + "title": "Deprecated SPF record", + "description": "SPF (Sender Policy Framework) records must now only be published as a TXT resource record type, " + "with code 16, and not with the formerly supported SPF resource record type, with code 99 (see RFC " + "7208, Section 14.1).", + "solution": "Change the SPF resource record type (code 99) to a TXT resource record (code 16).", +}
+
+# Custom issues / Best practices
+
+# Malformed SPF record +# - extra space before the start of the string +# - extra space after the end of the string +# - surrounded by quotation marks +# - illegal mechanisms
+MALFORMED_SPF_RECORD = { + "severity": "low", + "confidence": "certain", + "title": "Malformed SPF record" +}
+
+# Permissive SPF record +# - +all or just all +# - ?all
+PERMISSIVE_SPF_RECORD = { + "severity": "low", + "confidence": "certain", + "title": "Permissive SPF record", + "description": "An SPF record is interpreted from left to right; the all mechanism matches all senders that " + "did not match the preceding mechanisms. Therefore, you should place the all mechanism at the end " + "of the SPF record, and use it with the ~ (softfail) or - (fail) prefix. Do note that if no prefix " + "is set, + (pass) is used by default. This setup is discouraged.", + "solution": "Use a stricter mechanism like '-all', or '~all' if you do not feel ready yet." +}
+
+# Missing end of record, with ALL mechanism or REDIRECT modifier
+MISS_SPF_RECORD_TERMINATION = { + "severity": "low", + "confidence": "certain", + "title": "Missing SPF record termination", + "description": "An SPF record should conclude with either an 'all' mechanism or a 'redirect' modifier."
+} + +# fmt: on diff --git a/engines/owl_dns/modules/dkimsignatures.py b/engines/owl_dns/modules/dkimsignatures.py index 04cad0b0..77862c11 100644 --- a/engines/owl_dns/modules/dkimsignatures.py +++ b/engines/owl_dns/modules/dkimsignatures.py @@ -12,4 +12,3 @@ "mxvault", "dkim", ] - diff --git a/engines/owl_dns/modules/dnstwist.py b/engines/owl_dns/modules/dnstwist.py index 1137342d..fe63a8d6 100644 --- a/engines/owl_dns/modules/dnstwist.py +++ b/engines/owl_dns/modules/dnstwist.py @@ -11,8 +11,8 @@ DNSTWIST_NB_THREADS = 5 -class dnstwist: - identifier = 'dnstwist' +class DnsTwist: + identifier = "dnstwist" def __init__(self, path): self.loadconfig(path) @@ -20,19 +20,31 @@ def __init__(self, path): def loadconfig(self, path): try: sys.path.append(path) - globals()['dnstwist'] = __import__('dnstwist') + globals()["dnstwist"] = __import__("dnstwist") print("[+] INFO - dnstwist module sucessfully loaded.") return True except Exception: print("[+] ERROR - Not able to load dnstwist module.") return False - def search_subdomains(scan_id, domain, tld=False, ssdeep=False, geoip=False, mxcheck=False, whois=False, banners=False, timeout=DNSTWIST_TIMEOUT, nb_threads=DNSTWIST_NB_THREADS): - cmd = "{} -r -f json -t {}".format(globals()['dnstwist'].__file__, nb_threads) + @classmethod + def search_subdomains( + cls, + domain, + tld=False, + ssdeep=False, + geoip=False, + mxcheck=False, + whois=False, + banners=False, + timeout=DNSTWIST_TIMEOUT, + nb_threads=DNSTWIST_NB_THREADS, + ): + cmd = "{} -r -f json -t {}".format(globals()["dnstwist"].__file__, nb_threads) if tld and os.path.exists(tld): cmd += " --tld {}".format(tld) if ssdeep: - cmd += " -s" + cmd += " --lsh" if geoip: cmd += " -g" if mxcheck: @@ -43,9 +55,11 @@ def search_subdomains(scan_id, domain, tld=False, ssdeep=False, geoip=False, mxc cmd += " -b" cmd += " {}".format(domain) - outs = b'[{}]' + outs = b"[{}]" try: - outs = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True, timeout=timeout) + outs = subprocess.check_output( + cmd, stderr=subprocess.STDOUT, shell=True, timeout=timeout + ) except subprocess.TimeoutExpired: print("[+] ERROR - Timeout reached ({}s) for cmd: {}".format(timeout, cmd)) @@ -54,51 +68,67 @@ def search_subdomains(scan_id, domain, tld=False, ssdeep=False, geoip=False, mxc else: return domain, {} - def parse_results(ts, asset, domains): + @classmethod + def parse_results(cls, ts, asset, domains): issues = [] for domain in domains: - if domain['fuzzer'] == 'original*': + if domain["fuzzer"] == "*original": continue + result_str = "" - if 'dns-a' in domain.keys(): - result_str += "Resolved IPv4 (A): \n{}\n\n".format("\n".join(domain['dns-a'])) - if 'dns-aaaa' in domain.keys(): - result_str += "Resolved IPv6 (aaaa): \n{}\n\n".format("\n".join(domain['dns-aaaa'])) - if 'dns-mx' in domain.keys(): - result_str += "Resolved MX: \n{}\n\n".format("\n".join(domain['dns-mx'])) - if 'dns-ns' in domain.keys(): - result_str += "Resolving Nameservers: \n{}\n\n".format("\n".join(domain['dns-ns'])) - if 'geoip-country' in domain.keys(): - result_str += "GeoIP location: {}\n\n".format(domain['geoip-country']) - if 'fuzzer' in domain.keys(): - result_str += "Fuzzer source: {}\n\n".format(domain['fuzzer']) - if 'whois-created' in domain.keys() and domain['whois-created'] != "None": - result_str += "Whois creation date: {}\n".format(domain['whois-created']) - if 'whois-updated' in domain.keys() and domain['whois-updated'] != "None": - result_str += "Whois last update: {}\n".format(domain['whois-updated']) + if "dns-a" in 
domain.keys(): + result_str += "Resolved IPv4 (A): \n{}\n\n".format( + "\n".join(domain["dns-a"]) + ) + if "dns-aaaa" in domain.keys(): + result_str += "Resolved IPv6 (aaaa): \n{}\n\n".format( + "\n".join(domain["dns-aaaa"]) + ) + if "dns-mx" in domain.keys(): + result_str += "Resolved MX: \n{}\n\n".format( + "\n".join(domain["dns-mx"]) + ) + if "dns-ns" in domain.keys(): + result_str += "Resolving Nameservers: \n{}\n\n".format( + "\n".join(domain["dns-ns"]) + ) + if "geoip-country" in domain.keys(): + result_str += "GeoIP location: {}\n\n".format(domain["geoip-country"]) + + if "fuzzer" in domain.keys(): + result_str += "Fuzzer source: {}\n\n".format(domain["fuzzer"]) + + if "whois-created" in domain.keys() and domain["whois-created"] != "None": + result_str += "Whois creation date: {}\n".format( + domain["whois-created"] + ) + + if "whois-updated" in domain.keys() and domain["whois-updated"] != "None": + result_str += "Whois last update: {}\n".format(domain["whois-updated"]) result_hash = hashlib.sha1(result_str.encode("utf-8")).hexdigest()[:6] - issues.append({ - "issue_id": len(issues)+1, - "severity": "low", - "confidence": "certain", - "target": { - "addr": [asset], - "protocol": "domain" - }, - "title": "Suspicious domain found: {} (HASH: {})".format( - domain['domain-name'], result_hash), - "description": "DNS information for '{}':\n\n{}".format( - domain['domain-name'], result_str), - "solution": "Check suspiciousness of domain '{}'".format( - domain['domain-name'] - ), - "metadata": { - "tags": ["domains", "dns", "fraudulent", "typosquatting"] - }, - "type": "typosquated_domain", - "raw": domain, - "timestamp": ts - }) + issues.append( + { + "issue_id": len(issues) + 1, + "severity": "low", + "confidence": "certain", + "target": {"addr": [asset], "protocol": "domain"}, + "title": "Suspicious domain found: {} (HASH: {})".format( + domain["domain"], result_hash + ), + "description": "DNS information for '{}':\n\n{}".format( + domain["domain"], result_str + ), + "solution": "Check suspiciousness of domain '{}'".format( + domain["domain"] + ), + "metadata": { + "tags": ["domains", "dns", "fraudulent", "typosquatting"] + }, + "type": "typosquated_domain", + "raw": domain, + "timestamp": ts, + } + ) return issues diff --git a/engines/owl_dns/owl_dns.json.sample b/engines/owl_dns/owl_dns.json.sample index 00699445..5d0dc37c 100644 --- a/engines/owl_dns/owl_dns.json.sample +++ b/engines/owl_dns/owl_dns.json.sample @@ -1,6 +1,6 @@ { "name": "PatrOwl - Dns module", - "version": "1.5.8", + "version": "1.5.9", "description": "DNS Scanner", "allowed_asset_types": ["ip", "domain", "fqdn", "keyword"], "sublist3r_bin_path": "/opt/patrowl-engines/owl_dns/external-libs/Sublist3r", diff --git a/engines/owl_dns/requirements.txt b/engines/owl_dns/requirements.txt index 34c8786f..e792233c 100644 --- a/engines/owl_dns/requirements.txt +++ b/engines/owl_dns/requirements.txt @@ -23,3 +23,4 @@ urllib3==1.26.13 validators==0.20.0 werkzeug==2.2.3 #whois==0.9.27 +psutil==5.9.8 \ No newline at end of file diff --git a/engines/owl_dns/tests/test_spf.py b/engines/owl_dns/tests/test_spf.py new file mode 100644 index 00000000..3e4b6663 --- /dev/null +++ b/engines/owl_dns/tests/test_spf.py @@ -0,0 +1,449 @@ +import unittest +from unittest import mock + +from engines.owl_dns.engine_owl_dns import ( + _dns_resolve_asset, + _parse_spf_record, + get_lookup_count_and_spf_records, +) +from engines.owl_dns.etc.issues import spf_issues + + +class TestSPF(unittest.TestCase): + maxDiff = None + + 
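+    # The tests below patch dns.resolver.Resolver.resolve so no network traffic is
+    # generated; in dnspython 2.x, Resolver.query is a deprecated alias that delegates
+    # to resolve, so patching resolve also intercepts the engine's resolver.query() calls.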
@mock.patch("dns.resolver.Resolver.resolve") + def test_dns_resolve_asset(self, mock_resolve): + # Arrange: set up the mock with a random SPF record + mock_resolve.return_value = ['"v=spf1 include:spf.protection.outlook.com -all"'] + + # Act + dns_records = _dns_resolve_asset("patrowl.io", "TXT") + + # Assert + mock_resolve.assert_called_with("patrowl.io", "TXT") + self.assertCountEqual( + dns_records, + [ + { + "record_type": "TXT", + "values": ["v=spf1 include:spf.protection.outlook.com -all"], + "answers": ['"v=spf1 include:spf.protection.outlook.com -all"'], + }, + ], + ) + + def test_parse_spf_record_with_no_dns_record(self): + # Arrange + dns_records = [] + + # Act and Assert + result, issues = _parse_spf_record(dns_records=dns_records) + + self.assertCountEqual( + issues, + [dict(spf_issues.NO_SPF_RECORD, extra_info="There is no DNS TXT record.")], + ) + + def test_parse_spf_record_with_no_spf_record(self): + # Arrange + dns_records = ['"BLA-BLA-BLA"', '"BLA-BLA-BLA-2"'] + + # Act and Assert + result, issues = _parse_spf_record(dns_records=dns_records) + + self.assertCountEqual( + issues, + [ + dict( + spf_issues.NO_SPF_RECORD, + extra_info=f"Other DNS TXT records are: {', '.join(dns_records)}.", + ) + ], + ) + + def test_parse_spf_record_with_multiple_spf_records(self): + # Arrange + dns_records = [ + '"v=spf1 include:spf.protection.outlook -all"', + '"v=spf1 include:_spf.google.com ~all"', + '"v=spf1 redirect=_spf.facebook.com"', + ] + + # Act and Assert + result, issues = _parse_spf_record(dns_records=dns_records) + + self.assertCountEqual( + issues, + [ + dict( + spf_issues.MULTIPLE_SPF_RECORDS, + extra_info=f"Other DNS TXT records are: {', '.join(dns_records)}.", + ) + ], + ) + + def test_parse_spf_record_with_directive_after_all(self): + # Arrange + dns_records = [ + '"v=spf1 ~all include:spf.protection.outlook -all"', + ] + + # Act and Assert + result, issues = _parse_spf_record(dns_records=dns_records) + + self.assertCountEqual( + issues, + [ + dict( + spf_issues.DIRECTIVES_AFTER_ALL, + value="v=spf1 ~all include:spf.protection.outlook -all", + extra_info='These directives after "all" are ignored: include:spf.protection.outlook -all.', + ) + ], + ) + + def test_parse_spf_record_with_string_too_long(self): + # Arrange + dns_records = [ + '"v=spf1 include:spf.protection.outlook include:veryloooooooooooooooooooooooooooooooooooooooooooooooooooooo' + "oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo" + "ooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo" + "oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo" + 'oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooog -all"', + ] + + # Act and Assert + result, issues = _parse_spf_record(dns_records=dns_records) + + self.assertCountEqual( + issues, + [ + dict( + spf_issues.STRING_TOO_LONG, + value="v=spf1 include:spf.protection.outlook include:verylooooooooooooooooooooooooooooooooooooooooo" + "oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo" + "oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo" + "oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo" + "oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo" + "oooooooooooooooooooog -all", + 
extra_info="This part is 510 characters long, and therefore too long: v=spf1 include:spf.protection" + ".outlook include:veryloooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo" + "oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo" + "oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo" + "oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo" + "ooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooog -all.", + ) + ], + ) + + def test_parse_spf_record_with_multiple_strings_too_long(self): + # Arrange + dns_records = [ + '"v=spf1 include:spf.protection.outlook include:veryloooooooooooooooooooooooooooooooooooooooooooooooooooooo' + "oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo" + 'oooooooooooooooooooooooooooooooooooooooooooo" "ooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo' + "oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo" + 'ooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooog -all"', + ] + + # Act and Assert + result, issues = _parse_spf_record(dns_records=dns_records) + + self.assertCountEqual( + issues, + [ + dict( + spf_issues.STRING_TOO_LONG, + value="v=spf1 include:spf.protection.outlook include:verylooooooooooooooooooooooooooooooooooooooooo" + "oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo" + "oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo" + "oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo" + "oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo" + "ooooooooooooooooooooog -all", + extra_info="This part is 256 characters long, and therefore too long: ooooooooooooooooooooooooooooo" + "oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo" + "oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo" + "ooooooooooooooooooooooooog -all.", + ) + ], + ) + + def test_parse_spf_record_with_multiple_correct_strings_length(self): + # Arrange + dns_records = [ + '"v=spf1 include:spf.protection.outlook include:veryloooooooooooooooooooooooooooooooooooooooooooooooooooooo' + "oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo" + 'oooooooooooooooooooooooooooooooooooooooooooo" "ooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo' + "oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo" + 'oooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooog -all"', + ] + + # Act and Assert + result, issues = _parse_spf_record(dns_records=dns_records) + + self.assertCountEqual( + issues, + [], + ) + + def test_parse_spf_record_with_ptr_mechanism(self): + # Arrange + dns_records = ['"v=spf1 include:spf.protection.outlook ptr -all"'] + + # Act and Assert + result, issues = _parse_spf_record(dns_records=dns_records) + + self.assertCountEqual( + issues, + [ + dict( + spf_issues.PRESENCE_OF_PTR, + value="v=spf1 include:spf.protection.outlook ptr -all", + ) + ], + ) + + @mock.patch("dns.resolver.Resolver.resolve") + def 
test_check_dns_lookup_limit_less_than_10(self, mock_resolve): + # Arrange + mock_resolve.side_effect = [ + [ + '"v=spf1 include:spf.protection.outlook.com include:servers.mcsv.net include:7593890.spf10.hubspotemail.net -all"' + ], + [ + '"v=spf1 ip4:40.92.0.0/15 ip4:40.107.0.0/16 ip4:52.100.0.0/14 ip4:104.47.0.0/17 ip6:2a01:111:f400::/48 ip6:2a01:111:f403::/49 ip6:2a01:111:f403:8000::/51 ip6:2a01:111:f403:c000::/51 ip6:2a01:111:f403:f000::/52 -all"' + ], + [ + '"v=spf1 ip4:205.201.128.0/20 ip4:198.2.128.0/18 ip4:148.105.8.0/21 -all"' + ], + [ + '"v=spf1 ip4:3.93.157.0/24 ip4:3.210.190.0/24 ip4:18.208.124.128/25 ip4:54.174.52.0/24 ip4:54.174.57.0/24 ip4:54.174.59.0/24 ip4:54.174.60.0/23 ip4:54.174.63.0/24 ip4:108.179.144.0/20 ip4:139.180.17.0/24 ip4:141.193.184.32/27 ip4:141.193.184.64/26 ip4:141.193.184.128/25 ip4:141.193.185.32/27 ip4:141.193.185.64/26 ip4:141.193.185.128/25 ip4:143.244.80.0/20 ip4:158.247.16.0/20 -all "' + ], + ] + + # Act + dns_lookup_count, spf_lookup_records = get_lookup_count_and_spf_records( + domain="patrowl.io" + ) + + # Assert + self.assertEqual(dns_lookup_count, 3) + + @mock.patch("dns.resolver.Resolver.resolve") + def test_check_dns_lookup_limit_recursion_error(self, mock_resolve): + # Arrange (5000 DNS lookup) + mock_resolve.side_effect = [ + ['"v=spf1 include:spf.protection.outlook.com -all"'] for _ in range(5000) + ] + # Assert + self.assertRaises( + RecursionError, + lambda: get_lookup_count_and_spf_records(domain="patrowl.io"), + ) + + def test_parse_spf_record_with_extra_spaces_before_the_start_of_the_string(self): + # Arrange + dns_records = ['" v=spf1 include:spf.protection.outlook -all"'] + + # Act and Assert + result, issues = _parse_spf_record(dns_records=dns_records) + + self.assertCountEqual( + issues, + [ + dict( + spf_issues.MALFORMED_SPF_RECORD, + value=" v=spf1 include:spf.protection.outlook -all", + extra_info="There is an extra space before the start of the string.", + ) + ], + ) + + def test_parse_spf_record_with_extra_spaces_after_the_end_of_the_string(self): + # Arrange + dns_records = ['"v=spf1 include:spf.protection.outlook -all "'] + + # Act and Assert + result, issues = _parse_spf_record(dns_records=dns_records) + + self.assertCountEqual( + issues, + [ + dict( + spf_issues.MALFORMED_SPF_RECORD, + value="v=spf1 include:spf.protection.outlook -all ", + extra_info="There is an extra space after the end of the string.", + ) + ], + ) + + def test_parse_spf_record_surrounded_by_quotation_marks(self): + # Arrange + dns_records = ['""v=spf1 include:spf.protection.outlook -all""'] + + # Act and Assert + result, issues = _parse_spf_record(dns_records=dns_records) + + self.assertCountEqual( + issues, + [ + dict( + spf_issues.MALFORMED_SPF_RECORD, + value='"v=spf1 include:spf.protection.outlook -all"', + extra_info="The SPF record is surrounded quotation marks.", + ) + ], + ) + + def test_parse_spf_record_with_illegal_term(self): + # Arrange + dns_records_1 = [ + '"v=spf1 include:spf.protection.outlook includes:spf.protection.outlook -all"' + ] + + # Act + result_1, issues_1 = _parse_spf_record(dns_records=dns_records_1) + + # Assert + self.assertCountEqual( + issues_1, + [ + dict( + spf_issues.MALFORMED_SPF_RECORD, + value="v=spf1 include:spf.protection.outlook includes:spf.protection.outlook -all", + extra_info="'includes' is an illegal term.", + ) + ], + ) + + # Arrange + dns_records_2 = ['"v=spf1 include:spf.protection.outlook -alll"'] + + # Act + result_2, issues_2 = _parse_spf_record(dns_records=dns_records_2) + + # Assert + 
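+        # assertCountEqual asserts that both sequences contain the same elements,
+        # regardless of ordering (despite its name, it is not just a length check).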
self.assertCountEqual( + issues_2, + [ + dict( + spf_issues.MALFORMED_SPF_RECORD, + value="v=spf1 include:spf.protection.outlook -alll", + extra_info="'alll' is an illegal term.", + ) + ], + ) + + def test_parse_spf_record_with_uppercase(self): + # Arrange + dns_records = ['"V=SPF1 InClUdE:spf.protection.outlook -All"'] + + # Act + result, issues = _parse_spf_record(dns_records=dns_records) + + # Assert + self.assertCountEqual( + issues, + [], + ) + + def test_parse_spf_record_with_permissive_all(self): + # Arrange + dns_records_1 = ['"v=spf1 include:spf.protection.outlook all"'] + + # Act + result_1, issues_1 = _parse_spf_record(dns_records=dns_records_1) + + # Assert + self.assertCountEqual( + issues_1, + [ + dict( + spf_issues.PERMISSIVE_SPF_RECORD, + value="v=spf1 include:spf.protection.outlook all", + ) + ], + ) + + # Arrange + dns_records_2 = ['"v=spf1 include:spf.protection.outlook +all"'] + + # Act + result_2, issues_2 = _parse_spf_record(dns_records=dns_records_2) + + # Assert + self.assertCountEqual( + issues_2, + [ + dict( + spf_issues.PERMISSIVE_SPF_RECORD, + value="v=spf1 include:spf.protection.outlook +all", + ) + ], + ) + + # Arrange + dns_records_3 = ['"v=spf1 include:spf.protection.outlook ?all"'] + + # Act + result_3, issues_3 = _parse_spf_record(dns_records=dns_records_3) + + # Assert + self.assertCountEqual( + issues_3, + [ + dict( + spf_issues.PERMISSIVE_SPF_RECORD, + value="v=spf1 include:spf.protection.outlook ?all", + ) + ], + ) + + def test_parse_spf_record_without_spf_record_termination(self): + # Arrange + dns_records = ['"v=spf1 include:spf.protection.outlook"'] + + # Act + result, issues = _parse_spf_record(dns_records=dns_records) + + # Assert + self.assertCountEqual( + issues, + [ + dict( + spf_issues.MISS_SPF_RECORD_TERMINATION, + value="v=spf1 include:spf.protection.outlook", + ) + ], + ) + + def test_parse_spf_record_with_all_spf_record_termination(self): + # Arrange + dns_records = ['"v=spf1 include:spf.protection.outlook -all"'] + + # Act + result, issues = _parse_spf_record(dns_records=dns_records) + + # Assert + self.assertCountEqual( + issues, + [], + ) + + def test_parse_spf_record_with_redirect_spf_record_termination(self): + # Arrange + dns_records = ['"v=spf1 redirect=_spf.facebook.com"'] + + # Act + result, issues = _parse_spf_record(dns_records=dns_records) + + # Assert + self.assertCountEqual( + issues, + [], + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/engines/update_ver.sh b/engines/update_ver.sh new file mode 100755 index 00000000..eadde6fa --- /dev/null +++ b/engines/update_ver.sh @@ -0,0 +1,22 @@ +#!/bin/bash +echo "[*] Starting ..." +if [ $# -ne 3 ]; then + echo "[!] 3 arguments required;" + echo " | ./update_ver.sh " + echo "[!] Quitting." + exit +fi +echo "[+] Updating version ..." +cd ${3} +sed -i "s/${1}/${2}/g" VERSION +sed -i "s/${1}/${2}/g" Dockerfile +sed -i "s/${1}/${2}/g" __init__.py +sed -i "s/${1}/${2}/g" ${3}.json.sample + + +echo "[+] Adding to version control ..." +git add Dockerfile VERSION __init__.py ${3}.json.sample ../../VERSION +git commit -m "Updated VERSION (${3})" +cd .. +echo "[+] Updated ${3} to ${2}." +echo "[*] Done."
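For reference, update_ver.sh expects three positional arguments: the current version string, the new version string, and the engine directory name, and is intended to be run from the engines/ directory. A hypothetical invocation matching the owl_dns version bump in this change set:

    ./update_ver.sh 1.5.8 1.5.9 owl_dns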