LogonTracer commit 2bb79861
  • fix multiple vulnerabilities in logontracer.py

    - fix multiple vulnerabilities
    - fix non-PEP8 code
    - remove dead code
  • t-tani committed with GitHub 5 years ago
    2bb79861
    1 parent e8dde6e9
  • ■ ■ ■ ■ ■ ■
    logontracer.py
    skipped 8 lines
    9 9  import sys
    10 10  import re
    11 11  import argparse
    12  -import itertools
    13 12  import datetime
    14 13  import subprocess
    15 14   
    skipped 68 lines
    84 83   
    85 84  # EVTX Header
    86 85  EVTX_HEADER = b"\x45\x6C\x66\x46\x69\x6C\x65\x00"
     86 + 
 87 +# String Check list
     88 +UCHECK = r"[%*+=\[\]\\/|;:\"<>?,&]"
     89 +HCHECK = r"[*\\/|:\"<>?&]"
    87 90   
    88 91  # LogonTracer folder path
    89 92  FPATH = os.path.dirname(os.path.abspath(__file__))
    skipped 164 lines
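The new UCHECK and HCHECK patterns are deny-lists applied to user-name and host-name values extracted from event records further down in parse_evtx: a field containing Cypher, shell, or path metacharacters is skipped instead of being stored and later used in queries. A minimal sketch of how such checks behave (the helper functions here are hypothetical, not part of the commit):

import re

# Deny-list patterns as introduced by the commit.
UCHECK = r"[%*+=\[\]\\/|;:\"<>?,&]"   # characters never expected in a user name
HCHECK = r"[*\\/|:\"<>?&]"            # characters never expected in a host name

def is_safe_username(value):
    # Reject names that could break out of a later query or command string.
    return value is not None and not re.search(UCHECK, value)

def is_safe_hostname(value):
    # Plain host names must avoid HCHECK characters; literal IPv4 addresses are also accepted.
    if value is None:
        return False
    return not re.search(HCHECK, value) or bool(re.search(r"\A\d+\.\d+\.\d+\.\d+\Z", value))

print(is_safe_username("alice"), is_safe_username("bob|whoami"))   # True False
print(is_safe_hostname("ws01"), is_safe_hostname("10.0.0.5"))      # True True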
    254 257  if args.port:
    255 258   WEB_PORT = args.port
    256 259   
     260 + 
    257 261  # Web application index.html
    258 262  @app.route('/')
    259 263  def index():
    skipped 17 lines
    277 281  # Web application upload
    278 282  @app.route("/upload", methods=["POST"])
    279 283  def do_upload():
    280  - filelist= ""
     284 + UPLOAD_DIR = os.path.join(FPATH, 'upload')
     285 + filelist = ""
     286 + 
     287 + if os.path.exists(UPLOAD_DIR) is False:
     288 + os.mkdir(UPLOAD_DIR)
     289 + print("[*] make upload folder %s." % UPLOAD_DIR)
     290 + 
    281 291   try:
    282 292   timezone = request.form["timezone"]
    283 293   logtype = request.form["logtype"]
    284  - for i in range(0, len(request.files)):
     294 + for i in range(0, len(request.files)):
    285 295   loadfile = "file" + str(i)
    286 296   file = request.files[loadfile]
    287 297   if file and file.filename:
    288  - filename = file.filename
     298 + if "EVTX" in logtype:
     299 + filename = os.path.join(UPLOAD_DIR, str(i) + ".evtx")
     300 + elif "XML" in logtype:
     301 + filename = os.path.join(UPLOAD_DIR, str(i) + ".xml")
     302 + else:
     303 + continue
    289 304   file.save(filename)
    290 305   filelist += filename + " "
    291 306   if "EVTX" in logtype:
    292 307   logoption = " -e "
    293  - if "XML" in logtype:
     308 + elif "XML" in logtype:
    294 309   logoption = " -x "
    295  - parse_command = "nohup python3 " + FPATH + "/logontracer.py --delete -z " + timezone + logoption + filelist + " -u " + NEO4J_USER + " -p " + NEO4J_PASSWORD + " > " + FPATH + "/static/logontracer.log 2>&1 &";
     310 + else:
     311 + return "FAIL"
     312 + if not re.search(r"\A-{0,1}[0-9]{1,2}\Z", timezone):
     313 + return "FAIL"
     314 + 
     315 + parse_command = "nohup python3 " + FPATH + "/logontracer.py --delete -z " + timezone + logoption + filelist + " -u " + NEO4J_USER + " -p " + NEO4J_PASSWORD + " > " + FPATH + "/static/logontracer.log 2>&1 &"
    296 316   subprocess.call("rm -f " + FPATH + "/static/logontracer.log > /dev/null", shell=True)
    297 317   subprocess.call(parse_command, shell=True)
    298  - #parse_evtx(filename)
     318 + # parse_evtx(filename)
    299 319   return "SUCCESS"
     320 + 
    300 321   except:
    301 322   return "FAIL"
    302 323   
    skipped 7 lines
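The rewritten upload handler no longer trusts the client-supplied filename: each file is saved under a server-chosen name (0.evtx, 1.xml, ...) inside a dedicated upload folder, unknown log types are refused, and the timezone parameter must match a narrow regex before it is interpolated into the shell command, closing path-traversal and command-injection vectors. A small sketch of the same idea, with a hypothetical UPLOAD_DIR:

import os
import re

UPLOAD_DIR = "/tmp/logontracer-upload"   # assumption: any fixed, application-owned directory

def safe_destination(index, logtype):
    # Pick the on-disk name ourselves instead of using the client's filename.
    if "EVTX" in logtype:
        return os.path.join(UPLOAD_DIR, "%d.evtx" % index)
    if "XML" in logtype:
        return os.path.join(UPLOAD_DIR, "%d.xml" % index)
    return None   # unknown log type: refuse to save

def safe_timezone(value):
    # Accept only an optional minus sign and one or two digits, e.g. "9" or "-5".
    return bool(re.search(r"\A-{0,1}[0-9]{1,2}\Z", value))

print(safe_destination(0, "EVTX"))                        # /tmp/logontracer-upload/0.evtx
print(safe_timezone("9"), safe_timezone("9; rm -rf /"))   # True False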
    310 331   for _, event in counts.iterrows():
    311 332   column = int((datetime.datetime.strptime(event["dates"], "%Y-%m-%d %H:%M:%S") - starttime).total_seconds() / 3600)
    312 333   row = users.index(event["username"])
    313  - #count_array[row, column, 0] = count_array[row, column, 0] + count
     334 + # count_array[row, column, 0] = count_array[row, column, 0] + count
    314 335   if event["eventid"] == 4624:
    315 336   count_array[0, row, column] = event["count"]
    316 337   elif event["eventid"] == 4625:
    skipped 5 lines
    322 343   elif event["eventid"] == 4776:
    323 344   count_array[4, row, column] = event["count"]
    324 345   
    325  - #count_average = count_array.mean(axis=0)
     346 + # count_average = count_array.mean(axis=0)
    326 347   count_sum = np.sum(count_array, axis=0)
    327 348   count_average = count_sum.mean(axis=0)
    328 349   num = 0
    skipped 148 lines
    477 498   data_array[data_array == 4769] = 2
    478 499   data_array[data_array == 4624] = 3
    479 500   data_array[data_array == 4625] = 4
    480  - #model = hmm.GaussianHMM(n_components=3, covariance_type="full", n_iter=10000)
     501 + # model = hmm.GaussianHMM(n_components=3, covariance_type="full", n_iter=10000)
    481 502   model = hmm.MultinomialHMM(n_components=3, n_iter=10000)
    482  - #model.startprob_ = start_probability
     503 + # model.startprob_ = start_probability
    483 504   model.emissionprob_ = emission_probability
    484 505   model.fit(np.array([data_array], dtype="int").T, lengths)
    485 506   joblib.dump(model, FPATH + "/model/hmm.pkl")
    skipped 3 lines
    489 510   rep_xml = record_xml.replace("xmlns=\"http://schemas.microsoft.com/win/2004/08/events/event\"", "")
    490 511   set_xml = "<?xml version=\"1.0\" encoding=\"utf-8\" standalone=\"yes\" ?>%s" % rep_xml
    491 512   fin_xml = set_xml.encode("utf-8")
    492  - return etree.fromstring(fin_xml)
     513 + parser = etree.XMLParser(resolve_entities=False)
     514 + return etree.fromstring(fin_xml, parser)
    493 515   
    494 516   
    495 517  def xml_records(filename):
    skipped 6 lines
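In to_lxml, record XML is now parsed with entity resolution disabled, so a crafted event record cannot use an external entity to read local files or reach out over the network (XXE). A short illustration with a hypothetical payload:

from lxml import etree

payload = (
    b"<?xml version=\"1.0\"?>"
    b"<!DOCTYPE r [<!ENTITY x SYSTEM \"file:///etc/hostname\">]>"
    b"<r>&x;</r>"
)

# Same parser configuration as the patched to_lxml().
parser = etree.XMLParser(resolve_entities=False)
root = etree.fromstring(payload, parser)

# The &x; reference stays unresolved instead of expanding to file contents.
print(root.text)   # None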
    502 524   yield xml, e
    503 525   
    504 526   if args.xmls:
    505  - with open(filename,'r') as fx:
     527 + with open(filename, 'r') as fx:
    506 528   xdata = fx.read()
    507 529   fixdata = xdata.replace("<?xml version=\"1.0\" encoding=\"utf-8\" standalone=\"yes\"?>", "").replace("</Events>", "").replace("<Events>", "")
    508 530   # fixdata = xdata.replace("<?xml version=\"1.0\" encoding=\"utf-8\" standalone=\"yes\"?>", "")
    skipped 7 lines
    516 538   except etree.XMLSyntaxError as e:
    517 539   yield xml, e
    518 540   
     541 + 
    519 542  # Parse the EVTX file
    520 543  def parse_evtx(evtx_list):
    521 544   event_set = pd.DataFrame(index=[], columns=["eventid", "ipaddress", "username", "logintype", "status", "authname"])
    522 545   count_set = pd.DataFrame(index=[], columns=["dates", "eventid", "username"])
    523  - ml_frame = pd.DataFrame(index=[], columns=["date","user","host","id"])
     546 + ml_frame = pd.DataFrame(index=[], columns=["date", "user", "host", "id"])
    524 547   username_set = []
    525 548   domain_set = []
    526 549   admins = []
    skipped 35 lines
    562 585   
    563 586   if args.todate:
    564 587   try:
    565  - tdatetime = datetime.datetime.strptime(args.todate, "%Y%m%d%H%M%S")
     588 + tdatetime = datetime.datetime.strptime(args.todate, "%Y%m%d%H%M%S")
    566 589   print("[*] Parse the EVTX from %s." % tdatetime.strftime("%Y-%m-%d %H:%M:%S"))
    567 590   except:
    568 591   sys.exit("[!] To date does not match format '%Y%m%d%H%M%S'.")
    skipped 17 lines
    586 609   record_sum = record_sum + last_record
    587 610   break
    588 611   except:
    589  - record_sum = record_sum + fh.next_record_number()
     612 + record_sum = record_sum + fh.next_record_number()
    590 613   
    591 614   if args.xmls:
    592 615   with open(evtx_file, "r") as fb:
    skipped 57 lines
    650 673   
    651 674   if eventid == 4672:
    652 675   for data in event_data:
    653  - if data.get("Name") in "SubjectUserName" and data.text != None:
     676 + if data.get("Name") in "SubjectUserName" and data.text is not None and not re.search(UCHECK, data.text):
    654 677   username = data.text.split("@")[0]
    655 678   if username[-1:] not in "$":
    656 679   username = username.lower() + "@"
    skipped 3 lines
    660 683   admins.append(username)
    661 684   elif eventid in [4720, 4726]:
    662 685   for data in event_data:
    663  - if data.get("Name") in "TargetUserName" and data.text != None:
     686 + if data.get("Name") in "TargetUserName" and data.text is not None and not re.search(UCHECK, data.text):
    664 687   username = data.text.split("@")[0]
    665 688   if username[-1:] not in "$":
    666 689   username = username.lower() + "@"
    skipped 5 lines
    672 695   delusers[username] = etime.strftime("%Y-%m-%d %H:%M:%S")
    673 696   elif eventid == 4719:
    674 697   for data in event_data:
    675  - if data.get("Name") in "SubjectUserName" and data.text != None:
     698 + if data.get("Name") in "SubjectUserName" and data.text is not None and not re.search(UCHECK, data.text):
    676 699   username = data.text.split("@")[0]
    677 700   if username[-1:] not in "$":
    678 701   username = username.lower() + "@"
    679 702   else:
    680 703   username = "-"
    681  - if data.get("Name") in "CategoryId" and data.text != None:
     704 + if data.get("Name") in "CategoryId" and data.text is not None and re.search(r"\A%%\d{4}\Z", data.text):
    682 705   category = data.text
    683  - if data.get("Name") in "SubcategoryGuid" and data.text != None:
     706 + if data.get("Name") in "SubcategoryGuid" and data.text is not None and re.search(r"\A{[\w\-]*}\Z", data.text):
    684 707   guid = data.text
    685 708   policylist.append([etime.strftime("%Y-%m-%d %H:%M:%S"), username, category, guid.lower()])
    686 709   elif eventid in [4728, 4732, 4756]:
    687 710   for data in event_data:
    688  - if data.get("Name") in "TargetUserName" and data.text != None:
     711 + if data.get("Name") in "TargetUserName" and data.text is not None and not re.search(UCHECK, data.text):
    689 712   groupname = data.text
    690  - elif data.get("Name") in "MemberSid" and data.text not in "-" and data.text != None:
     713 + elif data.get("Name") in "MemberSid" and data.text not in "-" and data.text is not None and re.search(r"\AS-[0-9\-]*\Z", data.text):
    691 714   usid = data.text
    692 715   addgroups[usid] = "AddGroup: " + groupname + "(" + etime.strftime("%Y-%m-%d %H:%M:%S") + ") "
    693 716   elif eventid in [4729, 4733, 4757]:
    694 717   for data in event_data:
    695  - if data.get("Name") in "TargetUserName" and data.text != None:
     718 + if data.get("Name") in "TargetUserName" and data.text is not None and not re.search(UCHECK, data.text):
    696 719   groupname = data.text
    697  - elif data.get("Name") in "MemberSid" and data.text not in "-" and data.text != None:
     720 + elif data.get("Name") in "MemberSid" and data.text not in "-" and data.text is not None and re.search(r"\AS-[0-9\-]*\Z", data.text):
    698 721   usid = data.text
    699 722   removegroups[usid] = "RemoveGroup: " + groupname + "(" + etime.strftime("%Y-%m-%d %H:%M:%S") + ") "
    700 723   elif eventid == 4662:
    701 724   for data in event_data:
    702  - if data.get("Name") in "SubjectUserName" and data.text != None:
     725 + if data.get("Name") in "SubjectUserName" and data.text is not None and not re.search(UCHECK, data.text):
    703 726   username = data.text.split("@")[0]
    704 727   if username[-1:] not in "$":
    705 728   username = username.lower() + "@"
    skipped 5 lines
    711 734   dcsync_count[username] = 0
    712 735   elif eventid in [5137, 5141]:
    713 736   for data in event_data:
    714  - if data.get("Name") in "SubjectUserName" and data.text != None:
     737 + if data.get("Name") in "SubjectUserName" and data.text is not None and not re.search(UCHECK, data.text):
    715 738   username = data.text.split("@")[0]
    716 739   if username[-1:] not in "$":
    717 740   username = username.lower() + "@"
    skipped 5 lines
    723 746   dcshadow_check.append(etime.strftime("%Y-%m-%d %H:%M:%S"))
    724 747   else:
    725 748   for data in event_data:
    726  - if data.get("Name") in ["IpAddress", "Workstation"] and data.text != None:
     749 + if data.get("Name") in ["IpAddress", "Workstation"] and data.text is not None and (not re.search(HCHECK, data.text) or re.search(r"\A\d+\.\d+\.\d+\.\d+\Z", data.text)):
    727 750   ipaddress = data.text.split("@")[0]
    728 751   ipaddress = ipaddress.lower().replace("::ffff:", "")
    729 752   ipaddress = ipaddress.replace("\\", "")
    730 753   
    731  - if data.get("Name") == "WorkstationName" and data.text != None:
     754 + if data.get("Name") == "WorkstationName" and data.text is not None and (not re.search(HCHECK, data.text) or re.search(r"\A\d+\.\d+\.\d+\.\d+\Z", data.text)):
    732 755   hostname = data.text.split("@")[0]
    733 756   hostname = hostname.lower().replace("::ffff:", "")
    734 757   hostname = hostname.replace("\\", "")
    735 758   
    736  - if data.get("Name") in "TargetUserName" and data.text != None:
     759 + if data.get("Name") in "TargetUserName" and data.text is not None and not re.search(UCHECK, data.text):
    737 760   username = data.text.split("@")[0]
    738 761   if username[-1:] not in "$":
    739 762   username = username.lower() + "@"
    740 763   else:
    741 764   username = "-"
    742 765   
    743  - if data.get("Name") in "TargetDomainName" and data.text != None:
     766 + if data.get("Name") in "TargetDomainName" and data.text is not None and not re.search(HCHECK, data.text):
    744 767   domain = data.text
    745 768   
    746  - if data.get("Name") in ["TargetUserSid", "TargetSid"] and data.text != None and data.text[0:2] in "S-1":
     769 + if data.get("Name") in ["TargetUserSid", "TargetSid"] and data.text is not None and re.search(r"\AS-[0-9\-]*\Z", data.text):
    747 770   sid = data.text
    748 771   
    749  - if data.get("Name") in "LogonType":
     772 + if data.get("Name") in "LogonType" and re.search(r"\A\d{1,2}\Z", data.text):
    750 773   logintype = int(data.text)
    751 774   
    752  - if data.get("Name") in "Status":
     775 + if data.get("Name") in "Status" and re.search(r"\A0x\w{8}\Z", data.text):
    753 776   status = data.text
    754 777   
    755  - if data.get("Name") in "AuthenticationPackageName":
     778 + if data.get("Name") in "AuthenticationPackageName" and re.search(r"\A\w*\Z", data.text):
    756 779   authname = data.text
    757 780   
    758 781   if username != "-" and ipaddress != "::1" and ipaddress != "127.0.0.1" and (ipaddress != "-" or hostname != "-"):
    skipped 3 lines
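Within the generic event branch, each structured field is now matched against an anchored pattern (SID, logon type, NTSTATUS code, authentication package) before it is kept, so free-form text cannot ride along in those columns. The same patterns restated with a hypothetical helper:

import re

FIELD_PATTERNS = {
    "sid":       r"\AS-[0-9\-]*\Z",   # Windows security identifier, e.g. S-1-5-21-...
    "logontype": r"\A\d{1,2}\Z",      # small integer logon type
    "status":    r"\A0x\w{8}\Z",      # NTSTATUS code such as 0xc000006a
    "authname":  r"\A\w*\Z",          # single token such as NTLM or Kerberos
}

def field_ok(kind, value):
    return value is not None and bool(re.search(FIELD_PATTERNS[kind], value))

print(field_ok("sid", "S-1-5-21-1111-2222-3333-500"))   # True
print(field_ok("status", "0xc000006a"))                  # True
print(field_ok("logontype", "3 OR 1=1"))                 # False: anchors reject trailing text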
    762 785   else:
    763 786   event_series = pd.Series([eventid, hostname, username, logintype, status, authname], index=event_set.columns)
    764 787   ml_series = pd.Series([etime.strftime("%Y-%m-%d %H:%M:%S"), username, hostname, eventid], index=ml_frame.columns)
    765  - event_set = event_set.append(event_series, ignore_index = True)
     788 + event_set = event_set.append(event_series, ignore_index=True)
    766 789   ml_frame = ml_frame.append(ml_series, ignore_index=True)
    767 790   # print("%s,%i,%s,%s,%s,%s" % (eventid, ipaddress, username, comment, logintype))
    768 791   count_series = pd.Series([stime.strftime("%Y-%m-%d %H:%M:%S"), eventid, username], index=count_set.columns)
    769  - count_set = count_set.append(count_series, ignore_index = True)
     792 + count_set = count_set.append(count_series, ignore_index=True)
    770 793   # print("%s,%s" % (stime.strftime("%Y-%m-%d %H:%M:%S"), username))
    771 794   
    772 795   if domain != "-":
    skipped 8 lines
    781 804   if sid != "-":
    782 805   sids[username] = sid
    783 806   
    784  - if hostname != "-" and ipaddress != "-" :
     807 + if hostname != "-" and ipaddress != "-":
    785 808   hosts[hostname] = ipaddress
    786 809   
    787 810   if authname in "NTML" and authname not in ntmlauth:
    skipped 11 lines
    799 822   user_data = node.xpath("/Event/UserData/ns:LogFileCleared/ns:SubjectUserName", namespaces={"ns": namespace})
    800 823   domain_data = node.xpath("/Event/UserData/ns:LogFileCleared/ns:SubjectDomainName", namespaces={"ns": namespace})
    801 824   
    802  - if user_data[0].text != None:
     825 + if user_data[0].text is not None:
    803 826   username = user_data[0].text.split("@")[0]
    804 827   if username[-1:] not in "$":
    805 828   deletelog.append(username.lower())
    skipped 2 lines
    808 831   else:
    809 832   deletelog.append("-")
    810 833   
    811  - if domain_data[0].text != None:
     834 + if domain_data[0].text is not None:
    812 835   deletelog.append(domain_data[0].text)
    813 836   else:
    814 837   deletelog.append("-")
    skipped 38 lines
    853 876   print("[*] Creating a graph data.")
    854 877   
    855 878   try:
    856  - graph_http = "http://" + NEO4J_USER + ":" + NEO4J_PASSWORD +"@" + NEO4J_SERVER + ":" + NEO4J_PORT + "/db/data/"
     879 + graph_http = "http://" + NEO4J_USER + ":" + NEO4J_PASSWORD + "@" + NEO4J_SERVER + ":" + NEO4J_PORT + "/db/data/"
    857 880   GRAPH = Graph(graph_http)
    858 881   except:
    859 882   sys.exit("[!] Can't connect Neo4j Database.")
    860 883   
    861 884   tx = GRAPH.begin()
    862  - hosts_inv = {v:k for k, v in hosts.items()}
     885 + hosts_inv = {v: k for k, v in hosts.items()}
    863 886   for ipaddress in event_set["ipaddress"].drop_duplicates():
    864 887   if ipaddress in hosts_inv:
    865 888   hostname = hosts_inv[ipaddress]
    skipped 26 lines
    892 915   ustatus += "DCShadow(" + dcshadow[username] + ") "
    893 916   if not ustatus:
    894 917   ustatus = "-"
    895  - tx.append(statement_user, {"user": username[:-1], "rank": ranks[username],"rights": rights,"sid": sid,"status": ustatus,
    896  - "counts": ",".join(map(str, timelines[i*6])), "counts4624": ",".join(map(str, timelines[i*6+1])),
    897  - "counts4625": ",".join(map(str, timelines[i*6+2])), "counts4768": ",".join(map(str, timelines[i*6+3])),
    898  - "counts4769": ",".join(map(str, timelines[i*6+4])), "counts4776": ",".join(map(str, timelines[i*6+5])),
    899  - "detect": ",".join(map(str, detects[i]))})
     918 + tx.append(statement_user, {"user": username[:-1], "rank": ranks[username], "rights": rights, "sid": sid, "status": ustatus,
     919 + "counts": ",".join(map(str, timelines[i*6])), "counts4624": ",".join(map(str, timelines[i*6+1])),
     920 + "counts4625": ",".join(map(str, timelines[i*6+2])), "counts4768": ",".join(map(str, timelines[i*6+3])),
     921 + "counts4769": ",".join(map(str, timelines[i*6+4])), "counts4776": ",".join(map(str, timelines[i*6+5])),
     922 + "detect": ",".join(map(str, detects[i]))})
    900 923   i += 1
    901 924   
    902 925   for domain in domains:
    skipped 1 lines
    904 927   
    905 928   for _, events in event_set.iterrows():
    906 929   tx.append(statement_r, {"user": events["username"][:-1], "IP": events["ipaddress"], "id": events["eventid"], "logintype": events["logintype"],
    907  - "status": events["status"], "count": events["count"], "authname": events["authname"]})
     930 + "status": events["status"], "count": events["count"], "authname": events["authname"]})
    908 931   
    909 932   for username, domain in domain_set_uniq:
    910 933   tx.append(statement_dr, {"user": username[:-1], "domain": domain})
    911 934   
    912 935   tx.append(statement_date, {"Daterange": "Daterange", "start": datetime.datetime(*starttime.timetuple()[:4]).strftime("%Y-%m-%d %H:%M:%S"),
    913  - "end": datetime.datetime(*endtime.timetuple()[:4]).strftime("%Y-%m-%d %H:%M:%S")})
     936 + "end": datetime.datetime(*endtime.timetuple()[:4]).strftime("%Y-%m-%d %H:%M:%S")})
    914 937   
    915 938   if len(deletelog):
    916 939   tx.append(statement_del, {"deletetime": deletelog[0], "user": deletelog[1], "domain": deletelog[2]})
    skipped 45 lines
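The reindented tx.append calls keep passing event values as query parameters rather than concatenating them into the Cypher text, which together with the field validation above keeps attacker-controlled strings out of the statements. A generic py2neo sketch of the same pattern (assumes a reachable Neo4j instance; credentials and labels here are illustrative, not taken from the commit):

from py2neo import Graph

graph = Graph("bolt://localhost:7687", auth=("neo4j", "password"))

# Values travel as parameters, never as string-built Cypher, so a name such as
# 'x"}) DETACH DELETE n //' remains inert data.
graph.run(
    "MERGE (u:Username {user: $user}) "
    "MERGE (i:IPAddress {IP: $ip}) "
    "MERGE (u)-[:Event]->(i)",
    user="administrator", ip="10.0.0.5",
)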
    962 985   sys.exit("[!] scikit-learn must be installed for this script.")
    963 986   
    964 987   try:
    965  - graph_http = "http://" + NEO4J_USER + ":" + NEO4J_PASSWORD +"@" + NEO4J_SERVER + ":" + NEO4J_PORT + "/db/data/"
     988 + graph_http = "http://" + NEO4J_USER + ":" + NEO4J_PASSWORD + "@" + NEO4J_SERVER + ":" + NEO4J_PORT + "/db/data/"
    966 989   GRAPH = Graph(graph_http)
    967 990   except:
    968 991   sys.exit("[!] Can't connect Neo4j Database.")
    skipped 24 lines
    993 1016   parse_evtx(args.xmls)
    994 1017   
    995 1018   print("[*] Script end. %s" % datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S"))
     1019 + 
    996 1020   
    997 1021  if __name__ == "__main__":
    998 1022   main()
    skipped 1 line