[A scanner in 100 lines of Python code] XSS vulnerability scanner (DSXS)

Posted on 2014-12-2 21:11:22

Below is the full source of Miroslav Stampar's Damn Small XSS Scanner (DSXS), a reflected-XSS scanner in under 100 lines of Python 2 (it imports urllib2/cookielib, so it will not run unmodified under Python 3).
#!/usr/bin/env python
import cookielib, optparse, random, re, string, urllib, urllib2, urlparse

NAME    = "Damn Small XSS Scanner (DSXS) < 100 LoC (Lines of Code)"
VERSION = "0.1q"
AUTHOR  = "Miroslav Stampar (@stamparm)"
LICENSE = "Public domain (FREE)"

SMALLER_CHAR_POOL    = ('<', '>')                               # characters used for XSS tampering of parameter values (smaller set - for avoiding possible SQLi errors)
LARGER_CHAR_POOL     = ('\'', '"', '>', '<', ';')               # characters used for XSS tampering of parameter values (larger set)
GET, POST            = "GET", "POST"                            # enumerator-like values used for marking current phase
PREFIX_SUFFIX_LENGTH = 5                                        # length of random prefix/suffix used in XSS tampering
CONTEXT_DISPLAY_OFFSET = 10                                     # offset outside the affected context for displaying in vulnerability report
COOKIE, UA, REFERER = "Cookie", "User-Agent", "Referer"         # optional HTTP header names
TIMEOUT = 30                                                    # connection timeout in seconds

XSS_PATTERNS = (                                                # each (pattern) item consists of (r"context regex", (prerequisite unfiltered characters), "info text", r"content removal regex")
    (r"\A[^<>]*%(chars)s[^<>]*\Z", ('<', '>'), "\".xss.\", pure text response, %(filtering)s filtering", None),
    (r"<!--[^>]*%(chars)s|%(chars)s[^<]*-->", ('<', '>'), "\"<!--.'.xss.'.-->\", inside the comment, %(filtering)s filtering", None),
    (r"(?s)<script[^>]*>[^<]*?'[^<']*%(chars)s|%(chars)s[^<']*'[^<]*</script>", ('\'', ';'), "\"<script>.'.xss.'.</script>\", enclosed by <script> tags, inside single-quotes, %(filtering)s filtering", None),
    (r'(?s)<script[^>]*>[^<]*?"[^<"]*%(chars)s|%(chars)s[^<"]*"[^<]*</script>', ('"', ';'), "'<script>.\".xss.\".</script>', enclosed by <script> tags, inside double-quotes, %(filtering)s filtering", None),
    (r"(?s)<script[^>]*>[^<]*?%(chars)s|%(chars)s[^<]*</script>", (';',), "\"<script>.xss.</script>\", enclosed by <script> tags, %(filtering)s filtering", None),
    (r">[^<]*%(chars)s[^<]*(<|\Z)", ('<', '>'), "\">.xss.<\", outside of tags, %(filtering)s filtering", r"(?s)<script.+?</script>|<!--.*?-->"),
    (r"<[^>]*'[^>']*%(chars)s[^>']*'[^>]*>", ('\'',), "\"<.'.xss.'.>\", inside the tag, inside single-quotes, %(filtering)s filtering", r"(?s)<script.+?</script>|<!--.*?-->"),
    (r'<[^>]*"[^>"]*%(chars)s[^>"]*"[^>]*>', ('"',), "'<.\".xss.\".>', inside the tag, inside double-quotes, %(filtering)s filtering", r"(?s)<script.+?</script>|<!--.*?-->"),
    (r"<[^>]*%(chars)s[^>]*>", (), "\"<.xss.>\", inside the tag, outside of quotes, %(filtering)s filtering", r"(?s)<script.+?</script>|<!--.*?-->")
)
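# Illustrative note (my addition, not part of the original listing): the scanner
# reflects a random marker such as qwert'"><;zxcvb into the page, then substitutes
# the re.escape()d reflection for %(chars)s in each context regex above. E.g. if
# the response contains <p>hello qwert'"><;zxcvb</p>, the "outside of tags"
# pattern r">[^<]*%(chars)s[^<]*(<|\Z)" matches; the prerequisite tuple (here
# ('<', '>')) must survive the server's filtering for a finding to be reported,
# while the %(filtering)s word becomes "no" only if the whole LARGER_CHAR_POOL
# came back intact.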

USER_AGENTS = (                                                 # items used for picking random HTTP User-Agent header value
    "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_7_0; en-US) AppleWebKit/534.21 (KHTML, like Gecko) Chrome/11.0.678.0 Safari/534.21",
    "Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)",
    "Mozilla/5.0 (Windows; U; Windows NT 5.0; en-US; rv:0.9.2) Gecko/20020508 Netscape6/6.1",
    "Mozilla/5.0 (X11; U; Linux i686; en-GB; rv:1.9.1) Gecko/20090624 Ubuntu/9.04 (jaunty) Firefox/3.5",
)

_headers = {}                                                   # used for storing dictionary with optional header values

# retrieves page content for the given URL (and optional POST data); bodies of HTTP error responses are still read
def _retrieve_content(url, data=None):
    try:
        req = urllib2.Request("".join(url[i].replace(' ', "%20") if i > url.find('?') else url[i] for i in xrange(len(url))), data, _headers)
        retval = urllib2.urlopen(req, timeout=TIMEOUT).read()
    except Exception, ex:
        retval = ex.read() if hasattr(ex, "read") else getattr(ex, "msg", str())
    return retval or ""

# checks that all given characters appear unescaped in the content (backslash-escaped occurrences are stripped first)
def _contains(content, chars):
    content = re.sub(r"\\[%s]" % re.escape("".join(chars)), "", content) if chars else content
    return all(char in content for char in chars)

# scans GET (URL query) and POST (data) parameters of the given page for reflected XSS
def scan_page(url, data=None):
    retval, usable = False, False
    url, data = re.sub(r"=(&|\Z)", r"=1\g<1>", url) if url else url, re.sub(r"=(&|\Z)", r"=1\g<1>", data) if data else data
    try:
        for phase in (GET, POST):
            current = url if phase is GET else (data or "")
            for match in re.finditer(r"((\A|[?&])(?P<parameter>[\w\[\]]+)=)(?P<value>[^&]+)", current):
                found, usable = False, True
                print "* scanning %s parameter '%s'" % (phase, match.group("parameter"))
                prefix, suffix = ("".join(random.sample(string.ascii_lowercase, PREFIX_SUFFIX_LENGTH)) for i in xrange(2))
                for pool in (LARGER_CHAR_POOL, SMALLER_CHAR_POOL):
                    if not found:
                        tampered = current.replace(match.group(0), "%s%s" % (match.group(0), urllib.quote("%s%s%s%s" % ("'" if pool == LARGER_CHAR_POOL else "", prefix, "".join(random.sample(pool, len(pool))), suffix))))
                        content = (_retrieve_content(tampered, data) if phase is GET else _retrieve_content(url, tampered)).replace("%s%s" % ("'" if pool == LARGER_CHAR_POOL else "", prefix), prefix)
                        for sample in re.finditer("%s([^ ]+?)%s" % (prefix, suffix), content, re.I):
                            for regex, condition, info, content_removal_regex in XSS_PATTERNS:
                                context = re.search(regex % {"chars": re.escape(sample.group(0))}, re.sub(content_removal_regex or "", "", content), re.I)
                                if context and not found and sample.group(1).strip():
                                    if _contains(sample.group(1), condition):
                                        print " (i) %s parameter '%s' appears to be XSS vulnerable (%s)" % (phase, match.group("parameter"), info % dict((("filtering", "no" if all(char in sample.group(1) for char in LARGER_CHAR_POOL) else "some"),)))
                                        found = retval = True
                                    break
        if not usable:
            print " (x) no usable GET/POST parameters found"
    except KeyboardInterrupt:
        print "\r (x) Ctrl-C pressed"
    return retval
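# Illustrative note (my addition, not part of the original listing): for a URL
# like http://host/page.php?id=1, the GET phase with the larger pool requests
# something along the lines of
#   page.php?id=1%27abcde%27%22%3E%3C%3Bfghij
# i.e. the original value followed by ' + random prefix + shuffled pool
# characters + random suffix, URL-encoded. Whatever appears between prefix and
# suffix in the response is the reflected sample that XSS_PATTERNS classify.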

# stores the optional HTTP headers and (if requested) installs a proxy opener
def init_options(proxy=None, cookie=None, ua=None, referer=None):
    global _headers
    _headers = dict(filter(lambda _: _[1], ((COOKIE, cookie), (UA, ua or NAME), (REFERER, referer))))
    urllib2.install_opener(urllib2.build_opener(urllib2.ProxyHandler({'http': proxy})) if proxy else None)

if __name__ == "__main__":
    print "%s #v%s\n by: %s\n" % (NAME, VERSION, AUTHOR)
    parser = optparse.OptionParser(version=VERSION)
    parser.add_option("-u", "--url", dest="url", help="Target URL (e.g. \"http://www.target.com/page.php?id=1\")")
    parser.add_option("--data", dest="data", help="POST data (e.g. \"query=test\")")
    parser.add_option("--cookie", dest="cookie", help="HTTP Cookie header value")
    parser.add_option("--user-agent", dest="ua", help="HTTP User-Agent header value")
    parser.add_option("--random-agent", dest="randomAgent", action="store_true", help="Use randomly selected HTTP User-Agent header value")
    parser.add_option("--referer", dest="referer", help="HTTP Referer header value")
    parser.add_option("--proxy", dest="proxy", help="HTTP proxy address (e.g. \"http://127.0.0.1:8080\")")
    options, _ = parser.parse_args()
    if options.url:
        init_options(options.proxy, options.cookie, options.ua if not options.randomAgent else random.choice(USER_AGENTS), options.referer)
        result = scan_page(options.url if options.url.startswith("http") else "http://%s" % options.url, options.data)
        print "\nscan results: %s vulnerabilities found" % ("possible" if result else "no")
    else:
        parser.print_help()
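To try the scanner against something harmless, one option is to point it at a deliberately vulnerable local page. The following is a minimal sketch of such a page (my addition, not part of DSXS; the script name, port 8000, and the parameter name "name" are arbitrary choices), kept to the Python 2 standard library to match the scanner:

#!/usr/bin/env python
# vulnerable_test.py - a deliberately vulnerable reflected-XSS test page (sketch)
import BaseHTTPServer, urlparse

class Handler(BaseHTTPServer.BaseHTTPRequestHandler):
    def do_GET(self):
        # reflect the "name" query parameter with no output encoding (the bug)
        params = urlparse.parse_qs(urlparse.urlparse(self.path).query)
        self.send_response(200)
        self.send_header("Content-Type", "text/html")
        self.end_headers()
        self.wfile.write("<html><body>hello %s</body></html>" % params.get("name", [""])[0])

if __name__ == "__main__":
    BaseHTTPServer.HTTPServer(("127.0.0.1", 8000), Handler).serve_forever()

With that server running, an invocation like

python dsxs.py -u "http://127.0.0.1:8000/?name=test"

should report the "name" parameter as XSS vulnerable in the ">.xss.<", outside of tags context with "no filtering" (since the page reflects everything verbatim), assuming the listing above was saved as dsxs.py.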