#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: set expandtab tabstop=4 shiftwidth=4:
"""
This file is part of the XSSer project, https://xsser.03c8.net
Copyright (c) 2010/2019 | psy <epsylon@riseup.net>
xsser is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation version 3 of the License.
xsser is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
details.
You should have received a copy of the GNU General Public License along
with xsser; if not, write to the Free Software Foundation, Inc., 51
Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
........
List of search engines: http://en.wikipedia.org/wiki/List_of_search_engines
Currently supported: duck(default), startpage, yahoo, bing
"""
  21. import urllib2, traceback, re, random, urllib
  22. urllib2.socket.setdefaulttimeout(5.0)
  23. DEBUG = 0
  24. class Dorker(object):
  25. def __init__(self, engine='duck'):
  26. self._engine = engine
  27. self.search_engines = [] # available dorking search engines
  28. self.search_engines.append('duck')
  29. self.search_engines.append('startpage')
  30. self.search_engines.append('yahoo')
  31. self.search_engines.append('bing')
  32. self.agents = [] # user-agents
  33. try:
  34. f = open("core/fuzzing/user-agents.txt").readlines() # set path for user-agents
  35. except:
  36. f = open("fuzzing/user-agents.txt").readlines() # set path for user-agents when testing
  37. for line in f:
  38. self.agents.append(line)
  39. def dork(self, search):
  40. """
  41. Perform a search and return links.
  42. """
  43. if self._engine == 'bing': # works at 20-02-2011 -> 19-02-2016 -> 09-04-2018 -> 26-08-2019
  44. search_url = 'https://www.bing.com/search?q="' + str(search) + '"'
  45. print "\nSearching query:", urllib2.unquote(search_url)
  46. elif self._engine == 'yahoo': # works at 20-02-2011 -> 19-02-2016 -> -> 09-04-2018 -> 26-08-2019
  47. search_url = 'https://search.yahoo.com/search?q="' + str(search) + '"'
  48. print "\nSearching query:", urllib2.unquote(search_url)
  49. elif self._engine == 'duck': # works at 26-08-2019
  50. search_url = 'https://duckduckgo.com/html/'
  51. q = 'instreamset:(url):"' + str(search) + '"' # set query to search literally on results
  52. query_string = { 'q':q }
  53. print "\nSearching query:", urllib2.unquote(search_url) + " [POST: (" + q + ")]"
  54. elif self._engine == 'startpage': # works at 26-08-2019
  55. search_url = 'https://www.startpage.com/do/asearch'
  56. q = 'url:"' + str(search) + '"' # set query to search literally on results
  57. query_string = { 'cmd':'process_search', 'query':q }
  58. print "\nSearching query:", urllib2.unquote(search_url) + " [POST: (" + q + ")]"
  59. else:
  60. print "\n[Error] This search engine is not being supported!\n"
  61. print '-'*25
  62. print "\n[Info] Use one from this list:\n"
  63. for e in self.search_engines:
  64. print "+ "+e
  65. print "\n ex: xsser -d 'profile.asp?num=' --De 'duck'"
  66. print " ex: xsser -l --De 'startpage'"
  67. print "\n[Info] Or try them all:\n\n ex: xsser -d 'news.php?id=' --Da\n"
  68. try:
  69. self.search_url = search_url
  70. user_agent = random.choice(self.agents).strip() # set random user-agent
  71. referer = '127.0.0.1' # set referer to localhost / WAF black magic!
  72. headers = {'User-Agent' : user_agent, 'Referer' : referer}
  73. if self._engine == 'bing' or self._engine == 'yahoo': # using GET
  74. req = urllib2.Request(search_url, None, headers)
  75. elif self._engine == 'duck' or self._engine == 'startpage': # using POST
  76. data = urllib.urlencode(query_string)
  77. req = urllib2.Request(search_url, data, headers)
  78. html_data = urllib2.urlopen(req).read()
  79. print "\n[Info] Retrieving requested info..."
  80. except urllib2.URLError, e:
  81. if DEBUG:
  82. traceback.print_exc()
  83. print "\n[Error] Cannot connect!"
  84. print "\n" + "-"*50
  85. return
  86. if self._engine == 'bing':
  87. regex = '<h2><a href="(.+?)" h=' # regex magics 08/2019
  88. if self._engine == 'yahoo':
  89. regex = 'RU=(.+?)/RK=' # regex magics 08/2019
  90. if self._engine == 'duck':
  91. regex = '<a class="result__url" href="(.+?)">' # regex 08/2019
  92. if self._engine == 'startpage':
  93. regex = 'target="_blank">(.+?)</a>' # regex magics 08/2019
  94. pattern = re.compile(regex)
  95. links = re.findall(pattern, html_data)
  96. found_links = []
  97. if links:
  98. for link in links:
  99. link = urllib2.unquote(link)
  100. if self._engine == "yahoo":
  101. if "RU=https://www.yahoo.com/" in link:
  102. link = "" # invalid url
  103. if search.upper() in link.upper(): # parse that search query is on url
  104. sep = search
  105. link2 = link.split(sep,1)[0]
  106. if link2 not in found_links: # parse that target is not duplicated
  107. found_links.append(link)
  108. else:
  109. print "\n[Error] Not any link found for that query!"
  110. return found_links
  111. if __name__ == '__main__':
  112. for a in ['bing', 'yahoo', 'duck', 'startpage']: # working at: 28/08/2019
  113. dork = Dorker(a)
  114. res = dork.dork("news.php?id=")
  115. if res:
  116. print "\n[+] Search Engine:", a, "| Found: ", len(res), "\n"
  117. for b in res:
  118. print " *", b