main.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the orb project, https://orb.03c8.net
Orb - 2016/2017/2018 - by psy (epsylon@riseup.net)
You should have received a copy of the GNU General Public License along
with Orb; if not, write to the Free Software Foundation, Inc., 51
Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
from options import OrbOptions
from update import Updater
from orb import ClientThread
import webbrowser, socket, traceback, sys, urllib2, urllib, re, urlparse, os, datetime, random
import json

# check for required third-party libs
try:
    import whois
except:
    print "[Warning] - Error importing: whois lib. \n\n On Debian based systems:\n\n $ sudo apt-get install python-whois\n"
    print "[Source] - Pypi-whois: https://pypi.python.org/pypi/whois\n"
    sys.exit(2)
try:
    import nmap
except:
    print "[Warning] - Error importing: nmap lib. \n\n On Debian based systems:\n\n $ sudo apt-get install python-nmap\n"
    print "[Source] - python-nmap: https://pypi.python.org/pypi/python-nmap\n"
    sys.exit(2)
try:
    import dns.resolver
except:
    print "[Warning] - Error importing: dns lib. \n\n On Debian based systems:\n\n $ sudo apt-get install python-dnspython\n"
    print "[Source] - dnspython: https://pypi.python.org/pypi/dnspython\n"
    sys.exit(2)

DEBUG = 0
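# note: setting DEBUG = 1 makes try_running() print full tracebacks when a task fails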

class Orb(object):
    def __init__(self):
        self.search_engines = [] # available search engines
        #self.search_engines.append('duck') -> deprecated [27/03/2018]
        #self.search_engines.append('google') -> deprecated [27/03/2018]
        self.search_engines.append('bing')
        self.search_engines.append('yahoo')
        #self.search_engines.append('yandex') -> deprecated [27/03/2018]
        self.engine_fail = False # search engines controller
        self.dns_Amachines = [] # used to check if ip = DNS-A records
        self.socials = None # used to get social links from source file
        self.news = None # used to get news links from source file
        self.url_links = [] # urls extracted from search engines
        self.sub_links = [] # subdomains extracted from search engines
        self.extract_wikipedia_record = True # used to not repeat wikipedia descriptions
        self.extract_financial_record = True # used to not repeat financial records
        self.extract_ranked_links = False # used to extract ranked links on search engines
        self.top_ranked = {}
        self.wikipedia_texts = [] # wikipedia descriptions
        self.social_links = {}
        self.news_links = {}
        self.ranked_record = 0
        self.agents = [] # user-agents
        self.ips_scanner = [] # IPs found by the scanner without DNS records
        f = open("core/sources/user-agents.txt").readlines()
        for line in f:
            self.agents.append(line)
    def set_options(self, options):
        self.options = options

    def create_options(self, args=None):
        self.optionParser = OrbOptions()
        self.options = self.optionParser.get_options(args)
        if not self.options:
            return False
        return self.options

    def banner(self):
        print '='*75, "\n"
        print "  _|_|              _|      "
        print "_|    _|  _|  _|_|  _|_|_|  "
        print "_|    _|  _|_|      _|    _|"
        print "_|    _|  _|        _|    _|"
        print "  _|_|    _|        _|_|_|  "
        print self.optionParser.description, "\n"
        print '='*75
    def try_running(self, func, error, args=None):
        options = self.options
        args = args or []
        try:
            return func(*args)
        except Exception as e:
            print error, "error"
            if DEBUG:
                traceback.print_exc()
    def generate_report(self): # generate raw log/report
        if not os.path.exists('reports/'):
            os.makedirs('reports/')
        if not self.options.gui: # generate report when no gui
            if not os.path.exists('reports/' + self.options.target):
                os.makedirs('reports/' + self.options.target)
            namefile = self.options.target + "_" + str(datetime.datetime.now())
            if self.options.verbose:
                print "\n[Verbose] - Generating log: " + 'reports/' + self.options.target + "/" + namefile + ".raw", "\n"
            self.report = open('reports/' + self.options.target + "/" + namefile + ".raw", 'a') # generate .raw file

    def generate_json(self): # generate json report
        if not os.path.exists('reports/'):
            os.makedirs('reports/')
        if not self.options.gui: # generate report when no gui
            if not os.path.exists('reports/' + self.options.target):
                os.makedirs('reports/' + self.options.target)
            namefile = self.options.json
            if self.options.verbose:
                print "[Verbose] - Generating JSON: " + 'reports/' + self.options.target + "/" + namefile, "\n"
            if os.path.exists('reports/' + self.options.target + "/" + namefile):
                os.remove('reports/' + self.options.target + "/" + namefile) # remove previous report if it exists
            self.json_report = open('reports/' + self.options.target + "/" + namefile, 'w') # generate new .json file each time
    def send_request(self, url): # single point for sending requests
        user_agent = random.choice(self.agents).strip() # set random user-agent
        referer = '127.0.0.1' # set referer to localhost / WAF black magic!
        headers = {'User-Agent' : user_agent, 'Referer' : referer}
        req = urllib2.Request(url, None, headers)
        req_reply = urllib2.urlopen(req).read()
        return req_reply
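    # note: the urlopen() call above has no timeout, so a stalled host can hang
    # the whole run; if needed, a timeout (in seconds) could be passed, e.g.:
    #   req_reply = urllib2.urlopen(req, timeout=15).read()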
    #def search_using_duck(self, target): # duckduckgo.com
    #    url = 'https://duckduckgo.com/html/?'
    #    if self.extract_ranked_links == True: # extract ranked links
    #        q = 'inurl:"' + str(target) + '"' # ex: inurl:"target"
    #    else: # extract subdomains
    #        q = 'site:.' + str(target) # ex: site:.target.com
    #    query_string = { 'q':q }
    #    data = urllib.urlencode(query_string)
    #    url = url + data
    #    try:
    #        req_reply = self.send_request(url)
    #    except:
    #        return
    #    regex_s = '<a class="result__url" href="(.+?)">' # regex magics (extract urls)
    #    pattern_s = re.compile(regex_s)
    #    url_links = re.findall(pattern_s, req_reply)
    #    return url_links

    #def search_using_google(self, target): # google.com
    #    if self.options.engineloc: # set engine location to increase responses
    #        url = 'https://www.google.' + self.options.engineloc + '/xhtml?'
    #    else:
    #        url = 'https://www.google.com/xhtml?'
    #    if self.extract_ranked_links == True: # extract ranked links
    #        q = 'inurl:"' + str(target) + '"' # ex: inurl:"target"
    #    else: # extract subdomains
    #        q = 'site:.' + str(target) # ex: site:.target.com
    #    start = 0 # set index number of first entry
    #    num = 50 # 5 pages
    #    gws_rd = 'ssl' # set SSL as default
    #    query_string = { 'q':q, 'start':start, 'num':num, 'gws_rd':gws_rd }
    #    data = urllib.urlencode(query_string)
    #    url = url + data
    #    try:
    #        req_reply = self.send_request(url)
    #    except:
    #        return
    #    regex = '<h3 class="r"><a href="/url(.+?)">' # regex magics
    #    pattern = re.compile(regex)
    #    url_links = re.findall(pattern, req_reply)
    #    return url_links
    def search_using_bing(self, target): # bing.com
        url = 'https://www.bing.com/search?'
        if self.extract_ranked_links == True: # extract ranked links
            q = str(target) # 'inurl:' is not allowed on bing
        else: # extract subdomains
            q = 'site:.' + str(target) # ex: site:.target.com
        start = 0 # set index number of first entry
        if self.options.engineloc: # add search engine location on query: &cc=
            query_string = { 'q':q, 'first':start, 'cc':self.options.engineloc }
        else:
            query_string = { 'q':q, 'first':start }
        data = urllib.urlencode(query_string)
        url = url + data
        try:
            req_reply = self.send_request(url)
        except:
            return
        regex = '<li class="b_algo"><h2><a href="(.+?)">' # regex magics
        pattern = re.compile(regex)
        url_links = re.findall(pattern, req_reply)
        return url_links
    def search_using_yahoo(self, target): # yahoo.com
        if self.options.engineloc: # set engine location to increase responses
            url = 'https://' + self.options.engineloc + '.search.yahoo.com/search?'
        else:
            url = 'https://search.yahoo.com/search?'
        if self.extract_ranked_links == True: # extract ranked links
            q = str(target)
        else: # extract subdomains
            q = '.' + str(target)
        start = 0 # set index number of first entry
        query_string = { 'q':q, 'first':start, 'ei':'UTF-8', 'nojs':1 }
        data = urllib.urlencode(query_string)
        url = url + data
        try:
            req_reply = self.send_request(url)
        except:
            return
        regex = 'RU=(.+?)/RK=' # result urls are wrapped in yahoo redirect links [27/03/2018]
        pattern = re.compile(regex)
        url_links = re.findall(pattern, req_reply)
        return url_links
    #def search_using_yandex(self, target): # yandex.com
    #    url = 'https://yandex.com/search/?'
    #    if self.extract_ranked_links == True: # extract ranked links
    #        q = str(target)
    #    else: # extract subdomains
    #        q = 'site:.' + str(target)
    #    start = 0 # set index number of first entry
    #    # generate random number on range 1-9999999999 with float point
    #    # to provide a fake redircnt (ex: 1458153459.1) / black magic!
    #    try:
    #        import random
    #        num = random.uniform(0, 9999999999)
    #    except:
    #        num = 1458153459.1
    #    query_string = { 'text':q, 'p':start, 'redircnt':num }
    #    data = urllib.urlencode(query_string)
    #    url = url + data
    #    try:
    #        req_reply = self.send_request(url)
    #    except:
    #        return
    #    regex = '<a class="link serp-url__link" target="_blank" href="(.+?)"' # regex magics 16/03/2016
    #    pattern = re.compile(regex)
    #    url_links = re.findall(pattern, req_reply)
    #    return url_links
    def search_using_torch(self, target): # http://xmh57jrzrnw6insl.onion
        try:
            url = 'http://xmh57jrzrnw6insl.onion/4a1f6b371c/search.cgi?' # [28/03/2018] https://onion+hash+search.cgi
            q = str(target)
            start = 0
            query_string = { 'q':q, 'cmd':'Search!' }
            data = urllib.urlencode(query_string)
            url = url + data
            try:
                req_reply = self.send_request(url)
            except:
                print "- Not found!"
                if not self.options.nolog: # generate log
                    self.report.write("\n- Deep Web: Not found!\n\n")
                return
            if "No documents were found" in req_reply: # no records found
                print "[Info] - No documents were found!"
                if not self.options.nolog: # generate log
                    self.report.write("- Deep Web: Not found!\n\n")
            else:
                regex = '<A HREF="(.+?)" TARGET' # regex magics - 26/03/2016
                pattern = re.compile(regex)
                url_links = re.findall(pattern, req_reply)
                for url in url_links:
                    print "- Onion URL ->", url
                    if not self.options.nolog: # generate log
                        self.report.write("- Onion URL -> " + url + "\n")
                    if self.options.json: # write reply to json
                        self.json_report.write(json.dumps(['Deep Web',{'Onion': url}], separators=(',', ':')))
                if not self.options.nolog: # generate log
                    self.report.write("\n") # zen
        except: # return when it fails
            print "- Not found!"
            if not self.options.nolog: # generate log
                self.report.write("\n- Deep Web: Not found!\n\n")
            return
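    # note: .onion addresses only resolve through Tor, so search_using_torch()
    # above expects to be run through a Tor proxy (e.g. torsocks/torify);
    # otherwise the request fails and is reported as "Not found!"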
    def extract_financial(self, target): # extract financial records
        try: # search on yahoo financial
            url = 'https://finance.yahoo.com/lookup?' # [29/03/2018]
            s = str(self.options.target).upper() # uppercase required
            query_string = {'s':s}
            data = urllib.urlencode(query_string)
            url = url + data
            if self.options.verbose:
                print "\n[Verbose] - Financial query used:", url + "\n"
            try:
                req_reply = self.send_request(url)
            except:
                print "- Not found!"
                if not self.options.nolog: # generate log
                    self.report.write("\n- Financial: Not found!\n")
                self.extract_financial_record = False
                return
            regex = '{"exchange":(.+?)"}' # regex magics [28/03/2018]
            pattern = re.compile(regex)
            records = re.findall(pattern, req_reply)
            for record in records:
                regex2 = '"symbol":"(.+?)","industryLink' # regex magics [28/03/2018]
                pattern2 = re.compile(regex2)
                symbol = re.findall(pattern2, record)
                regex3 = '"companyName":"(.+?)","industryName"' # regex magics [28/03/2018]
                pattern3 = re.compile(regex3)
                name = re.findall(pattern3, record)
                sep = '"lastValue":"' # regex magics [28/03/2018]
                prize = record.split(sep, 1)[1]
                if 'industryName' in str(name): # parse empty name
                    name = "['']"
                print "- SYMBOL:", symbol, "-> Name:", name, "-> Last price:", prize
                if not self.options.nolog: # generate log
                    self.report.write("- SYMBOL: " + str(symbol) + " -> Name: " + str(name) + " -> Last price: " + str(prize) + "\n")
                if self.options.json: # write reply to json
                    self.json_report.write(json.dumps(['Financial',{'SYMBOL': symbol,'Name': name,'Last price': prize}], separators=(',', ':')))
            self.extract_financial_record = False
            if not self.options.nolog: # generate log
                self.report.write("\n") # raw format task
        except: # return when it fails
            print "[Info] - Financial: Not found!\n"
            if not self.options.nolog: # generate log
                self.report.write("\n- Financial: Not found!\n")
            self.extract_financial_record = False
            return
    def extract_social(self, url): # extract social links
        if self.options.public: # safe/return when no extract public records option
            return
        if self.options.social: # safe/return when no extract social records option
            return
        for s in self.socials:
            if s in url: # found record
                self.social_links[s] = url # add s/url to dict
            else:
                pass

    def extract_news(self, url): # extract news links (using a list from file)
        if self.options.public: # safe/return when no extract public records option
            return
        if self.options.news: # safe/return when no extract news records option
            return
        for n in self.news:
            if n in url: # found record
                self.news_links[n] = url # add n/url to dict
            else:
                pass
    def extract_wikipedia(self, url): # extract wikipedia info
        try:
            req_reply = self.send_request(url)
        except:
            return
        regex = '<p><b>(.+?)</p>' # regex magics (description)
        pattern = re.compile(regex)
        descr = re.findall(pattern, req_reply)
        for d in descr:
            d_cleanner = re.compile('<.*?>') # strip html tags from descriptions
            d_clean = re.sub(d_cleanner, '', d)
            wikipedia = re.sub(r'\[.*?\]\ *', '', d_clean) # strip footnote markers like [1]
            if "may refer to" in wikipedia:
                wikipedia = "There are multiple records for this entry -> " + str(url)
            return wikipedia
    def extract_from_engine(self, engine, target): # search using engine
        url_links = None # guard: avoid a NameError when an unsupported engine is passed
        #if engine == "duck": # using duck
        #    url_links = self.search_using_duck(target)
        #if engine == "google": # using google
        #    url_links = self.search_using_google(target)
        if engine == "bing": # using bing
            url_links = self.search_using_bing(target)
        if engine == "yahoo": # using yahoo
            url_links = self.search_using_yahoo(target)
        #if engine == "yandex": # using yandex
        #    url_links = self.search_using_yandex(target)
        if not url_links: # no records found
            self.engine_fail = True
        else:
            for url in url_links:
                if engine == "yahoo" or engine == "bing": # post-parse regex magics
                    sep = '"'
                    url = url.split(sep, 1)[0]
                    url = urllib.unquote(url)
                #if engine == "google":
                #    url = url.replace("?q=", "")
                #    sep = '&amp;sa='
                #    url = url.split(sep, 1)[0]
                if self.extract_ranked_links == True: # ranked links
                    if target in url: # only add urls related with target
                        self.url_links.append(url)
                    if self.ranked_record == 0:
                        if target in url: # only add urls related with target
                            self.top_ranked[engine] = url # add s/url to dict
                        self.ranked_record = self.ranked_record + 1
                else: # subdomains
                    self.sub_links.append(url)
            self.engine_fail = False
    def extract_ranked(self, target, engine): # extract ranked link
        if self.options.public: # safe/return when no extract public records option
            return
        self.extract_ranked_links = True # used to perform different queries to search engines
        self.ranked_record = 0 # extract ranked link
        self.extract_from_engine(engine, target)
        self.extract_ranked_links = False # list semaphore to off
    def public_records_output(self): # output public records after parsing
        # extract and order data gathered + report when found
        print "="*14
        print "*Top Ranked*:"
        print "="*14
        if not self.top_ranked:
            print "- Not found!"
            if not self.options.nolog: # generate log
                self.report.write("\n- Top Ranked: Not found!\n\n")
        else:
            for key,val in self.top_ranked.items():
                print("- {} -> {}".format(key, val))
                if not self.options.nolog: # generate log
                    self.report.write("- Top ranked: " + key + " -> " + val + "\n")
                if self.options.json: # write reply to json
                    self.json_report.write(json.dumps(['Ranked',{'Engine': key, 'Top': val}], separators=(',', ':')))
            if not self.options.nolog: # generate log
                self.report.write("\n") # raw format task
        if self.extract_wikipedia_record == True: # no need to repeat wikipedia descriptions on each extension
            print "="*14
            print "*Wikipedia*:"
            print "="*14
            if not self.wikipedia_texts:
                print "- Not found!"
                if not self.options.nolog: # generate log
                    self.report.write("- Wikipedia: Not found!\n\n")
            else:
                for wikipedia in self.wikipedia_texts:
                    if wikipedia is not None:
                        print "-", wikipedia
                        if not self.options.nolog: # generate log
                            self.report.write("- " + wikipedia + "\n")
                        if self.options.json: # write reply to json (non parsed ascii)
                            self.json_report.write(json.dumps(['Wikipedia',{'Description': wikipedia}], separators=(',', ':'), ensure_ascii=False))
                    if wikipedia is None:
                        print "- Not found!"
                if not self.options.nolog: # generate log
                    self.report.write("\n") # raw format task
        if not self.options.social:
            print "="*14
            print "*Social*:"
            print "="*14
            if not self.social_links:
                print "- Not found!"
                if not self.options.nolog: # generate log
                    self.report.write("- Social: Not found!\n\n")
            else:
                for key,val in self.social_links.items():
                    print("- {} -> {}".format(key, val))
                    if not self.options.nolog: # generate log
                        self.report.write("- " + key + " -> " + val + "\n")
                    if self.options.json: # write reply to json
                        self.json_report.write(json.dumps(['Social',{key:val}], separators=(',', ':')))
                if not self.options.nolog: # generate log
                    self.report.write("\n") # raw format task
        if not self.options.news:
            print "="*14
            print "*News*:"
            print "="*14
            if not self.news_links:
                print "- Not found!"
                if not self.options.nolog: # generate log
                    self.report.write("- News: Not found!\n\n")
            else:
                for key,val in self.news_links.items():
                    print("- {} -> {}".format(key, val))
                    if not self.options.nolog: # generate log
                        self.report.write("- " + key + " -> " + val + "\n")
                    if self.options.json: # write reply to json
                        self.json_report.write(json.dumps(['News',{key:val}], separators=(',', ':')))
                if not self.options.nolog: # generate log
                    self.report.write("\n") # raw format task
    def extract_public(self, target): # extract general public records
        if self.options.public: # safe/return when no extract public records option
            return
        if self.options.allengines: # search using all search engines available (pass to next when one fails)
            for engine in self.search_engines:
                self.extract_ranked(target, engine)
        else:
            if self.options.engine:
                if self.options.engine in self.search_engines:
                    engine = str(self.options.engine)
                else:
                    engine = "yahoo"
                    print "\n- You are setting an unsupported search engine. Using default: " + engine + "\n"
            else:
                engine = "yahoo" # used by default
            self.extract_ranked(target, engine)
        if self.engine_fail == True: # pass other tests when no urls
            if not self.options.allengines:
                print "\n- [" + target + "] -> No links found using:", engine + "\n"
                if not self.options.nolog: # generate log
                    self.report.write("\n***[Info] - [" + target + "] -> No links found using: " + engine + "\n\n")
        else:
            for url in self.url_links: # search on results retrieved by all engines used
                #if self.extract_wikipedia_record == True: # extract mode
                #    if "wikipedia.org" in url: # wikipedia record!
                #        wikipedia = self.extract_wikipedia(url) # extract data from wikipedia
                #        if wikipedia not in self.wikipedia_texts: # do not repeat entries
                #            self.wikipedia_texts.append(wikipedia)
                if not self.options.social:
                    self.extract_social(url)
                if not self.options.news:
                    self.extract_news(url)
        if self.extract_wikipedia_record == True: # visit wikipedia directly when search engines do not locate any record
            url_wiki = "https://en.wikipedia.org/wiki/" + str(target).title() # wikipedia default path to extract records
            if self.options.verbose:
                print "\n[Verbose] - Wikipedia query used:", url_wiki + "\n"
            wikipedia = self.extract_wikipedia(url_wiki) # extract data from wikipedia
            if wikipedia not in self.wikipedia_texts: # do not repeat entries
                self.wikipedia_texts.append(wikipedia)
        self.public_records_output() # output parsed public records
        if not self.options.deep: # search for deep web records
            print "="*14
            print "*Deep Web*:"
            print "="*14
            self.search_using_torch(target)
        if not self.options.financial: # search for financial records
            if self.extract_financial_record == True: # extract mode
                print "="*14
                print "*Financial*:"
                print "="*14
                self.extract_financial(target)
                self.extract_financial_record = False
    def extract_whois(self, target): # extract whois data from target domain
        print "="*14
        print "*Whois*:"
        print "="*14
        try:
            domain = whois.query(target, ignore_returncode=True) # ignore return code
            if domain.creation_date is None: # return when no creation date
                print "- Not found!\n"
                if not self.options.nolog: # generate log
                    self.report.write("- Whois: Not found!\n\n")
                return
        except: # return when the query fails
            print "- Not found!"
            if not self.options.nolog: # generate log
                self.report.write("- Whois: Not found!\n\n")
            return
        else:
            print "- Domain: " + str(domain.name)
            print "- Registrant: " + str(domain.registrar)
            print "- Creation date: " + str(domain.creation_date)
            print "- Expiration: " + str(domain.expiration_date)
            print "- Last update: " + str(domain.last_updated)
            if not self.options.nolog: # write reply to log
                self.report.write("- Domain: " + str(domain.name) + "\n")
                self.report.write("- Registrant: " + str(domain.registrar) + "\n")
                self.report.write("- Creation date: " + str(domain.creation_date) + "\n")
                self.report.write("- Expiration: " + str(domain.expiration_date) + "\n")
                self.report.write("- Last update: " + str(domain.last_updated) + "\n")
            if self.options.json: # write reply to json
                self.json_report.write(json.dumps(['Whois',{'Domain': str(domain.name), 'Registrant': str(domain.registrar),'Creation date': str(domain.creation_date),'Expiration': str(domain.expiration_date),'Last update': str(domain.last_updated)}], separators=(',', ':')))
    def extract_cvs(self, cve_info): # extract extended CVE detail from web.nvd.nist.gov
        url = 'https://web.nvd.nist.gov/view/vuln/detail?vulnId'
        q = str(cve_info) # CVE id extracted from scanner results
        query_string = { '':q }
        data = urllib.urlencode(query_string) # urlencode({'': q}) yields '=q', completing 'vulnId=<CVE id>'
        url = url + data
        if self.options.verbose:
            print "\n[Verbose] - CVS database query used:", url + "\n"
        try:
            req_reply = self.send_request(url)
        except:
            if self.options.verbose:
                print('\n[Error] - Cannot extract CVS records...\n')
            return
        regex_cvs = '<p data-testid="vuln-description">(.+?)</p>\r' # regex magics [28/03/2018]
        pattern_cvs = re.compile(regex_cvs)
        cvs = re.findall(pattern_cvs, req_reply)
        print "" # zen output
        for cvs_desc in cvs:
            cvs_desc = cvs_desc.replace('This is a potential security issue, you are being redirected to <a href="http://nvd.nist.gov">http://nvd.nist.gov</a','')
            cvs_desc = cvs_desc.replace("<strong>", "")
            cvs_desc = cvs_desc.replace("</strong>", "")
            sep = '<'
            cvs_desc = cvs_desc.split(sep, 1)[0]
            cvs_desc = cvs_desc.replace(">","-----")
            print "          ", cvs_desc # 10 spaces for zen output
            if not self.options.nolog: # write reply to log
                self.report.write("          " + cvs_desc + "\n")
            if self.options.json: # write reply to json
                self.json_report.write(json.dumps(['CVS',{'Description': str(cvs_desc)}], separators=(',', ':')))
    def extract_cve(self, product): # extract vulnerabilities from CVE database
        url = 'https://cve.mitre.org/cgi-bin/cvekey.cgi?keyword'
        q = str(product) # product extracted from scanner results
        query_string = { '':q }
        data = urllib.urlencode(query_string) # urlencode({'': q}) yields '=q', completing 'keyword=<product>'
        url = url + data
        if self.options.verbose:
            print "\n[Verbose] - CVE database query used:", url
        try:
            req_reply = self.send_request(url)
        except:
            if self.options.verbose:
                print('\n[Error] - Cannot resolve CVE records...\n')
            return
        if req_reply == "": # no records found
            print "- No records found on CVE database!"
            if not self.options.nolog: # write reply to log
                self.report.write("- No records found on CVE database!" + "\n")
        regex_s = '<td valign="top" nowrap="nowrap"><a href="(.+?)">' # regex magics
        pattern_s = re.compile(regex_s)
        CVE_links = re.findall(pattern_s, req_reply)
        for cve in CVE_links:
            cve_info = cve.replace("/cgi-bin/cvename.cgi?name=","")
            print "\n        +", cve_info, "->", "https://cve.mitre.org" + cve # 8 spaces for zen output
            if not self.options.nolog: # write reply to log
                self.report.write("\n        + " + cve_info + " -> " + "https://cve.mitre.org" + cve + "\n")
            if self.options.json: # write reply to json
                self.json_report.write(json.dumps(['CVE',{'ID': str(cve_info), 'Link': "https://cve.mitre.org" + str(cve)}], separators=(',', ':')))
            if not self.options.cvs: # extract description from vulnerability (CVS)
                self.extract_cvs(cve_info)
    def search_subdomains(self, target): # try to extract subdomains from target domain (1. using search engines)
        # extract subdomains using search engines results (taking data from 'past')
        self.extract_ranked_links = False # use correct subdomains query term on search engines
        print "="*14
        print "*Subdomains*:"
        print "="*14
        for engine in self.search_engines:
            self.extract_from_engine(engine, target)
        if not self.sub_links: # no records found
            print "- No subdomains found!"
            if not self.options.nolog: # write reply to log
                self.report.write("- Subdomains: None found!" + "\n\n")
        else:
            record_s = 0
            short = "." + str(target)
            subdomains = []
            for url in self.sub_links:
                if "www." in url:
                    url = url.replace("www.", "") # remove www.
                if short in url: # subdomain
                    url_s = urlparse.urlparse(url)
                    subdomain = str(url_s.hostname.split('.')[0] + "." + str(target))
                    if subdomain not in subdomains:
                        subdomains.append(subdomain)
            for s in subdomains:
                print "- " + s
                if not self.options.nolog: # write reply to log
                    self.report.write("- Subdomain: " + s + "\n")
                if self.options.json: # write reply to json
                    self.json_report.write(json.dumps(['Subdomains',{'Subdomain': str(s)}], separators=(',', ':')))
                record_s = record_s + 1
            if not self.options.nolog: # generate log
                self.report.write("\n") # zen
            if record_s == 0:
                print "- No subdomains found!"
                if not self.options.nolog: # write reply to log
                    self.report.write("- Subdomains: None found!" + "\n\n")
    def resolve_ip(self, target): # try to resolve an ip from target domain
        data = socket.gethostbyname_ex(target) # returns (hostname, aliaslist, ipaddrlist)
        for ip in data[2]:
            self.ip = ip
            self.ips_scanner.append(ip) # add to list of IPs found without DNS records (for scanner)
            print "- " + str(ip)
            if not self.options.nolog: # write reply to log
                self.report.write("- IP: " + str(ip) + "\n")
            if self.options.json: # write reply to json
                self.json_report.write(json.dumps(['Server',{'IP': str(ip)}], separators=(',', ':')))
        if not self.options.nolog: # generate log
            self.report.write("\n") # zen
        return ip
    def scan_target(self, target): # try to discover open ports
        if self.options.scanner: # safe/return when no scanning option
            return
        open_ports = 0 # open ports counter
        if not self.options.proto:
            proto = "TCP+UDP"
        else:
            proto = "TCP"
            #proto = str(self.options.proto)
            #proto = proto.upper()
        nm = nmap.PortScanner()
        if self.options.ports:
            ports = self.options.ports
        else:
            ports = '1-65535' # scan all ports by default (1-65535)
        #if proto == "UDP": # scan UDP ports (UDP Scan)
        #    nm.scan(str(target), str(ports), arguments='-sU -sV', sudo=False)
        #    if self.options.verbose:
        #        print "-Using:", nm.command_line()
        if proto == "TCP": # scan TCP ports (TCP connect()+Service scan)
            nm.scan(str(target), str(ports), arguments='-sT -sV', sudo=False)
            if self.options.verbose:
                print "-Using:", nm.command_line()
        elif proto == "TCP+UDP": # scan TCP+UDP ports (NoPing+Service scan)
            nm.scan(str(target), str(ports), arguments='-PN -sV', sudo=False)
            if self.options.verbose:
                print "-Using:", nm.command_line()
        #else:
        #    print "\n[Info] - You are not setting a supported protocol. Options are: 'UDP', 'TCP' or 'TCP+UDP'.\n"
        #    nm.scan(str(target), str(ports), arguments='-PN -sV', sudo=False) # (NoPing+Service scan)
        #    if self.options.verbose:
        #        print "-Using:", nm.command_line()
        for host in nm.all_hosts():
            print('\n * Host : %s' % host)
            if not self.options.nolog: # write reply to log
                self.report.write('\n * Host : ' + str(host) + "\n")
            print(' * State : %s' % nm[host].state())
            if not self.options.nolog: # write reply to log
                self.report.write(' * State : ' + str(nm[host].state()) + "\n")
            for proto in nm[host].all_protocols():
                print(' - Protocol : %s' % proto)
                if not self.options.nolog: # write reply to log
                    self.report.write(" - Protocol: " + proto + "\n")
                if self.options.json: # write json report
                    self.json_report.write(json.dumps(['Scanner',{'Protocol': str(proto)}], separators=(',', ':')))
                lport = nm[host][proto].keys()
                lport.sort()
                for port in lport:
                    if not self.options.banner: # extract banners from services discovered
                        if str(nm[host][proto][port]['state']) == "open": # report open ports+banner
                            print " + Port:", port, "(", nm[host][proto][port]['state'], ") -", nm[host][proto][port]['product'], " |", nm[host][proto][port]['version'], nm[host][proto][port]['name'], nm[host][proto][port]['extrainfo'], nm[host][proto][port]['cpe']
                            if not self.options.nolog: # write reply to log
                                self.report.write(" + Port:" + str(port) + "(" + str(nm[host][proto][port]['state']) + ") - " + str(nm[host][proto][port]['product']) + str(nm[host][proto][port]['version']) + str(nm[host][proto][port]['name']) + str(nm[host][proto][port]['extrainfo']) + str(nm[host][proto][port]['cpe']) + "\n")
                            if self.options.json: # write json report
                                self.json_report.write(json.dumps(['Scanner',{'Port': str(port), 'State': str(nm[host][proto][port]['state']), 'Version': str(nm[host][proto][port]['version']), 'Name': str(nm[host][proto][port]['name']), 'Info': str(nm[host][proto][port]['extrainfo']), 'CPE': str(nm[host][proto][port]['cpe'])}], separators=(',', ':')))
                            open_ports = open_ports + 1
                            if not self.options.cve: # extract vulnerabilities from CVE (Common Vulnerabilities and Exposures)
                                product = str(nm[host][proto][port]['product'])
                                cve = self.extract_cve(product)
                                print "" # zen output
                    else: # do not extract banners
                        if str(nm[host][proto][port]['state']) == "open": # only report open ports
                            print " + Port:", port, "(", nm[host][proto][port]['state'], ")"
                            if not self.options.nolog: # write reply to log
                                self.report.write(" + Port:" + str(port) + "(" + str(nm[host][proto][port]['state']) + ")")
                            if self.options.json: # write json report
                                self.json_report.write(json.dumps(['Scanner',{'Port': str(port), 'State': str(nm[host][proto][port]['state'])}], separators=(',', ':')))
                            open_ports = open_ports + 1
                    if self.options.filtered: # add filtered ports to results
                        if str(nm[host][proto][port]['state']) == "filtered": # report filtered ports (no banners)
                            print " + Port:", port, "(", nm[host][proto][port]['state'], ")"
                            if not self.options.nolog: # write reply to log
                                self.report.write(" + Port:" + str(port) + "(" + str(nm[host][proto][port]['state']) + ")")
                            if self.options.json: # write json report
                                self.json_report.write(json.dumps(['Scanner',{'Port': str(port), 'State': str(nm[host][proto][port]['state'])}], separators=(',', ':')))
        if not open_ports > 0:
            print "\n- No open ports found!"
            if not self.options.nolog: # write reply to log
                self.report.write("\n- No open ports found!\n\n")
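    # note: nmap.PortScanner() above wraps the system 'nmap' binary, which must
    # be installed separately; service-detection (-sV) over the default
    # 1-65535 range can take a long time per host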
    def resolve_dns(self, target): # try to discover DNS records + perform portscanning
        resolver = dns.resolver.Resolver()
        if self.options.resolv: # use DNS resolvers provided by user
            resolvers = str(self.options.resolv)
            resolvers = resolvers.split(",")
            resolver.nameservers = resolvers
            if self.options.verbose:
                print "[Verbose] - Using DNS resolvers: [" + self.options.resolv + "]\n"
        else: # use default Google Inc. DNS resolvers (8.8.8.8, 8.8.4.4)
            resolver.nameservers = ['8.8.8.8', '8.8.4.4'] # google DNS resolvers
            if self.options.verbose:
                print "[Verbose] - Using DNS resolvers: [8.8.8.8, 8.8.4.4]\n"
        try:
            answers = resolver.query(target, "A") # A records
            for rdata in answers:
                print "- [A]:", rdata
                self.dns_Amachines.append(rdata)
                if not self.options.nolog: # write reply to log
                    self.report.write("- DNS [A]: " + str(rdata) + "\n")
                if self.options.json: # write json report
                    self.json_report.write(json.dumps(['DNS',{'A': str(rdata)}], separators=(',', ':')))
                if not self.options.scanner: # try port-scanner on DNS-A records
                    if not self.options.scandns:
                        scanner = self.scan_target(rdata)
                        print "-"*12
                        if not self.options.nolog: # write reply to log
                            self.report.write("-"*12 + "\n")
        except: # pass when no A records
            pass
        try:
            answers = resolver.query(target, "NS") # NS records
            for rdata in answers:
                rdata = str(rdata) # NS records end with "." (removing)
                rdata = rdata[:-1]
                data = socket.gethostbyname_ex(rdata) # reverse resolve NS server
                for ip in data[2]:
                    self.ip = ip
                print "- [NS]:", rdata, "(" + str(self.ip) + ")"
                if not self.options.nolog: # write reply to log
                    self.report.write("- DNS [NS]: " + str(rdata) + "(" + str(self.ip) + ")" + "\n")
                if self.options.json: # write json report
                    self.json_report.write(json.dumps(['DNS',{'NS': str(rdata)}], separators=(',', ':')))
                if not self.options.scanner:
                    if not self.options.scandns:
                        if not self.options.scanns: # try port-scanner on DNS-NS records
                            scanner = self.scan_target(rdata)
                            print "-"*12
                            if not self.options.nolog: # write reply to log
                                self.report.write("-"*12 + "\n")
        except: # pass when no NS records
            pass
        try:
            answers = resolver.query(target, "MX") # MX records
            for rdata in answers:
                rdata = str(rdata) # MX records end with "." (removing)
                rdata = rdata[:-1]
                rdata = rdata.replace("10 ", "") # MX records start with a preference number like "10 " (removing)
                data = socket.gethostbyname_ex(rdata) # reverse resolve MX server (mailserver)
                for ip in data[2]:
                    self.ip = ip
                print "- [MX]:", rdata, "(" + str(self.ip) + ")"
                if not self.options.nolog: # write reply to log
                    self.report.write("- DNS [MX]: " + str(rdata) + "(" + str(self.ip) + ")" + "\n")
                if self.options.json: # write json report
                    self.json_report.write(json.dumps(['DNS',{'MX': str(rdata)}], separators=(',', ':')))
                if not self.options.scanner: # try port-scanner on DNS-MX records
                    if not self.options.scandns:
                        if not self.options.scanmx:
                            scanner = self.scan_target(rdata)
                            print "-"*12
                            if not self.options.nolog: # write reply to log
                                self.report.write("-"*12 + "\n")
        except: # pass when no MX records
            pass
        try:
            answers = resolver.query(target, "TXT") # TXT records
            for rdata in answers:
                print "- [TXT]:", rdata
                if not self.options.nolog: # write reply to log
                    self.report.write("- DNS [TXT]: " + str(rdata) + "\n")
                if self.options.json: # write json report
                    self.json_report.write(json.dumps(['DNS',{'TXT': str(rdata)}], separators=(',', ':')))
            print "-"*12
            if not self.options.nolog: # write reply to log
                self.report.write("-"*12 + "\n")
        except: # pass when no TXT records
            pass
    def run(self, opts=None):
        if opts:
            options = self.create_options(opts)
            self.set_options(options)
        options = self.options
        if not self.options.gui: # print banner when no gui
            self.banner()
        # check tor connection
        if options.checktor:
            try:
                print("\nSending request to: https://check.torproject.org\n")
                tor_reply = urllib2.urlopen("https://check.torproject.org").read()
                your_ip = tor_reply.split('<strong>')[1].split('</strong>')[0].strip()
                if not tor_reply or 'Congratulations' not in tor_reply:
                    print("It seems that Tor is not properly set.\n")
                    print("Your IP address appears to be: " + your_ip + "\n")
                else:
                    print("Congratulations! Tor is being used properly :-)\n")
                    print("Your IP address appears to be: " + your_ip + "\n")
            except:
                print("Cannot reach the Tor checker! Are you correctly connected?\n")
            sys.exit(2)
        # check/update for latest stable version
        if options.update:
            try:
                print("\nTrying to update automatically to the latest stable version\n")
                Updater()
            except:
                print("\nSomething went wrong! You should clone Orb manually with:\n")
                print("$ git clone https://github.com/epsylon/orb\n")
            sys.exit(2)
        # logging / reporting
        if not options.nolog: # generate log
            self.generate_report()
        if options.json: # generate json report
            self.generate_json()
        # footprinting (only passive)
        if options.passive:
            self.options.scanner = True # do not scan ports on machines
            self.options.scandns = True # do not scan on DNS records
            self.options.scanns = True # do not scan on NS records
            self.options.scanmx = True # do not scan on MX records
            self.options.banner = True # no banner grabbing
            self.options.cve = True # no CVE
            self.options.cvs = True # no CVS
        # footprinting (only active)
        if options.active:
            self.options.public = True # do not search for public records
            self.options.financial = True # do not search for financial records
            self.options.deep = True # do not search for deep web records
            self.options.social = True # do not search for social records
            self.options.news = True # do not search for news records
            self.options.whois = True # do not extract whois information
            self.options.subs = True # do not try to discover subdomains (with passive methods) / bruteforce in next release? :)
        # footprinting (full) / by default
        if options.target:
            # public records / deepweb, financial, social, news ...
            if not options.public: # search for public records
                print "="*60
                print "[Info] - Retrieving general data ..."
                print "="*60
                if not options.social: # retrieve social urls
                    if not options.socialf: # try default list
                        f = open('core/sources/social.txt')
                    else: # extract social links from list provided by user
                        try:
                            f = open(options.socialf)
                        except:
                            if os.path.exists(options.socialf) == True:
                                print '[Error] - Cannot open:', options.socialf, "\n"
                                return
                            else:
                                print '[Error] - Cannot find:', options.socialf, "\n"
                                return
                    self.socials = f.readlines()
                    self.socials = [ social.replace('\n','') for social in self.socials ]
                    f.close()
                if not options.news: # retrieve news urls
                    if not options.newsf: # try default list
                        f = open('core/sources/news.txt')
                    else: # extract news links from list provided by user
                        try:
                            f = open(options.newsf)
                        except:
                            if os.path.exists(options.newsf) == True:
                                print '[Error] - Cannot open:', options.newsf, "\n"
                                return
                            else:
                                print '[Error] - Cannot find:', options.newsf, "\n"
                                return
                    self.news = f.readlines()
                    self.news = [ new.replace('\n','') for new in self.news ]
                    f.close()
                public = self.extract_public(options.target)
                if not options.nolog: # generate log
                    self.report.write("-"*22 + "\n")
            # domains / extract extensions from source provided (comma separated)
            print "="*60
            print "[Info] - Retrieving data by TLDs ..."
            print "="*60
            tld_record = False # tld records
            self.extract_wikipedia_record = False
            if options.ext: # provided by user
                extensions = options.ext.split(",")
                print "\n[Info] - Using extensions provided by user...\n"
            elif options.extfile: # from file
                try:
                    print "\n[Info] - Extracting extensions from file...\n"
                    f = open(options.extfile)
                    extensions = f.readlines()
                    extensions = [ ext.replace('\n','') for ext in extensions ]
                    f.close()
                    if not extensions:
                        print "[Error] - Cannot extract 'extensions' from file.\n"
                        return
                except:
                    if os.path.exists(options.extfile) == True:
                        print '[Error] - Cannot open:', options.extfile, "\n"
                        return
                    else:
                        print '[Error] - Cannot find:', options.extfile, "\n"
                        return
            else: # IANA (default) original + country (09/03/2016)
                print "\n[Info] - Using extensions supported by IANA...\n"
                f = open("core/sources/iana-exts.txt") # extract IANA list provided by default
                extensions = f.readlines()
                extensions = [ ext.replace('\n','') for ext in extensions ]
                f.close()
                if not extensions:
                    print "[Error] - Cannot extract 'IANA extensions' from file.\n"
                    return
            for e in extensions: # extract domain info and perform different tasks
                target = str(options.target + e)
                print "="*40
                print "[Info] - Trying TLD:", target
                print "="*40
                # public records (by extension)
                if not options.public: # search for public records
                    # clear previous data to reuse containers
                    self.url_links[:] = [] # clear a list / black magic!
                    self.top_ranked.clear() # clear top ranked dict
                    self.social_links.clear() # clear social dict
                    self.news_links.clear() # clear news dict
                    public = self.extract_public(target)
                # whois
                if not options.whois: # try to extract whois data
                    if options.verbose:
                        print "\n[Verbose] - Trying whois to: " + target + "\n"
                    whois = self.extract_whois(target)
                # subdomains
                if not options.subs: # try to discover subdomains on target domain
                    if options.verbose:
                        print "\n[Verbose] - Trying to resolve subdomains for:", target, "\n"
                    self.sub_links[:] = [] # clear subs list
                    try:
                        subdomains = self.search_subdomains(target)
                    except:
                        print "- No subdomains found using TLD:", target
                        if not options.nolog: # generate log
                            self.report.write("- Subdomains: None found using TLD provided: " + target + "\n\n")
                        if options.json: # generate json
                            self.json_report.write(json.dumps(['Subdomains',{target: 'no subdomains found'}], separators=(',', ':')))
                # ip
                print "="*14
                print "*IP*:"
                print "="*14
                if options.verbose:
                    print "\n[Verbose] - Trying to resolve IP for:", target, "\n"
                try:
                    ip = self.resolve_ip(target) # try to resolve an ip from target domain
                    tld_record = True
                except:
                    print "No IP found using TLD:", target
                    if not options.nolog: # generate log
                        self.report.write("- IP: No IP found using TLD provided: " + target + "\n\n")
                    if options.json: # generate json
                        self.json_report.write(json.dumps(['TLD',{target: 'no IP found'}], separators=(',', ':')))
                    tld_record = False
                # dns + scanning
                if not options.dns: # try to discover DNS records
                    print "="*14
                    print "*DNS records*:"
                    print "="*14
                    if options.verbose:
                        print "\n[Verbose] - Trying to resolve DNS records for:", target, "\n"
                    try:
                        dns = self.resolve_dns(target)
                    except:
                        print "- No DNS records found using TLD:", target
                        if not options.nolog: # generate log
                            self.report.write("- DNS: No DNS records found using TLD provided: " + target + "\n\n")
                        if options.json: # generate json
                            self.json_report.write(json.dumps(['DNS',{target: 'no DNS records found'}], separators=(',', ':')))
                # rest of scanning tasks (when ip != DNS[A])
                if not options.scanner and tld_record == True: # try port-scanner on IP
                    if not options.dns: # using DNS A
                        for Amachine in self.dns_Amachines:
                            if str(Amachine) == str(ip):
                                if not options.scandns: # pass when DNS was scanned
                                    pass
                                else:
                                    print "[Info] - Trying to discover open ports on:", ip, "\n"
                                    scanner = self.scan_target(ip)
                            else:
                                print "[Info] - Trying to discover open ports on:", ip, "\n"
                                scanner = self.scan_target(ip)
                    else: # only IP test
                        for ip in self.ips_scanner: # scan all ips found without DNS
                            if options.verbose:
                                print "\n[Verbose] - Trying to discover open ports on:", ip, "\n"
                            scanner = self.scan_target(ip)
                print "" # zen output extensions separator
                if not options.nolog:
                    self.report.write("-"*22 + "\n")
            if not options.nolog: # close log (.raw)
                self.report.close()
            if options.json: # close json
                self.json_report.close()
        # start web-gui
        if options.gui:
            host = '0.0.0.0' # local network
            port = 9999 # local port
            try:
                webbrowser.open('http://127.0.0.1:9999', new=1)
                tcpsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                tcpsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                tcpsock.bind((host, port))
                while True:
                    tcpsock.listen(4)
                    (clientsock, (ip, port)) = tcpsock.accept()
                    newthread = ClientThread(ip, port, clientsock)
                    newthread.start()
            except (KeyboardInterrupt, SystemExit):
                sys.exit()

if __name__ == "__main__":
    app = Orb()
    options = app.create_options()
    if options:
        app.set_options(options)
        app.run()
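# Minimal usage sketch (assumption: the CLI flags live in OrbOptions / options.py,
# which is not shown in this file, so '-t' for the target is illustrative, not confirmed):
#
#   $ python main.py -t target
#
# or, driving the class directly (mirrors the __main__ block above):
#
#   app = Orb()
#   opts = app.create_options(['-t', 'target'])  # hypothetical flag name
#   if opts:
#       app.set_options(opts)
#       app.run()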