#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
UFONet - (DDoS botnet + DoS tool) via Web Abuse - 2013/2014/2015/2016/2017/2018 - by psy (epsylon@riseup.net)
You should have received a copy of the GNU General Public License along
with UFONet; if not, write to the Free Software Foundation, Inc., 51
Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import urllib, urllib2, ssl, random, re
from urlparse import urlparse
# Inspector spidering class
class Inspector(object):
def __init__(self,ufonet):
self.ufonet=ufonet
# set initial counters for objets
self.c_images = 0
self.c_mov = 0
self.c_webm = 0
self.c_avi = 0
self.c_swf = 0
self.c_mpg = 0
self.c_mpeg = 0
self.c_mp3 = 0
self.c_ogg = 0
self.c_ogv = 0
self.c_wmv = 0
self.c_css = 0
self.c_js = 0
self.c_xml = 0
self.c_php = 0
self.c_html = 0
self.c_jsp = 0
self.c_asp = 0
self.c_txt = 0
self.ctx = ssl.create_default_context() # creating context to bypass SSL cert validation (black magic)
self.ctx.check_hostname = False
self.ctx.verify_mode = ssl.CERT_NONE
def proxy_transport(self, proxy):
proxy_url = self.ufonet.extract_proxy(proxy)
proxy = urllib2.ProxyHandler({'https': proxy_url})
opener = urllib2.build_opener(proxy)
urllib2.install_opener(opener)
def inspecting(self, target):
# inspect HTML target's components sizes (ex: http://target.com/foo)
# [images, .mov, .webm, .avi, .swf, .mpg, .mpeg, .mp3, .ogg, .ogv,
# .wmv, .css, .js, .xml, .php, .html, .jsp, .asp, .txt]
biggest_files = {}
if target.endswith(""):
target.replace("", "/")
self.ufonet.user_agent = random.choice(self.ufonet.agents).strip() # suffle user-agent
headers = {'User-Agent' : self.ufonet.user_agent, 'Referer' : self.ufonet.referer} # set fake user-agent and referer
try:
if self.ufonet.options.proxy: # set proxy
self.proxy_transport(self.ufonet.options.proxy)
req = urllib2.Request(target, None, headers)
target_reply = urllib2.urlopen(req).read()
else:
req = urllib2.Request(target, None, headers)
target_reply = urllib2.urlopen(req, context=self.ctx).read()
except:
print('[Error] - Unable to connect to target\n')
return #sys.exit(2)
try: # search for image files
regex_img = []
regex_img1 = "
' # search on target's results using regex without quotations
#regex_img.append(regex_img3)
for regimg in regex_img:
pattern_img = re.compile(regimg)
img_links = re.findall(pattern_img, target_reply)
imgs = {}
for img in img_links:
if self.ufonet.options.proxy: # set proxy
self.proxy_transport(self.ufonet.options.proxy)
self.ufonet.user_agent = random.choice(self.ufonet.agents).strip() # suffle user-agent
headers = {'User-Agent' : self.ufonet.user_agent, 'Referer' : self.ufonet.referer} # set fake user-agent and referer
print('+Image found: ' + img)
try:
if img.startswith('http'):
if self.ufonet.options.proxy: # set proxy
self.proxy_transport(self.ufonet.options.proxy)
req = urllib2.Request(target_url, None, headers)
img_file = urllib2.urlopen(req).read()
else:
req = urllib2.Request(target_url, None, headers)
img_file = urllib2.urlopen(req, context=self.ctx).read()
else:
target_host = urlparse(target)
target_url = target_host.scheme + "://" + target_host.netloc + target_host.path
if not target_url.endswith('/'): # add "/" to end of target
target_url = target_url + "/"
if self.ufonet.options.proxy: # set proxy
self.proxy_transport(self.ufonet.options.proxy)
req = urllib2.Request(target_url + img, None, headers)
img_file = urllib2.urlopen(req).read()
else:
req = urllib2.Request(target_url + img, None, headers)
img_file = urllib2.urlopen(req, context=self.ctx).read()
size = len(img_file)
except:
print('[Error] - Unable to retrieve info from Image')
size = 0
imgs[img] = int(size)
print('(Size: ' + str(size) + ' Bytes)')
self.c_images = self.c_images + 1
print '-'*12
biggest_image = max(imgs.keys(), key=lambda x: imgs[x]) # search/extract biggest image value from dict
biggest_files[biggest_image] = imgs[biggest_image] # add biggest image to list
except: # if not any image found, go for next
pass
try: # search for .mov files
regex_mov = []
regex_mov1 = "