Add a --ssl option to disable SSL verification if needed #32
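
This patch threads a new ssl flag from the command line through core.main and the crawler so that the requests calls can skip TLS certificate verification, which helps when scanning targets that use self-signed or otherwise invalid certificates. With the patch applied, a scan might be started like this (the target URL is an invented example):

    python pwnxss.py -u https://self-signed.example.com --ssl False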

Open · wants to merge 1 commit into master

lib/core.py: 9 changes (4 additions, 5 deletions)
@@ -154,16 +154,15 @@ def get_method(self):
         Log.info("URL is not an HTTP url, ignoring")
 
     @classmethod
-    def main(self,url,proxy,headers,payload,cookie,method=2):
+    def main(self,url,proxy,headers,payload,cookie,ssl,method=2):
 
         print(W+"*"*15)
         self.payload=payload
         self.url=url
 
         self.session=session(proxy,headers,cookie)
         Log.info("Checking connection to: "+Y+url)
         try:
-            ctr=self.session.get(url)
+            ctr=self.session.get(url,verify=ssl)
             self.body=ctr.text
         except Exception as e:
             Log.high("Internal error: "+str(e))
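
For context on the verify=ssl argument added above: requests accepts a verify keyword on every request, where True (the default) validates the server certificate against the CA store, False disables validation, and a string is treated as a path to a CA bundle. A minimal standalone sketch of the pattern this patch relies on (invented URL, not part of the PR):

    import requests
    import urllib3

    # Skipping verification makes urllib3 emit an InsecureRequestWarning
    # for every request; silencing it keeps scan output readable.
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

    session = requests.Session()
    # verify=False disables certificate validation for this request only;
    # verify=True (the default) would reject a self-signed certificate.
    response = session.get("https://self-signed.example.com", verify=False)
    print(response.status_code)

Note that the PR itself adds no such warning filter, so runs with --ssl False may print an InsecureRequestWarning for every request.
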
lib/crawler/crawler.py: 31 changes (16 additions, 15 deletions)
@@ -1,4 +1,5 @@
 import requests
+import requests
 from lib.helper.Log import *
 from lib.helper.helper import *
 from lib.core import *
@@ -7,47 +8,47 @@
 from multiprocessing import Process
 
 class crawler:
 
     visited=[]
 
     @classmethod
-    def getLinks(self,base,proxy,headers,cookie):
+    def getLinks(self,base,proxy,headers,cookie,ssl):
 
         lst=[]
 
         conn=session(proxy,headers,cookie)
-        text=conn.get(base).text
+        text=conn.get(base, verify=ssl).text
         isi=BeautifulSoup(text,"html.parser")
 
         for obj in isi.find_all("a",href=True):
             url=obj["href"]
 
             if urljoin(base,url) in self.visited:
                 continue
 
             elif url.startswith("mailto:") or url.startswith("javascript:"):
                 continue
             # :// will check if there any subdomain or any other domain but it will pass directory
             elif url.startswith(base) or "://" not in url :
                 lst.append(urljoin(base,url))
                 self.visited.append(urljoin(base,url))
 
         return lst
 
     @classmethod
-    def crawl(self,base,depth,proxy,headers,level,method,cookie):
+    def crawl(self,base,depth,proxy,headers,level,method,cookie,ssl):
 
-        urls=self.getLinks(base,proxy,headers,cookie)
+        urls=self.getLinks(base,proxy,headers,cookie,ssl)
 
         for url in urls:
             if url.startswith("https://") or url.startswith("http://"):
                 p=Process(target=core.main, args=(url,proxy,headers,level,cookie,method))
                 p.start()
                 p.join()
                 if depth != 0:
-                    self.crawl(url,depth-1,base,proxy,level,method,cookie)
+                    self.crawl(url,depth-1,base,proxy,level,method,cookie,ssl)
 
             else:
                 break
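
Because crawl recurses into every discovered page, the ssl flag has to be forwarded on each recursive call (as the hunk above does) or deeper crawl levels would silently fall back to the default verification behaviour. One gap worth noting: the unchanged Process(target=core.main, ...) call inside crawl still passes its old argument tuple, so with the new core.main signature the method value appears to land in the ssl parameter, meaning pages found by the crawler would not honour --ssl False. A condensed sketch of the propagation pattern itself (simplified and renamed for illustration; not the PR's code):

    from urllib.parse import urljoin

    import requests
    from bs4 import BeautifulSoup

    def get_links(session, base, ssl):
        # The ssl flag reaches every request the crawler makes.
        text = session.get(base, verify=ssl).text
        soup = BeautifulSoup(text, "html.parser")
        return [urljoin(base, a["href"]) for a in soup.find_all("a", href=True)]

    def crawl(session, base, depth, ssl, visited=None):
        visited = set() if visited is None else visited
        for url in get_links(session, base, ssl):
            if url in visited or not url.startswith(("http://", "https://")):
                continue
            visited.add(url)
            if depth > 0:
                # Forward ssl here; dropping it would make deeper levels
                # revert to verified requests regardless of the flag.
                crawl(session, url, depth - 1, ssl, visited)
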
pwnxss.py: 16 changes (10 additions, 6 deletions)
@@ -44,17 +44,21 @@ def start():
     pos_opt.add_argument("--proxy",default=None,metavar="",help="Set proxy (e.g. {'https':'https://10.10.1.10:1080'})")
     pos_opt.add_argument("--about",action="store_true",help="Print information about PwnXSS tool")
     pos_opt.add_argument("--cookie",help="Set cookie (e.g {'ID':'1094200543'})",default='''{"ID":"1094200543"}''',metavar="")
+    pos_opt.add_argument("--ssl",help="Put False to disable the ssl verification",default=True,metavar="")
 
     getopt=parse.parse_args()
+    if getopt.ssl == "false" or getopt.ssl == "False":
+        getopt.ssl = False
 
     print(logo)
     Log.info("Starting PwnXSS...")
     if getopt.u:
-        core.main(getopt.u,getopt.proxy,getopt.user_agent,check(getopt),getopt.cookie,getopt.method)
-        crawler.crawl(getopt.u,int(getopt.depth),getopt.proxy,getopt.user_agent,check(getopt),getopt.method,getopt.cookie)
+        core.main(getopt.u,getopt.proxy,getopt.user_agent,check(getopt),getopt.cookie,getopt.ssl,getopt.method)
+        crawler.crawl(getopt.u,int(getopt.depth),getopt.proxy,getopt.user_agent,check(getopt),getopt.method,getopt.cookie, getopt.ssl)
 
     elif getopt.single:
-        core.main(getopt.single,getopt.proxy,getopt.user_agent,check(getopt),getopt.cookie,getopt.method)
+        core.main(getopt.single,getopt.proxy,getopt.user_agent,check(getopt),getopt.cookie,getopt.ssl,getopt.method)
 
     elif getopt.about:
         print("""
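
One caveat in the option handling above: with default=True, getopt.ssl is a bool when the flag is omitted but a plain string when it is supplied, and only the spellings "false" and "False" disable verification. Any other value stays a string that requests would interpret as a CA-bundle path and fail on. If stricter input handling were wanted, a converter could normalize the value at parse time (a sketch of an alternative, not part of this PR):

    import argparse

    def str2bool(value):
        # Accept common spellings; reject everything else up front instead
        # of letting an arbitrary string reach requests' verify= parameter.
        if value.lower() in ("true", "1", "yes"):
            return True
        if value.lower() in ("false", "0", "no"):
            return False
        raise argparse.ArgumentTypeError("expected true/false, got %r" % value)

    parse = argparse.ArgumentParser()
    parse.add_argument("--ssl", type=str2bool, default=True, metavar="",
                       help="Put False to disable the ssl verification")

Since argparse applies type= only to values supplied on the command line, the bool default still passes through untouched.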