import subprocess
import sys
from posixpath import normpath
from urllib.parse import urljoin, urlparse, urlunparse

import requests
from bs4 import BeautifulSoup

# Fetch a web page and return its text
def GetHtml(url):
    try:
        r = requests.get(url, timeout=30)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException:
        return "Exception raised: failed to fetch the page!"
# Prompt for the target URL
def GetUrl():
    # url = "http://www.baidu.com"
    return input("Enter the URL to test: ")

# Parse the fetched page text into an HTML tree
def TransHtml(txt):
    return BeautifulSoup(txt, 'html.parser')
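# e.g. TransHtml('<a href="/x">x</a>').find_all('a')[0].get('href') returns '/x'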
# Resolve a (possibly relative) link against a base URL and normalize its path
def LinkUrl(base, url):
    a_url = urljoin(base, url)
    arr = urlparse(a_url)
    path = normpath(arr.path)
    return urlunparse((arr.scheme, arr.netloc, path, arr.params, arr.query, arr.fragment))
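# For reference (verifiable with urllib alone):
#   LinkUrl("http://example.com/a/", "../b?x=1") -> "http://example.com/b?x=1"
#   LinkUrl("http://example.com/a/", "c")        -> "http://example.com/a/c"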
# Run sqlmap against the href of every <a> tag on the page
# (the commented-out lines below instead collected the links into test.txt)
def GetLink(obj, url):
    with open('test.txt', 'w') as f:  # only the commented-out variant below writes to this file
        # Find every <a> tag; skip tags without an href
        for link in obj.find_all('a'):
            hf = link.get('href')
            if hf is None:
                continue
            new_url = LinkUrl(url, hf)
            # Build the sqlmap command line; sqlmap's output is appended to result.txt.
            # Note: shell=True with an untrusted URL allows shell injection.
            cmd = "sqlmap -u " + new_url + " --batch >>result.txt"
            subprocess.run(cmd, shell=True)
            # if hf.startswith('http'):
            #     f.write(hf + '\n')
            #     continue
            # else:
            #     f.write(url + '/' + hf + '\n')
# Alternative: collect the links into test.txt (see the commented-out writes
# above), then let sqlmap scan the whole list and probe any forms it finds.
# Left disabled so it does not fire at import time:
# subprocess.run('sqlmap -m test.txt --batch --forms', shell=True)
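# A minimal sketch of that batch variant, assuming the sqlmap CLI is on PATH
# (BatchScan is a new helper name, not part of the original script):
def BatchScan(obj, url):
    with open('test.txt', 'w') as f:
        for link in obj.find_all('a'):
            hf = link.get('href')
            if hf:
                f.write(LinkUrl(url, hf) + '\n')
    # -m reads targets from a file; --forms tests any forms found on them
    subprocess.run('sqlmap -m test.txt --batch --forms', shell=True)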
# Read a line of text
# class action():
#     def __init__(self) -> None:
#         super().__init__()

# Menu
# class menu():
#     def __init__(self):
#         self.action = action()
#         self.choices = {
#             "1": GetHtml(),
#             "2":
#         }
# Menu dispatch
def choices():
    op = int(input("Enter your choice: "))
    if op == 1:
        # Automatic test: crawl the page and run sqlmap on every link
        url = GetUrl()
        demo = GetHtml(url)
        soup = TransHtml(demo)
        GetLink(soup, url)
    elif op == 2:
        # Manual test: fetch the page, then pass user-supplied arguments to sqlmap
        url = GetUrl()
        demo = GetHtml(url)
        soup = TransHtml(demo)
        userinput = input("Enter sqlmap arguments: ")
        cmd = "sqlmap " + userinput
        subprocess.run(cmd, shell=True)
    elif op == 3:
        sys.exit()

def display_menu():
    print("1. Automatic test")
    print("2. Manual test")
    print("3. Exit")
    choices()
# Entry point: keep showing the menu until the user picks 3 (exit)
if __name__ == "__main__":
    while True:
        display_menu()
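# Usage sketch (assumes Python 3 with requests and beautifulsoup4 installed,
# and the sqlmap CLI on PATH; "scanner.py" is a placeholder file name):
#   $ pip install requests beautifulsoup4
#   $ python scanner.py
#   1. Automatic test
#   2. Manual test
#   3. Exit
#   Enter your choice: 1
#   Enter the URL to test: http://testphp.vulnweb.com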