日韩性视频-久久久蜜桃-www中文字幕-在线中文字幕av-亚洲欧美一区二区三区四区-撸久久-香蕉视频一区-久久无码精品丰满人妻-国产高潮av-激情福利社-日韩av网址大全-国产精品久久999-日本五十路在线-性欧美在线-久久99精品波多结衣一区-男女午夜免费视频-黑人极品ⅴideos精品欧美棵-人人妻人人澡人人爽精品欧美一区-日韩一区在线看-欧美a级在线免费观看

歡迎訪問 生活随笔!

生活随笔

當前位置: 首頁 > 编程语言 > python > 内容正文

python

python网站后台_Python 网站后台扫描脚本

發布時間: 2023/12/10 python 34 豆豆
生活随笔 收集整理的這篇文章主要介紹了 python网站后台_Python 网站后台扫描脚本 小編覺得挺不錯的,現在分享給大家,幫大家做個參考.

Python 網站后臺掃描腳本

1

2

3

4

5

6

7

8

9

10

11

12

13

14

15

16

17

18

19

20

21

22

23

24

25

26

27

28

29

30

31

32

33

34

#!/usr/bin/python
# coding=utf-8
"""Scan a website for backend/admin paths taken from a wordlist file.

For every line of the wordlist, the script requests ``base_url + word`` and
records the URL when the server answers HTTP 200 or 301.
"""
import sys
import time
import urllib.request
import urllib.error

url = "http://123.207.123.228/"
# Wordlist path. The original opened a module-level file handle here that was
# never read and never closed (resource leak); keep just the path and open it
# lazily inside search_url with a context manager.
txt = r"C:\Users\ww\Desktop\houtaiphp.txt"
open_url = []   # URLs that answered 200/301
all_url = []    # every URL that was probed


def search_url(url, txt):
    """Probe base-url + each wordlist entry; collect hits in ``open_url``.

    :param url: base URL, must end with ``/``
    :param txt: path to the wordlist file (one path fragment per line)
    """
    with open(txt, "r") as f:
        for each in f:
            each = each.rstrip("\n")
            urllist = url + each
            all_url.append(urllist)
            print("查找:" + urllist + '\n')
            try:
                # urllib.urlopen is Python 2 only; urllib.request.urlopen is
                # the Python 3 equivalent.
                req = urllib.request.urlopen(urllist)
                # 200 = page exists, 301 = permanent redirect (often a dir).
                # The original called getcode() twice with two separate ifs.
                if req.getcode() in (200, 301):
                    open_url.append(urllist)
            except (urllib.error.URLError, OSError):
                # Unreachable hosts / HTTP errors are deliberately skipped.
                pass


def main():
    """Run the scan and print every discovered backend URL."""
    search_url(url, txt)
    if open_url:
        print("后台地址:")
        for each in open_url:
            print("[+]" + each)
    else:
        print("没有找到网站后台")


if __name__ == "__main__":
    main()

1

2

3

4

5

6

7

8

9

10

11

12

13

14

15

16

17

18

19

20

21

22

23

24

25

26

27

28

29

30

31

32

33

34

35

36

37

38

#!/usr/bin/python
# coding=utf-8
"""Backend-path scanner, second revision: the URL probing is split out of the
wordlist loop into ``handle_url`` so it can be reused/parallelised later."""
import sys
import time
import urllib.request
import urllib.error

url = "http://123.207.123.228/"
# Wordlist path; the original leaked an unused module-level file handle here.
txt = r"C:\Users\ww\Desktop\houtaiphp.txt"
open_url = []   # URLs that answered 200/301
all_url = []    # every URL that was probed


def search_url(url, txt):
    """Build one candidate URL per wordlist line and probe it immediately.

    :param url: base URL, must end with ``/``
    :param txt: path to the wordlist file
    """
    with open(txt, "r") as f:
        for each in f:
            each = each.rstrip("\n")
            urllist = url + each
            all_url.append(urllist)
            handle_url(urllist)


def handle_url(urllist):
    """Request ``urllist`` and record it in ``open_url`` on HTTP 200/301."""
    print("查找:" + urllist + '\n')
    try:
        # Python 3 replacement for the Python-2-only urllib.urlopen.
        req = urllib.request.urlopen(urllist)
        if req.getcode() in (200, 301):
            open_url.append(urllist)
    except (urllib.error.URLError, OSError):
        # Dead hosts / error responses are skipped on purpose.
        pass


def main():
    """Run the scan and report every discovered backend URL."""
    search_url(url, txt)
    if open_url:
        print("后台地址:")
        for each in open_url:
            print("[+]" + each)
    else:
        print("没有找到网站后台")


if __name__ == "__main__":
    main()

師傅讓我多看看-->多線程

這里就加個多線程吧。

#!/usr/bin/python
# coding=utf-8
"""Multi-threaded backend-path scanner: collect all candidate URLs first,
then probe each one on its own thread and time the whole run."""
import sys
import time
import threading
import urllib.request
import urllib.error

url = "http://123.207.123.228/"
# Wordlist path; opened inside search_url (the original leaked a handle here).
txt = r"C:\Users\ww\Desktop\houtaiphp.txt"
open_url = []   # URLs that answered 200/301
all_url = []    # every candidate URL built from the wordlist
threads = []    # worker threads, kept so main() can join them


def search_url(url, txt):
    """Fill ``all_url`` with base-url + wordlist entries (no probing here).

    :param url: base URL, must end with ``/``
    :param txt: path to the wordlist file
    """
    with open(txt, "r") as f:
        for each in f:
            each = each.rstrip("\n")
            urllist = url + each
            all_url.append(urllist)


def handle_url(urllist):
    """Probe one URL; append it to ``open_url`` on HTTP 200/301."""
    print("查找:" + urllist + '\n')
    try:
        req = urllib.request.urlopen(urllist)
        if req.getcode() in (200, 301):
            # list.append is atomic under the GIL, so no lock is needed for
            # this shared list.
            open_url.append(urllist)
    except (urllib.error.URLError, OSError):
        pass


def main():
    """Spawn one thread per candidate URL, wait for all, then report."""
    search_url(url, txt)
    for each in all_url:
        t = threading.Thread(target=handle_url, args=(each,))
        threads.append(t)
        t.start()
    for t in threads:
        t.join()
    if open_url:
        print("后台地址:")
        for each in open_url:
            print("[+]" + each)
    else:
        print("没有找到网站后台")


if __name__ == "__main__":
    # time.clock() was removed in Python 3.8; perf_counter() is the
    # recommended wall-clock replacement for timing code.
    start = time.perf_counter()
    main()
    end = time.perf_counter()
    print("spend time is:%.3f seconds" % (end - start))

多線程和沒加線程的時間對比

--------------------------------------------------------------------------------------------------------------------------------------------------

利用zoomeye搜索

調用 ZoomEye API 獲取信息

主要涉及模塊urllib,json,os模塊。

# coding: utf-8
"""Query the ZoomEye API for hosts matching "phpmyadmin" and save their IPs.

Flow: log in (or reuse a cached access token), page through the host-search
endpoint, print each IP, and write the collected IPs to ``ip_list.txt``.
The original was Python 2 (``print`` statements, ``raw_input``,
``except Exception, e``); this is the Python 3 port.
"""
import os
import json
import requests

access_token = ''   # JWT returned by /user/login, cached in access_token.txt
ip_list = []        # IPs harvested from the search results


def login():
    """
    Prompt for username/password and log in to ZoomEye.

    Stores the returned token in the global ``access_token``; exits the
    process on any failure (bad credentials, network error).
    """
    user = input('[-] input : username :')
    passwd = input('[-] input : password :')
    data = {
        'username': user,
        'password': passwd,
    }
    data_encoded = json.dumps(data)  # API expects a JSON body
    try:
        r = requests.post(url='https://api.zoomeye.org/user/login', data=data_encoded)
        r_decoded = json.loads(r.text)
        global access_token
        access_token = r_decoded['access_token']
    except Exception:
        # Missing 'access_token' key or a transport error both mean the
        # login did not succeed.
        print('[-] info : username or password is wrong, please try again ')
        exit()


def saveStrToFile(path, content):
    """Write ``content`` to ``path``, overwriting any existing file."""
    # Parameters renamed from file/str to stop shadowing builtins.
    with open(path, 'w') as output:
        output.write(content)


def saveListToFile(path, items):
    """Write ``items`` to ``path``, one element per line."""
    s = '\n'.join(items)
    with open(path, 'w') as output:
        output.write(s)


def apiTest():
    """Page through ZoomEye host search results for "phpmyadmin".

    Reads the cached token from access_token.txt, then fetches up to 10
    pages (10 results each), appending every IP to ``ip_list``.
    """
    page = 1
    global access_token
    with open('access_token.txt', 'r') as token_file:
        access_token = token_file.read()
    # The token goes in the Authorization header with the JWT scheme.
    headers = {
        'Authorization': 'JWT ' + access_token,
    }
    while True:
        try:
            r = requests.get(
                url='https://api.zoomeye.org/host/search?query="phpmyadmin"&facet=app,os&page=' + str(page),
                headers=headers)
            r_decoded = json.loads(r.text)
            for x in r_decoded['matches']:
                print(x['ip'])
                ip_list.append(x['ip'])
            print('[-] info : count ' + str(page * 10))
        except KeyError:
            # No 'matches' key in the response: either the account exceeded
            # the API limit or the results ran out — stop either way.
            # (The original tested e.message == 'matches' for this.)
            print('[-] info : account was break, excceeding the max limitations')
            break
        except Exception as e:
            print('[-] info : ' + str(e))
        else:
            if page == 10:
                break
            page += 1


def main():
    """Log in if no cached token exists, then run the search and save IPs."""
    if not os.path.isfile('access_token.txt'):
        print('[-] info : access_token file is not exist, please login')
        login()
        saveStrToFile('access_token.txt', access_token)
    apiTest()
    saveListToFile('ip_list.txt', ip_list)


if __name__ == '__main__':
    main()

上面的腳本是搜索 phpmyadmin 的。搜索得到的 IP 會保存在同路徑下的 ip_list.txt 文件。

但是搜索到的 ip 并不是都能夠訪問的,所以這里寫了個識別 phpmyadmin 的腳本,判斷是否存在,是則輸出。

#!/usr/bin/python
# coding=utf-8
"""Check which IPs from ip_list.txt actually expose /phpmyadmin/index.php.

Ported from Python 2 (print statements) to Python 3.
"""
import sys
import time
import requests

# Browser-like request header so the probe is not rejected as a bot.
headers = {'User-Agent': "Mozilla/5.0 (Windows NT 6.3; Win64; x64; rv:56.0) Gecko/20100101 Firefox/56.0"}
open_url = []
all_url = []
payloa = 'http://'                  # URL scheme prefix prepended to each IP
payload = '/phpmyadmin/index.php'   # path suffix probed on each host


def search_url():
    """Build one phpMyAdmin candidate URL per IP and probe it immediately."""
    with open(r"C:\Users\ww\Desktop\ip_list.txt", "r") as f:
        for each in f:
            each = each.rstrip("\n")
            urllist = payloa + each + payload
            all_url.append(urllist)
            handle_url(urllist)


def handle_url(urllist):
    """Print ``urllist`` when it answers HTTP 200 (phpMyAdmin reachable)."""
    try:
        start_htm = requests.get(urllist, headers=headers)
        if start_htm.status_code == 200:
            print('*******************************************')
            print(urllist)
    except requests.RequestException:
        # Unreachable hosts are skipped silently — this is a best-effort scan.
        pass


if __name__ == "__main__":
    search_url()

加個多線程,畢竟工作量很大。

#!/usr/bin/python
# coding=utf-8
"""Multi-threaded phpMyAdmin checker: collect candidate URLs from
ip_list.txt, probe each on its own thread, and time the whole run.

Ported from Python 2 (print statements, time.clock) to Python 3.
"""
import sys
import time
import threading
import requests

# Browser-like request header so the probe is not rejected as a bot.
headers = {'User-Agent': "Mozilla/5.0 (Windows NT 6.3; Win64; x64; rv:56.0) Gecko/20100101 Firefox/56.0"}
open_url = []
all_url = []
threads = []                        # worker threads, joined in main()
payloa = 'http://'                  # URL scheme prefix prepended to each IP
payload = '/phpmyadmin/index.php'   # path suffix probed on each host


def search_url():
    """Fill ``all_url`` with one candidate URL per IP (no probing here)."""
    with open(r"C:\Users\ww\Desktop\ip_list.txt", "r") as f:
        for each in f:
            each = each.rstrip("\n")
            urllist = payloa + each + payload
            all_url.append(urllist)


def handle_url(urllist):
    """Print ``urllist`` when it answers HTTP 200 (phpMyAdmin reachable)."""
    try:
        start_htm = requests.get(urllist, headers=headers)
        if start_htm.status_code == 200:
            print('*******************************************')
            print(urllist)
    except requests.RequestException:
        # Dead hosts are skipped — best-effort scan.
        pass


def main():
    """Spawn one probe thread per candidate URL and wait for all of them."""
    search_url()
    for each in all_url:
        t = threading.Thread(target=handle_url, args=(each,))
        threads.append(t)
        t.start()
    for t in threads:
        t.join()


if __name__ == "__main__":
    # time.clock() was removed in Python 3.8; use perf_counter() for timing.
    start = time.perf_counter()
    main()
    end = time.perf_counter()
    print("spend time is %.3f seconds" % (end - start))

這下就方便了許多。

任重而道遠!

總結(jié)

以上是生活随笔為你收集整理的 python网站后台_Python 网站后台扫描脚本 的全部內容,希望文章能夠幫你解決所遇到的問題。

如果覺得生活随笔網站內容還不錯,歡迎將生活随笔推薦給好友。