#!/usr/bin/env python
from bs4 import BeautifulSoup
import sys
import urllib2
import re
import string

'''
This code is rough: it lists files from the first page of Google results only
and still needs work; see the paging sketch at the end of the file.
'''
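
# Example invocation (example.com is only a placeholder target domain):
#   python domain_GooglePDF.py example.com
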
def googlesearch(query, ext):
    print query
    # Base search URL (not used below)
    google = "https://www.google.co.in/search?filter=0&q=site:"
    # Unfiltered Google search, asking for up to 100 results per page
    getrequrl = "https://www.google.co.in/search?filter=0&num=100&q=%s&start=" % (query)
    # Browser-like headers so Google returns the regular HTML results page
    hdr = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
           'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
           'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
           'Accept-Encoding': 'none',
           'Accept-Language': 'en-US,en;q=0.8',
           'Connection': 'keep-alive'}
    req = urllib2.Request(getrequrl, headers=hdr)
    response = urllib2.urlopen(req)
    # Fall back to UTF-8 when Google does not declare a charset
    encoding = response.headers.getparam('charset') or 'utf-8'
    data = response.read().decode(encoding)
    # Crude HTML clean-up: drop markup and URL punctuation so that plain
    # filenames are left behind for the regex below
    data = re.sub('<b>', '', data)
    for e in ('>', '=', '<', '\\', '(', ')', '"', 'http', ':', '//'):
        data = string.replace(data, e, ' ')
    # Match anything that looks like "<name>.<ext>"
    r1 = re.compile(r'[-_.a-zA-Z0-9]*\.' + ext)
    res = r1.findall(data)
    if not res:
        print "No results were found"
    else:
        return res

domain = sys.argv[1]
print "\t\t\t[+] PDF Files\n"
# File types to hunt for on the target domain
list_ext = ["pdf", "xls", "docx"]
for ext in list_ext:
    # Google dork restricting results to the target domain and file type
    query = "site:%s+filetype:%s" % (domain, ext)
    results = googlesearch(query, ext)
    if results:
        # De-duplicate before printing
        results = set(results)
        for result in results:
            # Strip list-item markup that survives the crude clean-up above
            result = re.sub('<li class="first">', '', result)
            result = re.sub('</li>', '', result)
            print result
        print "\n"
print "====================\n"