main.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
try:
    import random
    import time
    import pyuser_agent
    # import json
    from os import system, name, getcwd
    import proxyscrape
    import requests
    from colorama import Fore
except ImportError:
    print('''
    [!] Error: Module(s) not found.
    [!] Please install the required modules via pip:
    [!]     pip install -r requirements.txt
    ''')
    exit(1)
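# The imports above imply this requirements.txt (package names assumed from
# the import names; requests needs the [socks] extra for SOCKS4 proxies):
#   pyuser_agent
#   proxyscrape
#   requests[socks]
#   colorama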
# Proxyscrape INIT
collector = proxyscrape.create_collector('collector-1', 'socks4')
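# The collector maintains a rotating pool of free SOCKS4 proxies scraped by
# proxyscrape; collector.get_proxy() in the parse loop below draws one per dork.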
# Global variables
urls = []
root = getcwd() + "/"
# UAs
UA = pyuser_agent.UA()
browsers = UA.list
browser = random.choice(browsers)
UA.browser = browser
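# NOTE: a random browser name is assigned to the UA object here, but the
# request in the parse loop below still sends a hardcoded User-Agent string.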
# Clear Screen
def cls():
    # 'nt' means Windows; anything else is assumed to be POSIX
    if name == 'nt':
        _ = system('cls')
    else:
        _ = system('clear')
print(Fore.MAGENTA + '''
╔╦╗╔═╗╦═╗╦╔═╔═╗╔═╗╦═╗╔═╗╔═╗
║║║ ║╠╦╝╠╩╗╠═╝╠═╣╠╦╝╚═╗║╣
═╩╝╚═╝╩╚═╩ ╩╩ ╩ ╩╩╚═╚═╝╚═╝
''')
print(Fore.BLUE + '''
--------------------------------------------------
 A proxyless parsing solution for your dork-lists
            Developer: @daturadev
==================================================
''')
print(Fore.RED + '''
PROVIDED FREE AND OPEN-SOURCE ON GITHUB, DO NOT GET SCAMMED!
''')
time.sleep(random.randint(1, 3))
cls()
# Grab dork file
path = input('''
[?] Input path to dork-list:
    Example: lib/dorks.txt
''')
file = root + path.strip()
cls()
# Parse Dorks
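# NOTE: results are appended to lib/results.txt, so a lib/ directory is
# assumed to exist next to the script (the example dork-list lives there too).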
with open(file, 'r') as dork_file, open(f'{root}lib/results.txt', 'a+') as results:
    for dork in dork_file:
        dork = dork.strip()
        if not dork:
            continue
        # Rotate proxies: draw a fresh SOCKS4 proxy from the collector per dork
        proxy = collector.get_proxy()
        ip = str(proxy.host)
        port = str(proxy.port)
        protocol = str(proxy.type)
        # requests keys proxies by target scheme; SOCKS support needs PySocks
        proxies = {
            "http": f"{protocol}://{ip}:{port}",
            "https": f"{protocol}://{ip}:{port}"
        }
        # Query the search API with the current dork, capture the JSON
        # response, append it to [urls], and persist it to lib/results.txt
        # Credit: https://jereze.com/code/image-search-api/
        r = requests.get("https://api.qwant.com/v3/search",
                         params={
                             'count': 10,
                             'q': dork,
                             't': 'web',
                             'safesearch': 'on',
                             'locale': 'en_US',
                             'offset': 0,
                             'device': 'desktop'
                         },
                         headers={
                             'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
                                           '(KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'
                         },
                         proxies=proxies)
        response = r.json()
        urls.append(response)
        results.write(f"{response}\n")
        print(Fore.GREEN + f"{response}\n")
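        # A hedged sketch of normalizing result URLs out of the JSON instead
        # of dumping the whole payload; the data->result->items->mainline
        # shape is an assumption about Qwant's response, not a documented
        # contract. Uncomment to try:
        # for group in response.get('data', {}).get('result', {}).get('items', {}).get('mainline', []):
        #     for hit in group.get('items', []):
        #         if isinstance(hit, dict) and 'url' in hit:
        #             urls.append(hit['url'])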
# Helpful Article Credits:
# Structure/Manipulation of HTTP Requests & Responses | https://mleue.com/posts/simple-python-http-request-parser/