webcrawler
#!/usr/bin/env python3
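"""
A web crawler for the Fakebook site at project2.5700.network. It logs in over a raw
TLS socket speaking HTTP/1.1, then crawls /fakebook pages collecting secret flags.

Usage: ./webcrawler <username> <password>
"""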
import socket
import sys
import ssl
from collections import deque
from html.parser import HTMLParser
# tracks unique URLs already crawled
visited_links = set()
# a queue to track URLs to be crawled
crawl_pages = deque()
# set of unique secret flags found in the pages
secret_flags = set()
# holds the csrfmiddlewaretoken scraped from the login form
token_list = list()
class FakebookHTMLParser(HTMLParser):
"""
The FakebookHTMLParser extends the HTML Parser to parse through the response for 'a'/'h2'/'input' tags in search of
more URLs and/or secret flags respectively.
It also extracts the content of the 'h2' flags to get the content in between the tag i.e. the secret flag.
"""
    is_h2 = False  # True while inside an <h2> tag that holds a secret flag
    acc_data = ''  # accumulates the text seen inside that <h2>
    def handle_starttag(self, tag, attrs):
        attr_map = dict(attrs)
        if tag == 'a':  # only the link tags can point at more pages to crawl
            link = attr_map.get('href', '')
            if link and link.startswith("/fakebook"):
                crawl_pages.append(link)  # add to the queue of URLs to be crawled
        elif tag == 'h2':  # secret flags appear inside h2 tags marked secret_flag
            if 'secret_flag' in (attr_map.get('class') or ''):
                self.is_h2 = True
        elif tag == 'input':  # the login form carries the CSRF token in a hidden input
            if attr_map.get('name') == 'csrfmiddlewaretoken':
                token_list.append(attr_map.get('value', ''))
    def handle_endtag(self, tag):
        if tag == 'h2':  # after finding the end tag, stop accumulating text
            self.is_h2 = False
            if self.acc_data != '':  # text of the form "FLAG: <value>" was collected
                flag = self.acc_data.split()
                f = flag[1]  # the flag value follows the "FLAG:" label
                if f not in secret_flags:
                    secret_flags.add(f)  # add the secret flag to the set of flags
                    print(f)
                self.acc_data = ''  # reset for the next secret flag
    def handle_data(self, data):  # collects the text content inside an h2 tag
if self.is_h2:
self.acc_data += data
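# A minimal sketch of how the parser is driven (the HTML snippet is hypothetical,
# for illustration only):
#
#   parser = FakebookHTMLParser()
#   parser.feed("<a href='/fakebook/415/'>friend</a>"
#               "<h2 class='secret_flag' style='color:red'>FLAG: 0a1b2c3d</h2>")
#   # '/fakebook/415/' is queued in crawl_pages, and '0a1b2c3d' is printed and
#   # recorded in secret_flags.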
def parse_cmd_line():
    """Parses the command line arguments for username and password. Exits with an error if either is missing."""
    if len(sys.argv) < 2:
        sys.exit("Please provide an appropriate user name.")
    if len(sys.argv) < 3:
        sys.exit("Please provide an appropriate password.")
    return sys.argv[1], sys.argv[2]
def create_socket():
    """Creates a TLS-wrapped socket connected to the HTTPS server."""
    port = 443
    host_name = 'project2.5700.network'
    # build the connection and wrap it in TLS
    try:
        context = ssl.create_default_context()
        sock = socket.create_connection((host_name, port))
        return context.wrap_socket(sock, server_hostname=host_name)
    except socket.error:
        sys.exit("Connection error.")
def send_get_request(path, sock, host, cookie1=None, cookie2=None):
    """Sends a GET request to the server with the necessary header fields over HTTP/1.1.
    `host` is the complete Host header line."""
    if cookie1 is None and cookie2 is None:  # no cookies given
        msg = f'GET {path} HTTP/1.1\r\n' \
              f'{host}\r\n\r\n'
    elif cookie2 is None:  # only cookie1 given
        msg = f'GET {path} HTTP/1.1\r\n' \
              f'{host}\r\n' \
              f'Cookie: {cookie1}\r\n\r\n'
    else:  # both cookies given
        msg = f'GET {path} HTTP/1.1\r\n' \
              f'{host}\r\n' \
              f'Cookie: {cookie1}; {cookie2}\r\n\r\n'
    sock.sendall(msg.encode())  # sendall guarantees the whole request is written
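# For illustration (cookie value hypothetical), the call
# send_get_request("/fakebook/", sock, "Host: project2.5700.network", cookie1="sessionid=abc")
# writes the following bytes on the wire:
#
#   GET /fakebook/ HTTP/1.1\r\n
#   Host: project2.5700.network\r\n
#   Cookie: sessionid=abc\r\n\r\n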
def receive_msg(sock):
    """Receives the response in a loop, stopping on an empty body (Content-Length: 0)
    or once the closing </html> tag arrives."""
    msg = sock.recv(4096).decode()
    length = getContent_length(msg)
    # pages are assumed to end with '</html>\n'; an empty recv means the server closed
    while length != "0" and not msg.endswith('</html>\n'):
        chunk = sock.recv(4096).decode()
        if not chunk:
            break
        msg += chunk
    return msg
def getContent_length(msg):
    """Extracts the Content-Length value from the response header, or None if absent."""
    if "Content-Length:" in msg:
        m = msg.split()
        idx = m.index("Content-Length:")
        return m[idx + 1]
    return None
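# For example, a header block containing "Content-Length: 2748" splits into whitespace-
# separated tokens, so "2748" is the token right after "Content-Length:". This assumes
# the literal header name never appears earlier in the message as body text.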
def cookie_jar(msg):
    """Stores the session and/or the csrf cookies. Returns a single cookie string, or a
    (first, second) pair when the response sets two; callers must know which to expect."""
    first_cookie = None
    # break msg into three parts - header, separator, body - and keep only the header
    header = msg.partition('\r\n\r\n')[0]
    # search the header for Set-Cookie lines
    for h in header.split('\r\n'):
        if h.startswith("Set-Cookie: "):
            if first_cookie is not None:  # if there is already one cookie then grab the second
                second_cookie = h.split()[1].strip(";")
                return first_cookie, second_cookie
            first_cookie = h.split()[1].strip(";")
    return first_cookie  # only one cookie obtained
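# For example (cookie value hypothetical), a header line such as
#   Set-Cookie: csrftoken=Xy12Ab; expires=...; Path=/
# is stored as the cookie string "csrftoken=Xy12Ab".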
def create_login_body(username, password, token):
"""Creating the body of the user's login POST request"""
return f'username={username}&' \
f'password={password}&' \
f'csrfmiddlewaretoken={token}&' \
'next=/fakebook'
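# For example, create_login_body("bob", "hunter2", "t0k3n") returns (values hypothetical):
#   username=bob&password=hunter2&csrfmiddlewaretoken=t0k3n&next=/fakebook
# The values are assumed to be URL-safe; credentials containing '&' or '=' would need
# percent-encoding (e.g. via urllib.parse.quote) before being placed in the body.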
def login_user(sock, path, host, body_len, body, cookie1, cookie2):
"""POST request to log user into fakebook"""
post_msg = f'POST {path} HTTP/1.1\r\n' \
f'{host}\r\n' \
'Content-Type: application/x-www-form-urlencoded\r\n' \
f'Content-Length: {body_len}\r\n' \
f'Cookie: {cookie2}; {cookie1}\r\n\r\n' \
f'{body}'
    sock.sendall(post_msg.encode())
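# The resulting request looks roughly like this (header values hypothetical):
#
#   POST /accounts/login/?next=/fakebook/ HTTP/1.1\r\n
#   Host: project2.5700.network\r\n
#   Content-Type: application/x-www-form-urlencoded\r\n
#   Content-Length: 78\r\n
#   Cookie: csrftoken=...; sessionid=...\r\n\r\n
#   username=...&password=...&csrfmiddlewaretoken=...&next=/fakebook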
def start_crawling(msg, sock, host, cookie3, cookie4):
"""
Implements the basic web crawler for this program.
We use the HTML Parser object to parse through the current URL in search for more URLs and/or secret flags until all
secret flags are found for the user.
Also accounts for and appropriately handles different errors received on parsing through pages.
"""
parser = FakebookHTMLParser()
# feed the response to get the first set of URLs in the queue
parser.feed(msg)
# keep crawling until all flags are found or queue is empty
while len(crawl_pages) != 0:
# get the first site to be visited
        curr = crawl_pages.popleft()
if curr not in visited_links: # if URL has not been visited
send_get_request(curr, sock, host, cookie1=cookie3, cookie2=cookie4)
msg = receive_msg(sock)
error = handle_error(msg)
            if error == 2:  # page is okay, fall through and parse it
                pass
elif error == 4: # abandon URL
visited_links.add(curr)
continue
elif error == 5: # add it back to the queue to be requested again
crawl_pages.append(curr)
continue
elif error == 3: # grab new URL and request again
redirected_url = handle_err3(msg)
# add redirected URL to queue if location is present
if redirected_url != "":
crawl_pages.append(redirected_url)
# feed the msg to parser to search for more links and secret flags
parser.feed(msg)
# after the site has been parsed, add it to the set of visited links
visited_links.add(curr)
            # stop once all 5 expected secret flags have been found
            if len(secret_flags) == 5:
break
def handle_err3(msg):
"""
This method is designed to handle error 301 which is Moved Permanently: This is known as an HTTP redirect.
The method extracts the URL from the server response and sends it back to the crawler.
"""
# break msg into three parts - header, spacing, body
msg = msg.partition('\r\n\r\n')
# store only the header
header = msg[0]
redirect_url = ""
# search for redirected link
for h in header.split('\r\n'):
if h.startswith("Location: "):
redirect_url = h.split()[1]
return redirect_url
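# For example, a header line such as (URL hypothetical)
#   Location: http://project2.5700.network/fakebook/500/
# yields that URL for re-queuing. The crawl queue otherwise holds relative paths, so an
# absolute Location URL is assumed to point back at the same host.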
def handle_error(msg):
    """
    Returns a code value indicating the type of response so that it can be handled.
    The status codes are:
    200 - OK and 302 - Found: the page can be parsed as-is.
    301 - Moved Permanently: an HTTP redirect; follow the Location header.
    403 - Forbidden and 404 - Not Found: abandon the URL.
    500 - Internal Server Error and 501 - Not Implemented: retry the request.
    """
    status = msg.split()[1]
    if status == "200" or status == "302":
        return 2
    elif status == "301":
        return 3
    elif status == "403" or status == "404":
        return 4
    elif status == "500" or status == "501":
        return 5
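# For example, a response whose status line starts "HTTP/1.1 301 Moved Permanently"
# gives msg.split()[1] == "301", which maps to code 3 (follow the redirect). Unlisted
# statuses fall through and return None, which the crawler treats like a normal page.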
def main():
host = "Host: project2.5700.network"
root_path = "/"
fakebook = "/fakebook/"
login_path = "/accounts/login/?next=/fakebook/"
# Parse the username and password from the command line
username, password = parse_cmd_line()
# Create TLS wrapped socket
sock = create_socket()
# get the root page
send_get_request(root_path, sock, host)
# check the received message
msg = receive_msg(sock)
    # store the session cookie
    cookie1 = cookie_jar(msg)
    # send a get request for the login page
    send_get_request(login_path, sock, host, cookie1=cookie1)
# check message for login page
msg = receive_msg(sock)
    # retrieve the csrf cookie and the csrfmiddlewaretoken
cookie2 = cookie_jar(msg)
parser = FakebookHTMLParser()
parser.feed(msg)
token = token_list[0]
# creating login body for user
body = create_login_body(username, password, token)
body_len = len(body.encode())
# login user
    login_user(sock, login_path, host, body_len, body, cookie1, cookie2)
msg = receive_msg(sock)
# store new cookies
cookie3, cookie4 = cookie_jar(msg)
    # send a request for the user's fakebook page
send_get_request(fakebook, sock, host, cookie1=cookie3, cookie2=cookie4)
msg = receive_msg(sock)
# start our crawler
start_crawling(msg, sock, host, cookie3, cookie4)
# close the socket - program end
sock.close()
if __name__ == "__main__":
main()