-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathscrapelist.py
More file actions
84 lines (63 loc) · 2.42 KB
/
scrapelist.py
File metadata and controls
84 lines (63 loc) · 2.42 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
import time
import requests
import pandas as pd
from bs4 import BeautifulSoup
import numpy as np
import util
import url_tools
URL_BASE = 'https://losangeles.craigslist.org/search/mcy?s='
URL_SUFFIX = '&hasPic=1&min_price=800&max_price=2500'
BIG_FILE = 'posts.json'
POSTS_PER_PAGE = 120
def add_new_posts(posts_df, posts_new):
    """Merge freshly scraped Craigslist result rows into the posts DataFrame.

    Args:
        posts_df: DataFrame with columns 'title', 'id', 'date', 'price';
            'id'/'date'/'price' hold per-sighting lists for each title.
        posts_new: iterable of bs4 result-row tags (each exposes
            ['data-pid'] plus title/price/date sub-elements).

    Returns:
        The updated DataFrame. Titles containing 'scooter' are skipped;
        a title already present appends its new id/date/price to the
        existing row's lists instead of creating a duplicate row.
    """
    def get_post_id(post):
        # Craigslist's unique per-post identifier.
        return post['data-pid']

    def get_post_title(post):
        return post.find(name='a', attrs={'class': 'result-title hdrlnk'}).text

    def get_post_price(post):
        # [1:] strips the leading '$' from the displayed price.
        return post.find(name='span', attrs={'class': 'result-price'}).text[1:]

    def get_post_date(post):
        return post.find(name='time', attrs={'class': 'result-date'})['datetime']

    def filter_out_post_by_title(title):
        # Motorcycles only: drop anything advertised as a scooter.
        return 'scooter' in title.lower()

    for post in posts_new:
        title = get_post_title(post)
        # Filter first; no need to extract the other fields for skipped posts.
        if filter_out_post_by_title(title):
            continue
        post_id = get_post_id(post)
        date = get_post_date(post)
        price = get_post_price(post)
        if title not in posts_df['title'].values:
            # New listing: start each tracked field as a one-element list
            # so later sightings can be appended in place.
            row = pd.DataFrame([{'title': title,
                                 'id': [post_id],
                                 'date': [date],
                                 'price': [price]}])
            # DataFrame.append was removed in pandas 2.0; pd.concat is the
            # supported replacement.
            posts_df = pd.concat([posts_df, row], ignore_index=True)
        else:
            # Seen before (e.g. a re-post): record the new sighting on the
            # existing row by mutating its lists in place.
            index = np.where(posts_df['title'] == title)[0][0]
            posts_df.loc[index, 'id'].append(post_id)
            posts_df.loc[index, 'date'].append(date)
            posts_df.loc[index, 'price'].append(price)
    return posts_df
def main():
    """Scrape Craigslist motorcycle listings page by page and persist them.

    Loads the accumulated posts from BIG_FILE, walks result pages in
    POSTS_PER_PAGE increments until an empty page comes back, merges each
    page's rows via add_new_posts, then writes everything back to BIG_FILE
    as JSON.
    """
    posts = util.load_json_to_dataframe(BIG_FILE)
    builder = url_tools.url_builder
    offset = 0
    builder.starting_post_num = offset
    while True:
        page = requests.get(builder.get_url())
        soup = BeautifulSoup(page.text, 'html.parser')
        rows = soup.find_all(attrs={'class': 'result-row'})
        if not rows:
            # An empty result page means we've walked past the last listing.
            break
        posts = add_new_posts(posts, rows)
        offset += POSTS_PER_PAGE
        builder.starting_post_num = offset
        # don't mind me, Craigslist. Just doing some normal, non-automated browsing
        time.sleep(5)
    posts.to_json(BIG_FILE)
if __name__ == "__main__":
thing = main()