-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathzillowscraper.py
More file actions
337 lines (280 loc) · 14.5 KB
/
zillowscraper.py
File metadata and controls
337 lines (280 loc) · 14.5 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
import requests
import json
import pandas as pd
import time
import random
import undetected_chromedriver as uc
def get_zillow_session():
    """Launch an undetected Chrome browser, let Zillow set its anti-bot
    cookies (and give the user time to solve any captcha), then copy those
    cookies into a requests.Session for fast subsequent requests.

    Returns:
        requests.Session carrying Zillow's cookies and a User-Agent that
        matches the real browser used to obtain them.
    """
    options = uc.ChromeOptions()
    # Headless mode often triggers bot detection; run with a visible GUI.
    # options.add_argument('--headless')
    driver = uc.Chrome(options=options)
    try:
        print("Opening Zillow to solve challenge...")
        driver.get('https://www.zillow.com')
        # Wait for the anti-bot cookies to be set. If a captcha appears,
        # this window gives the user time to solve it manually.
        time.sleep(10)
        # Transfer the browser's cookies to a plain requests session.
        selenium_cookies = driver.get_cookies()
        session = requests.Session()
        for cookie in selenium_cookies:
            session.cookies.set(cookie['name'], cookie['value'])
        # Use the exact User-Agent of the live browser so the session's
        # fingerprint matches the cookies just captured.
        user_agent = driver.execute_script("return navigator.userAgent;")
        session.headers.update({"User-Agent": user_agent})
        print("Cookies captured and transferred to session.")
    finally:
        # Always close the browser — even if cookie transfer fails —
        # so a Chrome process is never leaked.
        driver.quit()
    return session
# The original request was captured as a curl command; its -b flag carried a
# raw cookie string. requests needs that string as a dict, so the helper
# below parses "k1=v1; k2=v2" cookie strings into a dictionary.
def parse_cookie_string(cookie_str):
    """Convert a raw 'k1=v1; k2=v2' cookie header string into a dict.

    Fragments without an '=' are skipped; values may themselves contain
    '=' (only the first one splits key from value).
    """
    fragments = cookie_str.split('; ')
    split_pairs = (frag.split('=', 1) for frag in fragments if '=' in frag)
    return {name: value for name, value in split_pairs}
# The search payload used below (inside scrape_data) was captured from the
# same curl command as a raw JSON string; each iteration parses it into a
# dict, updates the page number, and sends it via a PUT request.
def scrape_data(save_interval: int = 20) -> list:
    """
    Scrape property data from Zillow with rate limiting and error handling.

    Pages through Zillow's async-create-search-page-state endpoint for the
    Irving, TX region until an empty result list is returned, with jittered
    pacing between pages, exponential backoff on PerimeterX blocks, and
    periodic JSON backups of everything collected so far.

    Args:
        save_interval: Number of pages to collect before saving intermediate backup

    Returns:
        List of JSON responses from successful requests (one dict per page)
    """
    # NOTE(review): this headers dict is built but never attached to the
    # session or passed to any request below — the requests made via
    # `session` rely on the cookies/User-Agent that get_zillow_session()
    # installed. Kept as captured from the original curl command.
    headers = {
        'accept': '*/*',
        'accept-language': 'en-US,en;q=0.6',
        'content-type': 'application/json',
        'dnt': '1',
        'origin': 'https://www.zillow.com',
        'priority': 'u=1, i',
        'referer': 'https://www.zillow.com/irving-tx/',
        'sec-ch-ua': '"Not)A;Brand";v="8", "Chromium";v="138", "Brave";v="138"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"Linux"',
        'sec-fetch-dest': 'empty',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'same-origin',
        'sec-gpc': '1',
        'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/138.0.0.0 Safari/537.36',
    }
    aggregate_data = []  # one raw JSON response dict per successfully fetched page
    url = 'https://www.zillow.com/async-create-search-page-state'
    # Use Session for connection pooling
    session = get_zillow_session()
    print("Session established, waiting 2 seconds...")
    time.sleep(2)  # wait for the session to be established
    current_page = 1
    max_retries = 3
    base_delay = 1.0  # Base delay in seconds
    max_delay = 5.0  # Maximum delay in seconds

    def save_backup(data, page_num):
        """Save intermediate backup of collected data."""
        backup_file = f'housing_data_backup_page_{page_num}.json'
        try:
            with open(backup_file, 'w') as f:
                # NOTE(review): the file is opened before the emptiness
                # check, so empty `data` still leaves a zero-byte file.
                if len(data) > 0:
                    json.dump(data, f)
                else:
                    print("No data to save")
                    return
            print(f" Backup saved to {backup_file}")
        except Exception as e:
            # Best-effort backup: a failed save must not kill the scrape.
            print(f" Warning: Could not save backup: {e}")

    def is_blocked(response):
        """Check if response indicates bot blocking."""
        # PerimeterX signals a bot block with 403 plus its X-Px-Blocked header.
        return (response.status_code == 403 and
                response.headers.get('X-Px-Blocked') == '1')

    while True:
        try:
            # Calculate progressive delay (longer pause every 50 pages)
            if current_page % 50 == 0 and current_page > 1:
                delay = min(base_delay * 3, max_delay)
                print(f" Long pause after page {current_page}...")
            else:
                # Delay grows by 0.5s for every 50 pages scraped, capped at max_delay.
                delay = base_delay + (current_page // 50) * 0.5
                delay = min(delay, max_delay)
            # Add random jitter to avoid predictable patterns
            jitter = random.uniform(0.2, 0.8)
            sleep_time = delay + jitter
            if current_page > 1:
                time.sleep(sleep_time)
            # Payload template captured from the browser; only the page
            # number is substituted between requests.
            raw_data_str = '{"searchQueryState":{"pagination":{"currentPage":%d},"isMapVisible":false,"mapBounds":{"west":-97.15455254907226,"east":-96.76831445092773,"south":32.74662904769037,"north":32.979064219199735},"mapZoom":12,"regionSelection":[{"regionId":12065,"regionType":6}],"filterState":{"sortSelection":{"value":"globalrelevanceex"}},"isListVisible":true},"wants":{"cat1":["listResults"],"cat2":["total"]},"requestId":2,"isDebugRequest":false}' % current_page
            print(f'Requesting page {current_page}...')
            data_payload = json.loads(raw_data_str)
            # Update referer with current page
            session.headers['referer'] = f'https://www.zillow.com/irving-tx/?searchQueryState=%7B%22pagination%22%3A%7B%22currentPage%22%3A{current_page}%7D%7D'
            # Retry logic with exponential backoff
            retry_count = 0
            success = False
            while retry_count <= max_retries and not success:
                try:
                    response = session.put(url, json=data_payload, timeout=30)
                    if response.status_code == 200:
                        # Check if response has actual data
                        try:
                            json_data = response.json()
                            result_list = json_data.get('cat1', {}).get('searchResults', {}).get('listResults', [])
                            if not result_list:
                                # An empty result list marks the final page.
                                print(f" No more results found (end of listings)")
                                success = True
                                return aggregate_data
                            aggregate_data.append(json_data)
                            print(f" ✓ Success! Collected {len(result_list)} properties")
                            current_page += 1
                            success = True
                            # Save incremental backup
                            if len(aggregate_data) % save_interval == 0:
                                print(f" Saving incremental backup...")
                                save_backup(aggregate_data, current_page - 1)
                        except (json.JSONDecodeError, KeyError) as e:
                            print(f" Warning: Unexpected response format: {e}")
                            if retry_count < max_retries:
                                retry_count += 1
                                continue
                            else:
                                break  # retries exhausted for this page
                    elif is_blocked(response):
                        print(f" ✗ Blocked by PerimeterX (403)")
                        if retry_count < max_retries:
                            retry_count += 1
                            backoff_time = 30 * (2 ** (retry_count - 1))  # 30s, 60s, 120s
                            print(f" Retrying in {backoff_time} seconds (attempt {retry_count}/{max_retries})...")
                            time.sleep(backoff_time)
                        else:
                            print(f" Max retries reached. Saving collected data...")
                            save_backup(aggregate_data, current_page)
                            return aggregate_data
                    elif response.status_code == 429:
                        # Explicit rate limit: long fixed wait before retrying.
                        print(f" Rate limited (429). Waiting 60 seconds...")
                        time.sleep(60)
                        retry_count += 1
                        if retry_count > max_retries:
                            save_backup(aggregate_data, current_page)
                            return aggregate_data
                    elif response.status_code >= 500:
                        # Transient server error: linear backoff per attempt.
                        print(f" Server error ({response.status_code}). Retrying...")
                        retry_count += 1
                        if retry_count <= max_retries:
                            time.sleep(10 * retry_count)
                        else:
                            save_backup(aggregate_data, current_page)
                            return aggregate_data
                    else:
                        # Any other status is treated as fatal: save and stop.
                        print(f" Request failed with status code: {response.status_code}")
                        print(f" Response preview: {response.text[:200]}")
                        save_backup(aggregate_data, current_page)
                        return aggregate_data
                except requests.exceptions.Timeout:
                    print(f" Request timeout. Retrying...")
                    retry_count += 1
                    if retry_count <= max_retries:
                        time.sleep(5 * retry_count)
                    else:
                        save_backup(aggregate_data, current_page)
                        return aggregate_data
                except requests.exceptions.RequestException as e:
                    # Connection errors, DNS failures, etc.
                    print(f" Request exception: {e}")
                    retry_count += 1
                    if retry_count <= max_retries:
                        time.sleep(5 * retry_count)
                    else:
                        save_backup(aggregate_data, current_page)
                        return aggregate_data
            if not success:
                # Retry loop exited (break) without a usable response.
                print(f" Failed to get valid response after {max_retries} retries")
                save_backup(aggregate_data, current_page)
                return aggregate_data
        except KeyboardInterrupt:
            print(f"\n Interrupted by user. Saving collected data...")
            save_backup(aggregate_data, current_page)
            return aggregate_data
        except Exception as e:
            print(f" Unexpected error: {e}")
            save_backup(aggregate_data, current_page)
            return aggregate_data
    # NOTE(review): unreachable — every exit path above returns explicitly.
    return aggregate_data
def _extract_row(house):
    """Flatten one Zillow listing dict into a flat row of scalar fields.

    Args:
        house: One entry of cat1.searchResults.listResults from a Zillow
            search-page-state response.

    Returns:
        dict mapping column name -> value. Missing fields become None;
        boolean flags are coerced to int for CSV (1=truthy, 0=falsy or
        missing).
    """
    home_info = house.get('hdpData', {}).get('homeInfo', {})
    row = {
        # Numerical fields
        "zpid": home_info.get('zpid'),
        "price": home_info.get('priceForHDP') or home_info.get('price'),
        "bedrooms": home_info.get('bedrooms'),
        "bathrooms": home_info.get('bathrooms'),
        "living_area": home_info.get('livingArea'),
        "latitude": home_info.get('latitude'),
        "longitude": home_info.get('longitude'),
        "zestimate": home_info.get('zestimate'),
        "rent_zestimate": home_info.get('rentZestimate'),
        "tax_assessed_value": home_info.get('taxAssessedValue'),
        "lot_area_value": home_info.get('lotAreaValue'),
        "days_on_zillow": home_info.get('daysOnZillow'),
        "time_on_zillow": home_info.get('timeOnZillow'),
        # Categorical fields (fall back to the top-level listing dict
        # where homeInfo does not carry the value)
        "home_type": home_info.get('homeType'),
        "address": home_info.get('streetAddress'),
        "home_status": home_info.get('homeStatus') or home_info.get('homeStatusForHDP'),
        "status_type": house.get('statusType'),
        "address_city": home_info.get('city') or house.get('addressCity'),
        "address_state": home_info.get('state') or house.get('addressState'),
        "address_zipcode": home_info.get('zipcode') or house.get('addressZipcode'),
        "lot_area_unit": home_info.get('lotAreaUnit'),
        "country": home_info.get('country'),
        "currency": home_info.get('currency'),
    }
    # Boolean flags -> int for CSV. Note: missing flags are coerced to 0
    # (the data does not distinguish "unknown" from False in the output).
    flag_keys = {
        "is_zillow_owned": 'isZillowOwned',
        "is_non_owner_occupied": 'isNonOwnerOccupied',
        "is_premier_builder": 'isPremierBuilder',
        "is_featured": 'isFeatured',
        "is_showcase_listing": 'isShowcaseListing',
        "is_unmappable": 'isUnmappable',
        "is_preforeclosure_auction": 'isPreforeclosureAuction',
    }
    for column, key in flag_keys.items():
        row[column] = 1 if home_info.get(key) else 0
    return row


def parse_data(aggregate_data):
    """Parse JSON data into a DataFrame with numerical and categorical fields only.

    Args:
        aggregate_data: List of per-page JSON responses from scrape_data().

    Returns:
        pandas.DataFrame with one row per property listing (empty if no
        listings were found).
    """
    data = []
    for page_data in aggregate_data:
        result_list = page_data.get('cat1', {}).get('searchResults', {}).get('listResults', [])
        if not result_list:
            continue
        for house in result_list:
            # Guard each listing individually so one malformed entry does
            # not discard the remaining listings on the same page.
            try:
                data.append(_extract_row(house))
            except (KeyError, TypeError, AttributeError) as e:
                print(f" Warning: Error parsing listing: {e}")
                continue
    return pd.DataFrame(data)
if __name__ == '__main__':
    # Run the full pipeline: scrape -> parse -> write CSV -> print summary.
    divider = "=" * 60
    print(divider)
    print("Zillow Property Scraper - Irving, TX")
    print(divider)
    print("\nScraping property data from Zillow...")

    pages = scrape_data(save_interval=20)
    if not pages:
        print("\nNo data collected. Exiting.")
        exit(1)

    print(f"\nCollected {len(pages)} pages of data")
    print("Parsing data into DataFrame...")
    df = parse_data(pages)
    if df.empty:
        print("Warning: No properties found in collected data.")
        exit(1)

    output_file = 'irving_tx_housing.csv'
    print(f"\nWriting data to {output_file}...")
    df.to_csv(output_file, index=False)

    # Final report of what was written.
    print(f"\n{divider}")
    print(f"Successfully saved {len(df)} properties to {output_file}")
    print(f"{divider}")
    print(f"\nDataFrame shape: {df.shape}")
    print(f"Columns ({len(df.columns)}): {', '.join(df.columns)}")
    print("\nFirst few rows:")
    print(df.head())
    print("\nData summary:")
    print(df.describe())