-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathprocess_data.py
More file actions
94 lines (69 loc) · 2.83 KB
/
process_data.py
File metadata and controls
94 lines (69 loc) · 2.83 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
import pandas as pd
import re
import os
import glob
import numpy as np
import json
import csv
import cassandra
from cassandra.cluster import Cluster
from cql_queries import insert_artist_songs, insert_artist_song_username, insert_username_song
# Path fragment (appended to os.getcwd()) of the directory holding the raw
# event-data CSV files; passed to process_data() by main().
files = '/event_data'
def process_data(files):
    """
    Merge every raw event-data CSV under ``os.getcwd() + files`` into a
    single file, 'event_datafile_new.csv', keeping only the columns the
    Cassandra tables need.

    Parameters
    ----------
    files : str
        Path fragment relative to the current working directory, with a
        leading separator (e.g. '/event_data'), of the directory tree that
        holds the raw event CSV files.

    Returns
    -------
    str
        The name of the generated file, 'event_datafile_new.csv'
        (written into the current working directory).
    """
    filepath = os.getcwd() + files

    # Gather files from EVERY directory under filepath.  (The original code
    # overwrote file_path_list on each os.walk iteration, so only the last
    # directory visited was actually processed.)
    file_path_list = []
    for root, _dirs, _names in os.walk(filepath):
        # glob('*') also matches subdirectories; keep regular files only so
        # the open() below cannot fail on a directory.
        file_path_list.extend(
            p for p in glob.glob(os.path.join(root, '*')) if os.path.isfile(p)
        )

    # Read every data row (header excluded) from every source file.
    full_data_rows_list = []
    for f in file_path_list:
        with open(f, 'r', encoding='utf8', newline='') as csvfile:
            csvreader = csv.reader(csvfile)
            next(csvreader, None)  # skip header; tolerate an empty file
            full_data_rows_list.extend(csvreader)

    csv.register_dialect('myDialect', quoting=csv.QUOTE_ALL, skipinitialspace=True)
    with open('event_datafile_new.csv', 'w', encoding='utf8', newline='') as f:
        writer = csv.writer(f, dialect='myDialect')
        writer.writerow(['artist', 'firstName', 'gender', 'itemInSession',
                         'lastName', 'length', 'level', 'location',
                         'sessionId', 'song', 'userId'])
        for row in full_data_rows_list:
            # An empty artist field marks a non-song event (page view,
            # auth action, ...); those rows carry no song data — skip them.
            if row[0] == '':
                continue
            writer.writerow((row[0], row[2], row[3], row[4], row[5], row[6],
                             row[7], row[8], row[12], row[13], row[16]))
    return 'event_datafile_new.csv'
def insert_data(session, file):
    """
    Load every row of the pre-processed event CSV into the three Cassandra
    tables, one prepared INSERT (from cql_queries) per table per row.

    Parameters
    ----------
    session : cassandra.cluster.Session
        An open session already bound to the target keyspace.
    file : str
        Path of the CSV produced by process_data
        (columns: artist, firstName, gender, itemInSession, lastName,
        length, level, location, sessionId, song, userId).
    """
    with open(file, encoding='utf8') as handle:
        reader = csv.reader(handle)
        next(reader)  # skip the header row
        for row in reader:
            # Name the fields once so the three INSERTs below read clearly.
            session_id = int(row[8])
            item_in_session = int(row[3])
            artist, song = row[0], row[9]
            first_name, last_name = row[1], row[4]

            session.execute(insert_artist_songs,
                            (session_id, item_in_session, artist, song, float(row[5])))
            session.execute(insert_artist_song_username,
                            (int(row[10]), session_id, item_in_session,
                             first_name, last_name, artist, song))
            session.execute(insert_username_song,
                            (song, session_id, item_in_session, first_name, last_name))
def main():
    """
    End-to-end driver: build the combined event CSV and load it into the
    'sparkifydb' keyspace on a local Cassandra cluster.

    Steps:
      1. connect to the cluster at 127.0.0.1 and open a session
      2. pre-process the raw event data into one CSV (process_data)
      3. insert every row into the three tables (insert_data)
      4. shut down the session and the cluster connection
    """
    cluster = Cluster(['127.0.0.1'])
    session = cluster.connect()
    session.set_keyspace('sparkifydb')

    datafile = process_data(files)
    insert_data(session, datafile)

    session.shutdown()
    cluster.shutdown()
# Run the ETL only when executed as a script, not on import.
if __name__ == "__main__":
    main()