DataflowTest.py
import argparse
import json
import logging
import apache_beam as beam
import pandas as pd
from apache_beam.options.pipeline_options import PipelineOptions
from google.cloud import storage
from smart_open import open  # smart_open's open() can read gs:// URIs directly
import os
#os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = 'project-demo-1508-9f80e4c2c1c6.json'
""" sample json that we are going to be parsing through our program, store this in a gcs bucket and then
run this script which converts
{
"product": {
"id": "1234567890",
"title": "Awesome Product",
"vendor": "Vendor Test",
"product_type": "Test",
"created_at": "2022-10-11T16:07:45-4:00",
"updated_at": "2022-10-15T14:32:09-4:00",
}
}
"""
class ReadFile(beam.DoFn):

    def __init__(self, input_path):
        self.input_path = input_path

    def start_bundle(self):
        # Import inside the worker so the client is created per bundle rather than pickled with the DoFn.
        from google.cloud import storage
        self.client = storage.Client()

    def process(self, something):
        # 'something' is the single dummy element from beam.Create([None]); it is not used.
        clear_data = []
        with open(self.input_path) as fin:  # smart_open resolves the gs:// URI
            ss = fin.read()
            data = json.loads(ss)
            product = data.get('product')
            if product and product.get('id'):  # verify the product and its id exist
                product_id = product.get('id')
                vendor = product.get('vendor')
                product_type = product.get('product_type')
                updated_at = product.get('updated_at')
                created_at = product.get('created_at')
                # product_options = product.get('product_options')
                # option_ids = []
                # if product_options:
                #     for option in product_options:
                #         option_ids.append(option.get('id'))
                clear_data.append([product_id, vendor, product_type, updated_at, created_at])
        yield clear_data
class WriteCSVFile(beam.DoFn):

    def __init__(self, bucket_name):
        self.bucket_name = bucket_name

    def start_bundle(self):
        from google.cloud import storage
        self.client = storage.Client()

    def process(self, mylist):
        # One row per product; every field is kept as a string.
        df = pd.DataFrame(mylist, columns=['product_id', 'vendor', 'product_type', 'updated_at', 'created_at'])
        bucket = self.client.get_bucket(self.bucket_name)
        bucket.blob("csv_exports.csv").upload_from_string(df.to_csv(index=False), 'text/csv')
class DataflowOptions(PipelineOptions):

    @classmethod
    def _add_argparse_args(cls, parser):
        # Change the bucket names here: input_path in 'gs://...' format, output_bucket as the bare bucket name.
        parser.add_argument('--input_path', type=str, default='gs://new-buclet-8022/input.json')
        parser.add_argument('--output_bucket', type=str, default='new-buclet-8022')
def run(argv=None):
    parser = argparse.ArgumentParser()
    known_args, pipeline_args = parser.parse_known_args(argv)
    pipeline_options = PipelineOptions(pipeline_args)
    dataflow_options = pipeline_options.view_as(DataflowOptions)

    with beam.Pipeline(options=pipeline_options) as pipeline:
        (pipeline
         | 'Start' >> beam.Create([None])  # single dummy element to kick off the read
         | 'Read JSON' >> beam.ParDo(ReadFile(dataflow_options.input_path))
         | 'Write CSV' >> beam.ParDo(WriteCSVFile(dataflow_options.output_bucket))
         )
if __name__ == '__main__':
    logging.getLogger().setLevel(logging.INFO)
    run()
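A minimal sketch of how the pipeline can be exercised end to end (not part of the file above; it assumes the default bucket new-buclet-8022 exists, that GOOGLE_APPLICATION_CREDENTIALS points at a service account with access to it, and that the local DirectRunner is acceptable):

import json
from google.cloud import storage
from DataflowTest import run

# Upload the sample product JSON to the input location the pipeline expects.
sample = {
    "product": {
        "id": "1234567890",
        "title": "Awesome Product",
        "vendor": "Vendor Test",
        "product_type": "Test",
        "created_at": "2022-10-11T16:07:45-04:00",
        "updated_at": "2022-10-15T14:32:09-04:00"
    }
}
bucket = storage.Client().get_bucket('new-buclet-8022')
bucket.blob('input.json').upload_from_string(json.dumps(sample), 'application/json')

# Run the pipeline locally; to launch on Dataflow instead, swap in
# '--runner=DataflowRunner' plus the usual --project/--region/--temp_location flags.
run(['--runner=DirectRunner',
     '--input_path=gs://new-buclet-8022/input.json',
     '--output_bucket=new-buclet-8022'])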