-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathrun_pipeline.py
More file actions
169 lines (134 loc) · 5.34 KB
/
run_pipeline.py
File metadata and controls
169 lines (134 loc) · 5.34 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
#!/usr/bin/env python
import os
import sys
import argparse
import subprocess
import json
from pathlib import Path
def parse_args():
    """Build the command line interface for the pipeline runner and parse argv.

    Returns the populated argparse.Namespace.
    """
    parser = argparse.ArgumentParser(description='Run the RepuSense NLP pipeline')

    # Core analysis parameters.
    parser.add_argument('--company', type=str, help='Company name to analyze')
    parser.add_argument('--start-date', type=str, help='Start date (YYYY-MM-DD)')
    parser.add_argument('--end-date', type=str, help='End date (YYYY-MM-DD)')

    # Reuse previously fetched data instead of scraping anew.
    parser.add_argument('--use-existing', action='store_true',
                        help='Use existing data instead of fetching new data')
    parser.add_argument('--existing-file', type=str,
                        help='Path to existing data file')

    # Flags that disable individual pipeline stages.
    for flag, stage in (('--skip-topic', 'topic modeling'),
                        ('--skip-sentiment', 'sentiment analysis'),
                        ('--skip-keyword', 'keyword extraction')):
        parser.add_argument(flag, action='store_true', help=f'Skip {stage}')

    # One-shot setup of the data directory layout.
    parser.add_argument('--init-structure', action='store_true',
                        help='Initialize the data directory structure')

    return parser.parse_args()
def load_config():
    """Load pipeline settings from config.json located next to this script.

    Returns:
        dict: The parsed configuration, or an empty dict when the file is
        absent or cannot be read/parsed.
    """
    config_path = Path(__file__).parent / "config.json"
    if not config_path.exists():
        print("No configuration file found, using defaults")
        return {}
    try:
        with open(config_path, 'r', encoding='utf-8') as f:
            config = json.load(f)
    except (OSError, json.JSONDecodeError) as e:
        # A present-but-broken config must not be reported as "not found"
        # (the original fell through and printed the missing-file message).
        print(f"Error loading configuration: {str(e)}")
        return {}
    print(f"Loaded configuration from {config_path}")
    return config
def init_directory_structure():
    """Create the data directory tree described by the configuration."""
    pipeline_cfg = load_config().get('pipeline', {})
    base_dir = Path(__file__).parent

    # Resolve directory names from config, falling back to the standard layout.
    root = base_dir / pipeline_cfg.get('data_dir', "data")
    subdir_names = [
        pipeline_cfg.get('data_storage_dir', "data_storage"),
        pipeline_cfg.get('processed_data_dir', "processed_data"),
        pipeline_cfg.get('nlp_results_dir', "nlp_results"),
    ]

    # Root first, then each subdirectory beneath it.
    os.makedirs(root, exist_ok=True)
    for name in subdir_names:
        path = root / name
        os.makedirs(path, exist_ok=True)
        print(f"Created directory: {path}")

    print("Data directory structure initialized successfully.")
def run_pipeline(args):
    """Run the NLP pipeline in a subprocess with the requested options.

    Args:
        args: Parsed argparse.Namespace from parse_args().

    Exits the current process with status 1 when the pipeline subprocess
    returns a non-zero code.
    """
    print("Running NLP pipeline...")

    # Use the interpreter running this script (consistent with run_api)
    # instead of relying on a bare "python" being on PATH.
    cmd = [
        sys.executable, "-m", "nlp_pipeline.main",
        "--output-dir", str(Path(__file__).parent / "data")
    ]

    # --company is optional (see the inwi fallback below); only forward it
    # when given — the original placed None in the argv list, which crashes
    # both the ' '.join below and subprocess.run.
    if args.company:
        cmd.extend(["--company", args.company])

    # Optional date range.
    if args.start_date:
        cmd.extend(["--start-date", args.start_date])
    if args.end_date:
        cmd.extend(["--end-date", args.end_date])

    # Reuse previously fetched data instead of scraping anew.
    if args.use_existing:
        cmd.append("--use-existing")
        if args.existing_file:
            cmd.extend(["--existing-file", args.existing_file])
        elif args.company:
            # Try to find a company-specific data file.
            default_file = f"scrapping script/reddit_nlp_{args.company}_2024-01-01_2025-12-31.json"
            if os.path.exists(default_file):
                cmd.extend(["--existing-file", default_file])
        else:
            # Default to the inwi example if no company specified.
            cmd.extend(["--existing-file", "scrapping script/reddit_nlp_inwi_2024-01-01_2025-12-31.json"])

    # Stage-skip flags pass straight through to the pipeline module.
    if args.skip_topic:
        cmd.append("--skip-topic")
    if args.skip_sentiment:
        cmd.append("--skip-sentiment")
    if args.skip_keyword:
        cmd.append("--skip-keyword")

    print(f"Executing command: {' '.join(cmd)}")
    result = subprocess.run(cmd)
    if result.returncode != 0:
        print("Pipeline execution failed!")
        sys.exit(1)
    print("Pipeline execution completed successfully!")
def run_api():
    """Launch the FastAPI app under uvicorn using the configured host/port."""
    print("Starting API server...")

    # Host and port come from config.json, with sensible defaults.
    api_cfg = load_config().get('api', {})
    host = api_cfg.get('host', '0.0.0.0')
    port = api_cfg.get('port', 8000)

    cmd = [
        sys.executable, "-m", "uvicorn",
        "nlp_pipeline.api.main:app",
        "--host", host,
        "--port", str(port),
    ]
    print(f"Executing command: {' '.join(cmd)}")
    subprocess.run(cmd)
def main():
    """Entry point: initialize the directory layout or run the pipeline."""
    args = parse_args()

    # --init-structure is a standalone setup action; do not run the pipeline.
    if args.init_structure:
        init_directory_structure()
        return

    run_pipeline(args)


if __name__ == "__main__":
    main()