forked from Nikhil-Jones/Fake-News-Detector
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtrie.py
More file actions
260 lines (209 loc) · 10.6 KB
/
trie.py
File metadata and controls
260 lines (209 loc) · 10.6 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
from config import SUSPICIOUS_KEYWORDS
import re
class TrieNode:
    """One node of the suspicious-keyword trie.

    Attributes:
        children: mapping of next character -> child TrieNode.
        is_end_of_word: True when a stored keyword/phrase terminates here.
        word: full keyword/phrase ending at this node ("" for interior nodes).
        severity: severity label of that keyword; defaults to "medium".
        category: category label of that keyword; defaults to "general".
    """

    def __init__(self):
        self.children = {}
        self.word = ""
        self.is_end_of_word = False
        self.category = "general"
        self.severity = "medium"
class EnhancedTrie:
    """Character-level trie for flagging suspicious (fake-news-style) phrases.

    Each stored keyword/phrase carries a severity ("critical"/"high"/
    "medium"/"low") and a category (e.g. "medical", "absurd", "conspiracy").
    Exact word/phrase lookups are complemented by a fixed set of regex
    patterns; both feed a weighted suspicion score and human-readable
    explanations.
    """

    def __init__(self):
        self.root = TrieNode()
        # Compile the regex patterns once up front; previously they were
        # rebuilt and recompiled on every _find_pattern_matches call.
        self._compiled_patterns = [
            (re.compile(pattern, re.IGNORECASE), severity, category)
            for pattern, severity, category in self._pattern_definitions()
        ]
        self._build_enhanced_trie()

    @staticmethod
    def _pattern_definitions():
        """Return the raw (regex, severity, category) pattern triples."""
        medical_patterns = [
            (r"(chocolate|candy|sugar)\s+(cure|heal|treat|prevent)s?\s+(cancer|diabetes|aids)", "critical", "medical"),
            (r"(soda|cola)\s+.*(improve|extend|increase)\s+(lifespan|life|health)", "critical", "medical"),
            (r"drinking\s+\d+\s+liters?\s+.*daily", "critical", "dangerous"),
            (r"doctors?\s+(hate|suppress|hide)\s+this", "high", "medical")
        ]
        absurd_patterns = [
            (r"aliens?\s+(landed|visited|came)\s+(yesterday|today|in)", "critical", "absurd"),
            (r"time\s+travel\s+(discovered|invented|possible)", "critical", "absurd"),
            (r"(immortal|never\s+die|live\s+forever)", "critical", "absurd"),
            (r"(teleport|psychic|supernatural)\w*", "high", "absurd")
        ]
        return medical_patterns + absurd_patterns

    def _build_enhanced_trie(self):
        """Populate the trie with curated keyword lists plus config keywords."""
        keyword_categories = {
            "medical_critical": [
                "chocolate cures cancer", "soda improves lifespan", "miracle cure",
                "cure cancer", "cures cancer", "instant cure", "natural cure",
                "doctors hate", "big pharma suppress", "forbidden cure"
            ],
            "absurd_critical": [
                "aliens landed", "ufo landed", "time travel discovered",
                "teleportation invented", "immortality found", "never die",
                "live forever", "psychic powers", "supernatural abilities"
            ],
            "dangerous_high": [
                "drink 10 liters", "ignore doctors", "replace medicine",
                "no side effects guaranteed", "overdose safe", "mega dose safe"
            ],
            "conspiracy_medium": [
                "cover up", "secret government", "hidden truth", "deep state",
                "they don't want you to know", "mainstream media lies"
            ],
            "clickbait_low": [
                "you won't believe", "shocking revelation", "doctors are furious",
                "this one trick", "what happens next will shock you"
            ]
        }
        for category, keywords in keyword_categories.items():
            # Keys encode "<category>_<severity>"; fall back to safe defaults
            # for any key without an underscore.
            cat_type, sep, severity = category.partition('_')
            if not sep:
                cat_type, severity = "general", "medium"
            for keyword in keywords:
                self.insert(keyword.lower(), severity, cat_type)
        # Fold in extra keywords from config with default metadata, without
        # overwriting anything already inserted above.
        for keyword in SUSPICIOUS_KEYWORDS:
            if not self.search(keyword.lower()):
                self.insert(keyword.lower(), "medium", "general")
        # Low-severity "legitimate" markers (category "legitimate"); stored in
        # the same trie so they show up in find_suspicious_keywords output.
        legitimate_phrases = [
            "peer-reviewed study", "published in journal", "according to official sources",
            "confirmed by", "government report", "weather forecast", "researchers suggest"
        ]
        for phrase in legitimate_phrases:
            self.insert(phrase.lower(), "low", "legitimate")

    def insert(self, word, severity="medium", category="general"):
        """Insert a keyword/phrase with metadata. O(L) in phrase length.

        The word is lowercased before insertion; previously a mixed-case
        insert stored the raw characters, making the entry unreachable via
        search(), which lowercases its argument.
        """
        word = word.lower()
        node = self.root
        for char in word:
            node = node.children.setdefault(char, TrieNode())
        node.is_end_of_word = True
        node.word = word
        node.severity = severity
        node.category = category

    def search(self, word):
        """Return True iff the exact (case-insensitive) phrase was inserted. O(L)."""
        node = self._find_word_node(word.lower())
        return node is not None and node.is_end_of_word

    def find_suspicious_keywords(self, text):
        """Scan text for stored keywords, stored phrases and regex patterns.

        Returns {matched_text: {"positions": [...], "severity": str,
        "category": str}}. NOTE: positions are word indices for trie matches
        but character offsets for regex matches (kept for backward
        compatibility); callers in this module only use their count.
        O(T*P*L): T words, max phrase length P, avg keyword length L.
        """
        found_keywords = {}
        words = text.lower().split()
        # Single-word hits.
        for i, word in enumerate(words):
            self._record_hit(found_keywords, word, i)
        # Multi-word phrase hits (2..5 words).
        for phrase_length in range(2, 6):
            for i in range(len(words) - phrase_length + 1):
                phrase = " ".join(words[i:i + phrase_length])
                self._record_hit(found_keywords, phrase, i)
        # Regex matches win on key collisions, as in the original update().
        found_keywords.update(self._find_pattern_matches(text.lower()))
        return found_keywords

    def _record_hit(self, found, candidate, position):
        """Record candidate at position in found if it is a stored keyword."""
        node = self._find_word_node(candidate)
        if node and node.is_end_of_word:
            entry = found.setdefault(candidate, {
                "positions": [],
                "severity": node.severity,
                "category": node.category
            })
            entry["positions"].append(position)

    def _find_word_node(self, word):
        """Walk the trie for word; return its node or None. O(L)."""
        node = self.root
        for char in word:
            if char not in node.children:
                return None
            node = node.children[char]
        return node

    def _find_pattern_matches(self, text):
        """Match the precompiled regex patterns against text. O(P*T)."""
        pattern_matches = {}
        for regex, severity, category in self._compiled_patterns:
            for match in regex.finditer(text):
                matched_text = match.group(0).lower()
                entry = pattern_matches.setdefault(matched_text, {
                    "positions": [],
                    "severity": severity,
                    "category": category
                })
                entry["positions"].append(match.start())
        return pattern_matches

    def calculate_suspicion_score(self, text):
        """Return a suspicion score in [0.0, 1.0] for text.

        Each found keyword contributes severity_weight * category_multiplier
        plus a frequency bonus (0.1 per occurrence, capped at 0.3); the total
        is normalized by 3.0 and clamped to 1.0.
        """
        found_keywords = self.find_suspicious_keywords(text)
        if not found_keywords:
            return 0.0
        severity_weights = {
            "critical": 1.0,
            "high": 0.7,
            "medium": 0.5,
            "low": 0.3
        }
        category_multipliers = {
            "medical": 1.5,
            "absurd": 1.4,
            "dangerous": 1.3,
            "conspiracy": 1.1,
            "general": 1.0
        }
        total_score = 0.0
        for keyword, data in found_keywords.items():
            base_score = severity_weights.get(data.get("severity", "medium"), 0.5)
            multiplier = category_multipliers.get(data.get("category", "general"), 1.0)
            frequency_bonus = min(len(data.get("positions", [])) * 0.1, 0.3)
            total_score += (base_score * multiplier) + frequency_bonus
        return min(total_score / 3.0, 1.0)

    def get_keyword_explanations(self, text):
        """Build human-readable explanation strings grouped by severity.

        Unknown severity labels are skipped instead of raising KeyError;
        previously any insert() with a custom severity crashed this method.
        """
        found_keywords = self.find_suspicious_keywords(text)
        by_severity = {"critical": [], "high": [], "medium": [], "low": []}
        for keyword, data in found_keywords.items():
            bucket = by_severity.get(data.get("severity", "medium"))
            if bucket is None:
                continue  # severity outside the known buckets: ignore
            bucket.append({
                "keyword": keyword,
                "category": data.get("category", "general"),
                "count": len(data.get("positions", []))
            })
        explanations = []
        if by_severity["critical"]:
            critical_keywords = [item["keyword"] for item in by_severity["critical"][:3]]
            explanations.append(f"CRITICAL: Found dangerous content: {', '.join(critical_keywords)}")
        if by_severity["high"]:
            high_keywords = [item["keyword"] for item in by_severity["high"][:3]]
            explanations.append(f"HIGH RISK: Suspicious patterns detected: {', '.join(high_keywords)}")
        if by_severity["medium"]:
            medium_keywords = [item["keyword"] for item in by_severity["medium"][:2]]
            explanations.append(f"Found suspicious keywords: {', '.join(medium_keywords)}")
        if by_severity["low"]:
            low_keywords = [item["keyword"] for item in by_severity["low"][:2]]
            explanations.append(f"Clickbait indicators found: {', '.join(low_keywords)}")
        return explanations
if __name__ == "__main__":
    # Manual smoke test: run a handful of obviously fake headlines through
    # the trie and print the score, matched keywords and explanations.
    trie = EnhancedTrie()
    test_cases = [
        "Scientists have confirmed that chocolate cures cancer in just one week.",
        "Aliens landed in Times Square yesterday and gave out free iPhones.",
        "Drinking 10 liters of soda daily will improve your lifespan by 20 years.",
        "Breaking news: Miracle cure discovered! Doctors hate this one weird trick.",
        "Time travel has been invented by secret government scientists."
    ]
    print("Enhanced Trie Test Results:")
    print("=" * 60)
    for case_number, sample in enumerate(test_cases, 1):
        print(f"\n{case_number}. Text: {sample}")
        matches = trie.find_suspicious_keywords(sample)
        suspicion = trie.calculate_suspicion_score(sample)
        notes = trie.get_keyword_explanations(sample)
        print(f" Suspicion Score: {suspicion:.2f}")
        print(f" Found Keywords: {list(matches.keys())}")
        print(f" Explanations:")
        for note in notes:
            print(f" - {note}")
        print("-" * 60)