# -*- coding: utf-8 -*-
"""projectkedua_rere.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1vEFgJknJ2k6kBTvmx48swmeSkbOCIds7
"""
# upload data via the Colab file picker
import pandas as pd
from google.colab import files
files.upload()
# load dataset
df = pd.read_csv('bbc-news-data.csv', sep='\t')
df.head(10)
# data columns
df.columns
# dataset shape (rows, columns)
df.shape
# data info
df.info()
# categories
df.category.value_counts()
# drop the unused 'filename' column
df_new = df.drop(columns=['filename'])
df_new
# import and download NLP packages (the Keras imports happen later, in the
# model section, via tensorflow.keras)
import nltk, re, string
from nltk.corpus import stopwords
from nltk.corpus import wordnet as wn
from nltk.stem import WordNetLemmatizer
nltk.download('wordnet')
nltk.download('omw-1.4')  # WordNet data required by newer NLTK versions
nltk.download('stopwords')
nltk.download('averaged_perceptron_tagger')  # required by nltk.pos_tag below
# lower-case all characters
df_new.title = df_new.title.apply(lambda x: x.lower())
df_new.content = df_new.content.apply(lambda x: x.lower())
# removing punctuation
def cleaner(data):
    return data.translate(str.maketrans('', '', string.punctuation))
df_new.title = df_new.title.apply(cleaner)
df_new.content = df_new.content.apply(cleaner)
# lemmatization: map the first letter of each Penn Treebank tag (e.g. 'VBD'
# -> 'V') to the matching WordNet POS constant, defaulting to noun, so each
# word is lemmatized with the right part of speech
lemmatizer = WordNetLemmatizer()
def lem(data):
    pos_dict = {'N': wn.NOUN, 'V': wn.VERB, 'J': wn.ADJ, 'R': wn.ADV}
    return ' '.join(lemmatizer.lemmatize(w, pos_dict.get(t[0], wn.NOUN))
                    for w, t in nltk.pos_tag(data.split()))
df_new.title = df_new.title.apply(lem)
df_new.content = df_new.content.apply(lem)
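# quick sanity check (illustrative; exact output depends on the POS tagger,
# but plural nouns and inflected verbs should be reduced to their lemmas,
# e.g. 'cats' -> 'cat', 'were'/'running' -> 'be'/'run')
print(lem('the cats were running'))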
# removing numbers (assign the result back, or the change is lost)
def rem_numbers(data):
    return re.sub('[0-9]+', '', data)
df_new['title'] = df_new['title'].apply(rem_numbers)
df_new['content'] = df_new['content'].apply(rem_numbers)
# removing stopwords (restrict to English and use a set for fast lookup)
st_words = set(stopwords.words('english'))
def stopword(data):
    return ' '.join(w for w in data.split() if w not in st_words)
df_new.title = df_new.title.apply(stopword)
df_new.content = df_new.content.apply(stopword)
# view data after cleansing
df_new.head(10)
# data category one-hot-encoding
category = pd.get_dummies(df_new.category)
df_new_cat = pd.concat([df_new, category], axis=1)
df_new_cat = df_new_cat.drop(columns='category')
df_new_cat.head(10)
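# pd.get_dummies creates the one-hot columns in alphabetical order, which is
# why the explicit column list below ('business' ... 'tech') lines up with it
print(list(category.columns))  # sanity check of the column order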
# convert dataframe values to numpy arrays; join title and content with a
# space so the last title word does not fuse with the first content word
news = df_new_cat['title'].values + ' ' + df_new_cat['content'].values
label = df_new_cat[['business', 'entertainment', 'politics', 'sport', 'tech']].values
# view news array
news
# view label array
label
# Split data into training and validation
from sklearn.model_selection import train_test_split
news_train, news_test, label_train, label_test = train_test_split(news, label, test_size=0.2, shuffle=True)
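# optional (illustrative alternative, not what the notebook used): a
# stratified split keeps the category proportions identical in both splits
# news_train, news_test, label_train, label_test = train_test_split(
#     news, label, test_size=0.2, shuffle=True, stratify=label.argmax(axis=1))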
# tokenizer: fit on the training texts only, so test-set vocabulary stays
# out-of-vocabulary (mapped to the 'x' OOV token) instead of leaking into
# the word index
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
tokenizer = Tokenizer(num_words=5000, oov_token='x',
                      filters='!"#$%&()*+,-./:;<=>@[\\]^_`{|}~ ')
tokenizer.fit_on_texts(news_train)
sekuens_train = tokenizer.texts_to_sequences(news_train)
sekuens_test = tokenizer.texts_to_sequences(news_test)
padded_train = pad_sequences(sekuens_train)
padded_test = pad_sequences(sekuens_test)
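# with no maxlen argument, pad_sequences pads each matrix to its own longest
# sequence, so padded_train and padded_test may have different widths; that
# is fine here because the Embedding/LSTM stack accepts variable-length input
print(padded_train.shape, padded_test.shape)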
# model: Embedding -> LSTM -> Dense classifier with softmax over 5 classes
import tensorflow as tf
model = tf.keras.Sequential([
    tf.keras.layers.Embedding(input_dim=5000, output_dim=64),
    tf.keras.layers.LSTM(128),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(5, activation='softmax')
])
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
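# arithmetic behind model.summary()'s trainable-parameter counts:
#   Embedding: 5000 vocab * 64 dims                      = 320,000
#   LSTM:      4 gates * 128 * (128 + 64 inputs + 1 bias) =  98,816
#   Dense:     128 * 128 + 128 biases                     =  16,512
#   Output:    128 * 5 + 5 biases                         =     645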
# callback: stop training once both accuracies exceed 90%
class myCallback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        if logs.get('accuracy', 0) > 0.9 and logs.get('val_accuracy', 0) > 0.9:
            self.model.stop_training = True
            print("\nThe accuracy of the training and validation sets has reached > 90%!")
callbacks = myCallback()
# model fit (validation_steps is dropped: it only applies to tf.data
# datasets, and validation_data here is a plain tuple of NumPy arrays)
history = model.fit(padded_train, label_train, epochs=50,
                    validation_data=(padded_test, label_test),
                    verbose=2, callbacks=[callbacks])
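# evaluate on the held-out split (a quick sketch; this split doubles as both
# validation and test data above, so treat the number as optimistic)
loss, acc = model.evaluate(padded_test, label_test, verbose=0)
print(f'test loss: {loss:.4f} - test accuracy: {acc:.4f}')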
# plot of accuracy
import matplotlib.pyplot as plt
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('Model Accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# plot of loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model Loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
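# illustrative inference sketch: classify a new headline; the sample text is
# made up, and the label list assumes the alphabetical one-hot column order
# established earlier
labels = ['business', 'entertainment', 'politics', 'sport', 'tech']
sample = ['stock markets rally after interest rate decision']
seq = tokenizer.texts_to_sequences(sample)
pad = pad_sequences(seq, maxlen=padded_train.shape[1])
pred = model.predict(pad)
print(labels[pred.argmax(axis=-1)[0]])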