Sentiment Analysis with LSTM
import numpy as np
Loading Data
# Based on the "Deep Learning with Python" version
## Loading data
from keras.datasets import imdb
from keras.preprocessing import sequence
max_features = 10000 # vocabulary size: keep only the 10,000 most frequent words
max_len = 500 # maximum number of tokens to keep per review
batch_size = 128
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=max_features)
print(train_data[0]) # first train doc
print(train_labels[0]) # first train label
[1, 14, 22, 16, 43, 530, 973, 1622, 1385, 65, 458, 4468, 66, 3941, 4, 173, 36, 256, 5, 25, 100, 43, 838, 112, 50, 670, 2, 9, 35, 480, 284, 5, 150, 4, 172, 112, 167, 2, 336, 385, 39, 4, 172, 4536, 1111, 17, 546, 38, 13, 447, 4, 192, 50, 16, 6, 147, 2025, 19, 14, 22, 4, 1920, 4613, 469, 4, 22, 71, 87, 12, 16, 43, 530, 38, 76, 15, 13, 1247, 4, 22, 17, 515, 17, 12, 16, 626, 18, 2, 5, 62, 386, 12, 8, 316, 8, 106, 5, 4, 2223, 5244, 16, 480, 66, 3785, 33, 4, 130, 12, 16, 38, 619, 5, 25, 124, 51, 36, 135, 48, 25, 1415, 33, 6, 22, 12, 215, 28, 77, 52, 5, 14, 407, 16, 82, 2, 8, 4, 107, 117, 5952, 15, 256, 4, 2, 7, 3766, 5, 723, 36, 71, 43, 530, 476, 26, 400, 317, 46, 7, 4, 2, 1029, 13, 104, 88, 4, 381, 15, 297, 98, 32, 2071, 56, 26, 141, 6, 194, 7486, 18, 4, 226, 22, 21, 134, 476, 26, 480, 5, 144, 30, 5535, 18, 51, 36, 28, 224, 92, 25, 104, 4, 226, 65, 16, 38, 1334, 88, 12, 16, 283, 5, 16, 4472, 113, 103, 32, 15, 16, 5345, 19, 178, 32]
1
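Each review is stored as a list of word indices. As a sanity check, the indices can be decoded back to words with the vocabulary that ships with the dataset; indices 0-2 are reserved for padding, start-of-sequence, and unknown tokens, so real words are offset by 3. A minimal sketch:
## Decode the first review back to words (offset of 3 for the reserved indices)
word_index = imdb.get_word_index()
reverse_word_index = {index: word for word, index in word_index.items()}
decoded_review = ' '.join(reverse_word_index.get(i - 3, '?') for i in train_data[0])
print(decoded_review[:100])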
# check class
print(type(train_data))
print(type(train_labels))
# check dtype
print(train_data.dtype)
print(train_labels.dtype)
# check shape
print(train_data.shape)
print(test_data.shape)
<class 'numpy.ndarray'>
<class 'numpy.ndarray'>
object
int64
(25000,)
(25000,)
## vectorize labels
y_train = np.asarray(train_labels).astype('float32')
y_test = np.asarray(test_labels).astype('float32')
print(type(y_train))
print(y_train.dtype)
<class 'numpy.ndarray'>
float32
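The IMDB split is balanced, with 12,500 positive and 12,500 negative reviews in each half; np.bincount on the integer labels confirms this:
## Count label occurrences: expect 12,500 of each class per split
print(np.bincount(train_labels))
print(np.bincount(test_labels))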
Padding Text Lengths
## Padding text lengths
train_data = sequence.pad_sequences(train_data, maxlen=max_len)
test_data = sequence.pad_sequences(test_data, maxlen=max_len)
print(train_data.shape)
print(test_data.shape)
(25000, 500)
(25000, 500)
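pad_sequences pre-pads and pre-truncates by default, so reviews shorter than max_len gain leading zeros while longer ones keep their final 500 tokens. The first training review (originally 218 tokens) therefore now starts with padding:
## Leading zeros come from the default pre-padding
print(train_data[0][:10])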
Model Definition
## Model Building
from keras.models import Sequential
from keras.layers import Dense, Embedding, Dropout, SpatialDropout1D
from keras.layers import LSTM
EMBEDDING_DIM = 128 # dimensionality of the dense embedding for each token
LSTM_DIM = 64 # number of LSTM units
model = Sequential()
model.add(Embedding(input_dim=max_features, output_dim=EMBEDDING_DIM, input_length=max_len))
model.add(SpatialDropout1D(0.2))
model.add(LSTM(LSTM_DIM, dropout=0.2, recurrent_dropout=0.2))
model.add(Dense(1, activation="sigmoid"))
model.compile(loss="binary_crossentropy", optimizer="adam",
              metrics=["accuracy"])
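The embedding layer dominates the parameter count: 10,000 × 128 = 1,280,000 weights, against 4 × (128 × 64 + 64 × 64 + 64) = 49,408 for the LSTM's four gates and 65 for the output layer. model.summary() makes this concrete:
model.summary()  # layer output shapes and parameter counts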
Model Fitting
## Model fitting (commented out to skip retraining; the saved model is reloaded below)
# history = model.fit(train_data, y_train,
#                     epochs=10,
#                     batch_size=128,
#                     validation_split=0.2)
Epoch 1/10
157/157 [==============================] - 111s 709ms/step - loss: 0.4556 - accuracy: 0.7739 - val_loss: 0.3904 - val_accuracy: 0.8310
Epoch 2/10
157/157 [==============================] - 112s 714ms/step - loss: 0.2479 - accuracy: 0.9057 - val_loss: 0.2884 - val_accuracy: 0.8804
Epoch 3/10
157/157 [==============================] - 111s 709ms/step - loss: 0.2162 - accuracy: 0.9179 - val_loss: 0.3065 - val_accuracy: 0.8748
Epoch 4/10
157/157 [==============================] - 122s 777ms/step - loss: 0.1547 - accuracy: 0.9423 - val_loss: 0.3361 - val_accuracy: 0.8764
Epoch 5/10
157/157 [==============================] - 124s 791ms/step - loss: 0.1228 - accuracy: 0.9561 - val_loss: 0.3708 - val_accuracy: 0.8800
Epoch 6/10
157/157 [==============================] - 119s 759ms/step - loss: 0.0934 - accuracy: 0.9685 - val_loss: 0.3716 - val_accuracy: 0.8794
Epoch 7/10
157/157 [==============================] - 111s 710ms/step - loss: 0.0775 - accuracy: 0.9730 - val_loss: 0.4497 - val_accuracy: 0.8748
Epoch 8/10
157/157 [==============================] - 124s 788ms/step - loss: 0.0839 - accuracy: 0.9705 - val_loss: 0.4725 - val_accuracy: 0.8250
Epoch 9/10
157/157 [==============================] - 119s 757ms/step - loss: 0.0700 - accuracy: 0.9775 - val_loss: 0.4114 - val_accuracy: 0.8686
Epoch 10/10
157/157 [==============================] - 118s 752ms/step - loss: 0.1145 - accuracy: 0.9603 - val_loss: 0.4440 - val_accuracy: 0.8694
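Validation loss bottoms out around epoch 2 and then climbs while training accuracy keeps rising, the usual overfitting signature. A sketch of one remedy, using Keras's EarlyStopping callback to halt training and keep the best weights:
## Sketch: stop once val_loss stops improving and restore the best checkpoint
from keras.callbacks import EarlyStopping
early_stop = EarlyStopping(monitor='val_loss', patience=2, restore_best_weights=True)
# history = model.fit(train_data, y_train, epochs=10, batch_size=128,
#                     validation_split=0.2, callbacks=[early_stop])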
Model Evaluation
## Plotting results
def plot(history):
    import matplotlib.pyplot as plt
    acc = history.history['accuracy']
    val_acc = history.history['val_accuracy']
    loss = history.history['loss']
    val_loss = history.history['val_loss']
    epochs = range(1, len(acc) + 1)
    ## Accuracy plot
    plt.plot(epochs, acc, 'bo', label='Training acc')
    plt.plot(epochs, val_acc, 'b', label='Validation acc')
    plt.title('Training and validation accuracy')
    plt.legend()
    ## Loss plot
    plt.figure()
    plt.plot(epochs, loss, 'bo', label='Training loss')
    plt.plot(epochs, val_loss, 'b', label='Validation loss')
    plt.title('Training and validation loss')
    plt.legend()
    plt.show()
# plot(history)
Model Saving
## Save once after training, then reload the trained model from disk
# model.save('../data/sent-analysis-lstm-v1.h5')
import keras
model = keras.models.load_model('../data/sent-analysis-lstm-v1.h5')
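A quick way to confirm the reloaded model works end to end is to evaluate it on the padded test set:
## Loss and accuracy of the reloaded model on the held-out test data
test_loss, test_acc = model.evaluate(test_data, y_test, batch_size=128)
print(test_acc)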
Model Prediction
## Prediction and model performance
pred_test = (model.predict(test_data) > 0.5).astype("int32")
print(pred_test[:10,:])
[[0]
[1]
[1]
[0]
[1]
[1]
[1]
[0]
[1]
[1]]
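To score a new review, it must go through the same pipeline: integer-encode against the IMDB vocabulary (with the same reserved-index conventions imdb.load_data uses by default), then pad to max_len. A minimal sketch with a naive whitespace tokenizer; encode_review is a hypothetical helper, not part of Keras:
word_index = imdb.get_word_index()

def encode_review(text):
    ## 1 = start token, 2 = out-of-vocabulary; real word indices are offset by 3
    ids = [1]
    for w in text.lower().split():
        idx = word_index.get(w)
        ids.append(idx + 3 if idx is not None and idx + 3 < max_features else 2)
    return sequence.pad_sequences([ids], maxlen=max_len)

print(model.predict(encode_review("a wonderful touching film with brilliant acting")))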
# Evaluation helpers adapted from the book "Text Analytics with Python"
from sklearn import metrics
import pandas as pd

def get_metrics(true_labels, predicted_labels):
    print('Accuracy:', np.round(
        metrics.accuracy_score(true_labels, predicted_labels), 4))
    print('Precision:', np.round(
        metrics.precision_score(true_labels, predicted_labels, average='weighted'), 4))
    print('Recall:', np.round(
        metrics.recall_score(true_labels, predicted_labels, average='weighted'), 4))
    print('F1 Score:', np.round(
        metrics.f1_score(true_labels, predicted_labels, average='weighted'), 4))

def display_confusion_matrix(true_labels, predicted_labels, classes=[1, 0]):
    total_classes = len(classes)
    level_labels = [total_classes * [0], list(range(total_classes))]
    cm = metrics.confusion_matrix(y_true=true_labels, y_pred=predicted_labels,
                                  labels=classes)
    cm_frame = pd.DataFrame(data=cm,
                            columns=pd.MultiIndex(levels=[['Predicted:'], classes],
                                                  codes=level_labels),
                            index=pd.MultiIndex(levels=[['Actual:'], classes],
                                                codes=level_labels))
    print(cm_frame)

def display_classification_report(true_labels, predicted_labels, classes=[1, 0]):
    report = metrics.classification_report(y_true=true_labels,
                                           y_pred=predicted_labels,
                                           labels=classes)
    print(report)

def display_model_performance_metrics(true_labels, predicted_labels, classes=[1, 0]):
    print('Model Performance metrics:')
    print('-' * 30)
    get_metrics(true_labels=true_labels, predicted_labels=predicted_labels)
    print('\nModel Classification report:')
    print('-' * 30)
    display_classification_report(true_labels=true_labels,
                                  predicted_labels=predicted_labels,
                                  classes=classes)
    print('\nPrediction Confusion Matrix:')
    print('-' * 30)
    display_confusion_matrix(true_labels=true_labels,
                             predicted_labels=predicted_labels,
                             classes=classes)
display_model_performance_metrics(test_labels, pred_test.flatten(), classes=[0,1])
Model Performance metrics:
------------------------------
Accuracy: 0.8614
Precision: 0.8617
Recall: 0.8614
F1 Score: 0.8614
Model Classification report:
------------------------------
              precision    recall  f1-score   support

           0       0.87      0.85      0.86     12500
           1       0.85      0.87      0.86     12500

    accuracy                           0.86     25000
   macro avg       0.86      0.86      0.86     25000
weighted avg       0.86      0.86      0.86     25000
Prediction Confusion Matrix:
------------------------------
          Predicted:
                   0      1
Actual: 0      10615   1885
        1       1579  10921
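Since the network outputs probabilities, threshold-free metrics are also available; for instance, ROC AUC computed on the raw scores rather than the 0.5-thresholded predictions:
## Threshold-free ranking quality of the predicted probabilities
probs = model.predict(test_data).flatten()
print('ROC AUC:', np.round(metrics.roc_auc_score(test_labels, probs), 4))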