Important Point
import numpy as np
from keras.models import Sequential
from keras.layers import Dense,Dropout,Activation,Flatten
from keras.layers import Convolution2D,MaxPooling2D
from keras.utils import np_utils
from keras.datasets import mnist
from matplotlib import pyplot as plt
import keras.backend as K
import pandas as pd
from sklearn.cross_validation import train_test_split
#import keras.backend as K
def f1_score(y_true, y_pred):
    """F1 score (harmonic mean of precision and recall) as a Keras metric.

    Operates on backend tensors; predictions are clipped to [0, 1] and
    rounded to {0, 1} before counting. K.epsilon() guards every division
    against a zero denominator.
    """
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    recall = true_positives / (possible_positives + K.epsilon())
    beta_squared = 1  # beta = 1, so beta**2 = 1 -> plain F1 score
    numerator = (1 + beta_squared) * precision * recall
    denominator = beta_squared * precision + recall + K.epsilon()
    return numerator / denominator
def prec(y_true, y_pred):
    """Precision metric: true positives / predicted positives.

    Operates on Keras backend tensors; predictions are rounded to {0, 1}
    before counting. (The original also computed `possible_p`, which was
    never used — removed.)
    """
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_p = K.sum(K.round(K.clip(y_pred, 0, 1)))
    # K.epsilon() avoids division by zero when nothing is predicted positive.
    return tp / (predicted_p + K.epsilon())
def recall(y_true, y_pred):
    """Recall metric: true positives / possible (actual) positives.

    Operates on Keras backend tensors; predictions are rounded to {0, 1}
    before counting. (The original also computed `predicted_p` and a
    precision `p`, neither of which was used — removed.)
    """
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_p = K.sum(K.round(K.clip(y_true, 0, 1)))
    # K.epsilon() avoids division by zero when there are no actual positives.
    return tp / (possible_p + K.epsilon())
def one_hot_encode_object_array(arr):
    """One-hot encode a numpy array of objects (e.g. strings).

    Returns an (n_samples, n_classes) float array with exactly one 1.0 per
    row; columns are ordered by the sorted unique labels (np.unique order).
    """
    uniques, ids = np.unique(arr, return_inverse=True)
    # Build the indicator matrix directly with numpy instead of
    # keras.utils.np_utils.to_categorical — identical output, no keras
    # dependency needed for this helper.
    one_hot = np.zeros((ids.size, uniques.size))
    one_hot[np.arange(ids.size), ids] = 1.0
    return one_hot
# --- Script: train a small MLP classifier on the iris dataset ---------------
seed = 7
np.random.seed(seed)  # reproducible weight init / shuffling

# iris.csv is expected alongside this script: 4 float feature columns
# followed by 1 string label column, no header row.
dataframe = pd.read_csv("iris.csv", header=None)
dataset = dataframe.values
X = dataset[:, 0:4].astype(float)
Y = dataset[:, 4]

# 50/50 train/test split; fixed random_state for reproducibility.
train_X, test_X, train_y, test_y = train_test_split(X, Y, train_size=0.5, random_state=1)
Y_train = one_hot_encode_object_array(train_y)
Y_test = one_hot_encode_object_array(test_y)

# 4 inputs -> 16 sigmoid units -> 3-way softmax.
model = Sequential()
model.add(Dense(16, input_shape=(4,)))
model.add(Activation('sigmoid'))
model.add(Dense(3))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['accuracy', f1_score, prec, recall])
# NOTE(review): nb_epoch is the legacy Keras 1.x argument name; newer
# Keras versions spell it epochs= — confirm against the installed version.
model.fit(train_X, Y_train, verbose=1, batch_size=1, nb_epoch=100)

score = model.evaluate(test_X, Y_test, verbose=0)
# print() calls work under both Python 2 and 3 (original used Python 2
# print statements, which are a syntax error under Python 3).
print("score is")
print(score)
-----------------------------------------
We obtained an accuracy of 100% on the test set.
References
Tutorial link
Code improvement link
We need to convert class labels that look like:
setosa
versicolor
setosa
virginica
...
into a one-hot encoded table that looks like:
setosa versicolor virginica
1 0 0
0 1 0
1 0 0
0 0 1
from keras.models import Sequential
from keras.layers import Dense,Dropout,Activation,Flatten
from keras.layers import Convolution2D,MaxPooling2D
from keras.utils import np_utils
from keras.datasets import mnist
from matplotlib import pyplot as plt
import keras.backend as K
import pandas as pd
from sklearn.cross_validation import train_test_split
#import keras.backend as K
def f1_score(y_true, y_pred):
    """F1 score (harmonic mean of precision and recall) as a Keras metric.

    Operates on backend tensors; predictions are clipped to [0, 1] and
    rounded to {0, 1} before counting. K.epsilon() guards every division
    against a zero denominator.
    """
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    recall = true_positives / (possible_positives + K.epsilon())
    beta_squared = 1  # beta = 1, so beta**2 = 1 -> plain F1 score
    numerator = (1 + beta_squared) * precision * recall
    denominator = beta_squared * precision + recall + K.epsilon()
    return numerator / denominator
def prec(y_true, y_pred):
    """Precision metric: true positives / predicted positives.

    Operates on Keras backend tensors; predictions are rounded to {0, 1}
    before counting. (The original also computed `possible_p`, which was
    never used — removed.)
    """
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_p = K.sum(K.round(K.clip(y_pred, 0, 1)))
    # K.epsilon() avoids division by zero when nothing is predicted positive.
    return tp / (predicted_p + K.epsilon())
def recall(y_true, y_pred):
    """Recall metric: true positives / possible (actual) positives.

    Operates on Keras backend tensors; predictions are rounded to {0, 1}
    before counting. (The original also computed `predicted_p` and a
    precision `p`, neither of which was used — removed.)
    """
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_p = K.sum(K.round(K.clip(y_true, 0, 1)))
    # K.epsilon() avoids division by zero when there are no actual positives.
    return tp / (possible_p + K.epsilon())
def one_hot_encode_object_array(arr):
    """One-hot encode a numpy array of objects (e.g. strings).

    Returns an (n_samples, n_classes) float array with exactly one 1.0 per
    row; columns are ordered by the sorted unique labels (np.unique order).
    """
    uniques, ids = np.unique(arr, return_inverse=True)
    # Build the indicator matrix directly with numpy instead of
    # keras.utils.np_utils.to_categorical — identical output, no keras
    # dependency needed for this helper.
    one_hot = np.zeros((ids.size, uniques.size))
    one_hot[np.arange(ids.size), ids] = 1.0
    return one_hot
# --- Script: train a small MLP classifier on the iris dataset ---------------
seed = 7
np.random.seed(seed)  # reproducible weight init / shuffling

# iris.csv is expected alongside this script: 4 float feature columns
# followed by 1 string label column, no header row.
dataframe = pd.read_csv("iris.csv", header=None)
dataset = dataframe.values
X = dataset[:, 0:4].astype(float)
Y = dataset[:, 4]

# 50/50 train/test split; fixed random_state for reproducibility.
train_X, test_X, train_y, test_y = train_test_split(X, Y, train_size=0.5, random_state=1)
Y_train = one_hot_encode_object_array(train_y)
Y_test = one_hot_encode_object_array(test_y)

# 4 inputs -> 16 sigmoid units -> 3-way softmax.
model = Sequential()
model.add(Dense(16, input_shape=(4,)))
model.add(Activation('sigmoid'))
model.add(Dense(3))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['accuracy', f1_score, prec, recall])
# NOTE(review): nb_epoch is the legacy Keras 1.x argument name; newer
# Keras versions spell it epochs= — confirm against the installed version.
model.fit(train_X, Y_train, verbose=1, batch_size=1, nb_epoch=100)

score = model.evaluate(test_X, Y_test, verbose=0)
# print() calls work under both Python 2 and 3 (original used Python 2
# print statements, which are a syntax error under Python 3).
print("score is")
print(score)
-----------------------------------------
We obtained an accuracy of 100% on the test set.
References
Tutorial link
Code improvement link
No comments:
Post a Comment