# FCAQI Source Code


# --- Deep Learning Libraries ---


from PIL import Image
import io
import os
import tensorflow as tf
from sklearn import cross_validation, metrics
from train_dataset import read_dataset, classes, read_prediction_images, test_demo
import numpy as np
import sys
import base64, datetime
import xml


#Setting Batch Size for Model Training
# Mutated globally by run_model(): 25 for training/testing, 1 for prediction.
batch_size=25
# Training-split feature array; populated globally inside run_model().
x_train=[]
#Experiment Analysis
#----------------------------------------------------------- **************************-------------------------------------
def plot_images(expected,predicted):
    """Print the confusion matrix comparing expected and predicted labels.

    NOTE(review): despite its name, this prints a text confusion matrix
    to stdout rather than plotting anything.
    """
    confusion = metrics.confusion_matrix(expected, predicted)
    print("confusion Matrix:\n ", confusion)



def save_XMLRESULT(pred_cls):
    """Placeholder for persisting predicted classes as an XML result.

    Currently iterates over the predictions without writing anything
    (iteration is kept so generator inputs are consumed as before).
    Returns None.
    """
    for prediction in pred_cls:
        # TODO: serialize `prediction` into the XML result file.
        continue


#***************************************************************************************************************************

#Deep Convolutional Neural Network Model (VGG-16)

def cnn_model_fn(features,labels,mode):
    """VGG-16 style model_fn for a tf.estimator.Estimator.

    Args:
        features: dict with key "x" holding flattened image tensors;
            reshaped here to [-1, 224, 224, 3] (NHWC) — assumes
            224x224 RGB inputs. TODO confirm with the input pipeline.
        labels: one-hot label vectors of depth 5
            (tf.losses.softmax_cross_entropy expects one-hot) —
            TODO confirm the caller supplies one-hot labels.
        mode: a tf.estimator.ModeKeys value (TRAIN / EVAL / PREDICT).

    Returns:
        tf.estimator.EstimatorSpec configured for the given mode.
    """
    # Use -1 for the batch dimension so a final partial batch also works,
    # instead of hard-coding the module-level batch_size.
    input_layer = tf.reshape(features["x"], [-1, 224, 224, 3])

    # Block 1: 2 x conv(64, 3x3) + 2x2 max-pool
    conv1 = tf.layers.conv2d(input_layer, filters=64, kernel_size=[3, 3], padding="same", activation=tf.nn.relu)
    conv1 = tf.layers.conv2d(conv1, filters=64, kernel_size=[3, 3], padding="same", activation=tf.nn.relu)
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)

    # Block 2: 2 x conv(128, 3x3) + 2x2 max-pool
    conv2 = tf.layers.conv2d(inputs=pool1, filters=128, kernel_size=[3, 3], padding="same", activation=tf.nn.relu)
    conv2 = tf.layers.conv2d(inputs=conv2, filters=128, kernel_size=[3, 3], padding="same", activation=tf.nn.relu)
    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)

    # Block 3: 3 x conv(256, 3x3) + 2x2 max-pool
    conv3 = tf.layers.conv2d(inputs=pool2, filters=256, kernel_size=[3, 3], padding="same", activation=tf.nn.relu)
    conv3 = tf.layers.conv2d(inputs=conv3, filters=256, kernel_size=[3, 3], padding="same", activation=tf.nn.relu)
    conv3 = tf.layers.conv2d(inputs=conv3, filters=256, kernel_size=[3, 3], padding="same", activation=tf.nn.relu)
    pool3 = tf.layers.max_pooling2d(inputs=conv3, pool_size=[2, 2], strides=2)

    # Block 4: 3 x conv(512, 3x3) + 2x2 max-pool
    # BUG FIX: the original fed pool3 into all three conv layers, so the
    # first two were computed and discarded; the layers must be chained.
    conv4 = tf.layers.conv2d(inputs=pool3, filters=512, kernel_size=[3, 3], strides=1, padding="same", activation=tf.nn.relu)
    conv4 = tf.layers.conv2d(inputs=conv4, filters=512, kernel_size=[3, 3], strides=1, padding="same", activation=tf.nn.relu)
    conv4 = tf.layers.conv2d(inputs=conv4, filters=512, kernel_size=[3, 3], strides=1, padding="same", activation=tf.nn.relu)
    pool4 = tf.layers.max_pooling2d(inputs=conv4, pool_size=[2, 2], strides=2)

    # Block 5: 3 x conv(512, 3x3) + 2x2 max-pool
    # BUG FIX: same chaining error as block 4 (all layers read pool4).
    conv5 = tf.layers.conv2d(inputs=pool4, filters=512, kernel_size=[3, 3], strides=1, padding="same", activation=tf.nn.relu)
    conv5 = tf.layers.conv2d(inputs=conv5, filters=512, kernel_size=[3, 3], strides=1, padding="same", activation=tf.nn.relu)
    conv5 = tf.layers.conv2d(inputs=conv5, filters=512, kernel_size=[3, 3], strides=1, padding="same", activation=tf.nn.relu)
    pool5 = tf.layers.max_pooling2d(inputs=conv5, pool_size=[2, 2], strides=2)

    # Dense head: 224 / 2^5 = 7, so pool5 is [-1, 7, 7, 512].
    # BUG FIX: the original fed the flattened pool into every dense layer,
    # discarding the first two 4096-unit layers; they are now chained.
    flat = tf.reshape(pool5, [-1, 7 * 7 * 512])
    dense = tf.layers.dense(flat, units=4096, activation=tf.nn.relu)
    dense = tf.layers.dense(dense, units=4096, activation=tf.nn.relu)
    dense = tf.layers.dense(dense, units=1000, activation=tf.nn.relu)
    # Dropout is active only in TRAIN mode.
    drop_out = tf.layers.dropout(dense, rate=0.2, training=mode == tf.estimator.ModeKeys.TRAIN)

    # Logits layer: 5 output classes.
    logits = tf.layers.dense(drop_out, units=5)

    prediction = {
        # tf.arg_max(dimension=...) is the deprecated alias of tf.argmax(axis=...).
        "classes": tf.argmax(input=logits, axis=1),
        # Named so LoggingTensorHook can reference "softmax_tensor".
        "probabilities": tf.nn.softmax(logits, name='softmax_tensor')
        }

    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=prediction)

    # Loss for both TRAIN and EVAL; labels are assumed to be one-hot already.
    loss = tf.losses.softmax_cross_entropy(onehot_labels=labels, logits=logits)

    # TRAIN: plain SGD on the cross-entropy loss.
    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
        train_op = optimizer.minimize(loss=loss, global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)

    # EVAL: accuracy of argmax(one-hot labels) vs predicted classes.
    eval_metric_op = {
        "accuracy": tf.metrics.accuracy(labels=tf.argmax(labels, axis=1), predictions=prediction['classes'])
        }
    return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metric_op)
##--------------------------------------- Setup and Run Code ---------------------------------------

#Model Setup


def run_model(mode,img):
    """Build the FCAQI Estimator and run it in one of three modes.

    Args:
        mode: 'training', 'testing', or 'prediction'.
        img: image array for prediction mode; ignored for the other modes.

    Side effects: mutates the module globals `batch_size` and `x_train`,
    and prints evaluation/prediction results to stdout.
    """
    global batch_size
    global x_train
    with tf.Session() as session:
        init = tf.initializers.global_variables()
        session.run(init)
        # NOTE(review): model_dir is a placeholder string — replace with a
        # real checkpoint directory before running.
        fcaqi_classifier = tf.estimator.Estimator(
            model_fn=cnn_model_fn,
            model_dir="Path where You want to save Model Check Points")

        # Log the softmax probabilities every 50 steps.
        tensors_to_log = {"probabilities": "softmax_tensor"}
        logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log, every_n_iter=50)

        if mode == 'training' or mode == 'testing':
            # NOTE(review): DataSetPath is not defined anywhere in this
            # file — define it (or pass it in) before using these modes.
            X, Y = read_dataset(DataSetPath)
            batch_size = 25
            # BUG FIX: train_test_split returns (X_train, X_test, y_train,
            # y_test); the original unpacked it as (x_train, y_train,
            # x_test, y_test), so labels and features were swapped below.
            # NOTE(review): sklearn.cross_validation was removed in
            # sklearn 0.20 — migrate to sklearn.model_selection.
            x_train, x_test, y_train, y_test = cross_validation.train_test_split(
                X, Y, test_size=0.25, random_state=42)
            if mode == 'training':
                # Train the classifier on the training split.
                train_input_fn = tf.estimator.inputs.numpy_input_fn(
                    x={"x": x_train},
                    y=y_train,  # BUG FIX: was y=x_test (features fed as labels)
                    batch_size=batch_size,
                    num_epochs=None,
                    shuffle=True)
                fcaqi_classifier.train(input_fn=train_input_fn, steps=2000, hooks=[logging_hook])
            elif mode == 'testing':
                # Evaluate the classifier on the held-out split.
                eval_input_fn = tf.estimator.inputs.numpy_input_fn(
                    x={"x": x_test},  # BUG FIX: was y_train (labels fed as images)
                    y=y_test,
                    batch_size=batch_size,
                    num_epochs=10,
                    shuffle=False
                    )
                eval_results = fcaqi_classifier.evaluate(input_fn=eval_input_fn)
                print(eval_results)
        elif mode == 'prediction':
            # Single-image prediction.
            batch_size = 1
            predict = tf.estimator.inputs.numpy_input_fn(
                x={"x": img},
                y=None,
                num_epochs=1,
                batch_size=batch_size,
                shuffle=False,
                )
            predictions = fcaqi_classifier.predict(input_fn=predict)
            pred_cls = list(predictions)
            print(pred_cls)
            




#---------------------------------------------------------------------------------------------------
#Model Setup Ending
#Executer Model For Three Different Modes (Training, Evaluation, Prediction)
def main():
    """Entry point: classify one image in prediction mode.

    The image path is taken from the first command-line argument; this
    restores the intent of the commented-out sys.argv code in the
    original (the name `PredictionImagePath` it used instead is not
    defined anywhere in this file and raised NameError at runtime).

    Returns:
        0 on success, 2 when no image path was supplied.
    """
    if len(sys.argv) < 2:
        print("usage: python fcaqi.py <image-path>")
        return 2
    image = test_demo(sys.argv[1])
    # Guard against a failed image load before invoking the model.
    if image is not None:
        return run_model(mode='prediction', img=image)
    return 2


if __name__ == "__main__":
    # main() returns None on success; coerce to a process exit code.
    sys.exit(int(main() or 0))

# (web-scrape residue removed: "Back To Homepage" / "Comments" page navigation)