Loss value constant and accuracy fluctuating between 0, 0.5 and 1

I am a beginner to TensorFlow, and have written the following code to classify cats and dogs using the Kaggle dataset. I started from the code used in IBM's edX course and adapted it into an image classifier for the Cats vs. Dogs dataset.



    import csv
    import re
    import matplotlib.pyplot as plt
    import datetime
    import tensorflow as tf
    import numpy as np
    import random as rd
    import os
    from PIL import Image
    import random


    def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
        assert inputs.shape[0] == targets.shape[0]
        if shuffle:
            indices = np.arange(inputs.shape[0])
            np.random.shuffle(indices)
        for start_idx in range(0, inputs.shape[0] - batchsize + 1, batchsize):
            if shuffle:
                excerpt = indices[start_idx:start_idx + batchsize]
            else:
                excerpt = slice(start_idx, start_idx + batchsize)
            yield inputs[excerpt], targets[excerpt]
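    # Note: iterate_minibatches is never called below -- the training loop
    # slices overlapping pairs by hand. A sketch of the intended usage
    # (the batch size of 32 is an arbitrary choice here) would be:
    #
    #   for batch_x, batch_y in iterate_minibatches(np.array(train_x),
    #                                               np.array(train_y),
    #                                               32, shuffle=True):
    #       ...  # feed batch_x / batch_y to the train step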



os.chdir("C:\Users\dell\Documents\PetImages")
lst = os.listdir()
train_cat =
train_dog =
train_x =
train_y =
count = 0
for anim in lst:
os.chdir(anim)
for img in os.listdir():
image = Image.open(img).convert('L')
width, height = image.size
scale_factor = max(width, height)/100
image =image.resize((int(width/scale_factor), int(height/scale_factor)))
imgarr = np.asarray(image)
zero_array = np.zeros((100,100))
zero_array[:imgarr.shape[0], :imgarr.shape[1]] = imgarr
imgarr = zero_array

#imgarr = tf.keras.utils.normalize(imgarr, axis = 1, order=2)
imgarr = (imgarr)/255.0
if anim == "Cat":
imgarr = imgarr.flatten()
train_cat.append(imgarr)
else :
imgarr = imgarr.flatten()
train_dog.append(imgarr)
#imgplot = plt.imshow(imgarr)
#print(imgarr)
#plt.show()
count+=1
if count == 500:
count = 0
print()
break
print(str(count)+" Files Read.", sep=' ', end='r', flush=True)
os.chdir('../')
print(len(train_cat))

    label_cat = [[1, 0] for i in train_cat]  # one-hot: cat = [1, 0]
    label_dog = [[0, 1] for i in train_dog]  # one-hot: dog = [0, 1]

    train_x.extend(train_cat)
    train_x.extend(train_dog)
    train_y.extend(label_cat)
    train_y.extend(label_dog)
    print(len(train_x))

    width = 100
    height = 100
    flat = width * height
    class_output = 2
    x = tf.placeholder(tf.float32, shape=[None, flat])
    y_ = tf.placeholder(tf.float32, shape=[None, class_output])

    x_image = tf.reshape(x, [-1, 100, 100, 1])
    print(x_image)

    # First convolution block: 5x5 kernels, 32 feature maps, 2x2 max pooling.
    W_conv1 = tf.Variable(tf.truncated_normal([5, 5, 1, 32], stddev=0.1))
    b_conv1 = tf.Variable(tf.constant(0.1, shape=[32]))

    convolve1 = tf.nn.conv2d(x_image, W_conv1, strides=[1, 1, 1, 1], padding="SAME") + b_conv1
    h_conv1 = tf.nn.relu(convolve1)
    conv1 = tf.nn.max_pool(h_conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
    print(conv1)

    # Second convolution block: 5x5 kernels, 64 feature maps, 2x2 max pooling.
    W_conv2 = tf.Variable(tf.truncated_normal([5, 5, 32, 64], stddev=0.1))
    b_conv2 = tf.Variable(tf.constant(0.1, shape=[64]))

    convolve2 = tf.nn.conv2d(conv1, W_conv2, strides=[1, 1, 1, 1], padding="SAME") + b_conv2
    h_conv2 = tf.nn.relu(convolve2)
    conv2 = tf.nn.max_pool(h_conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
    print(conv2)

    # Flatten and feed into a fully connected layer of 1024 units.
    layer2_matrix = tf.reshape(conv2, [-1, int(conv2.shape[1]) * int(conv2.shape[2]) * 64])
    W_fc1 = tf.Variable(tf.truncated_normal([int(layer2_matrix.shape[1]), 1024], stddev=0.1))
    b_fc1 = tf.Variable(tf.constant(0.1, shape=[1024]))

    fc1 = tf.matmul(layer2_matrix, W_fc1) + b_fc1
    h_fc1 = tf.nn.relu(fc1)
    print(h_fc1)

    keep_prob = tf.placeholder(tf.float32)
    layer_drop = tf.nn.dropout(h_fc1, keep_prob)
    print(layer_drop)

    # Output layer. Note that ReLU is applied to the logits before the
    # softmax, so negative logits are clipped to zero.
    W_fc2 = tf.Variable(tf.truncated_normal([1024, class_output], stddev=0.1))
    b_fc2 = tf.Variable(tf.constant(0.1, shape=[class_output]))

    fc2 = tf.matmul(layer_drop, W_fc2) + b_fc2
    h_fc2 = tf.nn.relu(fc2)
    print(h_fc2)

    y_CNN = tf.nn.softmax(h_fc2)
    print(y_CNN)

    print(train_y)

    print(y_ * tf.log(y_CNN))

    # Cross-entropy; reduce_sum has no axis argument here, so it sums over
    # the whole batch and the outer reduce_mean averages a single scalar.
    cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y_CNN)))

    train_step = tf.train.AdamOptimizer(1e-5).minimize(cross_entropy)

    correct_prediction = tf.equal(tf.argmax(y_CNN, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # Shuffle the training data so cats and dogs are interleaved.
    d = list(zip(train_x, train_y))
    random.shuffle(d)
    train_x, train_y = zip(*d)

    print(train_y)
    sess = tf.InteractiveSession()
    sess.run(tf.global_variables_initializer())

    n_epochs = 20  # currently unused

    # Train on overlapping two-sample batches, sliding one sample per step.
    for n in range(2, 1000):
        batch = (train_x[n - 2:n], train_y[n - 2:n])
        train_accuracy = accuracy.eval(feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0})
        loss = cross_entropy.eval(feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0})
        print("step %d, training accuracy %g, loss %g" % (n, float(train_accuracy), float(loss)))
        train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})


The problem is that when I run the program, the loss first increases and then decreases until it settles at a constant value, while the accuracy only fluctuates between three values: 0, 0.5 and 1 (the only values a batch of two samples can produce). Below is a plot of the output.

[Plot: the fluctuating accuracy and constant loss]

Can anyone please help me with this?
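One thing I have been wondering about, though I have not verified it helps: instead of hand-rolling the loss as tf.log of a softmax (which can hit log(0) once a probability saturates, especially with the extra ReLU on the logits), TensorFlow's built-in softmax cross-entropy could be applied to the raw fc2 values. A minimal sketch of that change, assuming the rest of the graph stays the same:

    # Hypothetical alternative to the loss above: let TensorFlow apply
    # softmax and log together in one numerically stable op, feeding it the
    # raw pre-activation fc2 as logits and averaging per-example losses.
    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=fc2))
    y_CNN = tf.nn.softmax(fc2)  # probabilities for the accuracy calculation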










Tags: python, tensorflow, deep-learning, kaggle






asked Nov 22 at 18:30 by praveen londhe
edited Nov 23 at 4:40