import copy, numpy as np
import sklearn
def sigmoid(x):
    """Logistic sigmoid nonlinearity: maps any real input into (0, 1)."""
    return 1.0 / (1.0 + np.exp(-x))
def sigmoid_prime(output):
    """Sigmoid derivative, expressed in terms of the sigmoid's OUTPUT.

    Because s'(x) = s(x) * (1 - s(x)), callers pass the already-computed
    activation rather than the raw input.
    """
    slope = output * (1 - output)
    return slope
# --- Data loading and normalisation ---------------------------------------
# NOTE(review): sklearn.preprocessing.normalize() does L2 row-normalisation
# by default — confirm this is the intended scaling for this time series.
norm = sklearn.preprocessing

filename = 'output.csv'
data = np.loadtxt(filename, delimiter=',')

# x: first five rows as the input sequence (one row per timestep).
# y: row 20 as the target.
# NOTE(review): assumes the CSV has >= 21 rows and that each row has
# input_dim columns — verify against the actual file. normalize() requires
# a 2-D array, so the single target row is sliced (data[20:21]) rather
# than indexed (data[20], which is 1-D and would raise).
x = norm.normalize(data[:5])
y = norm.normalize(data[20:21])

# --- Hyperparameters -------------------------------------------------------
alpha = 0.00021      # learning rate
input_dim = 1        # features per timestep
hidden_dim = 2       # size of the recurrent hidden state
output_dim = 1       # features in the prediction
n_iter = 100000      # training iterations ('iter' shadowed a builtin)

# --- Weight initialisation: uniform in [-1, 1) -----------------------------
syn_0 = 2 * np.random.random((input_dim, hidden_dim)) - 1    # input  -> hidden
syn_1 = 2 * np.random.random((hidden_dim, output_dim)) - 1   # hidden -> output
syn_h = 2 * np.random.random((hidden_dim, hidden_dim)) - 1   # hidden -> hidden

# Gradient accumulators; summed over the timesteps of one iteration,
# applied once, then zeroed.
syn_0_upd = np.zeros_like(syn_0)
syn_1_upd = np.zeros_like(syn_1)
syn_h_upd = np.zeros_like(syn_h)

# --- Training: backpropagation through time --------------------------------
for j in range(n_iter):
    # Per-iteration state MUST be reset here; previously it was initialised
    # once outside the loop, so the error and stored activations grew
    # without bound across iterations.
    overallError = 0
    layer_2_deltas = list()
    layer_1_values = list()
    layer_1_values.append(np.zeros(hidden_dim))  # h(-1) = 0

    # Forward pass: one timestep per row of x. This loop was commented out,
    # leaving 'position' undefined in the backward pass below (NameError).
    for position in range(len(x)):
        X = np.atleast_2d(x[position])
        # hidden layer = f(current input + previous hidden state)
        layer_1 = sigmoid(np.dot(X, syn_0) + np.dot(layer_1_values[-1], syn_h))
        # output layer
        layer_2 = sigmoid(np.dot(layer_1, syn_1))
        # error at the output for this timestep
        layer_2_error = y - layer_2
        layer_2_deltas.append(layer_2_error * sigmoid_prime(layer_2))
        # BUGFIX: accumulate |error|, not |output| as before.
        overallError += np.abs(layer_2_error[0])
        # store the hidden state so the next timestep can use it
        layer_1_values.append(copy.deepcopy(layer_1))

    future_layer_1_delta = np.zeros(hidden_dim)

    # Backward pass: walk the timesteps in reverse, accumulating gradients.
    for position in range(len(x)):
        X = np.atleast_2d(x[-position - 1])
        layer_1 = layer_1_values[-position - 1]
        prev_layer_1 = layer_1_values[-position - 2]
        # delta at the output layer for this timestep
        layer_2_delta = layer_2_deltas[-position - 1]
        # delta at the hidden layer: error arriving from the future hidden
        # state plus error arriving from the current output
        layer_1_delta = (future_layer_1_delta.dot(syn_h.T) +
                         layer_2_delta.dot(syn_1.T)) * sigmoid_prime(layer_1)
        # accumulate gradients
        syn_1_upd += np.atleast_2d(layer_1).T.dot(layer_2_delta)
        # BUGFIX: the recurrent and input weights are updated with the
        # HIDDEN delta; the original used layer_2_delta, whose shape and
        # semantics are wrong for these two matrices.
        syn_h_upd += np.atleast_2d(prev_layer_1).T.dot(layer_1_delta)
        syn_0_upd += X.T.dot(layer_1_delta)
        future_layer_1_delta = layer_1_delta

    # Apply accumulated gradients (ascent on y - layer_2, i.e. descent on
    # the squared error), then zero the accumulators for the next iteration.
    syn_0 += syn_0_upd * alpha
    syn_1 += syn_1_upd * alpha
    syn_h += syn_h_upd * alpha
    syn_0_upd *= 0
    syn_1_upd *= 0
    syn_h_upd *= 0

    if j % 1000 == 0:
        print("Error: " + str(overallError))
        # TODO: report a prediction here, e.g. print("Pred: " + str(layer_2))