HURT ME MORE SNAKE, MAKE ME FEEL ALIVE AGAIN!
import numpy
"""training data"""
x = numpy.array([[1,1,1,0,0,0],
[1,0,1,0,0,0],
[1,1,1,0,0,0],
[0,0,1,1,1,0],
[0,0,1,1,0,0],
[0,0,1,1,1,0]])
# one-hot class labels, one row per training sample
correct_label = numpy.array([[1, 0],
                             [1, 0],
                             [1, 0],
                             [0, 1],
                             [0, 1],
                             [0, 1]])
"""initializes weight and bias as zero vectors"""
weight = numpy.zeros((6, 2))
bias = numpy.zeros((6, 2))
def softmax(a_vector):
    """Map a vector of raw scores (logits) to a probability distribution."""
    shifted = a_vector - numpy.max(a_vector)  # guard against overflow in exp
    exps = numpy.exp(shifted)
    return exps / numpy.sum(exps)
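# A quick sanity check (illustrative numbers, not from the original post):
# softmax(numpy.array([2.0, 1.0])) -> approximately array([0.731, 0.269]);
# the entries are positive and sum to 1, as a probability distribution should.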
def softmax_a_set(a_set):
    """Apply softmax to every row (one sample's logits) in a set."""
    softmax_set = numpy.zeros(a_set.shape)
    for i, row in enumerate(a_set):
        softmax_set[i] = softmax(row)
    return softmax_set
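# The explicit loop above could equivalently be written with numpy's
# row mapper; which to use is a matter of taste:
# softmax_set = numpy.apply_along_axis(softmax, 1, a_set)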
def cross_entropy(prediction, label):
    """Cross entropy between a one-hot label and a predicted distribution."""
    return -numpy.sum(label * numpy.log(prediction))
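# Worked example (illustrative numbers): a confident correct prediction
# costs little, a confident wrong one costs a lot.
# cross_entropy(numpy.array([0.9, 0.1]), numpy.array([1, 0])) = -ln(0.9) ~ 0.105
# cross_entropy(numpy.array([0.1, 0.9]), numpy.array([1, 0])) = -ln(0.1) ~ 2.303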
def train(x):
    """Walk the parameters a step closer to values that minimize the loss."""
    pass  # left unimplemented in the original; one possible version is sketched below
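# A minimal sketch of what train() could do, assuming plain batch gradient
# descent on the softmax cross-entropy loss. The name train_step, the
# learning rate, and returning new parameters (rather than mutating the
# globals) are all assumptions, not part of the original snippet. It relies
# on the fact that for softmax + cross-entropy the gradient of the loss with
# respect to the logits is simply (probabilities - labels).
def train_step(inputs, labels, weight, bias, learning_rate=0.1):
    """One batch gradient-descent step; returns the updated parameters."""
    probs = softmax_a_set(numpy.dot(inputs, weight) + bias)
    error = probs - labels  # d(loss)/d(logits) for each sample
    grad_weight = numpy.dot(inputs.T, error) / len(inputs)
    grad_bias = error.mean(axis=0)
    return weight - learning_rate * grad_weight, bias - learning_rate * grad_bias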
y = numpy.dot(x, weight) + bias  # raw scores (logits) for every training sample
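# Illustrative use of the sketch above (the step count and the printout are
# assumptions): update the parameters repeatedly and check that the mean
# loss over the training set goes down.
for step in range(100):
    weight, bias = train_step(x, correct_label, weight, bias)
probs = softmax_a_set(numpy.dot(x, weight) + bias)
mean_loss = numpy.mean([cross_entropy(p, l) for p, l in zip(probs, correct_label)])
print("mean cross entropy after training:", mean_loss)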