To this day, I haven't given up (just like I still see many of you here with this software) on trying to hit the lottery with the use of Neural Networks.
I must say that this software is limited, as many know, because the parameters are static and only its creator can modify them. However, I've taken it a step further in learning the backbone of how this all works. And although it's been a long journey and hundreds of hours of learning/coding, I have learned quite A LOT.
This being the case, in case it's of anyone's interest, I'm just sharing a piece of code I am starting to play with using Python.
I do advise, you need to know what's going on in order to make it work. I wouldn't mind helping anyone willing to give this a try, as it's fairly simple...
The Internet, especially YouTube, is filled with tutorials on Python and Neural Networks.
Python Code:
NOTE THAT THE GAME I TRY TO PLAY IS FLORIDA FANTASY 5 (5/36)
import numpy as np
import time
# network dimensions
n_hidden = 2   # units in the single hidden layer
n_in = 5       # input features (5 drawn numbers per row)
n_out = 5      # output units (one per predicted number)
n_sample = 10  # NOTE(review): defined but never used below — confirm intent
# hyperparameters
learning_rate = 0.00043556  # gradient step size
momentum = 0.00023452       # fraction of the previous update carried forward
# fixed seed => deterministic weight init (the run is reproducible, not random)
np.random.seed(0)
# transfer functions
def sigmoid(x):
    """Logistic sigmoid: squash x into (0, 1), element-wise on arrays.

    Original paste lost its indentation (SyntaxError); body restored.
    """
    return 1.0 / (1.0 + np.exp(-x))
def tanh_prime(x):
    """Derivative of tanh: 1 - tanh(x)**2, element-wise on arrays.

    Original paste lost its indentation (SyntaxError); body restored.
    """
    return 1 - np.tanh(x) ** 2
def train(x, t, V, W, bv, bw):
    """One forward/backward pass for a 2-layer net; returns (loss, grads).

    Parameters
    ----------
    x  : input vector, shape (n_in,)
    t  : target vector, shape (n_out,)
    V  : input->hidden weights, shape (n_in, n_hidden)
    W  : hidden->output weights, shape (n_hidden, n_out)
    bv : hidden bias, shape (n_hidden,)
    bw : output bias, shape (n_out,)

    Returns (loss, (dV, dW, Ev, Ew)); Ev and Ew double as the bias
    gradients, matching the params order [V, W, bv, bw] in the caller.
    Original paste lost its indentation (SyntaxError); body restored and
    the "entroppy" typo fixed in the comment.
    """
    # forward -- matrix multiply + biases
    A = np.dot(x, V) + bv
    Z = np.tanh(A)
    B = np.dot(Z, W) + bw
    Y = sigmoid(B)
    # backward -- output error, then backpropagate through the tanh layer
    Ew = Y - t
    Ev = tanh_prime(A) * np.dot(W, Ew)
    # weight gradients: outer products of activations and errors
    dW = np.outer(Z, Ew)
    dV = np.outer(x, Ev)
    # binary cross-entropy loss (assumes t entries lie in [0, 1])
    loss = -np.mean(t * np.log(Y) + (1 - t) * np.log(1 - Y))
    return loss, (dV, dW, Ev, Ew)
def predict(x, V, W, bv, bw):
    """Forward pass only; threshold each sigmoid output at 0.5 into 0/1.

    Accepts a single input vector or a (rows, n_in) matrix; returns an
    int array of the same leading shape. Original paste lost its
    indentation (SyntaxError); body restored.
    """
    A = np.dot(x, V) + bv
    B = np.dot(np.tanh(A), W) + bw
    return (sigmoid(B) > 0.5).astype(int)
#create layers: small random weights, zero biases
V = np.random.normal(scale=0.1, size=(n_in, n_hidden))
W = np.random.normal(scale=0.1, size=(n_hidden, n_out))
bv = np.zeros(n_hidden)
bw = np.zeros(n_out)
# params order must match the gradient tuple returned by train(): (dV, dW, Ev, Ew)
params = [V, W, bv, bw]
#generate our data -- CSV of past draws; presumably 5 comma-separated numbers
# per row (file not visible here -- confirm format)
filename = 'fl_fant_5.csv'
data = np.loadtxt(filename, delimiter=',')
# NOTE(review): X is only row 2 and T only row 0 -- a training set of ONE
# sample, with input and target taken from different draws; /100 scales the
# ball numbers (max 36) into (0, 1) for the cross-entropy loss. Verify this
# slicing is intended.
X = data[2:3] / 100
T = data[:1] / 100
#training time
for epoch in range(100000):
    err = []
    upd = [0] * len(params)
    # time.clock() was removed in Python 3.8 -- perf_counter is its replacement
    t0 = time.perf_counter()
    #for each data point, update our weights
    for i in range(X.shape[0]):
        loss, grad = train(X[i], T[i], *params)
        # momentum update: refresh the velocity FIRST, then apply it.
        # (The original applied the stale velocity before recomputing it,
        # so the first step subtracted zeros and the final gradient of each
        # epoch was never applied.)
        for j in range(len(params)):
            upd[j] = learning_rate * grad[j] + momentum * upd[j]
            params[j] -= upd[j]
        err.append(loss)
    print('Epoch: %d, Loss: %.8f, Time: %.4fs'%(epoch, np.mean(err), time.perf_counter()-t0))
# try to predict something: feed rows 4-5 of the historical draws (scaled the
# same way as the training data) through the trained net
x = data[4:6] / 100
print('Lott prediction')
print(x)
# NOTE(review): predict() returns 0/1 threshold flags per output unit, not
# lottery numbers -- interpreting these as ball picks needs a decoding step
print(predict(x, * params))