-
Notifications
You must be signed in to change notification settings - Fork 4
/
keras_neural.py
120 lines (108 loc) · 3.01 KB
/
keras_neural.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
import cv2
import numpy as np
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasRegressor
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
import sys
import math
import subprocess
import preprocess
import os
import keras.backend as K
# import theano.tensor as T
# from theano import function
# Flag flipped to 1 after the first call to custom_loss(); the first epoch
# is treated as a warm-up with a dummy path (see custom_loss).
first_c = 0
# Map image sampled by ispath(). NOTE(review): cv2.imread returns None when
# the file is missing, which would make ispath() raise — TODO confirm the
# dataset path is always present.
img = cv2.imread('dataset/img0.jpg')
# Open handle on the ideal-path file; lossfunction() iterates it across
# calls, so its read position is shared module-level state.
ideal_path = open('data/finaloutput.txt')
# Index of the current image/epoch, advanced inside custom_loss().
image_num = 0
def ispath(x, y):
    """Return True when the pixel at (x, y) of the module-level `img` has an
    intensity strictly between 100 and 200, i.e. is considered part of a
    traversable path.

    NOTE(review): cv2.imread loads a 3-channel BGR image by default, in which
    case `img[x, y]` is a length-3 array and this comparison would raise
    ValueError — presumably the image is grayscale-like in practice; TODO
    confirm against the dataset.
    """
    # Fix the `if cond: return True / else: return False` anti-pattern by
    # returning the (chained) comparison directly — same result for scalars.
    return 100 < img[x, y] < 200
def custom_loss(y_true,y_pred):
    """Loss hook that runs the external ./apf planner on the prediction and
    scores the planner's output path against the ideal path via lossfunction().

    NOTE(review): y_pred is stringified straight into a shell command, so this
    presumably expects a scalar-like prediction — TODO confirm what Keras
    actually passes here (it is normally a tensor).
    """
    #make graph and array
    #print("apf.cpp "+str(A_L) +" "+str(num))
    global image_num,first_c
    print((y_true.shape))
    # Planner command-line arguments: "<prediction> <image index>".
    file_arg = str(y_pred)+" "+str(image_num)
    print(file_arg)
    array = []
    if(first_c):
        # Every call after the first: run the planner, then parse its output.
        print("Second!!!!")
        image_num+=1
        # NOTE(review): shell command assembled by string concatenation;
        # subprocess.run([...]) with a list would be safer if y_pred could
        # ever contain shell metacharacters.
        os.system("./apf "+ file_arg)
        text = open('apf_out.txt',"r")  # NOTE(review): handle never closed.
        for line in text:
            lineSplit=line.split(" ")
        # Only the LAST line's tokens survive the loop above; lineSplit is
        # unbound (NameError) if apf_out.txt is empty — TODO confirm intended.
        count = 1
        # print(len(lineSplit))
        for word in lineSplit:
            count+=1
            # count starts at 1 and is bumped before the check, so the final
            # token of the line is deliberately skipped.
            if(count<=len(lineSplit)):
                # print(word)
                array.append(int(word))
        return lossfunction(array,image_num-1)
    # First-ever call: no planner output exists yet — score a dummy path
    # of one hundred 99s against line 1 of the ideal-path file.
    first_c = 1
    print("FIRSTTTT")
    for i in range(100):
        array.append(99)
    return lossfunction(array,1)
def lossfunction(epoch_array,num):
    """Compare the planner output (epoch_array) against the num-th remaining
    line of the module-level ideal-path file and return the mean-squared
    error as a TensorFlow tensor.

    NOTE(review): `ideal_path` is a shared open file object, so iteration
    resumes wherever the previous call left off — `count == num` indexes into
    the UNREAD remainder of the file, not the file from the top. TODO confirm
    this interplay with custom_loss's image_num bookkeeping is intended.
    """
    step_down = 10000  # NOTE(review): unused.
    error = 0
    count = 0
    # Advance to the num-th remaining line of the ideal-path file.
    for line in ideal_path:
        if(count==num):
            lineSplit = line.split(" ")
            break
        count+=1
    # NOTE(review): lineSplit is unbound (NameError) if fewer than num+1
    # lines remain in the file.
    i = 0
    ideal = 0
    count = 1
    #Y = np.zeros(shape = (1, 1))
    #print(lineSplit)
    error = []
    real = []
    # Parse the coordinate tokens, skipping the punctuation tokens produced
    # by the "(x, y)"-style formatting of the ideal-path file.
    for word in lineSplit:
        count+=1
        if word == '(' or word == ',' or word == ')' or word == ' ' or word == '':
            continue
        else:
            # Skip the line's final token (same count trick as custom_loss)
            # and stop collecting once we match the planner path's length.
            if(count<=len(lineSplit)) and i < len(epoch_array):
                # ideal = epoch_array[i]
                i+=1
                real.append(int(word))
                #print(i)
                #print(word)
                # error.append((int(ideal) - int(word))*(int(ideal) - int(word)))
    # print("error = "+str(error))
    # tens_arr = tf.convert_to_tensor(error,dtype=tf.float32)
    # Y = tf.reduce_mean(tens_arr)
    # MSE between planner path and parsed ideal path; TF raises if the two
    # sequences end up with different lengths.
    Y = tf.losses.mean_squared_error(epoch_array,real)
    print(Y)
    return Y
# ---- Training script -------------------------------------------------------
# Build the feature matrix from the obstacle file and train a small dense
# regressor on it.
number_of_features = 57
features = preprocess.all_circles("data/obst.txt")
training_set_size = 50000

# Feature matrix and targets. NOTE(review): Y stays all zeros — the targets
# are never populated from data; TODO confirm this is intentional.
X_all = np.zeros(shape = (training_set_size, number_of_features))
Y = np.zeros(shape = (training_set_size, 1))
#Y_all = np.zeros(shape = (train_size,1))
for i in range(training_set_size):
    for j in range(number_of_features):
        #print("fa",features[i][j])
        X_all[i][j] = float(features[i][j])

# 57 -> 18 -> 9 -> 1 fully connected network with a linear output layer.
model = Sequential()
model.add(Dense(18, input_dim=57, kernel_initializer='normal', activation='relu'))
# model.add(Dense(18, kernel_initializer='normal', activation='relu'))
model.add(Dense(9, kernel_initializer='normal', activation='relu'))
model.add(Dense(1, kernel_initializer='normal'))
# NOTE(review): binary_crossentropy with all-zero targets and a linear output
# is an odd pairing for a regressor, and custom_loss defined above is never
# wired in — TODO confirm which loss was intended.
model.compile(loss='binary_crossentropy', optimizer='adam')
print("going to fit !!!")
model.fit(X_all, Y, epochs=150, batch_size=1, verbose=2)
# Bug fix: the original called model.predict(X), but no name `X` exists at
# module level — that raised NameError right after training completed.
predictions = model.predict(X_all)