TensorFlow graph construction and training on building simulation data¶
import numpy as np
from besos import eppy_funcs as ef, sampling
from besos.evaluator import EvaluatorEP
from besos.problem import EPProblem
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from tensorflow.compat import v1 as tf
from parameter_sets import parameter_set
tf.disable_v2_behavior()  # run TF1-style graph code under TensorFlow 2
Generate data set¶
This generates an example building model and samples input data for it; see this example for details.
parameters = parameter_set(7)  # a predefined set of 7 building design parameters
problem = EPProblem(parameters, ["Electricity:Facility"])  # objective: facility electricity use
building = ef.get_building()  # load the default example building
inputs = sampling.dist_sampler(sampling.lhs, problem, 30)  # 30 Latin hypercube samples
evaluator = EvaluatorEP(problem, building)  # wraps an EnergyPlus simulation
outputs = evaluator.df_apply(inputs)  # simulate each sampled design
results = inputs.join(outputs)
results.head()
| | Conductivity | Thickness | U-Factor | Solar Heat Gain Coefficient | ElectricEquipment | Lights | Window to Wall Ratio | Electricity:Facility |
|---|---|---|---|---|---|---|---|---|
| 0 | 0.152149 | 0.161779 | 0.940276 | 0.442661 | 14.818443 | 14.964690 | 0.643468 | 2.347575e+09 |
| 1 | 0.084704 | 0.229743 | 1.246230 | 0.089919 | 11.174699 | 14.643703 | 0.860439 | 2.070557e+09 |
| 2 | 0.173219 | 0.116145 | 4.272470 | 0.045762 | 12.872851 | 12.463522 | 0.835218 | 2.045747e+09 |
| 3 | 0.127690 | 0.175980 | 2.267145 | 0.880006 | 10.282786 | 13.529331 | 0.626542 | 1.994944e+09 |
| 4 | 0.194943 | 0.283297 | 1.662965 | 0.200211 | 14.314024 | 10.283424 | 0.157865 | 2.020934e+09 |
TensorFlow Feed-forward Neural Network Example¶
1) Define Network Parameters¶
Static Parameters¶
The network parameters defined in this section are not part of the hyperparameter optimization; any of them could be switched to an optimization parameter (see the sketch below).
learning_rate = 0.1  # step size for the Adam optimizer
training_epochs = 4000  # number of training iterations
display_step = 300  # log/plot progress every display_step epochs
n_hidden_1 = 10  # neurons in hidden layer 1
n_hidden_2 = 10  # neurons in hidden layer 2
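For example, the learning rate could be promoted to a tunable hyperparameter by defining it as a placeholder, mirroring how alpha is handled in the next section. A minimal sketch (the names lr and optimizer_tunable are hypothetical, not part of the original notebook):

# Hypothetical: make the learning rate feed-able at training time
lr = tf.placeholder(tf.float32, None, name="LearningRate")
# AdamOptimizer accepts a tensor as its learning rate, so a value can be
# supplied per run via feed_dict={lr: 0.1, ...}
optimizer_tunable = tf.train.AdamOptimizer(learning_rate=lr)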
Hyperparameters¶
Here we use the L2 regularization parameter alpha to tune the network's bias-variance trade-off. Defining it as a placeholder lets different values be fed into the graph at training time.
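For reference, the loss minimized below combines the mean squared error with an L2 penalty on all weight matrices (note that tf.nn.l2_loss already includes the factor 1/2):

$$\mathcal{L} = \frac{1}{N}\sum_{i=1}^{N}\left(y_i - \hat{y}_i\right)^2 + \frac{\alpha}{2}\left(\lVert W_1 \rVert_2^2 + \lVert W_2 \rVert_2^2 + \lVert W_{\mathrm{out}} \rVert_2^2\right)$$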
alpha = tf.placeholder(tf.float32, None, name="Alpha")  # fed with a value at training time
hy_par = [1e0, 1e1, 1e3]  # candidate regularization strengths
2) Train-Test split, standardization¶
Next we split the data into a training set (80%) and a test set (20%), then standardize both inputs and outputs to zero mean and unit variance, fitting the scalers on the training set only.
train_in, test_in, train_out, test_out = train_test_split(
inputs, outputs, test_size=0.2
)
scaler = StandardScaler()  # zero mean, unit variance per input feature
X_norm = scaler.fit_transform(X=train_in)
X_norm_test = scaler.transform(test_in)  # reuse the training-set statistics
scaler_out = StandardScaler()
y_norm = scaler_out.fit_transform(X=train_out)
y_norm_test = scaler_out.transform(test_out)
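Because the network is trained on standardized targets, its predictions must be mapped back to the original units of Electricity:Facility before interpretation. A minimal sketch, where y_pred_norm is a hypothetical array of standardized network outputs:

# Hypothetical: undo the output standardization after prediction
y_pred_raw = scaler_out.inverse_transform(y_pred_norm)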
3) Set up the TensorFlow graph¶
Inputs and outputs are defined as placeholder variables, which act as the entry points for data fed into the TensorFlow graph.
X = tf.placeholder(tf.float32, [None, len(X_norm[0, :])], name="X")  # network inputs
Y = tf.placeholder(tf.float32, [None, len(y_norm[0, :])], name="y")  # training targets
with tf.name_scope("Variable_Definition"):
    weights = {
        "h1": tf.Variable(
            tf.random_normal([len(X_norm[0, :]), n_hidden_1]), name="HiddenLayer1"
        ),
        "h2": tf.Variable(
            tf.random_normal([n_hidden_1, n_hidden_2]), name="HiddenLayer2"
        ),
        "out": tf.Variable(
            tf.random_normal([n_hidden_2, len(y_norm[0, :])]), name="OutputLayer1"
        ),
    }
    biases = {
        "b1": tf.Variable(tf.random_normal([n_hidden_1]), name="Bias"),
        "b2": tf.Variable(tf.random_normal([n_hidden_2]), name="Bias"),
        # one bias per output column
        "out": tf.Variable(tf.random_normal([len(y_norm[0, :])]), name="Bias"),
    }
with tf.name_scope("FFNN_Model"): # open up the Tensorflow name scope (context manager)
def multilayer_perceptron(
x,
): # this function defines the Graph of our neural network
with tf.name_scope("HL1"):
layer_1 = tf.add(
tf.matmul(x, weights["h1"]), biases["b1"]
) # apply Cartesian Product on inputs to the network (x) and the weights of layer 1, afterwards add the biases.
layer_1 = tf.nn.relu(
layer_1
) # Apply the relu activation function subsequently in each of the neurons
with tf.name_scope("HL2"):
layer_2 = tf.add(
tf.matmul(layer_1, weights["h2"]), biases["b2"]
) # see above only we use layer_1 as input to layer_2
layer_2 = tf.nn.relu(layer_2)
with tf.name_scope("OutputLayer"):
out_layer = tf.matmul(layer_2, weights["out"]) + biases["out"]
return out_layer
# Construct the model
y_pred = multilayer_perceptron(X)
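# A quick sanity check (not in the original notebook): at this point y_pred is
# a symbolic tensor; no computation runs until a session feeds data through
# the placeholders.
print(y_pred.shape)  # e.g. (?, 1) for the single Electricity:Facility output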
with tf.name_scope("Cost_regularized"): # next element in the TF-Graph
# set up mean squared error with L2 regularization
loss_op = tf.reduce_mean(tf.square(Y - y_pred)) + alpha * (
tf.nn.l2_loss(weights["h1"])
+ tf.nn.l2_loss(weights["h2"])
+ tf.nn.l2_loss(weights["out"])
)
tf.summary.scalar("Test", loss_op) # observe the loss function throughout the run
R^2 score (operation definition)
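The operation below implements the standard coefficient of determination,

$$R^2 = 1 - \frac{\sum_i \left(y_i - \hat{y}_i\right)^2}{\sum_i \left(y_i - \bar{y}\right)^2}$$

where a value of 1 is a perfect fit and values below 0 indicate predictions worse than simply using the mean of the targets.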
with tf.name_scope("R2_Score"):
total_error = tf.reduce_sum(tf.square(Y - tf.reduce_mean(Y)))
unexplained_error = tf.reduce_sum(tf.square(Y - y_pred))
R_squared = tf.subtract(1.0, tf.div(unexplained_error, total_error))
tf.summary.scalar("R2", R_squared)
Mean Absolute Error (MAE)
with tf.name_scope("MAE"):
mae = tf.reduce_mean(tf.abs(Y - y_pred))
tf.summary.scalar("MAE", loss_op)
with tf.name_scope("Training"):
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, name="Training")
train_op = optimizer.minimize(loss_op)
init_op = tf.global_variables_initializer()
def optimize(
    training_epochs, display_step, X_train, y_train, X_test, y_test, reg_par=None
):
    # default to no regularization if no hyperparameter is provided
    # (the alpha placeholder must always be fed, since loss_op depends on it)
    a = 0.0 if reg_par is None else reg_par
    sess.run(init_op)  # start from freshly initialized weights
    fig = plt.figure()
    for i in range(training_epochs):
        sess.run([train_op], feed_dict={X: X_train, Y: y_train, alpha: a})
        if i % display_step == 0:
            # plot test (blue) and training (red) R^2 over the epochs
            pred = sess.run(R_squared, feed_dict={X: X_test, Y: y_test})
            plt.plot(i, pred, "bx")
            pred = sess.run(R_squared, feed_dict={X: X_train, Y: y_train})
            plt.plot(i, pred, "rx")
            # write the merged summaries for TensorBoard
            result = sess.run(merged, feed_dict={X: X_train, Y: y_train, alpha: a})
            writer.add_summary(result, i)
    print(
        "Finished! Test-set R^2:",
        sess.run(R_squared, feed_dict={X: X_test, Y: y_test}),
    )
    plt.close()
    return
Execute the TensorFlow Graph¶
with tf.Session() as sess:
    merged = tf.summary.merge_all()
    writer = tf.summary.FileWriter(
        "logs/NN", sess.graph
    )  # store the graph and summaries for TensorBoard
    # pick one of the candidate regularization strengths at random
    hy_par_temp = hy_par[np.random.randint(0, len(hy_par))]
    print("Hyperparameter alpha: %.3f" % hy_par_temp)
    print("Training Network")
    optimize(
        training_epochs,
        display_step,
        X_norm,
        y_norm,
        X_norm_test,
        y_norm_test,
        reg_par=hy_par_temp,
    )
Hyperparameter alpha: 10.000
Training Network
Finished! Test-set R^2: -0.02072835
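A negative R^2 means this run generalizes worse than predicting the mean of the test targets. Since only one alpha was drawn at random, a natural next step is to sweep all candidate values and compare the resulting test scores. A sketch (the log directory logs/NN_sweep is an assumed name, not part of the original notebook):

with tf.Session() as sess:
    merged = tf.summary.merge_all()
    writer = tf.summary.FileWriter("logs/NN_sweep", sess.graph)  # assumed path
    for a in hy_par:
        print("Hyperparameter alpha: %.3f" % a)
        # optimize() re-runs init_op, so each alpha starts from fresh weights
        optimize(
            training_epochs,
            display_step,
            X_norm,
            y_norm,
            X_norm_test,
            y_norm_test,
            reg_par=a,
        )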