Commit e1ff6ef

update regression example
1 parent 4d13267 commit e1ff6ef

1 file changed: +16 −6 lines changed

hyperengine/examples/1_6_optimizing_regression.py

Lines changed: 16 additions & 6 deletions
@@ -16,34 +16,44 @@ def dnn_model(params):

   layer = tf.layers.batch_normalization(x, training=training) if params.batch_norm else x
   for hidden_size in params.hidden_layers:
-    layer = tf.layers.dense(layer, units=hidden_size)
+    layer = tf.layers.dense(layer, units=hidden_size, activation=tf.nn.relu)
+    layer = tf.layers.dropout(layer, params.dropout, training=training) if params.dropout else layer
   predictions = tf.layers.dense(layer, units=1)

   loss = tf.reduce_mean((predictions - y) ** 2, name='loss')
   optimizer = tf.train.AdamOptimizer(learning_rate=params.learning_rate)
   optimizer.minimize(loss, name='minimize')

+  # Hyper-tuner is generally designed for classification, so it tries to maximize the accuracy.
+  # But we have a regression problem. A simple possible metric to emulate the accuracy would be:
+  #
+  #   tf.negative(loss, name='accuracy')
+  #
+  # But we'll use this one:
+  tf.reduce_mean(tf.cast(tf.abs(predictions - y) < 0.5, tf.float32), name='accuracy')
+
 x_train, y_train, x_test, y_test, x_val, y_val = get_wine_data(path='temp-wine/data')
 data = hype.Data(train=hype.DataSet(x_train, y_train),
                  validation=hype.DataSet(x_val, y_val),
                  test=hype.DataSet(x_test, y_test))

 def solver_generator(params):
   solver_params = {
-    'batch_size': 500,
-    'eval_batch_size': 500,
+    'batch_size': 1000,
+    'eval_batch_size': 1000,
     'epochs': 20,
     'evaluate_test': True,
-    'eval_flexible': False,
+    'eval_flexible': True,
   }
   dnn_model(params)
   solver = hype.TensorflowSolver(data=data, hyper_params=params, **solver_params)
   return solver


 hyper_params_spec = hype.spec.new(
-  batch_norm = True,
-  hidden_layers = [10],
+  batch_norm = hype.spec.random_bool(),
+  hidden_layers = [hype.spec.choice(range(8, 33))],
+  dropout = hype.spec.uniform(0.5, 1.0),
   learning_rate = 10**hype.spec.uniform(-1, -3),
 )
 strategy_params = {
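
For intuition, the added 'accuracy' node measures the fraction of predictions that fall within 0.5 of the true target, so the hyper-tuner can maximize it the same way it would a classification accuracy. A minimal NumPy sketch of the same idea, with made-up numbers purely for illustration:

import numpy as np

# Hypothetical predictions and targets (illustrative values only).
predictions = np.array([5.1, 6.7, 4.9, 7.8])
targets     = np.array([5.0, 6.0, 5.2, 7.5])

# A prediction counts as "correct" when it lands within 0.5 of the target,
# mirroring tf.cast(tf.abs(predictions - y) < 0.5, tf.float32) in the diff.
within_half = np.abs(predictions - targets) < 0.5
accuracy = within_half.mean()  # 0.75 here: 3 of 4 predictions are close enough
print(accuracy)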
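
Similarly, the learning-rate spec 10**hype.spec.uniform(-1, -3) draws the exponent uniformly, which makes the learning rate itself log-uniform between 1e-3 and 1e-1. A quick plain-Python sketch of that kind of sampling (the seed and sample count are arbitrary):

import random

random.seed(0)  # arbitrary seed, just to make the sketch reproducible

# Sample exponents uniformly in [-3, -1] and exponentiate: the resulting
# learning rates cover 0.001..0.1 evenly on a logarithmic scale.
learning_rates = [10 ** random.uniform(-3, -1) for _ in range(5)]
print(learning_rates)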
