
Commit a66c49f

Fix context in setup: neuraxle===0.5.2 breaking changes
1 parent e25f2d1 commit a66c49f
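
The breaking change addressed here is that, as of Neuraxle 0.5.2, `setup()` receives an `ExecutionContext`, so the `create_*` callbacks that the TensorFlow steps invoke during setup are now handed that context as a second argument. A minimal before/after sketch of the callback change (the optimizer body is taken from the README example below; the import paths are assumptions about the package layout):

```python
import tensorflow as tf
from neuraxle.base import ExecutionContext  # assumed import path for neuraxle 0.5.2
from neuraxle_tensorflow.tensorflow_v2 import Tensorflow2ModelStep

# Before this commit: callbacks only received the step.
def create_optimizer_old(step: Tensorflow2ModelStep):
    return tf.keras.optimizers.Adam(0.1)

# After this commit: callbacks also receive the ExecutionContext given to setup().
def create_optimizer(step: Tensorflow2ModelStep, context: ExecutionContext):
    return tf.keras.optimizers.Adam(0.1)
```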

File tree

6 files changed: +50 / -70 lines


README.md

Lines changed: 18 additions & 36 deletions
@@ -13,7 +13,7 @@ Neuraxle is a Machine Learning (ML) library for building neat pipelines, providi
 Create a tensorflow 1 model step by giving it a graph, an optimizer, and a loss function.

 ```python
-def create_graph(step: TensorflowV1ModelStep):
+def create_graph(step: TensorflowV1ModelStep, context: ExecutionContext):
     tf.placeholder('float', name='data_inputs')
     tf.placeholder('float', name='expected_outputs')

@@ -24,7 +24,7 @@ def create_graph(step: TensorflowV1ModelStep):
     """
     # Note: you can also return a tuple containing two elements : tensor for training (fit), tensor for inference (transform)
-    def create_graph(step: TensorflowV1ModelStep)
+    def create_graph(step: TensorflowV1ModelStep, context: ExecutionContext)
         # ...
         decoder_outputs_training = create_training_decoder(step, encoder_state, decoder_cell)
         decoder_outputs_inference = create_inference_decoder(step, encoder_state, decoder_cell)
@@ -33,10 +33,10 @@ def create_graph(step: TensorflowV1ModelStep)
     """


-def create_loss(step: TensorflowV1ModelStep):
+def create_loss(step: TensorflowV1ModelStep, context: ExecutionContext):
     return tf.reduce_sum(tf.pow(step['output'] - step['expected_outputs'], 2)) / (2 * N_SAMPLES)

-def create_optimizer(step: TensorflowV1ModelStep):
+def create_optimizer(step: TensorflowV1ModelStep, context: ExecutionContext):
     return tf.train.GradientDescentOptimizer(step.hyperparams['learning_rate'])

 model_step = TensorflowV1ModelStep(
@@ -56,10 +56,10 @@ model_step = TensorflowV1ModelStep(
 Create a tensorflow 2 model step by giving it a model, an optimizer, and a loss function.

 ```python
-def create_model(step: Tensorflow2ModelStep):
+def create_model(step: Tensorflow2ModelStep, context: ExecutionContext):
     return LinearModel()

-def create_optimizer(step: Tensorflow2ModelStep):
+def create_optimizer(step: Tensorflow2ModelStep, context: ExecutionContext):
     return tf.keras.optimizers.Adam(0.1)

 def create_loss(step: Tensorflow2ModelStep, expected_outputs, predicted_outputs):
@@ -94,36 +94,18 @@ feature_0_metric = metric_3d_to_2d_wrapper(mean_squared_error)
 metrics = {'mse': feature_0_metric}

 signal_prediction_pipeline = Pipeline([
-    ForEachDataInput(MeanStdNormalizer()),
-    ToNumpy(),
-    Tensorflow2ModelStep(
-        create_model=create_model,
-        create_loss=create_loss,
-        create_optimizer=create_optimizer,
-        expected_outputs_dtype=tf.dtypes.float32,
-        data_inputs_dtype=tf.dtypes.float32,
-        print_loss=True
-    ).set_hyperparams(seq2seq_pipeline_hyperparams)
-]).set_name('SignalPrediction')
-
-pipeline = Pipeline([EpochRepeater(
-    ValidationSplitWrapper(
-        MetricsWrapper(Pipeline([
-            TrainOnlyWrapper(DataShuffler()),
-            MiniBatchSequentialPipeline([
-                MetricsWrapper(
-                    signal_prediction_pipeline,
-                    metrics=metrics,
-                    name='batch_metrics'
-                )
-            ], batch_size=batch_size)
-        ]), metrics=metrics,
-        name='epoch_metrics',
-        print_metrics=True
-    ),
-    test_size=validation_size,
-    scoring_function=feature_0_metric
-), epochs=epochs)])
+    TrainOnly(DataShuffler()),
+    WindowTimeSeries(),
+    MeanStdNormalizer(),
+    MiniBatchSequentialPipeline([
+        Tensorflow2ModelStep(
+            create_model=create_model,
+            create_loss=create_loss,
+            create_optimizer=create_optimizer,
+            print_loss=True
+        ).set_hyperparams(seq2seq_pipeline_hyperparams)
+    ])
+])

 pipeline, outputs = pipeline.fit_transform(data_inputs, expected_outputs)
 ```
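
As a rough usage sketch of the updated TF2 callbacks above, outside of the full pipeline (the toy data, the Dense model, and the direct `fit_transform` call are illustrative assumptions, not part of this commit):

```python
import numpy as np
import tensorflow as tf
from neuraxle.base import ExecutionContext
from neuraxle_tensorflow.tensorflow_v2 import Tensorflow2ModelStep

def create_model(step: Tensorflow2ModelStep, context: ExecutionContext):
    # Any Keras model works here; a single Dense layer keeps the sketch small.
    return tf.keras.Sequential([tf.keras.layers.Dense(1)])

def create_optimizer(step: Tensorflow2ModelStep, context: ExecutionContext):
    return tf.keras.optimizers.Adam(0.1)

def create_loss(step: Tensorflow2ModelStep, expected_outputs, predicted_outputs):
    return tf.reduce_mean(tf.abs(predicted_outputs - expected_outputs))

model_step = Tensorflow2ModelStep(
    create_model=create_model,
    create_loss=create_loss,
    create_optimizer=create_optimizer,
    print_loss=True
)

# Toy data only, to show the callback wiring; shapes and sizes are arbitrary.
data_inputs = np.random.rand(8, 2).astype(np.float32)
expected_outputs = np.random.rand(8, 1).astype(np.float32)
model_step, outputs = model_step.fit_transform(data_inputs, expected_outputs)
```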

neuraxle_tensorflow/tensorflow.py

Lines changed: 17 additions & 19 deletions
@@ -3,17 +3,24 @@

 class BaseTensorflowModelStep(BaseStep):
     def __init__(
-        self,
-        create_model,
-        create_loss,
-        create_optimizer,
-        step_saver,
-        create_inputs=None,
-        data_inputs_dtype=None,
-        expected_outputs_dtype=None,
-        print_loss=False,
-        print_func=None
+            self,
+            create_model,
+            create_loss,
+            create_optimizer,
+            step_saver,
+            create_inputs=None,
+            data_inputs_dtype=None,
+            expected_outputs_dtype=None,
+            print_loss=False,
+            print_func=None
     ):
+        BaseStep.__init__(
+            self,
+            savers=[step_saver],
+            hyperparams=self.__class__.HYPERPARAMS,
+            hyperparams_space=self.__class__.HYPERPARAMS_SPACE
+        )
+
         self.create_inputs = create_inputs
         self.create_model = create_model
         self.create_loss = create_loss
@@ -22,22 +29,13 @@ def __init__(
         self.expected_outputs_dtype = expected_outputs_dtype
         self.data_inputs_dtype = data_inputs_dtype

-        self.set_hyperparams(self.__class__.HYPERPARAMS)
-        self.set_hyperparams_space(self.__class__.HYPERPARAMS_SPACE)
-
         self.train_losses = []
         self.test_losses = []
         self.print_loss = print_loss
         if print_func is None:
             print_func = print
         self.print_func = print_func

-        BaseStep.__init__(
-            self,
-            savers=[step_saver],
-            hyperparams=self.HYPERPARAMS
-        )
-
     def add_new_loss(self, loss, test_only=False):
         if test_only:
             if not self.is_train:
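
The constructor is reordered so that `BaseStep.__init__` runs first and receives the savers, the hyperparameters and, newly, the hyperparameter space, replacing the later `set_hyperparams` / `set_hyperparams_space` calls. A sketch of the class-level attributes a subclass is assumed to declare for `self.__class__.HYPERPARAMS` and `self.__class__.HYPERPARAMS_SPACE` to resolve (the subclass name and values are illustrative only):

```python
from neuraxle.hyperparams.distributions import LogUniform
from neuraxle.hyperparams.space import HyperparameterSamples, HyperparameterSpace

class MyTensorflowStep(BaseTensorflowModelStep):
    # Picked up through self.__class__ by the BaseStep.__init__ call in the base constructor above.
    HYPERPARAMS = HyperparameterSamples({'learning_rate': 0.01})
    HYPERPARAMS_SPACE = HyperparameterSpace({'learning_rate': LogUniform(0.0001, 0.1)})
```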

neuraxle_tensorflow/tensorflow_v1.py

Lines changed: 4 additions & 4 deletions
@@ -69,7 +69,7 @@ def __init__(
         self.has_expected_outputs = has_expected_outputs
         self.create_feed_dict = create_feed_dict

-    def setup(self) -> BaseStep:
+    def setup(self, context: ExecutionContext) -> BaseStep:
         """
         Setup tensorflow 1 graph, and session using a variable scope.

@@ -84,15 +84,15 @@ def setup(self) -> BaseStep:
         with tf.variable_scope(self.variable_scope, reuse=tf.AUTO_REUSE):
             self.session = tf.Session(config=tf.ConfigProto(log_device_placement=True), graph=self.graph)

-            model = self.create_model(self)
+            model = self.create_model(self, context)
             if not isinstance(model, tuple):
                 tf.identity(model, name='output')
             else:
                 tf.identity(model[0], name='output')
                 tf.identity(model[1], name='inference_output')

             tf.identity(self.create_loss(self), name='loss')
-            self.create_optimizer(self).minimize(self['loss'], name='optimizer')
+            self.create_optimizer(self, context).minimize(self['loss'], name='optimizer')

             init = tf.global_variables_initializer()
             self.session.run(init)
@@ -258,7 +258,7 @@ def load_step(self, step: 'TensorflowV1ModelStep', context: 'ExecutionContext')
         :return: loaded step
         """
         step.is_initialized = False
-        step.setup()
+        step.setup(context)

         with step.graph.as_default():
             saver = tf.train.Saver()
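
Note that in the v1 `setup()` above, `tf.identity(self.create_loss(self), name='loss')` is an unchanged context line, so the loss callback is still invoked with the step only, while the graph and optimizer callbacks now also get the context. A tolerant signature with a default, as sketched here (an assumption, not part of the commit), works with either calling convention:

```python
# N_SAMPLES comes from the surrounding README/test example.
def create_loss(step: TensorflowV1ModelStep, context: ExecutionContext = None):
    return tf.reduce_sum(tf.pow(step['output'] - step['expected_outputs'], 2)) / (2 * N_SAMPLES)
```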

neuraxle_tensorflow/tensorflow_v2.py

Lines changed: 4 additions & 4 deletions
@@ -70,7 +70,7 @@ def __init__(
             tf_model_checkpoint_folder = 'tensorflow_ckpts'
         self.tf_model_checkpoint_folder = tf_model_checkpoint_folder

-    def setup(self) -> BaseStep:
+    def setup(self, context: ExecutionContext) -> BaseStep:
         """
         Setup optimizer, model, and checkpoints for saving.

@@ -81,8 +81,8 @@ def setup(self) -> BaseStep:
             return self

         with tf.device(self.device_name):
-            self.optimizer = self.create_optimizer(self)
-            self.model = self.create_model(self)
+            self.optimizer = self.create_optimizer(self, context)
+            self.model = self.create_model(self, context)

         self.checkpoint = tf.train.Checkpoint(step=tf.Variable(1), optimizer=self.optimizer, net=self.model)
         self.checkpoint_manager = tf.train.CheckpointManager(
@@ -199,7 +199,7 @@ def load_step(self, step: 'Tensorflow2ModelStep', context: 'ExecutionContext') -
         :return: loaded step
         """
         step.is_initialized = False
-        step.setup()
+        step.setup(context)
         step.checkpoint.restore(step.checkpoint_manager.latest_checkpoint)
         return step

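
Any caller that previously invoked `step.setup()` with no arguments, like the saver's `load_step` above, now has to forward an `ExecutionContext`. A sketch of such a caller (the default `ExecutionContext()` construction is an assumption about the neuraxle 0.5.2 API):

```python
from neuraxle.base import ExecutionContext

def reinitialize(step: Tensorflow2ModelStep, context: ExecutionContext = None) -> Tensorflow2ModelStep:
    # Re-run setup under the new signature; setup() returns the step itself.
    context = context if context is not None else ExecutionContext()
    step.is_initialized = False
    return step.setup(context)
```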

testing/test_tensorflow_v1.py

Lines changed: 3 additions & 3 deletions
@@ -12,7 +12,7 @@
 MATMUL_VARIABLE_SCOPE = "matmul"


-def create_graph(step: TensorflowV1ModelStep):
+def create_graph(step: TensorflowV1ModelStep, context: ExecutionContext):
     tf.placeholder('float', name='data_inputs')
     tf.placeholder('float', name='expected_outputs')

@@ -22,11 +22,11 @@ def create_graph(step: TensorflowV1ModelStep):
     return tf.add(tf.multiply(step['data_inputs'], step['weight']), step['bias'])


-def create_loss(step: TensorflowV1ModelStep):
+def create_loss(step: TensorflowV1ModelStep, context: ExecutionContext):
     return tf.reduce_sum(tf.pow(step['output'] - step['expected_outputs'], 2)) / (2 * N_SAMPLES)


-def create_optimizer(step: TensorflowV1ModelStep):
+def create_optimizer(step: TensorflowV1ModelStep, context: ExecutionContext):
     return tf.train.GradientDescentOptimizer(step.hyperparams['learning_rate'])

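A hypothetical companion check, not part of this commit, that would pin down the migrated two-argument form of the v1 test callbacks:

```python
import inspect

def test_v1_callbacks_accept_context():
    # All three callbacks should now take the step and the execution context.
    for func in (create_graph, create_loss, create_optimizer):
        assert list(inspect.signature(func).parameters) == ['step', 'context']
```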

testing/test_tensorflow_v2.py

Lines changed: 4 additions & 4 deletions
@@ -23,16 +23,16 @@ def toy_dataset():
         dict(x=inputs, y=labels)).repeat(10).batch(2)


-def create_model(step: Tensorflow2ModelStep):
+def create_model(step: Tensorflow2ModelStep, context: ExecutionContext):
     return LinearModel()


-def create_optimizer(step: Tensorflow2ModelStep):
+def create_optimizer(step: Tensorflow2ModelStep, context: ExecutionContext):
     return tf.keras.optimizers.Adam(0.1)


-def create_loss(step: Tensorflow2ModelStep, expected_outputs, actual_outputs):
-    return tf.reduce_mean(tf.abs(actual_outputs - expected_outputs))
+def create_loss(step: Tensorflow2ModelStep, expected_outputs, predicted_outputs):
+    return tf.reduce_mean(tf.abs(predicted_outputs - expected_outputs))


 def test_tensorflowv2_saver(tmpdir):
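
The renamed loss arguments can be sanity-checked in isolation, since the TF2 `create_loss` above ignores the step; a standalone sketch (not from the commit):

```python
import tensorflow as tf

expected = tf.constant([1.0, 2.0, 3.0])
predicted = tf.constant([1.5, 2.0, 2.0])
loss = create_loss(step=None, expected_outputs=expected, predicted_outputs=predicted)
assert abs(float(loss) - 0.5) < 1e-6  # mean of |[0.5, 0.0, -1.0]| is 0.5
```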
