Changes for Keras 2.0.7
jjallaire committed Aug 25, 2017
1 parent 5e11358 commit 475826b
Showing 34 changed files with 674 additions and 73 deletions.
2 changes: 2 additions & 0 deletions .gitignore
@@ -13,3 +13,5 @@ index.html
external/
issues
runs
MNIST-data
saved_wt.h5
2 changes: 1 addition & 1 deletion .travis.yml
@@ -7,7 +7,7 @@ sudo: false
env:
- KERAS_BACKEND="tensorflow"
- KERAS_BACKEND="theano" THEANO_FLAGS=optimizer=fast_compile
- KERAS_BACKEND="cntk"
- KERAS_BACKEND="cntk" PYTHONWARNINGS=ignore
- KERAS_IMPLEMENTATION="tensorflow"

cache:
2 changes: 2 additions & 0 deletions NAMESPACE
@@ -37,6 +37,7 @@ export(callback_reduce_lr_on_plateau)
export(callback_remote_monitor)
export(callback_tensorboard)
export(callback_terminate_on_naan)
export(clone_model)
export(compile)
export(constraint_maxnorm)
export(constraint_minmaxnorm)
@@ -158,6 +159,7 @@ export(layer_simple_rnn)
export(layer_spatial_dropout_1d)
export(layer_spatial_dropout_2d)
export(layer_spatial_dropout_3d)
export(layer_subtract)
export(layer_upsampling_1d)
export(layer_upsampling_2d)
export(layer_upsampling_3d)
20 changes: 11 additions & 9 deletions R/layers-convolutional.R
@@ -446,7 +446,7 @@ layer_conv_3d_transpose <- function(object, filters, kernel_size, strides = c(1,
#' @param pointwise_initializer Initializer for the pointwise kernel matrix.
#' @param depthwise_regularizer Regularizer function applied to the depthwise
#' kernel matrix.
#' @param pointwise_regularizer Regularizer function applied to the depthwise
#' @param pointwise_regularizer Regularizer function applied to the pointwise
#' kernel matrix.
#' @param depthwise_constraint Constraint function applied to the depthwise
#' kernel matrix.
@@ -802,14 +802,16 @@ layer_cropping_2d <- function(object, cropping = list(c(0L, 0L), c(0L, 0L)), dat
#'
#' @inheritParams layer_cropping_1d
#'
#' @param cropping int, or list of 3 ints, or list of 3 lists of 2 ints.
#' - If int: the same symmetric cropping is applied to width and height.
#' - If list of 3 ints: interpreted as two different symmetric cropping values for
#' height and width: `(symmetric_dim1_crop, symmetric_dim2_crop,
#' symmetric_dim3_crop)`.
#' - If list of 3 lists of 2 ints: interpreted as
#' `((left_dim1_crop, right_dim1_crop), (left_dim2_crop, right_dim2_crop),
#' (left_dim3_crop, right_dim3_crop))`
#' @param cropping int, or list of 3 ints, or list of 3 lists of 2 ints.
#' - If int: the same symmetric cropping is applied to depth, height, and width.
#' - If list of 3 ints: interpreted as three different symmetric cropping
#'   values for depth, height, and width:
#'   `(symmetric_dim1_crop, symmetric_dim2_crop, symmetric_dim3_crop)`.
#' - If list of 3 lists of 2 ints: interpreted as
#'   `((left_dim1_crop, right_dim1_crop), (left_dim2_crop, right_dim2_crop),
#'   (left_dim3_crop, right_dim3_crop))`
#' @param data_format A string, one of `channels_last` (default) or
#' `channels_first`. The ordering of the dimensions in the inputs.
#' `channels_last` corresponds to inputs with shape `(batch, spatial_dim1,
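To make the three cropping forms documented above concrete, a minimal sketch using the fully explicit list-of-lists form (layer and shape values are illustrative, assuming `library(keras)` is attached):

model <- keras_model_sequential()
model %>% layer_cropping_3d(
  input_shape = c(28, 28, 28, 1),                   # (dim1, dim2, dim3, channels)
  cropping = list(c(2L, 2L), c(0L, 0L), c(1L, 2L))  # (left, right) crop per dim
)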
18 changes: 18 additions & 0 deletions R/layers-merge.R
@@ -16,6 +16,24 @@ layer_add <- function(inputs) {
keras$layers$add(inputs = inputs)
}


#' Layer that subtracts two inputs.
#'
#' It takes as input a list of tensors of size 2, both of the same shape, and
#' returns a single tensor, (`inputs[[1]] - inputs[[2]]`), also of the same
#' shape.
#'
#' @param inputs A list of input tensors (exactly 2).
#'
#' @return A tensor, the difference of the inputs.
#'
#' @family merge layers
#'
#' @export
layer_subtract <- function(inputs) {
keras$layers$subtract(inputs = inputs)
}
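A minimal usage sketch (illustrative shapes; assumes `library(keras)` is attached):

# element-wise difference of two same-shaped inputs
input1 <- layer_input(shape = 16)
input2 <- layer_input(shape = 16)
diff <- layer_subtract(list(input1, input2))
model <- keras_model(inputs = list(input1, input2), outputs = diff)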

#' Layer that multiplies (element-wise) a list of inputs.
#'
#' It takes as input a list of tensors, all of the same shape, and returns a
12 changes: 9 additions & 3 deletions R/metrics-callback.R
@@ -22,7 +22,8 @@ KerasMetricsCallback <- R6::R6Class("KerasMetricsCallback",
self$metrics[[metric]] <- numeric()

# handle metrics
self$on_metrics(logs, 0.5)
if (length(logs) > 0)
self$on_metrics(logs, 0.5)

if (tfruns::is_run_active()) {
self$write_params(self$params)
@@ -40,8 +41,13 @@ KerasMetricsCallback <- R6::R6Class("KerasMetricsCallback",
on_metrics = function(logs, sleep) {

# record metrics
for (metric in names(self$metrics))
self$metrics[[metric]] <- c(self$metrics[[metric]], logs[[metric]])
for (metric in names(self$metrics)) {
# take mean if more than one value is reported
value <- logs[[metric]]
if (length(value) > 1)
value <- mean(value)
self$metrics[[metric]] <- c(self$metrics[[metric]], value)
}

# create history object and convert to metrics data frame
history <- keras_training_history(self$params, self$metrics)
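To illustrate the averaging logic above: when the backend reports a vector of per-batch values for a metric, the callback records their mean as the epoch's single value. A tiny sketch with made-up numbers:

logs <- list(loss = c(0.52, 0.48, 0.44))  # hypothetical per-batch values
value <- mean(logs$loss)                  # 0.48 is recorded for the epoch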
165 changes: 134 additions & 31 deletions R/model.R
@@ -73,6 +73,26 @@ keras_model_sequential <- function(layers = NULL, name = NULL) {
}


#' Clone a model instance.
#'
#' Model cloning is similar to calling a model on new inputs, except that it
#' creates new layers (and thus new weights) instead of sharing the weights of
#' the existing layers.
#'
#' @param model Instance of Keras model (could be a functional model or a
#' Sequential model).
#' @param input_tensors Optional list of input tensors to build the model upon.
#' If not provided, placeholders will be created.
#'
#' @export
clone_model <- function(model, input_tensors = NULL) {
keras$models$clone_model(
model = model,
input_tensors = input_tensors
)
}
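A short usage sketch (assumes `model` is an existing Keras model; the clone gets freshly initialized weights and no compile state, so compile it before training):

model2 <- clone_model(model)
model2 %>% compile(optimizer = "rmsprop", loss = "mse")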


#' Configure a Keras model for training
#'
#' @param object Model object to compile.
@@ -93,13 +113,30 @@ keras_model_sequential <- function(layers = NULL, name = NULL) {
#' (2D weights), set this to "temporal". `NULL` defaults to sample-wise
#' weights (1D). If the model has multiple outputs, you can use a different
#' `sample_weight_mode` on each output by passing a list of modes.
#' @param ... Additional named arguments passed to `tf$Session$run`.
#' @param target_tensors By default, Keras will create placeholders for the
#' model's target, which will be fed with the target data during
#' training. If instead you would like to use your own
#' target tensors (in turn, Keras will not expect external
#' data for these targets at training time), you
#' can specify them via the `target_tensors` argument. It can be
#' a single tensor (for a single-output model), a list of tensors,
#' or a named list mapping output names to target tensors.
#' @param weighted_metrics List of metrics to be evaluated and weighted
#' by sample_weight or class_weight during training and testing.
#' @param ... When using the Theano/CNTK backends, these arguments
#' are passed into K.function. When using the TensorFlow backend,
#' these arguments are passed into `tf$Session()$run`.
#'
#' @family model functions
#'
#' @export
compile <- function(object, optimizer, loss, metrics = NULL, loss_weights = NULL,
sample_weight_mode = NULL, ...) {
compile <- function(object, optimizer, loss,
metrics = NULL,
loss_weights = NULL,
sample_weight_mode = NULL,
weighted_metrics = NULL,
target_tensors = NULL,
...) {

# handle metrics
if (!is.null(metrics)) {
@@ -118,16 +155,28 @@ compile <- function(object, optimizer, loss, metrics = NULL, loss_weights = NULL
})
}

# compile model
object$compile(
# args
args <- list(
optimizer = optimizer,
loss = loss,
metrics = metrics,
loss_weights = loss_weights,
sample_weight_mode = sample_weight_mode,
...
sample_weight_mode = sample_weight_mode
)

# keras 2.0.7 args
if (keras_version() >= "2.0.7") {
args$weighted_metrics <- weighted_metrics
args$target_tensors <- target_tensors
}

# var args
var_args <- list(...)
args <- append(args, var_args)

# compile model
do.call(object$compile, args)

# return model invisibly (convenience for chaining)
invisible(object)
}
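A hedged example of the new arguments (requires Keras >= 2.0.7; the optimizer, loss, and metric choices are illustrative):

model %>% compile(
  optimizer = "rmsprop",
  loss = "categorical_crossentropy",
  metrics = "accuracy",
  weighted_metrics = list("accuracy")  # weighted by sample_weight during training
)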
@@ -144,7 +193,8 @@ compile <- function(object, optimizer, loss, metrics = NULL, loss_weights = NULL
#' @param y Vector, matrix, or array of target data (or list if the model has
#' multiple outputs). If all outputs in the model are named, you can also pass
#' a list mapping output names to data.
#' @param batch_size Number of samples per gradient update.
#' @param batch_size Integer or `NULL`. Number of samples per gradient update.
#' If unspecified, it will default to 32.
#' @param epochs Number of times to iterate over the training data arrays.
#' @param verbose Verbosity mode (0 = silent, 1 = verbose, 2 = one log line per
#' epoch).
@@ -175,26 +225,36 @@ compile <- function(object, optimizer, loss, metrics = NULL, loss_weights = NULL
#' sample_weight_mode="temporal" in [compile()].
#' @param initial_epoch epoch at which to start training (useful for resuming a
#' previous training run).
#' @param steps_per_epoch Total number of steps (batches of samples) before
#' declaring one epoch finished and starting the next epoch. When training
#' with Input Tensors such as TensorFlow data tensors, the default `NULL` is
#' equal to the number of unique samples in your dataset divided by the batch
#' size, or 1 if that cannot be determined.
#' @param validation_steps Only relevant if `steps_per_epoch` is specified.
#' Total number of steps (batches of samples) to validate before stopping.
#' @param ... Unused
#'
#' @family model functions
#'
#' @export
fit <- function(object, x, y, batch_size=32, epochs=10,
fit <- function(object, x, y, batch_size=NULL, epochs=10,
verbose=1, callbacks=NULL,
view_metrics = getOption("keras.view_metrics", default = "auto"),
validation_split=0.0, validation_data=NULL, shuffle=TRUE,
class_weight=NULL, sample_weight=NULL, initial_epoch=0, ...) {
class_weight=NULL, sample_weight=NULL, initial_epoch=0,
steps_per_epoch=NULL, validation_steps=NULL, ...) {

# defaults
if (is.null(batch_size) && is.null(steps_per_epoch))
batch_size <- 32L

# resolve view_metrics
if (identical(view_metrics, "auto"))
view_metrics <- resolve_view_metrics(verbose, epochs, object$metrics)

# fit the model
history <- object$fit(
x = to_numpy_array(x),
y = to_numpy_array(y),
batch_size = as.integer(batch_size),
# build args
args <- list(
batch_size = as_nullable_integer(batch_size),
epochs = as.integer(epochs),
verbose = as.integer(verbose),
callbacks = normalize_callbacks(view_metrics, callbacks),
Expand All @@ -206,14 +266,35 @@ fit <- function(object, x, y, batch_size=32, epochs=10,
initial_epoch = as.integer(initial_epoch)
)

if (!missing(x))
args$x <- to_numpy_array(x)
if (!missing(y))
args$y <- to_numpy_array(y)

if (keras_version() >= "2.0.7") {
args$steps_per_epoch <- steps_per_epoch
args$validation_steps <- validation_steps
}

# fit the model
history <- do.call(object$fit, args)

# turn history into an R object so it can be persisted and
# give it a class so we can write print/plot methods
params <- history$params
if (params$do_validation)
params$validation_samples <- dim(history$validation_data[[1]])[[1]]

# normalize metrics
metrics <- history$history
metrics <- lapply(metrics, function(metric) {
as.numeric(lapply(metric, mean))
})

# create history
history <- keras_training_history(
params = params,
metrics = lapply(history$history, as.numeric)
metrics = metrics
)

# return the history invisibly
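A sketch of the new step-based training arguments (only meaningful when the model is fed from tensors rather than R arrays, per the parameter docs above, which is why `x` and `y` may now be omitted; the step count is illustrative):

history <- model %>% fit(
  epochs = 5,
  steps_per_epoch = 100  # 100 batches per epoch; x/y come from input tensors
)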
@@ -224,25 +305,36 @@ fit <- function(object, x, y, batch_size=32, epochs=10,
#' Evaluate a Keras model

#' @inheritParams fit
#'
#'
#' @param object Model object to evaluate
#'
#' @return Named list of model test loss (or losses for models with multiple outputs)
#' and model metrics.
#'
#' @param steps Total number of steps (batches of samples) before declaring the
#' evaluation round finished. Ignored with the default value of `NULL`.
#'
#' @return Named list of model test loss (or losses for models with multiple
#' outputs) and model metrics.
#'
#' @family model functions
#'
#'
#' @export
evaluate <- function(object, x, y, batch_size = 32, verbose=1, sample_weight = NULL) {
evaluate <- function(object, x, y, batch_size = NULL, verbose=1, sample_weight = NULL, steps = NULL) {

# perform evaluation
result <- object$evaluate(
# defaults
if (is.null(batch_size) && is.null(steps))
batch_size <- 32L

# args
args <- list(
x = to_numpy_array(x),
y = to_numpy_array(y),
batch_size = as.integer(batch_size),
batch_size = as_nullable_integer(batch_size),
verbose = as.integer(verbose),
sample_weight = sample_weight
)
if (keras_version() >= "2.0.7")
args$steps <- steps

# perform evaluation
result <- do.call(object$evaluate, args)

# apply names
names(result) <- object$metrics_names
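A usage sketch (hypothetical test arrays; with `batch_size` and `steps` both `NULL`, the default batch size of 32 applies):

scores <- model %>% evaluate(x_test, y_test)
scores[["loss"]]  # element names come from the model's metrics_names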
@@ -260,6 +352,8 @@ evaluate <- function(object, x, y, batch_size = 32, verbose=1, sample_weight = N
#' Generates output predictions for the input samples, processing the samples in
#' a batched way.
#'
#' @inheritParams evaluate
#'
#' @param object Keras model
#' @param x Input data (vector, matrix, or array)
#' @param batch_size Integer
@@ -273,14 +367,23 @@ evaluate <- function(object, x, y, batch_size = 32, verbose=1, sample_weight = N
#'
#' @importFrom stats predict
#' @export
predict.keras.engine.training.Model <- function(object, x, batch_size=32, verbose=0, ...) {
predict.keras.engine.training.Model <- function(object, x, batch_size=NULL, verbose=0, steps=NULL, ...) {

# call predict
object$predict(
to_numpy_array(x),
batch_size = as.integer(batch_size),
# defaults
if (is.null(batch_size) && is.null(steps))
batch_size <- 32L

# args
args <- list(
x = to_numpy_array(x),
batch_size = as_nullable_integer(batch_size),
verbose = as.integer(verbose)
)
if (keras_version() >= "2.0.7")
args$steps <- steps

# call predict
do.call(object$predict, args)
}
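And the matching sketch for predict() (hypothetical input matrix; `steps` is likewise only consulted on Keras >= 2.0.7):

preds <- model %>% predict(x_test, batch_size = 128)
dim(preds)  # rows match nrow(x_test)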


(Diffs for the remaining changed files not shown.)