bigdl.keras package

Submodules

bigdl.keras.ToBigDLHelper module

bigdl.keras.ToBigDLHelper.to_bigdl_2d_ordering(order)[source]
bigdl.keras.ToBigDLHelper.to_bigdl_2d_padding(border_mode, *args)[source]
bigdl.keras.ToBigDLHelper.to_bigdl_3d_ordering(order)[source]
bigdl.keras.ToBigDLHelper.to_bigdl_3d_padding(border_mode)[source]
bigdl.keras.ToBigDLHelper.to_bigdl_init(kinit_method)[source]
bigdl.keras.ToBigDLHelper.to_bigdl_reg(reg)[source]

bigdl.keras.backend module

class bigdl.keras.backend.KerasModelWrapper(kmodel)[source]
evaluate(x, y, batch_size=32, sample_weight=None, is_distributed=False)[source]

Evaluate a model by the given metrics. :param x: ndarray or list of ndarray for local mode; RDD[Sample] for distributed mode. :param y: ndarray or list of ndarray for local mode; would be None for distributed mode. :param batch_size: number of samples per batch. :param is_distributed: whether to run in local mode or distributed mode. NB: if is_distributed=True, x should be RDD[Sample] and y should be None. :return:

fit(x, y=None, batch_size=32, nb_epoch=10, verbose=1, callbacks=None, validation_split=0.0, validation_data=None, shuffle=True, class_weight=None, sample_weight=None, initial_epoch=0, is_distributed=False)[source]

Optimize the model by the given options

Parameters:x – ndarray or list of ndarray for local mode.

RDD[Sample] for distributed mode. :param y: ndarray or list of ndarray for local mode; would be None for distributed mode. :param is_distributed: whether to train in local mode or distributed mode; the default value is False. NB: if is_distributed=True, x should be RDD[Sample] and y should be None. :return: A Numpy array or RDD[Sample] of predictions.

predict(x, batch_size=None, verbose=None, is_distributed=False)[source]

Generates output predictions for the input samples, processing the samples in a batched way.

# Arguments x: the input data, as a Numpy array or list of Numpy arrays for local mode, or as RDD[Sample] for distributed mode. is_distributed: used to control whether to run in local or cluster mode; the default value is False. # Returns A Numpy array or RDD[Sample] of predictions.

bigdl.keras.backend.with_bigdl_backend(kmodel)[source]

bigdl.keras.converter module

class bigdl.keras.converter.DefinitionLoader(kmodel)[source]
classmethod from_hdf5_path(hdf5_path)[source]
Parameters:hdf5_path – hdf5 path which can be stored in a local file system, HDFS, S3, or any Hadoop-supported file system.
Returns:BigDL Model
classmethod from_json_path(json_path)[source]
Parameters:json_path – definition path which can be stored in a local file system, HDFS, S3, or any Hadoop-supported file system.
Returns:BigDL Model
classmethod from_json_str(json_str)[source]
classmethod from_kmodel(kmodel)[source]
class bigdl.keras.converter.LayerConverter(klayer, kclayer, input_shape=None)[source]
combo_parameter_layer(blayer, config)[source]
create()[source]
create_activation()[source]
create_activityregularization()[source]
create_atrousconvolution1d()[source]
create_atrousconvolution2d()[source]
create_averagepooling1d()[source]
create_averagepooling2d()[source]
create_averagepooling3d()[source]
create_batchnormalization()[source]
create_bidirectional()[source]
create_convlstm2d()[source]
create_convolution1d()[source]
create_convolution2d()[source]
create_convolution3d()[source]
create_cropping1d()[source]
create_cropping2d()[source]
create_cropping3d()[source]
create_deconvolution2d()[source]
create_dense()[source]
create_dropout()[source]
create_elu()[source]
create_embedding()[source]
create_flatten()[source]
create_gaussiandropout()[source]
create_gaussiannoise()[source]
create_globalaveragepooling1d()[source]
create_globalaveragepooling2d()[source]
create_globalaveragepooling3d()[source]
create_globalmaxpooling1d()[source]
create_globalmaxpooling2d()[source]
create_globalmaxpooling3d()[source]
create_gru()[source]
create_highway()[source]
create_inputlayer()[source]
create_leakyrelu()[source]
create_locallyconnected1d()[source]
create_locallyconnected2d()[source]
create_lstm()[source]
create_masking()[source]
create_maxoutdense()[source]
create_maxpooling1d()[source]
create_maxpooling2d()[source]
create_maxpooling3d()[source]
create_merge()[source]
create_model()[source]
create_parametricsoftplus()[source]
create_permute()[source]
create_prelu()[source]
create_repeatvector()[source]
create_reshape()[source]
create_separableconvolution2d()[source]
create_sequential()[source]
create_simplernn()[source]
create_spatialdropout1d()[source]
create_spatialdropout2d()[source]
create_spatialdropout3d()[source]
create_srelu()[source]
create_thresholdedrelu()[source]
create_timedistributed()[source]
create_timedistributeddense()[source]
create_upsampling1d()[source]
create_upsampling2d()[source]
create_upsampling3d()[source]
create_zeropadding1d()[source]
create_zeropadding2d()[source]
create_zeropadding3d()[source]
fuse(src_blayer, activation)[source]
generate_convlstm2d_cell(klayer, kclayer, input_shape)[source]
generate_gru_cell(klayer, kclayer, input_shape)[source]
generate_lstm_cell(klayer, kclayer, input_shape)[source]
generate_simplernn_cell(klayer, kclayer, input_shape)[source]
get_bdim_order(dim='2D')[source]
get_value_from_init(kinit_method, shape)[source]
class bigdl.keras.converter.WeightLoader[source]
static load_weights_from_hdf5(kmodel, filepath, by_name=False)[source]

Loads all layer weights from an HDF5 save file. filepath can be stored in a local file system, HDFS, S3, or any Hadoop-supported file system. If by_name is False (default), weights are loaded based on the network’s execution-order topology, meaning layers in the execution sequence should be exactly the same as in the saved architecture.

If by_name is True, weights are loaded into layers only if they share the same name. This is useful for fine-tuning or transfer-learning models where some of the layers have changed.

static load_weights_from_json_hdf5(weights_hdf5, by_name=False)[source]

The file path can be stored in a local file system, HDFS, S3, or any Hadoop-supported file system.

static load_weights_from_kmodel(kmodel)[source]
class bigdl.keras.converter.WeightsConverter[source]

Convert keras weights to bigdl weights. The shape of weights would be changed if using a different backend, so we only test against the TensorFlow backend. TODO: Support the Theano (“th”) backend as well.

static convert_atrousconvolution1d(weights)[source]
static convert_atrousconvolution2d(weights)[source]
static convert_batchnormalization(weights)[source]
static convert_bidirectional(weights)[source]
static convert_convlstm2d(weights)[source]
static convert_convolution1d(weights)[source]
static convert_convolution2d(weights)[source]
static convert_convolution3d(weights)[source]
static convert_deconvolution2d(weights)[source]
static convert_dense(weights)[source]
static convert_embedding(weights)[source]
static convert_gru(weights)[source]
static convert_highway(weights)[source]
static convert_locallyconnected1d(weights)[source]
static convert_locallyconnected2d(weights)[source]
static convert_lstm(weights)[source]
static convert_maxoutdense(weights)[source]
static convert_separableconvolution2d(weights)[source]
static convert_simplernn(weights)[source]
static convert_srelu(weights)[source]
static convert_timedistributed(weights)[source]
static convert_timedistributeddense(weights)[source]
static get_bigdl_weights_from_klayer()[source]
static get_converter()[source]
static get_weights_from_kmodel()[source]

Convert kmodel’s weights to bigdl format. We assume the order is the same as the execution order. :param kmodel: keras model :return: list of ndarray

static to_bigdl_weights(weights)[source]
bigdl.keras.converter.unsupport_exp(name)[source]

bigdl.keras.optimization module

class bigdl.keras.optimization.OptimConverter[source]
static to_bigdl_criterion()[source]
static to_bigdl_metrics()[source]
static to_bigdl_optim_method()[source]

Module contents