Full Code of MultiPath/CopyNet

Repository: MultiPath/CopyNet
Branch: master
Commit: dba24d58d505
Files: 56
Total size: 609.0 KB

Directory structure:
gitextract_9j7ufkjh/

├── .idea/
│   └── vcs.xml
├── LICENSE
├── README.md
├── emolga/
│   ├── __init__.py
│   ├── basic/
│   │   ├── __init__.py
│   │   ├── activations.py
│   │   ├── initializations.py
│   │   ├── objectives.py
│   │   └── optimizers.py
│   ├── config.py
│   ├── config_variant.py
│   ├── dataset/
│   │   └── build_dataset.py
│   ├── layers/
│   │   ├── __init__.py
│   │   ├── attention.py
│   │   ├── core.py
│   │   ├── embeddings.py
│   │   ├── gridlstm.py
│   │   ├── ntm_minibatch.py
│   │   └── recurrent.py
│   ├── models/
│   │   ├── __init__.py
│   │   ├── core.py
│   │   ├── covc_encdec.py
│   │   ├── encdec.py
│   │   ├── ntm_encdec.py
│   │   ├── pointers.py
│   │   └── variational.py
│   ├── run.py
│   ├── test_lm.py
│   ├── test_nvtm.py
│   ├── test_run.py
│   ├── utils/
│   │   ├── __init__.py
│   │   ├── generic_utils.py
│   │   ├── io_utils.py
│   │   ├── np_utils.py
│   │   ├── test_utils.py
│   │   └── theano_utils.py
│   └── voc.pkl
└── experiments/
    ├── __init__.py
    ├── bst_dataset.py
    ├── bst_vest.py
    ├── config.py
    ├── copynet.py
    ├── copynet_input.py
    ├── dataset.py
    ├── lcsts_dataset.py
    ├── lcsts_rouge.py
    ├── lcsts_sample.py
    ├── lcsts_test.py
    ├── lcsts_vest.py
    ├── lcsts_vest_new.py
    ├── movie_dataset.py
    ├── syn_vest.py
    ├── syntest.py
    ├── synthetic.py
    ├── weibo_dataset.py
    └── weibo_vest.py

================================================
FILE CONTENTS
================================================

================================================
FILE: .idea/vcs.xml
================================================
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="VcsDirectoryMappings">
    <mapping directory="$PROJECT_DIR$" vcs="Git" />
  </component>
</project>

================================================
FILE: LICENSE
================================================
MIT License

Copyright (c) 2016 Jiatao Gu

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


================================================
FILE: README.md
================================================
# CopyNet
incorporating copying mechanism in sequence-to-sequence learning


================================================
FILE: emolga/__init__.py
================================================
__author__ = 'yinpengcheng'


================================================
FILE: emolga/basic/__init__.py
================================================
__author__ = 'jiataogu'


================================================
FILE: emolga/basic/activations.py
================================================
import theano.tensor as T


def softmax(x):
    return T.nnet.softmax(x.reshape((-1, x.shape[-1]))).reshape(x.shape)


def vector_softmax(x):
    return T.nnet.softmax(x.reshape((1, x.shape[0])))[0]


def time_distributed_softmax(x):
    import warnings
    warnings.warn("time_distributed_softmax is deprecated. Just use softmax!", DeprecationWarning)
    return softmax(x)


def softplus(x):
    return T.nnet.softplus(x)


def relu(x):
    return T.nnet.relu(x)


def tanh(x):
    return T.tanh(x)


def sigmoid(x):
    return T.nnet.sigmoid(x)


def hard_sigmoid(x):
    return T.nnet.hard_sigmoid(x)


def linear(x):
    '''
    The function returns the variable that is passed in, so all types work
    '''
    return x


def maxout2(x):
    shape = x.shape
    if x.ndim == 1:
        shape1 = T.cast(shape[0] / 2, 'int64')
        shape2 = T.cast(2, 'int64')
        x = x.reshape([shape1, shape2])
        x = x.max(1)
    elif x.ndim == 2:
        shape1 = T.cast(shape[1] / 2, 'int64')
        shape2 = T.cast(2, 'int64')
        x = x.reshape([shape[0], shape1, shape2])
        x = x.max(2)
    elif x.ndim == 3:
        shape1 = T.cast(shape[2] / 2, 'int64')
        shape2 = T.cast(2, 'int64')
        x = x.reshape([shape[0], shape[1], shape1, shape2])
        x = x.max(3)
    return x
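
# A minimal NumPy sketch (not part of the repo; assumes NumPy) of what
# maxout2 computes for a 2-D input with an even feature dimension:
# adjacent column pairs are collapsed into their element-wise maximum.
def maxout2_np(x):
    import numpy as np  # local import keeps the sketch self-contained
    n, d = x.shape
    return np.asarray(x).reshape(n, d // 2, 2).max(axis=2)
# e.g. maxout2_np(np.array([[1., 4., 3., 2.]])) -> [[4., 3.]]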


from emolga.utils.generic_utils import get_from_module


def get(identifier):
    return get_from_module(identifier, globals(), 'activation function')


================================================
FILE: emolga/basic/initializations.py
================================================
import theano
import theano.tensor as T
import numpy as np

from emolga.utils.theano_utils import sharedX, shared_zeros, shared_ones


def get_fans(shape):
    if isinstance(shape, int):
        shape = (1, shape)
    fan_in = shape[0] if len(shape) == 2 else np.prod(shape[1:])
    fan_out = shape[1] if len(shape) == 2 else shape[0]
    return fan_in, fan_out


def uniform(shape, scale=0.1):
    return sharedX(np.random.uniform(low=-scale, high=scale, size=shape))


def normal(shape, scale=0.05):
    return sharedX(np.random.randn(*shape) * scale)


def lecun_uniform(shape):
    ''' Reference: LeCun 98, Efficient Backprop
        http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf
    '''
    fan_in, fan_out = get_fans(shape)
    scale = np.sqrt(3. / fan_in)
    return uniform(shape, scale)


def glorot_normal(shape):
    ''' Reference: Glorot & Bengio, AISTATS 2010
    '''
    fan_in, fan_out = get_fans(shape)
    s = np.sqrt(2. / (fan_in + fan_out))
    return normal(shape, s)


def glorot_uniform(shape):
    fan_in, fan_out = get_fans(shape)
    s = np.sqrt(6. / (fan_in + fan_out))
    return uniform(shape, s)


def he_normal(shape):
    ''' Reference:  He et al., http://arxiv.org/abs/1502.01852
    '''
    fan_in, fan_out = get_fans(shape)
    s = np.sqrt(2. / fan_in)
    return normal(shape, s)


def he_uniform(shape):
    fan_in, fan_out = get_fans(shape)
    s = np.sqrt(6. / fan_in)
    return uniform(shape, s)


def orthogonal(shape, scale=1.1):
    ''' From Lasagne
    '''
    flat_shape = (shape[0], np.prod(shape[1:]))
    a = np.random.normal(0.0, 1.0, flat_shape)
    u, _, v = np.linalg.svd(a, full_matrices=False)
    # pick the one with the correct shape
    q = u if u.shape == flat_shape else v
    q = q.reshape(shape)
    return sharedX(scale * q[:shape[0], :shape[1]])


def identity(shape, scale=1):
    if len(shape) != 2 or shape[0] != shape[1]:
        raise Exception("Identity matrix initialization can only be used for 2D square matrices")
    else:
        return sharedX(scale * np.identity(shape[0]))


def zero(shape):
    return shared_zeros(shape)


def one(shape):
    return shared_ones(shape)

from emolga.utils.generic_utils import get_from_module
def get(identifier):
    return get_from_module(identifier, globals(), 'initialization')


================================================
FILE: emolga/basic/objectives.py
================================================
from __future__ import absolute_import
import theano
import theano.tensor as T
import numpy as np
from six.moves import range

if theano.config.floatX == 'float64':
    epsilon = 1.0e-9
else:
    epsilon = 1.0e-7


def mean_squared_error(y_true, y_pred):
    return T.sqr(y_pred - y_true).mean(axis=-1)


def mean_absolute_error(y_true, y_pred):
    return T.abs_(y_pred - y_true).mean(axis=-1)


def mean_absolute_percentage_error(y_true, y_pred):
    return T.abs_((y_true - y_pred) / T.clip(T.abs_(y_true), epsilon, np.inf)).mean(axis=-1) * 100.


def mean_squared_logarithmic_error(y_true, y_pred):
    return T.sqr(T.log(T.clip(y_pred, epsilon, np.inf) + 1.) - T.log(T.clip(y_true, epsilon, np.inf) + 1.)).mean(axis=-1)


def squared_hinge(y_true, y_pred):
    return T.sqr(T.maximum(1. - y_true * y_pred, 0.)).mean(axis=-1)


def hinge(y_true, y_pred):
    return T.maximum(1. - y_true * y_pred, 0.).mean(axis=-1)


def categorical_crossentropy(y_true, y_pred):
    '''Expects a binary class matrix instead of a vector of scalar classes
    '''
    y_pred = T.clip(y_pred, epsilon, 1.0 - epsilon)
    # scale preds so that the class probas of each sample sum to 1
    y_pred /= y_pred.sum(axis=-1, keepdims=True)
    cce = T.nnet.categorical_crossentropy(y_pred, y_true)
    return cce


def binary_crossentropy(y_true, y_pred):
    y_pred = T.clip(y_pred, epsilon, 1.0 - epsilon)
    bce = T.nnet.binary_crossentropy(y_pred, y_true).mean(axis=-1)
    return bce


def poisson_loss(y_true, y_pred):
    return T.mean(y_pred - y_true * T.log(y_pred + epsilon), axis=-1)

####################################################
# Variational Auto-encoder

def gaussian_kl_divergence(mean, ln_var):
    """Computes the KL-divergence of Gaussian variables from the standard one.

    Given two variable ``mean`` representing :math:`\\mu` and ``ln_var``
    representing :math:`\\log(\\sigma^2)`, this function returns a variable
    representing the KL-divergence between the given multi-dimensional Gaussian
    :math:`N(\\mu, S)` and the standard Gaussian :math:`N(0, I)`

    .. math::

       D_{\\mathbf{KL}}(N(\\mu, S) \\| N(0, I)),

    where :math:`S` is a diagonal matrix such that :math:`S_{ii} = \\sigma_i^2`
    and :math:`I` is an identity matrix.

    Args:
        mean (~chainer.Variable): A variable representing mean of given
            gaussian distribution, :math:`\\mu`.
        ln_var (~chainer.Variable): A variable representing logarithm of
            variance of given gaussian distribution, :math:`\\log(\\sigma^2)`.

    Returns:
        ~chainer.Variable: A variable representing KL-divergence between
            given gaussian distribution and the standard gaussian.

    """
    var = T.exp(ln_var)
    return 0.5 * T.sum(mean * mean + var - ln_var - 1, 1)
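
# In closed form, the line above sums, per example,
#     D_KL(N(mu, diag(sigma^2)) || N(0, I))
#         = 1/2 * sum_i (mu_i^2 + sigma_i^2 - log sigma_i^2 - 1),
# which is zero exactly when mu = 0 and sigma^2 = 1. A NumPy sketch
# (not part of the repo) for checking a value numerically:
def gaussian_kl_np(mean, ln_var):
    import numpy as np
    var = np.exp(ln_var)
    return 0.5 * np.sum(mean ** 2 + var - ln_var - 1.0, axis=1)
# gaussian_kl_np(np.zeros((1, 3)), np.zeros((1, 3))) -> [0.]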


# aliases
mse = MSE = mean_squared_error
mae = MAE = mean_absolute_error
mape = MAPE = mean_absolute_percentage_error
msle = MSLE = mean_squared_logarithmic_error
gkl = GKL = gaussian_kl_divergence

from emolga.utils.generic_utils import get_from_module
def get(identifier):
    return get_from_module(identifier, globals(), 'objective')


================================================
FILE: emolga/basic/optimizers.py
================================================
from __future__ import absolute_import
import theano
import sys

from theano.sandbox.rng_mrg import MRG_RandomStreams
import theano.tensor as T
import logging

from emolga.utils.theano_utils import shared_zeros, shared_scalar, floatX
from emolga.utils.generic_utils import get_from_module
from six.moves import zip
from copy import copy, deepcopy

logger = logging.getLogger(__name__)


def clip_norm(g, c, n):
    if c > 0:
        g = T.switch(T.ge(n, c), g * c / n, g)
    return g
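
# A NumPy sketch (not part of the repo) of clipping by the *global* norm,
# matching how get_gradients below computes n over all gradient tensors:
def clip_by_global_norm_np(grads, c):
    import numpy as np
    n = np.sqrt(sum((g ** 2).sum() for g in grads))  # global L2 norm
    if c > 0 and n >= c:
        return [g * c / n for g in grads]            # rescale to norm c
    return list(grads)
# clip_by_global_norm_np([np.array([3., 4.])], 1.0) -> [array([0.6, 0.8])]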


def kl_divergence(p, p_hat):
    return p_hat - p + p * T.log(p / p_hat)


class Optimizer(object):
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)
        self.updates   = []
        self.save_parm = []

    def add(self, v):
        self.save_parm += [v]

    def get_state(self):
        return [u[0].get_value() for u in self.updates]

    def set_state(self, value_list):
        assert len(self.updates) == len(value_list)
        for u, v in zip(self.updates, value_list):
            u[0].set_value(floatX(v))

    def get_updates(self, params, loss):
        raise NotImplementedError

    def get_gradients(self, loss, params):
        """
        Consider the situation that gradient is weighted.
        """
        if isinstance(loss, list):
            grads = T.grad(loss[0], params, consider_constant=loss[1:])  # gradient of loss
        else:
            grads = T.grad(loss, params)

        if hasattr(self, 'clipnorm') and self.clipnorm > 0:
            print 'use gradient clipping!!'
            norm = T.sqrt(sum([T.sum(g ** 2) for g in grads]))
            grads = [clip_norm(g, self.clipnorm, norm) for g in grads]

        return grads

    def get_config(self):
        return {"name": self.__class__.__name__}


class SGD(Optimizer):

    def __init__(self, lr=0.05, momentum=0.9, decay=0.01, nesterov=True, *args, **kwargs):
        super(SGD, self).__init__(**kwargs)
        self.__dict__.update(locals())
        self.iterations = shared_scalar(0)
        self.lr = shared_scalar(lr)
        self.momentum = shared_scalar(momentum)

    def get_updates(self, params, loss):
        grads = self.get_gradients(loss, params)
        lr = self.lr * (1.0 / (1.0 + self.decay * self.iterations))
        self.updates = [(self.iterations, self.iterations + 1.)]

        for p, g in zip(params, grads):
            m = shared_zeros(p.get_value().shape)  # momentum
            v = self.momentum * m - lr * g  # velocity
            self.updates.append((m, v))

            if self.nesterov:
                new_p = p + self.momentum * v - lr * g
            else:
                new_p = p + v

            self.updates.append((p, new_p))  # apply constraints
        return self.updates

    def get_config(self):
        return {"name": self.__class__.__name__,
                "lr": float(self.lr.get_value()),
                "momentum": float(self.momentum.get_value()),
                "decay": float(self.decay.get_value()),
                "nesterov": self.nesterov}


class RMSprop(Optimizer):
    def __init__(self, lr=0.001, rho=0.9, epsilon=1e-6, *args, **kwargs):
        super(RMSprop, self).__init__(**kwargs)
        self.__dict__.update(locals())
        self.lr = shared_scalar(lr)
        self.rho = shared_scalar(rho)
        self.iterations = shared_scalar(0)

    def get_updates(self, params, loss):
        grads = self.get_gradients(loss, params)
        accumulators = [shared_zeros(p.get_value().shape) for p in params]
        self.updates = [(self.iterations, self.iterations + 1.)]

        for p, g, a in zip(params, grads, accumulators):
            new_a = self.rho * a + (1 - self.rho) * g ** 2  # update accumulator
            self.updates.append((a, new_a))

            new_p = p - self.lr * g / T.sqrt(new_a + self.epsilon)
            self.updates.append((p, new_p))  # apply constraints
        return self.updates

    def get_config(self):
        return {"name": self.__class__.__name__,
                "lr": float(self.lr.get_value()),
                "rho": float(self.rho.get_value()),
                "epsilon": self.epsilon}


class Adagrad(Optimizer):
    def __init__(self, lr=0.01, epsilon=1e-6, *args, **kwargs):
        super(Adagrad, self).__init__(**kwargs)
        self.__dict__.update(locals())
        self.lr = shared_scalar(lr)

    # note: unlike its siblings, this optimizer still takes a `constraints`
    # list (one callable per parameter) in the older Keras style
    def get_updates(self, params, constraints, loss):
        grads = self.get_gradients(loss, params)
        accumulators = [shared_zeros(p.get_value().shape) for p in params]
        self.updates = []

        for p, g, a, c in zip(params, grads, accumulators, constraints):
            new_a = a + g ** 2  # update accumulator
            self.updates.append((a, new_a))
            new_p = p - self.lr * g / T.sqrt(new_a + self.epsilon)
            self.updates.append((p, c(new_p)))  # apply constraints
        return self.updates

    def get_config(self):
        return {"name": self.__class__.__name__,
                "lr": float(self.lr.get_value()),
                "epsilon": self.epsilon}


class Adadelta(Optimizer):
    '''
        Reference: http://arxiv.org/abs/1212.5701
    '''
    def __init__(self, lr=0.1, rho=0.95, epsilon=1e-6, *args, **kwargs):
        super(Adadelta, self).__init__(**kwargs)
        self.__dict__.update(locals())
        self.lr = shared_scalar(lr)
        self.iterations = shared_scalar(0)

    def get_updates(self, params, loss):
        grads = self.get_gradients(loss, params)
        accumulators = [shared_zeros(p.get_value().shape) for p in params]
        delta_accumulators = [shared_zeros(p.get_value().shape) for p in params]
        # self.updates = []
        self.updates = [(self.iterations, self.iterations + 1.)]

        for p, g, a, d_a in zip(params, grads, accumulators, delta_accumulators):
            new_a = self.rho * a + (1 - self.rho) * g ** 2  # update accumulator
            self.updates.append((a, new_a))

            # use the new accumulator and the *old* delta_accumulator
            update = g * T.sqrt(d_a + self.epsilon) / T.sqrt(new_a +
                                                             self.epsilon)

            new_p = p - self.lr * update
            self.updates.append((p, new_p))

            # update delta_accumulator
            new_d_a = self.rho * d_a + (1 - self.rho) * update ** 2
            self.updates.append((d_a, new_d_a))
        return self.updates

    def get_config(self):
        return {"name": self.__class__.__name__,
                "lr": float(self.lr.get_value()),
                "rho": self.rho,
                "epsilon": self.epsilon}


class Adam(Optimizer):  # this Adam variant is adapted for our purposes.
    '''
        Reference: http://arxiv.org/abs/1412.6980v8

        Default parameters follow those provided in the original paper.
        Gaussian noise on the gradients was added to improve performance,
        though the injection below is currently commented out.
    '''
    def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8, save=False, rng=None, *args, **kwargs):
        super(Adam, self).__init__(**kwargs)
        self.__dict__.update(locals())
        print locals()

        self.iterations = shared_scalar(0,  name='iteration')
        self.lr         = shared_scalar(lr, name='lr')
        # fall back to an internal random stream when no rng is supplied
        self.rng        = rng if rng is not None else MRG_RandomStreams(use_cuda=True)
        self.noise      = []
        self.forget     = dict()

        self.add(self.iterations)
        self.add(self.lr)

    def add_noise(self, param):
        if param.name not in self.noise:
            logger.info('add gradient noise to {}'.format(param))
            self.noise += [param.name]

    def add_forget(self, param):
        if param.name not in self.forget:
            logger.info('add forgetting list to {}'.format(param))
            self.forget[param.name] = theano.shared(param.get_value())

    def get_updates(self, params, loss):
        grads = self.get_gradients(loss, params)
        self.updates = [(self.iterations, self.iterations + 1.)]
        self.pu = []

        t = self.iterations + 1
        lr_t = self.lr * T.sqrt(1 - self.beta_2**t) / (1 - self.beta_1**t)
        for p, g in zip(params, grads):
            m = theano.shared(p.get_value() * 0., name=p.name + '_m')  # zero init of moment
            v = theano.shared(p.get_value() * 0., name=p.name + '_v')  # zero init of velocity

            self.add(m)
            self.add(v)

            # g_noise = self.rng.normal(g.shape, 0, T.sqrt(0.005 * t ** (-0.55)), dtype='float32')

            # if p.name in self.noise:
            #     g_deviated = g + g_noise
            # else:
            #     g_deviated = g

            g_deviated = g  #  + g_noise
            m_t = (self.beta_1 * m) + (1 - self.beta_1) * g_deviated
            v_t = (self.beta_2 * v) + (1 - self.beta_2) * (g_deviated**2)
            u_t = -lr_t * m_t / (T.sqrt(v_t) + self.epsilon)
            p_t = p + u_t

            # # memory reformatting!
            # if p.name in self.forget:
            #     p_t = (1 - p_mem) * p_t + p_mem * self.forget[p.name]
            #     p_s = (1 - p_fgt) * p_t + p_fgt * self.forget[p.name]
            #     self.updates.append((self.forget[p.name], p_s))

            self.updates.append((m, m_t))
            self.updates.append((v, v_t))
            self.updates.append((p, p_t))  # apply constraints
            self.pu.append((p, p_t - p))

        if self.save:
            return self.updates, self.pu
        return self.updates
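
# The lr_t line above folds Adam's bias corrections into the step size:
#     lr_t = lr * sqrt(1 - beta_2^t) / (1 - beta_1^t)
# With the defaults, at t = 1: lr_t = lr * sqrt(0.001) / 0.1 ~= 0.316 * lr,
# and lr_t -> lr as t grows. A per-array sketch (not part of the repo):
def adam_step_np(p, g, m, v, t, lr=0.001, b1=0.9, b2=0.999, eps=1e-8):
    import numpy as np
    lr_t = lr * np.sqrt(1 - b2 ** t) / (1 - b1 ** t)  # bias-corrected step
    m = b1 * m + (1 - b1) * g                         # first-moment estimate
    v = b2 * v + (1 - b2) * g ** 2                    # second-moment estimate
    return p - lr_t * m / (np.sqrt(v) + eps), m, v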

# aliases
sgd = SGD
rmsprop = RMSprop
adagrad = Adagrad
adadelta = Adadelta
adam = Adam


def get(identifier, kwargs=None):
    return get_from_module(identifier, globals(), 'optimizer', instantiate=True,
                           kwargs=kwargs)


================================================
FILE: emolga/config.py
================================================
__author__ = 'jiataogu'
import os
import os.path as path

def setup_ptb2():
    # pretraining setting up.
    # get the lm_config.

    config = dict()
    config['on_unused_input'] = 'ignore'
    config['seed']            = 3030029828
    config['level']           = 'DEBUG'

    # config['model']           = 'RNNLM'
    # config['model']           = 'VAE'
    # config['model']           = 'RNNLM' #'Helmholtz'
    config['model']           = 'HarX'
    config['highway']         = False
    config['use_noise']       = False

    config['optimizer']       = 'adam'  #'adadelta'
    # config['lr']              = 0.1

    # config['optimizer']       = 'sgd'

    # dataset
    config['path']            = path.realpath(path.curdir) + '/'  # '/home/thoma/Work/Dial-DRL/'
    config['vocabulary_set']  = config['path'] + 'dataset/ptbcorpus/voc.pkl'
    config['dataset']         = config['path'] + 'dataset/ptbcorpus/data_train.pkl'
    config['dataset_valid']   = config['path'] + 'dataset/ptbcorpus/data_valid.pkl'
    config['dataset_test']    = config['path'] + 'dataset/ptbcorpus/data_test.pkl'
    # output hdf5 file place.
    config['path_h5']         = config['path'] + 'H5'
    if not os.path.exists(config['path_h5']):
        os.mkdir(config['path_h5'])

    # output log place
    config['path_log']        = config['path'] + 'Logs'
    if not os.path.exists(config['path_log']):
        os.mkdir(config['path_log'])

    # size
    config['batch_size']      = 20
    config['eval_batch_size'] = 20
    config['mode']            = 'RNN'  # NTM
    config['binary']          = False

    # Encoder: dimension
    config['enc_embedd_dim']  = 300
    config['enc_hidden_dim']  = 300
    config['enc_contxt_dim']  = 350
    config['encoder']         = 'RNN'
    config['pooling']         = False

    # Encoder: Model
    config['bidirectional']   = False  # True
    config['decposterior']    = True
    config['enc_use_contxt']  = False

    # Agent: dimension
    config['action_dim']      = 50
    config['output_dim']      = 300

    # Decoder: dimension
    config['dec_embedd_dim']  = 300
    config['dec_hidden_dim']  = 300
    config['dec_contxt_dim']  = 300

    # Decoder: Model
    config['shared_embed']    = False
    config['use_input']       = False
    config['bias_code']       = False   # True
    config['dec_use_contxt']  = True
    config['deep_out']        = False
    config['deep_out_activ']  = 'tanh'  # maxout2
    config['bigram_predict']  = False
    config['context_predict'] = True    # False
    config['leaky_predict']   = False   # True
    config['dropout']         = 0.3

    # Decoder: sampling
    config['max_len']         = 88  # 15
    config['sample_beam']     = 10
    config['sample_stoch']    = False
    config['sample_argmax']   = False

    # Auto-Encoder
    config['nonlinear_A']     = True
    config['nonlinear_B']     = False

    # VAE/Helmholtz: Model
    config['repeats']         = 10
    config['eval_repeats']    = 10
    config['eval_N']          = 10

    config['variant_control'] = False
    config['factor']          = 10.
    config['mult_q']          = 10.

    print 'setup ok.'
    return config




================================================
FILE: emolga/config_variant.py
================================================
__author__ = 'jiataogu'
from config import setup_ptb2
setup = setup_ptb2

"""
This file is for small variant fix on original
"""


def setup_bienc(config=None):
    if config is None:
        config = setup()
    print 'make some modification'

    config['bidirectional'] = True
    config['decposterior']  = False
    return config


def setup_dim(config=None):
    if config is None:
        config = setup()
    print 'make some modification'

    config['enc_embedd_dim'] = 300
    config['enc_hidden_dim'] = 300
    config['action_dim']     = 100

    config['dec_embedd_dim'] = 300
    config['dec_hidden_dim'] = 300
    config['dec_contxt_dim'] = 300
    return config


def setup_rep(config=None):
    if config is None:
        config = setup()
    print 'make some modification'

    config['repeats']        = 5
    return config


def setup_opt(config=None):
    if config is None:
        config = setup()
    print 'make some modification'

    config['optimizer']      = 'Adam'
    return config

================================================
FILE: emolga/dataset/build_dataset.py
================================================
__author__ = 'jiataogu'
import numpy as np
import numpy.random as rng
import cPickle
import pprint
import sys

from collections import OrderedDict
from fuel import datasets
from fuel import transformers
from fuel import schemes
from fuel import streams


def serialize_to_file(obj, path, protocol=cPickle.HIGHEST_PROTOCOL):
    f = open(path, 'wb')
    cPickle.dump(obj, f, protocol=protocol)
    f.close()


def show_txt(array, path):
    f = open(path, 'w')
    for line in array:
        f.write(' '.join(line) + '\n')

    f.close()


def divide_dataset(dataset, test_size, max_size):
    train_set = dict()
    test_set  = dict()

    for w in dataset:
        train_set[w] = dataset[w][test_size:max_size].astype('int32')
        test_set[w]  = dataset[w][:test_size].astype('int32')

    return train_set, test_set


def deserialize_from_file(path):
    f = open(path, 'rb')
    obj = cPickle.load(f)
    f.close()
    return obj


def build_fuel(data):
    # create fuel dataset.
    dataset     = datasets.IndexableDataset(indexables=OrderedDict([('data', data)]))
    dataset.example_iteration_scheme \
                = schemes.ShuffledExampleScheme(dataset.num_examples)
    return dataset, len(data)


def obtain_stream(dataset, batch_size, size=1):
    if size == 1:
        data_stream = dataset.get_example_stream()
        data_stream = transformers.Batch(data_stream, iteration_scheme=schemes.ConstantScheme(batch_size))

        # add padding and masks to the dataset
        data_stream = transformers.Padding(data_stream, mask_sources=('data',))  # one-element tuple, not a string
        return data_stream
    else:
        data_streams = [dataset.get_example_stream() for _ in xrange(size)]
        data_streams = [transformers.Batch(data_stream, iteration_scheme=schemes.ConstantScheme(batch_size))
                        for data_stream in data_streams]
        data_streams = [transformers.Padding(data_stream, mask_sources=('data',)) for data_stream in data_streams]
        return data_streams
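
# A usage sketch for the two helpers above (not part of the repo; assumes
# fuel is installed). Padding adds a 'data_mask' source alongside 'data':
def _stream_demo():
    data = [[1, 2, 3], [4, 5], [6]]          # int index sequences
    dataset, n = build_fuel(data)
    stream = obtain_stream(dataset, batch_size=2)
    for batch, mask in stream.get_epoch_iterator():
        print(batch.shape)                   # padded (batch, maxlen) array
        print(mask.shape)                    # matching 0/1 mask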

def build_ptb():
    path = './ptbcorpus/'
    print path
    # make the dataset and vocabulary
    X_train = [l.split() for l in open(path + 'ptb.train.txt').readlines()]
    X_test  = [l.split() for l in open(path + 'ptb.test.txt').readlines()]
    X_valid = [l.split() for l in open(path + 'ptb.valid.txt').readlines()]

    X = X_train + X_test + X_valid
    idx2word    = dict(enumerate(set([w for l in X for w in l]), 1))
    idx2word[0] = '<eol>'
    word2idx    = {v: k for k, v in idx2word.items()}
    ixwords_train = [[word2idx[w] for w in l] for l in X_train]
    ixwords_test  = [[word2idx[w] for w in l] for l in X_test]
    ixwords_valid = [[word2idx[w] for w in l] for l in X_valid]
    ixwords_tv    = [[word2idx[w] for w in l] for l in (X_train + X_valid)]

    max_len = max([len(w) for w in X_train])
    print max_len
    # serialization:
    # serialize_to_file(ixwords_train, path + 'data_train.pkl')
    # serialize_to_file(ixwords_test,  path + 'data_test.pkl')
    # serialize_to_file(ixwords_valid, path + 'data_valid.pkl')
    # serialize_to_file(ixwords_tv,    path + 'data_tv.pkl')
    # serialize_to_file([idx2word, word2idx], path + 'voc.pkl')
    # show_txt(X, 'data.txt')
    print 'save done.'  # note: the serialize_to_file calls above are commented out


def filter_unk(X, min_freq=5):
    voc = dict()
    for l in X:
        for w in l:
            if w not in voc:
                voc[w]  = 1
            else:
                voc[w] += 1

    word2idx   = dict()
    word2idx['<eol>'] = 0
    id2word    = dict()
    id2word[0] = '<eol>'

    at         = 1
    for w in voc:
        if voc[w] > min_freq:
            word2idx[w] = at
            id2word[at] = w
            at += 1

    word2idx['<unk>'] = at
    id2word[at] = '<unk>'
    return word2idx, id2word
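
# A quick usage sketch for filter_unk (not part of the repo): words whose
# frequency is not strictly greater than min_freq map to '<unk>'.
def _filter_unk_demo():
    X = [['the', 'cat'], ['the', 'dog'], ['the', 'the', 'the']]
    word2idx, id2word = filter_unk(X, min_freq=2)
    # 'the' (freq 5) earns an id; 'cat' and 'dog' fall below the cutoff,
    # so only '<eol>', 'the' and '<unk>' end up in the vocabulary
    print(sorted(word2idx.keys()))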


def build_msr():
    # path = '/home/thoma/Work/Dial-DRL/dataset/MSRSCC/'
    path = '/Users/jiataogu/Work/Dial-DRL/dataset/MSRSCC/'
    print path

    X           = [l.split() for l in open(path + 'train.txt').readlines()]
    word2idx, idx2word = filter_unk(X, min_freq=5)
    print 'vocabulary size={0}. {1} samples'.format(len(word2idx), len(X))

    mean_len = np.mean([len(w) for w in X])
    print 'mean len = {}'.format(mean_len)

    ixwords     = [[word2idx[w]
                    if w in word2idx
                    else word2idx['<unk>']
                    for w in l] for l in X]
    print ixwords[0]
    # serialization:
    serialize_to_file(ixwords, path + 'data_train.pkl')


if __name__ == '__main__':
    build_msr()
    # build_ptb()
    # build_dataset()
    # game = GuessOrder(size=8)
    # q = 'Is there any number smaller de than 6 in the last 3 numbers ?'
    # print game.easy_parse(q)



================================================
FILE: emolga/layers/__init__.py
================================================
__author__ = 'yinpengcheng'


================================================
FILE: emolga/layers/attention.py
================================================
__author__ = 'jiataogu'
from .core import *
"""
Attention Model.
    <::: Two kinds of attention models ::::>
    -- Linear Transformation
    -- Inner Product
"""


class Attention(Layer):
    def __init__(self, target_dim, source_dim, hidden_dim,
                 init='glorot_uniform', name='attention',
                 coverage=False, max_len=50,
                 shared=False):

        super(Attention, self).__init__()
        self.init       = initializations.get(init)
        self.softmax    = activations.get('softmax')
        self.tanh       = activations.get('tanh')
        self.target_dim = target_dim
        self.source_dim = source_dim
        self.hidden_dim = hidden_dim
        self.max_len    = max_len
        self.coverage   = coverage

        if coverage:
            print 'Use Coverage Trick!'

        self.Wa         = self.init((self.target_dim, self.hidden_dim))
        self.Ua         = self.init((self.source_dim, self.hidden_dim))
        self.va         = self.init((self.hidden_dim, 1))

        self.Wa.name, self.Ua.name, self.va.name = \
                '{}_Wa'.format(name), '{}_Ua'.format(name), '{}_va'.format(name)
        self.params     = [self.Wa, self.Ua, self.va]
        if coverage:
            self.Ca      = self.init((1, self.hidden_dim))
            self.Ca.name = '{}_Ca'.format(name)
            self.params += [self.Ca]

    def __call__(self, X, S,
                 Smask=None,
                 return_log=False,
                 Cov=None):
        assert X.ndim + 1 == S.ndim, 'source should have one more dimension than the target.'
        # X is the key:    (nb_samples, x_dim)
        # S is the source  (nb_samples, maxlen_s, ctx_dim)
        # Cov is the coverage vector (nb_samples, maxlen_s)

        if X.ndim == 1:
            X = X[None, :]
            S = S[None, :, :]
            if Smask is not None:
                Smask = Smask[None, :]

        Eng   = dot(X[:, None, :], self.Wa) + dot(S, self.Ua)  # (nb_samples, source_num, hidden_dims)
        Eng   = self.tanh(Eng)
        # location aware:
        if self.coverage:
            Eng += dot(Cov[:, :, None], self.Ca)  # (nb_samples, source_num, hidden_dims)

        Eng   = dot(Eng, self.va)
        Eng   = Eng[:, :, 0]                      # (nb_samples, source_num)

        if Smask is not None:
            # I want to use mask!
            EngSum = logSumExp(Eng, axis=1, mask=Smask)
            if return_log:
                return (Eng - EngSum) * Smask
            else:
                return T.exp(Eng - EngSum) * Smask
        else:
            if return_log:
                return T.log(self.softmax(Eng))
            else:
                return self.softmax(Eng)
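
# A NumPy sketch (not part of the repo) of the masked branch above:
# restrict the softmax to unmasked source positions, normalising with a
# stable log-sum-exp much as logSumExp (from theano_utils) does.
def masked_softmax_np(eng, mask):
    import numpy as np
    e = np.where(mask > 0, eng, -np.inf)       # ignore masked positions
    e = e - e.max(axis=1, keepdims=True)       # stabilise the exponentials
    w = np.exp(e) * mask
    return w / w.sum(axis=1, keepdims=True)
# masked_softmax_np(np.array([[2., 1., .5]]), np.array([[1., 1., 0.]]))
# -> the masked last position gets exactly zero weight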


class CosineAttention(Layer):
    def __init__(self, target_dim, source_dim,
                 init='glorot_uniform',
                 use_pipe=True,
                 name='attention'):

        super(CosineAttention, self).__init__()
        self.init       = initializations.get(init)
        self.softmax    = activations.get('softmax')
        self.softplus   = activations.get('softplus')
        self.tanh       = activations.get('tanh')
        self.use_pipe   = use_pipe

        self.target_dim = target_dim
        self.source_dim = source_dim

        # pipe
        if self.use_pipe:
            self.W_key  = Dense(self.target_dim, self.source_dim, name='W_key')
        else:
            assert target_dim == source_dim
            self.W_key  = Identity(name='W_key')
        self._add(self.W_key)

        # sharpen
        # self.W_beta     = Dense(self.target_dim, 1, name='W_beta')
        # dio-sharpen
        # self.W_beta     = Dense(self.target_dim, self.source_dim, name='W_beta')
        # self._add(self.W_beta)

        # self.gamma      = self.init((source_dim, ))
        # self.gamma      = self.init((target_dim, source_dim))
        # self.gamma.name = 'o_gamma'
        # self.params    += [self.gamma]

    def __call__(self, X, S, Smask=None, return_log=False):
        assert X.ndim + 1 == S.ndim, 'source should have one more dimension than the target.'

        if X.ndim == 1:
            X = X[None, :]
            S = S[None, :, :]
            if Smask is not None:
                Smask = Smask[None, :]

        key   = self.W_key(X)                   # (nb_samples, source_dim)
        # beta  = self.softplus(self.W_beta(X))   # (nb_samples, source_dim)

        Eng   = dot_2d(key, S)  #, g=self.gamma)
        # Eng   = cosine_sim2d(key, S)  # (nb_samples, source_num)
        # Eng   = T.repeat(beta, Eng.shape[1], axis=1) * Eng

        if Smask is not None:
            # I want to use mask!
            EngSum = logSumExp(Eng, axis=1, mask=Smask)
            if return_log:
                return (Eng - EngSum) * Smask
            else:
                return T.exp(Eng - EngSum) * Smask
        else:
            if return_log:
                return T.log(self.softmax(Eng))
            else:
                return self.softmax(Eng)



================================================
FILE: emolga/layers/core.py
================================================
# -*- coding: utf-8 -*-

from emolga.utils.theano_utils import *
import emolga.basic.initializations as initializations
import emolga.basic.activations as activations


class Layer(object):
    def __init__(self):
        self.params  = []
        self.layers  = []
        self.monitor = {}
        self.watchlist = []

    def init_updates(self):
        self.updates = []

    def _monitoring(self):
        # add monitoring variables
        for l in self.layers:
            for v in l.monitor:
                name = v + '@' + l.name
                print name
                self.monitor[name] = l.monitor[v]

    def __call__(self, X, *args, **kwargs):
        return X

    def _add(self, layer):
        if layer:
            self.layers.append(layer)
            self.params += layer.params

    def supports_masked_input(self):
        ''' Whether or not this layer respects the output mask of its previous layer in its calculations.
        Attaching a layer that does *not* support masked input to a layer that gives a non-None
        output_mask() is an error.'''
        return False

    def get_output_mask(self, train=None):
        '''
        For some models (such as RNNs) you want a way of being able to mark some output data-points as
        "masked", so they are not used in future calculations. In such a model, get_output_mask() should return a mask
        of one less dimension than get_output() (so if get_output is (nb_samples, nb_timesteps, nb_dimensions), then the mask
        is (nb_samples, nb_timesteps)), with a one for every unmasked datapoint, and a zero for every masked one.

        If there is *no* masking then it shall return None. For instance if you attach an Activation layer (they support masking)
        to a layer with an output_mask, then that Activation shall also have an output_mask. If you attach it to a layer with no
        such mask, then the Activation's get_output_mask shall return None.

        Some emolga layers have an output_mask even if their input is unmasked, notably Embedding, which can turn the
        entry "0" into a mask.
        '''
        return None

    def set_weights(self, weights):
        for p, w in zip(self.params, weights):
            if p.eval().shape != w.shape:
                raise Exception("Layer shape %s not compatible with weight shape %s." % (p.eval().shape, w.shape))
            p.set_value(floatX(w))

    def get_weights(self):
        weights = []
        for p in self.params:
            weights.append(p.get_value())
        return weights

    def get_params(self):
        return self.params

    def set_name(self, name):
        for i in range(len(self.params)):
            if self.params[i].name is None:
                self.params[i].name = '%s_p%d' % (name, i)
            else:
                self.params[i].name = name + '_' + self.params[i].name
        self.name = name


class MaskedLayer(Layer):
    '''
    If your layer trivially supports masking (by simply copying the input mask to the output), then subclass MaskedLayer
    instead of Layer, and make sure that you incorporate the input mask into your calculation of get_output()
    '''
    def supports_masked_input(self):
        return True


class Identity(Layer):
    def __init__(self, name='Identity'):
        super(Identity, self).__init__()
        if name is not None:
            self.set_name(name)

    def __call__(self, X):
        return X


class Dense(Layer):
    def __init__(self, input_dim, output_dim, init='glorot_uniform', activation='tanh', name='Dense',
                 learn_bias=True, negative_bias=False):

        super(Dense, self).__init__()
        self.init = initializations.get(init)
        self.activation = activations.get(activation)
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.linear = (activation == 'linear')

        # self.input = T.matrix()
        self.W = self.init((self.input_dim, self.output_dim))
        if not negative_bias:
            self.b = shared_zeros((self.output_dim))
        else:
            self.b = shared_ones((self.output_dim))

        self.learn_bias = learn_bias
        if self.learn_bias:
            self.params = [self.W, self.b]
        else:
            self.params = [self.W]

        if name is not None:
            self.set_name(name)

    def set_name(self, name):
        self.W.name = '%s_W' % name
        self.b.name = '%s_b' % name

    def __call__(self, X):
        # note: the bias is scaled by 4 here, while reverse() below subtracts
        # the unscaled bias, so the two only invert each other while b is zero
        output = self.activation(T.dot(X, self.W) + 4. * self.b)
        return output

    def reverse(self, Y):
        assert self.linear

        output = T.dot((Y - self.b), self.W.T)
        return output


class Dense2(Layer):
    def __init__(self, input_dim1, input_dim2, output_dim, init='glorot_uniform', activation='tanh', name='Dense', learn_bias=True):

        super(Dense2, self).__init__()
        self.init = initializations.get(init)
        self.activation = activations.get(activation)
        self.input_dim1 = input_dim1
        self.input_dim2 = input_dim2
        self.output_dim = output_dim
        self.linear = (activation == 'linear')

        # self.input = T.matrix()

        self.W1 = self.init((self.input_dim1, self.output_dim))
        self.W2 = self.init((self.input_dim2, self.output_dim))
        self.b  = shared_zeros((self.output_dim))

        self.learn_bias = learn_bias
        if self.learn_bias:
            self.params = [self.W1, self.W2, self.b]
        else:
            self.params = [self.W1, self.W2]

        if name is not None:
            self.set_name(name)

    def set_name(self, name):
        self.W1.name = '%s_W1' % name
        self.W2.name = '%s_W2' % name
        self.b.name = '%s_b' % name

    def __call__(self, X1, X2):
        output = self.activation(T.dot(X1, self.W1) + T.dot(X2, self.W2) + self.b)
        return output


class Constant(Layer):
    def __init__(self, input_dim, output_dim, init=None, activation='tanh', name='Bias'):

        super(Constant, self).__init__()
        assert input_dim == output_dim, 'Bias Layer needs to have the same input/output nodes.'

        self.init = initializations.get(init)
        self.activation = activations.get(activation)
        self.input_dim = input_dim
        self.output_dim = output_dim

        self.b = shared_zeros(self.output_dim)
        self.params = [self.b]

        if name is not None:
            self.set_name(name)

    def set_name(self, name):
        self.b.name = '%s_b' % name

    def __call__(self, X=None):
        output = self.activation(self.b)
        if X is not None:  # truth-testing a symbolic variable raises a TypeError
            L = X.shape[0]
            output = T.extra_ops.repeat(output[None, :], L, axis=0)
        return output


class MemoryLinear(Layer):
    def __init__(self, input_dim, input_wdth, init='glorot_uniform',
                 activation='tanh', name='Bias', has_input=True):
        super(MemoryLinear, self).__init__()

        self.init       = initializations.get(init)
        self.activation = activations.get(activation)
        self.input_dim  = input_dim
        self.input_wdth = input_wdth

        self.b = self.init((self.input_dim, self.input_wdth))
        self.params = [self.b]

        if has_input:
            self.P = self.init((self.input_dim, self.input_wdth))
            self.params += [self.P]

        if name is not None:
            self.set_name(name)

    def __call__(self, X=None):
        out = self.b[None, :, :]
        if X is not None:  # truth-testing a symbolic variable raises a TypeError
            out += self.P[None, :, :] * X
        return self.activation(out)


class Dropout(MaskedLayer):
    """
        Hinton's dropout.
    """
    def __init__(self, rng=None, p=1., name=None):
        super(Dropout, self).__init__()
        self.p   = p
        self.rng = rng

    def __call__(self, X, train=True):
        if self.p > 0.:
            retain_prob = 1. - self.p
            if train:
                X *= self.rng.binomial(X.shape, p=retain_prob, dtype=theano.config.floatX)
            else:
                X *= retain_prob
        return X


class Activation(MaskedLayer):
    """
        Apply an activation function to an output.
    """
    def __init__(self, activation):
        super(Activation, self).__init__()
        self.activation = activations.get(activation)

    def __call__(self, X):
        return self.activation(X)



================================================
FILE: emolga/layers/embeddings.py
================================================
# -*- coding: utf-8 -*-

from .core import Layer
from emolga.utils.theano_utils import *
import emolga.basic.initializations as initializations


class Embedding(Layer):
    '''
        Turn positive integers (indexes) into dense vectors of fixed size.
        eg. [[4], [20]] -> [[0.25, 0.1], [0.6, -0.2]]

        @input_dim: size of vocabulary (highest input integer + 1)
        @out_dim: size of dense representation
    '''

    def __init__(self, input_dim, output_dim, init='uniform', name=None):

        super(Embedding, self).__init__()
        self.init = initializations.get(init)
        self.input_dim = input_dim
        self.output_dim = output_dim

        self.W = self.init((self.input_dim, self.output_dim))

        self.params = [self.W]

        if name is not None:
            self.set_name(name)

    def get_output_mask(self, X):
        return T.ones_like(X) * (1 - T.eq(X, 0))

    def __call__(self, X, mask_zero=False, context=None):
        if context is None:
            out = self.W[X]
        else:
            assert context.ndim == 3
            flag  = False
            if X.ndim == 1:
                flag = True
                X = X[:, None]

            b_size = context.shape[0]

            EMB = T.repeat(self.W[None, :, :], b_size, axis=0)
            EMB = T.concatenate([EMB, context], axis=1)

            m_size = EMB.shape[1]
            e_size = EMB.shape[2]
            maxlen = X.shape[1]

            EMB = EMB.reshape((b_size * m_size, e_size))
            Z   = (T.arange(b_size)[:, None] * m_size + X).reshape((b_size * maxlen,))
            out = EMB[Z]  # (b_size * maxlen, e_size)

            if not flag:
                out = out.reshape((b_size, maxlen, e_size))
            else:
                out = out.reshape((b_size, e_size))

        if mask_zero:
            return out, T.cast(self.get_output_mask(X), dtype='float32')
        else:
            return out
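
# A NumPy sketch (not part of the repo) of the `context` branch above: the
# extended-vocabulary lookup used when a context is supplied, which is how
# the copy mechanism can address source positions as well as words. Ids
# below len(W) hit the word-embedding table; ids >= len(W) hit that
# example's encoded source positions.
def lookup_with_context_np(W, X, context):
    import numpy as np
    b = context.shape[0]
    EMB = np.concatenate([np.repeat(W[None, :, :], b, axis=0), context], axis=1)
    return EMB[np.arange(b)[:, None], X]       # (b, maxlen, e_size)
# with a 3-word vocabulary W (3 x e) and 2 source positions per example,
# X = [[1, 3]] fetches word 1, then source position 0.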


class Zero(Layer):
    def __call__(self, X):
        out = T.zeros(X.shape)
        return out


class Bias(Layer):
    def __call__(self, X):
        tmp = X.flatten()
        tmp = tmp.dimshuffle(0, 'x')
        return tmp


================================================
FILE: emolga/layers/gridlstm.py
================================================
__author__ = 'jiataogu'
"""
The file is the implementation of Grid-LSTM
In this stage we only support 2D LSTM with Pooling.
"""
from recurrent import *
from attention import Attention
import logging
import copy
logger = logging.getLogger(__name__)


class Grid(Recurrent):
    """
    Grid Cell for Grid-LSTM
    ===================================================
    LSTM
            [h', m'] = LSTM(x, h, m):
                gi = sigmoid(Wi * x + Ui * h + Vi * m)  # Vi is peep-hole
                gf = sigmoid(Wf * x + Uf * h + Vf * m)
                go = sigmoid(Wo * x + Uo * h + Vo * m)
                gc = tanh(Wc * x + Uc * h)

                m' = gf @ m + gi @ gc  (@ represents element-wise dot.)
                h' = go @ tanh(m')

    ===================================================
    Grid
    (here is an example for 2D Grid LSTM with priority dimension = 1)
     -------------
    |    c'  d'   |     Grid Block and Grid Updates.
    | a         a'|
    |             |     [d' c'] = LSTM_d([b, d],  c)
    | b         b'|     [a' b'] = LSTM_t([b, d'], a)
    |    c   d    |
     -------------
    ===================================================
    Details please refer to:
        "Grid Long Short-Term Memory", http://arxiv.org/abs/1507.01526
    """
    def __init__(self,
                 output_dims,
                 input_dims,    # [0, ... 0], 0 represents no external inputs.
                 priority=1,
                 peephole=True,
                 init='glorot_uniform', inner_init='orthogonal',
                 forget_bias_init='one',
                 activation='tanh', inner_activation='sigmoid',
                 use_input=False,
                 name=None, weights=None,
                 identity_connect=None
                 ):
        super(Grid, self).__init__()

        # assert len(output_dims) == 2, 'in this stage, we only support 2D Grid-LSTM'
        assert len(input_dims)  == len(output_dims), '# of inputs must match # of outputs.'

        """
        Initialization.
        """
        self.input_dims       = input_dims
        self.output_dims      = output_dims
        self.N                = len(output_dims)
        self.priority         = priority
        self.peephole         = peephole
        self.use_input        = use_input

        self.init             = initializations.get(init)
        self.inner_init       = initializations.get(inner_init)
        self.forget_bias_init = initializations.get(forget_bias_init)
        self.activation       = activations.get(activation)
        self.inner_activation = activations.get(inner_activation)

        self.identity_connect = identity_connect
        self.axies            = {0: 'x', 1: 'y', 2: 'z', 3: 'w'}  # only support at most 4D now!

        """
        Others info.
        """
        if weights is not None:
            self.set_weights(weights)

        if name is not None:
            self.set_name(name)

    def build(self):
        """
        Build the model weights
        """
        logger.info("Building GridPool-LSTM !!")
        self.W = dict()
        self.U = dict()
        self.V = dict()
        self.b = dict()

        # ******************************************************************************************
        for k in xrange(self.N):       # N-Grids (for 2 dimensions, 0 is for time; 1 is for depth.)
            axis  = self.axies[k]
            # input layers:
            if self.input_dims[k] > 0 and self.use_input:
                # use the data information.
                self.W[axis + '#i'], self.W[axis + '#f'], \
                self.W[axis + '#o'], self.W[axis + '#c']  \
                      = [self.init((self.input_dims[k], self.output_dims[k])) for _ in xrange(4)]

            # hidden layers:
            for j in xrange(self.N):   # every hidden states inputs.
                pos   = self.axies[j]
                if k == j:
                    self.U[axis + pos + '#i'], self.U[axis + pos + '#f'], \
                    self.U[axis + pos + '#o'], self.U[axis + pos + '#c']  \
                        = [self.inner_init((self.output_dims[j], self.output_dims[k])) for _ in xrange(4)]
                else:
                    self.U[axis + pos + '#i'], self.U[axis + pos + '#f'], \
                    self.U[axis + pos + '#o'], self.U[axis + pos + '#c']  \
                        = [self.init((self.output_dims[j], self.output_dims[k])) for _ in xrange(4)]

            # bias layers:
            self.b[axis + '#i'], self.b[axis + '#o'], self.b[axis + '#c']  \
                      = [shared_zeros(self.output_dims[k]) for _ in xrange(3)]
            self.b[axis + '#f'] = self.forget_bias_init(self.output_dims[k])

            # peep-hole layers:
            if self.peephole:
                self.V[axis + '#i'], self.V[axis + '#f'], self.V[axis + '#o'] \
                      = [self.init(self.output_dims[k]) for _ in xrange(3)]
        # *****************************************************************************************

        # set names for these weights
        for A, n in zip([self.W, self.U, self.b, self.V], ['W', 'U', 'b', 'V']):
            for w in A:
                A[w].name = n + '_' + w

        # set parameters
        self.params = [self.W[s] for s in self.W] + \
                      [self.U[s] for s in self.U] + \
                      [self.b[s] for s in self.b] + \
                      [self.V[s] for s in self.V]

    def lstm_(self, k, H, m, x, identity=False):
        """
       LSTM
            [h', m'] = LSTM(x, h, m):
                gi = sigmoid(Wi * x + Ui * h + Vi * m)  # Vi is peep-hole
                gf = sigmoid(Wf * x + Uf * h + Vf * m)
                go = sigmoid(Wo * x + Uo * h + Vo * m)
                gc = tanh(Wc * x + Uc * h)

                m' = gf @ m + gi @ gc  (@ represents element-wise dot.)
                h' = go @ tanh(m')

        """
        assert len(H) == self.N, 'we have to use all the hidden states in Grid LSTM'
        axis           = self.axies[k]

        # *************************************************************************
        # bias energy
        ei, ef, eo, ec = [self.b[axis + p] for p in ['#i', '#f', '#o', '#c']]

        # hidden energy
        for j in xrange(self.N):
            pos  = self.axies[j]

            ei  += T.dot(H[j], self.U[axis + pos + '#i'])
            ef  += T.dot(H[j], self.U[axis + pos + '#f'])
            eo  += T.dot(H[j], self.U[axis + pos + '#o'])
            ec  += T.dot(H[j], self.U[axis + pos + '#c'])

        # input energy (if any)
        if self.input_dims[k] > 0 and self.use_input:
            ei  += T.dot(x, self.W[axis + '#i'])
            ef  += T.dot(x, self.W[axis + '#f'])
            eo  += T.dot(x, self.W[axis + '#o'])
            ec  += T.dot(x, self.W[axis + '#c'])

        # peep-hole connections
        if self.peephole:
            ei  += m * self.V[axis + '#i'][None, :]
            ef  += m * self.V[axis + '#f'][None, :]
            eo  += m * self.V[axis + '#o'][None, :]
        # *************************************************************************

        # compute the gates.
        i        = self.inner_activation(ei)
        f        = self.inner_activation(ef)
        o        = self.inner_activation(eo)
        c        = self.activation(ec)

        # update the memory and hidden states.
        m_new    = f * m + i * c
        h_new    = o * self.activation(m_new)

        return h_new, m_new

    def grid_(self,
              hs_i,
              ms_i,
              xs_i,
              priority=1,
              identity=None):
        """
        ===================================================
        Grid (2D as an example)
         -------------
        |    c'  d'   |     Grid Block and Grid Updates.
        | a         a'|
        |             |     [d' c'] = LSTM_d([b, d],  c)
        | b         b'|     [a' b'] = LSTM_t([b, d'], a)   priority
        |    c   d    |
         -------------
         a = my | b = hy | c = mx | d = hx
        ===================================================

        Currently masking is not considered in GridLSTM.
        """
        # compute LSTM updates for non-priority dimensions
        H_new   = hs_i
        M_new   = ms_i
        for k in xrange(self.N):
            if k == priority:
                continue
            m   = ms_i[k]
            x   = xs_i[k]
            H_new[k], M_new[k] \
                = self.lstm_(k, hs_i, m, x)

            if identity is not None:
                if identity[k]:
                    H_new[k] += hs_i[k]

        # compute LSTM updates along the priority dimension
        if priority >= 0:
            hs_ii   = H_new
            H_new[priority], M_new[priority] \
                    = self.lstm_(priority, hs_ii, ms_i[priority], xs_i[priority])
            if identity is not None:
                if identity[priority]:
                    H_new[priority] += hs_ii[priority]

        return H_new, M_new
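
# A NumPy sketch (not part of the repo) of the per-dimension LSTM update
# used by lstm_ above, collapsed to a single hidden input h for brevity
# (lstm_ actually sums U-projections over every dimension's hidden state).
# W, U, V, b are dicts keyed by 'i', 'f', 'o', 'c'; there is no peephole
# on the candidate gate, matching the code.
def lstm_step_np(x, h, m, W, U, V, b):
    import numpy as np
    sigm = lambda z: 1.0 / (1.0 + np.exp(-z))
    gi = sigm(x.dot(W['i']) + h.dot(U['i']) + m * V['i'] + b['i'])
    gf = sigm(x.dot(W['f']) + h.dot(U['f']) + m * V['f'] + b['f'])
    go = sigm(x.dot(W['o']) + h.dot(U['o']) + m * V['o'] + b['o'])
    gc = np.tanh(x.dot(W['c']) + h.dot(U['c']) + b['c'])
    m_new = gf * m + gi * gc           # memory update
    h_new = go * np.tanh(m_new)        # hidden update
    return h_new, m_new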


class GridLSTM3D(Grid):
    """
    Grid-LSTM 3D version,
    which has one flexible dimension (time) and 2 fixed dimensions (x & y)
    """
    def __init__(self,
                 # parameters for Grid.
                 output_dims,
                 input_dims,    # [0, ... 0], 0 represents no external inputs.
                 priority=1,
                 peephole=True,
                 init='glorot_uniform', inner_init='orthogonal',
                 forget_bias_init='one',
                 activation='tanh', inner_activation='sigmoid',
                 use_input=False,
                 name=None, weights=None,
                 identity_connect=None,

                 # parameters for 2D-GridLSTM
                 depth=10,  # the size of a big grid
                 learn_init=False,
                 pooling=True,
                 attention=False,
                 shared=True,
                 dropout=0,
                 rng=None,
                 ):
        super(Grid, self).__init__()

        assert len(output_dims) == 3, 'in this stage, we only support 3D Grid-LSTM'
        assert len(input_dims)  == len(output_dims), '# of inputs must match # of outputs.'
        assert input_dims[2]    == 0, 'we have no z-axis inputs here.'
        assert shared, 'we share the weights in this stage.'
        assert not (attention and pooling), 'attention and pooling cannot be set at the same time.'

        """
        Initialization.
        """
        logger.info(":::: Sequential Grid-Pool LSTM ::::")
        self.input_dims       = input_dims
        self.output_dims      = output_dims
        self.N                = len(output_dims)
        self.depth            = depth
        self.dropout          = dropout

        self.priority         = priority
        self.peephole         = peephole
        self.use_input        = use_input
        self.pooling          = pooling
        self.attention        = attention
        self.learn_init       = learn_init

        self.init             = initializations.get(init)
        self.inner_init       = initializations.get(inner_init)
        self.forget_bias_init = initializations.get(forget_bias_init)
        self.activation       = activations.get(activation)
        self.relu             = activations.get('relu')
        self.inner_activation = activations.get(inner_activation)

        self.identity_connect = identity_connect
        self.axies            = {0: 'x', 1: 'y', 2: 'z', 3: 'w'}  # at most 4D is supported for now

        if self.identity_connect is not None:
            logger.info('Identity Connection: {}'.format(self.identity_connect))

        """
        Build the model weights.
        """
        # build the centroid grid.
        self.build()

        # input projection layer (projected to time-axis)       [x]
        self.Ph  = Dense(input_dims[0], output_dims[0], name='Ph')
        self.Pm  = Dense(input_dims[0], output_dims[0], name='Pm')

        self._add(self.Ph)
        self._add(self.Pm)

        # learn init for depth-axis hidden states/memory cells. [y]
        if self.learn_init:
            self.M0  = self.init((depth, depth, output_dims[2]))
            self.H0  = self.init((depth, depth, output_dims[2]))

            self.M0.name, self.H0.name = 'M0', 'H0'
            self.params += [self.M0, self.H0]

        # pooling / attention projections (mirroring SequentialGridLSTM below);
        # _step references self.PP / self.A, so they must be built here too.
        if self.pooling:
            self.PP  = Dense(output_dims[1] + input_dims[0], output_dims[1],
                             name='PP', activation='linear')
            self._add(self.PP)

        if self.attention:
            self.A   = Attention(target_dim=input_dims[0],
                                 source_dim=output_dims[1],
                                 hidden_dim=200, name='attender')
            self._add(self.A)

        if weights is not None:
            self.set_weights(weights)

        if name is not None:
            self.set_name(name)

    def _step(self, *args):
        # the depth is not fixed, so the number of per-step arguments cannot
        # be declared statically; they are unpacked by position:
        #    args = [raw_input] +                (sequence)
        #           [hy]*depth + [my]*depth      (outputs_info)
        #
        inputs = args[0]  # (nb_samples, x, y)
        Hy_tm1 = [args[k] for k in range(1, 1 + self.depth)]
        My_tm1 = [args[k] for k in range(1 + self.depth, 1 + 2 * self.depth)]

        # x_axis input projection (get hx_t, mx_t)
        hx_t   = self.Ph(inputs)           # (nb_samples, output_dim0, output_dim1)
        mx_t   = self.Pm(inputs)           # (nb_samples, output_dim0, output_dim1)

        # build computation path from bottom to top.
        Hx_t   = [hx_t]
        Mx_t   = [mx_t]
        Hy_t   = []
        My_t   = []
        for d in xrange(self.depth):
            hs_i       = [Hx_t[-1], Hy_tm1[d]]
            ms_i       = [Mx_t[-1], My_tm1[d]]
            xs_i       = [inputs,   T.zeros_like(inputs)]

            hs_o, ms_o = self.grid_(hs_i, ms_i, xs_i, priority=self.priority, identity=self.identity_connect)

            Hx_t      += [hs_o[0]]
            Hy_t      += [hs_o[1]]
            Mx_t      += [ms_o[0]]
            My_t      += [ms_o[1]]

        hx_out = Hx_t[-1]
        mx_out = Mx_t[-1]

        # get the output (output_y, output_x)
        # MAX-Pooling
        if self.pooling:
            # hy_t       = T.max([self.PP(hy) for hy in Hy_t], axis=0)
            hy_t       = T.max([self.PP(T.concatenate([hy, inputs], axis=-1)) for hy in Hy_t], axis=0)
            Hy_t       = [hy_t] * self.depth

        if self.attention:
            HHy_t      = T.concatenate([hy[:, None, :] for hy in Hy_t], axis=1)  # (nb_samples, n_depth, out_dim1)
            annotation = self.A(inputs, HHy_t)   # (nb_samples, n_depth)
            hy_t       = T.sum(HHy_t * annotation[:, :, None], axis=1)           # (nb_samples, out_dim1)
            Hy_t       = [hy_t] * self.depth

        R = Hy_t + My_t + [hx_out, mx_out]
        return tuple(R)

    def __call__(self, X, init_H=None, init_M=None,
                 return_sequence=False, one_step=False,
                 return_info='hy', train=True):
        # training/testing flag
        self.train = train

        # masking is not supported yet.
        if X.ndim == 2:
            X = X[:, None, :]

        # one step
        if one_step:
            assert init_H is not None, 'previous state must be provided!'
            assert init_M is not None, 'previous cell must be provided!'

        X = X.dimshuffle((1, 0, 2))
        if init_H is None:
            if self.learn_init:
                init_m     = T.repeat(self.M0[:, None, :], X.shape[1], axis=1)
                if self.pooling:
                    init_h = T.repeat(self.H0[None, :], self.depth, axis=0)
                else:
                    init_h = self.H0
                init_h     = T.repeat(init_h[:, None, :], X.shape[1], axis=1)

                init_H     = []
                init_M     = []
                for j in xrange(self.depth):
                    init_H.append(init_h[j])
                    init_M.append(init_m[j])
            else:
                init_H     = [T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dims[1]), 1)] * self.depth
                init_M     = [T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dims[1]), 1)] * self.depth

        # computational graph !
        if not one_step:
            sequences    = [X]
            outputs_info = init_H + init_M + [None, None]
            outputs, _   = theano.scan(
                self._step,
                sequences=sequences,
                outputs_info=outputs_info
            )
        else:
            outputs      = self._step(*([X[0]] + init_H + init_M))

        if   return_info == 'hx':
            if return_sequence:
                return outputs[-2].dimshuffle((1, 0, 2))   # hx lives in outputs[-2]
            return outputs[-2][-1]
        elif return_info == 'hy':
            assert self.pooling or self.attention, 'y-axis hidden states are only meaningful in pooling/attention mode.'
            if return_sequence:
                return outputs[2].dimshuffle((1, 0, 2))
            return outputs[2][-1]
        elif return_info == 'hxhy':
            assert self.pooling or self.attention, 'y-axis hidden states are only meaningful in pooling/attention mode.'
            if return_sequence:
                return outputs[-2].dimshuffle((1, 0, 2)), outputs[2].dimshuffle((1, 0, 2))    # x, y
            return outputs[-2][-1], outputs[2][-1]



class SequentialGridLSTM(Grid):
    """
    For details, please refer to:
        "Grid Long Short-Term Memory",
            http://arxiv.org/abs/1507.01526

    SequentialGridLSTM is a typical 2D Grid-LSTM
    with one flexible dimension (time) and one fixed dimension (depth).
    Input information is injected along the x-axis.
    """
    def __init__(self,
                 # parameters for Grid.
                 output_dims,
                 input_dims,    # [0, ... 0], 0 represents no external inputs.
                 priority=1,
                 peephole=True,
                 init='glorot_uniform', inner_init='orthogonal',
                 forget_bias_init='one',
                 activation='tanh', inner_activation='sigmoid',
                 use_input=False,
                 name=None, weights=None,
                 identity_connect=None,

                 # parameters for 2D-GridLSTM
                 depth=5,
                 learn_init=False,
                 pooling=True,
                 attention=False,
                 shared=True,
                 dropout=0,
                 rng=None,
                 ):
        super(Grid, self).__init__()

        assert len(output_dims) == 2, 'at this stage, only 2D Grid-LSTM is supported.'
        assert len(input_dims)  == len(output_dims), '# of inputs must match # of outputs.'
        assert input_dims[1]    == 0, 'there are no y-axis inputs here.'
        assert shared, 'weights are shared at this stage.'
        assert not (attention and pooling), 'attention and pooling cannot be enabled at the same time.'

        """
        Initialization.
        """
        logger.info(":::: Sequential Grid-Pool LSTM ::::")
        self.input_dims       = input_dims
        self.output_dims      = output_dims
        self.N                = len(output_dims)
        self.depth            = depth
        self.dropout          = dropout

        self.priority         = priority
        self.peephole         = peephole
        self.use_input        = use_input
        self.pooling          = pooling
        self.attention        = attention
        self.learn_init       = learn_init

        self.init             = initializations.get(init)
        self.inner_init       = initializations.get(inner_init)
        self.forget_bias_init = initializations.get(forget_bias_init)
        self.activation       = activations.get(activation)
        self.relu             = activations.get('relu')
        self.inner_activation = activations.get(inner_activation)

        self.identity_connect = identity_connect
        self.axies            = {0: 'x', 1: 'y', 2: 'z', 3: 'w'}  # at most 4D is supported for now

        if self.identity_connect is not None:
            logger.info('Identity Connection: {}'.format(self.identity_connect))

        """
        Build the model weights.
        """
        # build the centroid grid.
        self.build()

        # input projection layer (projected to time-axis)       [x]
        self.Ph  = Dense(input_dims[0], output_dims[0], name='Ph')
        self.Pm  = Dense(input_dims[0], output_dims[0], name='Pm')

        self._add(self.Ph)
        self._add(self.Pm)

        # learn init for depth-axis hidden states/memory cells. [y]
        if self.learn_init:
            self.M0      = self.init((depth, output_dims[1]))
            if self.pooling:
                self.H0  = self.init(output_dims[1])
            else:
                self.H0  = self.init((depth, output_dims[1]))

            self.M0.name, self.H0.name = 'M0', 'H0'
            self.params += [self.M0, self.H0]

        # if we use attention instead of max-pooling
        if self.pooling:
            self.PP      = Dense(output_dims[1] + input_dims[0], output_dims[1], # init='orthogonal',
                                 name='PP', activation='linear')
            self._add(self.PP)

        if self.attention:
            self.A       = Attention(target_dim=input_dims[0],
                                     source_dim=output_dims[1],
                                     hidden_dim=200, name='attender')
            self._add(self.A)

        # if self.dropout > 0:
        #     logger.info(">>>>>> USE DropOut !! <<<<<<")
        #     self.D       = Dropout(rng=rng, p=self.dropout, name='Dropout')

        """
        Other info.
        """
        if weights is not None:
            self.set_weights(weights)

        if name is not None:
            self.set_name(name)

    def _step(self, *args):
        # the depth is not fixed, so the number of per-step arguments cannot
        # be declared statically; they are unpacked by position:
        #    args = [raw_input] +                (sequence)
        #           [hy]*depth + [my]*depth      (outputs_info)
        #
        inputs = args[0]
        Hy_tm1 = [args[k] for k in range(1, 1 + self.depth)]
        My_tm1 = [args[k] for k in range(1 + self.depth, 1 + 2 * self.depth)]

        # x_axis input projection (get hx_t, mx_t)
        hx_t   = self.Ph(inputs)           # (nb_samples, output_dim0)
        mx_t   = self.Pm(inputs)           # (nb_samples, output_dim0)

        # build computation path from bottom to top.
        Hx_t   = [hx_t]
        Mx_t   = [mx_t]
        Hy_t   = []
        My_t   = []
        for d in xrange(self.depth):
            hs_i       = [Hx_t[-1], Hy_tm1[d]]
            ms_i       = [Mx_t[-1], My_tm1[d]]
            xs_i       = [inputs,   T.zeros_like(inputs)]

            hs_o, ms_o = self.grid_(hs_i, ms_i, xs_i, priority=self.priority, identity=self.identity_connect)

            Hx_t      += [hs_o[0]]
            Hy_t      += [hs_o[1]]
            Mx_t      += [ms_o[0]]
            My_t      += [ms_o[1]]

        hx_out = Hx_t[-1]
        mx_out = Mx_t[-1]

        # get the output (output_y, output_x)
        # MAX-Pooling
        if self.pooling:
            # hy_t       = T.max([self.PP(hy) for hy in Hy_t], axis=0)
            hy_t       = T.max([self.PP(T.concatenate([hy, inputs], axis=-1)) for hy in Hy_t], axis=0)
            Hy_t       = [hy_t] * self.depth

        if self.attention:
            HHy_t      = T.concatenate([hy[:, None, :] for hy in Hy_t], axis=1)  # (nb_samples, n_depth, out_dim1)
            annotation = self.A(inputs, HHy_t)   # (nb_samples, n_depth)
            hy_t       = T.sum(HHy_t * annotation[:, :, None], axis=1)           # (nb_samples, out_dim1)
            Hy_t       = [hy_t] * self.depth

        R = Hy_t + My_t + [hx_out, mx_out]
        return tuple(R)

    def __call__(self, X, init_H=None, init_M=None,
                 return_sequence=False, one_step=False,
                 return_info='hy', train=True):
        # training/testing flag
        self.train = train

        # masking is not supported yet.
        if X.ndim == 2:
            X = X[:, None, :]

        # one step
        if one_step:
            assert init_H is not None, 'previous state must be provided!'
            assert init_M is not None, 'previous cell must be provided!'

        X = X.dimshuffle((1, 0, 2))
        if init_H is None:
            if self.learn_init:
                init_m     = T.repeat(self.M0[:, None, :], X.shape[1], axis=1)
                if self.pooling:
                    init_h = T.repeat(self.H0[None, :], self.depth, axis=0)
                else:
                    init_h = self.H0
                init_h     = T.repeat(init_h[:, None, :], X.shape[1], axis=1)

                init_H     = []
                init_M     = []
                for j in xrange(self.depth):
                    init_H.append(init_h[j])
                    init_M.append(init_m[j])
            else:
                init_H     = [T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dims[1]), 1)] * self.depth
                init_M     = [T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dims[1]), 1)] * self.depth

        # computational graph !
        if not one_step:
            sequences    = [X]
            outputs_info = init_H + init_M + [None, None]
            outputs, _   = theano.scan(
                self._step,
                sequences=sequences,
                outputs_info=outputs_info
            )
        else:
            outputs      = self._step(*([X[0]] + init_H + init_M))

        if   return_info == 'hx':
            if return_sequence:
                return outputs[-2].dimshuffle((1, 0, 2))   # hx lives in outputs[-2]
            return outputs[-2][-1]
        elif return_info == 'hy':
            assert self.pooling or self.attention, 'y-axis hidden states are only meaningful in pooling/attention mode.'
            if return_sequence:
                return outputs[2].dimshuffle((1, 0, 2))
            return outputs[2][-1]
        elif return_info == 'hxhy':
            assert self.pooling or self.attention, 'y-axis hidden states are only meaningful in pooling/attention mode.'
            if return_sequence:
                return outputs[-2].dimshuffle((1, 0, 2)), outputs[2].dimshuffle((1, 0, 2))    # x, y
            return outputs[-2][-1], outputs[2][-1]
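
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file); the dimensions
# below are made-up examples:
#
#   X    = T.tensor3('X')                           # (nb_samples, time, 100)
#   grid = SequentialGridLSTM(output_dims=[128, 128],
#                             input_dims=[100, 0],  # no y-axis inputs
#                             depth=5, pooling=True)
#   H    = grid(X, return_sequence=True,
#               return_info='hy')                   # (nb_samples, time, 128)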


class PyramidGridLSTM2D(Grid):
    """
    A variant version of Sequential LSTM where we introduce a Pyramid structure.
    """
    def __init__(self,
                 # parameters for Grid.
                 output_dims,
                 input_dims,    # [0, ... 0], 0 represents no external inputs.
                 priority=1,
                 peephole=True,
                 init='glorot_uniform', inner_init='orthogonal',
                 forget_bias_init='one',
                 activation='tanh', inner_activation='sigmoid',
                 use_input=True,
                 name=None, weights=None,
                 identity_connect=None,

                 # parameters for 2D-GridLSTM
                 depth=5,
                 learn_init=False,
                 shared=True,
                 dropout=0
                 ):

        super(Grid, self).__init__()
        assert len(output_dims) == 2, 'at this stage, only 2D Grid-LSTM is supported.'
        assert len(input_dims)  == len(output_dims), '# of inputs must match # of outputs.'
        assert output_dims[0] == output_dims[1], 'only square models are supported here.'
        assert shared, 'weights are shared at this stage.'
        assert use_input, 'inputs must be used; they are injected in the middle of the grid.'

        """
        Initialization.
        """
        logger.info(":::: Pyramid Grid-LSTM (2D) ::::")
        self.input_dims       = input_dims
        self.output_dims      = output_dims
        self.N                = len(output_dims)
        self.depth            = depth
        self.dropout          = dropout

        self.priority         = priority
        self.peephole         = peephole
        self.use_input        = use_input
        self.learn_init       = learn_init

        self.init             = initializations.get(init)
        self.inner_init       = initializations.get(inner_init)
        self.forget_bias_init = initializations.get(forget_bias_init)
        self.activation       = activations.get(activation)
        self.relu             = activations.get('relu')
        self.inner_activation = activations.get(inner_activation)

        self.identity_connect = identity_connect
        self.axies            = {0: 'x', 1: 'y', 2: 'z', 3: 'w'}  # at most 4D is supported for now

        """
        Build the model weights.
        """
        # build the centroid grid.
        self.build()

        # # input projection layer (projected to time-axis)       [x]
        # self.Ph  = Dense(input_dims[0], output_dims[0], name='Ph')
        # self.Pm  = Dense(input_dims[0], output_dims[0], name='Pm')
        #
        # self._add(self.Ph)
        # self._add(self.Pm)

        # learn init/
        if self.learn_init:
            self.hx0 = self.init((1, output_dims[0]))
            self.hy0 = self.init((1, output_dims[1]))
            self.mx0 = self.init((1, output_dims[0]))
            self.my0 = self.init((1, output_dims[1]))

            self.hx0.name, self.hy0.name = 'hx0', 'hy0'
            self.mx0.name, self.my0.name = 'mx0', 'my0'
            self.params += [self.hx0, self.hy0, self.mx0, self.my0]

        """
        Other info.
        """
        if weights is not None:
            self.set_weights(weights)

        if name is not None:
            self.set_name(name)

    def _step(self, *args):
        inputs = args[0]
        hx_tm1 = args[1]
        mx_tm1 = args[2]
        hy_tm1 = args[3]
        my_tm1 = args[4]

        # zero constant inputs.
        pre_info    = [[[T.zeros_like(hx_tm1)
                         for _ in xrange(self.depth)]
                        for _ in xrange(self.depth)]
                       for _ in xrange(4)]  # hx, mx, hy, my

        pre_inputs  = [[T.zeros_like(inputs)
                        for _ in xrange(self.depth)]
                       for _ in xrange(self.depth)]

        for kk in xrange(self.depth):
            pre_inputs[kk][kk] = inputs

        pre_info[0][0][0] = hx_tm1
        pre_info[1][0][0] = mx_tm1
        pre_info[2][0][0] = hy_tm1
        pre_info[3][0][0] = my_tm1
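
        # wavefront traversal of the depth x depth grid: cell (step_x, step_y)
        # reads hx/mx from its left neighbour and hy/my from the one below;
        # the external input is injected on the diagonal cells, and the states
        # of the top-right cell (depth-1, depth-1) are returned.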

        for step_x in xrange(self.depth):
            for step_y in xrange(self.depth):
                # input hidden/memory/input information

                hs_i  = [pre_info[0][step_x][step_y],
                         pre_info[2][step_x][step_y]]
                ms_i  = [pre_info[1][step_x][step_y],
                         pre_info[3][step_x][step_y]]
                xs_i  = [pre_inputs[step_x][step_y],
                         pre_inputs[step_x][step_y]]

                # compute grid-lstm
                hs_o, ms_o = self.grid_(hs_i, ms_i, xs_i, priority=-1)

                # output hidden/memory information
                if (step_x == self.depth - 1) and (step_y == self.depth - 1):
                    hx_t, mx_t, hy_t, my_t = hs_o[0], ms_o[0], hs_o[1], ms_o[1]
                    return hx_t, mx_t, hy_t, my_t

                if step_x + 1 < self.depth:
                    pre_info[0][step_x + 1][step_y] = hs_o[0]
                    pre_info[1][step_x + 1][step_y] = ms_o[0]

                if step_y + 1 < self.depth:
                    pre_info[2][step_x][step_y + 1] = hs_o[1]
                    pre_info[3][step_x][step_y + 1] = ms_o[1]

    def __call__(self, X, init_x=None, init_y=None,
                 return_sequence=False, one_step=False):
        # masking is not supported yet.
        if X.ndim == 2:
            X = X[:, None, :]

        # one step
        if one_step:
            assert init_x is not None, 'previous x must be provided!'
            assert init_y is not None, 'previous y must be provided!'

        X = X.dimshuffle((1, 0, 2))
        if init_x is None:
            if self.learn_init:
                init_mx    = T.repeat(self.mx0, X.shape[1], axis=0)
                init_my    = T.repeat(self.my0, X.shape[1], axis=0)
                init_hx    = T.repeat(self.hx0, X.shape[1], axis=0)
                init_hy    = T.repeat(self.hy0, X.shape[1], axis=0)

                init_input = [init_hx, init_mx, init_hy, init_my]
            else:
                init_x     = [T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dims[0]), 1)] * 2
                init_y     = [T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dims[1]), 1)] * 2

                init_input = init_x + init_y
        else:
            init_input = init_x + init_y

        if not one_step:
            sequence       = [X]
            output_info    = init_input
            outputs, _     = theano.scan(
                self._step,
                sequences=sequence,
                outputs_info=output_info
            )
        else:
            outputs        = self._step(*([X[0]] + init_x + init_y))

        if return_sequence:
            hxs = outputs[0].dimshuffle((1, 0, 2))
            hys = outputs[2].dimshuffle((1, 0, 2))
            hs  = T.concatenate([hxs, hys], axis=-1)
            return hs
        else:
            hx  = outputs[0][-1]
            hy  = outputs[2][-1]
            h   = T.concatenate([hx, hy], axis=-1)
            return h


class PyramidLSTM(Layer):
    """
    A more flexible Pyramid LSTM structure!
    """
    def __init__(self,
                 # parameters for Grid.
                 output_dims,
                 input_dims,    # [0, ... 0], 0 represents no external inputs.
                 priority=1,
                 peephole=True,
                 init='glorot_uniform', inner_init='orthogonal',
                 forget_bias_init='one',
                 activation='tanh', inner_activation='sigmoid',
                 use_input=True,
                 name=None, weights=None,
                 identity_connect=None,

                 # parameters for 2D-GridLSTM
                 depth=5,
                 learn_init=False,
                 shared=True,
                 dropout=0
                 ):

        super(PyramidLSTM, self).__init__()
        assert len(output_dims) == 2, 'at this stage, only 2D Grid-LSTM is supported.'
        assert len(input_dims)  == len(output_dims), '# of inputs must match # of outputs.'
        assert output_dims[0] == output_dims[1], 'only square models are supported here.'
        assert shared, 'weights are shared at this stage.'
        assert use_input, 'inputs must be used; they are injected in the middle of the grid.'

        """
        Initialization.
        """
        logger.info(":::: Pyramid LSTM ::::")
        self.input_dims       = input_dims
        self.output_dims      = output_dims   # used later in __call__
        self.N                = len(output_dims)
        self.depth            = depth
        self.dropout          = dropout

        self.priority         = priority
        self.peephole         = peephole
        self.use_input        = use_input
        self.learn_init       = learn_init

        self.init             = initializations.get(init)
        self.inner_init       = initializations.get(inner_init)
        self.forget_bias_init = initializations.get(forget_bias_init)
        self.activation       = activations.get(activation)
        self.relu             = activations.get('relu')
        self.inner_activation = activations.get(inner_activation)

        self.identity_connect = identity_connect
        self.axies            = {0: 'x', 1: 'y', 2: 'z', 3: 'w'}  # at most 4D is supported for now

        """
        Build the model weights.
        """
        # build the centroid grid (3 grid versions)
        self.grids = [Grid(output_dims,
                           input_dims,
                           -1,
                           peephole,
                           init, inner_init,
                           forget_bias_init,
                           activation, inner_activation, use_input,
                           name='Grid*{}'.format(k)
                           ) for k in xrange(3)]

        for k in xrange(3):
            self.grids[k].build()
            self._add(self.grids[k])

        # # input projection layer (projected to time-axis)       [x]
        # self.Ph  = Dense(input_dims[0], output_dims[0], name='Ph')
        # self.Pm  = Dense(input_dims[0], output_dims[0], name='Pm')
        #
        # self._add(self.Ph)
        # self._add(self.Pm)

        # learn init/
        if self.learn_init:
            self.hx0 = self.init((1, output_dims[0]))
            self.hy0 = self.init((1, output_dims[1]))
            self.mx0 = self.init((1, output_dims[0]))
            self.my0 = self.init((1, output_dims[1]))

            self.hx0.name, self.hy0.name = 'hx0', 'hy0'
            self.mx0.name, self.my0.name = 'mx0', 'my0'
            self.params += [self.hx0, self.hy0, self.mx0, self.my0]

        """
        Other info.
        """
        if weights is not None:
            self.set_weights(weights)

        if name is not None:
            self.set_name(name)

    def _step(self, *args):
        inputs = args[0]
        hx_tm1 = args[1]
        mx_tm1 = args[2]
        hy_tm1 = args[3]
        my_tm1 = args[4]

        # zero constant inputs.
        pre_info    = [[[T.zeros_like(hx_tm1)
                         for _ in xrange(self.depth)]
                        for _ in xrange(self.depth)]
                       for _ in xrange(4)]  # hx, mx, hy, my

        pre_inputs  = [[T.zeros_like(inputs)
                        for _ in xrange(self.depth)]
                       for _ in xrange(self.depth)]

        for kk in xrange(self.depth):
            pre_inputs[kk][kk] = inputs

        pre_info[0][0][0] = hx_tm1
        pre_info[1][0][0] = mx_tm1
        pre_info[2][0][0] = hy_tm1
        pre_info[3][0][0] = my_tm1
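
        # same wavefront traversal as in PyramidGridLSTM2D._step above, except
        # that three separate weight groups are used, chosen by which
        # anti-diagonal band of the grid the cell lies in.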

        for step_x in xrange(self.depth):
            for step_y in xrange(self.depth):
                # input hidden/memory/input information

                hs_i  = [pre_info[0][step_x][step_y],
                         pre_info[2][step_x][step_y]]
                ms_i  = [pre_info[1][step_x][step_y],
                         pre_info[3][step_x][step_y]]
                xs_i  = [pre_inputs[step_x][step_y],
                         pre_inputs[step_x][step_y]]

                # compute grid-lstm
                if (step_x + step_y + 1) < self.depth:
                    hs_o, ms_o = self.grids[0].grid_(hs_i, ms_i, xs_i, priority=-1)
                elif (step_x + step_y + 1) == self.depth:
                    hs_o, ms_o = self.grids[1].grid_(hs_i, ms_i, xs_i, priority=-1)
                else:
                    hs_o, ms_o = self.grids[2].grid_(hs_i, ms_i, xs_i, priority=-1)

                # output hidden/memory information
                if (step_x == self.depth - 1) and (step_y == self.depth - 1):
                    hx_t, mx_t, hy_t, my_t = hs_o[0], ms_o[0], hs_o[1], ms_o[1]
                    return hx_t, mx_t, hy_t, my_t

                if step_x + 1 < self.depth:
                    pre_info[0][step_x + 1][step_y] = hs_o[0]
                    pre_info[1][step_x + 1][step_y] = ms_o[0]

                if step_y + 1 < self.depth:
                    pre_info[2][step_x][step_y + 1] = hs_o[1]
                    pre_info[3][step_x][step_y + 1] = ms_o[1]

    def __call__(self, X, init_x=None, init_y=None,
                 return_sequence=False, one_step=False):
        # masking is not supported yet.
        if X.ndim == 2:
            X = X[:, None, :]

        # one step
        if one_step:
            assert init_x is not None, 'previous x must be provided!'
            assert init_y is not None, 'previous y must be provided!'

        X = X.dimshuffle((1, 0, 2))
        if init_x is None:
            if self.learn_init:
                init_mx    = T.repeat(self.mx0, X.shape[1], axis=0)
                init_my    = T.repeat(self.my0, X.shape[1], axis=0)
                init_hx    = T.repeat(self.hx0, X.shape[1], axis=0)
                init_hy    = T.repeat(self.hy0, X.shape[1], axis=0)

                init_input = [init_hx, init_mx, init_hy, init_my]
            else:
                init_x     = [T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dims[0]), 1)] * 2
                init_y     = [T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dims[1]), 1)] * 2

                init_input = init_x + init_y
        else:
            init_input = init_x + init_y

        if not one_step:
            sequence       = [X]
            output_info    = init_input
            outputs, _     = theano.scan(
                self._step,
                sequences=sequence,
                outputs_info=output_info
            )
        else:
            outputs        = self._step(*([X[0]] + init_x + init_y))

        if return_sequence:
            hxs = outputs[0].dimshuffle((1, 0, 2))
            hys = outputs[2].dimshuffle((1, 0, 2))
            hs  = T.concatenate([hxs, hys], axis=-1)
            return hs
        else:
            hx  = outputs[0][-1]
            hy  = outputs[2][-1]
            h   = T.concatenate([hx, hy], axis=-1)
            return h

================================================
FILE: emolga/layers/ntm_minibatch.py
================================================
__author__ = 'jiataogu'
import theano
import theano.tensor as T

import scipy.linalg as sl
import numpy as np
from .core import *
from .recurrent import *
import copy

"""
This implementation supports both minibatch learning and on-line training.
A minibatch version is needed for the Neural Turing Machine.
"""


class Reader(Layer):
    """
        "Reader Head" of the Neural Turing Machine.
    """

    def __init__(self, input_dim, memory_width, shift_width, shift_conv,
                 init='glorot_uniform', inner_init='orthogonal',
                 name=None):
        super(Reader, self).__init__()
        self.input_dim = input_dim
        self.memory_dim = memory_width

        self.init = initializations.get(init)
        self.inner_init = initializations.get(inner_init)

        self.tanh = activations.get('tanh')
        self.sigmoid = activations.get('sigmoid')
        self.softplus = activations.get('softplus')
        self.vec_softmax = activations.get('vector_softmax')
        self.softmax = activations.get('softmax')

        """
        Reader Params.
        """
        self.W_key = self.init((input_dim, memory_width))
        self.W_shift = self.init((input_dim, shift_width))
        self.W_beta = self.init(input_dim)
        self.W_gama = self.init(input_dim)
        self.W_g = self.init(input_dim)

        self.b_key = shared_zeros(memory_width)
        self.b_shift = shared_zeros(shift_width)
        self.b_beta = theano.shared(floatX(0))
        self.b_gama = theano.shared(floatX(0))
        self.b_g = theano.shared(floatX(0))

        self.shift_conv = shift_conv

        # add params and set names.
        self.params = [self.W_key, self.W_shift, self.W_beta, self.W_gama, self.W_g,
                       self.b_key, self.b_shift, self.b_beta, self.b_gama, self.b_g]

        self.W_key.name, self.W_shift.name, self.W_beta.name, \
        self.W_gama.name, self.W_g.name = 'W_key', 'W_shift', 'W_beta', \
                                          'W_gama', 'W_g'

        self.b_key.name, self.b_shift.name, self.b_beta.name, \
        self.b_gama.name, self.b_g.name = 'b_key', 'b_shift', 'b_beta', \
                                          'b_gama', 'b_g'

    def __call__(self, X, w_temp, m_temp):
        # input dimensions
        # X:      (nb_samples, input_dim)
        # w_temp: (nb_samples, memory_dim)
        # m_temp: (nb_samples, memory_dim, memory_width) ::tensor_memory

        key = dot(X, self.W_key, self.b_key)  # (nb_samples, memory_width)
        shift = self.softmax(
            dot(X, self.W_shift, self.b_shift))  # (nb_samples, shift_width)

        beta = self.softplus(dot(X, self.W_beta, self.b_beta))[:, None]  # (nb_samples, 1)
        gamma = self.softplus(dot(X, self.W_gama, self.b_gama)) + 1.  # (nb_samples,)
        gamma = gamma[:, None]  # (nb_samples, 1)
        g = self.sigmoid(dot(X, self.W_g, self.b_g))[:, None]  # (nb_samples, 1)

        signal = [key, shift, beta, gamma, g]

        w_c = self.softmax(
            beta * cosine_sim2d(key, m_temp))  # (nb_samples, memory_dim) //content-based addressing
        w_g = g * w_c + (1 - g) * w_temp  # (nb_samples, memory_dim) //history interpolation
        w_s = shift_convolve2d(w_g, shift, self.shift_conv)  # (nb_samples, memory_dim) //convolutional shift
        w_p = w_s ** gamma  # (nb_samples, memory_dim) //sharpening
        w_t = w_p / T.sum(w_p, axis=1)[:, None]  # (nb_samples, memory_dim)
        return w_t
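
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): the same four-stage
# addressing pipeline in plain numpy for a single sample. The local softmax
# and the roll-based shift are stand-ins for the emolga activations and
# shift_convolve2d.
def _addressing_sketch(key, beta, g, shift, gamma, w_prev, memory):
    # key: (memory_width,)   shift: (shift_width,)   w_prev: (memory_dim,)
    # memory: (memory_dim, memory_width); beta, g, gamma are scalars.
    def softmax(x):
        e = np.exp(x - x.max())
        return e / e.sum()

    # 1. content-based addressing: cosine similarity against each memory row.
    sim = memory.dot(key) / (np.linalg.norm(memory, axis=1)
                             * np.linalg.norm(key) + 1e-8)
    w_c = softmax(beta * sim)
    # 2. interpolation with the previous weights.
    w_g = g * w_c + (1. - g) * w_prev
    # 3. circular convolutional shift (shift weights centred on zero offset).
    w_s = sum(shift[j] * np.roll(w_g, j - len(shift) // 2)
              for j in range(len(shift)))
    # 4. sharpening and renormalisation.
    w_p = w_s ** gamma
    return w_p / w_p.sum()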


class Writer(Reader):
    """
        "Writer head" of the Neural Turing Machine
    """

    def __init__(self, input_dim, memory_width, shift_width, shift_conv,
                 init='glorot_uniform', inner_init='orthogonal',
                 name=None):
        super(Writer, self).__init__(input_dim, memory_width, shift_width, shift_conv,
                                     init, inner_init, name)

        """
        Writer Params.
        """
        self.W_erase = self.init((input_dim, memory_width))
        self.W_add = self.init((input_dim, memory_width))

        self.b_erase = shared_zeros(memory_width)
        self.b_add = shared_zeros(memory_width)

        # add params and set names.
        self.params += [self.W_erase, self.W_add, self.b_erase, self.b_add]

        self.W_erase.name, self.W_add.name = 'W_erase', 'W_add'
        self.b_erase.name, self.b_add.name = 'b_erase', 'b_add'

    def get_fixer(self, X):
        erase = self.sigmoid(dot(X, self.W_erase, self.b_erase))  # (nb_samples, memory_width)
        add   = self.sigmoid(dot(X, self.W_add, self.b_add))  # (nb_samples, memory_width)
        return erase, add


class Controller(Recurrent):
    """
    Controller used in Neural Turing Machine.
        - Core cell (Memory)
        - Reader head
        - Writer head
    This is a simple RNN version; the original Neural Turing Machine uses an LSTM controller.
    """

    def __init__(self,
                 input_dim,
                 memory_dim,
                 memory_width,
                 hidden_dim,
                 shift_width=3,
                 init='glorot_uniform',
                 inner_init='orthogonal',
                 name=None,
                 readonly=False,
                 curr_input=False,
                 recurrence=False,
                 memorybook=None
                 ):
        super(Controller, self).__init__()
        # Initialization of the dimensions.
        self.input_dim     = input_dim
        self.memory_dim    = memory_dim
        self.memory_width  = memory_width
        self.hidden_dim    = hidden_dim
        self.shift_width   = shift_width

        self.init          = initializations.get(init)
        self.inner_init    = initializations.get(inner_init)
        self.tanh          = activations.get('tanh')
        self.softmax       = activations.get('softmax')
        self.vec_softmax   = activations.get('vector_softmax')

        self.readonly      = readonly
        self.curr_input    = curr_input
        self.recurrence    = recurrence
        self.memorybook    = memorybook

        """
        Controller Module.
        """
        # hidden projection:
        self.W_in          = self.init((input_dim, hidden_dim))
        self.b_in          = shared_zeros(hidden_dim)
        self.W_rd          = self.init((memory_width, hidden_dim))
        self.W_in.name     = 'W_in'
        self.b_in.name     = 'b_in'
        self.W_rd.name     = 'W_rd'
        self.params        = [self.W_in, self.b_in, self.W_rd]

        # use recurrence:
        if self.recurrence:
            self.W_hh      = self.inner_init((hidden_dim, hidden_dim))
            self.W_hh.name = 'W_hh'
            self.params   += [self.W_hh]

        # Shift convolution
        shift_conv         = sl.circulant(np.arange(memory_dim)).T[
                                np.arange(-(shift_width // 2), (shift_width // 2) + 1)][::-1]
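        # shift_conv is a (shift_width, memory_dim) matrix of circulant index
        # rows; it is presumably consumed by shift_convolve2d (defined
        # elsewhere in emolga) to gather circularly shifted copies of the
        # attention weights.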

        # use the current input for weights.
        if self.curr_input:
            controller_size = self.input_dim + self.hidden_dim
        else:
            controller_size = self.hidden_dim

        # write head
        if not readonly:
            self.writer    = Writer(controller_size, memory_width, shift_width, shift_conv, name='writer')
            self.writer.set_name('writer')
            self._add(self.writer)

        # read head
        self.reader        = Reader(controller_size, memory_width, shift_width, shift_conv, name='reader')
        self.reader.set_name('reader')
        self._add(self.reader)

        # ***********************************************************
        # reserved for None initialization (we don't use these often)
        self.memory_init   = self.init((memory_dim, memory_width))
        self.w_write_init  = self.softmax(np.random.rand(1, memory_dim).astype(theano.config.floatX))
        self.w_read_init   = self.softmax(np.random.rand(1, memory_dim).astype(theano.config.floatX))
        self.contr_init    = self.tanh(np.random.rand(1, hidden_dim).astype(theano.config.floatX))

        if name is not None:
            self.set_name(name)

    def _controller(self, input_t, read_t, controller_tm1=None):
        # input_t : (nb_sample, input_dim)
        # read_t  : (nb_sample, memory_width)
        # controller_tm1: (nb_sample, hidden_dim)
        if self.recurrence:
            return self.tanh(dot(input_t, self.W_in) +
                             dot(controller_tm1, self.W_hh) +
                             dot(read_t, self.W_rd)  +
                             self.b_in)
        else:
            return self.tanh(dot(input_t, self.W_in) +
                             dot(read_t, self.W_rd)  +
                             self.b_in)

    @staticmethod
    def _read(w_read, memory):
        # w_read : (nb_sample, memory_dim)
        # memory : (nb_sample, memory_dim, memory_width)
        # return dot(w_read, memory)

        return T.sum(w_read[:, :, None] * memory, axis=1)

    @staticmethod
    def _write(w_write, memory, erase, add):
        # w_write: (nb_sample, memory_dim)
        # memory : (nb_sample, memory_dim, memory_width)
        # erase/add: (nb_sample, memory_width)

        w_write  = w_write[:, :, None]
        erase    = erase[:, None, :]
        add      = add[:, None, :]

        m_erased = memory * (1 - w_write * erase)
        memory_t = m_erased + w_write * add  # (nb_sample, memory_dim, memory_width)
        return memory_t

    def _step(self, input_t, mask_t,
              memory_tm1,
              w_write_tm1, w_read_tm1,
              controller_tm1):
        # input_t:     (nb_sample, input_dim)
        # memory_tm1:  (nb_sample, memory_dim, memory_width)
        # w_write_tm1: (nb_sample, memory_dim)
        # w_read_tm1:  (nb_sample, memory_dim)
        # controller_tm1: (nb_sample, hidden_dim)

        # read the memory
        if self.curr_input:
            info     = T.concatenate((controller_tm1, input_t), axis=1)
            w_read_t = self.reader(info, w_read_tm1, memory_tm1)
            read_tm1 = self._read(w_read_t, memory_tm1)
        else:
            read_tm1 = self._read(w_read_tm1, memory_tm1)       # (nb_sample, memory_width)

        # get the new controller (hidden states.)
        if self.recurrence:
            controller_t = self._controller(input_t, read_tm1, controller_tm1)
        else:
            controller_t = self._controller(input_t, read_tm1)  # (nb_sample, controller_size)

        # update the memory cell (if needed)
        if not self.readonly:
            if self.curr_input:
                infow          = T.concatenate((controller_t, input_t), axis=1)
                w_write_t      = self.writer(infow, w_write_tm1, memory_tm1)     # (nb_sample, memory_dim)
                erase_t, add_t = self.writer.get_fixer(infow)                    # (nb_sample, memory_width)
            else:
                w_write_t      = self.writer(controller_t, w_write_tm1, memory_tm1)
                erase_t, add_t = self.writer.get_fixer(controller_t)
            memory_t           = self._write(w_write_t, memory_tm1, erase_t, add_t)  # (nb_sample, memory_dim, memory_width)
        else:
            w_write_t          = w_write_tm1
            memory_t           = memory_tm1

        # get the next reading weights.
        if not self.curr_input:
            w_read_t           = self.reader(controller_t, w_read_tm1, memory_t)  # (nb_sample, memory_dim)

        # apply the mask: keep the previous values wherever mask == 0
        memory_t     = memory_t     * mask_t[:, :, None] + memory_tm1 * (1 - mask_t[:, :, None])
        w_read_t     = w_read_t     * mask_t + w_read_tm1     * (1 - mask_t)
        w_write_t    = w_write_t    * mask_t + w_write_tm1    * (1 - mask_t)
        controller_t = controller_t * mask_t + controller_tm1 * (1 - mask_t)

        return memory_t, w_write_t, w_read_t, controller_t

    def __call__(self, X, mask=None, M=None, init_ww=None,
                 init_wr=None, init_c=None, return_sequence=False,
                 one_step=False, return_full=False):
        # the recurrent cell only works on 3D tensors.
        if X.ndim == 2:
            X = X[:, None, :]
        nb_samples = X.shape[0]

        # mask
        if mask is None:
            mask = T.alloc(1., X.shape[0], X.shape[1])  # all-ones mask over the full sequence

        padded_mask = self.get_padded_shuffled_mask(mask, pad=0)
        X = X.dimshuffle((1, 0, 2))

        # ***********************************************************************
        # initialization states
        if M is None:
            memory_init  = T.repeat(self.memory_init[None, :, :], nb_samples, axis=0)
        else:
            memory_init  = M

        if init_wr is None:
            w_read_init  = T.repeat(self.w_read_init, nb_samples, axis=0)
        else:
            w_read_init  = init_wr

        if init_ww is None:
            w_write_init = T.repeat(self.w_write_init, nb_samples, axis=0)
        else:
            w_write_init = init_ww

        if init_c is None:
            contr_init   = T.repeat(self.contr_init, nb_samples, axis=0)
        else:
            contr_init   = init_c
        # ************************************************************************

        outputs_info = [memory_init, w_write_init, w_read_init, contr_init]

        if one_step:
            seq = [X[0], padded_mask[0]]
            outputs = self._step(*(seq + outputs_info))
            return outputs
        else:
            seq = [X, padded_mask]
            outputs, _ = theano.scan(
                self._step,
                sequences=seq,
                outputs_info=outputs_info,
                name='controller_recurrence'
            )

        self.monitor['memory_info']   = outputs[0]
        self.monitor['write_weights'] = outputs[1]
        self.monitor['read_weights']  = outputs[2]

        if not return_full:
            if return_sequence:
                return outputs[-1].dimshuffle((1, 0, 2))
            return outputs[-1][-1]
        else:
            if return_sequence:
                return [a.dimshuffle((1, 0, 2)) for a in outputs]
            return [a[-1] for a in outputs]
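
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): the _read and _write
# primitives above in plain numpy for a single sample.
def _read_write_sketch(memory, w_read, w_write, erase, add):
    # memory: (memory_dim, memory_width); w_read/w_write: (memory_dim,)
    # erase/add: (memory_width,)
    read     = w_read.dot(memory)                # attention-weighted row sum
    m_erased = memory * (1. - w_write[:, None] * erase[None, :])
    memory_t = m_erased + w_write[:, None] * add[None, :]
    return read, memory_t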


class AttentionReader(Layer):
    """
        "Reader Head" of the Neural Turing Machine.
    """

    def __init__(self, input_dim, memory_width, shift_width, shift_conv,
                 init='glorot_uniform', inner_init='orthogonal',
                 name=None):
        super(AttentionReader, self).__init__()
        self.input_dim = input_dim
        self.memory_dim = memory_width

        self.init = initializations.get(init)
        self.inner_init = initializations.get(inner_init)

        self.tanh = activations.get('tanh')
        self.sigmoid = activations.get('sigmoid')
        self.softplus = activations.get('softplus')
        self.vec_softmax = activations.get('vector_softmax')
        self.softmax = activations.get('softmax')

        """
        Reader Params.
        """
        self.W_key   = self.init((input_dim, memory_width))
        self.W_lock  = self.inner_init((memory_width, memory_width))

        self.W_shift = self.init((input_dim, shift_width))
        self.W_beta = self.init(input_dim)
        self.W_gama = self.init(input_dim)
        self.W_g = self.init(input_dim)

        # self.v     = self.init(memory_width)
        self.b_key = shared_zeros(memory_width)
        self.b_shift = shared_zeros(shift_width)
        self.b_beta = theano.shared(floatX(0))
        self.b_gama = theano.shared(floatX(0))
        self.b_g = theano.shared(floatX(0))

        self.shift_conv = shift_conv

        # add params and set names.
        self.params = [self.W_key, self.W_shift, self.W_beta, self.W_gama, self.W_g,
                       self.b_key, self.b_shift, self.b_beta, self.b_gama, self.b_g,
                       self.W_lock]

        self.W_key.name, self.W_shift.name, self.W_beta.name, \
        self.W_gama.name, self.W_g.name = 'W_key', 'W_shift', 'W_beta', \
                                          'W_gama', 'W_g'
        self.W_lock.name  = 'W_lock'

        self.b_key.name, self.b_shift.name, self.b_beta.name, \
        self.b_gama.name, self.b_g.name = 'b_key', 'b_shift', 'b_beta', \
                                          'b_gama', 'b_g'

    def __call__(self, X, w_temp, m_temp):
        # input dimensions
        # X:      (nb_samples, input_dim)
        # w_temp: (nb_samples, memory_dim)
        # m_temp: (nb_samples, memory_dim, memory_width) ::tensor_memory

        key   = dot(X, self.W_key, self.b_key)  # (nb_samples, memory_width)
        lock  = dot(m_temp, self.W_lock)        # (nb_samples, memory_dim, memory_width)
        shift = self.softmax(
            dot(X, self.W_shift, self.b_shift))  # (nb_samples, shift_width)

        beta = self.softplus(dot(X, self.W_beta, self.b_beta))[:, None]  # (nb_samples, 1)
        gamma = self.softplus(dot(X, self.W_gama, self.b_gama)) + 1.  # (nb_samples,)
        gamma = gamma[:, None]  # (nb_samples, 1)
        g = self.sigmoid(dot(X, self.W_g, self.b_g))[:, None]  # (nb_samples, 1)

        signal = [key, shift, beta, gamma, g]

        energy = T.sum(key[:, None, :] * lock, axis=2)
        # energy = T.tensordot(key[:, None, :] + lock, self.v, [2, 0])
        w_c    = self.softmax(beta * energy)
        # w_c = self.softmax(
        #     beta * cosine_sim2d(key, m_temp))  # (nb_samples, memory_dim) //content-based addressing
        w_g = g * w_c + (1 - g) * w_temp  # (nb_samples, memory_dim) //history interpolation
        w_s = shift_convolve2d(w_g, shift, self.shift_conv)  # (nb_samples, memory_dim) //convolutional shift
        w_p = w_s ** gamma  # (nb_samples, memory_dim) //sharpening
        w_t = w_p / T.sum(w_p, axis=1)[:, None]  # (nb_samples, memory_dim)
        return w_t


class AttentionWriter(AttentionReader):
    """
        "Writer head" of the Neural Turing Machine
    """

    def __init__(self, input_dim, memory_width, shift_width, shift_conv,
                 init='glorot_uniform', inner_init='orthogonal',
                 name=None):
        super(AttentionWriter, self).__init__(input_dim, memory_width, shift_width, shift_conv,
                                     init, inner_init, name)

        """
        Writer Params.
        """
        self.W_erase = self.init((input_dim, memory_width))
        self.W_add = self.init((input_dim, memory_width))

        self.b_erase = shared_zeros(memory_width)
        self.b_add = shared_zeros(memory_width)

        # add params and set names.
        self.params += [self.W_erase, self.W_add, self.b_erase, self.b_add]

        self.W_erase.name, self.W_add.name = 'W_erase', 'W_add'
        self.b_erase.name, self.b_add.name = 'b_erase', 'b_add'

    def get_fixer(self, X):
        erase = self.sigmoid(dot(X, self.W_erase, self.b_erase))  # (nb_samples, memory_width)
        add   = self.sigmoid(dot(X, self.W_add, self.b_add))  # (nb_samples, memory_width)
        return erase, add



class BernoulliController(Recurrent):
    """
    Controller used in Neural Turing Machine.
        - Core cell (Memory): binary memory
        - Reader head
        - Writer head
    This is a simple RNN version; the original Neural Turing Machine uses an LSTM controller.
    """

    def __init__(self,
                 input_dim,
                 memory_dim,
                 memory_width,
                 hidden_dim,
                 shift_width=3,
                 init='glorot_uniform',
                 inner_init='orthogonal',
                 name=None,
                 readonly=False,
                 curr_input=False,
                 recurrence=False,
                 memorybook=None
                 ):
        super(BernoulliController, self).__init__()
        # Initialization of the dimensions.
        self.input_dim     = input_dim
        self.memory_dim    = memory_dim
        self.memory_width  = memory_width
        self.hidden_dim    = hidden_dim
        self.shift_width   = shift_width

        self.init          = initializations.get(init)
        self.inner_init    = initializations.get(inner_init)
        self.tanh          = activations.get('tanh')
        self.softmax       = activations.get('softmax')
        self.vec_softmax   = activations.get('vector_softmax')
        self.sigmoid       = activations.get('sigmoid')

        self.readonly      = readonly
        self.curr_input    = curr_input
        self.recurrence    = recurrence
        self.memorybook    = memorybook

        """
        Controller Module.
        """
        # hidden projection:
        self.W_in          = self.init((input_dim, hidden_dim))
        self.b_in          = shared_zeros(hidden_dim)
        self.W_rd          = self.init((memory_width, hidden_dim))
        self.W_in.name     = 'W_in'
        self.b_in.name     = 'b_in'
        self.W_rd.name     = 'W_rd'
        self.params        = [self.W_in, self.b_in, self.W_rd]

        # use recurrence:
        if self.recurrence:
            self.W_hh      = self.inner_init((hidden_dim, hidden_dim))
            self.W_hh.name = 'W_hh'
            self.params   += [self.W_hh]

        # Shift convolution
        shift_conv         = sl.circulant(np.arange(memory_dim)).T[
                                np.arange(-(shift_width // 2), (shift_width // 2) + 1)][::-1]

        # use the current input for weights.
        if self.curr_input:
            controller_size = self.input_dim + self.hidden_dim
        else:
            controller_size = self.hidden_dim

        # write head
        if not readonly:
            self.writer    = AttentionWriter(controller_size, memory_width, shift_width, shift_conv, name='writer')
            self.writer.set_name('writer')
            self._add(self.writer)

        # read head
        self.reader        = AttentionReader(controller_size, memory_width, shift_width, shift_conv, name='reader')
        self.reader.set_name('reader')
        self._add(self.reader)

        # ***********************************************************
        # reserved for None initialization (we don't use these often)
        self.memory_init   = self.sigmoid(self.init((memory_dim, memory_width)))
        self.w_write_init  = self.softmax(np.random.rand(1, memory_dim).astype(theano.config.floatX))
        self.w_read_init   = self.softmax(np.random.rand(1, memory_dim).astype(theano.config.floatX))
        self.contr_init    = self.tanh(np.random.rand(1, hidden_dim).astype(theano.config.floatX))

        if name is not None:
            self.set_name(name)

    def _controller(self, input_t, read_t, controller_tm1=None):
        # input_t : (nb_sample, input_dim)
        # read_t  : (nb_sample, memory_width)
        # controller_tm1: (nb_sample, hidden_dim)
        if self.recurrence:
            return self.tanh(dot(input_t, self.W_in) +
                             dot(controller_tm1, self.W_hh) +
                             dot(read_t, self.W_rd)  +
                             self.b_in)
        else:
            return self.tanh(dot(input_t, self.W_in) +
                             dot(read_t, self.W_rd)  +
                             self.b_in)

    @staticmethod
    def _read(w_read, memory):
        # w_read : (nb_sample, memory_dim)
        # memory : (nb_sample, memory_dim, memory_width)
        # return dot(w_read, memory)

        return T.sum(w_read[:, :, None] * memory, axis=1)

    @staticmethod
    def _write(w_write, memory, erase, add):
        # w_write: (nb_sample, memory_dim)
        # memory : (nb_sample, memory_dim, memory_width)
        # erase/add: (nb_sample, memory_width)

        w_write  = w_write[:, :, None]
        erase    = erase[:, None, :]     # erase is a gate.
        add      = add[:, None, :]       # add is a bias

        # m_erased = memory * (1 - w_write * erase)
        # memory_t = m_erased + w_write * add  # (nb_sample, memory_dim, memory_width)
        memory_t = memory * (1 - w_write * erase) + \
                   add * w_write * (1 - erase)
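        # unlike the standard erase/add write in Controller._write above, the
        # add term here is also gated by (1 - erase): a head cannot erase and
        # add to the same cell in a single step.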

        return memory_t

    def _step(self, input_t, mask_t,
              memory_tm1,
              w_write_tm1, w_read_tm1,
              controller_tm1):
        # input_t:     (nb_sample, input_dim)
        # memory_tm1:  (nb_sample, memory_dim, memory_width)
        # w_write_tm1: (nb_sample, memory_dim)
        # w_read_tm1:  (nb_sample, memory_dim)
        # controller_tm1: (nb_sample, hidden_dim)

        # read the memory
        if self.curr_input:
            info     = T.concatenate((controller_tm1, input_t), axis=1)
            w_read_t = self.reader(info, w_read_tm1, memory_tm1)
            read_tm1 = self._read(w_read_t, memory_tm1)
        else:
            read_tm1 = self._read(w_read_tm1, memory_tm1)       # (nb_sample, memory_width)

        # get the new controller (hidden states.)
        if self.recurrence:
            controller_t = self._controller(input_t, read_tm1, controller_tm1)
        else:
            controller_t = self._controller(input_t, read_tm1)  # (nb_sample, controller_size)

        # update the memory cell (if needed)
        if not self.readonly:
            if self.curr_input:
                infow          = T.concatenate((controller_t, input_t), axis=1)
                w_write_t      = self.writer(infow, w_write_tm1, memory_tm1)     # (nb_sample, memory_dim)
                erase_t, add_t = self.writer.get_fixer(infow)                    # (nb_sample, memory_width)
            else:
                w_write_t      = self.writer(controller_t, w_write_tm1, memory_tm1)
                erase_t, add_t = self.writer.get_fixer(controller_t)
            memory_t           = self._write(w_write_t, memory_tm1, erase_t, add_t)  # (nb_sample, memory_dim, memory_width)
        else:
            w_write_t          = w_write_tm1
            memory_t           = memory_tm1

        # get the next reading weights.
        if not self.curr_input:
            w_read_t           = self.reader(controller_t, w_read_tm1, memory_t)  # (nb_sample, memory_dim)

        # apply the mask: keep the previous values wherever mask == 0
        memory_t     = memory_t     * mask_t[:, :, None] + memory_tm1 * (1 - mask_t[:, :, None])
        w_read_t     = w_read_t     * mask_t + w_read_tm1     * (1 - mask_t)
        w_write_t    = w_write_t    * mask_t + w_write_tm1    * (1 - mask_t)
        controller_t = controller_t * mask_t + controller_tm1 * (1 - mask_t)

        return memory_t, w_write_t, w_read_t, controller_t

    def __call__(self, X, mask=None, M=None, init_ww=None,
                 init_wr=None, init_c=None, return_sequence=False,
                 one_step=False, return_full=False):
        # the recurrent cell only works on 3D tensors; lift 2D input to 3D.
        if X.ndim == 2:
            X = X[:, None, :]
        nb_samples = X.shape[0]

        # mask
        if mask is None:
            mask = T.alloc(1., X.shape[0], 1)

        padded_mask = self.get_padded_shuffled_mask(mask, pad=0)
        X = X.dimshuffle((1, 0, 2))

        # ***********************************************************************
        # initialization states
        if M is None:
            memory_init  = T.repeat(self.memory_init[None, :, :], nb_samples, axis=0)
        else:
            memory_init  = M

        if init_wr is None:
            w_read_init  = T.repeat(self.w_read_init, nb_samples, axis=0)
        else:
            w_read_init  = init_wr

        if init_ww is None:
            w_write_init = T.repeat(self.w_write_init, nb_samples, axis=0)
        else:
            w_write_init = init_ww

        if init_c is None:
            contr_init   = T.repeat(self.contr_init, nb_samples, axis=0)
        else:
            contr_init   = init_c
        # ************************************************************************

        outputs_info = [memory_init, w_write_init, w_read_init, contr_init]

        if one_step:
            seq = [X[0], padded_mask[0]]
            outputs = self._step(*(seq + outputs_info))
            return outputs
        else:
            seq = [X, padded_mask]
            outputs, _ = theano.scan(
                self._step,
                sequences=seq,
                outputs_info=outputs_info,
                name='controller_recurrence'
            )

        self.monitor['memory_info'] = outputs

        if not return_full:
            if return_sequence:
                return outputs[-1].dimshuffle((1, 0, 2))
            return outputs[-1][-1]
        else:
            if return_sequence:
                return [a.dimshuffle((1, 0, 2)) for a in outputs]
            return [a[-1] for a in outputs]
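
    # A minimal usage sketch (hypothetical shapes, not from this repo):
    # calling the controller on X of shape (nb_samples, time, input_dim)
    # scans _step over time; by default the final controller state is
    # returned, while return_sequence=True yields states over time
    # (batch-major) and return_full=True also exposes the write/read
    # weights and the memory.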

================================================
FILE: emolga/layers/recurrent.py
================================================
# -*- coding: utf-8 -*-
from abc import abstractmethod
from .core import *


class Recurrent(MaskedLayer):
    """
        Recurrent Neural Network
    """

    @staticmethod
    def get_padded_shuffled_mask(mask, pad=0):
        """
        What's going on here?
            [1] change the 2D matrix into 3D.
            [2]
        """
        assert mask is not None, 'mask cannot be None'
        # mask is (nb_samples, time)
        mask = T.shape_padright(mask)    # (nb_samples, time, 1)
        mask = T.addbroadcast(mask, -1)
        mask = mask.dimshuffle(1, 0, 2)  # (time, nb_samples, 1)

        if pad > 0:
            # left-pad in time with 0
            padding = alloc_zeros_matrix(pad, mask.shape[1], 1)
            mask = T.concatenate([padding, mask], axis=0)
        return mask.astype('int8')
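
    # A minimal shape sketch (hypothetical values): a mask of shape
    # (nb_samples=2, time=3) comes back as (3, 2, 1) int8; with pad=1 it
    # becomes (4, 2, 1), the first time step being all zeros.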


class GRU(Recurrent):
    """
        Gated Recurrent Unit - Cho et al. 2014

        Acts as a spatio-temporal projection,
        turning a sequence of vectors into a single vector.

        Eats inputs with shape:
        (nb_samples, max_sample_length (samples shorter than this are padded with zeros at the end), input_dim)

        and returns outputs with shape:
        if not return_sequences:
            (nb_samples, output_dim)
        if return_sequences:
            (nb_samples, max_sample_length, output_dim)

        References:
            On the Properties of Neural Machine Translation: Encoder–Decoder Approaches
                http://www.aclweb.org/anthology/W14-4012
            Empirical Evaluation of Gated Recurrent Neural Networks on Sequence Modeling
                http://arxiv.org/pdf/1412.3555v1.pdf
    """

    def __init__(self,
                 input_dim,
                 output_dim=128,
                 context_dim=None,
                 init='glorot_uniform', inner_init='orthogonal',
                 activation='tanh', inner_activation='sigmoid',
                 name=None, weights=None):

        super(GRU, self).__init__()
        """
        Standard GRU.
        """
        self.input_dim = input_dim
        self.output_dim = output_dim

        self.init = initializations.get(init)
        self.inner_init = initializations.get(inner_init)
        self.activation = activations.get(activation)
        self.inner_activation = activations.get(inner_activation)

        self.W_z = self.init((self.input_dim, self.output_dim))
        self.W_r = self.init((self.input_dim, self.output_dim))
        self.W_h = self.init((self.input_dim, self.output_dim))

        self.U_z = self.inner_init((self.output_dim, self.output_dim))
        self.U_r = self.inner_init((self.output_dim, self.output_dim))
        self.U_h = self.inner_init((self.output_dim, self.output_dim))

        self.b_z = shared_zeros(self.output_dim)
        self.b_r = shared_zeros(self.output_dim)
        self.b_h = shared_zeros(self.output_dim)

        # set names
        self.W_z.name, self.U_z.name, self.b_z.name = 'Wz', 'Uz', 'bz'
        self.W_r.name, self.U_r.name, self.b_r.name = 'Wr', 'Ur', 'br'
        self.W_h.name, self.U_h.name, self.b_h.name = 'Wh', 'Uh', 'bh'

        self.params = [
            self.W_z, self.U_z, self.b_z,
            self.W_r, self.U_r, self.b_r,
            self.W_h, self.U_h, self.b_h,
        ]

        """
        GRU with context inputs.
        """
        if context_dim is not None:
            self.context_dim = context_dim
            self.C_z = self.init((self.context_dim, self.output_dim))
            self.C_r = self.init((self.context_dim, self.output_dim))
            self.C_h = self.init((self.context_dim, self.output_dim))
            self.C_z.name, self.C_r.name, self.C_h.name = 'Cz', 'Cr', 'Ch'

            self.params += [self.C_z, self.C_r, self.C_h]

        if weights is not None:
            self.set_weights(weights)

        if name is not None:
            self.set_name(name)

    def _step(self,
              xz_t, xr_t, xh_t, mask_t,
              h_tm1,
              u_z, u_r, u_h):
        # h_mask_tm1 = mask_tm1 * h_tm1
        # Here we use a GroundHog-like style, which allows masking inside
        # the step: masked positions simply carry h_tm1 forward.
        z          = self.inner_activation(xz_t + T.dot(h_tm1, u_z))
        r          = self.inner_activation(xr_t + T.dot(h_tm1, u_r))
        hh_t       = self.activation(xh_t + T.dot(r * h_tm1, u_h))
        h_t        = z * h_tm1 + (1 - z) * hh_t
        h_t        = mask_t * h_t + (1 - mask_t) * h_tm1
        return h_t
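
    # The step above implements the GRU equations (Cho et al. 2014):
    #   z_t  = sigmoid(x_z + U_z h_{t-1})
    #   r_t  = sigmoid(x_r + U_r h_{t-1})
    #   hh_t = tanh(x_h + U_h (r_t * h_{t-1}))
    #   h_t  = z_t * h_{t-1} + (1 - z_t) * hh_t
    # (note z here gates the previous state, the mirror image of some
    # references), with masked positions carrying h_{t-1} forward.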

    def _step_gate(self,
                   xz_t, xr_t, xh_t, mask_t,
                   h_tm1,
                   u_z, u_r, u_h):
        # h_mask_tm1 = mask_tm1 * h_tm1
        # Here we use a GroundHog-like style, which allows masking inside
        # the step: masked positions simply carry h_tm1 forward.
        z          = self.inner_activation(xz_t + T.dot(h_tm1, u_z))
        r          = self.inner_activation(xr_t + T.dot(h_tm1, u_r))
        hh_t       = self.activation(xh_t + T.dot(r * h_tm1, u_h))
        h_t        = z * h_tm1 + (1 - z) * hh_t
        h_t        = mask_t * h_t + (1 - mask_t) * h_tm1
        return h_t, z, r

    def __call__(self, X, mask=None, C=None, init_h=None,
                 return_sequence=False, one_step=False,
                 return_gates=False):
        """
        :param X:    input sequence
        :param mask: input mask
        :param C:    context constant
        :return:
        """
        # the recurrent cell only works on 3D tensors; lift 2D input to 3D
        if X.ndim == 2:
            X = X[:, None, :]
            if mask is not None:
                mask = mask[:, None]

        # mask
        if mask is None:  # sampling or beam-search
            mask = T.alloc(1., X.shape[0], 1)

        # one step
        if one_step:
            assert init_h is not None, 'previous state must be provided!'

        padded_mask = self.get_padded_shuffled_mask(mask, pad=0)
        X           = X.dimshuffle((1, 0, 2))        # X:   (max_len, nb_samples, input_dim)

        x_z         = dot(X, self.W_z, self.b_z)  # x_z: (max_len, nb_samples, output_dim)
        x_r         = dot(X, self.W_r, self.b_r)  # x_r: (max_len, nb_samples, output_dim)
        x_h         = dot(X, self.W_h, self.b_h)  # x_h: (max_len, nb_samples, output_dim)

        """
        GRU with constant context. (not attention here.)
        """
        if C is not None:
            assert C.ndim == 2
            ctx_step = C.dimshuffle('x', 0, 1)    # C: (nb_samples, context_dim)
            x_z     += dot(ctx_step, self.C_z)
            x_r     += dot(ctx_step, self.C_r)
            x_h     += dot(ctx_step, self.C_h)

        """
        GRU with additional initial/previous state.
        """
        if init_h is None:
            init_h = T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1)

        if not return_gates:
            if one_step:
                # NOTE: author-flagged hidden bug: one_step passes the full
                # (1, nb_samples, ...) tensors rather than their [0] slices,
                # so outputs keep a leading broadcastable axis.
                seq          = [x_z, x_r, x_h, padded_mask]
                outputs_info = [init_h]
                non_seq      = [self.U_z, self.U_r, self.U_h]
                outputs = self._step(*(seq + outputs_info + non_seq))

            else:
                outputs, updates = theano.scan(
                    self._step,
                    sequences=[x_z, x_r, x_h, padded_mask],
                    outputs_info=init_h,
                    non_sequences=[self.U_z, self.U_r, self.U_h]
                )

            if return_sequence:
                return outputs.dimshuffle((1, 0, 2))
            return outputs[-1]
        else:
            if one_step:
                # NOTE: same author-flagged hidden bug as in the branch above.
                seq             = [x_z, x_r, x_h, padded_mask]
                outputs_info    = [init_h]
                non_seq         = [self.U_z, self.U_r, self.U_h]
                outputs, zz, rr = self._step_gate(*(seq + outputs_info + non_seq))

            else:
                outputx, updates = theano.scan(
                    self._step_gate,
                    sequences=[x_z, x_r, x_h, padded_mask],
                    outputs_info=[init_h, None, None],
                    non_sequences=[self.U_z, self.U_r, self.U_h]
                )
                outputs, zz, rr = outputx

            if return_sequence:
                return outputs.dimshuffle((1, 0, 2)), zz.dimshuffle((1, 0, 2)), rr.dimshuffle((1, 0, 2))
            return outputs[-1], zz[-1], rr[-1]
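
    # A minimal usage sketch (hypothetical names/shapes, not from this repo):
    #   gru = GRU(input_dim=100, output_dim=128, name='cell')
    #   H   = gru(X, mask, return_sequence=True)  # (nb_samples, max_len, 128)
    #   h_T = gru(X, mask)                        # (nb_samples, 128)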


class JZS3(Recurrent):
    """
        Evolved recurrent neural network architectures from the evaluation of thousands
        of models, serving as alternatives to LSTMs and GRUs. See Jozefowicz et al. 2015.

        This corresponds to the `MUT3` architecture described in the paper.

        Takes inputs with shape:
        (nb_samples, max_sample_length (samples shorter than this are padded with zeros at the end), input_dim)

        and returns outputs with shape:
        if not return_sequences:
            (nb_samples, output_dim)
        if return_sequences:
            (nb_samples, max_sample_length, output_dim)

        References:
            An Empirical Exploration of Recurrent Network Architectures
                http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf
    """
    def __init__(self,
                 input_dim,
                 output_dim=128,
                 context_dim=None,
                 init='glorot_uniform', inner_init='orthogonal',
                 activation='tanh', inner_activation='sigmoid',
                 name=None, weights=None):

        super(JZS3, self).__init__()
        """
        Standard model
        """
        self.input_dim = input_dim
        self.output_dim = output_dim

        self.init = initializations.get(init)
        self.inner_init = initializations.get(inner_init)
        self.activation = activations.get(activation)
        self.inner_activation = activations.get(inner_activation)

        self.W_z = self.init((self.input_dim, self.output_dim))
        self.U_z = self.inner_init((self.output_dim, self.output_dim))
        self.b_z = shared_zeros(self.output_dim)

        self.W_r = self.init((self.input_dim, self.output_dim))
        self.U_r = self.inner_init((self.output_dim, self.output_dim))
        self.b_r = shared_zeros(self.output_dim)

        self.W_h = self.init((self.input_dim, self.output_dim))
        self.U_h = self.inner_init((self.output_dim, self.output_dim))
        self.b_h = shared_zeros(self.output_dim)

        # set names
        self.W_z.name, self.U_z.name, self.b_z.name = 'Wz', 'Uz', 'bz'
        self.W_r.name, self.U_r.name, self.b_r.name = 'Wr', 'Ur', 'br'
        self.W_h.name, self.U_h.name, self.b_h.name = 'Wh', 'Uh', 'bh'

        self.params = [
            self.W_z, self.U_z, self.b_z,
            self.W_r, self.U_r, self.b_r,
            self.W_h, self.U_h, self.b_h,
        ]

        """
        context inputs.
        """
        if context_dim is not None:
            self.context_dim = context_dim
            self.C_z = self.init((self.context_dim, self.output_dim))
            self.C_r = self.init((self.context_dim, self.output_dim))
            self.C_h = self.init((self.context_dim, self.output_dim))
            self.C_z.name, self.C_r.name, self.C_h.name = 'Cz', 'Cr', 'Ch'

            self.params += [self.C_z, self.C_r, self.C_h]

        if weights is not None:
            self.set_weights(weights)

        if name is not None:
            self.set_name(name)

    def _step(self,
              xz_t, xr_t, xh_t, mask_t,
              h_tm1,
              u_z, u_r, u_h):
        # h_mask_tm1 = mask_tm1 * h_tm1
        z     = self.inner_activation(xz_t + T.dot(T.tanh(h_tm1), u_z))
        r     = self.inner_activation(xr_t + T.dot(h_tm1, u_r))
        hh_t  = self.activation(xh_t + T.dot(r * h_tm1, u_h))
        h_t   = (hh_t * z + h_tm1 * (1 - z)) * mask_t + (1 - mask_t) * h_tm1
        return h_t
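
    # MUT3 differs from the GRU step only in the update gate, which sees
    # tanh(h_{t-1}) instead of h_{t-1} (Jozefowicz et al. 2015).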

    def __call__(self, X, mask=None, C=None, init_h=None, return_sequence=False, one_step=False):
        # the recurrent cell only works on 3D tensors; lift 2D input to 3D
        if X.ndim == 2:
            X = X[:, None, :]

        # mask
        if mask is None:  # sampling or beam-search
            mask = T.alloc(1., X.shape[0], X.shape[1])

        # one step
        if one_step:
            assert init_h is not None, 'previous state must be provided!'

        padded_mask = self.get_padded_shuffled_mask(mask, pad=0)
        X = X.dimshuffle((1, 0, 2))

        x_z = dot(X, self.W_z, self.b_z)
        x_r = dot(X, self.W_r, self.b_r)
        x_h = dot(X, self.W_h, self.b_h)

        """
        JZS3 with constant context. (not attention here.)
        """
        if C is not None:
            assert C.ndim == 2
            ctx_step = C.dimshuffle('x', 0, 1)    # C: (nb_samples, context_dim)
            x_z     += dot(ctx_step, self.C_z)
            x_r     += dot(ctx_step, self.C_r)
            x_h     += dot(ctx_step, self.C_h)

        """
        JZS3 with additional initial/previous state.
        """
        if init_h is None:
            init_h = T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1)

        if one_step:
            seq          = [x_z, x_r, x_h, padded_mask]
            outputs_info = [init_h]
            non_seq      = [self.U_z, self.U_r, self.U_h]
            outputs = self._step(*(seq + outputs_info + non_seq))

        else:
            outputs, updates = theano.scan(
                self._step,
                sequences=[x_z, x_r, x_h, padded_mask],
                outputs_info=init_h,
                non_sequences=[self.U_z, self.U_r, self.U_h],
            )

        if return_sequence:
            return outputs.dimshuffle((1, 0, 2))
        return outputs[-1]


class LSTM(Recurrent):
    def __init__(self,
                 input_dim=0,
                 output_dim=128,
                 context_dim=None,
                 init='glorot_uniform', inner_init='orthogonal',
                 forget_bias_init='one',
                 activation='tanh', inner_activation='sigmoid',
                 name=None, weights=None):

        super(LSTM, self).__init__()
        """
        Standard model
        """
        self.input_dim = input_dim
        self.output_dim = output_dim

        self.init = initializations.get(init)
        self.inner_init = initializations.get(inner_init)
        self.forget_bias_init = initializations.get(forget_bias_init)
        self.activation = activations.get(activation)
        self.inner_activation = activations.get(inner_activation)

        # input gate param.
        self.W_i = self.init((self.input_dim, self.output_dim))
        self.U_i = self.inner_init((self.output_dim, self.output_dim))
        self.b_i = shared_zeros(self.output_dim)

        # forget gate param.
        self.W_f = self.init((self.input_dim, self.output_dim))
        self.U_f = self.inner_init((self.output_dim, self.output_dim))
        self.b_f = self.forget_bias_init(self.output_dim)  # forget gate needs one bias.

        # output gate param.
        self.W_o = self.init((self.input_dim, self.output_dim))
        self.U_o = self.inner_init((self.output_dim, self.output_dim))
        self.b_o = shared_zeros(self.output_dim)

        # memory param.
        self.W_c = self.init((self.input_dim, self.output_dim))
        self.U_c = self.inner_init((self.output_dim, self.output_dim))
        self.b_c = shared_zeros(self.output_dim)

        # set names
        self.W_i.name, self.U_i.name, self.b_i.name = 'Wi', 'Ui', 'bi'
        self.W_f.name, self.U_f.name, self.b_f.name = 'Wf', 'Uf', 'bf'
        self.W_o.name, self.U_o.name, self.b_o.name = 'Wo', 'Uo', 'bo'
        self.W_c.name, self.U_c.name, self.b_c.name = 'Wc', 'Uc', 'bc'

        self.params = [
            self.W_i, self.U_i, self.b_i,
            self.W_f, self.U_f, self.b_f,
            self.W_o, self.U_o, self.b_o,
            self.W_c, self.U_c, self.b_c,
        ]

        """
        context inputs.
        """
        if context_dim is not None:
            self.context_dim = context_dim
            self.C_i = self.init((self.context_dim, self.output_dim))
            self.C_f = self.init((self.context_dim, self.output_dim))
            self.C_o = self.init((self.context_dim, self.output_dim))
            self.C_c = self.init((self.context_dim, self.output_dim))
            self.C_i.name, self.C_f.name, self.C_o.name, self.C_c.name = 'Ci', 'Cf', 'Co', 'Cc'

            self.params += [self.C_i, self.C_f, self.C_o, self.C_c]

        if weights is not None:
            self.set_weights(weights)

        if name is not None:
            self.set_name(name)

    def _step(self,
              xi_t, xf_t, xo_t, xc_t, mask_t,
              h_tm1, c_tm1,
              u_i, u_f, u_o, u_c):
        # h_mask_tm1 = mask_tm1 * h_tm1

        i     = self.inner_activation(xi_t + T.dot(h_tm1, u_i))  # input  gate
        f     = self.inner_activation(xf_t + T.dot(h_tm1, u_f))  # forget gate
        o     = self.inner_activation(xo_t + T.dot(h_tm1, u_o))  # output gate
        c     = self.activation(xc_t + T.dot(h_tm1, u_c))        # memory updates

        # update the memory cell.
        c_t   = f * c_tm1 + i * c
        h_t   = o * self.activation(c_t)

        # masking
        c_t   = c_t * mask_t + (1 - mask_t) * c_tm1
        h_t   = h_t * mask_t + (1 - mask_t) * h_tm1
        return h_t, c_t
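
    # i.e. c_t = f * c_{t-1} + i * tanh(.), h_t = o * tanh(c_t): a standard
    # LSTM without peepholes; masked steps carry both states forward.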

    def input_embed(self, X, C=None):
        x_i = dot(X, self.W_i, self.b_i)
        x_f = dot(X, self.W_f, self.b_f)
        x_o = dot(X, self.W_o, self.b_o)
        x_c = dot(X, self.W_c, self.b_c)

        """
        LSTM with constant context. (not attention here.)
        """
        if C is not None:
            assert C.ndim == 2
            ctx_step = C.dimshuffle('x', 0, 1)    # C: (nb_samples, context_dim)
            x_i     += dot(ctx_step, self.C_i)
            x_f     += dot(ctx_step, self.C_f)
            x_o     += dot(ctx_step, self.C_o)
            x_c     += dot(ctx_step, self.C_c)

        return x_i, x_f, x_o, x_c

    def __call__(self, X, mask=None, C=None, init_h=None, init_c=None, return_sequence=False, one_step=False):
        # the recurrent cell only works on 3D tensors; lift 2D input to 3D
        if X.ndim == 2:
            X = X[:, None, :]

        # mask
        if mask is None:  # sampling or beam-search
            mask = T.alloc(1., X.shape[0], X.shape[1])

        # one step
        if one_step:
            assert init_h is not None, 'previous state must be provided!'

        padded_mask = self.get_padded_shuffled_mask(mask, pad=0)
        X = X.dimshuffle((1, 0, 2))
        x_i, x_f, x_o, x_c = self.input_embed(X, C)

        """
        LSTM with additional initial/previous state.
        """
        if init_h is None:
            init_h = T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1)

        if init_c is None:
            init_c = init_h

        if one_step:
            seq          = [x_i, x_f, x_o, x_c, padded_mask]
            outputs_info = [init_h, init_c]
            non_seq      = [self.U_i, self.U_f, self.U_o, self.U_c]
            outputs = self._step(*(seq + outputs_info + non_seq))

        else:
            outputs, updates = theano.scan(
                self._step,
                sequences=[x_i, x_f, x_o, x_c, padded_mask],
                outputs_info=[init_h, init_c],
                non_sequences=[self.U_i, self.U_f, self.U_o, self.U_c],
            )

        if return_sequence:
            return outputs[0].dimshuffle((1, 0, 2)), outputs[1].dimshuffle((1, 0, 2))  # H, C
        return outputs[0][-1], outputs[1][-1]




================================================
FILE: emolga/models/__init__.py
================================================
__author__ = 'jiataogu'


================================================
FILE: emolga/models/core.py
================================================
__author__ = 'jiataogu'
import theano
import logging
import deepdish as dd

from emolga.dataset.build_dataset import serialize_to_file, deserialize_from_file
from emolga.utils.theano_utils import floatX

logger = logging.getLogger(__name__)


class Model(object):
    def __init__(self):
        self.layers  = []
        self.params  = []
        self.monitor = {}
        self.watchlist = []

    def _add(self, layer):
        if layer:
            self.layers.append(layer)
            self.params += layer.params

    def _monitoring(self):
        # add monitoring variables
        for l in self.layers:
            for v in l.monitor:
                name = v + '@' + l.name
                print name
                self.monitor[name] = l.monitor[v]

    def compile_monitoring(self, inputs, updates=None):
        logger.info('compile monitoring')
        for i, v in enumerate(self.monitor):
            self.watchlist.append(v)
            logger.info('monitoring [{0}]: {1}'.format(i, v))

        self.watch = theano.function(inputs,
                                     [self.monitor[v] for v in self.watchlist],
                                     updates=updates
                                     )
        logger.info('done.')

    def set_weights(self, weights):
        if hasattr(self, 'save_parm'):
            params = self.params + self.save_parm
        else:
            params = self.params

        for p, w in zip(params, weights):
            print p.name
            if p.eval().shape != w.shape:
                raise Exception("Layer shape %s not compatible with weight shape %s." % (p.eval().shape, w.shape))
            p.set_value(floatX(w))

    def get_weights(self):
        weights = []
        for p in self.params:
            weights.append(p.get_value())

        if hasattr(self, 'save_parm'):
            for v in self.save_parm:
                weights.append(v.get_value())

        return weights

    def set_name(self, name):
        for i in range(len(self.params)):
            if self.params[i].name is None:
                self.params[i].name = '%s_p%d' % (name, i)
            else:
                self.params[i].name = name + '@' + self.params[i].name
        self.name = name

    def save(self, filename):
        if hasattr(self, 'save_parm'):
            params = self.params + self.save_parm
        else:
            params = self.params
        ps = 'save: <\n'
        for p in params:
            ps += '{0}: {1}\n'.format(p.name, p.eval().shape)
        ps += '> to ... {}'.format(filename)
        logger.info(ps)

        # the hdf5 module seems to work abnormally !!
        # dd.io.save(filename, self.get_weights())
        serialize_to_file(self.get_weights(), filename)

    def load(self, filename):
        logger.info('load the weights.')

        # the hdf5 module seems to work abnormally !!
        # weights = dd.io.load(filename)
        weights = deserialize_from_file(filename)
        print len(weights)
        self.set_weights(weights)
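
# A minimal save/load round-trip sketch (hypothetical file name):
#   model.save('weights.pkl')   # logs each param name/shape, then pickles
#   model.load('weights.pkl')   # deserializes and restores via set_weights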


================================================
FILE: emolga/models/covc_encdec.py
================================================
__author__ = 'jiataogu'
import theano
import logging
import copy
import emolga.basic.objectives as objectives
import emolga.basic.optimizers as optimizers

from theano.compile.nanguardmode import NanGuardMode
from emolga.utils.generic_utils import visualize_
from emolga.layers.core import Dropout, Dense, Dense2, Identity
from emolga.layers.recurrent import *
from emolga.layers.ntm_minibatch import Controller
from emolga.layers.embeddings import *
from emolga.layers.attention import *
from core import Model

logger = logging.getLogger(__name__)
RNN    = GRU             # change it here for other RNN models.
err    = 1e-9


class Encoder(Model):
    """
    Recurrent Neural Network-based Encoder
    It is used to compute the context vector.
    """

    def __init__(self,
                 config, rng, prefix='enc',
                 mode='Evaluation', embed=None, use_context=False):
        super(Encoder, self).__init__()
        self.config = config
        self.rng = rng
        self.prefix = prefix
        self.mode = mode
        self.name = prefix
        self.use_context = use_context

        self.return_embed = False
        self.return_sequence = False

        """
        Create all elements of the Encoder's Computational graph
        """
        # create Embedding layers
        logger.info("{}_create embedding layers.".format(self.prefix))
        if embed:
            self.Embed = embed
        else:
            self.Embed = Embedding(
                self.config['enc_voc_size'],
                self.config['enc_embedd_dim'],
                name="{}_embed".format(self.prefix))
            self._add(self.Embed)

        if self.use_context:
            self.Initializer = Dense(
                config['enc_contxt_dim'],
                config['enc_hidden_dim'],
                activation='tanh',
                name="{}_init".format(self.prefix)
            )
            self._add(self.Initializer)

        """
        Encoder Core
        """
        # create RNN cells
        if not self.config['bidirectional']:
            logger.info("{}_create RNN cells.".format(self.prefix))
            self.RNN = RNN(
                self.config['enc_embedd_dim'],
                self.config['enc_hidden_dim'],
                None if not use_context
                else self.config['enc_contxt_dim'],
                name="{}_cell".format(self.prefix)
            )
            self._add(self.RNN)
        else:
            logger.info("{}_create forward RNN cells.".format(self.prefix))
            self.forwardRNN = RNN(
                self.config['enc_embedd_dim'],
                self.config['enc_hidden_dim'],
                None if not use_context
                else self.config['enc_contxt_dim'],
                name="{}_fw_cell".format(self.prefix)
            )
            self._add(self.forwardRNN)

            logger.info("{}_create backward RNN cells.".format(self.prefix))
            self.backwardRNN = RNN(
                self.config['enc_embedd_dim'],
                self.config['enc_hidden_dim'],
                None if not use_context
                else self.config['enc_contxt_dim'],
                name="{}_bw_cell".format(self.prefix)
            )
            self._add(self.backwardRNN)

        logger.info("create encoder ok.")

    def build_encoder(self, source, context=None, return_embed=False,
                      return_sequence=False,
                      return_gates=False,
                      clean_mask=False):
        """
        Build the Encoder Computational Graph
        """
        # clean_mask means we zero the hidden states at masked positions.
        # Sometimes this helps downstream computation.
        # Note that this option only takes effect when return_sequence is set.
        # We recommend leaving at least one masked position at the end of the encoded sequence.

        # Initial state
        Init_h = None
        if self.use_context:
            Init_h = self.Initializer(context)

        # word embedding
        if not self.config['bidirectional']:
            X, X_mask = self.Embed(source, True)
            if return_gates:
                X_out, Z, R = self.RNN(X, X_mask, C=context, init_h=Init_h,
                                       return_sequence=return_sequence,
                                       return_gates=True)
            else:
                X_out     = self.RNN(X, X_mask, C=context, init_h=Init_h,
                                     return_sequence=return_sequence,
                                     return_gates=False)
            if return_sequence:
                X_tail    = X_out[:, -1]

                if clean_mask:
                    X_out     = X_out * X_mask[:, :, None]
            else:
                X_tail    = X_out
        else:
            source2 = source[:, ::-1]
            X,  X_mask = self.Embed(source, True)
            X2, X2_mask = self.Embed(source2, True)

            if not return_gates:
                X_out1 = self.backwardRNN(X, X_mask,  C=context, init_h=Init_h, return_sequence=return_sequence)
                X_out2 = self.forwardRNN(X2, X2_mask, C=context, init_h=Init_h, return_sequence=return_sequence)
            else:
                X_out1, Z1, R1  = self.backwardRNN(X, X_mask,  C=context, init_h=Init_h,
                                                   return_sequence=return_sequence,
                                                   return_gates=True)
                X_out2, Z2, R2  = self.forwardRNN(X2, X2_mask, C=context, init_h=Init_h,
                                                  return_sequence=return_sequence,
                                                  return_gates=True)
                Z = T.concatenate([Z1, Z2[:, ::-1, :]], axis=2)
                R = T.concatenate([R1, R2[:, ::-1, :]], axis=2)

            if not return_sequence:
                X_out  = T.concatenate([X_out1, X_out2], axis=1)
                X_tail = X_out
            else:
                X_out  = T.concatenate([X_out1, X_out2[:, ::-1, :]], axis=2)
                X_tail = T.concatenate([X_out1[:, -1], X_out2[:, -1]], axis=1)
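                # X_out2 was computed on the reversed source, so it is
                # flipped back along time before concatenation; position i
                # then carries both directions' states for the same word.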

                if clean_mask:
                    X_out     = X_out * X_mask[:, :, None]

        X_mask  = T.cast(X_mask, dtype='float32')
        if not return_gates:
            if return_embed:
                return X_out, X, X_mask, X_tail
            return X_out
        else:
            if return_embed:
                return X_out, X, X_mask, X_tail, Z, R
            return X_out, Z, R

    def compile_encoder(self, with_context=False, return_embed=False, return_sequence=False):
        source  = T.imatrix()
        self.return_embed = return_embed
        self.return_sequence = return_sequence
        if with_context:
            context = T.matrix()

            self.encode = theano.function([source, context],
                                          self.build_encoder(source, context,
                                                             return_embed=return_embed,
                                                             return_sequence=return_sequence))
            self.gtenc  = theano.function([source, context],
                                          self.build_encoder(source, context,
                                                             return_embed=return_embed,
                                                             return_sequence=return_sequence,
                                                             return_gates=True))
        else:
            self.encode = theano.function([source],
                                          self.build_encoder(source, None,
                                                             return_embed=return_embed,
                                                             return_sequence=return_sequence))
            self.gtenc  = theano.function([source],
                                          self.build_encoder(source, None,
                                                             return_embed=return_embed,
                                                             return_sequence=return_sequence,
                                                             return_gates=True))
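
# A minimal encoding sketch (hypothetical config/usage, not from this repo):
#   enc = Encoder(config, rng, prefix='enc')
#   enc.compile_encoder(return_sequence=True)
#   H = enc.encode(source)   # source: int32 matrix (nb_samples, max_len)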


class Decoder(Model):
    """
    Recurrent Neural Network-based Decoder.
    It is used for:
        (1) Evaluation: compute the probability P(Y|X)
        (2) Prediction: sample the best result based on P(Y|X)
        (3) Beam-search
        (4) Scheduled Sampling (how to implement it?)
    """

    def __init__(self,
                 config, rng, prefix='dec',
                 mode='RNN', embed=None,
                 highway=False):
        """
        mode = RNN: use a RNN Decoder
        """
        super(Decoder, self).__init__()
        self.config = config
        self.rng = rng
        self.prefix = prefix
        self.name = prefix
        self.mode = mode

        self.highway = highway
        self.init = initializations.get('glorot_uniform')
        self.sigmoid = activations.get('sigmoid')

        # use standard drop-out for input & output.
        # I believe it should not be used for the context vector.
        self.dropout = config['dropout']
        if self.dropout > 0:
            logger.info('Use standard-dropout!!!!')
            self.D   = Dropout(rng=self.rng, p=self.dropout, name='{}_Dropout'.format(prefix))

        """
        Create all elements of the Decoder's computational graph.
        """
        # create Embedding layers
        logger.info("{}_create embedding layers.".format(self.prefix))
        if embed:
            self.Embed = embed
        else:
            self.Embed = Embedding(
                self.config['dec_voc_size'],
                self.config['dec_embedd_dim'],
                name="{}_embed".format(self.prefix))
            self._add(self.Embed)

        # create Initialization Layers
        logger.info("{}_create initialization layers.".format(self.prefix))
        if not config['bias_code']:
            self.Initializer = Zero()
        else:
            self.Initializer = Dense(
                config['dec_contxt_dim'],
                config['dec_hidden_dim'],
                activation='tanh',
                name="{}_init".format(self.prefix)
            )

        # create RNN cells
        logger.info("{}_create RNN cells.".format(self.prefix))
        if 'location_embed' in self.config:
            if config['location_embed']:
                dec_embedd_dim = 2 * self.config['dec_embedd_dim']
            else:
                dec_embedd_dim = self.config['dec_embedd_dim']
        else:
            dec_embedd_dim = self.config['dec_embedd_dim']

        self.RNN = RNN(
            dec_embedd_dim,
            self.config['dec_hidden_dim'],
            self.config['dec_contxt_dim'],
            name="{}_cell".format(self.prefix)
        )

        self._add(self.Initializer)
        self._add(self.RNN)

        # HighWay Gating
        if highway:
            logger.info("HIGHWAY CONNECTION~~~!!!")
            assert self.config['context_predict']
            assert self.config['dec_contxt_dim'] == self.config['dec_hidden_dim']

            self.C_x = self.init((self.config['dec_contxt_dim'],
                                  self.config['dec_hidden_dim']))
            self.H_x = self.init((self.config['dec_hidden_dim'],
                                  self.config['dec_hidden_dim']))
            self.b_x = initializations.get('zero')(self.config['dec_hidden_dim'])

            self.C_x.name = '{}_Cx'.format(self.prefix)
            self.H_x.name = '{}_Hx'.format(self.prefix)
            self.b_x.name = '{}_bx'.format(self.prefix)
            self.params += [self.C_x, self.H_x, self.b_x]

        # create readout layers
        logger.info("_create Readout layers")

        # 1. hidden layers readout.
        self.hidden_readout = Dense(
            self.config['dec_hidden_dim'],
            self.config['output_dim']
            if self.config['deep_out']
            else self.config['dec_voc_size'],
            activation='linear',
            name="{}_hidden_readout".format(self.prefix)
        )

        # 2. previous word readout
        self.prev_word_readout = None
        if self.config['bigram_predict']:
            self.prev_word_readout = Dense(
                dec_embedd_dim,
                self.config['output_dim']
                if self.config['deep_out']
                else self.config['dec_voc_size'],
                activation='linear',
                name="{}_prev_word_readout".format(self.prefix),
                learn_bias=False
            )

        # 3. context readout
        self.context_readout = None
        if self.config['context_predict']:
            if not self.config['leaky_predict']:
                self.context_readout = Dense(
                    self.config['dec_contxt_dim'],
                    self.config['output_dim']
                    if self.config['deep_out']
                    else self.config['dec_voc_size'],
                    activation='linear',
                    name="{}_context_readout".format(self.prefix),
                    learn_bias=False
                )
            else:
                assert self.config['dec_contxt_dim'] == self.config['dec_hidden_dim']
                self.context_readout = self.hidden_readout

        # option: deep output (maxout)
        if self.config['deep_out']:
            self.activ = Activation(config['deep_out_activ'])
            # self.dropout = Dropout(rng=self.rng, p=config['dropout'])
            self.output_nonlinear = [self.activ]  # , self.dropout]
            self.output = Dense(
                self.config['output_dim'] / 2
                if config['deep_out_activ'] == 'maxout2'
                else self.config['output_dim'],

                self.config['dec_voc_size'],
                activation='softmax',
                name="{}_output".format(self.prefix),
                learn_bias=False
            )
        else:
            self.output_nonlinear = []
            self.output = Activation('softmax')

        # registration:
        self._add(self.hidden_readout)

        if not self.config['leaky_predict']:
            self._add(self.context_readout)

        self._add(self.prev_word_readout)
        self._add(self.output)

        if self.config['deep_out']:
            self._add(self.activ)
        # self._add(self.dropout)

        logger.info("create decoder ok.")

    @staticmethod
    def _grab_prob(probs, X, block_unk=False):
        assert probs.ndim == 3

        batch_size = probs.shape[0]
        max_len = probs.shape[1]
        vocab_size = probs.shape[2]

        probs = probs.reshape((batch_size * max_len, vocab_size))
        return probs[T.arange(batch_size * max_len), X.flatten(1)].reshape(X.shape)  # advanced indexing
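
    # e.g. (hypothetical values): probs of shape (2, 5, V) and indices X of
    # shape (2, 5) give back a (2, 5) matrix holding, at each position, the
    # probability assigned to the corresponding word in X.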

    """
    Build the decoder for evaluation
    """
    def prepare_xy(self, target):
        # Word embedding
        Y, Y_mask = self.Embed(target, True)  # (nb_samples, max_len, embedding_dim)

        if self.config['use_input']:
            X = T.concatenate([alloc_zeros_matrix(Y.shape[0], 1, Y.shape[2]), Y[:, :-1, :]], axis=1)
        else:
            X = 0 * Y

        # option ## drop words.

        X_mask    = T.concatenate([T.ones((Y.shape[0], 1)), Y_mask[:, :-1]], axis=1)
        Count     = T.cast(T.sum(X_mask, axis=1), dtype=theano.config.floatX)
        return X, X_mask, Y, Y_mask, Count
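
    # When use_input is set, X is Y shifted right by one step (teacher
    # forcing): position t feeds the embedding of word t-1, with an
    # all-zero vector standing in for the begin-of-sequence symbol.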

    def build_decoder(self, target, context=None,
                      return_count=False,
                      train=True):

        """
        Build the Decoder Computational Graph
        For training/testing
        """
        X, X_mask, Y, Y_mask, Count = self.prepare_xy(target)

        # input drop-out if any.
        if self.dropout > 0:
            X = self.D(X, train=train)

        # Initial state of RNN
        Init_h = self.Initializer(context)
        if not self.highway:
            X_out  = self.RNN(X, X_mask, C=context, init_h=Init_h, return_sequence=True)

            # Readout
            readout = self.hidden_readout(X_out)
            if self.dropout > 0:
                readout = self.D(readout, train=train)

            if self.config['context_predict']:
                readout += self.context_readout(context).dimshuffle(0, 'x', 1)
        else:
            X      = X.dimshuffle((1, 0, 2))
            X_mask = X_mask.dimshuffle((1, 0))

            def _recurrence(x, x_mask, prev_h, c):
                # compute the highway gate for context vector.
                xx    = dot(c, self.C_x, self.b_x) + dot(prev_h, self.H_x)  # highway gate.
                xx    = self.sigmoid(xx)

                cy    = xx * c   # the path without using RNN
                x_out = self.RNN(x, mask=x_mask, C=c, init_h=prev_h, one_step=True)
                hx    = (1 - xx) * x_out
                return x_out, hx, cy

            outputs, _ = theano.scan(
                _recurrence,
                sequences=[X, X_mask],
                outputs_info=[Init_h, None, None],
                non_sequences=[context]
            )

            # hidden readout + context readout
            readout   = self.hidden_readout( outputs[1].dimshuffle((1, 0, 2)))
            if self.dropout > 0:
                readout = self.D(readout, train=train)

            readout  += self.context_readout(outputs[2].dimshuffle((1, 0, 2)))

            # return to normal size.
            X      = X.dimshuffle((1, 0, 2))
            X_mask = X_mask.dimshuffle((1, 0))

        if self.config['bigram_predict']:
            readout += self.prev_word_readout(X)

        for l in self.output_nonlinear:
            readout = l(readout)

        prob_dist = self.output(readout)  # (nb_samples, max_len, vocab_size)
        # log_old  = T.sum(T.log(self._grab_prob(prob_dist, target)), axis=1)
        log_prob = T.sum(T.log(self._grab_prob(prob_dist, target) + err) * X_mask, axis=1)
        log_ppl  = log_prob / Count

        if return_count:
            return log_prob, Count
        else:
            return log_prob, log_ppl

    """
    Sample one step
    """

    def _step_sample(self, prev_word, prev_stat, context):
        # word embedding (note that for the first word, embedding should be all zero)
        if self.config['use_input']:
            X = T.switch(
                prev_word[:, None] < 0,
                alloc_zeros_matrix(prev_word.shape[0], self.config['dec_embedd_dim']),
                self.Embed(prev_word)
            )
        else:
            X = alloc_zeros_matrix(prev_word.shape[0], self.config['dec_embedd_dim'])

        if self.dropout > 0:
            X = self.D(X, train=False)

        # apply one step of RNN
        if not self.highway:
            X_proj = self.RNN(X, C=context, init_h=prev_stat, one_step=True)
            next_stat = X_proj

            # compute the readout probability distribution and sample it
            # here the readout is a matrix, different from the learner.
            readout = self.hidden_readout(next_stat)
            if self.dropout > 0:
                readout = self.D(readout, train=False)

            if self.config['context_predict']:
                readout += self.context_readout(context)
        else:
            xx     = dot(context, self.C_x, self.b_x) + dot(prev_stat, self.H_x)  # highway gate.
            xx     = self.sigmoid(xx)

            X_proj = self.RNN(X, C=context, init_h=prev_stat, one_step=True)
            next_stat = X_proj

            readout  = self.hidden_readout((1 - xx) * X_proj)
            if self.dropout > 0:
                readout = self.D(readout, train=False)

            readout += self.context_readout(xx * context)

        if self.config['bigram_predict']:
            readout += self.prev_word_readout(X)

        for l in self.output_nonlinear:
            readout = l(readout)

        next_prob = self.output(readout)
        next_sample = self.rng.multinomial(pvals=next_prob).argmax(1)
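        # i.e. draw one one-hot multinomial sample per row and take its
        # argmax to recover the sampled word id.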
        return next_prob, next_sample, next_stat

    """
    Build the sampler for sampling/greedy search/beam search
    """

    def build_sampler(self):
        """
        Build a sampler which only steps once.
        Typically it only works for one word a time?
        """
        logger.info("build sampler ...")
        if self.config['sample_stoch'] and self.config['sample_argmax']:
            logger.info("use argmax search!")
        elif self.config['sample_stoch'] and (not self.config['sample_argmax']):
            logger.info("use stochastic sampling!")
        elif self.config['sample_beam'] > 1:
            logger.info("use beam search! (beam_size={})".format(self.config['sample_beam']))

        # initial state of our Decoder.
        context = T.matrix()  # theano variable.

        init_h = self.Initializer(context)
        logger.info('compile the function: get_init_state')
        self.get_init_state \
            = theano.function([context], init_h, name='get_init_state')
        logger.info('done.')

        # word sampler: 1 x 1
        prev_word = T.vector('prev_word', dtype='int64')
        prev_stat = T.matrix('prev_state', dtype='float32')
        next_prob, next_sample, next_stat \
            = self._step_sample(prev_word, prev_stat, context)

        # next word probability
        logger.info('compile the function: sample_next')
        inputs = [prev_word, prev_stat, context]
        outputs = [next_prob, next_sample, next_stat]

        self.sample_next = theano.function(inputs, outputs, name='sample_next')
        logger.info('done')
        pass

    """
    Build a Stochastic Sampler which can use SCAN to work on GPU.
    However it cannot be used in Beam-search.
    """

    def build_stochastic_sampler(self):
        context = T.matrix()
        init_h = self.Initializer(context)

        logger.info('compile the function: sample')
        pass

    """
    Generate samples, either with stochastic sampling or beam-search!
    """

    def get_sample(self, context, k=1, maxlen=30, stochastic=True, argmax=False, fixlen=False):
        # beam size
        if k > 1:
            assert not stochastic, 'Beam search does not support stochastic sampling!!'

        # fix length cannot use beam search
        # if fixlen:
        #     assert k == 1

        # prepare for searching
        sample = []
        score = []
        if stochastic:
            score = 0

        live_k = 1
        dead_k = 0

        hyp_samples = [[]] * live_k
        hyp_scores = np.zeros(live_k).astype(theano.config.floatX)
        hyp_states = []

        # get initial state of decoder RNN with context
        next_state = self.get_init_state(context)
        next_word = -1 * np.ones((1,)).astype('int64')  # indicator for the first target word (bos target)

        # Start searching!
        for ii in xrange(maxlen):
            # print next_word
            ctx = np.tile(context, [live_k, 1])
            next_prob, next_word, next_state \
                = self.sample_next(next_word, next_state, ctx)

            if stochastic:
                # using stochastic sampling (or greedy sampling.)
                if argmax:
                    nw = next_prob[0].argmax()
                    next_word[0] = nw
                else:
                    nw = next_word[0]

                sample.append(nw)
                score += next_prob[0, nw]

                if (not fixlen) and (nw == 0):  # sample reached the end
                    break

            else:
                # using beam-search
                # scores can only be computed in a flattened way!
                cand_scores = hyp_scores[:, None] - np.log(next_prob)
                cand_flat = cand_scores.flatten()
                ranks_flat = cand_flat.argsort()[:(k - dead_k)]

                # fetch the best results.
                voc_size = next_prob.shape[1]
                trans_index = ranks_flat / voc_size
                word_index = ranks_flat % voc_size
                costs = cand_flat[ranks_flat]
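                # ranks_flat indexes the flattened (live_k x voc_size) score
                # matrix: integer division by voc_size recovers which
                # hypothesis a candidate extends, the remainder its word id.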

                # get the new hyp samples
                new_hyp_samples = []
                new_hyp_scores = np.zeros(k - dead_k).astype(theano.config.floatX)
                new_hyp_states = []

                for idx, [ti, wi] in enumerate(zip(trans_index, word_index)):
                    new_hyp_samples.append(hyp_samples[ti] + [wi])
                    new_hyp_scores[idx] = copy.copy(costs[idx])
                    new_hyp_states.append(copy.copy(next_state[ti]))

                # check the finished samples
                new_live_k = 0
                hyp_samples = []
                hyp_scores = []
                hyp_states = []

                for idx in xrange(len(new_hyp_samples)):
                    if (new_hyp_samples[idx][-1] == 0) and (not fixlen):  # finished when last word is EOS (id 0)
                        sample.append(new_hyp_samples[idx])
                        score.append(new_hyp_scores[idx])
                        dead_k += 1
                    else:
                        new_live_k += 1
                        hyp_samples.append(new_hyp_samples[idx])
                        hyp_scores.append(new_hyp_scores[idx])
                        hyp_states.append(new_hyp_states[idx])

                hyp_scores = np.array(hyp_scores)
                live_k = new_live_k

                if new_live_k < 1:
                    break
                if dead_k >= k:
                    break

                next_word = np.array([w[-1] for w in hyp_samples])
                next_state = np.array(hyp_states)
                pass
            pass

        # end.
        if not stochastic:
            # dump every remaining one
            if live_k > 0:
                for idx in xrange(live_k):
                    sample.append(hyp_samples[idx])
                    score.append(hyp_scores[idx])

        return sample, score


class DecoderAtt(Decoder):
    """
    Recurrent Neural Network-based Decoder [for CopyNet-b Only]
    with Attention Mechanism
    """
    def __init__(self,
                 config, rng, prefix='dec',
                 mode='RNN', embed=None,
                 copynet=False, identity=False):
        super(DecoderAtt, self).__init__(
                config, rng, prefix,
                 mode, embed, False)
        self.init     = initializations.get('glorot_uniform')
        self.copynet  = copynet
        self.identity = identity
        # attention reader
        self.attention_reader = Attention(
            self.config['dec_hidden_dim'],
            self.config['dec_contxt_dim'],
            1000,
            name='source_attention',
            coverage=self.config['coverage']
        )
        self._add(self.attention_reader)

        # if use copynet
        if self.copynet:
            if not self.identity:
                self.Is = Dense(
                    self.config['dec_contxt_dim'],
                    self.config['dec_embedd_dim'],
                    name='in-trans'
                )
            else:
                assert self.config['dec_contxt_dim'] == self.config['dec_embedd_dim']
                self.Is = Identity(name='ini')

            self.Os = Dense(
                self.config['dec_readout_dim']
                if not self.config['location_embed']
                    else self.config['dec_readout_dim'] + self.config['dec_embedd_dim'],
                self.config['dec_contxt_dim'],
                name='out-trans'
            )

            if self.config['copygate']:
                self.Gs = Dense(
                    self.config['dec_readout_dim'] + self.config['dec_embedd_dim'],
                    1,
                    name='copy-gate',
                    activation='linear',
                    learn_bias=True,
                    negative_bias=True
                )
                self._add(self.Gs)

            if self.config['location_embed']:
                self._add(self.Is)
            self._add(self.Os)

        logger.info('adjust decoder ok.')

    """
    Build the decoder for evaluation
    """
    def prepare_xy(self, target, cc_matrix):
        # target:      (nb_samples, index_seq)
        # cc_matrix:   (nb_samples, maxlen_t, maxlen_s)
        # context:     (nb_samples)
        Y,  Y_mask  = self.Embed(target, True)  # (nb_samples, maxlen_t, embedding_dim)
        X           = T.concatenate([alloc_zeros_matrix(Y.shape[0], 1, Y.shape[2]), Y[:, :-1, :]], axis=1)

        # LL          = T.concatenate([alloc_zeros_matrix(Y.shape[0], 1, cc_matrix.shape[2]),
        #                              cc_matrix[:, :-1, :]], axis=1)
        LL = cc_matrix

        XL_mask     = T.cast(T.gt(T.sum(LL, axis=2), 0), dtype='float32')
        if not self.config['use_input']:
            X *= 0

        X_mask    = T.concatenate([T.ones((Y.shape[0], 1)), Y_mask[:, :-1]], axis=1)
        Count     = T.cast(T.sum(X_mask, axis=1), dtype=theano.config.floatX)
        return X, X_mask, LL, XL_mask, Y_mask, Count

    """
    The most different part. Be caution !!
    Very different from traditional RNN search.
    """
    def build_decoder(self,
                      target,
                      cc_matrix,
                      context,
                      c_mask,
                      return_count=False,
                      train=True):
        """
        Build the Computational Graph ::> Context is essential
        """
        assert c_mask is not None, 'context must be supplied for this decoder.'
        assert context.ndim == 3, 'context must have 3 dimensions.'
        # context: (nb_samples, max_len, contxt_dim)
        context_A = self.Is(context)  # (nb_samples, max_len, embed_dim)
        X, X_mask, LL, XL_mask, Y_mask, Count = self.prepare_xy(target, cc_matrix)

        # input drop-out if any.
        if self.dropout > 0:
            X     = self.D(X, train=train)

        # Initial state of RNN
        Init_h   = self.Initializer(context[:, 0, :])  # initialize from the first source annotation
        Init_a   = T.zeros((context.shape[0], context.shape[1]), dtype='float32')
        coverage = T.zeros((context.shape[0], context.shape[1]), dtype='float32')

        X        = X.dimshuffle((1, 0, 2))
        X_mask   = X_mask.dimshuffle((1, 0))
        LL       = LL.dimshuffle((1, 0, 2))            # (maxlen_t, nb_samples, maxlen_s)
        XL_mask  = XL_mask.dimshuffle((1, 0))          # (maxlen_t, nb_samples)

        def _recurrence(x, x_mask, ll, xl_mask, prev_h, prev_a, cov, cc, cm, ca):
            """
            x:      (nb_samples, embed_dims)
            x_mask: (nb_samples, )
            ll:     (nb_samples, maxlen_s)
            xl_mask:(nb_samples, )
            -----------------------------------------
            prev_h: (nb_samples, hidden_dims)
            prev_a: (nb_samples, maxlen_s)
            cov:    (nb_samples, maxlen_s)  *** coverage ***
            -----------------------------------------
            cc:     (nb_samples, maxlen_s, cxt_dim)
            cm:     (nb_samples, maxlen_s)
            ca:     (nb_samples, maxlen_s, ebd_dim)
            """
            # compute the attention and get the context vector
            prob  = self.attention_reader(prev_h, cc, Smask=cm, Cov=cov)
            ncov  = cov + prob

            cxt   = T.sum(cc * prob[:, :, None], axis=1)

            # compute input word embedding (mixed)
            x_in  = T.concatenate([x, T.sum(ca * prev_a[:, :, None], axis=1)], axis=-1)

            # compute the current hidden states of the RNN.
            x_out = self.RNN(x_in, mask=x_mask, C=cxt, init_h=prev_h, one_step=True)

            # compute the current readout vector.
            r_in  = [x_out]
            if self.config['context_predict']:
                r_in  += [cxt]
            if self.config['bigram_predict']:
                r_in  += [x_in]

            # copynet decoding
            r_in    = T.concatenate(r_in, axis=-1)
            r_out = self.hidden_readout(x_out)  # (nb_samples, voc_size)
            if self.config['context_predict']:
                r_out += self.context_readout(cxt)
            if self.config['bigram_predict']:
                r_out += self.prev_word_readout(x_in)

            for l in self.output_nonlinear:
                r_out = l(r_out)

            key     = self.Os(r_in)  # (nb_samples, cxt_dim) :: key
            Eng     = T.sum(key[:, None, :] * cc, axis=-1)

            # # gating
            if self.config['copygate']:
                gt     = self.sigmoid(self.Gs(r_in))  # (nb_samples, 1)
                r_out += T.log(gt.flatten()[:, None])
                Eng   += T.log(1 - gt.flatten()[:, None])

                # r_out *= gt.flatten()[:, None]
                # Eng   *= 1 - gt.flatten()[:, None]

            EngSum  = logSumExp(Eng, axis=-1, mask=cm, c=r_out)

            next_p  = T.concatenate([T.exp(r_out - EngSum), T.exp(Eng - EngSum) * cm], axis=-1)
            next_c  = next_p[:, self.config['dec_voc_size']:] * ll           # (nb_samples, maxlen_s)
            next_b  = next_p[:, :self.config['dec_voc_size']]
            sum_a   = T.sum(next_c, axis=1, keepdims=True)                   # (nb_samples, 1)
            next_a  = (next_c / (sum_a + err)) * xl_mask[:, None]            # err added for numerical stability
            return x_out, next_a, ncov, sum_a, next_b

        outputs, _ = theano.scan(
            _recurrence,
            sequences=[X, X_mask, LL, XL_mask],
            outputs_info=[Init_h, Init_a, coverage, None, None],
            non_sequences=[context, c_mask, context_A]
        )
        X_out, source_prob, coverages, source_sum, prob_dist = [z.dimshuffle((1, 0, 2)) for z in outputs]
        X        = X.dimshuffle((1, 0, 2))
        X_mask   = X_mask.dimshuffle((1, 0))
        XL_mask  = XL_mask.dimshuffle((1, 0))

        # unk masking
        U_mask   = T.ones_like(target) * (1 - T.eq(target, 1))
        U_mask  += (1 - U_mask) * (1 - XL_mask)

        # The key difference from a standard decoder: generation and copy
        # probabilities are combined before taking the log.
        log_prob = T.sum(T.log(
                self._grab_prob(prob_dist, target) * U_mask +
                source_sum.sum(axis=-1) + err
        ) * X_mask, axis=1)
        log_ppl  = log_prob / (Count + err)

        if return_count:
            return log_prob, Count
        else:
            return log_prob, log_ppl
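    # A hedged numpy sketch of the joint softmax used in _recurrence above:
    # generate-mode scores (r_out, one per vocabulary word) and copy-mode
    # scores (Eng, one per source position) are normalised through a single
    # log-sum-exp, so the two modes compete for probability mass. All names
    # below are illustrative only (the real code also applies the source mask).
    #
    #   import numpy as np
    #   r_out  = np.array([1.0, 0.5, -0.2])   # vocabulary (generate) scores
    #   Eng    = np.array([0.3, 1.2])         # per-source-position copy scores
    #   allE   = np.concatenate([r_out, Eng])
    #   Z      = np.log(np.exp(allE).sum())   # plays the role of EngSum
    #   next_p = np.exp(allE - Z)             # concat of generate + copy probs
    #   assert abs(next_p.sum() - 1.0) < 1e-6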

    """
    Sample one step
    """

    def _step_sample(self,
                     prev_word,
                     prev_stat,
                     prev_loc,
                     prev_cov,
                     context,
                     c_mask,
                     context_A):

        assert c_mask is not None, 'we need the source mask.'
        # word embedding (note that for the first word, embedding should be all zero)
        X = T.switch(
            prev_word[:, None] < 0,
            alloc_zeros_matrix(prev_word.shape[0], 2 * self.config['dec_embedd_dim']),
            T.concatenate([self.Embed(prev_word),
                           T.sum(context_A * prev_loc[:, :, None], axis=1)
                           ], axis=-1)
        )

        if self.dropout > 0:
            X = self.D(X, train=False)

        # apply one step of RNN
        Probs  = self.attention_reader(prev_stat, context, c_mask, Cov=prev_cov)
        ncov   = prev_cov + Probs

        cxt    = T.sum(context * Probs[:, :, None], axis=1)

        X_proj, zz, rr = self.RNN(X, C=cxt,
                                  init_h=prev_stat,
                                  one_step=True,
                                  return_gates=True)
        next_stat = X_proj

        # compute the readout probability distribution and sample from it
        # (here the readout is a matrix, unlike in the training graph).
        readin      = [next_stat]
        if self.config['context_predict']:
            readin += [cxt]
        if self.config['bigram_predict']:
            readin += [X]
        readin      = T.concatenate(readin, axis=-1)

        # if gating
        # if self.config['copygate']:
        #     gt      = self.sigmoid(self.Gs(readin))   # (nb_samples, dim)
        #     readin *= 1 - gt
        #     readout = self.hidden_readout(next_stat * gt[:, :self.config['dec_hidden_dim']])
        #     if self.config['context_predict']:
        #         readout += self.context_readout(
        #                 cxt * gt[:, self.config['dec_hidden_dim']:
        #                          self.config['dec_hidden_dim'] + self.config['dec_contxt_dim']])
        #     if self.config['bigram_predict']:
        #         readout += self.prev_word_readout(
        #                 X * gt[:, -2 * self.config['dec_embedd_dim']:])
        # else:
        readout = self.hidden_readout(next_stat)
        if self.config['context_predict']:
            readout += self.context_readout(cxt)
        if self.config['bigram_predict']:
            readout += self.prev_word_readout(X)

        for l in self.output_nonlinear:
            readout = l(readout)

        key         = self.Os(readin)
        Eng         = T.sum(key[:, None, :] * context, axis=-1)

        # # gating
        if self.config['copygate']:
            gt       = self.sigmoid(self.Gs(readin))  # (nb_samples, 1)
            readout += T.log(gt.flatten()[:, None])
            Eng     += T.log(1 - gt.flatten()[:, None])

        EngSum      = logSumExp(Eng, axis=-1, mask=c_mask, c=readout)

        next_prob   = T.concatenate([T.exp(readout - EngSum), T.exp(Eng - EngSum) * c_mask], axis=-1)
        next_sample = self.rng.multinomial(pvals=next_prob).argmax(1)
        return next_prob, next_sample, next_stat, ncov, Probs  # Probs = attention weights (alpha)
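    # Illustrative sketch (toy numpy stand-in, not the module's API) of the
    # prev_word < 0 convention handled by T.switch above: at the first step
    # the sampler passes -1, which selects an all-zero embedding instead of
    # a table lookup.
    #
    #   import numpy as np
    #   embed     = np.random.randn(10, 4).astype('float32')  # toy embedding table
    #   prev_word = np.array([-1, 3])
    #   X = np.where(prev_word[:, None] < 0,
    #                np.zeros((2, 4), dtype='float32'),
    #                embed[np.clip(prev_word, 0, None)])
    #   assert (X[0] == 0).all()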

    def build_sampler(self):
        """
        Build a sampler which only steps once.
        Typically it only works for one word a time?
        """
        logger.info("build sampler ...")
        if self.config['sample_stoch'] and self.config['sample_argmax']:
            logger.info("use argmax search!")
        elif self.config['sample_stoch'] and (not self.config['sample_argmax']):
            logger.info("use stochastic sampling!")
        elif self.config['sample_beam'] > 1:
            logger.info("use beam search! (beam_size={})".format(self.config['sample_beam']))

        # initial state of our Decoder.
        context   = T.tensor3()  # theano variable.
        c_mask    = T.matrix()   # mask of the input sentence.
        context_A = self.Is(context)

        init_h = self.Initializer(context[:, 0, :])
        init_a = T.zeros((context.shape[0], context.shape[1]))
        cov    = T.zeros((context.shape[0], context.shape[1]))

        logger.info('compile the function: get_init_state')
        self.get_init_state \
            = theano.function([context], [init_h, init_a, cov], name='get_init_state')
        logger.info('done.')

        # word sampler: 1 x 1
        prev_word = T.vector('prev_word', dtype='int64')
        prev_stat = T.matrix('prev_state', dtype='float32')
        prev_a    = T.matrix('prev_a', dtype='float32')
        prev_cov  = T.matrix('prev_cov', dtype='float32')

        next_prob, next_sample, next_stat, ncov, alpha \
            = self._step_sample(prev_word,
                                prev_stat,
                                prev_a,
                                prev_cov,
                                context,
                                c_mask,
                                context_A)

        # next word probability
        logger.info('compile the function: sample_next')
        inputs  = [prev_word, prev_stat, prev_a, prev_cov, context, c_mask]
        outputs = [next_prob, next_sample, next_stat, ncov, alpha]
        self.sample_next = theano.function(inputs, outputs, name='sample_next')
        logger.info('done')
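    # Hedged usage sketch for the two functions compiled above, assuming
    # `ctx` / `cmk` are a (1, len_s, cxt_dim) encoder context and its mask
    # (names hypothetical):
    #
    #   h, a, cov = self.get_init_state(ctx)
    #   w = -1 * np.ones((1,), dtype='int64')   # <bos> sentinel, see _step_sample
    #   for _ in range(max_len):
    #       p, w, h, cov, attn = self.sample_next(w, h, a, cov, ctx, cmk)
    #       # ... record w, and update `a` from the copy part of `p` ...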

    """
    Generate samples, either with stochastic sampling or beam-search!

    [:-:] I have to think over how to modify the BEAM-Search!!
    """
    def get_sample(self,
                   context,
                   c_mask,
                   source,
                   k=1, maxlen=30, stochastic=True,
                   argmax=False, fixlen=False,
                   return_attend=False
                   ):
        # beam size
        if k > 1:
            assert not stochastic, 'Beam search does not support stochastic sampling!!'

        # fixed-length decoding cannot use beam search
        # if fixlen:
        #     assert k == 1

        # prepare for searching
        Lmax   = self.config['dec_voc_size']
        sample = []
        ppp    = []
        attend = []
        score  = []

        if stochastic:
            score = 0

        live_k = 1
        dead_k = 0

        hyp_samples = [[]] * live_k
        hyp_scores  = np.zeros(live_k).astype(theano.config.floatX)
        hyp_ppps    = [[]] * live_k
        hyp_attends = [[]] * live_k

        # get initial state of decoder RNN with context
        next_state, ss_prob, coverage = self.get_init_state(context)
        next_word = -1 * np.ones((1,)).astype('int64')  # indicator for the first target word (bos target)

        # Start searching!
        for ii in xrange(maxlen):
            # print next_word
            ctx    = np.tile(context, [live_k, 1, 1])
            cmk    = np.tile(c_mask,  [live_k, 1])
            sss    = np.tile(source,  [live_k, 1])

            # # process word
            def process_():
                # caution for index_0: UNK
                ll  = np.zeros((sss.shape[0], sss.shape[1]), dtype='float32')
                for i in xrange(next_word.shape[0]):
                    if next_word[i] >= Lmax:
                        ll[i][next_word[i] - Lmax] = 1.
                        next_word[i] = sss[i][next_word[i] - Lmax]
                    else:
                        ll[i] = (sss[i] == next_word[i, None])
                        # for k in xrange(sss.shape[1]):
                        #     ll[i][k] = (sss[i][k] == next_word[i])
                return ll, next_word

            # print next_word
            ll, next_word = process_()
            ll_mask = (np.sum(ll, axis=1, keepdims=True) > 0)

            next_a  = ss_prob * ll
            next_a  = next_a / (err + np.sum(next_a, axis=1, keepdims=True)) * ll_mask
            next_prob0, next_word, next_state, coverage, alpha \
                = self.sample_next(next_word, next_state, next_a, coverage, ctx, cmk)
            # print next_prob0.shape[1]
            if not self.config['decode_unk']:
                next_prob0[:, 1]          = 0.
                next_prob0 /= np.sum(next_prob0, axis=1, keepdims=True)

            def merge_():
                # merge the probabilities
                temple_prob  = copy.copy(next_prob0)
                source_prob  = copy.copy(next_prob0[:, Lmax:])
                for i in xrange(next_prob0.shape[0]):
                    for j in xrange(sss.shape[1]):
                        if (sss[i, j] < Lmax) and (sss[i, j] != 1):
                            temple_prob[i, sss[i, j]] += source_prob[i, j]
                            temple_prob[i, Lmax + j]   = 0.

                return temple_prob, source_prob

            next_prob, ss_prob   = merge_()
            next_prob0[:, Lmax:] = 0.
            # print '0', next_prob0[:, 3165]
            # print '01', next_prob[:, 3165]
            # # print next_prob[0, Lmax:]
            # print ss_prob[0, :]

            if stochastic:
                # using stochastic sampling (or greedy sampling.)
                if argmax:
                    nw = next_prob[0].argmax()
                    next_word[0] = nw
                else:
                    nw = self.rng.multinomial(pvals=next_prob).argmax(1)

                sample.append(nw)
                score += next_prob[0, nw]

                if (not fixlen) and (nw == 0):  # sample reached the end
                    break

            else:
                # using beam-search
                # we can only compute it in a flattened way!
                cand_scores = hyp_scores[:, None] - np.log(next_prob)
                cand_flat   = cand_scores.flatten()
                ranks_flat  = cand_flat.argsort()[:(k - dead_k)]

                # fetch the best results.
                voc_size    = next_prob.shape[1]
                trans_index = ranks_flat // voc_size
                word_index  = ranks_flat % voc_size
                costs       = cand_flat[ranks_flat]

                # get the new hyp samples
                new_hyp_samples  = []
                new_hyp_ppps     = []
                new_hyp_attends  = []
                new_hyp_scores   = np.zeros(k - dead_k).astype(theano.config.floatX)
                new_hyp_states   = []
                new_hyp_coverage = []
                new_hyp_ss       = []

                for idx, [ti, wi] in enumerate(zip(trans_index, word_index)):
                    new_hyp_samples.append(hyp_samples[ti] + [wi])
                    new_hyp_scores[idx] = copy.copy(costs[idx])

                    new_hyp_states.append(copy.copy(next_state[ti]))
                    new_hyp_coverage.append(copy.copy(coverage[ti]))
                    new_hyp_ss.append(copy.copy(ss_prob[ti]))

                    if not return_attend:
                        new_hyp_ppps.append(hyp_ppps[ti] + [[next_prob0[ti][wi], next_prob[ti][wi]]])
                    else:
                        new_hyp_ppps.append(hyp_ppps[ti] + [(ss_prob[ti], alpha[ti])])

                # check the finished samples
                new_live_k   = 0
                hyp_samples  = []
                hyp_scores   = []
                hyp_states   = []
                hyp_coverage = []
                hyp_ppps     = []
                hyp_ss       = []

                for idx in xrange(len(new_hyp_samples)):
                    if (new_hyp_samples[idx][-1] == 0) and (not fixlen):  # <eos> was sampled
                        sample.append(new_hyp_samples[idx])
                        ppp.append(new_hyp_ppps[idx])
                        score.append(new_hyp_scores[idx])
                        dead_k += 1
                    else:
                        new_live_k += 1
                        hyp_samples.append(new_hyp_samples[idx])
                        hyp_ppps.append(new_hyp_ppps[idx])
                        hyp_scores.append(new_hyp_scores[idx])
                        hyp_states.append(new_hyp_states[idx])
                        hyp_coverage.append(new_hyp_coverage[idx])
                        hyp_ss.append(new_hyp_ss[idx])

                hyp_scores = np.array(hyp_scores)
                live_k = new_live_k

                if new_live_k < 1:
                    break
                if dead_k >= k:
                    break

                next_word  = np.array([w[-1] for w in hyp_samples])
                next_state = np.array(hyp_states)
                coverage   = np.array(hyp_coverage)
                ss_prob    = np.array(hyp_ss)
                pass

        # end.
        if not stochastic:
            # dump every remaining one
            if live_k > 0:
                for idx in xrange(live_k):
                    sample.append(hyp_samples[idx])
                    ppp.append(hyp_ppps[idx])
                    score.append(hyp_scores[idx])

        return sample, score, ppp
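    # A small numpy illustration (hypothetical numbers) of the beam
    # bookkeeping above: the (live_k, voc_size) cost matrix is flattened,
    # the cheapest candidates are taken, and the flat indices are decoded
    # back into (hypothesis, word) pairs.
    #
    #   import numpy as np
    #   cand = np.array([[2.3, 0.1, 1.7],      # costs for hypothesis 0
    #                    [0.4, 3.0, 0.2]])     # costs for hypothesis 1
    #   flat = cand.flatten()
    #   best = flat.argsort()[:2]              # two cheapest candidates
    #   trans_index, word_index = best // 3, best % 3
    #   # -> hypotheses (0, 1) with words (1, 2)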


class FnnDecoder(Model):
    def __init__(self, config, rng, prefix='fnndec'):
        """
        mode = RNN: use a RNN Decoder
        """
        super(FnnDecoder, self).__init__()
        self.config = config
        self.rng = rng
        self.prefix = prefix
        self.name = prefix

        """
        Create Dense Predictor.
        """

        self.Tr = Dense(self.config['dec_contxt_dim'],
                             self.config['dec_hidden_dim'],
                             activation='maxout2',
                             name='{}_Tr'.format(prefix))
        self._add(self.Tr)

        self.Pr = Dense(self.config['dec_hidden_dim'] / 2,
                             self.config['dec_voc_size'],
                             activation='softmax',
                             name='{}_Pr'.format(prefix))
        self._add(self.Pr)
        logger.info("FF decoder ok.")

    @staticmethod
    def _grab_prob(probs, X):
        assert probs.ndim == 3

        batch_size = probs.shape[0]
        max_len = probs.shape[1]
        vocab_size = probs.shape[2]

        probs = probs.reshape((batch_size * max_len, vocab_size))
        return probs[T.arange(batch_size * max_len), X.flatten(1)].reshape(X.shape)  # advanced indexing
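    # Numpy analogue (illustrative only) of the advanced indexing above:
    # flatten the (batch, max_len, vocab) probabilities and pick, for every
    # position, the probability assigned to the reference token.
    #
    #   import numpy as np
    #   probs  = np.full((2, 2, 3), 1 / 3.)     # (batch, max_len, vocab)
    #   X      = np.array([[0, 2], [1, 1]])     # reference token ids
    #   flat   = probs.reshape((4, 3))
    #   picked = flat[np.arange(4), X.flatten()].reshape(X.shape)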

    def build_decoder(self, target, context):
        """
        Build the Decoder Computational Graph
        """
        prob_dist = self.Pr(self.Tr(context[:, None, :]))
        log_prob  = T.sum(T.log(self._grab_prob(prob_dist, target) + err), axis=1)
        return log_prob

    def build_sampler(self):
        context   = T.matrix()
        prob_dist = self.Pr(self.Tr(context))
        next_sample = self.rng.multinomial(pvals=prob_dist).argmax(1)
        self.sample_next = theano.function([context], [prob_dist, next_sample], name='sample_next_{}'.format(self.prefix))
        logger.info('done')

    def get_sample(self, context, argmax=True):

        prob, sample = self.sample_next(context)
        if argmax:
            return prob[0].argmax()
        else:
            return sample[0]


########################################################################################################################
# Encoder-Decoder Models ::::
#
class RNNLM(Model):
    """
    RNN-LM, with context vector = 0.
    It is very similar with the implementation of VAE.
    """
    def __init__(self,
                 config, n_rng, rng,
                 mode='Evaluation'):
        super(RNNLM, self).__init__()

        self.config = config
        self.n_rng  = n_rng  # numpy random stream
        self.rng    = rng  # Theano random stream
        self.mode   = mode
        self.name   = 'rnnlm'

    def build_(self):
        logger.info("build the RNN-decoder")
        self.decoder = Decoder(self.config, self.rng, prefix='dec', mode=self.mode)

        # registration:
        self._add(self.decoder)

        # objectives and optimizers
        self.optimizer = optimizers.get('adadelta')

        # save the initial memory
        if self.config['mode'] == 'NTM':
            self.memory    = initializations.get('glorot_uniform')(
                    (self.config['dec_memory_dim'], self.config['dec_memory_wdth']))

        logger.info("create the RECURRENT language model. ok")

    def compile_(self, mode='train', contrastive=False):
        # compile the computational graph.
        # INFO: the parameters.
        # mode: 'train'/ 'display'/ 'policy' / 'all'

        ps = 'params: {\n'
        for p in self.params:
            ps += '{0}: {1}\n'.format(p.name, p.eval().shape)
        ps += '}.'
        logger.info(ps)

        param_num = np.sum([np.prod(p.shape.eval()) for p in self.params])
        logger.info("total number of the parameters of the model: {}".format(param_num))

        if mode == 'train' or mode == 'all':
            if not contrastive:
                self.compile_train()
            else:
                self.compile_train_CE()

        if mode == 'display' or mode == 'all':
            self.compile_sample()

        if mode == 'inference' or mode == 'all':
            self.compile_inference()

    def compile_train(self):

        # questions (theano variables)
        inputs  = T.imatrix()  # padded input word sequence (for training)
        if self.config['mode']   == 'RNN':
            context = alloc_zeros_matrix(inputs.shape[0], self.config['dec_contxt_dim'])
        elif self.config['mode'] == 'NTM':
            context = T.repeat(self.memory[None, :, :], inputs.shape[0], axis=0)
        else:
            raise NotImplementedError

        # decoding.
        target  = inputs
        logPxz, logPPL = self.decoder.build_decoder(target, context)

        # reconstruction loss
        loss_rec = T.mean(-logPxz)
        loss_ppl = T.exp(T.mean(-logPPL))

        L1       = T.sum([T.sum(abs(w)) for w in self.params])
        loss     = loss_rec

        updates = self.optimizer.get_updates(self.params, loss)

        logger.info("compiling the compuational graph ::training function::")
        train_inputs = [inputs]

        self.train_ = theano.function(train_inputs,
                                      [loss_rec, loss_ppl],
                                      updates=updates,
                                      name='train_fun')
        logger.info("pre-training functions compile done.")

        # add monitoring:
        self.monitor['context'] = context
        self._monitoring()

        # compiling monitoring
        self.compile_monitoring(train_inputs)

    @abstractmethod
    def compile_train_CE(self):
        pass

    def compile_sample(self):
        # context vectors (as)
        self.decoder.build_sampler()
        logger.info("display functions compile done.")

    @abstractmethod
    def compile_inference(self):
        pass

    def default_context(self):
        if self.config['mode'] == 'RNN':
            return np.zeros(shape=(1, self.config['dec_contxt_dim']), dtype=theano.config.floatX)
        elif self.config['mode'] == 'NTM':
            memory = self.memory.get_value()
            memory = memory.reshape((1, memory.shape[0], memory.shape[1]))
            return memory

    def generate_(self, context=None, max_len=None, mode='display'):
        """
        :param action: action vector to guide the question.
                       If None, use a Gaussian to simulate the action.
        :return: question sentence in natural language.
        """
        # assert self.config['sample_stoch'], 'RNNLM sampling must be stochastic'
        # assert not self.config['sample_argmax'], 'RNNLM sampling cannot use argmax'

        if context is None:
            context = self.default_context()

        args = dict(k=self.config['sample_beam'],
                    maxlen=self.config['max_len'] if not max_len else max_len,
                    stochastic=self.config['sample_stoch'] if mode == 'display' else None,
                    argmax=self.config['sample_argmax'] if mode == 'display' else None)

        sample, score = self.decoder.get_sample(context, **args)
        if not args['stochastic']:
            score = score / np.array([len(s) for s in sample])
            sample = sample[score.argmin()]
            score = score.min()
        else:
            score /= float(len(sample))

        return sample, np.exp(score)
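    # Sketch of the length normalisation above, with toy numbers (beam
    # scores are accumulated negative log-probabilities, so dividing by the
    # hypothesis length and taking argmin picks the best per-word score):
    #
    #   import numpy as np
    #   score  = np.array([6.0, 5.0])           # total -log p of two hypotheses
    #   length = np.array([3., 2.])
    #   norm   = score / length                 # [2.0, 2.5] -> keep hypothesis 0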


class AutoEncoder(RNNLM):
    """
    Regular Auto-Encoder: RNN Encoder/Decoder
    """

    def __init__(self,
                 config, n_rng, rng,
                 mode='Evaluation'):
        super(RNNLM, self).__init__()

        self.config = config
        self.n_rng  = n_rng  # numpy random stream
        self.rng    = rng  # Theano random stream
        self.mode   = mode
        self.name = 'vae'

    def build_(self):
        logger.info("build the RNN auto-encoder")
        self.encoder = Encoder(self.config, self.rng, prefix='enc')
        if self.config['shared_embed']:
            self.decoder = Decoder(self.config, self.rng, prefix='dec', embed=self.encoder.Embed)
        else:
            self.decoder = Decoder(self.config, self.rng, prefix='dec')

        """
        Build the Transformation
        """
        if self.config['nonlinear_A']:
            self.action_trans = Dense(
                self.config['enc_hidden_dim'],
                self.config['action_dim'],
                activation='tanh',
                name='action_transform'
            )
        else:
            assert self.config['enc_hidden_dim'] == self.config['action_dim'], \
                    'hidden dimension must match action dimension'
            self.action_trans = Identity(name='action_transform')

        if self.config['nonlinear_B']:
            self.context_trans = Dense(
                self.config['action_dim'],
                self.config['dec_contxt_dim'],
                activation='tanh',
                name='context_transform'
            )
        else:
            assert self.config['dec_contxt_dim'] == self.config['action_dim'], \
                    'action dimension must match context dimension'
            self.context_trans = Identity(name='context_transform')

        # registration
        self._add(self.action_trans)
        self._add(self.context_trans)
        self._add(self.encoder)
        self._add(self.decoder)

        # objectives and optimizers
        self.optimizer = optimizers.get(self.config['optimizer'], kwargs={'lr': self.config['lr']})

        logger.info("create Helmholtz RECURRENT neural network. ok")

    def compile_train(self, mode='train'):
        # questions (theano variables)
        inputs  = T.imatrix()  # padded input word sequence (for training)
        context = alloc_zeros_matrix(inputs.shape[0], self.config['dec_contxt_dim'])
        assert context.ndim == 2

        # decoding.
        target  = inputs
        logPxz, logPPL = self.decoder.build_decoder(target, context)

        # reconstruction loss
        loss_rec = T.mean(-logPxz)
        loss_ppl = T.exp(T.mean(-logPPL))

        L1       = T.sum([T.sum(abs(w)) for w in self.params])
        loss     = loss_rec

        updates = self.optimizer.get_updates(self.params, loss)

        logger.info("compiling the compuational graph ::training function::")
        train_inputs = [inputs]

        self.train_ = theano.function(train_inputs,
                                      [loss_rec, loss_ppl],
                                      updates=updates,
                                      name='train_fun')
        logger.info("pre-training functions compile done.")

        if mode == 'display' or mode == 'all':
            """
            build the sampler function here <:::>
            """
            # context vectors (as)
            self.decoder.build_sampler()
            logger.info("display functions compile done.")

        # add monitoring:
        self._monitoring()

        # compiling monitoring
        self.compile_monitoring(train_inputs)


class NRM(Model):
    """
    Neural Responding Machine
    A Encoder-Decoder based responding model.
    """
    def __init__(self,
                 config, n_rng, rng,
                 mode='Evaluation',
                 use_attention=False,
                 copynet=False,
                 identity=False):
        super(NRM, self).__init__()

        self.config   = config
        self.n_rng    = n_rng  # numpy random stream
        self.rng      = rng  # Theano random stream
        self.mode     = mode
        self.name     = 'nrm'
        self.attend   = use_attention
        self.copynet  = copynet
        self.identity = identity

    def build_(self, lr=None, iterations=None):
        logger.info("build the Neural Responding Machine")

        # encoder-decoder:: <<==>>
        self.encoder = Encoder(self.config, self.rng, prefix='enc', mode=self.mode)
        if not self.attend:
            self.decoder = Decoder(self.config, self.rng, prefix='dec', mode=self.mode)
        else:
            self.decoder = DecoderAtt(self.config, self.rng, prefix='dec', mode=self.mode,
                                      copynet=self.copynet, identity=self.identity)

        self._add(self.encoder)
        self._add(self.decoder)

        # objectives and optimizers
        if self.config['optimizer'] == 'adam':
            self.optimizer = optimizers.get(self.config['optimizer'],
                                         kwargs=dict(rng=self.rng,
                                                     save=False))
        else:
            self.optimizer = optimizers.get(self.config['optimizer'])
        if lr is not None:
            self.optimizer.lr.set_value(floatX(lr))
            self.optimizer.iterations.set_value(floatX(iterations))
        logger.info("build ok.")

    def compile_(self, mode='all', contrastive=False):
        # compile the computational graph.
        # INFO: the parameters.
        # mode: 'train'/ 'display'/ 'policy' / 'all'

        ps = 'params: {\n'
        for p in self.params:
            ps += '{0}: {1}\n'.format(p.name, p.eval().shape)
        ps += '}.'
        logger.info(ps)

        param_num = np.sum([np.prod(p.shape.eval()) for p in self.params])
        logger.info("total number of the parameters of the model: {}".format(param_num))

        if mode == 'train' or mode == 'all':
            self.compile_train()

        if mode == 'display' or mode == 'all':
            self.compile_sample()

        if mode == 'inference' or mode == 'all':
            self.compile_inference()

    def compile_train(self):

        # questions (theano variables)
        inputs    = T.imatrix()  # padded input word sequence (for training)
        target    = T.imatrix()  # padded target word sequence (for training)
        cc_matrix = T.tensor3()

        # encoding & decoding

        code, _, c_mask, _ = self.encoder.build_encoder(inputs, None, return_sequence=True, return_embed=True)
        # code: (nb_samples, max_len, contxt_dim)
        if 'explicit_loc' in self.config:
            if self.config['explicit_loc']:
                print 'use explicit location!!'
                max_len = code.shape[1]
                expLoc  = T.eye(max_len, self.config['encode_max_len'], dtype='float32')[None, :, :]
                expLoc  = T.repeat(expLoc, code.shape[0], axis=0)
                code    = T.concatenate([code, expLoc], axis=2)

        logPxz, logPPL     = self.decoder.build_decoder(target, cc_matrix,
                                                        code, c_mask)

        # responding loss
        loss_rec = T.mean(-logPxz)
        loss_ppl = T.exp(T.mean(-logPPL))
        loss     = loss_rec

        updates  = self.optimizer.get_updates(self.params, loss)

        logger.info("compiling the compuational graph ::training function::")
        train_inputs = [inputs, target, cc_matrix]

        self.train_ = theano.function(train_inputs,
                                      [loss_rec, loss_ppl],
                                      updates=updates,
                                      name='train_fun')
        self.train_guard = theano.function(train_inputs,
                                      [loss_rec, loss_ppl],
                                      updates=updates,
                                      name='train_fun',
                                      mode=NanGuardMode(nan_is_error=True, inf_is_error=True, big_is_error=True))
        logger.info("training functions compile done.")

        # # add monitoring:
        # self.monitor['context'
SYMBOL INDEX (579 symbols across 46 files)

FILE: emolga/basic/activations.py
  function softmax (line 4) | def softmax(x):
  function vector_softmax (line 8) | def vector_softmax(x):
  function time_distributed_softmax (line 12) | def time_distributed_softmax(x):
  function softplus (line 18) | def softplus(x):
  function relu (line 22) | def relu(x):
  function tanh (line 26) | def tanh(x):
  function sigmoid (line 30) | def sigmoid(x):
  function hard_sigmoid (line 34) | def hard_sigmoid(x):
  function linear (line 38) | def linear(x):
  function maxout2 (line 45) | def maxout2(x):
  function get (line 68) | def get(identifier):

FILE: emolga/basic/initializations.py
  function get_fans (line 8) | def get_fans(shape):
  function uniform (line 16) | def uniform(shape, scale=0.1):
  function normal (line 20) | def normal(shape, scale=0.05):
  function lecun_uniform (line 24) | def lecun_uniform(shape):
  function glorot_normal (line 33) | def glorot_normal(shape):
  function glorot_uniform (line 41) | def glorot_uniform(shape):
  function he_normal (line 47) | def he_normal(shape):
  function he_uniform (line 55) | def he_uniform(shape):
  function orthogonal (line 61) | def orthogonal(shape, scale=1.1):
  function identity (line 73) | def identity(shape, scale=1):
  function zero (line 80) | def zero(shape):
  function one (line 84) | def one(shape):
  function get (line 88) | def get(identifier):

FILE: emolga/basic/objectives.py
  function mean_squared_error (line 13) | def mean_squared_error(y_true, y_pred):
  function mean_absolute_error (line 17) | def mean_absolute_error(y_true, y_pred):
  function mean_absolute_percentage_error (line 21) | def mean_absolute_percentage_error(y_true, y_pred):
  function mean_squared_logarithmic_error (line 25) | def mean_squared_logarithmic_error(y_true, y_pred):
  function squared_hinge (line 29) | def squared_hinge(y_true, y_pred):
  function hinge (line 33) | def hinge(y_true, y_pred):
  function categorical_crossentropy (line 37) | def categorical_crossentropy(y_true, y_pred):
  function binary_crossentropy (line 47) | def binary_crossentropy(y_true, y_pred):
  function poisson_loss (line 53) | def poisson_loss(y_true, y_pred):
  function gaussian_kl_divergence (line 59) | def gaussian_kl_divergence(mean, ln_var):
  function get (line 97) | def get(identifier):

FILE: emolga/basic/optimizers.py
  function clip_norm (line 17) | def clip_norm(g, c, n):
  function kl_divergence (line 23) | def kl_divergence(p, p_hat):
  class Optimizer (line 27) | class Optimizer(object):
    method __init__ (line 28) | def __init__(self, **kwargs):
    method add (line 33) | def add(self, v):
    method get_state (line 36) | def get_state(self):
    method set_state (line 39) | def set_state(self, value_list):
    method get_updates (line 44) | def get_updates(self, params, loss):
    method get_gradients (line 47) | def get_gradients(self, loss, params):
    method get_config (line 63) | def get_config(self):
  class SGD (line 67) | class SGD(Optimizer):
    method __init__ (line 69) | def __init__(self, lr=0.05, momentum=0.9, decay=0.01, nesterov=True, *...
    method get_updates (line 76) | def get_updates(self, params, loss):
    method get_config (line 94) | def get_config(self):
  class RMSprop (line 102) | class RMSprop(Optimizer):
    method __init__ (line 103) | def __init__(self, lr=0.001, rho=0.9, epsilon=1e-6, *args, **kwargs):
    method get_updates (line 110) | def get_updates(self, params, loss):
    method get_config (line 123) | def get_config(self):
  class Adagrad (line 130) | class Adagrad(Optimizer):
    method __init__ (line 131) | def __init__(self, lr=0.01, epsilon=1e-6, *args, **kwargs):
    method get_updates (line 136) | def get_updates(self, params, constraints, loss):
    method get_config (line 148) | def get_config(self):
  class Adadelta (line 154) | class Adadelta(Optimizer):
    method __init__ (line 158) | def __init__(self, lr=0.1, rho=0.95, epsilon=1e-6, *args, **kwargs):
    method get_updates (line 164) | def get_updates(self, params, loss):
    method get_config (line 187) | def get_config(self):
  class Adam (line 194) | class Adam(Optimizer):  # new Adam is designed for our purpose.
    method __init__ (line 201) | def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8, s...
    method add_noise (line 216) | def add_noise(self, param):
    method add_forget (line 221) | def add_forget(self, param):
    method get_updates (line 226) | def get_updates(self, params, loss):
  function get (line 276) | def get(identifier, kwargs=None):

FILE: emolga/config.py
  function setup_ptb2 (line 5) | def setup_ptb2():

FILE: emolga/config_variant.py
  function setup_bienc (line 10) | def setup_bienc(config=None):
  function setup_dim (line 20) | def setup_dim(config=None):
  function setup_rep (line 35) | def setup_rep(config=None):
  function setup_opt (line 44) | def setup_opt(config=None):

FILE: emolga/dataset/build_dataset.py
  function serialize_to_file (line 15) | def serialize_to_file(obj, path, protocol=cPickle.HIGHEST_PROTOCOL):
  function show_txt (line 21) | def show_txt(array, path):
  function divide_dataset (line 29) | def divide_dataset(dataset, test_size, max_size):
  function deserialize_from_file (line 40) | def deserialize_from_file(path):
  function build_fuel (line 47) | def build_fuel(data):
  function obtain_stream (line 55) | def obtain_stream(dataset, batch_size, size=1):
  function build_ptb (line 70) | def build_ptb():
  function filter_unk (line 99) | def filter_unk(X, min_freq=5):
  function build_msr (line 125) | def build_msr():

FILE: emolga/layers/attention.py
  class Attention (line 11) | class Attention(Layer):
    method __init__ (line 12) | def __init__(self, target_dim, source_dim, hidden_dim,
    method __call__ (line 42) | def __call__(self, X, S,
  class CosineAttention (line 80) | class CosineAttention(Layer):
    method __init__ (line 81) | def __init__(self, target_dim, source_dim,
    method __call__ (line 115) | def __call__(self, X, S, Smask=None, return_log=False):

FILE: emolga/layers/core.py
  class Layer (line 8) | class Layer(object):
    method __init__ (line 9) | def __init__(self):
    method init_updates (line 15) | def init_updates(self):
    method _monitoring (line 18) | def _monitoring(self):
    method __call__ (line 26) | def __call__(self, X, *args, **kwargs):
    method _add (line 29) | def _add(self, layer):
    method supports_masked_input (line 34) | def supports_masked_input(self):
    method get_output_mask (line 40) | def get_output_mask(self, train=None):
    method set_weights (line 56) | def set_weights(self, weights):
    method get_weights (line 62) | def get_weights(self):
    method get_params (line 68) | def get_params(self):
    method set_name (line 71) | def set_name(self, name):
  class MaskedLayer (line 80) | class MaskedLayer(Layer):
    method supports_masked_input (line 85) | def supports_masked_input(self):
  class Identity (line 89) | class Identity(Layer):
    method __init__ (line 90) | def __init__(self, name='Identity'):
    method __call__ (line 95) | def __call__(self, X):
  class Dense (line 99) | class Dense(Layer):
    method __init__ (line 100) | def __init__(self, input_dim, output_dim, init='glorot_uniform', activ...
    method set_name (line 126) | def set_name(self, name):
    method __call__ (line 130) | def __call__(self, X):
    method reverse (line 134) | def reverse(self, Y):
  class Dense2 (line 141) | class Dense2(Layer):
    method __init__ (line 142) | def __init__(self, input_dim1, input_dim2, output_dim, init='glorot_un...
    method set_name (line 167) | def set_name(self, name):
    method __call__ (line 172) | def __call__(self, X1, X2):
  class Constant (line 177) | class Constant(Layer):
    method __init__ (line 178) | def __init__(self, input_dim, output_dim, init=None, activation='tanh'...
    method set_name (line 194) | def set_name(self, name):
    method __call__ (line 197) | def __call__(self, X=None):
  class MemoryLinear (line 205) | class MemoryLinear(Layer):
    method __init__ (line 206) | def __init__(self, input_dim, input_wdth, init='glorot_uniform',
    method __call__ (line 225) | def __call__(self, X=None):
  class Dropout (line 232) | class Dropout(MaskedLayer):
    method __init__ (line 236) | def __init__(self, rng=None, p=1., name=None):
    method __call__ (line 241) | def __call__(self, X, train=True):
  class Activation (line 251) | class Activation(MaskedLayer):
    method __init__ (line 255) | def __init__(self, activation):
    method __call__ (line 259) | def __call__(self, X):

FILE: emolga/layers/embeddings.py
  class Embedding (line 8) | class Embedding(Layer):
    method __init__ (line 17) | def __init__(self, input_dim, output_dim, init='uniform', name=None):
    method get_output_mask (line 31) | def get_output_mask(self, X):
    method __call__ (line 34) | def __call__(self, X, mask_zero=False, context=None):
  class Zero (line 68) | class Zero(Layer):
    method __call__ (line 69) | def __call__(self, X):
  class Bias (line 74) | class Bias(Layer):
    method __call__ (line 75) | def __call__(self, X):

FILE: emolga/layers/gridlstm.py
  class Grid (line 13) | class Grid(Recurrent):
    method __init__ (line 41) | def __init__(self,
    method build (line 86) | def build(self):
    method lstm_ (line 140) | def lstm_(self, k, H, m, x, identity=False):
    method grid_ (line 195) | def grid_(self,
  class GridLSTM3D (line 243) | class GridLSTM3D(Grid):
    method __init__ (line 248) | def __init__(self,
    method _step (line 335) | def _step(self, *args):
    method __call__ (line 386) | def __call__(self, X, init_H=None, init_M=None,
  class SequentialGridLSTM (line 450) | class SequentialGridLSTM(Grid):
    method __init__ (line 460) | def __init__(self,
    method _step (line 569) | def _step(self, *args):
    method __call__ (line 620) | def __call__(self, X, init_H=None, init_M=None,
  class PyramidGridLSTM2D (line 683) | class PyramidGridLSTM2D(Grid):
    method __init__ (line 687) | def __init__(self,
    method _step (line 772) | def _step(self, *args):
    method __call__ (line 825) | def __call__(self, X, init_x=None, init_y=None,
  class PyramidLSTM (line 876) | class PyramidLSTM(Layer):
    method __init__ (line 880) | def __init__(self,
    method _step (line 975) | def _step(self, *args):
    method __call__ (line 1033) | def __call__(self, X, init_x=None, init_y=None,

FILE: emolga/layers/ntm_minibatch.py
  class Reader (line 17) | class Reader(Layer):
    method __init__ (line 22) | def __init__(self, input_dim, memory_width, shift_width, shift_conv,
    method __call__ (line 67) | def __call__(self, X, w_temp, m_temp):
  class Writer (line 93) | class Writer(Reader):
    method __init__ (line 98) | def __init__(self, input_dim, memory_width, shift_width, shift_conv,
    method get_fixer (line 119) | def get_fixer(self, X):
  class Controller (line 125) | class Controller(Recurrent):
    method __init__ (line 134) | def __init__(self,
    method _controller (line 216) | def _controller(self, input_t, read_t, controller_tm1=None):
    method _read (line 231) | def _read(w_read, memory):
    method _write (line 239) | def _write(w_write, memory, erase, add):
    method _step (line 252) | def _step(self, input_t, mask_t,
    method __call__ (line 302) | def __call__(self, X, mask=None, M=None, init_ww=None,
  class AttentionReader (line 369) | class AttentionReader(Layer):
    method __init__ (line 374) | def __init__(self, input_dim, memory_width, shift_width, shift_conv,
    method __call__ (line 424) | def __call__(self, X, w_temp, m_temp):
  class AttentionWriter (line 454) | class AttentionWriter(AttentionReader):
    method __init__ (line 459) | def __init__(self, input_dim, memory_width, shift_width, shift_conv,
    method get_fixer (line 480) | def get_fixer(self, X):
  class BernoulliController (line 487) | class BernoulliController(Recurrent):
    method __init__ (line 496) | def __init__(self,
    method _controller (line 579) | def _controller(self, input_t, read_t, controller_tm1=None):
    method _read (line 594) | def _read(w_read, memory):
    method _write (line 602) | def _write(w_write, memory, erase, add):
    method _step (line 618) | def _step(self, input_t, mask_t,
    method __call__ (line 668) | def __call__(self, X, mask=None, M=None, init_ww=None,

FILE: emolga/layers/recurrent.py
  class Recurrent (line 6) | class Recurrent(MaskedLayer):
    method get_padded_shuffled_mask (line 12) | def get_padded_shuffled_mask(mask, pad=0):
  class GRU (line 31) | class GRU(Recurrent):
    method __init__ (line 54) | def __init__(self,
    method _step (line 115) | def _step(self,
    method _step_gate (line 128) | def _step_gate(self,
    method __call__ (line 141) | def __call__(self, X, mask=None, C=None, init_h=None,
  class JZS3 (line 226) | class JZS3(Recurrent):
    method __init__ (line 246) | def __init__(self,
    method _step (line 307) | def _step(self,
    method __call__ (line 318) | def __call__(self, X, mask=None, C=None, init_h=None, return_sequence=...
  class LSTM (line 373) | class LSTM(Recurrent):
    method __init__ (line 374) | def __init__(self,
    method _step (line 448) | def _step(self,
    method input_embed (line 468) | def input_embed(self, X, C=None):
    method __call__ (line 487) | def __call__(self, X, mask=None, C=None, init_h=None, init_c=None, ret...

FILE: emolga/models/core.py
  class Model (line 12) | class Model(object):
    method __init__ (line 13) | def __init__(self):
    method _add (line 19) | def _add(self, layer):
    method _monitoring (line 24) | def _monitoring(self):
    method compile_monitoring (line 32) | def compile_monitoring(self, inputs, updates=None):
    method set_weights (line 44) | def set_weights(self, weights):
    method get_weights (line 56) | def get_weights(self):
    method set_name (line 67) | def set_name(self, name):
    method save (line 75) | def save(self, filename):
    method load (line 90) | def load(self, filename):

FILE: emolga/models/covc_encdec.py
  class Encoder (line 22) | class Encoder(Model):
    method __init__ (line 28) | def __init__(self,
    method build_encoder (line 102) | def build_encoder(self, source, context=None, return_embed=False,
    method compile_encoder (line 175) | def compile_encoder(self, with_context=False, return_embed=False, retu...
  class Decoder (line 203) | class Decoder(Model):
    method __init__ (line 213) | def __init__(self,
    method _grab_prob (line 379) | def _grab_prob(probs, X, block_unk=False):
    method prepare_xy (line 392) | def prepare_xy(self, target):
    method build_decoder (line 407) | def build_decoder(self, target, context=None,
    method _step_sample (line 485) | def _step_sample(self, prev_word, prev_stat, context):
    method build_sampler (line 539) | def build_sampler(self):
    method build_stochastic_sampler (line 581) | def build_stochastic_sampler(self):
    method get_sample (line 592) | def get_sample(self, context, k=1, maxlen=30, stochastic=True, argmax=...
  class DecoderAtt (line 703) | class DecoderAtt(Decoder):
    method __init__ (line 708) | def __init__(self,
    method prepare_xy (line 768) | def prepare_xy(self, target, cc_matrix):
    method build_decoder (line 791) | def build_decoder(self,
    method _step_sample (line 918) | def _step_sample(self,
    method build_sampler (line 998) | def build_sampler(self):
    method get_sample (line 1052) | def get_sample(self,
  class FnnDecoder (line 1242) | class FnnDecoder(Model):
    method __init__ (line 1243) | def __init__(self, config, rng, prefix='fnndec'):
    method _grab_prob (line 1271) | def _grab_prob(probs, X):
    method build_decoder (line 1281) | def build_decoder(self, target, context):
    method build_sampler (line 1289) | def build_sampler(self):
    method get_sample (line 1296) | def get_sample(self, context, argmax=True):
  class RNNLM (line 1308) | class RNNLM(Model):
    method __init__ (line 1313) | def __init__(self,
    method build_ (line 1324) | def build_(self):
    method compile_ (line 1341) | def compile_(self, mode='train', contrastive=False):
    method compile_train (line 1367) | def compile_train(self):
    method compile_train_CE (line 1408) | def compile_train_CE(self):
    method compile_sample (line 1411) | def compile_sample(self):
    method compile_inference (line 1417) | def compile_inference(self):
    method default_context (line 1420) | def default_context(self):
    method generate_ (line 1428) | def generate_(self, context=None, max_len=None, mode='display'):
  class AutoEncoder (line 1456) | class AutoEncoder(RNNLM):
    method __init__ (line 1461) | def __init__(self,
    method build_ (line 1472) | def build_(self):
    method compile_train (line 1518) | def compile_train(self, mode='train'):
  class NRM (line 1561) | class NRM(Model):
    method __init__ (line 1566) | def __init__(self,
    method build_ (line 1583) | def build_(self, lr=None, iterations=None):
    method compile_ (line 1609) | def compile_(self, mode='all', contrastive=False):
    method compile_train (line 1632) | def compile_train(self):
    method compile_sample (line 1682) | def compile_sample(self):
    method compile_inference (line 1691) | def compile_inference(self):
    method generate_ (line 1694) | def generate_(self, inputs, mode='display', return_attend=False, retur...
    method evaluate_ (line 1737) | def evaluate_(self, inputs, outputs, idx2word, inputs_unk=None, encode...
    method analyse_ (line 1831) | def analyse_(self, inputs, outputs, idx2word, inputs_unk=None, return_...
    method analyse_cover (line 1888) | def analyse_cover(self, inputs, outputs, idx2word, inputs_unk=None, re...

FILE: emolga/models/encdec.py
  class Encoder (line 172) | class Encoder(Model):
    method __init__ (line 178) | def __init__(self,
    method build_encoder (line 252) | def build_encoder(self, source, context=None, return_embed=False, retu...
    method compile_encoder (line 288) | def compile_encoder(self, with_context=False, return_embed=False, retu...
  class Decoder (line 306) | class Decoder(Model):
    method __init__ (line 316) | def __init__(self,
    method _grab_prob (line 474) | def _grab_prob(probs, X):
    method prepare_xy (line 487) | def prepare_xy(self, target):
    method build_decoder (line 502) | def build_decoder(self, target, context=None,
    method _step_sample (line 580) | def _step_sample(self, prev_word, prev_stat, context):
    method build_sampler (line 634) | def build_sampler(self):
    method build_stochastic_sampler (line 677) | def build_stochastic_sampler(self):
    method get_sample (line 688) | def get_sample(self, context, k=1, maxlen=30, stochastic=True, argmax=...
  class DecoderAtt (line 799) | class DecoderAtt(Decoder):
    method __init__ (line 804) | def __init__(self,
    method prepare_xy (line 849) | def prepare_xy(self, target, context=None):
    method build_decoder (line 865) | def build_decoder(self,
    method _step_sample (line 948) | def _step_sample(self, prev_word, prev_stat, context, c_mask):
    method build_sampler (line 1010) | def build_sampler(self):
    method get_sample (line 1052) | def get_sample(self, context, c_mask, k=1, maxlen=30, stochastic=True,...
  class FnnDecoder (line 1164) | class FnnDecoder(Model):
    method __init__ (line 1165) | def __init__(self, config, rng, prefix='fnndec'):
    method _grab_prob (line 1193) | def _grab_prob(probs, X):
    method build_decoder (line 1203) | def build_decoder(self, target, context):
    method build_sampler (line 1211) | def build_sampler(self):
    method get_sample (line 1218) | def get_sample(self, context, argmax=True):
  class RNNLM (line 1230) | class RNNLM(Model):
    method __init__ (line 1235) | def __init__(self,
    method build_ (line 1246) | def build_(self):
    method compile_ (line 1263) | def compile_(self, mode='train', contrastive=False):
    method compile_train (line 1289) | def compile_train(self):
    method compile_train_CE (line 1330) | def compile_train_CE(self):
    method compile_sample (line 1333) | def compile_sample(self):
    method compile_inference (line 1339) | def compile_inference(self):
    method default_context (line 1342) | def default_context(self):
    method generate_ (line 1350) | def generate_(self, context=None, max_len=None, mode='display'):
  class AutoEncoder (line 1378) | class AutoEncoder(RNNLM):
    method __init__ (line 1383) | def __init__(self,
    method build_ (line 1394) | def build_(self):
    method compile_train (line 1440) | def compile_train(self, mode='train'):
  class NRM (line 1483) | class NRM(Model):
    method __init__ (line 1488) | def __init__(self,
    method build_ (line 1505) | def build_(self):
    method compile_ (line 1527) | def compile_(self, mode='all', contrastive=False):
    method compile_train (line 1550) | def compile_train(self):
    method compile_sample (line 1588) | def compile_sample(self):
    method compile_inference (line 1597) | def compile_inference(self):
    method generate_ (line 1600) | def generate_(self, inputs, mode='display', return_all=False):
    method evaluate_ (line 1662) | def evaluate_(self, inputs, outputs, idx2word, inputs_unk=None):
    method analyse_ (line 1689) | def analyse_(self, inputs, outputs, idx2word):
    method analyse_cover (line 1706) | def analyse_cover(self, inputs, outputs, idx2word):

FILE: emolga/models/ntm_encdec.py
  class RecurrentBase (line 20) | class RecurrentBase(Model):
    method __init__ (line 24) | def __init__(self, config, model='RNN', prefix='enc', use_contxt=True,...
    method get_context (line 105) | def get_context(self, context):
    method loop (line 127) | def loop(self, X, X_mask, info=None, return_sequence=False, return_ful...
    method step (line 135) | def step(self, X, prev_info):
    method build_ (line 153) | def build_(self):
    method get_init (line 209) | def get_init(self, context):
    method get_next_state (line 228) | def get_next_state(self, prev_X, prev_info):
  class Encoder (line 255) | class Encoder(Model):
    method __init__ (line 261) | def __init__(self,
    method build_encoder (line 306) | def build_encoder(self, source, context=None):
  class Decoder (line 347) | class Decoder(Model):
    method __init__ (line 357) | def __init__(self,
    method _grab_prob (line 462) | def _grab_prob(probs, X):
    method prepare_xy (line 475) | def prepare_xy(self, target):
    method build_decoder (line 490) | def build_decoder(self, target, context=None, return_count=False):
    method _step_embed (line 527) | def _step_embed(self, prev_word):
    method _step_sample (line 540) | def _step_sample(self, X, next_stat, context):
    method build_sampler (line 563) | def build_sampler(self):
    method get_sample (line 595) | def get_sample(self, context, k=1, maxlen=30, stochastic=True, argmax=...
  class RNNLM (line 717) | class RNNLM(Model):
    method __init__ (line 722) | def __init__(self,
    method build_ (line 733) | def build_(self):
    method compile_ (line 749) | def compile_(self, mode='train', contrastive=False):
    method compile_train (line 775) | def compile_train(self):
    method compile_train_CE (line 815) | def compile_train_CE(self):
    method compile_sample (line 818) | def compile_sample(self):
    method compile_inference (line 823) | def compile_inference(self):
    method default_context (line 826) | def default_context(self):
    method generate_ (line 834) | def generate_(self, context=None, mode='display', max_len=None):
  class Helmholtz (line 862) | class Helmholtz(RNNLM):
    method __init__ (line 871) | def __init__(self,
    method build_ (line 882) | def build_(self):
    method compile_train (line 951) | def compile_train(self):
    method compile_sample (line 1053) | def compile_sample(self):
    method compile_inference (line 1077) | def compile_inference(self):
    method default_context (line 1100) | def default_context(self):
  class BinaryHelmholtz (line 1105) | class BinaryHelmholtz(RNNLM):
    method __init__ (line 1114) | def __init__(self,
    method build_ (line 1125) | def build_(self):
    method compile_train (line 1175) | def compile_train(self):
    method compile_sample (line 1277) | def compile_sample(self):
    method compile_inference (line 1300) | def compile_inference(self):
    method default_context (line 1323) | def default_context(self):
  class AutoEncoder (line 1328) | class AutoEncoder(RNNLM):
    method __init__ (line 1334) | def __init__(self,
    method build_ (line 1345) | def build_(self):
    method compile_train (line 1368) | def compile_train(self, mode='train'):
    method compile_sample (line 1404) | def compile_sample(self):

FILE: emolga/models/pointers.py
  class PtrDecoder (line 19) | class PtrDecoder(Model):
    method __init__ (line 23) | def __init__(self,
    method grab_prob (line 64) | def grab_prob(probs, X):
    method grab_source (line 75) | def grab_source(source, target):
    method build_decoder (line 91) | def build_decoder(self,
    method _step_sample (line 139) | def _step_sample(self, prev_idx, prev_stat,
    method build_sampler (line 156) | def build_sampler(self):
    method get_sample (line 199) | def get_sample(self, context, inputs, source, smask,
  class PointerDecoder (line 316) | class PointerDecoder(Model):
    method __init__ (line 321) | def __init__(self,
    method grab_prob (line 370) | def grab_prob(probs, X):
    method grab_source (line 381) | def grab_source(source, target):
    method build_decoder (line 397) | def build_decoder(self,
    method _step_sample (line 473) | def _step_sample(self,
    method build_sampler (line 495) | def build_sampler(self):
    method get_sample (line 540) | def get_sample(self, context, inputs, source, smask,
  class MemNet (line 616) | class MemNet(Model):
    method __init__ (line 621) | def __init__(self,
    method __call__ (line 653) | def __call__(self, key, memory=None, mem_mask=None, out_memory=None):
  class PtrNet (line 675) | class PtrNet(Model):
    method __init__ (line 679) | def __init__(self, config, n_rng, rng,
    method build_ (line 689) | def build_(self, encoder=None):
    method build_train (line 743) | def build_train(self, memory=None, out_memory=None, compile_train=Fals...
    method build_sampler (line 881) | def build_sampler(self, memory=None, out_mem=None):
    method build_predict_sampler (line 925) | def build_predict_sampler(self):
    method generate_ (line 973) | def generate_(self, inputs, context, source, smask):
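
The grab_prob helpers listed above for PtrDecoder and PointerDecoder share one job: given per-step output distributions and the padded target matrix, gather the probability assigned to each target token. A minimal numpy sketch of that gather, assuming probs has shape (batch, time, vocab) and X holds integer target indices; the Theano versions in pointers.py may also fold in masking:

import numpy as np

def grab_prob_np(probs, X):
    """Gather probs[b, t, X[b, t]] for every batch item b and timestep t.

    probs: float array, shape (batch, time, vocab) -- per-step distributions
    X:     int array,   shape (batch, time)        -- target word indices
    """
    batch, time, vocab = probs.shape
    flat = probs.reshape(batch * time, vocab)   # one row per decoding step
    rows = np.arange(batch * time)
    return flat[rows, X.reshape(-1)].reshape(batch, time)

probs = np.full((2, 3, 5), 0.2)                 # toy uniform distributions
X = np.array([[1, 2, 3], [0, 0, 4]])
print(grab_prob_np(probs, X))                   # every entry is 0.2 here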

FILE: emolga/models/variational.py
  class VAE (line 19) | class VAE(RNNLM):
    method __init__ (line 33) | def __init__(self,
    method _add_tag (line 45) | def _add_tag(self, layer, tag):
    method build_ (line 52) | def build_(self):
    method compile_train (line 115) | def compile_train(self):
    method compile_sample (line 176) | def compile_sample(self):
    method compile_inference (line 192) | def compile_inference(self):
    method default_context (line 208) | def default_context(self):
  class Helmholtz (line 212) | class Helmholtz(VAE):
    method __init__ (line 220) | def __init__(self,
    method build_ (line 235) | def build_(self):
    method dynamic (line 301) | def dynamic(self):
    method compile_ (line 330) | def compile_(self, mode='train', contrastive=False):
    method compile_train (line 356) | def compile_train(self):
    method build_dynamics (line 477) | def build_dynamics(self, states, action, Y):
    method compile_sample (line 487) | def compile_sample(self):
    method compile_inference (line 523) | def compile_inference(self):
    method evaluate_ (line 539) | def evaluate_(self, inputs):
    method compile_train_CE (line 564) | def compile_train_CE(self):
  class HarX (line 695) | class HarX(Helmholtz):
    method __init__ (line 705) | def __init__(self,
    method build_ (line 720) | def build_(self):
    method compile_ (line 798) | def compile_(self, mode='train', contrastive=False):
    method compile_train (line 824) | def compile_train(self):
    method generate_ (line 1015) | def generate_(self, context=None, max_len=None, mode='display'):
  class THarX (line 1023) | class THarX(Helmholtz):
    method __init__ (line 1033) | def __init__(self,
    method build_ (line 1048) | def build_(self):
    method compile_ (line 1126) | def compile_(self, mode='train', contrastive=False):
    method compile_train (line 1152) | def compile_train(self):
    method generate_ (line 1350) | def generate_(self, context=None, max_len=None, mode='display'):
  class NVTM (line 1358) | class NVTM(Helmholtz):
    method __init__ (line 1364) | def __init__(self,
    method build_ (line 1376) | def build_(self):
    method compile_ (line 1492) | def compile_(self, mode='train', contrastive=False):
    method compile_train (line 1518) | def compile_train(self):
    method generate_ (line 1701) | def generate_(self, context=None, max_len=None, mode='display'):
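
The VAE, Helmholtz, and NVTM classes above follow the standard variational recipe, so their build_ methods presumably sample the latent code with the reparameterization trick. As a reference only (the repo's Theano wiring may differ), the step looks like this in numpy:

import numpy as np

rng = np.random.RandomState(0)

def reparameterize(mu, log_sigma):
    """Sample z = mu + sigma * eps with eps ~ N(0, I).

    Drawing eps outside the parameters keeps the sample
    differentiable with respect to mu and log_sigma.
    """
    eps = rng.standard_normal(mu.shape)
    return mu + np.exp(log_sigma) * eps

mu = np.zeros((4, 8))           # toy posterior means, batch of 4
log_sigma = np.zeros((4, 8))    # log std-devs (sigma = 1 here)
print(reparameterize(mu, log_sigma).shape)   # (4, 8)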

FILE: emolga/run.py
  function simulator (line 79) | def simulator(M=25, display=False):
  function learner (line 162) | def learner(data, fr=1., fs=1., fb=1.):
  function SL_learner (line 182) | def SL_learner(data, batch_size=25):
  function main (line 226) | def main():
  function check_answer (line 246) | def check_answer(x, y, g):
  function display_session (line 258) | def display_session(x, y, g, t, acc, cov):
  function main_sl (line 282) | def main_sl():

FILE: emolga/test_lm.py
  function init_logging (line 18) | def init_logging(logfile):
  function prepare_batch (line 98) | def prepare_batch(batch):

FILE: emolga/test_nvtm.py
  function init_logging (line 18) | def init_logging(logfile):
  function prepare_batch (line 98) | def prepare_batch(batch):

FILE: emolga/test_run.py
  function simulator (line 82) | def simulator(M=25, display=False):
  function learner (line 165) | def learner(data, fr=1., fs=1., fb=1.):
  function SL_learner (line 185) | def SL_learner(data, batch_size=25, eval_freq=0, eval_train=None, eval_t...
  function main (line 261) | def main():
  function check_answer (line 281) | def check_answer(x, y, g):
  function display_session (line 293) | def display_session(x, y, g, t, acc, cov):
  function SL_test (line 317) | def SL_test(test_set):
  function main_sl (line 346) | def main_sl():

FILE: emolga/utils/generic_utils.py
  function get_from_module (line 10) | def get_from_module(identifier, module_params, module_name, instantiate=...
  function make_tuple (line 24) | def make_tuple(*args):
  function printv (line 28) | def printv(v, prefix=''):
  function make_batches (line 50) | def make_batches(size, batch_size):
  function slice_X (line 55) | def slice_X(X, start=None, stop=None):
  class Progbar (line 68) | class Progbar(object):
    method __init__ (line 69) | def __init__(self, target, width=30, verbose=1):
    method update (line 82) | def update(self, current, values=[]):
    method add (line 152) | def add(self, n, values=[]):
    method clear (line 155) | def clear(self):
  function print_sample (line 162) | def print_sample(idx2word, idx):
  function visualize_ (line 172) | def visualize_(subplots, data, w=None, h=None, name=None,
  function vis_Gaussian (line 249) | def vis_Gaussian(subplot, mean, std, name=None, display='off', size=10):
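
Among the helpers above, make_batches is the batching primitive the training scripts lean on. A plausible rendering, assuming it returns (start, end) index pairs with a ragged final batch (the in-repo version may differ):

def make_batches_sketch(size, batch_size):
    """Split range(size) into (start, end) pairs; the last pair may be short."""
    nb_batch = (size + batch_size - 1) // batch_size
    return [(i * batch_size, min(size, (i + 1) * batch_size))
            for i in range(nb_batch)]

print(make_batches_sketch(10, 4))   # [(0, 4), (4, 8), (8, 10)]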

FILE: emolga/utils/io_utils.py
  class HDF5Matrix (line 8) | class HDF5Matrix():
    method __init__ (line 11) | def __init__(self, datapath, dataset, start, end, normalizer=None):
    method __len__ (line 22) | def __len__(self):
    method __getitem__ (line 25) | def __getitem__(self, key):
    method shape (line 52) | def shape(self):
  function save_array (line 56) | def save_array(array, name):
  function load_array (line 65) | def load_array(name):
  function save_config (line 75) | def save_config():
  function load_config (line 79) | def load_config():
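
HDF5Matrix above wraps an on-disk HDF5 dataset so that a [start, end) window of it can be indexed like an in-memory array. A stripped-down stand-in showing just the windowed indexing idea; the repo's class additionally accepts a normalizer callback, which this sketch omits:

import h5py

class MiniHDF5Matrix(object):
    """Expose rows [start, end) of an HDF5 dataset with list-like access."""

    def __init__(self, datapath, dataset, start, end):
        self.data = h5py.File(datapath, 'r')[dataset]
        self.start, self.end = start, end

    def __len__(self):
        return self.end - self.start

    def __getitem__(self, key):
        if isinstance(key, slice):         # shift the slice into the window
            lo = self.start + (key.start or 0)
            hi = self.start + (key.stop if key.stop is not None else len(self))
            return self.data[lo:hi]
        return self.data[self.start + key]  # single row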

FILE: emolga/utils/np_utils.py
  function to_categorical (line 8) | def to_categorical(y, nb_classes=None):
  function normalize (line 21) | def normalize(a, axis=-1, order=2):
  function binary_logloss (line 27) | def binary_logloss(p, y):
  function multiclass_logloss (line 36) | def multiclass_logloss(P, Y):
  function accuracy (line 43) | def accuracy(p, y):
  function probas_to_classes (line 47) | def probas_to_classes(y_pred):
  function categorical_probas_to_classes (line 53) | def categorical_probas_to_classes(p):

FILE: emolga/utils/test_utils.py
  function get_test_data (line 4) | def get_test_data(nb_train=1000, nb_test=500, input_shape=(10,), output_...

FILE: emolga/utils/theano_utils.py
  function floatX (line 10) | def floatX(X):
  function sharedX (line 14) | def sharedX(X, dtype=theano.config.floatX, name=None):
  function shared_zeros (line 18) | def shared_zeros(shape, dtype=theano.config.floatX, name=None):
  function shared_scalar (line 22) | def shared_scalar(val=0., dtype=theano.config.floatX, name=None):
  function shared_ones (line 26) | def shared_ones(shape, dtype=theano.config.floatX, name=None):
  function alloc_zeros_matrix (line 30) | def alloc_zeros_matrix(*dims):
  function alloc_ones_matrix (line 34) | def alloc_ones_matrix(*dims):
  function ndim_tensor (line 38) | def ndim_tensor(ndim):
  function ndim_itensor (line 51) | def ndim_itensor(ndim, name=None):
  function dot (line 62) | def dot(inp, matrix, bias=None):
  function logSumExp (line 87) | def logSumExp(x, axis=None, mask=None, status='theano', c=None, err=1e-7):
  function softmax (line 121) | def softmax(x):
  function masked_softmax (line 125) | def masked_softmax(x, mask, err=1e-9):
  function cosine_sim (line 133) | def cosine_sim(k, M):
  function cosine_sim2d (line 144) | def cosine_sim2d(k, M):
  function dot_2d (line 160) | def dot_2d(k, M, b=None, g=None):
  function shift_convolve (line 182) | def shift_convolve(weight, shift, shift_conv):
  function shift_convolve2d (line 187) | def shift_convolve2d(weight, shift, shift_conv):
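
masked_softmax above is the workhorse for attention over padded batches: it renormalizes scores while giving padding positions zero weight. Its numpy equivalent, matching the listed signature (the Theano internals are assumed):

import numpy as np

def masked_softmax_np(x, mask, err=1e-9):
    """Softmax over the last axis that ignores masked-out positions.

    x:    scores, shape (..., n)
    mask: 0/1 floats of the same shape; 0 marks padding to exclude
    """
    e = np.exp(x - x.max(axis=-1, keepdims=True)) * mask   # zero masked slots
    return e / (e.sum(axis=-1, keepdims=True) + err)       # renormalize

scores = np.array([[1.0, 2.0, 3.0]])
mask = np.array([[1.0, 1.0, 0.0]])        # third position is padding
print(masked_softmax_np(scores, mask))    # weight only on the first two slots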

FILE: experiments/bst_dataset.py
  class BSTnode (line 7) | class BSTnode(object):
    method __init__ (line 12) | def __init__(self, parent, t):
    method update_stats (line 20) | def update_stats(self):
    method insert (line 24) | def insert(self, t, NodeType):
    method find (line 42) | def find(self, t):
    method rank (line 57) | def rank(self, t):
    method minimum (line 73) | def minimum(self):
    method successor (line 81) | def successor(self):
    method delete (line 90) | def delete(self):
    method check (line 111) | def check(self, lokey, hikey):
    method __repr__ (line 128) | def __repr__(self):
  class BST (line 132) | class BST(object):
    method __init__ (line 140) | def __init__(self, NodeType=BSTnode):
    method reroot (line 145) | def reroot(self):
    method insert (line 148) | def insert(self, t):
    method find (line 157) | def find(self, t):
    method rank (line 164) | def rank(self, t):
    method delete (line 171) | def delete(self, t):
    method check (line 178) | def check(self):
    method __str__ (line 182) | def __str__(self):
  function printsizes (line 233) | def printsizes(node):
  function test (line 240) | def test(args=None, BSTtype=BST):
  function generate (line 263) | def generate():
  function obtain_dataset (line 290) | def obtain_dataset():
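
BSTnode/BST above implement a plain binary search tree used to generate the synthetic BST dataset. A usage sketch built only from the signatures listed; the return conventions (find yielding a node or None, rank counting keys up to t) are assumptions, not verified against the source:

# assumes experiments/ is on the import path
from experiments.bst_dataset import BST

tree = BST()
for key in [8, 3, 10, 1, 6]:
    tree.insert(key)        # grow the tree one key at a time

node = tree.find(6)         # assumed: the node holding 6, or None
print(tree.rank(6))         # assumed: how many keys are <= 6
tree.check()                # verify the BST invariants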

FILE: experiments/bst_vest.py
  function init_logging (line 28) | def init_logging(logfile):
  function build_data (line 72) | def build_data(data):
  function output_stream (line 127) | def output_stream(dataset, batch_size, size=1):
  function prepare_batch (line 136) | def prepare_batch(batch, mask, fix_len=None):
  function cc_martix (line 151) | def cc_martix(source, target):
  function unk_filter (line 160) | def unk_filter(data):
  function analysis_ (line 220) | def analysis_(data_plain, t_idx, mode='Training'):
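
cc_martix (the spelling used throughout the source) builds the alignment matrix the copy mechanism trains against: entry (i, j, k) records whether target token j of example i can be copied from source position k. A numpy sketch of that construction, assuming index 0 is padding; the in-repo version may mask or normalize differently:

import numpy as np

def cc_matrix_np(source, target):
    """Binary copy matrix: C[i, j, k] = 1 iff target[i, j] == source[i, k].

    source: int array (batch, src_len); target: int array (batch, trg_len)
    """
    batch, trg_len = target.shape
    src_len = source.shape[1]
    C = np.zeros((batch, trg_len, src_len), dtype='float32')
    for i in range(batch):
        for j in range(trg_len):
            for k in range(src_len):
                if target[i, j] != 0 and target[i, j] == source[i, k]:
                    C[i, j, k] = 1.0    # assumed: 0 is padding, never copied
    return C

src = np.array([[4, 7, 9, 0]])
trg = np.array([[7, 2, 9]])
print(cc_matrix_np(src, trg)[0])        # rows for tokens 7 and 9 light up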

FILE: experiments/config.py
  function setup (line 6) | def setup():
  function setup_syn (line 92) | def setup_syn():
  function setup_bst (line 280) | def setup_bst():
  function setup_lcsts (line 381) | def setup_lcsts():
  function setup_weibo (line 499) | def setup_weibo():

FILE: experiments/copynet.py
  function init_logging (line 20) | def init_logging(logfile):
  function build_data (line 64) | def build_data(source, target):
  function output_stream (line 91) | def output_stream(dataset, batch_size, size=1):
  function prepare_batch (line 100) | def prepare_batch(batch, mask):

FILE: experiments/copynet_input.py
  function init_logging (line 22) | def init_logging(logfile):
  function unk_filter (line 65) | def unk_filter(data):
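
unk_filter recurs in nearly every experiment script above; its role is to clip word indices beyond the model vocabulary down to the unknown-word id before a batch reaches the network. A sketch under two explicit assumptions: the vocabulary bound comes from the experiment config, and the unknown-word id is index 1:

import numpy as np

VOC_SIZE = 30000   # assumed: model vocabulary size, set in experiments/config.py
UNK_ID = 1         # assumed: unknown-word index (0 is typically padding)

def unk_filter_np(data):
    """Map any word index >= VOC_SIZE to UNK_ID; leave in-vocabulary ids alone."""
    keep = (data < VOC_SIZE).astype(data.dtype)
    return data * keep + (1 - keep) * UNK_ID

batch = np.array([[5, 31250, 12]])
print(unk_filter_np(batch))   # -> [[5, 1, 12]]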

FILE: experiments/dataset.py
  function repeat_name (line 33) | def repeat_name(l):
  function replace (line 52) | def replace(word):
  function print_str (line 82) | def print_str(data):

FILE: experiments/lcsts_dataset.py
  function build_data (line 117) | def build_data(data):

FILE: experiments/lcsts_rouge.py
  function build_evaluation (line 24) | def build_evaluation(train_set, segment):
  function init_logging (line 73) | def init_logging(logfile):
  function unk_filter (line 141) | def unk_filter(data):

FILE: experiments/lcsts_sample.py
  function init_logging (line 25) | def init_logging(logfile):
  function build_data (line 68) | def build_data(data):
  function unk_filter (line 79) | def unk_filter(data):

FILE: experiments/lcsts_test.py
  function init_logging (line 24) | def init_logging(logfile):
  function build_data (line 64) | def build_data(data):
  function output_stream (line 107) | def output_stream(dataset, batch_size, size=1):
  function prepare_batch (line 116) | def prepare_batch(batch, mask, fix_len=None):

FILE: experiments/lcsts_vest.py
  function init_logging (line 26) | def init_logging(logfile):
  function build_data (line 70) | def build_data(data):
  function output_stream (line 126) | def output_stream(dataset, batch_size, size=1):
  function prepare_batch (line 135) | def prepare_batch(batch, mask, fix_len=None):
  function cc_martix (line 150) | def cc_martix(source, target):
  function unk_filter (line 159) | def unk_filter(data):

FILE: experiments/lcsts_vest_new.py
  function init_logging (line 26) | def init_logging(logfile):
  function build_data (line 70) | def build_data(data):
  function output_stream (line 126) | def output_stream(dataset, batch_size, size=1):
  function prepare_batch (line 135) | def prepare_batch(batch, mask, fix_len=None):
  function cc_martix (line 150) | def cc_martix(source, target):
  function unk_filter (line 159) | def unk_filter(data):

FILE: experiments/movie_dataset.py
  function mark (line 15) | def mark(line):
  function build_data (line 75) | def build_data(data):

FILE: experiments/syn_vest.py
  function init_logging (line 27) | def init_logging(logfile):
  function build_data (line 71) | def build_data(data):
  function output_stream (line 122) | def output_stream(dataset, batch_size, size=1):
  function prepare_batch (line 131) | def prepare_batch(batch, mask, fix_len=None):
  function cc_martix (line 146) | def cc_martix(source, target):
  function unk_filter (line 155) | def unk_filter(data):
  function judge_rule (line 207) | def judge_rule(rule):
  function analysis_ (line 217) | def analysis_(data_plain, mode='Training'):

FILE: experiments/syntest.py
  function init_logging (line 23) | def init_logging(logfile):
  function build_data (line 74) | def build_data(data):
  function output_stream (line 118) | def output_stream(dataset, batch_size, size=1):
  function prepare_batch (line 127) | def prepare_batch(batch, mask, fix_len=None):

FILE: experiments/synthetic.py
  function ftr (line 37) | def ftr(v):
  function build_instance (line 46) | def build_instance():

FILE: experiments/weibo_dataset.py
  function build_data (line 75) | def build_data(data):

FILE: experiments/weibo_vest.py
  function init_logging (line 25) | def init_logging(logfile):
  function build_data (line 69) | def build_data(data):
  function output_stream (line 116) | def output_stream(dataset, batch_size, size=1):
  function prepare_batch (line 125) | def prepare_batch(batch, mask, fix_len=None):
  function cc_martix (line 140) | def cc_martix(source, target):
  function unk_filter (line 149) | def unk_filter(data):

Condensed preview: 56 files, each showing path, character count, and a content snippet (full structured content is 646K chars).
[
  {
    "path": ".idea/vcs.xml",
    "chars": 180,
    "preview": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<project version=\"4\">\n  <component name=\"VcsDirectoryMappings\">\n    <mapping dire"
  },
  {
    "path": "LICENSE",
    "chars": 1066,
    "preview": "MIT License\n\nCopyright (c) 2016 Jiatao Gu\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\n"
  },
  {
    "path": "README.md",
    "chars": 75,
    "preview": "# CopyNet\nincorporating copying mechanism in sequence-to-sequence learning\n"
  },
  {
    "path": "emolga/__init__.py",
    "chars": 28,
    "preview": "__author__ = 'yinpengcheng'\n"
  },
  {
    "path": "emolga/basic/__init__.py",
    "chars": 24,
    "preview": "__author__ = 'jiataogu'\n"
  },
  {
    "path": "emolga/basic/activations.py",
    "chars": 1456,
    "preview": "import theano.tensor as T\n\n\ndef softmax(x):\n    return T.nnet.softmax(x.reshape((-1, x.shape[-1]))).reshape(x.shape)\n\n\nd"
  },
  {
    "path": "emolga/basic/initializations.py",
    "chars": 2300,
    "preview": "import theano\nimport theano.tensor as T\nimport numpy as np\n\nfrom emolga.utils.theano_utils import sharedX, shared_zeros,"
  },
  {
    "path": "emolga/basic/objectives.py",
    "chars": 3130,
    "preview": "from __future__ import absolute_import\nimport theano\nimport theano.tensor as T\nimport numpy as np\nfrom six.moves import "
  },
  {
    "path": "emolga/basic/optimizers.py",
    "chars": 9696,
    "preview": "from __future__ import absolute_import\nimport theano\nimport sys\n\nfrom theano.sandbox.rng_mrg import MRG_RandomStreams\nim"
  },
  {
    "path": "emolga/config.py",
    "chars": 3176,
    "preview": "__author__ = 'jiataogu'\nimport os\nimport os.path as path\n\ndef setup_ptb2():\n    # pretraining setting up.\n    # get the "
  },
  {
    "path": "emolga/config_variant.py",
    "chars": 1011,
    "preview": "__author__ = 'jiataogu'\nfrom config import setup_ptb2\nsetup = setup_ptb2\n\n\"\"\"\nThis file is for small variant fix on orig"
  },
  {
    "path": "emolga/dataset/build_dataset.py",
    "chars": 4653,
    "preview": "__author__ = 'jiataogu'\nimport numpy as np\nimport numpy.random as rng\nimport cPickle\nimport pprint\nimport sys\n\nfrom coll"
  },
  {
    "path": "emolga/layers/__init__.py",
    "chars": 28,
    "preview": "__author__ = 'yinpengcheng'\n"
  },
  {
    "path": "emolga/layers/attention.py",
    "chars": 4983,
    "preview": "__author__ = 'jiataogu'\nfrom .core import *\n\"\"\"\nAttention Model.\n    <::: Two kinds of attention models ::::>\n    -- Lin"
  },
  {
    "path": "emolga/layers/core.py",
    "chars": 8341,
    "preview": "# -*- coding: utf-8 -*-\n\nfrom emolga.utils.theano_utils import *\nimport emolga.basic.initializations as initializations\n"
  },
  {
    "path": "emolga/layers/embeddings.py",
    "chars": 2163,
    "preview": "# -*- coding: utf-8 -*-\n\nfrom .core import Layer\nfrom emolga.utils.theano_utils import *\nimport emolga.basic.initializat"
  },
  {
    "path": "emolga/layers/gridlstm.py",
    "chars": 41143,
    "preview": "__author__ = 'jiataogu'\n\"\"\"\nThe file is the implementation of Grid-LSTM\nIn this stage we only support 2D LSTM with Pooli"
  },
  {
    "path": "emolga/layers/ntm_minibatch.py",
    "chars": 28501,
    "preview": "__author__ = 'jiataogu'\nimport theano\nimport theano.tensor as T\n\nimport scipy.linalg as sl\nimport numpy as np\nfrom .core"
  },
  {
    "path": "emolga/layers/recurrent.py",
    "chars": 19395,
    "preview": "# -*- coding: utf-8 -*-\nfrom abc import abstractmethod\nfrom .core import *\n\n\nclass Recurrent(MaskedLayer):\n    \"\"\"\n     "
  },
  {
    "path": "emolga/models/__init__.py",
    "chars": 24,
    "preview": "__author__ = 'jiataogu'\n"
  },
  {
    "path": "emolga/models/core.py",
    "chars": 3014,
    "preview": "__author__ = 'jiataogu'\nimport theano\nimport logging\nimport deepdish as dd\n\nfrom emolga.dataset.build_dataset import ser"
  },
  {
    "path": "emolga/models/covc_encdec.py",
    "chars": 74597,
    "preview": "__author__ = 'jiataogu'\nimport theano\nimport logging\nimport copy\nimport emolga.basic.objectives as objectives\nimport emo"
  },
  {
    "path": "emolga/models/encdec.py",
    "chars": 63179,
    "preview": "__author__ = 'jiataogu'\nimport theano\nimport logging\nimport copy\nimport emolga.basic.objectives as objectives\nimport emo"
  },
  {
    "path": "emolga/models/ntm_encdec.py",
    "chars": 53418,
    "preview": "__author__ = 'jiataogu'\n\nimport theano\ntheano.config.exception_verbosity = 'high'\n\nimport logging\nimport copy\n\nimport em"
  },
  {
    "path": "emolga/models/pointers.py",
    "chars": 36210,
    "preview": "__author__ = 'jiataogu'\nimport theano\nimport logging\nimport copy\n\nfrom emolga.layers.recurrent import *\nfrom emolga.laye"
  },
  {
    "path": "emolga/models/variational.py",
    "chars": 63827,
    "preview": "__author__ = 'jiataogu'\nimport theano\n# theano.config.exception_verbosity = 'high'\nimport logging\n\nimport emolga.basic.o"
  },
  {
    "path": "emolga/run.py",
    "chars": 13491,
    "preview": "# coding=utf-8\n__author__ = 'jiataogu'\n\nimport logging\n\nfrom matplotlib import pyplot\nfrom theano.sandbox.rng_mrg import"
  },
  {
    "path": "emolga/test_lm.py",
    "chars": 7219,
    "preview": "__author__ = 'jiataogu'\n\nimport logging\n\nfrom theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams\n\nfrom emo"
  },
  {
    "path": "emolga/test_nvtm.py",
    "chars": 7962,
    "preview": "__author__ = 'jiataogu'\n\nimport logging\n\nfrom theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams\n\nfrom emo"
  },
  {
    "path": "emolga/test_run.py",
    "chars": 14948,
    "preview": "# coding=utf-8\n__author__ = 'jiataogu'\n\nimport logging\n\nimport theano\nfrom matplotlib import pyplot\nfrom theano.sandbox."
  },
  {
    "path": "emolga/utils/__init__.py",
    "chars": 28,
    "preview": "__author__ = 'yinpengcheng'\n"
  },
  {
    "path": "emolga/utils/generic_utils.py",
    "chars": 8252,
    "preview": "from __future__ import absolute_import\nfrom matplotlib.ticker import FuncFormatter\nimport numpy as np\nimport time\nimport"
  },
  {
    "path": "emolga/utils/io_utils.py",
    "chars": 2110,
    "preview": "from __future__ import absolute_import\nimport h5py\nimport numpy as np\nimport cPickle\nfrom collections import defaultdict"
  },
  {
    "path": "emolga/utils/np_utils.py",
    "chars": 1395,
    "preview": "from __future__ import absolute_import\nimport numpy as np\nimport scipy as sp\nfrom six.moves import range\nfrom six.moves "
  },
  {
    "path": "emolga/utils/test_utils.py",
    "chars": 1091,
    "preview": "import numpy as np\n\n\ndef get_test_data(nb_train=1000, nb_test=500, input_shape=(10,), output_shape=(2,),\n               "
  },
  {
    "path": "emolga/utils/theano_utils.py",
    "chars": 5533,
    "preview": "from __future__ import absolute_import\n\nfrom theano import gof\nfrom theano.tensor import basic as tensor\nimport numpy as"
  },
  {
    "path": "experiments/__init__.py",
    "chars": 23,
    "preview": "__author__ = 'jiataogu'"
  },
  {
    "path": "experiments/bst_dataset.py",
    "chars": 11725,
    "preview": "# coding=utf-8\n__author__ = 'jiataogu'\nfrom emolga.dataset.build_dataset import deserialize_from_file, serialize_to_file"
  },
  {
    "path": "experiments/bst_vest.py",
    "chars": 9621,
    "preview": "# coding=utf-8\n\"\"\"\nThis is the implementation of Copy-NET\nWe start from the basic Seq2seq framework for a auto-encoder.\n"
  },
  {
    "path": "experiments/config.py",
    "chars": 20729,
    "preview": "__author__ = 'jiataogu'\nimport os\nimport os.path as path\n\n\ndef setup():\n    config = dict()\n    # config['seed']        "
  },
  {
    "path": "experiments/copynet.py",
    "chars": 5543,
    "preview": "\"\"\"\nThis is the implementation of Copy-NET\nWe start from the basic Seq2seq framework for a auto-encoder.\n\"\"\"\nimport logg"
  },
  {
    "path": "experiments/copynet_input.py",
    "chars": 3488,
    "preview": "# coding=utf-8\nimport logging\nimport time\nimport numpy as np\nimport sys\nimport copy\n\nfrom theano.sandbox.rng_mrg import "
  },
  {
    "path": "experiments/dataset.py",
    "chars": 2888,
    "preview": "\"\"\"\nPreprocess the bAbI datset.\n\"\"\"\nimport logging\nimport os\nimport sys\nimport numpy.random as n_rng\nfrom emolga.dataset"
  },
  {
    "path": "experiments/lcsts_dataset.py",
    "chars": 3814,
    "preview": "# coding=utf-8\nimport chardet\nimport sys\nimport numpy as np\nimport jieba as jb\nfrom emolga.dataset.build_dataset import "
  },
  {
    "path": "experiments/lcsts_rouge.py",
    "chars": 6306,
    "preview": "\"\"\"\nEvaluation using ROUGE for LCSTS dataset.\n\"\"\"\n# load the testing set.\nfrom emolga.dataset.build_dataset import deser"
  },
  {
    "path": "experiments/lcsts_sample.py",
    "chars": 5439,
    "preview": "\"\"\"\nThis is the implementation of Copy-NET\nWe start from the basic Seq2seq framework for a auto-encoder.\n\"\"\"\nimport logg"
  },
  {
    "path": "experiments/lcsts_test.py",
    "chars": 8550,
    "preview": "\"\"\"\nThis is the implementation of Copy-NET\nWe start from the basic Seq2seq framework for a auto-encoder.\n\"\"\"\nimport logg"
  },
  {
    "path": "experiments/lcsts_vest.py",
    "chars": 9913,
    "preview": "\"\"\"\nThis is the implementation of Copy-NET\nWe start from the basic Seq2seq framework for a auto-encoder.\n\"\"\"\nimport logg"
  },
  {
    "path": "experiments/lcsts_vest_new.py",
    "chars": 9893,
    "preview": "\"\"\"\nThis is the implementation of Copy-NET\nWe start from the basic Seq2seq framework for a auto-encoder.\n\"\"\"\nimport logg"
  },
  {
    "path": "experiments/movie_dataset.py",
    "chars": 2744,
    "preview": "# coding=utf-8\nfrom emolga.dataset.build_dataset import deserialize_from_file, serialize_to_file\nimport string\nimport ra"
  },
  {
    "path": "experiments/syn_vest.py",
    "chars": 9830,
    "preview": "\"\"\"\nThis is the implementation of Copy-NET\nWe start from the basic Seq2seq framework for a auto-encoder.\n\"\"\"\nimport logg"
  },
  {
    "path": "experiments/syntest.py",
    "chars": 9967,
    "preview": "\"\"\"\nThis is the implementation of Copy-NET\nWe start from the basic Seq2seq framework for a auto-encoder.\n\"\"\"\nimport logg"
  },
  {
    "path": "experiments/synthetic.py",
    "chars": 2907,
    "preview": "__author__ = 'jiataogu'\nfrom emolga.dataset.build_dataset import deserialize_from_file, serialize_to_file\nimport numpy.r"
  },
  {
    "path": "experiments/weibo_dataset.py",
    "chars": 2658,
    "preview": "# coding=utf-8\nfrom emolga.dataset.build_dataset import deserialize_from_file, serialize_to_file\n\nword2idx = dict()\nword"
  },
  {
    "path": "experiments/weibo_vest.py",
    "chars": 9903,
    "preview": "\"\"\"\nThis is the implementation of Copy-NET\nWe start from the basic Seq2seq framework for a auto-encoder.\n\"\"\"\nimport logg"
  }
]

// ... and 1 more file (not included in this preview)

About this extraction

This page contains the full source code of the MultiPath/CopyNet GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction covers 56 files (609.0 KB, roughly 153.7k tokens) plus a symbol index of 579 extracted functions, classes, methods, constants, and types.

Extracted by GitExtract, a free GitHub-repo-to-text converter for AI, built by Nikandr Surkov.
