[
  {
    "path": ".idea/vcs.xml",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<project version=\"4\">\n  <component name=\"VcsDirectoryMappings\">\n    <mapping directory=\"$PROJECT_DIR$\" vcs=\"Git\" />\n  </component>\n</project>"
  },
  {
    "path": "LICENSE",
    "content": "MIT License\n\nCopyright (c) 2016 Jiatao Gu\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "README.md",
    "content": "# CopyNet\nincorporating copying mechanism in sequence-to-sequence learning\n"
  },
  {
    "path": "emolga/__init__.py",
    "content": "__author__ = 'yinpengcheng'\n"
  },
  {
    "path": "emolga/basic/__init__.py",
    "content": "__author__ = 'jiataogu'\n"
  },
  {
    "path": "emolga/basic/activations.py",
    "content": "import theano.tensor as T\n\n\ndef softmax(x):\n    return T.nnet.softmax(x.reshape((-1, x.shape[-1]))).reshape(x.shape)\n\n\ndef vector_softmax(x):\n    return T.nnet.softmax(x.reshape((1, x.shape[0])))[0]\n\n\ndef time_distributed_softmax(x):\n    import warnings\n    warnings.warn(\"time_distributed_softmax is deprecated. Just use softmax!\", DeprecationWarning)\n    return softmax(x)\n\n\ndef softplus(x):\n    return T.nnet.softplus(x)\n\n\ndef relu(x):\n    return T.nnet.relu(x)\n\n\ndef tanh(x):\n    return T.tanh(x)\n\n\ndef sigmoid(x):\n    return T.nnet.sigmoid(x)\n\n\ndef hard_sigmoid(x):\n    return T.nnet.hard_sigmoid(x)\n\n\ndef linear(x):\n    '''\n    The function returns the variable that is passed in, so all types work\n    '''\n    return x\n\n\ndef maxout2(x):\n    shape = x.shape\n    if x.ndim == 1:\n        shape1 = T.cast(shape[0] / 2, 'int64')\n        shape2 = T.cast(2, 'int64')\n        x = x.reshape([shape1, shape2])\n        x = x.max(1)\n    elif x.ndim == 2:\n        shape1 = T.cast(shape[1] / 2, 'int64')\n        shape2 = T.cast(2, 'int64')\n        x = x.reshape([shape[0], shape1, shape2])\n        x = x.max(2)\n    elif x.ndim == 3:\n        shape1 = T.cast(shape[2] / 2, 'int64')\n        shape2 = T.cast(2, 'int64')\n        x = x.reshape([shape[0], shape[1], shape1, shape2])\n        x = x.max(3)\n    return x\n\n\nfrom emolga.utils.generic_utils import get_from_module\n\n\ndef get(identifier):\n    return get_from_module(identifier, globals(), 'activation function')\n"
  },
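  {
    "path": "examples/maxout2_usage.py",
    "content": "\"\"\"\nMinimal usage sketch for emolga.basic.activations.maxout2 (this example file\nand its inputs are illustrative, not part of the original training code):\nmaxout2 halves the last dimension of its input by taking pairwise maxima.\n\"\"\"\nimport numpy as np\nimport theano\nimport theano.tensor as T\n\nfrom emolga.basic.activations import maxout2\n\nx = T.matrix('x')\nf = theano.function([x], maxout2(x))\n\n# a (2, 4) input is reshaped to (2, 2, 2) and max-reduced over the last axis,\n# so every adjacent pair of units collapses to its maximum.\ninp = np.asarray([[1., 5., 2., 0.],\n                  [3., 3., 7., 9.]], dtype=theano.config.floatX)\nprint f(inp)  # -> [[5., 2.], [3., 9.]]\n"
  },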
  {
    "path": "emolga/basic/initializations.py",
    "content": "import theano\nimport theano.tensor as T\nimport numpy as np\n\nfrom emolga.utils.theano_utils import sharedX, shared_zeros, shared_ones\n\n\ndef get_fans(shape):\n    if isinstance(shape, int):\n        shape = (1, shape)\n    fan_in = shape[0] if len(shape) == 2 else np.prod(shape[1:])\n    fan_out = shape[1] if len(shape) == 2 else shape[0]\n    return fan_in, fan_out\n\n\ndef uniform(shape, scale=0.1):\n    return sharedX(np.random.uniform(low=-scale, high=scale, size=shape))\n\n\ndef normal(shape, scale=0.05):\n    return sharedX(np.random.randn(*shape) * scale)\n\n\ndef lecun_uniform(shape):\n    ''' Reference: LeCun 98, Efficient Backprop\n        http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf\n    '''\n    fan_in, fan_out = get_fans(shape)\n    scale = np.sqrt(3. / fan_in)\n    return uniform(shape, scale)\n\n\ndef glorot_normal(shape):\n    ''' Reference: Glorot & Bengio, AISTATS 2010\n    '''\n    fan_in, fan_out = get_fans(shape)\n    s = np.sqrt(2. / (fan_in + fan_out))\n    return normal(shape, s)\n\n\ndef glorot_uniform(shape):\n    fan_in, fan_out = get_fans(shape)\n    s = np.sqrt(6. / (fan_in + fan_out))\n    return uniform(shape, s)\n\n\ndef he_normal(shape):\n    ''' Reference:  He et al., http://arxiv.org/abs/1502.01852\n    '''\n    fan_in, fan_out = get_fans(shape)\n    s = np.sqrt(2. / fan_in)\n    return normal(shape, s)\n\n\ndef he_uniform(shape):\n    fan_in, fan_out = get_fans(shape)\n    s = np.sqrt(6. / fan_in)\n    return uniform(shape, s)\n\n\ndef orthogonal(shape, scale=1.1):\n    ''' From Lasagne\n    '''\n    flat_shape = (shape[0], np.prod(shape[1:]))\n    a = np.random.normal(0.0, 1.0, flat_shape)\n    u, _, v = np.linalg.svd(a, full_matrices=False)\n    # pick the one with the correct shape\n    q = u if u.shape == flat_shape else v\n    q = q.reshape(shape)\n    return sharedX(scale * q[:shape[0], :shape[1]])\n\n\ndef identity(shape, scale=1):\n    if len(shape) != 2 or shape[0] != shape[1]:\n        raise Exception(\"Identity matrix initialization can only be used for 2D square matrices\")\n    else:\n        return sharedX(scale * np.identity(shape[0]))\n\n\ndef zero(shape):\n    return shared_zeros(shape)\n\n\ndef one(shape):\n    return shared_ones(shape)\n\nfrom emolga.utils.generic_utils import get_from_module\ndef get(identifier):\n    return get_from_module(identifier, globals(), 'initialization')\n"
  },
  {
    "path": "emolga/basic/objectives.py",
    "content": "from __future__ import absolute_import\nimport theano\nimport theano.tensor as T\nimport numpy as np\nfrom six.moves import range\n\nif theano.config.floatX == 'float64':\n    epsilon = 1.0e-9\nelse:\n    epsilon = 1.0e-7\n\n\ndef mean_squared_error(y_true, y_pred):\n    return T.sqr(y_pred - y_true).mean(axis=-1)\n\n\ndef mean_absolute_error(y_true, y_pred):\n    return T.abs_(y_pred - y_true).mean(axis=-1)\n\n\ndef mean_absolute_percentage_error(y_true, y_pred):\n    return T.abs_((y_true - y_pred) / T.clip(T.abs_(y_true), epsilon, np.inf)).mean(axis=-1) * 100.\n\n\ndef mean_squared_logarithmic_error(y_true, y_pred):\n    return T.sqr(T.log(T.clip(y_pred, epsilon, np.inf) + 1.) - T.log(T.clip(y_true, epsilon, np.inf) + 1.)).mean(axis=-1)\n\n\ndef squared_hinge(y_true, y_pred):\n    return T.sqr(T.maximum(1. - y_true * y_pred, 0.)).mean(axis=-1)\n\n\ndef hinge(y_true, y_pred):\n    return T.maximum(1. - y_true * y_pred, 0.).mean(axis=-1)\n\n\ndef categorical_crossentropy(y_true, y_pred):\n    '''Expects a binary class matrix instead of a vector of scalar classes\n    '''\n    y_pred = T.clip(y_pred, epsilon, 1.0 - epsilon)\n    # scale preds so that the class probas of each sample sum to 1\n    y_pred /= y_pred.sum(axis=-1, keepdims=True)\n    cce = T.nnet.categorical_crossentropy(y_pred, y_true)\n    return cce\n\n\ndef binary_crossentropy(y_true, y_pred):\n    y_pred = T.clip(y_pred, epsilon, 1.0 - epsilon)\n    bce = T.nnet.binary_crossentropy(y_pred, y_true).mean(axis=-1)\n    return bce\n\n\ndef poisson_loss(y_true, y_pred):\n    return T.mean(y_pred - y_true * T.log(y_pred + epsilon), axis=-1)\n\n####################################################\n# Variational Auto-encoder\n\ndef gaussian_kl_divergence(mean, ln_var):\n    \"\"\"Computes the KL-divergence of Gaussian variables from the standard one.\n\n    Given two variable ``mean`` representing :math:`\\\\mu` and ``ln_var``\n    representing :math:`\\\\log(\\\\sigma^2)`, this function returns a variable\n    representing the KL-divergence between the given multi-dimensional Gaussian\n    :math:`N(\\\\mu, S)` and the standard Gaussian :math:`N(0, I)`\n\n    .. math::\n\n       D_{\\\\mathbf{KL}}(N(\\\\mu, S) \\\\| N(0, I)),\n\n    where :math:`S` is a diagonal matrix such that :math:`S_{ii} = \\\\sigma_i^2`\n    and :math:`I` is an identity matrix.\n\n    Args:\n        mean (~chainer.Variable): A variable representing mean of given\n            gaussian distribution, :math:`\\\\mu`.\n        ln_var (~chainer.Variable): A variable representing logarithm of\n            variance of given gaussian distribution, :math:`\\\\log(\\\\sigma^2)`.\n\n    Returns:\n        ~chainer.Variable: A variable representing KL-divergence between\n            given gaussian distribution and the standard gaussian.\n\n    \"\"\"\n    var = T.exp(ln_var)\n    return  0.5 * T.sum(mean * mean + var - ln_var - 1, 1)\n\n\n# aliases\nmse = MSE = mean_squared_error\nmae = MAE = mean_absolute_error\nmape = MAPE = mean_absolute_percentage_error\nmsle = MSLE = mean_squared_logarithmic_error\ngkl = GKL = gaussian_kl_divergence\n\nfrom emolga.utils.generic_utils import get_from_module\ndef get(identifier):\n    return get_from_module(identifier, globals(), 'objective')\n"
  },
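  {
    "path": "examples/gkl_sanity_check.py",
    "content": "\"\"\"\nMinimal sanity check for gaussian_kl_divergence (this example file and its toy\nvalues are illustrative, not part of the original training code). It verifies\nthe closed form 0.5 * sum(mu^2 + sigma^2 - log(sigma^2) - 1): a unit Gaussian\n(mean=0, ln_var=0) has zero divergence from the standard Gaussian, and shifting\nthe mean to 1 adds 0.5 per dimension.\n\"\"\"\nimport numpy as np\nimport theano\nimport theano.tensor as T\n\nfrom emolga.basic.objectives import gaussian_kl_divergence\n\nmean   = T.matrix('mean')\nln_var = T.matrix('ln_var')\nkl     = theano.function([mean, ln_var], gaussian_kl_divergence(mean, ln_var))\n\nzeros = np.zeros((2, 3), dtype=theano.config.floatX)\nprint kl(zeros, zeros)       # -> [ 0.  0.]\nprint kl(zeros + 1., zeros)  # -> [ 1.5  1.5]  (0.5 * 3 dimensions)\n"
  },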
  {
    "path": "emolga/basic/optimizers.py",
    "content": "from __future__ import absolute_import\nimport theano\nimport sys\n\nfrom theano.sandbox.rng_mrg import MRG_RandomStreams\nimport theano.tensor as T\nimport logging\n\nfrom emolga.utils.theano_utils import shared_zeros, shared_scalar, floatX\nfrom emolga.utils.generic_utils import get_from_module\nfrom six.moves import zip\nfrom copy import copy, deepcopy\n\nlogger = logging.getLogger(__name__)\n\n\ndef clip_norm(g, c, n):\n    if c > 0:\n        g = T.switch(T.ge(n, c), g * c / n, g)\n    return g\n\n\ndef kl_divergence(p, p_hat):\n    return p_hat - p + p * T.log(p / p_hat)\n\n\nclass Optimizer(object):\n    def __init__(self, **kwargs):\n        self.__dict__.update(kwargs)\n        self.updates   = []\n        self.save_parm = []\n\n    def add(self, v):\n        self.save_parm += [v]\n\n    def get_state(self):\n        return [u[0].get_value() for u in self.updates]\n\n    def set_state(self, value_list):\n        assert len(self.updates) == len(value_list)\n        for u, v in zip(self.updates, value_list):\n            u[0].set_value(floatX(v))\n\n    def get_updates(self, params, loss):\n        raise NotImplementedError\n\n    def get_gradients(self, loss, params):\n        \"\"\"\n        Consider the situation that gradient is weighted.\n        \"\"\"\n        if isinstance(loss, list):\n            grads = T.grad(loss[0], params, consider_constant=loss[1:])  # gradient of loss\n        else:\n            grads = T.grad(loss, params)\n\n        if hasattr(self, 'clipnorm') and self.clipnorm > 0:\n            print 'use gradient clipping!!'\n            norm = T.sqrt(sum([T.sum(g ** 2) for g in grads]))\n            grads = [clip_norm(g, self.clipnorm, norm) for g in grads]\n\n        return grads\n\n    def get_config(self):\n        return {\"name\": self.__class__.__name__}\n\n\nclass SGD(Optimizer):\n\n    def __init__(self, lr=0.05, momentum=0.9, decay=0.01, nesterov=True, *args, **kwargs):\n        super(SGD, self).__init__(**kwargs)\n        self.__dict__.update(locals())\n        self.iterations = shared_scalar(0)\n        self.lr = shared_scalar(lr)\n        self.momentum = shared_scalar(momentum)\n\n    def get_updates(self, params, loss):\n        grads = self.get_gradients(loss, params)\n        lr = self.lr * (1.0 / (1.0 + self.decay * self.iterations))\n        self.updates = [(self.iterations, self.iterations + 1.)]\n\n        for p, g in zip(params, grads):\n            m = shared_zeros(p.get_value().shape)  # momentum\n            v = self.momentum * m - lr * g  # velocity\n            self.updates.append((m, v))\n\n            if self.nesterov:\n                new_p = p + self.momentum * v - lr * g\n            else:\n                new_p = p + v\n\n            self.updates.append((p, new_p))  # apply constraints\n        return self.updates\n\n    def get_config(self):\n        return {\"name\": self.__class__.__name__,\n                \"lr\": float(self.lr.get_value()),\n                \"momentum\": float(self.momentum.get_value()),\n                \"decay\": float(self.decay.get_value()),\n                \"nesterov\": self.nesterov}\n\n\nclass RMSprop(Optimizer):\n    def __init__(self, lr=0.001, rho=0.9, epsilon=1e-6, *args, **kwargs):\n        super(RMSprop, self).__init__(**kwargs)\n        self.__dict__.update(locals())\n        self.lr = shared_scalar(lr)\n        self.rho = shared_scalar(rho)\n        self.iterations = shared_scalar(0)\n\n    def get_updates(self, params, loss):\n        grads = self.get_gradients(loss, params)\n 
       accumulators = [shared_zeros(p.get_value().shape) for p in params]\n        self.updates = [(self.iterations, self.iterations + 1.)]\n\n        for p, g, a in zip(params, grads, accumulators):\n            new_a = self.rho * a + (1 - self.rho) * g ** 2  # update accumulator\n            self.updates.append((a, new_a))\n\n            new_p = p - self.lr * g / T.sqrt(new_a + self.epsilon)\n            self.updates.append((p, new_p))  # apply constraints\n        return self.updates\n\n    def get_config(self):\n        return {\"name\": self.__class__.__name__,\n                \"lr\": float(self.lr.get_value()),\n                \"rho\": float(self.rho.get_value()),\n                \"epsilon\": self.epsilon}\n\n\nclass Adagrad(Optimizer):\n    def __init__(self, lr=0.01, epsilon=1e-6, *args, **kwargs):\n        super(Adagrad, self).__init__(**kwargs)\n        self.__dict__.update(locals())\n        self.lr = shared_scalar(lr)\n\n    def get_updates(self, params, constraints, loss):\n        grads = self.get_gradients(loss, params)\n        accumulators = [shared_zeros(p.get_value().shape) for p in params]\n        self.updates = []\n\n        for p, g, a, c in zip(params, grads, accumulators, constraints):\n            new_a = a + g ** 2  # update accumulator\n            self.updates.append((a, new_a))\n            new_p = p - self.lr * g / T.sqrt(new_a + self.epsilon)\n            self.updates.append((p, c(new_p)))  # apply constraints\n        return self.updates\n\n    def get_config(self):\n        return {\"name\": self.__class__.__name__,\n                \"lr\": float(self.lr.get_value()),\n                \"epsilon\": self.epsilon}\n\n\nclass Adadelta(Optimizer):\n    '''\n        Reference: http://arxiv.org/abs/1212.5701\n    '''\n    def __init__(self, lr=0.1, rho=0.95, epsilon=1e-6, *args, **kwargs):\n        super(Adadelta, self).__init__(**kwargs)\n        self.__dict__.update(locals())\n        self.lr = shared_scalar(lr)\n        self.iterations = shared_scalar(0)\n\n    def get_updates(self, params, loss):\n        grads = self.get_gradients(loss, params)\n        accumulators = [shared_zeros(p.get_value().shape) for p in params]\n        delta_accumulators = [shared_zeros(p.get_value().shape) for p in params]\n        # self.updates = []\n        self.updates = [(self.iterations, self.iterations + 1.)]\n\n        for p, g, a, d_a in zip(params, grads, accumulators, delta_accumulators):\n            new_a = self.rho * a + (1 - self.rho) * g ** 2  # update accumulator\n            self.updates.append((a, new_a))\n\n            # use the new accumulator and the *old* delta_accumulator\n            update = g * T.sqrt(d_a + self.epsilon) / T.sqrt(new_a +\n                                                             self.epsilon)\n\n            new_p = p - self.lr * update\n            self.updates.append((p, new_p))\n\n            # update delta_accumulator\n            new_d_a = self.rho * d_a + (1 - self.rho) * update ** 2\n            self.updates.append((d_a, new_d_a))\n        return self.updates\n\n    def get_config(self):\n        return {\"name\": self.__class__.__name__,\n                \"lr\": float(self.lr.get_value()),\n                \"rho\": self.rho,\n                \"epsilon\": self.epsilon}\n\n\nclass Adam(Optimizer):  # new Adam is designed for our purpose.\n    '''\n        Reference: http://arxiv.org/abs/1412.6980v8\n\n        Default parameters follow those provided in the original paper.\n        We add Gaussian Noise to improve the 
performance.\n    '''\n    def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8, save=False, rng=None, *args, **kwargs):\n        super(Adam, self).__init__(**kwargs)\n        self.__dict__.update(locals())\n        print locals()\n\n        self.iterations = shared_scalar(0,  name='iteration')\n        self.lr         = shared_scalar(lr, name='lr')\n        self.rng        = MRG_RandomStreams(use_cuda=True)\n        self.noise      = []\n        self.forget     = dict()\n        self.rng        = rng\n\n        self.add(self.iterations)\n        self.add(self.lr)\n\n    def add_noise(self, param):\n        if param.name not in self.noise:\n            logger.info('add gradient noise to {}'.format(param))\n            self.noise += [param.name]\n\n    def add_forget(self, param):\n        if param.name not in self.forget:\n            logger.info('add forgetting list to {}'.format(param))\n            self.forget[param.name] = theano.shared(param.get_value())\n\n    def get_updates(self, params, loss):\n        grads = self.get_gradients(loss, params)\n        self.updates = [(self.iterations, self.iterations + 1.)]\n        self.pu = []\n\n        t = self.iterations + 1\n        lr_t = self.lr * T.sqrt(1 - self.beta_2**t) / (1 - self.beta_1**t)\n        for p, g in zip(params, grads):\n            m = theano.shared(p.get_value() * 0., name=p.name + '_m')  # zero init of moment\n            v = theano.shared(p.get_value() * 0., name=p.name + '_v')  # zero init of velocity\n\n            self.add(m)\n            self.add(v)\n\n            # g_noise = self.rng.normal(g.shape, 0, T.sqrt(0.005 * t ** (-0.55)), dtype='float32')\n\n            # if p.name in self.noise:\n            #     g_deviated = g + g_noise\n            # else:\n            #     g_deviated = g\n\n            g_deviated = g  #  + g_noise\n            m_t = (self.beta_1 * m) + (1 - self.beta_1) * g_deviated\n            v_t = (self.beta_2 * v) + (1 - self.beta_2) * (g_deviated**2)\n            u_t = -lr_t * m_t / (T.sqrt(v_t) + self.epsilon)\n            p_t = p + u_t\n\n            # # memory reformatting!\n            # if p.name in self.forget:\n            #     p_t = (1 - p_mem) * p_t + p_mem * self.forget[p.name]\n            #     p_s = (1 - p_fgt) * p_t + p_fgt * self.forget[p.name]\n            #     self.updates.append((self.forget[p.name], p_s))\n\n            self.updates.append((m, m_t))\n            self.updates.append((v, v_t))\n            self.updates.append((p, p_t))  # apply constraints\n            self.pu.append((p, p_t - p))\n\n        if self.save:\n            return self.updates, self.pu\n        return self.updates\n\n# aliases\nsgd = SGD\nrmsprop = RMSprop\nadagrad = Adagrad\nadadelta = Adadelta\nadam = Adam\n\n\ndef get(identifier, kwargs=None):\n    return get_from_module(identifier, globals(), 'optimizer', instantiate=True,\n                           kwargs=kwargs)\n"
  },
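  {
    "path": "examples/optimizer_usage.py",
    "content": "\"\"\"\nMinimal sketch of the optimizer protocol in emolga.basic.optimizers (this\nexample file and its toy least-squares problem are illustrative): get_updates()\nreturns a list of (shared_variable, new_value) pairs that plug directly into\ntheano.function(updates=...). An rng is passed explicitly because Adam keeps\none around for its (currently disabled) gradient-noise trick.\n\"\"\"\nimport numpy as np\nimport theano\nimport theano.tensor as T\nfrom theano.sandbox.rng_mrg import MRG_RandomStreams\n\nfrom emolga.basic import optimizers\nfrom emolga.utils.theano_utils import sharedX\n\nw      = sharedX(np.zeros(3))  # the single parameter to fit\nw.name = 'w'                   # Adam derives moment names from param names\nx      = T.matrix('x')\ny      = T.vector('y')\nloss   = T.mean((T.dot(x, w) - y) ** 2)\n\nopt     = optimizers.Adam(lr=0.1, rng=MRG_RandomStreams())\nupdates = opt.get_updates([w], loss)\ntrain   = theano.function([x, y], loss, updates=updates)\n\ndata_x = np.random.randn(64, 3).astype(theano.config.floatX)\ndata_y = np.dot(data_x, [1., -2., 0.5]).astype(theano.config.floatX)\nfor _ in xrange(500):\n    train(data_x, data_y)\nprint w.get_value()  # should approach [1., -2., 0.5]\n"
  },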
  {
    "path": "emolga/config.py",
    "content": "__author__ = 'jiataogu'\nimport os\nimport os.path as path\n\ndef setup_ptb2():\n    # pretraining setting up.\n    # get the lm_config.\n\n    config = dict()\n    config['on_unused_input'] = 'ignore'\n    config['seed']            = 3030029828\n    config['level']           = 'DEBUG'\n\n    # config['model']           = 'RNNLM'\n    # config['model']           = 'VAE'\n    # config['model']           = 'RNNLM' #'Helmholtz'\n    config['model']           = 'HarX'\n    config['highway']         = False\n    config['use_noise']       = False\n\n    config['optimizer']       = 'adam'  #'adadelta'\n    # config['lr']              = 0.1\n\n    # config['optimizer']       = 'sgd'\n\n    # dataset\n    config['path']            = path.realpath(path.curdir) + '/'  # '/home/thoma/Work/Dial-DRL/'\n    config['vocabulary_set']  = config['path'] + 'dataset/ptbcorpus/voc.pkl'\n    config['dataset']         = config['path'] + 'dataset/ptbcorpus/data_train.pkl'\n    config['dataset_valid']   = config['path'] + 'dataset/ptbcorpus/data_valid.pkl'\n    config['dataset_test']    = config['path'] + 'dataset/ptbcorpus/data_test.pkl'\n    # output hdf5 file place.\n    config['path_h5']         = config['path'] + 'H5'\n    if not os.path.exists(config['path_h5']):\n        os.mkdir(config['path_h5'])\n\n    # output log place\n    config['path_log']        = config['path'] + 'Logs'\n    if not os.path.exists(config['path_log']):\n        os.mkdir(config['path_log'])\n\n    # size\n    config['batch_size']      = 20\n    config['eval_batch_size'] = 20\n    config['mode']            = 'RNN'  # NTM\n    config['binary']          = False\n\n    # Encoder: dimension\n    config['enc_embedd_dim']  = 300\n    config['enc_hidden_dim']  = 300\n    config['enc_contxt_dim']  = 350\n    config['encoder']         = 'RNN'\n    config['pooling']         = False\n\n    # Encoder: Model\n    config['bidirectional']   = False  # True\n    config['decposterior']    = True\n    config['enc_use_contxt']  = False\n\n    # Agent: dimension\n    config['action_dim']      = 50\n    config['output_dim']      = 300\n\n    # Decoder: dimension\n    config['dec_embedd_dim']  = 300\n    config['dec_hidden_dim']  = 300\n    config['dec_contxt_dim']  = 300\n\n    # Decoder: Model\n    config['shared_embed']    = False\n    config['use_input']       = False\n    config['bias_code']       = False   # True\n    config['dec_use_contxt']  = True\n    config['deep_out']        = False\n    config['deep_out_activ']  = 'tanh'  # maxout2\n    config['bigram_predict']  = False\n    config['context_predict'] = True    # False\n    config['leaky_predict']   = False   # True\n    config['dropout']         = 0.3\n\n    # Decoder: sampling\n    config['max_len']         = 88  # 15\n    config['sample_beam']     = 10\n    config['sample_stoch']    = False\n    config['sample_argmax']   = False\n\n    # Auto-Encoder\n    config['nonlinear_A']     = True\n    config['nonlinear_B']     = False\n\n    # VAE/Helmholtz: Model\n    config['repeats']         = 10\n    config['eval_repeats']    = 10\n    config['eval_N']          = 10\n\n    config['variant_control'] = False\n    config['factor']          = 10.\n    config['mult_q']          = 10.\n\n    print 'setup ok.'\n    return config\n\n\n"
  },
  {
    "path": "emolga/config_variant.py",
    "content": "__author__ = 'jiataogu'\nfrom config import setup_ptb2\nsetup = setup_ptb2\n\n\"\"\"\nThis file is for small variant fix on original\n\"\"\"\n\n\ndef setup_bienc(config=None):\n    if config is None:\n        config = setup()\n    print 'make some modification'\n\n    config['bidirectional'] = True\n    config['decposterior']  = False\n    return config\n\n\ndef setup_dim(config=None):\n    if config is None:\n        config = setup()\n    print 'make some modification'\n\n    config['enc_embedd_dim'] = 300\n    config['enc_hidden_dim'] = 300\n    config['action_dim']     = 100\n\n    config['dec_embedd_dim'] = 300\n    config['dec_hidden_dim'] = 300\n    config['dec_contxt_dim'] = 300\n    return config\n\n\ndef setup_rep(config=None):\n    if config is None:\n        config = setup()\n    print 'make some modification'\n\n    config['repeats']        = 5\n    return config\n\n\ndef setup_opt(config=None):\n    if config is None:\n        config = setup()\n    print 'make some modification'\n\n    config['optimizer']      = 'Adam'\n    return config"
  },
  {
    "path": "emolga/dataset/build_dataset.py",
    "content": "__author__ = 'jiataogu'\nimport numpy as np\nimport numpy.random as rng\nimport cPickle\nimport pprint\nimport sys\n\nfrom collections import OrderedDict\nfrom fuel import datasets\nfrom fuel import transformers\nfrom fuel import schemes\nfrom fuel import streams\n\n\ndef serialize_to_file(obj, path, protocol=cPickle.HIGHEST_PROTOCOL):\n    f = open(path, 'wb')\n    cPickle.dump(obj, f, protocol=protocol)\n    f.close()\n\n\ndef show_txt(array, path):\n    f = open(path, 'w')\n    for line in array:\n        f.write(' '.join(line) + '\\n')\n\n    f.close()\n\n\ndef divide_dataset(dataset, test_size, max_size):\n    train_set = dict()\n    test_set  = dict()\n\n    for w in dataset:\n        train_set[w] = dataset[w][test_size:max_size].astype('int32')\n        test_set[w]  = dataset[w][:test_size].astype('int32')\n\n    return train_set, test_set\n\n\ndef deserialize_from_file(path):\n    f = open(path, 'rb')\n    obj = cPickle.load(f)\n    f.close()\n    return obj\n\n\ndef build_fuel(data):\n    # create fuel dataset.\n    dataset     = datasets.IndexableDataset(indexables=OrderedDict([('data', data)]))\n    dataset.example_iteration_scheme \\\n                = schemes.ShuffledExampleScheme(dataset.num_examples)\n    return dataset, len(data)\n\n\ndef obtain_stream(dataset, batch_size, size=1):\n    if size == 1:\n        data_stream = dataset.get_example_stream()\n        data_stream = transformers.Batch(data_stream, iteration_scheme=schemes.ConstantScheme(batch_size))\n\n        # add padding and masks to the dataset\n        data_stream = transformers.Padding(data_stream, mask_sources=('data'))\n        return data_stream\n    else:\n        data_streams = [dataset.get_example_stream() for _ in xrange(size)]\n        data_streams = [transformers.Batch(data_stream, iteration_scheme=schemes.ConstantScheme(batch_size))\n                        for data_stream in data_streams]\n        data_streams = [transformers.Padding(data_stream, mask_sources=('data')) for data_stream in data_streams]\n        return data_streams\n\ndef build_ptb():\n    path = './ptbcorpus/'\n    print path\n    # make the dataset and vocabulary\n    X_train = [l.split() for l in open(path + 'ptb.train.txt').readlines()]\n    X_test  = [l.split() for l in open(path + 'ptb.test.txt').readlines()]\n    X_valid = [l.split() for l in open(path + 'ptb.valid.txt').readlines()]\n\n    X = X_train + X_test + X_valid\n    idx2word    = dict(enumerate(set([w for l in X for w in l]), 1))\n    idx2word[0] = '<eol>'\n    word2idx    = {v: k for k, v in idx2word.items()}\n    ixwords_train = [[word2idx[w] for w in l] for l in X_train]\n    ixwords_test  = [[word2idx[w] for w in l] for l in X_test]\n    ixwords_valid = [[word2idx[w] for w in l] for l in X_valid]\n    ixwords_tv    = [[word2idx[w] for w in l] for l in (X_train + X_valid)]\n\n    max_len = max([len(w) for w in X_train])\n    print max_len\n    # serialization:\n    # serialize_to_file(ixwords_train, path + 'data_train.pkl')\n    # serialize_to_file(ixwords_test,  path + 'data_test.pkl')\n    # serialize_to_file(ixwords_valid, path + 'data_valid.pkl')\n    # serialize_to_file(ixwords_tv,    path + 'data_tv.pkl')\n    # serialize_to_file([idx2word, word2idx], path + 'voc.pkl')\n    # show_txt(X, 'data.txt')\n    print 'save done.'\n\n\ndef filter_unk(X, min_freq=5):\n    voc = dict()\n    for l in X:\n        for w in l:\n            if w not in voc:\n                voc[w]  = 1\n            else:\n                voc[w] += 1\n\n    word2idx   = 
dict()\n    word2idx['<eol>'] = 0\n    id2word    = dict()\n    id2word[0] = '<eol>'\n\n    at         = 1\n    for w in voc:\n        if voc[w] > min_freq:\n            word2idx[w] = at\n            id2word[at] = w\n            at += 1\n\n    word2idx['<unk>'] = at\n    id2word[at] = '<unk>'\n    return word2idx, id2word\n\n\ndef build_msr():\n    # path = '/home/thoma/Work/Dial-DRL/dataset/MSRSCC/'\n    path = '/Users/jiataogu/Work/Dial-DRL/dataset/MSRSCC/'\n    print path\n\n    X           = [l.split() for l in open(path + 'train.txt').readlines()]\n    word2idx, idx2word = filter_unk(X, min_freq=5)\n    print 'vocabulary size={0}. {1} samples'.format(len(word2idx), len(X))\n\n    mean_len = np.mean([len(w) for w in X])\n    print 'mean len = {}'.format(mean_len)\n\n    ixwords     = [[word2idx[w]\n                    if w in word2idx\n                    else word2idx['<unk>']\n                    for w in l] for l in X]\n    print ixwords[0]\n    # serialization:\n    serialize_to_file(ixwords, path + 'data_train.pkl')\n\n\nif __name__ == '__main__':\n    build_msr()\n    # build_ptb()\n    # build_dataset()\n    # game = GuessOrder(size=8)\n    # q = 'Is there any number smaller de than 6 in the last 3 numbers ?'\n    # print game.easy_parse(q)\n\n"
  },
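  {
    "path": "examples/stream_usage.py",
    "content": "\"\"\"\nMinimal sketch of the data pipeline in build_dataset.py (this example file and\nits toy sequences are illustrative): build_fuel() wraps a list of index\nsequences into a shuffled fuel dataset, and obtain_stream() batches it and\nzero-pads each batch, so the stream yields (data, data_mask) pairs.\n\"\"\"\nfrom emolga.dataset.build_dataset import build_fuel, obtain_stream\n\ntoy_sequences = [[1, 2, 3], [4, 5], [6, 7, 8, 9]]\ndataset, num = build_fuel(toy_sequences)\nstream       = obtain_stream(dataset, batch_size=2)\n\nfor data, mask in stream.get_epoch_iterator():\n    print data   # (batch, maxlen) int matrix, zero-padded to the longest sample\n    print mask   # (batch, maxlen) float matrix, 1. for real tokens, 0. for padding\n"
  },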
  {
    "path": "emolga/layers/__init__.py",
    "content": "__author__ = 'yinpengcheng'\n"
  },
  {
    "path": "emolga/layers/attention.py",
    "content": "__author__ = 'jiataogu'\nfrom .core import *\n\"\"\"\nAttention Model.\n    <::: Two kinds of attention models ::::>\n    -- Linear Transformation\n    -- Inner Product\n\"\"\"\n\n\nclass Attention(Layer):\n    def __init__(self, target_dim, source_dim, hidden_dim,\n                 init='glorot_uniform', name='attention',\n                 coverage=False, max_len=50,\n                 shared=False):\n\n        super(Attention, self).__init__()\n        self.init       = initializations.get(init)\n        self.softmax    = activations.get('softmax')\n        self.tanh       = activations.get('tanh')\n        self.target_dim = target_dim\n        self.source_dim = source_dim\n        self.hidden_dim = hidden_dim\n        self.max_len    = max_len\n        self.coverage   = coverage\n\n        if coverage:\n            print 'Use Coverage Trick!'\n\n        self.Wa         = self.init((self.target_dim, self.hidden_dim))\n        self.Ua         = self.init((self.source_dim, self.hidden_dim))\n        self.va         = self.init((self.hidden_dim, 1))\n\n        self.Wa.name, self.Ua.name, self.va.name = \\\n                '{}_Wa'.format(name), '{}_Ua'.format(name), '{}_va'.format(name)\n        self.params     = [self.Wa, self.Ua, self.va]\n        if coverage:\n            self.Ca      = self.init((1, self.hidden_dim))\n            self.Ca.name = '{}_Ca'.format(name)\n            self.params += [self.Ca]\n\n    def __call__(self, X, S,\n                 Smask=None,\n                 return_log=False,\n                 Cov=None):\n        assert X.ndim + 1 == S.ndim, 'source should be one more dimension than target.'\n        # X is the key:    (nb_samples, x_dim)\n        # S is the source  (nb_samples, maxlen_s, ctx_dim)\n        # Cov is the coverage vector (nb_samples, maxlen_s)\n\n        if X.ndim == 1:\n            X = X[None, :]\n            S = S[None, :, :]\n            if not Smask:\n                Smask = Smask[None, :]\n\n        Eng   = dot(X[:, None, :], self.Wa) + dot(S, self.Ua)  # (nb_samples, source_num, hidden_dims)\n        Eng   = self.tanh(Eng)\n        # location aware:\n        if self.coverage:\n            Eng += dot(Cov[:, :, None], self.Ca)  # (nb_samples, source_num, hidden_dims)\n\n        Eng   = dot(Eng, self.va)\n        Eng   = Eng[:, :, 0]                      # ? 
(nb_samples, source_num)\n\n        if Smask is not None:\n            # I want to use mask!\n            EngSum = logSumExp(Eng, axis=1, mask=Smask)\n            if return_log:\n                return (Eng - EngSum) * Smask\n            else:\n                return T.exp(Eng - EngSum) * Smask\n        else:\n            if return_log:\n                return T.log(self.softmax(Eng))\n            else:\n                return self.softmax(Eng)\n\n\nclass CosineAttention(Layer):\n    def __init__(self, target_dim, source_dim,\n                 init='glorot_uniform',\n                 use_pipe=True,\n                 name='attention'):\n\n        super(CosineAttention, self).__init__()\n        self.init       = initializations.get(init)\n        self.softmax    = activations.get('softmax')\n        self.softplus   = activations.get('softplus')\n        self.tanh       = activations.get('tanh')\n        self.use_pipe   = use_pipe\n\n        self.target_dim = target_dim\n        self.source_dim = source_dim\n\n        # pipe\n        if self.use_pipe:\n            self.W_key  = Dense(self.target_dim, self.source_dim, name='W_key')\n        else:\n            assert target_dim == source_dim\n            self.W_key  = Identity(name='W_key')\n        self._add(self.W_key)\n\n        # sharpen\n        # self.W_beta     = Dense(self.target_dim, 1, name='W_beta')\n        # dio-sharpen\n        # self.W_beta     = Dense(self.target_dim, self.source_dim, name='W_beta')\n        # self._add(self.W_beta)\n\n        # self.gamma      = self.init((source_dim, ))\n        # self.gamma      = self.init((target_dim, source_dim))\n        # self.gamma.name = 'o_gamma'\n        # self.params    += [self.gamma]\n\n    def __call__(self, X, S, Smask=None, return_log=False):\n        assert X.ndim + 1 == S.ndim, 'source should be one more dimension than target.'\n\n        if X.ndim == 1:\n            X = X[None, :]\n            S = S[None, :, :]\n            if not Smask:\n                Smask = Smask[None, :]\n\n        key   = self.W_key(X)                   # (nb_samples, source_dim)\n        # beta  = self.softplus(self.W_beta(X))   # (nb_samples, source_dim)\n\n        Eng   = dot_2d(key, S)  #, g=self.gamma)\n        # Eng   = cosine_sim2d(key, S)  # (nb_samples, source_num)\n        # Eng   = T.repeat(beta, Eng.shape[1], axis=1) * Eng\n\n        if Smask is not None:\n            # I want to use mask!\n            EngSum = logSumExp(Eng, axis=1, mask=Smask)\n            if return_log:\n                return (Eng - EngSum) * Smask\n            else:\n                return T.exp(Eng - EngSum) * Smask\n        else:\n            if return_log:\n                return T.log(self.softmax(Eng))\n            else:\n                return self.softmax(Eng)\n\n"
  },
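  {
    "path": "examples/attention_usage.py",
    "content": "\"\"\"\nMinimal usage sketch for the Attention layer (this example file and its\ndimensions are illustrative): it scores a batch of target states against a\nbatch of source annotation sequences and returns masked attention weights\nthat sum to one over the source positions.\n\"\"\"\nimport numpy as np\nimport theano\nimport theano.tensor as T\n\nfrom emolga.layers.attention import Attention\n\natt = Attention(target_dim=4, source_dim=6, hidden_dim=5)\n\nX     = T.matrix('X')      # (nb_samples, target_dim)   e.g. a decoder state\nS     = T.tensor3('S')     # (nb_samples, maxlen_s, source_dim) encoder states\nSmask = T.matrix('Smask')  # (nb_samples, maxlen_s)     1. for real positions\n\nf = theano.function([X, S, Smask], att(X, S, Smask=Smask))\n\nx  = np.random.randn(2, 4).astype(theano.config.floatX)\ns  = np.random.randn(2, 7, 6).astype(theano.config.floatX)\nsm = np.ones((2, 7), dtype=theano.config.floatX)\nprint f(x, s, sm).sum(axis=1)  # -> [ 1.  1.]\n"
  },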
  {
    "path": "emolga/layers/core.py",
    "content": "# -*- coding: utf-8 -*-\n\nfrom emolga.utils.theano_utils import *\nimport emolga.basic.initializations as initializations\nimport emolga.basic.activations as activations\n\n\nclass Layer(object):\n    def __init__(self):\n        self.params  = []\n        self.layers  = []\n        self.monitor = {}\n        self.watchlist = []\n\n    def init_updates(self):\n        self.updates = []\n\n    def _monitoring(self):\n        # add monitoring variables\n        for l in self.layers:\n            for v in l.monitor:\n                name = v + '@' + l.name\n                print name\n                self.monitor[name] = l.monitor[v]\n\n    def __call__(self, X, *args, **kwargs):\n        return X\n\n    def _add(self, layer):\n        if layer:\n            self.layers.append(layer)\n            self.params += layer.params\n\n    def supports_masked_input(self):\n        ''' Whether or not this layer respects the output mask of its previous layer in its calculations. If you try\n        to attach a layer that does *not* support masked_input to a layer that gives a non-None output_mask() that is\n        an error'''\n        return False\n\n    def get_output_mask(self, train=None):\n        '''\n        For some models (such as RNNs) you want a way of being able to mark some output data-points as\n        \"masked\", so they are not used in future calculations. In such a model, get_output_mask() should return a mask\n        of one less dimension than get_output() (so if get_output is (nb_samples, nb_timesteps, nb_dimensions), then the mask\n        is (nb_samples, nb_timesteps), with a one for every unmasked datapoint, and a zero for every masked one.\n\n        If there is *no* masking then it shall return None. For instance if you attach an Activation layer (they support masking)\n        to a layer with an output_mask, then that Activation shall also have an output_mask. 
If you attach it to a layer with no\n        such mask, then the Activation's get_output_mask shall return None.\n\n        Some emolga have an output_mask even if their input is unmasked, notably Embedding which can turn the entry \"0\" into\n        a mask.\n        '''\n        return None\n\n    def set_weights(self, weights):\n        for p, w in zip(self.params, weights):\n            if p.eval().shape != w.shape:\n                raise Exception(\"Layer shape %s not compatible with weight shape %s.\" % (p.eval().shape, w.shape))\n            p.set_value(floatX(w))\n\n    def get_weights(self):\n        weights = []\n        for p in self.params:\n            weights.append(p.get_value())\n        return weights\n\n    def get_params(self):\n        return self.params\n\n    def set_name(self, name):\n        for i in range(len(self.params)):\n            if self.params[i].name is None:\n                self.params[i].name = '%s_p%d' % (name, i)\n            else:\n                self.params[i].name = name + '_' + self.params[i].name\n        self.name = name\n\n\nclass MaskedLayer(Layer):\n    '''\n    If your layer trivially supports masking (by simply copying the input mask to the output), then subclass MaskedLayer\n    instead of Layer, and make sure that you incorporate the input mask into your calculation of get_output()\n    '''\n    def supports_masked_input(self):\n        return True\n\n\nclass Identity(Layer):\n    def __init__(self, name='Identity'):\n        super(Identity, self).__init__()\n        if name is not None:\n            self.set_name(name)\n\n    def __call__(self, X):\n        return X\n\n\nclass Dense(Layer):\n    def __init__(self, input_dim, output_dim, init='glorot_uniform', activation='tanh', name='Dense',\n                 learn_bias=True, negative_bias=False):\n\n        super(Dense, self).__init__()\n        self.init = initializations.get(init)\n        self.activation = activations.get(activation)\n        self.input_dim = input_dim\n        self.output_dim = output_dim\n        self.linear = (activation == 'linear')\n\n        # self.input = T.matrix()\n        self.W = self.init((self.input_dim, self.output_dim))\n        if not negative_bias:\n            self.b = shared_zeros((self.output_dim))\n        else:\n            self.b = shared_ones((self.output_dim))\n\n        self.learn_bias = learn_bias\n        if self.learn_bias:\n            self.params = [self.W, self.b]\n        else:\n            self.params = [self.W]\n\n        if name is not None:\n            self.set_name(name)\n\n    def set_name(self, name):\n        self.W.name = '%s_W' % name\n        self.b.name = '%s_b' % name\n\n    def __call__(self, X):\n        output = self.activation(T.dot(X, self.W) + 4. 
* self.b)\n        return output\n\n    def reverse(self, Y):\n        assert self.linear\n\n        output = T.dot((Y - self.b), self.W.T)\n        return output\n\n\nclass Dense2(Layer):\n    def __init__(self, input_dim1, input_dim2, output_dim, init='glorot_uniform', activation='tanh', name='Dense', learn_bias=True):\n\n        super(Dense2, self).__init__()\n        self.init = initializations.get(init)\n        self.activation = activations.get(activation)\n        self.input_dim1 = input_dim1\n        self.input_dim2 = input_dim2\n        self.output_dim = output_dim\n        self.linear = (activation == 'linear')\n\n        # self.input = T.matrix()\n\n        self.W1 = self.init((self.input_dim1, self.output_dim))\n        self.W2 = self.init((self.input_dim2, self.output_dim))\n        self.b  = shared_zeros((self.output_dim))\n\n        self.learn_bias = learn_bias\n        if self.learn_bias:\n            self.params = [self.W1, self.W2, self.b]\n        else:\n            self.params = [self.W1, self.W2]\n\n        if name is not None:\n            self.set_name(name)\n\n    def set_name(self, name):\n        self.W1.name = '%s_W1' % name\n        self.W2.name = '%s_W2' % name\n        self.b.name = '%s_b' % name\n\n    def __call__(self, X1, X2):\n        output = self.activation(T.dot(X1, self.W1) + T.dot(X2, self.W2) + self.b)\n        return output\n\n\nclass Constant(Layer):\n    def __init__(self, input_dim, output_dim, init=None, activation='tanh', name='Bias'):\n\n        super(Constant, self).__init__()\n        assert input_dim == output_dim, 'Bias Layer needs to have the same input/output nodes.'\n\n        self.init = initializations.get(init)\n        self.activation = activations.get(activation)\n        self.input_dim = input_dim\n        self.output_dim = output_dim\n\n        self.b = shared_zeros(self.output_dim)\n        self.params = [self.b]\n\n        if name is not None:\n            self.set_name(name)\n\n    def set_name(self, name):\n        self.b.name = '%s_b' % name\n\n    def __call__(self, X=None):\n        output = self.activation(self.b)\n        if X:\n            L = X.shape[0]\n            output = T.extra_ops.repeat(output[None, :], L, axis=0)\n        return output\n\n\nclass MemoryLinear(Layer):\n    def __init__(self, input_dim, input_wdth, init='glorot_uniform',\n                 activation='tanh', name='Bias', has_input=True):\n        super(MemoryLinear, self).__init__()\n\n        self.init       = initializations.get(init)\n        self.activation = activations.get(activation)\n        self.input_dim  = input_dim\n        self.input_wdth = input_wdth\n\n        self.b = self.init((self.input_dim, self.input_wdth))\n        self.params = [self.b]\n\n        if has_input:\n            self.P = self.init((self.input_dim, self.input_wdth))\n            self.params += [self.P]\n\n        if name is not None:\n            self.set_name(name)\n\n    def __call__(self, X=None):\n        out = self.b[None, :, :]\n        if X:\n            out += self.P[None, :, :] * X\n        return self.activation(out)\n\n\nclass Dropout(MaskedLayer):\n    \"\"\"\n        Hinton's dropout.\n    \"\"\"\n    def __init__(self, rng=None, p=1., name=None):\n        super(Dropout, self).__init__()\n        self.p   = p\n        self.rng = rng\n\n    def __call__(self, X, train=True):\n        if self.p > 0.:\n            retain_prob = 1. 
- self.p\n            if train:\n                X *= self.rng.binomial(X.shape, p=retain_prob, dtype=theano.config.floatX)\n            else:\n                X *= retain_prob\n        return X\n\n\nclass Activation(MaskedLayer):\n    \"\"\"\n        Apply an activation function to an output.\n    \"\"\"\n    def __init__(self, activation):\n        super(Activation, self).__init__()\n        self.activation = activations.get(activation)\n\n    def __call__(self, X):\n        return self.activation(X)\n\n"
  },
  {
    "path": "emolga/layers/embeddings.py",
    "content": "# -*- coding: utf-8 -*-\n\nfrom .core import Layer\nfrom emolga.utils.theano_utils import *\nimport emolga.basic.initializations as initializations\n\n\nclass Embedding(Layer):\n    '''\n        Turn positive integers (indexes) into denses vectors of fixed size.\n        eg. [[4], [20]] -> [[0.25, 0.1], [0.6, -0.2]]\n\n        @input_dim: size of vocabulary (highest input integer + 1)\n        @out_dim: size of dense representation\n    '''\n\n    def __init__(self, input_dim, output_dim, init='uniform', name=None):\n\n        super(Embedding, self).__init__()\n        self.init = initializations.get(init)\n        self.input_dim = input_dim\n        self.output_dim = output_dim\n\n        self.W = self.init((self.input_dim, self.output_dim))\n\n        self.params = [self.W]\n\n        if name is not None:\n            self.set_name(name)\n\n    def get_output_mask(self, X):\n        return T.ones_like(X) * (1 - T.eq(X, 0))\n\n    def __call__(self, X, mask_zero=False, context=None):\n        if context is None:\n            out = self.W[X]\n        else:\n            assert context.ndim == 3\n            flag  = False\n            if X.ndim == 1:\n                flag = True\n                X = X[:, None]\n\n            b_size = context.shape[0]\n\n            EMB = T.repeat(self.W[None, :, :], b_size, axis=0)\n            EMB = T.concatenate([EMB, context], axis=1)\n\n            m_size = EMB.shape[1]\n            e_size = EMB.shape[2]\n            maxlen = X.shape[1]\n\n            EMB = EMB.reshape((b_size * m_size, e_size))\n            Z   = (T.arange(b_size)[:, None] * m_size + X).reshape((b_size * maxlen,))\n            out = EMB[Z]  # (b_size * maxlen, e_size)\n\n            if not flag:\n                out = out.reshape((b_size, maxlen, e_size))\n            else:\n                out = out.reshape((b_size, e_size))\n\n        if mask_zero:\n            return out, T.cast(self.get_output_mask(X), dtype='float32')\n        else:\n            return out\n\n\nclass Zero(Layer):\n    def __call__(self, X):\n        out = T.zeros(X.shape)\n        return out\n\n\nclass Bias(Layer):\n    def __call__(self, X):\n        tmp = X.flatten()\n        tmp = tmp.dimshuffle(0, 'x')\n        return tmp\n"
  },
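  {
    "path": "examples/embedding_context_usage.py",
    "content": "\"\"\"\nMinimal sketch of the `context` argument of Embedding.__call__ (this example\nfile and its sizes are illustrative): each sample's embedding matrix is\nextended with extra per-sample rows, so indices >= input_dim address\nsample-specific entries (e.g. copyable source positions) instead of the\nshared vocabulary.\n\"\"\"\nimport numpy as np\nimport theano\nimport theano.tensor as T\n\nfrom emolga.layers.embeddings import Embedding\n\nvocab_size, emb_dim, extra = 10, 4, 3\nemb = Embedding(vocab_size, emb_dim, name='emb')\n\nX   = T.imatrix('X')      # (nb_samples, maxlen) word indices\nctx = T.tensor3('ctx')    # (nb_samples, extra, emb_dim) per-sample rows\n\nf = theano.function([X, ctx], emb(X, context=ctx))\n\nx = np.asarray([[1, 10], [2, 12]], dtype='int32')  # 10 and 12 hit context rows\nc = np.random.randn(2, extra, emb_dim).astype(theano.config.floatX)\nprint f(x, c).shape  # -> (2, 2, 4)\n"
  },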
  {
    "path": "emolga/layers/gridlstm.py",
    "content": "__author__ = 'jiataogu'\n\"\"\"\nThe file is the implementation of Grid-LSTM\nIn this stage we only support 2D LSTM with Pooling.\n\"\"\"\nfrom recurrent import *\nfrom attention import Attention\nimport logging\nimport copy\nlogger = logging.getLogger(__name__)\n\n\nclass Grid(Recurrent):\n    \"\"\"\n    Grid Cell for Grid-LSTM\n    ===================================================\n    LSTM\n            [h', m'] = LSTM(x, h, m):\n                gi = sigmoid(Wi * x + Ui * h + Vi * m)  # Vi is peep-hole\n                gf = sigmoid(Wf * x + Uf * h + Vf * m)\n                go = sigmoid(Wo * x + Uo * h + Vo * m)\n                gc = tanh(Wc * x +Uc * h)\n\n                m' = gf @ m + gi @ gc  (@ represents element-wise dot.)\n                h' = go @ tanh(m')\n\n    ===================================================\n    Grid\n    (here is an example for 2D Grid LSTM with priority dimension = 1)\n     -------------\n    |    c'  d'   |     Grid Block and Grid Updates.\n    | a         a'|\n    |             |     [d' c'] = LSTM_d([b, d],  c)\n    | b         b'|     [a' b'] = LSTM_t([b, d'], a)\n    |    c   d    |\n     -------------\n    ===================================================\n    Details please refer to:\n        \"Grid Long Short-Term Memory\", http://arxiv.org/abs/1507.01526\n    \"\"\"\n    def __init__(self,\n                 output_dims,\n                 input_dims,    # [0, ... 0], 0 represents no external inputs.\n                 priority=1,\n                 peephole=True,\n                 init='glorot_uniform', inner_init='orthogonal',\n                 forget_bias_init='one',\n                 activation='tanh', inner_activation='sigmoid',\n                 use_input=False,\n                 name=None, weights=None,\n                 identity_connect=None\n                 ):\n        super(Grid, self).__init__()\n\n        # assert len(output_dims) == 2, 'in this stage, we only support 2D Grid-LSTM'\n        assert len(input_dims)  == len(output_dims), '# of inputs must match # of outputs.'\n\n        \"\"\"\n        Initialization.\n        \"\"\"\n        self.input_dims       = input_dims\n        self.output_dims      = output_dims\n        self.N                = len(output_dims)\n        self.priority         = priority\n        self.peephole         = peephole\n        self.use_input        = use_input\n\n        self.init             = initializations.get(init)\n        self.inner_init       = initializations.get(inner_init)\n        self.forget_bias_init = initializations.get(forget_bias_init)\n        self.activation       = activations.get(activation)\n        self.inner_activation = activations.get(inner_activation)\n\n        self.identity_connect = identity_connect\n        self.axies            = {0: 'x', 1: 'y', 2: 'z', 3: 'w'}  # only support at most 4D now!\n\n        \"\"\"\n        Others info.\n        \"\"\"\n        if weights is not None:\n            self.set_weights(weights)\n\n        if name is not None:\n            self.set_name(name)\n\n    def build(self):\n        \"\"\"\n        Build the model weights\n        \"\"\"\n        logger.info(\"Building GridPool-LSTM !!\")\n        self.W = dict()\n        self.U = dict()\n        self.V = dict()\n        self.b = dict()\n\n        # ******************************************************************************************\n        for k in xrange(self.N):       # N-Grids (for 2 dimensions, 0 is for time; 1 is for depth.)\n            axis  = 
self.axies[k]\n            # input layers:\n            if self.input_dims[k] > 0 and self.use_input:\n                # use the data information.\n                self.W[axis + '#i'], self.W[axis + '#f'], \\\n                self.W[axis + '#o'], self.W[axis + '#c']  \\\n                      = [self.init((self.input_dims[k], self.output_dims[k])) for _ in xrange(4)]\n\n            # hidden layers:\n            for j in xrange(self.N):   # every hidden states inputs.\n                pos   = self.axies[j]\n                if k == j:\n                    self.U[axis + pos + '#i'], self.U[axis + pos + '#f'], \\\n                    self.U[axis + pos + '#o'], self.U[axis + pos + '#c']  \\\n                        = [self.inner_init((self.output_dims[j], self.output_dims[k])) for _ in xrange(4)]\n                else:\n                    self.U[axis + pos + '#i'], self.U[axis + pos + '#f'], \\\n                    self.U[axis + pos + '#o'], self.U[axis + pos + '#c']  \\\n                        = [self.init((self.output_dims[j], self.output_dims[k])) for _ in xrange(4)]\n\n            # bias layers:\n            self.b[axis + '#i'], self.b[axis + '#o'], self.b[axis + '#c']  \\\n                      = [shared_zeros(self.output_dims[k]) for _ in xrange(3)]\n            self.b[axis + '#f'] = self.forget_bias_init(self.output_dims[k])\n\n            # peep-hole layers:\n            if self.peephole:\n                self.V[axis + '#i'], self.V[axis + '#f'], self.V[axis + '#o'] \\\n                      = [self.init(self.output_dims[k]) for _ in xrange(3)]\n        # *****************************************************************************************\n\n        # set names for these weights\n        for A, n in zip([self.W, self.U, self.b, self.V], ['W', 'U', 'b', 'V']):\n            for w in A:\n                A[w].name = n + '_' + w\n\n        # set parameters\n        self.params = [self.W[s] for s in self.W] + \\\n                      [self.U[s] for s in self.U] + \\\n                      [self.b[s] for s in self.b] + \\\n                      [self.V[s] for s in self.V]\n\n    def lstm_(self, k, H, m, x, identity=False):\n        \"\"\"\n       LSTM\n            [h', m'] = LSTM(x, h, m):\n                gi = sigmoid(Wi * x + Ui * h + Vi * m)  # Vi is peep-hole\n                gf = sigmoid(Wf * x + Uf * h + Vf * m)\n                go = sigmoid(Wo * x + Uo * h + Vo * m)\n                gc = tanh(Wc * x +Uc * h)\n\n                m' = gf @ m + gi @ gc  (@ represents element-wise dot.)\n                h' = go @ tanh(m')\n\n        \"\"\"\n        assert len(H) == self.N, 'we have to use all the hidden states in Grid LSTM'\n        axis           = self.axies[k]\n\n        # *************************************************************************\n        # bias energy\n        ei, ef, eo, ec = [self.b[axis + p] for p in ['#i', '#f', '#o', '#c']]\n\n        # hidden energy\n        for j in xrange(self.N):\n            pos  = self.axies[j]\n\n            ei  += T.dot(H[j], self.U[axis + pos + '#i'])\n            ef  += T.dot(H[j], self.U[axis + pos + '#f'])\n            eo  += T.dot(H[j], self.U[axis + pos + '#o'])\n            ec  += T.dot(H[j], self.U[axis + pos + '#c'])\n\n        # input energy (if any)\n        if self.input_dims[k] > 0 and self.use_input:\n            ei  += T.dot(x, self.W[axis + '#i'])\n            ef  += T.dot(x, self.W[axis + '#f'])\n            eo  += T.dot(x, self.W[axis + '#o'])\n            ec  += T.dot(x, self.W[axis + '#c'])\n\n        # peep-hole 
connections\n        if self.peephole:\n            ei  += m * self.V[axis + '#i'][None, :]\n            ef  += m * self.V[axis + '#f'][None, :]\n            eo  += m * self.V[axis + '#o'][None, :]\n        # *************************************************************************\n\n        # compute the gates.\n        i        = self.inner_activation(ei)\n        f        = self.inner_activation(ef)\n        o        = self.inner_activation(eo)\n        c        = self.activation(ec)\n\n        # update the memory and hidden states.\n        m_new    = f * m + i * c\n        h_new    = o * self.activation(m_new)\n\n        return h_new, m_new\n\n    def grid_(self,\n              hs_i,\n              ms_i,\n              xs_i,\n              priority=1,\n              identity=None):\n        \"\"\"\n        ===================================================\n        Grid (2D as an example)\n         -------------\n        |    c'  d'   |     Grid Block and Grid Updates.\n        | a         a'|\n        |             |     [d' c'] = LSTM_d([b, d],  c)\n        | b         b'|     [a' b'] = LSTM_t([b, d'], a)   priority\n        |    c   d    |\n         -------------\n         a = my | b = hy | c = mx | d = hx\n        ===================================================\n\n        Currently masking is not considered in GridLSTM.\n        \"\"\"\n        # compute LSTM updates for non-priority dimensions\n        H_new   = hs_i\n        M_new   = ms_i\n        for k in xrange(self.N):\n            if k == priority:\n                continue\n            m   = ms_i[k]\n            x   = xs_i[k]\n            H_new[k], M_new[k] \\\n                = self.lstm_(k, hs_i, m, x)\n\n            if identity is not None:\n                if identity[k]:\n                    H_new[k] += hs_i[k]\n\n        # compute LSTM updates along the priority dimension\n        if priority >= 0:\n            hs_ii   = H_new\n            H_new[priority], M_new[priority] \\\n                    = self.lstm_(priority, hs_ii, ms_i[priority], xs_i[priority])\n            if identity is not None:\n                if identity[priority]:\n                    H_new[priority] += hs_ii[priority]\n\n        return H_new, M_new\n\n\nclass GridLSTM3D(Grid):\n    \"\"\"\n    Grid-LSTM 3D version,\n    which has one flexible dimension (time) and 2 fixed dimensions (x & y)\n    \"\"\"\n    def __init__(self,\n                 # parameters for Grid.\n                 output_dims,\n                 input_dims,    # [0, ... 
0], 0 represents no external inputs.\n                 priority=1,\n                 peephole=True,\n                 init='glorot_uniform', inner_init='orthogonal',\n                 forget_bias_init='one',\n                 activation='tanh', inner_activation='sigmoid',\n                 use_input=False,\n                 name=None, weights=None,\n                 identity_connect=None,\n\n                 # parameters for 2D-GridLSTM\n                 depth=10,  # the size of a big grid\n                 learn_init=False,\n                 pooling=True,\n                 attention=False,\n                 shared=True,\n                 dropout=0,\n                 rng=None,\n                 ):\n        super(Grid, self).__init__()\n\n        assert len(output_dims) == 3, 'in this stage, we only support 3D Grid-LSTM'\n        assert len(input_dims)  == len(output_dims), '# of inputs must match # of outputs.'\n        assert input_dims[2]    == 0, 'we have no z-axis inputs here.'\n        assert shared, 'we share the weights in this stage.'\n        assert not (attention and pooling), 'attention and pooling cannot be set at the same time.'\n\n        \"\"\"\n        Initialization.\n        \"\"\"\n        logger.info(\":::: Sequential Grid-Pool LSTM ::::\")\n        self.input_dims       = input_dims\n        self.output_dims      = output_dims\n        self.N                = len(output_dims)\n        self.depth            = depth\n        self.dropout          = dropout\n\n        self.priority         = priority\n        self.peephole         = peephole\n        self.use_input        = use_input\n        self.pooling          = pooling\n        self.attention        = attention\n        self.learn_init       = learn_init\n\n        self.init             = initializations.get(init)\n        self.inner_init       = initializations.get(inner_init)\n        self.forget_bias_init = initializations.get(forget_bias_init)\n        self.activation       = activations.get(activation)\n        self.relu             = activations.get('relu')\n        self.inner_activation = activations.get(inner_activation)\n\n        self.identity_connect = identity_connect\n        self.axies            = {0: 'x', 1: 'y', 2: 'z', 3: 'w'}  # only support at most 4D now!\n\n        if self.identity_connect is not None:\n            logger.info('Identity Connection: {}'.format(self.identity_connect))\n\n        \"\"\"\n        Build the model weights.\n        \"\"\"\n        # build the centroid grid.\n        self.build()\n\n        # input projection layer (projected to time-axis)       [x]\n        self.Ph  = Dense(input_dims[0], output_dims[0], name='Ph')\n        self.Pm  = Dense(input_dims[0], output_dims[0], name='Pm')\n\n        self._add(self.Ph)\n        self._add(self.Pm)\n\n        # learn init for depth-axis hidden states/memory cells. 
[y]\n        if self.learn_init:\n            self.M0  = self.init((depth, depth, output_dims[2]))\n            self.H0  = self.init((depth, depth, output_dims[2]))\n\n            self.M0.name, self.H0.name = 'M0', 'H0'\n            self.params += [self.M0, self.H0]\n\n        if weights is not None:\n            self.set_weights(weights)\n\n        if name is not None:\n            self.set_name(name)\n\n    def _step(self, *args):\n        # since depth is not determined, we cannot decide the number of inputs\n        # for one time step.\n        # if pooling is True:\n        #    args = [raw_input] +       (sequence)\n        #           [hy] + [my]*depth   (output_info)\n        #\n        inputs = args[0]  # (nb_samples, x, y)\n        Hy_tm1 = [args[k] for k in range(1, 1 + self.depth)]\n        My_tm1 = [args[k] for k in range(1 + self.depth, 1 + 2 * self.depth)]\n\n        # x_axis input projection (get hx_t, mx_t)\n        hx_t   = self.Ph(inputs)           # (nb_samples, output_dim0, output_dim1)\n        mx_t   = self.Pm(inputs)           # (nb_samples, output_dim0, output_dim1)\n\n        # build computation path from bottom to top.\n        Hx_t   = [hx_t]\n        Mx_t   = [mx_t]\n        Hy_t   = []\n        My_t   = []\n        for d in xrange(self.depth):\n            hs_i       = [Hx_t[-1], Hy_tm1[d]]\n            ms_i       = [Mx_t[-1], My_tm1[d]]\n            xs_i       = [inputs,   T.zeros_like(inputs)]\n\n            hs_o, ms_o = self.grid_(hs_i, ms_i, xs_i, priority=self.priority, identity=self.identity_connect)\n\n            Hx_t      += [hs_o[0]]\n            Hy_t      += [hs_o[1]]\n            Mx_t      += [ms_o[0]]\n            My_t      += [ms_o[1]]\n\n        hx_out = Hx_t[-1]\n        mx_out = Mx_t[-1]\n\n        # get the output (output_y, output_x)\n        # MAX-Pooling\n        if self.pooling:\n            # hy_t       = T.max([self.PP(hy) for hy in Hy_t], axis=0)\n            hy_t       = T.max([self.PP(T.concatenate([hy, inputs], axis=-1)) for hy in Hy_t], axis=0)\n            Hy_t       = [hy_t] * self.depth\n\n        if self.attention:\n            HHy_t      = T.concatenate([hy[:, None, :] for hy in Hy_t], axis=1)  # (nb_samples, n_depth, out_dim1)\n            annotation = self.A(inputs, HHy_t)   # (nb_samples, n_depth)\n            hy_t       = T.sum(HHy_t * annotation[:, :, None], axis=1)           # (nb_samples, out_dim1)\n            Hy_t       = [hy_t] * self.depth\n\n        R = Hy_t + My_t + [hx_out, mx_out]\n        return tuple(R)\n\n    def __call__(self, X, init_H=None, init_M=None,\n                 return_sequence=False, one_step=False,\n                 return_info='hy', train=True):\n        # It is training/testing path\n        self.train = train\n\n        # recently we did not support masking.\n        if X.ndim == 2:\n            X = X[:, None, :]\n\n        # one step\n        if one_step:\n            assert init_H is not None, 'previous state must be provided!'\n            assert init_M is not None, 'previous cell must be provided!'\n\n        X = X.dimshuffle((1, 0, 2))\n        if init_H is None:\n            if self.learn_init:\n                init_m     = T.repeat(self.M0[:, None, :], X.shape[1], axis=1)\n                if self.pooling:\n                    init_h = T.repeat(self.H0[None, :], self.depth, axis=0)\n                else:\n                    init_h = self.H0\n                init_h     = T.repeat(init_h[:, None, :], X.shape[1], axis=1)\n\n                init_H     = []\n                init_M     = 
[]\n                for j in xrange(self.depth):\n                    init_H.append(init_h[j])\n                    init_M.append(init_m[j])\n            else:\n                init_H     = [T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dims[1]), 1)] * self.depth\n                init_M     = [T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dims[1]), 1)] * self.depth\n            pass\n\n        # build the computational graph\n        if not one_step:\n            sequences    = [X]\n            outputs_info = init_H + init_M + [None, None]\n            outputs, _   = theano.scan(\n                self._step,\n                sequences=sequences,\n                outputs_info=outputs_info\n            )\n        else:\n            outputs      = self._step(*([X[0]] + init_H + init_M))\n\n        if   return_info == 'hx':\n            if return_sequence:\n                return outputs[-2].dimshuffle((1, 0, 2))   # hx sequence\n            return outputs[-2][-1]\n        elif return_info == 'hy':\n            assert self.pooling or self.attention, 'y-axis hidden states are only used in the ``Pooling Mode\".'\n            if return_sequence:\n                return outputs[2].dimshuffle((1, 0, 2))\n            return outputs[2][-1]\n        elif return_info == 'hxhy':\n            assert self.pooling or self.attention, 'y-axis hidden states are only used in the ``Pooling Mode\".'\n            if return_sequence:\n                return outputs[-2].dimshuffle((1, 0, 2)), outputs[2].dimshuffle((1, 0, 2))    # x-y\n            return outputs[-2][-1], outputs[2][-1]\n\n\n\nclass SequentialGridLSTM(Grid):\n    \"\"\"\n    For details, please refer to:\n        \"Grid Long Short-Term Memory\",\n            http://arxiv.org/abs/1507.01526\n\n    SequentialGridLSTM is a typical 2D-GridLSTM,\n    which has one flexible dimension (time) and one fixed dimension (depth).\n    Input information is added along the x-axis.\n    \"\"\"\n    def __init__(self,\n                 # parameters for Grid.\n                 output_dims,\n                 input_dims,    # [0, ... 
0], 0 represents no external inputs.\n                 priority=1,\n                 peephole=True,\n                 init='glorot_uniform', inner_init='orthogonal',\n                 forget_bias_init='one',\n                 activation='tanh', inner_activation='sigmoid',\n                 use_input=False,\n                 name=None, weights=None,\n                 identity_connect=None,\n\n                 # parameters for 2D-GridLSTM\n                 depth=5,\n                 learn_init=False,\n                 pooling=True,\n                 attention=False,\n                 shared=True,\n                 dropout=0,\n                 rng=None,\n                 ):\n        super(Grid, self).__init__()\n\n        assert len(output_dims) == 2, 'in this stage, we only support 2D Grid-LSTM'\n        assert len(input_dims)  == len(output_dims), '# of inputs must match # of outputs.'\n        assert input_dims[1]    == 0, 'we have no y-axis inputs here.'\n        assert shared, 'we share the weights in this stage.'\n        assert not (attention and pooling), 'attention and pooling cannot be set at the same time.'\n\n        \"\"\"\n        Initialization.\n        \"\"\"\n        logger.info(\":::: Sequential Grid-Pool LSTM ::::\")\n        self.input_dims       = input_dims\n        self.output_dims      = output_dims\n        self.N                = len(output_dims)\n        self.depth            = depth\n        self.dropout          = dropout\n\n        self.priority         = priority\n        self.peephole         = peephole\n        self.use_input        = use_input\n        self.pooling          = pooling\n        self.attention        = attention\n        self.learn_init       = learn_init\n\n        self.init             = initializations.get(init)\n        self.inner_init       = initializations.get(inner_init)\n        self.forget_bias_init = initializations.get(forget_bias_init)\n        self.activation       = activations.get(activation)\n        self.relu             = activations.get('relu')\n        self.inner_activation = activations.get(inner_activation)\n\n        self.identity_connect = identity_connect\n        self.axies            = {0: 'x', 1: 'y', 2: 'z', 3: 'w'}  # only support at most 4D now!\n\n        if self.identity_connect is not None:\n            logger.info('Identity Connection: {}'.format(self.identity_connect))\n\n        \"\"\"\n        Build the model weights.\n        \"\"\"\n        # build the centroid grid.\n        self.build()\n\n        # input projection layer (projected to time-axis)       [x]\n        self.Ph  = Dense(input_dims[0], output_dims[0], name='Ph')\n        self.Pm  = Dense(input_dims[0], output_dims[0], name='Pm')\n\n        self._add(self.Ph)\n        self._add(self.Pm)\n\n        # learn init for depth-axis hidden states/memory cells. 
[y]\n        if self.learn_init:\n            self.M0      = self.init((depth, output_dims[1]))\n            if self.pooling:\n                self.H0  = self.init(output_dims[1])\n            else:\n                self.H0  = self.init((depth, output_dims[1]))\n\n            self.M0.name, self.H0.name = 'M0', 'H0'\n            self.params += [self.M0, self.H0]\n\n        # if we use attention instead of max-pooling\n        if self.pooling:\n            self.PP      = Dense(output_dims[1] + input_dims[0], output_dims[1], # init='orthogonal',\n                                 name='PP', activation='linear')\n            self._add(self.PP)\n\n        if self.attention:\n            self.A       = Attention(target_dim=input_dims[0],\n                                     source_dim=output_dims[1],\n                                     hidden_dim=200, name='attender')\n            self._add(self.A)\n\n        # if self.dropout > 0:\n        #     logger.info(\">>>>>> USE DropOut !! <<<<<<\")\n        #     self.D       = Dropout(rng=rng, p=self.dropout, name='Dropout')\n\n        \"\"\"\n        Others info.\n        \"\"\"\n        if weights is not None:\n            self.set_weights(weights)\n\n        if name is not None:\n            self.set_name(name)\n\n    def _step(self, *args):\n        # since depth is not determined, we cannot decide the number of inputs\n        # for one time step.\n        # if pooling is True:\n        #    args = [raw_input] +       (sequence)\n        #           [hy] + [my]*depth   (output_info)\n        #\n        inputs = args[0]\n        Hy_tm1 = [args[k] for k in range(1, 1 + self.depth)]\n        My_tm1 = [args[k] for k in range(1 + self.depth, 1 + 2 * self.depth)]\n\n        # x_axis input projection (get hx_t, mx_t)\n        hx_t   = self.Ph(inputs)           # (nb_samples, output_dim0)\n        mx_t   = self.Pm(inputs)           # (nb_samples, output_dim0)\n\n        # build computation path from bottom to top.\n        Hx_t   = [hx_t]\n        Mx_t   = [mx_t]\n        Hy_t   = []\n        My_t   = []\n        for d in xrange(self.depth):\n            hs_i       = [Hx_t[-1], Hy_tm1[d]]\n            ms_i       = [Mx_t[-1], My_tm1[d]]\n            xs_i       = [inputs,   T.zeros_like(inputs)]\n\n            hs_o, ms_o = self.grid_(hs_i, ms_i, xs_i, priority=self.priority, identity=self.identity_connect)\n\n            Hx_t      += [hs_o[0]]\n            Hy_t      += [hs_o[1]]\n            Mx_t      += [ms_o[0]]\n            My_t      += [ms_o[1]]\n\n        hx_out = Hx_t[-1]\n        mx_out = Mx_t[-1]\n\n        # get the output (output_y, output_x)\n        # MAX-Pooling\n        if self.pooling:\n            # hy_t       = T.max([self.PP(hy) for hy in Hy_t], axis=0)\n            hy_t       = T.max([self.PP(T.concatenate([hy, inputs], axis=-1)) for hy in Hy_t], axis=0)\n            Hy_t       = [hy_t] * self.depth\n\n        if self.attention:\n            HHy_t      = T.concatenate([hy[:, None, :] for hy in Hy_t], axis=1)  # (nb_samples, n_depth, out_dim1)\n            annotation = self.A(inputs, HHy_t)   # (nb_samples, n_depth)\n            hy_t       = T.sum(HHy_t * annotation[:, :, None], axis=1)           # (nb_samples, out_dim1)\n            Hy_t       = [hy_t] * self.depth\n\n        R = Hy_t + My_t + [hx_out, mx_out]\n        return tuple(R)\n\n    def __call__(self, X, init_H=None, init_M=None,\n                 return_sequence=False, one_step=False,\n                 return_info='hy', train=True):\n        # It is training/testing path\n    
    self.train = train\n\n        # masking is currently not supported.\n        if X.ndim == 2:\n            X = X[:, None, :]\n\n        # one step\n        if one_step:\n            assert init_H is not None, 'previous state must be provided!'\n            assert init_M is not None, 'previous cell must be provided!'\n\n        X = X.dimshuffle((1, 0, 2))\n        if init_H is None:\n            if self.learn_init:\n                init_m     = T.repeat(self.M0[:, None, :], X.shape[1], axis=1)\n                if self.pooling:\n                    init_h = T.repeat(self.H0[None, :], self.depth, axis=0)\n                else:\n                    init_h = self.H0\n                init_h     = T.repeat(init_h[:, None, :], X.shape[1], axis=1)\n\n                init_H     = []\n                init_M     = []\n                for j in xrange(self.depth):\n                    init_H.append(init_h[j])\n                    init_M.append(init_m[j])\n            else:\n                init_H     = [T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dims[1]), 1)] * self.depth\n                init_M     = [T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dims[1]), 1)] * self.depth\n            pass\n\n        # build the computational graph\n        if not one_step:\n            sequences    = [X]\n            outputs_info = init_H + init_M + [None, None]\n            outputs, _   = theano.scan(\n                self._step,\n                sequences=sequences,\n                outputs_info=outputs_info\n            )\n        else:\n            outputs      = self._step(*([X[0]] + init_H + init_M))\n\n        if   return_info == 'hx':\n            if return_sequence:\n                return outputs[-2].dimshuffle((1, 0, 2))   # hx sequence\n            return outputs[-2][-1]\n        elif return_info == 'hy':\n            assert self.pooling or self.attention, 'y-axis hidden states are only used in the ``Pooling Mode\".'\n            if return_sequence:\n                return outputs[2].dimshuffle((1, 0, 2))\n            return outputs[2][-1]\n        elif return_info == 'hxhy':\n            assert self.pooling or self.attention, 'y-axis hidden states are only used in the ``Pooling Mode\".'\n            if return_sequence:\n                return outputs[-2].dimshuffle((1, 0, 2)), outputs[2].dimshuffle((1, 0, 2))    # x-y\n            return outputs[-2][-1], outputs[2][-1]\n\n\nclass PyramidGridLSTM2D(Grid):\n    \"\"\"\n    A variant of the Sequential Grid-LSTM that introduces a Pyramid structure.\n    \"\"\"\n    def __init__(self,\n                 # parameters for Grid.\n                 output_dims,\n                 input_dims,    # [0, ... 
0], 0 represents no external inputs.\n                 priority=1,\n                 peephole=True,\n                 init='glorot_uniform', inner_init='orthogonal',\n                 forget_bias_init='one',\n                 activation='tanh', inner_activation='sigmoid',\n                 use_input=True,\n                 name=None, weights=None,\n                 identity_connect=None,\n\n                 # parameters for 2D-GridLSTM\n                 depth=5,\n                 learn_init=False,\n                 shared=True,\n                 dropout=0\n                 ):\n\n        super(Grid, self).__init__()\n        assert len(output_dims) == 2, 'in this stage, we only support 2D Grid-LSTM'\n        assert len(input_dims)  == len(output_dims), '# of inputs must match # of outputs.'\n        assert output_dims[0] == output_dims[1], 'Here we only support square model.'\n        assert shared, 'we share the weights in this stage.'\n        assert use_input, 'use input and add them in the middle'\n\n        \"\"\"\n        Initialization.\n        \"\"\"\n        logger.info(\":::: Sequential Grid-Pool LSTM ::::\")\n        self.input_dims       = input_dims\n        self.output_dims      = output_dims\n        self.N                = len(output_dims)\n        self.depth            = depth\n        self.dropout          = dropout\n\n        self.priority         = priority\n        self.peephole         = peephole\n        self.use_input        = use_input\n        self.learn_init       = learn_init\n\n        self.init             = initializations.get(init)\n        self.inner_init       = initializations.get(inner_init)\n        self.forget_bias_init = initializations.get(forget_bias_init)\n        self.activation       = activations.get(activation)\n        self.relu             = activations.get('relu')\n        self.inner_activation = activations.get(inner_activation)\n\n        self.identity_connect = identity_connect\n        self.axies            = {0: 'x', 1: 'y', 2: 'z', 3: 'w'}  # only support at most 4D now!\n\n        \"\"\"\n        Build the model weights.\n        \"\"\"\n        # build the centroid grid.\n        self.build()\n\n        # # input projection layer (projected to time-axis)       [x]\n        # self.Ph  = Dense(input_dims[0], output_dims[0], name='Ph')\n        # self.Pm  = Dense(input_dims[0], output_dims[0], name='Pm')\n        #\n        # self._add(self.Ph)\n        # self._add(self.Pm)\n\n        # learn init/\n        if self.learn_init:\n            self.hx0 = self.init((1, output_dims[0]))\n            self.hy0 = self.init((1, output_dims[1]))\n            self.mx0 = self.init((1, output_dims[0]))\n            self.my0 = self.init((1, output_dims[1]))\n\n            self.hx0.name, self.hy0.name = 'hx0', 'hy0'\n            self.mx0.name, self.my0.name = 'mx0', 'my0'\n            self.params += [self.hx0, self.hy0, self.mx0, self.my0]\n\n        \"\"\"\n        Others info.\n        \"\"\"\n        if weights is not None:\n            self.set_weights(weights)\n\n        if name is not None:\n            self.set_name(name)\n\n    def _step(self, *args):\n        inputs = args[0]\n        hx_tm1 = args[1]\n        mx_tm1 = args[2]\n        hy_tm1 = args[3]\n        my_tm1 = args[4]\n\n        # zero constant inputs.\n        pre_info    = [[[T.zeros_like(hx_tm1)\n                         for _ in xrange(self.depth)]\n                         for _ in xrange(self.depth)]\n                         for _ in xrange(4)]  # hx, mx, hy, my\n\n        
pre_inputs  = [[T.zeros_like(inputs)\n                       for _ in xrange(self.depth)]\n                       for _ in xrange(self.depth)]\n\n        for kk in xrange(self.depth):\n            pre_inputs[kk][kk] = inputs\n\n        pre_info[0][0][0] = hx_tm1\n        pre_info[1][0][0] = mx_tm1\n        pre_info[2][0][0] = hy_tm1\n        pre_info[3][0][0] = my_tm1\n\n        for step_x in xrange(self.depth):\n            for step_y in xrange(self.depth):\n                # input hidden/memory/input information\n                print pre_info[0][-1][-1], pre_info[2][-1][-1]\n\n                hs_i  = [pre_info[0][step_x][step_y],\n                         pre_info[2][step_x][step_y]]\n                ms_i  = [pre_info[1][step_x][step_y],\n                         pre_info[3][step_x][step_y]]\n                xs_i  = [pre_inputs[step_x][step_y],\n                         pre_inputs[step_x][step_y]]\n\n                # compute grid-lstm\n                hs_o, ms_o = self.grid_(hs_i, ms_i, xs_i, priority =-1)\n\n                # output hidden/memory information\n                if (step_x == self.depth - 1) and (step_y == self.depth - 1):\n                    hx_t, mx_t, hy_t, my_t = hs_o[0], ms_o[0], hs_o[1], ms_o[1]\n                    return hx_t, mx_t, hy_t, my_t\n\n                if step_x + 1 < self.depth:\n                    pre_info[0][step_x + 1][step_y] = hs_o[0]\n                    pre_info[1][step_x + 1][step_y] = ms_o[0]\n\n                if step_y + 1 < self.depth:\n                    pre_info[2][step_x][step_y + 1] = hs_o[1]\n                    pre_info[3][step_x][step_y + 1] = ms_o[1]\n\n    def __call__(self, X, init_x=None, init_y=None,\n                 return_sequence=False, one_step=False):\n        # recently we did not support masking.\n        if X.ndim == 2:\n            X = X[:, None, :]\n\n        # one step\n        if one_step:\n            assert init_x is not None, 'previous x must be provided!'\n            assert init_y is not None, 'previous y must be provided!'\n\n        X = X.dimshuffle((1, 0, 2))\n        if init_x is None:\n            if self.learn_init:\n                init_mx    = T.repeat(self.mx0, X.shape[1], axis=0)\n                init_my    = T.repeat(self.my0, X.shape[1], axis=0)\n                init_hx    = T.repeat(self.hx0, X.shape[1], axis=0)\n                init_hy    = T.repeat(self.hy0, X.shape[1], axis=0)\n\n                init_input = [init_hx, init_mx, init_hy, init_my]\n            else:\n                init_x     = [T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dims[0]), 1)] * 2\n                init_y     = [T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dims[1]), 1)] * 2\n\n                init_input = init_x + init_y\n        else:\n            init_input = init_x + init_y\n\n        if not one_step:\n            sequence       = [X]\n            output_info    = init_input\n            outputs, _     = theano.scan(\n                self._step,\n                sequences=sequence,\n                outputs_info=output_info\n            )\n        else:\n            outputs        = self._step(*([X[0]] + init_x + init_y))\n\n        if return_sequence:\n            hxs = outputs[0].dimshuffle((1, 0, 2))\n            hys = outputs[2].dimshuffle((1, 0, 2))\n            hs  = T.concatenate([hxs, hys], axis=-1)\n            return hs\n        else:\n            hx  = outputs[0][-1]\n            hy  = outputs[2][-1]\n            h   = T.concatenate([hx, hy], axis=-1)\n            return h\n\n\nclass 
PyramidLSTM(Layer):\n    \"\"\"\n    A more flexible Pyramid LSTM structure!\n    \"\"\"\n    def __init__(self,\n                 # parameters for Grid.\n                 output_dims,\n                 input_dims,    # [0, ... 0], 0 represents no external inputs.\n                 priority=1,\n                 peephole=True,\n                 init='glorot_uniform', inner_init='orthogonal',\n                 forget_bias_init='one',\n                 activation='tanh', inner_activation='sigmoid',\n                 use_input=True,\n                 name=None, weights=None,\n                 identity_connect=None,\n\n                 # parameters for 2D-GridLSTM\n                 depth=5,\n                 learn_init=False,\n                 shared=True,\n                 dropout=0\n                 ):\n\n        super(PyramidLSTM, self).__init__()\n        assert len(output_dims) == 2, 'in this stage, we only support 2D Grid-LSTM'\n        assert len(input_dims)  == len(output_dims), '# of inputs must match # of outputs.'\n        assert output_dims[0] == output_dims[1], 'Here we only support square model.'\n        assert shared, 'we share the weights in this stage.'\n        assert use_input, 'use input and add them in the middle'\n\n        \"\"\"\n        Initialization.\n        \"\"\"\n        logger.info(\":::: Sequential Grid-Pool LSTM ::::\")\n        self.N                = len(output_dims)\n        self.depth            = depth\n        self.dropout          = dropout\n\n        self.priority         = priority\n        self.peephole         = peephole\n        self.use_input        = use_input\n        self.learn_init       = learn_init\n\n        self.init             = initializations.get(init)\n        self.inner_init       = initializations.get(inner_init)\n        self.forget_bias_init = initializations.get(forget_bias_init)\n        self.activation       = activations.get(activation)\n        self.relu             = activations.get('relu')\n        self.inner_activation = activations.get(inner_activation)\n\n        self.identity_connect = identity_connect\n        self.axies            = {0: 'x', 1: 'y', 2: 'z', 3: 'w'}  # only support at most 4D now!\n\n        \"\"\"\n        Build the model weights.\n        \"\"\"\n        # build the centroid grid (3 grid versions)\n        self.grids = [Grid(output_dims,\n                           input_dims,\n                           -1,\n                           peephole,\n                           init, inner_init,\n                           forget_bias_init,\n                           activation, inner_activation, use_input,\n                           name='Grid*{}'.format(k)\n                           ) for k in xrange(3)]\n\n        for k in xrange(3):\n            self.grids[k].build()\n            self._add(self.grids[k])\n\n        # # input projection layer (projected to time-axis)       [x]\n        # self.Ph  = Dense(input_dims[0], output_dims[0], name='Ph')\n        # self.Pm  = Dense(input_dims[0], output_dims[0], name='Pm')\n        #\n        # self._add(self.Ph)\n        # self._add(self.Pm)\n\n        # learn init/\n        if self.learn_init:\n            self.hx0 = self.init((1, output_dims[0]))\n            self.hy0 = self.init((1, output_dims[1]))\n            self.mx0 = self.init((1, output_dims[0]))\n            self.my0 = self.init((1, output_dims[1]))\n\n            self.hx0.name, self.hy0.name = 'hx0', 'hy0'\n            self.mx0.name, self.my0.name = 'mx0', 'my0'\n            self.params += 
[self.hx0, self.hy0, self.mx0, self.my0]\n\n        \"\"\"\n        Others info.\n        \"\"\"\n        if weights is not None:\n            self.set_weights(weights)\n\n        if name is not None:\n            self.set_name(name)\n\n    def _step(self, *args):\n        inputs = args[0]\n        hx_tm1 = args[1]\n        mx_tm1 = args[2]\n        hy_tm1 = args[3]\n        my_tm1 = args[4]\n\n        # zero constant inputs.\n        pre_info    = [[[T.zeros_like(hx_tm1)\n                         for _ in xrange(self.depth)]\n                         for _ in xrange(self.depth)]\n                         for _ in xrange(4)]  # hx, mx, hy, my\n\n        pre_inputs  = [[T.zeros_like(inputs)\n                       for _ in xrange(self.depth)]\n                       for _ in xrange(self.depth)]\n\n        for kk in xrange(self.depth):\n            pre_inputs[kk][kk] = inputs\n\n        pre_info[0][0][0] = hx_tm1\n        pre_info[1][0][0] = mx_tm1\n        pre_info[2][0][0] = hy_tm1\n        pre_info[3][0][0] = my_tm1\n\n        for step_x in xrange(self.depth):\n            for step_y in xrange(self.depth):\n                # input hidden/memory/input information\n                print pre_info[0][-1][-1], pre_info[2][-1][-1]\n\n                hs_i  = [pre_info[0][step_x][step_y],\n                         pre_info[2][step_x][step_y]]\n                ms_i  = [pre_info[1][step_x][step_y],\n                         pre_info[3][step_x][step_y]]\n                xs_i  = [pre_inputs[step_x][step_y],\n                         pre_inputs[step_x][step_y]]\n\n                # compute grid-lstm\n                if (step_x + step_y + 1) < self.depth:\n                    hs_o, ms_o = self.grids[0].grid_(hs_i, ms_i, xs_i, priority =-1)\n                elif (step_x + step_y + 1) == self.depth:\n                    hs_o, ms_o = self.grids[1].grid_(hs_i, ms_i, xs_i, priority =-1)\n                else:\n                    hs_o, ms_o = self.grids[2].grid_(hs_i, ms_i, xs_i, priority =-1)\n\n                # output hidden/memory information\n                if (step_x == self.depth - 1) and (step_y == self.depth - 1):\n                    hx_t, mx_t, hy_t, my_t = hs_o[0], ms_o[0], hs_o[1], ms_o[1]\n                    return hx_t, mx_t, hy_t, my_t\n\n                if step_x + 1 < self.depth:\n                    pre_info[0][step_x + 1][step_y] = hs_o[0]\n                    pre_info[1][step_x + 1][step_y] = ms_o[0]\n\n                if step_y + 1 < self.depth:\n                    pre_info[2][step_x][step_y + 1] = hs_o[1]\n                    pre_info[3][step_x][step_y + 1] = ms_o[1]\n\n    def __call__(self, X, init_x=None, init_y=None,\n                 return_sequence=False, one_step=False):\n        # recently we did not support masking.\n        if X.ndim == 2:\n            X = X[:, None, :]\n\n        # one step\n        if one_step:\n            assert init_x is not None, 'previous x must be provided!'\n            assert init_y is not None, 'previous y must be provided!'\n\n        X = X.dimshuffle((1, 0, 2))\n        if init_x is None:\n            if self.learn_init:\n                init_mx    = T.repeat(self.mx0, X.shape[1], axis=0)\n                init_my    = T.repeat(self.my0, X.shape[1], axis=0)\n                init_hx    = T.repeat(self.hx0, X.shape[1], axis=0)\n                init_hy    = T.repeat(self.hy0, X.shape[1], axis=0)\n\n                init_input = [init_hx, init_mx, init_hy, init_my]\n            else:\n                init_x     = 
[T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dims[0]), 1)] * 2\n                init_y     = [T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dims[1]), 1)] * 2\n\n                init_input = init_x + init_y\n        else:\n            init_input = init_x + init_y\n\n        if not one_step:\n            sequence       = [X]\n            output_info    = init_input\n            outputs, _     = theano.scan(\n                self._step,\n                sequences=sequence,\n                outputs_info=output_info\n            )\n        else:\n            outputs        = self._step(*([X[0]] + init_x + init_y))\n\n        if return_sequence:\n            hxs = outputs[0].dimshuffle((1, 0, 2))\n            hys = outputs[2].dimshuffle((1, 0, 2))\n            hs  = T.concatenate([hxs, hys], axis=-1)\n            return hs\n        else:\n            hx  = outputs[0][-1]\n            hy  = outputs[2][-1]\n            h   = T.concatenate([hx, hy], axis=-1)\n            return h"
  },
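  {
    "path": "examples/grid_usage_sketch.py",
    "content": "# -*- coding: utf-8 -*-\n\"\"\"\nA minimal usage sketch for SequentialGridLSTM, added for illustration only;\nit is not part of the original codebase. The import path emolga.layers.grid\nand every dimension below are assumptions. Shapes follow the comments in\nSequentialGridLSTM._step: inputs are (nb_samples, time, input_dim); with\npooling enabled and return_sequence=True, the pooled y-axis states come back\nas (nb_samples, time, output_dims[1]).\n\"\"\"\nimport numpy as np\nimport theano\nimport theano.tensor as T\n\nfrom emolga.layers.grid import SequentialGridLSTM  # assumed module path\n\n# a 2D Grid-LSTM: the x-axis carries the projected input, the y-axis is depth;\n# input_dims[1] must be 0 (no y-axis inputs), per the assertion in __init__.\nlayer = SequentialGridLSTM(output_dims=[64, 64],\n                           input_dims=[32, 0],\n                           depth=5,\n                           pooling=True,\n                           name='grid2d')\n\nX  = T.tensor3('X')    # (nb_samples, time, input_dim)\nhy = layer(X, return_sequence=True, return_info='hy')\nf  = theano.function([X], hy)\n\nx = np.random.randn(8, 11, 32).astype(theano.config.floatX)\nprint f(x).shape       # expected: (8, 11, 64)\n"
  },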
  {
    "path": "emolga/layers/ntm_minibatch.py",
    "content": "__author__ = 'jiataogu'\nimport theano\nimport theano.tensor as T\n\nimport scipy.linalg as sl\nimport numpy as np\nfrom .core import *\nfrom .recurrent import *\nimport copy\n\n\"\"\"\nThis implementation supports both minibatch learning and on-line training.\nWe need a minibatch version for Neural Turing Machines.\n\"\"\"\n\n\nclass Reader(Layer):\n    \"\"\"\n        \"Reader Head\" of the Neural Turing Machine.\n    \"\"\"\n\n    def __init__(self, input_dim, memory_width, shift_width, shift_conv,\n                 init='glorot_uniform', inner_init='orthogonal',\n                 name=None):\n        super(Reader, self).__init__()\n        self.input_dim = input_dim\n        self.memory_dim = memory_width\n\n        self.init = initializations.get(init)\n        self.inner_init = initializations.get(inner_init)\n\n        self.tanh = activations.get('tanh')\n        self.sigmoid = activations.get('sigmoid')\n        self.softplus = activations.get('softplus')\n        self.vec_softmax = activations.get('vector_softmax')\n        self.softmax = activations.get('softmax')\n\n        \"\"\"\n        Reader Params.\n        \"\"\"\n        self.W_key = self.init((input_dim, memory_width))\n        self.W_shift = self.init((input_dim, shift_width))\n        self.W_beta = self.init(input_dim)\n        self.W_gama = self.init(input_dim)\n        self.W_g = self.init(input_dim)\n\n        self.b_key = shared_zeros(memory_width)\n        self.b_shift = shared_zeros(shift_width)\n        self.b_beta = theano.shared(floatX(0))\n        self.b_gama = theano.shared(floatX(0))\n        self.b_g = theano.shared(floatX(0))\n\n        self.shift_conv = shift_conv\n\n        # add params and set names.\n        self.params = [self.W_key, self.W_shift, self.W_beta, self.W_gama, self.W_g,\n                       self.b_key, self.b_shift, self.b_beta, self.b_gama, self.b_g]\n\n        self.W_key.name, self.W_shift.name, self.W_beta.name, \\\n        self.W_gama.name, self.W_g.name = 'W_key', 'W_shift', 'W_beta', \\\n                                          'W_gama', 'W_g'\n\n        self.b_key.name, self.b_shift.name, self.b_beta.name, \\\n        self.b_gama.name, self.b_g.name = 'b_key', 'b_shift', 'b_beta', \\\n                                          'b_gama', 'b_g'\n\n    def __call__(self, X, w_temp, m_temp):\n        # input dimensions\n        # X:      (nb_samples, input_dim)\n        # w_temp: (nb_samples, memory_dim)\n        # m_temp: (nb_samples, memory_dim, memory_width) ::tensor_memory\n\n        key = dot(X, self.W_key, self.b_key)  # (nb_samples, memory_width)\n        shift = self.softmax(\n            dot(X, self.W_shift, self.b_shift))  # (nb_samples, shift_width)\n\n        beta = self.softplus(dot(X, self.W_beta, self.b_beta))[:, None]  # (nb_samples, x)\n        gamma = self.softplus(dot(X, self.W_gama, self.b_gama)) + 1.  
# (nb_samples,)\n        gamma = gamma[:, None]  # (nb_samples, x)\n        g = self.sigmoid(dot(X, self.W_g, self.b_g))[:, None]  # (nb_samples, x)\n\n        signal = [key, shift, beta, gamma, g]\n\n        w_c = self.softmax(\n            beta * cosine_sim2d(key, m_temp))  # (nb_samples, memory_dim) //content-based addressing\n        w_g = g * w_c + (1 - g) * w_temp  # (nb_samples, memory_dim) //history interpolation\n        w_s = shift_convolve2d(w_g, shift, self.shift_conv)  # (nb_samples, memory_dim) //convolutional shift\n        w_p = w_s ** gamma  # (nb_samples, memory_dim) //sharpening\n        w_t = w_p / T.sum(w_p, axis=1)[:, None]  # (nb_samples, memory_dim)\n        return w_t\n\n\nclass Writer(Reader):\n    \"\"\"\n        \"Writer head\" of the Neural Turing Machine\n    \"\"\"\n\n    def __init__(self, input_dim, memory_width, shift_width, shift_conv,\n                 init='glorot_uniform', inner_init='orthogonal',\n                 name=None):\n        super(Writer, self).__init__(input_dim, memory_width, shift_width, shift_conv,\n                                     init, inner_init, name)\n\n        \"\"\"\n        Writer Params.\n        \"\"\"\n        self.W_erase = self.init((input_dim, memory_width))\n        self.W_add = self.init((input_dim, memory_width))\n\n        self.b_erase = shared_zeros(memory_width)\n        self.b_add = shared_zeros(memory_width)\n\n        # add params and set names.\n        self.params += [self.W_erase, self.W_add, self.b_erase, self.b_add]\n\n        self.W_erase.name, self.W_add.name = 'W_erase', 'W_add'\n        self.b_erase.name, self.b_add.name = 'b_erase', 'b_add'\n\n    def get_fixer(self, X):\n        erase = self.sigmoid(dot(X, self.W_erase, self.b_erase))  # (nb_samples, memory_width)\n        add   = self.sigmoid(dot(X, self.W_add, self.b_add))  # (nb_samples, memory_width)\n        return erase, add\n\n\nclass Controller(Recurrent):\n    \"\"\"\n    Controller used in Neural Turing Machine.\n        - Core cell (Memory)\n        - Reader head\n        - Writer head\n    It is a simple RNN version. 
In reality the Neural Turing Machine will use the LSTM cell.\n    \"\"\"\n\n    def __init__(self,\n                 input_dim,\n                 memory_dim,\n                 memory_width,\n                 hidden_dim,\n                 shift_width=3,\n                 init='glorot_uniform',\n                 inner_init='orthogonal',\n                 name=None,\n                 readonly=False,\n                 curr_input=False,\n                 recurrence=False,\n                 memorybook=None\n                 ):\n        super(Controller, self).__init__()\n        # Initialization of the dimensions.\n        self.input_dim     = input_dim\n        self.memory_dim    = memory_dim\n        self.memory_width  = memory_width\n        self.hidden_dim    = hidden_dim\n        self.shift_width   = shift_width\n\n        self.init          = initializations.get(init)\n        self.inner_init    = initializations.get(inner_init)\n        self.tanh          = activations.get('tanh')\n        self.softmax       = activations.get('softmax')\n        self.vec_softmax   = activations.get('vector_softmax')\n\n        self.readonly      = readonly\n        self.curr_input    = curr_input\n        self.recurrence    = recurrence\n        self.memorybook    = memorybook\n\n        \"\"\"\n        Controller Module.\n        \"\"\"\n        # hidden projection:\n        self.W_in          = self.init((input_dim, hidden_dim))\n        self.b_in          = shared_zeros(hidden_dim)\n        self.W_rd          = self.init((memory_width, hidden_dim))\n        self.W_in.name     = 'W_in'\n        self.b_in.name     = 'b_in'\n        self.W_rd.name     = 'W_rd'\n        self.params        = [self.W_in, self.b_in, self.W_rd]\n\n        # use recurrence:\n        if self.recurrence:\n            self.W_hh      = self.inner_init((hidden_dim, hidden_dim))\n            self.W_hh.name = 'W_hh'\n            self.params   += [self.W_hh]\n\n        # Shift convolution\n        shift_conv         = sl.circulant(np.arange(memory_dim)).T[\n                                np.arange(-(shift_width // 2), (shift_width // 2) + 1)][::-1]\n\n        # use the current input for weights.\n        if self.curr_input:\n            controller_size = self.input_dim + self.hidden_dim\n        else:\n            controller_size = self.hidden_dim\n\n        # write head\n        if not readonly:\n            self.writer    = Writer(controller_size, memory_width, shift_width, shift_conv, name='writer')\n            self.writer.set_name('writer')\n            self._add(self.writer)\n\n        # read head\n        self.reader        = Reader(controller_size, memory_width, shift_width, shift_conv, name='reader')\n        self.reader.set_name('reader')\n        self._add(self.reader)\n\n        # ***********************************************************\n        # reserved for None initialization (we don't use these often)\n        self.memory_init   = self.init((memory_dim, memory_width))\n        self.w_write_init  = self.softmax(np.random.rand(1, memory_dim).astype(theano.config.floatX))\n        self.w_read_init   = self.softmax(np.random.rand(1, memory_dim).astype(theano.config.floatX))\n        self.contr_init    = self.tanh(np.random.rand(1, hidden_dim).astype(theano.config.floatX))\n\n        if name is not None:\n            self.set_name(name)\n\n    def _controller(self, input_t, read_t, controller_tm1=None):\n        # input_t : (nb_sample, input_dim)\n        # read_t  : (nb_sample, memory_width)\n        # controller_tm1: 
(nb_sample, hidden_dim)\n        if self.recurrence:\n            return self.tanh(dot(input_t, self.W_in) +\n                             dot(controller_tm1, self.W_hh) +\n                             dot(read_t, self.W_rd)  +\n                             self.b_in)\n        else:\n            return self.tanh(dot(input_t, self.W_in) +\n                             dot(read_t, self.W_rd)  +\n                             self.b_in)\n\n    @staticmethod\n    def _read(w_read, memory):\n        # w_read : (nb_sample, memory_dim)\n        # memory : (nb_sample, memory_dim, memory_width)\n        # return dot(w_read, memory)\n\n        return T.sum(w_read[:, :, None] * memory, axis=1)\n\n    @staticmethod\n    def _write(w_write, memory, erase, add):\n        # w_write: (nb_sample, memory_dim)\n        # memory : (nb_sample, memory_dim, memory_width)\n        # erase/add: (nb_sample, memory_width)\n\n        w_write  = w_write[:, :, None]\n        erase    = erase[:, None, :]\n        add      = add[:, None, :]\n\n        m_erased = memory * (1 - w_write * erase)\n        memory_t = m_erased + w_write * add  # (nb_sample, memory_dim, memory_width)\n        return memory_t\n\n    def _step(self, input_t, mask_t,\n              memory_tm1,\n              w_write_tm1, w_read_tm1,\n              controller_tm1):\n        # input_t:     (nb_sample, input_dim)\n        # memory_tm1:  (nb_sample, memory_dim, memory_width)\n        # w_write_tm1: (nb_sample, memory_dim)\n        # w_read_tm1:  (nb_sample, memory_dim)\n        # controller_tm1: (nb_sample, hidden_dim)\n\n        # read the memory\n        if self.curr_input:\n            info     = T.concatenate((controller_tm1, input_t), axis=1)\n            w_read_t = self.reader(info, w_read_tm1, memory_tm1)\n            read_tm1 = self._read(w_read_t, memory_tm1)\n        else:\n            read_tm1 = self._read(w_read_tm1, memory_tm1)       # (nb_sample, memory_width)\n\n        # get the new controller (hidden states.)\n        if self.recurrence:\n            controller_t = self._controller(input_t, read_tm1, controller_tm1)\n        else:\n            controller_t = self._controller(input_t, read_tm1)  # (nb_sample, controller_size)\n\n        # update the memory cell (if need)\n        if not self.readonly:\n            if self.curr_input:\n                infow          = T.concatenate((controller_t, input_t), axis=1)\n                w_write_t      = self.writer(infow, w_write_tm1, memory_tm1)     # (nb_sample, memory_dim)\n                erase_t, add_t = self.writer.get_fixer(infow)                    # (nb_sample, memory_width)\n            else:\n                w_write_t      = self.writer(controller_t, w_write_tm1, memory_tm1)\n                erase_t, add_t = self.writer.get_fixer(controller_t)\n            memory_t           = self._write(w_write_t, memory_tm1, erase_t, add_t)  # (nb_sample, memory_dim, memory_width)\n        else:\n            w_write_t          = w_write_tm1\n            memory_t           = memory_tm1\n\n        # get the next reading weights.\n        if not self.curr_input:\n            w_read_t           = self.reader(controller_t, w_read_tm1, memory_t)  # (nb_sample, memory_dim)\n\n        # over masking\n        memory_t     = memory_t     * mask_t[:, :, None] + memory_tm1 * (1 - mask_t[:, :, None])\n        w_read_t     = w_read_t     * mask_t + w_read_tm1     * (1 - mask_t)\n        w_write_t    = w_write_t    * mask_t + w_write_tm1    * (1 - mask_t)\n        controller_t = controller_t * mask_t + 
controller_tm1 * (1 - mask_t)\n\n        return memory_t, w_write_t, w_read_t, controller_t\n\n    def __call__(self, X, mask=None, M=None, init_ww=None,\n                 init_wr=None, init_c=None, return_sequence=False,\n                 one_step=False, return_full=False):\n        # recurrent cell only work for tensor.\n        if X.ndim == 2:\n            X = X[:, None, :]\n        nb_samples = X.shape[0]\n\n        # mask\n        if mask is None:\n            mask = T.alloc(1., X.shape[0], 1)\n\n        padded_mask = self.get_padded_shuffled_mask(mask, pad=0)\n        X = X.dimshuffle((1, 0, 2))\n\n        # ***********************************************************************\n        # initialization states\n        if M is None:\n            memory_init  = T.repeat(self.memory_init[None, :, :], nb_samples, axis=0)\n        else:\n            memory_init  = M\n\n        if init_wr is None:\n            w_read_init  = T.repeat(self.w_read_init, nb_samples, axis=0)\n        else:\n            w_read_init  = init_wr\n\n        if init_ww is None:\n            w_write_init = T.repeat(self.w_write_init, nb_samples, axis=0)\n        else:\n            w_write_init = init_ww\n\n        if init_c is None:\n            contr_init   = T.repeat(self.contr_init, nb_samples, axis=0)\n        else:\n            contr_init   = init_c\n        # ************************************************************************\n\n        outputs_info = [memory_init, w_write_init, w_read_init, contr_init]\n\n        if one_step:\n            seq = [X[0], padded_mask[0]]\n            outputs = self._step(*(seq + outputs_info))\n            return outputs\n        else:\n            seq = [X, padded_mask]\n            outputs, _ = theano.scan(\n                self._step,\n                sequences=seq,\n                outputs_info=outputs_info,\n                name='controller_recurrence'\n            )\n\n        self.monitor['memory_info']   = outputs[0]\n        self.monitor['write_weights'] = outputs[1]\n        self.monitor['read_weights']  = outputs[2]\n\n        if not return_full:\n            if return_sequence:\n                return outputs[-1].dimshuffle((1, 0, 2))\n            return outputs[-1][-1]\n        else:\n            if return_sequence:\n                return [a.dimshuffle((1, 0, 2)) for a in outputs]\n            return [a[-1] for a in outputs]\n\n\nclass AttentionReader(Layer):\n    \"\"\"\n        \"Reader Head\" of the Neural Turing Machine.\n    \"\"\"\n\n    def __init__(self, input_dim, memory_width, shift_width, shift_conv,\n                 init='glorot_uniform', inner_init='orthogonal',\n                 name=None):\n        super(AttentionReader, self).__init__()\n        self.input_dim = input_dim\n        self.memory_dim = memory_width\n\n        self.init = initializations.get(init)\n        self.inner_init = initializations.get(inner_init)\n\n        self.tanh = activations.get('tanh')\n        self.sigmoid = activations.get('sigmoid')\n        self.softplus = activations.get('softplus')\n        self.vec_softmax = activations.get('vector_softmax')\n        self.softmax = activations.get('softmax')\n\n        \"\"\"\n        Reader Params.\n        \"\"\"\n        self.W_key   = self.init((input_dim, memory_width))\n        self.W_lock  = self.inner_init((memory_width, memory_width))\n\n        self.W_shift = self.init((input_dim, shift_width))\n        self.W_beta = self.init(input_dim)\n        self.W_gama = self.init(input_dim)\n        self.W_g = 
self.init(input_dim)\n\n        # self.v     = self.init(memory_width)\n        self.b_key = shared_zeros(memory_width)\n        self.b_shift = shared_zeros(shift_width)\n        self.b_beta = theano.shared(floatX(0))\n        self.b_gama = theano.shared(floatX(0))\n        self.b_g = theano.shared(floatX(0))\n\n        self.shift_conv = shift_conv\n\n        # add params and set names.\n        self.params = [self.W_key, self.W_shift, self.W_beta, self.W_gama, self.W_g,\n                       self.b_key, self.b_shift, self.b_beta, self.b_gama, self.b_g,\n                       self.W_lock]\n\n        self.W_key.name, self.W_shift.name, self.W_beta.name, \\\n        self.W_gama.name, self.W_g.name = 'W_key', 'W_shift', 'W_beta', \\\n                                          'W_gama', 'W_g'\n        self.W_lock.name  = 'W_lock'\n\n        self.b_key.name, self.b_shift.name, self.b_beta.name, \\\n        self.b_gama.name, self.b_g.name = 'b_key', 'b_shift', 'b_beta', \\\n                                          'b_gama', 'b_g'\n\n    def __call__(self, X, w_temp, m_temp):\n        # input dimensions\n        # X:      (nb_samples, input_dim)\n        # w_temp: (nb_samples, memory_dim)\n        # m_temp: (nb_samples, memory_dim, memory_width) ::tensor_memory\n\n        key   = dot(X, self.W_key, self.b_key)  # (nb_samples, memory_width)\n        lock  = dot(m_temp, self.W_lock)        # (nb_samples, memory_dim, memory_width)\n        shift = self.softmax(\n            dot(X, self.W_shift, self.b_shift))  # (nb_samples, shift_width)\n\n        beta = self.softplus(dot(X, self.W_beta, self.b_beta))[:, None]  # (nb_samples, x)\n        gamma = self.softplus(dot(X, self.W_gama, self.b_gama)) + 1.  # (nb_samples,)\n        gamma = gamma[:, None]  # (nb_samples, x)\n        g = self.sigmoid(dot(X, self.W_g, self.b_g))[:, None]  # (nb_samples, x)\n\n        signal = [key, shift, beta, gamma, g]\n\n        energy = T.sum(key[:, None, :] * lock, axis=2)\n        # energy = T.tensordot(key[:, None, :] + lock, self.v, [2, 0])\n        w_c    = self.softmax(beta * energy)\n        # w_c = self.softmax(\n        #     beta * cosine_sim2d(key, m_temp))  # (nb_samples, memory_dim) //content-based addressing\n        w_g = g * w_c + (1 - g) * w_temp  # (nb_samples, memory_dim) //history interpolation\n        w_s = shift_convolve2d(w_g, shift, self.shift_conv)  # (nb_samples, memory_dim) //convolutional shift\n        w_p = w_s ** gamma  # (nb_samples, memory_dim) //sharpening\n        w_t = w_p / T.sum(w_p, axis=1)[:, None]  # (nb_samples, memory_dim)\n        return w_t\n\n\nclass AttentionWriter(AttentionReader):\n    \"\"\"\n        \"Writer head\" of the Neural Turing Machine\n    \"\"\"\n\n    def __init__(self, input_dim, memory_width, shift_width, shift_conv,\n                 init='glorot_uniform', inner_init='orthogonal',\n                 name=None):\n        super(AttentionWriter, self).__init__(input_dim, memory_width, shift_width, shift_conv,\n                                     init, inner_init, name)\n\n        \"\"\"\n        Writer Params.\n        \"\"\"\n        self.W_erase = self.init((input_dim, memory_width))\n        self.W_add = self.init((input_dim, memory_width))\n\n        self.b_erase = shared_zeros(memory_width)\n        self.b_add = shared_zeros(memory_width)\n\n        # add params and set names.\n        self.params += [self.W_erase, self.W_add, self.b_erase, self.b_add]\n\n        self.W_erase.name, self.W_add.name = 'W_erase', 'W_add'\n        self.b_erase.name, 
self.b_add.name = 'b_erase', 'b_add'\n\n    def get_fixer(self, X):\n        erase = self.sigmoid(dot(X, self.W_erase, self.b_erase))  # (nb_samples, memory_width)\n        add   = self.sigmoid(dot(X, self.W_add, self.b_add))  # (nb_samples, memory_width)\n        return erase, add\n\n\n\nclass BernoulliController(Recurrent):\n    \"\"\"\n    Controller used in Neural Turing Machine.\n        - Core cell (Memory): binary memory\n        - Reader head\n        - Writer head\n    It is a simple RNN version. In reality the Neural Turing Machine will use the LSTM cell.\n    \"\"\"\n\n    def __init__(self,\n                 input_dim,\n                 memory_dim,\n                 memory_width,\n                 hidden_dim,\n                 shift_width=3,\n                 init='glorot_uniform',\n                 inner_init='orthogonal',\n                 name=None,\n                 readonly=False,\n                 curr_input=False,\n                 recurrence=False,\n                 memorybook=None\n                 ):\n        super(BernoulliController, self).__init__()\n        # Initialization of the dimensions.\n        self.input_dim     = input_dim\n        self.memory_dim    = memory_dim\n        self.memory_width  = memory_width\n        self.hidden_dim    = hidden_dim\n        self.shift_width   = shift_width\n\n        self.init          = initializations.get(init)\n        self.inner_init    = initializations.get(inner_init)\n        self.tanh          = activations.get('tanh')\n        self.softmax       = activations.get('softmax')\n        self.vec_softmax   = activations.get('vector_softmax')\n        self.sigmoid       = activations.get('sigmoid')\n\n        self.readonly      = readonly\n        self.curr_input    = curr_input\n        self.recurrence    = recurrence\n        self.memorybook    = memorybook\n\n        \"\"\"\n        Controller Module.\n        \"\"\"\n        # hidden projection:\n        self.W_in          = self.init((input_dim, hidden_dim))\n        self.b_in          = shared_zeros(hidden_dim)\n        self.W_rd          = self.init((memory_width, hidden_dim))\n        self.W_in.name     = 'W_in'\n        self.b_in.name     = 'b_in'\n        self.W_rd.name     = 'W_rd'\n        self.params        = [self.W_in, self.b_in, self.W_rd]\n\n        # use recurrence:\n        if self.recurrence:\n            self.W_hh      = self.inner_init((hidden_dim, hidden_dim))\n            self.W_hh.name = 'W_hh'\n            self.params   += [self.W_hh]\n\n        # Shift convolution\n        shift_conv         = sl.circulant(np.arange(memory_dim)).T[\n                                np.arange(-(shift_width // 2), (shift_width // 2) + 1)][::-1]\n\n        # use the current input for weights.\n        if self.curr_input:\n            controller_size = self.input_dim + self.hidden_dim\n        else:\n            controller_size = self.hidden_dim\n\n        # write head\n        if not readonly:\n            self.writer    = AttentionWriter(controller_size, memory_width, shift_width, shift_conv, name='writer')\n            self.writer.set_name('writer')\n            self._add(self.writer)\n\n        # read head\n        self.reader        = AttentionReader(controller_size, memory_width, shift_width, shift_conv, name='reader')\n        self.reader.set_name('reader')\n        self._add(self.reader)\n\n        # ***********************************************************\n        # reserved for None initialization (we don't use these often)\n        self.memory_init   = 
self.sigmoid(self.init((memory_dim, memory_width)))\n        self.w_write_init  = self.softmax(np.random.rand(1, memory_dim).astype(theano.config.floatX))\n        self.w_read_init   = self.softmax(np.random.rand(1, memory_dim).astype(theano.config.floatX))\n        self.contr_init    = self.tanh(np.random.rand(1, hidden_dim).astype(theano.config.floatX))\n\n        if name is not None:\n            self.set_name(name)\n\n    def _controller(self, input_t, read_t, controller_tm1=None):\n        # input_t : (nb_sample, input_dim)\n        # read_t  : (nb_sample, memory_width)\n        # controller_tm1: (nb_sample, hidden_dim)\n        if self.recurrence:\n            return self.tanh(dot(input_t, self.W_in) +\n                             dot(controller_tm1, self.W_hh) +\n                             dot(read_t, self.W_rd)  +\n                             self.b_in)\n        else:\n            return self.tanh(dot(input_t, self.W_in) +\n                             dot(read_t, self.W_rd)  +\n                             self.b_in)\n\n    @staticmethod\n    def _read(w_read, memory):\n        # w_read : (nb_sample, memory_dim)\n        # memory : (nb_sample, memory_dim, memory_width)\n        # return dot(w_read, memory)\n\n        return T.sum(w_read[:, :, None] * memory, axis=1)\n\n    @staticmethod\n    def _write(w_write, memory, erase, add):\n        # w_write: (nb_sample, memory_dim)\n        # memory : (nb_sample, memory_dim, memory_width)\n        # erase/add: (nb_sample, memory_width)\n\n        w_write  = w_write[:, :, None]\n        erase    = erase[:, None, :]     # erase is a gate.\n        add      = add[:, None, :]       # add is a bias\n\n        # m_erased = memory * (1 - w_write * erase)\n        # memory_t = m_erased + w_write * add  # (nb_sample, memory_dim, memory_width)\n        memory_t = memory * (1 - w_write * erase) + \\\n                   add * w_write * (1 - erase)\n\n        return memory_t\n\n    def _step(self, input_t, mask_t,\n              memory_tm1,\n              w_write_tm1, w_read_tm1,\n              controller_tm1):\n        # input_t:     (nb_sample, input_dim)\n        # memory_tm1:  (nb_sample, memory_dim, memory_width)\n        # w_write_tm1: (nb_sample, memory_dim)\n        # w_read_tm1:  (nb_sample, memory_dim)\n        # controller_tm1: (nb_sample, hidden_dim)\n\n        # read the memory\n        if self.curr_input:\n            info     = T.concatenate((controller_tm1, input_t), axis=1)\n            w_read_t = self.reader(info, w_read_tm1, memory_tm1)\n            read_tm1 = self._read(w_read_t, memory_tm1)\n        else:\n            read_tm1 = self._read(w_read_tm1, memory_tm1)       # (nb_sample, memory_width)\n\n        # get the new controller (hidden states.)\n        if self.recurrence:\n            controller_t = self._controller(input_t, read_tm1, controller_tm1)\n        else:\n            controller_t = self._controller(input_t, read_tm1)  # (nb_sample, controller_size)\n\n        # update the memory cell (if need)\n        if not self.readonly:\n            if self.curr_input:\n                infow          = T.concatenate((controller_t, input_t), axis=1)\n                w_write_t      = self.writer(infow, w_write_tm1, memory_tm1)     # (nb_sample, memory_dim)\n                erase_t, add_t = self.writer.get_fixer(infow)                    # (nb_sample, memory_width)\n            else:\n                w_write_t      = self.writer(controller_t, w_write_tm1, memory_tm1)\n                erase_t, add_t = 
self.writer.get_fixer(controller_t)\n            memory_t           = self._write(w_write_t, memory_tm1, erase_t, add_t)  # (nb_sample, memory_dim, memory_width)\n        else:\n            w_write_t          = w_write_tm1\n            memory_t           = memory_tm1\n\n        # get the next reading weights.\n        if not self.curr_input:\n            w_read_t           = self.reader(controller_t, w_read_tm1, memory_t)  # (nb_sample, memory_dim)\n\n        # over masking\n        memory_t     = memory_t     * mask_t[:, :, None] + memory_tm1 * (1 - mask_t[:, :, None])\n        w_read_t     = w_read_t     * mask_t + w_read_tm1     * (1 - mask_t)\n        w_write_t    = w_write_t    * mask_t + w_write_tm1    * (1 - mask_t)\n        controller_t = controller_t * mask_t + controller_tm1 * (1 - mask_t)\n\n        return memory_t, w_write_t, w_read_t, controller_t\n\n    def __call__(self, X, mask=None, M=None, init_ww=None,\n                 init_wr=None, init_c=None, return_sequence=False,\n                 one_step=False, return_full=False):\n        # recurrent cell only work for tensor.\n        if X.ndim == 2:\n            X = X[:, None, :]\n        nb_samples = X.shape[0]\n\n        # mask\n        if mask is None:\n            mask = T.alloc(1., X.shape[0], 1)\n\n        padded_mask = self.get_padded_shuffled_mask(mask, pad=0)\n        X = X.dimshuffle((1, 0, 2))\n\n        # ***********************************************************************\n        # initialization states\n        if M is None:\n            memory_init  = T.repeat(self.memory_init[None, :, :], nb_samples, axis=0)\n        else:\n            memory_init  = M\n\n        if init_wr is None:\n            w_read_init  = T.repeat(self.w_read_init, nb_samples, axis=0)\n        else:\n            w_read_init  = init_wr\n\n        if init_ww is None:\n            w_write_init = T.repeat(self.w_write_init, nb_samples, axis=0)\n        else:\n            w_write_init = init_ww\n\n        if init_c is None:\n            contr_init   = T.repeat(self.contr_init, nb_samples, axis=0)\n        else:\n            contr_init   = init_c\n        # ************************************************************************\n\n        outputs_info = [memory_init, w_write_init, w_read_init, contr_init]\n\n        if one_step:\n            seq = [X[0], padded_mask[0]]\n            outputs = self._step(*(seq + outputs_info))\n            return outputs\n        else:\n            seq = [X, padded_mask]\n            outputs, _ = theano.scan(\n                self._step,\n                sequences=seq,\n                outputs_info=outputs_info,\n                name='controller_recurrence'\n            )\n\n        self.monitor['memory_info'] = outputs\n\n        if not return_full:\n            if return_sequence:\n                return outputs[-1].dimshuffle((1, 0, 2))\n            return outputs[-1][-1]\n        else:\n            if return_sequence:\n                return [a.dimshuffle((1, 0, 2)) for a in outputs]\n            return [a[-1] for a in outputs]"
  },
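  {
    "path": "examples/ntm_write_demo.py",
    "content": "# -*- coding: utf-8 -*-\n\"\"\"\nA minimal numpy sketch (NOT part of the original package) of the memory\nupdate rule used by Controller._write in emolga/layers/ntm_minibatch.py.\nThe file name and the toy shapes below are illustrative assumptions; only\nthe broadcasting logic mirrors the layer code above.\n\"\"\"\nimport numpy as np\n\n\ndef write(memory, w_write, erase, add):\n    # memory : (nb_sample, memory_dim, memory_width)\n    # w_write: (nb_sample, memory_dim)    write weights over memory slots\n    # erase  : (nb_sample, memory_width)  erase gate per memory column\n    # add    : (nb_sample, memory_width)  add vector per memory column\n    w = w_write[:, :, None]\n    e = erase[:, None, :]\n    a = add[:, None, :]\n    # the same expression as Controller._write:\n    return memory * (1 - w * e) + a * w * (1 - e)\n\n\nif __name__ == '__main__':\n    rng = np.random.RandomState(0)\n    nb, dim, width = 2, 4, 3\n    out = write(rng.rand(nb, dim, width),\n                rng.rand(nb, dim),\n                rng.rand(nb, width),\n                rng.rand(nb, width))\n    print out.shape  # (2, 4, 3)\n"
  },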
  {
    "path": "emolga/layers/recurrent.py",
    "content": "# -*- coding: utf-8 -*-\nfrom abc import abstractmethod\nfrom .core import *\n\n\nclass Recurrent(MaskedLayer):\n    \"\"\"\n        Recurrent Neural Network\n    \"\"\"\n\n    @staticmethod\n    def get_padded_shuffled_mask(mask, pad=0):\n        \"\"\"\n        What's going on here?\n            [1] change the 2D matrix into 3D.\n            [2]\n        \"\"\"\n        assert mask, 'mask cannot be None'\n        # mask is (nb_samples, time)\n        mask = T.shape_padright(mask)    # (nb_samples, time, 1)\n        mask = T.addbroadcast(mask, -1)\n        mask = mask.dimshuffle(1, 0, 2)  # (time, nb_samples, 1)\n\n        if pad > 0:\n            # left-pad in time with 0\n            padding = alloc_zeros_matrix(pad, mask.shape[1], 1)\n            mask = T.concatenate([padding, mask], axis=0)\n        return mask.astype('int8')\n\n\nclass GRU(Recurrent):\n    \"\"\"\n        Gated Recurrent Unit - Cho et al. 2014\n\n        Acts as a spatio-temporal projection,\n        turning a sequence of vectors into a single vector.\n\n        Eats inputs with shape:\n        (nb_samples, max_sample_length (samples shorter than this are padded with zeros at the end), input_dim)\n\n        and returns outputs with shape:\n        if not return_sequences:\n            (nb_samples, output_dim)\n        if return_sequences:\n            (nb_samples, max_sample_length, output_dim)\n\n        References:\n            On the Properties of Neural Machine Translation: Encoder–Decoder Approaches\n                http://www.aclweb.org/anthology/W14-4012\n            Empirical Evaluation of Gated Recurrent Neural Networks on Sequence Modeling\n                http://arxiv.org/pdf/1412.3555v1.pdf\n    \"\"\"\n\n    def __init__(self,\n                 input_dim,\n                 output_dim=128,\n                 context_dim=None,\n                 init='glorot_uniform', inner_init='orthogonal',\n                 activation='tanh', inner_activation='sigmoid',\n                 name=None, weights=None):\n\n        super(GRU, self).__init__()\n        \"\"\"\n        Standard GRU.\n        \"\"\"\n        self.input_dim = input_dim\n        self.output_dim = output_dim\n\n        self.init = initializations.get(init)\n        self.inner_init = initializations.get(inner_init)\n        self.activation = activations.get(activation)\n        self.inner_activation = activations.get(inner_activation)\n\n        self.W_z = self.init((self.input_dim, self.output_dim))\n        self.W_r = self.init((self.input_dim, self.output_dim))\n        self.W_h = self.init((self.input_dim, self.output_dim))\n\n        self.U_z = self.inner_init((self.output_dim, self.output_dim))\n        self.U_r = self.inner_init((self.output_dim, self.output_dim))\n        self.U_h = self.inner_init((self.output_dim, self.output_dim))\n\n        self.b_z = shared_zeros(self.output_dim)\n        self.b_r = shared_zeros(self.output_dim)\n        self.b_h = shared_zeros(self.output_dim)\n\n        # set names\n        self.W_z.name, self.U_z.name, self.b_z.name = 'Wz', 'Uz', 'bz'\n        self.W_r.name, self.U_r.name, self.b_r.name = 'Wr', 'Ur', 'br'\n        self.W_h.name, self.U_h.name, self.b_h.name = 'Wh', 'Uh', 'bh'\n\n        self.params = [\n            self.W_z, self.U_z, self.b_z,\n            self.W_r, self.U_r, self.b_r,\n            self.W_h, self.U_h, self.b_h,\n        ]\n\n        \"\"\"\n        GRU with context inputs.\n        \"\"\"\n        if context_dim is not None:\n            self.context_dim = context_dim\n  
          self.C_z = self.init((self.context_dim, self.output_dim))\n            self.C_r = self.init((self.context_dim, self.output_dim))\n            self.C_h = self.init((self.context_dim, self.output_dim))\n            self.C_z.name, self.C_r.name, self.C_h.name = 'Cz', 'Cr', 'Ch'\n\n            self.params += [self.C_z, self.C_r, self.C_h]\n\n        if weights is not None:\n            self.set_weights(weights)\n\n        if name is not None:\n            self.set_name(name)\n\n    def _step(self,\n              xz_t, xr_t, xh_t, mask_t,\n              h_tm1,\n              u_z, u_r, u_h):\n        # h_mask_tm1 = mask_tm1 * h_tm1\n        # Here we use a GroundHog-like style which allows\n        z          = self.inner_activation(xz_t + T.dot(h_tm1, u_z))\n        r          = self.inner_activation(xr_t + T.dot(h_tm1, u_r))\n        hh_t       = self.activation(xh_t + T.dot(r * h_tm1, u_h))\n        h_t        = z * h_tm1 + (1 - z) * hh_t\n        h_t        = mask_t * h_t + (1 - mask_t) * h_tm1\n        return h_t\n\n    def _step_gate(self,\n                   xz_t, xr_t, xh_t, mask_t,\n                   h_tm1,\n                   u_z, u_r, u_h):\n        # h_mask_tm1 = mask_tm1 * h_tm1\n        # Here we use a GroundHog-like style which allows\n        z          = self.inner_activation(xz_t + T.dot(h_tm1, u_z))\n        r          = self.inner_activation(xr_t + T.dot(h_tm1, u_r))\n        hh_t       = self.activation(xh_t + T.dot(r * h_tm1, u_h))\n        h_t        = z * h_tm1 + (1 - z) * hh_t\n        h_t        = mask_t * h_t + (1 - mask_t) * h_tm1\n        return h_t, z, r\n\n    def __call__(self, X, mask=None, C=None, init_h=None,\n                 return_sequence=False, one_step=False,\n                 return_gates=False):\n        \"\"\"\n        :param X:    input sequence\n        :param mask: input mask\n        :param C:    context constant\n        :return:\n        \"\"\"\n        # recurrent cell only work for tensor\n        if X.ndim == 2:\n            X = X[:, None, :]\n            if mask is not None:\n                mask = mask[:, None]\n\n        # mask\n        if mask is None:  # sampling or beam-search\n            mask = T.alloc(1., X.shape[0], 1)\n\n        # one step\n        if one_step:\n            assert init_h, 'previous state must be provided!'\n\n        padded_mask = self.get_padded_shuffled_mask(mask, pad=0)\n        X           = X.dimshuffle((1, 0, 2))        # X:   (max_len, nb_samples, input_dim)\n\n        x_z         = dot(X, self.W_z, self.b_z)  # x_z: (max_len, nb_samples, output_dim)\n        x_r         = dot(X, self.W_r, self.b_r)  # x_r: (max_len, nb_samples, output_dim)\n        x_h         = dot(X, self.W_h, self.b_h)  # x_h: (max_len, nb_samples, output_dim)\n\n        \"\"\"\n        GRU with constant context. 
(not attention here.)\n        \"\"\"\n        if C is not None:\n            assert C.ndim == 2\n            ctx_step = C.dimshuffle('x', 0, 1)    # C: (nb_samples, context_dim)\n            x_z     += dot(ctx_step, self.C_z)\n            x_r     += dot(ctx_step, self.C_r)\n            x_h     += dot(ctx_step, self.C_h)\n\n        \"\"\"\n        GRU with additional initial/previous state.\n        \"\"\"\n        if init_h is None:\n            init_h = T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1)\n\n        if not return_gates:\n            if one_step:\n                seq          = [x_z, x_r, x_h, padded_mask]    # A hidden BUG (1)+++(1) !?!!!?!!?!?\n                outputs_info = [init_h]\n                non_seq      = [self.U_z, self.U_r, self.U_h]\n                outputs = self._step(*(seq + outputs_info + non_seq))\n\n            else:\n                outputs, updates = theano.scan(\n                    self._step,\n                    sequences=[x_z, x_r, x_h, padded_mask],\n                    outputs_info=init_h,\n                    non_sequences=[self.U_z, self.U_r, self.U_h]\n                )\n\n            if return_sequence:\n                return outputs.dimshuffle((1, 0, 2))\n            return outputs[-1]\n        else:\n            if one_step:\n                seq             = [x_z, x_r, x_h, padded_mask]    # A hidden BUG (1)+++(1) !?!!!?!!?!?\n                outputs_info    = [init_h]\n                non_seq         = [self.U_z, self.U_r, self.U_h]\n                outputs, zz, rr = self._step_gate(*(seq + outputs_info + non_seq))\n\n            else:\n                outputx, updates = theano.scan(\n                    self._step_gate,\n                    sequences=[x_z, x_r, x_h, padded_mask],\n                    outputs_info=[init_h, None, None],\n                    non_sequences=[self.U_z, self.U_r, self.U_h]\n                )\n                outputs, zz, rr = outputx\n\n            if return_sequence:\n                return outputs.dimshuffle((1, 0, 2)), zz.dimshuffle((1, 0, 2)), rr.dimshuffle((1, 0, 2))\n            return outputs[-1], zz[-1], rr[-1]\n\n\nclass JZS3(Recurrent):\n    \"\"\"\n        Evolved recurrent neural network architectures from the evaluation of thousands\n        of models, serving as alternatives to LSTMs and GRUs. See Jozefowicz et al. 
2015.\n\n        This corresponds to the `MUT3` architecture described in the paper.\n\n        Takes inputs with shape:\n        (nb_samples, max_sample_length (samples shorter than this are padded with zeros at the end), input_dim)\n\n        and returns outputs with shape:\n        if not return_sequences:\n            (nb_samples, output_dim)\n        if return_sequences:\n            (nb_samples, max_sample_length, output_dim)\n\n        References:\n            An Empirical Exploration of Recurrent Network Architectures\n                http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf\n    \"\"\"\n    def __init__(self,\n                 input_dim,\n                 output_dim=128,\n                 context_dim=None,\n                 init='glorot_uniform', inner_init='orthogonal',\n                 activation='tanh', inner_activation='sigmoid',\n                 name=None, weights=None):\n\n        super(JZS3, self).__init__()\n        \"\"\"\n        Standard model\n        \"\"\"\n        self.input_dim = input_dim\n        self.output_dim = output_dim\n\n        self.init = initializations.get(init)\n        self.inner_init = initializations.get(inner_init)\n        self.activation = activations.get(activation)\n        self.inner_activation = activations.get(inner_activation)\n\n        self.W_z = self.init((self.input_dim, self.output_dim))\n        self.U_z = self.inner_init((self.output_dim, self.output_dim))\n        self.b_z = shared_zeros(self.output_dim)\n\n        self.W_r = self.init((self.input_dim, self.output_dim))\n        self.U_r = self.inner_init((self.output_dim, self.output_dim))\n        self.b_r = shared_zeros(self.output_dim)\n\n        self.W_h = self.init((self.input_dim, self.output_dim))\n        self.U_h = self.inner_init((self.output_dim, self.output_dim))\n        self.b_h = shared_zeros(self.output_dim)\n\n        # set names\n        self.W_z.name, self.U_z.name, self.b_z.name = 'Wz', 'Uz', 'bz'\n        self.W_r.name, self.U_r.name, self.b_r.name = 'Wr', 'Ur', 'br'\n        self.W_h.name, self.U_h.name, self.b_h.name = 'Wh', 'Uh', 'bh'\n\n        self.params = [\n            self.W_z, self.U_z, self.b_z,\n            self.W_r, self.U_r, self.b_r,\n            self.W_h, self.U_h, self.b_h,\n        ]\n\n        \"\"\"\n        context inputs.\n        \"\"\"\n        if context_dim is not None:\n            self.context_dim = context_dim\n            self.C_z = self.init((self.context_dim, self.output_dim))\n            self.C_r = self.init((self.context_dim, self.output_dim))\n            self.C_h = self.init((self.context_dim, self.output_dim))\n            self.C_z.name, self.C_r.name, self.C_h.name = 'Cz', 'Cr', 'Ch'\n\n            self.params += [self.C_z, self.C_r, self.C_h]\n\n        if weights is not None:\n            self.set_weights(weights)\n\n        if name is not None:\n            self.set_name(name)\n\n    def _step(self,\n              xz_t, xr_t, xh_t, mask_t,\n              h_tm1,\n              u_z, u_r, u_h):\n        # h_mask_tm1 = mask_tm1 * h_tm1\n        z     = self.inner_activation(xz_t + T.dot(T.tanh(h_tm1), u_z))\n        r     = self.inner_activation(xr_t + T.dot(h_tm1, u_r))\n        hh_t  = self.activation(xh_t + T.dot(r * h_tm1, u_h))\n        h_t   = (hh_t * z + h_tm1 * (1 - z)) * mask_t + (1 - mask_t) * h_tm1\n        return h_t\n\n    def __call__(self, X, mask=None, C=None, init_h=None, return_sequence=False, one_step=False):\n        # recurrent cell only work for tensor\n        if X.ndim == 2:\n   
         X = X[:, None, :]\n\n        # mask\n        if mask is None:  # sampling or beam-search\n            mask = T.alloc(1., X.shape[0], X.shape[1])\n\n        # one step\n        if one_step:\n            assert init_h, 'previous state must be provided!'\n\n        padded_mask = self.get_padded_shuffled_mask(mask, pad=0)\n        X = X.dimshuffle((1, 0, 2))\n\n        x_z = dot(X, self.W_z, self.b_z)\n        x_r = dot(X, self.W_r, self.b_r)\n        x_h = dot(X, self.W_h, self.b_h)\n\n        \"\"\"\n        JZS3 with constant context. (not attention here.)\n        \"\"\"\n        if C is not None:\n            assert C.ndim == 2\n            ctx_step = C.dimshuffle('x', 0, 1)    # C: (nb_samples, context_dim)\n            x_z     += dot(ctx_step, self.C_z)\n            x_r     += dot(ctx_step, self.C_r)\n            x_h     += dot(ctx_step, self.C_h)\n\n        \"\"\"\n        JZS3 with additional initial/previous state.\n        \"\"\"\n        if init_h is None:\n            init_h = T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1)\n\n        if one_step:\n            seq          = [x_z, x_r, x_h, padded_mask]\n            outputs_info = [init_h]\n            non_seq      = [self.U_z, self.U_r, self.U_h]\n            outputs = self._step(*(seq + outputs_info + non_seq))\n\n        else:\n            outputs, updates = theano.scan(\n                self._step,\n                sequences=[x_z, x_r, x_h, padded_mask],\n                outputs_info=init_h,\n                non_sequences=[self.U_z, self.U_r, self.U_h],\n            )\n\n        if return_sequence:\n            return outputs.dimshuffle((1, 0, 2))\n        return outputs[-1]\n\n\nclass LSTM(Recurrent):\n    def __init__(self,\n                 input_dim=0,\n                 output_dim=128,\n                 context_dim=None,\n                 init='glorot_uniform', inner_init='orthogonal',\n                 forget_bias_init='one',\n                 activation='tanh', inner_activation='sigmoid',\n                 name=None, weights=None):\n\n        super(LSTM, self).__init__()\n        \"\"\"\n        Standard model\n        \"\"\"\n        self.input_dim = input_dim\n        self.output_dim = output_dim\n\n        self.init = initializations.get(init)\n        self.inner_init = initializations.get(inner_init)\n        self.forget_bias_init = initializations.get(forget_bias_init)\n        self.activation = activations.get(activation)\n        self.inner_activation = activations.get(inner_activation)\n\n        # input gate param.\n        self.W_i = self.init((self.input_dim, self.output_dim))\n        self.U_i = self.inner_init((self.output_dim, self.output_dim))\n        self.b_i = shared_zeros(self.output_dim)\n\n        # forget gate param.\n        self.W_f = self.init((self.input_dim, self.output_dim))\n        self.U_f = self.inner_init((self.output_dim, self.output_dim))\n        self.b_f = self.forget_bias_init(self.output_dim)  # forget gate needs one bias.\n\n        # output gate param.\n        self.W_o = self.init((self.input_dim, self.output_dim))\n        self.U_o = self.inner_init((self.output_dim, self.output_dim))\n        self.b_o = shared_zeros(self.output_dim)\n\n        # memory param.\n        self.W_c = self.init((self.input_dim, self.output_dim))\n        self.U_c = self.inner_init((self.output_dim, self.output_dim))\n        self.b_c = shared_zeros(self.output_dim)\n\n        # set names\n        self.W_i.name, self.U_i.name, self.b_i.name = 'Wi', 'Ui', 'bi'\n        
self.W_f.name, self.U_f.name, self.b_f.name = 'Wf', 'Uf', 'bf'\n        self.W_o.name, self.U_o.name, self.b_o.name = 'Wo', 'Uo', 'bo'\n        self.W_c.name, self.U_c.name, self.b_c.name = 'Wc', 'Uc', 'bc'\n\n        self.params = [\n            self.W_i, self.U_i, self.b_i,\n            self.W_f, self.U_f, self.b_f,\n            self.W_o, self.U_o, self.b_o,\n            self.W_c, self.U_c, self.b_c,\n        ]\n\n        \"\"\"\n        context inputs.\n        \"\"\"\n        if context_dim is not None:\n            self.context_dim = context_dim\n            self.C_i = self.init((self.context_dim, self.output_dim))\n            self.C_f = self.init((self.context_dim, self.output_dim))\n            self.C_o = self.init((self.context_dim, self.output_dim))\n            self.C_c = self.init((self.context_dim, self.output_dim))\n            self.C_i.name, self.C_f.name, self.C_o.name, self.C_c.name = 'Ci', 'Cf', 'Co', 'Cc'\n\n            self.params += [self.C_i, self.C_f, self.C_o, self.C_c]\n\n        if weights is not None:\n            self.set_weights(weights)\n\n        if name is not None:\n            self.set_name(name)\n\n    def _step(self,\n              xi_t, xf_t, xo_t, xc_t, mask_t,\n              h_tm1, c_tm1,\n              u_i, u_f, u_o, u_c):\n        # h_mask_tm1 = mask_tm1 * h_tm1\n\n        i     = self.inner_activation(xi_t + T.dot(h_tm1, u_i))  # input  gate\n        f     = self.inner_activation(xf_t + T.dot(h_tm1, u_f))  # forget gate\n        o     = self.inner_activation(xo_t + T.dot(h_tm1, u_o))  # output gate\n        c     = self.activation(xc_t + T.dot(h_tm1, u_c))        # memory updates\n\n        # update the memory cell.\n        c_t   = f * c_tm1 + i * c\n        h_t   = o * self.activation(c_t)\n\n        # masking\n        c_t   = c_t * mask_t + (1 - mask_t) * c_tm1\n        h_t   = h_t * mask_t + (1 - mask_t) * h_tm1\n        return h_t, c_t\n\n    def input_embed(self, X, C=None):\n        x_i = dot(X, self.W_i, self.b_i)\n        x_f = dot(X, self.W_f, self.b_f)\n        x_o = dot(X, self.W_o, self.b_o)\n        x_c = dot(X, self.W_c, self.b_c)\n\n        \"\"\"\n        LSTM with constant context. 
(not attention here.)\n        \"\"\"\n        if C is not None:\n            assert C.ndim == 2\n            ctx_step = C.dimshuffle('x', 0, 1)    # C: (nb_samples, context_dim)\n            x_i     += dot(ctx_step, self.C_i)\n            x_f     += dot(ctx_step, self.C_f)\n            x_o     += dot(ctx_step, self.C_o)\n            x_c     += dot(ctx_step, self.C_c)\n\n        return x_i, x_f, x_o, x_c\n\n    def __call__(self, X, mask=None, C=None, init_h=None, init_c=None, return_sequence=False, one_step=False):\n        # recurrent cell only work for tensor\n        if X.ndim == 2:\n            X = X[:, None, :]\n\n        # mask\n        if mask is None:  # sampling or beam-search\n            mask = T.alloc(1., X.shape[0], X.shape[1])\n\n        # one step\n        if one_step:\n            assert init_h, 'previous state must be provided!'\n\n        padded_mask = self.get_padded_shuffled_mask(mask, pad=0)\n        X = X.dimshuffle((1, 0, 2))\n        x_i, x_f, x_o, x_c = self.input_embed(X, C)\n\n        \"\"\"\n        LSTM with additional initial/previous state.\n        \"\"\"\n        if init_h is None:\n            init_h = T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1)\n\n        if init_c is None:\n            init_c = init_h\n\n        if one_step:\n            seq          = [x_i, x_f, x_o, x_c, padded_mask]\n            outputs_info = [init_h, init_c]\n            non_seq      = [self.U_i, self.U_f, self.U_o, self.U_c]\n            outputs = self._step(*(seq + outputs_info + non_seq))\n\n        else:\n            outputs, updates = theano.scan(\n                self._step,\n                sequences=[x_i, x_f, x_o, x_c, padded_mask],\n                outputs_info=[init_h, init_c],\n                non_sequences=[self.U_i, self.U_f, self.U_o, self.U_c],\n            )\n\n        if return_sequence:\n            return outputs[0].dimshuffle((1, 0, 2)), outputs[1].dimshuffle((1, 0, 2))  # H, C\n        return outputs[0][-1], outputs[1][-1]\n\n\n"
  },
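  {
    "path": "examples/gru_demo.py",
    "content": "# -*- coding: utf-8 -*-\n\"\"\"\nA minimal usage sketch (NOT part of the original package) for the GRU layer\nin emolga/layers/recurrent.py. The file name and the toy dimensions are\nillustrative assumptions; the call signature follows the layer above.\n\"\"\"\nimport numpy as np\nimport theano\nimport theano.tensor as T\nfrom emolga.layers.recurrent import GRU\n\nnb_samples, max_len, input_dim, output_dim = 3, 5, 8, 16\n\nX    = T.tensor3('X')    # (nb_samples, max_len, input_dim)\nmask = T.matrix('mask')  # (nb_samples, max_len): 1 = real token, 0 = padding\n\ngru  = GRU(input_dim, output_dim, name='demo_gru')\nH    = gru(X, mask, return_sequence=True)  # (nb_samples, max_len, output_dim)\nh_T  = gru(X, mask)                        # (nb_samples, output_dim): last state\n\nf = theano.function([X, mask], [H, h_T])\nx = np.random.randn(nb_samples, max_len, input_dim).astype(theano.config.floatX)\nm = np.ones((nb_samples, max_len), dtype=theano.config.floatX)\nH_val, h_val = f(x, m)\nprint H_val.shape, h_val.shape  # (3, 5, 16) (3, 16)\n"
  },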
  {
    "path": "emolga/models/__init__.py",
    "content": "__author__ = 'jiataogu'\n"
  },
  {
    "path": "emolga/models/core.py",
    "content": "__author__ = 'jiataogu'\nimport theano\nimport logging\nimport deepdish as dd\n\nfrom emolga.dataset.build_dataset import serialize_to_file, deserialize_from_file\nfrom emolga.utils.theano_utils import floatX\n\nlogger = logging.getLogger(__name__)\n\n\nclass Model(object):\n    def __init__(self):\n        self.layers  = []\n        self.params  = []\n        self.monitor = {}\n        self.watchlist = []\n\n    def _add(self, layer):\n        if layer:\n            self.layers.append(layer)\n            self.params += layer.params\n\n    def _monitoring(self):\n        # add monitoring variables\n        for l in self.layers:\n            for v in l.monitor:\n                name = v + '@' + l.name\n                print name\n                self.monitor[name] = l.monitor[v]\n\n    def compile_monitoring(self, inputs, updates=None):\n        logger.info('compile monitoring')\n        for i, v in enumerate(self.monitor):\n            self.watchlist.append(v)\n            logger.info('monitoring [{0}]: {1}'.format(i, v))\n\n        self.watch = theano.function(inputs,\n                                     [self.monitor[v] for v in self.watchlist],\n                                     updates=updates\n                                     )\n        logger.info('done.')\n\n    def set_weights(self, weights):\n        if hasattr(self, 'save_parm'):\n            params = self.params + self.save_parm\n        else:\n            params = self.params\n\n        for p, w in zip(params, weights):\n            print p.name\n            if p.eval().shape != w.shape:\n                raise Exception(\"Layer shape %s not compatible with weight shape %s.\" % (p.eval().shape, w.shape))\n            p.set_value(floatX(w))\n\n    def get_weights(self):\n        weights = []\n        for p in self.params:\n            weights.append(p.get_value())\n\n        if hasattr(self, 'save_parm'):\n            for v in self.save_parm:\n                weights.append(v.get_value())\n\n        return weights\n\n    def set_name(self, name):\n        for i in range(len(self.params)):\n            if self.params[i].name is None:\n                self.params[i].name = '%s_p%d' % (name, i)\n            else:\n                self.params[i].name = name + '@' + self.params[i].name\n        self.name = name\n\n    def save(self, filename):\n        if hasattr(self, 'save_parm'):\n            params = self.params + self.save_parm\n        else:\n            params = self.params\n        ps = 'save: <\\n'\n        for p in params:\n            ps += '{0}: {1}\\n'.format(p.name, p.eval().shape)\n        ps += '> to ... {}'.format(filename)\n        logger.info(ps)\n\n        # hdf5 module seems works abnormal !!\n        # dd.io.save(filename, self.get_weights())\n        serialize_to_file(self.get_weights(), filename)\n\n    def load(self, filename):\n        logger.info('load the weights.')\n\n        # hdf5 module seems works abnormal !!\n        # weights = dd.io.load(filename)\n        weights = deserialize_from_file(filename)\n        print len(weights)\n        self.set_weights(weights)\n"
  },
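  {
    "path": "examples/model_io_demo.py",
    "content": "# -*- coding: utf-8 -*-\n\"\"\"\nA minimal sketch (NOT part of the original package) of how\nemolga.models.core.Model registers layers and round-trips weights through\nsave()/load(). The toy layer sizes and the target file name are illustrative\nassumptions; Dense is imported the same way emolga/models/covc_encdec.py\nimports it.\n\"\"\"\nimport numpy as np\nfrom emolga.layers.core import Dense\nfrom emolga.models.core import Model\n\n\nclass TinyModel(Model):\n    def __init__(self):\n        super(TinyModel, self).__init__()\n        self.proj = Dense(4, 2, activation='tanh', name='proj')\n        self._add(self.proj)   # registers the layer and collects its params\n        self.set_name('tiny')  # tags parameter names with the model name\n\n\nif __name__ == '__main__':\n    m1, m2 = TinyModel(), TinyModel()\n    m1.save('/tmp/tiny_weights.pkl')  # serialize_to_file under the hood\n    m2.load('/tmp/tiny_weights.pkl')  # copies the saved arrays back in\n    ok = all(np.allclose(a, b) for a, b in zip(m1.get_weights(), m2.get_weights()))\n    print ok  # True\n"
  },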
  {
    "path": "emolga/models/covc_encdec.py",
    "content": "__author__ = 'jiataogu'\nimport theano\nimport logging\nimport copy\nimport emolga.basic.objectives as objectives\nimport emolga.basic.optimizers as optimizers\n\nfrom theano.compile.nanguardmode import NanGuardMode\nfrom emolga.utils.generic_utils import visualize_\nfrom emolga.layers.core import Dropout, Dense, Dense2, Identity\nfrom emolga.layers.recurrent import *\nfrom emolga.layers.ntm_minibatch import Controller\nfrom emolga.layers.embeddings import *\nfrom emolga.layers.attention import *\nfrom core import Model\n\nlogger = logging.getLogger(__name__)\nRNN    = GRU             # change it here for other RNN models.\nerr    = 1e-9\n\n\nclass Encoder(Model):\n    \"\"\"\n    Recurrent Neural Network-based Encoder\n    It is used to compute the context vector.\n    \"\"\"\n\n    def __init__(self,\n                 config, rng, prefix='enc',\n                 mode='Evaluation', embed=None, use_context=False):\n        super(Encoder, self).__init__()\n        self.config = config\n        self.rng = rng\n        self.prefix = prefix\n        self.mode = mode\n        self.name = prefix\n        self.use_context = use_context\n\n        self.return_embed = False\n        self.return_sequence = False\n\n        \"\"\"\n        Create all elements of the Encoder's Computational graph\n        \"\"\"\n        # create Embedding layers\n        logger.info(\"{}_create embedding layers.\".format(self.prefix))\n        if embed:\n            self.Embed = embed\n        else:\n            self.Embed = Embedding(\n                self.config['enc_voc_size'],\n                self.config['enc_embedd_dim'],\n                name=\"{}_embed\".format(self.prefix))\n            self._add(self.Embed)\n\n        if self.use_context:\n            self.Initializer = Dense(\n                config['enc_contxt_dim'],\n                config['enc_hidden_dim'],\n                activation='tanh',\n                name=\"{}_init\".format(self.prefix)\n            )\n            self._add(self.Initializer)\n\n        \"\"\"\n        Encoder Core\n        \"\"\"\n        # create RNN cells\n        if not self.config['bidirectional']:\n            logger.info(\"{}_create RNN cells.\".format(self.prefix))\n            self.RNN = RNN(\n                self.config['enc_embedd_dim'],\n                self.config['enc_hidden_dim'],\n                None if not use_context\n                else self.config['enc_contxt_dim'],\n                name=\"{}_cell\".format(self.prefix)\n            )\n            self._add(self.RNN)\n        else:\n            logger.info(\"{}_create forward RNN cells.\".format(self.prefix))\n            self.forwardRNN = RNN(\n                self.config['enc_embedd_dim'],\n                self.config['enc_hidden_dim'],\n                None if not use_context\n                else self.config['enc_contxt_dim'],\n                name=\"{}_fw_cell\".format(self.prefix)\n            )\n            self._add(self.forwardRNN)\n\n            logger.info(\"{}_create backward RNN cells.\".format(self.prefix))\n            self.backwardRNN = RNN(\n                self.config['enc_embedd_dim'],\n                self.config['enc_hidden_dim'],\n                None if not use_context\n                else self.config['enc_contxt_dim'],\n                name=\"{}_bw_cell\".format(self.prefix)\n            )\n            self._add(self.backwardRNN)\n\n        logger.info(\"create encoder ok.\")\n\n    def build_encoder(self, source, context=None, return_embed=False,\n                    
  return_sequence=False,\n                      return_gates=False,\n                      clean_mask=False):\n        \"\"\"\n        Build the Encoder Computational Graph\n        \"\"\"\n        # clean_mask means we set the hidden states of masked places as 0.\n        # sometimes it will help the program to solve something\n        # note that this option only works when return_sequence.\n        # we recommend to leave at least one mask in the end of encoded sequence.\n\n        # Initial state\n        Init_h = None\n        if self.use_context:\n            Init_h = self.Initializer(context)\n\n        # word embedding\n        if not self.config['bidirectional']:\n            X, X_mask = self.Embed(source, True)\n            if return_gates:\n                X_out, Z, R = self.RNN(X, X_mask, C=context, init_h=Init_h,\n                                       return_sequence=return_sequence,\n                                       return_gates=True)\n            else:\n                X_out     = self.RNN(X, X_mask, C=context, init_h=Init_h,\n                                     return_sequence=return_sequence,\n                                     return_gates=False)\n            if return_sequence:\n                X_tail    = X_out[:, -1]\n\n                if clean_mask:\n                    X_out     = X_out * X_mask[:, :, None]\n            else:\n                X_tail    = X_out\n        else:\n            source2 = source[:, ::-1]\n            X,  X_mask = self.Embed(source, True)\n            X2, X2_mask = self.Embed(source2, True)\n\n            if not return_gates:\n                X_out1 = self.backwardRNN(X, X_mask,  C=context, init_h=Init_h, return_sequence=return_sequence)\n                X_out2 = self.forwardRNN(X2, X2_mask, C=context, init_h=Init_h, return_sequence=return_sequence)\n            else:\n                X_out1, Z1, R1  = self.backwardRNN(X, X_mask,  C=context, init_h=Init_h,\n                                                   return_sequence=return_sequence,\n                                                   return_gates=True)\n                X_out2, Z2, R2  = self.forwardRNN(X2, X2_mask, C=context, init_h=Init_h,\n                                                  return_sequence=return_sequence,\n                                                  return_gates=True)\n                Z = T.concatenate([Z1, Z2[:, ::-1, :]], axis=2)\n                R = T.concatenate([R1, R2[:, ::-1, :]], axis=2)\n\n            if not return_sequence:\n                X_out  = T.concatenate([X_out1, X_out2], axis=1)\n                X_tail = X_out\n            else:\n                X_out  = T.concatenate([X_out1, X_out2[:, ::-1, :]], axis=2)\n                X_tail = T.concatenate([X_out1[:, -1], X_out2[:, -1]], axis=1)\n\n                if clean_mask:\n                    X_out     = X_out * X_mask[:, :, None]\n\n        X_mask  = T.cast(X_mask, dtype='float32')\n        if not return_gates:\n            if return_embed:\n                return X_out, X, X_mask, X_tail\n            return X_out\n        else:\n            if return_embed:\n                return X_out, X, X_mask, X_tail, Z, R\n            return X_out, Z, R\n\n    def compile_encoder(self, with_context=False, return_embed=False, return_sequence=False):\n        source  = T.imatrix()\n        self.return_embed = return_embed\n        self.return_sequence = return_sequence\n        if with_context:\n            context = T.matrix()\n\n            self.encode = theano.function([source, context],\n       
                                   self.build_encoder(source, context,\n                                                             return_embed=return_embed,\n                                                             return_sequence=return_sequence))\n            self.gtenc  = theano.function([source, context],\n                                          self.build_encoder(source, context,\n                                                             return_embed=return_embed,\n                                                             return_sequence=return_sequence,\n                                                             return_gates=True))\n        else:\n            self.encode = theano.function([source],\n                                          self.build_encoder(source, None,\n                                                             return_embed=return_embed,\n                                                             return_sequence=return_sequence))\n            self.gtenc  = theano.function([source],\n                                          self.build_encoder(source, None,\n                                                             return_embed=return_embed,\n                                                             return_sequence=return_sequence,\n                                                             return_gates=True))\n\n\nclass Decoder(Model):\n    \"\"\"\n    Recurrent Neural Network-based Decoder.\n    It is used for:\n        (1) Evaluation: compute the probability P(Y|X)\n        (2) Prediction: sample the best result based on P(Y|X)\n        (3) Beam-search\n        (4) Scheduled Sampling (how to implement it?)\n    \"\"\"\n\n    def __init__(self,\n                 config, rng, prefix='dec',\n                 mode='RNN', embed=None,\n                 highway=False):\n        \"\"\"\n        mode = RNN: use a RNN Decoder\n        \"\"\"\n        super(Decoder, self).__init__()\n        self.config = config\n        self.rng = rng\n        self.prefix = prefix\n        self.name = prefix\n        self.mode = mode\n\n        self.highway = highway\n        self.init = initializations.get('glorot_uniform')\n        self.sigmoid = activations.get('sigmoid')\n\n        # use standard drop-out for input & output.\n        # I believe it should not use for context vector.\n        self.dropout = config['dropout']\n        if self.dropout > 0:\n            logger.info('Use standard-dropout!!!!')\n            self.D   = Dropout(rng=self.rng, p=self.dropout, name='{}_Dropout'.format(prefix))\n\n        \"\"\"\n        Create all elements of the Decoder's computational graph.\n        \"\"\"\n        # create Embedding layers\n        logger.info(\"{}_create embedding layers.\".format(self.prefix))\n        if embed:\n            self.Embed = embed\n        else:\n            self.Embed = Embedding(\n                self.config['dec_voc_size'],\n                self.config['dec_embedd_dim'],\n                name=\"{}_embed\".format(self.prefix))\n            self._add(self.Embed)\n\n        # create Initialization Layers\n        logger.info(\"{}_create initialization layers.\".format(self.prefix))\n        if not config['bias_code']:\n            self.Initializer = Zero()\n        else:\n            self.Initializer = Dense(\n                config['dec_contxt_dim'],\n                config['dec_hidden_dim'],\n                activation='tanh',\n                name=\"{}_init\".format(self.prefix)\n            )\n\n        # create RNN cells\n    
    logger.info(\"{}_create RNN cells.\".format(self.prefix))\n        if 'location_embed' in self.config:\n            if config['location_embed']:\n                dec_embedd_dim = 2 * self.config['dec_embedd_dim']\n            else:\n                dec_embedd_dim = self.config['dec_embedd_dim']\n        else:\n            dec_embedd_dim = self.config['dec_embedd_dim']\n\n        self.RNN = RNN(\n            dec_embedd_dim,\n            self.config['dec_hidden_dim'],\n            self.config['dec_contxt_dim'],\n            name=\"{}_cell\".format(self.prefix)\n        )\n\n        self._add(self.Initializer)\n        self._add(self.RNN)\n\n        # HighWay Gating\n        if highway:\n            logger.info(\"HIGHWAY CONNECTION~~~!!!\")\n            assert self.config['context_predict']\n            assert self.config['dec_contxt_dim'] == self.config['dec_hidden_dim']\n\n            self.C_x = self.init((self.config['dec_contxt_dim'],\n                                  self.config['dec_hidden_dim']))\n            self.H_x = self.init((self.config['dec_hidden_dim'],\n                                  self.config['dec_hidden_dim']))\n            self.b_x = initializations.get('zero')(self.config['dec_hidden_dim'])\n\n            self.C_x.name = '{}_Cx'.format(self.prefix)\n            self.H_x.name = '{}_Hx'.format(self.prefix)\n            self.b_x.name = '{}_bx'.format(self.prefix)\n            self.params += [self.C_x, self.H_x, self.b_x]\n\n        # create readout layers\n        logger.info(\"_create Readout layers\")\n\n        # 1. hidden layers readout.\n        self.hidden_readout = Dense(\n            self.config['dec_hidden_dim'],\n            self.config['output_dim']\n            if self.config['deep_out']\n            else self.config['dec_voc_size'],\n            activation='linear',\n            name=\"{}_hidden_readout\".format(self.prefix)\n        )\n\n        # 2. previous word readout\n        self.prev_word_readout = None\n        if self.config['bigram_predict']:\n            self.prev_word_readout = Dense(\n                dec_embedd_dim,\n                self.config['output_dim']\n                if self.config['deep_out']\n                else self.config['dec_voc_size'],\n                activation='linear',\n                name=\"{}_prev_word_readout\".format(self.prefix),\n                learn_bias=False\n            )\n\n        # 3. 
context readout\n        self.context_readout = None\n        if self.config['context_predict']:\n            if not self.config['leaky_predict']:\n                self.context_readout = Dense(\n                    self.config['dec_contxt_dim'],\n                    self.config['output_dim']\n                    if self.config['deep_out']\n                    else self.config['dec_voc_size'],\n                    activation='linear',\n                    name=\"{}_context_readout\".format(self.prefix),\n                    learn_bias=False\n                )\n            else:\n                assert self.config['dec_contxt_dim'] == self.config['dec_hidden_dim']\n                self.context_readout = self.hidden_readout\n\n        # option: deep output (maxout)\n        if self.config['deep_out']:\n            self.activ = Activation(config['deep_out_activ'])\n            # self.dropout = Dropout(rng=self.rng, p=config['dropout'])\n            self.output_nonlinear = [self.activ]  # , self.dropout]\n            self.output = Dense(\n                self.config['output_dim'] / 2\n                if config['deep_out_activ'] == 'maxout2'\n                else self.config['output_dim'],\n\n                self.config['dec_voc_size'],\n                activation='softmax',\n                name=\"{}_output\".format(self.prefix),\n                learn_bias=False\n            )\n        else:\n            self.output_nonlinear = []\n            self.output = Activation('softmax')\n\n        # registration:\n        self._add(self.hidden_readout)\n\n        if not self.config['leaky_predict']:\n            self._add(self.context_readout)\n\n        self._add(self.prev_word_readout)\n        self._add(self.output)\n\n        if self.config['deep_out']:\n            self._add(self.activ)\n        # self._add(self.dropout)\n\n        logger.info(\"create decoder ok.\")\n\n    @staticmethod\n    def _grab_prob(probs, X, block_unk=False):\n        assert probs.ndim == 3\n\n        batch_size = probs.shape[0]\n        max_len = probs.shape[1]\n        vocab_size = probs.shape[2]\n\n        probs = probs.reshape((batch_size * max_len, vocab_size))\n        return probs[T.arange(batch_size * max_len), X.flatten(1)].reshape(X.shape)  # advanced indexing\n\n    \"\"\"\n    Build the decoder for evaluation\n    \"\"\"\n    def prepare_xy(self, target):\n        # Word embedding\n        Y, Y_mask = self.Embed(target, True)  # (nb_samples, max_len, embedding_dim)\n\n        if self.config['use_input']:\n            X = T.concatenate([alloc_zeros_matrix(Y.shape[0], 1, Y.shape[2]), Y[:, :-1, :]], axis=1)\n        else:\n            X = 0 * Y\n\n        # option ## drop words.\n\n        X_mask    = T.concatenate([T.ones((Y.shape[0], 1)), Y_mask[:, :-1]], axis=1)\n        Count     = T.cast(T.sum(X_mask, axis=1), dtype=theano.config.floatX)\n        return X, X_mask, Y, Y_mask, Count\n\n    def build_decoder(self, target, context=None,\n                      return_count=False,\n                      train=True):\n\n        \"\"\"\n        Build the Decoder Computational Graph\n        For training/testing\n        \"\"\"\n        X, X_mask, Y, Y_mask, Count = self.prepare_xy(target)\n\n        # input drop-out if any.\n        if self.dropout > 0:\n            X = self.D(X, train=train)\n\n        # Initial state of RNN\n        Init_h = self.Initializer(context)\n        if not self.highway:\n            X_out  = self.RNN(X, X_mask, C=context, init_h=Init_h, return_sequence=True)\n\n            # Readout\n   
         readout = self.hidden_readout(X_out)\n            if self.dropout > 0:\n                readout = self.D(readout, train=train)\n\n            if self.config['context_predict']:\n                readout += self.context_readout(context).dimshuffle(0, 'x', 1)\n        else:\n            X      = X.dimshuffle((1, 0, 2))\n            X_mask = X_mask.dimshuffle((1, 0))\n\n            def _recurrence(x, x_mask, prev_h, c):\n                # compute the highway gate for context vector.\n                xx    = dot(c, self.C_x, self.b_x) + dot(prev_h, self.H_x)  # highway gate.\n                xx    = self.sigmoid(xx)\n\n                cy    = xx * c   # the path without using RNN\n                x_out = self.RNN(x, mask=x_mask, C=c, init_h=prev_h, one_step=True)\n                hx    = (1 - xx) * x_out\n                return x_out, hx, cy\n\n            outputs, _ = theano.scan(\n                _recurrence,\n                sequences=[X, X_mask],\n                outputs_info=[Init_h, None, None],\n                non_sequences=[context]\n            )\n\n            # hidden readout + context readout\n            readout   = self.hidden_readout( outputs[1].dimshuffle((1, 0, 2)))\n            if self.dropout > 0:\n                readout = self.D(readout, train=train)\n\n            readout  += self.context_readout(outputs[2].dimshuffle((1, 0, 2)))\n\n            # return to normal size.\n            X      = X.dimshuffle((1, 0, 2))\n            X_mask = X_mask.dimshuffle((1, 0))\n\n        if self.config['bigram_predict']:\n            readout += self.prev_word_readout(X)\n\n        for l in self.output_nonlinear:\n            readout = l(readout)\n\n        prob_dist = self.output(readout)  # (nb_samples, max_len, vocab_size)\n        # log_old  = T.sum(T.log(self._grab_prob(prob_dist, target)), axis=1)\n        log_prob = T.sum(T.log(self._grab_prob(prob_dist, target) + err) * X_mask, axis=1)\n        log_ppl  = log_prob / Count\n\n        if return_count:\n            return log_prob, Count\n        else:\n            return log_prob, log_ppl\n\n    \"\"\"\n    Sample one step\n    \"\"\"\n\n    def _step_sample(self, prev_word, prev_stat, context):\n        # word embedding (note that for the first word, embedding should be all zero)\n        if self.config['use_input']:\n            X = T.switch(\n                prev_word[:, None] < 0,\n                alloc_zeros_matrix(prev_word.shape[0], self.config['dec_embedd_dim']),\n                self.Embed(prev_word)\n            )\n        else:\n            X = alloc_zeros_matrix(prev_word.shape[0], self.config['dec_embedd_dim'])\n\n        if self.dropout > 0:\n            X = self.D(X, train=False)\n\n        # apply one step of RNN\n        if not self.highway:\n            X_proj = self.RNN(X, C=context, init_h=prev_stat, one_step=True)\n            next_stat = X_proj\n\n            # compute the readout probability distribution and sample it\n            # here the readout is a matrix, different from the learner.\n            readout = self.hidden_readout(next_stat)\n            if self.dropout > 0:\n                readout = self.D(readout, train=False)\n\n            if self.config['context_predict']:\n                readout += self.context_readout(context)\n        else:\n            xx     = dot(context, self.C_x, self.b_x) + dot(prev_stat, self.H_x)  # highway gate.\n            xx     = self.sigmoid(xx)\n\n            X_proj = self.RNN(X, C=context, init_h=prev_stat, one_step=True)\n            next_stat = X_proj\n\n  
          readout  = self.hidden_readout((1 - xx) * X_proj)\n            if self.dropout > 0:\n                readout = self.D(readout, train=False)\n\n            readout += self.context_readout(xx * context)\n\n        if self.config['bigram_predict']:\n            readout += self.prev_word_readout(X)\n\n        for l in self.output_nonlinear:\n            readout = l(readout)\n\n        next_prob = self.output(readout)\n        next_sample = self.rng.multinomial(pvals=next_prob).argmax(1)\n        return next_prob, next_sample, next_stat\n\n    \"\"\"\n    Build the sampler for sampling/greedy search/beam search\n    \"\"\"\n\n    def build_sampler(self):\n        \"\"\"\n        Build a sampler which only steps once.\n        Typically it only works for one word a time?\n        \"\"\"\n        logger.info(\"build sampler ...\")\n        if self.config['sample_stoch'] and self.config['sample_argmax']:\n            logger.info(\"use argmax search!\")\n        elif self.config['sample_stoch'] and (not self.config['sample_argmax']):\n            logger.info(\"use stochastic sampling!\")\n        elif self.config['sample_beam'] > 1:\n            logger.info(\"use beam search! (beam_size={})\".format(self.config['sample_beam']))\n\n        # initial state of our Decoder.\n        context = T.matrix()  # theano variable.\n\n        init_h = self.Initializer(context)\n        logger.info('compile the function: get_init_state')\n        self.get_init_state \\\n            = theano.function([context], init_h, name='get_init_state')\n        logger.info('done.')\n\n        # word sampler: 1 x 1\n        prev_word = T.vector('prev_word', dtype='int64')\n        prev_stat = T.matrix('prev_state', dtype='float32')\n        next_prob, next_sample, next_stat \\\n            = self._step_sample(prev_word, prev_stat, context)\n\n        # next word probability\n        logger.info('compile the function: sample_next')\n        inputs = [prev_word, prev_stat, context]\n        outputs = [next_prob, next_sample, next_stat]\n\n        self.sample_next = theano.function(inputs, outputs, name='sample_next')\n        logger.info('done')\n        pass\n\n    \"\"\"\n    Build a Stochastic Sampler which can use SCAN to work on GPU.\n    However it cannot be used in Beam-search.\n    \"\"\"\n\n    def build_stochastic_sampler(self):\n        context = T.matrix()\n        init_h = self.Initializer(context)\n\n        logger.info('compile the function: sample')\n        pass\n\n    \"\"\"\n    Generate samples, either with stochastic sampling or beam-search!\n    \"\"\"\n\n    def get_sample(self, context, k=1, maxlen=30, stochastic=True, argmax=False, fixlen=False):\n        # beam size\n        if k > 1:\n            assert not stochastic, 'Beam search does not support stochastic sampling!!'\n\n        # fix length cannot use beam search\n        # if fixlen:\n        #     assert k == 1\n\n        # prepare for searching\n        sample = []\n        score = []\n        if stochastic:\n            score = 0\n\n        live_k = 1\n        dead_k = 0\n\n        hyp_samples = [[]] * live_k\n        hyp_scores = np.zeros(live_k).astype(theano.config.floatX)\n        hyp_states = []\n\n        # get initial state of decoder RNN with context\n        next_state = self.get_init_state(context)\n        next_word = -1 * np.ones((1,)).astype('int64')  # indicator for the first target word (bos target)\n\n        # Start searching!\n        for ii in xrange(maxlen):\n            # print next_word\n            ctx = 
np.tile(context, [live_k, 1])\n            next_prob, next_word, next_state \\\n                = self.sample_next(next_word, next_state, ctx)  # one sampling step.\n\n            if stochastic:\n                # using stochastic sampling (or greedy sampling.)\n                if argmax:\n                    nw = next_prob[0].argmax()\n                    next_word[0] = nw\n                else:\n                    nw = next_word[0]\n\n                sample.append(nw)\n                score += next_prob[0, nw]\n\n                if (not fixlen) and (nw == 0):  # sample reached the end\n                    break\n\n            else:\n                # using beam-search\n                # scores can only be computed in a flattened way!\n                cand_scores = hyp_scores[:, None] - np.log(next_prob)\n                cand_flat = cand_scores.flatten()\n                ranks_flat = cand_flat.argsort()[:(k - dead_k)]\n\n                # fetch the best results.\n                voc_size = next_prob.shape[1]\n                trans_index = ranks_flat // voc_size\n                word_index = ranks_flat % voc_size\n                costs = cand_flat[ranks_flat]\n\n                # get the new hyp samples\n                new_hyp_samples = []\n                new_hyp_scores = np.zeros(k - dead_k).astype(theano.config.floatX)\n                new_hyp_states = []\n\n                for idx, [ti, wi] in enumerate(zip(trans_index, word_index)):\n                    new_hyp_samples.append(hyp_samples[ti] + [wi])\n                    new_hyp_scores[idx] = copy.copy(costs[idx])\n                    new_hyp_states.append(copy.copy(next_state[ti]))\n\n                # check the finished samples\n                new_live_k = 0\n                hyp_samples = []\n                hyp_scores = []\n                hyp_states = []\n\n                for idx in xrange(len(new_hyp_samples)):\n                    if (new_hyp_samples[idx][-1] == 0) and (not fixlen):  # hypothesis ends with <eos> (word index 0)\n                        sample.append(new_hyp_samples[idx])\n                        score.append(new_hyp_scores[idx])\n                        dead_k += 1\n                    else:\n                        new_live_k += 1\n                        hyp_samples.append(new_hyp_samples[idx])\n                        hyp_scores.append(new_hyp_scores[idx])\n                        hyp_states.append(new_hyp_states[idx])\n\n                hyp_scores = np.array(hyp_scores)\n                live_k = new_live_k\n\n                if new_live_k < 1:\n                    break\n                if dead_k >= k:\n                    break\n\n                next_word = np.array([w[-1] for w in hyp_samples])\n                next_state = np.array(hyp_states)\n                pass\n            pass\n\n        # end.\n        if not stochastic:\n            # dump every remaining one\n            if live_k > 0:\n                for idx in xrange(live_k):\n                    sample.append(hyp_samples[idx])\n                    score.append(hyp_scores[idx])\n\n        return sample, score\n\n\nclass DecoderAtt(Decoder):\n    \"\"\"\n    Recurrent Neural Network-based Decoder [for CopyNet-b Only]\n    with Attention Mechanism\n    \"\"\"\n    def __init__(self,\n                 config, rng, prefix='dec',\n                 mode='RNN', embed=None,\n                 copynet=False, identity=False):\n        super(DecoderAtt, self).__init__(\n                config, rng, prefix,\n                mode, embed, False)\n        self.init     = initializations.get('glorot_uniform')\n        
self.copynet  = copynet\n        self.identity = identity\n        # attention reader\n        self.attention_reader = Attention(\n            self.config['dec_hidden_dim'],\n            self.config['dec_contxt_dim'],\n            1000,\n            name='source_attention',\n            coverage=self.config['coverage']\n        )\n        self._add(self.attention_reader)\n\n        # if use copynet\n        if self.copynet:\n            if not self.identity:\n                self.Is = Dense(\n                    self.config['dec_contxt_dim'],\n                    self.config['dec_embedd_dim'],\n                    name='in-trans'\n                )\n            else:\n                assert self.config['dec_contxt_dim'] == self.config['dec_embedd_dim']\n                self.Is = Identity(name='ini')\n\n            self.Os = Dense(\n                self.config['dec_readout_dim']\n                if not self.config['location_embed']\n                    else self.config['dec_readout_dim'] + self.config['dec_embedd_dim'],\n                self.config['dec_contxt_dim'],\n                name='out-trans'\n            )\n\n            if self.config['copygate']:\n                self.Gs = Dense(\n                    self.config['dec_readout_dim'] + self.config['dec_embedd_dim'],\n                    1,\n                    name='copy-gate',\n                    activation='linear',\n                    learn_bias=True,\n                    negative_bias=True\n                )\n                self._add(self.Gs)\n\n            if self.config['location_embed']:\n                self._add(self.Is)\n            self._add(self.Os)\n\n        logger.info('adjust decoder ok.')\n\n    \"\"\"\n    Build the decoder for evaluation\n    \"\"\"\n    def prepare_xy(self, target, cc_matrix):\n        # target:      (nb_samples, index_seq)\n        # cc_matrix:   (nb_samples, maxlen_t, maxlen_s)\n        # context:     (nb_samples)\n        Y,  Y_mask  = self.Embed(target, True)  # (nb_samples, maxlen_t, embedding_dim)\n        X           = T.concatenate([alloc_zeros_matrix(Y.shape[0], 1, Y.shape[2]), Y[:, :-1, :]], axis=1)\n\n        # LL          = T.concatenate([alloc_zeros_matrix(Y.shape[0], 1, cc_matrix.shape[2]),\n        #                              cc_matrix[:, :-1, :]], axis=1)\n        LL = cc_matrix\n\n        XL_mask     = T.cast(T.gt(T.sum(LL, axis=2), 0), dtype='float32')\n        if not self.config['use_input']:\n            X *= 0\n\n        X_mask    = T.concatenate([T.ones((Y.shape[0], 1)), Y_mask[:, :-1]], axis=1)\n        Count     = T.cast(T.sum(X_mask, axis=1), dtype=theano.config.floatX)\n        return X, X_mask, LL, XL_mask, Y_mask, Count\n\n    \"\"\"\n    The most different part. 
Be caution !!\n    Very different from traditional RNN search.\n    \"\"\"\n    def build_decoder(self,\n                      target,\n                      cc_matrix,\n                      context,\n                      c_mask,\n                      return_count=False,\n                      train=True):\n        \"\"\"\n        Build the Computational Graph ::> Context is essential\n        \"\"\"\n        assert c_mask is not None, 'context must be supplied for this decoder.'\n        assert context.ndim == 3, 'context must have 3 dimentions.'\n        # context: (nb_samples, max_len, contxt_dim)\n        context_A = self.Is(context)  # (nb_samples, max_len, embed_dim)\n        X, X_mask, LL, XL_mask, Y_mask, Count = self.prepare_xy(target, cc_matrix)\n\n        # input drop-out if any.\n        if self.dropout > 0:\n            X     = self.D(X, train=train)\n\n        # Initial state of RNN\n        Init_h   = self.Initializer(context[:, 0, :])  # default order ->\n        Init_a   = T.zeros((context.shape[0], context.shape[1]), dtype='float32')\n        coverage = T.zeros((context.shape[0], context.shape[1]), dtype='float32')\n\n        X        = X.dimshuffle((1, 0, 2))\n        X_mask   = X_mask.dimshuffle((1, 0))\n        LL       = LL.dimshuffle((1, 0, 2))            # (maxlen_t, nb_samples, maxlen_s)\n        XL_mask  = XL_mask.dimshuffle((1, 0))          # (maxlen_t, nb_samples)\n\n        def _recurrence(x, x_mask, ll, xl_mask, prev_h, prev_a, cov, cc, cm, ca):\n            \"\"\"\n            x:      (nb_samples, embed_dims)\n            x_mask: (nb_samples, )\n            ll:     (nb_samples, maxlen_s)\n            xl_mask:(nb_samples, )\n            -----------------------------------------\n            prev_h: (nb_samples, hidden_dims)\n            prev_a: (nb_samples, maxlen_s)\n            cov:    (nb_samples, maxlen_s)  *** coverage ***\n            -----------------------------------------\n            cc:     (nb_samples, maxlen_s, cxt_dim)\n            cm:     (nb_samples, maxlen_s)\n            ca:     (nb_samples, maxlen_s, ebd_dim)\n            \"\"\"\n            # compute the attention and get the context vector\n            prob  = self.attention_reader(prev_h, cc, Smask=cm, Cov=cov)\n            ncov  = cov + prob\n\n            cxt   = T.sum(cc * prob[:, :, None], axis=1)\n\n            # compute input word embedding (mixed)\n            x_in  = T.concatenate([x, T.sum(ca * prev_a[:, :, None], axis=1)], axis=-1)\n\n            # compute the current hidden states of the RNN.\n            x_out = self.RNN(x_in, mask=x_mask, C=cxt, init_h=prev_h, one_step=True)\n\n            # compute the current readout vector.\n            r_in  = [x_out]\n            if self.config['context_predict']:\n                r_in  += [cxt]\n            if self.config['bigram_predict']:\n                r_in  += [x_in]\n\n            # copynet decoding\n            r_in    = T.concatenate(r_in, axis=-1)\n            r_out = self.hidden_readout(x_out)  # (nb_samples, voc_size)\n            if self.config['context_predict']:\n                r_out += self.context_readout(cxt)\n            if self.config['bigram_predict']:\n                r_out += self.prev_word_readout(x_in)\n\n            for l in self.output_nonlinear:\n                r_out = l(r_out)\n\n            key     = self.Os(r_in)  # (nb_samples, cxt_dim) :: key\n            Eng     = T.sum(key[:, None, :] * cc, axis=-1)\n\n            # # gating\n            if self.config['copygate']:\n                gt     = 
self.sigmoid(self.Gs(r_in))  # (nb_samples, 1)\n                r_out += T.log(gt.flatten()[:, None])\n                Eng   += T.log(1 - gt.flatten()[:, None])\n\n                # r_out *= gt.flatten()[:, None]\n                # Eng   *= 1 - gt.flatten()[:, None]\n\n            EngSum  = logSumExp(Eng, axis=-1, mask=cm, c=r_out)\n\n            next_p  = T.concatenate([T.exp(r_out - EngSum), T.exp(Eng - EngSum) * cm], axis=-1)\n            next_c  = next_p[:, self.config['dec_voc_size']:] * ll           # (nb_samples, maxlen_s)\n            next_b  = next_p[:, :self.config['dec_voc_size']]\n            sum_a   = T.sum(next_c, axis=1, keepdims=True)                   # (nb_samples, 1)\n            next_a  = (next_c / (sum_a + err)) * xl_mask[:, None]            # for numerical stability\n            return x_out, next_a, ncov, sum_a, next_b\n\n        outputs, _ = theano.scan(\n            _recurrence,\n            sequences=[X, X_mask, LL, XL_mask],\n            outputs_info=[Init_h, Init_a, coverage, None, None],\n            non_sequences=[context, c_mask, context_A]\n        )\n        X_out, source_prob, coverages, source_sum, prob_dist = [z.dimshuffle((1, 0, 2)) for z in outputs]\n        X        = X.dimshuffle((1, 0, 2))\n        X_mask   = X_mask.dimshuffle((1, 0))\n        XL_mask  = XL_mask.dimshuffle((1, 0))\n\n        # unk masking\n        U_mask   = T.ones_like(target) * (1 - T.eq(target, 1))\n        U_mask  += (1 - U_mask) * (1 - XL_mask)\n\n        # The most different part is here !!\n        log_prob = T.sum(T.log(\n                self._grab_prob(prob_dist, target) * U_mask +\n                source_sum.sum(axis=-1) + err\n        ) * X_mask, axis=1)\n        log_ppl  = log_prob / (Count + err)\n\n        if return_count:\n            return log_prob, Count\n        else:\n            return log_prob, log_ppl\n\n    \"\"\"\n    Sample one step\n    \"\"\"\n\n    def _step_sample(self,\n                     prev_word,\n                     prev_stat,\n                     prev_loc,\n                     prev_cov,\n                     context,\n                     c_mask,\n                     context_A):\n\n        assert c_mask is not None, 'we need the source mask.'\n        # word embedding (note that for the first word, embedding should be all zero)\n        X = T.switch(\n            prev_word[:, None] < 0,\n            alloc_zeros_matrix(prev_word.shape[0], 2 * self.config['dec_embedd_dim']),\n            T.concatenate([self.Embed(prev_word),\n                           T.sum(context_A * prev_loc[:, :, None], axis=1)\n                           ], axis=-1)\n        )\n\n        if self.dropout > 0:\n            X = self.D(X, train=False)\n\n        # apply one step of RNN\n        Probs  = self.attention_reader(prev_stat, context, c_mask, Cov=prev_cov)\n        ncov   = prev_cov + Probs\n\n        cxt    = T.sum(context * Probs[:, :, None], axis=1)\n\n        X_proj, zz, rr = self.RNN(X, C=cxt,\n                                  init_h=prev_stat,\n                                  one_step=True,\n                                  return_gates=True)\n        next_stat = X_proj\n\n        # compute the readout probability distribution and sample it\n        # here the readout is a matrix, different from the learner.\n        readin      = [next_stat]\n        if self.config['context_predict']:\n            readin += [cxt]\n        if self.config['bigram_predict']:\n            readin += [X]\n        readin      = T.concatenate(readin, axis=-1)\n\n        # if 
gating\n        # if self.config['copygate']:\n        #     gt      = self.sigmoid(self.Gs(readin))   # (nb_samples, dim)\n        #     readin *= 1 - gt\n        #     readout = self.hidden_readout(next_stat * gt[:, :self.config['dec_hidden_dim']])\n        #     if self.config['context_predict']:\n        #         readout += self.context_readout(\n        #                 cxt * gt[:, self.config['dec_hidden_dim']:\n        #                          self.config['dec_hidden_dim'] + self.config['dec_contxt_dim']])\n        #     if self.config['bigram_predict']:\n        #         readout += self.prev_word_readout(\n        #                 X * gt[:, -2 * self.config['dec_embedd_dim']:])\n        # else:\n        readout = self.hidden_readout(next_stat)\n        if self.config['context_predict']:\n            readout += self.context_readout(cxt)\n        if self.config['bigram_predict']:\n            readout += self.prev_word_readout(X)\n\n        for l in self.output_nonlinear:\n            readout = l(readout)\n\n        key         = self.Os(readin)\n        Eng         = T.sum(key[:, None, :] * context, axis=-1)\n\n        # # gating\n        if self.config['copygate']:\n            gt       = self.sigmoid(self.Gs(readin))  # (nb_samples, 1)\n            readout += T.log(gt.flatten()[:, None])\n            Eng     += T.log(1 - gt.flatten()[:, None])\n\n        EngSum      = logSumExp(Eng, axis=-1, mask=c_mask, c=readout)\n\n        next_prob   = T.concatenate([T.exp(readout - EngSum), T.exp(Eng - EngSum) * c_mask], axis=-1)\n        next_sample = self.rng.multinomial(pvals=next_prob).argmax(1)\n        return next_prob, next_sample, next_stat, ncov, Probs\n\n    def build_sampler(self):\n        \"\"\"\n        Build a sampler which only steps once.\n        Typically it only works for one word at a time?\n        \"\"\"\n        logger.info(\"build sampler ...\")\n        if self.config['sample_stoch'] and self.config['sample_argmax']:\n            logger.info(\"use argmax search!\")\n        elif self.config['sample_stoch'] and (not self.config['sample_argmax']):\n            logger.info(\"use stochastic sampling!\")\n        elif self.config['sample_beam'] > 1:\n            logger.info(\"use beam search! 
(beam_size={})\".format(self.config['sample_beam']))\n\n        # initial state of our Decoder.\n        context   = T.tensor3()  # theano variable.\n        c_mask    = T.matrix()   # mask of the input sentence.\n        context_A = self.Is(context)\n\n        init_h = self.Initializer(context[:, 0, :])\n        init_a = T.zeros((context.shape[0], context.shape[1]))\n        cov    = T.zeros((context.shape[0], context.shape[1]))\n\n        logger.info('compile the function: get_init_state')\n        self.get_init_state \\\n            = theano.function([context], [init_h, init_a, cov], name='get_init_state')\n        logger.info('done.')\n\n        # word sampler: 1 x 1\n        prev_word = T.vector('prev_word', dtype='int64')\n        prev_stat = T.matrix('prev_state', dtype='float32')\n        prev_a    = T.matrix('prev_a', dtype='float32')\n        prev_cov  = T.matrix('prev_cov', dtype='float32')\n\n        next_prob, next_sample, next_stat, ncov, alpha \\\n            = self._step_sample(prev_word,\n                                prev_stat,\n                                prev_a,\n                                prev_cov,\n                                context,\n                                c_mask,\n                                context_A)\n\n        # next word probability\n        logger.info('compile the function: sample_next')\n        inputs  = [prev_word, prev_stat, prev_a, prev_cov, context, c_mask]\n        outputs = [next_prob, next_sample, next_stat, ncov, alpha]\n        self.sample_next = theano.function(inputs, outputs, name='sample_next')\n        logger.info('done')\n\n    \"\"\"\n    Generate samples, either with stochastic sampling or beam-search!\n\n    [:-:] I have to think over how to modify the BEAM-Search!!\n    \"\"\"\n    def get_sample(self,\n                   context,\n                   c_mask,\n                   source,\n                   k=1, maxlen=30, stochastic=True,\n                   argmax=False, fixlen=False,\n                   return_attend=False\n                   ):\n        # beam size\n        if k > 1:\n            assert not stochastic, 'Beam search does not support stochastic sampling!!'\n\n        # fix length cannot use beam search\n        # if fixlen:\n        #     assert k == 1\n\n        # prepare for searching\n        Lmax   = self.config['dec_voc_size']\n        sample = []\n        ppp    = []\n        attend = []\n        score  = []\n\n        if stochastic:\n            score = 0\n\n        live_k = 1\n        dead_k = 0\n\n        hyp_samples = [[]] * live_k\n        hyp_scores  = np.zeros(live_k).astype(theano.config.floatX)\n        hyp_ppps    = [[]] * live_k\n        hyp_attends = [[]] * live_k\n\n        # get initial state of decoder RNN with context\n        next_state, ss_prob, coverage = self.get_init_state(context)\n        next_word = -1 * np.ones((1,)).astype('int64')  # indicator for the first target word (bos target)\n\n        # Start searching!\n        for ii in xrange(maxlen):\n            # print next_word\n            ctx    = np.tile(context, [live_k, 1, 1])\n            cmk    = np.tile(c_mask,  [live_k, 1])\n            sss    = np.tile(source,  [live_k, 1])\n\n            # # process word\n            def process_():\n                # caution for index_0: UNK\n                ll  = np.zeros((sss.shape[0], sss.shape[1]), dtype='float32')\n                for i in xrange(next_word.shape[0]):\n                    if next_word[i] >= Lmax:\n                        ll[i][next_word[i] - Lmax] 
= 1.\n                        next_word[i] = sss[i][next_word[i] - Lmax]\n                    else:\n                        ll[i] = (sss[i] == next_word[i, None])\n                        # for k in xrange(sss.shape[1]):\n                        #     ll[i][k] = (sss[i][k] == next_word[i])\n                return ll, next_word\n\n            # print next_word\n            ll, next_word = process_()\n            ll_mask = (np.sum(ll, axis=1, keepdims=True) > 0)\n\n            next_a  = ss_prob * ll\n            next_a  = next_a / (err + np.sum(next_a, axis=1, keepdims=True)) * ll_mask\n            next_prob0, next_word, next_state, coverage, alpha \\\n                = self.sample_next(next_word, next_state, next_a, coverage, ctx, cmk)\n            # print next_prob0.shape[1]\n            if not self.config['decode_unk']:\n                next_prob0[:, 1]          = 0.\n                next_prob0 /= np.sum(next_prob0, axis=1, keepdims=True)\n\n            def merge_():\n                # merge the probabilities\n                temple_prob  = copy.copy(next_prob0)\n                source_prob  = copy.copy(next_prob0[:, Lmax:])\n                for i in xrange(next_prob0.shape[0]):\n                    for j in xrange(sss.shape[1]):\n                        if (sss[i, j] < Lmax) and (sss[i, j] != 1):\n                            temple_prob[i, sss[i, j]] += source_prob[i, j]\n                            temple_prob[i, Lmax + j]   = 0.\n\n                return temple_prob, source_prob\n\n            next_prob, ss_prob   = merge_()\n            next_prob0[:, Lmax:] = 0.\n            # print '0', next_prob0[:, 3165]\n            # print '01', next_prob[:, 3165]\n            # # print next_prob[0, Lmax:]\n            # print ss_prob[0, :]\n\n            if stochastic:\n                # using stochastic sampling (or greedy sampling.)\n                if argmax:\n                    nw = next_prob[0].argmax()\n                    next_word[0] = nw\n                else:\n                    nw = self.rng.multinomial(pvals=next_prob).argmax(1)\n\n                sample.append(nw)\n                score += next_prob[0, nw]\n\n                if (not fixlen) and (nw == 0):  # sample reached the end\n                    break\n\n            else:\n                # using beam-search\n                # we can only compute it in a flattened way!\n                cand_scores = hyp_scores[:, None] - np.log(next_prob)\n                cand_flat   = cand_scores.flatten()\n                ranks_flat  = cand_flat.argsort()[:(k - dead_k)]\n\n                # fetch the best results.\n                voc_size    = next_prob.shape[1]\n                trans_index = ranks_flat / voc_size\n                word_index  = ranks_flat % voc_size\n                costs       = cand_flat[ranks_flat]\n\n                # get the new hyp samples\n                new_hyp_samples  = []\n                new_hyp_ppps     = []\n                new_hyp_attends  = []\n                new_hyp_scores   = np.zeros(k - dead_k).astype(theano.config.floatX)\n                new_hyp_states   = []\n                new_hyp_coverage = []\n                new_hyp_ss       = []\n\n                for idx, [ti, wi] in enumerate(zip(trans_index, word_index)):\n                    new_hyp_samples.append(hyp_samples[ti] + [wi])\n                    new_hyp_scores[idx] = copy.copy(costs[idx])\n\n                    new_hyp_states.append(copy.copy(next_state[ti]))\n                    new_hyp_coverage.append(copy.copy(coverage[ti]))\n                
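    # carry this hypothesis' copy-mode source distribution over to the next step\n                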
    new_hyp_ss.append(copy.copy(ss_prob[ti]))\n\n                    if not return_attend:\n                        new_hyp_ppps.append(hyp_ppps[ti] + [[next_prob0[ti][wi], next_prob[ti][wi]]])\n                    else:\n                        new_hyp_ppps.append(hyp_ppps[ti] + [(ss_prob[ti], alpha[ti])])\n\n                # check the finished samples\n                new_live_k   = 0\n                hyp_samples  = []\n                hyp_scores   = []\n                hyp_states   = []\n                hyp_coverage = []\n                hyp_ppps     = []\n                hyp_ss       = []\n\n                for idx in xrange(len(new_hyp_samples)):\n                    if (new_hyp_samples[idx][-1] == 0) and (not fixlen):\n                        sample.append(new_hyp_samples[idx])\n                        ppp.append(new_hyp_ppps[idx])\n                        score.append(new_hyp_scores[idx])\n                        dead_k += 1\n                    else:\n                        new_live_k += 1\n                        hyp_samples.append(new_hyp_samples[idx])\n                        hyp_ppps.append(new_hyp_ppps[idx])\n                        hyp_scores.append(new_hyp_scores[idx])\n                        hyp_states.append(new_hyp_states[idx])\n                        hyp_coverage.append(new_hyp_coverage[idx])\n                        hyp_ss.append(new_hyp_ss[idx])\n\n                hyp_scores = np.array(hyp_scores)\n                live_k = new_live_k\n\n                if new_live_k < 1:\n                    break\n                if dead_k >= k:\n                    break\n\n                next_word  = np.array([w[-1] for w in hyp_samples])\n                next_state = np.array(hyp_states)\n                coverage   = np.array(hyp_coverage)\n                ss_prob    = np.array(hyp_ss)\n                pass\n\n        # end.\n        if not stochastic:\n            # dump every remaining one\n            if live_k > 0:\n                for idx in xrange(live_k):\n                    sample.append(hyp_samples[idx])\n                    ppp.append(hyp_ppps[idx])\n                    score.append(hyp_scores[idx])\n\n        return sample, score, ppp\n\n\nclass FnnDecoder(Model):\n    def __init__(self, config, rng, prefix='fnndec'):\n        \"\"\"\n        Feed-forward decoder: predict the target directly from the context vector.\n        \"\"\"\n        super(FnnDecoder, self).__init__()\n        self.config = config\n        self.rng = rng\n        self.prefix = prefix\n        self.name = prefix\n\n        \"\"\"\n        Create Dense Predictor.\n        \"\"\"\n\n        self.Tr = Dense(self.config['dec_contxt_dim'],\n                             self.config['dec_hidden_dim'],\n                             activation='maxout2',\n                             name='{}_Tr'.format(prefix))\n        self._add(self.Tr)\n\n        self.Pr = Dense(self.config['dec_hidden_dim'] / 2,\n                             self.config['dec_voc_size'],\n                             activation='softmax',\n                             name='{}_Pr'.format(prefix))\n        self._add(self.Pr)\n        logger.info(\"FF decoder ok.\")\n\n    @staticmethod\n    def _grab_prob(probs, X):\n        assert probs.ndim == 3\n\n        batch_size = probs.shape[0]\n        max_len = probs.shape[1]\n        vocab_size = probs.shape[2]\n\n        probs = probs.reshape((batch_size * max_len, vocab_size))\n        return probs[T.arange(batch_size * max_len), X.flatten(1)].reshape(X.shape)  # advanced indexing\n\n    def build_decoder(self, target, context):\n        \"\"\"\n        Build the Decoder Computational Graph\n        \"\"\"\n        prob_dist = self.Pr(self.Tr(context[:, None, :]))\n        log_prob  = T.sum(T.log(self._grab_prob(prob_dist, target) + err), axis=1)\n        return log_prob\n\n    def build_sampler(self):\n        context   = T.matrix()\n        prob_dist = self.Pr(self.Tr(context))\n        next_sample = self.rng.multinomial(pvals=prob_dist).argmax(1)\n        self.sample_next = theano.function([context], [prob_dist, next_sample], name='sample_next_{}'.format(self.prefix))\n        logger.info('done')\n\n    def get_sample(self, context, argmax=True):\n\n        prob, sample = self.sample_next(context)\n        if argmax:\n            return prob[0].argmax()\n        else:\n            return sample[0]\n\n\n########################################################################################################################\n# Encoder-Decoder Models ::::\n#\nclass RNNLM(Model):\n    \"\"\"\n    RNN-LM, with context vector = 0.\n    It is very similar to the implementation of the VAE.\n    \"\"\"\n    def __init__(self,\n                 config, n_rng, rng,\n                 mode='Evaluation'):\n        super(RNNLM, self).__init__()\n\n        self.config = config\n        self.n_rng  = n_rng  # numpy random stream\n        self.rng    = rng  # Theano random stream\n        self.mode   = mode\n        self.name   = 'rnnlm'\n\n    def build_(self):\n        logger.info(\"build the RNN-decoder\")\n        self.decoder = Decoder(self.config, self.rng, prefix='dec', mode=self.mode)\n\n        # registration:\n        self._add(self.decoder)\n\n        # objectives and optimizers\n        self.optimizer = optimizers.get('adadelta')\n\n        # save the initial memories\n        if self.config['mode'] == 'NTM':\n            self.memory    = initializations.get('glorot_uniform')(\n                    (self.config['dec_memory_dim'], self.config['dec_memory_wdth']))\n\n        logger.info(\"create the RECURRENT language model. 
ok\")\n\n    def compile_(self, mode='train', contrastive=False):\n        # compile the computational graph.\n        # INFO: the parameters.\n        # mode: 'train'/ 'display'/ 'policy' / 'all'\n\n        ps = 'params: {\\n'\n        for p in self.params:\n            ps += '{0}: {1}\\n'.format(p.name, p.eval().shape)\n        ps += '}.'\n        logger.info(ps)\n\n        param_num = np.sum([np.prod(p.shape.eval()) for p in self.params])\n        logger.info(\"total number of the parameters of the model: {}\".format(param_num))\n\n        if mode == 'train' or mode == 'all':\n            if not contrastive:\n                self.compile_train()\n            else:\n                self.compile_train_CE()\n\n        if mode == 'display' or mode == 'all':\n            self.compile_sample()\n\n        if mode == 'inference' or mode == 'all':\n            self.compile_inference()\n\n    def compile_train(self):\n\n        # questions (theano variables)\n        inputs  = T.imatrix()  # padded input word sequence (for training)\n        if self.config['mode']   == 'RNN':\n            context = alloc_zeros_matrix(inputs.shape[0], self.config['dec_contxt_dim'])\n        elif self.config['mode'] == 'NTM':\n            context = T.repeat(self.memory[None, :, :], inputs.shape[0], axis=0)\n        else:\n            raise NotImplementedError\n\n        # decoding.\n        target  = inputs\n        logPxz, logPPL = self.decoder.build_decoder(target, context)\n\n        # reconstruction loss\n        loss_rec = T.mean(-logPxz)\n        loss_ppl = T.exp(T.mean(-logPPL))\n\n        L1       = T.sum([T.sum(abs(w)) for w in self.params])\n        loss     = loss_rec\n\n        updates = self.optimizer.get_updates(self.params, loss)\n\n        logger.info(\"compiling the computational graph ::training function::\")\n        train_inputs = [inputs]\n\n        self.train_ = theano.function(train_inputs,\n                                      [loss_rec, loss_ppl],\n                                      updates=updates,\n                                      name='train_fun')\n        logger.info(\"pre-training functions compile done.\")\n\n        # add monitoring:\n        self.monitor['context'] = context\n        self._monitoring()\n\n        # compiling monitoring\n        self.compile_monitoring(train_inputs)\n\n    @abstractmethod\n    def compile_train_CE(self):\n        pass\n\n    def compile_sample(self):\n        # context vectors (as)\n        self.decoder.build_sampler()\n        logger.info(\"display functions compile done.\")\n\n    @abstractmethod\n    def compile_inference(self):\n        pass\n\n    def default_context(self):\n        if self.config['mode'] == 'RNN':\n            return np.zeros(shape=(1, self.config['dec_contxt_dim']), dtype=theano.config.floatX)\n        elif self.config['mode'] == 'NTM':\n            memory = self.memory.get_value()\n            memory = memory.reshape((1, memory.shape[0], memory.shape[1]))\n            return memory\n\n    def generate_(self, context=None, max_len=None, mode='display'):\n        \"\"\"\n        :param context: context vector to guide the generation;\n                        if None, fall back to self.default_context().\n        :return: question sentence in natural language.\n        \"\"\"\n        # assert self.config['sample_stoch'], 'RNNLM sampling must be stochastic'\n        # assert not self.config['sample_argmax'], 'RNNLM sampling cannot use argmax'\n\n        if context is None:\n            context = self.default_context()\n\n   
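     # sampler settings: beam width, maximum length, and stochastic/argmax decoding\n   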
     args = dict(k=self.config['sample_beam'],\n                    maxlen=self.config['max_len'] if not max_len else max_len,\n                    stochastic=self.config['sample_stoch'] if mode == 'display' else None,\n                    argmax=self.config['sample_argmax'] if mode == 'display' else None)\n\n        sample, score = self.decoder.get_sample(context, **args)\n        if not args['stochastic']:\n            score = score / np.array([len(s) for s in sample])\n            sample = sample[score.argmin()]\n            score = score.min()\n        else:\n            score /= float(len(sample))\n\n        return sample, np.exp(score)\n\n\nclass AutoEncoder(RNNLM):\n    \"\"\"\n    Regular Auto-Encoder: RNN Encoder/Decoder\n    \"\"\"\n\n    def __init__(self,\n                 config, n_rng, rng,\n                 mode='Evaluation'):\n        super(RNNLM, self).__init__()\n\n        self.config = config\n        self.n_rng  = n_rng  # numpy random stream\n        self.rng    = rng  # Theano random stream\n        self.mode   = mode\n        self.name = 'vae'\n\n    def build_(self):\n        logger.info(\"build the RNN auto-encoder\")\n        self.encoder = Encoder(self.config, self.rng, prefix='enc')\n        if self.config['shared_embed']:\n            self.decoder = Decoder(self.config, self.rng, prefix='dec', embed=self.encoder.Embed)\n        else:\n            self.decoder = Decoder(self.config, self.rng, prefix='dec')\n\n        \"\"\"\n        Build the Transformation\n        \"\"\"\n        if self.config['nonlinear_A']:\n            self.action_trans = Dense(\n                self.config['enc_hidden_dim'],\n                self.config['action_dim'],\n                activation='tanh',\n                name='action_transform'\n            )\n        else:\n            assert self.config['enc_hidden_dim'] == self.config['action_dim'], \\\n                    'hidden dimension must match action dimension'\n            self.action_trans = Identity(name='action_transform')\n\n        if self.config['nonlinear_B']:\n            self.context_trans = Dense(\n                self.config['action_dim'],\n                self.config['dec_contxt_dim'],\n                activation='tanh',\n                name='context_transform'\n            )\n        else:\n            assert self.config['dec_contxt_dim'] == self.config['action_dim'], \\\n                    'action dimension must match context dimension'\n            self.context_trans = Identity(name='context_transform')\n\n        # registration\n        self._add(self.action_trans)\n        self._add(self.context_trans)\n        self._add(self.encoder)\n        self._add(self.decoder)\n\n        # objectives and optimizers\n        self.optimizer = optimizers.get(self.config['optimizer'], kwargs={'lr': self.config['lr']})\n\n        logger.info(\"create Helmholtz RECURRENT neural network. 
ok\")\n\n    def compile_train(self, mode='train'):\n        # questions (theano variables)\n        inputs  = T.imatrix()  # padded input word sequence (for training)\n        context = alloc_zeros_matrix(inputs.shape[0], self.config['dec_contxt_dim'])\n        assert context.ndim == 2\n\n        # decoding.\n        target  = inputs\n        logPxz, logPPL = self.decoder.build_decoder(target, context)\n\n        # reconstruction loss\n        loss_rec = T.mean(-logPxz)\n        loss_ppl = T.exp(T.mean(-logPPL))\n\n        L1       = T.sum([T.sum(abs(w)) for w in self.params])\n        loss     = loss_rec\n\n        updates = self.optimizer.get_updates(self.params, loss)\n\n        logger.info(\"compiling the computational graph ::training function::\")\n        train_inputs = [inputs]\n\n        self.train_ = theano.function(train_inputs,\n                                      [loss_rec, loss_ppl],\n                                      updates=updates,\n                                      name='train_fun')\n        logger.info(\"pre-training functions compile done.\")\n\n        if mode == 'display' or mode == 'all':\n            \"\"\"\n            build the sampler function here <:::>\n            \"\"\"\n            # context vectors (as)\n            self.decoder.build_sampler()\n            logger.info(\"display functions compile done.\")\n\n        # add monitoring:\n        self._monitoring()\n\n        # compiling monitoring\n        self.compile_monitoring(train_inputs)\n\n\nclass NRM(Model):\n    \"\"\"\n    Neural Responding Machine\n    An Encoder-Decoder based responding model.\n    \"\"\"\n    def __init__(self,\n                 config, n_rng, rng,\n                 mode='Evaluation',\n                 use_attention=False,\n                 copynet=False,\n                 identity=False):\n        super(NRM, self).__init__()\n\n        self.config   = config\n        self.n_rng    = n_rng  # numpy random stream\n        self.rng      = rng  # Theano random stream\n        self.mode     = mode\n        self.name     = 'nrm'\n        self.attend   = use_attention\n        self.copynet  = copynet\n        self.identity = identity\n\n    def build_(self, lr=None, iterations=None):\n        logger.info(\"build the Neural Responding Machine\")\n\n        # encoder-decoder:: <<==>>\n        self.encoder = Encoder(self.config, self.rng, prefix='enc', mode=self.mode)\n        if not self.attend:\n            self.decoder = Decoder(self.config, self.rng, prefix='dec', mode=self.mode)\n        else:\n            self.decoder = DecoderAtt(self.config, self.rng, prefix='dec', mode=self.mode,\n                                      copynet=self.copynet, identity=self.identity)\n\n        self._add(self.encoder)\n        self._add(self.decoder)\n\n        # objectives and optimizers\n        if self.config['optimizer'] == 'adam':\n            self.optimizer = optimizers.get(self.config['optimizer'],\n                                         kwargs=dict(rng=self.rng,\n                                                     save=False))\n        else:\n            self.optimizer = optimizers.get(self.config['optimizer'])\n        if lr is not None:\n            self.optimizer.lr.set_value(floatX(lr))\n            self.optimizer.iterations.set_value(floatX(iterations))\n        logger.info(\"build ok.\")\n\n    def compile_(self, mode='all', contrastive=False):\n        # compile the computational graph.\n        # INFO: the parameters.\n        # mode: 'train'/ 'display'/ 'policy' / 
'all'\n\n        ps = 'params: {\\n'\n        for p in self.params:\n            ps += '{0}: {1}\\n'.format(p.name, p.eval().shape)\n        ps += '}.'\n        logger.info(ps)\n\n        param_num = np.sum([np.prod(p.shape.eval()) for p in self.params])\n        logger.info(\"total number of the parameters of the model: {}\".format(param_num))\n\n        if mode == 'train' or mode == 'all':\n            self.compile_train()\n\n        if mode == 'display' or mode == 'all':\n            self.compile_sample()\n\n        if mode == 'inference' or mode == 'all':\n            self.compile_inference()\n\n    def compile_train(self):\n\n        # questions (theano variables)\n        inputs    = T.imatrix()  # padded input word sequence (for training)\n        target    = T.imatrix()  # padded target word sequence (for training)\n        cc_matrix = T.tensor3()\n\n        # encoding & decoding\n\n        code, _, c_mask, _ = self.encoder.build_encoder(inputs, None, return_sequence=True, return_embed=True)\n        # code: (nb_samples, max_len, contxt_dim)\n        if 'explicit_loc' in self.config:\n            if self.config['explicit_loc']:\n                print 'use explicit location!!'\n                max_len = code.shape[1]\n                expLoc  = T.eye(max_len, self.config['encode_max_len'], dtype='float32')[None, :, :]\n                expLoc  = T.repeat(expLoc, code.shape[0], axis=0)\n                code    = T.concatenate([code, expLoc], axis=2)\n\n        logPxz, logPPL     = self.decoder.build_decoder(target, cc_matrix,\n                                                        code, c_mask)\n\n        # responding loss\n        loss_rec = T.mean(-logPxz)\n        loss_ppl = T.exp(T.mean(-logPPL))\n        loss     = loss_rec\n\n        updates  = self.optimizer.get_updates(self.params, loss)\n\n        logger.info(\"compiling the computational graph ::training function::\")\n        train_inputs = [inputs, target, cc_matrix]\n\n        self.train_ = theano.function(train_inputs,\n                                      [loss_rec, loss_ppl],\n                                      updates=updates,\n                                      name='train_fun')\n        self.train_guard = theano.function(train_inputs,\n                                      [loss_rec, loss_ppl],\n                                      updates=updates,\n                                      name='train_fun',\n                                      mode=NanGuardMode(nan_is_error=True, inf_is_error=True, big_is_error=True))\n        logger.info(\"training functions compile done.\")\n\n        # # add monitoring:\n        # self.monitor['context'] = context\n        # self._monitoring()\n        #\n        # # compiling monitoring\n        # self.compile_monitoring(train_inputs)\n\n    def compile_sample(self):\n        if not self.attend:\n            self.encoder.compile_encoder(with_context=False)\n        else:\n            self.encoder.compile_encoder(with_context=False, return_sequence=True, return_embed=True)\n\n        self.decoder.build_sampler()\n        logger.info(\"sampling functions compile done.\")\n\n    def compile_inference(self):\n        pass\n\n    def generate_(self, inputs, mode='display', return_attend=False, return_all=False):\n        # assert self.config['sample_stoch'], 'RNNLM sampling must be stochastic'\n        # assert not self.config['sample_argmax'], 'RNNLM sampling cannot use argmax'\n\n        args = dict(k=self.config['sample_beam'],\n                    
maxlen=self.config['max_len'],\n                    stochastic=self.config['sample_stoch'] if mode == 'display' else None,\n                    argmax=self.config['sample_argmax'] if mode == 'display' else None,\n                    return_attend=return_attend)\n        context, _, c_mask, _, Z, R = self.encoder.gtenc(inputs)\n        # c_mask[0, 3] = c_mask[0, 3] * 0\n        # L   = context.shape[1]\n        # izz = np.concatenate([np.arange(3), np.asarray([1,2]), np.arange(3, L)])\n        # context = context[:, izz, :]\n        # c_mask  = c_mask[:, izz]\n        # inputs  = inputs[:, izz]\n        # context, _, c_mask, _ = self.encoder.encode(inputs)\n        # import pylab as plt\n        # # visualize_(plt.subplots(), Z[0][:, 300:], normal=False)\n        # visualize_(plt.subplots(), context[0], normal=False)\n\n        if 'explicit_loc' in self.config:\n            if self.config['explicit_loc']:\n                max_len = context.shape[1]\n                expLoc  = np.eye(max_len, self.config['encode_max_len'], dtype='float32')[None, :, :]\n                expLoc  = np.repeat(expLoc, context.shape[0], axis=0)\n                context = np.concatenate([context, expLoc], axis=2)\n\n        sample, score, ppp    = self.decoder.get_sample(context, c_mask, inputs, **args)\n        if return_all:\n            return sample, score, ppp\n\n        if not args['stochastic']:\n            score  = score / np.array([len(s) for s in sample])\n            idz    = score.argmin()\n            sample = sample[idz]\n            score  = score.min()\n            ppp    = ppp[idz]\n        else:\n            score /= float(len(sample))\n\n        return sample, np.exp(score), ppp\n\n    def evaluate_(self, inputs, outputs, idx2word, inputs_unk=None, encode=True):\n        def cut_zero_yes(sample, idx2word, ppp=None, Lmax=None):\n            if Lmax is None:\n                Lmax = self.config['dec_voc_size']\n            if ppp is None:\n                if 0 not in sample:\n                    return ['{}'.format(idx2word[w].encode('utf-8'))\n                            if w < Lmax else '{}'.format(idx2word[inputs[w - Lmax]].encode('utf-8'))\n                            for w in sample]\n\n                return ['{}'.format(idx2word[w].encode('utf-8'))\n                        if w < Lmax else '{}'.format(idx2word[inputs[w - Lmax]].encode('utf-8'))\n                        for w in sample[:sample.index(0)]]\n            else:\n                if 0 not in sample:\n                    return ['{0} ({1:1.1f})'.format(\n                            idx2word[w].encode('utf-8'), p)\n                            if w < Lmax\n                            else '{0} ({1:1.1f})'.format(\n                            idx2word[inputs[w - Lmax]].encode('utf-8'), p)\n                            for w, p in zip(sample, ppp)]\n                idz = sample.index(0)\n                return ['{0} ({1:1.1f})'.format(\n                        idx2word[w].encode('utf-8'), p)\n                        if w < Lmax\n                        else '{0} ({1:1.1f})'.format(\n                        idx2word[inputs[w - Lmax]].encode('utf-8'), p)\n                        for w, p in zip(sample[:idz], ppp[:idz])]\n\n        def cut_zero_no(sample, idx2word, ppp=None, Lmax=None):\n            if Lmax is None:\n                Lmax = self.config['dec_voc_size']\n            if ppp is None:\n                if 0 not in sample:\n                    return ['{}'.format(idx2word[w])\n                            if w < Lmax else 
'{}'.format(idx2word[inputs[w - Lmax]].encode('utf-8'))\n                            for w in sample]\n\n                return ['{}'.format(idx2word[w])\n                        if w < Lmax else '{}'.format(idx2word[inputs[w - Lmax]].encode('utf-8'))\n                        for w in sample[:sample.index(0)]]\n            else:\n                if 0 not in sample:\n                    return ['{0} ({1:1.1f})'.format(\n                            idx2word[w], p)\n                            if w < Lmax\n                            else '{0} ({1:1.1f})'.format(\n                            idx2word[inputs[w - Lmax]], p)\n                            for w, p in zip(sample, ppp)]\n                idz = sample.index(0)\n                return ['{0} ({1:1.1f})'.format(\n                        idx2word[w].encode('utf-8'), p)\n                        if w < Lmax\n                        else '{0} ({1:1.1f})'.format(\n                        idx2word[inputs[w - Lmax]], p)\n                        for w, p in zip(sample[:idz], ppp[:idz])]\n\n        if inputs_unk is None:\n            result, _, ppp = self.generate_(inputs[None, :])\n        else:\n            result, _, ppp = self.generate_(inputs_unk[None, :])\n\n        if encode:\n            cut_zero = cut_zero_yes\n        else:\n            cut_zero = cut_zero_no\n        pp0, pp1 = [np.asarray(p) for p in zip(*ppp)]\n        pp = (pp1 - pp0) / pp1\n        # pp = (pp1 - pp0) / pp1\n        print len(ppp)\n\n        print '<Environment> [lr={0}][iter={1}]'.format(self.optimizer.lr.get_value(),\n                                                        self.optimizer.iterations.get_value())\n\n        a = '[SOURCE]: {}\\n'.format(' '.join(cut_zero(inputs.tolist(),  idx2word, Lmax=len(idx2word))))\n        b = '[TARGET]: {}\\n'.format(' '.join(cut_zero(outputs.tolist(), idx2word, Lmax=len(idx2word))))\n        c = '[DECODE]: {}\\n'.format(' '.join(cut_zero(result, idx2word)))\n        d = '[CpRate]: {}\\n'.format(' '.join(cut_zero(result, idx2word, pp.tolist())))\n        e = '[CpRate]: {}\\n'.format(' '.join(cut_zero(result, idx2word, result)))\n        print a\n        print '{0} -> {1}'.format(len(a.split()), len(b.split()))\n\n        if inputs_unk is not None:\n            k = '[_INPUT]: {}\\n'.format(' '.join(cut_zero(inputs_unk.tolist(),  idx2word, Lmax=len(idx2word))))\n            print k\n            a += k\n\n        print b\n        print c\n        print d\n        # print e\n        a += b + c + d\n        return a\n\n    def analyse_(self, inputs, outputs, idx2word, inputs_unk=None, return_attend=False, name=None, display=False):\n        def cut_zero(sample, idx2word, ppp=None, Lmax=None):\n            if Lmax is None:\n                Lmax = self.config['dec_voc_size']\n            if ppp is None:\n                if 0 not in sample:\n                    return ['{}'.format(idx2word[w].encode('utf-8'))\n                            if w < Lmax else '{}'.format(idx2word[inputs[w - Lmax]].encode('utf-8'))\n                            for w in sample]\n\n                return ['{}'.format(idx2word[w].encode('utf-8'))\n                        if w < Lmax else '{}'.format(idx2word[inputs[w - Lmax]].encode('utf-8'))\n                        for w in sample[:sample.index(0)]]\n            else:\n                if 0 not in sample:\n                    return ['{0} ({1:1.1f})'.format(\n                            idx2word[w].encode('utf-8'), p)\n                            if w < Lmax\n                            else '{0} 
({1:1.1f})'.format(\n                            idx2word[inputs[w - Lmax]].encode('utf-8'), p)\n                            for w, p in zip(sample, ppp)]\n                idz = sample.index(0)\n                return ['{0} ({1:1.1f})'.format(\n                        idx2word[w].encode('utf-8'), p)\n                        if w < Lmax\n                        else '{0} ({1:1.1f})'.format(\n                        idx2word[inputs[w - Lmax]].encode('utf-8'), p)\n                        for w, p in zip(sample[:idz], ppp[:idz])]\n\n        if inputs_unk is None:\n            result, _, ppp = self.generate_(inputs[None, :],\n                                            return_attend=return_attend)\n        else:\n            result, _, ppp = self.generate_(inputs_unk[None, :],\n                                            return_attend=return_attend)\n\n        source = '{}'.format(' '.join(cut_zero(inputs.tolist(),  idx2word, Lmax=len(idx2word))))\n        target = '{}'.format(' '.join(cut_zero(outputs.tolist(), idx2word, Lmax=len(idx2word))))\n        decode = '{}'.format(' '.join(cut_zero(result, idx2word)))\n\n        if display:\n            print source\n            print target\n            print decode\n\n            idz    = result.index(0)\n            p1, p2 = [np.asarray(p) for p in zip(*ppp)]\n            print p1.shape\n            import pylab as plt\n            # plt.rc('text', usetex=True)\n            # plt.rc('font', family='serif')\n            visualize_(plt.subplots(), 1 - p1[:idz, :].T, grid=True, name=name)\n            visualize_(plt.subplots(), 1 - p2[:idz, :].T, name=name)\n\n            # visualize_(plt.subplots(), 1 - np.mean(p2[:idz, :], axis=1, keepdims=True).T)\n        return target == decode\n\n    def analyse_cover(self, inputs, outputs, idx2word, inputs_unk=None, return_attend=False, name=None, display=False):\n        def cut_zero(sample, idx2word, ppp=None, Lmax=None):\n            if Lmax is None:\n                Lmax = self.config['dec_voc_size']\n            if ppp is None:\n                if 0 not in sample:\n                    return ['{}'.format(idx2word[w].encode('utf-8'))\n                            if w < Lmax else '{}'.format(idx2word[inputs[w - Lmax]].encode('utf-8'))\n                            for w in sample]\n\n                return ['{}'.format(idx2word[w].encode('utf-8'))\n                        if w < Lmax else '{}'.format(idx2word[inputs[w - Lmax]].encode('utf-8'))\n                        for w in sample[:sample.index(0)]]\n            else:\n                if 0 not in sample:\n                    return ['{0} ({1:1.1f})'.format(\n                            idx2word[w].encode('utf-8'), p)\n                            if w < Lmax\n                            else '{0} ({1:1.1f})'.format(\n                            idx2word[inputs[w - Lmax]].encode('utf-8'), p)\n                            for w, p in zip(sample, ppp)]\n                idz = sample.index(0)\n                return ['{0} ({1:1.1f})'.format(\n                        idx2word[w].encode('utf-8'), p)\n                        if w < Lmax\n                        else '{0} ({1:1.1f})'.format(\n                        idx2word[inputs[w - Lmax]].encode('utf-8'), p)\n                        for w, p in zip(sample[:idz], ppp[:idz])]\n\n        if inputs_unk is None:\n            results, _, ppp = self.generate_(inputs[None, :],\n                                            return_attend=return_attend,\n                                            return_all=True)\n        
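# otherwise decode from the UNK-replaced input\n        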
else:\n            results, _, ppp = self.generate_(inputs_unk[None, :],\n                                            return_attend=return_attend,\n                                            return_all=True)\n\n        source = '{}'.format(' '.join(cut_zero(inputs.tolist(),  idx2word, Lmax=len(idx2word))))\n        target = '{}'.format(' '.join(cut_zero(outputs.tolist(), idx2word, Lmax=len(idx2word))))\n        # decode = '{}'.format(' '.join(cut_zero(result, idx2word)))\n\n        score  = [target == '{}'.format(' '.join(cut_zero(result, idx2word))) for result in results]\n        return max(score)\n"
  },
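  {
    "path": "examples/copy_prob_sketch.py",
    "content": "# -*- coding: utf-8 -*-\n\"\"\"\nIllustrative sketch only -- NOT part of the original model code.\n\nIt mimics, in plain numpy, how `DecoderAtt.build_decoder` and\n`DecoderAtt._step_sample` (emolga/models/encdec.py) merge the\ngenerate-mode scores (`r_out` / `readout`) with the copy-mode scores\n(`Eng`) through one shared softmax (the `logSumExp` call), masking the\npadded source positions.  The file name, the helper name and every\nnumber below are made up for demonstration.\n\"\"\"\nimport numpy as np\n\n\ndef merged_softmax(r_out, eng, c_mask):\n    # one softmax over [vocabulary scores, per-source-position scores];\n    # masked source positions receive (numerically) zero probability.\n    scores = np.concatenate([r_out, np.where(c_mask > 0, eng, -1e9)], axis=-1)\n    scores = scores - scores.max(axis=-1, keepdims=True)  # numerical stability\n    p = np.exp(scores)\n    return p / p.sum(axis=-1, keepdims=True)\n\n\nif __name__ == '__main__':\n    voc_size = 5\n    r_out  = np.array([[0.1, 2.0, -1.0, 0.5, 0.0]])  # generate-mode scores\n    eng    = np.array([[1.5, 0.2, 3.0]])             # copy-mode scores\n    c_mask = np.array([[1., 1., 0.]])                # last source slot is padding\n    p = merged_softmax(r_out, eng, c_mask)\n    print(p[:, :voc_size])   # generation part of the distribution\n    print(p[:, voc_size:])   # copy part; the masked slot gets ~0\n    print(p.sum())           # the full distribution sums to one\n"
  },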
  {
    "path": "emolga/models/encdec.py",
    "content": "__author__ = 'jiataogu'\nimport theano\nimport logging\nimport copy\nimport emolga.basic.objectives as objectives\nimport emolga.basic.optimizers as optimizers\n\nfrom theano.compile.nanguardmode import NanGuardMode\nfrom emolga.layers.core import Dropout, Dense, Dense2, Identity\nfrom emolga.layers.recurrent import *\nfrom emolga.layers.ntm_minibatch import Controller\nfrom emolga.layers.embeddings import *\nfrom emolga.layers.attention import *\nfrom core import Model\n\nlogger = logging.getLogger(__name__)\nRNN    = GRU             # change it here for other RNN models.\n\n\n########################################################################################################################\n# Encoder/Decoder Blocks ::::\n#\n# Encoder Back-up\n# class Encoder(Model):\n#     \"\"\"\n#     Recurrent Neural Network-based Encoder\n#     It is used to compute the context vector.\n#     \"\"\"\n#\n#     def __init__(self,\n#                  config, rng, prefix='enc',\n#                  mode='Evaluation', embed=None, use_context=False):\n#         super(Encoder, self).__init__()\n#         self.config = config\n#         self.rng = rng\n#         self.prefix = prefix\n#         self.mode = mode\n#         self.name = prefix\n#         self.use_context = use_context\n#\n#         \"\"\"\n#         Create all elements of the Encoder's Computational graph\n#         \"\"\"\n#         # create Embedding layers\n#         logger.info(\"{}_create embedding layers.\".format(self.prefix))\n#         if embed:\n#             self.Embed = embed\n#         else:\n#             self.Embed = Embedding(\n#                 self.config['enc_voc_size'],\n#                 self.config['enc_embedd_dim'],\n#                 name=\"{}_embed\".format(self.prefix))\n#             self._add(self.Embed)\n#\n#         if self.use_context:\n#             self.Initializer = Dense(\n#                 config['enc_contxt_dim'],\n#                 config['enc_hidden_dim'],\n#                 activation='tanh',\n#                 name=\"{}_init\".format(self.prefix)\n#             )\n#             self._add(self.Initializer)\n#\n#         \"\"\"\n#         Encoder Core\n#         \"\"\"\n#         if self.config['encoder'] == 'RNN':\n#             # create RNN cells\n#             if not self.config['bidirectional']:\n#                 logger.info(\"{}_create RNN cells.\".format(self.prefix))\n#                 self.RNN = RNN(\n#                     self.config['enc_embedd_dim'],\n#                     self.config['enc_hidden_dim'],\n#                     None if not use_context\n#                     else self.config['enc_contxt_dim'],\n#                     name=\"{}_cell\".format(self.prefix)\n#                 )\n#                 self._add(self.RNN)\n#             else:\n#                 logger.info(\"{}_create forward RNN cells.\".format(self.prefix))\n#                 self.forwardRNN = RNN(\n#                     self.config['enc_embedd_dim'],\n#                     self.config['enc_hidden_dim'],\n#                     None if not use_context\n#                     else self.config['enc_contxt_dim'],\n#                     name=\"{}_fw_cell\".format(self.prefix)\n#                 )\n#                 self._add(self.forwardRNN)\n#\n#                 logger.info(\"{}_create backward RNN cells.\".format(self.prefix))\n#                 self.backwardRNN = RNN(\n#                     self.config['enc_embedd_dim'],\n#                     self.config['enc_hidden_dim'],\n#                     None if 
not use_context\n#                     else self.config['enc_contxt_dim'],\n#                     name=\"{}_bw_cell\".format(self.prefix)\n#                 )\n#                 self._add(self.backwardRNN)\n#\n#             logger.info(\"create encoder ok.\")\n#\n#         elif self.config['encoder'] == 'WS':\n#             # create weighted sum layers.\n#             if self.config['ws_weight']:\n#                 self.WS  = Dense(self.config['enc_embedd_dim'],\n#                                  self.config['enc_hidden_dim'], name='{}_ws'.format(self.prefix))\n#                 self._add(self.WS)\n#\n#             logger.info(\"create encoder ok.\")\n#\n#     def build_encoder(self, source, context=None, return_embed=False):\n#         \"\"\"\n#         Build the Encoder Computational Graph\n#         \"\"\"\n#         # Initial state\n#         Init_h = None\n#         if self.use_context:\n#             Init_h = self.Initializer(context)\n#\n#         # word embedding\n#         if self.config['encoder'] == 'RNN':\n#             if not self.config['bidirectional']:\n#                 X, X_mask = self.Embed(source, True)\n#                 if not self.config['pooling']:\n#                     X_out = self.RNN(X, X_mask, C=context, init_h=Init_h, return_sequence=False)\n#                 else:\n#                     X_out = self.RNN(X, X_mask, C=context, init_h=Init_h, return_sequence=True)\n#             else:\n#                 source2 = source[:, ::-1]\n#                 X,  X_mask = self.Embed(source, True)\n#                 X2, X2_mask = self.Embed(source2, True)\n#\n#                 if not self.config['pooling']:\n#                     X_out1 = self.backwardRNN(X, X_mask, C=context, init_h=Init_h, return_sequence=False)\n#                     X_out2 = self.forwardRNN( X2, X2_mask, C=context, init_h=Init_h, return_sequence=False)\n#                     X_out  = T.concatenate([X_out1, X_out2], axis=1)\n#                 else:\n#                     X_out1 = self.backwardRNN(X, X_mask, C=context, init_h=Init_h, return_sequence=True)\n#                     X_out2 = self.forwardRNN( X2, X2_mask, C=context, init_h=Init_h, return_sequence=True)\n#                     X_out  = T.concatenate([X_out1, X_out2], axis=2)\n#\n#             if self.config['pooling'] == 'max':\n#                 X_out = T.max(X_out, axis=1)\n#             elif self.config['pooling'] == 'mean':\n#                 X_out = T.mean(X_out, axis=1)\n#\n#         elif self.config['encoder'] == 'WS':\n#             X, X_mask = self.Embed(source, True)\n#             if self.config['ws_weight']:\n#                 X_out = T.sum(self.WS(X) * X_mask[:, :, None], axis=1) / T.sum(X_mask, axis=1, keepdims=True)\n#             else:\n#                 assert self.config['enc_embedd_dim'] == self.config['enc_hidden_dim'], \\\n#                     'directly sum should match the dimension'\n#                 X_out = T.sum(X * X_mask[:, :, None], axis=1) / T.sum(X_mask, axis=1, keepdims=True)\n#         else:\n#             raise NotImplementedError\n#\n#         if return_embed:\n#             return X_out, X, X_mask\n#         return X_out\n#\n#     def compile_encoder(self, with_context=False):\n#         source  = T.imatrix()\n#         if with_context:\n#             context = T.matrix()\n#             self.encode = theano.function([source, context],\n#                                           self.build_encoder(source, context))\n#         else:\n#             self.encode = theano.function([source],\n#                       
                self.build_encoder(source, None))\n\nclass Encoder(Model):\n    \"\"\"\n    Recurrent Neural Network-based Encoder\n    It is used to compute the context vector.\n    \"\"\"\n\n    def __init__(self,\n                 config, rng, prefix='enc',\n                 mode='Evaluation', embed=None, use_context=False):\n        super(Encoder, self).__init__()\n        self.config = config\n        self.rng = rng\n        self.prefix = prefix\n        self.mode = mode\n        self.name = prefix\n        self.use_context = use_context\n\n        self.return_embed = False\n        self.return_sequence = False\n\n        \"\"\"\n        Create all elements of the Encoder's Computational graph\n        \"\"\"\n        # create Embedding layers\n        logger.info(\"{}_create embedding layers.\".format(self.prefix))\n        if embed:\n            self.Embed = embed\n        else:\n            self.Embed = Embedding(\n                self.config['enc_voc_size'],\n                self.config['enc_embedd_dim'],\n                name=\"{}_embed\".format(self.prefix))\n            self._add(self.Embed)\n\n        if self.use_context:\n            self.Initializer = Dense(\n                config['enc_contxt_dim'],\n                config['enc_hidden_dim'],\n                activation='tanh',\n                name=\"{}_init\".format(self.prefix)\n            )\n            self._add(self.Initializer)\n\n        \"\"\"\n        Encoder Core\n        \"\"\"\n        # create RNN cells\n        if not self.config['bidirectional']:\n            logger.info(\"{}_create RNN cells.\".format(self.prefix))\n            self.RNN = RNN(\n                self.config['enc_embedd_dim'],\n                self.config['enc_hidden_dim'],\n                None if not use_context\n                else self.config['enc_contxt_dim'],\n                name=\"{}_cell\".format(self.prefix)\n            )\n            self._add(self.RNN)\n        else:\n            logger.info(\"{}_create forward RNN cells.\".format(self.prefix))\n            self.forwardRNN = RNN(\n                self.config['enc_embedd_dim'],\n                self.config['enc_hidden_dim'],\n                None if not use_context\n                else self.config['enc_contxt_dim'],\n                name=\"{}_fw_cell\".format(self.prefix)\n            )\n            self._add(self.forwardRNN)\n\n            logger.info(\"{}_create backward RNN cells.\".format(self.prefix))\n            self.backwardRNN = RNN(\n                self.config['enc_embedd_dim'],\n                self.config['enc_hidden_dim'],\n                None if not use_context\n                else self.config['enc_contxt_dim'],\n                name=\"{}_bw_cell\".format(self.prefix)\n            )\n            self._add(self.backwardRNN)\n\n        logger.info(\"create encoder ok.\")\n\n    def build_encoder(self, source, context=None, return_embed=False, return_sequence=False):\n        \"\"\"\n        Build the Encoder Computational Graph\n        \"\"\"\n        # Initial state\n        Init_h = None\n        if self.use_context:\n            Init_h = self.Initializer(context)\n\n        # word embedding\n        if not self.config['bidirectional']:\n            X, X_mask = self.Embed(source, True)\n            X_out     = self.RNN(X, X_mask, C=context, init_h=Init_h, return_sequence=return_sequence)\n            if return_sequence:\n                X_tail    = X_out[:, -1]\n            else:\n                X_tail    = X_out\n        else:\n            source2 = source[:, 
::-1]\n            X,  X_mask = self.Embed(source, True)\n            X2, X2_mask = self.Embed(source2, True)\n\n            X_out1 = self.backwardRNN(X, X_mask,  C=context, init_h=Init_h, return_sequence=return_sequence)\n            X_out2 = self.forwardRNN(X2, X2_mask, C=context, init_h=Init_h, return_sequence=return_sequence)\n            if not return_sequence:\n                X_out  = T.concatenate([X_out1, X_out2], axis=1)\n                X_tail = X_out\n            else:\n                X_out  = T.concatenate([X_out1, X_out2[:, ::-1, :]], axis=2)\n                X_tail = T.concatenate([X_out1[:, -1], X_out2[:, -1]], axis=1)\n\n        X_mask  = T.cast(X_mask, dtype='float32')\n        if return_embed:\n            return X_out, X, X_mask, X_tail\n        return X_out\n\n    def compile_encoder(self, with_context=False, return_embed=False, return_sequence=False):\n        source  = T.imatrix()\n        self.return_embed = return_embed\n        self.return_sequence = return_sequence\n        if with_context:\n            context = T.matrix()\n\n            self.encode = theano.function([source, context],\n                                          self.build_encoder(source, context,\n                                                             return_embed=return_embed,\n                                                             return_sequence=return_sequence))\n        else:\n            self.encode = theano.function([source],\n                                          self.build_encoder(source, None,\n                                                             return_embed=return_embed,\n                                                             return_sequence=return_sequence))\n\n\nclass Decoder(Model):\n    \"\"\"\n    Recurrent Neural Network-based Decoder.\n    It is used for:\n        (1) Evaluation: compute the probability P(Y|X)\n        (2) Prediction: sample the best result based on P(Y|X)\n        (3) Beam-search\n        (4) Scheduled Sampling (how to implement it?)\n    \"\"\"\n\n    def __init__(self,\n                 config, rng, prefix='dec',\n                 mode='RNN', embed=None,\n                 highway=False):\n        \"\"\"\n        mode = RNN: use a RNN Decoder\n        \"\"\"\n        super(Decoder, self).__init__()\n        self.config = config\n        self.rng = rng\n        self.prefix = prefix\n        self.name = prefix\n        self.mode = mode\n\n        self.highway = highway\n        self.init = initializations.get('glorot_uniform')\n        self.sigmoid = activations.get('sigmoid')\n\n        # use standard drop-out for input & output.\n        # I believe it should not be used for the context vector.\n        self.dropout = config['dropout']\n        if self.dropout > 0:\n            logger.info('Use standard-dropout!!!!')\n            self.D   = Dropout(rng=self.rng, p=self.dropout, name='{}_Dropout'.format(prefix))\n\n        \"\"\"\n        Create all elements of the Decoder's computational graph.\n        \"\"\"\n        # create Embedding layers\n        logger.info(\"{}_create embedding layers.\".format(self.prefix))\n        if embed:\n            self.Embed = embed\n        else:\n            self.Embed = Embedding(\n                self.config['dec_voc_size'],\n                self.config['dec_embedd_dim'],\n                name=\"{}_embed\".format(self.prefix))\n            self._add(self.Embed)\n\n        # create Initialization Layers\n        logger.info(\"{}_create initialization layers.\".format(self.prefix))\n        if not 
config['bias_code']:\n            self.Initializer = Zero()\n        else:\n            self.Initializer = Dense(\n                config['dec_contxt_dim'],\n                config['dec_hidden_dim'],\n                activation='tanh',\n                name=\"{}_init\".format(self.prefix)\n            )\n\n        # create RNN cells\n        logger.info(\"{}_create RNN cells.\".format(self.prefix))\n        self.RNN = RNN(\n            self.config['dec_embedd_dim'],\n            self.config['dec_hidden_dim'],\n            self.config['dec_contxt_dim'],\n            name=\"{}_cell\".format(self.prefix)\n        )\n\n        self._add(self.Initializer)\n        self._add(self.RNN)\n\n        # HighWay Gating\n        if highway:\n            logger.info(\"HIGHWAY CONNECTION~~~!!!\")\n            assert self.config['context_predict']\n            assert self.config['dec_contxt_dim'] == self.config['dec_hidden_dim']\n\n            self.C_x = self.init((self.config['dec_contxt_dim'],\n                                  self.config['dec_hidden_dim']))\n            self.H_x = self.init((self.config['dec_hidden_dim'],\n                                  self.config['dec_hidden_dim']))\n            self.b_x = initializations.get('zero')(self.config['dec_hidden_dim'])\n\n            self.C_x.name = '{}_Cx'.format(self.prefix)\n            self.H_x.name = '{}_Hx'.format(self.prefix)\n            self.b_x.name = '{}_bx'.format(self.prefix)\n            self.params += [self.C_x, self.H_x, self.b_x]\n\n        # create readout layers\n        logger.info(\"_create Readout layers\")\n\n        # 1. hidden layers readout.\n        self.hidden_readout = Dense(\n            self.config['dec_hidden_dim'],\n            self.config['output_dim']\n            if self.config['deep_out']\n            else self.config['dec_voc_size'],\n            activation='linear',\n            name=\"{}_hidden_readout\".format(self.prefix)\n        )\n\n        # 2. previous word readout\n        self.prev_word_readout = None\n        if self.config['bigram_predict']:\n            self.prev_word_readout = Dense(\n                self.config['dec_embedd_dim'],\n                self.config['output_dim']\n                if self.config['deep_out']\n                else self.config['dec_voc_size'],\n                activation='linear',\n                name=\"{}_prev_word_readout\".format(self.prefix),\n                learn_bias=False\n            )\n\n        # 3. 
context readout\n        self.context_readout = None\n        if self.config['context_predict']:\n            if not self.config['leaky_predict']:\n                self.context_readout = Dense(\n                    self.config['dec_contxt_dim'],\n                    self.config['output_dim']\n                    if self.config['deep_out']\n                    else self.config['dec_voc_size'],\n                    activation='linear',\n                    name=\"{}_context_readout\".format(self.prefix),\n                    learn_bias=False\n                )\n            else:\n                assert self.config['dec_contxt_dim'] == self.config['dec_hidden_dim']\n                self.context_readout = self.hidden_readout\n\n        # option: deep output (maxout)\n        if self.config['deep_out']:\n            self.activ = Activation(config['deep_out_activ'])\n            # self.dropout = Dropout(rng=self.rng, p=config['dropout'])\n            self.output_nonlinear = [self.activ]  # , self.dropout]\n            self.output = Dense(\n                self.config['output_dim'] / 2\n                if config['deep_out_activ'] == 'maxout2'\n                else self.config['output_dim'],\n\n                self.config['dec_voc_size'],\n                activation='softmax',\n                name=\"{}_output\".format(self.prefix),\n                learn_bias=False\n            )\n        else:\n            self.output_nonlinear = []\n            self.output = Activation('softmax')\n\n        # registration:\n        self._add(self.hidden_readout)\n\n        if not self.config['leaky_predict']:\n            self._add(self.context_readout)\n\n        self._add(self.prev_word_readout)\n        self._add(self.output)\n\n        if self.config['deep_out']:\n            self._add(self.activ)\n        # self._add(self.dropout)\n\n        logger.info(\"create decoder ok.\")\n\n    @staticmethod\n    def _grab_prob(probs, X):\n        assert probs.ndim == 3\n\n        batch_size = probs.shape[0]\n        max_len = probs.shape[1]\n        vocab_size = probs.shape[2]\n\n        probs = probs.reshape((batch_size * max_len, vocab_size))\n        return probs[T.arange(batch_size * max_len), X.flatten(1)].reshape(X.shape)  # advanced indexing\n\n    \"\"\"\n    Build the decoder for evaluation\n    \"\"\"\n    def prepare_xy(self, target):\n        # Word embedding\n        Y, Y_mask = self.Embed(target, True)  # (nb_samples, max_len, embedding_dim)\n\n        if self.config['use_input']:\n            X = T.concatenate([alloc_zeros_matrix(Y.shape[0], 1, Y.shape[2]), Y[:, :-1, :]], axis=1)\n        else:\n            X = 0 * Y\n\n        # option ## drop words.\n\n        X_mask    = T.concatenate([T.ones((Y.shape[0], 1)), Y_mask[:, :-1]], axis=1)\n        Count     = T.cast(T.sum(X_mask, axis=1), dtype=theano.config.floatX)\n        return X, X_mask, Y, Y_mask, Count\n\n    def build_decoder(self, target, context=None,\n                      return_count=False,\n                      train=True):\n\n        \"\"\"\n        Build the Decoder Computational Graph\n        For training/testing\n        \"\"\"\n        X, X_mask, Y, Y_mask, Count = self.prepare_xy(target)\n\n        # input drop-out if any.\n        if self.dropout > 0:\n            X = self.D(X, train=train)\n\n        # Initial state of RNN\n        Init_h = self.Initializer(context)\n        if not self.highway:\n            X_out  = self.RNN(X, X_mask, C=context, init_h=Init_h, return_sequence=True)\n\n            # Readout\n            readout 
= self.hidden_readout(X_out)\n            if self.dropout > 0:\n                readout = self.D(readout, train=train)\n\n            if self.config['context_predict']:\n                readout += self.context_readout(context).dimshuffle(0, 'x', 1)\n        else:\n            X      = X.dimshuffle((1, 0, 2))\n            X_mask = X_mask.dimshuffle((1, 0))\n\n            def _recurrence(x, x_mask, prev_h, c):\n                # compute the highway gate for context vector.\n                xx    = dot(c, self.C_x, self.b_x) + dot(prev_h, self.H_x)  # highway gate.\n                xx    = self.sigmoid(xx)\n\n                cy    = xx * c   # the path without using RNN\n                x_out = self.RNN(x, mask=x_mask, C=c, init_h=prev_h, one_step=True)\n                hx    = (1 - xx) * x_out\n                return x_out, hx, cy\n\n            outputs, _ = theano.scan(\n                _recurrence,\n                sequences=[X, X_mask],\n                outputs_info=[Init_h, None, None],\n                non_sequences=[context]\n            )\n\n            # hidden readout + context readout\n            readout   = self.hidden_readout( outputs[1].dimshuffle((1, 0, 2)))\n            if self.dropout > 0:\n                readout = self.D(readout, train=train)\n\n            readout  += self.context_readout(outputs[2].dimshuffle((1, 0, 2)))\n\n            # return to normal size.\n            X      = X.dimshuffle((1, 0, 2))\n            X_mask = X_mask.dimshuffle((1, 0))\n\n        if self.config['bigram_predict']:\n            readout += self.prev_word_readout(X)\n\n        for l in self.output_nonlinear:\n            readout = l(readout)\n\n        prob_dist = self.output(readout)  # (nb_samples, max_len, vocab_size)\n        # log_old  = T.sum(T.log(self._grab_prob(prob_dist, target)), axis=1)\n        log_prob = T.sum(T.log(self._grab_prob(prob_dist, target)) * X_mask, axis=1)\n        log_ppl  = log_prob / Count\n\n        if return_count:\n            return log_prob, Count\n        else:\n            return log_prob, log_ppl\n\n    \"\"\"\n    Sample one step\n    \"\"\"\n\n    def _step_sample(self, prev_word, prev_stat, context):\n        # word embedding (note that for the first word, embedding should be all zero)\n        if self.config['use_input']:\n            X = T.switch(\n                prev_word[:, None] < 0,\n                alloc_zeros_matrix(prev_word.shape[0], self.config['dec_embedd_dim']),\n                self.Embed(prev_word)\n            )\n        else:\n            X = alloc_zeros_matrix(prev_word.shape[0], self.config['dec_embedd_dim'])\n\n        if self.dropout > 0:\n            X = self.D(X, train=False)\n\n        # apply one step of RNN\n        if not self.highway:\n            X_proj = self.RNN(X, C=context, init_h=prev_stat, one_step=True)\n            next_stat = X_proj\n\n            # compute the readout probability distribution and sample it\n            # here the readout is a matrix, different from the learner.\n            readout = self.hidden_readout(next_stat)\n            if self.dropout > 0:\n                readout = self.D(readout, train=False)\n\n            if self.config['context_predict']:\n                readout += self.context_readout(context)\n        else:\n            xx     = dot(context, self.C_x, self.b_x) + dot(prev_stat, self.H_x)  # highway gate.\n            xx     = self.sigmoid(xx)\n\n            X_proj = self.RNN(X, C=context, init_h=prev_stat, one_step=True)\n            next_stat = X_proj\n\n            readout  = 
self.hidden_readout((1 - xx) * X_proj)\n            if self.dropout > 0:\n                readout = self.D(readout, train=False)\n\n            readout += self.context_readout(xx * context)\n\n        if self.config['bigram_predict']:\n            readout += self.prev_word_readout(X)\n\n        for l in self.output_nonlinear:\n            readout = l(readout)\n\n        next_prob = self.output(readout)\n        next_sample = self.rng.multinomial(pvals=next_prob).argmax(1)\n        return next_prob, next_sample, next_stat\n\n    \"\"\"\n    Build the sampler for sampling/greedy search/beam search\n    \"\"\"\n\n    def build_sampler(self):\n        \"\"\"\n        Build a sampler which only steps once.\n        Typically it only works for one word at a time.\n        \"\"\"\n        logger.info(\"build sampler ...\")\n        if self.config['sample_stoch'] and self.config['sample_argmax']:\n            logger.info(\"use argmax search!\")\n        elif self.config['sample_stoch'] and (not self.config['sample_argmax']):\n            logger.info(\"use stochastic sampling!\")\n        elif self.config['sample_beam'] > 1:\n            logger.info(\"use beam search! (beam_size={})\".format(self.config['sample_beam']))\n\n        # initial state of our Decoder.\n        context = T.matrix()  # theano variable.\n\n        init_h = self.Initializer(context)\n        logger.info('compile the function: get_init_state')\n        self.get_init_state \\\n            = theano.function([context], init_h, name='get_init_state')\n        logger.info('done.')\n\n        # word sampler: 1 x 1\n        prev_word = T.vector('prev_word', dtype='int64')\n        prev_stat = T.matrix('prev_state', dtype='float32')\n\n        next_prob, next_sample, next_stat \\\n            = self._step_sample(prev_word, prev_stat, context)\n\n        # next word probability\n        logger.info('compile the function: sample_next')\n        inputs = [prev_word, prev_stat, context]\n        outputs = [next_prob, next_sample, next_stat]\n\n        self.sample_next = theano.function(inputs, outputs, name='sample_next')\n        logger.info('done')\n        pass\n\n    \"\"\"\n    Build a Stochastic Sampler which can use SCAN to work on GPU.\n    However it cannot be used in Beam-search.\n    \"\"\"\n\n    def build_stochastic_sampler(self):\n        context = T.matrix()\n        init_h = self.Initializer(context)\n\n        logger.info('compile the function: sample')\n        pass\n\n    \"\"\"\n    Generate samples, either with stochastic sampling or beam-search!\n    \"\"\"\n\n    def get_sample(self, context, k=1, maxlen=30, stochastic=True, argmax=False, fixlen=False):\n        # beam size\n        if k > 1:\n            assert not stochastic, 'Beam search does not support stochastic sampling!!'\n\n        # fixed-length decoding cannot use beam search\n        # if fixlen:\n        #     assert k == 1\n\n        # prepare for searching\n        sample = []\n        score = []\n        if stochastic:\n            score = 0\n\n        live_k = 1\n        dead_k = 0\n\n        hyp_samples = [[]] * live_k\n        hyp_scores = np.zeros(live_k).astype(theano.config.floatX)\n        hyp_states = []\n\n        # get initial state of decoder RNN with context\n        next_state = self.get_init_state(context)\n        next_word = -1 * np.ones((1,)).astype('int64')  # indicator for the first target word (bos target)\n\n        # Start searching!\n        for ii in xrange(maxlen):\n            # print next_word\n            ctx = np.tile(context, [live_k, 1])\n            next_prob, next_word, next_state \\\n                = self.sample_next(next_word, next_state, ctx)  # one decoding step.\n\n            if stochastic:\n                # using stochastic sampling (or greedy search when argmax is on)\n                if argmax:\n                    nw = next_prob[0].argmax()\n                    next_word[0] = nw\n                else:\n                    nw = next_word[0]\n\n                sample.append(nw)\n                score += next_prob[0, nw]\n\n                if (not fixlen) and (nw == 0):  # sample reached the end\n                    break\n\n            else:\n                # using beam-search\n                # the scores can only be computed in a flattened way!\n                cand_scores = hyp_scores[:, None] - np.log(next_prob)\n                cand_flat = cand_scores.flatten()\n                ranks_flat = cand_flat.argsort()[:(k - dead_k)]\n\n                # fetch the best results.\n                voc_size = next_prob.shape[1]\n                trans_index = ranks_flat // voc_size\n                word_index = ranks_flat % voc_size\n                costs = cand_flat[ranks_flat]\n\n                # get the new hyp samples\n                new_hyp_samples = []\n                new_hyp_scores = np.zeros(k - dead_k).astype(theano.config.floatX)\n                new_hyp_states = []\n\n                for idx, [ti, wi] in enumerate(zip(trans_index, word_index)):\n                    new_hyp_samples.append(hyp_samples[ti] + [wi])\n                    new_hyp_scores[idx] = copy.copy(costs[idx])\n                    new_hyp_states.append(copy.copy(next_state[ti]))\n\n                # check the finished samples\n                new_live_k = 0\n                hyp_samples = []\n                hyp_scores = []\n                hyp_states = []\n\n                for idx in xrange(len(new_hyp_samples)):\n                    if (new_hyp_samples[idx][-1] == 0) and (not fixlen):  # hypothesis ends with <eos> (id 0)\n                        sample.append(new_hyp_samples[idx])\n                        score.append(new_hyp_scores[idx])\n                        dead_k += 1\n                    else:\n                        new_live_k += 1\n                        hyp_samples.append(new_hyp_samples[idx])\n                        hyp_scores.append(new_hyp_scores[idx])\n                        hyp_states.append(new_hyp_states[idx])\n\n                hyp_scores = np.array(hyp_scores)\n                live_k = new_live_k\n\n                if new_live_k < 1:\n                    break\n                if dead_k >= k:\n                    break\n\n                next_word = np.array([w[-1] for w in hyp_samples])\n                next_state = np.array(hyp_states)\n                pass\n            pass\n\n        # end.\n        if not stochastic:\n            # dump every remaining one\n            if live_k > 0:\n                for idx in xrange(live_k):\n                    sample.append(hyp_samples[idx])\n                    score.append(hyp_scores[idx])\n\n        return sample, score\n\n\nclass DecoderAtt(Decoder):\n    \"\"\"\n    Recurrent Neural Network-based Decoder\n    with Attention Mechanism\n    \"\"\"\n    def __init__(self,\n                 config, rng, prefix='dec',\n                 mode='RNN', embed=None,\n                 copynet=False, identity=False):\n        super(DecoderAtt, self).__init__(\n                config, rng, prefix,\n                 mode, embed, False)\n\n        self.copynet  = copynet\n        self.identity = identity\n        # attention reader\n        
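# (added note) the Attention module scores each source annotation against\n        # the current decoder state; the third argument (1000) is presumably the\n        # width of its scoring MLP and is hard-coded here.\n        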
self.attention_reader = Attention(\n            self.config['dec_hidden_dim'],\n            self.config['dec_contxt_dim'],\n            1000,\n            name='source_attention'\n        )\n        self._add(self.attention_reader)\n\n        # if the copying mechanism (CopyNet) is enabled\n        if self.copynet:\n\n            if not self.identity:\n                # 'in-trans' projects the source annotations into the embedding\n                # space, so the target-word embedding can be conditioned on them\n                # (see prepare_xy below).\n                self.Is = Dense(\n                    self.config['dec_contxt_dim'],\n                    self.config['dec_embedd_dim'],\n                    name='in-trans'\n                )\n            else:\n                assert self.config['dec_contxt_dim'] == self.config['dec_embedd_dim']\n                self.Is = Identity(name='ini')\n\n            # 'out-trans' projects the readout into the annotation space; copy\n            # scores are dot-products between this key and the source annotations.\n            self.Os = Dense(\n                self.config['dec_readout_dim'],\n                self.config['dec_contxt_dim'],\n                name='out-trans'\n            )\n            self._add(self.Is)\n            self._add(self.Os)\n\n        logger.info('adjust decoder ok.')\n\n    \"\"\"\n    Build the decoder for evaluation\n    \"\"\"\n    def prepare_xy(self, target, context=None):\n        if not self.copynet:\n            # Word embedding\n            Y, Y_mask = self.Embed(target, True)  # (nb_samples, max_len, embedding_dim)\n        else:\n            Y, Y_mask = self.Embed(target, True, context=self.Is(context))\n\n        if self.config['use_input']:\n            X = T.concatenate([alloc_zeros_matrix(Y.shape[0], 1, Y.shape[2]), Y[:, :-1, :]], axis=1)\n        else:\n            X = 0 * Y\n\n        X_mask    = T.concatenate([T.ones((Y.shape[0], 1)), Y_mask[:, :-1]], axis=1)\n        Count     = T.cast(T.sum(X_mask, axis=1), dtype=theano.config.floatX)\n        return X, X_mask, Y, Y_mask, Count\n\n    def build_decoder(self,\n                      target,\n                      context, c_mask,\n                      return_count=False,\n                      train=True):\n        \"\"\"\n        Build the Computational Graph ::> Context is essential\n        \"\"\"\n        assert c_mask is not None, 'context must be supplied for this decoder.'\n        assert context.ndim == 3, 'context must have 3 dimensions.'\n        # context: (nb_samples, max_len, contxt_dim)\n\n        X, X_mask, Y, Y_mask, Count = self.prepare_xy(target, context)\n\n        # input drop-out if any.\n        if self.dropout > 0:\n            X     = self.D(X, train=train)\n\n        # Initial state of RNN\n        Init_h  = self.Initializer(context[:, 0, :])  # default order ->\n        X       = X.dimshuffle((1, 0, 2))\n        X_mask  = X_mask.dimshuffle((1, 0))\n\n        def _recurrence(x, x_mask, prev_h, cc, cm):\n            # compute the attention and get the context vector\n            prob  = self.attention_reader(prev_h, cc, Smask=cm)\n            c     = T.sum(cc * prob[:, :, None], axis=1)\n            x_out = self.RNN(x, mask=x_mask, C=c, init_h=prev_h, one_step=True)\n            return x_out, prob, c\n\n        outputs, _ = theano.scan(\n            _recurrence,\n            sequences=[X, X_mask],\n            outputs_info=[Init_h, None, None],\n            non_sequences=[context, c_mask]\n        )\n        X_out, Probs, Ctx = [z.dimshuffle((1, 0, 2)) for z in outputs]\n        # return to normal size.\n        X       = X.dimshuffle((1, 0, 2))\n        X_mask  = X_mask.dimshuffle((1, 0))\n\n        # Readout\n        readin  = [X_out]\n        readout = self.hidden_readout(X_out)\n        if self.dropout > 0:\n            readout = self.D(readout, train=train)\n\n        if self.config['context_predict']:\n            
readin  += [Ctx]\n            readout += self.context_readout(Ctx)\n\n        if self.config['bigram_predict']:\n            readin  += [X]\n            readout += self.prev_word_readout(X)\n\n        for l in self.output_nonlinear:\n            readout = l(readout)\n\n        if self.copynet:\n            readin  = T.concatenate(readin, axis=-1)\n            key     = self.Os(readin)\n\n            # (nb_samples, max_len_T, embed_size) :: key\n            # (nb_samples, max_len_S, embed_size) :: context\n            Eng     = T.sum(key[:, :, None, :] * context[:, None, :, :], axis=-1)\n            # (nb_samples, max_len_T, max_len_S)  :: Eng\n            EngSum  = logSumExp(Eng, axis=2, mask=c_mask[:, None, :], c=readout)\n            prob_dist = T.concatenate([T.exp(readout - EngSum), T.exp(Eng - EngSum) * c_mask[:, None, :]], axis=-1)\n        else:\n            prob_dist = self.output(readout)  # (nb_samples, max_len, vocab_size)\n\n        log_prob = T.sum(T.log(self._grab_prob(prob_dist, target)) * X_mask, axis=1)\n        log_ppl  = log_prob / Count\n\n        if return_count:\n            return log_prob, Count\n        else:\n            return log_prob, log_ppl\n\n    \"\"\"\n    Sample one step\n    \"\"\"\n\n    def _step_sample(self, prev_word, prev_stat, context, c_mask):\n        assert c_mask is not None, 'we need the source mask.'\n        # word embedding (note that for the first word, embedding should be all zero)\n        if self.config['use_input']:\n            if not self.copynet:\n                X = T.switch(\n                    prev_word[:, None] < 0,\n                    alloc_zeros_matrix(prev_word.shape[0], self.config['dec_embedd_dim']),\n                    self.Embed(prev_word)\n                )\n            else:\n                X = T.switch(\n                    prev_word[:, None] < 0,\n                    alloc_zeros_matrix(prev_word.shape[0], self.config['dec_embedd_dim']),\n                    self.Embed(prev_word, context=self.Is(context))\n                )\n        else:\n            X = alloc_zeros_matrix(prev_word.shape[0], self.config['dec_embedd_dim'])\n\n        if self.dropout > 0:\n            X = self.D(X, train=False)\n\n        # apply one step of RNN\n        Probs  = self.attention_reader(prev_stat, context, c_mask)\n        cxt    = T.sum(context * Probs[:, :, None], axis=1)\n        X_proj = self.RNN(X, C=cxt, init_h=prev_stat, one_step=True)\n        next_stat = X_proj\n\n        # compute the readout probability distribution and sample it\n        # here the readout is a matrix, different from the learner.\n        readout = self.hidden_readout(next_stat)\n        readin  = [next_stat]\n        if self.dropout > 0:\n            readout = self.D(readout, train=False)\n\n        if self.config['context_predict']:\n            readout += self.context_readout(cxt)\n            readin  += [cxt]\n\n        if self.config['bigram_predict']:\n            readout += self.prev_word_readout(X)\n            readin  += [X]\n\n        for l in self.output_nonlinear:\n            readout = l(readout)\n\n        if self.copynet:\n            readin  = T.concatenate(readin, axis=-1)\n            key     = self.Os(readin)\n\n            # (nb_samples, embed_size) :: key\n            # (nb_samples, max_len_S, embed_size) :: context\n            Eng     = T.sum(key[:, None, :] * context[:, :, :], axis=-1)\n            # (nb_samples, max_len_S)  :: Eng\n            EngSum  = logSumExp(Eng, axis=-1, mask=c_mask, c=readout)\n            next_prob = 
T.concatenate([T.exp(readout - EngSum), T.exp(Eng - EngSum) * c_mask], axis=-1)\n        else:\n            next_prob = self.output(readout)  # (nb_samples, max_len, vocab_size)\n\n        next_sample = self.rng.multinomial(pvals=next_prob).argmax(1)\n        return next_prob, next_sample, next_stat\n\n    def build_sampler(self):\n        \"\"\"\n        Build a sampler which only steps once.\n        Typically it only works for one word at a time.\n        \"\"\"\n        logger.info(\"build sampler ...\")\n        if self.config['sample_stoch'] and self.config['sample_argmax']:\n            logger.info(\"use argmax search!\")\n        elif self.config['sample_stoch'] and (not self.config['sample_argmax']):\n            logger.info(\"use stochastic sampling!\")\n        elif self.config['sample_beam'] > 1:\n            logger.info(\"use beam search! (beam_size={})\".format(self.config['sample_beam']))\n\n        # initial state of our Decoder.\n        context = T.tensor3()  # theano variable.\n        c_mask  = T.matrix()   # mask of the input sentence.\n\n        init_h = self.Initializer(context[:, 0, :])\n        logger.info('compile the function: get_init_state')\n        self.get_init_state \\\n            = theano.function([context], init_h, name='get_init_state')\n        logger.info('done.')\n\n        # word sampler: 1 x 1\n        prev_word = T.vector('prev_word', dtype='int64')\n        prev_stat = T.matrix('prev_state', dtype='float32')\n\n        next_prob, next_sample, next_stat \\\n            = self._step_sample(prev_word, prev_stat, context, c_mask)\n\n        # next word probability\n        logger.info('compile the function: sample_next')\n        inputs = [prev_word, prev_stat, context, c_mask]\n        outputs = [next_prob, next_sample, next_stat]\n\n        self.sample_next = theano.function(inputs, outputs, name='sample_next')\n        logger.info('done')\n        pass\n\n    \"\"\"\n    Generate samples, either with stochastic sampling or beam-search!\n    \"\"\"\n    def get_sample(self, context, c_mask, k=1, maxlen=30, stochastic=True, argmax=False, fixlen=False):\n        # beam size\n        if k > 1:\n            assert not stochastic, 'Beam search does not support stochastic sampling!!'\n\n        # fixed-length decoding cannot use beam search\n        # if fixlen:\n        #     assert k == 1\n\n        # prepare for searching\n        sample = []\n        score = []\n        if stochastic:\n            score = 0\n\n        live_k = 1\n        dead_k = 0\n\n        hyp_samples = [[]] * live_k\n        hyp_scores = np.zeros(live_k).astype(theano.config.floatX)\n        hyp_states = []\n\n        # get initial state of decoder RNN with context\n        next_state = self.get_init_state(context)\n        next_word = -1 * np.ones((1,)).astype('int64')  # indicator for the first target word (bos target)\n\n        # Start searching!\n        for ii in xrange(maxlen):\n            # print next_word\n            ctx    = np.tile(context, [live_k, 1, 1])\n            cmk    = np.tile(c_mask, [live_k, 1])\n            next_prob, next_word, next_state \\\n                = self.sample_next(next_word, next_state, ctx, cmk)\n\n            if stochastic:\n                # using stochastic sampling (or greedy search when argmax is on)\n                if argmax:\n                    nw = next_prob[0].argmax()\n                    next_word[0] = nw\n                else:\n                    nw = next_word[0]\n\n                sample.append(nw)\n                score += next_prob[0, nw]\n\n         
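       # (added note) word id 0 acts as <eos>: stochastic sampling stops on it\n                # here, and cut_zero() in NRM.evaluate_ truncates output at the first 0.\n         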
       if (not fixlen) and (nw == 0):  # sample reached the end\n                    break\n\n            else:\n                # using beam-search\n                # the scores can only be computed in a flattened way!\n                cand_scores = hyp_scores[:, None] - np.log(next_prob)\n                cand_flat = cand_scores.flatten()\n                ranks_flat = cand_flat.argsort()[:(k - dead_k)]\n\n                # fetch the best results.\n                voc_size = next_prob.shape[1]\n                trans_index = ranks_flat // voc_size\n                word_index = ranks_flat % voc_size\n                costs = cand_flat[ranks_flat]\n\n                # get the new hyp samples\n                new_hyp_samples = []\n                new_hyp_scores = np.zeros(k - dead_k).astype(theano.config.floatX)\n                new_hyp_states = []\n\n                for idx, [ti, wi] in enumerate(zip(trans_index, word_index)):\n                    new_hyp_samples.append(hyp_samples[ti] + [wi])\n                    new_hyp_scores[idx] = copy.copy(costs[idx])\n                    new_hyp_states.append(copy.copy(next_state[ti]))\n\n                # check the finished samples\n                new_live_k = 0\n                hyp_samples = []\n                hyp_scores = []\n                hyp_states = []\n\n                for idx in xrange(len(new_hyp_samples)):\n                    if (new_hyp_samples[idx][-1] == 0) and (not fixlen):  # hypothesis ends with <eos> (id 0)\n                        sample.append(new_hyp_samples[idx])\n                        score.append(new_hyp_scores[idx])\n                        dead_k += 1\n                    else:\n                        new_live_k += 1\n                        hyp_samples.append(new_hyp_samples[idx])\n                        hyp_scores.append(new_hyp_scores[idx])\n                        hyp_states.append(new_hyp_states[idx])\n\n                hyp_scores = np.array(hyp_scores)\n                live_k = new_live_k\n\n                if new_live_k < 1:\n                    break\n                if dead_k >= k:\n                    break\n\n                next_word = np.array([w[-1] for w in hyp_samples])\n                next_state = np.array(hyp_states)\n                pass\n            pass\n\n        # end.\n        if not stochastic:\n            # dump every remaining one\n            if live_k > 0:\n                for idx in xrange(live_k):\n                    sample.append(hyp_samples[idx])\n                    score.append(hyp_scores[idx])\n\n        return sample, score\n\n\nclass FnnDecoder(Model):\n    def __init__(self, config, rng, prefix='fnndec'):\n        \"\"\"\n        Feedforward decoder: a maxout transformation followed by a softmax readout.\n        \"\"\"\n        super(FnnDecoder, self).__init__()\n        self.config = config\n        self.rng = rng\n        self.prefix = prefix\n        self.name = prefix\n\n        \"\"\"\n        Create Dense Predictor.\n        \"\"\"\n\n        self.Tr = Dense(self.config['dec_contxt_dim'],\n                             self.config['dec_hidden_dim'],\n                             activation='maxout2',\n                             name='{}_Tr'.format(prefix))\n        self._add(self.Tr)\n\n        self.Pr = Dense(self.config['dec_hidden_dim'] / 2,\n                             self.config['dec_voc_size'],\n                             activation='softmax',\n                             name='{}_Pr'.format(prefix))\n        self._add(self.Pr)\n        logger.info(\"FF decoder ok.\")\n\n    @staticmethod\n    def _grab_prob(probs, X):\n        assert probs.ndim == 3\n\n        batch_size = probs.shape[0]\n        max_len = probs.shape[1]\n        vocab_size = probs.shape[2]\n\n        probs = probs.reshape((batch_size * max_len, vocab_size))\n        return probs[T.arange(batch_size * max_len), X.flatten(1)].reshape(X.shape)  # advanced indexing\n\n    def build_decoder(self, target, context):\n        \"\"\"\n        Build the Decoder Computational Graph\n        \"\"\"\n        prob_dist = self.Pr(self.Tr(context[:, None, :]))\n        log_prob  = T.sum(T.log(self._grab_prob(prob_dist, target)), axis=1)\n        return log_prob\n\n    def build_sampler(self):\n        context   = T.matrix()\n        prob_dist = self.Pr(self.Tr(context))\n        next_sample = self.rng.multinomial(pvals=prob_dist).argmax(1)\n        self.sample_next = theano.function([context], [prob_dist, next_sample], name='sample_next_{}'.format(self.prefix))\n        logger.info('done')\n\n    def get_sample(self, context, argmax=True):\n\n        prob, sample = self.sample_next(context)\n        if argmax:\n            return prob[0].argmax()\n        else:\n            return sample[0]\n\n\n########################################################################################################################\n# Encoder-Decoder Models ::::\n#\nclass RNNLM(Model):\n    \"\"\"\n    RNN-LM, with context vector = 0.\n    It is very similar to the implementation of the VAE.\n    \"\"\"\n    def __init__(self,\n                 config, n_rng, rng,\n                 mode='Evaluation'):\n        super(RNNLM, self).__init__()\n\n        self.config = config\n        self.n_rng  = n_rng  # numpy random stream\n        self.rng    = rng  # Theano random stream\n        self.mode   = mode\n        self.name   = 'rnnlm'\n\n    def build_(self):\n        logger.info(\"build the RNN-decoder\")\n        self.decoder = Decoder(self.config, self.rng, prefix='dec', mode=self.mode)\n\n        # registration:\n        self._add(self.decoder)\n\n        # objectives and optimizers\n        self.optimizer = optimizers.get('adadelta')\n\n        # save the initial memory (for the NTM mode)\n        if self.config['mode'] == 'NTM':\n            self.memory    = initializations.get('glorot_uniform')(\n                    (self.config['dec_memory_dim'], self.config['dec_memory_wdth']))\n\n        logger.info(\"create the RECURRENT language model. 
ok\")\n\n    def compile_(self, mode='train', contrastive=False):\n        # compile the computational graph.\n        # INFO: the parameters.\n        # mode: 'train'/ 'display'/ 'policy' / 'all'\n\n        ps = 'params: {\\n'\n        for p in self.params:\n            ps += '{0}: {1}\\n'.format(p.name, p.eval().shape)\n        ps += '}.'\n        logger.info(ps)\n\n        param_num = np.sum([np.prod(p.shape.eval()) for p in self.params])\n        logger.info(\"total number of the parameters of the model: {}\".format(param_num))\n\n        if mode == 'train' or mode == 'all':\n            if not contrastive:\n                self.compile_train()\n            else:\n                self.compile_train_CE()\n\n        if mode == 'display' or mode == 'all':\n            self.compile_sample()\n\n        if mode == 'inference' or mode == 'all':\n            self.compile_inference()\n\n    def compile_train(self):\n\n        # questions (theano variables)\n        inputs  = T.imatrix()  # padded input word sequence (for training)\n        if self.config['mode']   == 'RNN':\n            context = alloc_zeros_matrix(inputs.shape[0], self.config['dec_contxt_dim'])\n        elif self.config['mode'] == 'NTM':\n            context = T.repeat(self.memory[None, :, :], inputs.shape[0], axis=0)\n        else:\n            raise NotImplementedError\n\n        # decoding.\n        target  = inputs\n        logPxz, logPPL = self.decoder.build_decoder(target, context)\n\n        # reconstruction loss\n        loss_rec = T.mean(-logPxz)\n        loss_ppl = T.exp(T.mean(-logPPL))\n\n        L1       = T.sum([T.sum(abs(w)) for w in self.params])\n        loss     = loss_rec\n\n        updates = self.optimizer.get_updates(self.params, loss)\n\n        logger.info(\"compiling the compuational graph ::training function::\")\n        train_inputs = [inputs]\n\n        self.train_ = theano.function(train_inputs,\n                                      [loss_rec, loss_ppl],\n                                      updates=updates,\n                                      name='train_fun')\n        logger.info(\"pre-training functions compile done.\")\n\n        # add monitoring:\n        self.monitor['context'] = context\n        self._monitoring()\n\n        # compiling monitoring\n        self.compile_monitoring(train_inputs)\n\n    @abstractmethod\n    def compile_train_CE(self):\n        pass\n\n    def compile_sample(self):\n        # context vectors (as)\n        self.decoder.build_sampler()\n        logger.info(\"display functions compile done.\")\n\n    @abstractmethod\n    def compile_inference(self):\n        pass\n\n    def default_context(self):\n        if self.config['mode'] == 'RNN':\n            return np.zeros(shape=(1, self.config['dec_contxt_dim']), dtype=theano.config.floatX)\n        elif self.config['mode'] == 'NTM':\n            memory = self.memory.get_value()\n            memory = memory.reshape((1, memory.shape[0], memory.shape[1]))\n            return memory\n\n    def generate_(self, context=None, max_len=None, mode='display'):\n        \"\"\"\n        :param action: action vector to guide the question.\n                       If None, use a Gaussian to simulate the action.\n        :return: question sentence in natural language.\n        \"\"\"\n        # assert self.config['sample_stoch'], 'RNNLM sampling must be stochastic'\n        # assert not self.config['sample_argmax'], 'RNNLM sampling cannot use argmax'\n\n        if context is None:\n            context = self.default_context()\n\n   
     args = dict(k=self.config['sample_beam'],\n                    maxlen=self.config['max_len'] if not max_len else max_len,\n                    stochastic=self.config['sample_stoch'] if mode == 'display' else None,\n                    argmax=self.config['sample_argmax'] if mode == 'display' else None)\n\n        sample, score = self.decoder.get_sample(context, **args)\n        if not args['stochastic']:\n            score = score / np.array([len(s) for s in sample])\n            sample = sample[score.argmin()]\n            score = score.min()\n        else:\n            score /= float(len(sample))\n\n        return sample, np.exp(score)\n\n\nclass AutoEncoder(RNNLM):\n    \"\"\"\n    Regular Auto-Encoder: RNN Encoder/Decoder\n    \"\"\"\n\n    def __init__(self,\n                 config, n_rng, rng,\n                 mode='Evaluation'):\n        super(RNNLM, self).__init__()\n\n        self.config = config\n        self.n_rng  = n_rng  # numpy random stream\n        self.rng    = rng  # Theano random stream\n        self.mode   = mode\n        self.name = 'vae'\n\n    def build_(self):\n        logger.info(\"build the RNN auto-encoder\")\n        self.encoder = Encoder(self.config, self.rng, prefix='enc')\n        if self.config['shared_embed']:\n            self.decoder = Decoder(self.config, self.rng, prefix='dec', embed=self.encoder.Embed)\n        else:\n            self.decoder = Decoder(self.config, self.rng, prefix='dec')\n\n        \"\"\"\n        Build the Transformation\n        \"\"\"\n        if self.config['nonlinear_A']:\n            self.action_trans = Dense(\n                self.config['enc_hidden_dim'],\n                self.config['action_dim'],\n                activation='tanh',\n                name='action_transform'\n            )\n        else:\n            assert self.config['enc_hidden_dim'] == self.config['action_dim'], \\\n                    'hidden dimension must match action dimension'\n            self.action_trans = Identity(name='action_transform')\n\n        if self.config['nonlinear_B']:\n            self.context_trans = Dense(\n                self.config['action_dim'],\n                self.config['dec_contxt_dim'],\n                activation='tanh',\n                name='context_transform'\n            )\n        else:\n            assert self.config['dec_contxt_dim'] == self.config['action_dim'], \\\n                    'action dimension must match context dimension'\n            self.context_trans = Identity(name='context_transform')\n\n        # registration\n        self._add(self.action_trans)\n        self._add(self.context_trans)\n        self._add(self.encoder)\n        self._add(self.decoder)\n\n        # objectives and optimizers\n        self.optimizer = optimizers.get(self.config['optimizer'], kwargs={'lr': self.config['lr']})\n\n        logger.info(\"create Helmholtz RECURRENT neural network. 
ok\")\n\n    def compile_train(self, mode='train'):\n        # questions (theano variables)\n        inputs  = T.imatrix()  # padded input word sequence (for training)\n        context = alloc_zeros_matrix(inputs.shape[0], self.config['dec_contxt_dim'])\n        assert context.ndim == 2\n\n        # decoding.\n        target  = inputs\n        logPxz, logPPL = self.decoder.build_decoder(target, context)\n\n        # reconstruction loss\n        loss_rec = T.mean(-logPxz)\n        loss_ppl = T.exp(T.mean(-logPPL))\n\n        L1       = T.sum([T.sum(abs(w)) for w in self.params])\n        loss     = loss_rec\n\n        updates = self.optimizer.get_updates(self.params, loss)\n\n        logger.info(\"compiling the compuational graph ::training function::\")\n        train_inputs = [inputs]\n\n        self.train_ = theano.function(train_inputs,\n                                      [loss_rec, loss_ppl],\n                                      updates=updates,\n                                      name='train_fun')\n        logger.info(\"pre-training functions compile done.\")\n\n        if mode == 'display' or mode == 'all':\n            \"\"\"\n            build the sampler function here <:::>\n            \"\"\"\n            # context vectors (as)\n            self.decoder.build_sampler()\n            logger.info(\"display functions compile done.\")\n\n        # add monitoring:\n        self._monitoring()\n\n        # compiling monitoring\n        self.compile_monitoring(train_inputs)\n\n\nclass NRM(Model):\n    \"\"\"\n    Neural Responding Machine\n    A Encoder-Decoder based responding model.\n    \"\"\"\n    def __init__(self,\n                 config, n_rng, rng,\n                 mode='Evaluation',\n                 use_attention=False,\n                 copynet=False,\n                 identity=False):\n        super(NRM, self).__init__()\n\n        self.config   = config\n        self.n_rng    = n_rng  # numpy random stream\n        self.rng      = rng  # Theano random stream\n        self.mode     = mode\n        self.name     = 'nrm'\n        self.attend   = use_attention\n        self.copynet  = copynet\n        self.identity = identity\n\n    def build_(self):\n        logger.info(\"build the Neural Responding Machine\")\n\n        # encoder-decoder:: <<==>>\n        self.encoder = Encoder(self.config, self.rng, prefix='enc', mode=self.mode)\n        if not self.attend:\n            self.decoder = Decoder(self.config, self.rng, prefix='dec', mode=self.mode)\n        else:\n            self.decoder = DecoderAtt(self.config, self.rng, prefix='dec', mode=self.mode,\n                                      copynet=self.copynet, identity=self.identity)\n\n        self._add(self.encoder)\n        self._add(self.decoder)\n\n        # objectives and optimizers\n        # self.optimizer = optimizers.get(self.config['optimizer'])\n        assert self.config['optimizer'] == 'adam'\n        self.optimizer = optimizers.get(self.config['optimizer'],\n                                        kwargs=dict(rng=self.rng,\n                                                    save=False))\n        logger.info(\"build ok.\")\n\n    def compile_(self, mode='all', contrastive=False):\n        # compile the computational graph.\n        # INFO: the parameters.\n        # mode: 'train'/ 'display'/ 'policy' / 'all'\n\n        ps = 'params: {\\n'\n        for p in self.params:\n            ps += '{0}: {1}\\n'.format(p.name, p.eval().shape)\n        ps += '}.'\n        logger.info(ps)\n\n        param_num = 
np.sum([np.prod(p.shape.eval()) for p in self.params])\n        logger.info(\"total number of the parameters of the model: {}\".format(param_num))\n\n        if mode == 'train' or mode == 'all':\n            self.compile_train()\n\n        if mode == 'display' or mode == 'all':\n            self.compile_sample()\n\n        if mode == 'inference' or mode == 'all':\n            self.compile_inference()\n\n    def compile_train(self):\n\n        # questions (theano variables)\n        inputs  = T.imatrix()  # padded input word sequence (for training)\n        target  = T.imatrix()  # padded target word sequence (for training)\n\n        # encoding & decoding\n        if not self.attend:\n            code               = self.encoder.build_encoder(inputs, None)\n            logPxz, logPPL     = self.decoder.build_decoder(target, code)\n        else:\n            code, _, c_mask, _ = self.encoder.build_encoder(inputs, None, return_sequence=True, return_embed=True)\n            logPxz, logPPL     = self.decoder.build_decoder(target, code, c_mask)\n\n        # responding loss\n        loss_rec = T.mean(-logPxz)\n        loss_ppl = T.exp(T.mean(-logPPL))\n        loss     = loss_rec\n\n        updates  = self.optimizer.get_updates(self.params, loss)\n\n        logger.info(\"compiling the compuational graph ::training function::\")\n        train_inputs = [inputs, target]\n\n        self.train_ = theano.function(train_inputs,\n                                      [loss_rec, loss_ppl],\n                                      updates=updates,\n                                      name='train_fun')\n                                      # mode=NanGuardMode(nan_is_error=True, inf_is_error=True, big_is_error=True))\n        logger.info(\"training functions compile done.\")\n\n        # # add monitoring:\n        # self.monitor['context'] = context\n        # self._monitoring()\n        #\n        # # compiling monitoring\n        # self.compile_monitoring(train_inputs)\n\n    def compile_sample(self):\n        if not self.attend:\n            self.encoder.compile_encoder(with_context=False)\n        else:\n            self.encoder.compile_encoder(with_context=False, return_sequence=True, return_embed=True)\n\n        self.decoder.build_sampler()\n        logger.info(\"sampling functions compile done.\")\n\n    def compile_inference(self):\n        pass\n\n    def generate_(self, inputs, mode='display', return_all=False):\n        # assert self.config['sample_stoch'], 'RNNLM sampling must be stochastic'\n        # assert not self.config['sample_argmax'], 'RNNLM sampling cannot use argmax'\n\n        args = dict(k=self.config['sample_beam'],\n                    maxlen=self.config['max_len'],\n                    stochastic=self.config['sample_stoch'] if mode == 'display' else None,\n                    argmax=self.config['sample_argmax'] if mode == 'display' else None)\n\n        if not self.attend:\n            context = self.encoder.encode(inputs)\n            sample, score = self.decoder.get_sample(context, **args)\n        else:\n            context, _, c_mask, _ = self.encoder.encode(inputs)\n            sample, score = self.decoder.get_sample(context, c_mask, **args)\n\n        if return_all:\n            return sample, score\n\n        if not args['stochastic']:\n            score = score / np.array([len(s) for s in sample])\n            sample = sample[score.argmin()]\n            score = score.min()\n        else:\n            score /= float(len(sample))\n\n        return sample, 
np.exp(score)\n\n    # def evaluate_(self, inputs, outputs, idx2word,\n    #               origin=None, idx2word_o=None):\n    #\n    #     def cut_zero(sample, idx2word, idx2word_o):\n    #         Lmax = len(idx2word)\n    #         if not self.copynet:\n    #             if 0 not in sample:\n    #                 return [idx2word[w] for w in sample]\n    #             return [idx2word[w] for w in sample[:sample.index(0)]]\n    #         else:\n    #             if 0 not in sample:\n    #                 if origin is None:\n    #                     return [idx2word[w] if w < Lmax else idx2word[inputs[w - Lmax]]\n    #                             for w in sample]\n    #                 else:\n    #                     return [idx2word[w] if w < Lmax else idx2word_o[origin[w - Lmax]]\n    #                             for w in sample]\n    #             if origin is None:\n    #                 return [idx2word[w] if w < Lmax else idx2word[inputs[w - Lmax]]\n    #                         for w in sample[:sample.index(0)]]\n    #             else:\n    #                 return [idx2word[w] if w < Lmax else idx2word_o[origin[w - Lmax]]\n    #                         for w in sample[:sample.index(0)]]\n    #\n    #     result, _ = self.generate_(inputs[None, :])\n    #\n    #     if origin is not None:\n    #         print '[ORIGIN]: {}'.format(' '.join(cut_zero(origin.tolist(), idx2word_o, idx2word_o)))\n    #     print '[DECODE]: {}'.format(' '.join(cut_zero(result, idx2word, idx2word_o)))\n    #     print '[SOURCE]: {}'.format(' '.join(cut_zero(inputs.tolist(),  idx2word, idx2word_o)))\n    #     print '[TARGET]: {}'.format(' '.join(cut_zero(outputs.tolist(), idx2word, idx2word_o)))\n    #\n    #     return True\n\n    def evaluate_(self, inputs, outputs, idx2word, inputs_unk=None):\n\n        def cut_zero(sample, idx2word, Lmax=None):\n            if Lmax is None:\n                Lmax = self.config['dec_voc_size']\n            if 0 not in sample:\n                return ['{}'.format(idx2word[w].encode('utf-8')) for w in sample]\n            return ['{}'.format(idx2word[w].encode('utf-8')) for w in sample[:sample.index(0)]]\n\n        if inputs_unk is None:\n            result, _ = self.generate_(inputs[None, :])\n        else:\n            result, _ = self.generate_(inputs_unk[None, :])\n\n        a = '[SOURCE]: {}'.format(' '.join(cut_zero(inputs.tolist(),  idx2word)))\n        b = '[TARGET]: {}'.format(' '.join(cut_zero(outputs.tolist(), idx2word)))\n        c = '[DECODE]: {}'.format(' '.join(cut_zero(result, idx2word)))\n        print a\n        if inputs_unk is not None:\n            k = '[_INPUT]: {}\\n'.format(' '.join(cut_zero(inputs_unk.tolist(),  idx2word, Lmax=len(idx2word))))\n            print k\n            a += k\n        print b\n        print c\n        a += b + c\n        return a\n\n    def analyse_(self, inputs, outputs, idx2word):\n        Lmax = len(idx2word)\n\n        def cut_zero(sample, idx2word):\n            if 0 not in sample:\n                return ['{}'.format(idx2word[w].encode('utf-8')) for w in sample]\n\n            return ['{}'.format(idx2word[w].encode('utf-8')) for w in sample[:sample.index(0)]]\n\n        result, _ = self.generate_(inputs[None, :])\n        flag   = 0\n        source = '{}'.format(' '.join(cut_zero(inputs.tolist(),  idx2word)))\n        target = '{}'.format(' '.join(cut_zero(outputs.tolist(), idx2word)))\n        result = '{}'.format(' '.join(cut_zero(result, idx2word)))\n\n        return target == result\n\n    def 
analyse_cover(self, inputs, outputs, idx2word):\n        Lmax = len(idx2word)\n\n        def cut_zero(sample, idx2word):\n            if 0 not in sample:\n                return ['{}'.format(idx2word[w].encode('utf-8')) for w in sample]\n\n            return ['{}'.format(idx2word[w].encode('utf-8')) for w in sample[:sample.index(0)]]\n\n        results, _ = self.generate_(inputs[None, :], return_all=True)\n        flag   = 0\n        source = '{}'.format(' '.join(cut_zero(inputs.tolist(),  idx2word)))\n        target = '{}'.format(' '.join(cut_zero(outputs.tolist(), idx2word)))\n\n        score  = [target == '{}'.format(' '.join(cut_zero(result, idx2word))) for result in results]\n        return max(score)"
  },
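  {
    "path": "examples/nrm_copynet_sketch.py",
    "content": "\"\"\"\nUsage sketch (added for illustration; NOT part of the original repository).\nIt wires together the classes in emolga/models/encdec.py: an attention-based\nNRM with the copying mechanism switched on, compiled for training and\nsampling. Every config value below is an assumption chosen to satisfy the\ndimension checks in Encoder/DecoderAtt; real experiments configure these\nelsewhere.\n\"\"\"\nimport numpy as np\nfrom theano.tensor.shared_randomstreams import RandomStreams\n\nfrom emolga.models.encdec import NRM\n\neh, dh, ed = 256, 256, 128   # encoder hidden / decoder hidden / embedding dims\nconfig = {\n    # encoder\n    'enc_voc_size': 5000, 'enc_embedd_dim': ed, 'enc_hidden_dim': eh,\n    'bidirectional': True,\n    # decoder: a bidirectional encoder yields 2 * enc_hidden_dim per source\n    # position, which becomes the decoder context dimension.\n    'dec_voc_size': 5000, 'dec_embedd_dim': ed, 'dec_hidden_dim': dh,\n    'dec_contxt_dim': 2 * eh,\n    'bias_code': True, 'dropout': 0.0, 'use_input': True,\n    'context_predict': True, 'bigram_predict': True, 'leaky_predict': False,\n    'deep_out': False,\n    # readin = [hidden, attention context, previous embedding], concatenated\n    'dec_readout_dim': dh + 2 * eh + ed,\n    # training / sampling\n    'optimizer': 'adam',          # NRM.build_ asserts this\n    'sample_beam': 1, 'sample_stoch': True, 'sample_argmax': True,\n    'max_len': 20,\n}\n\nn_rng = np.random.RandomState(2016)   # numpy random stream\nrng   = RandomStreams(2016)           # Theano random stream\n\nmodel = NRM(config, n_rng, rng, mode='Evaluation',\n            use_attention=True, copynet=True, identity=False)\nmodel.build_()\nmodel.compile_('all')\n\n# toy padded batch (word id 0 is the end-of-sequence marker)\nsource = np.asarray([[4, 9, 17, 3, 0]], dtype='int32')\ntarget = np.asarray([[9, 17, 5, 0, 0]], dtype='int32')\n\nloss_rec, loss_ppl = model.train_(source, target)\nsample, score = model.generate_(source)\nprint sample, score\n"
  },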
  {
    "path": "emolga/models/ntm_encdec.py",
    "content": "__author__ = 'jiataogu'\n\nimport theano\ntheano.config.exception_verbosity = 'high'\n\nimport logging\nimport copy\n\nimport emolga.basic.objectives as objectives\nimport emolga.basic.optimizers as optimizers\nfrom emolga.layers.recurrent import *\nfrom emolga.layers.ntm_minibatch import Controller, BernoulliController\nfrom emolga.layers.embeddings import *\nfrom core import Model\n\nlogger = logging.getLogger(__name__)\nRNN    = JZS3              # change it here for other RNN models.\n\n\nclass RecurrentBase(Model):\n    \"\"\"\n    The recurrent base for SimpleRNN, GRU, JZS3, LSTM and Neural Turing Machines\n    \"\"\"\n    def __init__(self, config, model='RNN', prefix='enc', use_contxt=True, name=None):\n        super(RecurrentBase, self).__init__()\n\n        self.config     = config\n        self.model      = model\n        self.prefix     = prefix\n        self.use_contxt = use_contxt\n        if not name:\n            self.name   = self.prefix\n        else:\n            self.name   = name\n\n        if self.config['binary']:\n            NTM         = BernoulliController\n        else:\n            NTM         = Controller\n\n        def _build_RNN():\n            logger.info('BUILD::>>>>>>>> Gated Recurrent Units.')\n            core = RNN(\n                self.config['{}_embedd_dim'.format(self.prefix)],\n                self.config['{}_hidden_dim'.format(self.prefix)],\n                self.config['{}_contxt_dim'.format(self.prefix)] if use_contxt else None,\n                name='{}_rnn'.format(self.prefix)\n            )\n\n            if self.config['bias_code']:\n                init = Dense(\n                    self.config['{}_contxt_dim'.format(self.prefix)],\n                    self.config['{}_hidden_dim'.format(self.prefix)],\n                    activation='tanh',\n                    name='{}_init'.format(self.prefix)\n                )\n            else:\n                init = Zero()\n\n            return core, [init]\n\n        def _build_NTM():\n            \"\"\"\n            Build a simple Neural Turing Machine.\n            We use a feedforward controller here.\n            \"\"\"\n            logger.info('BUILD::>>>>>>>> Controller Units.')\n            core = NTM(\n                self.config['{}_embedd_dim'.format(self.prefix)],\n                self.config['{}_memory_dim'.format(self.prefix)],\n                self.config['{}_memory_wdth'.format(self.prefix)],\n                self.config['{}_hidden_dim'.format(self.prefix)],\n                self.config['{}_shift_width'.format(self.prefix)],\n                name=\"{}_ntm\".format(self.prefix),\n                readonly=self.config['{}_read-only'.format(self.prefix)],\n                curr_input=self.config['{}_curr_input'.format(self.prefix)],\n                recurrence=self.config['{}_recurrence'.format(self.prefix)]\n            )\n\n            if self.config['bias_code']:\n                raise NotImplementedError\n            else:\n                init_w = T.nnet.softmax(initializations.get('glorot_uniform')((1, self.config['{}_memory_dim'.format(self.prefix)])))\n                init_r = T.nnet.softmax(initializations.get('glorot_uniform')((1, self.config['{}_memory_dim'.format(self.prefix)])))\n                init_c = initializations.get('glorot_uniform')((1, self.config['{}_hidden_dim'.format(self.prefix)]))\n                return core, [init_w, init_r, init_c]\n\n        if model   == 'RNN':\n            self.core, self.init = _build_RNN()\n        elif model == 'NTM':\n 
           self.core, self.init = _build_NTM()\n        else:\n            raise NotImplementedError\n\n        self._add(self.core)\n        if model == 'RNN':\n            for init in self.init:\n                self._add(init)\n\n        self.set_name(name)\n\n    # *****************************************************************\n    # For Theano inputs.\n\n    def get_context(self, context):\n        # get context if \"use_context\" is True\n        info  = dict()\n        # if self.use_contxt:\n        if self.model == 'RNN':\n            # context is a matrix (nb_samples, context_dim)\n            info['C'] = context\n            info['init_h'] = self.init[0](context)\n\n        elif self.model == 'NTM':\n            # context is a tensor (nb_samples, memory_dim, memory_width)\n            info['M']       = context\n            if self.config['bias_code']:\n                raise NotImplementedError\n            else:\n                info['init_ww'] = T.repeat(self.init[0], context.shape[0], axis=0)\n                info['init_wr'] = T.repeat(self.init[1], context.shape[0], axis=0)\n                info['init_c']  = T.repeat(self.init[2], context.shape[0], axis=0)\n        else:\n            raise NotImplementedError\n        return info\n\n    def loop(self, X, X_mask, info=None, return_sequence=False, return_full=False):\n        if self.model == 'NTM':\n            info['return_full'] = return_full\n\n        Z = self.core(X, X_mask, return_sequence=return_sequence, **info)\n        self._monitoring()\n        return Z\n\n    def step(self, X, prev_info):\n        # run one step of the Recurrence\n        if self.model == 'RNN':\n            out = self.core(X, one_step=True, **prev_info)\n            next_state = out\n            next_info  = {'init_h': out, 'C': prev_info['C']}\n        elif self.model == 'NTM':\n            out = self.core(X, one_step=True, **prev_info)\n            next_state = out[3]\n            next_info  = dict()\n            next_info['M']       = out[0]\n            next_info['init_ww'] = out[1]\n            next_info['init_wr'] = out[2]\n            next_info['init_c']  = out[3]\n        else:\n            raise NotImplementedError\n        return next_state, next_info\n\n    def build_(self):\n        # build a sampler in theano function for sampling.\n        if self.model == 'RNN':\n            context   = T.matrix()  # theano variable.\n            logger.info('compile the function: get_init_state')\n            info      = self.get_context(context)\n            self.get_init_state \\\n                      = theano.function([context], info['init_h'],\n                                        name='get_init_state')\n\n            # **************************************************** #\n            context   = T.matrix()  # theano variable.\n            prev_X    = T.matrix('prev_X', dtype='float32')\n            prev_stat = T.matrix('prev_state', dtype='float32')\n            prev_info = dict()\n            prev_info['C']      = context\n            prev_info['init_h'] = prev_stat\n\n            next_stat, next_info \\\n                = self.step(prev_X, prev_info)\n\n            logger.info('compile the function: sample_next_state')\n            inputs  = [prev_X, prev_stat, context]\n            outputs = next_stat\n            self.sample_next_state = theano.function(inputs, outputs, name='sample_next_state')\n\n        elif self.model == 'NTM':\n            memory  = T.tensor3()  # theano variable\n            logger.info('compile the function: 
get_init_state')\n            info    = self.get_context(memory)\n\n            self.get_init_wr = theano.function([memory], info['init_wr'], name='get_init_wr')\n            self.get_init_ww = theano.function([memory], info['init_ww'], name='get_init_ww')\n            self.get_init_c  = theano.function([memory], info['init_c'],  name='get_init_c')\n\n            # **************************************************** #\n            memory    = T.tensor3()  # theano variable\n            prev_X    = T.matrix('prev_X',  dtype='float32')\n            prev_ww   = T.matrix('prev_ww', dtype='float32')\n            prev_wr   = T.matrix('prev_wr', dtype='float32')\n            prev_stat = T.matrix('prev_stat', dtype='float32')\n            prev_info = {'M': memory, 'init_ww': prev_ww, 'init_wr': prev_wr, 'init_c': prev_stat}\n            logger.info('compile the function: sample_next_0123')\n\n            next_stat, next_info = self.step(prev_X, prev_info)\n            inputs    = [prev_X, prev_ww, prev_wr, memory, prev_stat]\n            outputs   = [next_info['M'], next_info['init_ww'], next_info['init_wr'], next_stat]\n            self.sample_next_state = theano.function(inputs, outputs, name='sample_next_state')\n\n        else:\n            raise NotImplementedError\n\n        logger.info('done.')\n\n    # *****************************************************************\n    # For Numpy inputs.\n    def get_init(self, context):\n        info = dict()\n        if self.model == 'RNN':\n            info['init_h'] = self.get_init_state(context)\n            info['C']      = context\n        elif self.model == 'NTM':\n            if hasattr(self, 'get_init_ww'):\n                info['init_ww'] = self.get_init_ww(context)\n            if hasattr(self, 'get_init_wr'):\n                info['init_wr'] = self.get_init_wr(context)\n            if hasattr(self, 'get_init_c'):\n                info['init_c']  = self.get_init_c(context)\n\n            info['M'] = context\n        else:\n            raise NotImplementedError\n\n        return info\n\n    def get_next_state(self, prev_X, prev_info):\n        if self.model == 'RNN':\n            next_state = self.sample_next_state(\n                prev_X, prev_info['init_h'], prev_info['C'])\n\n            next_info = dict()\n            next_info['C'] = prev_info['C']\n            next_info['init_h'] = next_state\n        elif self.model == 'NTM':\n            next_info  = dict()\n            assert 'init_ww' in prev_info\n            assert 'init_wr' in prev_info\n            assert 'init_c'  in prev_info\n            assert 'M'       in prev_info\n\n            next_info['M'], next_info['init_ww'], \\\n            next_info['init_wr'], next_info['init_c'] = self.sample_next_state(\n                prev_X, prev_info['init_ww'], prev_info['init_wr'],\n                prev_info['M'], prev_info['init_c'])\n\n            next_state = next_info['init_c']\n        else:\n            raise NotImplementedError\n\n        return next_state, next_info\n\n\nclass Encoder(Model):\n    \"\"\"\n    Recurrent Neural Network/Neural Turing Machine-based Encoder\n    It is used to compute the context vector.\n    \"\"\"\n\n    def __init__(self,\n                 config, rng, prefix='enc',\n                 mode='RNN', embed=None):\n        \"\"\"\n        mode = RNN: use a RNN Encoder\n        mode = NTM: use a NTM Encoder\n        \"\"\"\n        super(Encoder, self).__init__()\n        self.config = config\n        self.rng    = rng\n        self.prefix = prefix\n     
   self.mode   = mode\n        self.name   = prefix\n\n        \"\"\"\n        Create all elements of the Encoder's Computational graph\n        \"\"\"\n        # create Embedding layers\n        logger.info(\"{}_create embedding layers.\".format(self.prefix))\n        if embed:\n            self.Embed = embed\n        else:\n            self.Embed = Embedding(\n                self.config['enc_voc_size'],\n                self.config['enc_embedd_dim'],\n                name=\"{}_embed\".format(self.prefix))\n            self._add(self.Embed)\n\n        # create Recurrent Base\n        logger.info(\"{}_create Recurrent layers.\".format(self.prefix))\n        if self.mode == 'RNN' and self.config['bidirectional']:\n            self.Forward = RecurrentBase(self.config, model=self.mode, name='forward',\n                                         prefix='enc', use_contxt=self.config['enc_use_contxt'])\n            self.Bakward = RecurrentBase(self.config, model=self.mode, name='backward',\n                                         prefix='enc', use_contxt=self.config['enc_use_contxt'])\n\n            self._add(self.Forward)\n            self._add(self.Bakward)\n        else:\n            self.Recurrence = RecurrentBase(self.config, model=self.mode, name='encoder',\n                                            prefix='enc', use_contxt=self.config['enc_use_contxt'])\n            self._add(self.Recurrence)\n\n        # there is no readout layers for encoder.\n\n    def build_encoder(self, source, context=None):\n        \"\"\"\n        Build the Encoder Computational Graph\n        \"\"\"\n        if self.mode == 'RNN':\n            # we use a Recurrent Neural Network Encoder (GRU)\n            if not self.config['bidirectional']:\n                X, X_mask = self.Embed(source, True)\n                info      = self.Recurrence.get_context(context)\n                X_out = self.Recurrence.loop(X, X_mask, info, return_sequence=False)\n            else:\n                source_back = source[:, ::-1]\n                X1, X1_mask = self.Embed(source, True)\n                X2, X2_mask = self.Embed(source_back, True)\n\n                info        = self.Forward.get_context(context)\n                X_out1      = self.Forward.loop(X1, X1_mask, info, return_sequence=False)\n                info        = self.Bakward.get_context(context)\n                X_out2      = self.Bakward.loop(X2, X2_mask, info, return_sequence=False)\n                # X_out       = T.concatenate([X_out1, X_out2], axis=1)\n                X_out       = 0.5 * X_out1 + 0.5 * X_out2\n        elif self.mode == 'NTM':\n            if not self.config['bidirectional']:\n                X, X_mask = self.Embed(source, True)\n            else:\n                source_back = source[:, ::-1]\n                X1, X1_mask = self.Embed(source, True)\n                X2, X2_mask = self.Embed(source_back, True)\n                X           = T.concatenate([X1, X2], axis=1)\n                X_mask      = T.concatenate([X1_mask, X2_mask], axis=1)\n\n            info  = self.Recurrence.get_context(context)\n            # X_out here is the extracted memorybook. 
which can be used as the initial memory of the NTM Decoder.\n            X_out = self.Recurrence.loop(X, X_mask, info, return_sequence=False, return_full=True)[0]\n        else:\n            raise NotImplementedError\n\n        self._monitoring()\n        return X_out\n\n\nclass Decoder(Model):\n    \"\"\"\n    Recurrent Neural Network-based Decoder.\n    It is used for:\n        (1) Evaluation: compute the probability P(Y|X)\n        (2) Prediction: sample the best result based on P(Y|X)\n        (3) Beam-search\n        (4) Scheduled Sampling (how to implement it?)\n    \"\"\"\n\n    def __init__(self,\n                 config, rng, prefix='dec',\n                 mode='RNN', embed=None):\n        \"\"\"\n        mode = RNN: use a RNN Decoder\n        mode = NTM: use a NTM Decoder (Neural Turing Machine)\n        \"\"\"\n        super(Decoder, self).__init__()\n        self.config = config\n        self.rng    = rng\n        self.prefix = prefix\n        self.name   = prefix\n        self.mode   = mode\n\n        \"\"\"\n        Create all elements of the Decoder's computational graph.\n        \"\"\"\n        # create Embedding layers\n        logger.info(\"{}_create embedding layers.\".format(self.prefix))\n        if embed:\n            self.Embed = embed\n        else:\n            self.Embed = Embedding(\n                self.config['dec_voc_size'],\n                self.config['dec_embedd_dim'],\n                name=\"{}_embed\".format(self.prefix))\n            self._add(self.Embed)\n\n        # create Recurrent Base.\n        logger.info(\"{}_create Recurrent layers.\".format(self.prefix))\n        self.Recurrence = RecurrentBase(self.config, model=self.mode, name='decoder',\n                                        prefix='dec', use_contxt=self.config['dec_use_contxt'])\n\n
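        # NOTE (editorial sketch): the readout constructed below follows the\n        # \"deep output\" decoder readout of Bahdanau et al. (2014): at step t\n        # the pre-softmax energy is, roughly,\n        #     r_t = W_h * h_t [+ W_y * y_{t-1}] [+ W_c * c]\n        # where the bracketed terms are enabled by 'bigram_predict' and\n        # 'context_predict', optionally followed by a maxout ('deep_out').\n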
        # create readout layers\n        logger.info(\"_create Readout layers\")\n\n        # 1. hidden layers readout.\n        self.hidden_readout = Dense(\n            self.config['dec_hidden_dim'],\n            self.config['output_dim']\n            if self.config['deep_out']\n            else self.config['dec_voc_size'],\n            activation='linear',\n            name=\"{}_hidden_readout\".format(self.prefix)\n        )\n\n        # 2. previous word readout\n        self.prev_word_readout = None\n        if self.config['bigram_predict']:\n            self.prev_word_readout = Dense(\n                self.config['dec_embedd_dim'],\n                self.config['output_dim']\n                if self.config['deep_out']\n                else self.config['dec_voc_size'],\n                activation='linear',\n                name=\"{}_prev_word_readout\".format(self.prefix),\n                learn_bias=False\n            )\n\n        # 3. context readout\n        self.context_readout = None\n        if self.config['context_predict']:\n            self.context_readout = Dense(\n                self.config['dec_contxt_dim'],\n                self.config['output_dim']\n                if self.config['deep_out']\n                else self.config['dec_voc_size'],\n                activation='linear',\n                name=\"{}_context_readout\".format(self.prefix),\n                learn_bias=False\n            )\n\n        # option: deep output (maxout)\n        if self.config['deep_out']:\n            self.activ = Activation(config['deep_out_activ'])\n            # self.dropout = Dropout(rng=self.rng, p=config['dropout'])\n            self.output_nonlinear = [self.activ]  # , self.dropout]\n            self.output = Dense(\n                self.config['output_dim'] / 2\n                if config['deep_out_activ'] == 'maxout2'\n                else self.config['output_dim'],\n\n                self.config['dec_voc_size'],\n                activation='softmax',\n                name=\"{}_output\".format(self.prefix),\n                learn_bias=False\n            )\n        else:\n            self.output_nonlinear = []\n            self.output = Activation('softmax')\n\n        # registration:\n        self._add(self.Recurrence)\n        self._add(self.hidden_readout)\n        self._add(self.context_readout)\n        self._add(self.prev_word_readout)\n        self._add(self.output)\n\n        if self.config['deep_out']:\n            self._add(self.activ)\n        # self._add(self.dropout)\n\n        logger.info(\"create decoder ok.\")\n\n    @staticmethod\n    def _grab_prob(probs, X):\n        assert probs.ndim == 3\n\n        batch_size = probs.shape[0]\n        max_len = probs.shape[1]\n        vocab_size = probs.shape[2]\n\n        probs = probs.reshape((batch_size * max_len, vocab_size))\n        return probs[T.arange(batch_size * max_len), X.flatten(1)].reshape(X.shape)  # advanced indexing\n\n    \"\"\"\n    Build the decoder for evaluation\n    \"\"\"\n    def prepare_xy(self, target):\n        # Word embedding\n        Y, Y_mask = self.Embed(target, True)  # (nb_samples, max_len, embedding_dim)\n\n        if self.config['use_input']:\n            X = T.concatenate([alloc_zeros_matrix(Y.shape[0], 1, Y.shape[2]), Y[:, :-1, :]], axis=1)\n        else:\n            X = 0 * Y\n\n        # option ## drop words.\n\n        X_mask    = T.concatenate([T.ones((Y.shape[0], 1)), Y_mask[:, :-1]], axis=1)\n        Count     = T.cast(T.sum(X_mask, axis=1), dtype=theano.config.floatX)\n        return X, X_mask, Y, Y_mask, Count\n\n    def build_decoder(self, target, context=None, return_count=False):\n        \"\"\"\n        Build the Decoder Computational Graph\n        \"\"\"\n        X, X_mask, Y, Y_mask, Count = self.prepare_xy(target)\n        info  = self.Recurrence.get_context(context)\n        X_out = self.Recurrence.loop(X, X_mask, info=info, return_sequence=True)\n\n        # Readout\n        readout = self.hidden_readout(X_out)\n\n        if self.config['context_predict']:\n            # warning: only supports RNN, cannot support Memory\n            readout += self.context_readout(context).dimshuffle(0, 'x', 1)\n\n        if self.config['bigram_predict']:\n            readout += self.prev_word_readout(X)\n\n        for l in self.output_nonlinear:\n            readout = l(readout)\n\n        prob_dist = self.output(readout)  # (nb_samples, max_len, vocab_size)\n\n
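        # The masked sequence likelihood below is the standard\n        #     log P(Y|X) = sum_t mask_t * log p(y_t | y_<t, context)\n        # e.g. two sequences padded to length 5 with 3 and 5 real tokens give\n        # Count = [3., 5.], so log_ppl is a per-word average log-likelihood.\n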
        # log_old  = T.sum(T.log(self._grab_prob(prob_dist, target)), axis=1)\n        log_prob = T.sum(T.log(self._grab_prob(prob_dist, target)) * X_mask, axis=1)\n        log_ppl  = log_prob / Count\n\n        self._monitoring()\n\n        if return_count:\n            return log_prob, Count\n        else:\n            return log_prob, log_ppl\n\n    \"\"\"\n    Sampling Functions.\n    \"\"\"\n    def _step_embed(self, prev_word):\n        # word embedding (note that for the first word, embedding should be all zero)\n        if self.config['use_input']:\n            X = T.switch(\n                prev_word[:, None] < 0,\n                alloc_zeros_matrix(prev_word.shape[0], self.config['dec_embedd_dim']),\n                self.Embed(prev_word)\n            )\n        else:\n            X = alloc_zeros_matrix(prev_word.shape[0], self.config['dec_embedd_dim'])\n\n        return X\n\n    def _step_sample(self, X, next_stat, context):\n        # compute the readout probability distribution and sample it\n        # here the readout is a matrix, different from the learner.\n        readout = self.hidden_readout(next_stat)\n\n        if context.ndim == 2 and self.config['context_predict']:\n            # warning: only supports RNN, cannot support Memory\n            readout += self.context_readout(context)\n\n        if self.config['bigram_predict']:\n            readout += self.prev_word_readout(X)\n\n        for l in self.output_nonlinear:\n            readout = l(readout)\n\n        next_prob = self.output(readout)\n        next_sample = self.rng.multinomial(pvals=next_prob).argmax(1)\n        return next_prob, next_sample\n\n    \"\"\"\n    Build the sampler for sampling/greedy search/beam search\n    \"\"\"\n\n    def build_sampler(self):\n        \"\"\"\n        Build a sampler which only steps once.\n        Typically it only works for one word at a time?\n        \"\"\"\n        prev_word = T.vector('prev_word', dtype='int64')\n        prev_X    = self._step_embed(prev_word)\n        self.prev_embed = theano.function([prev_word], prev_X)\n\n        self.Recurrence.build_()\n\n        prev_X    = T.matrix('prev_X', dtype='float32')\n        next_stat = T.matrix('next_state', dtype='float32')\n        logger.info('compile the function: sample_next')\n\n        if self.config['mode'] == 'RNN':\n            context   = T.matrix('context')\n        else:\n            context   = T.tensor3('memory')\n\n        next_prob, next_sample = self._step_sample(prev_X, next_stat, context)\n        self.sample_next = theano.function([prev_X, next_stat, context],\n                                           [next_prob, next_sample],\n                                           name='sample_next',\n                                           on_unused_input='warn')\n\n        logger.info('done')\n\n    \"\"\"\n    Generate samples, either with stochastic sampling or beam-search!\n    \"\"\"\n\n    def get_sample(self, context, k=1, maxlen=30, stochastic=True, argmax=False):\n        # beam size\n        if k > 1:\n            assert not stochastic, 'Beam search does not support stochastic sampling!!'\n\n        # prepare for searching\n        sample = []\n        score  = []\n        if stochastic:\n            score = 0\n\n        live_k = 1\n        dead_k = 0\n\n        hyp_samples = [[]] * live_k\n        hyp_scores = np.zeros(live_k).astype(theano.config.floatX)\n        hyp_states = []\n        hyp_infos  = []\n\n        # get initial state of decoder Recurrence\n        next_info  = self.Recurrence.get_init(context)\n
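        # Illustrative usage (hypothetical names, assuming build_sampler() and\n        # a context/memory from the matching encoder):\n        #     sample, score = decoder.get_sample(ctx, k=5, maxlen=30,\n        #                                        stochastic=False)\n        # Each step below embeds the previous word, advances the recurrence,\n        # and samples/ranks the next word from the readout distribution.\n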
        # print 'sample with memory:\t', next_info['M'][0]\n        # next_state = next_info['init_h']\n        next_word  = -1 * np.ones((1,)).astype('int64')  # indicator for the first target word (bos target)\n        # Start searching!\n        for ii in xrange(maxlen):\n            # print next_word\n            ctx = np.tile(context, [live_k, 1])\n            next_embedding        = self.prev_embed(next_word)\n            next_state, next_info = self.Recurrence.get_next_state(next_embedding, next_info)\n            next_prob, next_word  = self.sample_next(next_embedding, next_state, ctx)  # wtf.\n\n            if stochastic:\n                # using stochastic sampling (or greedy sampling.)\n                if argmax:\n                    nw = next_prob[0].argmax()\n                    next_word[0] = nw\n                else:\n                    nw = next_word[0]\n\n                sample.append(nw)\n                score += next_prob[0, nw]\n\n                if nw == 0:  # sample reached the end\n                    break\n\n            else:\n                # using beam-search\n                # we can only compute it in a flattened way!\n                # Currently beam-search does not support NTM!!\n\n                cand_scores = hyp_scores[:, None] - np.log(next_prob)\n                cand_flat = cand_scores.flatten()\n                ranks_flat = cand_flat.argsort()[:(k - dead_k)]\n\n                # fetch the best results.\n                voc_size = next_prob.shape[1]\n                trans_index = ranks_flat / voc_size\n                word_index = ranks_flat % voc_size\n                costs = cand_flat[ranks_flat]\n\n                # get the new hyp samples\n                new_hyp_samples = []\n                new_hyp_scores = np.zeros(k - dead_k).astype(theano.config.floatX)\n                new_hyp_states = []\n                new_hyp_infos  = {w: [] for w in next_info}\n\n                for idx, [ti, wi] in enumerate(zip(trans_index, word_index)):\n                    new_hyp_samples.append(hyp_samples[ti] + [wi])\n                    new_hyp_scores[idx] = copy.copy(costs[idx])\n                    new_hyp_states.append(copy.copy(next_state[ti]))\n\n                    for w in next_info:\n                        new_hyp_infos[w].append(copy.copy(next_info[w][ti]))\n\n                # check the finished samples\n                new_live_k = 0\n                hyp_samples = []\n                hyp_scores = []\n                hyp_states = []\n                hyp_infos  = {w: [] for w in next_info}\n\n                for idx in xrange(len(new_hyp_samples)):\n                    if new_hyp_samples[idx][-1] == 0:\n                        sample.append(new_hyp_samples[idx])\n                        score.append(new_hyp_scores[idx])\n                        dead_k += 1\n                    else:\n                        new_live_k += 1\n                        hyp_samples.append(new_hyp_samples[idx])\n                        hyp_scores.append(new_hyp_scores[idx])\n                        hyp_states.append(new_hyp_states[idx])\n                        for w in next_info:\n                            hyp_infos[w].append(copy.copy(new_hyp_infos[w][idx]))\n\n                hyp_scores = np.array(hyp_scores)\n                live_k = new_live_k\n\n                if new_live_k < 1:\n                    break\n                if dead_k >= k:\n                    break\n\n                next_word = np.array([w[-1] for w in hyp_samples])\n                next_state = np.array(hyp_states)\n                for w in hyp_infos:\n                    next_info[w] = np.array(hyp_infos[w])\n                pass\n            pass\n\n
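        # Beam bookkeeping above: costs are cumulative negative log-probs;\n        # hypotheses whose last emitted token is 0 (EOS) are moved to\n        # sample/score (dead_k), the rest stay alive (live_k) for the next step.\n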
        # end.\n        if not stochastic:\n            # dump every remaining one\n            if live_k > 0:\n                for idx in xrange(live_k):\n                    sample.append(hyp_samples[idx])\n                    score.append(hyp_scores[idx])\n\n        return sample, score\n\n\nclass RNNLM(Model):\n    \"\"\"\n    RNN-LM, with context vector = 0.\n    It is very similar to the implementation of VAE.\n    \"\"\"\n    def __init__(self,\n                 config, n_rng, rng,\n                 mode='Evaluation'):\n        super(RNNLM, self).__init__()\n\n        self.config = config\n        self.n_rng  = n_rng  # numpy random stream\n        self.rng    = rng  # Theano random stream\n        self.mode   = mode\n        self.name   = 'rnnlm'\n\n    def build_(self):\n        logger.info(\"build the RNN/NTM-decoder\")\n        self.decoder = Decoder(self.config, self.rng, prefix='dec', mode=self.mode)\n\n        # registration:\n        self._add(self.decoder)\n\n        # objectives and optimizers\n        self.optimizer = optimizers.get('adadelta')\n\n        # save the initial memories\n        self.memory    = initializations.get('glorot_uniform')(\n                    (self.config['dec_memory_dim'], self.config['dec_memory_wdth']))\n\n        logger.info(\"create the RECURRENT language model. ok\")\n\n    def compile_(self, mode='train', contrastive=False):\n        # compile the computational graph.\n        # INFO: the parameters.\n        # mode: 'train' / 'display' / 'inference' / 'all'\n\n        ps = 'params: {\\n'\n        for p in self.params:\n            ps += '{0}: {1}\\n'.format(p.name, p.eval().shape)\n        ps += '}.'\n        logger.info(ps)\n\n        param_num = np.sum([np.prod(p.shape.eval()) for p in self.params])\n        logger.info(\"total number of the parameters of the model: {}\".format(param_num))\n\n        if mode == 'train' or mode == 'all':\n            if not contrastive:\n                self.compile_train()\n            else:\n                self.compile_train_CE()\n\n        if mode == 'display' or mode == 'all':\n            self.compile_sample()\n\n        if mode == 'inference' or mode == 'all':\n            self.compile_inference()\n\n    def compile_train(self):\n\n        # questions (theano variables)\n        inputs  = T.imatrix()  # padded input word sequence (for training)\n        if self.config['mode']   == 'RNN':\n            context = alloc_zeros_matrix(inputs.shape[0], self.config['dec_contxt_dim'])\n        elif self.config['mode'] == 'NTM':\n            context = T.repeat(self.memory[None, :, :], inputs.shape[0], axis=0)\n        else:\n            raise NotImplementedError\n\n        # decoding.\n        target  = inputs\n        logPxz, logPPL = self.decoder.build_decoder(target, context)\n\n        # reconstruction loss\n        loss_rec = T.mean(-logPxz)\n        loss_ppl = T.exp(T.mean(-logPPL))\n\n        L1       = T.sum([T.sum(abs(w)) for w in self.params])\n        loss     = loss_rec\n\n        updates = self.optimizer.get_updates(self.params, loss)\n\n        logger.info(\"compiling the computational graph ::training function::\")\n        train_inputs = [inputs]\n\n        self.train_ = theano.function(train_inputs,\n                                      [loss_rec, loss_ppl],\n                                      updates=updates,\n                                      name='train_fun')\n
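        # Illustrative call (hypothetical batch): inputs is a padded int32\n        # matrix of word indices, one sequence per row, e.g.\n        #     loss_rec, loss_ppl = model.train_(np.asarray(batch, dtype='int32'))\n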
        logger.info(\"pre-training functions compile done.\")\n\n        # add monitoring:\n        self.monitor['context'] = context\n        self._monitoring()\n\n        # compiling monitoring\n        self.compile_monitoring(train_inputs)\n\n    def compile_train_CE(self):\n        pass\n\n    def compile_sample(self):\n        # context vectors (as)\n        self.decoder.build_sampler()\n        logger.info(\"display functions compile done.\")\n\n    def compile_inference(self):\n        pass\n\n    def default_context(self):\n        if self.config['mode'] == 'RNN':\n            return np.zeros(shape=(1, self.config['dec_contxt_dim']), dtype=theano.config.floatX)\n        elif self.config['mode'] == 'NTM':\n            memory = self.memory.get_value()\n            memory = memory.reshape((1, memory.shape[0], memory.shape[1]))\n            return memory\n\n    def generate_(self, context=None, mode='display', max_len=None):\n        \"\"\"\n        :param context: context vector (or NTM memory) to guide the generation.\n                        If None, use the default context.\n        :return: question sentence in natural language.\n        \"\"\"\n        # assert self.config['sample_stoch'], 'RNNLM sampling must be stochastic'\n        # assert not self.config['sample_argmax'], 'RNNLM sampling cannot use argmax'\n\n        if context is None:\n            context = self.default_context()\n\n        args = dict(k=self.config['sample_beam'],\n                    maxlen=self.config['max_len'] if not max_len else max_len,\n                    stochastic=self.config['sample_stoch'] if mode == 'display' else None,\n                    argmax=self.config['sample_argmax'] if mode == 'display' else None)\n\n        sample, score = self.decoder.get_sample(context, **args)\n        if not args['stochastic']:\n            score = score / np.array([len(s) for s in sample])\n            sample = sample[score.argmin()]\n            score = score.min()\n        else:\n            score /= float(len(sample))\n\n        return sample, np.exp(score)\n\n\nclass Helmholtz(RNNLM):\n    \"\"\"\n    Helmholtz Machine as a probabilistic version of the AutoEncoder.\n    It is very similar to the Variational Auto-Encoder.\n    We implement the Helmholtz RNN as well as the Helmholtz Turing Machine here.\n    Reference:\n        Reweighted Wake-Sleep\n            http://arxiv.org/abs/1406.2751\n    \"\"\"\n    def __init__(self,\n                 config, n_rng, rng,\n                 mode='RNN'):\n        super(RNNLM, self).__init__()\n\n        self.config = config\n        self.n_rng  = n_rng  # numpy random stream\n        self.rng    = rng  # Theano random stream\n        self.mode   = mode\n        self.name   = 'helmholtz'\n\n    def build_(self):\n        logger.info(\"build the Helmholtz auto-encoder\")\n        if self.mode == 'NTM':\n            assert self.config['enc_memory_dim']  == self.config['dec_memory_dim']\n            assert self.config['enc_memory_wdth'] == self.config['dec_memory_wdth']\n\n        self.encoder = Encoder(self.config, self.rng, prefix='enc', mode=self.mode)\n        if self.config['shared_embed']:\n            self.decoder = Decoder(self.config, self.rng, prefix='dec',\n                                   embed=self.encoder.Embed, mode=self.mode)\n        else:\n            self.decoder = Decoder(self.config, self.rng, prefix='dec', mode=self.mode)\n\n        # registration\n        self._add(self.encoder)\n        self._add(self.decoder)\n\n
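        # Sketch of the reweighted wake-sleep estimator built in compile_train:\n        #     log p(x) ~= log (1/L) * sum_k p(a_k) * p(x|a_k) / q(a_k|x),\n        # with a_k ~ q(.|x); the normalized importance weights w_k reweight\n        # the gradients of both the generative model p and the posterior q.\n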
        # The main difference between VAE and HM is that we can use\n        # a more flexible prior instead of Gaussian here.\n        # for example, we use a sigmoid prior here.\n\n        # prior distribution is a bias layer\n        if self.mode == 'RNN':\n            # here we first focus on the Helmholtz Turing Machine.\n            # Thus the RNN version will be copied from Dial-DRL projects.\n            raise NotImplementedError\n\n        elif self.mode == 'NTM':\n            self.Prior  = MemoryLinear(\n                self.config['enc_memory_dim'],\n                self.config['enc_memory_wdth'],\n                activation='sigmoid',\n                name='prior_proj',\n                has_input=False\n            )\n\n            self.Post   = MemoryLinear(\n                self.config['enc_memory_dim'],\n                self.config['enc_memory_wdth'],\n                activation='sigmoid',\n                name='post_proj',\n                has_input=True\n            )\n\n            self.Trans  = MemoryLinear(\n                self.config['enc_memory_dim'],\n                self.config['enc_memory_wdth'],\n                activation='linear',\n                name='trans_proj',\n                has_input=True\n            )\n\n            # registration\n            self._add(self.Prior)\n            self._add(self.Post)\n            self._add(self.Trans)\n\n        else:\n            raise NotImplementedError\n\n        # objectives and optimizers\n        self.optimizer = optimizers.get(self.config['optimizer'])\n\n        # save the initial memories\n        self.memory    = initializations.get('glorot_uniform')(\n                    (self.config['dec_memory_dim'], self.config['dec_memory_wdth']))\n\n        logger.info(\"create Helmholtz Machine. ok\")\n\n    def compile_train(self):\n        # questions (theano variables)\n        inputs         = T.imatrix()  # padded input word sequence (for training)\n        batch_size     = inputs.shape[0]\n        if self.config['mode']   == 'RNN':\n            context    = alloc_zeros_matrix(inputs.shape[0], self.config['enc_contxt_dim'])\n        elif self.config['mode'] == 'NTM':\n            context    = T.repeat(self.memory[None, :, :], inputs.shape[0], axis=0)\n        else:\n            raise NotImplementedError\n\n        # encoding\n        memorybook     = self.encoder.build_encoder(inputs, context)\n\n        # get Q(a|y) = sigmoid\n        q_dis          = self.Post(memorybook)\n\n        # repeats\n        L              = self.config['repeats']\n        target         = T.repeat(inputs[:, None, :],\n                                  L,\n                                  axis=1).reshape((inputs.shape[0] * L, inputs.shape[1]))\n        q_dis          = T.repeat(q_dis[:, None, :, :],\n                                  L,\n                                  axis=1).reshape((q_dis.shape[0] * L, q_dis.shape[1], q_dis.shape[2]))\n\n        # sample actions\n        u              = self.rng.uniform(q_dis.shape)\n        action         = T.cast(u <= q_dis, dtype=theano.config.floatX)\n\n        # compute the exact probability for actions\n        logQax         = action * T.log(q_dis) + (1 - action) * T.log(1 - q_dis)\n        logQax         = logQax.sum(axis=-1).sum(axis=-1)\n\n        # decoding.\n        memorybook2    = self.Trans(action)\n        logPxa, count  = self.decoder.build_decoder(target, memorybook2, return_count=True)\n\n        # prior.\n        p_dis          = self.Prior()\n
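        # The action a was sampled above via u <= q_dis (the posterior); the\n        # lines below score it under the Bernoulli prior p_dis exactly:\n        #     log P(a) = sum_ij [ a_ij * log p_ij + (1 - a_ij) * log(1 - p_ij) ]\n        # (the two .sum(axis=-1) reductions run over the memory cells).\n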
        logPa          = action * T.log(p_dis) + (1 - action) * T.log(1 - p_dis)\n        logPa          = logPa.sum(axis=-1).sum(axis=-1)\n\n        \"\"\"\n        Compute the weights\n        \"\"\"\n        # reshape\n        logQax         = logQax.reshape((batch_size, L))\n        logPa          = logPa.reshape((batch_size, L))\n        logPxa         = logPxa.reshape((batch_size, L))\n\n        logPx_a        = logPa + logPxa\n\n        # normalizing the weights\n        log_wk         = logPx_a - logQax\n        log_bpk        = logPa - logQax\n\n        log_w_sum      = logSumExp(log_wk, axis=1)\n        log_bp_sum     = logSumExp(log_bpk, axis=1)\n\n        log_wnk        = log_wk - log_w_sum\n        log_bpnk       = log_bpk - log_bp_sum\n\n        # unbiased log-likelihood estimator\n        logPx          = T.mean(log_w_sum - T.log(L))\n        perplexity     = T.exp(-T.mean((log_w_sum - T.log(L)) / count))\n\n        \"\"\"\n        Compute the Loss function\n        \"\"\"\n        # loss    = weights * log [p(a)p(x|a)/q(a|x)]\n        weights        = T.exp(log_wnk)\n        bp             = T.exp(log_bpnk)\n        bq             = 1. / L\n        ess            = T.mean(1 / T.sum(weights ** 2, axis=1))\n\n        factor         = self.config['factor']\n        if self.config['variant_control']:\n            lossQ   = -T.mean(T.sum(logQax * (weights - bq), axis=1))   # log q(a|x)\n            lossPa  = -T.mean(T.sum(logPa  * (weights - bp), axis=1))   # log p(a)\n            lossPxa = -T.mean(T.sum(logPxa * weights, axis=1))          # log p(x|a)\n            lossP   = lossPxa + lossPa\n\n            updates = self.optimizer.get_updates(self.params, [lossP + factor * lossQ, weights, bp])\n        else:\n            lossQ   = -T.mean(T.sum(logQax * weights, axis=1))   # log q(a|x)\n            lossPa  = -T.mean(T.sum(logPa  * weights, axis=1))   # log p(a)\n            lossPxa = -T.mean(T.sum(logPxa * weights, axis=1))   # log p(x|a)\n            lossP   = lossPxa + lossPa\n\n            updates = self.optimizer.get_updates(self.params, [lossP + factor * lossQ, weights])\n\n        logger.info(\"compiling the computational graph ::training function::\")\n        train_inputs = [inputs]\n\n        self.train_    = theano.function(train_inputs,\n                                         [lossPa, lossPxa, lossQ, perplexity, ess],\n                                         updates=updates,\n                                         name='train_fun')\n\n        logger.info(\"pre-training functions compile done.\")\n\n    def compile_sample(self):\n        # # for Typical Auto-encoder, only conditional generation is useful.\n        # inputs        = T.imatrix()  # padded input word sequence (for training)\n        # if self.config['mode']   == 'RNN':\n        #     context   = alloc_zeros_matrix(inputs.shape[0], self.config['enc_contxt_dim'])\n        # elif self.config['mode'] == 'NTM':\n        #     context   = T.repeat(self.memory[None, :, :], inputs.shape[0], axis=0)\n        # else:\n        #     raise NotImplementedError\n        # pass\n\n        # sample the memorybook\n        p_dis         = self.Prior()\n        l             = T.iscalar()\n        u             = self.rng.uniform((l, p_dis.shape[-2], p_dis.shape[-1]))\n        binarybook    = T.cast(u <= p_dis, dtype=theano.config.floatX)\n        memorybook    = self.Trans(binarybook)\n\n        self.take     = theano.function([l], [binarybook, memorybook], name='take_action')\n
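\n        # Illustrative usage: draw one binary memory book from the prior and\n        # its continuous projection (default_context() below relies on this):\n        #     binarybook, memorybook = self.take(1)\n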
\n        # compile the sampler.\n        self.decoder.build_sampler()\n        logger.info('sampler function compile done.')\n\n    def compile_inference(self):\n        \"\"\"\n        build the hidden action prediction.\n        \"\"\"\n        inputs         = T.imatrix()  # padded input word sequence (for training)\n\n        if self.config['mode']   == 'RNN':\n            context    = alloc_zeros_matrix(inputs.shape[0], self.config['enc_contxt_dim'])\n        elif self.config['mode'] == 'NTM':\n            context    = T.repeat(self.memory[None, :, :], inputs.shape[0], axis=0)\n        else:\n            raise NotImplementedError\n\n        # encoding\n        memorybook     = self.encoder.build_encoder(inputs, context)\n\n        # get Q(a|y) = sigmoid(.|Posterior * encoded)\n        q_dis          = self.Post(memorybook)\n        p_dis          = self.Prior()\n\n        self.inference_ = theano.function([inputs], [memorybook, q_dis, p_dis])\n        logger.info(\"inference function compile done.\")\n\n    def default_context(self):\n        return self.take(1)[-1]\n\n\n\nclass BinaryHelmholtz(RNNLM):\n    \"\"\"\n    Binary Helmholtz Machine as a probabilistic version of the AutoEncoder.\n    It is very similar to the Variational Auto-Encoder.\n    We implement the Helmholtz RNN as well as the Helmholtz Turing Machine here.\n    Reference:\n        Reweighted Wake-Sleep\n            http://arxiv.org/abs/1406.2751\n    \"\"\"\n    def __init__(self,\n                 config, n_rng, rng,\n                 mode='RNN'):\n        super(RNNLM, self).__init__()\n\n        self.config = config\n        self.n_rng  = n_rng  # numpy random stream\n        self.rng    = rng  # Theano random stream\n        self.mode   = mode\n        self.name   = 'helmholtz'\n\n    def build_(self):\n        logger.info(\"build the Binary-Helmholtz auto-encoder\")\n        if self.mode == 'NTM':\n            assert self.config['enc_memory_dim']  == self.config['dec_memory_dim']\n            assert self.config['enc_memory_wdth'] == self.config['dec_memory_wdth']\n\n        self.encoder = Encoder(self.config, self.rng, prefix='enc', mode=self.mode)\n        if self.config['shared_embed']:\n            self.decoder = Decoder(self.config, self.rng, prefix='dec',\n                                   embed=self.encoder.Embed, mode=self.mode)\n        else:\n            self.decoder = Decoder(self.config, self.rng, prefix='dec', mode=self.mode)\n\n        # registration\n        self._add(self.encoder)\n        self._add(self.decoder)\n\n        # The main difference between VAE and HM is that we can use\n        # a more flexible prior instead of Gaussian here.\n        # for example, we use a sigmoid prior here.\n\n        # prior distribution is a bias layer\n        if self.mode == 'RNN':\n            # here we first focus on the Helmholtz Turing Machine.\n            # Thus the RNN version will be copied from Dial-DRL projects.\n            raise NotImplementedError\n\n        elif self.mode == 'NTM':\n            self.Prior  = MemoryLinear(\n                self.config['enc_memory_dim'],\n                self.config['enc_memory_wdth'],\n                activation='sigmoid',\n                name='prior_proj',\n                has_input=False\n            )\n\n            # registration\n            self._add(self.Prior)\n        else:\n            raise NotImplementedError\n\n        # objectives and optimizers\n        self.optimizer = optimizers.get(self.config['optimizer'])\n\n        # save the initial memories\n        self.memory    = T.nnet.sigmoid(initializations.get('glorot_uniform')(\n                    
(self.config['dec_memory_dim'], self.config['dec_memory_wdth'])))\n\n        logger.info(\"create Helmholtz Machine. ok\")\n\n    def compile_train(self):\n        # questions (theano variables)\n        inputs         = T.imatrix()  # padded input word sequence (for training)\n        batch_size     = inputs.shape[0]\n        if self.config['mode']   == 'RNN':\n            context    = alloc_zeros_matrix(inputs.shape[0], self.config['enc_contxt_dim'])\n        elif self.config['mode'] == 'NTM':\n            context    = T.repeat(self.memory[None, :, :], inputs.shape[0], axis=0)\n        else:\n            raise NotImplementedError\n\n        # encoding\n        memorybook     = self.encoder.build_encoder(inputs, context)\n\n        # get Q(a|y) = sigmoid\n        q_dis          = memorybook\n\n        # repeats\n        L              = self.config['repeats']\n        target         = T.repeat(inputs[:, None, :],\n                                  L,\n                                  axis=1).reshape((inputs.shape[0] * L, inputs.shape[1]))\n        q_dis          = T.repeat(q_dis[:, None, :, :],\n                                  L,\n                                  axis=1).reshape((q_dis.shape[0] * L, q_dis.shape[1], q_dis.shape[2]))\n\n        # sample actions\n        u              = self.rng.uniform(q_dis.shape)\n        action         = T.cast(u <= q_dis, dtype=theano.config.floatX)\n\n        # compute the exact probability for actions\n        logQax         = action * T.log(q_dis) + (1 - action) * T.log(1 - q_dis)\n        logQax         = logQax.sum(axis=-1).sum(axis=-1)\n\n        # decoding.\n        memorybook2    = action\n        logPxa, count  = self.decoder.build_decoder(target, memorybook2, return_count=True)\n\n        # prior.\n        p_dis          = self.Prior()\n        logPa          = action * T.log(p_dis) + (1 - action) * T.log(1 - p_dis)\n        logPa          = logPa.sum(axis=-1).sum(axis=-1)\n\n        \"\"\"\n        Compute the weights\n        \"\"\"\n        # reshape\n        logQax         = logQax.reshape((batch_size, L))\n        logPa          = logPa.reshape((batch_size, L))\n        logPxa         = logPxa.reshape((batch_size, L))\n\n        logPx_a        = logPa + logPxa\n\n        # normalizing the weights\n        log_wk         = logPx_a - logQax\n        log_bpk        = logPa - logQax\n\n        log_w_sum      = logSumExp(log_wk, axis=1)\n        log_bp_sum     = logSumExp(log_bpk, axis=1)\n\n        log_wnk        = log_wk - log_w_sum\n        log_bpnk       = log_bpk - log_bp_sum\n\n        # unbiased log-likelihood estimator\n        logPx          = T.mean(log_w_sum - T.log(L))\n        perplexity     = T.exp(-T.mean((log_w_sum - T.log(L)) / count))\n\n        \"\"\"\n        Compute the Loss function\n        \"\"\"\n        # loss    = weights * log [p(a)p(x|a)/q(a|x)]\n        weights        = T.exp(log_wnk)\n        bp             = T.exp(log_bpnk)\n        bq             = 1. 
/ L\n        ess            = T.mean(1 / T.sum(weights ** 2, axis=1))\n\n        factor         = self.config['factor']\n        if self.config['variant_control']:\n            lossQ   = -T.mean(T.sum(logQax * (weights - bq), axis=1))   # log q(a|x)\n            lossPa  = -T.mean(T.sum(logPa  * (weights - bp), axis=1))   # log p(a)\n            lossPxa = -T.mean(T.sum(logPxa * weights, axis=1))          # log p(x|a)\n            lossP   = lossPxa + lossPa\n\n            updates = self.optimizer.get_updates(self.params, [lossP + factor * lossQ, weights, bp])\n        else:\n            lossQ   = -T.mean(T.sum(logQax * weights, axis=1))   # log q(a|x)\n            lossPa  = -T.mean(T.sum(logPa  * weights, axis=1))   # log p(a)\n            lossPxa = -T.mean(T.sum(logPxa * weights, axis=1))   # log p(x|a)\n            lossP   = lossPxa + lossPa\n\n            updates = self.optimizer.get_updates(self.params, [lossP + factor * lossQ, weights])\n\n        logger.info(\"compiling the computational graph ::training function::\")\n        train_inputs = [inputs]\n\n        self.train_    = theano.function(train_inputs,\n                                         [lossPa, lossPxa, lossQ, perplexity, ess],\n                                         updates=updates,\n                                         name='train_fun')\n\n        logger.info(\"pre-training functions compile done.\")\n\n    def compile_sample(self):\n        # # for Typical Auto-encoder, only conditional generation is useful.\n        # inputs        = T.imatrix()  # padded input word sequence (for training)\n        # if self.config['mode']   == 'RNN':\n        #     context   = alloc_zeros_matrix(inputs.shape[0], self.config['enc_contxt_dim'])\n        # elif self.config['mode'] == 'NTM':\n        #     context   = T.repeat(self.memory[None, :, :], inputs.shape[0], axis=0)\n        # else:\n        #     raise NotImplementedError\n        # pass\n\n        # sample the memorybook\n        p_dis         = self.Prior()\n        l             = T.iscalar()\n        u             = self.rng.uniform((l, p_dis.shape[-2], p_dis.shape[-1]))\n        binarybook    = T.cast(u <= p_dis, dtype=theano.config.floatX)\n\n        self.take     = theano.function([l], binarybook, name='take_action')\n\n        # compile the sampler.\n        self.decoder.build_sampler()\n        logger.info('sampler function compile done.')\n\n    def compile_inference(self):\n        \"\"\"\n        build the hidden action prediction.\n        \"\"\"\n        inputs         = T.imatrix()  # padded input word sequence (for training)\n\n        if self.config['mode']   == 'RNN':\n            context    = alloc_zeros_matrix(inputs.shape[0], self.config['enc_contxt_dim'])\n        elif self.config['mode'] == 'NTM':\n            context    = T.repeat(self.memory[None, :, :], inputs.shape[0], axis=0)\n        else:\n            raise NotImplementedError\n\n        # encoding\n        memorybook     = self.encoder.build_encoder(inputs, context)\n\n        # get Q(a|y) = sigmoid(.|Posterior * encoded)\n        q_dis          = memorybook\n        p_dis          = self.Prior()\n\n        self.inference_ = theano.function([inputs], [memorybook, q_dis, p_dis])\n        logger.info(\"inference function compile done.\")\n\n    def default_context(self):\n        return self.take(1)\n\n\n
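# Illustrative end-to-end usage of the models in this file (a sketch with\n# hypothetical config/data, not part of the training scripts):\n#     model = AutoEncoder(config, n_rng, rng, mode='NTM')\n#     model.build_()\n#     model.compile_('all')\n#     loss_rec, loss_ppl = model.train_(batch)  # batch: padded int32 matrix\n\n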
class AutoEncoder(RNNLM):\n    \"\"\"\n    Regular Auto-Encoder: RNN Encoder/Decoder\n    Regular Neural Turing Machine\n    \"\"\"\n\n    def __init__(self,\n                 config, n_rng, rng,\n                 mode='Evaluation'):\n        super(RNNLM, self).__init__()\n\n        self.config = config\n        self.n_rng  = n_rng  # numpy random stream\n        self.rng    = rng  # Theano random stream\n        self.mode   = mode\n        self.name   = 'autoencoder'\n\n    def build_(self):\n        logger.info(\"build the RNN/NTM auto-encoder\")\n        self.encoder = Encoder(self.config, self.rng, prefix='enc', mode=self.mode)\n        if self.config['shared_embed']:\n            self.decoder = Decoder(self.config, self.rng, prefix='dec',\n                                   embed=self.encoder.Embed, mode=self.mode)\n        else:\n            self.decoder = Decoder(self.config, self.rng, prefix='dec', mode=self.mode)\n\n\n        # registration\n        self._add(self.encoder)\n        self._add(self.decoder)\n\n        # objectives and optimizers\n        self.optimizer = optimizers.get(self.config['optimizer'])\n\n        # save the initial memories\n        self.memory    = initializations.get('glorot_uniform')(\n                    (self.config['dec_memory_dim'], self.config['dec_memory_wdth']))\n\n        logger.info(\"create Autoencoder Network. ok\")\n\n    def compile_train(self, mode='train'):\n        # questions (theano variables)\n        inputs      = T.imatrix()  # padded input word sequence (for training)\n        if self.config['mode']   == 'RNN':\n            context    = alloc_zeros_matrix(inputs.shape[0], self.config['enc_contxt_dim'])\n        elif self.config['mode'] == 'NTM':\n            context    = T.repeat(self.memory[None, :, :], inputs.shape[0], axis=0)\n        else:\n            raise NotImplementedError\n\n        # encoding\n        memorybook     = self.encoder.build_encoder(inputs, context)\n\n        # decoding.\n        target         = inputs\n        logPxz, logPPL = self.decoder.build_decoder(target, memorybook)\n\n        # reconstruction loss\n        loss_rec       = T.mean(-logPxz)\n        loss_ppl       = T.exp(T.mean(-logPPL))\n\n        loss           = loss_rec\n        updates        = self.optimizer.get_updates(self.params, loss)\n\n        logger.info(\"compiling the computational graph ::training function::\")\n        train_inputs   = [inputs]\n\n        self.train_    = theano.function(train_inputs,\n                                         [loss_rec, loss_ppl],\n                                         updates=updates,\n                                         name='train_fun')\n        self.test      = theano.function(train_inputs,\n                                         [loss_rec, loss_ppl],\n                                         name='test_fun')\n        logger.info(\"pre-training functions compile done.\")\n\n    def compile_sample(self):\n        # for Typical Auto-encoder, only conditional generation is useful.\n        inputs        = T.imatrix()  # padded input word sequence (for training)\n        if self.config['mode']   == 'RNN':\n            context   = alloc_zeros_matrix(inputs.shape[0], self.config['enc_contxt_dim'])\n        elif self.config['mode'] == 'NTM':\n            context   = T.repeat(self.memory[None, :, :], inputs.shape[0], axis=0)\n        else:\n            raise NotImplementedError\n        pass\n\n        # encoding\n        memorybook    = self.encoder.build_encoder(inputs, context)\n        self.memorize = theano.function([inputs], memorybook, name='memorize')\n\n        # compile the sampler.\n        self.decoder.build_sampler()\n        logger.info('sampler function compile 
done.')\n"
  },
  {
    "path": "emolga/models/pointers.py",
    "content": "__author__ = 'jiataogu'\nimport theano\nimport logging\nimport copy\n\nfrom emolga.layers.recurrent import *\nfrom emolga.layers.ntm_minibatch import Controller\nfrom emolga.layers.embeddings import *\nfrom emolga.layers.attention import *\nfrom emolga.layers.highwayNet import *\nfrom emolga.models.encdec import *\nfrom core import Model\n\n# theano.config.exception_verbosity = 'high'\nlogger = logging          #.getLogger(__name__)\nRNN    = GRU              # change it here for other RNN models.\n\n\nclass PtrDecoder(Model):\n    \"\"\"\n    RNN-Decoder for Pointer Networks\n    \"\"\"\n    def __init__(self,\n                 config, rng, prefix='ptrdec'):\n        super(PtrDecoder, self).__init__()\n        self.config = config\n        self.rng = rng\n        self.prefix = prefix\n\n        \"\"\"\n        Create all elements of the Decoder's computational graph.\n        \"\"\"\n        # create Initialization Layers\n        logger.info(\"{}_create initialization layers.\".format(self.prefix))\n        self.Initializer = Dense(\n            config['ptr_contxt_dim'],\n            config['ptr_hidden_dim'],\n            activation='tanh',\n            name=\"{}_init\".format(self.prefix)\n        )\n\n        # create RNN cells\n        logger.info(\"{}_create RNN cells.\".format(self.prefix))\n        self.RNN = RNN(\n            self.config['ptr_embedd_dim'],\n            self.config['ptr_hidden_dim'],\n            self.config['ptr_contxt_dim'],\n            name=\"{}_cell\".format(self.prefix)\n        )\n        self._add(self.Initializer)\n        self._add(self.RNN)\n\n        # create readout layers\n        logger.info(\"_create Attention-Readout layers\")\n        self.attender = Attention(\n            self.config['ptr_hidden_dim'],\n            self.config['ptr_source_dim'],\n            self.config['ptr_middle_dim'],\n            name='{}_attender'.format(self.prefix)\n        )\n        self._add(self.attender)\n\n    @staticmethod\n    def grab_prob(probs, X):\n        assert probs.ndim == 3\n\n        batch_size = probs.shape[0]\n        max_len = probs.shape[1]\n        vocab_size = probs.shape[2]\n\n        probs = probs.reshape((batch_size * max_len, vocab_size))\n        return probs[T.arange(batch_size * max_len), X.flatten(1)].reshape(X.shape)  # advanced indexing\n\n    @staticmethod\n    def grab_source(source, target):\n        # source : (nb_samples, source_num, source_dim)\n        # target : (nb_samples, target_num)\n        assert source.ndim == 3\n\n        batch_size = source.shape[0]\n        source_num = source.shape[1]\n        source_dim = source.shape[2]\n        target_num = target.shape[1]\n\n        source_flt = source.reshape((batch_size * source_num, source_dim))\n        target_idx = (target + (T.arange(batch_size) * source_num)[:, None]).reshape((batch_size * target_num,))\n\n        value      = source_flt[target_idx].reshape((batch_size, target_num, source_dim))\n        return value\n\n    def build_decoder(self,\n                      inputs,\n                      source, target,\n                      smask=None, tmask=None, context=None):\n        \"\"\"\n        Build the Pointer Network Decoder Computational Graph\n        \"\"\"\n        # inputs : (nb_samples, source_num, ptr_embedd_dim)\n        # source : (nb_samples, source_num, source_dim)\n        # smask  : (nb_samples, source_num)\n        # target : (nb_samples, target_num)\n        # tmask  : (nb_samples, target_num)\n        # context: (nb_sample, 
context_dim)\n\n        # initialized hidden state.\n        assert context is not None\n        Init_h = self.Initializer(context)\n\n        # target is the source inputs.\n        X      = self.grab_source(inputs, target)  # (nb_samples, target_num, source_dim)\n        X      = T.concatenate([alloc_zeros_matrix(X.shape[0], 1, X.shape[2]),\n                                X[:, :-1, :]], axis=1)\n\n        X      = X.dimshuffle((1, 0, 2))\n        # tmask  = tmask.dimshuffle((1, 0))\n\n        # eat by recurrent net\n        def _recurrence(x, prev_h, c, s, s_mask):\n            # RNN read-out\n            x_out  = self.RNN(x, mask=None, C=c, init_h=prev_h, one_step=True)\n            s_out  = self.attender(x_out, s, s_mask, return_log=True)\n            return x_out, s_out\n\n        outputs, _ = theano.scan(\n            _recurrence,\n            sequences=[X],\n            outputs_info=[Init_h, None],\n            non_sequences=[context, source, smask]\n        )\n\n        log_prob_dist = outputs[-1].dimshuffle((1, 0, 2))\n        # tmask         = tmask.dimshuffle((1, 0))\n        log_prob      = T.sum(self.grab_prob(log_prob_dist, target) * tmask, axis=1)\n        return log_prob\n\n    \"\"\"\n    Sample one step\n    \"\"\"\n    def _step_sample(self, prev_idx, prev_stat,\n                     context, inputs, source, smask):\n        X = T.switch(\n                prev_idx[:, None] < 0,\n                alloc_zeros_matrix(prev_idx.shape[0], self.config['ptr_embedd_dim']),\n                self.grab_source(inputs, prev_idx[:, None])\n                )\n\n        # one step RNN\n        X_out = self.RNN(X, C=context, init_h=prev_stat, one_step=True)\n        next_stat = X_out\n\n        # compute the attention read-out\n        next_prob = self.attender(X_out, source, smask)\n        next_sample = self.rng.multinomial(pvals=next_prob).argmax(1)\n        return next_prob, next_sample, next_stat\n\n    def build_sampler(self):\n        \"\"\"\n        Build a sampler which only steps once.\n        \"\"\"\n        logger.info(\"build sampler ...\")\n        if self.config['sample_stoch'] and self.config['sample_argmax']:\n            logger.info(\"use argmax search!\")\n        elif self.config['sample_stoch'] and (not self.config['sample_argmax']):\n            logger.info(\"use stochastic sampling!\")\n        elif self.config['sample_beam'] > 1:\n            logger.info(\"use beam search! 
(beam_size={})\".format(self.config['sample_beam']))\n\n        # initial state of our Decoder.\n        context = T.matrix()       # theano variable.\n        init_h  = self.Initializer(context)\n        logger.info('compile the function: get_init_state')\n        self.get_init_state \\\n            = theano.function([context], init_h, name='get_init_state')\n        logger.info('done.')\n\n        # sampler: 1 x 1\n        prev_idx  = T.vector('prev_idx', dtype='int64')\n        prev_stat = T.matrix('prev_state', dtype='float32')\n        inputs    = T.tensor3()\n        source    = T.tensor3()\n        smask     = T.imatrix()\n\n        next_prob, next_sample, next_stat \\\n            = self._step_sample(prev_idx, prev_stat, context,\n                                inputs, source, smask)\n\n        # next word probability\n        logger.info('compile the function: sample_next')\n        inputs = [prev_idx, prev_stat, context, inputs, source, smask]\n        outputs = [next_prob, next_sample, next_stat]\n        self.sample_next = theano.function(inputs, outputs, name='sample_next')\n        logger.info('done')\n        pass\n\n    \"\"\"\n    Generate samples, either with stochastic sampling or beam-search!\n    \"\"\"\n\n    def get_sample(self, context, inputs, source, smask,\n                   k=1, maxlen=30, stochastic=True, argmax=False, fixlen=False):\n        # beam size\n        if k > 1:\n            assert not stochastic, 'Beam search does not support stochastic sampling!!'\n\n        # fix length cannot use beam search\n        # if fixlen:\n        #     assert k == 1\n\n        # prepare for searching\n        sample = []\n        score = []\n        if stochastic:\n            score = 0\n\n        live_k = 1\n        dead_k = 0\n\n        hyp_samples = [[]] * live_k\n        hyp_scores = np.zeros(live_k).astype(theano.config.floatX)\n        hyp_states = []\n\n        # get initial state of decoder RNN with context\n        next_state = self.get_init_state(context)\n        next_word = -1 * np.ones((1,)).astype('int64')  # indicator for the first target word (bos target)\n\n        # Start searching!\n        for ii in xrange(maxlen):\n            # print next_word\n            ctx = np.tile(context, [live_k, 1])\n            ipt = np.tile(inputs,  [live_k, 1, 1])\n            sor = np.tile(source,  [live_k, 1, 1])\n            smk = np.tile(smask,   [live_k, 1])\n\n            next_prob, next_word, next_state \\\n                = self.sample_next(next_word, next_state,\n                                   ctx, ipt, sor, smk)  # wtf.\n\n            if stochastic:\n                # using stochastic sampling (or greedy sampling.)\n                if argmax:\n                    nw = next_prob[0].argmax()\n                    next_word[0] = nw\n                else:\n                    nw = next_word[0]\n\n                sample.append(nw)\n                score += next_prob[0, nw]\n\n                if (not fixlen) and (nw == 0):  # sample reached the end\n                    break\n\n            else:\n                # using beam-search\n                # we can only computed in a flatten way!\n                cand_scores = hyp_scores[:, None] - np.log(next_prob)\n                cand_flat = cand_scores.flatten()\n                ranks_flat = cand_flat.argsort()[:(k - dead_k)]\n\n                # fetch the best results.\n                voc_size = next_prob.shape[1]\n                trans_index = ranks_flat / voc_size\n                word_index = ranks_flat % 
voc_size\n                costs = cand_flat[ranks_flat]\n\n                # get the new hyp samples\n                new_hyp_samples = []\n                new_hyp_scores = np.zeros(k - dead_k).astype(theano.config.floatX)\n                new_hyp_states = []\n\n                for idx, [ti, wi] in enumerate(zip(trans_index, word_index)):\n                    new_hyp_samples.append(hyp_samples[ti] + [wi])\n                    new_hyp_scores[idx] = copy.copy(costs[idx])\n                    new_hyp_states.append(copy.copy(next_state[ti]))\n\n                # check the finished samples\n                new_live_k = 0\n                hyp_samples = []\n                hyp_scores = []\n                hyp_states = []\n\n                for idx in xrange(len(new_hyp_samples)):\n                    if (new_hyp_samples[idx][-1] == 0) and (not fixlen):  # the last sampled word is the end marker\n                        sample.append(new_hyp_samples[idx])\n                        score.append(new_hyp_scores[idx])\n                        dead_k += 1\n                    else:\n                        new_live_k += 1\n                        hyp_samples.append(new_hyp_samples[idx])\n                        hyp_scores.append(new_hyp_scores[idx])\n                        hyp_states.append(new_hyp_states[idx])\n\n                hyp_scores = np.array(hyp_scores)\n                live_k = new_live_k\n\n                if new_live_k < 1:\n                    break\n                if dead_k >= k:\n                    break\n\n                next_word = np.array([w[-1] for w in hyp_samples])\n                next_state = np.array(hyp_states)\n\n        # end.\n        if not stochastic:\n            # dump every remaining one\n            if live_k > 0:\n                for idx in xrange(live_k):\n                    sample.append(hyp_samples[idx])\n                    score.append(hyp_scores[idx])\n\n        return sample, score\n
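\n\n# [editor's sketch] PtrDecoder.grab_prob above and PointerDecoder.grab_prob below\n# rely on one advanced-indexing trick: flatten the (batch, max_len, vocab) tensor\n# to 2-D and pick a single entry per (row, word) pair. A minimal numpy equivalent\n# (hypothetical helper, illustration only -- not used by the model):\ndef _np_grab_prob(probs, x):\n    # probs: (batch, max_len, vocab); x: (batch, max_len) integer indices\n    b, l, v = probs.shape\n    flat    = probs.reshape((b * l, v))\n    return flat[np.arange(b * l), x.flatten()].reshape(x.shape)\n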
\n\nclass PointerDecoder(Model):\n    \"\"\"\n    RNN-Decoder for Pointer Networks [version 2]\n    Points to two places at a time.\n    \"\"\"\n    def __init__(self,\n                 config, rng, prefix='ptrdec'):\n        super(PointerDecoder, self).__init__()\n        self.config = config\n        self.rng = rng\n        self.prefix = prefix\n\n        \"\"\"\n        Create all elements of the Decoder's computational graph.\n        \"\"\"\n        # create Initialization Layers\n        logger.info(\"{}_create initialization layers.\".format(self.prefix))\n        self.Initializer = Dense(\n            config['ptr_contxt_dim'],\n            config['ptr_hidden_dim'],\n            activation='tanh',\n            name=\"{}_init\".format(self.prefix)\n        )\n\n        # create RNN cells\n        logger.info(\"{}_create RNN cells.\".format(self.prefix))\n        self.RNN = RNN(\n            self.config['ptr_embedd_dim'],\n            self.config['ptr_hidden_dim'],\n            self.config['ptr_contxt_dim'],\n            name=\"{}_cell\".format(self.prefix)\n        )\n        self._add(self.Initializer)\n        self._add(self.RNN)\n\n        # create 2 attention heads\n        logger.info(\"_create Attention-Readout layers\")\n        self.att_head = Attention(\n            self.config['ptr_hidden_dim'],\n            self.config['ptr_source_dim'],\n            self.config['ptr_middle_dim'],\n            name='{}_head_attender'.format(self.prefix)\n        )\n        self.att_tail = Attention(\n            self.config['ptr_hidden_dim'],\n            self.config['ptr_source_dim'],\n            self.config['ptr_middle_dim'],\n            name='{}_tail_attender'.format(self.prefix)\n        )\n\n        self._add(self.att_head)\n        self._add(self.att_tail)\n\n    @staticmethod\n    def grab_prob(probs, X):\n        assert probs.ndim == 3\n\n        batch_size = probs.shape[0]\n        max_len = probs.shape[1]\n        vocab_size = probs.shape[2]\n\n        probs = probs.reshape((batch_size * max_len, vocab_size))\n        return probs[T.arange(batch_size * max_len), X.flatten(1)].reshape(X.shape)  # advanced indexing\n\n    @staticmethod\n    def grab_source(source, target):\n        # source : (nb_samples, source_num, source_dim)\n        # target : (nb_samples, target_num)\n        assert source.ndim == 3\n\n        batch_size = source.shape[0]\n        source_num = source.shape[1]\n        source_dim = source.shape[2]\n        target_num = target.shape[1]\n\n        source_flt = source.reshape((batch_size * source_num, source_dim))\n        target_idx = (target + (T.arange(batch_size) * source_num)[:, None]).reshape((batch_size * target_num,))\n\n        value      = source_flt[target_idx].reshape((batch_size, target_num, source_dim))\n        return value\n\n    def build_decoder(self,\n                      inputs,\n                      source, target,\n                      smask=None, tmask=None, context=None):\n        \"\"\"\n        Build the Pointer Network Decoder Computational Graph\n        \"\"\"\n        # inputs : (nb_samples, source_num, ptr_embedd_dim)\n        # source : (nb_samples, source_num, source_dim)\n        # smask  : (nb_samples, source_num)\n        # target : (nb_samples, target_num)\n        # tmask  : (nb_samples, target_num)\n        # context: (nb_sample, context_dim)\n\n        # initialized hidden state.\n        assert context is not None\n        Init_h = self.Initializer(context)\n\n        # target is the source inputs.\n        X      = self.grab_source(inputs, target)  # (nb_samples, target_num, source_dim)\n\n        nb_dim = X.shape[0]\n        tg_num = X.shape[1]\n        sc_dim = X.shape[2]\n\n        # since the decoder emits two pointers at a time:\n        # pad to even length, then split even (head) and odd (tail) positions.\n        def _get_ht(A, mask=False):\n            if A.ndim == 2:\n                B = A[:, -1:]\n                if mask:\n                    B *= 0.\n                A = T.concatenate([A, B], axis=1)\n                return A[:, ::2], A[:, 1::2]\n            else:\n                B = A[:, -1:, :]\n                if mask:\n                    B *= 0.\n                A = T.concatenate([A, B], axis=1)\n                return A[:, ::2, :], A[:, 1::2, :]\n\n        Xh, Xt = _get_ht(X)\n        Th, Tt = _get_ht(target)\n        Mh, Mt = _get_ht(tmask, mask=True)\n\n        Xa     = Xh + Xt\n        Xa     = T.concatenate([alloc_zeros_matrix(nb_dim, 1, sc_dim),\n                                Xa[:, :-1, :]], axis=1)\n        Xa     = Xa.dimshuffle((1, 0, 2))\n\n        # feed through the recurrent net\n        def _recurrence(x, prev_h, c, s, s_mask):\n            # RNN read-out\n            x_out  = self.RNN(x, mask=None, C=c, init_h=prev_h, one_step=True)\n            h_out  = self.att_head(x_out, s, s_mask, return_log=True)\n            t_out  = self.att_tail(x_out, s, s_mask, return_log=True)\n\n            return x_out, h_out, t_out\n\n        outputs, _ = theano.scan(\n            _recurrence,\n            sequences=[Xa],\n            outputs_info=[Init_h, None, None],\n
  non_sequences=[context, source, smask]\n        )\n        log_prob_head = outputs[1].dimshuffle((1, 0, 2))\n        log_prob_tail = outputs[2].dimshuffle((1, 0, 2))\n\n        log_prob      = T.sum(self.grab_prob(log_prob_head, Th) * Mh, axis=1) \\\n                      + T.sum(self.grab_prob(log_prob_tail, Tt) * Mt, axis=1)\n        return log_prob\n\n    \"\"\"\n    Sample one step\n    \"\"\"\n    def _step_sample(self,\n                     prev_idx_h, prev_idx_t,\n                     prev_stat,\n                     context, inputs, source, smask):\n        X = T.switch(\n                prev_idx_h[:, None] < 0,\n                alloc_zeros_matrix(prev_idx_h.shape[0], self.config['ptr_embedd_dim']),\n                self.grab_source(inputs, prev_idx_h[:, None]) + self.grab_source(inputs, prev_idx_t[:, None])\n                )\n\n        # one step RNN\n        X_out = self.RNN(X, C=context, init_h=prev_stat, one_step=True)\n        next_stat = X_out\n\n        # compute the attention read-out\n        next_prob_h = self.att_head(X_out, source, smask)\n        next_sample_h = self.rng.multinomial(pvals=next_prob_h).argmax(1)\n\n        next_prob_t = self.att_tail(X_out, source, smask)\n        next_sample_t = self.rng.multinomial(pvals=next_prob_t).argmax(1)\n        return next_prob_h, next_sample_h, next_prob_t, next_sample_t, next_stat\n\n    def build_sampler(self):\n        \"\"\"\n        Build a sampler which only steps once.\n        \"\"\"\n        logger.info(\"build sampler ...\")\n        if self.config['sample_stoch'] and self.config['sample_argmax']:\n            logger.info(\"use argmax search!\")\n        elif self.config['sample_stoch'] and (not self.config['sample_argmax']):\n            logger.info(\"use stochastic sampling!\")\n        elif self.config['sample_beam'] > 1:\n            logger.info(\"use beam search! 
(beam_size={})\".format(self.config['sample_beam']))\n\n        # initial state of our Decoder.\n        context = T.matrix()       # theano variable.\n        init_h  = self.Initializer(context)\n        logger.info('compile the function: get_init_state')\n        self.get_init_state \\\n            = theano.function([context], init_h, name='get_init_state')\n        logger.info('done.')\n\n        # sampler: 1 x 1\n        prev_idxh = T.vector('prev_idxh', dtype='int64')\n        prev_idxt = T.vector('prev_idxt', dtype='int64')\n\n        prev_stat = T.matrix('prev_state', dtype='float32')\n        inputs    = T.tensor3()\n        source    = T.tensor3()\n        smask     = T.imatrix()\n\n        next_prob_h, next_sample_h, next_prob_t, next_sample_t, next_stat \\\n            = self._step_sample(prev_idxh, prev_idxt, prev_stat, context,\n                                inputs, source, smask)\n\n        # next word probability\n        logger.info('compile the function: sample_next')\n        inputs = [prev_idxh, prev_idxt, prev_stat, context, inputs, source, smask]\n        outputs = [next_prob_h, next_sample_h, next_prob_t, next_sample_t, next_stat]\n        self.sample_next = theano.function(inputs, outputs, name='sample_next')\n        logger.info('done')\n        pass\n\n    \"\"\"\n    Generate samples, either with stochastic sampling or beam-search!\n    \"\"\"\n\n    def get_sample(self, context, inputs, source, smask,\n                   k=1, maxlen=30, stochastic=True, argmax=False, fixlen=False):\n        # beam size\n        if k > 1:\n            assert not stochastic, 'Beam search does not support stochastic sampling!!'\n\n        # fix length cannot use beam search\n        # if fixlen:\n        #     assert k == 1\n\n        # prepare for searching\n        sample = []\n        score = []\n        if stochastic:\n            score = 0\n\n        live_k = 1\n        dead_k = 0\n\n        hyp_samples = [[]] * live_k\n        hyp_scores = np.zeros(live_k).astype(theano.config.floatX)\n        hyp_states = []\n\n        # get initial state of decoder RNN with context\n        next_state = self.get_init_state(context)\n\n        next_wordh = -1 * np.ones((1,)).astype('int64')  # indicator for the first target word (bos target)\n        next_wordt = -1 * np.ones((1,)).astype('int64')\n\n        # Start searching!\n        for ii in xrange(maxlen):\n            # print next_word\n            ctx = np.tile(context, [live_k, 1])\n            ipt = np.tile(inputs,  [live_k, 1, 1])\n            sor = np.tile(source,  [live_k, 1, 1])\n            smk = np.tile(smask,   [live_k, 1])\n\n            next_probh, next_wordh, next_probt, next_wordt, next_state \\\n                = self.sample_next(next_wordh, next_wordt, next_state,\n                                   ctx, ipt, sor, smk)  # wtf.\n\n            if stochastic:\n                # using stochastic sampling (or greedy sampling.)\n                if argmax:\n                    nw = next_probh[0].argmax()\n                    next_wordh[0] = nw\n                else:\n                    nw = next_wordh[0]\n\n                sample.append(nw)\n                score += next_probh[0, nw]\n\n                if (not fixlen) and (nw == 0):  # sample reached the end\n                    break\n\n                if argmax:\n                    nw = next_probt[0].argmax()\n                    next_wordt[0] = nw\n                else:\n                    nw = next_wordt[0]\n\n                sample.append(nw)\n                score += 
next_probt[0, nw]\n\n                if (not fixlen) and (nw == 0):  # sample reached the end\n                    break\n\n            else:\n                # beam search would have to rank head/tail pairs jointly;\n                # the flattened single-head ranking used above does not cover that case.\n                raise NotImplementedError('beam search is not implemented for the two-pointer decoder.')\n\n        return sample, score\n
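\n\n# [editor's sketch] PointerDecoder._get_ht above pads a sequence by one step and\n# splits it into interleaved head/tail streams (even vs. odd positions). A minimal\n# numpy equivalent of the 2-D branch (hypothetical helper, illustration only):\ndef _np_split_head_tail(a):\n    # a: (batch, n); the pad column mirrors the last column (zeroed for masks)\n    padded = np.concatenate([a, a[:, -1:]], axis=1)\n    return padded[:, ::2], padded[:, 1::2]\n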
\n\nclass MemNet(Model):\n    \"\"\"\n    Memory Networks:\n        ==> Assign a Matrix to store rules\n    \"\"\"\n    def __init__(self,\n                 config, rng, learn_memory=False,\n                 prefix='mem'):\n        super(MemNet, self).__init__()\n        self.config = config\n        self.rng    = rng    # Theano random stream\n        self.prefix = prefix\n        self.init = initializations.get('glorot_uniform')\n\n        if learn_memory:\n            self.memory = self.init((self.config['mem_size'], self.config['mem_source_dim']))\n            self.memory.name = '{}_inner_memory'.format(self.prefix)\n            self.params += [self.memory]\n        \"\"\"\n        Create the read-head of the MemoryNets\n        \"\"\"\n        if self.config['mem_type'] == 'dnn':\n            self.attender = Attention(\n                config['mem_hidden_dim'],\n                config['mem_source_dim'],\n                config['mem_middle_dim'],\n                name='{}_attender'.format(self.prefix)\n            )\n        else:\n            self.attender = CosineAttention(\n                config['mem_hidden_dim'],\n                config['mem_source_dim'],\n                use_pipe=config['mem_use_pipe'],\n                name='{}_attender'.format(self.prefix)\n            )\n        self._add(self.attender)\n\n    def __call__(self, key, memory=None, mem_mask=None, out_memory=None):\n        # key:    (nb_samples, mem_hidden_dim)\n        # memory: (nb_samples, mem_size, mem_source_dim)\n        nb_samples = key.shape[0]\n        if memory is None:  # fall back to the learned inner memory\n            memory   = T.repeat(self.memory[None, :, :], nb_samples, axis=0)\n            mem_mask = None\n\n        if memory.ndim == 2:\n            memory   = T.repeat(memory[None, :, :], nb_samples, axis=0)\n\n        probout     = self.attender(key, memory, mem_mask)  # (nb_samples, mem_size)\n        if self.config['mem_att_drop'] > 0:\n            probout = T.clip(probout - self.config['mem_att_drop'], 0, 1)\n\n        if out_memory is None:\n            readout    = T.sum(memory * probout[:, :, None], axis=1)\n        else:\n            readout    = T.sum(out_memory * probout[:, :, None], axis=1)\n        return readout, probout\n\n\nclass PtrNet(Model):\n    \"\"\"\n    Pointer Networks [with/without] External Rule Memory\n    \"\"\"\n    def __init__(self, config, n_rng, rng,\n                 name='PtrNet', w_mem=True):\n        super(PtrNet, self).__init__()\n\n        self.config = config\n        self.n_rng  = n_rng  # numpy random stream\n        self.rng    = rng  # Theano random stream\n        self.name   = name\n        self.w_mem  = w_mem\n\n    def build_(self, encoder=None):\n        logger.info(\"build the Pointer Networks\")\n\n        # encoder\n        if not encoder:\n            self.encoder = Encoder(self.config, self.rng, prefix='enc1')\n            self._add(self.encoder)\n        else:\n            self.encoder = encoder\n\n        if self.config['mem_output_mem']:\n            self.encoder_out = Encoder(self.config, self.rng, prefix='enc_out')\n            self._add(self.encoder_out)\n\n        # twice encoding\n        if self.config['ptr_twice_enc']:\n            self.encoder2 = Encoder(self.config, self.rng, prefix='enc2', use_context=True)\n            self._add(self.encoder2)\n\n        # pointer decoder\n        self.ptrdec  = PtrDecoder(self.config, self.rng)\n        self._add(self.ptrdec)\n\n        # memory grabber\n        self.grabber = MemNet(self.config, self.rng)\n        self._add(self.grabber)\n\n        # memory predictor :: alternative ::\n        if self.config['use_predict']:\n            logger.info('create a predictor as Long-Term Memory.')\n            if self.config['pred_type'] == 'highway':\n                self.predictor = HighwayNet(self.config['mem_hidden_dim'],\n                                            self.config['pred_depth'],\n                                            activation='relu',\n                                            name='phw')\n            elif self.config['pred_type'] == 'dense':\n                self.predictor = Dense(self.config['mem_hidden_dim'],\n                                       self.config['mem_hidden_dim'],\n                                       name='pdnn')\n            elif self.config['pred_type'] == 'encoder':\n                config = self.config\n                # config['enc_embedd_dim'] = 300\n                # config['enc_hidden_dim'] = 300\n                self.predictor = Encoder(config, self.rng, prefix='enc3', use_context=False)\n            else:\n                raise NotImplementedError('unknown pred_type: {}'.format(self.config['pred_type']))\n            self._add(self.predictor)\n\n        # objectives and optimizers\n        assert self.config['optimizer'] == 'adam'\n        self.optimizer = optimizers.get(self.config['optimizer'],\n                                        kwargs=dict(rng=self.rng,\n                                                    save=self.config['save_updates']))\n\n    def build_train(self, memory=None, out_memory=None, compile_train=False, guide=None):\n        # training function for Pointer Networks\n        indices  = T.imatrix()  # padded word indices (for training)\n        target   = T.imatrix()  # target indices (leading to relative locations)\n        tmask    = T.imatrix()  # target masks\n        pmask    = T.cast(1 - T.eq(target[:, 0], 0), dtype='float32')\n\n        assert memory is not None, 'we must have an input memory'\n        if self.config['mem_output_mem']:\n            assert out_memory is not None,  'we must have an output memory'\n\n        # L1 of memory\n        loss_mem  = T.sum(abs(T.mean(memory, axis=0)))\n\n        # encoding\n        if not self.config['ptr_twice_enc']:\n            source, inputs, smask, tail = self.encoder.build_encoder(indices, None, return_embed=True, return_sequence=True)\n\n            # grab memory\n            readout, probout = self.grabber(tail, memory)\n\n            if not self.config['use_tail']:\n                tailx = tail * 0.0\n            else:\n                tailx = tail\n\n            if not self.config['use_memory']:\n                readout *= 0.0\n\n            # concatenate\n            context  = T.concatenate([tailx, readout], axis=1)\n\n            # if predict ?\n            # predictor: minimize || readout - predict ||^2\n            if self.config['use_predict']:\n                if self.config['pred_type'] == 'encoder':\n                    predict = self.predictor.build_encoder(indices, None, return_sequence=False)\n                else:\n                    predict = self.predictor(tail)\n
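\n                # [editor's sketch] loss_r below is a masked mean-squared error:\n                # only rows with a valid target (pmask == 1) contribute. A numpy\n                # equivalent (hypothetical helper, illustration only):\n                def _np_masked_mse(predict_np, readout_np, pmask_np):\n                    se = 0.5 * np.sum(np.square(predict_np - readout_np), axis=-1)\n                    return np.sum(pmask_np * se) / np.sum(pmask_np)\n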
\n                # reconstruction loss [note that we only compute loss for correct memory read.]\n                loss_r   = 0.5 * T.sum(pmask * T.sum(T.sqr(predict - readout), axis=-1).reshape(pmask.shape)) / T.sum(pmask)\n                dist     = T.sum(T.sum(T.sqr(tail - readout), axis=-1).reshape(pmask.shape) * pmask) / T.sum(pmask)\n\n                # use predicted readout to compute loss\n                contextz = T.concatenate([tailx, predict], axis=1)\n                sourcez, inputsz, smaskz = source, inputs, smask\n        else:\n            tail = self.encoder.build_encoder(indices, None, return_sequence=False)\n\n            # grab memory\n            readout, probout = self.grabber(tail, memory, out_memory=out_memory)\n\n            # get PtrNet input\n            if not self.config['use_tail']:\n                tailx = tail * 0.0\n            else:\n                tailx = tail\n\n            if not self.config['use_memory']:\n                readout *= 0.0\n\n            # concatenate\n            context0  = T.concatenate([tailx, readout], axis=1)\n\n            # twice encoding ?\n            source, inputs, smask, context = self.encoder2.build_encoder(\n                indices, context=context0, return_embed=True, return_sequence=True)\n\n            # if predict ?\n            # predictor: minimize || readout - predict ||^2\n            if self.config['use_predict']:\n                if self.config['pred_type'] == 'encoder':\n                    predict = self.predictor.build_encoder(indices, None, return_sequence=False)\n                else:\n                    predict = self.predictor(tail)\n\n                # reconstruction loss [note that we only compute loss for correct memory read.]\n                loss_r   = 0.5 * T.sum(pmask * T.sum(T.sqr(predict - readout), axis=-1).reshape(pmask.shape)) / T.sum(pmask)\n                dist     = T.sum(T.sum(T.sqr(tail - readout), axis=-1).reshape(pmask.shape) * pmask) / T.sum(pmask)\n                # use predicted readout to compute loss\n                context1 = T.concatenate([tailx, predict], axis=1)\n\n                # twice encoding..\n                sourcez, inputsz, smaskz, contextz = self.encoder2.build_encoder(\n                    indices, context=context1, return_embed=True, return_sequence=True)\n\n        # pointer decoder & loss\n        logProb  = self.ptrdec.build_decoder(inputs, source, target,\n                                             smask, tmask, context)\n        loss     = T.mean(-logProb)\n\n        # if predict?\n        if self.config['use_predict']:\n            logProbz = self.ptrdec.build_decoder(\n                    inputsz, sourcez, target, smaskz, tmask, contextz)\n            loss_z   = -T.sum(pmask * logProbz.reshape(pmask.shape)) / T.sum(pmask)\n\n
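        # [editor's sketch] the guide branch below only supervises positions whose\n        # guide entry is not -1. A numpy equivalent of the mask (hypothetical\n        # helper, illustration only):\n        def _np_guide_mask(guide_np):\n            return (guide_np != -1).astype('float32')\n\n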
        # if guidance ?\n        if guide is not None:\n            # attention loss\n            # NOTE: the guide vector may contain '-1', which requires a mask.\n            mask   = T.ones_like(guide) * (1 - T.eq(guide, -1))\n            loss_g = T.mean(\n                        -T.sum(\n                            T.log(PtrDecoder.grab_prob(probout[:, None, :], guide)),\n                        axis=1).reshape(mask.shape) * mask\n                    )\n\n            # attention accuracy\n            attend = probout.argmax(axis=1, keepdims=True)\n            maxp   = T.sum(probout.max(axis=1).reshape(mask.shape) * mask) / T.cast(T.sum(mask), 'float32')\n            error  = T.sum((abs(attend - guide) * mask) > 0) / T.cast(T.sum(mask), 'float32')\n\n            if self.config['mem_learn_guide']:\n                loss  += loss_g\n\n            # loss += 0.1 * loss_mem\n\n        if compile_train:\n            train_inputs = [indices, target, tmask, memory]\n            if guide is not None:\n                train_inputs += [guide]\n            logger.info(\"compiling the computational graph ::training function::\")\n            updates  = self.optimizer.get_updates(self.params, loss)\n            self.train_ = theano.function(train_inputs, loss, updates=updates, name='train_sub')\n            logger.info(\"training functions compile done.\")\n\n        # output the building results for Training\n        outputs  = [loss]\n        if guide is not None:\n            outputs += [maxp, error]\n        outputs += [indices, target, tmask]\n        if self.config['use_predict']:\n            outputs += [loss_r, loss_z, dist, readout]\n\n        return outputs\n\n    def build_sampler(self, memory=None, out_mem=None):\n        # sampler inputs for Pointer Networks\n        indices  = T.imatrix()  # padded word indices\n\n        # encoding\n        if not self.config['ptr_twice_enc']:\n            # encoding\n            source, inputs, smask, tail = self.encoder.build_encoder(indices, None, return_embed=True, return_sequence=True)\n\n            # grab memory\n            readout, probout = self.grabber(tail, memory, out_memory=out_mem)\n\n            if not self.config['use_tail']:\n                tail *= 0.0\n\n            if not self.config['use_memory']:\n                readout *= 0.0\n\n            # concatenate\n            context  = T.concatenate([tail, readout], axis=1)\n        else:\n            tail = self.encoder.build_encoder(indices, None, return_sequence=False)\n\n            # grab memory\n            readout, probout = self.grabber(tail, memory, out_memory=out_mem)\n            if not self.config['use_tail']:\n                tail *= 0.0\n\n            if not self.config['use_memory']:\n                readout *= 0.0\n\n            # concatenate\n            context0  = T.concatenate([tail, readout], axis=1)\n\n            # twice encoding ?\n            source, inputs, smask, context = self.encoder2.build_encoder(\n                indices, context=context0, return_embed=True, return_sequence=True)\n\n        # monitoring\n        self.monitor['attention_prob'] = probout\n        self._monitoring()\n\n        return context, source, smask, inputs, indices\n\n    def build_predict_sampler(self):\n        # sampler inputs for Pointer Networks (with predicted memory read-out)\n        indices  = T.imatrix()  # padded word indices\n\n        # encoding\n        if not self.config['ptr_twice_enc']:\n            # encoding\n            source, inputs, smask, tail = self.encoder.build_encoder(indices, None, return_embed=True, return_sequence=True)\n\n            # predict 
memory\n            if self.config['pred_type'] == 'encoder':\n                readout = self.predictor.build_encoder(indices, None, return_sequence=False)\n            else:\n                readout = self.predictor(tail)\n\n            if not self.config['use_tail']:\n                tail *= 0.0\n\n            if not self.config['use_memory']:\n                readout *= 0.0\n\n            # concatenate\n            context  = T.concatenate([tail, readout], axis=1)\n        else:\n            tail = self.encoder.build_encoder(indices, None, return_sequence=False)\n\n            # predict memory\n            if self.config['pred_type'] == 'encoder':\n                readout = self.predictor.build_encoder(indices, None, return_sequence=False)\n            else:\n                readout = self.predictor(tail)\n\n            if not self.config['use_tail']:\n                tail *= 0.0\n\n            if not self.config['use_memory']:\n                readout *= 0.0\n\n            # concatenate\n            context0  = T.concatenate([tail, readout], axis=1)\n\n            # twice encoding ?\n            source, inputs, smask, context = self.encoder2.build_encoder(\n                indices, context=context0, return_embed=True, return_sequence=True)\n\n        return context, source, smask, inputs, indices\n\n    def generate_(self, inputs, context, source, smask):\n        args = dict(k=4, maxlen=5, stochastic=False, argmax=False)\n        sample, score = self.ptrdec.get_sample(context, inputs, source, smask,\n                                               **args)\n        if not args['stochastic']:\n            score = score / np.array([len(s) for s in sample])\n            sample = sample[score.argmin()]\n            score = score.min()\n        else:\n            score /= float(len(sample))\n\n        return sample, np.exp(score)"
  },
  {
    "path": "emolga/models/variational.py",
    "content": "__author__ = 'jiataogu'\nimport theano\n# theano.config.exception_verbosity = 'high'\nimport logging\n\nimport emolga.basic.objectives as objectives\nimport emolga.basic.optimizers as optimizers\nfrom emolga.layers.recurrent import *\nfrom emolga.layers.embeddings import *\nfrom emolga.models.encdec import RNNLM, Encoder, Decoder\nfrom emolga.models.sandbox import SkipDecoder\n\n\nlogger = logging\nRNN = JZS3  # change it here for other RNN models.\n# Decoder = SkipDecoder\n\n\nclass VAE(RNNLM):\n    \"\"\"\n    Variational Auto-Encoder: RNN-Variational Encoder/Decoder,\n    in order to model the sentence generation.\n\n    We implement the original VAE and a better version, IWAE.\n    References:\n        Auto-Encoding Variational Bayes\n            http://arxiv.org/abs/1312.6114\n\n        Importance Weighted Autoencoders\n            http://arxiv.org/abs/1509.00519\n    \"\"\"\n\n    def __init__(self,\n                 config, n_rng, rng,\n                 mode='Evaluation'):\n        super(RNNLM, self).__init__()\n\n        self.config = config\n        self.n_rng  = n_rng  # numpy random stream\n        self.rng    = rng  # Theano random stream\n        self.mode   = mode\n        self.name   = 'vae'\n        self.tparams= dict()\n\n    def _add_tag(self, layer, tag):\n        if tag not in self.tparams:\n            self.tparams[tag] = []\n\n        if layer:\n            self.tparams[tag] += layer.params\n\n    def build_(self):\n        logger.info(\"build the variational auto-encoder\")\n        self.encoder = Encoder(self.config, self.rng, prefix='enc')\n        if self.config['shared_embed']:\n            self.decoder = Decoder(self.config, self.rng, prefix='dec', embed=self.encoder.Embed)\n        else:\n            self.decoder = Decoder(self.config, self.rng, prefix='dec')\n\n        # additional parameters for building Gaussian:\n        logger.info(\"create Gaussian layers.\")\n\n        \"\"\"\n        Build the Gaussian distribution.\n        \"\"\"\n        self.action_activ = activations.get('tanh')\n        self.context_mean = Dense(\n            self.config['enc_hidden_dim'] * 2\n            if self.config['bidirectional']\n            else self.config['enc_hidden_dim'],\n\n            self.config['action_dim'],\n            activation='linear',\n            name=\"weight_mean\"\n        )\n\n        self.context_std = Dense(\n            self.config['enc_hidden_dim'] * 2\n            if self.config['bidirectional']\n            else self.config['enc_hidden_dim'],\n\n            self.config['action_dim'],\n            activation='linear',\n            name=\"weight_std\"\n        )\n\n        self.context_trans = Dense(\n            self.config['action_dim'],\n            self.config['dec_contxt_dim'],\n            activation='tanh',\n            name=\"transform\"\n        )\n\n        # registration:\n        self._add(self.context_mean)\n        self._add(self.context_std)\n        self._add(self.context_trans)\n        self._add(self.encoder)\n        self._add(self.decoder)\n\n        # Q-layers:\n        self._add_tag(self.encoder, 'q')\n        self._add_tag(self.context_mean, 'q')\n        self._add_tag(self.context_std, 'q')\n\n        # P-layers:\n        self._add_tag(self.decoder, 'p')\n        self._add_tag(self.context_trans, 'p')\n\n        # objectives and optimizers\n        self.optimizer = optimizers.get(self.config['optimizer'])\n\n        logger.info(\"create variational RECURRENT auto-encoder. 
ok\")\n\n    def compile_train(self):\n        \"\"\"\n        build the training function here <:::>\n        \"\"\"\n        # questions (theano variables)\n        inputs = T.imatrix()  # padded input word sequence (for training)\n\n        # encoding. (use backward encoding.)\n        encoded = self.encoder.build_encoder(inputs[:, ::-1])\n\n        # gaussian distribution\n        mean = self.context_mean(encoded)\n        ln_var = self.context_std(encoded)\n\n        # [important] use multiple samples.\n        if self.config['repeats'] > 1:\n            L  = self.config['repeats']\n\n            # repeat mean, ln_var and targets.\n            func_r = lambda x: T.extra_ops.repeat(\n                                x[:, None, :], L,\n                                axis=1).reshape((x.shape[0] * L, x.shape[1]))\n            mean, ln_var, target \\\n                   = [func_r(x) for x in [mean, ln_var, inputs]]\n        else:\n            target = inputs\n\n        action  = mean + T.exp(ln_var / 2.) * self.rng.normal(mean.shape)\n        context = self.context_trans(action)\n\n        # decoding.\n        logPxz, logPPL = self.decoder.build_decoder(target, context)\n\n        # loss function for variational auto-encoding\n        # regulation loss + reconstruction loss\n        loss_reg = T.mean(objectives.get('GKL')(mean, ln_var))\n        loss_rec = T.mean(-logPxz)\n        loss_ppl = T.exp(T.mean(-logPPL))\n\n        m_mean = T.mean(abs(mean))\n        m_ln_var = T.mean(abs(ln_var))\n        L1       = T.sum([T.sum(abs(w)) for w in self.params])\n\n        loss = loss_reg + loss_rec\n        updates = self.optimizer.get_updates(self.params, loss)\n\n        logger.info(\"compiling the compuational graph ::training function::\")\n        train_inputs = [inputs]\n\n        self.train_ = theano.function(train_inputs,\n                                      [loss_reg, loss_rec, L1, m_ln_var],\n                                      updates=updates,\n                                      name='train_fun')\n        # add monitoring:\n        self.monitor['action'] = action\n        self._monitoring()\n\n        # compiling monitoring\n        self.compile_monitoring(train_inputs)\n        logger.info(\"pre-training functions compile done.\")\n\n    def compile_sample(self):\n        \"\"\"\n        build the sampler function here <:::>\n        \"\"\"\n        # context vectors (as)\n        self.decoder.build_sampler()\n\n        l = T.iscalar()\n        logger.info(\"compiling the computational graph :: action sampler\")\n        self.action_sampler = theano.function([l], self.rng.normal((l, self.config['action_dim'])))\n\n        action = T.matrix()\n        logger.info(\"compiling the compuational graph ::transform function::\")\n        self.transform = theano.function([action], self.context_trans(action))\n        logger.info(\"display functions compile done.\")\n\n    def compile_inference(self):\n        \"\"\"\n        build the hidden action prediction.\n        \"\"\"\n        inputs = T.imatrix()  # padded input word sequence (for training)\n\n        # encoding. 
(use backward encoding.)\n        encoded = self.encoder.build_encoder(inputs[:, ::-1])\n\n        # gaussian distribution\n        mean    = self.context_mean(encoded)\n        ln_var  = self.context_std(encoded)\n\n        self.inference_ = theano.function([inputs], [encoded, mean, T.sqrt(T.exp(ln_var))])\n        logger.info(\"inference function compile done.\")\n\n    def default_context(self):\n        return self.transform(self.action_sampler(1))\n\n\nclass Helmholtz(VAE):\n    \"\"\"\n    Another alternative I can think about is the Helmholtz Machine\n    It is trained using a Reweighted Wake Sleep Algorithm.\n    Reference:\n        Reweighted Wake-Sleep\n            http://arxiv.org/abs/1406.2751\n    \"\"\"\n    def __init__(self,\n                 config, n_rng, rng,\n                 mode = 'Evaluation',\n                 dynamic_prior=False,\n                 ):\n        super(VAE, self).__init__(config, n_rng, rng)\n\n        # self.config = config\n        # self.n_rng = n_rng  # numpy random stream\n        # self.rng = rng  # Theano random stream\n        self.mode = mode\n        self.name = 'multitask_helmholtz'\n        self.tparams = dict()\n        self.dynamic_prior = dynamic_prior\n\n    def build_(self):\n        logger.info('Build Helmholtz Recurrent Neural Networks')\n        self.encoder = Encoder(self.config, self.rng, prefix='enc')\n        if self.config['shared_embed']:\n            self.decoder = Decoder(self.config, self.rng, prefix='dec', embed=self.encoder.Embed,\n                                   highway=self.config['highway'])\n        else:\n            self.decoder = Decoder(self.config, self.rng, prefix='dec',\n                                   highway=self.config['highway'])\n\n        # The main difference between VAE and HM is that we can use\n        # a more flexible prior instead of Gaussian here.\n        # for example, we use a sigmoid prior here.\n\n        \"\"\"\n        Build the Sigmoid Layers\n        \"\"\"\n        # prior distribution (bias layer)\n        self.Prior    = Constant(\n            self.config['action_dim'],\n            self.config['action_dim'],\n            activation='sigmoid',\n            name='prior_proj'\n        )\n\n        # Fake Posterior (Q-function)\n        self.Posterior = Dense(\n            self.config['enc_hidden_dim'] * 2\n            if self.config['bidirectional']\n            else self.config['enc_hidden_dim'],\n\n            self.config['action_dim'],\n            activation='sigmoid',\n            name = 'posterior_proj'\n        )\n\n        # Action transform to context\n        self.context_trans = Dense(\n            self.config['action_dim'],\n            self.config['dec_contxt_dim'],\n            activation='linear',\n            name=\"transform\"\n        )\n\n        # registration:\n        self._add(self.Posterior)\n        self._add(self.Prior)\n        self._add(self.context_trans)\n        self._add(self.encoder)\n        self._add(self.decoder)\n\n        # Q-layers:\n        self._add_tag(self.encoder, 'q')\n        self._add_tag(self.Posterior, 'q')\n\n        # P-layers:\n        self._add_tag(self.Prior, 'p')\n        self._add_tag(self.decoder, 'p')\n        self._add_tag(self.context_trans, 'p')\n\n        # objectives and optimizers\n        self.optimizer_p = optimizers.get(self.config['optimizer'], kwargs={'clipnorm': 5})\n        self.optimizer_q = optimizers.get(self.config['optimizer'], kwargs={'clipnorm': 5})\n\n        logger.info(\"create Helmholtz RECURRENT 
neural network. ok\")\n\n    def dynamic(self):\n        self.Prior   = Dense(\n            self.config['state_dim'],\n            self.config['action_dim'],\n            activation='sigmoid',\n            name='prior_proj'\n        )\n\n        self.params = []\n        self.layers = []\n        self.tparams= dict()\n\n        # add layers again!\n        # registration:\n        self._add(self.Posterior)\n        self._add(self.Prior)\n        self._add(self.context_trans)\n        self._add(self.encoder)\n        self._add(self.decoder)\n\n        # Q-layers:\n        self._add_tag(self.encoder, 'q')\n        self._add_tag(self.Posterior, 'q')\n\n        # P-layers:\n        self._add_tag(self.Prior, 'p')\n        self._add_tag(self.decoder, 'p')\n        self._add_tag(self.context_trans, 'p')\n\n    def compile_(self, mode='train', contrastive=False):\n        # compile the computational graph.\n        # INFO: the parameters.\n        # mode: 'train'/ 'display'/ 'policy' / 'all'\n\n        ps = 'params: {\\n'\n        for p in self.params:\n            ps += '{0}: {1}\\n'.format(p.name, p.eval().shape)\n        ps += '}.'\n        logger.info(ps)\n\n        param_num = np.sum([np.prod(p.shape.eval()) for p in self.params])\n        logger.info(\"total number of the parameters of the model: {}\".format(param_num))\n\n        if mode == 'train' or mode == 'all':\n            if not contrastive:\n                self.compile_train()\n            else:\n                self.compile_train_CE()\n\n        if mode == 'display' or mode == 'all':\n            self.compile_sample()\n\n        if mode == 'inference' or mode == 'all':\n            self.compile_inference()\n\n    def compile_train(self):\n        \"\"\"\n        build the training function here <:::>\n        \"\"\"\n        # get input sentence (x)\n        inputs  = T.imatrix()  # padded input word sequence (for training)\n        batch_size = inputs.shape[0]\n\n        \"\"\"\n        The Computational Flow.\n        \"\"\"\n        # encoding. 
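(use backward encoding.)\n        # [editor's sketch] the binary actions below are drawn by inverse-CDF\n        # sampling: u ~ Uniform(0, 1), a = 1 iff u <= q. A numpy equivalent\n        # (hypothetical helper, illustration only):\n        def _np_sample_binary(q_np, u_np):\n            return (u_np <= q_np).astype('float32')\n\n        # encoding. 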
(use backward encoding.)\n        encoded = self.encoder.build_encoder(inputs[:, ::-1])\n\n        # get Q(a|y) = sigmoid(.|Posterior * encoded)\n        q_dis   = self.Posterior(encoded)\n\n        # use multiple samples\n        L  = T.iscalar('repeats') #self.config['repeats']\n\n        def func_r(x):\n            return T.extra_ops.repeat(x[:, None, :], L, axis=1).reshape((-1, x.shape[1]))  # ?\n\n        q_dis, target = [func_r(x) for x in [q_dis, inputs]]\n\n        # sample actions\n        u       = self.rng.uniform(q_dis.shape)\n        action  = T.cast(u <= q_dis, dtype=theano.config.floatX)\n\n        # compute the exact probability for actions\n        logQax  = T.sum(action * T.log(q_dis) + (1 - action) * T.log(1 - q_dis), axis=1)\n\n        # decoding.\n        context = self.context_trans(action)\n        logPxa, count = self.decoder.build_decoder(target, context, return_count=True)\n        logPPL  = logPxa / count\n        # logPxa, logPPL = self.decoder.build_decoder(target, context)\n\n        # prior.\n        p_dis   = self.Prior(action)\n        logPa   = T.sum(action * T.log(p_dis) + (1 - action) * T.log(1 - p_dis), axis=1)\n\n        \"\"\"\n        Compute the weights\n        \"\"\"\n        # reshape\n        logQax  = logQax.reshape((batch_size, L))\n        logPa   = logPa.reshape((batch_size, L))\n        logPxa  = logPxa.reshape((batch_size, L))\n        count   = count.reshape((batch_size, L))[:, :1]\n\n        # P(x, a) = P(a) * P(x|a)\n        logPx_a = logPa + logPxa\n        log_wk  = logPx_a - logQax\n        log_bpk = logPa - logQax\n\n        log_w_sum  = logSumExp(log_wk, axis=1)\n        log_bp_sum = logSumExp(log_bpk, axis=1)\n\n        log_wnk    = log_wk - log_w_sum\n        log_bpnk   = log_bpk - log_bp_sum\n\n        # unbiased log-likelihood estimator\n        # nll   = -T.mean(log_w_sum - T.log(L))\n        nll        = T.mean(-(log_w_sum - T.log(L)))\n        perplexity = T.exp(T.mean(-(log_w_sum - T.log(L)) / count))\n\n        # perplexity = T.exp(-T.mean((log_w_sum - T.log(L)) / count))\n\n        \"\"\"\n        Compute the Loss function\n        \"\"\"\n        # loss    = weights * log [p(a)p(x|a)/q(a|x)]\n        weights = T.exp(log_wnk)\n        bp      = T.exp(log_bpnk)\n        bq      = 1. 
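\n        # [editor's sketch] weights above are self-normalized importance weights,\n        # w_k = exp(log_wk) / sum_j exp(log_wj), computed via logSumExp. A numpy\n        # check (hypothetical helper, illustration only):\n        def _np_normalized_weights(log_wk_np):\n            m = log_wk_np.max(axis=1, keepdims=True)\n            w = np.exp(log_wk_np - m)\n            return w / w.sum(axis=1, keepdims=True)\n        bq      = 1. 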
/ L\n        ess     = T.mean(1 / T.sum(weights ** 2, axis=1))\n\n        # monitoring\n        # self.monitor['action'] = action\n        if self.config['variant_control']:\n            lossQ   = -T.mean(T.sum(logQax * (weights - bq), axis=1))   # log q(a|x)\n            lossPa  = -T.mean(T.sum(logPa  * (weights - bp), axis=1))   # log p(a)\n            lossPxa = -T.mean(T.sum(logPxa * weights, axis=1))          # log p(x|a)\n            lossP   = lossPxa + lossPa\n\n            updates_p = self.optimizer_p.get_updates(self.tparams['p'], [lossP, weights, bp])\n            updates_q = self.optimizer_q.get_updates(self.tparams['q'], [lossQ, weights])\n        else:\n            lossQ   = -T.mean(T.sum(logQax * weights, axis=1))   # log q(a|x)\n            lossPa  = -T.mean(T.sum(logPa  * weights, axis=1))   # log p(a)\n            lossPxa = -T.mean(T.sum(logPxa * weights, axis=1))   # log p(x|a)\n            lossP   = lossPxa + lossPa\n            # lossRes = -T.mean(T.nnet.relu(T.sum((logPa + logPxa - logPx0) * weights, axis=1)))\n            # lossP   = 0.1 * lossRes + lossP\n\n            updates_p = self.optimizer_p.get_updates(self.tparams['p'], [lossP, weights])\n            updates_q = self.optimizer_q.get_updates(self.tparams['q'], [lossQ, weights])\n\n        updates   = updates_p + updates_q\n\n        logger.info(\"compiling the compuational graph ::training function::\")\n        train_inputs = [inputs] + [theano.Param(L, default=10)]\n\n        self.train_ = theano.function(train_inputs,\n                                      [lossPa, lossPxa, lossQ, perplexity, nll],\n                                      updates=updates,\n                                      name='train_fun')\n\n        logger.info(\"compile the computational graph:: >__< :: explore function\")\n        self.explore_ = theano.function(train_inputs,\n                                        [log_wk, count],\n                                        name='explore_fun')\n\n        # add monitoring:\n        # self._monitoring()\n\n        # compiling monitoring\n        # self.compile_monitoring(train_inputs)\n        logger.info(\"pre-training functions compile done.\")\n\n    def build_dynamics(self, states, action, Y):\n        # this funtion is used to compute probabilities for language generation.\n        # compute the probability of action\n        assert self.dynamic_prior, 'only supports dynamic prior'\n        p_dis     = self.Prior(states)\n        logPa     = T.sum(action * T.log(p_dis) + (1 - action) * T.log(1 - p_dis), axis=1)\n        context   = self.context_trans(action)\n        logPxa, count = self.decoder.build_decoder(Y, context, return_count=True)\n        return logPa, logPxa, count\n\n    def compile_sample(self):\n        \"\"\"\n        build the sampler function here <:::>\n        \"\"\"\n        # context vectors (as)\n        self.decoder.build_sampler()\n\n        logger.info(\"compiling the computational graph :: action sampler\")\n        if self.dynamic_prior:\n            states = T.matrix()\n            p_dis  = self.Prior(states)\n            u      = self.rng.uniform(p_dis.shape)\n        else:\n            p_dis  = self.Prior()\n            l      = T.iscalar()\n            u      = self.rng.uniform((l, p_dis.shape[-1]))\n\n        action  = T.cast(u <= p_dis, dtype=theano.config.floatX)\n\n        if self.dynamic_prior:\n            self.action_sampler = theano.function([states], action)\n        else:\n            self.action_sampler = theano.function([l], action)\n\n      
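  # [editor's sketch] logPa below is a Bernoulli log-likelihood,\n        # sum(a * log(p) + (1 - a) * log(1 - p)). A numpy check (hypothetical\n        # helper, illustration only):\n        def _np_bernoulli_logp(a_np, p_np):\n            return np.sum(a_np * np.log(p_np) + (1 - a_np) * np.log(1 - p_np), axis=1)\n\n      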
  # compute the action probability\n        logPa   = T.sum(action * T.log(p_dis) + (1 - action) * T.log(1 - p_dis), axis=1)\n        if self.dynamic_prior:\n            self.action_prob = theano.function([states, action], logPa)\n        else:\n            self.action_prob = theano.function([action], logPa)\n\n        action  = T.matrix()\n        logger.info(\"compiling the computational graph ::transform function::\")\n        self.transform = theano.function([action], self.context_trans(action))\n        logger.info(\"display functions compile done.\")\n\n    def compile_inference(self):\n        \"\"\"\n        build the hidden action prediction.\n        \"\"\"\n        inputs = T.imatrix()  # padded input word sequence (for training)\n\n        # encoding. (use backward encoding.)\n        encoded = self.encoder.build_encoder(inputs[:, ::-1])\n\n        # get Q(a|y) = sigmoid(.|Posterior * encoded)\n        q_dis   = self.Posterior(encoded)\n        p_dis   = self.Prior(inputs)\n\n        self.inference_ = theano.function([inputs], [encoded, q_dis, p_dis])\n        logger.info(\"inference function compile done.\")\n\n    def evaluate_(self, inputs):\n        \"\"\"\n        build the evaluation function for valid/testing\n        Note that we need multiple sampling for this!\n        \"\"\"\n        log_wks = []\n        count   = None\n        N       = self.config['eval_N']\n        L       = self.config['eval_repeats']\n\n        for _ in xrange(N):\n            log_wk, count = self.explore_(inputs, L)\n            log_wks.append(log_wk)\n\n        log_wk     = np.concatenate(log_wks, axis=1)\n        log_wk_sum = logSumExp(log_wk, axis=1, status='numpy')\n\n        nll        = np.mean(-(log_wk_sum - np.log(N * L)))\n        perplexity = np.exp(np.mean(-(log_wk_sum - np.log(N * L)) / count))\n\n        return nll, perplexity\n\n    \"\"\"\n    OLD CODE::  >>> It doesn't work !\n    \"\"\"\n    def compile_train_CE(self):\n        # compile the computation graph (use contrastive noise, for 1 sample here. )\n\n        \"\"\"\n        build the training function here <:::>\n        \"\"\"\n        # get input sentence (x)\n        inputs  = T.imatrix()  # padded input word sequence x (for training)\n        noises  = T.imatrix()  # padded noise word sequence y (it stands for another question.)\n        batch_size = inputs.shape[0]\n\n        \"\"\"\n        The Computational Flow.\n        \"\"\"\n        # encoding. 
(use backward encoding.)\n        encodex = self.encoder.build_encoder(inputs[:, ::-1])\n        encodey = self.encoder.build_encoder(noises[:, ::-1])\n\n        # get Q(a|y) = sigmoid(.|Posterior * encoded)\n        q_dis_x = self.Posterior(encodex)\n        q_dis_y = self.Posterior(encodey)\n\n        # use multiple samples\n        if self.config['repeats'] > 1:\n            L  = self.config['repeats']\n\n            # repeat mean, ln_var and targets.\n            func_r = lambda x: T.extra_ops.repeat(\n                                x[:, None, :], L,\n                                axis=1).reshape((x.shape[0] * L, x.shape[1]))\n            q_dis_x, q_dis_y, target \\\n                   = [func_r(x) for x in [q_dis_x, q_dis_y, inputs]]\n        else:\n            target = inputs\n            L  = 1\n\n        # sample actions\n        u = self.rng.uniform(q_dis_x.shape)\n        action  = T.cast(u <= q_dis_x, dtype=theano.config.floatX)\n\n        # compute the exact probability for actions (for data distribution)\n        logQax  = T.sum(action * T.log(q_dis_x) + (1 - action) * T.log(1 - q_dis_x), axis=1)\n\n        # compute the exact probability for actions (for noise distribution)\n        logQay  = T.sum(action * T.log(q_dis_y) + (1 - action) * T.log(1 - q_dis_y), axis=1)\n\n        # decoding.\n        context = self.context_trans(action)\n        logPxa, count = self.decoder.build_decoder(target, context, return_count=True)\n\n        # prior.\n        p_dis   = self.Prior(target)\n        logPa   = T.sum(action * T.log(p_dis) + (1 - action) * T.log(1 - p_dis), axis=1)\n\n        \"\"\"\n        Compute the weights\n        \"\"\"\n        # reshape\n        logQax  = logQax.reshape((batch_size, L))\n        logQay  = logQay.reshape((batch_size, L))\n        logPa   = logPa.reshape((batch_size, L))\n        logPxa  = logPxa.reshape((batch_size, L))\n\n        # P(x, a) = P(a) * P(x|a)\n        # logPx_a = logPa + logPxa\n        logPx_a    = logPa + logPxa\n\n        # normalizing the weights\n        log_wk     = logPx_a - logQax\n        log_bpk    = logPa - logQax\n\n        log_w_sum  = logSumExp(log_wk, axis=1)\n        log_bp_sum = logSumExp(log_bpk, axis=1)\n\n        log_wnk    = log_wk - log_w_sum\n        log_bpnk   = log_bpk - log_bp_sum\n\n        # unbiased log-likelihood estimator\n        logPx   = T.mean(log_w_sum - T.log(L))\n        perplexity = T.exp(-T.mean((log_w_sum - T.log(L)) / count))\n\n        \"\"\"\n        Compute the Loss function\n        \"\"\"\n        # loss    = weights * log [p(a)p(x|a)/q(a|x)]\n        weights = T.exp(log_wnk)\n        bp      = T.exp(log_bpnk)\n        bq      = 1. 
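\n        # [editor's sketch] the contrastive branch below reweights samples by how\n        # poorly the posterior separates data from noise: weightC = w * (1 - sigmoid(logC)).\n        # A numpy check (hypothetical helper, illustration only):\n        def _np_contrastive_weight(w_np, logC_np):\n            return w_np * (1. - 1. / (1. + np.exp(-logC_np)))\n        bq      = 1. 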
/ L\n        ess     = T.mean(1 / T.sum(weights ** 2, axis=1))\n\n        \"\"\"\n        Contrastive Estimation\n        \"\"\"\n        # lossQ   = -T.mean(T.sum(logQax * (weights - bq), axis=1))   # log q(a|x)\n        logC    = logQax - logQay\n        weightC = weights * (1 - T.nnet.sigmoid(logC))\n        lossQ   = -T.mean(T.sum(logC * weightC, axis=1))\n        # lossQT  = -T.mean(T.sum(T.log(T.nnet.sigmoid(logC)) * weights, axis=1))\n\n        # monitoring\n        self.monitor['action'] = logC\n\n        \"\"\"\n        Maximum-likelihood Estimation\n        \"\"\"\n        lossPa  = -T.mean(T.sum(logPa  * (weights - bp), axis=1))   # log p(a)\n        lossPxa = -T.mean(T.sum(logPxa * weights, axis=1))          # log p(x|a)\n        lossP   = lossPxa + lossPa\n        # loss    = lossQT + lossPa + lossPxa\n\n        updates_p = self.optimizer_p.get_updates(self.tparams['p'], [lossP, weights, bp])\n        updates_q = self.optimizer_q.get_updates(self.tparams['q'], [lossQ, weightC])\n        updates   = updates_p + updates_q\n\n        logger.info(\"compiling the compuational graph ::training function::\")\n        train_inputs  = [inputs, noises]\n\n        self.train_ce_ = theano.function(train_inputs,\n                                        [lossPa, lossPxa, lossQ, perplexity, ess],\n                                        updates=updates,\n                                        name='train_fun')\n\n        # add monitoring:\n        self._monitoring()\n\n        # compiling monitoring\n        self.compile_monitoring(train_inputs)\n\n        logger.info(\"pre-training functions compile done.\")\n\n\nclass HarX(Helmholtz):\n    \"\"\"\n    Another alternative I can think about is the Helmholtz Machine\n    It is trained using a Reweighted Wake Sleep Algorithm.\n    Reference:\n        Reweighted Wake-Sleep\n            http://arxiv.org/abs/1406.2751\n\n    We extend the original Helmholtz Machine to a recurrent way.\n    \"\"\"\n    def __init__(self,\n                 config, n_rng, rng,\n                 mode = 'Evaluation',\n                 dynamic_prior=False,\n                 ):\n        super(VAE, self).__init__(config, n_rng, rng)\n\n        # self.config = config\n        # self.n_rng = n_rng  # numpy random stream\n        # self.rng = rng  # Theano random stream\n        self.mode = mode\n        self.name = 'multitask_helmholtz'\n        self.tparams = dict()\n        self.dynamic_prior = dynamic_prior\n\n    def build_(self):\n        logger.info('Build Helmholtz Recurrent Neural Networks')\n\n        # backward encoder\n        self.encoder = Encoder(self.config, self.rng, prefix='enc')\n\n        # feedforward + hidden content decoder\n        self.decoder = Decoder(self.config, self.rng, prefix='dec',\n                                   embed=self.encoder.Embed\n                                   if self.config['shared_embed']\n                                   else None)\n\n        # The main difference between VAE and HM is that we can use\n        # a more flexible prior instead of Gaussian here.\n        # for example, we use a sigmoid prior here.\n\n        \"\"\"\n        Build the Sigmoid Layers\n        \"\"\"\n        # prior distribution (conditional distribution)\n        self.Prior    = Dense(\n            self.config['dec_hidden_dim'],\n            self.config['action_dim'],\n            activation='sigmoid',\n            name='prior_proj'\n        )\n\n        # Fake Posterior (Q-function)\n        if self.config['decposterior']:\n            
self.Posterior = Dense2(\n                self.config['enc_hidden_dim']\n                        if not self.config['bidirectional']\n                        else 2 * self.config['enc_hidden_dim'],\n                self.config['dec_hidden_dim'],\n                self.config['action_dim'],\n                activation='sigmoid',\n                name='posterior_proj'\n            )\n        else:\n            self.Posterior = Dense(\n                self.config['enc_hidden_dim']\n                        if not self.config['bidirectional']\n                        else 2 * self.config['enc_hidden_dim'],\n                self.config['action_dim'],\n                activation='sigmoid',\n                name='posterior_proj'\n            )\n\n        # Action transform to context\n        self.context_trans = Dense(\n            self.config['action_dim'],\n            self.config['dec_contxt_dim'],\n            activation='linear',\n            name=\"transform\"\n        )\n\n        # registration:\n        self._add(self.Posterior)\n        self._add(self.Prior)\n        self._add(self.context_trans)\n        self._add(self.encoder)\n        self._add(self.decoder)\n\n        # Q-layers:\n        self._add_tag(self.encoder, 'q')\n        self._add_tag(self.Posterior, 'q')\n\n        # P-layers:\n        self._add_tag(self.Prior, 'p')\n        self._add_tag(self.decoder, 'p')\n        self._add_tag(self.context_trans, 'p')\n\n        # objectives and optimizers\n        self.optimizer_p = optimizers.get(self.config['optimizer'], kwargs={'clipnorm': 5})\n        self.optimizer_q = optimizers.get(self.config['optimizer'], kwargs={'clipnorm': 5})\n\n        logger.info(\"create Helmholtz RECURRENT neural network. ok\")\n\n    def compile_(self, mode='train', contrastive=False):\n        # compile the computational graph.\n        # INFO: the parameters.\n        # mode: 'train'/ 'display'/ 'policy' / 'all'\n\n        ps = 'params: {\\n'\n        for p in self.params:\n            ps += '{0}: {1}\\n'.format(p.name, p.eval().shape)\n        ps += '}.'\n        logger.info(ps)\n\n        param_num = np.sum([np.prod(p.shape.eval()) for p in self.params])\n        logger.info(\"total number of the parameters of the model: {}\".format(param_num))\n\n        if mode == 'train' or mode == 'all':\n            self.compile_train()\n\n        if mode == 'display' or mode == 'all':\n            self.compile_sample()\n\n        if mode == 'inference' or mode == 'all':\n            self.compile_inference()\n\n    \"\"\"\n    Training\n    \"\"\"\n    def compile_train(self):\n        \"\"\"\n        build the training function here <:::>\n        \"\"\"\n        # get input sentence (x)\n        inputs     = T.imatrix()  # padded input word sequence (for training)\n        batch_size = inputs.shape[0]\n\n        logger.info(\n            \"\"\"\n            The Computational Flow. ---> In a recurrent fashion\n\n            [= v =] <:::\n            Inference-Generation in one scan\n\n            >>>> Encoding without hidden variable. 
(use backward encoding.)\n            \"\"\"\n        )\n        embeded, mask \\\n                   = self.decoder.Embed(inputs, True)  # (nb_samples, max_len, embedding_dim)\n        encoded    = self.encoder.build_encoder(inputs[:, ::-1], return_sequence=True)[:, ::-1, :]\n        count      = T.cast(T.sum(mask, axis=1), dtype=theano.config.floatX)[:, None]  # (nb_samples, 1)\n\n        logger.info(\n            \"\"\"\n            >>>> Repeat\n            \"\"\"\n        )\n        L          = T.iscalar('repeats')              # self.config['repeats']\n\n        def _repeat(x, dimshuffle=True):\n            if x.ndim == 3:\n                y = T.extra_ops.repeat(x[:, None, :, :], L, axis=1).reshape((-1, x.shape[1], x.shape[2]))\n                if dimshuffle:\n                    y = y.dimshuffle(1, 0, 2)\n            else:\n                y = T.extra_ops.repeat(x[:, None, :], L, axis=1).reshape((-1, x.shape[1]))\n                if dimshuffle:\n                    y = y.dimshuffle(1, 0)\n            return y\n\n        embeded    = _repeat(embeded)                  # (max_len, nb_samples * L, embedding_dim)\n        encoded    = _repeat(encoded)                  # (max_len, nb_samples * L, enc_hidden_dim)\n        target     = _repeat(inputs, False)            # (nb_samples * L, max_len)\n        mask       = _repeat(mask, False)              # (nb_samples * L, max_len)\n        init_dec   = T.zeros((encoded.shape[1],\n                              self.config['dec_hidden_dim']),\n                              dtype='float32')      # zero initialization\n        uniform    = self.rng.uniform((embeded.shape[0],\n                                       embeded.shape[1],\n                                       self.config['action_dim'])) # uniform distribution pre-sampled.\n\n        logger.info(\n            \"\"\"\n            >>>> Recurrence\n            \"\"\"\n        )\n\n        def _recurrence(embed_t, enc_t, u_t, dec_tm1):\n            \"\"\"\n            x_t:   (nb_samples, dec_embedd_dim)\n            enc_t: (nb_samples, enc_hidden_dim)\n            dec_t: (nb_samples, dec_hidden_dim)\n            \"\"\"\n            # get q(z_t|dec_t, enc_t);  sample z_t; compute the Posterior (inference) prob.\n            if self.config['decposterior']:\n                q_dis_t   = self.Posterior(enc_t, dec_tm1)\n            else:\n                q_dis_t   = self.Posterior(enc_t)\n\n            z_t       = T.cast(u_t <= q_dis_t, dtype='float32')\n            log_qzx_t = T.sum(z_t * T.log(q_dis_t) + (1 - z_t) * T.log(1 - q_dis_t), axis=1)  # (nb_samples * L, )\n\n            # compute the prior probability\n            p_dis_t   = self.Prior(dec_tm1)\n            log_pz0_t = T.sum(z_t * T.log(p_dis_t) + (1 - z_t) * T.log(1 - p_dis_t), axis=1)\n\n            # compute the decoding probability\n            context_t = self.context_trans(z_t)\n            readout_t = self.decoder.hidden_readout(dec_tm1) + self.decoder.context_readout(context_t)\n            for l in self.decoder.output_nonlinear:\n                readout_t = l(readout_t)\n            pxz_dis_t = self.decoder.output(readout_t)\n\n            # compute recurrence\n            dec_t   = self.decoder.RNN(embed_t, C=context_t, init_h=dec_tm1, one_step=True)\n\n            return dec_t, z_t, log_qzx_t, log_pz0_t, pxz_dis_t\n\n        # (max_len, nb_samples, ?)\n        outputs, _ = theano.scan(\n            _recurrence,\n            sequences=[embeded, encoded, uniform],\n            outputs_info=[init_dec, None, None, None, 
None])\n        _, z, log_qzx, log_pz0, pxz_dis = outputs\n\n        # summary of scan/ dimshuffle/ reshape\n        def _grab_prob(probs, x):\n            assert probs.ndim == 3\n            b_size     = probs.shape[0]\n            max_len    = probs.shape[1]\n            vocab_size = probs.shape[2]\n            probs      = probs.reshape((b_size * max_len, vocab_size))\n            return probs[T.arange(b_size * max_len), x.flatten(1)].reshape(x.shape)  # advanced indexing\n\n        log_qzx    = T.sum(log_qzx.dimshuffle(1, 0) * mask, axis=-1).reshape((batch_size, L))\n        log_pz0    = T.sum(log_pz0.dimshuffle(1, 0) * mask, axis=-1).reshape((batch_size, L))\n        log_pxz    = T.sum(T.log(_grab_prob(pxz_dis.dimshuffle(1, 0, 2), target)) * mask, axis=-1).reshape((batch_size, L))\n\n        logger.info(\n            \"\"\"\n            >>>> Compute the weights [+ _ =]\n            \"\"\"\n        )\n        log_pxnz   = log_pz0  + log_pxz    # log p(X, Z)\n        log_wk     = log_pxnz - log_qzx    # log[p(X, Z)/q(Z|X)]\n        log_bpk    = log_pz0  - log_qzx    # log[p(Z)/q(Z|X)]\n\n        log_w_sum  = logSumExp(log_wk, axis=1)\n        log_bp_sum = logSumExp(log_bpk, axis=1)\n\n        log_wnk    = log_wk - log_w_sum\n        log_bpnk   = log_bpk - log_bp_sum\n\n        # unbiased log-likelihood estimator [+ _ =]\n        # Finally come to this place\n        nll        = T.mean(-(log_w_sum - T.log(L)))\n        perplexity = T.exp(T.mean(-(log_w_sum - T.log(L)) / count))\n\n        # perplexity = T.exp(-T.mean((log_w_sum - T.log(L)) / count))\n\n        logger.info(\n            \"\"\"\n            >>>> Compute the gradients [+ _ =]\n            \"\"\"\n        )\n        # loss    = weights * log [p(a)p(x|a)/q(a|x)]\n        weights = T.exp(log_wnk)\n        bp      = T.exp(log_bpnk)\n        bq      = 1. 
/ L\n        ess     = T.mean(1 / T.sum(weights ** 2, axis=1))\n\n        # monitoring\n        self.monitor['hidden state'] = z\n        if self.config['variant_control']:\n            lossQ   = -T.mean(T.sum(log_qzx * (weights - bq), axis=1))   # log q(z|x)\n            lossPa  = -T.mean(T.sum(log_pz0 * (weights - bp), axis=1))   # log p(z)\n            lossPxa = -T.mean(T.sum(log_pxz * weights, axis=1))          # log p(x|z)\n            lossP   = lossPxa + lossPa\n\n            # L2 regularization\n            lossP  += 0.0001 * T.sum([T.sum(p**2) for p in self.tparams['p']])\n            lossQ  += 0.0001 * T.sum([T.sum(p**2) for p in self.tparams['q']])\n\n            updates_p = self.optimizer_p.get_updates(self.tparams['p'], [lossP, weights, bp])\n            updates_q = self.optimizer_q.get_updates(self.tparams['q'], [lossQ, weights])\n        else:\n            lossQ   = -T.mean(T.sum(log_qzx * weights, axis=1))   # log q(a|x)\n            lossPa  = -T.mean(T.sum(log_pz0 * weights, axis=1))   # log p(a)\n            lossPxa = -T.mean(T.sum(log_pxz * weights, axis=1))   # log p(x|a)\n            lossP   = lossPxa + lossPa\n\n            # L2 regularization\n            lossP  += 0.0001 * T.sum([T.sum(p**2) for p in self.tparams['p']])\n            lossQ  += 0.0001 * T.sum([T.sum(p**2) for p in self.tparams['q']])\n\n            updates_p = self.optimizer_p.get_updates(self.tparams['p'], [lossP, weights])\n            updates_q = self.optimizer_q.get_updates(self.tparams['q'], [lossQ, weights])\n\n        updates   = updates_p + updates_q\n        logger.info(\"compiling the computational graph:: >__< ::training function::\")\n        train_inputs = [inputs] + [theano.Param(L, default=10)]\n\n        self.train_ = theano.function(train_inputs,\n                                      [lossPa, lossPxa, lossQ, perplexity, nll],\n                                      updates=updates,\n                                      name='train_fun')\n\n        logger.info(\"compile the computational graph:: >__< :: explore function\")\n        self.explore_ = theano.function(train_inputs,\n                                        [log_wk, count],\n                                        name='explore_fun')\n\n        # add monitoring:\n        self._monitoring()\n\n        # compiling monitoring\n        self.compile_monitoring(train_inputs)\n        logger.info(\"pre-training functions compile done.\")\n\n    def generate_(self, context=None, max_len=None, mode='display'):\n        # overwrite the RNNLM generator as there are hidden variables every time step\n        args = dict(k=self.config['sample_beam'],\n                    maxlen=self.config['max_len'] if not max_len else max_len,\n                    stochastic=self.config['sample_stoch'] if mode == 'display' else None,\n                    argmax=self.config['sample_argmax'] if mode == 'display' else None)\n\n\nclass THarX(Helmholtz):\n    \"\"\"\n    An alternative is the Helmholtz Machine,\n    trained with the Reweighted Wake-Sleep algorithm.\n    Reference:\n        Reweighted Wake-Sleep\n            http://arxiv.org/abs/1406.2751\n\n    We extend the original Helmholtz Machine in a recurrent fashion;\n    this variant uses categorical (softmax) latent actions.\n    \"\"\"\n    def __init__(self,\n                 config, n_rng, rng,\n                 mode = 'Evaluation',\n                 dynamic_prior=False,\n                 ):\n        super(THarX, self).__init__(config, n_rng, rng)\n\n        # self.config = config\n        # self.n_rng = n_rng  # numpy random 
stream\n        # self.rng = rng  # Theano random stream\n        self.mode = mode\n        self.name = 'multitask_helmholtz'\n        self.tparams = dict()\n        self.dynamic_prior = dynamic_prior\n\n    def build_(self):\n        logger.info('Build Helmholtz Recurrent Neural Networks')\n\n        # backward encoder\n        self.encoder = Encoder(self.config, self.rng, prefix='enc')\n\n        # feedforward + hidden content decoder\n        self.decoder = Decoder(self.config, self.rng, prefix='dec',\n                                   embed=self.encoder.Embed\n                                   if self.config['shared_embed']\n                                   else None)\n\n        # The main difference between VAE and HM is that we can use\n        # a more flexible prior instead of a Gaussian here;\n        # for example, we use a softmax prior here.\n\n        \"\"\"\n        Build the Softmax Layers\n        \"\"\"\n        # prior distribution (conditional distribution)\n        self.Prior    = Dense(\n            self.config['dec_hidden_dim'],\n            self.config['action_dim'],\n            activation='softmax',\n            name='prior_proj'\n        )\n\n        # Fake Posterior (Q-function)\n        if self.config['decposterior']:\n            self.Posterior = Dense2(\n                self.config['enc_hidden_dim']\n                        if not self.config['bidirectional']\n                        else 2 * self.config['enc_hidden_dim'],\n                self.config['dec_hidden_dim'],\n                self.config['action_dim'],\n                activation='softmax',\n                name='posterior_proj'\n            )\n        else:\n            self.Posterior = Dense(\n                self.config['enc_hidden_dim']\n                        if not self.config['bidirectional']\n                        else 2 * self.config['enc_hidden_dim'],\n                self.config['action_dim'],\n                activation='softmax',\n                name='posterior_proj'\n            )\n\n        # Action transform to context\n        self.context_trans = Dense(\n            self.config['action_dim'],\n            self.config['dec_contxt_dim'],\n            activation='linear',\n            name=\"transform\"\n        )\n\n        # registration:\n        self._add(self.Posterior)\n        self._add(self.Prior)\n        self._add(self.context_trans)\n        self._add(self.encoder)\n        self._add(self.decoder)\n\n        # Q-layers:\n        self._add_tag(self.encoder, 'q')\n        self._add_tag(self.Posterior, 'q')\n\n        # P-layers:\n        self._add_tag(self.Prior, 'p')\n        self._add_tag(self.decoder, 'p')\n        self._add_tag(self.context_trans, 'p')\n\n        # objectives and optimizers\n        self.optimizer_p = optimizers.get(self.config['optimizer'], kwargs={'clipnorm': 5})\n        self.optimizer_q = optimizers.get(self.config['optimizer'], kwargs={'clipnorm': 5})\n\n        logger.info(\"create Helmholtz RECURRENT neural network. 
ok\")\n\n    def compile_(self, mode='train', contrastive=False):\n        # compile the computational graph.\n        # INFO: the parameters.\n        # mode: 'train'/ 'display'/ 'policy' / 'all'\n\n        ps = 'params: {\\n'\n        for p in self.params:\n            ps += '{0}: {1}\\n'.format(p.name, p.eval().shape)\n        ps += '}.'\n        logger.info(ps)\n\n        param_num = np.sum([np.prod(p.shape.eval()) for p in self.params])\n        logger.info(\"total number of the parameters of the model: {}\".format(param_num))\n\n        if mode == 'train' or mode == 'all':\n            self.compile_train()\n\n        if mode == 'display' or mode == 'all':\n            self.compile_sample()\n\n        if mode == 'inference' or mode == 'all':\n            self.compile_inference()\n\n    \"\"\"\n    Training\n    \"\"\"\n    def compile_train(self):\n        \"\"\"\n        build the training function here <:::>\n        \"\"\"\n        # get input sentence (x)\n        inputs     = T.imatrix('inputs')  # padded input word sequence (for training)\n        batch_size = inputs.shape[0]\n\n        logger.info(\n            \"\"\"\n            The Computational Flow. ---> In a recurrent fashion\n\n            [= v =] <:::\n            Inference-Generation in one scan\n\n            >>>> Encoding without hidden variable. (use backward encoding.)\n            \"\"\"\n        )\n        embeded, mask \\\n                   = self.decoder.Embed(inputs, True)  # (nb_samples, max_len, embedding_dim)\n        encoded    = self.encoder.build_encoder(inputs[:, ::-1], return_sequence=True)[:, ::-1, :]\n        count      = T.cast(T.sum(mask, axis=1), dtype=theano.config.floatX)[:, None]  # (nb_samples,)\n\n        logger.info(\n            \"\"\"\n            >>>> Repeat\n            \"\"\"\n        )\n        L          = T.iscalar('repeats')              # self.config['repeats']\n\n        def _repeat(x, dimshuffle=True):\n            if x.ndim == 3:\n                y = T.extra_ops.repeat(x[:, None, :, :], L, axis=1).reshape((-1, x.shape[1], x.shape[2]))\n                if dimshuffle:\n                    y = y.dimshuffle(1, 0, 2)\n            else:\n                y = T.extra_ops.repeat(x[:, None, :], L, axis=1).reshape((-1, x.shape[1]))\n                if dimshuffle:\n                    y = y.dimshuffle(1, 0)\n            return y\n\n        embeded    = _repeat(embeded)                  # (max_len, nb_samples * L, embedding_dim)\n        encoded    = _repeat(encoded)                  # (max_len, nb_samples * L, enc_hidden_dim)\n        target     = _repeat(inputs, False)            # (nb_samples * L, max_len)\n        mask       = _repeat(mask, False)              # (nb_samples * L, max_len)\n        init_dec   = T.zeros((encoded.shape[1],\n                              self.config['dec_hidden_dim']),\n                              dtype='float32')      # zero initialization\n        # uniform    = self.rng.uniform((embeded.shape[0],\n        #                                embeded.shape[1],\n        #                                self.config['action_dim'])) # uniform dirstribution pre-sampled.\n\n        logger.info(\n            \"\"\"\n            >>>> Recurrence\n            \"\"\"\n        )\n\n        def _recurrence(embed_t, enc_t, dec_tm1):\n            \"\"\"\n            x_t:   (nb_samples, dec_embedd_dim)\n            enc_t: (nb_samples, enc_hidden_dim)\n            dec_t: (nb_samples, dec_hidden_dim)\n            \"\"\"\n            # get q(z_t|dec_t, enc_t);  sample z_t; 
compute the Posterior (inference) prob.\n            if self.config['decposterior']:\n                q_dis_t   = self.Posterior(enc_t, dec_tm1)\n            else:\n                q_dis_t   = self.Posterior(enc_t)\n\n            z_t       = self.rng.multinomial(pvals=q_dis_t, dtype='float32')\n            log_qzx_t = T.sum(T.log(q_dis_t) * z_t, axis=1)\n            # log_qzx_t = T.log(q_dis_t[T.arange(q_dis_t.shape[0]), z_t])\n\n            # z_t       = T.cast(u_t <= q_dis_t, dtype='float32')\n            # log_qzx_t = T.sum(z_t * T.log(q_dis_t) + (1 - z_t) * T.log(1 - q_dis_t), axis=1)  # (nb_samples * L, )\n\n            # compute the prior probability\n            p_dis_t   = self.Prior(dec_tm1)\n            log_pz0_t = T.sum(T.log(p_dis_t) * z_t, axis=1)\n            # log_pz0_t = T.log(p_dis_t[T.arange(p_dis_t.shape[0]), z_t])\n            # log_pz0_t = T.sum(z_t * T.log(p_dis_t) + (1 - z_t) * T.log(1 - p_dis_t), axis=1)\n\n            # compute the decoding probability\n            context_t = self.context_trans(z_t)\n            readout_t = self.decoder.hidden_readout(dec_tm1) + self.decoder.context_readout(context_t)\n            for l in self.decoder.output_nonlinear:\n                readout_t = l(readout_t)\n            pxz_dis_t = self.decoder.output(readout_t)\n\n            # compute recurrence\n            dec_t   = self.decoder.RNN(embed_t, C=context_t, init_h=dec_tm1, one_step=True)\n\n            return dec_t, z_t, log_qzx_t, log_pz0_t, pxz_dis_t\n\n        # (max_len, nb_samples, ?)\n        outputs, scan_update = theano.scan(\n            _recurrence,\n            sequences=[embeded, encoded],\n            outputs_info=[init_dec, None, None, None, None])\n        _, z, log_qzx, log_pz0, pxz_dis = outputs\n\n        # summary of scan/ dimshuffle/ reshape\n        def _grab_prob(probs, x):\n            assert probs.ndim == 3\n            b_size     = probs.shape[0]\n            max_len    = probs.shape[1]\n            vocab_size = probs.shape[2]\n            probs      = probs.reshape((b_size * max_len, vocab_size))\n            return probs[T.arange(b_size * max_len), x.flatten(1)].reshape(x.shape)  # advanced indexing\n\n        log_qzx    = T.sum(log_qzx.dimshuffle(1, 0) * mask, axis=-1).reshape((batch_size, L))\n        log_pz0    = T.sum(log_pz0.dimshuffle(1, 0) * mask, axis=-1).reshape((batch_size, L))\n        log_pxz    = T.sum(T.log(_grab_prob(pxz_dis.dimshuffle(1, 0, 2), target)) * mask, axis=-1).reshape((batch_size, L))\n\n        logger.info(\n            \"\"\"\n            >>>> Compute the weights [+ _ =]\n            \"\"\"\n        )\n        log_pxnz   = log_pz0  + log_pxz    # log p(X, Z)\n        log_wk     = log_pxnz - log_qzx    # log[p(X, Z)/q(Z|X)]\n        log_bpk    = log_pz0  - log_qzx    # log[p(Z)/q(Z|X)]\n\n        log_w_sum  = logSumExp(log_wk, axis=1)\n        log_bp_sum = logSumExp(log_bpk, axis=1)\n\n        log_wnk    = log_wk - log_w_sum\n        log_bpnk   = log_bpk - log_bp_sum\n\n        # unbiased log-likelihood estimator [+ _ =]\n        # Finally come to this place\n        nll        = T.mean(-(log_w_sum - T.log(L)))\n        perplexity = T.exp(T.mean(-(log_w_sum - T.log(L)) / count))\n\n        # perplexity = T.exp(-T.mean((log_w_sum - T.log(L)) / count))\n\n        logger.info(\n            \"\"\"\n            >>>> Compute the gradients [+ _ =]\n            \"\"\"\n        )\n        # loss    = weights * log [p(a)p(x|a)/q(a|x)]\n        weights = T.exp(log_wnk)\n        bp      = T.exp(log_bpnk)\n        bq      = 1. 
/ L\n        ess     = T.mean(1 / T.sum(weights ** 2, axis=1))\n\n        # monitoring\n        self.monitor['hidden state'] = z\n        if self.config['variant_control']:\n            lossQ   = -T.mean(T.sum(log_qzx * (weights - bq), axis=1))   # log q(z|x)\n            lossPa  = -T.mean(T.sum(log_pz0 * (weights - bp), axis=1))   # log p(z)\n            lossPxa = -T.mean(T.sum(log_pxz * weights, axis=1))          # log p(x|z)\n            lossP   = lossPxa + lossPa\n\n            # L2 regularization\n            lossP  += 0.0001 * T.sum([T.sum(p**2) for p in self.tparams['p']])\n            lossQ  += 0.0001 * T.sum([T.sum(p**2) for p in self.tparams['q']])\n\n            updates_p = self.optimizer_p.get_updates(self.tparams['p'], [lossP, weights, bp])\n            updates_q = self.optimizer_q.get_updates(self.tparams['q'], [lossQ, weights])\n        else:\n            lossQ   = -T.mean(T.sum(log_qzx * weights, axis=1))   # log q(a|x)\n            lossPa  = -T.mean(T.sum(log_pz0 * weights, axis=1))   # log p(a)\n            lossPxa = -T.mean(T.sum(log_pxz * weights, axis=1))   # log p(x|a)\n            lossP   = lossPxa + lossPa\n\n            # L2 regularization\n            lossP  += 0.0001 * T.sum([T.sum(p**2) for p in self.tparams['p']])\n            lossQ  += 0.0001 * T.sum([T.sum(p**2) for p in self.tparams['q']])\n\n            updates_p = self.optimizer_p.get_updates(self.tparams['p'], [lossP, weights])\n            updates_q = self.optimizer_q.get_updates(self.tparams['q'], [lossQ, weights])\n\n        updates   = updates_p + updates_q + scan_update\n        logger.info(\"compiling the computational graph:: >__< ::training function::\")\n        train_inputs = [inputs] + [theano.Param(L, default=10)]\n\n        self.train_ = theano.function(train_inputs,\n                                      [lossPa, lossPxa, lossQ, perplexity, nll],\n                                      updates=updates,\n                                      name='train_fun')\n\n        logger.info(\"compile the computational graph:: >__< :: explore function\")\n        self.explore_ = theano.function(train_inputs,\n                                        [log_wk, count],\n                                        updates=scan_update,\n                                        name='explore_fun')\n\n        # add monitoring:\n        self._monitoring()\n\n        # compiling monitoring\n        self.compile_monitoring(train_inputs, updates=scan_update)\n        logger.info(\"pre-training functions compile done.\")\n\n    def generate_(self, context=None, max_len=None, mode='display'):\n        # overwrite the RNNLM generator as there are hidden variables every time step\n        args = dict(k=self.config['sample_beam'],\n                    maxlen=self.config['max_len'] if not max_len else max_len,\n                    stochastic=self.config['sample_stoch'] if mode == 'display' else None,\n                    argmax=self.config['sample_argmax'] if mode == 'display' else None)\n\n\nclass NVTM(Helmholtz):\n    \"\"\"\n    Neural Variational Topic Models.\n    We train with Neural Variational Inference and Learning (NVIL)\n    instead of the Helmholtz Machine (Reweighted Wake-Sleep).\n    \"\"\"\n    def __init__(self,\n                 config, n_rng, rng,\n                 mode = 'Evaluation',\n                 dynamic_prior=False,\n                 ):\n        super(NVTM, self).__init__(config, n_rng, rng)\n\n        self.mode = mode\n        self.name = 'neural_variational'\n        
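# NOTE (added): tparams is filled by _add_tag in build_ below; it groups\n        # parameters into 'p' (generator: prior, decoder, context transform),\n        # 'q' (inference: encoder, posterior) and 'l' (baseline estimator),\n        # each updated by its own optimizer.\n        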
self.tparams = dict()\n        self.dynamic_prior = dynamic_prior\n\n    def build_(self):\n        logger.info('Build Helmholtz Recurrent Neural Networks')\n\n        # backward encoder\n        self.encoder = Encoder(self.config, self.rng, prefix='enc')\n\n        # feedforward + hidden content decoder\n        self.decoder = Decoder(self.config, self.rng, prefix='dec',\n                                   embed=self.encoder.Embed\n                                   if self.config['shared_embed']\n                                   else None)\n\n        # The main difference between VAE and NVIL is that we can use\n        # a more flexible prior instead of Gaussian here.\n        # for example, we use a softmax prior here.\n\n        \"\"\"\n        Build the Prior Layer (Conditional Prior)\n        \"\"\"\n        # prior distribution (conditional distribution)\n        self.Prior    = Dense(\n            self.config['dec_hidden_dim'],\n            self.config['action_dim'],\n            activation='softmax',\n            name='prior_proj'\n        )\n\n        if self.config['decposterior']:   # we use both enc/dec net as input.\n\n            # Variational Posterior (Q-function)\n            self.Posterior = Dense2(\n                self.config['enc_hidden_dim']\n                        if not self.config['bidirectional']\n                        else 2 * self.config['enc_hidden_dim'],\n                self.config['dec_hidden_dim'],\n                self.config['action_dim'],\n                activation='softmax',\n                name='posterior_proj'\n            )\n\n            # Baseline Estimator\n            self.C_lambda1 = Dense2(\n                self.config['enc_hidden_dim']\n                        if not self.config['bidirectional']\n                        else 2 * self.config['enc_hidden_dim'],\n                self.config['dec_hidden_dim'],\n                100,\n                activation='tanh',\n                name='baseline-1')\n            self.C_lambda2 = Dense(100, 1, activation='linear',\n                                   name='baseline-2')\n        else:\n\n            # Variational Posterior\n            self.Posterior = Dense(\n                self.config['enc_hidden_dim']\n                        if not self.config['bidirectional']\n                        else 2 * self.config['enc_hidden_dim'],\n                self.config['action_dim'],\n                activation='softmax',\n                name='posterior_proj'\n            )\n\n            # Baseline Estimator\n            self.C_lambda1 = Dense(\n                self.config['enc_hidden_dim']\n                        if not self.config['bidirectional']\n                        else 2 * self.config['enc_hidden_dim'],\n                100,\n                activation='tanh',\n                name='baseline-1')\n            self.C_lambda2 = Dense(100, 1, activation='linear',\n                                   name='baseline-2')\n\n        # Action transform to context\n        self.context_trans = Dense(\n            self.config['action_dim'],\n            self.config['dec_contxt_dim'],\n            activation='linear',\n            name=\"transform\"\n        )\n\n        # registration:\n        self._add(self.Posterior)\n        self._add(self.Prior)\n        self._add(self.context_trans)\n        self._add(self.C_lambda1)\n        self._add(self.C_lambda2)\n\n        self._add(self.encoder)\n        self._add(self.decoder)\n\n        # Q-layers:\n        self._add_tag(self.encoder, 'q')\n        
self._add_tag(self.Posterior, 'q')\n\n        # P-layers:\n        self._add_tag(self.Prior, 'p')\n        self._add_tag(self.decoder, 'p')\n        self._add_tag(self.context_trans, 'p')\n\n        # Lambda-layers\n        self._add_tag(self.C_lambda1, 'l')\n        self._add_tag(self.C_lambda2, 'l')\n\n        # c/v\n        self.c = shared_scalar(0., dtype='float32')\n        self.v = shared_scalar(1., dtype='float32')\n\n        # objectives and optimizers\n        self.optimizer_p = optimizers.get(self.config['optimizer'], kwargs={'clipnorm': 5})\n        self.optimizer_q = optimizers.get(self.config['optimizer'], kwargs={'clipnorm': 5})\n        self.optimizer_l = optimizers.get(self.config['optimizer'], kwargs={'clipnorm': 5})\n\n        logger.info(\"create Neural Variational Topic Network. ok\")\n\n    def compile_(self, mode='train', contrastive=False):\n        # compile the computational graph.\n        # INFO: the parameters.\n        # mode: 'train'/ 'display'/ 'policy' / 'all'\n\n        ps = 'params: {\\n'\n        for p in self.params:\n            ps += '{0}: {1}\\n'.format(p.name, p.eval().shape)\n        ps += '}.'\n        logger.info(ps)\n\n        param_num = np.sum([np.prod(p.shape.eval()) for p in self.params])\n        logger.info(\"total number of the parameters of the model: {}\".format(param_num))\n\n        if mode == 'train' or mode == 'all':\n            self.compile_train()\n\n        if mode == 'display' or mode == 'all':\n            self.compile_sample()\n\n        if mode == 'inference' or mode == 'all':\n            self.compile_inference()\n\n    \"\"\"\n    Training\n    \"\"\"\n    def compile_train(self):\n        \"\"\"\n        build the training function here <:::>\n        \"\"\"\n        # get input sentence (x)\n        inputs     = T.imatrix('inputs')  # padded input word sequence (for training)\n        batch_size = inputs.shape[0]\n\n        logger.info(\n            \"\"\"\n            The Computational Flow. ---> In a recurrent fashion\n\n            [= v =] <:::\n            Inference-Generation in one scan\n\n            >>>> Encoding without hidden variable. 
(use backward encoding.)\n            \"\"\"\n        )\n        embeded, mask \\\n                   = self.decoder.Embed(inputs, True)  # (nb_samples, max_len, embedding_dim)\n        mask       = T.cast(mask, dtype='float32')\n\n        encoded    = self.encoder.build_encoder(inputs[:, ::-1], return_sequence=True)[:, ::-1, :]\n\n        L          = T.iscalar('repeats')              # self.config['repeats']\n\n        def _repeat(x, dimshuffle=True):\n            if x.ndim == 3:\n                y = T.extra_ops.repeat(x[:, None, :, :], L, axis=1).reshape((-1, x.shape[1], x.shape[2]))\n                if dimshuffle:\n                    y = y.dimshuffle(1, 0, 2)\n            else:\n                y = T.extra_ops.repeat(x[:, None, :], L, axis=1).reshape((-1, x.shape[1]))\n                if dimshuffle:\n                    y = y.dimshuffle(1, 0)\n            return y\n\n        embeded    = _repeat(embeded)                  # (max_len, nb_samples * L, embedding_dim)\n        encoded    = _repeat(encoded)                  # (max_len, nb_samples * L, enc_hidden_dim)\n        target     = _repeat(inputs, False)            # (nb_samples * L, max_len)\n        mask       = _repeat(mask, False)\n        count      = T.cast(T.sum(mask, axis=1), dtype=theano.config.floatX)[:, None]  # (nb_samples,)\n\n        init_dec   = T.zeros((encoded.shape[1],\n                              self.config['dec_hidden_dim']),\n                              dtype='float32')         # zero initialization\n\n        logger.info(\n            \"\"\"\n            >>>> Recurrence\n            \"\"\"\n        )\n\n        def _recurrence(embed_t, enc_t, dec_tm1):\n            \"\"\"\n            x_t:   (nb_samples, dec_embedd_dim)\n            enc_t: (nb_samples, enc_hidden_dim)\n            dec_t: (nb_samples, dec_hidden_dim)\n            \"\"\"\n            # get q(z_t|dec_t, enc_t);  sample z_t;\n            # compute the Posterior (inference) prob.\n            # compute the baseline estimator\n            if self.config['decposterior']:\n                q_dis_t   = self.Posterior(enc_t, dec_tm1)\n                c_lmd_t   = self.C_lambda2(self.C_lambda1(enc_t, dec_tm1)).flatten(1)\n\n            else:\n                q_dis_t   = self.Posterior(enc_t)\n                c_lmd_t   = self.C_lambda2(self.C_lambda1(enc_t)).flatten(1)\n\n            # sampling\n            z_t       = self.rng.multinomial(pvals=q_dis_t, dtype='float32')\n            log_qzx_t = T.sum(T.log(q_dis_t) * z_t, axis=1)\n\n            # compute the prior probability\n            p_dis_t   = self.Prior(dec_tm1)\n            log_pz0_t = T.sum(T.log(p_dis_t) * z_t, axis=1)\n\n            # compute the decoding probability\n            context_t = self.context_trans(z_t)\n            readout_t = self.decoder.hidden_readout(dec_tm1) + self.decoder.context_readout(context_t)\n            for l in self.decoder.output_nonlinear:\n                readout_t = l(readout_t)\n            pxz_dis_t = self.decoder.output(readout_t)\n\n            # compute recurrence\n            dec_t   = self.decoder.RNN(embed_t, C=context_t, init_h=dec_tm1, one_step=True)\n\n            return dec_t, z_t, log_qzx_t, log_pz0_t, pxz_dis_t, c_lmd_t\n\n        # (max_len, nb_samples, ?)\n        outputs, scan_update = theano.scan(\n            _recurrence,\n            sequences=[embeded, encoded],\n            outputs_info=[init_dec, None, None, None, None, None])\n        _, z, log_qzx, log_pz0, pxz_dis, c_lmd = outputs\n\n        # summary of scan/ dimshuffle/ reshape\n    
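    # (added sketch, not in the original) _grab_prob gathers, for every\n        # position, the probability assigned to the target token via advanced\n        # indexing. A minimal numpy analogue, assuming probs of shape\n        # (batch, len, vocab) and integer targets x of shape (batch, len):\n        #   flat   = probs.reshape(-1, probs.shape[-1])\n        #   picked = flat[np.arange(flat.shape[0]), x.flatten()].reshape(x.shape)\n    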
    def _grab_prob(probs, x):\n            assert probs.ndim == 3\n            b_size     = probs.shape[0]\n            max_len    = probs.shape[1]\n            vocab_size = probs.shape[2]\n            probs      = probs.reshape((b_size * max_len, vocab_size))\n            return probs[T.arange(b_size * max_len), x.flatten(1)].reshape(x.shape)  # advanced indexing\n\n        logger.info(\n            \"\"\"\n            >>>> Compute the weights [+ _ =]\n            \"\"\"\n        )\n        # log Q/P and C\n        log_qzx    = log_qzx.dimshuffle(1, 0) * mask\n        log_pz0    = log_pz0.dimshuffle(1, 0) * mask\n        log_pxz    = T.log(_grab_prob(pxz_dis.dimshuffle(1, 0, 2), target)) * mask\n        c_lambda   = c_lmd.dimshuffle(1, 0) * mask\n\n        Lb         = T.sum(log_pz0 + log_pxz - log_qzx, axis=-1)   # lower bound\n        l_lambda   = log_pz0 + log_pxz - log_qzx - c_lambda\n\n        alpha      = T.cast(0.0, dtype='float32')\n        numel      = T.sum(mask)\n\n        cb         = T.sum(l_lambda) / numel\n        vb         = T.sum(l_lambda ** 2) / T.sum(mask) - cb ** 2\n        c          = self.c * alpha + (1 - alpha) * cb  # T.cast(cb, dtype='float32')\n        v          = self.v * alpha + (1 - alpha) * vb  # T.cast(vb, dtype='float32')\n\n        l_normal   = (l_lambda - c) / T.maximum(1., T.sqrt(v)) * mask\n        l_base     = T.mean(T.sum(l_normal, axis=1))\n        nll        = T.mean(-Lb)                 # negative variational lower-bound\n        perplexity = T.exp(T.mean(-Lb[:, None] / count))  # perplexity of lower-bound\n\n        logger.info(\n            \"\"\"\n            >>>> Compute the gradients [+ _ =]\n            \"\"\"\n        )\n\n        # monitoring\n        self.monitor['hidden state'] = z\n\n        lossP   = -T.mean(T.sum(log_pxz  + log_pz0,  axis=1))\n        lossQ   = -T.mean(T.sum(log_qzx  * l_normal, axis=1))\n        lossL   = -T.mean(T.sum(c_lambda * l_normal, axis=1))   # ||L - c - c_lambda||2-> 0\n\n        # lossP   = -T.sum(log_pxz  + log_pz0)  / numel\n        # lossQ   = -T.sum(log_qzx  * l_normal) / numel\n        # lossL   = -T.sum(c_lambda * l_normal) / numel  # ||L - c - c_lambda||2-> 0\n        #\n        # # L2 regu\n        # print 'L2 ?'\n        # lossP  += 0.0001 * T.sum([T.sum(p**2) for p in self.tparams['p']])\n        # lossQ  += 0.0001 * T.sum([T.sum(p**2) for p in self.tparams['q']])\n\n        updates_p = self.optimizer_p.get_updates(self.tparams['p'], lossP)\n        updates_q = self.optimizer_q.get_updates(self.tparams['q'], [lossQ, l_normal])\n        updates_l = self.optimizer_l.get_updates(self.tparams['l'], [lossL, l_normal])\n\n        updates   = updates_p + updates_q + updates_l + scan_update\n        updates  += [(self.c, c), (self.v, v)]\n\n        logger.info(\"compiling the computational graph:: >__< ::training function::\")\n        train_inputs = [inputs] + [theano.Param(L, default=1)]\n\n        self.train_ = theano.function(train_inputs,\n                                      [lossL, lossP, lossQ, perplexity, nll, l_base],\n                                      updates=updates,\n                                      name='train_fun')\n\n        logger.info(\"compile the computational graph:: >__< :: explore function\")\n        self.explore_ = theano.function(train_inputs,\n                                        [lossL, lossP, lossQ, perplexity, nll, l_base],\n                                        updates=scan_update,\n                                        name='explore_fun')\n\n        # add 
monitoring:\n        self._monitoring()\n\n        # compiling monitoring\n        self.compile_monitoring(train_inputs, updates=scan_update)\n        logger.info(\"pre-training functions compile done.\")\n\n    def generate_(self, context=None, max_len=None, mode='display'):\n        # overwrite the RNNLM generator as there are hidden variables every time step\n        args = dict(k=self.config['sample_beam'],\n                    maxlen=self.config['max_len'] if not max_len else max_len,\n                    stochastic=self.config['sample_stoch'] if mode == 'display' else None,\n                    argmax=self.config['sample_argmax'] if mode == 'display' else None)\n"
  },
  {
    "path": "emolga/run.py",
    "content": "# coding=utf-8\n__author__ = 'jiataogu'\n\nimport logging\n\nfrom matplotlib import pyplot\nfrom theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams\n\nfrom config import setup, setup_main\nfrom dataset import deserialize_from_file, divide_dataset, build_fuel, GuessOrder\nfrom game.asker import Asker\nfrom game.responder import Responder\nfrom models.variational import Helmholtz\nfrom utils.generic_utils import *\n\nlogger      = logging.getLogger(__name__)\nlm_config   = setup()\nmain_config = setup_main()\n# logging.basicConfig(level= main_config['level'], format=\"%(asctime)s: %(name)s: %(levelname)s: %(message)s\")\n\nnp.random.seed(main_config['seed'])\nn_rng  = np.random.RandomState(main_config['seed'])\nrng    = RandomStreams(n_rng.randint(2 ** 30), use_cuda=True)\n\n\n\"\"\"\nMain Loop.\n\"\"\"\nprint 'start.'\n\n# load the dataset and build a fuel-dataset.\nidx2word, word2idx = deserialize_from_file(lm_config['vocabulary_set'])\n\n# load the fake_dialogue dataset.\nfake_data = deserialize_from_file(main_config['fake_diag'])\ntrain_set, test_set = divide_dataset(fake_data, main_config['test_size'], 200000)\n\nlm_config['enc_voc_size']   = max(zip(*word2idx.items())[1]) + 1\nlm_config['dec_voc_size']   = lm_config['enc_voc_size']\nlm_config['state_dim']      = main_config['core_hidden_dim']\nmain_config['enc_voc_size'] = lm_config['enc_voc_size']\n\ndatabase           = deserialize_from_file(lm_config['dataset'])\ndataset            = build_fuel(database)\nweights_file       = lm_config['weights_file']\nanswer_templates   = {0: 'I cannot understand.', 1: 'Congrats!', 2: 'Pity.'}\n\nlogger.info('build dataset done. vocabulary size = {0}'.format(lm_config['dec_voc_size']))\n\nstart_time         = time.time()\n# build the environment\ngame               = GuessOrder(rng=n_rng, size=8)\nenvironment        = Responder(game=game)\n\n# load the pretrained generator\ngenerator          = Helmholtz(lm_config, n_rng, rng, dynamic_prior=True)\ngenerator.build_()\ngenerator.load(weights_file)\ngenerator.dynamic()\n\n# build the agent.\nagent              = Asker(main_config, lm_config, n_rng, rng, generator)\nagent.build_()\nagent.compile_asker()\nlogger.info('compile the asker sampler ok.')\n\n# build the scheduled trainer if any.\nagent.compile_scheduled_trainer()\nlogger.info('compile the asker ss-learner ok.')\n\n# build the trainer\nagent.compile_trainer()\nlogger.info('compile the asker learner ok.')\n\nend_time           = time.time()\nlogger.info('compiling done. 
It costs {} seconds'.format(end_time - start_time))\n\n\ndef simulator(M=25, display=False):\n    \"\"\"\n    Dialogue Simulation\n    \"\"\"\n    start_time = time.time()\n    progbar    = Progbar(M)\n    logger.info('Start simulation.')\n    train_data = {'X': [], 'Y': [], 'A': [], 'R': [], 'G': [], 'T': [], 'text': [], 'acc': []}\n    for ep in xrange(M):\n        environment.reset()\n        episode            = {'x': [], 'y': [], 'a': [], 'r': []}\n\n        conversation       = ''\n        conversation      += '\\n\\n\\n' + '***' * 30\n        conversation      += '\\nGame start.'\n\n        turn               = 0\n        maxturn            = 16\n        kwargs             = {'turn': turn, 'maxturn': maxturn}\n        for k in xrange(maxturn + 1):\n            if kwargs['turn'] == maxturn:\n                guess, score   = agent.act(kwargs)\n                conversation  += '\\n' + '_' * 93 + '[{}]'.format(kwargs['turn'])\n                conversation  += '\\n(´✪ ‿ ✪`)ノ : {}'.format('My answer = ' + ' '.join([str(w) for w in guess]))\n\n                corrects       = environment.get_answer()\n                conversation  += '\\n{:>78} : ლ（´∀`ლ）'.format(' '.join([str(w) for w in corrects]))\n\n                Accuracy       = sum([g == c for g, c in zip(guess, corrects)]) / float(len(guess))\n                conversation  += '\\n{:>78} : ლ（´∀`ლ）'.format('Accuracy = {}%'.format(Accuracy * 100))\n\n                episode['g'] = np.asarray(guess)\n                episode['t'] = np.asarray(corrects)\n                episode['r'].append(Accuracy)\n                episode['c'] = Accuracy\n                break\n\n            next_action, next_sent, kwargs  = agent.act(kwargs)\n            question           = ' '.join(print_sample(idx2word, next_sent)[:-1])\n            conversation      += '\\n' + '_' * 93 + '[{}]'.format(kwargs['turn'])\n            conversation      += '\\n(´◉ ω ◉`)？ : {}'.format(question)\n\n            got                = environment.parse(question)\n            reward             = 0 if got > 0 else -1\n            kwargs['prev_asw'] = np.asarray([got], dtype='int32')\n            conversation += '\\n{:>78} : (●´ε｀●)'.format(answer_templates[got])\n\n            # registration\n            episode['a'].append(next_action)\n            episode['y'].append(next_sent[None, :])\n            episode['x'].append(got)\n            episode['r'].append(reward)\n\n        conversation += '\\nGame End\\n' + '***' * 30\n\n        if display:\n            logger.info(conversation)\n\n        # concatenate\n        train_data['A'].append(np.concatenate(episode['a'], axis=0)[None, :, :])\n        train_data['Y'].append(np.concatenate(episode['y'], axis=0)[None, :, :])\n        train_data['X'].append(np.asarray(episode['x'], dtype='int32')[None, :])\n        train_data['R'].append(np.asarray(episode['r'], dtype='float32')[::-1].cumsum()[::-1][None, :])\n        train_data['G'].append(episode['g'][None, :])\n        train_data['T'].append(episode['t'][None, :])\n        train_data['text'].append(conversation)\n        train_data['acc'].append(episode['c'])\n\n        progbar.update(ep + 1, [('accuracy', episode['c'])])\n\n    train_data['A'] = np.concatenate(train_data['A'], axis=0).astype('float32')\n    train_data['X'] = np.concatenate(train_data['X'], axis=0).astype('int32')\n    train_data['Y'] = np.concatenate(train_data['Y'], axis=0).astype('int32')\n    train_data['R'] = np.concatenate(train_data['R'], axis=0).astype('float32')\n    train_data['G'] = 
np.concatenate(train_data['G'], axis=0).astype('int32')\n    train_data['T'] = np.concatenate(train_data['T'], axis=0).astype('int32')\n\n    end_time = time.time()\n    print ''\n    logger.info('Simulated {0} episodes in {1} seconds.'.format(M, end_time - start_time))\n    return train_data\n\n\ndef learner(data, fr=1., fs=1., fb=1.):\n    \"\"\"\n    Training.\n    \"\"\"\n    start_time = time.time()\n    X     = data['X']   # answers obtained from the environment;\n    Y     = data['Y']   # questions generated based on policy;\n    A     = data['A']   # actions performed in Helmholtz questions generator;\n    R     = data['R']   # cumulative reward obtained through conversation;\n    guess = data['G']   # final guess order given by the agent\n    truth = data['T']   # real order in the environment\n\n    loss  = agent.train(X, Y, A, R, guess, truth, fr, fs, fb)\n    end_time = time.time()\n    logger.info('Training this batch took {0} seconds.'.format(end_time - start_time))\n    logger.info('REINFORCE Loss = {0}, Supervised loss = {1}, Baseline loss = {2}'.format(\n        loss[0], loss[1], loss[2]))\n    return loss\n\n\ndef SL_learner(data, batch_size=25):\n    \"\"\"\n    Supervised Learning with fake-optimal logs.\n    One epoch for all data.\n    \"\"\"\n    start_time = time.time()\n    X          = data['X'].astype('int32')   # answers obtained from the environment;\n    Y          = data['Y'].astype('int32')   # questions generated based on policy;\n    T          = data['T'].astype('int32')   # real order in the environment\n\n    # index shuffle\n    idx        = np.arange(X.shape[0]).tolist()\n    np.random.shuffle(idx)\n\n    num_batch  = X.shape[0] / batch_size\n    progbar    = Progbar(num_batch)\n    batch_from = 0\n    loss       = []\n    for batch in xrange(num_batch):\n        batch_to    = batch_from + batch_size\n        if batch_to > X.shape[0]:\n            batch_to = X.shape[0]\n\n        batch_X     = X[idx[batch_from: batch_to]]\n        batch_Y     = Y[idx[batch_from: batch_to]]\n        batch_T     = T[idx[batch_from: batch_to]]\n\n        if not main_config['multi_task']:\n            if not main_config['ssl']:\n                loss.append(agent.train_sl(batch_X, batch_Y, batch_T))\n            else:\n                loss.append(agent.train_ssl(batch_X, batch_Y, batch_T, 3, 10.))\n            progbar.update(batch + 1, [('loss', loss[-1])])\n        else:\n            loss.append(agent.train_mul(batch_X, batch_Y, batch_T, 3, 10.))\n            progbar.update(batch + 1, [('loss', loss[-1][0]), ('loss_ssl', loss[-1][1]), ('ppl', loss[-1][2])])\n        batch_from  = batch_to\n\n    end_time   = time.time()\n    logger.info('Training this epoch took {0} seconds.'.format(end_time - start_time))\n    logger.info('Supervised loss = {}'.format(np.mean(loss)))\n    return loss\n\n\ndef main():\n    losses   = []\n    accuracy = []\n    for echo in xrange(4000):\n        logger.info('Iteration = {}'.format(echo))\n        train_data = simulator(M=20)\n\n        print train_data['text'][-1]\n\n        loss       = learner(train_data, fr=0.)\n        losses.append(loss)\n        accuracy  += train_data['acc']\n\n        if echo % 100 == 99:\n            pyplot.plot(accuracy)\n            pyplot.show()\n\n    # pkl.dump(losses, open('losses.temp.pkl'))\n\n\ndef check_answer(x, y, g):\n    g     = np.asarray(g)\n    environment.game.set_answer(g)\n    s     = 0\n    for k in xrange(x.shape[1]):\n        question           = ' '.join(print_sample(idx2word, 
y[0][k].tolist())[:-1])\n        got                = environment.parse(question)\n        if got == 2 - x[0][k]:\n            s += 1.\n    return s / x.shape[1]\n\n\ndef display_session(x, y, g, t, acc, cov):\n    \"\"\"\n    display a dialogue session\n    \"\"\"\n    conversation       = ''\n    conversation      += '\\n\\n\\n' + '***' * 30\n    conversation      += '\\nGame start.'\n\n    for k in xrange(x.shape[1]):\n        question           = ' '.join(print_sample(idx2word, y[0][k].tolist())[:-1])\n        conversation      += '\\n' + '_' * 93 + '[{}]'.format(k + 1)\n        conversation      += '\\n(´◉ ω ◉`)？ : {}'.format(question)\n        got                = x[0][k]\n        conversation += '\\n{:>78} : (●´ε｀●)'.format(answer_templates[got])\n\n    conversation  += '\\n' + '_' * 93 + '[{}]'.format(k + 1)\n    conversation  += '\\n(´✪ ‿ ✪`)ノ : {}'.format('My answer = ' + ' '.join([str(w) for w in g]))\n    conversation  += '\\n{:>78} : ლ（´∀`ლ）'.format(' '.join([str(w) for w in t[0]]))\n    conversation  += '\\n{:>78} : ლ（´∀`ლ）'.format('Accuracy = {}%'.format(acc * 100))\n    conversation  += '\\n{:>78} : ლ（´∀`ლ）'.format('Understand = {}%'.format(cov * 100))\n    conversation  += '\\nGame End\\n' + '***' * 30\n    return conversation\n\n\ndef main_sl():\n    # get the evaluation set.\n    evaluation_set = n_rng.randint(0, train_set['X'].shape[0], main_config['test_size']).tolist()\n    acc_s, acc_t = [], []\n    los_s, los_t = [], []\n    und_s, und_t = [], []\n    for echo in xrange(500):\n        logger.info('Epoch = {}'.format(echo))\n        loss       = SL_learner(train_set, batch_size=50)\n        los_s.append(loss)\n\n        # sampling on training set.\n        logger.info('testing on sampled training set.')\n        progbar    = Progbar(main_config['test_size'])\n        accuracy   = []\n        understand = []\n        untruth    = []\n        at         = 0\n        for k in evaluation_set:\n            at        += 1\n            x          = train_set['X'][None, k]\n            y          = train_set['Y'][None, k]\n            t          = train_set['T'][None, k]\n\n            g, _, acc  = agent.evaluate(x, y, t)\n            cov        = check_answer(x, y, g)\n            cov_t      = check_answer(x, y, t[0].tolist())\n            progbar.update(at, [('acc', acc), ('und', cov)])\n            untruth.append(cov_t)\n            accuracy.append(acc)\n            understand.append(cov)\n\n            # if at == 1:\n            #     x_ = 2 - x\n            #     logger.info(display_session(x_, y, g, t, acc, cov))\n\n        print '\\ntraining set test.. 
average accuracy = {0}% /understand {1}% questions'.format(\n            100 * np.mean(accuracy), 100 * np.mean(understand))\n        print 'check truth {}%'.format(100 * np.mean(untruth))\n\n        acc_s.append(np.mean(accuracy))\n        und_s.append(np.mean(understand))\n\n        # sampling on testing set.\n        logger.info('testing on testing set.')\n        progbar2   = Progbar(main_config['test_size'])\n        accuracy   = []\n        understand = []\n        at         = 0\n        for k in xrange(main_config['test_size']):\n            at        += 1\n            x          = test_set['X'][None, k]\n            y          = test_set['Y'][None, k]\n            t          = test_set['T'][None, k]\n\n            g, _, acc  = agent.evaluate(x, y, t)\n            cov        = check_answer(x, y, g)\n            progbar2.update(at, [('acc', acc), ('und', cov)])\n            accuracy.append(acc)\n            understand.append(cov)\n\n            # if at == 1:\n            #     x_ = 2 - x\n            #     logger.info(display_session(x_, y, g, t, acc, cov))\n\n        print '\\ntesting set test.. average accuracy = {0}% /understand {1}% questions'.format(\n            100 * np.mean(accuracy), 100 * np.mean(understand))\n\n        acc_t.append(np.mean(accuracy))\n        und_t.append(np.mean(understand))\n\n        if echo % 20 == 19:\n            pyplot.figure(1)\n            pyplot.plot(acc_s, 'r')\n            pyplot.plot(acc_t, 'g')\n            pyplot.figure(2)\n            pyplot.plot(und_s, 'r')\n            pyplot.plot(und_t, 'g')\n            pyplot.show()\n\n\n# agent.main_config['sample_beam']   = 1\n# agent.main_config['sample_argmax'] = True\nmain_sl()\n"
  },
  {
    "path": "emolga/test_lm.py",
    "content": "__author__ = 'jiataogu'\n\nimport logging\n\nfrom theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams\n\nfrom emolga.models.encdec import RNNLM, AutoEncoder\nfrom emolga.models.variational import Helmholtz, VAE, HarX, THarX, NVTM\n# from models.ntm_encdec import RNNLM, AutoEncoder, Helmholtz, BinaryHelmholtz\nfrom emolga.utils.generic_utils import *\nfrom emolga.dataset.build_dataset import deserialize_from_file, build_fuel, obtain_stream\nfrom emolga.config import setup_ptbz, setup_ptb2\nfrom emolga.config_variant import *\n\nsetup = setup_bienc\n\n\ndef init_logging(logfile):\n    formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(module)s: %(message)s',\n                                  datefmt='%m/%d/%Y %H:%M:%S'   )\n    fh = logging.FileHandler(logfile)\n    # ch = logging.StreamHandler()\n\n    fh.setFormatter(formatter)\n    # ch.setFormatter(formatter)\n    # fh.setLevel(logging.INFO)\n    # ch.setLevel(logging.INFO)\n    # logging.getLogger().addHandler(ch)\n    logging.getLogger().addHandler(fh)\n    logging.getLogger().setLevel(logging.INFO)\n\n    return logging\n\n# prepare logging.\ntmark  = time.strftime('%Y%m%d-%H%M%S', time.localtime(time.time()))\nconfig = setup()   # load settings.\nfor w in config:\n    print '{0}={1}'.format(w, config[w])\n\nlogger = init_logging(config['path_log'] + '/emolga.RHM.id={}.log'.format(tmark))\nn_rng  = np.random.RandomState(config['seed'])\nnp.random.seed(config['seed'])\nrng    = RandomStreams(n_rng.randint(2 ** 30))\n\nlogger.info('Start!')\n\n# load the dataset and build a fuel-dataset.\nidx2word, word2idx = deserialize_from_file(config['vocabulary_set'])\nconfig['enc_voc_size'] = max(zip(*word2idx.items())[1]) + 1\nconfig['dec_voc_size'] = config['enc_voc_size']\nlogger.info('build dataset done. 
vocabulary size = {0}/ batch size = {1}'.format(\n        config['dec_voc_size'], config['batch_size']))\n\n# training & valid & testing set.\ntrain_set, train_size = build_fuel(deserialize_from_file(config['dataset']))\nvalid_set, valid_size = build_fuel(deserialize_from_file(config['dataset_test']))  # use test set for a try\n\n# weight save.\nsavefile = config['path_h5'] + '/emolga.RHM.id={}.h5'.format(tmark)\n\n# build the agent\nif config['model'] == 'RNNLM':\n    agent = RNNLM(config, n_rng, rng, mode=config['mode'])\nelif config['model'] == 'HarX':\n    agent = THarX(config, n_rng, rng, mode=config['mode'])\nelif config['model'] == 'Helmholtz':\n    agent = Helmholtz(config, n_rng, rng, mode=config['mode'])\nelse:\n    raise NotImplementedError\n\nagent.build_()\nagent.compile_('train')\nprint 'compile ok'\n\n# learning to speak language.\ncount  = 1000\necho   = 0\nepochs = 50\nwhile echo < epochs:\n    echo   += 1\n    loss    = []\n    correct = 0\n    scans   = 0\n\n    # visualize the embedding weights.\n    # if echo > 1:\n    #    plt.figure(3)\n    #    visualize_(plt.subplot(111), agent.decoder.Embed.get_params()[0].get_value(), name='encoder embedding',\n    #  text=idx2word)\n    #    plt.show()\n\n    # if not config['use_noise']:\n\n    # training\n    train_batches = obtain_stream(train_set, config['batch_size']).get_epoch_iterator(as_dict=True)\n    valid_batches = obtain_stream(valid_set, config['eval_batch_size']).get_epoch_iterator(as_dict=True)\n\n    def prepare_batch(batch):\n        data = batch['data'].astype('int32')\n        data = np.concatenate([data, np.zeros((data.shape[0], 1), dtype='int32')], axis=1)\n\n        def cut_zeros(data):\n            for k in range(data.shape[1] - 1, 0, -1):\n                data_col = data[:, k].sum()\n                if data_col > 0:\n                    return data[:, : k + 2]\n            return data\n        data = cut_zeros(data)\n        return data\n\n    # training\n    logger.info('Epoch = {} -> Training Set Learning...'.format(echo))\n    progbar = Progbar(train_size / config['batch_size'])\n    for it, batch in enumerate(train_batches):\n        # get data\n        data = prepare_batch(batch)\n        if config['model'] == 'RNNLM' or config['model'] == 'AutoEncoder':\n            loss.append(agent.train_(data, config['repeats']))\n            progbar.update(it, [('loss_reg', loss[-1][0]), ('ppl.', loss[-1][1])])\n        elif config['model'] in ('Helmholtz', 'HarX'):\n            loss.append(agent.train_(data, config['repeats']))\n            weightss = np.sum([np.sum(abs(w)) for w in agent.get_weights()])\n            progbar.update(it, [('lossPa', loss[-1][0]), ('lossPxa', loss[-1][1]), ('lossQ', loss[-1][2]),\n                                ('perplexity', np.log(loss[-1][3])), ('NLL', loss[-1][4]), ('L1', weightss)])\n\n        \"\"\"\n        watch = agent.watch(data)\n        print '.'\n        pprint(watch[0][0])\n        pprint(watch[2][0])\n        # pprint(watch[2][0])\n        sys.exit(111)\n        \"\"\"\n\n        # if it % 100 == 50:\n        #     sys.exit(-1)\n        #     # print '.'\n        #     # print 'encoded = {}'.format(encoded[11])\n        #     # print 'mean = {}'.format(mean[11])\n        #     # print 'std = {}'.format(std[11])\n        #\n        #     # watch = agent.watch(data)\n        #     # print '.'\n        #     # print 'train memory {}'.format(watch[0][0])\n        #\n        #     for kk in xrange(5):\n        #         # sample a sentence.\n        #         # 
action        = agent.action_sampler()\n        #         # context       = agent.context_trans(action)\n        #         if config['model'] is 'AutoEncoder':\n        #             source  = data[kk][None, :]\n        #             truth   = ' '.join(print_sample(idx2word, source[0].tolist())[:-1])\n        #             print '\\ntruth: {}'.format(truth)\n        #             context = agent.memorize(source)\n        #             sample, score = agent.generate_(context, max_len=data.shape[1])\n        #         else:\n        #             sample, score = agent.generate_(max_len=data.shape[1])\n        #\n        #         if sample[-1] is not 0:\n        #             sample += [0]  # fix the end.\n        #         question = ' '.join(print_sample(idx2word, sample)[:-1])\n        #         print '\\nsample: {}'.format(question)\n        #         print 'PPL: {}'.format(score)\n        #         scans   += 1.0\n\n    print ' </s>.'\n    logger.info('Epoch = {0} finished.'.format(echo))\n\n    # validation\n    logger.info('Epoch = {} -> Validation Set Evaluation...'.format(echo))\n    progbar = Progbar(valid_size / config['batch_size'])\n    for it, batch in enumerate(valid_batches):\n        # get data\n        data = prepare_batch(batch)\n        if config['model'] in ('Helmholtz', 'HarX'):\n            loss.append(agent.evaluate_(data))\n            progbar.update(it, [('NLL', loss[-1][0]), ('perplexity', np.log(loss[-1][1]))])\n        else:\n            raise NotImplementedError\n\n    print ' </s>.'\n    # save the weights.\n    agent.save(config['path_h5'] + '/emolga.RHM.id={0}.epoch={1}.pkl'.format(tmark, echo))\n\n    # logger.info('Learning percentage: {}'.format(correct / scans))\n\n\n# inference test\n# batches = data_stream.get_epoch_iterator(as_dict=True)\n# for it, batch in enumerate(batches):\n#     data = batch['data'].astype('int32')\n#     data = np.concatenate([data, np.zeros((data.shape[0], 1), dtype='int32')], axis=1)\n#     mean, std = agent.inference_(data)\n#     print mean\n#     break\n# print count"
  },
  {
    "path": "emolga/test_nvtm.py",
    "content": "__author__ = 'jiataogu'\n\nimport logging\n\nfrom theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams\n\nfrom emolga.models.encdec import RNNLM, AutoEncoder\nfrom emolga.models.variational import Helmholtz, VAE, HarX, THarX, NVTM\n# from models.ntm_encdec import RNNLM, AutoEncoder, Helmholtz, BinaryHelmholtz\nfrom emolga.utils.generic_utils import *\nfrom emolga.dataset.build_dataset import deserialize_from_file, build_fuel, obtain_stream\nfrom emolga.config import setup_ptbz, setup_ptb2\nfrom emolga.config_variant import *\n\nsetup = setup_bienc\n\n\ndef init_logging(logfile):\n    formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(module)s: %(message)s',\n                                  datefmt='%m/%d/%Y %H:%M:%S'   )\n    fh = logging.FileHandler(logfile)\n    # ch = logging.StreamHandler()\n\n    fh.setFormatter(formatter)\n    # ch.setFormatter(formatter)\n    # fh.setLevel(logging.INFO)\n    # ch.setLevel(logging.INFO)\n    # logging.getLogger().addHandler(ch)\n    logging.getLogger().addHandler(fh)\n    logging.getLogger().setLevel(logging.INFO)\n\n    return logging\n\n# prepare logging.\ntmark  = time.strftime('%Y%m%d-%H%M%S', time.localtime(time.time()))\nconfig = setup()   # load settings.\nfor w in config:\n    print '{0}={1}'.format(w, config[w])\n\nlogger = init_logging(config['path_log'] + '/emolga.RHM.id={}.log'.format(tmark))\nn_rng  = np.random.RandomState(config['seed'])\nnp.random.seed(config['seed'])\nrng    = RandomStreams(n_rng.randint(2 ** 30))\n\nlogger.info('Start!')\n\n# load the dataset and build a fuel-dataset.\nidx2word, word2idx = deserialize_from_file(config['vocabulary_set'])\nconfig['enc_voc_size'] = max(zip(*word2idx.items())[1]) + 1\nconfig['dec_voc_size'] = config['enc_voc_size']\nlogger.info('build dataset done. 
vocabulary size = {0}/ batch size = {1}'.format(\n        config['dec_voc_size'], config['batch_size']))\n\n# training & valid & testing set.\ntrain_set, train_size = build_fuel(deserialize_from_file(config['dataset']))\nvalid_set, valid_size = build_fuel(deserialize_from_file(config['dataset_test']))  # use test set for a try\n\n# weight save.\nsavefile = config['path_h5'] + '/emolga.RHM.id={}.h5'.format(tmark)\n\n# build the agent\nif config['model'] == 'RNNLM':\n    agent = RNNLM(config, n_rng, rng, mode=config['mode'])\nelif config['model'] == 'HarX':\n    agent = NVTM(config, n_rng, rng, mode=config['mode'])\nelif config['model'] == 'Helmholtz':\n    agent = Helmholtz(config, n_rng, rng, mode=config['mode'])\nelse:\n    raise NotImplementedError\n\nagent.build_()\nagent.compile_('train')\nprint 'compile ok'\n\n# learning to speak language.\ncount  = 1000\necho   = 0\nepochs = 50\nwhile echo < epochs:\n    echo   += 1\n    loss    = []\n    correct = 0\n    scans   = 0\n\n    # visualize the embedding weights.\n    # if echo > 1:\n    #    plt.figure(3)\n    #    visualize_(plt.subplot(111), agent.decoder.Embed.get_params()[0].get_value(), name='encoder embedding',\n    #  text=idx2word)\n    #    plt.show()\n\n    # if not config['use_noise']:\n\n    # training\n    train_batches = obtain_stream(train_set, config['batch_size']).get_epoch_iterator(as_dict=True)\n    valid_batches = obtain_stream(valid_set, config['eval_batch_size']).get_epoch_iterator(as_dict=True)\n\n    def prepare_batch(batch):\n        data = batch['data'].astype('int32')\n        data = np.concatenate([data, np.zeros((data.shape[0], 1), dtype='int32')], axis=1)\n\n        def cut_zeros(data):\n            for k in range(data.shape[1] - 1, 0, -1):\n                data_col = data[:, k].sum()\n                if data_col > 0:\n                    return data[:, : k + 2]\n            return data\n        data = cut_zeros(data)\n        return data\n\n    # training\n    logger.info('Epoch = {} -> Training Set Learning...'.format(echo))\n    progbar = Progbar(train_size / config['batch_size'])\n    for it, batch in enumerate(train_batches):\n        # get data\n        data = prepare_batch(batch)\n        if config['model'] == 'RNNLM' or config['model'] == 'AutoEncoder':\n            loss.append(agent.train_(data, config['repeats']))\n            progbar.update(it, [('loss_reg', loss[-1][0]), ('ppl.', loss[-1][1])])\n        elif config['model'] in ('Helmholtz', 'HarX'):\n            loss.append(agent.train_(data, 1))\n            weightss = np.sum([np.sum(abs(w)) for w in agent.get_weights()])\n            progbar.update(it, [('lossL', loss[-1][0]), ('lossP', loss[-1][1]), ('lossQ', loss[-1][2]),\n                                ('perplexity', np.log(loss[-1][3])), ('NLL', loss[-1][4]), ('Baseline', loss[-1][5])])\n\n        \"\"\"\n        watch = agent.watch(data)\n        print '.'\n        pprint(watch[0][0])\n        pprint(watch[2][0])\n        # pprint(watch[2][0])\n        sys.exit(111)\n        \"\"\"\n\n        # if it % 100 == 50:\n        #     sys.exit(-1)\n        #     # print '.'\n        #     # print 'encoded = {}'.format(encoded[11])\n        #     # print 'mean = {}'.format(mean[11])\n        #     # print 'std = {}'.format(std[11])\n        #\n        #     # watch = agent.watch(data)\n        #     # print '.'\n        #     # print 'train memory {}'.format(watch[0][0])\n        #\n        #     for kk in xrange(5):\n        #         # sample a sentence.\n
        #         # action        = agent.action_sampler()\n        #         # context       = agent.context_trans(action)\n        #         if config['model'] == 'AutoEncoder':\n        #             source  = data[kk][None, :]\n        #             truth   = ' '.join(print_sample(idx2word, source[0].tolist())[:-1])\n        #             print '\\ntruth: {}'.format(truth)\n        #             context = agent.memorize(source)\n        #             sample, score = agent.generate_(context, max_len=data.shape[1])\n        #         else:\n        #             sample, score = agent.generate_(max_len=data.shape[1])\n        #\n        #         if sample[-1] != 0:\n        #             sample += [0]  # fix the end.\n        #         question = ' '.join(print_sample(idx2word, sample)[:-1])\n        #         print '\\nsample: {}'.format(question)\n        #         print 'PPL: {}'.format(score)\n        #         scans   += 1.0\n\n    print ' </s>.'\n    logger.info('Epoch = {0} finished.'.format(echo))\n    loss = zip(*loss)\n    logger.info('LossL: {0}, LossP: {1}, LossQ: {2}, PPL: {3}, NLL: {4}, Baseline: {5}'.format(\n        np.mean(loss[0]), np.mean(loss[1]), np.mean(loss[2]), np.mean(loss[3]), np.mean(loss[4]), np.mean(loss[5])\n    ))\n\n    # validation\n    loss = []\n    logger.info('Epoch = {} -> Validation Set Evaluation...'.format(echo))\n    progbar = Progbar(valid_size / config['batch_size'])\n    for it, batch in enumerate(valid_batches):\n        # get data\n        data = prepare_batch(batch)\n        if config['model'] in ('Helmholtz', 'HarX'):\n            # loss.append(agent.evaluate_(data))\n            loss.append(agent.explore_(data, 10))\n            weightss = np.sum([np.sum(abs(w)) for w in agent.get_weights()])\n            progbar.update(it, [('lossL', loss[-1][0]), ('lossP', loss[-1][1]), ('lossQ', loss[-1][2]),\n                                ('perplexity', np.log(loss[-1][3])), ('NLL', loss[-1][4]), ('Baseline', loss[-1][5])])\n        else:\n            raise NotImplementedError\n\n    print ' </s>.'\n    loss = zip(*loss)\n    logger.info('LossL: {0}, LossP: {1}, LossQ: {2}, PPL: {3}, NLL: {4}, Baseline: {5}'.format(\n        np.mean(loss[0]), np.mean(loss[1]), np.mean(loss[2]), np.mean(loss[3]), np.mean(loss[4]), np.mean(loss[5])\n    ))\n    # save the weights.\n    agent.save(config['path_h5'] + '/emolga.RHM.id={0}.epoch={1}.pkl'.format(tmark, echo))\n\n    # logger.info('Learning percentage: {}'.format(correct / scans))\n\n\n# inference test\n# batches = data_stream.get_epoch_iterator(as_dict=True)\n# for it, batch in enumerate(batches):\n#     data = batch['data'].astype('int32')\n#     data = np.concatenate([data, np.zeros((data.shape[0], 1), dtype='int32')], axis=1)\n#     mean, std = agent.inference_(data)\n#     print mean\n#     break\n# print count"
  },
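  {
    "path": "emolga/test_prepare_batch_example.py",
    "content": "# coding=utf-8\n\"\"\"\nNOTE: a hypothetical, self-contained sketch -- not part of the original\nrepository. It restates the prepare_batch/cut_zeros helpers defined inline\nin the training scripts above: pad one extra <eol> (id 0) column, then cut\ntrailing all-zero padding down to a single terminating column.\n\"\"\"\nimport numpy as np\n\n\ndef prepare_batch(data):\n    # data: (nb_samples, max_len) int32 matrix; 0 marks <eol>/padding.\n    data = np.concatenate([data, np.zeros((data.shape[0], 1), dtype='int32')], axis=1)\n\n    def cut_zeros(data):\n        # keep columns up to one past the last column with any non-zero token.\n        for k in range(data.shape[1] - 1, 0, -1):\n            if data[:, k].sum() > 0:\n                return data[:, : k + 2]\n        return data\n    return cut_zeros(data)\n\n\nif __name__ == '__main__':\n    batch = np.asarray([[3, 5, 2, 0, 0, 0],\n                        [4, 1, 0, 0, 0, 0]], dtype='int32')\n    print prepare_batch(batch)   # -> [[3 5 2 0], [4 1 0 0]]\n"
  },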
  {
    "path": "emolga/test_run.py",
    "content": "# coding=utf-8\n__author__ = 'jiataogu'\n\nimport logging\n\nimport theano\nfrom matplotlib import pyplot\nfrom theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams\n\nfrom config import setup, setup_main\nfrom dataset import deserialize_from_file, divide_dataset, build_fuel, GuessOrder\nfrom game.asker import Asker\nfrom game.responder import Responder\nfrom models.variational import Helmholtz\nfrom utils.generic_utils import *\n\ntheano.config.optimizer = 'fast_compile'\nAsker       = Asker  # GridAsker # PyramidAsker  # GridAsker\nlogger      = logging.getLogger(__name__)\nlm_config   = setup()\nmain_config = setup_main() # setup_grid6()  # setup_pyramid()  # setup_grid()  # setup_main()\n# logging.basicConfig(level= main_config['level'], format=\"%(asctime)s: %(name)s: %(levelname)s: %(message)s\")\n\nnp.random.seed(main_config['seed'])\nn_rng  = np.random.RandomState(main_config['seed'])\nrng    = RandomStreams(n_rng.randint(2 ** 30), use_cuda=True)\n\n\"\"\"\nMain Loop.\n\"\"\"\nprint 'start.'\n\n# load the dataset and build a fuel-dataset.\nidx2word, word2idx = deserialize_from_file(lm_config['vocabulary_set'])\n\n# load the fake_dialogue dataset.\nprint 'Dataset: {}'.format(main_config['fake_diag'])\nfake_data = deserialize_from_file(main_config['fake_diag'])\ntrain_set, test_set = divide_dataset(fake_data, main_config['test_size'], 200000)\n\nlm_config['enc_voc_size']   = max(zip(*word2idx.items())[1]) + 1\nlm_config['dec_voc_size']   = lm_config['enc_voc_size']\nlm_config['state_dim']      = main_config['core_hidden_dim']\nmain_config['enc_voc_size'] = lm_config['enc_voc_size']\n\ndatabase           = deserialize_from_file(lm_config['dataset'])\ndataset            = build_fuel(database)\nweights_file       = lm_config['weights_file']\nanswer_templates   = {0: 'I cannot understand.', 1: 'Congrats!', 2: 'Pity.'}\n\nlogger.info('build dataset done. vocabulary size = {0}'.format(lm_config['dec_voc_size']))\n\nstart_time         = time.time()\n# build the environment\ngame               = GuessOrder(rng=n_rng, size=main_config['game_length'])\nenvironment        = Responder(game=game)\n\n# load the pretrained generator\ngenerator          = Helmholtz(lm_config, n_rng, rng, dynamic_prior=True)\ngenerator.build_()\ngenerator.load(weights_file)\ngenerator.dynamic()\n\n# build the agent.\nagent              = Asker(main_config, lm_config, n_rng, rng, generator)\nagent.build_()\nagent.compile_asker()\nlogger.info('compile the asker sampler ok.')\n\n# # build the scheduled trainer if any.\n# agent.compile_scheduled_trainer()\n# logger.info('compile the asker ss-learner ok.')\n\n# build the trainer\nagent.compile_trainer()\nlogger.info('compile the asker learner ok.')\n\nend_time           = time.time()\nlogger.info('compiling done. 
It costs {} seconds'.format(end_time - start_time))\n\n\ndef simulator(M=25, display=False):\n    \"\"\"\n    Dialogue Simulation\n    \"\"\"\n    start_time = time.time()\n    progbar    = Progbar(M)\n    logger.info('Start simulation.')\n    train_data = {'X': [], 'Y': [], 'A': [], 'R': [], 'G': [], 'T': [], 'text': [], 'acc': []}\n    for ep in xrange(M):\n        environment.reset()\n        episode            = {'x': [], 'y': [], 'a': [], 'r': []}\n\n        conversation       = ''\n        conversation      += '\\n\\n\\n' + '***' * 30\n        conversation      += '\\nGame start.'\n\n        turn               = 0\n        maxturn            = 16\n        kwargs             = {'turn': turn, 'maxturn': maxturn}\n        for k in xrange(maxturn + 1):\n            if kwargs['turn'] == maxturn:\n                guess, score   = agent.act(kwargs)\n                conversation  += '\\n' + '_' * 93 + '[{}]'.format(kwargs['turn'])\n                conversation  += '\\n(´✪ ‿ ✪`)ノ : {}'.format('My answer = ' + ' '.join([str(w) for w in guess]))\n\n                corrects       = environment.get_answer()\n                conversation  += '\\n{:>78} : ლ（´∀`ლ）'.format(' '.join([str(w) for w in corrects]))\n\n                Accuracy       = sum([g == c for g, c in zip(guess, corrects)]) / float(len(guess))\n                conversation  += '\\n{:>78} : ლ（´∀`ლ）'.format('Accuracy = {}%'.format(Accuracy * 100))\n\n                episode['g'] = np.asarray(guess)\n                episode['t'] = np.asarray(corrects)\n                episode['r'].append(Accuracy)\n                episode['c'] = Accuracy\n                break\n\n            next_action, next_sent, kwargs  = agent.act(kwargs)\n            question           = ' '.join(print_sample(idx2word, next_sent)[:-1])\n            conversation      += '\\n' + '_' * 93 + '[{}]'.format(kwargs['turn'])\n            conversation      += '\\n(´◉ ω ◉`)？ : {}'.format(question)\n\n            got                = environment.parse(question)\n            reward             = 0 if got > 0 else -1\n            kwargs['prev_asw'] = np.asarray([got], dtype='int32')\n            conversation += '\\n{:>78} : (●´ε｀●)'.format(answer_templates[got])\n\n            # registration\n            episode['a'].append(next_action)\n            episode['y'].append(next_sent[None, :])\n            episode['x'].append(got)\n            episode['r'].append(reward)\n\n        conversation += '\\nGame End\\n' + '***' * 30\n\n        if display:\n            logger.info(conversation)\n\n        # concatenate\n        train_data['A'].append(np.concatenate(episode['a'], axis=0)[None, :, :])\n        train_data['Y'].append(np.concatenate(episode['y'], axis=0)[None, :, :])\n        train_data['X'].append(np.asarray(episode['x'], dtype='int32')[None, :])\n        train_data['R'].append(np.asarray(episode['r'], dtype='float32')[::-1].cumsum()[::-1][None, :])\n        train_data['G'].append(episode['g'][None, :])\n        train_data['T'].append(episode['t'][None, :])\n        train_data['text'].append(conversation)\n        train_data['acc'].append(episode['c'])\n\n        progbar.update(ep + 1, [('accuracy', episode['c'])])\n\n    train_data['A'] = np.concatenate(train_data['A'], axis=0).astype('float32')\n    train_data['X'] = np.concatenate(train_data['X'], axis=0).astype('int32')\n    train_data['Y'] = np.concatenate(train_data['Y'], axis=0).astype('int32')\n    train_data['R'] = np.concatenate(train_data['R'], axis=0).astype('float32')\n    train_data['G'] = 
np.concatenate(train_data['G'], axis=0).astype('int32')\n    train_data['T'] = np.concatenate(train_data['T'], axis=0).astype('int32')\n\n    end_time = time.time()\n    print ''\n    logger.info('Simulation {0} episodes with {1} seconds.'.format(M, end_time - start_time))\n    return train_data\n\n\ndef learner(data, fr=1., fs=1., fb=1.):\n    \"\"\"\n    Training.\n    \"\"\"\n    start_time = time.time()\n    X     = data['X']   # answers obtained from the environment;\n    Y     = data['Y']   # questions generated based on policy;\n    A     = data['A']   # actions performed in Helmholtz questions generator;\n    R     = data['R']   # cumulative reward obtained through conversation;\n    guess = data['G']   # final guess order given by the agent\n    truth = data['T']   # real order in the environment\n\n    loss  = agent.train(X, Y, A, R, guess, truth, fr, fs, fb)\n    end_time = time.time()\n    logger.info('Training this batch with {0} seconds.'.format(end_time - start_time))\n    logger.info('REINFORCE Loss = {0}, Supervised loss = {1}, Baseline loss = {2}'.format(\n        loss[0], loss[1], loss[2]))\n    return loss\n\n\ndef SL_learner(data, batch_size=25, eval_freq=0, eval_train=None, eval_test=None):\n    \"\"\"\n    Supervised Learning with fake-optimal logs.\n    One epoch for all data.\n    \"\"\"\n    start_time = time.time()\n    X          = data['X'].astype('int32')   # answers obtained from the environment;\n    Y          = data['Y'].astype('int32')   # questions generated based on policy;\n    T          = data['T'].astype('int32')   # real order in the environment\n\n    # index shuffle\n    idx        = np.arange(X.shape[0]).tolist()\n    np.random.shuffle(idx)\n\n    num_batch  = X.shape[0] / batch_size\n    progbar    = Progbar(num_batch)\n    batch_from = 0\n    loss       = []\n\n    if eval_freq > 0:\n        eval_batch  = num_batch / eval_freq\n        eval_start  = 0\n\n        batches     = []\n\n        accs, unds  = [], []\n        acct, undt  = [], []\n\n    for batch in xrange(num_batch):\n        batch_to    = batch_from + batch_size\n        if batch_to > X.shape[0]:\n            batch_to = X.shape[0]\n\n        batch_X     = X[idx[batch_from: batch_to]]\n        batch_Y     = Y[idx[batch_from: batch_to]]\n        batch_T     = T[idx[batch_from: batch_to]]\n\n        if not main_config['multi_task']:\n            loss.append(agent.train_sl(batch_X, batch_Y, batch_T))\n            # if not main_config['ssl']:\n            #     loss.append(agent.train_sl(batch_X, batch_Y, batch_T))\n            # else:\n            #     loss.append(agent.train_ssl(batch_X, batch_Y, batch_T, 3, 10.))\n            progbar.update(batch + 1, [('loss', loss[-1])])\n        else:\n            loss.append(agent.train_sl(batch_X, batch_Y, batch_T))\n            # loss.append(agent.train_mul(batch_X, batch_Y, batch_T, 3, 10.))\n            progbar.update(batch + 1, [('loss', loss[-1][0]), ('ppl', loss[-1][1]), ('asw loss', loss[-1][2])])\n        batch_from  = batch_to\n\n        if eval_freq > 0:\n            eval_start += 1\n            if eval_start == eval_batch or batch == num_batch - 1:\n                batches.append(batch_to)\n                if eval_train:\n                    logger.info('\\ntesting on sampled training set.')\n                    acc, und = SL_test(eval_train)\n                    accs.append(acc)\n                    unds.append(und)\n\n                if eval_test:\n                    logger.info('testing on sampled testing set.')\n
                    acc, und = SL_test(eval_test)\n                    acct.append(acc)\n                    undt.append(und)\n                eval_start   = 0\n\n    end_time   = time.time()\n    logger.info('Training this epoch with {0} seconds.'.format(end_time - start_time))\n    logger.info('Supervised loss = {}'.format(np.mean(loss)))\n    if eval_freq > 0:\n        eval_details = {'batch_id': batches, 'acc_train': accs, 'acc_test': acct, 'und_train': unds, 'und_test': undt}\n        return loss, eval_details\n    return loss\n\n\ndef main():\n    losses   = []\n    accuracy = []\n    for echo in xrange(4000):\n        logger.info('Iteration = {}'.format(echo))\n        train_data = simulator(M=20)\n\n        print train_data['text'][-1]\n\n        loss       = learner(train_data, fr=0.)\n        losses.append(loss)\n        accuracy  += train_data['acc']\n\n        if echo % 100 == 99:\n            plt.plot(accuracy)\n            plt.show()\n\n    # pkl.dump(losses, open('losses.temp.pkl'))\n\n\ndef check_answer(x, y, g):\n    g     = np.asarray(g)\n    environment.game.set_answer(g)\n    s     = 0\n    for k in xrange(x.shape[1]):\n        question           = ' '.join(print_sample(idx2word, y[0][k].tolist())[:-1])\n        got                = environment.parse(question)\n        if got == 2 - x[0][k]:\n            s += 1.\n    return s / x.shape[1]\n\n\ndef display_session(x, y, g, t, acc, cov):\n    \"\"\"\n    display a dialogue session\n    \"\"\"\n    conversation       = ''\n    conversation      += '\\n\\n\\n' + '***' * 30\n    conversation      += '\\nGame start.'\n\n    for k in xrange(x.shape[1]):\n        question           = ' '.join(print_sample(idx2word, y[0][k].tolist())[:-1])\n        conversation      += '\\n' + '_' * 93 + '[{}]'.format(k + 1)\n        conversation      += '\\n(´◉ ω ◉`)？ : {}'.format(question)\n        got                = x[0][k]\n        conversation += '\\n{:>78} : (●´ε｀●)'.format(answer_templates[got])\n\n    conversation  += '\\n' + '_' * 93 + '[{}]'.format(k + 1)\n    conversation  += '\\n(´✪ ‿ ✪`)ノ : {}'.format('My answer = ' + ' '.join([str(w) for w in g]))\n    conversation  += '\\n{:>78} : ლ（´∀`ლ）'.format(' '.join([str(w) for w in t[0]]))\n    conversation  += '\\n{:>78} : ლ（´∀`ლ）'.format('Accuracy = {}%'.format(acc * 100))\n    conversation  += '\\n{:>78} : ლ（´∀`ლ）'.format('Understand = {}%'.format(cov * 100))\n    conversation  += '\\nGame End\\n' + '***' * 30\n    return conversation\n\n\ndef SL_test(test_set):\n    print '...'\n    progbar    = Progbar(main_config['test_size'])\n    accuracy   = []\n    understand = []\n    # untruth    = []\n    at         = 0\n    for k in xrange(main_config['test_size']):\n        at        += 1\n        x          = test_set['X'][None, k]\n        y          = test_set['Y'][None, k]\n        t          = test_set['T'][None, k]\n\n        g, _, acc  = agent.evaluate(x, y, t)\n        cov        = check_answer(x, y, g)\n        # cov_t      = check_answer(x, y, t[0].tolist())\n        progbar.update(at, [('acc', acc), ('und', cov)])\n        # untruth.append(cov_t)\n        accuracy.append(acc)\n        understand.append(cov)\n\n    acc        = np.mean(accuracy)\n    und        = np.mean(understand)\n
    print '\\nevaluation.. average accuracy = {0}% /understand {1}% questions'.format(\n        100 * acc, 100 * und)\n    # print 'check truth {}%'.format(100 * np.mean(untruth))\n    return acc, und\n\n\ndef main_sl():\n    # get the evaluation set.\n    evaluation_set  = n_rng.randint(0, train_set['X'].shape[0], main_config['test_size']).tolist()\n    eval_train      = dict()\n    eval_train['X'] = train_set['X'][evaluation_set]\n    eval_train['Y'] = train_set['Y'][evaluation_set]\n    eval_train['T'] = train_set['T'][evaluation_set]\n\n    eval_test       = test_set\n    eval_details    = {'batch_id': [], 'acc_train': [],\n                       'acc_test': [], 'und_train': [], 'und_test': []}\n\n    for echo in xrange(500):\n        logger.info('Epoch = {}'.format(echo))\n        loss, ed    = SL_learner(train_set, batch_size=50, eval_freq=10,\n                                 eval_train=eval_train, eval_test=eval_test)\n        eval_details['acc_train'] += ed['acc_train']\n        eval_details['acc_test']  += ed['acc_test']\n        eval_details['und_train'] += ed['und_train']\n        eval_details['und_test']  += ed['und_test']\n        eval_details['batch_id']  += [(t + 200000 * echo) / 1000.0 for t in ed['batch_id']]\n\n        pyplot.figure(1)\n        pyplot.plot(eval_details['batch_id'], eval_details['acc_train'], 'b')\n        pyplot.plot(eval_details['batch_id'], eval_details['acc_test'], 'r')\n        pyplot.xlabel('iterations (x 1000)')\n        pyplot.ylabel('accuracy')\n        pyplot.savefig('./acc-gru.png')\n\n        pyplot.figure(2)\n        pyplot.plot(eval_details['batch_id'], eval_details['und_train'], 'b')\n        pyplot.plot(eval_details['batch_id'], eval_details['und_test'], 'r')\n        pyplot.xlabel('iterations (x 1000)')\n        pyplot.ylabel('understand rate')\n        pyplot.savefig('./und-gru.png')\n\n        logger.info(\"saving ok!\")\n        # if echo % 20 == 19:\n        #     pyplot.figure(1)\n        #     pyplot.plot(acc_s, 'r')\n        #     pyplot.plot(acc_t, 'g')\n        #     pyplot.figure(2)\n        #     pyplot.plot(und_s, 'r')\n        #     pyplot.plot(und_t, 'g')\n        #     pyplot.show()\n\n# agent.main_config['sample_beam']   = 1\n# agent.main_config['sample_argmax'] = True\nmain_sl()\n"
  },
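  {
    "path": "emolga/test_run_returns_example.py",
    "content": "# coding=utf-8\n\"\"\"\nNOTE: a hypothetical sketch, not part of the original repository.\nIt isolates the reward bookkeeping used by simulator() in test_run.py:\nper-turn rewards are reverse-accumulated so that R[t] is the\nundiscounted return from turn t to the end of the dialogue.\n\"\"\"\nimport numpy as np\n\nrewards = np.asarray([-1., 0., -1., 0.8], dtype='float32')\nreturns = rewards[::-1].cumsum()[::-1]\nprint returns   # -> approximately [-1.2 -0.2 -0.2  0.8]\n"
  },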
  {
    "path": "emolga/utils/__init__.py",
    "content": "__author__ = 'yinpengcheng'\n"
  },
  {
    "path": "emolga/utils/generic_utils.py",
    "content": "from __future__ import absolute_import\nfrom matplotlib.ticker import FuncFormatter\nimport numpy as np\nimport time\nimport sys\nimport six\nimport matplotlib.pyplot as plt\nimport matplotlib\n\ndef get_from_module(identifier, module_params, module_name, instantiate=False, kwargs=None):\n    if isinstance(identifier, six.string_types):\n        res = module_params.get(identifier)\n        if not res:\n            raise Exception('Invalid ' + str(module_name) + ': ' + str(identifier))\n        if instantiate and not kwargs:\n            return res()\n        elif instantiate and kwargs:\n            return res(**kwargs)\n        else:\n            return res\n    return identifier\n\n\ndef make_tuple(*args):\n    return args\n\n\ndef printv(v, prefix=''):\n    if type(v) == dict:\n        if 'name' in v:\n            print(prefix + '#' + v['name'])\n            del v['name']\n        prefix += '...'\n        for nk, nv in v.items():\n            if type(nv) in [dict, list]:\n                print(prefix + nk + ':')\n                printv(nv, prefix)\n            else:\n                print(prefix + nk + ':' + str(nv))\n    elif type(v) == list:\n        prefix += '...'\n        for i, nv in enumerate(v):\n            print(prefix + '#' + str(i))\n            printv(nv, prefix)\n    else:\n        prefix += '...'\n        print(prefix + str(v))\n\n\ndef make_batches(size, batch_size):\n    nb_batch = int(np.ceil(size/float(batch_size)))\n    return [(i*batch_size, min(size, (i+1)*batch_size)) for i in range(0, nb_batch)]\n\n\ndef slice_X(X, start=None, stop=None):\n    if type(X) == list:\n        if hasattr(start, '__len__'):\n            return [x[start] for x in X]\n        else:\n            return [x[start:stop] for x in X]\n    else:\n        if hasattr(start, '__len__'):\n            return X[start]\n        else:\n            return X[start:stop]\n\n\nclass Progbar(object):\n    def __init__(self, target, width=30, verbose=1):\n        '''\n            @param target: total number of steps expected\n        '''\n        self.width = width\n        self.target = target\n        self.sum_values = {}\n        self.unique_values = []\n        self.start = time.time()\n        self.total_width = 0\n        self.seen_so_far = 0\n        self.verbose = verbose\n\n    def update(self, current, values=[]):\n        '''\n            @param current: index of current step\n            @param values: list of tuples (name, value_for_last_step).\n            The progress bar will display averages for these values.\n        '''\n        for k, v in values:\n            if k not in self.sum_values:\n                self.sum_values[k] = [v * (current - self.seen_so_far), current - self.seen_so_far]\n                self.unique_values.append(k)\n            else:\n                self.sum_values[k][0] += v * (current - self.seen_so_far)\n                self.sum_values[k][1] += (current - self.seen_so_far)\n        self.seen_so_far = current\n\n        now = time.time()\n        if self.verbose == 1:\n            prev_total_width = self.total_width\n            sys.stdout.write(\"\\b\" * prev_total_width)\n            sys.stdout.write(\"\\r\")\n\n            numdigits = int(np.floor(np.log10(self.target))) + 1\n            barstr = '%%%dd/%%%dd [' % (numdigits, numdigits)\n            bar = barstr % (current, self.target)\n            prog = float(current)/self.target\n            prog_width = int(self.width*prog)\n            if prog_width > 0:\n                bar += 
('.'*(prog_width-1))\n                if current < self.target:\n                    bar += '(-w-)'\n                else:\n                    bar += '(-v-)!!'\n            bar += ('~' * (self.width-prog_width))\n            bar += ']'\n            sys.stdout.write(bar)\n            self.total_width = len(bar)\n\n            if current:\n                time_per_unit = (now - self.start) / current\n            else:\n                time_per_unit = 0\n            eta = time_per_unit*(self.target - current)\n            info = ''\n            if current < self.target:\n                info += ' - ETA: %ds' % eta\n            else:\n                info += ' - %ds' % (now - self.start)\n            for k in self.unique_values:\n                if k == 'perplexity' or k == 'PPL':\n                    info += ' - %s: %.4f' % (k, np.exp(self.sum_values[k][0] / max(1, self.sum_values[k][1])))\n                else:\n                    info += ' - %s: %.4f' % (k, self.sum_values[k][0] / max(1, self.sum_values[k][1]))\n\n            self.total_width += len(info)\n            if prev_total_width > self.total_width:\n                info += ((prev_total_width-self.total_width) * \" \")\n\n            sys.stdout.write(info)\n            sys.stdout.flush()\n\n            if current >= self.target:\n                sys.stdout.write(\"\\n\")\n\n        if self.verbose == 2:\n            if current >= self.target:\n                info = '%ds' % (now - self.start)\n                for k in self.unique_values:\n                    info += ' - %s: %.4f' % (k, self.sum_values[k][0] / max(1, self.sum_values[k][1]))\n                sys.stdout.write(info + \"\\n\")\n\n    def add(self, n, values=[]):\n        self.update(self.seen_so_far + n, values)\n\n    def clear(self):\n        self.sum_values = {}\n        self.unique_values = []\n        self.total_width = 0\n        self.seen_so_far = 0\n\n\ndef print_sample(idx2word, idx):\n    def cut_eol(words):\n        for i, word in enumerate(words):\n            if words[i] == '<eol>':\n                return words[:i + 1]\n        raise Exception(\"No end-of-line found\")\n\n    return cut_eol(map(lambda w_idx : idx2word[w_idx], idx))\n\n\ndef visualize_(subplots, data, w=None, h=None, name=None,\n               display='on', size=10, text=None, normal=True,\n               grid=False):\n    fig, ax = subplots\n    if data.ndim == 1:\n        if w and h:\n            # vector visualization\n            assert w * h == np.prod(data.shape)\n            data = data.reshape((w, h))\n        else:\n            L = data.shape[0]\n            w = int(np.sqrt(L))\n            while L % w > 0:\n                w -= 1\n            h = L / w\n            assert w * h == np.prod(data.shape)\n            data = data.reshape((w, h))\n    else:\n        w = data.shape[0]\n        h = data.shape[1]\n\n    if not size:\n        size = 30 / np.sqrt(w * h)\n\n    print data.shape\n\n    major_ticks = np.arange(0, h, 1)\n    ax.set_xticks(major_ticks)\n    ax.set_xlim(0, h)\n    major_ticks = np.arange(0, w, 1)\n    ax.set_ylim(w, -1)\n    ax.set_yticks(major_ticks)\n    ax.set_aspect('equal')\n    if grid:\n        pass\n        ax.grid(which='both')\n        # ax.axis('equal')\n    if normal:\n        cax = ax.imshow(data, cmap=plt.cm.pink, interpolation='nearest',\n                        vmax=1.0, vmin=0.0, aspect='auto')\n    else:\n        cax = ax.imshow(data, cmap=plt.cm.bone, interpolation='nearest', aspect='auto')\n\n    if name:\n        ax.set_title(name)\n    
else:\n        ax.set_title('sample.')\n    import matplotlib.ticker as ticker\n\n    # ax.xaxis.set_ticks(np.arange(0, h, 1.))\n    # ax.xaxis.set_major_formatter(ticker.FormatStrFormatter('%0.1f'))\n    # ax.yaxis.set_ticks(np.arange(0, w, 1.))\n    # ax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%0.1f'))\n\n    # ax.set_xticks(np.linspace(0, 1, h))\n    # ax.set_yticks(np.linspace(0, 1, w))\n    # Move left and bottom spines outward by 10 points\n    # ax.spines['left'].set_position(('outward', size))\n    # ax.spines['bottom'].set_position(('outward', size))\n    # # Hide the right and top spines\n    # ax.spines['right'].set_visible(False)\n    # ax.spines['top'].set_visible(False)\n    # # Only show ticks on the left and bottom spines\n    # ax.yaxis.set_ticks_position('left')\n    # ax.xaxis.set_ticks_position('bottom')\n\n    if text:\n        ax.set_yticks(np.linspace(0, 1, 33) * size * 3.2)\n        ax.set_yticklabels([text[s] for s in xrange(33)])\n    # cbar = fig.colorbar(cax)\n\n    if display == 'on':\n        plt.show()\n    else:\n        return ax\n\n\ndef vis_Gaussian(subplot, mean, std, name=None, display='off', size=10):\n    ax   = subplot\n    data = np.random.normal(size=(2, 10000))\n    data[0] = data[0] * std[0] + mean[0]\n    data[1] = data[1] * std[1] + mean[1]\n\n    ax.scatter(data[0].tolist(), data[1].tolist(), c='r', marker='.')\n    if display == 'on':\n        plt.show()\n    else:\n        return ax"
  },
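  {
    "path": "emolga/utils/generic_utils_example.py",
    "content": "# coding=utf-8\n\"\"\"\nNOTE: a hypothetical usage sketch, not part of the original repository.\nProgbar (defined in generic_utils.py) displays running averages of the\n(name, value) pairs passed to update(); the names 'perplexity' and 'PPL'\nare exponentiated before display. Assumes emolga is on the python path.\n\"\"\"\nimport time\nfrom emolga.utils.generic_utils import Progbar\n\nprogbar = Progbar(10)                      # 10 expected steps\nfor it in range(10):\n    time.sleep(0.05)                       # stand-in for a training step\n    progbar.update(it + 1, [('loss', 1. / (it + 1))])\n"
  },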
  {
    "path": "emolga/utils/io_utils.py",
    "content": "from __future__ import absolute_import\nimport h5py\nimport numpy as np\nimport cPickle\nfrom collections import defaultdict\n\n\nclass HDF5Matrix():\n    refs = defaultdict(int)\n\n    def __init__(self, datapath, dataset, start, end, normalizer=None):\n        if datapath not in list(self.refs.keys()):\n            f = h5py.File(datapath)\n            self.refs[datapath] = f\n        else:\n            f = self.refs[datapath]\n        self.start = start\n        self.end = end\n        self.data = f[dataset]\n        self.normalizer = normalizer\n\n    def __len__(self):\n        return self.end - self.start\n\n    def __getitem__(self, key):\n        if isinstance(key, slice):\n            if key.stop + self.start <= self.end:\n                idx = slice(key.start+self.start, key.stop + self.start)\n            else:\n                raise IndexError\n        elif isinstance(key, int):\n            if key + self.start < self.end:\n                idx = key+self.start\n            else:\n                raise IndexError\n        elif isinstance(key, np.ndarray):\n            if np.max(key) + self.start < self.end:\n                idx = (self.start + key).tolist()\n            else:\n                raise IndexError\n        elif isinstance(key, list):\n            if max(key) + self.start < self.end:\n                idx = [x + self.start for x in key]\n            else:\n                raise IndexError\n        if self.normalizer is not None:\n            return self.normalizer(self.data[idx])\n        else:\n            return self.data[idx]\n\n    @property\n    def shape(self):\n        return tuple([self.end - self.start, self.data.shape[1]])\n\n\ndef save_array(array, name):\n    import tables\n    f = tables.open_file(name, 'w')\n    atom = tables.Atom.from_dtype(array.dtype)\n    ds = f.createCArray(f.root, 'data', atom, array.shape)\n    ds[:] = array\n    f.close()\n\n\ndef load_array(name):\n    import tables\n    f = tables.open_file(name)\n    array = f.root.data\n    a = np.empty(shape=array.shape, dtype=array.dtype)\n    a[:] = array[:]\n    f.close()\n    return a\n\n\ndef save_config():\n    pass\n\n\ndef load_config():\n    pass"
  },
  {
    "path": "emolga/utils/np_utils.py",
    "content": "from __future__ import absolute_import\nimport numpy as np\nimport scipy as sp\nfrom six.moves import range\nfrom six.moves import zip\n\n\ndef to_categorical(y, nb_classes=None):\n    '''Convert class vector (integers from 0 to nb_classes)\n    to binary class matrix, for use with categorical_crossentropy\n    '''\n    y = np.asarray(y, dtype='int32')\n    if not nb_classes:\n        nb_classes = np.max(y)+1\n    Y = np.zeros((len(y), nb_classes))\n    for i in range(len(y)):\n        Y[i, y[i]] = 1.\n    return Y\n\n\ndef normalize(a, axis=-1, order=2):\n    l2 = np.atleast_1d(np.linalg.norm(a, order, axis))\n    l2[l2 == 0] = 1\n    return a / np.expand_dims(l2, axis)\n\n\ndef binary_logloss(p, y):\n    epsilon = 1e-15\n    p = sp.maximum(epsilon, p)\n    p = sp.minimum(1-epsilon, p)\n    res = sum(y * sp.log(p) + sp.subtract(1, y) * sp.log(sp.subtract(1, p)))\n    res *= -1.0/len(y)\n    return res\n\n\ndef multiclass_logloss(P, Y):\n    score = 0.\n    npreds = [P[i][Y[i]-1] for i in range(len(Y))]\n    score = -(1. / len(Y)) * np.sum(np.log(npreds))\n    return score\n\n\ndef accuracy(p, y):\n    return np.mean([a == b for a, b in zip(p, y)])\n\n\ndef probas_to_classes(y_pred):\n    if len(y_pred.shape) > 1 and y_pred.shape[1] > 1:\n        return categorical_probas_to_classes(y_pred)\n    return np.array([1 if p > 0.5 else 0 for p in y_pred])\n\n\ndef categorical_probas_to_classes(p):\n    return np.argmax(p, axis=1)\n"
  },
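  {
    "path": "emolga/utils/np_utils_example.py",
    "content": "# coding=utf-8\n\"\"\"\nNOTE: a hypothetical usage sketch, not part of the original repository.\nto_categorical (np_utils.py) one-hot encodes an integer class vector for\nuse with a categorical cross-entropy loss.\n\"\"\"\nfrom emolga.utils.np_utils import to_categorical\n\nprint to_categorical([0, 2, 1], nb_classes=3)\n# -> [[ 1.  0.  0.]\n#     [ 0.  0.  1.]\n#     [ 0.  1.  0.]]\n"
  },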
  {
    "path": "emolga/utils/test_utils.py",
    "content": "import numpy as np\n\n\ndef get_test_data(nb_train=1000, nb_test=500, input_shape=(10,), output_shape=(2,),\n                  classification=True, nb_class=2):\n    '''\n        classification=True overrides output_shape\n        (i.e. output_shape is set to (1,)) and the output\n        consists in integers in [0, nb_class-1].\n\n        Otherwise: float output with shape output_shape.\n    '''\n    nb_sample = nb_train + nb_test\n    if classification:\n        y = np.random.randint(0, nb_class, size=(nb_sample, 1))\n        X = np.zeros((nb_sample,) + input_shape)\n        for i in range(nb_sample):\n            X[i] = np.random.normal(loc=y[i], scale=1.0, size=input_shape)\n    else:\n        y_loc = np.random.random((nb_sample,))\n        X = np.zeros((nb_sample,) + input_shape)\n        y = np.zeros((nb_sample,) + output_shape)\n        for i in range(nb_sample):\n            X[i] = np.random.normal(loc=y_loc[i], scale=1.0, size=input_shape)\n            y[i] = np.random.normal(loc=y_loc[i], scale=1.0, size=output_shape)\n\n    return (X[:nb_train], y[:nb_train]), (X[nb_train:], y[nb_train:])\n"
  },
  {
    "path": "emolga/utils/theano_utils.py",
    "content": "from __future__ import absolute_import\n\nfrom theano import gof\nfrom theano.tensor import basic as tensor\nimport numpy as np\nimport theano\nimport theano.tensor as T\n\n\ndef floatX(X):\n    return np.asarray(X, dtype=theano.config.floatX)\n\n\ndef sharedX(X, dtype=theano.config.floatX, name=None):\n    return theano.shared(np.asarray(X, dtype=dtype), name=name)\n\n\ndef shared_zeros(shape, dtype=theano.config.floatX, name=None):\n    return sharedX(np.zeros(shape), dtype=dtype, name=name)\n\n\ndef shared_scalar(val=0., dtype=theano.config.floatX, name=None):\n    return theano.shared(np.cast[dtype](val), name=name)\n\n\ndef shared_ones(shape, dtype=theano.config.floatX, name=None):\n    return sharedX(np.ones(shape), dtype=dtype, name=name)\n\n\ndef alloc_zeros_matrix(*dims):\n    return T.alloc(np.cast[theano.config.floatX](0.), *dims)\n\n\ndef alloc_ones_matrix(*dims):\n    return T.alloc(np.cast[theano.config.floatX](1.), *dims)\n\n\ndef ndim_tensor(ndim):\n    if ndim == 1:\n        return T.vector()\n    elif ndim == 2:\n        return T.matrix()\n    elif ndim == 3:\n        return T.tensor3()\n    elif ndim == 4:\n        return T.tensor4()\n    return T.matrix()\n\n\n# get int32 tensor\ndef ndim_itensor(ndim, name=None):\n    if ndim == 2:\n        return T.imatrix(name)\n    elif ndim == 3:\n        return T.itensor3(name)\n    elif ndim == 4:\n        return T.itensor4(name)\n    return T.imatrix(name)\n\n\n# dot-product\ndef dot(inp, matrix, bias=None):\n    \"\"\"\n    Decide the right type of dot product depending on the input\n    arguments\n    \"\"\"\n    if 'int' in inp.dtype and inp.ndim == 2:\n        return matrix[inp.flatten()]\n    elif 'int' in inp.dtype:\n        return matrix[inp]\n    elif 'float' in inp.dtype and inp.ndim == 3:\n        shape0 = inp.shape[0]\n        shape1 = inp.shape[1]\n        shape2 = inp.shape[2]\n        if bias:\n            return (T.dot(inp.reshape((shape0 * shape1, shape2)), matrix) + bias).reshape((shape0, shape1, matrix.shape[1]))\n        else:\n            return T.dot(inp.reshape((shape0 * shape1, shape2)), matrix).reshape((shape0, shape1, matrix.shape[1]))\n    else:\n        if bias:\n            return T.dot(inp, matrix) + bias\n        else:\n            return T.dot(inp, matrix)\n\n\n# Numerically stable log(sum(exp(A))). Can also be used in softmax function.\ndef logSumExp(x, axis=None, mask=None, status='theano', c=None, err=1e-7):\n    \"\"\"\n        Numerically stable log(sum(exp(A))). 
Can also be used in softmax function.\n        c is an additional input that does not require masking, while x does.\n\n    \"\"\"\n    if status == 'theano':\n        J = T\n    else:\n        J = np\n\n    if c is None:\n        x_max = J.max(x, axis=axis, keepdims=True)\n    else:\n        x_max = J.max(J.concatenate([c, x], axis=-1), axis=axis, keepdims=True)\n\n    if c is None:\n        if mask is None:\n            l_t = J.sum(J.exp(x - x_max), axis=axis, keepdims=True)\n\n        else:\n            l_t = J.sum(J.exp(x - x_max) * mask, axis=axis, keepdims=True)\n    else:\n        if mask is None:\n            l_t = J.sum(J.exp(x - x_max), axis=axis, keepdims=True) + \\\n                  J.sum(J.exp(c - x_max), axis=axis, keepdims=True)\n        else:\n            l_t = J.sum(J.exp(x - x_max) * mask, axis=axis, keepdims=True) + \\\n                  J.sum(J.exp(c - x_max), axis=axis, keepdims=True)\n\n    x_t = J.log(J.maximum(l_t, err)) + x_max\n    return x_t\n\n\ndef softmax(x):\n    return T.nnet.softmax(x.reshape((-1, x.shape[-1]))).reshape(x.shape)\n\n\ndef masked_softmax(x, mask, err=1e-9):\n    assert x.ndim == 2, 'support two-dimension'\n    weights  = softmax(x)\n    weights *= mask\n    weights  = weights / (T.sum(weights, axis=-1)[:, None] + err) * mask\n    return weights\n\n\ndef cosine_sim(k, M):\n    k_unit = k / (T.sqrt(T.sum(k**2)) + 1e-5)\n    # T.patternbroadcast(k_unit.reshape((1,k_unit.shape[0])),(True,False))\n    k_unit = k_unit.dimshuffle(('x', 0))\n    k_unit.name = \"k_unit\"\n    M_lengths = T.sqrt(T.sum(M**2, axis=1)).dimshuffle((0, 'x'))\n    M_unit = M / (M_lengths + 1e-5)\n    M_unit.name = \"M_unit\"\n    return T.sum(k_unit * M_unit, axis=1)\n\n\ndef cosine_sim2d(k, M):\n    # k: (nb_samples, memory_width)\n    # M: (nb_samples, memory_dim, memory_width)\n\n    # norms of keys and memories\n    k_norm = T.sqrt(T.sum(T.sqr(k), 1)) + 1e-5  # (nb_samples,)\n    M_norm = T.sqrt(T.sum(T.sqr(M), 2)) + 1e-5  # (nb_samples, memory_dim,)\n\n    k      = k[:, None, :]                      # (nb_samples, 1, memory_width)\n    k_norm = k_norm[:, None]                    # (nb_samples, 1)\n\n    sim    = T.sum(k * M, axis=2)               # (nb_samples, memory_dim,)\n    sim   /= k_norm * M_norm                    # (nb_samples, memory_dim,)\n    return sim\n\n\ndef dot_2d(k, M, b=None, g=None):\n    # k: (nb_samples, memory_width)\n    # M: (nb_samples, memory_dim, memory_width)\n\n    # norms of keys and memories\n    # k_norm = T.sqrt(T.sum(T.sqr(k), 1)) + 1e-5  # (nb_samples,)\n    # M_norm = T.sqrt(T.sum(T.sqr(M), 2)) + 1e-5  # (nb_samples, memory_dim,)\n\n    k      = k[:, None, :]                      # (nb_samples, 1, memory_width)\n    value  = k * M\n    if b is not None:\n        b  = b[:, None, :]\n        value *= b         # (nb_samples, memory_dim,)\n\n    if g is not None:\n        g  = g[None, None, :]\n        value *= g\n\n    sim    = T.sum(value, axis=2)\n    return sim\n\n\ndef shift_convolve(weight, shift, shift_conv):\n    shift = shift.dimshuffle((0, 'x'))\n    return T.sum(shift * weight[shift_conv], axis=0)\n\n\ndef shift_convolve2d(weight, shift, shift_conv):\n    return T.sum(shift[:, :, None] * weight[:, shift_conv], axis=1)\n"
  },
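  {
    "path": "emolga/utils/theano_utils_logsumexp_example.py",
    "content": "# coding=utf-8\n\"\"\"\nNOTE: a hypothetical numpy-only sketch, not part of the original\nrepository. It demonstrates why logSumExp in theano_utils.py subtracts\nthe per-row max before exponentiating: the naive form overflows for\nlarge logits, the shifted form does not.\n\"\"\"\nimport numpy as np\n\n\ndef log_sum_exp(x, axis=-1, err=1e-7):\n    # numpy counterpart of logSumExp(x, axis, status='numpy'), no mask/c.\n    x_max = np.max(x, axis=axis, keepdims=True)\n    l_t = np.sum(np.exp(x - x_max), axis=axis, keepdims=True)\n    return np.log(np.maximum(l_t, err)) + x_max\n\n\nif __name__ == '__main__':\n    x = np.asarray([[1000., 1001., 1002.]])\n    print np.log(np.sum(np.exp(x), axis=-1))   # naive: overflows to inf\n    print log_sum_exp(x)                        # stable: ~1002.4076\n"
  },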
  {
    "path": "experiments/__init__.py",
    "content": "__author__ = 'jiataogu'"
  },
  {
    "path": "experiments/bst_dataset.py",
    "content": "# coding=utf-8\n__author__ = 'jiataogu'\nfrom emolga.dataset.build_dataset import deserialize_from_file, serialize_to_file\nimport numpy.random as n_rng\n\n\nclass BSTnode(object):\n    \"\"\"\nRepresentation of a node in a binary search tree.\nHas a left child, right child, and key value, and stores its subtree size.\n\"\"\"\n    def __init__(self, parent, t):\n        \"\"\"Create a new leaf with key t.\"\"\"\n        self.key = t\n        self.parent = parent\n        self.left = None\n        self.right = None\n        self.size = 1\n\n    def update_stats(self):\n        \"\"\"Updates this node's size based on its children's sizes.\"\"\"\n        self.size = (0 if self.left is None else self.left.size) + (0 if self.right is None else self.right.size)\n\n    def insert(self, t, NodeType):\n        \"\"\"Insert key t into the subtree rooted at this node (updating subtree size).\"\"\"\n        self.size += 1\n        if t < self.key:\n            if self.left is None:\n                self.left = NodeType(self, t)\n                return self.left\n            else:\n                return self.left.insert(t, NodeType)\n        elif t > self.key:\n            if self.right is None:\n                self.right = NodeType(self, t)\n                return self.right\n            else:\n                return self.right.insert(t, NodeType)\n        else:\n            return self\n\n    def find(self, t):\n        \"\"\"Return the node for key t if it is in this tree, or None otherwise.\"\"\"\n        if t == self.key:\n            return self\n        elif t < self.key:\n            if self.left is None:\n                return None\n            else:\n                return self.left.find(t)\n        else:\n            if self.right is None:\n                return None\n            else:\n                return self.right.find(t)\n\n    def rank(self, t):\n        \"\"\"Return the number of keys <= t in the subtree rooted at this node.\"\"\"\n        left_size = 0 if self.left is None else self.left.size\n        if t == self.key:\n            return left_size + 1\n        elif t < self.key:\n            if self.left is None:\n                return 0\n            else:\n                return self.left.rank(t)\n        else:\n            if self.right is None:\n                return left_size + 1\n            else:\n                return self.right.rank(t) + left_size + 1\n\n    def minimum(self):\n        \"\"\"Returns the node with the smallest key in the subtree rooted by this node.\"\"\"\n        current = self\n        while current.left is not None:\n            current = current.left\n        return current\n\n\n    def successor(self):\n        \"\"\"Returns the node with the smallest key larger than this node's key, or None if this has the largest key in the tree.\"\"\"\n        if self.right is not None:\n            return self.right.minimum()\n        current = self\n        while current.parent is not None and current.parent.right is current:\n            current = current.parent\n        return current.parent\n\n    def delete(self):\n        \"\"\"\"Delete this node from the tree.\"\"\"\n        if self.left is None or self.right is None:\n            if self is self.parent.left:\n                self.parent.left = self.left or self.right\n                if self.parent.left is not None:\n                    self.parent.left.parent = self.parent\n            else:\n                self.parent.right = self.left or self.right\n                if 
self.parent.right is not None:\n                    self.parent.right.parent = self.parent\n            current = self.parent\n            while current.key is not None:\n                current.update_stats()\n                current = current.parent\n            return self\n        else:\n            s = self.successor()\n            self.key, s.key = s.key, self.key\n            return s.delete()\n\n    def check(self, lokey, hikey):\n        \"\"\"Checks that the subtree rooted at t is a valid BST and all keys are between (lokey, hikey).\"\"\"\n        if lokey is not None and self.key <= lokey:\n            raise \"BST RI violation\"\n        if hikey is not None and self.key >= hikey:\n            raise \"BST RI violation\"\n        if self.left is not None:\n            if self.left.parent is not self:\n                raise \"BST RI violation\"\n            self.left.check(lokey, self.key)\n        if self.right is not None:\n            if self.right.parent is not self:\n                raise \"BST RI violation\"\n            self.right.check(self.key, hikey)\n        if self.size != 1 + (0 if self.left is None else self.left.size) + (0 if self.right is None else self.right.size):\n            raise \"BST RI violation\"\n\n    def __repr__(self):\n        return \"<BST Node, key:\" + str(self.key) + \">\"\n\n\nclass BST(object):\n    \"\"\"\n    Simple binary search tree implementation, augmented with subtree sizes.\n    This BST supports insert, find, and delete-min operations.\n    Each tree contains some (possibly 0) BSTnode objects, representing nodes,\n    and a pointer to the root.\n    \"\"\"\n\n    def __init__(self, NodeType=BSTnode):\n        self.root = None\n        self.NodeType = NodeType\n        self.psroot = self.NodeType(None, None)\n\n    def reroot(self):\n        self.root = self.psroot.left\n\n    def insert(self, t):\n        \"\"\"Insert key t into this BST, modifying it in-place.\"\"\"\n        if self.root is None:\n            self.psroot.left = self.NodeType(self.psroot, t)\n            self.reroot()\n            return self.root\n        else:\n            return self.root.insert(t, self.NodeType)\n\n    def find(self, t):\n        \"\"\"Return the node for key t if is in the tree, or None otherwise.\"\"\"\n        if self.root is None:\n            return None\n        else:\n            return self.root.find(t)\n\n    def rank(self, t):\n        \"\"\"The number of keys <= t in the tree.\"\"\"\n        if self.root is None:\n            return 0\n        else:\n            return self.root.rank(t)\n\n    def delete(self, t):\n        \"\"\"Delete the node for key t if it is in the tree.\"\"\"\n        node = self.find(t)\n        deleted = self.root.delete()\n        self.reroot()\n        return deleted\n\n    def check(self):\n        if self.root is not None:\n            self.root.check(None, None)\n\n    def __str__(self):\n        if self.root is None:\n            return '<empty tree>'\n\n        def nested(node):\n            if node is None:\n                return '0'\n            head  = str(node.key)\n            left  = nested(node.left)\n            right = nested(node.right)\n\n            if left == '0' and right == '0':\n                return head\n            else:\n                return ' '.join(['(', head, left, right, ')'])\n\n        return nested(self.root)\n\n        # def recurse(node):\n        #     if node is None:\n        #         return [], 0, 0\n        #     label = str(node.key)\n        #     left_lines, 
left_pos, left_width = recurse(node.left)\n        #     right_lines, right_pos, right_width = recurse(node.right)\n        #     middle = max(right_pos + left_width - left_pos + 1, len(label), 2)\n        #     pos = left_pos + middle // 2\n        #     width = left_pos + middle + right_width - right_pos\n        #     while len(left_lines) < len(right_lines):\n        #         left_lines.append(' ' * left_width)\n        #     while len(right_lines) < len(left_lines):\n        #         right_lines.append(' ' * right_width)\n        #     if (middle - len(label)) % 2 == 1 and node.parent is not None and \\\n        #        node is node.parent.left and len(label) < middle:\n        #         label += '.'\n        #     label = label.center(middle, '.')\n        #     if label[0] == '.': label = ' ' + label[1:]\n        #     if label[-1] == '.': label = label[:-1] + ' '\n        #     lines = [' ' * left_pos + label + ' ' * (right_width - right_pos),\n        #              ' ' * left_pos + '/' + ' ' * (middle-2) +\n        #              '\\\\' + ' ' * (right_width - right_pos)] + \\\n        #       [left_line + ' ' * (width - left_width - right_width) +\n        #        right_line\n        #        for left_line, right_line in zip(left_lines, right_lines)]\n        #     return lines, pos, width\n        # return '\\n'.join(recurse(self.root) [0])\n\ntest1 = range(0, 100, 10)\ntest2 = [31, 41, 59, 26, 53, 58, 97, 93, 23]\ntest3 = \"algorithms\"\n\n\ndef printsizes(node):\n    if node is None:\n        print \"node is nil\"\n    else:\n        print \"node\", node.key, \"has a subtree of size\", node.size\n\n\ndef test(args=None, BSTtype=BST):\n    import random, sys\n    random.seed(19920206)\n    if not args:\n        args = sys.argv[1:]\n    if not args:\n        print 'usage: %s <number-of-random-items | item item item ...>' % \\\n              sys.argv[0]\n        sys.exit()\n    elif len(args) == 1:\n        items = (random.randrange(100) for i in xrange(int(args[0])))\n    else:\n        items = [int(i) for i in args]\n\n    tree   = BSTtype()\n    source = []\n    for item in items:\n        tree.insert(item)\n        source += [str(item)]\n    print ' '.join(source)\n    print tree\n\n\ndef generate():\n    import random, sys\n    random.seed(19920206)\n\n    Lmin   = 2 ** 2 - 1\n    Lmax   = 2 ** 4 - 1\n    Xnum   = 1000000\n    voc    = 26\n\n    wfile  = open('/home/thoma/Work/Dial-DRL/dataset/BST_1M.txt', 'w')\n    for id in xrange(Xnum):\n        tree   = BST()\n        items  = (random.randrange(voc) for i in\n                 xrange(random.randint(Lmin, Lmax)))\n        source = []\n        for item in items:\n            item = chr(item + 65)\n            tree.insert(item)\n            source += [str(item)]\n        source = ' '.join(source)\n        target = str(tree)\n        line   = '{0} -> {1}'.format(source, target)\n        wfile.write(line + '\\n')\n        if id % 10000 == 0:\n            print id\n\n\ndef obtain_dataset():\n    rfile = open('/home/thoma/Work/Dial-DRL/dataset/BST_1M.txt', 'r')\n    line  = rfile.readline()\n\n    word2idx = dict()\n    word2idx['<eol>'] = 0\n    word2idx['<unk>'] = 1\n    pairs    = []\n    at       = 2\n    lines    = 0\n    while line:\n        lines += 1\n        line   = line.strip()\n        source, target = line.split('->')\n        source = source.split()\n        target = target.split()\n\n        for w in source:\n            if w not in word2idx:\n                word2idx[w] = at\n                at += 1\n        
for w in target:\n            if w not in word2idx:\n                word2idx[w] = at\n                at += 1\n        pairs.append((source, target))\n        if lines % 20000 == 0:\n            print lines\n        line = rfile.readline()\n\n    idx2word = dict()\n    for v, k in word2idx.items():\n        idx2word[k] = v\n\n    Lmax     = len(idx2word)\n    print 'read dataset ok.'\n    print Lmax\n    for i in xrange(Lmax):\n        print idx2word[i]\n\n    def build_data(data):\n        instance = dict(text=[], summary=[], source=[], target=[], target_c=[])\n        for pair in data:\n            source, target = pair\n            A = [word2idx[w] for w in source]\n            B = [word2idx[w] for w in target]\n            # C = np.asarray([[w == l for w in source] for l in target], dtype='float32')\n            C = [0 if w not in source else source.index(w) + Lmax for w in target]\n\n            instance['text']      += [source]\n            instance['summary']   += [target]\n            instance['source']    += [A]\n            instance['target']    += [B]\n            # instance['cc_matrix'] += [C]\n            instance['target_c'] += [C]\n\n        print instance['target'][5000]\n        print instance['target_c'][5000]\n        return instance\n\n    train_set = build_data(pairs[100000:])\n    test_set  = build_data(pairs[:100000])\n    serialize_to_file([train_set, test_set, idx2word, word2idx],\n                      '/home/thoma/Work/Dial-DRL/dataset/BST_1M.data.pkl')\n\n\nif __name__ == '__main__':\n    generate()\n    obtain_dataset()"
  },
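  {
    "path": "experiments/bst_dataset_copy_example.py",
    "content": "# coding=utf-8\n\"\"\"\nNOTE: a hypothetical sketch, not part of the original repository.\nIt isolates the two copy-mode encodings used around it: the per-token\ncopy targets built in bst_dataset.py (first source position, shifted by\nthe vocabulary size Lmax) and the dense copy matrix built by cc_martix\nin bst_vest.py (cc[j][i] = 1 iff target token j equals source token i).\n\"\"\"\nimport numpy as np\n\n\ndef copy_targets(source, target, voc_size):\n    # 0 if the target word cannot be copied from the source, otherwise\n    # its first source position offset beyond the ordinary word ids.\n    return [0 if w not in source else source.index(w) + voc_size\n            for w in target]\n\n\ndef cc_matrix(source, target):\n    # source: (nb_samples, len_s), target: (nb_samples, len_t); id 0 = pad.\n    cc = np.zeros((source.shape[0], target.shape[1], source.shape[1]), dtype='float32')\n    for k in xrange(source.shape[0]):\n        for j in xrange(target.shape[1]):\n            for i in xrange(source.shape[1]):\n                if source[k, i] == target[k, j] and source[k, i] > 0:\n                    cc[k, j, i] = 1.\n    return cc\n\n\nif __name__ == '__main__':\n    source = [5, 7, 9]\n    target = [7, 3, 9]\n    print copy_targets(source, target, voc_size=100)   # -> [101, 0, 102]\n    print cc_matrix(np.asarray([source], dtype='int32'),\n                    np.asarray([target], dtype='int32'))[0]\n    # row 0: 7 copied from position 1; row 1: 3 absent; row 2: 9 from position 2\n"
  },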
  {
    "path": "experiments/bst_vest.py",
    "content": "# coding=utf-8\n\"\"\"\nThis is the implementation of Copy-NET\nWe start from the basic Seq2seq framework for a auto-encoder.\n\"\"\"\nimport logging\nimport time\nimport numpy as np\nimport sys\nimport copy\n\nfrom theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams\nfrom experiments.config import setup_lcsts, setup_weibo, setup_syn, setup_bst\nfrom emolga.utils.generic_utils import *\nfrom emolga.models.covc_encdec import NRM\nfrom emolga.models.encdec import NRM as NRM0\nfrom emolga.dataset.build_dataset import deserialize_from_file\nfrom collections import OrderedDict\nfrom fuel import datasets\nfrom fuel import transformers\nfrom fuel import schemes\n\n# setup = setup_lcsts\n# setup = setup_syn\nsetup = setup_bst\n\n\ndef init_logging(logfile):\n    formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(module)s: %(message)s',\n                                  datefmt='%m/%d/%Y %H:%M:%S'   )\n    fh = logging.FileHandler(logfile)\n    # ch = logging.StreamHandler()\n\n    fh.setFormatter(formatter)\n    # ch.setFormatter(formatter)\n    # fh.setLevel(logging.INFO)\n    # ch.setLevel(logging.INFO)\n    # logging.getLogger().addHandler(ch)\n    logging.getLogger().addHandler(fh)\n    logging.getLogger().setLevel(logging.INFO)\n\n    return logging\n\n# prepare logging.\ntmark   = time.strftime('%Y%m%d-%H%M%S', time.localtime(time.time()))\nconfig  = setup()   # load settings.\nfor w in config:\n    print '{0}={1}'.format(w, config[w])\n\nlogger  = init_logging(config['path_log'] + '/experiments.CopyLCSTS.id={}.log'.format(tmark))\nn_rng   = np.random.RandomState(config['seed'])\nnp.random.seed(config['seed'])\nrng     = RandomStreams(n_rng.randint(2 ** 30))\nlogger.info('Start!')\n\ntrain_set, test_set, idx2word, word2idx = deserialize_from_file(config['dataset'])\n\nif config['voc_size'] == -1:   # not use unk\n    config['enc_voc_size'] = len(word2idx)\n    config['dec_voc_size'] = config['enc_voc_size']\nelse:\n    config['enc_voc_size'] = config['voc_size']\n    config['dec_voc_size'] = config['enc_voc_size']\n\nsamples  = len(train_set['source'])\nlogger.info('build dataset done. 
' +\n            'dataset size: {} ||'.format(samples) +\n            'vocabulary size = {0}/ batch size = {1}'.format(\n        config['dec_voc_size'], config['batch_size']))\n\n\ndef build_data(data):\n    # create fuel dataset.\n    dataset     = datasets.IndexableDataset(indexables=OrderedDict([('source', data['source']),\n                                                                    ('target', data['target']),\n                                                                    ('target_c', data['target_c']),\n                                                                    ]))\n    dataset.example_iteration_scheme \\\n                = schemes.ShuffledExampleScheme(dataset.num_examples)\n    return dataset\n\n\ntrain_data        = build_data(train_set)\ntrain_data_plain  = zip(*(train_set['source'], train_set['target']))\ntest_data_plain   = zip(*(test_set['source'],  test_set['target']))\n\n# train_data_plain  = zip(*(train_set['source'], train_set['target']))\n# test_data_plain   = zip(*(test_set['source'],  test_set['target']))\n\ntrain_size        = len(train_data_plain)\ntest_size         = len(test_data_plain)\ntr_idx            = n_rng.permutation(train_size)[:2000].tolist()\nts_idx            = n_rng.permutation(test_size )[:2000].tolist()\nlogger.info('load the data ok.')\nnotrain           = False\n\n# build the agent\nif config['copynet']:\n    agent  = NRM(config, n_rng, rng, mode=config['mode'],\n                 use_attention=True, copynet=config['copynet'], identity=config['identity'])\nelse:\n    agent  = NRM0(config, n_rng, rng, mode=config['mode'],\n                  use_attention=True, copynet=config['copynet'], identity=config['identity'])\n\nagent.build_()\nif notrain:\n    agent.compile_('display')\nelse:\n    agent.compile_('all')\nprint 'compile ok.'\n\n# load the model\n# agent.load(config['path_h5'] +\n# '/experiments.Copy{2}.id={0}.epoch={1}.pkl'.format('20160229-105153', 1, config['modelname']))\n\necho   = 0\nepochs = 10\nskip   = -1  # 25000\nif echo > 0:\n    tmark = '20160229-105153'  # '20160227-013418'    # copynet multi-source model\n    agent.load(config['path_h5'] + '/experiments.Copy{2}.id={0}.epoch={1}.pkl'.format(tmark, echo, config['modelname']))\n\nwhile echo < epochs:\n    echo += 1\n    loss  = []\n\n    def output_stream(dataset, batch_size, size=1):\n        data_stream = dataset.get_example_stream()\n        data_stream = transformers.Batch(data_stream,\n                                         iteration_scheme=schemes.ConstantScheme(batch_size))\n\n        # add padding and masks to the dataset\n        data_stream = transformers.Padding(data_stream, mask_sources=('source', 'target'))\n        return data_stream\n\n    def prepare_batch(batch, mask, fix_len=None):\n        data = batch[mask].astype('int32')\n        data = np.concatenate([data, np.zeros((data.shape[0], 1), dtype='int32')], axis=1)\n\n        def cut_zeros(data, fix_len=None):\n            if fix_len is not None:\n                return data[:, : fix_len]\n            for k in range(data.shape[1] - 1, 0, -1):\n                data_col = data[:, k].sum()\n                if data_col > 0:\n                    return data[:, : k + 2]\n            return data\n        data = cut_zeros(data, fix_len)\n        return data\n\n    def cc_martix(source, target):\n        cc = np.zeros((source.shape[0], target.shape[1], source.shape[1]), dtype='float32')\n        for k in xrange(source.shape[0]):\n            for j in xrange(target.shape[1]):\n                for i in 
xrange(source.shape[1]):\n                    if (source[k, i] == target[k, j]) and (source[k, i] > 0):\n                        cc[k][j][i] = 1.\n        return cc\n\n    def unk_filter(data):\n        if config['voc_size'] == -1:\n            return copy.copy(data)\n        else:\n            mask = (np.less(data, config['voc_size'])).astype(dtype='int32')\n            data = copy.copy(data * mask + (1 - mask))\n            return data\n\n    # training\n    if not notrain:\n        train_batches = output_stream(train_data, config['batch_size']).get_epoch_iterator(as_dict=True)\n        logger.info('\\nEpoch = {} -> Training Set Learning...'.format(echo))\n        progbar = Progbar(train_size / config['batch_size'])\n        for it, batch in enumerate(train_batches):\n\n            # skip some iterations\n            if echo == 1  and it < skip:\n                continue\n\n            # obtain data\n            data_s = prepare_batch(batch, 'source')\n            data_t = prepare_batch(batch, 'target')\n            if config['copynet']:\n                data_c = cc_martix(data_s, data_t)\n                # data_c = prepare_batch(batch, 'target_c', data_t.shape[1])\n                loss += [agent.train_(unk_filter(data_s), unk_filter(data_t), data_c)]\n            else:\n                loss += [agent.train_(unk_filter(data_s), unk_filter(data_t))]\n\n            progbar.update(it, [('loss_reg', loss[-1][0]), ('ppl.', loss[-1][1])])\n\n            if it % 200 == 0:\n                logger.info('Echo={} Evaluation Sampling.'.format(it))\n                logger.info('generating [training set] samples')\n                for _ in xrange(5):\n                    idx              = int(np.floor(n_rng.rand() * train_size))\n                    train_s, train_t = train_data_plain[idx]\n                    v                = agent.evaluate_(np.asarray(train_s, dtype='int32'),\n                                                       np.asarray(train_t, dtype='int32'),\n                                                       idx2word,\n                                                       np.asarray(unk_filter(train_s), dtype='int32'))\n                    print '*' * 50\n\n                logger.info('generating [testing set] samples')\n                for _ in xrange(5):\n                    idx            = int(np.floor(n_rng.rand() * test_size))\n                    test_s, test_t = test_data_plain[idx]\n                    v              = agent.evaluate_(np.asarray(test_s, dtype='int32'),\n                                                     np.asarray(test_t, dtype='int32'),\n                                                     idx2word,\n                                                     np.asarray(unk_filter(test_s), dtype='int32'))\n                    print '*' * 50\n\n            # save the weights.\n            if it % 5000 == 0:\n                agent.save(config['path_h5'] + '/experiments.Copy{2}.id={0}.epoch={1}.pkl'.format(tmark, echo, config['modelname']))\n\n            if (it % 5000 == 0) and it > 0:\n                print 'testing accuracy !!'\n\n                def analysis_(data_plain, t_idx, mode='Training'):\n                    progbar_tr = Progbar(2000)\n                    print '\\n' + '__' * 50\n                    cpy, cpy_pos = 0, 0\n                    for it, idx in enumerate(t_idx):\n                        train_s, train_t = data_plain[idx]\n                        c = float(agent.analyse_(np.asarray(train_s, dtype='int32'),\n                                     
      np.asarray(train_t, dtype='int32'),\n                                           idx2word))\n                        # copy mode\n                        cpy     += 1\n                        cpy_pos += c\n                        progbar_tr.update(it + 1, [('Copy', cpy_pos)])\n                    logger.info('\\n{0} Accuracy:' +\n                                '\\t{1}/{2} = {3}%'.format(mode, cpy_pos, cpy, 100 * cpy_pos / float(cpy)))\n                    print '==' * 50\n\n                # analysis_(train_data_plain, tr_idx, 'Training')\n                analysis_(test_data_plain,  ts_idx, 'Testing')\n\n\n\n"
  },
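The `cc_matrix` helper in `bst_vest.py` above builds the copy-supervision tensor one cell at a time: `cc[k, j, i] = 1` exactly when target word `j` equals a non-padding source word `i` in sample `k`. The triple loop can be written as a single broadcast comparison; the sketch below is an equivalent reformulation for illustration, not code from the repository:

```python
import numpy as np

def cc_matrix_vectorized(source, target):
    # Broadcast-compare every target position against every source position,
    # then mask out padding (id 0); equivalent to the triple loop above.
    eq   = target[:, :, None] == source[:, None, :]   # (batch, |target|, |source|)
    mask = source[:, None, :] > 0
    return (eq & mask).astype('float32')

# one sample; word 7 occurs at source positions 0 and 2, id 0 is padding
src = np.asarray([[7, 4, 7, 0]], dtype='int32')
trg = np.asarray([[7, 9, 0]], dtype='int32')
print cc_matrix_vectorized(src, trg)[0]
# [[ 1.  0.  1.  0.]   target word 7: copyable from positions 0 and 2
#  [ 0.  0.  0.  0.]   target word 9: absent from the source
#  [ 0.  0.  0.  0.]]  padding row
```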
  {
    "path": "experiments/config.py",
    "content": "__author__ = 'jiataogu'\nimport os\nimport os.path as path\n\n\ndef setup():\n    config = dict()\n    # config['seed']            = 3030029828\n    config['seed']            = 19920206\n\n    config['use_noise']       = False\n    config['optimizer']       = 'adam'\n    config['save_updates']    = True\n    config['get_instance']    = True\n    config['path']            = '/home/thoma/Work/Dial-DRL'  # path.realpath(path.curdir) + '/'\n    config['dataset']         = config['path'] + '/dataset/bAbI/dataset-b.pkl'\n    config['voc']             = config['path'] + '/dataset/bAbI/voc-b.pkl'\n\n    # output log place\n    config['path_log']        = config['path'] + 'Logs'\n    if not os.path.exists(config['path_log']):\n        os.mkdir(config['path_log'])\n\n    # # output hdf5 file.\n    # config['weights_file']    = config['path'] + '/froslass/model-pool/'\n    # if not os.path.exists(config['weights_file']):\n    #     os.mkdir(config['weights_file'])\n\n    # size\n    config['batch_size']      = 20\n    config['mode']            = 'RNN'  # NTM\n    config['binary']          = False\n\n    # Encoder: Model\n    config['bidirectional']   = True\n    config['enc_use_contxt']  = False\n    config['enc_learn_nrm']   = True\n    config['enc_embedd_dim']  = 100    # 100\n    config['enc_hidden_dim']  = 150    # 180\n    config['enc_contxt_dim']  = 0\n    config['encoder']         = 'RNN'\n    config['pooling']         = False\n\n    # Decoder: dimension\n    config['dec_embedd_dim']  = 100  # 100\n    config['dec_hidden_dim']  = 150  # 180\n    config['dec_contxt_dim']  = config['enc_hidden_dim']       \\\n                                if not config['bidirectional'] \\\n                                else 2 * config['enc_hidden_dim']\n\n    # Decoder: CopyNet\n    config['copynet']         = True\n    config['identity']        = False\n\n    # Decoder: Model\n    config['shared_embed']    = False\n    config['use_input']       = True\n    config['bias_code']       = True\n    config['dec_use_contxt']  = True\n    config['deep_out']        = False\n    config['deep_out_activ']  = 'tanh'  # maxout2\n    config['bigram_predict']  = True\n    config['context_predict'] = True\n    config['dropout']         = 0.0  # 5\n    config['leaky_predict']   = False\n\n    config['dec_readout_dim'] = config['dec_hidden_dim']\n    if config['dec_use_contxt']:\n        config['dec_readout_dim'] += config['dec_contxt_dim']\n    if config['bigram_predict']:\n        config['dec_readout_dim'] += config['dec_embedd_dim']\n\n\n    # Decoder: sampling\n    config['max_len']         = 27\n    config['sample_beam']     = 8\n    config['sample_stoch']    = False\n    config['sample_argmax']   = False\n\n    # Gradient Tracking !!!\n    config['gradient_check']  = True\n    config['gradient_noise']  = True\n\n    config['skip_size']       = 15\n\n    for w in config:\n        print '{0} => {1}'.format(w, config[w])\n    print 'setup ok.'\n    return config\n\n\ndef setup_syn():\n    config = dict()\n    config['seed']            = 3030029828\n    # config['seed']            = 19920206\n\n    # model ids\n    # voc_size 10000:  20160224-021106\n    # voc_size 5000 :  20160224-144747 / 20160224-162424 (discard UNK)\n\n    config['use_noise']       = False\n    config['optimizer']       = 'adam'\n    config['save_updates']    = True\n    config['get_instance']    = True\n    config['path']            = path.realpath(path.curdir)\n    config['path_h5']         = config['path'] + '/H5'\n    # 
config['dataset']         = config['path'] + '/dataset/lcsts_data-word-full.pkl'\n    config['dataset']         = config['path'] + '/dataset/synthetic_data_c.pkl'\n    config['modelname']       = 'syn'\n\n    # output log place\n    config['path_log']        = config['path'] + '/Logs'\n    config['path_logX']       = config['path'] + '/LogX'\n    if not os.path.exists(config['path_log']):\n        os.mkdir(config['path_log'])\n    if not os.path.exists(config['path_logX']):\n        os.mkdir(config['path_logX'])\n\n    # # output hdf5 file.\n    # config['weights_file']    = config['path'] + '/froslass/model-pool/'\n    # if not os.path.exists(config['weights_file']):\n    #     os.mkdir(config['weights_file'])\n\n    # size\n    config['batch_size']      = 20\n    config['mode']            = 'RNN'  # NTM\n    config['binary']          = False\n    config['voc_size']        = -1     # 20000\n\n    # Encoder: Model\n    config['bidirectional']   = True\n    config['enc_use_contxt']  = False\n    config['enc_learn_nrm']   = True\n    config['enc_embedd_dim']  = 150    # 100\n    config['enc_hidden_dim']  = 300    # 180\n    config['enc_contxt_dim']  = 0\n    config['encoder']         = 'RNN'\n    config['pooling']         = False\n\n    config['encode_max_len']  = 57\n    config['decode_unk']      = False\n    config['explicit_loc']    = True\n\n    # Decoder: dimension\n    config['dec_embedd_dim']  = 150  # 100\n    config['dec_hidden_dim']  = 300  # 180\n    config['dec_contxt_dim']  = config['enc_hidden_dim']       \\\n                                if not config['bidirectional'] \\\n                                else 2 * config['enc_hidden_dim']\n    if config['explicit_loc']:\n        config['dec_contxt_dim'] += config['encode_max_len']\n\n    # Decoder: CopyNet\n    config['copynet']         = True   # False\n    config['identity']        = False\n    config['location_embed']  = True\n    config['coverage']        = True\n    config['copygate']        = False\n\n    # Decoder: Model\n    config['shared_embed']    = False\n    config['use_input']       = True\n    config['bias_code']       = True\n    config['dec_use_contxt']  = True\n    config['deep_out']        = False\n    config['deep_out_activ']  = 'tanh'  # maxout2\n    config['bigram_predict']  = True\n    config['context_predict'] = True\n    config['dropout']         = 0.0  # 5\n    config['leaky_predict']   = False\n\n    config['dec_readout_dim'] = config['dec_hidden_dim']\n    if config['dec_use_contxt']:\n        config['dec_readout_dim'] += config['dec_contxt_dim']\n    if config['bigram_predict']:\n        config['dec_readout_dim'] += config['dec_embedd_dim']\n\n    # Decoder: sampling\n    config['max_len']         = 57\n    config['sample_beam']     = 10\n    config['sample_stoch']    = False\n    config['sample_argmax']   = False\n\n    # Gradient Tracking !!!\n    config['gradient_check']  = True\n    config['gradient_noise']  = True\n\n    config['skip_size']       = 15\n\n    for w in config:\n        print '{0} => {1}'.format(w, config[w])\n    print 'setup ok.'\n    return config\n\n    # config = dict()\n    # # config['seed']            = 3030029828\n    # config['seed']            = 19920206\n    #\n    # config['use_noise']       = False\n    # config['optimizer']       = 'adam'\n    # config['save_updates']    = True\n    # config['get_instance']    = True\n    # config['path']            = '/home/thoma/Work/Dial-DRL'  # path.realpath(path.curdir) + '/'\n    # config['path_h5']         = config['path'] + 
'/H5'\n    # config['dataset']         = config['path'] + '/dataset/synthetic_data_b.pkl'\n    #\n    # # output log place\n    # config['path_log']        = config['path'] + 'Logs'\n    # if not os.path.exists(config['path_log']):\n    #     os.mkdir(config['path_log'])\n    #\n    # # # output hdf5 file.\n    # # config['weights_file']    = config['path'] + '/froslass/model-pool/'\n    # # if not os.path.exists(config['weights_file']):\n    # #     os.mkdir(config['weights_file'])\n    #\n    # # size\n    # config['batch_size']      = 20\n    # config['mode']            = 'RNN'  # NTM\n    # config['binary']          = False\n    #\n    # # Encoder: Model\n    # config['bidirectional']   = True\n    # config['enc_use_contxt']  = False\n    # config['enc_learn_nrm']   = True\n    # config['enc_embedd_dim']  = 150    # 100\n    # config['enc_hidden_dim']  = 500    # 180\n    # config['enc_contxt_dim']  = 0\n    # config['encoder']         = 'RNN'\n    # config['pooling']         = False\n    #\n    # # Decoder: dimension\n    # config['dec_embedd_dim']  = 150  # 100\n    # config['dec_hidden_dim']  = 500  # 180\n    # config['dec_contxt_dim']  = config['enc_hidden_dim']       \\\n    #                             if not config['bidirectional'] \\\n    #                             else 2 * config['enc_hidden_dim']\n    #\n    # # Decoder: CopyNet\n    # config['copynet']         = True   # False\n    # config['identity']        = False\n    # config['location_embed']  = True\n    #\n    # # Decoder: Model\n    # config['shared_embed']    = False\n    # config['use_input']       = True\n    # config['bias_code']       = True\n    # config['dec_use_contxt']  = True\n    # config['deep_out']        = False\n    # config['deep_out_activ']  = 'tanh'  # maxout2\n    # config['bigram_predict']  = True\n    # config['context_predict'] = True\n    # config['dropout']         = 0.0  # 5\n    # config['leaky_predict']   = False\n    #\n    # config['dec_readout_dim'] = config['dec_hidden_dim']\n    # if config['dec_use_contxt']:\n    #     config['dec_readout_dim'] += config['dec_contxt_dim']\n    # if config['bigram_predict']:\n    #     config['dec_readout_dim'] += config['dec_embedd_dim']\n    #\n    # # Decoder: sampling\n    # config['max_len']         = 57\n    # config['sample_beam']     = 8\n    # config['sample_stoch']    = False\n    # config['sample_argmax']   = False\n    #\n    # # Gradient Tracking !!!\n    # config['gradient_check']  = True\n    # config['gradient_noise']  = True\n    #\n    # config['skip_size']       = 15\n    #\n    # for w in config:\n    #     print '{0} => {1}'.format(w, config[w])\n    # print 'setup ok.'\n    # return config\n\n\ndef setup_bst():\n    config = dict()\n    config['seed']            = 3030029828\n    # config['seed']            = 19920206\n\n    # model ids\n    # voc_size 10000:  20160224-021106\n    # voc_size 5000 :  20160224-144747 / 20160224-162424 (discard UNK)\n\n    config['use_noise']       = False\n    config['optimizer']       = 'adam'\n    config['save_updates']    = True\n    config['get_instance']    = True\n    config['path']            = path.realpath(path.curdir)\n    config['path_h5']         = config['path'] + '/H5'\n    # config['dataset']         = config['path'] + '/dataset/lcsts_data-word-full.pkl'\n    config['dataset']         = config['path'] + '/dataset/BST_1M.data.pkl'\n    config['modelname']       = 'bst'\n\n    # output log place\n    config['path_log']        = config['path'] + '/Logs'\n    config['path_logX']     
  = config['path'] + '/LogX'\n    if not os.path.exists(config['path_log']):\n        os.mkdir(config['path_log'])\n    if not os.path.exists(config['path_logX']):\n        os.mkdir(config['path_logX'])\n\n    # # output hdf5 file.\n    # config['weights_file']    = config['path'] + '/froslass/model-pool/'\n    # if not os.path.exists(config['weights_file']):\n    #     os.mkdir(config['weights_file'])\n\n    # size\n    config['batch_size']      = 20\n    config['mode']            = 'RNN'  # NTM\n    config['binary']          = False\n    config['voc_size']        = -1     # 20000\n\n    # Encoder: Model\n    config['bidirectional']   = True\n    config['enc_use_contxt']  = False\n    config['enc_learn_nrm']   = True\n    config['enc_embedd_dim']  = 150    # 100\n    config['enc_hidden_dim']  = 300    # 180\n    config['enc_contxt_dim']  = 0\n    config['encoder']         = 'RNN'\n    config['pooling']         = False\n\n    config['decode_unk']      = False\n\n    # Decoder: dimension\n    config['dec_embedd_dim']  = 150  # 100\n    config['dec_hidden_dim']  = 300  # 180\n    config['dec_contxt_dim']  = config['enc_hidden_dim']       \\\n                                if not config['bidirectional'] \\\n                                else 2 * config['enc_hidden_dim']\n\n    # Decoder: CopyNet\n    config['copynet']         = False  # True   # False\n    config['identity']        = False\n    config['location_embed']  = True\n    config['coverage']        = True\n    config['copygate']        = False\n    config['encourage_gen']   = 0.1    # lambda if 0 no encourage\n\n    # Decoder: Model\n    config['shared_embed']    = False\n    config['use_input']       = True\n    config['bias_code']       = True\n    config['dec_use_contxt']  = True\n    config['deep_out']        = False\n    config['deep_out_activ']  = 'tanh'  # maxout2\n    config['bigram_predict']  = True\n    config['context_predict'] = True\n    config['dropout']         = 0.0  # 5\n    config['leaky_predict']   = False\n\n    config['dec_readout_dim'] = config['dec_hidden_dim']\n    if config['dec_use_contxt']:\n        config['dec_readout_dim'] += config['dec_contxt_dim']\n    if config['bigram_predict']:\n        config['dec_readout_dim'] += config['dec_embedd_dim']\n\n    # Decoder: sampling\n    config['max_len']         = 100\n    config['sample_beam']     = 10\n    config['sample_stoch']    = False\n    config['sample_argmax']   = False\n\n    # Gradient Tracking !!!\n    config['gradient_check']  = True\n    config['gradient_noise']  = True\n\n    config['skip_size']       = 15\n\n    for w in config:\n        print '{0} => {1}'.format(w, config[w])\n    print 'setup ok.'\n    return config\n\n\ndef setup_lcsts():\n    config = dict()\n    config['seed']            = 3030029828\n    # config['seed']            = 19920206\n\n    # model ids\n    # voc_size 10000:  20160224-021106\n    # voc_size 5000 :  20160224-144747 / 20160224-162424 (discard UNK)\n\n    config['use_noise']       = False\n    config['optimizer']       = 'adam'\n    config['save_updates']    = True\n    config['get_instance']    = True\n    config['path']            = path.realpath(path.curdir)\n    config['path_h5']         = config['path'] + '/H5'\n    config['dataset']         = config['path'] + '/dataset/lcsts_data-word-full.pkl'\n    # config['dataset']         = config['path'] + '/dataset/lcsts_data-word.pkl'\n    config['modelname']       = 'LCSTS'\n    config['segment']         = True\n\n    # output log place\n    config['path_log']        = 
config['path'] + '/Logs'\n    config['path_logX']       = config['path'] + '/LogX'\n    config['path_model']      = config['path'] + '/H5'\n    if not os.path.exists(config['path_log']):\n        os.mkdir(config['path_log'])\n    if not os.path.exists(config['path_logX']):\n        os.mkdir(config['path_logX'])\n\n    # # output hdf5 file.\n    # config['weights_file']    = config['path'] + '/froslass/model-pool/'\n    # if not os.path.exists(config['weights_file']):\n    #     os.mkdir(config['weights_file'])\n\n    # size\n    config['batch_size']      = 20\n    config['mode']            = 'RNN'  # NTM\n    config['binary']          = False\n    config['voc_size']        = 20000   # 20000\n\n    # # based on characters (modified)\n    # config['segment']         = False\n    # config['dataset']         = config['path'] + '/dataset/lcsts_data-char-full.pkl'\n    # config['modelname']       = 'LCSTS-CCC'\n    # config['voc_size']        = 3000\n\n    # trained_model\n    # config['trained_model']   = config['path_model'] + '/experiments.CopyLCSTSXXX.id=20160305-004957.epoch=1.iter=20000.pkl'\n    # config['trained_model']   = config['path_model'] + '/experiments.CopyLCSTSXXX.id=20160301-105813.epoch=2.iter=80000.pkl'\n    config['trained_model']   = config['path_model'] + '/experiments.CopyLCSTSXXX.id=20160301-114653.epoch=2.iter=100000.pkl'\n\n    # Encoder: Model\n    config['bidirectional']   = True\n    config['enc_use_contxt']  = False\n    config['enc_learn_nrm']   = True\n    config['enc_embedd_dim']  = 500    # 100\n    config['enc_hidden_dim']  = 750    # 180\n    config['enc_contxt_dim']  = 0\n    config['encoder']         = 'RNN'\n    config['pooling']         = False\n\n    config['encode_max_len']  = 140\n    config['decode_unk']      = False\n\n    # Decoder: sample\n    config['max_len']         = 33\n    config['sample_beam']     = 30  # 10\n    config['sample_stoch']    = False\n    config['sample_argmax']   = False\n\n    # Decoder: train\n    config['dec_embedd_dim']  = 500  # 100\n    config['dec_hidden_dim']  = 750  # 180\n    config['dec_contxt_dim']  = config['enc_hidden_dim']       \\\n                                if not config['bidirectional'] \\\n                                else 2 * config['enc_hidden_dim']\n\n    config['explicit_loc']    = False\n    if config['explicit_loc']:\n        config['dec_contxt_dim'] += config['encode_max_len']\n\n    # Decoder: CopyNet\n    config['copynet']         = True   # False\n    config['identity']        = False\n    config['location_embed']  = True\n    config['coverage']        = True\n    config['copygate']        = False\n\n    # Decoder: Model\n    config['shared_embed']    = False\n    config['use_input']       = True\n    config['bias_code']       = True\n    config['dec_use_contxt']  = True\n    config['deep_out']        = False\n    config['deep_out_activ']  = 'tanh'  # maxout2\n    config['bigram_predict']  = True\n    config['context_predict'] = True\n    config['dropout']         = 0.0  # 5\n    config['leaky_predict']   = False\n\n    config['dec_readout_dim'] = config['dec_hidden_dim']\n    if config['dec_use_contxt']:\n        config['dec_readout_dim'] += config['dec_contxt_dim']\n    if config['bigram_predict']:\n        config['dec_readout_dim'] += config['dec_embedd_dim']\n\n    # Gradient Tracking !!!\n    config['gradient_check']  = True\n    config['gradient_noise']  = True\n\n    config['skip_size']       = 15\n\n    for w in config:\n        print '{0} => {1}'.format(w, config[w])\n    print 
'setup ok.'\n    return config\n\n\ndef setup_weibo():\n    config = dict()\n    config['seed']            = 3030029828\n    # config['seed']            = 19920206\n\n    # model ids\n\n    config['use_noise']       = False\n    config['optimizer']       = 'adam'\n    config['save_updates']    = True\n    config['get_instance']    = True\n    config['path']            = path.realpath(path.curdir)\n    config['path_h5']         = config['path'] + '/H5'\n    # config['dataset']         = config['path'] + '/dataset/lcsts_data-word-full.pkl'\n    # config['dataset']         = config['path'] + '/dataset/weibo_data-word-cooc.pkl'\n    config['dataset']         = config['path'] + '/dataset/movie_dialogue_data.pkl'\n\n    # output log place\n    config['path_log']        = config['path'] + '/Logs'\n    config['path_logX']       = config['path'] + '/LogX'\n    if not os.path.exists(config['path_log']):\n        os.mkdir(config['path_log'])\n    if not os.path.exists(config['path_logX']):\n        os.mkdir(config['path_logX'])\n\n    # # output hdf5 file.\n    # config['weights_file']    = config['path'] + '/froslass/model-pool/'\n    # if not os.path.exists(config['weights_file']):\n    #     os.mkdir(config['weights_file'])\n\n    # size\n    config['batch_size']      = 20\n    config['mode']            = 'RNN'  # NTM\n    config['binary']          = False\n    config['voc_size']        = 10000  # 30000\n\n    # Encoder: Model\n    config['bidirectional']   = True\n    config['enc_use_contxt']  = False\n    config['enc_learn_nrm']   = True\n    config['enc_embedd_dim']  = 350    # 100\n    config['enc_hidden_dim']  = 500    # 180\n    config['enc_contxt_dim']  = 0\n    config['encoder']         = 'RNN'\n    config['pooling']         = False\n\n    config['decode_unk']      = False\n    config['utf-8']           = False\n\n    # Decoder: dimension\n    config['dec_embedd_dim']  = 350  # 100\n    config['dec_hidden_dim']  = 500  # 180\n    config['dec_contxt_dim']  = config['enc_hidden_dim']       \\\n                                if not config['bidirectional'] \\\n                                else 2 * config['enc_hidden_dim']\n\n    # Decoder: CopyNet\n    config['copynet']         = True # False   # False\n    config['identity']        = False\n    config['location_embed']  = True\n    config['coverage']        = True\n    config['copygate']        = True\n    config['killcopy']        = False\n\n    # Decoder: Model\n    config['shared_embed']    = False\n    config['use_input']       = True\n    config['bias_code']       = True\n    config['dec_use_contxt']  = True\n    config['deep_out']        = False\n    config['deep_out_activ']  = 'tanh'  # maxout2\n    config['bigram_predict']  = True\n    config['context_predict'] = True\n    config['dropout']         = 0.0  # 5\n    config['leaky_predict']   = False\n\n    config['dec_readout_dim'] = config['dec_hidden_dim']\n    if config['dec_use_contxt']:\n        config['dec_readout_dim'] += config['dec_contxt_dim']\n    if config['bigram_predict']:\n        config['dec_readout_dim'] += config['dec_embedd_dim']\n\n    # Decoder: sampling\n    config['max_len']         = 50\n    config['sample_beam']     = 10\n    config['sample_stoch']    = False\n    config['sample_argmax']   = False\n\n    # Gradient Tracking !!!\n    config['gradient_check']  = True\n    config['gradient_noise']  = True\n\n    config['skip_size']       = 15\n\n    conc = sorted(config.items(), key=lambda c:c[0])\n    for c, v in conc:\n        print '{0} => {1}'.format(c, 
v)\n    print 'setup ok.'\n    return config\n\n"
  },
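None of the setups above hard-code the decoder context and readout widths; they are derived: a bidirectional encoder doubles `dec_contxt_dim`, `explicit_loc` appends `encode_max_len`, and the readout grows by the context and bigram embedding terms. A sketch of the arithmetic, using the `setup_syn()` values:

```python
# Derived decoder sizes, reproducing the arithmetic in setup_syn() above.
enc_hidden_dim = 300
dec_hidden_dim = 300
dec_embedd_dim = 150
encode_max_len = 57

dec_contxt_dim  = 2 * enc_hidden_dim    # bidirectional encoder -> 600
dec_contxt_dim += encode_max_len        # explicit_loc          -> 657

dec_readout_dim  = dec_hidden_dim       # 300
dec_readout_dim += dec_contxt_dim       # dec_use_contxt        -> 957
dec_readout_dim += dec_embedd_dim       # bigram_predict        -> 1107
print dec_contxt_dim, dec_readout_dim   # 657 1107
```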
  {
    "path": "experiments/copynet.py",
    "content": "\"\"\"\nThis is the implementation of Copy-NET\nWe start from the basic Seq2seq framework for a auto-encoder.\n\"\"\"\nimport logging\nimport time\nimport numpy as np\n\nfrom theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams\nfrom experiments.config import setup\nfrom emolga.utils.generic_utils import *\nfrom emolga.models.encdec import *\nfrom emolga.dataset.build_dataset import deserialize_from_file\nfrom collections import OrderedDict\nfrom fuel import datasets\nfrom fuel import transformers\nfrom fuel import schemes\n\n\ndef init_logging(logfile):\n    formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(module)s: %(message)s',\n                                  datefmt='%m/%d/%Y %H:%M:%S'   )\n    fh = logging.FileHandler(logfile)\n    # ch = logging.StreamHandler()\n\n    fh.setFormatter(formatter)\n    # ch.setFormatter(formatter)\n    # fh.setLevel(logging.INFO)\n    # ch.setLevel(logging.INFO)\n    # logging.getLogger().addHandler(ch)\n    logging.getLogger().addHandler(fh)\n    logging.getLogger().setLevel(logging.INFO)\n\n    return logging\n\n# prepare logging.\ntmark   = time.strftime('%Y%m%d-%H%M%S', time.localtime(time.time()))\nconfig  = setup()   # load settings.\nfor w in config:\n    print '{0}={1}'.format(w, config[w])\n\nlogger  = init_logging(config['path_log'] + '/experiments.Copy.id={}.log'.format(tmark))\nn_rng  = np.random.RandomState(config['seed'])\nnp.random.seed(config['seed'])\nrng    = RandomStreams(n_rng.randint(2 ** 30))\nlogger.info('Start!')\n\nidx2word, word2idx, idx2word_o, word2idx_o \\\n        = deserialize_from_file(config['voc'])\nidx2word_o[0] = '<eol>'\nword2idx_o['<eol>'] = 0\n\nsource, target, origin = deserialize_from_file(config['dataset'])\nsamlpes = len(source)\n\nconfig['enc_voc_size'] = max(zip(*word2idx.items())[1]) + 1\nconfig['dec_voc_size'] = config['enc_voc_size']\nlogger.info('build dataset done. 
' +\n            'dataset size: {} ||'.format(samlpes) +\n            'vocabulary size = {0}/ batch size = {1}'.format(\n        config['dec_voc_size'], config['batch_size']))\n\n\ndef build_data(source, target):\n    # create fuel dataset.\n    dataset     = datasets.IndexableDataset(indexables=OrderedDict([('source', source), ('target', target)]))\n    dataset.example_iteration_scheme \\\n                = schemes.ShuffledExampleScheme(dataset.num_examples)\n    return dataset, len(source)\n\n\ntrain_data, train_size = build_data(source[int(0.2 * samlpes):], target[int(0.2 * samlpes):])\ntrain_data_plain       = zip(*(source[int(0.2 * samlpes):], target[int(0.2 * samlpes):], origin[int(0.2 * samlpes):]))\ntest_data_plain        = zip(*(source[:int(0.2 * samlpes)], target[:int(0.2 * samlpes)], origin[:int(0.2 * samlpes)]))\ntest_size              = len(test_data_plain)\nlogger.info('load the data ok.')\n\n# build the agent\nagent  = NRM(config, n_rng, rng, mode=config['mode'],\n             use_attention=True, copynet=config['copynet'], identity=config['identity'])\nagent.build_()\nagent.compile_('all')\nprint 'compile ok.'\n\necho   = 0\nepochs = 10\nwhile echo < epochs:\n    echo += 1\n    loss  = []\n\n    def output_stream(dataset, batch_size, size=1):\n        data_stream = dataset.get_example_stream()\n        data_stream = transformers.Batch(data_stream,\n                                         iteration_scheme=schemes.ConstantScheme(batch_size))\n\n        # add padding and masks to the dataset\n        data_stream = transformers.Padding(data_stream, mask_sources=('source', 'target'))\n        return data_stream\n\n    def prepare_batch(batch, mask):\n        data = batch[mask].astype('int32')\n        data = np.concatenate([data, np.zeros((data.shape[0], 1), dtype='int32')], axis=1)\n\n        def cut_zeros(data):\n            for k in range(data.shape[1] - 1, 0, -1):\n                data_col = data[:, k].sum()\n                if data_col > 0:\n                    return data[:, : k + 2]\n            return data\n        data = cut_zeros(data)\n        return data\n\n    # training\n    train_batches = output_stream(train_data, config['batch_size']).get_epoch_iterator(as_dict=True)\n    logger.info('Epoch = {} -> Training Set Learning...'.format(echo))\n    progbar = Progbar(train_size / config['batch_size'])\n    for it, batch in enumerate(train_batches):\n        # obtain data\n        data_s, data_t = prepare_batch(batch, 'source'), prepare_batch(batch, 'target')\n        loss += [agent.train_(data_s, data_t)]\n        progbar.update(it, [('loss_reg', loss[-1][0]), ('ppl.', loss[-1][1])])\n\n        if it % 500 == 0:\n            logger.info('generating [training set] samples')\n            for _ in xrange(5):\n                idx              = int(np.floor(n_rng.rand() * train_size))\n                train_s, train_t, train_o = train_data_plain[idx]\n                v                = agent.evaluate_(np.asarray(train_s, dtype='int32'),\n                                                   np.asarray(train_t, dtype='int32'),\n                                                   idx2word, np.asarray(train_o, dtype='int32'), idx2word_o)\n                print '*' * 50\n\n            logger.info('generating [testing set] samples')\n            for _ in xrange(5):\n                idx            = int(np.floor(n_rng.rand() * test_size))\n                test_s, test_t, test_o = test_data_plain[idx]\n                v              = agent.evaluate_(np.asarray(test_s, 
dtype='int32'),\n                                                 np.asarray(test_t, dtype='int32'),\n                                                 idx2word, np.asarray(test_o, dtype='int32'), idx2word_o)\n                print '*' * 50\n"
  },
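`prepare_batch` in `copynet.py` above appends one guaranteed padding column (so every row ends in `<eol>`, id 0), and `cut_zeros` then trims trailing all-zero columns, keeping a single terminator column after the last non-empty one. A standalone toy run, independent of fuel:

```python
import numpy as np

def cut_zeros(data):
    # scan from the right; keep one column past the last non-empty one
    for k in range(data.shape[1] - 1, 0, -1):
        if data[:, k].sum() > 0:
            return data[:, : k + 2]
    return data

batch = np.asarray([[5, 3, 0, 0, 0],
                    [2, 7, 4, 0, 0]], dtype='int32')
# prepare_batch first guarantees a zero column at the end ...
batch = np.concatenate([batch, np.zeros((batch.shape[0], 1), dtype='int32')], axis=1)
# ... and cut_zeros trims the excess padding again
print cut_zeros(batch)
# [[5 3 0 0]
#  [2 7 4 0]]
```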
  {
    "path": "experiments/copynet_input.py",
    "content": "# coding=utf-8\nimport logging\nimport time\nimport numpy as np\nimport sys\nimport copy\n\nfrom theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams\nfrom experiments.config import setup_lcsts\nfrom emolga.utils.generic_utils import *\nfrom emolga.models.covc_encdec import NRM\nfrom emolga.models.encdec import NRM as NRM0\nfrom emolga.dataset.build_dataset import deserialize_from_file\nfrom collections import OrderedDict\nfrom fuel import datasets\nfrom fuel import transformers\nfrom fuel import schemes\n\nsetup = setup_lcsts\n\n\ndef init_logging(logfile):\n    formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(module)s: %(message)s',\n                                  datefmt='%m/%d/%Y %H:%M:%S'   )\n    fh = logging.FileHandler(logfile)\n    # ch = logging.StreamHandler()\n\n    fh.setFormatter(formatter)\n    # ch.setFormatter(formatter)\n    # fh.setLevel(logging.INFO)\n    # ch.setLevel(logging.INFO)\n    # logging.getLogger().addHandler(ch)\n    logging.getLogger().addHandler(fh)\n    logging.getLogger().setLevel(logging.INFO)\n    return logging\n\n# prepare logging.\ntmark   = time.strftime('%Y%m%d-%H%M%S', time.localtime(time.time()))\nconfig  = setup()   # load settings.\nfor w in config:\n    print '{0}={1}'.format(w, config[w])\n\nlogger  = init_logging(config['path_log'] + '/experiments.CopyLCSTS.id={}.log'.format(tmark))\nn_rng   = np.random.RandomState(config['seed'])\nnp.random.seed(config['seed'])\nrng     = RandomStreams(n_rng.randint(2 ** 30))\nlogger.info('Start!')\n\ntrain_set, test_set, idx2word, word2idx = deserialize_from_file(config['dataset'])\nif config['voc_size'] == -1:   # not use unk\n    config['enc_voc_size'] = max(zip(*word2idx.items())[1]) + 1\n    config['dec_voc_size'] = config['enc_voc_size']\nelse:\n    config['enc_voc_size'] = config['voc_size']\n    config['dec_voc_size'] = config['enc_voc_size']\n\nsamples = len(train_set['source'])\n\nlogger.info('build dataset done. 
' +\n            'dataset size: {} ||'.format(samples) +\n            'vocabulary size = {0}/ batch size = {1}'.format(\n        config['dec_voc_size'], config['batch_size']))\n\n\ndef unk_filter(data):\n    if config['voc_size'] == -1:\n        return copy.copy(data)\n    else:\n        mask = (np.less(data, config['voc_size'])).astype(dtype='int32')\n        data = copy.copy(data * mask + (1 - mask))\n        return data\n\nsource = '临近 岁末 ， 新 基金 发行 步入 旺季 ， 11 月份 以来 单周 新基 ' +  \\\n         '发行 数 始终保持 35 只 以上 的 高位 ， 仅 11 月 25 日 一天 ， ' + \\\n         '就 有 12 只 基金 同时 发售 。 国内 首只 公募 对冲 混合型 基金 — 嘉实 绝对 收益 策略 ' + \\\n         '定期 混合 基金 自 发行 首日 便 备受 各界 青睐 ， 每日 认购 均 能 达到 上 亿'\ntarget = '首只 公募 对冲 基金 每日 吸金 上 亿'\n\ntest_s = [word2idx[w.decode('utf-8')] for w in source.split()]\ntest_t = [word2idx[w.decode('utf-8')] for w in target.split()]\n\nlogger.info('load the data ok.')\n\nlogger.info('Evaluate CopyNet')\necho              = 9\ntmark             = '20160226-164053'  # '20160221-025049'  # copy-net model [no unk]\nconfig['copynet'] = True\nagent  = NRM(config, n_rng, rng, mode=config['mode'],\n                  use_attention=True, copynet=config['copynet'], identity=config['identity'])\nagent.build_()\nagent.compile_('display')\nagent.load(config['path_h5'] + '/experiments.CopyLCSTS.id={0}.epoch={1}.pkl'.format(tmark, echo))\nlogger.info('generating [testing set] samples')\n\nv      = agent.evaluate_(np.asarray(test_s, dtype='int32'),\n                         np.asarray(test_t, dtype='int32'),\n                         idx2word, np.asarray(unk_filter(test_s), dtype='int32'))\nlogger.info('Complete!')"
  },
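`unk_filter` above clamps out-of-vocabulary ids to `1` (the `<unk>` index) when a vocabulary cap is active, and passes data through unchanged when `voc_size == -1`. A parameterised sketch of the same masking trick (the cap of 5 is hypothetical):

```python
import numpy as np

def unk_filter(data, voc_size):
    # ids >= voc_size collapse to 1, the <unk> index; -1 disables the cap
    if voc_size == -1:
        return data.copy()
    mask = np.less(data, voc_size).astype('int32')
    return data * mask + (1 - mask)

seq = np.asarray([3, 9, 4, 12, 0], dtype='int32')
print unk_filter(seq, 5)     # [3 1 4 1 0]
print unk_filter(seq, -1)    # [ 3  9  4 12  0], unchanged
```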
  {
    "path": "experiments/dataset.py",
    "content": "\"\"\"\nPreprocess the bAbI datset.\n\"\"\"\nimport logging\nimport os\nimport sys\nimport numpy.random as n_rng\nfrom emolga.dataset.build_dataset import serialize_to_file\n\ndata_path = './dataset/bAbI/en-10k/'\ndata = []\nn_rng.seed(19920206)\n\nfor p, folders, docs in os.walk(data_path):\n    for doc in docs:\n        with open(os.path.join(p, doc)) as f:\n            l = f.readline()\n            while l:\n                l = l.strip().lower()\n                l = l[l.find(' ') + 1:]\n                if len(l.split('\\t')) == 1:\n                    data += [l[:-1].split()]\n                l = f.readline()\n\nidx2word = dict(enumerate(set([w for l in data for w in l]), 1))\nword2idx = {v: k for k, v in idx2word.items()}\n\npersons  = [1, 6, 24, 37, 38, 47, 60, 61, 73, 74, 90, 94, 107, 110, 114]\ncolors   = [3, 20, 34, 48, 99, 121]\nshapes   = [11, 15, 27, 99]\n\n\ndef repeat_name(l):\n    ll = []\n    for word in l:\n        if word2idx[word] in persons:\n            k = n_rng.randint(5) + 1\n            ll += [idx2word[persons[i]] for i in n_rng.randint(len(persons), size=k).tolist()]\n        elif word2idx[word] in colors:\n            k = n_rng.randint(5) + 1\n            ll += [idx2word[colors[i]] for i in n_rng.randint(len(colors), size=k).tolist()]\n        elif word2idx[word] in shapes:\n            k = n_rng.randint(5) + 1\n            ll += [idx2word[shapes[i]] for i in n_rng.randint(len(shapes), size=k).tolist()]\n        else:\n            ll += [word]\n    return ll\n\ndata_rep = [repeat_name(l) for l in data]\norigin   = [[word2idx[w] for w in l] for l in data_rep]\n\ndef replace(word):\n    if word2idx[word] in [1, 6, 24, 37, 38, 47, 60, 61, 73, 74, 90, 94, 107, 110, 114]:\n        return '<person>'\n    elif word2idx[word] in [3, 20, 34, 48, 99, 121]:\n        return '<color>'\n    elif word2idx[word] in [11, 15, 27, 99]:\n        return '<shape>'\n    else:\n        return word\n\n# prepare the vocabulary\ndata_clean   = [[replace(w) for w in l] for l in data_rep]\nidx2word2    = dict(enumerate(set([w for l in data_clean for w in l]), 1))\nidx2word2[0] = '<eol>'\nword2idx2    = {v: k for k, v in idx2word2.items()}\nLmax         = len(idx2word2)\n\nfor k in xrange(len(idx2word2)):\n    print k, '\\t', idx2word2[k]\nprint 'Max: {}'.format(Lmax)\n\nserialize_to_file([idx2word2, word2idx2, idx2word, word2idx], './dataset/bAbI/voc-b.pkl')\n\n# get ready for the dataset.\nsource = [[word2idx2[w] for w in l] for l in data_clean]\ntarget = [[word2idx2[w] if w not in ['<person>', '<color>', '<shape>']\n           else it + Lmax\n           for it, w in enumerate(l)] for l in data_clean]\n\n\ndef print_str(data):\n    for d in data:\n        print ' '.join(str(w) for w in d)\n\n\nprint_str(data[10000: 10005])\nprint_str(data_rep[10000: 10005])\nprint_str(data_clean[10000: 10005])\nprint_str(source[10000: 10005])\nprint_str(target[10000: 10005])\n\nserialize_to_file([source, target, origin], './dataset/bAbI/dataset-b.pkl')\n"
  },
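`dataset.py` above replaces entities with type placeholders and then encodes each placeholder in the target as its own position plus `Lmax`, forcing the model to copy the concrete entity from the source. A toy version of that mapping with a hypothetical mini-vocabulary:

```python
# Toy version of the source/target construction in dataset.py; the
# vocabulary below is made up.
word2idx2 = {'<eol>': 0, '<person>': 1, 'went': 2, 'to': 3,
             'the': 4, '<color>': 5, 'room': 6}
Lmax      = len(word2idx2)    # 7

sentence = ['<person>', 'went', 'to', 'the', '<color>', 'room']
source   = [word2idx2[w] for w in sentence]
target   = [word2idx2[w] if w not in ['<person>', '<color>', '<shape>']
            else it + Lmax
            for it, w in enumerate(sentence)]
print source    # [1, 2, 3, 4, 5, 6]
print target    # [7, 2, 3, 4, 11, 6]: positions 0 and 4 must be copied
```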
  {
    "path": "experiments/lcsts_dataset.py",
    "content": "# coding=utf-8\nimport chardet\nimport sys\nimport numpy as np\nimport jieba as jb\nfrom emolga.dataset.build_dataset import deserialize_from_file, serialize_to_file\n\nword2idx = dict()\nwordfreq = dict()\nword2idx['<eol>'] = 0\nword2idx['<unk>'] = 1\n\nsegment  = False # True\n\n# training set\npairs = []\nf     = open('./dataset/LCSTS/PART_I/PART_full.txt', 'r')\nline  = f.readline().strip()\nat    = 2\nlines = 0\nwhile line:\n    if line == '<summary>':\n        summary = f.readline().strip().decode('utf-8')\n        if segment:\n            summary = [w for w in jb.cut(summary)]\n\n        for w in summary:\n            if w not in wordfreq:\n                wordfreq[w]  = 1\n            else:\n                wordfreq[w] += 1\n            # if w not in word2idx:\n            #     word2idx[w] = at\n            #     at         += 1\n\n        f.readline()\n        f.readline()\n        text    = f.readline().strip().decode('utf-8')\n        if segment:\n            text = [w for w in jb.cut(text)]\n        for w in text:\n            if w not in wordfreq:\n                wordfreq[w]  = 1\n            else:\n                wordfreq[w] += 1\n            # if w not in word2idx:\n            #     word2idx[w] = at\n            #     at         += 1\n\n        pair    = (text, summary)\n        pairs.append(pair)\n        lines  += 1\n        if lines % 20000 == 0:\n            print lines\n    line = f.readline().strip()\n\n# testing set\ntests = []\nf     = open('./dataset/LCSTS/PART_II/PART_II.txt', 'r')\nline  = f.readline().strip()\nlines = 0\nwhile line:\n    if line == '<summary>':\n        summary = f.readline().strip().decode('utf-8')\n        if segment:\n            summary = [w for w in jb.cut(summary)]\n\n        for w in summary:\n            if w not in wordfreq:\n                wordfreq[w]  = 1\n            else:\n                wordfreq[w] += 1\n            # if w not in word2idx:\n            #     word2idx[w] = at\n            #     at         += 1\n\n        f.readline()\n        f.readline()\n        text    = f.readline().strip().decode('utf-8')\n        if segment:\n            text = [w for w in jb.cut(text)]\n        for w in text:\n            if w not in wordfreq:\n                wordfreq[w]  = 1\n            else:\n                wordfreq[w] += 1\n            # if w not in word2idx:\n            #     word2idx[w] = at\n            #     at         += 1\n\n        pair    = (text, summary)\n        tests.append(pair)\n        lines  += 1\n        if lines % 20000 == 0:\n            print lines\n    line = f.readline().strip()\n\nprint len(pairs), len(tests)\n\n# sort the vocabulary\nwordfreq = sorted(wordfreq.items(), key=lambda a:a[1], reverse=True)\nfor w in wordfreq:\n    word2idx[w[0]] = at\n    at += 1\n\nidx2word = {k: v for v, k in word2idx.items()}\nLmax     = len(idx2word)\nprint 'read dataset ok.'\nprint Lmax\nfor i in xrange(Lmax):\n    print idx2word[i].encode('utf-8')\n\n# use character-based model [on]\n# use word-based model     [off]\n\n\ndef build_data(data):\n    instance = dict(text=[], summary=[], source=[], target=[], target_c=[])\n    for pair in data:\n        source, target = pair\n        A = [word2idx[w] for w in source]\n        B = [word2idx[w] for w in target]\n        # C = np.asarray([[w == l for w in source] for l in target], dtype='float32')\n        C = [0 if w not in source else source.index(w) + Lmax for w in target]\n\n        instance['text']      += [source]\n        instance['summary']   += [target]\n 
       instance['source']    += [A]\n        instance['target']    += [B]\n        # instance['cc_matrix'] += [C]\n        instance['target_c'] += [C]\n\n    print instance['target'][5000]\n    print instance['target_c'][5000]\n    return instance\n\n\ntrain_set = build_data(pairs)\ntest_set  = build_data(tests)\nserialize_to_file([train_set, test_set, idx2word, word2idx], './dataset/lcsts_data-char-full.pkl')\n"
  },
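`lcsts_dataset.py` above counts word frequencies first and assigns ids in descending-frequency order (after the reserved `<eol>` = 0 and `<unk>` = 1), so that a later `voc_size` cut keeps the most frequent words. A compact sketch with toy counts:

```python
# Frequency-sorted id assignment as in lcsts_dataset.py, with toy counts.
wordfreq = {'the': 50, 'fund': 7, 'rare': 1, 'sales': 12}
word2idx = {'<eol>': 0, '<unk>': 1}

at = 2
for w, _ in sorted(wordfreq.items(), key=lambda a: a[1], reverse=True):
    word2idx[w] = at
    at += 1
print word2idx    # 'the' -> 2, 'sales' -> 3, 'fund' -> 4, 'rare' -> 5
```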
  {
    "path": "experiments/lcsts_rouge.py",
    "content": "\"\"\"\nEvaluation using ROUGE for LCSTS dataset.\n\"\"\"\n# load the testing set.\nfrom emolga.dataset.build_dataset import deserialize_from_file, serialize_to_file\nimport jieba as jb\nimport logging\nimport copy\nfrom pyrouge import Rouge155\nfrom theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams\nfrom experiments.config import setup_lcsts, setup_weibo, setup_syn\nfrom emolga.utils.generic_utils import *\nfrom emolga.models.covc_encdec import NRM\nfrom emolga.models.encdec import NRM as NRM0\nfrom emolga.dataset.build_dataset import deserialize_from_file\nfrom collections import OrderedDict\nfrom fuel import datasets\nfrom fuel import transformers\nfrom fuel import schemes\nfrom pprint import pprint\nsetup = setup_lcsts\n\n\ndef build_evaluation(train_set, segment):\n    _, _, idx2word, word2idx = deserialize_from_file(train_set)\n    pairs   = []\n    f       = open('./dataset/LCSTS/PART_III/PART_III.txt', 'r')\n    line    = f.readline().strip()\n    lines   = 0\n    segment = segment\n    while line:\n        if '<human_label>' in line:\n            score   = int(line[13])\n            if score >= 3:\n                f.readline()\n                summary = f.readline().strip().decode('utf-8')\n                if segment:\n                    summary = [w for w in jb.cut(summary)]\n                target  = []\n                for w in summary:\n                    if w not in word2idx:\n                        word2idx[w] = len(word2idx)\n                        idx2word[len(idx2word)] = w\n                    target += [word2idx[w]]\n\n                f.readline()\n                f.readline()\n                text    = f.readline().strip().decode('utf-8')\n                if segment:\n                    text = [w for w in jb.cut(text)]\n                source  = []\n                for w in text:\n                    if w not in word2idx:\n                        word2idx[w] = len(word2idx)\n                        idx2word[len(idx2word)] = w\n                    source += [word2idx[w]]\n\n                pair    = (text, summary, score, source, target)\n                pairs.append(pair)\n                lines  += 1\n                if lines % 1000 == 0:\n                    print lines\n        line = f.readline().strip()\n    print 'lines={}'.format(len(pairs))\n    return pairs, word2idx, idx2word\n\n# words, wwi, wiw = build_evaluation('./dataset/lcsts_data-word-full.pkl', True)\n# chars, cwi, ciw = build_evaluation('./dataset/lcsts_data-char-full.pkl', False)\n#\n# serialize_to_file([words, chars, [wwi, wiw], [cwi, ciw]], './dataset/lcsts_evaluate_data.pkl')\n\n\ndef init_logging(logfile):\n    formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(module)s: %(message)s',\n                                  datefmt='%m/%d/%Y %H:%M:%S'   )\n    fh = logging.FileHandler(logfile)\n    # ch = logging.StreamHandler()\n\n    fh.setFormatter(formatter)\n    # ch.setFormatter(formatter)\n    # fh.setLevel(logging.INFO)\n    # ch.setLevel(logging.INFO)\n    # logging.getLogger().addHandler(ch)\n    logging.getLogger().addHandler(fh)\n    logging.getLogger().setLevel(logging.INFO)\n\n    return logging\n\n\n# prepare logging.\nconfig  = setup()   # load settings.\nfor w in config:\n    print '{0}={1}'.format(w, config[w])\ntmark   = time.strftime('%Y%m%d-%H%M%S', time.localtime(time.time()))\nlogger  = init_logging(config['path_log'] + '/experiments.LCSTS.Eval.id={}.log'.format(tmark))\nn_rng   = 
np.random.RandomState(config['seed'])\nnp.random.seed(config['seed'])\nrng     = RandomStreams(n_rng.randint(2 ** 30))\nlogger.info('Start!')\n\nsegment = config['segment']\nword_set, char_set, word_voc, char_voc = deserialize_from_file('./dataset/lcsts_evaluate_data.pkl')\n\nif segment:\n    eval_set           = word_set\n    word2idx, idx2word = word_voc\nelse:\n    eval_set           = char_set\n    word2idx, idx2word = char_voc\n\nif config['voc_size'] == -1:   # not use unk\n    config['enc_voc_size'] = len(word2idx)\n    config['dec_voc_size'] = config['enc_voc_size']\nelse:\n    config['enc_voc_size'] = config['voc_size']\n    config['dec_voc_size'] = config['enc_voc_size']\n\nsamples  = len(eval_set)\nlogger.info('build dataset done. ' +\n            'dataset size: {} ||'.format(samples) +\n            'vocabulary size = {0}/ batch size = {1}'.format(\n        config['dec_voc_size'], config['batch_size']))\nlogger.info('load the data ok.')\n\n# build the agent\nif config['copynet']:\n    agent  = NRM(config, n_rng, rng, mode=config['mode'],\n                 use_attention=True, copynet=config['copynet'], identity=config['identity'])\nelse:\n    agent  = NRM0(config, n_rng, rng, mode=config['mode'],\n                  use_attention=True, copynet=config['copynet'], identity=config['identity'])\n\nagent.build_()\nagent.compile_('display')\nprint 'compile ok.'\n\n# load the model\nagent.load(config['trained_model'])\n\n\ndef unk_filter(data):\n    if config['voc_size'] == -1:\n        return copy.copy(data)\n    else:\n        mask = (np.less(data, config['voc_size'])).astype(dtype='int32')\n        data = copy.copy(data * mask + (1 - mask))\n        return data\n\nrouge    = Rouge155(n_words=40)\nevalsets = {'rouge_1_f_score': 'R1',\n            'rouge_2_f_score': 'R2',\n            'rouge_3_f_score': 'R3',\n            'rouge_4_f_score': 'R4',\n            'rouge_l_f_score': 'RL',\n            'rouge_su4_f_score': 'RSU4'}\nscores = dict()\nfor id, sample in enumerate(eval_set):\n    text, summary, score, source, target = sample\n    v              = agent.evaluate_(np.asarray(source, dtype='int32'),\n                                     np.asarray(target, dtype='int32'),\n                                     idx2word,\n                                     np.asarray(unk_filter(source), dtype='int32')).decode('utf-8').split('\\n')\n\n    print 'ID = {} ||'.format(id) + '*' * 50\n    ref   = ' '.join(['t{}'.format(char_voc[0][u]) for u in ''.join([w for w in v[2][9:].split()])])\n    sym   = ' '.join(['t{}'.format(char_voc[0][u]) for u in ''.join([w for w in v[3][9:].split()])])\n\n    sssss = rouge.score_summary(sym, {'A': ref})\n\n    for si in sssss:\n        if si not in scores:\n            scores[si]  = sssss[si]\n        else:\n            scores[si] += sssss[si]\n\n    for e in evalsets:\n        print '{0}: {1}'.format(evalsets[e], scores[e] / (id + 1)),\n    print './.'\n\n# average\nfor si in scores:\n    scores[si] /= float(len(eval_set))\n"
  },
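The evaluation loop in `lcsts_rouge.py` above keeps a running sum of each per-sample ROUGE dict and prints running means as it goes, dividing by the sample count only at the end. The bookkeeping in isolation, with made-up score dicts standing in for `pyrouge` output:

```python
# Running-average bookkeeping from lcsts_rouge.py, with made-up scores.
per_sample = [{'rouge_1_f_score': 0.30, 'rouge_2_f_score': 0.10},
              {'rouge_1_f_score': 0.50, 'rouge_2_f_score': 0.20}]

scores = dict()
for id, sample_scores in enumerate(per_sample):
    for si in sample_scores:
        scores[si] = scores.get(si, 0.0) + sample_scores[si]
    print 'after sample {0}: R1 = {1:.2f}'.format(id, scores['rouge_1_f_score'] / (id + 1))

for si in scores:                    # final averages
    scores[si] /= float(len(per_sample))
print scores['rouge_1_f_score']      # 0.4
```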
  {
    "path": "experiments/lcsts_sample.py",
    "content": "\"\"\"\nThis is the implementation of Copy-NET\nWe start from the basic Seq2seq framework for a auto-encoder.\n\"\"\"\nimport logging\nimport time\nimport numpy as np\nimport sys\nimport copy\n\nfrom theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams\nfrom experiments.config import setup_lcsts\nfrom emolga.utils.generic_utils import *\nfrom emolga.models.covc_encdec import NRM\nfrom emolga.models.encdec import NRM as NRM0\nfrom emolga.dataset.build_dataset import deserialize_from_file\nfrom collections import OrderedDict\nfrom fuel import datasets\nfrom fuel import transformers\nfrom fuel import schemes\n\nsetup = setup_lcsts\n\n\ndef init_logging(logfile):\n    formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(module)s: %(message)s',\n                                  datefmt='%m/%d/%Y %H:%M:%S'   )\n    fh = logging.FileHandler(logfile)\n    # ch = logging.StreamHandler()\n\n    fh.setFormatter(formatter)\n    # ch.setFormatter(formatter)\n    # fh.setLevel(logging.INFO)\n    # ch.setLevel(logging.INFO)\n    # logging.getLogger().addHandler(ch)\n    logging.getLogger().addHandler(fh)\n    logging.getLogger().setLevel(logging.INFO)\n    return logging\n\n# prepare logging.\ntmark   = time.strftime('%Y%m%d-%H%M%S', time.localtime(time.time()))\nconfig  = setup()   # load settings.\nfor w in config:\n    print '{0}={1}'.format(w, config[w])\n\nlogger  = init_logging(config['path_log'] + '/experiments.CopyLCSTS.id={}.log'.format(tmark))\nn_rng   = np.random.RandomState(config['seed'])\nnp.random.seed(config['seed'])\nrng     = RandomStreams(n_rng.randint(2 ** 30))\nlogger.info('Start!')\n\ntrain_set, test_set, idx2word, word2idx = deserialize_from_file(config['dataset'])\nif config['voc_size'] == -1:   # not use unk\n    config['enc_voc_size'] = max(zip(*word2idx.items())[1]) + 1\n    config['dec_voc_size'] = config['enc_voc_size']\nelse:\n    config['enc_voc_size'] = config['voc_size']\n    config['dec_voc_size'] = config['enc_voc_size']\n\nsamples = len(train_set['source'])\n\nlogger.info('build dataset done. 
' +\n            'dataset size: {} ||'.format(samples) +\n            'vocabulary size = {0}/ batch size = {1}'.format(\n        config['dec_voc_size'], config['batch_size']))\n\n\ndef build_data(data):\n    # create fuel dataset.\n    dataset     = datasets.IndexableDataset(indexables=OrderedDict([('source', data['source']),\n                                                                    ('target', data['target']),\n                                                                    ('target_c', data['target_c']),\n                                                                    ]))\n    dataset.example_iteration_scheme \\\n                = schemes.ShuffledExampleScheme(dataset.num_examples)\n    return dataset\n\n\ndef unk_filter(data):\n    if config['voc_size'] == -1:\n        return copy.copy(data)\n    else:\n        mask = (np.less(data, config['voc_size'])).astype(dtype='int32')\n        data = copy.copy(data * mask + (1 - mask))\n        return data\n\n\ntrain_data_plain  = zip(*(train_set['source'], train_set['target']))\ntest_data_plain   = zip(*(test_set['source'],  test_set['target']))\ntrain_size        = len(train_data_plain)\ntest_size         = len(test_data_plain)\ntr_idx            = n_rng.permutation(train_size)[:2000].tolist()\nts_idx            = n_rng.permutation(test_size)[:100].tolist()\n\nlogger.info('load the data ok.')\n\n# logger.info('Evaluate Enc-Dec')\n# log_gen           = open(config['path_log'] + '/experiments.CopyLCSTS.generate_{}.log'.format(0), 'w')\n# config['copynet'] = True\n# echo              = 10\n# tmark             = '20160224-185023'  # '20160221-171853'  # enc-dec model [no unk]\n# agent  = NRM(config, n_rng, rng, mode=config['mode'],\n#                   use_attention=True, copynet=config['copynet'], identity=config['identity'])\n# agent.build_()\n# agent.compile_('display')\n# agent.load(config['path_h5'] + '/experiments.CopyLCSTS.id={0}.epoch={1}.pkl'.format(tmark, echo))\n# logger.info('generating [testing set] samples')\n# for idx in ts_idx:\n#     # idx            = int(np.floor(n_rng.rand() * test_size))\n#     test_s, test_t = test_data_plain[idx]\n#     v              = agent.evaluate_(np.asarray(test_s, dtype='int32'),\n#                                      np.asarray(test_t, dtype='int32'),\n#                                      idx2word)\n#     log_gen.write(v)\n#     log_gen.write('*' * 50 + '\\n')\n# log_gen.close()\n\nlogger.info('Evaluate CopyNet')\necho              = 6\ntmark             = '20160224-185023'  # '20160221-025049'  # copy-net model [no unk]\nlog_cp            = open(config['path_logX'] + '/experiments.copy_{0}_{1}.log'.format(tmark, echo), 'w')\nconfig['copynet'] = True\nagent  = NRM(config, n_rng, rng, mode=config['mode'],\n                  use_attention=True, copynet=config['copynet'], identity=config['identity'])\nagent.build_()\nagent.compile_('display')\nagent.load(config['path_h5'] + '/experiments.CopyLCSTS.id={0}.epoch={1}.pkl'.format(tmark, echo))\nlogger.info('generating [testing set] samples')\nfor idx in ts_idx:\n    # idx            = int(np.floor(n_rng.rand() * test_size))\n    test_s, test_t = test_data_plain[idx]\n    v              = agent.evaluate_(np.asarray(test_s, dtype='int32'),\n                                     np.asarray(test_t, dtype='int32'),\n                                     idx2word, np.asarray(unk_filter(test_s), dtype='int32'))\n    log_cp.write(v)\n    log_cp.write('*' * 50 + '\\n')\nlog_cp.close()\nlogger.info('Complete!')"
  },
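  {
    "path": "experiments/examples/unk_filter_sketch.py",
    "content": "\"\"\"\nIllustrative sketch (hypothetical helper, not one of the original\nexperiment files): the unk_filter step shared by the experiment scripts.\nIndices at or above config['voc_size'] are mapped to 1, the <unk> index,\nwhile 0 (<eol>) and in-vocabulary indices pass through.\nVOC_SIZE below is a made-up example value.\n\"\"\"\nimport numpy as np\n\nVOC_SIZE = 5   # assumed example value; the real scripts use config['voc_size']\n\n\ndef unk_filter(data):\n    # keep indices below VOC_SIZE; replace the rest with 1 (<unk>)\n    mask = (np.less(data, VOC_SIZE)).astype('int32')\n    return data * mask + (1 - mask)\n\n\nif __name__ == '__main__':\n    seq = np.asarray([2, 7, 4, 9, 0], dtype='int32')\n    print unk_filter(seq)   # -> [2 1 4 1 0]\n"
  },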
  {
    "path": "experiments/lcsts_test.py",
    "content": "\"\"\"\nThis is the implementation of Copy-NET\nWe start from the basic Seq2seq framework for a auto-encoder.\n\"\"\"\nimport logging\nimport time\nimport numpy as np\nimport sys\n\nfrom theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams\nfrom experiments.config import setup_lcsts\nfrom emolga.utils.generic_utils import *\nfrom emolga.models.cooc_encdec import NRM\nfrom emolga.models.encdec import NRM as NRM0\nfrom emolga.dataset.build_dataset import deserialize_from_file\nfrom collections import OrderedDict\nfrom fuel import datasets\nfrom fuel import transformers\nfrom fuel import schemes\n\nsetup = setup_lcsts\n\n\ndef init_logging(logfile):\n    formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(module)s: %(message)s',\n                                  datefmt='%m/%d/%Y %H:%M:%S'   )\n    fh = logging.FileHandler(logfile)\n    # ch = logging.StreamHandler()\n\n    fh.setFormatter(formatter)\n    # ch.setFormatter(formatter)\n    # fh.setLevel(logging.INFO)\n    # ch.setLevel(logging.INFO)\n    # logging.getLogger().addHandler(ch)\n    logging.getLogger().addHandler(fh)\n    logging.getLogger().setLevel(logging.INFO)\n\n    return logging\n\n# prepare logging.\ntmark   = time.strftime('%Y%m%d-%H%M%S', time.localtime(time.time()))\nconfig  = setup()   # load settings.\nfor w in config:\n    print '{0}={1}'.format(w, config[w])\n\nlogger  = init_logging(config['path_log'] + '/experiments.CopyLCSTS.id={}.log'.format(tmark))\nn_rng   = np.random.RandomState(config['seed'])\nnp.random.seed(config['seed'])\nrng     = RandomStreams(n_rng.randint(2 ** 30))\nlogger.info('Start!')\n\ntrain_set, test_set, idx2word, word2idx = deserialize_from_file(config['dataset'])\n\nconfig['enc_voc_size'] = max(zip(*word2idx.items())[1]) + 1\nconfig['dec_voc_size'] = config['enc_voc_size']\nsamples  = len(train_set['source'])\n\nlogger.info('build dataset done. 
' +\n            'dataset size: {} ||'.format(samples) +\n            'vocabulary size = {0}/ batch size = {1}'.format(\n        config['dec_voc_size'], config['batch_size']))\n\n\ndef build_data(data):\n    # create fuel dataset.\n    dataset     = datasets.IndexableDataset(indexables=OrderedDict([('source', data['source']),\n                                                                    ('target', data['target']),\n                                                                    ('target_c', data['target_c']),\n                                                                    ]))\n    dataset.example_iteration_scheme \\\n                = schemes.ShuffledExampleScheme(dataset.num_examples)\n    return dataset\n\n\ntrain_data        = build_data(train_set)\ntrain_data_plain  = zip(*(train_set['source'], train_set['target']))\ntest_data_plain   = zip(*(test_set['source'],  test_set['target']))\n\ntrain_size        = len(train_data_plain)\ntest_size         = len(test_data_plain)\ntr_idx            = n_rng.permutation(train_size)[:2000].tolist()\nts_idx            = n_rng.permutation(test_size )[:2000].tolist()\nlogger.info('load the data ok.')\n\n# build the agent\nif config['copynet']:\n    agent  = NRM(config, n_rng, rng, mode=config['mode'],\n                 use_attention=True, copynet=config['copynet'], identity=config['identity'])\nelse:\n    agent  = NRM0(config, n_rng, rng, mode=config['mode'],\n                  use_attention=True, copynet=config['copynet'], identity=config['identity'])\n\nagent.build_()\nagent.compile_('all')\nprint 'compile ok.'\n\necho   = 2\nepochs = 10\nif echo > 0:\n    tmark = '20160217-232113'\n    agent.load(config['path_h5'] + '/experiments.CopyLCSTS.id={0}.epoch={1}.pkl'.format(tmark, echo))\n\nwhile echo < epochs:\n    echo += 1\n    loss  = []\n\n    def output_stream(dataset, batch_size, size=1):\n        data_stream = dataset.get_example_stream()\n        data_stream = transformers.Batch(data_stream,\n                                         iteration_scheme=schemes.ConstantScheme(batch_size))\n\n        # add padding and masks to the dataset\n        data_stream = transformers.Padding(data_stream, mask_sources=('source', 'target', 'target_c'))\n        return data_stream\n\n    def prepare_batch(batch, mask, fix_len=None):\n        data = batch[mask].astype('int32')\n        data = np.concatenate([data, np.zeros((data.shape[0], 1), dtype='int32')], axis=1)\n\n        def cut_zeros(data, fix_len=None):\n            if fix_len is not None:\n                return data[:, : fix_len]\n            for k in range(data.shape[1] - 1, 0, -1):\n                data_col = data[:, k].sum()\n                if data_col > 0:\n                    return data[:, : k + 2]\n            return data\n        data = cut_zeros(data, fix_len)\n        return data\n\n    # training\n    notrain = False\n    if not notrain:\n        train_batches = output_stream(train_data, config['batch_size']).get_epoch_iterator(as_dict=True)\n        logger.info('\\nEpoch = {} -> Training Set Learning...'.format(echo))\n        progbar = Progbar(train_size / config['batch_size'])\n        for it, batch in enumerate(train_batches):\n            # obtain data\n            data_s = prepare_batch(batch, 'source')\n            data_t = prepare_batch(batch, 'target')\n            data_c = prepare_batch(batch, 'target_c', data_t.shape[1])\n\n            if config['copynet']:\n                loss += [agent.train_(data_s, data_t, data_c)]\n            else:\n                loss += 
[agent.train_(data_s, data_t)]\n\n            progbar.update(it, [('loss_reg', loss[-1][0]), ('ppl.', loss[-1][1])])\n\n            if it % 200 == 0:\n                logger.info('Echo={} Evaluation Sampling.'.format(it))\n                logger.info('generating [training set] samples')\n                for _ in xrange(5):\n                    idx              = int(np.floor(n_rng.rand() * train_size))\n                    train_s, train_t = train_data_plain[idx]\n                    v                = agent.evaluate_(np.asarray(train_s, dtype='int32'),\n                                                       np.asarray(train_t, dtype='int32'),\n                                                       idx2word)\n                    print '*' * 50\n\n                logger.info('generating [testing set] samples')\n                for _ in xrange(5):\n                    idx            = int(np.floor(n_rng.rand() * test_size))\n                    test_s, test_t = test_data_plain[idx]\n                    v              = agent.evaluate_(np.asarray(test_s, dtype='int32'),\n                                                     np.asarray(test_t, dtype='int32'),\n                                                     idx2word)\n                    print '*' * 50\n\n        # save the weights.\n        agent.save(config['path_h5'] + '/experiments.CopyLCSTS.id={0}.epoch={1}.pkl'.format(tmark, echo))\n\n    # # test accuracy\n    # progbar_tr = Progbar(2000)\n    #\n    # print '\\n' + '__' * 50\n    # gen, gen_pos = 0, 0\n    # cpy, cpy_pos = 0, 0\n    # for it, idx in enumerate(tr_idx):\n    #     train_s, train_t = train_data_plain[idx]\n    #\n    #     c = agent.analyse_(np.asarray(train_s, dtype='int32'),\n    #                        np.asarray(train_t, dtype='int32'),\n    #                        idx2word)\n    #     if c[1] == 0:\n    #         # generation mode\n    #         gen     += 1\n    #         gen_pos += c[0]\n    #     else:\n    #         # copy mode\n    #         cpy     += 1\n    #         cpy_pos += c[0]\n    #\n    #     progbar_tr.update(it + 1, [('Gen', gen_pos), ('Copy', cpy_pos)])\n    #\n    # logger.info('\\nTraining Accuracy:' +\n    #             '\\tGene-Mode: {0}/{1} = {2}%'.format(gen_pos, gen, 100 * gen_pos/float(gen)) +\n    #             '\\tCopy-Mode: {0}/{1} = {2}%'.format(cpy_pos, cpy, 100 * cpy_pos/float(cpy)))\n    #\n    # progbar_ts = Progbar(2000)\n    # print '\\n' + '__' * 50\n    # gen, gen_pos = 0, 0\n    # cpy, cpy_pos = 0, 0\n    # for it, idx in enumerate(ts_idx):\n    #     test_s, test_t = test_data_plain[idx]\n    #     c      = agent.analyse_(np.asarray(test_s, dtype='int32'),\n    #                             np.asarray(test_t, dtype='int32'),\n    #                             idx2word)\n    #     if c[1] == 0:\n    #         # generation mode\n    #         gen     += 1\n    #         gen_pos += c[0]\n    #     else:\n    #         # copy mode\n    #         cpy     += 1\n    #         cpy_pos += c[0]\n    #\n    #     progbar_ts.update(it + 1, [('Gen', gen_pos), ('Copy', cpy_pos)])\n    #\n    # logger.info('\\nTesting Accuracy:' +\n    #             '\\tGene-Mode: {0}/{1} = {2}%'.format(gen_pos, gen, 100 * gen_pos/float(gen)) +\n    #             '\\tCopy-Mode: {0}/{1} = {2}%'.format(cpy_pos, cpy, 100 * cpy_pos/float(cpy)))\n"
  },
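  {
    "path": "experiments/examples/prepare_batch_sketch.py",
    "content": "\"\"\"\nIllustrative sketch (hypothetical helper, not one of the original\nexperiment files): the prepare_batch / cut_zeros step used in the\ntraining loops. A padded batch gets one extra zero column (so every\nrow ends in <eol>), then trailing all-zero columns are trimmed,\nkeeping one zero column after the last non-zero one.\n\"\"\"\nimport numpy as np\n\n\ndef prepare_batch(data, fix_len=None):\n    data = data.astype('int32')\n    data = np.concatenate([data, np.zeros((data.shape[0], 1), dtype='int32')], axis=1)\n    if fix_len is not None:\n        return data[:, :fix_len]\n    # walk back from the last column to the last non-zero one\n    for k in range(data.shape[1] - 1, 0, -1):\n        if data[:, k].sum() > 0:\n            return data[:, :k + 2]\n    return data\n\n\nif __name__ == '__main__':\n    batch = np.asarray([[4, 7, 0, 0], [5, 0, 0, 0]])\n    print prepare_batch(batch)   # -> [[4 7 0] [5 0 0]]\n"
  },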
  {
    "path": "experiments/lcsts_vest.py",
    "content": "\"\"\"\nThis is the implementation of Copy-NET\nWe start from the basic Seq2seq framework for a auto-encoder.\n\"\"\"\nimport logging\nimport time\nimport numpy as np\nimport sys\nimport copy\n\nfrom theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams\nfrom experiments.config import setup_lcsts, setup_weibo, setup_syn\nfrom emolga.utils.generic_utils import *\nfrom emolga.models.covc_encdec import NRM\nfrom emolga.models.encdec import NRM as NRM0\nfrom emolga.dataset.build_dataset import deserialize_from_file\nfrom collections import OrderedDict\nfrom fuel import datasets\nfrom fuel import transformers\nfrom fuel import schemes\n\nsetup = setup_lcsts\n# setup = setup_syn\n\n\ndef init_logging(logfile):\n    formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(module)s: %(message)s',\n                                  datefmt='%m/%d/%Y %H:%M:%S'   )\n    fh = logging.FileHandler(logfile)\n    # ch = logging.StreamHandler()\n\n    fh.setFormatter(formatter)\n    # ch.setFormatter(formatter)\n    # fh.setLevel(logging.INFO)\n    # ch.setLevel(logging.INFO)\n    # logging.getLogger().addHandler(ch)\n    logging.getLogger().addHandler(fh)\n    logging.getLogger().setLevel(logging.INFO)\n\n    return logging\n\n# prepare logging.\ntmark   = time.strftime('%Y%m%d-%H%M%S', time.localtime(time.time()))\nconfig  = setup()   # load settings.\nfor w in config:\n    print '{0}={1}'.format(w, config[w])\n\nlogger  = init_logging(config['path_log'] + '/experiments.CopyLCSTSXXX.id={}.log'.format(tmark))\nn_rng   = np.random.RandomState(config['seed'])\nnp.random.seed(config['seed'])\nrng     = RandomStreams(n_rng.randint(2 ** 30))\nlogger.info('Start!')\n\ntrain_set, test_set, idx2word, word2idx = deserialize_from_file(config['dataset'])\n\nif config['voc_size'] == -1:   # not use unk\n    config['enc_voc_size'] = len(word2idx)\n    config['dec_voc_size'] = config['enc_voc_size']\nelse:\n    config['enc_voc_size'] = config['voc_size']\n    config['dec_voc_size'] = config['enc_voc_size']\n\nsamples  = len(train_set['source'])\nlogger.info('build dataset done. 
' +\n            'dataset size: {} ||'.format(samples) +\n            'vocabulary size = {0}/ batch size = {1}'.format(\n        config['dec_voc_size'], config['batch_size']))\n\n\ndef build_data(data):\n    # create fuel dataset.\n    dataset     = datasets.IndexableDataset(indexables=OrderedDict([('source', data['source']),\n                                                                    ('target', data['target']),\n                                                                    ('target_c', data['target_c']),\n                                                                    ]))\n    dataset.example_iteration_scheme \\\n                = schemes.ShuffledExampleScheme(dataset.num_examples)\n    return dataset\n\n\ntrain_data        = build_data(train_set)\ntrain_data_plain  = zip(*(train_set['source'], train_set['target']))\ntest_data_plain   = zip(*(test_set['source'],  test_set['target']))\n\n# train_data_plain  = zip(*(train_set['source'], train_set['target']))\n# test_data_plain   = zip(*(test_set['source'],  test_set['target']))\n\ntrain_size        = len(train_data_plain)\ntest_size         = len(test_data_plain)\ntr_idx            = n_rng.permutation(train_size)[:2000].tolist()\nts_idx            = n_rng.permutation(test_size )[:2000].tolist()\nlogger.info('load the data ok.')\n\n# build the agent\nif config['copynet']:\n    agent  = NRM(config, n_rng, rng, mode=config['mode'],\n                 use_attention=True, copynet=config['copynet'], identity=config['identity'])\nelse:\n    agent  = NRM0(config, n_rng, rng, mode=config['mode'],\n                  use_attention=True, copynet=config['copynet'], identity=config['identity'])\n\n\necho   = 0\nepochs = 10\nif echo > 0:\n    tmark = '20160221-025049'    # copynet multi-source model\n    agent.load(config['path_h5'] + '/experiments.CopyLCSTSXXX.id={0}.epoch={1}.pkl'.format(tmark, echo))\n\n# recover from Nan\ntmark = '20160307-135907'  # '20160301-105813'\nskip  = 26000\nsep   = 1\n\n# tmark = '20160301-114653'\n# skip  = 14000\nagent.build_(0.001, 26000)\nagent.compile_('all')\nprint 'compile ok.'\nagent.load(config['path_h5'] + '/experiments.CopyLCSTSXXX.id={0}.epoch={1}.iter={2}.pkl'.format(tmark, sep, skip))\n\n\nwhile echo < epochs:\n    echo += 1\n    loss  = []\n\n    def output_stream(dataset, batch_size, size=1):\n        data_stream = dataset.get_example_stream()\n        data_stream = transformers.Batch(data_stream,\n                                         iteration_scheme=schemes.ConstantScheme(batch_size))\n\n        # add padding and masks to the dataset\n        data_stream = transformers.Padding(data_stream, mask_sources=('source', 'target'))\n        return data_stream\n\n    def prepare_batch(batch, mask, fix_len=None):\n        data = batch[mask].astype('int32')\n        data = np.concatenate([data, np.zeros((data.shape[0], 1), dtype='int32')], axis=1)\n\n        def cut_zeros(data, fix_len=None):\n            if fix_len is not None:\n                return data[:, : fix_len]\n            for k in range(data.shape[1] - 1, 0, -1):\n                data_col = data[:, k].sum()\n                if data_col > 0:\n                    return data[:, : k + 2]\n            return data\n        data = cut_zeros(data, fix_len)\n        return data\n\n    def cc_martix(source, target):\n        cc = np.zeros((source.shape[0], target.shape[1], source.shape[1]), dtype='float32')\n        for k in xrange(source.shape[0]):\n            for j in xrange(target.shape[1]):\n                for i in 
xrange(source.shape[1]):\n                    if (source[k, i] == target[k, j]) and (source[k, i] > 0):\n                        cc[k][j][i] = 1.\n        return cc\n\n    def unk_filter(data):\n        if config['voc_size'] == -1:\n            return copy.copy(data)\n        else:\n            mask = (np.less(data, config['voc_size'])).astype(dtype='int32')\n            data = copy.copy(data * mask + (1 - mask))\n            return data\n\n    # training\n    notrain = False\n    if not notrain:\n        train_batches = output_stream(train_data, config['batch_size']).get_epoch_iterator(as_dict=True)\n        logger.info('\\nEpoch = {} -> Training Set Learning...'.format(echo))\n        progbar = Progbar(train_size / config['batch_size'])\n        for it, batch in enumerate(train_batches):\n            if (echo < sep):\n                continue\n\n            if (echo == sep) and (skip > it):\n                if it % 200 == 0:\n                    print it\n                continue\n            \n            # obtain data\n            data_s = prepare_batch(batch, 'source')\n            data_t = prepare_batch(batch, 'target')\n            if config['copynet']:\n                data_c = cc_martix(data_s, data_t)\n                # data_c = prepare_batch(batch, 'target_c', data_t.shape[1])\n                loss += [agent.train_(unk_filter(data_s), unk_filter(data_t), data_c)]\n            else:\n                loss += [agent.train_(unk_filter(data_s), unk_filter(data_t))]\n\n            progbar.update(it, [('loss_reg', loss[-1][0]), ('ppl.', loss[-1][1])])\n\n            if it % 200 == 0:\n                logger.info('Echo={} Evaluation Sampling.'.format(it))\n                logger.info('generating [training set] samples')\n                for _ in xrange(5):\n                    idx              = int(np.floor(n_rng.rand() * train_size))\n                    train_s, train_t = train_data_plain[idx]\n                    v                = agent.evaluate_(np.asarray(train_s, dtype='int32'),\n                                                       np.asarray(train_t, dtype='int32'),\n                                                       idx2word,\n                                                       np.asarray(unk_filter(train_s), dtype='int32'))\n                    print '*' * 50\n\n                logger.info('generating [testing set] samples')\n                for _ in xrange(5):\n                    idx            = int(np.floor(n_rng.rand() * test_size))\n                    test_s, test_t = test_data_plain[idx]\n                    v              = agent.evaluate_(np.asarray(test_s, dtype='int32'),\n                                                     np.asarray(test_t, dtype='int32'),\n                                                     idx2word,\n                                                     np.asarray(unk_filter(test_s), dtype='int32'))\n                    print '*' * 50\n\n        # save the weights.\n            if it % 2000 == 0:\n                agent.save(config['path_h5'] +\n                           '/experiments.CopyLCSTSXXX.id={0}.epoch={1}.iter={2}.pkl'.format(\n                                   tmark, echo, it))\n\n    # # test accuracy\n    test = False\n    if test:\n        progbar_tr = Progbar(2000)\n\n        print '\\n' + '__' * 50\n        cpy, cpy_pos = 0, 0\n        for it, idx in enumerate(tr_idx):\n            train_s, train_t = train_data_plain[idx]\n\n            c = agent.analyse_(np.asarray(train_s, dtype='int32'),\n                            
   np.asarray(train_t, dtype='int32'),\n                               idx2word)\n            # copy mode\n            cpy     += 1\n            cpy_pos += c\n            progbar_tr.update(it + 1, [('Copy', cpy_pos)])\n\n        logger.info('\\nTraining Accuracy:' +\n                    '\\t{0}/{1} = {2}%'.format(cpy_pos, cpy, 100 * cpy_pos / float(cpy)))\n\n        progbar_ts = Progbar(2000)\n        print '\\n' + '__' * 50\n\n        cpy, cpy_pos = 0, 0\n        for it, idx in enumerate(ts_idx):\n            test_s, test_t = test_data_plain[idx]\n            c      = agent.analyse_(np.asarray(test_s, dtype='int32'),\n                                    np.asarray(test_t, dtype='int32'),\n                                    idx2word)\n            cpy     += 1\n            cpy_pos += c\n            progbar_ts.update(it + 1, [('Copy', cpy_pos)])\n\n        logger.info('\\nTesting Accuracy:' +\n                    '\\t{0}/{1} = {2}%'.format(cpy_pos, cpy, 100 * cpy_pos / float(cpy)))\n"
  },
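  {
    "path": "experiments/examples/cc_matrix_sketch.py",
    "content": "\"\"\"\nIllustrative sketch (hypothetical helper, not one of the original\nexperiment files): the copy-indicator tensor built by cc_martix in the\ntraining scripts, next to a vectorized NumPy equivalent.\ncc[k, j, i] = 1 when target word j of batch item k also occurs at\nsource position i (the padding index 0 never matches).\n\"\"\"\nimport numpy as np\n\n\ndef cc_matrix_loops(source, target):\n    # reference version, same logic as cc_martix in lcsts_vest.py\n    cc = np.zeros((source.shape[0], target.shape[1], source.shape[1]), dtype='float32')\n    for k in xrange(source.shape[0]):\n        for j in xrange(target.shape[1]):\n            for i in xrange(source.shape[1]):\n                if (source[k, i] == target[k, j]) and (source[k, i] > 0):\n                    cc[k, j, i] = 1.\n    return cc\n\n\ndef cc_matrix_vectorized(source, target):\n    # broadcast target (batch, T, 1) against source (batch, 1, S)\n    match = target[:, :, None] == source[:, None, :]\n    return (match & (source[:, None, :] > 0)).astype('float32')\n\n\nif __name__ == '__main__':\n    s = np.asarray([[3, 5, 0]], dtype='int32')\n    t = np.asarray([[5, 9]], dtype='int32')\n    assert np.array_equal(cc_matrix_loops(s, t), cc_matrix_vectorized(s, t))\n    print cc_matrix_vectorized(s, t)\n"
  },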
  {
    "path": "experiments/lcsts_vest_new.py",
    "content": "\"\"\"\nThis is the implementation of Copy-NET\nWe start from the basic Seq2seq framework for a auto-encoder.\n\"\"\"\nimport logging\nimport time\nimport numpy as np\nimport sys\nimport copy\n\nfrom theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams\nfrom experiments.config import setup_lcsts, setup_weibo, setup_syn\nfrom emolga.utils.generic_utils import *\nfrom emolga.models.covc_encdec import NRM\nfrom emolga.models.encdec import NRM as NRM0\nfrom emolga.dataset.build_dataset import deserialize_from_file\nfrom collections import OrderedDict\nfrom fuel import datasets\nfrom fuel import transformers\nfrom fuel import schemes\n\nsetup = setup_lcsts\n# setup = setup_syn\n\n\ndef init_logging(logfile):\n    formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(module)s: %(message)s',\n                                  datefmt='%m/%d/%Y %H:%M:%S'   )\n    fh = logging.FileHandler(logfile)\n    # ch = logging.StreamHandler()\n\n    fh.setFormatter(formatter)\n    # ch.setFormatter(formatter)\n    # fh.setLevel(logging.INFO)\n    # ch.setLevel(logging.INFO)\n    # logging.getLogger().addHandler(ch)\n    logging.getLogger().addHandler(fh)\n    logging.getLogger().setLevel(logging.INFO)\n\n    return logging\n\n# prepare logging.\ntmark   = time.strftime('%Y%m%d-%H%M%S', time.localtime(time.time()))\nconfig  = setup()   # load settings.\nfor w in config:\n    print '{0}={1}'.format(w, config[w])\n\nlogger  = init_logging(config['path_log'] + '/experiments.CopyLCSTSXXX.id={}.log'.format(tmark))\nn_rng   = np.random.RandomState(config['seed'])\nnp.random.seed(config['seed'])\nrng     = RandomStreams(n_rng.randint(2 ** 30))\nlogger.info('Start!')\n\ntrain_set, test_set, idx2word, word2idx = deserialize_from_file(config['dataset'])\n\nif config['voc_size'] == -1:   # not use unk\n    config['enc_voc_size'] = len(word2idx)\n    config['dec_voc_size'] = config['enc_voc_size']\nelse:\n    config['enc_voc_size'] = config['voc_size']\n    config['dec_voc_size'] = config['enc_voc_size']\n\nsamples  = len(train_set['source'])\nlogger.info('build dataset done. 
' +\n            'dataset size: {} ||'.format(samples) +\n            'vocabulary size = {0}/ batch size = {1}'.format(\n        config['dec_voc_size'], config['batch_size']))\n\n\ndef build_data(data):\n    # create fuel dataset.\n    dataset     = datasets.IndexableDataset(indexables=OrderedDict([('source', data['source']),\n                                                                    ('target', data['target']),\n                                                                    ('target_c', data['target_c']),\n                                                                    ]))\n    dataset.example_iteration_scheme \\\n                = schemes.ShuffledExampleScheme(dataset.num_examples)\n    return dataset\n\n\ntrain_data        = build_data(train_set)\ntrain_data_plain  = zip(*(train_set['source'], train_set['target']))\ntest_data_plain   = zip(*(test_set['source'],  test_set['target']))\n\n# train_data_plain  = zip(*(train_set['source'], train_set['target']))\n# test_data_plain   = zip(*(test_set['source'],  test_set['target']))\n\ntrain_size        = len(train_data_plain)\ntest_size         = len(test_data_plain)\ntr_idx            = n_rng.permutation(train_size)[:2000].tolist()\nts_idx            = n_rng.permutation(test_size )[:2000].tolist()\nlogger.info('load the data ok.')\n\n# build the agent\nif config['copynet']:\n    agent  = NRM(config, n_rng, rng, mode=config['mode'],\n                 use_attention=True, copynet=config['copynet'], identity=config['identity'])\nelse:\n    agent  = NRM0(config, n_rng, rng, mode=config['mode'],\n                  use_attention=True, copynet=config['copynet'], identity=config['identity'])\n\nagent.build_()\nagent.compile_('all')\nprint 'compile ok.'\n\necho   = 0\nepochs = 10\nif echo > 0:\n    tmark = '20160221-025049'    # copynet multi-source model\n    agent.load(config['path_h5'] + '/experiments.CopyLCSTSXXX.id={0}.epoch={1}.pkl'.format(tmark, echo))\n\n# recover from Nan\n# tmark = '20160301-105813'\nskip  = -1 #100000\nsep   = -1 #2\n\n# tmark = '20160301-114653'\n# skip  = 14000\n# agent.load(config['path_h5'] + '/experiments.CopyLCSTSXXX.id={0}.epoch={1}.iter={2}.pkl'.format(tmark, sep, skip))\n\n\nwhile echo < epochs:\n    echo += 1\n    loss  = []\n\n    def output_stream(dataset, batch_size, size=1):\n        data_stream = dataset.get_example_stream()\n        data_stream = transformers.Batch(data_stream,\n                                         iteration_scheme=schemes.ConstantScheme(batch_size))\n\n        # add padding and masks to the dataset\n        data_stream = transformers.Padding(data_stream, mask_sources=('source', 'target'))\n        return data_stream\n\n    def prepare_batch(batch, mask, fix_len=None):\n        data = batch[mask].astype('int32')\n        data = np.concatenate([data, np.zeros((data.shape[0], 1), dtype='int32')], axis=1)\n\n        def cut_zeros(data, fix_len=None):\n            if fix_len is not None:\n                return data[:, : fix_len]\n            for k in range(data.shape[1] - 1, 0, -1):\n                data_col = data[:, k].sum()\n                if data_col > 0:\n                    return data[:, : k + 2]\n            return data\n        data = cut_zeros(data, fix_len)\n        return data\n\n    def cc_martix(source, target):\n        cc = np.zeros((source.shape[0], target.shape[1], source.shape[1]), dtype='float32')\n        for k in xrange(source.shape[0]):\n            for j in xrange(target.shape[1]):\n                for i in xrange(source.shape[1]):\n                 
   if (source[k, i] == target[k, j]) and (source[k, i] > 0):\n                        cc[k][j][i] = 1.\n        return cc\n\n    def unk_filter(data):\n        if config['voc_size'] == -1:\n            return copy.copy(data)\n        else:\n            mask = (np.less(data, config['voc_size'])).astype(dtype='int32')\n            data = copy.copy(data * mask + (1 - mask))\n            return data\n\n    # training\n    notrain = False\n    if not notrain:\n        train_batches = output_stream(train_data, config['batch_size']).get_epoch_iterator(as_dict=True)\n        logger.info('\\nEpoch = {} -> Training Set Learning...'.format(echo))\n        progbar = Progbar(train_size / config['batch_size'])\n        for it, batch in enumerate(train_batches):\n            if (echo < sep):\n                continue\n\n            if (echo == sep) and (skip > it):\n                if it % 200 == 0:\n                    print it\n                continue\n            \n            # obtain data\n            data_s = prepare_batch(batch, 'source')\n            data_t = prepare_batch(batch, 'target')\n            if config['copynet']:\n                data_c = cc_martix(data_s, data_t)\n                # data_c = prepare_batch(batch, 'target_c', data_t.shape[1])\n                loss += [agent.train_(unk_filter(data_s), unk_filter(data_t), data_c)]\n            else:\n                loss += [agent.train_(unk_filter(data_s), unk_filter(data_t))]\n\n            progbar.update(it, [('loss_reg', loss[-1][0]), ('ppl.', loss[-1][1])])\n\n            if it % 200 == 0:\n                logger.info('Echo={} Evaluation Sampling.'.format(it))\n                logger.info('generating [training set] samples')\n                for _ in xrange(5):\n                    idx              = int(np.floor(n_rng.rand() * train_size))\n                    train_s, train_t = train_data_plain[idx]\n                    v                = agent.evaluate_(np.asarray(train_s, dtype='int32'),\n                                                       np.asarray(train_t, dtype='int32'),\n                                                       idx2word,\n                                                       np.asarray(unk_filter(train_s), dtype='int32'))\n                    print '*' * 50\n\n                logger.info('generating [testing set] samples')\n                for _ in xrange(5):\n                    idx            = int(np.floor(n_rng.rand() * test_size))\n                    test_s, test_t = test_data_plain[idx]\n                    v              = agent.evaluate_(np.asarray(test_s, dtype='int32'),\n                                                     np.asarray(test_t, dtype='int32'),\n                                                     idx2word,\n                                                     np.asarray(unk_filter(test_s), dtype='int32'))\n                    print '*' * 50\n\n        # save the weights.\n            if it % 2000 == 0:\n                agent.save(config['path_h5'] +\n                           '/experiments.CopyLCSTSXXX.id={0}.epoch={1}.iter={2}.pkl'.format(\n                                   tmark, echo, it))\n\n    # # test accuracy\n    test = False\n    if test:\n        progbar_tr = Progbar(2000)\n\n        print '\\n' + '__' * 50\n        cpy, cpy_pos = 0, 0\n        for it, idx in enumerate(tr_idx):\n            train_s, train_t = train_data_plain[idx]\n\n            c = agent.analyse_(np.asarray(train_s, dtype='int32'),\n                               np.asarray(train_t, dtype='int32'),\n   
                            idx2word)\n            # copy mode\n            cpy     += 1\n            cpy_pos += c\n            progbar_tr.update(it + 1, [('Copy', cpy_pos)])\n\n        logger.info('\\nTraining Accuracy:' +\n                    '\\t{0}/{1} = {2}%'.format(cpy_pos, cpy, 100 * cpy_pos / float(cpy)))\n\n        progbar_ts = Progbar(2000)\n        print '\\n' + '__' * 50\n\n        cpy, cpy_pos = 0, 0\n        for it, idx in enumerate(ts_idx):\n            test_s, test_t = test_data_plain[idx]\n            c      = agent.analyse_(np.asarray(test_s, dtype='int32'),\n                                    np.asarray(test_t, dtype='int32'),\n                                    idx2word)\n            cpy     += 1\n            cpy_pos += c\n            progbar_ts.update(it + 1, [('Copy', cpy_pos)])\n\n        logger.info('\\nTesting Accuracy:' +\n                    '\\t{0}/{1} = {2}%'.format(cpy_pos, cpy, 100 * cpy_pos / float(cpy)))\n"
  },
  {
    "path": "experiments/movie_dataset.py",
    "content": "# coding=utf-8\nfrom emolga.dataset.build_dataset import deserialize_from_file, serialize_to_file\nimport string\nimport random\nimport sys\n\nrandom.seed(19920206)\nword2idx  = dict()\nwordfreq  = dict()\nword2idx['<eol>'] = 0\nword2idx['<unk>'] = 1\nword2freq = dict()\n\n\ndef mark(line):\n    tmp_line = ''\n    for c in line:\n        if c in string.punctuation:\n            if c is not \"'\":\n                tmp_line += ' ' + c + ' '\n            else:\n                tmp_line += ' ' + c\n        else:\n            tmp_line += c\n    tmp_line = tmp_line.lower()\n    words = [w for w in tmp_line.split() if len(w) > 0]\n    for w in words:\n        if w not in word2freq:\n            word2freq[w]  = 1\n        else:\n            word2freq[w] += 1\n    return words\n\n\nfline     = open('./dataset/cornell_movie/movie_lines.txt', 'r')\nsets      = [w.split('+++$+++') for w in fline.read().split('\\n')]\nlines     = {w[0].strip(): mark(w[-1].strip()) for w in sets}\n#\n# for w in lines:\n#     if len(lines[w]) == 0:\n#         print w\n\nfline.close()\nprint 'read lines ok'\nfconv     = open('./dataset/cornell_movie/movie_conversations.txt', 'r')\n\nturns     = []\nconvs     = fconv.readline()\nwhile convs:\n    turn   = eval(convs.split('+++$+++')[-1].strip())\n    turns += zip(turn[:-1], turn[1:])\n    convs  = fconv.readline()\n\npairs     = [(lines[a], lines[b]) for a, b in turns\n             if len(lines[a]) > 0 and len(lines[b]) > 0]\n\n# shuffle!\nrandom.shuffle(pairs)\n\nword2freq = sorted(word2freq.items(), key=lambda a: a[1], reverse=True)\nfor at, w in enumerate(word2freq):\n    word2idx[w[0]] = at + 2\n\nidx2word  = {k: v for v, k in word2idx.items()}\nprint idx2word[1], idx2word[2]\n\nLmax     = len(idx2word)\n# for i in xrange(Lmax):\n#     print idx2word[i]\nprint 'read dataset ok.'\nprint Lmax\nprint pairs[0]\n\n\ndef build_data(data):\n    instance = dict(text=[], summary=[], source=[], target=[], target_c=[])\n    print len(data)\n    for pair in data:\n        source, target = pair\n        A = [word2idx[w] for w in source]\n        B = [word2idx[w] for w in target]\n        # C = np.asarray([[w == l for w in source] for l in target], dtype='float32')\n        C = [0 if w not in source else source.index(w) + Lmax for w in target]\n\n        instance['text']      += [source]\n        instance['summary']   += [target]\n        instance['source']    += [A]\n        instance['target']    += [B]\n        # instance['cc_matrix'] += [C]\n        instance['target_c'] += [C]\n\n    print instance['source'][4000]\n    print instance['target'][4000]\n    print instance['target_c'][4000]\n    return instance\n\n\ntrain_set = build_data(pairs[10000:])\ntest_set  = build_data(pairs[:10000])\nserialize_to_file([train_set, test_set, idx2word, word2idx], './dataset/movie_dialogue_data.pkl')\n"
  },
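  {
    "path": "experiments/examples/target_c_sketch.py",
    "content": "\"\"\"\nIllustrative sketch (hypothetical helper, not one of the original\nexperiment files): the target_c encoding produced by build_data in\nmovie_dataset.py and weibo_dataset.py. A target word that also occurs\nin the source is encoded as (first source position + Lmax), so any\nindex >= Lmax signals 'copy from source', while 0 means 'generate'.\nLmax = 10 is a made-up example value.\n\"\"\"\nLmax = 10   # assumed example value; the real scripts use len(idx2word)\n\n\ndef encode_copy(source, target):\n    # 0 for generate-only words, source position + Lmax for copyable ones\n    return [0 if w not in source else source.index(w) + Lmax for w in target]\n\n\nif __name__ == '__main__':\n    source = ['the', 'cat', 'sat']\n    target = ['a', 'cat', 'sat']\n    print encode_copy(source, target)   # -> [0, 11, 12]\n"
  },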
  {
    "path": "experiments/syn_vest.py",
    "content": "\"\"\"\nThis is the implementation of Copy-NET\nWe start from the basic Seq2seq framework for a auto-encoder.\n\"\"\"\nimport logging\nimport time\nimport numpy as np\nimport sys\nimport copy\n\nfrom theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams\nfrom experiments.config import setup_lcsts, setup_weibo, setup_syn, setup_bst\nfrom emolga.utils.generic_utils import *\nfrom emolga.models.covc_encdec import NRM\nfrom emolga.models.encdec import NRM as NRM0\nfrom emolga.dataset.build_dataset import deserialize_from_file\nfrom collections import OrderedDict\nfrom fuel import datasets\nfrom fuel import transformers\nfrom fuel import schemes\n\n# setup = setup_lcsts\nsetup = setup_syn\n# setup = setup_bst\n\n\ndef init_logging(logfile):\n    formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(module)s: %(message)s',\n                                  datefmt='%m/%d/%Y %H:%M:%S'   )\n    fh = logging.FileHandler(logfile)\n    # ch = logging.StreamHandler()\n\n    fh.setFormatter(formatter)\n    # ch.setFormatter(formatter)\n    # fh.setLevel(logging.INFO)\n    # ch.setLevel(logging.INFO)\n    # logging.getLogger().addHandler(ch)\n    logging.getLogger().addHandler(fh)\n    logging.getLogger().setLevel(logging.INFO)\n\n    return logging\n\n# prepare logging.\ntmark   = time.strftime('%Y%m%d-%H%M%S', time.localtime(time.time()))\nconfig  = setup()   # load settings.\nfor w in config:\n    print '{0}={1}'.format(w, config[w])\n\nlogger  = init_logging(config['path_log'] + '/experiments.CopyLCSTS.id={}.log'.format(tmark))\nn_rng   = np.random.RandomState(config['seed'])\nnp.random.seed(config['seed'])\nrng     = RandomStreams(n_rng.randint(2 ** 30))\nlogger.info('Start!')\n\ntrain_set, test_set, idx2word, word2idx = deserialize_from_file(config['dataset'])\n\nif config['voc_size'] == -1:   # not use unk\n    config['enc_voc_size'] = len(word2idx)\n    config['dec_voc_size'] = config['enc_voc_size']\nelse:\n    config['enc_voc_size'] = config['voc_size']\n    config['dec_voc_size'] = config['enc_voc_size']\n\nsamples  = len(train_set['source'])\nlogger.info('build dataset done. 
' +\n            'dataset size: {} ||'.format(samples) +\n            'vocabulary size = {0}/ batch size = {1}'.format(\n        config['dec_voc_size'], config['batch_size']))\n\n\ndef build_data(data):\n    # create fuel dataset.\n    dataset     = datasets.IndexableDataset(indexables=OrderedDict([('source', data['source']),\n                                                                    ('target', data['target']),\n                                                                    ('target_c', data['target_c']),\n                                                                    ]))\n    dataset.example_iteration_scheme \\\n                = schemes.ShuffledExampleScheme(dataset.num_examples)\n    return dataset\n\n\ntrain_data        = build_data(train_set)\ntrain_data_plain  = zip(*(train_set['source'], train_set['target'], train_set['rule']))\ntest_data_plain   = zip(*(test_set['source'],  test_set['target'],  test_set['rule']))\n\n# train_data_plain  = zip(*(train_set['source'], train_set['target']))\n# test_data_plain   = zip(*(test_set['source'],  test_set['target']))\n\ntrain_size        = len(train_data_plain)\ntest_size         = len(test_data_plain)\ntr_idx            = n_rng.permutation(train_size)[:2000].tolist()\nts_idx            = n_rng.permutation(test_size )[:2000].tolist()\nlogger.info('load the data ok.')\nconfig['copynet'] = True  # False\nnotrain           = True\n\n# build the agent\nif config['copynet']:\n    agent  = NRM(config, n_rng, rng, mode=config['mode'],\n                 use_attention=True, copynet=config['copynet'], identity=config['identity'])\nelse:\n    agent  = NRM0(config, n_rng, rng, mode=config['mode'],\n                  use_attention=True, copynet=config['copynet'], identity=config['identity'])\n\nagent.build_()\nif notrain:\n    agent.compile_('display')\nelse:\n    agent.compile_('all')\nprint 'compile ok.'\n\necho   = 6\nepochs = 10\nif echo > 0:\n    tmark = '20160227-013418'    # copynet multi-source model\n    agent.load(config['path_h5'] + '/experiments.Copy{2}.id={0}.epoch={1}.pkl'.format(tmark, echo, config['modelname']))\n\nwhile echo < epochs:\n    echo += 1\n    loss  = []\n\n    def output_stream(dataset, batch_size, size=1):\n        data_stream = dataset.get_example_stream()\n        data_stream = transformers.Batch(data_stream,\n                                         iteration_scheme=schemes.ConstantScheme(batch_size))\n\n        # add padding and masks to the dataset\n        data_stream = transformers.Padding(data_stream, mask_sources=('source', 'target'))\n        return data_stream\n\n    def prepare_batch(batch, mask, fix_len=None):\n        data = batch[mask].astype('int32')\n        data = np.concatenate([data, np.zeros((data.shape[0], 1), dtype='int32')], axis=1)\n\n        def cut_zeros(data, fix_len=None):\n            if fix_len is not None:\n                return data[:, : fix_len]\n            for k in range(data.shape[1] - 1, 0, -1):\n                data_col = data[:, k].sum()\n                if data_col > 0:\n                    return data[:, : k + 2]\n            return data\n        data = cut_zeros(data, fix_len)\n        return data\n\n    def cc_martix(source, target):\n        cc = np.zeros((source.shape[0], target.shape[1], source.shape[1]), dtype='float32')\n        for k in xrange(source.shape[0]):\n            for j in xrange(target.shape[1]):\n                for i in xrange(source.shape[1]):\n                    if (source[k, i] == target[k, j]) and (source[k, i] > 0):\n                      
  cc[k][j][i] = 1.\n        return cc\n\n    def unk_filter(data):\n        if config['voc_size'] == -1:\n            return copy.copy(data)\n        else:\n            mask = (np.less(data, config['voc_size'])).astype(dtype='int32')\n            data = copy.copy(data * mask + (1 - mask))\n            return data\n\n    # training\n    if not notrain:\n        train_batches = output_stream(train_data, config['batch_size']).get_epoch_iterator(as_dict=True)\n        logger.info('\\nEpoch = {} -> Training Set Learning...'.format(echo))\n        progbar = Progbar(train_size / config['batch_size'])\n        for it, batch in enumerate(train_batches):\n            # obtain data\n            data_s = prepare_batch(batch, 'source')\n            data_t = prepare_batch(batch, 'target')\n            if config['copynet']:\n                data_c = cc_martix(data_s, data_t)\n                # data_c = prepare_batch(batch, 'target_c', data_t.shape[1])\n                loss += [agent.train_(unk_filter(data_s), unk_filter(data_t), data_c)]\n            else:\n                loss += [agent.train_(unk_filter(data_s), unk_filter(data_t))]\n\n            progbar.update(it, [('loss_reg', loss[-1][0]), ('ppl.', loss[-1][1])])\n\n            if it % 200 == 0:\n                logger.info('Echo={} Evaluation Sampling.'.format(it))\n                logger.info('generating [training set] samples')\n                for _ in xrange(5):\n                    idx              = int(np.floor(n_rng.rand() * train_size))\n                    train_s, train_t = train_data_plain[idx]\n                    v                = agent.evaluate_(np.asarray(train_s, dtype='int32'),\n                                                       np.asarray(train_t, dtype='int32'),\n                                                       idx2word,\n                                                       np.asarray(unk_filter(train_s), dtype='int32'))\n                    print '*' * 50\n\n                logger.info('generating [testing set] samples')\n                for _ in xrange(5):\n                    idx            = int(np.floor(n_rng.rand() * test_size))\n                    test_s, test_t = test_data_plain[idx]\n                    v              = agent.evaluate_(np.asarray(test_s, dtype='int32'),\n                                                     np.asarray(test_t, dtype='int32'),\n                                                     idx2word,\n                                                     np.asarray(unk_filter(test_s), dtype='int32'))\n                    print '*' * 50\n\n        # save the weights.\n        agent.save(config['path_h5'] + '/experiments.Copy{2}.id={0}.epoch={1}.pkl'.format(tmark, echo, config['modelname']))\n\n    # # test accuracy\n    def judge_rule(rule):\n        rule = rule.split()\n        fine = ''\n        for w in rule:\n            if w not in word2idx:\n                fine += w\n        return fine\n\n    test = True\n    if test:\n        def analysis_(data_plain, mode='Training'):\n            progbar_tr = Progbar(2000)\n            print '\\n' + '__' * 50\n            cpy, cpy_pos = 0, 0\n            types = dict()\n            for it, idx in enumerate(tr_idx):\n                train_s, train_t, rule = data_plain[idx]\n                t = judge_rule(rule)\n                c = float(agent.analyse_(np.asarray(train_s, dtype='int32'),\n                                   np.asarray(train_t, dtype='int32'),\n                                   idx2word))\n                # copy mode\n            
    cpy     += 1\n                cpy_pos += c\n                if t not in types:\n                    types[t] = {}\n                    types[t][0] = c\n                    types[t][1] = 1\n                else:\n                    types[t][0] += c\n                    types[t][1] += 1\n\n                progbar_tr.update(it + 1, [('Copy', cpy_pos)])\n            logger.info('\\n{0} Accuracy:' +\n                        '\\t{1}/{2} = {3}%'.format(mode, cpy_pos, cpy, 100 * cpy_pos / float(cpy)))\n            print '==' * 50\n            for t in types:\n                print 'Type: {0}: {1}/{2}={3}%'.format(t, int(types[t][0]), types[t][1],\n                                                       100 * types[t][0] / float(types[t][1]))\n\n\n        # analysis_(train_data_plain, 'Training')\n        analysis_(test_data_plain, 'Testing')\n"
  },
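  {
    "path": "experiments/examples/judge_rule_sketch.py",
    "content": "\"\"\"\nIllustrative sketch (hypothetical helper, not one of the original\nexperiment files): the judge_rule heuristic in syn_vest.py, which keys\nper-rule accuracy by the placeholder symbols of a rule string, i.e. the\ntokens that fall outside the regular vocabulary. VOCAB is a made-up\nexample; the real script checks membership in word2idx.\n\"\"\"\nVOCAB = set(['123', '456', '789'])   # assumed example vocabulary\n\n\ndef judge_rule(rule):\n    # concatenate the out-of-vocabulary (placeholder) tokens\n    fine = ''\n    for w in rule.split():\n        if w not in VOCAB:\n            fine += w\n    return fine\n\n\nif __name__ == '__main__':\n    print judge_rule('123 X 456 -> X 789')   # -> 'X->X'\n"
  },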
  {
    "path": "experiments/syntest.py",
    "content": "\"\"\"\nThis is the implementation of Copy-NET\nWe start from the basic Seq2seq framework for a auto-encoder.\n\"\"\"\nimport logging\nimport time\nimport numpy as np\n\nfrom theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams\nfrom experiments.config import setup_syn\nfrom emolga.utils.generic_utils import *\nfrom emolga.models.cooc_encdec import NRM\nfrom emolga.models.encdec import NRM as NRM0\nfrom emolga.dataset.build_dataset import deserialize_from_file\nfrom collections import OrderedDict\nfrom fuel import datasets\nfrom fuel import transformers\nfrom fuel import schemes\n\nsetup = setup_syn\n\n\ndef init_logging(logfile):\n    formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(module)s: %(message)s',\n                                  datefmt='%m/%d/%Y %H:%M:%S'   )\n    fh = logging.FileHandler(logfile)\n    # ch = logging.StreamHandler()\n\n    fh.setFormatter(formatter)\n    # ch.setFormatter(formatter)\n    # fh.setLevel(logging.INFO)\n    # ch.setLevel(logging.INFO)\n    # logging.getLogger().addHandler(ch)\n    logging.getLogger().addHandler(fh)\n    logging.getLogger().setLevel(logging.INFO)\n\n    return logging\n\n# prepare logging.\ntmark   = time.strftime('%Y%m%d-%H%M%S', time.localtime(time.time()))\nconfig  = setup()   # load settings.\nfor w in config:\n    print '{0}={1}'.format(w, config[w])\n\nlogger  = init_logging(config['path_log'] + '/experiments.Copy.id={}.log'.format(tmark))\nn_rng  = np.random.RandomState(config['seed'])\nnp.random.seed(config['seed'])\nrng    = RandomStreams(n_rng.randint(2 ** 30))\nlogger.info('Start!')\n\n# the vocabulary\ntmp      = [chr(x) for x in range(48, 58)]  # '1', ... , '9', '0'\nvoc      = [tmp[a] + tmp[b] + tmp[c]\n            for c in xrange(10)\n            for b in xrange(10)\n            for a in xrange(10)]\nword2idx           = {voc[k]: k + 1 for k in xrange(len(voc))}\nword2idx['<eol>']  = 0\nidx2word           = {word2idx[w]: w for w in word2idx}\nvoc                = ['<eol>'] + voc\n\ntrain_set, test_set = deserialize_from_file(config['dataset'])\n\nconfig['enc_voc_size'] = max(zip(*word2idx.items())[1]) + 1\nconfig['dec_voc_size'] = config['enc_voc_size']\nsamples  = len(train_set['source'])\n\nlogger.info('build dataset done. 
' +\n            'dataset size: {} ||'.format(samples) +\n            'vocabulary size = {0}/ batch size = {1}'.format(\n        config['dec_voc_size'], config['batch_size']))\n\n\ndef build_data(data):\n    # create fuel dataset.\n    dataset     = datasets.IndexableDataset(indexables=OrderedDict([('source', data['source']),\n                                                                    ('target', data['target']),\n                                                                    ('target_c', data['target_c']),\n                                                                    ('rule_id', data['rule_id']),\n                                                                    ('rule', data['rule'])]))\n    dataset.example_iteration_scheme \\\n                = schemes.ShuffledExampleScheme(dataset.num_examples)\n    return dataset\n\n\ntrain_data        = build_data(train_set)\ntrain_data_plain  = zip(*(train_set['source'], train_set['target'], train_set['rule_id'], train_set['rule']))\ntest_data_plain   = zip(*(test_set['source'],  test_set['target'],  test_set['rule_id'],  test_set['rule']))\n\ntrain_size        = len(train_data_plain)\ntest_size         = len(test_data_plain)\ntr_idx            = n_rng.permutation(train_size)[:2000].tolist()\nts_idx            = n_rng.permutation(test_size )[:2000].tolist()\nlogger.info('load the data ok.')\n\n# build the agent\nif config['copynet']:\n    agent  = NRM(config, n_rng, rng, mode=config['mode'],\n                 use_attention=True, copynet=config['copynet'], identity=config['identity'])\nelse:\n    agent  = NRM0(config, n_rng, rng, mode=config['mode'],\n                  use_attention=True, copynet=config['copynet'], identity=config['identity'])\n\nagent.build_()\nagent.compile_('all')\nprint 'compile ok.'\n\necho   = 3\nepochs = 4\nif echo > 0:\n    tmark = '20160216-152155'\n    agent.load(config['path_h5'] + '/experiments.Copy.id={0}.epoch={1}.pkl'.format(tmark, echo))\n\nwhile echo < epochs:\n    echo += 1\n    loss  = []\n\n    def output_stream(dataset, batch_size, size=1):\n        data_stream = dataset.get_example_stream()\n        data_stream = transformers.Batch(data_stream,\n                                         iteration_scheme=schemes.ConstantScheme(batch_size))\n\n        # add padding and masks to the dataset\n        data_stream = transformers.Padding(data_stream, mask_sources=('source', 'target', 'target_c'))\n        return data_stream\n\n    def prepare_batch(batch, mask, fix_len=None):\n        data = batch[mask].astype('int32')\n        data = np.concatenate([data, np.zeros((data.shape[0], 1), dtype='int32')], axis=1)\n\n        def cut_zeros(data, fix_len=None):\n            if fix_len is not None:\n                return data[:, : fix_len]\n            for k in range(data.shape[1] - 1, 0, -1):\n                data_col = data[:, k].sum()\n                if data_col > 0:\n                    return data[:, : k + 2]\n            return data\n        data = cut_zeros(data, fix_len)\n        return data\n\n    # training\n    notrain = True\n    if not notrain:\n        train_batches = output_stream(train_data, config['batch_size']).get_epoch_iterator(as_dict=True)\n        logger.info('\\nEpoch = {} -> Training Set Learning...'.format(echo))\n        progbar = Progbar(train_size / config['batch_size'])\n        for it, batch in enumerate(train_batches):\n            # obtain data\n            data_s = prepare_batch(batch, 'source')\n            data_t = prepare_batch(batch, 'target')\n            data_c = 
prepare_batch(batch, 'target_c', data_t.shape[1])\n\n            if config['copynet']:\n                loss += [agent.train_(data_s, data_t, data_c)]\n            else:\n                loss += [agent.train_(data_s, data_t)]\n\n            progbar.update(it, [('loss_reg', loss[-1][0]), ('ppl.', loss[-1][1])])\n\n            if it % 200 == 0:\n                logger.info('Echo={} Evaluation Sampling.'.format(it))\n                logger.info('generating [training set] samples')\n                for _ in xrange(5):\n                    idx              = int(np.floor(n_rng.rand() * train_size))\n                    train_s, train_t, _, _ = train_data_plain[idx]\n                    v                = agent.evaluate_(np.asarray(train_s, dtype='int32'),\n                                                       np.asarray(train_t, dtype='int32'),\n                                                       idx2word)\n                    print '*' * 50\n\n                logger.info('generating [testing set] samples')\n                for _ in xrange(5):\n                    idx            = int(np.floor(n_rng.rand() * test_size))\n                    test_s, test_t, _, _ = test_data_plain[idx]\n                    v              = agent.evaluate_(np.asarray(test_s, dtype='int32'),\n                                                     np.asarray(test_t, dtype='int32'),\n                                                     idx2word)\n                    print '*' * 50\n\n        # save the weights.\n        agent.save(config['path_h5'] + '/experiments.Copy.id={0}.epoch={1}.pkl'.format(tmark, echo))\n\n    # test accuracy\n    progbar_tr = Progbar(2000)\n\n    print '\\n' + '__' * 50\n    gen, gen_pos = 0, 0\n    cpy, cpy_pos = 0, 0\n    grs, crs     = [], []\n    for it, idx in enumerate(tr_idx):\n        train_s, train_t, rid, rule = train_data_plain[idx]\n\n        c = agent.analyse_(np.asarray(train_s, dtype='int32'),\n                           np.asarray(train_t, dtype='int32'),\n                           idx2word)\n        if c[1] == 0:\n            # generation mode\n            gen     += 1\n            gen_pos += c[0]\n            if c[0] == 1:\n                grs     += [rule]\n        else:\n            # copy mode\n            cpy     += 1\n            cpy_pos += c[0]\n            if c[0] == 1:\n                crs     += [rule]\n\n        progbar_tr.update(it + 1, [('Gen', gen_pos), ('Copy', cpy_pos)])\n    grs = set(grs)\n    crs = set(crs)\n    irs = set.intersection(grs, crs)\n\n    logger.info('\\nTraining Accuracy:' +\n                '\\tGene-Mode: {0}/{1} = {2}%'.format(gen_pos, gen, 100 * gen_pos/float(gen)) +\n                '\\tCopy-Mode: {0}/{1} = {2}%'.format(cpy_pos, cpy, 100 * cpy_pos/float(cpy)) +\n                '\\tGene-Rule: {0}, Copy-Rule: {1}, Intersection: {2}'.format(len(grs), len(crs), len(irs)))\n\n    print 'Generate Mode:'\n    for r in grs:\n        print r\n\n    print 'Copy Mode:'\n    for r in crs:\n        print r\n\n    print 'Interaction:'\n    for r in irs:\n        print r\n\n    progbar_ts = Progbar(2000)\n    print '\\n' + '__' * 50\n    gen, gen_pos = 0, 0\n    cpy, cpy_pos = 0, 0\n    grs, crs     = [], []\n    for it, idx in enumerate(ts_idx):\n        test_s, test_t, rid, rule = test_data_plain[idx]\n        c      = agent.analyse_(np.asarray(test_s, dtype='int32'),\n                                np.asarray(test_t, dtype='int32'),\n                                idx2word)\n        if c[1] == 0:\n            # generation mode\n            gen  
   += 1\n            gen_pos += c[0]\n            grs     += [rule]\n        else:\n            # copy mode\n            cpy     += 1\n            cpy_pos += c[0]\n            crs     += [rule]\n\n        progbar_ts.update(it + 1, [('Gen', gen_pos), ('Copy', cpy_pos)])\n    grs = set(grs)\n    crs = set(crs)\n    irs = set.intersection(grs, crs)\n\n    logger.info('\\nTesting Accuracy:' +\n                '\\tGene-Mode: {0}/{1} = {2}%'.format(gen_pos, gen, 100 * gen_pos/float(gen)) +\n                '\\tCopy-Mode: {0}/{1} = {2}%'.format(cpy_pos, cpy, 100 * cpy_pos/float(cpy)) +\n                '\\tGene-Rule: {0}, Copy-Rule: {1}, Intersection: {2}'.format(len(grs), len(crs), len(irs)))\n\n    print 'Generate Mode:'\n    for r in grs:\n        print r\n\n    print 'Copy Mode:'\n    for r in crs:\n        print r\n\n    print 'Interaction:'\n    for r in irs:\n        print r"
  },
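  {
    "path": "experiments/examples/mode_accuracy_sketch.py",
    "content": "\"\"\"\nIllustrative sketch (hypothetical helper, not one of the original\nexperiment files): the accuracy bookkeeping in syntest.py, assuming\nagent.analyse_ returns a pair (correct, mode) with mode 0 for\ngeneration-mode instances and non-zero for copy-mode instances.\n\"\"\"\n\n\ndef tally(results):\n    # results: list of (correct, mode) pairs as returned by analyse_\n    gen, gen_pos, cpy, cpy_pos = 0, 0, 0, 0\n    for correct, mode in results:\n        if mode == 0:\n            gen += 1\n            gen_pos += correct\n        else:\n            cpy += 1\n            cpy_pos += correct\n    return (gen_pos, gen), (cpy_pos, cpy)\n\n\nif __name__ == '__main__':\n    (gp, g), (cp, c) = tally([(1, 0), (0, 1), (1, 1), (1, 1)])\n    print 'Gene-Mode: {0}/{1}'.format(gp, g)   # -> 1/1\n    print 'Copy-Mode: {0}/{1}'.format(cp, c)   # -> 2/3\n"
  },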
  {
    "path": "experiments/synthetic.py",
    "content": "__author__ = 'jiataogu'\nfrom emolga.dataset.build_dataset import deserialize_from_file, serialize_to_file\nimport numpy.random as n_rng\n\nn_rng.seed(19920206)\n# the vocabulary\ntmp      = [chr(x) for x in range(48, 58)]  # '1', ... , '9', '0'\nvoc      = [tmp[a] + tmp[b] + tmp[c]\n            for c in xrange(10)\n            for b in xrange(10)\n            for a in xrange(10)]\nword2idx           = {voc[k]: k + 2 for k in xrange(len(voc))}\nword2idx['<eol>']  = 0\nword2idx['<unk>']  = 1\nidx2word           = {word2idx[w]: w for w in word2idx}\nvoc                = ['<eol>', '<unk>'] + voc\n\n# word2idx['X']      = len(voc)\n# idx2word[len(voc)] = 'X'\n# voc               += ['X']\n#\n# word2idx['Y']      = len(voc)\n# idx2word[len(voc)] = 'Y'\n# voc               += ['Y']\n# print word2idx['X'], word2idx['Y']\n\n# load the dataset\nRules, _ = deserialize_from_file('/home/thoma/Work/Dial-DRL/dataset/rules.rnd.n10k.pkl')\nnum      = 200\nrepeats  = 100\nmaxleg   = 15\nLmax     = len(idx2word)\nrules    = dict(source=Rules['source'][:num],\n                target=Rules['target'][:num])\n\n\ndef ftr(v):\n    if v < 10:\n        return '00' + str(v)\n    elif v < 100:\n        return '0' + str(v)\n    else:\n        return str(v)\n\n\ndef build_instance():\n    instance = dict(x=[], y=[], source=[], target=[], target_c=[], rule_id=[], rule=[])\n    for k in xrange(num):\n        source = rules['source'][k]\n        target = rules['target'][k]\n\n        for j in xrange(repeats):\n            X  = n_rng.randint(1000, size= n_rng.randint(maxleg) + 1)\n            Y  = n_rng.randint(1000, size= n_rng.randint(maxleg) + 1)\n            S  = []\n            T  = []\n            for w in source:\n                if w is 'X':\n                    S += [ftr(v) for v in X]\n                elif w is 'Y':\n                    S += [ftr(v) for v in Y]\n                else:\n                    S += [w]\n\n            for w in target:\n                if w is 'X':\n                    T += [ftr(v) for v in X]\n                elif w is 'Y':\n                    T += [ftr(v) for v in Y]\n                else:\n                    T += [w]\n\n            A  = [word2idx[w] for w in S]\n            B  = [word2idx[w] for w in T]\n            C  = [0 if w not in S else S.index(w) + Lmax for w in T]\n\n            instance['x']        += [S]\n            instance['y']        += [T]\n            instance['source']   += [A]\n            instance['target']   += [B]\n            instance['target_c'] += [C]\n\n            instance['rule_id']  += [k]\n            instance['rule']     += [' '.join(source) + ' -> ' + ' '.join(target)]\n\n    return instance\n\ntrain_set = build_instance()\nprint 'build ok.'\ntest_set  = build_instance()\nprint 'build ok.'\n\nserialize_to_file([train_set, test_set, idx2word, word2idx], '/home/thoma/Work/Dial-DRL/dataset/synthetic_data_c.pkl')\n# serialize_to_file([train_set, test_set], '/home/thoma/Work/Dial-DRL/dataset/synthetic_data.pkl')\n"
  },
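  {
    "path": "experiments/examples/rule_instantiation_sketch.py",
    "content": "\"\"\"\nIllustrative sketch (hypothetical helper, not one of the original\nexperiment files): how synthetic.py instantiates a rewriting rule,\nreplacing the placeholders 'X' and 'Y' with random runs of zero-padded\nthree-digit tokens, which the model can only reproduce by copying.\n\"\"\"\nimport numpy.random as n_rng\n\nn_rng.seed(0)\n\n\ndef ftr(v):\n    # zero-pad to three digits, matching ftr in synthetic.py\n    return '%03d' % v\n\n\ndef expand(seq, X, Y):\n    # substitute the placeholder symbols with the sampled token runs\n    out = []\n    for w in seq:\n        if w == 'X':\n            out += X\n        elif w == 'Y':\n            out += Y\n        else:\n            out += [w]\n    return out\n\n\nif __name__ == '__main__':\n    X = [ftr(v) for v in n_rng.randint(1000, size=n_rng.randint(4) + 1)]\n    Y = [ftr(v) for v in n_rng.randint(1000, size=n_rng.randint(4) + 1)]\n    S = expand(['123', 'X', '456', 'Y'], X, Y)\n    T = expand(['Y', '789', 'X'], X, Y)\n    print ' '.join(S) + ' -> ' + ' '.join(T)\n"
  },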
  {
    "path": "experiments/weibo_dataset.py",
    "content": "# coding=utf-8\nfrom emolga.dataset.build_dataset import deserialize_from_file, serialize_to_file\n\nword2idx = dict()\nwordfreq = dict()\nword2idx['<eol>'] = 0\nword2idx['<unk>'] = 1\n\n# segment  = False # True\n\n# training set\npairs = []\nf     = open('./dataset/weibo/co-occur.txt', 'r')\nline  = f.readline().strip().decode('utf-8')\nat    = 2\nlines = 0\nwhile line:\n    post = line # f.readline().strip().decode('utf-8')\n    post= [w.strip() for w in post.split() if len(w.strip()) > 0]\n    \n    # if segment:\n    #    summary = [w for w in jb.cut(summary)]\n\n    for w in post:\n        if w not in wordfreq:\n            wordfreq[w]  = 1\n        else:\n            wordfreq[w] += 1\n        # if w not in word2idx:\n        #     word2idx[w] = at\n        #     at         += 1\n\n    text    = f.readline().strip().decode('utf-8')\n    text    = [w.strip() for w in text.split() if len(w.strip()) > 0]\n    # if segment:\n    #     text = [w for w in jb.cut(text)]\n    for w in text:\n        if w not in wordfreq:\n            wordfreq[w]  = 1\n        else:\n            wordfreq[w] += 1\n        # if w not in word2idx:\n        #     word2idx[w] = at\n        #     at         += 1\n\n    pair    = (post, text)\n    pairs.append(pair)\n    lines  += 1\n    if lines % 20000 == 0:\n        print lines\n    \n    f.readline()\n    line = f.readline().strip().decode('utf-8')\n\n\n# sort the vocabulary\nwordfreq = sorted(wordfreq.items(), key=lambda a:a[1], reverse=True)\nfor w in wordfreq:\n    word2idx[w[0]] = at\n    at += 1\n\nidx2word = dict()\nfor v, k in word2idx.items():\n    idx2word[k] = v\nLmax     = len(idx2word)\nprint 'read dataset ok.'\nprint Lmax\nfor i in xrange(Lmax):\n    print idx2word[i].encode('utf-8')\n\n# use character-based model [on]\n# use word-based model     [off]\n\n\ndef build_data(data):\n    instance = dict(text=[], summary=[], source=[], target=[], target_c=[])\n    for pair in data:\n        source, target = pair\n        A = [word2idx[w] for w in source]\n        B = [word2idx[w] for w in target]\n        # C = np.asarray([[w == l for w in source] for l in target], dtype='float32')\n        C = [0 if w not in source else source.index(w) + Lmax for w in target]\n\n        instance['text']      += [source]\n        instance['summary']   += [target]\n        instance['source']    += [A]\n        instance['target']    += [B]\n        # instance['cc_matrix'] += [C]\n        instance['target_c'] += [C]\n\n    print instance['target'][5000]\n    print instance['target_c'][5000]\n    return instance\n\n\ntrain_set = build_data(pairs[10000:])\ntest_set  = build_data(pairs[:10000])\nserialize_to_file([train_set, test_set, idx2word, word2idx], './dataset/weibo_data-word-cooc.pkl')\n"
  },
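  {
    "path": "experiments/vocab_order_demo.py",
    "content": "# coding=utf-8\n\"\"\"\nIllustrative sketch, not part of the original repository: it mirrors how\nweibo_dataset.py builds the vocabulary. Indices 0 and 1 are reserved for\n<eol> and <unk>; every other word is ranked by corpus frequency, so a cutoff\nsuch as config['voc_size'] keeps exactly the most frequent words. The file\nname and the toy counts below are hypothetical.\n\"\"\"\n\nwordfreq = {'the': 9, 'cat': 4, 'sat': 4, 'mat': 1}\n\nword2idx = {'<eol>': 0, '<unk>': 1}\nat = 2\nfor w, freq in sorted(wordfreq.items(), key=lambda a: a[1], reverse=True):\n    word2idx[w] = at\n    at += 1\n\nidx2word = {k: v for v, k in word2idx.items()}\nprint word2idx  # the most frequent word gets index 2, the next one 3, ...\n"
  },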
  {
    "path": "experiments/weibo_vest.py",
    "content": "\"\"\"\nThis is the implementation of Copy-NET\nWe start from the basic Seq2seq framework for a auto-encoder.\n\"\"\"\nimport logging\nimport time\nimport numpy as np\nimport sys\nimport copy\n\nfrom theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams\nfrom experiments.config import setup_lcsts, setup_weibo\nfrom emolga.utils.generic_utils import *\nfrom emolga.models.covc_encdec import NRM\nfrom emolga.models.encdec import NRM as NRM0\nfrom emolga.dataset.build_dataset import deserialize_from_file\nfrom collections import OrderedDict\nfrom fuel import datasets\nfrom fuel import transformers\nfrom fuel import schemes\n\nsetup = setup_weibo\n\n\ndef init_logging(logfile):\n    formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(module)s: %(message)s',\n                                  datefmt='%m/%d/%Y %H:%M:%S'   )\n    fh = logging.FileHandler(logfile)\n    # ch = logging.StreamHandler()\n\n    fh.setFormatter(formatter)\n    # ch.setFormatter(formatter)\n    # fh.setLevel(logging.INFO)\n    # ch.setLevel(logging.INFO)\n    # logging.getLogger().addHandler(ch)\n    logging.getLogger().addHandler(fh)\n    logging.getLogger().setLevel(logging.INFO)\n\n    return logging\n\n# prepare logging.\ntmark   = time.strftime('%Y%m%d-%H%M%S', time.localtime(time.time()))\nconfig  = setup()   # load settings.\nfor w in config:\n    print '{0}={1}'.format(w, config[w])\n\nlogger  = init_logging(config['path_log'] + '/experiments.CopyWeibo.id={}.log'.format(tmark))\nn_rng   = np.random.RandomState(config['seed'])\nnp.random.seed(config['seed'])\nrng     = RandomStreams(n_rng.randint(2 ** 30))\nlogger.info('Start!')\n\ntrain_set, test_set, idx2word, word2idx = deserialize_from_file(config['dataset'])\n\nif config['voc_size'] == -1:   # not use unk\n    config['enc_voc_size'] = max(zip(*word2idx.items())[1]) + 1\n    config['dec_voc_size'] = config['enc_voc_size']\nelse:\n    config['enc_voc_size'] = config['voc_size']\n    config['dec_voc_size'] = config['enc_voc_size']\n\nsamples  = len(train_set['source'])\nlogger.info('build dataset done. 
' +\n            'dataset size: {} ||'.format(samples) +\n            'vocabulary size = {0}/ batch size = {1}'.format(\n        config['dec_voc_size'], config['batch_size']))\n\n\ndef build_data(data):\n    # create fuel dataset.\n    dataset     = datasets.IndexableDataset(indexables=OrderedDict([('source', data['source']),\n                                                                    ('target', data['target']),\n                                                                    ('target_c', data['target_c']),\n                                                                    ]))\n    dataset.example_iteration_scheme \\\n                = schemes.ShuffledExampleScheme(dataset.num_examples)\n    return dataset\n\n\ntrain_data        = build_data(train_set)\ntrain_data_plain  = zip(*(train_set['source'], train_set['target']))\ntest_data_plain   = zip(*(test_set['source'],  test_set['target']))\n\ntrain_size        = len(train_data_plain)\ntest_size         = len(test_data_plain)\ntr_idx            = n_rng.permutation(train_size)[:2000].tolist()\nts_idx            = n_rng.permutation(test_size )[:2000].tolist()\nlogger.info('load the data ok.')\nnotrain           = False\n\n# build the agent\nif config['copynet']:\n    agent  = NRM(config, n_rng, rng, mode=config['mode'],\n                 use_attention=True, copynet=config['copynet'], identity=config['identity'])\nelse:\n    agent  = NRM0(config, n_rng, rng, mode=config['mode'],\n                  use_attention=True, copynet=config['copynet'], identity=config['identity'])\n\nagent.build_()\nif notrain:\n    agent.compile_('display')\nelse:\n    agent.compile_('all')\nprint 'compile ok.'\n\necho   = 0\nepochs = 10\nif echo > 0:\n    tmark = '20160227-164324'    # copynet multi-source model\n    agent.load(config['path_h5'] + '/experiments.CopyWeibo.id={0}.epoch={1}.pkl'.format(tmark, echo))\n\nwhile echo < epochs:\n    echo += 1\n    loss  = []\n\n    def output_stream(dataset, batch_size, size=1):\n        data_stream = dataset.get_example_stream()\n        data_stream = transformers.Batch(data_stream,\n                                         iteration_scheme=schemes.ConstantScheme(batch_size))\n\n        # add padding and masks to the dataset\n        data_stream = transformers.Padding(data_stream, mask_sources=('source', 'target'))\n        return data_stream\n\n    def prepare_batch(batch, mask, fix_len=None):\n        data = batch[mask].astype('int32')\n        data = np.concatenate([data, np.zeros((data.shape[0], 1), dtype='int32')], axis=1)\n\n        def cut_zeros(data, fix_len=None):\n            if fix_len is not None:\n                return data[:, : fix_len]\n            for k in range(data.shape[1] - 1, 0, -1):\n                data_col = data[:, k].sum()\n                if data_col > 0:\n                    return data[:, : k + 2]\n            return data\n        data = cut_zeros(data, fix_len)\n        return data\n\n    def cc_martix(source, target):\n        cc = np.zeros((source.shape[0], target.shape[1], source.shape[1]), dtype='float32')\n        for k in xrange(source.shape[0]):\n            for j in xrange(target.shape[1]):\n                for i in xrange(source.shape[1]):\n                    if (source[k, i] == target[k, j]) and (source[k, i] > 0):\n                        cc[k][j][i] = 1.\n        return cc\n\n    def unk_filter(data):\n        if config['voc_size'] == -1:\n            return copy.copy(data)\n        else:\n            mask = (np.less(data, config['voc_size'])).astype(dtype='int32')\n 
           data = copy.copy(data * mask + (1 - mask))\n            return data\n\n    # training\n    train_batches = output_stream(train_data, config['batch_size']).get_epoch_iterator(as_dict=True)\n    logger.info('\\nEpoch = {} -> Training Set Learning...'.format(echo))\n    progbar = Progbar(train_size / config['batch_size'])\n    for it, batch in enumerate(train_batches):\n        # obtain data\n        if not notrain:\n            data_s = prepare_batch(batch, 'source')\n            data_t = prepare_batch(batch, 'target')\n            if config['copynet']:\n                data_c = cc_martix(data_s, data_t)\n                # data_c = prepare_batch(batch, 'target_c', data_t.shape[1])\n                loss += [agent.train_(unk_filter(data_s), unk_filter(data_t), data_c)]\n            else:\n                loss += [agent.train_(unk_filter(data_s), unk_filter(data_t))]\n\n            progbar.update(it, [('loss_reg', loss[-1][0]), ('ppl.', loss[-1][1])])\n\n        if it % 200 == 0:\n            logger.info('Echo={} Evaluation Sampling.'.format(it))\n            logger.info('generating [training set] samples')\n            for _ in xrange(5):\n                idx              = int(np.floor(n_rng.rand() * train_size))\n                train_s, train_t = train_data_plain[idx]\n                v                = agent.evaluate_(np.asarray(train_s, dtype='int32'),\n                                                   np.asarray(train_t, dtype='int32'),\n                                                   idx2word,\n                                                   np.asarray(unk_filter(train_s), dtype='int32'),\n                                                   encode=config['utf-8'])\n                print '*' * 50\n\n            logger.info('generating [testing set] samples')\n            for _ in xrange(5):\n                idx            = int(np.floor(n_rng.rand() * test_size))\n                test_s, test_t = test_data_plain[idx]\n                v              = agent.evaluate_(np.asarray(test_s, dtype='int32'),\n                                                 np.asarray(test_t, dtype='int32'),\n                                                 idx2word,\n                                                 np.asarray(unk_filter(test_s), dtype='int32'),\n                                                 encode=config['utf-8'])\n                print '*' * 50\n            \n            if it % 10000 == 0:\n                # save the weights.\n                agent.save(config['path_h5'] + '/experiments.CopyWeibo.id={0}.epoch={1}.pkl'.format(tmark, echo))\n\n    # # test accuracy\n    # progbar_tr = Progbar(2000)\n    #\n    # print '\\n' + '__' * 50\n    # gen, gen_pos = 0, 0\n    # cpy, cpy_pos = 0, 0\n    # for it, idx in enumerate(tr_idx):\n    #     train_s, train_t = train_data_plain[idx]\n    #\n    #     c = agent.analyse_(np.asarray(train_s, dtype='int32'),\n    #                        np.asarray(train_t, dtype='int32'),\n    #                        idx2word)\n    #     if c[1] == 0:\n    #         # generation mode\n    #         gen     += 1\n    #         gen_pos += c[0]\n    #     else:\n    #         # copy mode\n    #         cpy     += 1\n    #         cpy_pos += c[0]\n    #\n    #     progbar_tr.update(it + 1, [('Gen', gen_pos), ('Copy', cpy_pos)])\n    #\n    # logger.info('\\nTraining Accuracy:' +\n    #             '\\tGene-Mode: {0}/{1} = {2}%'.format(gen_pos, gen, 100 * gen_pos/float(gen)) +\n    #             '\\tCopy-Mode: {0}/{1} = {2}%'.format(cpy_pos, cpy, 100 
* cpy_pos/float(cpy)))\n    #\n    # progbar_ts = Progbar(2000)\n    # print '\\n' + '__' * 50\n    # gen, gen_pos = 0, 0\n    # cpy, cpy_pos = 0, 0\n    # for it, idx in enumerate(ts_idx):\n    #     test_s, test_t = test_data_plain[idx]\n    #     c      = agent.analyse_(np.asarray(test_s, dtype='int32'),\n    #                             np.asarray(test_t, dtype='int32'),\n    #                             idx2word)\n    #     if c[1] == 0:\n    #         # generation mode\n    #         gen     += 1\n    #         gen_pos += c[0]\n    #     else:\n    #         # copy mode\n    #         cpy     += 1\n    #         cpy_pos += c[0]\n    #\n    #     progbar_ts.update(it + 1, [('Gen', gen_pos), ('Copy', cpy_pos)])\n    #\n    # logger.info('\\nTesting Accuracy:' +\n    #             '\\tGene-Mode: {0}/{1} = {2}%'.format(gen_pos, gen, 100 * gen_pos/float(gen)) +\n    #             '\\tCopy-Mode: {0}/{1} = {2}%'.format(cpy_pos, cpy, 100 * cpy_pos/float(cpy)))\n"
  }
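  ,
  {
    "path": "experiments/cc_matrix_demo.py",
    "content": "\"\"\"\nIllustrative sketch, not part of the original repository: a standalone copy\nof the cc_matrix helper from weibo_vest.py. For padded batches of source and\ntarget index matrices, cc[k, j, i] = 1 whenever target word j of example k\nequals source word i; the padding index 0 is never marked. The result is the\nsupervision signal passed to the copy mechanism via agent.train_. The file\nname and the toy batch below are hypothetical.\n\"\"\"\nimport numpy as np\n\n\ndef cc_matrix(source, target):\n    cc = np.zeros((source.shape[0], target.shape[1], source.shape[1]), dtype='float32')\n    for k in xrange(source.shape[0]):\n        for j in xrange(target.shape[1]):\n            for i in xrange(source.shape[1]):\n                if (source[k, i] == target[k, j]) and (source[k, i] > 0):\n                    cc[k][j][i] = 1.\n    return cc\n\n\nsource = np.asarray([[4, 7, 0]], dtype='int32')  # one example, zero-padded\ntarget = np.asarray([[7, 9]], dtype='int32')\nprint cc_matrix(source, target)  # only cc[0, 0, 1] is 1: target '7' copies source position 1\n"
  }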
]