[
  {
    "path": ".gitignore",
    "content": "*~\n*.pyc\n.idea\nbuild\ndist\nbag.egg-info\n__pycache__\n*.swp\n"
  },
  {
    "path": ".gitmodules",
    "content": "[submodule \"cybag_oa\"]\n\tpath = cybag_oa\n    url = https://github.com/ucb-art/cybag_oa.git\n"
  },
  {
    "path": "LICENSE",
    "content": "BSD 3-Clause License\n\nCopyright (c) 2018, Regents of the University of California\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n  list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n  this list of conditions and the following disclaimer in the documentation\n  and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n  contributors may be used to endorse or promote products derived from\n  this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "README.md",
    "content": "Berkeley Analog Generator (BAG) version 2.0 and later.\n\nBAG 2.0 is a complete rewrite of BAG 1.x (which is in pre-alpha stage and\nnever released publicly).\n\n(Very outdated) Documentation and install instructions can be found at <http://bag-framework.readthedocs.io/en/latest/>\n\nA tutorial setup is available at <https://github.com/ucb-art/BAG2_cds_ff_mpt.git/>\n"
  },
  {
    "path": "bag/LICENSE",
    "content": "BSD 3-Clause License\n\nCopyright (c) 2018, Regents of the University of California\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n  list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n  this list of conditions and the following disclaimer in the documentation\n  and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n  contributors may be used to endorse or promote products derived from\n  this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "bag/__init__.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This is the bag root package.\n\"\"\"\n\nimport signal\n\nfrom . import math\nfrom .math import float_to_si_string, si_string_to_float\nfrom . import interface\nfrom . import design\nfrom . import data\nfrom . import tech\nfrom . import layout\n\nfrom .core import BagProject, create_tech_info\n\n__all__ = ['interface', 'design', 'data', 'math', 'tech', 'layout', 'BagProject',\n           'float_to_si_string', 'si_string_to_float', 'create_tech_info']\n\n# make sure that SIGINT will always be catched by python.\nsignal.signal(signal.SIGINT, signal.default_int_handler)\n"
  },
  {
    "path": "bag/concurrent/LICENSE",
    "content": "BSD 3-Clause License\n\nCopyright (c) 2018, Regents of the University of California\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n  list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n  this list of conditions and the following disclaimer in the documentation\n  and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n  contributors may be used to endorse or promote products derived from\n  this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "bag/concurrent/__init__.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This package define helper classes used to perform concurrent operations.\n\"\"\""
  },
  {
    "path": "bag/concurrent/core.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This module define utility classes for performing concurrent operations.\n\"\"\"\n\nfrom typing import Optional, Sequence, Dict, Union, Tuple, Callable, Any\n\nimport os\nimport asyncio\n# noinspection PyProtectedMember\nfrom asyncio.subprocess import Process\nimport subprocess\nimport multiprocessing\nfrom concurrent.futures import CancelledError\n\n\ndef batch_async_task(coro_list):\n    \"\"\"Execute a list of coroutines or futures concurrently.\n\n    User may press Ctrl-C to cancel all given tasks.\n\n    Parameters\n    ----------\n    coro_list :\n        a list of coroutines or futures to run concurrently.\n\n    Returns\n    -------\n    results :\n        a list of return values or raised exceptions of given tasks.\n    \"\"\"\n    top_future = asyncio.gather(*coro_list, return_exceptions=True)\n\n    loop = asyncio.get_event_loop()\n    try:\n        print('Running tasks, Press Ctrl-C to cancel.')\n        results = loop.run_until_complete(top_future)\n    except KeyboardInterrupt:\n        print('Ctrl-C detected, Cancelling tasks.')\n        top_future.cancel()\n        loop.run_forever()\n        results = None\n\n    return results\n\n\nProcInfo = Tuple[Union[str, Sequence[str]], str, Optional[Dict[str, str]], Optional[str]]\nFlowInfo = Tuple[Union[str, Sequence[str]], str, Optional[Dict[str, str]], Optional[str],\n                 Callable[[Optional[int], str], Any]]\n\n\nclass SubProcessManager(object):\n    \"\"\"A class that provides convenient methods to run multiple subprocesses in parallel using asyncio.\n\n    Parameters\n    ----------\n    max_workers : Optional[int]\n        number of maximum allowed subprocesses.  If None, defaults to system\n        CPU count.\n    cancel_timeout : Optional[float]\n        Number of seconds to wait for a process to terminate once SIGTERM or\n        SIGKILL is issued.  
Defaults to 10 seconds.\n    \"\"\"\n\n    def __init__(self, max_workers=None, cancel_timeout=10.0):\n        # type: (Optional[int], Optional[float]) -> None\n        if max_workers is None:\n            max_workers = multiprocessing.cpu_count()\n        if cancel_timeout is None:\n            cancel_timeout = 10.0\n\n        self._cancel_timeout = cancel_timeout\n        self._semaphore = asyncio.Semaphore(max_workers)\n\n    async def _kill_subprocess(self, proc: Optional[Process]) -> None:\n        \"\"\"Helper method; send SIGTERM/SIGKILL to a subprocess.\n\n        This method first sends SIGTERM to the subprocess.  If the process hasn't terminated\n        after a given timeout, it sends SIGKILL.\n\n        Parameter\n        ---------\n        proc : Optional[Process]\n            the process to attempt to terminate.  If None, this method does nothing.\n        \"\"\"\n        if proc is not None:\n            if proc.returncode is None:\n                try:\n                    proc.terminate()\n                    try:\n                        await asyncio.shield(asyncio.wait_for(proc.wait(), self._cancel_timeout))\n                    except CancelledError:\n                        pass\n\n                    if proc.returncode is None:\n                        proc.kill()\n                        try:\n                            await asyncio.shield(\n                                asyncio.wait_for(proc.wait(), self._cancel_timeout))\n                        except CancelledError:\n                            pass\n                except ProcessLookupError:\n                    pass\n\n    async def async_new_subprocess(self,\n                                   args: Union[str, Sequence[str]],\n                                   log: str,\n                                   env: Optional[Dict[str, str]] = None,\n                                   cwd: Optional[str] = None) -> Optional[int]:\n        \"\"\"A coroutine which starts a subprocess.\n\n  
      If this coroutine is cancelled, it will shut down the subprocess gracefully using\n        SIGTERM/SIGKILL, then raise CancelledError.\n\n        Parameters\n        ----------\n        args : Union[str, Sequence[str]]\n            command to run, as string or sequence of strings.\n        log : str\n            the log file name.\n        env : Optional[Dict[str, str]]\n            an optional dictionary of environment variables.  None to inherit from parent.\n        cwd : Optional[str]\n            the working directory.  None to inherit from parent.\n\n        Returns\n        -------\n        retcode : Optional[int]\n            the return code of the subprocess.\n        \"\"\"\n        if isinstance(args, str):\n            args = [args]\n\n        # get log file name, make directory if necessary\n        log = os.path.abspath(log)\n        if os.path.isdir(log):\n            raise ValueError('log file %s is a directory.' % log)\n        os.makedirs(os.path.dirname(log), exist_ok=True)\n\n        async with self._semaphore:\n            proc = None\n            with open(log, 'w') as logf:\n                logf.write('command: %s\\n' % (' '.join(args)))\n                logf.flush()\n                try:\n                    proc = await asyncio.create_subprocess_exec(*args, stdout=logf,\n                                                                stderr=subprocess.STDOUT,\n                                                                env=env, cwd=cwd)\n                    retcode = await proc.wait()\n                    return retcode\n                except CancelledError as err:\n                    await self._kill_subprocess(proc)\n                    raise err\n\n    async def async_new_subprocess_flow(self,\n                                        proc_info_list: Sequence[FlowInfo]) -> Any:\n        \"\"\"A coroutine which runs a series of subprocesses.\n\n        If this coroutine is cancelled, it will shut down the current subprocess 
gracefully using\n        SIGTERM/SIGKILL, then raise CancelledError.\n\n        Parameters\n        ----------\n        proc_info_list : Sequence[FlowInfo]\n            a list of processes to execute in series.  Each element is a tuple of:\n\n            args : Union[str, Sequence[str]]\n                command to run, as string or list of string arguments.\n            log : str\n                log file name.\n            env : Optional[Dict[str, str]]\n                environment variable dictionary.  None to inherit from parent.\n            cwd : Optional[str]\n                working directory path.  None to inherit from parent.\n            vfun : Sequence[Callable[[Optional[int], str], Any]]\n                a function to validate if it is ok to execute the next process.  The output of the\n                last function is returned.  The first argument is the return code, the second\n                argument is the log file name.\n\n        Returns\n        -------\n        result : Any\n            the return value of the last validate function.  None if validate function\n            returns False.\n        \"\"\"\n        num_proc = len(proc_info_list)\n        if num_proc == 0:\n            return None\n\n        async with self._semaphore:\n            for idx, (args, log, env, cwd, vfun) in enumerate(proc_info_list):\n                if isinstance(args, str):\n                    args = [args]\n\n                # get log file name, make directory if necessary\n                log = os.path.abspath(log)\n                if os.path.isdir(log):\n                    raise ValueError('log file %s is a directory.' 
% log)\n                os.makedirs(os.path.dirname(log), exist_ok=True)\n\n                proc, retcode = None, None\n                with open(log, 'w') as logf:\n                    logf.write('command: %s\\n' % (' '.join(args)))\n                    logf.flush()\n                    try:\n                        proc = await asyncio.create_subprocess_exec(*args, stdout=logf,\n                                                                    stderr=subprocess.STDOUT,\n                                                                    env=env, cwd=cwd)\n                        retcode = await proc.wait()\n                    except CancelledError as err:\n                        await self._kill_subprocess(proc)\n                        raise err\n\n                fun_output = vfun(retcode, log)\n                if idx == num_proc - 1:\n                    return fun_output\n                elif not fun_output:\n                    return None\n\n    def batch_subprocess(self, proc_info_list):\n        # type: (Sequence[ProcInfo]) -> Optional[Sequence[Union[int, Exception]]]\n        \"\"\"Run all given subprocesses in parallel.\n\n        Parameters\n        ----------\n        proc_info_list : Sequence[ProcInfo]\n            a list of process information.  Each element is a tuple of:\n\n            args : Union[str, Sequence[str]]\n                command to run, as string or list of string arguments.\n            log : str\n                log file name.\n            env : Optional[Dict[str, str]]\n                environment variable dictionary.  None to inherit from parent.\n            cwd : Optional[str]\n                working directory path.  None to inherit from parent.\n\n        Returns\n        -------\n        results : Optional[Sequence[Union[int, Exception]]]\n            if user cancelled the subprocesses, None is returned.  
Otherwise, a list of\n            subprocess return codes or exceptions are returned.\n        \"\"\"\n        num_proc = len(proc_info_list)\n        if num_proc == 0:\n            return []\n\n        coro_list = [self.async_new_subprocess(args, log, env, cwd) for args, log, env, cwd in\n                     proc_info_list]\n\n        return batch_async_task(coro_list)\n\n    def batch_subprocess_flow(self, proc_info_list):\n        # type: (Sequence[Sequence[FlowInfo]]) -> Optional[Sequence[Union[int, Exception]]]\n        \"\"\"Run all given subprocesses flow in parallel.\n\n        Parameters\n        ----------\n        proc_info_list : Sequence[Sequence[FlowInfo]\n            a list of process flow information.  Each element is a sequence of tuples of:\n\n            args : Union[str, Sequence[str]]\n                command to run, as string or list of string arguments.\n            log : str\n                log file name.\n            env : Optional[Dict[str, str]]\n                environment variable dictionary.  None to inherit from parent.\n            cwd : Optional[str]\n                working directory path.  None to inherit from parent.\n            vfun : Sequence[Callable[[Optional[int], str], Any]]\n                a function to validate if it is ok to execute the next process.  The output of the\n                last function is returned.  The first argument is the return code, the second\n                argument is the log file name.\n\n        Returns\n        -------\n        results : Optional[Sequence[Any]]\n            if user cancelled the subprocess flows, None is returned.  Otherwise, a list of\n            flow return values or exceptions are returned.\n        \"\"\"\n        num_proc = len(proc_info_list)\n        if num_proc == 0:\n            return []\n\n        coro_list = [self.async_new_subprocess_flow(flow_info) for flow_info in proc_info_list]\n\n        return batch_async_task(coro_list)\n"
  },
  {
    "path": "bag/core.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This is the core bag module.\n\"\"\"\n\nfrom typing import TYPE_CHECKING, Dict, Any, Tuple, Optional, Union, Type, Sequence, TypeVar\n\nimport os\nimport importlib\nimport cProfile\nimport pstats\nfrom pathlib import Path\n\n# noinspection PyPackageRequirements\n\nfrom .interface import ZMQDealer\nfrom .interface.database import DbAccess\nfrom .design import ModuleDB, SchInstance\nfrom .layout.routing import RoutingGrid\nfrom .layout.template import TemplateDB\nfrom .layout.core import DummyTechInfo\nfrom .io import read_file, sim_data, read_yaml_env\nfrom .concurrent.core import batch_async_task\n\nif TYPE_CHECKING:\n    from .interface.simulator import SimAccess\n    from .layout.template import TemplateBase\n    from .layout.core import TechInfo\n    from .design.module import Module\n    from .simulation.core_v2 import TestbenchManager, MeasurementManager\n\n    ModuleType = TypeVar('ModuleType', bound=Module)\n    TemplateType = TypeVar('TemplateType', bound=TemplateBase)\n\n\ndef _get_config_file_abspath(fname):\n    \"\"\"Get absolute path of configuration file using BAG_WORK_DIR environment variable.\"\"\"\n    fname = os.path.basename(fname)\n    if 'BAG_WORK_DIR' not in os.environ:\n        raise ValueError('Environment variable BAG_WORK_DIR not defined')\n\n    work_dir = os.environ['BAG_WORK_DIR']\n    if not os.path.isdir(work_dir):\n        raise ValueError('$BAG_WORK_DIR = %s is not a directory' % work_dir)\n\n    # read port number\n    fname = os.path.join(work_dir, fname)\n    if not os.path.isfile(fname):\n        raise ValueError('Cannot find file: %s' % fname)\n    return fname\n\n\ndef _get_port_number(port_file):\n    # type: (str) -> Tuple[Optional[int], str]\n    \"\"\"Read the port number from the given port file.\n\n    Parameters\n    ----------\n    port_file : str\n        a file containing the communication port number.\n\n    Returns\n    -------\n    port : Optional[int]\n        the 
port number if reading is successful.\n    msg : str\n        Empty string on success, the error message on failure.\n    \"\"\"\n    try:\n        port_file = _get_config_file_abspath(port_file)\n    except ValueError as err:\n        return None, str(err)\n\n    port = int(read_file(port_file))\n    return port, ''\n\n\ndef _import_class_from_str(class_str):\n    # type: (str) -> Type\n    \"\"\"Given a Python class string, convert it to the Python class.\n\n    Parameters\n    ----------\n    class_str : str\n        a Python class string/\n\n    Returns\n    -------\n    py_class : class\n        a Python class.\n    \"\"\"\n    sections = class_str.split('.')\n\n    module_str = '.'.join(sections[:-1])\n    class_str = sections[-1]\n    modul = importlib.import_module(module_str)\n    return getattr(modul, class_str)\n\n\nclass Testbench(object):\n    \"\"\"A class that represents a testbench instance.\n\n    Parameters\n    ----------\n    sim : :class:`bag.interface.simulator.SimAccess`\n        The SimAccess instance used to issue simulation commands.\n    db : :class:`bag.interface.database.DbAccess`\n        The DbAccess instance used to update testbench schematic.\n    lib : str\n        testbench library.\n    cell : str\n        testbench cell.\n    parameters : Dict[str, str]\n        the simulation parameter dictionary.  
The values are string representation\n        of actual parameter values.\n    env_list : Sequence[str]\n        list of defined simulation environments.\n    default_envs : Sequence[str]\n        the selected simulation environments.\n    outputs : Dict[str, str]\n        default output expressions\n\n    Attributes\n    ----------\n    lib : str\n        testbench library.\n    cell : str\n        testbench cell.\n    save_dir : str\n        directory containing the last simulation data.\n    \"\"\"\n\n    def __init__(self,  # type: Testbench\n                 sim,  # type: SimAccess\n                 db,  # type: DbAccess\n                 lib,  # type: str\n                 cell,  # type: str\n                 parameters,  # type: Dict[str, str]\n                 env_list,  # type: Sequence[str]\n                 default_envs,  # type: Sequence[str]\n                 outputs,  # type: Dict[str, str]\n                 ):\n        # type: (...) -> None\n        \"\"\"Create a new testbench instance.\n        \"\"\"\n        self.sim = sim\n        self.db = db\n        self.lib = lib\n        self.cell = cell\n        self.parameters = parameters\n        self.env_parameters = {}\n        self.env_list = env_list\n        self.sim_envs = default_envs\n        self.config_rules = {}\n        self.outputs = outputs\n        self.save_dir = None\n\n    def get_defined_simulation_environments(self):\n        # type: () -> Sequence[str]\n        \"\"\"Return a list of defined simulation environments\"\"\"\n        return self.env_list\n\n    def get_current_simulation_environments(self):\n        # type: () -> Sequence[str]\n        \"\"\"Returns a list of simulation environments this testbench will simulate.\"\"\"\n        return self.sim_envs\n\n    def add_output(self, var, expr):\n        # type: (str, str) -> None\n        \"\"\"Add an output expression to be recorded and exported back to python.\n\n        Parameters\n        ----------\n        var : str\n     
       output variable name.\n        expr : str\n            the output expression.\n        \"\"\"\n        if var in sim_data.illegal_var_name:\n            raise ValueError('Variable name %s is illegal.' % var)\n        self.outputs[var] = expr\n\n    def set_parameter(self, name, val, precision=6):\n        # type: (str, Union[int, float], int) -> None\n        \"\"\"Sets the value of the given simulation parameter.\n\n        Parameters\n        ----------\n        name : str\n            parameter name.\n        val : Union[int, float]\n            parameter value\n        precision : int\n            the parameter value will be rounded to this precision.\n        \"\"\"\n        param_config = dict(type='single', value=val)\n        if isinstance(val, str):\n            self.parameters[name] = val\n        else:\n            self.parameters[name] = self.sim.format_parameter_value(param_config, precision)\n\n    def set_env_parameter(self, name, val_list, precision=6):\n        # type: (str, Sequence[float], int) -> None\n        \"\"\"Configure the given parameter to have different value across simulation environments.\n\n        Parameters\n        ----------\n        name : str\n            the parameter name.\n        val_list : Sequence[float]\n            the parameter values for each simulation environment.  the order of the simulation\n            environments can be found in self.sim_envs\n        precision : int\n            the parameter value will be rounded to this precision.\n        \"\"\"\n        if len(self.sim_envs) != len(val_list):\n            raise ValueError('env parameter must have %d values.' 
% len(self.sim_envs))\n\n        default_val = None\n        for env, val in zip(self.sim_envs, val_list):\n            if env not in self.env_parameters:\n                cur_dict = {}\n                self.env_parameters[env] = cur_dict\n            else:\n                cur_dict = self.env_parameters[env]\n\n            param_config = dict(type='single', value=val)\n            cur_val = self.sim.format_parameter_value(param_config, precision)\n            if default_val is None:\n                default_val = cur_val\n            cur_dict[name] = self.sim.format_parameter_value(param_config, precision)\n        self.parameters[name] = default_val\n\n    def set_sweep_parameter(self, name, precision=6, **kwargs):\n        # type: (str, int, **Any) -> None\n        \"\"\"Set to sweep the given parameter.\n\n        To set the sweep values directly:\n\n        tb.set_sweep_parameter('var', values=[1.0, 5.0, 10.0])\n\n        To set a linear sweep with start/stop/step (inclusive start and stop):\n\n        tb.set_sweep_parameter('var', start=1.0, stop=9.0, step=4.0)\n\n        To set a logarithmic sweep with points per decade (inclusive start and stop):\n\n        tb.set_sweep_parameter('var', start=1.0, stop=10.0, num_decade=3)\n\n        Parameters\n        ----------\n        name : str\n            parameter name.\n        precision : int\n            the parameter value will be rounded to this precision.\n        **kwargs : Any\n            the sweep parameters.  
Refer to the above for example calls.\n        \"\"\"\n        if 'values' in kwargs:\n            param_config = dict(type='list', values=kwargs['values'])\n        elif 'start' in kwargs and 'stop' in kwargs:\n            start = kwargs['start']\n            stop = kwargs['stop']\n            if 'step' in kwargs:\n                step = kwargs['step']\n                param_config = dict(type='linstep', start=start, stop=stop, step=step)\n            elif 'num_decade' in kwargs:\n                num = kwargs['num_decade']\n                param_config = dict(type='decade', start=start, stop=stop, num=num)\n            else:\n                raise Exception('Unsupported sweep arguments: %s' % kwargs)\n        else:\n            raise Exception('Unsupported sweep arguments: %s' % kwargs)\n\n        self.parameters[name] = self.sim.format_parameter_value(param_config, precision)\n\n    def set_simulation_environments(self, env_list):\n        # type: (Sequence[str]) -> None\n        \"\"\"Enable the given list of simulation environments.\n\n        If more than one simulation environment is specified, then a sweep\n        will be performed.\n\n        Parameters\n        ----------\n        env_list : Sequence[str]\n        \"\"\"\n        self.sim_envs = env_list\n\n    def set_simulation_view(self, lib_name, cell_name, sim_view):\n        # type: (str, str, str) -> None\n        \"\"\"Set the simulation view of the given design.\n\n        For simulation, each design may have multiple views, such as schematic,\n        veriloga, extracted, etc.  This method lets you choose which view to\n        use for netlisting.  
the given design can be the top level design or\n        an intermediate instance.\n\n        Parameters\n        ----------\n        lib_name : str\n            design library name.\n        cell_name : str\n            design cell name.\n        sim_view : str\n            the view to simulate with.\n        \"\"\"\n        key = '%s__%s' % (lib_name, cell_name)\n        self.config_rules[key] = sim_view\n\n    def update_testbench(self):\n        # type: () -> None\n        \"\"\"Commit the testbench changes to the CAD database.\n        \"\"\"\n        config_list = []\n        for key, view in self.config_rules.items():\n            lib, cell = key.split('__')\n            config_list.append([lib, cell, view])\n\n        env_params = []\n        for env in self.sim_envs:\n            if env in self.env_parameters:\n                val_table = self.env_parameters[env]\n                env_params.append(list(val_table.items()))\n        self.db.update_testbench(self.lib, self.cell, self.parameters, self.sim_envs, config_list,\n                                 env_params)\n\n    def run_simulation(self, precision=6, sim_tag=None):\n        # type: (int, Optional[str]) -> Optional[str]\n        \"\"\"Run simulation.\n\n        Parameters\n        ----------\n        precision : int\n            the floating point number precision.\n        sim_tag : Optional[str]\n            optional description for this simulation run.\n\n        Returns\n        -------\n        value : Optional[str]\n            the save directory path.  
If simulation is cancelled, return None.\n        \"\"\"\n        coro = self.async_run_simulation(precision=precision, sim_tag=sim_tag)\n        batch_async_task([coro])\n        return self.save_dir\n\n    def load_sim_results(self, hist_name, precision=6):\n        # type: (str, int) -> Optional[str]\n        \"\"\"Load previous simulation data.\n\n        Parameters\n        ----------\n        hist_name : str\n            the simulation history name.\n        precision : int\n            the floating point number precision.\n\n        Returns\n        -------\n        value : Optional[str]\n            the save directory path.  If result loading is cancelled, return None.\n        \"\"\"\n        coro = self.async_load_results(hist_name, precision=precision)\n        batch_async_task([coro])\n        return self.save_dir\n\n    async def async_run_simulation(self,\n                                   precision: int = 6,\n                                   sim_tag: Optional[str] = None) -> str:\n        \"\"\"A coroutine that runs the simulation.\n\n        Parameters\n        ----------\n        precision : int\n            the floating point number precision.\n        sim_tag : Optional[str]\n            optional description for this simulation run.\n\n        Returns\n        -------\n        value : str\n            the save directory path.\n        \"\"\"\n        self.save_dir = None\n        self.save_dir = await self.sim.async_run_simulation(self.lib, self.cell, self.outputs,\n                                                            precision=precision, sim_tag=sim_tag)\n        return self.save_dir\n\n    async def async_load_results(self, hist_name: str, precision: int = 6) -> str:\n        \"\"\"A coroutine that loads previous simulation data.\n\n        Parameters\n        ----------\n        hist_name : str\n            the simulation history name.\n        precision : int\n            the floating point number precision.\n\n        Returns\n     
   -------\n        value : str\n            the save directory path.\n        \"\"\"\n        self.save_dir = None\n        self.save_dir = await self.sim.async_load_results(self.lib, self.cell, hist_name,\n                                                          self.outputs, precision=precision)\n        return self.save_dir\n\n\ndef create_tech_info(bag_config_path=None):\n    # type: (Optional[str]) -> TechInfo\n    \"\"\"Create TechInfo object.\"\"\"\n    if bag_config_path is None:\n        if 'BAG_CONFIG_PATH' not in os.environ:\n            raise Exception('BAG_CONFIG_PATH not defined.')\n        bag_config_path = os.environ['BAG_CONFIG_PATH']\n\n    bag_config = read_yaml_env(bag_config_path)\n    tech_params = read_yaml_env(bag_config['tech_config_path'])\n    if 'class' in tech_params:\n        tech_cls = _import_class_from_str(tech_params['class'])\n        tech_info = tech_cls(tech_params)\n    else:\n        # just make a default tech_info object as place holder.\n        print('*WARNING*: No TechInfo class defined.  Using a dummy version.')\n        tech_info = DummyTechInfo(tech_params)\n\n    return tech_info\n\n\nclass BagProject(object):\n    \"\"\"The main bag controller class.\n\n    This class mainly stores all the user configurations, and issue\n    high level bag commands.\n\n    Parameters\n    ----------\n    bag_config_path : Optional[str]\n        the bag configuration file path.  If None, will attempt to read from\n        environment variable BAG_CONFIG_PATH.\n    port : Optional[int]\n        the BAG server process port number.  
If not given, will read from port file.\n\n    Attributes\n    ----------\n    bag_config : Dict[str, Any]\n        the BAG configuration parameters dictionary.\n    tech_info : bag.layout.core.TechInfo\n        the BAG process technology class.\n    \"\"\"\n\n    def __init__(self, bag_config_path=None, port=None):\n        # type: (Optional[str], Optional[int]) -> None\n        if bag_config_path is None:\n            if 'BAG_CONFIG_PATH' not in os.environ:\n                raise Exception('BAG_CONFIG_PATH not defined.')\n            bag_config_path = os.environ['BAG_CONFIG_PATH']\n\n        self.bag_config = read_yaml_env(bag_config_path)\n        bag_tmp_dir = os.environ.get('BAG_TEMP_DIR', None)\n\n        # get port files\n        if port is None:\n            socket_config = self.bag_config['socket']\n            if 'port_file' in socket_config:\n                port, msg = _get_port_number(socket_config['port_file'])\n                if msg:\n                    print('*WARNING* %s' % msg)\n\n        # create ZMQDealer object\n        dealer_kwargs = {}\n        dealer_kwargs.update(self.bag_config['socket'])\n        del dealer_kwargs['port_file']\n\n        # create TechInfo instance\n        self.tech_info = create_tech_info(bag_config_path=bag_config_path)\n\n        # create design module database.\n        try:\n            lib_defs_file = _get_config_file_abspath(self.bag_config['lib_defs'])\n        except ValueError:\n            lib_defs_file = ''\n        sch_exc_libs = self.bag_config['database']['schematic']['exclude_libraries']\n        self.dsn_db = ModuleDB(lib_defs_file, self.tech_info, sch_exc_libs, prj=self)\n\n        if port is not None:\n            # make DbAccess instance.\n            dealer = ZMQDealer(port, **dealer_kwargs)\n            db_cls = _import_class_from_str(self.bag_config['database']['class'])\n            self.impl_db = db_cls(dealer, bag_tmp_dir, self.bag_config['database'])\n            self._default_lib_path = 
self.impl_db.default_lib_path\n        else:\n            self.impl_db = None  # type: Optional[DbAccess]\n            self._default_lib_path = DbAccess.get_default_lib_path(self.bag_config['database'])\n\n        # make SimAccess instance.\n        sim_cls = _import_class_from_str(self.bag_config['simulation']['class'])\n        self.sim = sim_cls(bag_tmp_dir, self.bag_config['simulation'])  # type: SimAccess\n\n    @property\n    def default_lib_path(self):\n        # type: () -> str\n        return self._default_lib_path\n\n    def close_bag_server(self):\n        # type: () -> None\n        \"\"\"Close the BAG database server.\"\"\"\n        if self.impl_db is not None:\n            self.impl_db.close()\n            self.impl_db = None\n\n    def close_sim_server(self):\n        # type: () -> None\n        \"\"\"Close the BAG simulation server.\"\"\"\n        if self.sim is not None:\n            self.sim.close()\n            self.sim = None\n\n    def import_design_library(self, lib_name):\n        # type: (str) -> None\n        \"\"\"Import all design templates in the given library from CAD database.\n\n        Parameters\n        ----------\n        lib_name : str\n            name of the library.\n        \"\"\"\n        if self.impl_db is None:\n            raise Exception('BAG Server is not set up.')\n\n        new_lib_path = self.bag_config['new_lib_path']\n        self.impl_db.import_design_library(lib_name, self.dsn_db, new_lib_path)\n\n    def import_sch_cellview(self, lib_name: str, cell_name: str) -> None:\n        \"\"\"Import the given schematic and symbol template into Python.\n\n        This import process is done recursively.\n\n        Parameters\n        ----------\n        lib_name : str\n            library name.\n        cell_name : str\n            cell name.\n        \"\"\"\n        if self.impl_db is None:\n            raise Exception('BAG Server is not set up.')\n\n        new_lib_path = self.bag_config['new_lib_path']\n        
self.impl_db.import_sch_cellview(lib_name, cell_name, self.dsn_db, new_lib_path)\n\n    def get_cells_in_library(self, lib_name):\n        # type: (str) -> Sequence[str]\n        \"\"\"Get a list of cells in the given library.\n\n        Returns an empty list if the given library does not exist.\n\n        Parameters\n        ----------\n        lib_name : str\n            the library name.\n\n        Returns\n        -------\n        cell_list : Sequence[str]\n            a list of cells in the library\n        \"\"\"\n        if self.impl_db is None:\n            raise Exception('BAG Server is not set up.')\n\n        return self.impl_db.get_cells_in_library(lib_name)\n\n    def make_template_db(self, impl_lib, grid_specs, use_cybagoa=True, gds_lay_file='',\n                         cache_dir=''):\n        # type: (str, Dict[str, Any], bool, str, str) -> TemplateDB\n        \"\"\"Create and return a new TemplateDB instance.\n\n        Parameters\n        ----------\n        impl_lib : str\n            the library name to put generated layouts in.\n        grid_specs : Dict[str, Any]\n            the routing grid specification dictionary.\n        use_cybagoa : bool\n            True to enable cybagoa acceleration if available.\n        gds_lay_file : str\n            the GDS layout information file.\n        cache_dir : str\n            the cache directory name.\n        \"\"\"\n        layers = grid_specs['layers']\n        widths = grid_specs['widths']\n        spaces = grid_specs['spaces']\n        bot_dir = grid_specs['bot_dir']\n        width_override = grid_specs.get('width_override', None)\n\n        routing_grid = RoutingGrid(self.tech_info, layers, spaces, widths, bot_dir,\n                                   width_override=width_override)\n        tdb = TemplateDB('template_libs.def', routing_grid, impl_lib, use_cybagoa=use_cybagoa,\n                         gds_lay_file=gds_lay_file, cache_dir=cache_dir, prj=self)\n\n        return tdb\n\n    def 
generate_cell(self,  # type: BagProject\n                      specs,  # type: Dict[str, Any]\n                      temp_cls=None,  # type: Optional[Type[TemplateType]]\n                      gen_lay=True,  # type: bool\n                      gen_sch=False,  # type: bool\n                      run_lvs=False,  # type: bool\n                      run_rcx=False,  # type: bool\n                      use_cybagoa=True,  # type: bool\n                      debug=False,  # type: bool\n                      profile_fname='',  # type: str\n                      use_cache=False,  # type: bool\n                      save_cache=False,  # type: bool\n                      **kwargs,\n                      ):\n        # type: (...) -> Optional[Union[pstats.Stats, Dict[str, Any]]]\n        \"\"\"Generate layout/schematic of a given cell from specification file.\n\n        Parameters\n        ----------\n        specs : Dict[str, Any]\n            the specification dictionary.\n        temp_cls : Optional[Type[TemplateType]]\n            the TemplateBase subclass to instantiate\n            if not provided, it will be imported from lay_class entry in specs dictionary.\n        gen_lay : bool\n            True to generate layout.\n        gen_sch : bool\n            True to generate schematics.\n        run_lvs : bool\n            True to run LVS.\n        run_rcx : bool\n            True to run RCX.\n        use_cybagoa : bool\n            True to enable cybagoa acceleration if available.\n        debug : bool\n            True to print debug messages.\n        profile_fname : str\n            If not empty, profile layout generation, and save statistics to this file.\n        use_cache : bool\n            True to use cached layouts.\n        save_cache : bool\n            True to save instances in this template to cache.\n        **kwargs :\n            Additional optional arguments.\n\n        Returns\n        -------\n        result: Optional[Union[pstats.Stats, Dict[str, 
Any]]]\n            If profiling is enabled, result will be the statistics object.\n            If the last thing done is layout or schematic, result will contain sch_params\n            If the last thing done is lvs, in case of failure result will\n            contain lvs log file in a dictionary, otherwise None\n            If the last thing done is rcx, in case of failure result will\n            contain rcx log file in a dictionary, otherwise None\n        \"\"\"\n        prefix = kwargs.get('prefix', '')\n        suffix = kwargs.get('suffix', '')\n\n        grid_specs = specs['routing_grid']\n        impl_lib = specs['impl_lib']\n        impl_cell = specs['impl_cell']\n        lay_str = specs.get('lay_class', '')\n        sch_lib = specs.get('sch_lib', '')\n        sch_cell = specs.get('sch_cell', '')\n        params = specs['params']\n        gds_lay_file = specs.get('gds_lay_file', '')\n        cache_dir = specs.get('cache_dir', '')\n\n        if temp_cls is None and lay_str:\n            temp_cls = _import_class_from_str(lay_str)\n\n        has_lay = temp_cls is not None\n        if gen_lay and not has_lay:\n            raise ValueError('layout_class is not specified')\n\n        if use_cache:\n            db_cache_dir = specs.get('cache_dir', '')\n        else:\n            db_cache_dir = ''\n\n        result_pstat = None\n        if has_lay:\n            temp_db = self.make_template_db(impl_lib, grid_specs, use_cybagoa=use_cybagoa,\n                                            gds_lay_file=gds_lay_file, cache_dir=db_cache_dir)\n\n            name_list = [impl_cell]\n            print('computing layout...')\n            if profile_fname:\n                profiler = cProfile.Profile()\n                profiler.runcall(temp_db.new_template, params=params, temp_cls=temp_cls,\n                                 debug=False)\n                profiler.dump_stats(profile_fname)\n                result_pstat = pstats.Stats(profile_fname).strip_dirs()\n\n            
temp = temp_db.new_template(params=params, temp_cls=temp_cls, debug=debug)\n            print('computation done.')\n            temp_list = [temp]\n\n            if save_cache and cache_dir:\n                master_list = [inst.master for inst in temp.instance_iter()]\n                print('saving layouts to cache...')\n                temp_db.save_to_cache(master_list, cache_dir, debug=debug)\n                print('saving done.')\n\n            if gen_lay:\n                print('creating layout...')\n                temp_db.batch_layout(self, temp_list, name_list, debug=debug)\n                print('layout done.')\n\n            sch_params = temp.sch_params\n        else:\n            sch_params = params\n\n        if gen_sch:\n            dsn = self.create_design_module(lib_name=sch_lib, cell_name=sch_cell)\n            print('computing schematic...')\n            dsn.design(**sch_params)\n            print('creating schematic...')\n            dsn.implement_design(impl_lib, top_cell_name=impl_cell, prefix=prefix,\n                                 suffix=suffix)\n            print('schematic done.')\n\n        result = sch_params\n        lvs_passed = False\n        if run_lvs:\n            print('running lvs...')\n            lvs_passed, lvs_log = self.run_lvs(impl_lib, impl_cell, gds_lay_file=gds_lay_file)\n            if lvs_passed:\n                print('LVS passed!')\n                result = dict(log='')\n            else:\n                raise ValueError(f'LVS failed, lvs_log: {lvs_log}')\n\n        if run_rcx and ((run_lvs and lvs_passed) or not run_lvs):\n            print('running rcx...')\n            rcx_passed, rcx_log = self.run_rcx(impl_lib, impl_cell)\n            if rcx_passed:\n                print('RCX passed!')\n                result = dict(log='')\n            else:\n                raise ValueError(f'RCX failed, rcx_log: {rcx_log}')\n\n        if result_pstat:\n            return result_pstat\n        return result\n\n    def 
replace_dut_in_wrapper(self, params: Dict[str, Any], dut_lib: str,\n                               dut_cell: str) -> None:\n        # helper function that replaces dut_lib and dut_cell in the wrapper recursively base on\n        # dut_params\n        dut_params = params.get('dut_params', None)\n        if dut_params is None:\n            params['dut_lib'] = dut_lib\n            params['dut_cell'] = dut_cell\n            return\n        return self.replace_dut_in_wrapper(dut_params, dut_lib, dut_cell)\n\n    def simulate_cell(self,\n                      specs: Dict[str, Any],\n                      gen_cell: bool = True,\n                      gen_wrapper: bool = True,\n                      gen_tb: bool = True,\n                      load_results: bool = False,\n                      extract: bool = False,\n                      run_sim: bool = True) -> Optional[Dict[str, Any]]:\n        \"\"\"\n        Runs a minimum executable parts of the Testbench Manager flow selectively according to\n        a spec dictionary.\n\n        For example you can set the flags to generate a new cell, but since wrapper and test bench\n        exist, maybe you want to skip those, and run the simulation in the end. 
Maybe you\n        already created the cell all the way up to test bench level, and now you only need to\n        run simulation.\n\n        This function only works with Testbench Managers written in format of\n        simulation.core_v2.TestbenchManager\n\n        Parameters\n        ----------\n        specs:\n            Dictionary of specifications\n            Some non-obvious conventions:\n            - if contains tbm_specs keyword, simulation is ran through testbench manager v2,\n            otherwise there should be a sim_params entry that specifies the simulation.\n            - Wrapper is assumed to be in the specs dictionary, if it is generated outside of\n            this function, gen_wrapper should be False.\n        gen_cell:\n            True to call generate_cell on specs\n        gen_wrapper:\n            True to generate Wrapper. Currently only one top-level wrapper is supported.\n        gen_tb:\n            True to generate test bench. If test bench is created, this flag can be set to False.\n        load_results:\n            True to skip simulation and load the results.\n        extract:\n            False to skip layout generation and only simulate schematic\n        run_sim:\n            True to run simulation. 
If the purpose of calling this function is just to generate\n            some part of simulation flow to debug, this flag can be set to False.\n        Returns\n        -------\n        results: Optional[Dict[str, Any]]\n            if run_sim/load_results = True, contains simulations results, otherwise it's None.\n        \"\"\"\n\n        impl_lib = specs['impl_lib']\n        impl_cell = specs['impl_cell']\n        root_dir = Path(specs['root_dir'])\n\n        if gen_cell and not load_results:\n            print('generating cell ...')\n            self.generate_cell(specs,\n                               gen_lay=extract,\n                               gen_sch=True,\n                               run_lvs=extract,\n                               run_rcx=extract,\n                               use_cybagoa=True)\n            print('cell generated.')\n\n        # if testbench manager v2 found use that instead of interpreting simulation directly\n        tbm_specs = specs.get('tbm_specs', None)\n        if tbm_specs:\n            tbm_cls_str = tbm_specs['tbm_cls']\n            tbm_cls = _import_class_from_str(tbm_cls_str)\n            tbm: TestbenchManager = tbm_cls(root_dir)\n            sim_view_list = tbm_specs.get('sim_view_list', [])\n            if not sim_view_list:\n                view_name = 'netlist' if extract else 'schematic'\n                sim_view_list.append((impl_cell, view_name))\n            sim_envs = tbm_specs['sim_envs']\n\n            if load_results:\n                return tbm.load_results(impl_cell, tbm_specs)\n\n            results = tbm.simulate(bprj=self,\n                                   impl_lib=impl_lib,\n                                   impl_cell=impl_cell,\n                                   sim_view_list=sim_view_list,\n                                   env_list=sim_envs,\n                                   tb_dict=tbm_specs,\n                                   wrapper_dict=None,\n                                   
gen_tb=gen_tb,\n                                   gen_wrapper=gen_wrapper,\n                                   run_sim=run_sim)\n            return results\n\n        sim_params = specs.get('sim_params', None)\n        wrapper = sim_params.get('wrapper', None)\n\n        has_wrapper = wrapper is not None\n        if gen_wrapper and not has_wrapper:\n            raise ValueError('must provide a wrapper in sim_params')\n\n        wrapper_lib = wrapper_cell = wrapped_cell = wrapper_params = None\n        if has_wrapper:\n            wrapper_lib = wrapper['wrapper_lib']\n            wrapper_cell = wrapper['wrapper_cell']\n            wrapper_params = wrapper.get('params', {})\n            wrapper_suffix = wrapper.get('wrapper_suffix', '')\n            if not wrapper_suffix:\n                wrapper_suffix = f'{wrapper_cell}'\n            wrapped_cell = f'{impl_cell}_{wrapper_suffix}'\n\n        if gen_wrapper and not gen_tb:\n            raise ValueError('generated a new wrapper, therefore gen_tb should also be true')\n\n        tb_lib = sim_params['tb_lib']\n        tb_cell = sim_params['tb_cell']\n        tb_params = sim_params.get('tb_params', {})\n        tb_suffix = sim_params.get('tb_suffix', '')\n        if not tb_suffix:\n            tb_suffix = f'{tb_cell}'\n        tb_name = f'{impl_cell}_{tb_suffix}'\n\n        tb_fname = root_dir / Path(tb_name, f'{tb_name}.hdf5')\n\n        if load_results:\n            print(\"loading results ...\")\n            if tb_fname.exists():\n                return sim_data.load_sim_file(tb_fname)\n            raise ValueError(f'simulation results does not exist in {str(tb_fname)}')\n\n        if gen_wrapper and has_wrapper:\n            print('generating wrapper ...')\n            master = self.create_design_module(lib_name=wrapper_lib, cell_name=wrapper_cell)\n            self.replace_dut_in_wrapper(wrapper_params, impl_lib, impl_cell)\n            master.design(**wrapper_params)\n            master.implement_design(impl_lib, 
wrapped_cell)\n            print('wrapper generated.')\n\n        if gen_tb:\n            print('generating testbench ...')\n            tb_master = self.create_design_module(tb_lib, tb_cell)\n            dut_cell = wrapped_cell if has_wrapper else impl_cell\n            tb_master.design(dut_lib=impl_lib, dut_cell=dut_cell, **tb_params)\n            tb_master.implement_design(impl_lib, tb_name)\n            print('testbench generated.')\n\n        if run_sim:\n            print('setting up ADEXL ...')\n            sim_view_list = sim_params.get('sim_view_list', [])\n            if not sim_view_list:\n                view_name = 'netlist' if extract else 'schematic'\n                sim_view_list.append((impl_cell, view_name))\n\n            sim_envs = sim_params['sim_envs']\n            sim_swp_params = sim_params.get('sim_swp_params', {})\n            sim_vars = sim_params.get('sim_vars', {})\n            sim_outputs = sim_params.get('sim_outputs', {})\n\n            tb = self.configure_testbench(impl_lib, tb_name)\n\n            # set simulation variables\n            for key, val in sim_vars.items():\n                tb.set_parameter(key, val)\n\n            # set sweep parameters\n            for key, val in sim_swp_params.items():\n                tb.set_sweep_parameter(key, **val)\n\n            # set the simulation outputs\n            for key, val in sim_outputs.items():\n                tb.add_output(key, val)\n\n            # change the view_name (netlist or schematic)\n            for cell, view in sim_view_list:\n                tb.set_simulation_view(impl_lib, cell, view)\n\n            tb.set_simulation_environments(sim_envs)\n            tb.update_testbench()\n            print('setup completed.')\n            print('running simulation ...')\n            tb.run_simulation()\n            print('simulation done.')\n            print('loading results ...')\n            results = sim_data.load_sim_results(tb.save_dir)\n            if not 
results.get('sweep_params', {}):\n                raise ValueError(f'results are empty, either you forgot to specify outputs, or '\n                                 f'simulation failed. check sim_log: {tb.save_dir}/ocn_output.log')\n            print('results loaded.')\n            print('saving results into hdf5')\n            sim_data.save_sim_results(results, tb_fname)\n            print('results saved.')\n            return results\n\n    def measure_cell(self,\n                     specs: Dict[str, Any],\n                     gen_cell: bool = True,\n                     gen_wrapper: bool = True,\n                     gen_tb: bool = True,\n                     load_results: bool = False,\n                     extract: bool = False,\n                     run_sims: bool = True) -> Optional[Dict[str, Any]]:\n        \"\"\"\n        Runs a minimum executable parts of the Measurement Manager flow selectively according to\n        a spec dictionary.\n\n        For example you can set the flags to generate a new cell, but since wrapper and test bench\n        exist, maybe you want to skip those, and run the measurement in the end. 
Maybe you\n        already created the cell all the way up to test bench level, and now you only need to\n        run simulation.\n\n        This function only works with Measurement Managers written in format of\n        simulation.core_v2.MeasurementManager\n\n        Parameters\n        ----------\n        specs:\n            Dictionary of specifications\n            Some non-obvious conventions:\n            - if contains tbm_specs keyword, simulation is ran through testbench manager v2,\n            otherwise there should be a sim_params entry that specifies the simulation.\n            - Wrapper is assumed to be in the specs dictionary, if it is generated outside of\n            this function, gen_wrapper should be False.\n        gen_cell:\n            True to call generate_cell on specs\n        gen_wrapper:\n            True to generate Wrapper. Currently only one top-level wrapper is supported.\n        gen_tb:\n            True to generate test bench. If test bench is created, this flag can be set to False.\n        load_results:\n            True to skip simulation and load the results.\n        extract:\n            False to skip layout generation and only simulate schematic\n        run_sims:\n            True to run simulations. 
If the purpose of calling this function is just to generate\n            some part of simulation flow to debug, this flag can be set to False.\n        Returns\n        -------\n        results: Optional[Dict[str, Any]]\n            if run_sim/load_results = True, contains measurement results, otherwise it's None.\n        \"\"\"\n\n        impl_lib = specs['impl_lib']\n        impl_cell = specs['impl_cell']\n        root_dir = Path(specs['root_dir'])\n\n        if gen_cell and not load_results:\n            print('generating cell ...')\n            self.generate_cell(specs,\n                               gen_lay=extract,\n                               gen_sch=True,\n                               run_lvs=extract,\n                               run_rcx=extract,\n                               use_cybagoa=True)\n            print('cell generated.')\n\n        mm_specs = specs['mm_specs']\n        mm_cls_str = mm_specs['mm_cls']\n        mm_cls = _import_class_from_str(mm_cls_str)\n        mm: MeasurementManager = mm_cls(root_dir, mm_specs)\n        return mm.measure(self, impl_lib, impl_cell, load_results=load_results,\n                          gen_wrapper=gen_wrapper, gen_tb=gen_tb, run_sims=run_sims,\n                          extract=extract)\n\n    def create_library(self, lib_name, lib_path=''):\n        # type: (str, str) -> None\n        \"\"\"Create a new library if one does not exist yet.\n\n        Parameters\n        ----------\n        lib_name : str\n            the library name.\n        lib_path : str\n            directory to create the library in.  
If Empty, use default location.\n        \"\"\"\n        if self.impl_db is None:\n            raise Exception('BAG Server is not set up.')\n\n        return self.impl_db.create_library(lib_name, lib_path=lib_path)\n\n    # noinspection PyUnusedLocal\n    def create_design_module(self, lib_name, cell_name, **kwargs):\n        # type: (str, str, **Any) -> SchInstance\n        \"\"\"Create a new top level design module for the given schematic template\n\n        Parameters\n        ----------\n        lib_name : str\n            the library name.\n        cell_name : str\n            the cell name.\n        **kwargs : Any\n            optional parameters.\n\n        Returns\n        -------\n        dsn : SchInstance\n            a configurable schematic instance of the given schematic generator.\n        \"\"\"\n        return SchInstance(self.dsn_db, lib_name, cell_name, 'XTOP', static=False)\n\n    def new_schematic_instance(self, lib_name='', cell_name='', params=None, sch_cls=None,\n                               debug=False, **kwargs):\n        # type: (str, str, Dict[str, Any], Type[ModuleType], bool, **Any) -> SchInstance\n        \"\"\"Create a new schematic instance\n\n        This method is the schematic equivalent of TemplateDB's new_template() method.\n        By default, we assume the design() function is used to set the schematic parameters.\n        If you use another function (such as design_specs()), then you should specify\n        an optional parameter design_fun equal to the name of that function.\n\n        Parameters\n        ----------\n        lib_name : str\n            schematic library name.\n        cell_name : str\n            schematic name\n        params : Dict[str, Any]\n            the parameter dictionary.\n        sch_cls : Type[TemplateType]\n            the schematic generator class to instantiate.\n        debug : bool\n            True to print debug messages.\n        **kwargs : Any\n            optional parameters.\n\n       
 Returns\n        -------\n        dsn : SchInstance\n            a schematic instance of the given schematic generator.\n        \"\"\"\n        design_fun = kwargs.get('design_fun', 'design')\n        master = self.dsn_db.new_master(lib_name, cell_name, gen_cls=sch_cls, params=params,\n                                        debug=debug, design_args=None, design_fun=design_fun)\n\n        return SchInstance(self.dsn_db, lib_name, cell_name, 'XTOP', static=False,\n                           master=master)\n\n    def clear_schematic_database(self):\n        # type: () -> None\n        \"\"\"Reset schematic database.\"\"\"\n        self.dsn_db.clear()\n\n    def instantiate_schematic(self, lib_name, content_list, lib_path=''):\n        # type: (str, Sequence[Any], str) -> None\n        \"\"\"Create the given schematic contents in CAD database.\n\n        NOTE: this is BAG's internal method.  TO create schematics, call batch_schematic() instead.\n\n        Parameters\n        ----------\n        lib_name : str\n            name of the new library to put the schematic instances.\n        content_list : Sequence[Any]\n            list of schematics to create.\n        lib_path : str\n            the path to create the library in.  
If empty, use default location.\n        \"\"\"\n        if self.impl_db is None:\n            raise Exception('BAG Server is not set up.')\n\n        self.impl_db.instantiate_schematic(lib_name, content_list, lib_path=lib_path)\n\n    def batch_schematic(self,  # type: BagProject\n                        lib_name,  # type: str\n                        sch_inst_list,  # type: Sequence[SchInstance]\n                        name_list=None,  # type: Optional[Sequence[Optional[str]]]\n                        prefix='',  # type: str\n                        suffix='',  # type: str\n                        debug=False,  # type: bool\n                        rename_dict=None,  # type: Optional[Dict[str, str]]\n                        ):\n        # type: (...) -> None\n        \"\"\"create all the given schematics in CAD database.\n\n        Parameters\n        ----------\n        lib_name : str\n            name of the new library to put the schematic instances.\n        sch_inst_list : Sequence[SchInstance]\n            list of SchInstance objects.\n        name_list : Optional[Sequence[Optional[str]]]\n            list of master cell names.  
If not given, default names will be used.\n        prefix : str\n            prefix to add to cell names.\n        suffix : str\n            suffix to add to cell names.\n        debug : bool\n            True to print debugging messages\n        rename_dict : Optional[Dict[str, str]]\n            optional master cell renaming dictionary.\n        \"\"\"\n        master_list = [inst.master for inst in sch_inst_list]\n\n        self.dsn_db.cell_prefix = prefix\n        self.dsn_db.cell_suffix = suffix\n        self.dsn_db.instantiate_masters(master_list, name_list=name_list, lib_name=lib_name,\n                                        debug=debug, rename_dict=rename_dict)\n\n    def configure_testbench(self, tb_lib, tb_cell):\n        # type: (str, str) -> Testbench\n        \"\"\"Update testbench state for the given testbench.\n\n        This method fill in process-specific information for the given testbench, then returns\n        a testbench object which you can use to control simulation.\n\n        Parameters\n        ----------\n        tb_lib : str\n            testbench library name.\n        tb_cell : str\n            testbench cell name.\n\n        Returns\n        -------\n        tb : :class:`bag.core.Testbench`\n            the :class:`~bag.core.Testbench` instance.\n        \"\"\"\n        if self.impl_db is None:\n            raise Exception('BAG Server is not set up.')\n        if self.sim is None:\n            raise Exception('SimAccess is not set up.')\n\n        c, clist, params, outputs = self.impl_db.configure_testbench(tb_lib, tb_cell)\n        return Testbench(self.sim, self.impl_db, tb_lib, tb_cell, params, clist, [c], outputs)\n\n    def load_testbench(self, tb_lib, tb_cell):\n        # type: (str, str) -> Testbench\n        \"\"\"Loads a testbench from the database.\n\n        Parameters\n        ----------\n        tb_lib : str\n            testbench library name.\n        tb_cell : str\n            testbench cell name.\n\n        Returns\n  
      -------\n        tb : :class:`bag.core.Testbench`\n            the :class:`~bag.core.Testbench` instance.\n        \"\"\"\n        if self.impl_db is None:\n            raise Exception('BAG Server is not set up.')\n        if self.sim is None:\n            raise Exception('SimAccess is not set up.')\n\n        cur_envs, all_envs, params, outputs = self.impl_db.get_testbench_info(tb_lib, tb_cell)\n        return Testbench(self.sim, self.impl_db, tb_lib, tb_cell, params, all_envs,\n                         cur_envs, outputs)\n\n    def instantiate_layout_pcell(self, lib_name, cell_name, inst_lib, inst_cell, params,\n                                 pin_mapping=None, view_name='layout'):\n        # type: (str, str, str, str, Dict[str, Any], Optional[Dict[str, str]], str) -> None\n        \"\"\"Create a layout cell with a single pcell instance.\n\n        Parameters\n        ----------\n        lib_name : str\n            layout library name.\n        cell_name : str\n            layout cell name.\n        inst_lib : str\n            pcell library name.\n        inst_cell : str\n            pcell cell name.\n        params : Dict[str, Any]\n            the parameter dictionary.\n        pin_mapping: Optional[Dict[str, str]]\n            the pin renaming dictionary.\n        view_name : str\n            layout view name, default is \"layout\".\n        \"\"\"\n        if self.impl_db is None:\n            raise Exception('BAG Server is not set up.')\n\n        pin_mapping = pin_mapping or {}\n        self.impl_db.instantiate_layout_pcell(lib_name, cell_name, view_name,\n                                              inst_lib, inst_cell, params, pin_mapping)\n\n    def instantiate_layout(self, lib_name, view_name, via_tech, layout_list):\n        # type: (str, str, str, Sequence[Any]) -> None\n        \"\"\"Create a batch of layouts.\n\n        Parameters\n        ----------\n        lib_name : str\n            layout library name.\n        view_name : str\n         
   layout view name.\n        via_tech : str\n            via technology name.\n        layout_list : Sequence[Any]\n            a list of layouts to create\n        \"\"\"\n        if self.impl_db is None:\n            raise Exception('BAG Server is not set up.')\n\n        self.impl_db.instantiate_layout(lib_name, view_name, via_tech, layout_list)\n\n    def release_write_locks(self, lib_name, cell_view_list):\n        # type: (str, Sequence[Tuple[str, str]]) -> None\n        \"\"\"Release write locks from all the given cells.\n\n        Parameters\n        ----------\n        lib_name : str\n            the library name.\n        cell_view_list : Sequence[Tuple[str, str]]\n            list of cell/view name tuples.\n        \"\"\"\n        if self.impl_db is None:\n            raise Exception('BAG Server is not set up.')\n\n        self.impl_db.release_write_locks(lib_name, cell_view_list)\n\n    def run_lvs(self,  # type: BagProject\n                lib_name,  # type: str\n                cell_name,  # type: str\n                **kwargs\n                ):\n        # type: (...) -> Tuple[bool, str]\n        \"\"\"Run LVS on the given cell.\n\n        Parameters\n        ----------\n        lib_name : str\n            library name.\n        cell_name : str\n            cell_name\n        **kwargs :\n            optional keyword arguments.  
See DbAccess class for details.\n\n        Returns\n        -------\n        value : bool\n            True if LVS succeeds\n        log_fname : str\n            name of the LVS log file.\n        \"\"\"\n        if self.impl_db is None:\n            raise Exception('BAG Server is not set up.')\n\n        coro = self.impl_db.async_run_lvs(lib_name, cell_name, **kwargs)\n        results = batch_async_task([coro])\n        if results is None or isinstance(results[0], Exception):\n            return False, ''\n        return results[0]\n\n    def run_rcx(self,  # type: BagProject\n                lib_name,  # type: str\n                cell_name,  # type: str\n                **kwargs\n                ):\n        # type: (...) -> Tuple[Union[bool, Optional[str]], str]\n        \"\"\"Run RCX on the given cell.\n\n        The behavior and the first return value of this method depends on the\n        input arguments.  The second return argument will always be the RCX\n        log file name.\n\n        If create_schematic is True, this method will run RCX, then if it succeeds,\n        create a schematic of the extracted netlist in the database.  It then returns\n        a boolean value which will be True if RCX succeeds.\n\n        If create_schematic is False, this method will run RCX, then return a string\n        which is the extracted netlist filename. If RCX failed, None will be returned\n        instead.\n\n        Parameters\n        ----------\n        lib_name : str\n            library name.\n        cell_name : str\n            cell_name\n            override RCX parameter values.\n        **kwargs :\n            optional keyword arguments.  
See DbAccess class for details.\n\n        Returns\n        -------\n        value : Union[bool, str]\n            The return value, as described.\n        log_fname : str\n            name of the RCX log file.\n        \"\"\"\n        if self.impl_db is None:\n            raise Exception('BAG Server is not set up.')\n\n        create_schematic = kwargs.get('create_schematic', True)\n\n        coro = self.impl_db.async_run_rcx(lib_name, cell_name, **kwargs)\n        results = batch_async_task([coro])\n        if results is None or isinstance(results[0], Exception):\n            if create_schematic:\n                return False, ''\n            else:\n                return None, ''\n        return results[0]\n\n    def export_layout(self, lib_name, cell_name, out_file, **kwargs):\n        # type: (str, str, str, **Any) -> str\n        \"\"\"export layout.\n\n        Parameters\n        ----------\n        lib_name : str\n            library name.\n        cell_name : str\n            cell name.\n        out_file : str\n            output file name.\n        **kwargs : Any\n            optional keyword arguments.  See Checker class for details.\n\n        Returns\n        -------\n        log_fname : str\n            log file name.  Empty if task cancelled.\n        \"\"\"\n        if self.impl_db is None:\n            raise Exception('BAG Server is not set up.')\n\n        coro = self.impl_db.async_export_layout(lib_name, cell_name, out_file, **kwargs)\n        results = batch_async_task([coro])\n        if results is None or isinstance(results[0], Exception):\n            return ''\n        return results[0]\n\n    def batch_export_layout(self, info_list):\n        # type: (Sequence[Tuple[Any, ...]]) -> Optional[Sequence[str]]\n        \"\"\"Export layout of all given cells\n\n        Parameters\n        ----------\n        info_list:\n            list of cell information.  
Each element is a tuple of:\n\n            lib_name : str\n                library name.\n            cell_name : str\n                cell name.\n            out_file : str\n                layout output file name.\n            view_name : str\n                layout view name.  Optional.\n            params : Optional[Dict[str, Any]]\n                optional export parameter values.\n\n        Returns\n        -------\n        results : Optional[Sequence[str]]\n            If task is cancelled, return None.  Otherwise, this is a\n            list of log file names.\n        \"\"\"\n        if self.impl_db is None:\n            raise Exception('BAG Server is not set up.')\n\n        coro_list = [self.impl_db.async_export_layout(*info) for info in info_list]\n        temp_results = batch_async_task(coro_list)\n        if temp_results is None:\n            return None\n        return ['' if isinstance(val, Exception) else val for val in temp_results]\n\n    async def async_run_lvs(self, lib_name: str, cell_name: str, **kwargs: Any) -> Tuple[bool, str]:\n        \"\"\"A coroutine for running LVS.\n\n        Parameters\n        ----------\n        lib_name : str\n            library name.\n        cell_name : str\n            cell_name\n        **kwargs : Any\n            optional keyword arguments.  
See Checker class for details.\n            LVS parameters should be specified as lvs_params.\n\n        Returns\n        -------\n        value : bool\n            True if LVS succeeds\n        log_fname : str\n            name of the LVS log file.\n        \"\"\"\n        if self.impl_db is None:\n            raise Exception('BAG Server is not set up.')\n\n        return await self.impl_db.async_run_lvs(lib_name, cell_name, **kwargs)\n\n    async def async_run_rcx(self,  # type: BagProject\n                            lib_name: str,\n                            cell_name: str,\n                            **kwargs\n                            ) -> Tuple[Union[bool, Optional[str]], str]:\n        \"\"\"Run RCX on the given cell.\n\n        The behavior and the first return value of this method depends on the\n        input arguments.  The second return argument will always be the RCX\n        log file name.\n\n        If create_schematic is True, this method will run RCX, then if it succeeds,\n        create a schematic of the extracted netlist in the database.  It then returns\n        a boolean value which will be True if RCX succeeds.\n\n        If create_schematic is False, this method will run RCX, then return a string\n        which is the extracted netlist filename. If RCX failed, None will be returned\n        instead.\n\n        Parameters\n        ----------\n        lib_name : str\n            library name.\n        cell_name : str\n            cell_name\n            override RCX parameter values.\n        **kwargs :\n            optional keyword arguments.  
See DbAccess class for details.\n\n        Returns\n        -------\n        value : Union[bool, str]\n            The return value, as described.\n        log_fname : str\n            name of the RCX log file.\n        \"\"\"\n        if self.impl_db is None:\n            raise Exception('BAG Server is not set up.')\n\n        return await self.impl_db.async_run_rcx(lib_name, cell_name, **kwargs)\n\n    def create_schematic_from_netlist(self, netlist, lib_name, cell_name,\n                                      sch_view=None, **kwargs):\n        # type: (str, str, str, Optional[str], **Any) -> None\n        \"\"\"Create a schematic from a netlist.\n\n        This is mainly used to create extracted schematic from an extracted netlist.\n\n        Parameters\n        ----------\n        netlist : str\n            the netlist file name.\n        lib_name : str\n            library name.\n        cell_name : str\n            cell_name\n        sch_view : Optional[str]\n            schematic view name.  
The default value is implemendation dependent.\n        **kwargs : Any\n            additional implementation-dependent arguments.\n        \"\"\"\n        if self.impl_db is None:\n            raise Exception('BAG Server is not set up.')\n\n        return self.impl_db.create_schematic_from_netlist(netlist, lib_name, cell_name,\n                                                          sch_view=sch_view, **kwargs)\n\n    def create_verilog_view(self, verilog_file, lib_name, cell_name, **kwargs):\n        # type: (str, str, str, **Any) -> None\n        \"\"\"Create a verilog view for mix-signal simulation.\n\n        Parameters\n        ----------\n        verilog_file : str\n            the verilog file name.\n        lib_name : str\n            library name.\n        cell_name : str\n            cell name.\n        **kwargs : Any\n            additional implementation-dependent arguments.\n        \"\"\"\n        if self.impl_db is None:\n            raise Exception('BAG Server is not set up.')\n\n        verilog_file = os.path.abspath(verilog_file)\n        if not os.path.isfile(verilog_file):\n            raise ValueError('%s is not a file.' % verilog_file)\n\n        return self.impl_db.create_verilog_view(verilog_file, lib_name, cell_name, **kwargs)\n"
  },
  {
    "path": "bag/data/LICENSE",
    "content": "BSD 3-Clause License\n\nCopyright (c) 2018, Regents of the University of California\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n  list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n  this list of conditions and the following disclaimer in the documentation\n  and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n  contributors may be used to endorse or promote products derived from\n  this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "bag/data/__init__.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This package defines methods and classes useful for data post-processing.\n\"\"\"\n\n# compatibility import.\nfrom ..io import load_sim_results, save_sim_results, load_sim_file\nfrom .core import Waveform\n\n__all__ = ['load_sim_results', 'save_sim_results', 'load_sim_file',\n           'Waveform', ]\n"
  },
  {
    "path": "bag/data/core.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This module defines core data post-processing classes.\n\"\"\"\n\nimport numpy as np\nimport scipy.interpolate as interp\nimport scipy.cluster.vq as svq\nimport scipy.optimize as sciopt\n\n\nclass Waveform(object):\n    \"\"\"A (usually transient) waveform.\n\n    This class provides interpolation and other convenience functions.\n\n    Parameters\n    ----------\n    xvec : np.multiarray.ndarray\n        the X vector.\n    yvec : np.multiarray.ndarray\n        the Y vector.\n    xtol : float\n        the X value tolerance.\n    order : int\n        the interpolation order.  1 for nearest, 2 for linear, 3 for spline.\n    ext : int or str\n        interpolation extension mode.  See documentation for InterpolatedUnivariateSpline.\n\n    \"\"\"\n    def __init__(self, xvec, yvec, xtol, order=3, ext=3):\n        self._xvec = xvec\n        self._yvec = yvec\n        self._xtol = xtol\n        self._order = order\n        self._ext = ext\n        self._fun = interp.InterpolatedUnivariateSpline(xvec, yvec, k=order, ext=ext)\n\n    @property\n    def xvec(self):\n        \"\"\"the X vector\"\"\"\n        return self._xvec\n\n    @property\n    def yvec(self):\n        \"\"\"the Y vector\"\"\"\n        return self._yvec\n\n    @property\n    def order(self):\n        \"\"\"the interpolation order.  1 for nearest, 2 for linear, 3 for spline.\"\"\"\n        return self._order\n\n    @property\n    def xtol(self):\n        \"\"\"the X value tolerance.\"\"\"\n        return self._xtol\n\n    @property\n    def ext(self):\n        \"\"\"interpolation extension mode.  
See documentation for InterpolatedUnivariateSpline.\"\"\"\n        return self._ext\n\n    def __call__(self, *arg, **kwargs):\n        \"\"\"Evaluate the waveform at the given points.\"\"\"\n        return self._fun(*arg, **kwargs)\n\n    def get_xrange(self):\n        \"\"\"Returns the X vector range.\n\n        Returns\n        -------\n        xmin : float\n            minimum X value.\n        xmax : float\n            maximum X value.\n        \"\"\"\n        return self.xvec[0], self.xvec[-1]\n\n    def shift_by(self, xshift):\n        \"\"\"Returns a shifted version of this waveform.\n\n        Parameters\n        ----------\n        xshift : float\n            the amount to shift by.\n\n        Returns\n        -------\n        wvfm : bag.data.core.Waveform\n            a reference to this instance, or a copy if copy is True.\n        \"\"\"\n        return Waveform(self.xvec + xshift, self.yvec, self.xtol, order=self.order, ext=self.ext)\n\n    def get_all_crossings(self, threshold, start=None, stop=None, edge='both'):\n        \"\"\"Returns all X values at which this waveform crosses the given threshold.\n\n        Parameters\n        ----------\n        threshold : float\n            the threshold value.\n        start : float or None\n            if given, search for crossings starting at this X value.\n        stop : float or None\n            if given, search only for crossings before this X value.\n        edge : string\n            crossing type.  
Valid values are 'rising', 'falling', or 'both'.\n\n        Returns\n        -------\n        xval_list : list[float]\n            all X values at which crossing occurs.\n        \"\"\"\n        # determine start and stop indices\n        sidx = 0 if start is None else np.searchsorted(self.xvec, [start])[0]\n        if stop is None:\n            eidx = len(self.xvec)\n        else:\n            eidx = np.searchsorted(self.xvec, [stop])[0]\n            if eidx < len(self.xvec) and abs(self.xvec[eidx] - stop) < self.xtol:\n                eidx += 1\n\n        # quantize waveform values, then detect edge.\n        bool_vec = self.yvec[sidx:eidx] >= threshold  # type: np.ndarray\n        qvec = bool_vec.astype(int)\n        dvec = np.diff(qvec)\n\n        # eliminate unwanted edge types.\n        if edge == 'rising':\n            dvec = np.maximum(dvec, 0)\n        elif edge == 'falling':\n            dvec = np.minimum(dvec, 0)\n\n        # get crossing indices\n        idx_list = dvec.nonzero()[0]\n\n        # convert indices to X value using brentq interpolation.\n        def crossing_fun(x):\n            return self._fun(x) - threshold\n\n        xval_list = []\n        for idx in idx_list:\n            t0, t1 = self.xvec[sidx + idx], self.xvec[sidx + idx + 1]\n            try:\n                tcross = sciopt.brentq(crossing_fun, t0, t1, xtol=self.xtol)\n            except ValueError:\n                # no solution, this happens only if we have numerical error\n                # around the threshold.  
In this case just pick the endpoint\n                # closest to threshold.\n                va = crossing_fun(t0)\n                vb = crossing_fun(t1)\n                tcross = t0 if abs(va) < abs(vb) else t1\n\n            xval_list.append(tcross)\n\n        return xval_list\n\n    def get_crossing(self, threshold, start=None, stop=None, n=1, edge='both'):\n        \"\"\"Returns the X value at which this waveform crosses the given threshold.\n\n        Parameters\n        ----------\n        threshold : float\n            the threshold value.\n        start : float or None\n            if given, search for the crossing starting at this X value.'\n        stop : float or None\n            if given, search only for crossings before this X value.\n        n : int\n            returns the nth crossing.\n        edge : str\n            crossing type.  Valid values are 'rising', 'falling', or 'both'.\n\n        Returns\n        -------\n        xval : float or None\n            the X value at which the crossing occurs.  
None if no crossings are detected.\n        \"\"\"\n        xval_list = self.get_all_crossings(threshold, start=start, stop=stop, edge=edge)\n        if len(xval_list) < n:\n            return None\n        return xval_list[n-1]\n\n    def to_arrays(self, xmin=None, xmax=None):\n        \"\"\"Returns the X and Y arrays representing this waveform.\n\n        Parameters\n        ----------\n        xmin : float or None\n            If given, will start from this value.\n        xmax : float or None\n            If given, will end at this value.\n\n        Returns\n        -------\n        xvec : np.multiarray.ndarray\n            the X array\n        yvec : np.multiarray.ndarray\n            the Y array\n        \"\"\"\n        sidx = 0 if xmin is None else np.searchsorted(self.xvec, [xmin])[0]\n        eidx = len(self.xvec) if xmax is None else np.searchsorted(self.xvec, [xmax])[0]\n\n        if eidx < len(self.xvec) and self.xvec[eidx] == xmax:\n            eidx += 1\n\n        xtemp = self.xvec[sidx:eidx]\n        if xmin is not None and (len(xtemp) == 0 or xtemp[0] != xmin):\n            np.insert(xtemp, 0, [xmin])\n        if xmax is not None and (len(xtemp) == 0 or xtemp[-1] != xmax):\n            np.append(xtemp, [xmax])\n        return xtemp, self(xtemp)\n\n    def get_eye_specs(self, tbit, tsample, thres=0.0, nlev=2):\n        \"\"\"Compute the eye diagram spec of this waveform.\n\n        This algorithm uses the following steps.\n\n        1. set t_off to 0\n        2. sample the waveform at tbit interval, starting at t0 + t_off.\n        3. sort the sampled values, get gap between adjacent values.\n        4. record G, the length of the gap covering thres.\n        5. increment t_off by tsample, go to step 2 and repeat until\n           t_off >= tbit.\n        6. find t_off with maximum G.  This is the eye center.\n        7. at the eye center, compute eye height and eye opening using kmeans\n           clustering algorithm.\n        8. 
return result.\n\n        Parameters\n        ----------\n        tbit : float\n            eye period.\n        tsample : float\n            the resolution to sample the eye.  Used to find optimal\n            time shift and maximum eye opening.\n        thres : float\n            the eye vertical threshold.\n        nlev : int\n            number of expected levels.  2 for NRZ, 4 for PAM4.\n\n        Returns\n        -------\n        result : dict\n            A dictionary from specification to value.\n        \"\"\"\n\n        tstart, tend = self.get_xrange()\n        toff_vec = np.arange(0, tbit, tsample)\n        best_idx = 0\n        best_gap = 0.0\n        best_values = None\n        mid_lev = nlev // 2\n        for idx, t_off in enumerate(toff_vec):\n            # noinspection PyTypeChecker\n            values = self(np.arange(tstart + t_off, tend, tbit))\n            values.sort()\n\n            up_idx = np.searchsorted(values, [thres])[0]\n            if up_idx == 0 or up_idx == len(values):\n                continue\n            cur_gap = values[up_idx] - values[up_idx - 1]\n            if cur_gap > best_gap:\n                best_idx = idx\n                best_gap = cur_gap\n                best_values = values\n\n        if best_values is None:\n            raise ValueError(\"waveform never cross threshold=%.4g\" % thres)\n\n        vstd = np.std(best_values)\n        vtemp = best_values / vstd\n        tmp_arr = np.linspace(vtemp[0], vtemp[-1], nlev)  # type: np.ndarray\n        clusters = svq.kmeans(vtemp, tmp_arr)[0]\n        # clusters = svq.kmeans(vtemp, 4, iter=50)[0]\n        clusters *= vstd\n        clusters.sort()\n        vcenter = (clusters[mid_lev] + clusters[mid_lev - 1]) / 2.0\n\n        # compute eye opening/margin\n        openings = []\n        tr_widths = []\n        last_val = best_values[0]\n        bot_val = last_val\n        cur_cidx = 0\n        for cur_val in best_values:\n            cur_cluster = clusters[cur_cidx]\n         
   next_cluster = clusters[cur_cidx + 1]\n            if abs(cur_val - cur_cluster) > abs(cur_val - next_cluster):\n                openings.append(cur_val - last_val)\n                tr_widths.append(last_val - bot_val)\n                cur_cidx += 1\n                if cur_cidx == len(clusters) - 1:\n                    tr_widths.append(best_values[-1] - cur_val)\n                    break\n                bot_val = cur_val\n            last_val = cur_val\n\n        return {'center': (float(toff_vec[best_idx]), vcenter),\n                'levels': clusters,\n                'heights': clusters[1:] - clusters[:-1],\n                'openings': np.array(openings),\n                'trace_widths': np.array(tr_widths)\n                }\n\n    def _add_xy(self, other):\n        if not isinstance(other, Waveform):\n            raise ValueError(\"Trying to add non-Waveform object.\")\n        xnew = np.concatenate((self.xvec, other.xvec))\n        xnew = np.unique(np.around(xnew / self.xtol)) * self.xtol\n        # noinspection PyTypeChecker\n        y1 = self(xnew)\n        y2 = other(xnew)\n        return xnew, y1 + y2\n\n    def __add__(self, other):\n        if np.isscalar(other):\n            return Waveform(np.array(self.xvec), self.yvec + other, self.xtol, order=self.order, ext=self.ext)\n        elif isinstance(other, Waveform):\n            new_order = max(self.order, other.order)\n            xvec, yvec = self._add_xy(other)\n            return Waveform(xvec, yvec, self.xtol, order=new_order, ext=self.ext)\n        else:\n            raise Exception('type %s not supported' % type(other))\n\n    def __neg__(self):\n        return Waveform(np.array(self.xvec), -self.yvec, self.xtol, order=self.order, ext=self.ext)\n\n    def __mul__(self, scale):\n        if not np.isscalar(scale):\n            raise ValueError(\"Can only multiply by scalar.\")\n        return Waveform(np.array(self.xvec), scale * self.yvec, self.xtol, order=self.order, ext=self.ext)\n\n    
def __rmul__(self, scale):\n        return self.__mul__(scale)\n"
  },
  {
    "path": "bag/data/dc.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This module defines classes for computing DC operating point.\n\"\"\"\n\nfrom typing import Union, Dict\n\nimport scipy.sparse\nimport scipy.optimize\nimport numpy as np\n\nfrom bag.tech.mos import MosCharDB\n\n\nclass DCCircuit(object):\n    \"\"\"A class that solves DC operating point of a circuit.\n\n    Parameters\n    ----------\n    ndb : MosCharDB\n        nmos characterization database.\n    pdb : MosCharDB\n        pmos characterization database.\n    \"\"\"\n\n    def __init__(self, ndb, pdb):\n        # type: (MosCharDB, MosCharDB) -> None\n        self._n = 1\n        self._ndb = ndb\n        self._pdb = pdb\n        self._transistors = {}\n        self._node_id = {'gnd': 0, 'vss': 0, 'VSS': 0}\n        self._node_name_lookup = {0: 'gnd'}\n        self._node_voltage = {0: 0}\n\n    def _get_node_id(self, name):\n        # type: (str) -> int\n        if name not in self._node_id:\n            ans = self._n\n            self._node_id[name] = ans\n            self._node_name_lookup[ans] = name\n            self._n += 1\n            return ans\n        else:\n            return self._node_id[name]\n\n    def set_voltage_source(self, node_name, voltage):\n        # type: (str, float) -> None\n        \"\"\"\n        Specify voltage the a node.\n\n        Parameters\n        ----------\n        node_name : str\n            the net name.\n        voltage : float\n            voltage of the given net.\n        \"\"\"\n        node_id = self._get_node_id(node_name)\n        self._node_voltage[node_id] = voltage\n\n    def add_transistor(self, d_name, g_name, s_name, b_name, mos_type, intent, w, lch, fg=1):\n        # type: (str, str, str, str, str, str, Union[float, int], float, int) -> None\n        \"\"\"Adds a small signal transistor model to the circuit.\n\n        Parameters\n        ----------\n        d_name : str\n            drain net name.\n        g_name : str\n            gate net name.\n        s_name 
: str\n            source net name.\n        b_name : str\n            body net name.  Defaults to 'gnd'.\n        mos_type : str\n            transistor type.  Either 'nch' or 'pch'.\n        intent : str\n            transistor threshold flavor.\n        w : Union[float, int]\n            transistor width.\n        lch : float\n            transistor channel length.\n        fg : int\n            transistor number of fingers.\n        \"\"\"\n        node_d = self._get_node_id(d_name)\n        node_g = self._get_node_id(g_name)\n        node_s = self._get_node_id(s_name)\n        node_b = self._get_node_id(b_name)\n\n        # get existing current function.  Initalize if not found.\n        ids_key = (mos_type, intent, lch)\n        if ids_key in self._transistors:\n            arow, acol, bdata, fg_list, ds_list = self._transistors[ids_key]\n        else:\n            arow, acol, bdata, fg_list, ds_list = [], [], [], [], []\n            self._transistors[ids_key] = (arow, acol, bdata, fg_list, ds_list)\n\n        # record Ai and bi data\n        offset = len(fg_list) * 4\n        arow.extend([offset + 1, offset + 1, offset + 2, offset + 2, offset + 3, offset + 3])\n        acol.extend([node_b, node_s, node_d, node_s, node_g, node_s])\n        bdata.append(w)\n        fg_list.append(fg)\n        ds_list.append((node_d, node_s))\n\n    def solve(self, env, guess_dict, itol=1e-10, inorm=1e-6):\n        # type: (str, Dict[str, float], float, float) -> Dict[str, float]\n        \"\"\"Solve DC operating point.\n\n        Parameters\n        ----------\n        env : str\n            the simulation environment.\n        guess_dict : Dict[str, float]\n            initial guess dictionary.\n        itol : float\n            current error tolerance.\n        inorm : float\n            current normalization factor.\n\n        Returns\n        -------\n        op_dict : Dict[str, float]\n            DC operating point dictionary.\n        \"\"\"\n        # step 1: get list 
of nodes to solve\n        node_list = [idx for idx in range(self._n) if idx not in self._node_voltage]\n        reverse_dict = {nid: idx for idx, nid in enumerate(node_list)}\n        ndim = len(node_list)\n\n        # step 2: get Av and bv\n        amatv = scipy.sparse.csr_matrix(([1] * ndim, (node_list, np.arange(ndim))), shape=(self._n, ndim))\n        bmatv = np.zeros(self._n)\n        for nid, val in self._node_voltage.items():\n            bmatv[nid] = val\n\n        # step 3: gather current functions, and output matrix entries\n        ifun_list = []\n        out_data = []\n        out_row = []\n        out_col = []\n        out_col_cnt = 0\n        for (mos_type, intent, lch), (arow, acol, bdata, fg_list, ds_list) in self._transistors.items():\n            db = self._ndb if mos_type == 'nch' else self._pdb\n            ifun = db.get_function('ids', env=env, intent=intent, l=lch)\n            # step 3A: compute Ai and bi\n            num_tran = len(fg_list)\n            adata = [1, -1] * (3 * num_tran)\n            amati = scipy.sparse.csr_matrix((adata, (arow, acol)), shape=(4 * num_tran, self._n))\n            bmati = np.zeros(4 * num_tran)\n            bmati[0::4] = bdata\n\n            # step 3B: compute A = Ai * Av, b = Ai * bv + bi\n            amat = amati.dot(amatv)\n            bmat = amati.dot(bmatv) + bmati\n            # record scale matrix and function.\n            scale_mat = scipy.sparse.diags(fg_list) / inorm\n            ifun_list.append((ifun, scale_mat, amat, bmat))\n            for node_d, node_s in ds_list:\n                if node_d in reverse_dict:\n                    out_row.append(reverse_dict[node_d])\n                    out_data.append(-1)\n                    out_col.append(out_col_cnt)\n                if node_s in reverse_dict:\n                    out_row.append(reverse_dict[node_s])\n                    out_data.append(1)\n                    out_col.append(out_col_cnt)\n                out_col_cnt += 1\n        # 
construct output matrix\n        out_mat = scipy.sparse.csr_matrix((out_data, (out_row, out_col)), shape=(ndim, out_col_cnt))\n\n        # step 4: define zero function\n        def zero_fun(varr):\n            iarr = np.empty(out_col_cnt)\n            offset = 0\n            for idsf, smat, ai, bi in ifun_list:\n                num_out = smat.shape[0]\n                # reshape going row first instead of column\n                arg = (ai.dot(varr) + bi).reshape(4, -1, order='F').T\n                if idsf.ndim == 3:\n                    # handle case where transistor source and body are shorted\n                    tmpval = idsf(arg[:, [0, 2, 3]])\n                else:\n                    tmpval = idsf(arg)\n                iarr[offset:offset + num_out] = smat.dot(tmpval)\n                offset += num_out\n            return out_mat.dot(iarr)\n\n        # step 5: define zero function\n        def jac_fun(varr):\n            jarr = np.empty((out_col_cnt, ndim))\n            offset = 0\n            for idsf, smat, ai, bi in ifun_list:\n                num_out = smat.shape[0]\n                # reshape going row first instead of column\n                arg = (ai.dot(varr) + bi).reshape(4, -1, order='F').T\n                if idsf.ndim == 3:\n                    # handle case where transistor source and body are shorted\n                    tmpval = idsf.jacobian(arg[:, [0, 2, 3]])\n                    # noinspection PyTypeChecker\n                    tmpval = np.insert(tmpval, 1, 0.0, axis=len(tmpval.shape) - 1)\n                else:\n                    tmpval = idsf.jacobian(arg)\n                jcur = smat.dot(tmpval)\n                for idx in range(num_out):\n                    # ai is sparse matrix; multiplication is matrix\n                    jarr[offset + idx, :] = jcur[idx, :] @ ai[4 * idx:4 * idx + 4, :]\n                offset += num_out\n            return out_mat.dot(jarr)\n\n        xguess = np.empty(ndim)\n        for name, guess_val in 
guess_dict.items():\n            xguess[reverse_dict[self._node_id[name]]] = guess_val\n\n        result = scipy.optimize.root(zero_fun, xguess, jac=jac_fun, tol=itol / inorm, method='hybr')\n        if not result.success:\n            raise ValueError('solution failed.')\n\n        op_dict = {self._node_name_lookup[nid]: result.x[idx] for idx, nid in enumerate(node_list)}\n        return op_dict\n"
  },
  {
    "path": "bag/data/digital.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This module defines functions useful for digital verification/postprocessing.\n\"\"\"\n\nfrom typing import Optional, List, Tuple\n\nimport numpy as np\n\nfrom .core import Waveform\n\n\ndef de_bruijn(n, symbols=None):\n    # type: (int, Optional[List[float]]) -> List[float]\n    \"\"\"Returns a De Bruijn sequence with subsequence of length n.\n\n    a De Bruijn sequence with subsequence of length n is a sequence such that\n    all possible subsequences of length n appear exactly once somewhere in the\n    sequence.  This method is useful for simulating the worst case eye diagram\n    given finite impulse response.\n\n    Parameters\n    ----------\n    n : int\n        length of the subsequence.\n    symbols : Optional[List[float]] or None\n        the list of symbols.  If None, defaults to [0.0, 1.0].\n\n    Returns\n    -------\n    seq : List[float]\n        the de bruijn sequence.\n    \"\"\"\n    symbols = symbols or [0.0, 1.0]\n    k = len(symbols)\n\n    a = [0] * (k * n)\n    sequence = []\n\n    def db(t, p):\n        if t > n:\n            if n % p == 0:\n                sequence.extend(a[1:p + 1])\n        else:\n            a[t] = a[t - p]\n            db(t + 1, p)\n            for j in range(a[t - p] + 1, k):\n                a[t] = j\n                db(t + 1, t)\n\n    db(1, 1)\n    return [symbols[i] for i in sequence]\n\n\ndef dig_to_pwl(values, tper, trf, td=0):\n    # type: (List[float], float, float, float) -> Tuple[List[float], List[float]]\n    \"\"\"Convert a list of digital bits to PWL waveform.\n\n    This function supports negative delay.  
However, time/value pairs for negative data\n    are truncated.\n\n    Parameters\n    ----------\n    values : List[float]\n        list of values for each bit.\n    tper : float\n        the period in seconds.\n    trf : float\n        the rise/fall time in seconds.\n    td : float\n        the delay\n\n    Returns\n    -------\n    tvec : List[float]\n        the time vector.\n    yvec : List[float]\n        the value vector.\n    \"\"\"\n    y0 = values[0]\n    tcur, ycur = td, y0\n    tvec, yvec = [], []\n    for v in values:\n        if v != ycur:\n            if tcur >= 0:\n                tvec.append(tcur)\n                yvec.append(ycur)\n            elif tcur < 0 < tcur + trf:\n                # make sure time starts at 0\n                tvec.append(0)\n                yvec.append(ycur - (v - ycur) / trf * tcur)\n            ycur = v\n            if tcur + trf >= 0:\n                tvec.append(tcur + trf)\n                yvec.append(ycur)\n            elif tcur + trf < 0 < tcur + tper:\n                # make sure time starts at 0\n                tvec.append(0)\n                yvec.append(ycur)\n            tcur += tper\n        else:\n            if tcur <= 0 < tcur + tper:\n                # make sure time starts at 0\n                tvec.append(0)\n                yvec.append(ycur)\n            tcur += tper\n\n    if not tvec:\n        # only here if input is constant\n        tvec = [0, tper]\n        yvec = [y0, y0]\n    elif tvec[0] > 0:\n        # make time start at 0\n        tvec.insert(0, 0)\n        yvec.insert(0, y0)\n\n    return tvec, yvec\n\n\ndef get_crossing_index(yvec, threshold, n=0, rising=True):\n    # type: (np.array, float, int, bool) -> int\n    \"\"\"Returns the first index that the given numpy array crosses the given threshold.\n\n    Parameters\n    ----------\n    yvec : np.array\n        the numpy array.\n    threshold : float\n        the crossing threshold.\n    n : int\n        returns the nth edge index, with n=0 
being the first index.\n    rising : bool\n        True to return rising edge index.  False to return falling edge index.\n\n    Returns\n    -------\n    idx : int\n        the crossing edge index.\n    \"\"\"\n\n    bool_vec = yvec >= threshold\n    qvec = bool_vec.astype(int)\n    dvec = np.diff(qvec)\n\n    dvec = np.maximum(dvec, 0) if rising else np.minimum(dvec, 0)\n    idx_list = dvec.nonzero()[0]\n    return idx_list[n]\n\n\ndef get_flop_timing(tvec, d, q, clk, ttol, data_thres=0.5,\n                    clk_thres=0.5, tstart=0.0, clk_edge='rising', tag=None, invert=False):\n    \"\"\"Calculate flop timing parameters given the associated waveforms.\n\n    This function performs the following steps:\n\n    1. find all valid clock edges.  Compute period of the clock (clock waveform\n       must be periodic).\n    \n    2. For each valid clock edge:\n\n        A. Check if the input changes in the previous cycle.  If so, compute tsetup.\n           Otherwise, tsetup = tperiod.\n    \n        B. Check if input changes in the current cycle.  If so, compute thold.\n           Otherwise, thold = tperiod.\n  \n        C. Check that output transition at most once and that output = input.\n           Otherwise, record an error.\n\n        D. record the output data polarity.\n\n    3. For each output data polarity, compute the minimum tsetup and thold and any\n       errors.  Return summary as a dictionary.\n\n    \n    The output is a dictionary with keys 'setup', 'hold', 'delay', and 'errors'.\n    the setup/hold/delay entries contains 2-element tuples describing the worst\n    setup/hold/delay time.  The first element is the setup/hold/delay time, and\n    the second element is the clock edge time at which it occurs.  
The errors field\n    stores all clock edge times at which an error occurs.\n\n\n    Parameters\n    ----------\n    tvec : np.ndarray\n        the time data.\n    d : np.ndarray\n        the input data.\n    q : np.ndarray\n        the output data.\n    clk : np.ndarray\n        the clock data.\n    ttol : float\n        time resolution.\n    data_thres : float\n        the data threshold.\n    clk_thres : float\n        the clock threshold.\n    tstart : float\n        ignore data points before tstart.\n    clk_edge : str\n        the clock edge type.  Valid values are \"rising\", \"falling\", or \"both\".\n    tag : obj\n        an identifier tag to append to results.\n    invert : bool\n        if True, the flop output is inverted from the data.\n\n    Returns\n    -------\n    data : dict[str, any]\n        A dictionary describing the worst setup/hold/delay and errors, if any.\n    \"\"\"\n    d_wv = Waveform(tvec, d, ttol)\n    clk_wv = Waveform(tvec, clk, ttol)\n    q_wv = Waveform(tvec, q, ttol)\n    tend = tvec[-1]\n\n    # get all clock sampling times and clock period\n    samp_times = clk_wv.get_all_crossings(clk_thres, start=tstart, edge=clk_edge)\n    tper = (samp_times[-1] - samp_times[0]) / (len(samp_times) - 1)\n    # ignore last clock cycle if it's not a full cycle.\n    if samp_times[-1] + tper > tend:\n        samp_times = samp_times[:-1]\n\n    # compute setup/hold/error for each clock period\n    data = {'setup': (tper, -1), 'hold': (tper, -1), 'delay': (0.0, -1), 'errors': []}\n    for t in samp_times:\n        d_prev = d_wv.get_all_crossings(data_thres, start=t - tper, stop=t, edge='both')\n        d_cur = d_wv.get_all_crossings(data_thres, start=t, stop=t + tper, edge='both')\n        q_cur = q_wv.get_all_crossings(data_thres, start=t, stop=t + tper, edge='both')\n        d_val = d_wv(t) > data_thres\n        q_val = q_wv(t + tper) > data_thres\n\n        # calculate setup/hold/delay\n        tsetup = t - d_prev[-1] if d_prev else tper\n     
   thold = d_cur[0] - t if d_cur else tper\n        tdelay = q_cur[0] - t if q_cur else 0.0\n\n        # check if flop has error\n        error = (invert != (q_val != d_val)) or (len(q_cur) > 1)\n\n        # record results\n        if tsetup < data['setup'][0]:\n            data['setup'] = (tsetup, t)\n        if thold < data['hold'][0]:\n            data['hold'] = (thold, t)\n        if tdelay > data['delay'][0]:\n            data['delay'] = (tdelay, t)\n        if error:\n            data['errors'].append(t)\n\n    if tag is not None:\n        data['setup'] += (tag, )\n        data['hold'] += (tag, )\n        data['delay'] += (tag, )\n        data['errors'] = [(t, tag) for t in data['errors']]\n\n    return data\n"
  },
  {
    "path": "bag/data/lti.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This module defines functions and classes useful for characterizing linear time-invariant circuits.\n\"\"\"\n\nfrom typing import Dict, List, Tuple, Union, Optional\n\nimport numpy as np\nimport scipy.signal\nimport scipy.sparse\nimport scipy.sparse.linalg\n# noinspection PyProtectedMember\nfrom scipy.signal.ltisys import StateSpaceContinuous, TransferFunctionContinuous\n\n\nclass LTICircuit(object):\n    \"\"\"A class that models a linear-time-invariant circuit.\n\n    This class computes AC transfer functions for linear-time-invariant circuits.\n\n    Note: Since this class work with AC transfer functions, 'gnd' in this circuit is AC ground.\n\n    Parameters\n    ----------\n    udot_tol : float\n        tolerance to determine if dependency on input derivatives is zero.\n    \"\"\"\n\n    _float_min = np.finfo(np.float64).eps\n\n    def __init__(self, udot_tol=1e-12):\n        # type: (float) -> None\n        self._num_n = 0\n        self._gmat_data = {}  # type: Dict[Tuple[int, int], float]\n        self._cmat_data = {}  # type: Dict[Tuple[int, int], float]\n        self._vcvs_list = []  # type: List[Tuple[int, int, int, int, float]]\n        self._ind_data = {}  # type: Dict[Tuple[int, int], float]\n        self._node_id = {'gnd': -1}\n        self._udot_tol = udot_tol\n\n    def _get_node_id(self, name):\n        # type: (str) -> int\n        if name not in self._node_id:\n            ans = self._num_n\n            self._node_id[name] = ans\n            self._num_n += 1\n            return ans\n        else:\n            return self._node_id[name]\n\n    @staticmethod\n    def _add(mat, key, val):\n        # type: (Dict[Tuple[int, int], float], Tuple[int, int], float) -> None\n        if key in mat:\n            mat[key] += val\n        else:\n            mat[key] = val\n\n    def add_res(self, res, p_name, n_name):\n        # type: (float, str, str) -> None\n        \"\"\"Adds a resistor to the circuit.\n\n    
    Parameters\n        ----------\n        res : float\n            the resistance value, in Ohms.\n        p_name : str\n            the positive terminal net name.\n        n_name : str\n            the negative terminal net name.\n        \"\"\"\n        # avoid 0 resistance.\n        res_sgn = 1 if res >= 0 else -1\n        g = res_sgn / max(abs(res), self._float_min)\n        self.add_conductance(g, p_name, n_name)\n\n    def add_conductance(self, g, p_name, n_name):\n        # type: (float, str, str) -> None\n        \"\"\"Adds a resistor to the circuit given conductance value.\n\n        Parameters\n        ----------\n        g : float\n            the conductance value, in inverse Ohms.\n        p_name : str\n            the positive terminal net name.\n        n_name : str\n            the negative terminal net name.\n        \"\"\"\n        node_p = self._get_node_id(p_name)\n        node_n = self._get_node_id(n_name)\n\n        if node_p == node_n:\n            return\n        if node_p < node_n:\n            node_p, node_n = node_n, node_p\n\n        self._add(self._gmat_data, (node_p, node_p), g)\n        if node_n >= 0:\n            self._add(self._gmat_data, (node_p, node_n), -g)\n            self._add(self._gmat_data, (node_n, node_p), -g)\n            self._add(self._gmat_data, (node_n, node_n), g)\n\n    def add_vccs(self, gm, p_name, n_name, cp_name, cn_name='gnd'):\n        # type: (float, str, str, str, str) -> None\n        \"\"\"Adds a voltage controlled current source to the circuit.\n\n        Parameters\n        ----------\n        gm : float\n            the gain of the voltage controlled current source, in Siemens.\n        p_name : str\n            the terminal that the current flows out of.\n        n_name : str\n            the terminal that the current flows in to.\n        cp_name : str\n            the positive voltage control terminal.\n        cn_name : str\n            the negative voltage control terminal.  
Defaults to 'gnd'.\n        \"\"\"\n        node_p = self._get_node_id(p_name)\n        node_n = self._get_node_id(n_name)\n        node_cp = self._get_node_id(cp_name)\n        node_cn = self._get_node_id(cn_name)\n\n        if node_p == node_n or node_cp == node_cn:\n            return\n\n        if node_cp >= 0:\n            if node_p >= 0:\n                self._add(self._gmat_data, (node_p, node_cp), gm)\n            if node_n >= 0:\n                self._add(self._gmat_data, (node_n, node_cp), -gm)\n        if node_cn >= 0:\n            if node_p >= 0:\n                self._add(self._gmat_data, (node_p, node_cn), -gm)\n            if node_n >= 0:\n                self._add(self._gmat_data, (node_n, node_cn), gm)\n\n    def add_vcvs(self, gain, p_name, n_name, cp_name, cn_name='gnd'):\n        # type: (float, str, str, str, str) -> None\n        \"\"\"Adds a voltage controlled voltage source to the circuit.\n\n        Parameters\n        ----------\n        gain : float\n            the gain of the voltage controlled voltage source.\n        p_name : str\n            the positive terminal of the output voltage source.\n        n_name : str\n            the negative terminal of the output voltage source.\n        cp_name : str\n            the positive voltage control terminal.\n        cn_name : str\n            the negative voltage control terminal.  
Defaults to 'gnd'.\n        \"\"\"\n        node_p = self._get_node_id(p_name)\n        node_n = self._get_node_id(n_name)\n        node_cp = self._get_node_id(cp_name)\n        node_cn = self._get_node_id(cn_name)\n\n        if node_p == node_n:\n            raise ValueError('positive and negative terminal of a vcvs cannot be the same.')\n        if node_cp == node_cn:\n            raise ValueError('positive and negative control terminal of a vcvs cannot be the same.')\n        if node_p < node_n:\n            # flip nodes so we always have node_p > node_n, to guarantee node_p >= 0\n            node_p, node_n, node_cp, node_cn = node_n, node_p, node_cn, node_cp\n\n        self._vcvs_list.append((node_p, node_n, node_cp, node_cn, gain))\n\n    def add_cap(self, cap, p_name, n_name):\n        # type: (float, str, str) -> None\n        \"\"\"Adds a capacitor to the circuit.\n\n        Parameters\n        ----------\n        cap : float\n            the capacitance value, in Farads.\n        p_name : str\n            the positive terminal net name.\n        n_name : str\n            the negative terminal net name.\n        \"\"\"\n        node_p = self._get_node_id(p_name)\n        node_n = self._get_node_id(n_name)\n\n        if node_p == node_n:\n            return\n        if node_p < node_n:\n            node_p, node_n = node_n, node_p\n\n        self._add(self._cmat_data, (node_p, node_p), cap)\n        if node_n >= 0:\n            self._add(self._cmat_data, (node_p, node_n), -cap)\n            self._add(self._cmat_data, (node_n, node_p), -cap)\n            self._add(self._cmat_data, (node_n, node_n), cap)\n\n    def add_ind(self, ind, p_name, n_name):\n        # type: (float, str, str) -> None\n        \"\"\"Adds an inductor to the circuit.\n\n        Parameters\n        ----------\n        ind : float\n            the inductance value, in Henries.\n        p_name : str\n            the positive terminal net name.\n        n_name : str\n            the negative 
terminal net name.\n        \"\"\"\n        node_p = self._get_node_id(p_name)\n        node_n = self._get_node_id(n_name)\n\n        if node_p == node_n:\n            return\n        if node_p < node_n:\n            key = node_n, node_p\n        else:\n            key = node_p, node_n\n\n        if key not in self._ind_data:\n            self._ind_data[key] = ind\n        else:\n            self._ind_data[key] = 1.0 / (1.0 / ind + 1.0 / self._ind_data[key])\n\n    def add_transistor(self, tran_info, d_name, g_name, s_name, b_name='gnd', fg=1, neg_cap=True):\n        # type: (Dict[str, float], str, str, str, str, Union[float, int], bool) -> None\n        \"\"\"Adds a small signal transistor model to the circuit.\n\n        Parameters\n        ----------\n        tran_info : Dict[str, float]\n            a dictionary of 1-finger transistor small signal parameters.  Should contain gm, gds, gb,\n            cgd, cgs, cgb, cds, cdb, and csb.\n        d_name : str\n            drain net name.\n        g_name : str\n            gate net name.\n        s_name : str\n            source net name.\n        b_name : str\n            body net name.  
Defaults to 'gnd'.\n        fg : Union[float, int]\n            number of transistor fingers.\n        neg_cap : bool\n            True to allow negative capacitance (which is there due to model fitting).\n        \"\"\"\n        gm = tran_info['gm'] * fg\n        gds = tran_info['gds'] * fg\n        cgd = tran_info['cgd'] * fg\n        cgs = tran_info['cgs'] * fg\n        cds = tran_info['cds'] * fg\n        cgb = tran_info.get('cgb', 0) * fg\n        cdb = tran_info.get('cdb', 0) * fg\n        csb = tran_info.get('csb', 0) * fg\n\n        if not neg_cap:\n            cgd = max(cgd, 0)\n            cgs = max(cgs, 0)\n            cds = max(cds, 0)\n            cgb = max(cgb, 0)\n            cdb = max(cdb, 0)\n            csb = max(csb, 0)\n\n        self.add_vccs(gm, d_name, s_name, g_name, s_name)\n        self.add_conductance(gds, d_name, s_name)\n        self.add_cap(cgd, g_name, d_name)\n        self.add_cap(cgs, g_name, s_name)\n        self.add_cap(cds, d_name, s_name)\n        self.add_cap(cgb, g_name, b_name)\n        self.add_cap(cdb, d_name, b_name)\n        self.add_cap(csb, s_name, b_name)\n\n        if 'gb' in tran_info:\n            # only add these if source is not shorted to body.\n            gb = tran_info['gb'] * fg\n            self.add_vccs(gb, d_name, s_name, b_name, s_name)\n\n    @classmethod\n    def _count_rank(cls, diag):\n        # type: (np.ndarray) -> int\n        diag_abs = np.abs(diag)\n        float_min = cls._float_min\n        rank_tol = diag_abs[0] * diag.size * float_min\n        rank_cnt = diag_abs > rank_tol  # type: np.ndarray\n        return np.count_nonzero(rank_cnt)\n\n    @classmethod\n    def _solve_gx_bw(cls, g, b):\n        # type: (np.ndarray, np.ndarray) -> Tuple[np.ndarray, np.ndarray]\n        \"\"\"Solve the equation G*x + B*[w, w', ...].T = 0 for x.\n\n        Finds matrix Ka, Kw such that x = Ka * a + Kw * [w, w', ...].T solves\n        the given equation for any value of a.\n\n        Parameters\n        
----------\n        g : np.ndarray\n            the G matrix, with shape (M, N) and M < N.\n        b : np.ndarray\n            the B matrix.\n\n        Returns\n        -------\n        ka : np.ndarray\n            the Ky matrix.\n        kw : np.ndarray\n            the Kw matrix.\n        \"\"\"\n        # G = U*S*Vh\n        u, s, vh = scipy.linalg.svd(g, full_matrices=True, overwrite_a=True)\n        # let B=Uh*B, so now S*Vh*x + B*w = 0\n        b = u.T.dot(b)\n        # let y = Vh*x, or x = V*y, so now S*y + U*B*w = 0\n        v = vh.T\n        # truncate the bottom 0 part of S, now S_top*y_top + B_top*w = 0\n        rank = cls._count_rank(s)\n        # check bottom part of B.  If not 0, there's no solution\n        b_abs = np.abs(b)\n        zero_tol = np.amax(b_abs) * cls._float_min\n        if np.count_nonzero(b_abs[rank:, :] > zero_tol) > 0:\n            raise ValueError('B matrix bottom is not zero.  This circuit has no solution.')\n        b_top = b[:rank, :]\n        s_top_inv = 1 / s[:rank]  # type: np.ndarray\n        s_top_inv = np.diag(s_top_inv)\n        # solving, we get y_top = -S_top^-1*B_top*w = Ku*w\n        kw = s_top_inv.dot(-b_top)\n        # now x = V*y = Vl*y_top + Vr*y_bot = Vr*y_bot + Vl*Kw*w = Ky*y_bot = Kw*w\n        vl = v[:, :rank]\n        vr = v[:, rank:]\n        kw = vl.dot(kw)\n        return vr, kw\n\n    @classmethod\n    def _transform_c_qr(cls, g, c, b, d):\n        \"\"\"Reveal redundant variables by transforming C matrix using QR decomposition\"\"\"\n        q, r, p = scipy.linalg.qr(c, pivoting=True)\n        rank = cls._count_rank(np.diag(r))\n        qh = q.T\n        return rank, qh.dot(g[:, p]), r, qh.dot(b), d[:, p]\n\n    # @classmethod\n    # def _transform_c_svd(cls, g, c, b, d):\n    #     \"\"\"Reveal redundant variables by transforming C matrix using SVD decomposition\"\"\"\n    #     u, s, vh = scipy.linalg.svd(c, full_matrices=True, overwrite_a=True)\n    #     uh = u.T\n    #     v = vh.T\n    #     rank 
= cls._count_rank(s)\n    #     return rank, uh.dot(g).dot(v), np.diag(s), uh.dot(b), d.dot(v)\n\n    @classmethod\n    def _reduce_state_space(cls, g, c, b, d, e, ndim_w):\n        \"\"\"Reduce state space variables.\n\n        Given the state equation G*x + C*x' + B*[w, w', w'', ...].T = 0, and\n        y = D*x + E*[w, w', w'', ...].T, check if C is full rank.  If not,\n        we compute new G, C, and B matrices with reduced dimensions.\n        \"\"\"\n        # step 0: transform C and obtain rank\n        rank, g, c, b, d = cls._transform_c_qr(g, c, b, d)\n        # rank, g, c, b, d = cls._transform_c_svd(g, c, b, d)\n        while rank < c.shape[0]:\n            # step 1: eliminate x' term by looking at bottom part of matrices\n            ctop = c[:rank, :]\n            gtop = g[:rank, :]\n            gbot = g[rank:, :]\n            btop = b[:rank, :]\n            bbot = b[rank:, :]\n            # step 2: find ka and kw from bottom\n            ka, kw = cls._solve_gx_bw(gbot, bbot)\n            # step 3: substitute x = ka * a + kw * [w, w', w'', ...].T\n            g = gtop.dot(ka)\n            c = ctop.dot(ka)\n            b = np.zeros((btop.shape[0], btop.shape[1] + ndim_w))\n            b[:, :btop.shape[1]] = btop + gtop.dot(kw)\n            b[:, ndim_w:] += ctop.dot(kw)\n            enew = np.zeros((e.shape[0], e.shape[1] + ndim_w))\n            enew[:, :-ndim_w] = e + d.dot(kw)\n            e = enew\n            d = d.dot(ka)\n            # step 4: transform C to prepare for next iteration\n            rank, g, c, b, d = cls._transform_c_qr(g, c, b, d)\n            # rank, g, c, b, d = cls._transform_c_svd(g, c, b, d)\n\n        g, c, b, d, e = cls._simplify(g, c, b, d, e, ndim_w)\n        return g, c, b, d, e\n\n    @classmethod\n    def _simplify(cls, g, c, b, d, e, ndim_w):\n        \"\"\"Eliminate input derivatives by re-defining state variables.\n        \"\"\"\n        while b.shape[1] > ndim_w:\n            kw = scipy.linalg.solve_triangular(c, 
b[:, ndim_w:])\n            bnew = np.dot(g, -kw)\n            bnew[:, :ndim_w] += b[:, :ndim_w]\n            b = bnew\n            e[:, :kw.shape[1]] -= d.dot(kw)\n        return g, c, b, d, e\n\n    def _build_mna_matrices(self, inputs, outputs, in_type='v'):\n        # type: (Union[str, List[str]], Union[str, List[str]], str) -> Tuple[np.ndarray, ...]\n        \"\"\"Create and return MNA matrices representing this circuit.\n\n        Parameters\n        ----------\n        inputs : Union[str, List[str]]\n            the input voltage/current node name(s).\n        outputs : Union[str, List[str]]\n            the output voltage node name(s).\n        in_type : str\n            set to 'v' for input voltage sources.  Otherwise, current sources.\n\n        Returns\n        -------\n        g : np.ndarray\n            the conductance matrix\n        c : np.ndarray\n            the capacitance/inductance matrix.\n        b : np.ndarray\n            the input-to-state matrix.\n        d : np.ndarray\n            the state-to-output matrix.\n        e : np.ndarray\n            the input-to-output matrix.\n        \"\"\"\n        if isinstance(inputs, list) or isinstance(inputs, tuple):\n            node_ins = [self._node_id[name] for name in inputs]\n        else:\n            node_ins = [self._node_id[inputs]]\n        if isinstance(outputs, list) or isinstance(outputs, tuple):\n            node_outs = [self._node_id[name] for name in outputs]\n        else:\n            node_outs = [self._node_id[outputs]]\n\n        is_voltage = (in_type == 'v')\n\n        # step 1: construct matrices\n        gdata, grows, gcols = [], [], []\n        cdata, crows, ccols = [], [], []\n        # step 1A: gather conductors/vccs\n        for (ridx, cidx), gval in self._gmat_data.items():\n            gdata.append(gval)\n            grows.append(ridx)\n            gcols.append(cidx)\n        # step 1B: gather capacitors\n        for (ridx, cidx), cval in self._cmat_data.items():\n        
    cdata.append(cval)\n            crows.append(ridx)\n            ccols.append(cidx)\n        # step 1C: gather inductors\n        num_states = self._num_n\n        for (node_p, node_n), lval in self._ind_data.items():\n            gdata.append(1)\n            grows.append(node_p)\n            gcols.append(num_states)\n            gdata.append(1)\n            grows.append(num_states)\n            gcols.append(node_p)\n            if node_n >= 0:\n                gdata.append(-1)\n                grows.append(node_n)\n                gcols.append(num_states)\n                gdata.append(-1)\n                grows.append(num_states)\n                gcols.append(node_n)\n            cdata.append(-lval)\n            crows.append(num_states)\n            ccols.append(num_states)\n            num_states += 1\n        # step 1D: add currents from vcvs\n        for node_p, node_n, node_cp, node_cn, gain in self._vcvs_list:\n            # vcvs means vp - vn - A*vcp + A*vcn = 0, and current flows from p to n\n            # current flowing out of p\n            gdata.append(1)\n            grows.append(node_p)\n            gcols.append(num_states)\n            # voltage of p\n            gdata.append(1)\n            grows.append(num_states)\n            gcols.append(node_p)\n            if node_n >= 0:\n                # current flowing into n\n                gdata.append(-1)\n                grows.append(node_n)\n                gcols.append(num_states)\n                # voltage of n\n                gdata.append(-1)\n                grows.append(num_states)\n                gcols.append(node_n)\n            if node_cp >= 0:\n                # voltage of cp\n                gdata.append(-gain)\n                grows.append(num_states)\n                gcols.append(node_cp)\n            if node_cn >= 0:\n                # voltage of cn\n                gdata.append(gain)\n                grows.append(num_states)\n                gcols.append(node_cn)\n            
num_states += 1\n\n        ndim_in = len(node_ins)\n        if is_voltage:\n            # step 1E: add current/voltage from input voltage source\n            b = np.zeros((num_states + ndim_in, ndim_in))\n            for in_idx, node_in in enumerate(node_ins):\n                gdata.append(1)\n                grows.append(node_in)\n                gcols.append(num_states)\n                gdata.append(-1)\n                grows.append(num_states)\n                gcols.append(node_in)\n                b[num_states + in_idx, in_idx] = 1\n            num_states += ndim_in\n        else:\n            # inject current to node_in\n            b = np.zeros((num_states, ndim_in))\n            for in_idx, node_in in enumerate(node_ins):\n                b[node_in, in_idx] = -1\n\n        # step 2: create matrices\n        shape = (num_states, num_states)\n        g = scipy.sparse.csc_matrix((gdata, (grows, gcols)), shape=shape).todense().A\n        c = scipy.sparse.csc_matrix((cdata, (crows, ccols)), shape=shape).todense().A\n        ndim_out = len(node_outs)\n        d = scipy.sparse.csc_matrix((np.ones(ndim_out), (np.arange(ndim_out), node_outs)),\n                                    shape=(ndim_out, num_states)).todense().A\n        e = np.zeros((ndim_out, ndim_in))\n\n        return g, c, b, d, e\n\n    def get_state_space(self, inputs, outputs, in_type='v'):\n        # type: (Union[str, List[str]], Union[str, List[str]], str) -> StateSpaceContinuous\n        \"\"\"Compute the state space model from the given inputs to outputs.\n\n        Parameters\n        ----------\n        inputs : Union[str, List[str]]\n            the input voltage/current node name(s).\n        outputs : Union[str, List[str]]\n            the output voltage node name(s).\n        in_type : str\n            set to 'v' for input voltage sources.  Otherwise, current sources.\n\n        Returns\n        -------\n        system : StateSpaceContinuous\n            the scipy state space object.  
See scipy.signal package on how to use this object.\n        \"\"\"\n        g0, c0, b0, d0, e0 = self._build_mna_matrices(inputs, outputs, in_type)\n        ndim_in = e0.shape[1]\n        g, c, b, d, e = self._reduce_state_space(g0, c0, b0, d0, e0, ndim_in)\n        amat = scipy.linalg.solve_triangular(c, -g)\n        bmat = scipy.linalg.solve_triangular(c, -b)\n        cmat = d\n        e_abs = np.abs(e)\n        tol = np.amax(e_abs) * self._udot_tol\n        if np.count_nonzero(e_abs[:, ndim_in:] > tol) > 0:\n            print('WARNING: output depends on input derivatives.  Ignored.')\n            print('D matrix: ')\n            print(e)\n        dmat = e[:, :ndim_in]\n\n        return StateSpaceContinuous(amat, bmat, cmat, dmat)\n\n    def get_num_den(self, in_name, out_name, in_type='v', atol=0.0):\n        # type: (str, str, str, float) -> Tuple[np.ndarray, np.ndarray]\n        \"\"\"Compute the transfer function between the two given nodes.\n\n        Parameters\n        ----------\n        in_name : str\n            the input voltage/current node name.\n        out_name : Union[str, List[str]]\n            the output voltage node name.\n        in_type : str\n            set to 'v' for input voltage sources.  Otherwise, current sources.\n        atol : float\n            absolute tolerance for checking zeros in the numerator.  
Used to filter out scipy warnings.\n\n        Returns\n        -------\n        num : np.ndarray\n            the numerator polynomial.\n        den : np.ndarray\n            the denominator polynomial.\n        \"\"\"\n        state_space = self.get_state_space(in_name, out_name, in_type=in_type)\n        num, den = scipy.signal.ss2tf(state_space.A, state_space.B, state_space.C, state_space.D)\n        num = num[0, :]\n        # check if numerator has leading zeros.\n        # this makes it so the user have full control over numerical precision, and\n        # avoid scipy bad conditioning warnings.\n        while abs(num[0]) <= atol:\n            num = num[1:]\n\n        return num, den\n\n    def get_transfer_function(self, in_name, out_name, in_type='v', atol=0.0):\n        # type: (str, str, str, float) -> TransferFunctionContinuous\n        \"\"\"Compute the transfer function between the two given nodes.\n\n        Parameters\n        ----------\n        in_name : str\n            the input voltage/current node name.\n        out_name : Union[str, List[str]]\n            the output voltage node name.\n        in_type : str\n            set to 'v' for input voltage sources.  Otherwise, current sources.\n        atol : float\n            absolute tolerance for checking zeros in the numerator.  Used to filter out scipy warnings.\n\n        Returns\n        -------\n        system : TransferFunctionContinuous\n            the scipy transfer function object.  See scipy.signal package on how to use this object.\n        \"\"\"\n        num, den = self.get_num_den(in_name, out_name, in_type=in_type, atol=atol)\n        return TransferFunctionContinuous(num, den)\n\n    def get_impedance(self, node_name, freq, atol=0.0):\n        # type: (str, float, float) -> complex\n        \"\"\"Computes the impedance looking into the given node.\n\n        Parameters\n        ----------\n        node_name : str\n            the node to compute impedance for.  
We will inject a current into this node and measure the voltage\n            on this node.\n        freq : float\n            the frequency to compute the impedance at, in Hertz.\n        atol : float\n            absolute tolerance for checking zeros in the numerator.  Used to filter out scipy warnings.\n\n        Returns\n        -------\n        impedance : complex\n            the impedance value, in Ohms.\n        \"\"\"\n        sys = self.get_transfer_function(node_name, node_name, in_type='i', atol=atol)\n        w_test = 2 * np.pi * freq\n        _, zin_vec = sys.freqresp(w=[w_test])\n        return zin_vec[0]\n\n\ndef get_w_crossings(num, den, atol=1e-8):\n    # type: (np.multiarray.ndarray, np.multiarray.ndarray, float) -> Tuple[Optional[float], Optional[float]]\n    \"\"\"Given the numerator and denominator of the transfer function, compute gain margin/phase margin frequencies.\n\n    To determine the crossover frequencies, we write the transfer function as:\n\n    .. math::\n\n        \\\\frac{A(w) + jB(w)}{C(w) + jD(w)}\n\n    where :math:`A(w)`, :math:`B(w)`, :math:`C(w)`, and :math:`D(w)` are real polynomials.  The gain margin frequency\n    is the frequency at which:\n\n    .. math::\n\n        \\\\frac{B(w)}{A(w)} = \\\\frac{D(w)}{C(w)} \\\\implies A(w)D(w) - B(w)C(w) = 0\n\n\n    The phase margin frequency is the frequency at which:\n\n    .. math::\n\n        \\\\frac{A^2(w) + B^2(w)}{C^2(w) + D^2(w)} = 1 \\implies A^2(w) + B^2(w) - C^2(w) - D^2(w) = 0\n\n    This function solves these two equations and returns the smallest real and positive roots.\n\n    Parameters\n    ----------\n    num : np.multiarray.ndarray\n        the numerator polynomial coefficients array.  index 0 is coefficient for highest term.\n    den : np.multiarray.ndarray\n        the denominator polynomial coefficients array.  
index 0 is coefficient for highest term.\n    atol : float\n        absolute tolerance used to check if the imaginary part of a root is 0, or if a root is greater than 0.\n\n    Returns\n    -------\n    w_phase : Optional[float]\n        lowest positive frequency in rad/s at which the gain becomes unity.  None if no such frequency exist.\n    w_gain : Optional[float]\n        lower positive frequency in rad/s at which the phase becomes 180 degrees.  None if no such frequency exist.\n    \"\"\"\n    # construct A(w), B(w), C(w), and D(w)\n    num_flip = num[::-1]\n    den_flip = den[::-1]\n    avec = np.copy(num_flip)\n    bvec = np.copy(num_flip)\n    cvec = np.copy(den_flip)\n    dvec = np.copy(den_flip)\n    avec[1::2] = 0\n    avec[2::4] *= -1\n    bvec[0::2] = 0\n    bvec[3::4] *= -1\n    cvec[1::2] = 0\n    cvec[2::4] *= -1\n    dvec[0::2] = 0\n    dvec[3::4] *= -1\n\n    apoly = np.poly1d(avec[::-1])\n    bpoly = np.poly1d(bvec[::-1])\n    cpoly = np.poly1d(cvec[::-1])\n    dpoly = np.poly1d(dvec[::-1])\n\n    # solve for w_phase/w_gain\n    poly_list = [apoly**2 + bpoly**2 - cpoly**2 - dpoly**2,\n                 apoly * dpoly - bpoly * cpoly]\n\n    w_list = [None, None]  # type: List[Optional[float]]\n    for idx in range(2):\n        for root in poly_list[idx].roots:\n            root_real = float(root.real)\n            if abs(root.imag) < atol < root_real:\n                w_list_idx = w_list[idx]\n                if w_list_idx is None or root_real < w_list_idx:\n                        w_list[idx] = root_real\n\n    return w_list[0], w_list[1]\n\n\ndef get_w_3db(num, den, atol=1e-8):\n    # type: (np.multiarray.ndarray, np.multiarray.ndarray, float) -> Optional[float]\n    \"\"\"Given the numerator and denominator of the transfer function, compute the 3dB frequency.\n\n    To determine the 3dB frequency, we first normalize the transfer function so that its DC gain is one,\n    then we write the transfer function as:\n\n    .. 
math::\n\n        \\\\frac{A(w) + jB(w)}{C(w) + jD(w)}\n\n    where :math:`A(w)`, :math:`B(w)`, :math:`C(w)`, and :math:`D(w)` are real polynomials.  The 3dB frequency\n    is the frequency at which:\n\n    .. math::\n\n        \\\\frac{A^2(w) + B^2(w)}{C^2(w) + D^2(w)} = 0.5 \\implies A^2(w) + B^2(w) - 0.5\\\\left(C^2(w) + D^2(w)\\\\right) = 0\n\n    This function solves this equation and returns the smallest real and positive roots.\n\n    Parameters\n    ----------\n    num : np.multiarray.ndarray\n        the numerator polynomial coefficients array.  index 0 is coefficient for highest term.\n    den : np.multiarray.ndarray\n        the denominator polynomial coefficients array.  index 0 is coefficient for highest term.\n    atol : float\n        absolute tolerance used to check if the imaginary part of a root is 0, or if a root is greater than 0.\n\n    Returns\n    -------\n    w_3db : Optional[float]\n        the 3dB frequency in rad/s.  None if no such frequency exist.\n    \"\"\"\n    # construct A(w), B(w), C(w), and D(w) of normalized transfer function\n    num_flip = num[::-1] / num[-1]\n    den_flip = den[::-1] / den[-1]\n    avec = np.copy(num_flip)\n    bvec = np.copy(num_flip)\n    cvec = np.copy(den_flip)\n    dvec = np.copy(den_flip)\n    avec[1::2] = 0\n    avec[2::4] *= -1\n    bvec[0::2] = 0\n    bvec[3::4] *= -1\n    cvec[1::2] = 0\n    cvec[2::4] *= -1\n    dvec[0::2] = 0\n    dvec[3::4] *= -1\n\n    apoly = np.poly1d(avec[::-1])\n    bpoly = np.poly1d(bvec[::-1])\n    cpoly = np.poly1d(cvec[::-1])\n    dpoly = np.poly1d(dvec[::-1])\n\n    # solve for w_phase/w_gain\n    poly = apoly**2 + bpoly**2 - (cpoly**2 + dpoly**2) / 2  # type: np.poly1d\n    w_ans = None\n    for root in poly.roots:\n        root_real = float(root.real)\n        if abs(root.imag) < atol < root_real and (w_ans is None or root_real < w_ans):\n            w_ans = root_real\n\n    return w_ans\n\n\ndef get_stability_margins(num, den, rtol=1e-8, atol=1e-8):\n    # type: 
(np.multiarray.ndarray, np.multiarray.ndarray, float, float) -> Tuple[float, float]\n    \"\"\"Given the numerator and denominator of the transfer function, compute phase and gain margins.\n\n    Parameters\n    ----------\n    num : np.multiarray.ndarray\n        the numerator polynomial coefficients array.  index 0 is coefficient for highest term.\n    den : np.multiarray.ndarray\n        the denominator polynomial coefficients array.  index 0 is coefficient for highest term.\n    rtol : float\n        relative tolerance.  Used to check if two frequencies are equal.\n    atol : float\n        absolute tolerance.  Used to check a number is equal to 0.\n\n    Returns\n    -------\n    phase_margin : float\n        the phase margin in degrees. If the system is unstable, a negative number is returned.\n    gain_margin : float\n        the gain margin.\n    \"\"\"\n    poly_n = np.poly1d(num)\n    poly_d = np.poly1d(den)\n\n    # compute gain margin.\n    w_phase, w_gain = get_w_crossings(num, den, atol=atol)\n    if w_gain is None:\n        gain_margin = float('inf')\n    else:\n        gain_margin = abs(poly_d(1j * w_gain) / poly_n(1j * w_gain))\n\n    # compute phase margin\n    if w_phase is None:\n        # gain never equal to 1.  That means gain is always greater than 1 or gain is always less than 1.\n        dc_gain = poly_n(0) / poly_d(0)\n        if dc_gain < 1 - max(rtol, atol):\n            # gain is always less than 1, infinite phase margin\n            phase_margin = float('inf')\n        else:\n            # gain is always greater than 1, unstable\n            phase_margin = -1\n    elif w_gain is not None and w_phase > w_gain + max(w_gain * rtol, atol):\n        # unity gain frequency > 180 degree frequency, we're unstable\n        phase_margin = -1\n    else:\n        phase_margin = np.angle(poly_n(1j * w_phase) / poly_d(1j * w_phase), deg=True) + 180\n\n    return phase_margin, gain_margin\n"
  },
  {
    "path": "bag/data/ltv.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This module defines functions and classes for linear time-varying circuits data post-processing.\n\"\"\"\n\nimport numpy as np\nimport scipy.interpolate as interp\nimport scipy.sparse as sparse\n\n\ndef _even_quotient(a, b, tol=1e-6):\n    \"\"\"Returns a / b if it is an integer, -1 if it is not..\"\"\"\n    num = int(round(a / b))\n    if abs(a - b * num) < abs(b * tol):\n        return num\n    return -1\n\n\nclass LTVImpulseFinite(object):\n    r\"\"\"A class that computes finite impulse response of a linear time-varying circuit.\n\n    This class computes the time-varying impulse response based on PSS/PAC simulation\n    data, and provides several useful query methods.  Your simulation should be set up\n    as follows:\n\n    #. Setup PSS as usual.  We will denote system period as tper and fc = 1/tper.\n\n    #. In PAC, set the maxmimum sidebands to m.\n\n    #. In PAC, set the input frequency sweep to be absolute, and sweep from 0 to\n       n * fstep in steps of fstep, where fstep = fc / k for some integer k.\n\n       k should be chosen so that the output settles back to 0 after time k * tper.  k\n       should also be chosen such that fstep is a nice round frequency.  Otherwise,\n       numerical errors may introduce strange results.\n\n       n should be chosen so that n * fstep is sufficiently large compared to system\n       bandwidth.\n\n    #. In PAC options, set the freqaxis option to be \"in\".\n\n    #. After simulation, PAC should save the output frequency response as a function of\n       output harmonic number and input frequency.  
Post-process this into a complex 2D\n       matrix hmat with shape (2 * m + 1, n + 1), and pass it to this class's constructor.\n\n    Parameters\n    ----------\n    hmat : np.ndarray\n        the PAC simulation data matrix with shape (2 * m + 1, n + 1).\n        hmat[a + m, b] is the complex AC gain from input frequency b * fc / k\n        to output frequency a * fc + b * fc / k.\n    m : int\n        number of output sidebands.\n    n : int\n        number of input frequencies.\n    tper : float\n        the system period, in seconds.\n    k : int\n        the ratio between period of the input impulse train and the system period.\n        Must be an integer.\n    out0 : :class:`numpy.ndarray`\n        steady-state output transient waveform with 0 input over 1 period.  This should\n        be a two-column array, where the first column is time vector and second column\n        is the output.  Used to compute transient response.\n\n    Notes\n    -----\n    This class uses the algorithm described in [1]_ to compute impulse response from PSS/PAC\n    simulation data.  The impulse response :math:`h(t, \\tau)` satisfies the following equation:\n\n    .. math:: y(t) = \\int_{-\\infty}^{\\infty} h(t, \\tau) \\cdot x(\\tau)\\ d\\tau\n\n    Intuitively, :math:`h(t, \\tau)` represents the output at time :math:`t` subject to\n    an impulse at time :math:`\\tau`.  As described in the paper, If :math:`w_c` is the system\n    frequency, and :math:`H_m(jw)` is the frequency response of the system at :math:`mw_c + w`\n    due to an input sinusoid with frequency :math:`w`, then the impulse response can be calculated as:\n\n    .. math::\n\n        h(t, \\tau) = \\frac{1}{kT}\\sum_{n=-\\infty}^{\\infty}\\sum_{m=-\\infty}^{\\infty}\n        H_m\\left (j\\dfrac{nw_c}{k}\\right) \\exp \\left[ jmw_ct + j\\dfrac{nw_c}{k} (t - \\tau)\\right]\n\n    where :math:`0 \\le \\tau < T` and :math:`\\tau \\le t \\le \\tau + kT`.\n\n    References\n    ----------\n    .. [1] J. Kim, B. S. 
Leibowitz and M. Jeeradit, \"Impulse sensitivity function analysis of\n       periodic circuits,\" 2008 IEEE/ACM International Conference on Computer-Aided Design,\n       San Jose, CA, 2008, pp. 386-391.\n\n    .. automethod:: __call__\n    \"\"\"\n    def __init__(self, hmat, m, n, tper, k, out0):\n        hmat = np.asarray(hmat)\n        if hmat.shape != (2 * m + 1, n + 1):\n            raise ValueError('hmat shape = %s not compatible with M=%d, N=%d' %\n                             (hmat.shape, m, n))\n\n        # use symmetry to fill in negative input frequency data.\n        fullh = np.empty((2 * m + 1, 2 * n + 1), dtype=complex)\n        fullh[:, n:] = hmat / (k * tper)\n        fullh[:, :n] = np.fliplr(np.flipud(fullh[:, n + 1:])).conj()\n\n        self.hmat = fullh\n        wc = 2.0 * np.pi / tper\n        self.m_col = np.arange(-m, m + 1) * (1.0j * wc)\n        self.n_col = np.arange(-n, n + 1) * (1.0j * wc / k)\n        self.m_col = self.m_col.reshape((-1, 1))\n        self.n_col = self.n_col.reshape((-1, 1))\n        self.tper = tper\n        self.k = k\n        self.outfun = interp.interp1d(out0[:, 0], out0[:, 1], bounds_error=True,\n                                      assume_sorted=True)\n\n    @staticmethod\n    def _print_debug_msg(result):\n        res_imag = np.imag(result).flatten()\n        res_real = np.real(result).flatten()\n        res_ratio = np.abs(res_imag / (res_real + 1e-18))\n        idx = np.argmax(res_ratio)\n        print('max imag/real ratio: %.4g, imag = %.4g, real = %.4g' %\n              (res_ratio[idx], res_imag[idx], res_real[idx]))\n\n    def __call__(self, t, tau, debug=False):\n        \"\"\"Calculate h(t, tau).\n\n        Compute h(t, tau), which is the output at t subject to an impulse\n        at time tau. 
standard numpy broadcasting rules apply.\n\n        Parameters\n        ----------\n        t : array-like\n            the output time.\n        tau : array-like\n            the input impulse time.\n        debug : bool\n            True to print debug messages.\n\n        Returns\n        -------\n        val : :class:`numpy.ndarray`\n            the time-varying impulse response evaluated at the given coordinates.\n        \"\"\"\n        # broadcast arguments to same shape\n        t, tau = np.broadcast_arrays(t, tau)\n\n        # compute impulse using efficient matrix multiply and numpy broadcasting.\n        dt = t - tau\n        zero_indices = (dt < 0) | (dt > self.k * self.tper)\n        t_row = t.reshape((1, -1))\n        dt_row = dt.reshape((1, -1))\n        tmp = np.dot(self.hmat, np.exp(np.dot(self.n_col, dt_row))) * np.exp(np.dot(self.m_col, t_row))\n        result = np.sum(tmp, axis=0).reshape(dt.shape)\n\n        # zero element such that dt < 0 or dt > k * T.\n        result[zero_indices] = 0.0\n\n        if debug:\n            self._print_debug_msg(result)\n\n        # discard imaginary part\n        return np.real(result)\n\n    def _get_core(self, num_points, debug=False):\n        \"\"\"Returns h(dt, tau) matrix and output waveform over 1 period.  
Used by lsim.\n\n        Compute h(dt, tau) for 0 <= tau < T and 0 <= dt < kT, where dt = t - tau.\n        \"\"\"\n        dt_vec = np.linspace(0.0, self.k * self.tper, self.k * num_points, endpoint=False)  # type: np.ndarray\n        tvec_per = dt_vec[:num_points]\n        tau_col = tvec_per.reshape((-1, 1))\n        dt_row = dt_vec.reshape((1, -1))\n        # use matrix multiply to sum across n\n        tmp = np.dot(self.hmat, np.exp(np.dot(self.n_col, dt_row)))\n        # use broadcast multiply for exp(-jwm*(t-tau)) term\n        tmp = tmp * np.exp(np.dot(self.m_col, dt_row))\n        # use matrix multiply to sum across m\n        result = np.dot(np.exp(np.dot(tau_col, self.m_col.T)), tmp).T\n\n        if debug:\n            self._print_debug_msg(result)\n\n        # discard imaginary part\n        result = np.real(result)\n        # compute output waveform\n        wvfm = self.outfun(tvec_per)\n        return result, wvfm\n\n    def visualize(self, fig_idx, num_points, num_period,\n                  plot_color=True, plot_3d=False, show=True):\n        \"\"\"Visualize the time-varying impulse response.\n\n        Parameters\n        ----------\n        fig_idx : int\n            starting figure index.\n        num_points : int\n            number of sample points in a period.\n        num_period : int\n            number of output period.\n        plot_color : bool\n            True to create a plot of the time-varying impulse response as 2D color plot.\n        plot_3d : bool\n            True to create a 3D plot of the impulse response.\n        show : bool\n            True to show the plots immediately.  
Set to False if you want to create some\n            other plots.\n        \"\"\"\n        if not plot_color and not plot_3d:\n            # do nothing.\n            return\n        tot_points = num_period * num_points\n        tau_vec = np.linspace(0, self.tper, num_points, endpoint=False)\n        dt_vec = np.linspace(0, num_period * self.tper, tot_points, endpoint=False)\n        dt, tau = np.meshgrid(dt_vec, tau_vec, indexing='ij', copy=False)\n        t = tau + dt\n\n        result, _ = self._get_core(num_points)\n        result = result[:num_period * num_points, :]\n\n        import matplotlib.pyplot as plt\n        from matplotlib import cm\n\n        if plot_color:\n            # plot 2D color\n            fig = plt.figure(fig_idx)\n            fig_idx += 1\n            ax = fig.gca()\n            cp = ax.pcolor(t, tau, result, cmap=cm.cubehelix)\n            plt.colorbar(cp)\n            ax.set_title('Impulse response contours')\n            ax.set_ylabel('impulse time')\n            ax.set_xlabel('output time')\n\n        if plot_3d:\n            # plot 3D impulse response\n            # noinspection PyUnresolvedReferences\n            from mpl_toolkits.mplot3d import Axes3D\n\n            fig = plt.figure(fig_idx)\n            ax = fig.add_subplot(111, projection='3d')\n            ax.plot_surface(t, tau, result, rstride=1, cstride=1, linewidth=0, cmap=cm.cubehelix)\n            ax.set_title('Impulse response')\n            ax.set_ylabel('impulse time')\n            ax.set_xlabel('output time')\n\n        if show:\n            plt.show()\n\n    def lsim(self, u, tstep, tstart=0.0, ac_only=False, periodic=False, debug=False):\n        r\"\"\"Compute the output waveform given input waveform.\n\n        This method assumes zero initial state.  
The output waveform will be the\n        same length as the input waveform, so pad zeros if necessary.\n\n        Parameters\n        ----------\n        u : array-like\n            the input waveform.\n        tstep : float\n            the input/output time step, in seconds.  Must evenly divide system period.\n        tstart : float\n            the time corresponding to u[0].  Assume u = 0 for all time before tstart.\n            Defaults to 0.\n        ac_only : bool\n            Return output waveform due to AC input only and without steady-state\n            transient.\n        periodic : bool\n            True if the input is periodic.  If so, returns steady state output.\n        debug : bool\n            True to print debug messages.\n\n        Returns\n        -------\n        y : :class:`numpy.ndarray`\n            the output waveform.\n\n        Notes\n        -----\n        This method computes the integral:\n\n        .. math:: y(t) = \\int_{-\\infty}^{\\infty} h(t, \\tau) \\cdot x(\\tau)\\ d\\tau\n\n        using the following algorithm:\n\n        #. set :math:`d\\tau = \\texttt{tstep}`.\n        #. Compute :math:`h(\\tau + dt, \\tau)` for :math:`0 \\le dt < kT` and\n           :math:`0 \\le \\tau < T`, then express as a kN-by-N matrix.  This matrix\n           completely describes the time-varying impulse response.\n        #. tile the impulse response matrix horizontally until its number of columns\n           matches input signal length, then multiply column i by u[i].\n        #. Compute y as the sum of all anti-diagonals of the matrix computed in\n           previous step, multiplied by :math:`d\\tau`.  
Truncate if necessary.\n        \"\"\"\n        u = np.asarray(u)\n        nstep = _even_quotient(self.tper, tstep)\n        ndelay = _even_quotient(tstart, tstep)\n\n        # error checking\n        if len(u.shape) != 1:\n            raise ValueError('u must be a 1D array.')\n        if nstep < 0:\n            raise ValueError('Time step = %.4g does not evenly divide'\n                             'System period = %.4g' % (tstep, self.tper))\n        if ndelay < 0:\n            raise ValueError('Time step = %.4g does not evenly divide'\n                             'Startimg time = %.4g' % (tstep, tstart))\n        if periodic and nstep != u.size:\n            raise ValueError('Periodic waveform must have same period as system period.')\n\n        # calculate and tile hcore\n        ntot = u.size\n        hcore, outwv = self._get_core(nstep, debug=debug)\n        hcore = np.roll(hcore, -ndelay, axis=1)\n        outwv = np.roll(outwv, -ndelay)\n\n        if periodic:\n            # input periodic; more efficient math.\n            hcore *= u\n            hcore = np.tile(hcore, (1, self.k + 1))\n            y = np.bincount(np.sum(np.indices(hcore.shape), axis=0).flat, hcore.flat)\n            y = y[self.k * nstep:(self.k + 1) * nstep] * tstep\n        else:\n            ntile = int(np.ceil(ntot * 1.0 / nstep))\n            hcore = np.tile(hcore, (1, ntile))\n            outwv = np.tile(outwv, (ntile,))\n            hcore = hcore[:, :ntot]\n            outwv = outwv[:ntot]\n\n            # broadcast multiply\n            hcore *= u\n            # magic code from stackoverflow\n            # returns an array of the sums of all anti-diagonals.\n            y = np.bincount(np.sum(np.indices(hcore.shape), axis=0).flat, hcore.flat)[:ntot] * tstep\n\n        if not ac_only:\n            # add output steady state transient\n            y += outwv\n        return y\n\n    def lsim_digital(self, tsym, tstep, data, pulse, tstart=0.0, nchain=1, tdelta=0.0, **kwargs):\n        
\"\"\"Compute output waveform given input pulse shape and data.\n\n        This method is similar to :func:`~bag.data.ltv.LTVImpulseFinite.lsim`, but\n        assumes the input is superposition of shifted and scaled copies of a given\n        pulse waveform.  This assumption speeds up the computation and is useful\n        for high speed link design.\n\n        Parameters\n        ----------\n        tsym : float\n            the symbol period, in seconds.  Must evenly divide system period.\n        tstep : float\n            the output time step, in seconds.  Must evenly divide symbol period.\n        data : list[float]\n            list of symbol values.\n        pulse : np.ndarray\n            the pulse waveform as a two-column array.  The first column is time,\n            second column is pulse waveform value.  Linear interpolation will be used\n            if necessary.  Time must start at 0.0 and be increasing.\n        tstart : float\n            time of the first data symbol.  Defaults to 0.0\n        nchain : int\n            number of blocks in a chain.  Defaults to 1.  This argument is useful if\n            you have multiple blocks cascaded together in a chain, and you wish to find\n            the output waveform at the end of the chain.\n        tdelta : float\n            time difference between adjacent elements in a chain.  Defaults to 0.  
This\n            argument is useful for simulating a chain of latches, where blocks operate\n            on alternate phases of the clock.\n        kwargs : dict[str, any]\n            additional keyword arguments for :func:`~bag.data.ltv.LTVImpulseFinite.lsim`.\n\n        Returns\n        -------\n        output : :class:`numpy.ndarray`\n            the output waveform over N symbol period, where N is the given data length.\n        \"\"\"\n        # check tsym evenly divides system period\n        nsym = _even_quotient(self.tper, tsym)\n        if nsym < 0:\n            raise ValueError('Symbol period %.4g does not evenly divide '\n                             'system period %.4g' % (tsym, self.tper))\n\n        # check tstep evenly divides tsym\n        nstep = _even_quotient(tsym, tstep)\n        if nstep < 0:\n            raise ValueError('Time step %.4g does not evenly divide '\n                             'symbol period %.4g' % (tstep, tsym))\n\n        # check tstep evenly divides tstart\n        ndelay = _even_quotient(tstart, tstep)\n        if ndelay < 0:\n            raise ValueError('Time step %.4g does not evenly divide '\n                             'starting time %.4g' % (tstep, tstart))\n\n        nper = nstep * nsym\n\n        pulse = np.asarray(pulse)\n        tvec = pulse[:, 0]\n        pvec = pulse[:, 1]\n\n        # find input length\n        # noinspection PyUnresolvedReferences\n        nlast = min(np.nonzero(pvec)[0][-1] + 1, tvec.size - 1)\n        tlast = tvec[nlast]\n        ntot = int(np.ceil(tlast / tstep)) + nchain * self.k * nper + nstep * (nsym - 1)\n\n        # interpolate input\n        pfun = interp.interp1d(tvec, pvec, kind='linear', copy=False, bounds_error=False,\n                               fill_value=0.0, assume_sorted=True)\n        tin = np.linspace(0.0, ntot * tstep, ntot, endpoint=False)\n        pin = pfun(tin)\n\n        # super-impose pulse responses\n        num_out = len(data) * nstep\n        output = 
np.zeros(num_out)\n        for idx in range(nsym):\n            # get output pulse response\n            pout = pin\n            for j in range(nchain):\n                pout = self.lsim(pout, tstep, tstart=tstart + j * tdelta, periodic=False,\n                                 ac_only=True, **kwargs)\n\n            # construct superposition matrix\n            cur_data = data[idx::nsym]\n            offsets = np.arange(0, len(cur_data) * nper, nper) * -1\n            diags = np.tile(cur_data, (ntot, 1)).T\n            dia_mat = sparse.dia_matrix((diags, offsets), shape=(num_out, ntot))\n\n            # superimpose\n            output += dia_mat.dot(pout)\n            # shift input pulse.\n            pin = np.roll(pin, nstep)\n\n        # compute output steady state waveform\n        out_pss = self.outfun(np.linspace(0.0, self.tper, nper, endpoint=False))\n        out_pss = np.roll(out_pss, -ndelay)\n        for j in range(1, nchain):\n            out_pss = self.lsim(out_pss, tstep, tstart=tstart + j * tdelta, periodic=True,\n                                ac_only=False, **kwargs)\n\n        ntile = int(np.ceil(num_out * 1.0 / nper))\n        out_pss = np.tile(out_pss, (ntile,))\n        output += out_pss[:num_out]\n\n        return output\n"
  },
  {
    "path": "bag/data/mos.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This module defines classes for computing DC operating point.\n\"\"\"\n\nfrom typing import Dict\n\nimport numpy as np\n\n\ndef mos_y_to_ss(sim_data, char_freq, fg, ibias, cfit_method='average'):\n    # type: (Dict[str, np.ndarray], float, int, np.ndarray, str) -> Dict[str, np.ndarray]\n    \"\"\"Convert transistor Y parameters to small-signal parameters.\n\n    This function computes MOSFET small signal parameters from 3-port\n    Y parameter measurements done on gate, drain and source, with body\n    bias fixed.  This functions fits the Y parameter to a capcitor-only\n    small signal model using least-mean-square error.\n\n    Parameters\n    ----------\n    sim_data : Dict[str, np.ndarray]\n        A dictionary of Y parameters values stored as complex numpy arrays.\n    char_freq : float\n        the frequency Y parameters are measured at.\n    fg : int\n        number of transistor fingers used for the Y parameter measurement.\n    ibias : np.ndarray\n        the DC bias current of the transistor.  Always positive.\n    cfit_method : str\n        method used to extract capacitance from Y parameters.  Currently\n        supports 'average' or 'worst'\n\n    Returns\n    -------\n    ss_dict : Dict[str, np.ndarray]\n        A dictionary of small signal parameter values stored as numpy\n        arrays.  
These values are normalized to 1-finger transistor.\n    \"\"\"\n    w = 2 * np.pi * char_freq\n\n    gm = (sim_data['y21'].real - sim_data['y31'].real) / 2.0  # type: np.ndarray\n    gds = (sim_data['y22'].real - sim_data['y32'].real) / 2.0  # type: np.ndarray\n    gb = (sim_data['y33'].real - sim_data['y23'].real) / 2.0 - gm - gds  # type: np.ndarray\n\n    cgd12 = -sim_data['y12'].imag / w\n    cgd21 = -sim_data['y21'].imag / w\n    cgs13 = -sim_data['y13'].imag / w\n    cgs31 = -sim_data['y31'].imag / w\n    cds23 = -sim_data['y23'].imag / w\n    cds32 = -sim_data['y32'].imag / w\n    cgg = sim_data['y11'].imag / w\n    cdd = sim_data['y22'].imag / w\n    css = sim_data['y33'].imag / w\n\n    if cfit_method == 'average':\n        cgd = (cgd12 + cgd21) / 2  # type: np.ndarray\n        cgs = (cgs13 + cgs31) / 2  # type: np.ndarray\n        cds = (cds23 + cds32) / 2  # type: np.ndarray\n    elif cfit_method == 'worst':\n        cgd = np.maximum(cgd12, cgd21)\n        cgs = np.maximum(cgs13, cgs31)\n        cds = np.maximum(cds23, cds32)\n    else:\n        raise ValueError('Unknown cfit_method = %s' % cfit_method)\n\n    cgb = cgg - cgd - cgs  # type: np.ndarray\n    cdb = cdd - cds - cgd  # type: np.ndarray\n    csb = css - cgs - cds  # type: np.ndarray\n\n    ibias = ibias / fg\n    gm = gm / fg\n    gds = gds / fg\n    gb = gb / fg\n    cgd = cgd / fg\n    cgs = cgs / fg\n    cds = cds / fg\n    cgb = cgb / fg\n    cdb = cdb / fg\n    csb = csb / fg\n\n    return dict(\n        ibias=ibias,\n        gm=gm,\n        gds=gds,\n        gb=gb,\n        cgd=cgd,\n        cgs=cgs,\n        cds=cds,\n        cgb=cgb,\n        cdb=cdb,\n        csb=csb,\n    )\n"
  },
  {
    "path": "bag/data/plot.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This module contains utilities to improve waveform plotting in python.\n\"\"\"\n\nimport numpy as np\nimport scipy.interpolate as interp\n\nfrom matplotlib.lines import Line2D\nfrom matplotlib.figure import Figure\nfrom matplotlib.text import Annotation\nimport matplotlib.pyplot as plt\n\nfrom ..math import float_to_si_string\n\n# Vega category10 palette\ncolor_cycle = ['#1f77b4', '#ff7f0e',\n               '#2ca02c', '#d62728',\n               '#9467bd', '#8c564b',\n               '#e377c2', '#7f7f7f',\n               '#bcbd22', '#17becf',\n               ]\n\n\ndef figure(fig_id, picker=5.0):\n    \"\"\"Create a WaveformPlotter.\n\n    Parameters\n    ----------\n    fig_id : int\n        the figure ID.\n    picker : float\n        picker event pixel tolerance.\n\n    Returns\n    -------\n    plotter : bag.data.plot.WaveformPlotter\n        a plotter that helps you make interactive matplotlib figures.\n    \"\"\"\n    return WaveformPlotter(fig_id, picker=picker)\n\n\ndef plot_waveforms(xvec, panel_list, fig=1):\n    \"\"\"Plot waveforms in vertical panels with shared X axis.\n\n    Parameters\n    ----------\n    xvec : :class:`numpy.ndarray`\n        the X data.\n    panel_list : list[list[(str, :class:`numpy.ndarray`)]]\n        list of lists of Y data.  Each sub-list is one panel.  
Each element of the sub-list\n        is a tuple of signal name and signal data.\n    fig : int\n        the figure ID.\n    \"\"\"\n    nrow = len(panel_list)\n\n    if nrow > 0:\n        myfig = plt.figure(fig, FigureClass=MarkerFigure)  # type: MarkerFigure\n        ax0 = None\n        for idx, panel in enumerate(panel_list):\n            if ax0 is None:\n                ax = plt.subplot(nrow, 1, idx + 1)\n                ax0 = ax\n            else:\n                ax = plt.subplot(nrow, 1, idx + 1, sharex=ax0)\n\n            for name, sig in panel:\n                ax.plot(xvec, sig, label=name, picker=5.0)\n\n            box = ax.get_position()\n            ax.set_position([box.x0, box.y0, box.width * 0.9, box.height])\n\n            # Put a legend to the right of the current axis\n            ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))\n\n        myfig.setup_callbacks()\n        plt.show(block=False)\n\n\ndef _fpart(x):\n    return x - int(x)\n\n\ndef _rfpart(x):\n    return 1 - _fpart(x)\n\n\ndef draw_line(x0, y0, x1, y1, xmax, grid):\n    \"\"\"Draws an anti-aliased line in img from p1 to p2 with the given color.\"\"\"\n\n    if x0 > x1:\n        # x1 is wrapped around\n        x1 += xmax\n\n    dx, dy = x1 - x0, y1 - y0\n    steep = dx < abs(dy)\n    if steep:\n        x0, y0, x1, y1, dx, dy = y0, x0, y1, x1, dy, dx\n\n    gradient = dy * 1.0 / dx\n    # handle first endpoint\n    xpxl1 = int(x0 + 0.5)\n    yend = y0 + gradient * (xpxl1 - x0)\n    xgap = _rfpart(x0 + 0.5)\n    ypxl1 = int(yend)\n    if steep:\n        grid[ypxl1 % xmax, xpxl1] += _rfpart(yend) * xgap\n        grid[(ypxl1 + 1) % xmax, xpxl1] += _fpart(yend) * xgap\n    else:\n        grid[xpxl1 % xmax, ypxl1] += _rfpart(yend) * xgap\n        grid[xpxl1 % xmax, ypxl1 + 1] += _fpart(yend) * xgap\n\n    intery = yend + gradient  # first y-intersection for the main loop\n\n    # do not color second endpoint to avoid double coloring.\n    xpxl2 = int(x1 + 0.5)\n    # main loop\n    if 
steep:\n        for x in range(xpxl1 + 1, xpxl2):\n            xval = int(intery)\n            grid[xval % xmax, x] += _rfpart(intery)\n            grid[(xval + 1) % xmax, x] += _fpart(intery)\n            intery += gradient\n    else:\n        for x in range(xpxl1 + 1,  xpxl2):\n            xval = x % xmax\n            grid[xval, int(intery)] += _rfpart(intery)\n            grid[xval, int(intery) + 1] += _fpart(intery)\n            intery += gradient\n\n\ndef plot_eye_heatmap(fig, tvec, yvec, tper, tstart=None, tend=None, toff=None,\n                     tstep=None, vstep=None,\n                     cmap=None, vmargin=0.05, interpolation='gaussian',\n                     repeat=False):\n    \"\"\"Plot eye diagram heat map.\n\n    Parameters\n    ----------\n    fig : int\n        the figure ID.\n    tvec : np.ndarray\n        the time data.\n    yvec : np.ndarray\n        waveform data.\n    tper : float\n        the eye period.\n    tstart : float\n        starting time.  Defaults to first point.\n    tend : float\n        ending time.  Defaults to last point.\n    toff : float\n        eye offset.  Defaults to 0.\n    tstep : float or None\n        horizontal bin size.  Defaults to using 200 bins.\n    vstep : float or None\n        vertical bin size.  Defaults to using 200 bins.\n    cmap :\n        the colormap used for coloring the heat map.  If None, defaults to cubehelix_r\n    vmargin : float\n        vertical margin in percentage of maximum/minimum waveform values.  Defaults\n        to 5 percent.  This is used so that there is some room between top/bottom of\n        eye and the plot.\n    interpolation : str\n        interpolation method.  Defaults to 'gaussian'.  Use 'none' for no interpolation.\n    repeat : bool\n        True to repeat the eye diagram once to the right.  
This is useful if you\n        want to look at edge transitions.\n    \"\"\"\n    if not toff:\n        toff = 0.0\n    if tstart is None:\n        tstart = tvec[0]\n    if tend is None:\n        tend = tvec[-1]\n\n    if tstep is None:\n        num_h = 200\n    else:\n        num_h = int(np.ceil(tper / tstep))\n\n    arr_idx = (tstart <= tvec) & (tvec < tend)\n    tplot = np.mod((tvec[arr_idx] - toff), tper) / tper * num_h  # type: np.ndarray\n    yplot = yvec[arr_idx]\n\n    # get vertical range\n    ymin, ymax = np.amin(yplot), np.amax(yplot)\n    yrang = (ymax - ymin) * (1 + vmargin)\n    ymid = (ymin + ymax) / 2.0\n    ymin = ymid - yrang / 2.0\n    ymax = ymin + yrang\n\n    if vstep is None:\n        num_v = 200\n    else:\n        num_v = int(np.ceil(yrang / vstep))\n\n    # rescale Y axis\n    yplot = (yplot - ymin) / yrang * num_v\n\n    grid = np.zeros((num_h, num_v), dtype=float)\n    for idx in range(yplot.size - 1):\n        draw_line(tplot[idx], yplot[idx], tplot[idx + 1], yplot[idx + 1], num_h, grid)\n\n    if cmap is None:\n        from matplotlib import cm\n        # noinspection PyUnresolvedReferences\n        cmap = cm.cubehelix_r\n\n    plt.figure(fig)\n    grid = grid.T[::-1, :]\n    if repeat:\n        grid = np.tile(grid, (1, 2))\n        tper *= 2.0\n    plt.imshow(grid, extent=[0, tper, ymin, ymax], cmap=cmap,\n               interpolation=interpolation, aspect='auto')\n    cb = plt.colorbar()\n    cb.set_label('counts')\n    return grid\n\n\ndef plot_eye(fig, tvec, yvec_list, tper, tstart=None, tend=None,\n             toff_list=None, name_list=None, alpha=1.0):\n    \"\"\"Plot eye diagram.\n\n    Parameters\n    ----------\n    fig : int\n        the figure ID.\n    tvec : np.ndarray\n        the time data.\n    yvec_list : list[np.ndarray]\n        list of waveforms to plot in eye diagram.\n    tper : float\n        the period.\n    tstart : float\n        starting time.  Defaults to first point.\n    tend : float\n        ending time. 
 Defaults to last point.\n    toff_list : list[float]\n        offset to apply to each waveform.  Defaults to zeros.\n    name_list : list[str] or None\n        the name of each waveform.  Defaults to numbers.\n    alpha : float\n        the transparency of each trace.  Can be used to mimic heatmap.\n    \"\"\"\n    if not yvec_list:\n        return\n\n    if not name_list:\n        name_list = [str(num) for num in range(len(yvec_list))]\n    if not toff_list:\n        toff_list = [0.0] * len(yvec_list)\n    if tstart is None:\n        tstart = tvec[0]\n    if tend is None:\n        tend = tvec[-1]\n\n    # get new tstep that evenly divides tper and new x vector\n    tstep_given = (tvec[-1] - tvec[0]) / (tvec.size - 1)\n    num_samp = int(round(tper / tstep_given))\n    t_plot = np.linspace(0.0, tper, num_samp, endpoint=False)\n\n    # find tstart and tend in number of tper.\n    nstart = int(np.floor(tstart / tper))\n    nend = int(np.ceil(tend / tper))\n    ncycle = nend - nstart\n    teye = np.linspace(nstart * tper, nend * tper, num_samp * ncycle, endpoint=False)  # type: np.ndarray\n    teye = teye.reshape((ncycle, num_samp))\n\n    myfig = plt.figure(fig, FigureClass=MarkerFigure)  # type: MarkerFigure\n    ax = plt.subplot()\n    legend_lines = []\n    for idx, yvec in enumerate(yvec_list):\n        color = color_cycle[idx % len(color_cycle)]\n        toff = toff_list[idx]\n        # get eye traces\n        yfun = interp.interp1d(tvec - toff, yvec, kind='linear', copy=False, bounds_error=False,\n                               fill_value=np.nan, assume_sorted=True)\n        plot_list = []\n        for cycle_idx in range(ncycle):\n            plot_list.append(t_plot)\n            plot_list.append(yfun(teye[cycle_idx, :]))\n\n        lines = ax.plot(*plot_list, alpha=alpha, color=color, picker=4.0, linewidth=2)\n        legend_lines.append(lines[0])\n\n    # Put a legend to the right of the current axis\n    box = ax.get_position()\n    ax.set_position([box.x0, 
box.y0, box.width * 0.9, box.height])\n    ax.legend(legend_lines, name_list, loc='center left', bbox_to_anchor=(1, 0.5))\n\n    myfig.setup_callbacks()\n    plt.show(block=False)\n\n\ndef _find_closest_point(x, y, xvec, yvec, xnorm, ynorm):\n    \"\"\"Find point on PWL waveform described by xvec, yvec closest to (x, y)\"\"\"\n    xnvec = xvec / xnorm\n    ynvec = yvec / ynorm\n    xn = x / xnorm\n    yn = y / ynorm\n\n    dx = np.diff(xnvec)\n    dy = np.diff(ynvec)\n    px = (xn - xnvec[:-1])\n    py = (yn - ynvec[:-1])\n\n    that = (px * dx + py * dy) / (dx ** 2 + dy ** 2)\n    t = np.minimum(np.maximum(that, 0), 1)\n\n    minx = xnvec[:-1] + t * dx\n    miny = ynvec[:-1] + t * dy\n\n    dist = (minx - xn) ** 2 + (miny - yn) ** 2\n    idx = np.argmin(dist)\n    return minx[idx] * xnorm, miny[idx] * ynorm\n\n\nclass WaveformPlotter(object):\n    \"\"\"A custom matplotlib interactive plotting class.\n\n    This class adds many useful features, such as ability to add/remove markers,\n    ability to toggle waveforms on and off, and so on.\n\n    Parameters\n    ----------\n    fig_idx : int\n        the figure index.\n    picker : float\n        picker event pixel tolerance.\n    normal_width : float\n        normal linewidth.\n    select_width : float\n        selected linewidth.\n    \"\"\"\n\n    def __init__(self, fig_idx, picker=5.0, normal_width=1.5, select_width=3.0):\n        self.figure = plt.figure(fig_idx, FigureClass=MarkerFigure)  # type: MarkerFigure\n        self.picker = picker\n        self.norm_lw = normal_width\n        self.top_lw = select_width\n        self.ax = self.figure.gca()\n        self.ax.set_prop_cycle('color', color_cycle)\n        self.leline_lookup = {}\n        self.letext_lookup = {}\n        self.last_top = None\n        self.legend = None\n        self.resized_legend = False\n\n    def plot(self, *args, **kwargs):\n        if self.figure is None:\n            raise ValueError('figure closed already')\n\n        if 'picker' not 
in kwargs:\n            kwargs['picker'] = self.picker\n        kwargs['linewidth'] = self.norm_lw\n        if 'lw' in kwargs:\n            del kwargs['lw']\n        return self.ax.plot(*args, **kwargs)\n\n    def setup(self):\n        if self.figure is None:\n            raise ValueError('figure closed already')\n\n        self.figure.tight_layout()\n        # Put a legend to the right of the current axis\n        ax_lines, ax_labels = self.ax.get_legend_handles_labels()\n        self.legend = self.ax.legend(ax_lines, ax_labels, loc='center left',\n                                     bbox_to_anchor=(1, 0.5), fancybox=True)\n        le_lines = self.legend.get_lines()\n        le_texts = self.legend.get_texts()\n\n        for leline, letext, axline in zip(le_lines, le_texts, ax_lines):\n            self.leline_lookup[leline] = (letext, axline)\n            self.letext_lookup[letext] = (leline, axline)\n            leline.set_picker(self.picker)\n            letext.set_picker(self.picker)\n            letext.set_alpha(0.5)\n\n        le_texts[-1].set_alpha(1.0)\n        ax_lines[-1].set_zorder(2)\n        ax_lines[-1].set_linewidth(self.top_lw)\n        self.last_top = (le_texts[-1], ax_lines[-1])\n\n        self.figure.register_pick_event(self.leline_lookup, self.legend_line_picked)\n        self.figure.register_pick_event(self.letext_lookup, self.legend_text_picked)\n        self.figure.setup_callbacks()\n        self.figure.canvas.mpl_connect('draw_event', self.fix_legend_location)\n        self.figure.canvas.mpl_connect('close_event', self.figure_closed)\n        self.figure.canvas.mpl_connect('resize_event', self.figure_resized)\n\n    # noinspection PyUnusedLocal\n    def figure_closed(self, event):\n        self.figure.close_figure()\n        self.figure = None\n        self.ax = None\n        self.leline_lookup = None\n        self.letext_lookup = None\n        self.last_top = None\n        self.legend = None\n\n    # noinspection PyUnusedLocal\n    def 
figure_resized(self, event):\n        self.resized_legend = False\n        self.fix_legend_location(None)\n\n    # noinspection PyUnusedLocal\n    def fix_legend_location(self, event):\n        if not self.resized_legend:\n            self.figure.tight_layout()\n            inv_tran = self.figure.transFigure.inverted()\n            leg_box = inv_tran.transform(self.legend.get_window_extent())\n            leg_width = leg_box[1][0] - leg_box[0][0]\n            box = self.ax.get_position()\n            # print box.x0, box.y0, box.width, box.height, leg_width, leg_frame.get_height()\n            self.ax.set_position([box.x0, box.y0, box.width - leg_width, box.height])\n            self.resized_legend = True\n            self.figure.canvas.draw()\n\n    def legend_line_picked(self, artist):\n        letext, axline = self.leline_lookup[artist]\n        visible = not axline.get_visible()\n        if visible:\n            artist.set_alpha(1.0)\n        else:\n            artist.set_alpha(0.2)\n        if visible and (self.last_top[1] is not axline):\n            # set to be top line\n            self.legend_text_picked(letext, draw=False)\n        self.figure.set_line_visibility(axline, visible)\n\n    def legend_text_picked(self, artist, draw=True):\n        leline, axline = self.letext_lookup[artist]\n        self.last_top[0].set_alpha(0.5)\n        self.last_top[1].set_zorder(1)\n        self.last_top[1].set_linewidth(self.norm_lw)\n        axline.set_zorder(2)\n        artist.set_alpha(1.0)\n        axline.set_linewidth(self.top_lw)\n        self.last_top = (artist, axline)\n\n        # if draw is False, this method is not called from\n        # legend_line_picked(), so we'll never have recursion issues.\n        if draw:\n            if not axline.get_visible():\n                # set line to be visible if not\n                # draw() will be called in legend_line_picked\n                self.legend_line_picked(leline)\n            else:\n                
self.figure.canvas.draw()\n\n\n# noinspection PyAbstractClass\nclass MarkerFigure(Figure):\n    def __init__(self, **kwargs):\n        Figure.__init__(self, **kwargs)\n        self.markers = []\n        self.epsilon = 10.0\n        self.drag_idx = -1\n        self.timer = None\n        self.marker_line_info = None\n        self.pick_sets = []\n        self.pick_funs = []\n\n    def set_line_visibility(self, axline, visible):\n        axline.set_visible(visible)\n        if not visible:\n            # delete all markers on this line\n            del_idx_list = [idx for idx, item in enumerate(self.markers) if item[2] is axline]\n            for targ_idx in reversed(del_idx_list):\n                an, pt, _, _ = self.markers[targ_idx]\n                del self.markers[targ_idx]\n                # print targ_idx, an\n                an.set_visible(False)\n                pt.set_visible(False)\n\n        self.canvas.draw()\n\n    def register_pick_event(self, artist_set, fun):\n        self.pick_sets.append(artist_set)\n        self.pick_funs.append(fun)\n\n    def on_button_release(self, event):\n        \"\"\"Disable data cursor dragging. \"\"\"\n        if event.button == 1:\n            self.drag_idx = -1\n\n    def on_motion(self, event):\n        \"\"\"Move data cursor around. 
\"\"\"\n        ax = event.inaxes\n        if self.drag_idx >= 0 and ax is not None and event.button == 1:\n            xmin, xmax = ax.get_xlim()\n            ymin, ymax = ax.get_ylim()\n            anno, pt, line, bg = self.markers[self.drag_idx]\n            x, y = _find_closest_point(event.xdata, event.ydata,\n                                       line.get_xdata(), line.get_ydata(),\n                                       xmax - xmin, ymax - ymin)\n            pt.set_data([x], [y])\n            xstr, ystr = float_to_si_string(x, 4), float_to_si_string(y, 4)\n            anno.set_text('x: %s\\ny: %s' % (xstr, ystr))\n            anno.xy = (x, y)\n            self.canvas.restore_region(bg)\n            anno.set_visible(True)\n            pt.set_visible(True)\n            ax.draw_artist(anno)\n            ax.draw_artist(pt)\n            self.canvas.blit(ax.bbox)\n\n    def _get_idx_under_point(self, event):\n        \"\"\"Find selected data cursor.\"\"\"\n        mx = event.x\n        my = event.y\n        mind = None\n        minidx = None\n        # find closest marker point\n        for idx, (an, pt, _, _) in enumerate(self.markers):\n            xv, yv = pt.get_xdata()[0], pt.get_ydata()[0]\n            xp, yp = event.inaxes.transData.transform([xv, yv])\n            # print xv, yv, xp, yp, mx, my\n            d = ((mx - xp) ** 2 + (my - yp) ** 2) ** 0.5\n            if mind is None or d < mind:\n                mind = d\n                minidx = idx\n\n        if mind is not None and mind < self.epsilon:\n            return minidx\n        return -1\n\n    def on_pick(self, event):\n        artist = event.artist\n        if not artist.get_visible():\n            return\n        for idx, artist_set in enumerate(self.pick_sets):\n            if artist in artist_set:\n                self.pick_funs[idx](artist)\n                return\n\n        if isinstance(artist, Line2D):\n            mevent = event.mouseevent\n            # figure out if we picked marker 
or line\n            self.drag_idx = self._get_idx_under_point(mevent)\n\n            if self.drag_idx >= 0:\n                # picked marker.\n                ax = mevent.inaxes\n                an, pt, _, _ = self.markers[self.drag_idx]\n                an.set_visible(False)\n                pt.set_visible(False)\n                self.canvas.draw()\n                self.markers[self.drag_idx][-1] = self.canvas.copy_from_bbox(ax.bbox)\n                an.set_visible(True)\n                pt.set_visible(True)\n                ax.draw_artist(an)\n                ax.draw_artist(pt)\n                self.canvas.blit(ax.bbox)\n\n            else:\n                # save data to plot marker later\n                mxval = mevent.xdata\n                button = mevent.button\n                if mxval is not None and button == 1 and not self.marker_line_info:\n                    self.marker_line_info = (artist, mxval, mevent.ydata,\n                                             button, mevent.inaxes)\n        elif isinstance(artist, Annotation):\n            # delete marker.\n            mevent = event.mouseevent\n            if mevent.button == 3:\n                targ_idx = None\n                for idx, (an, pt, _, _) in enumerate(self.markers):\n                    if an is artist:\n                        targ_idx = idx\n                        break\n                if targ_idx is not None:\n                    an, pt, _, _ = self.markers[targ_idx]\n                    del self.markers[targ_idx]\n                    an.set_visible(False)\n                    pt.set_visible(False)\n                    self.canvas.draw()\n\n    def _create_marker(self):\n        if self.marker_line_info:\n            artist, mxval, myval, button, ax = self.marker_line_info\n            xmin, xmax = ax.get_xlim()\n            ymin, ymax = ax.get_ylim()\n            mxval, myval = _find_closest_point(mxval, myval,\n                                               artist.get_xdata(), 
artist.get_ydata(),\n                                               xmax - xmin, ymax - ymin)\n            pt = ax.plot(mxval, myval, 'ko', picker=5.0)[0]\n            xstr, ystr = float_to_si_string(mxval, 4), float_to_si_string(myval, 4)\n            msg = 'x: %s\\ny: %s' % (xstr, ystr)\n            anno = ax.annotate(msg, xy=(mxval, myval), bbox=dict(boxstyle='round', fc='yellow', alpha=0.3),\n                               arrowprops=dict(arrowstyle=\"->\"))\n            anno.draggable()\n            anno.set_picker(True)\n\n            self.markers.append([anno, pt, artist, None])\n            ax.draw_artist(anno)\n            ax.draw_artist(pt)\n            self.canvas.blit(ax.bbox)\n            self.marker_line_info = None\n\n    def close_figure(self):\n        self.timer.stop()\n\n    def setup_callbacks(self):\n        self.canvas.mpl_connect('pick_event', self.on_pick)\n        self.canvas.mpl_connect('motion_notify_event', self.on_motion)\n        self.canvas.mpl_connect('button_release_event', self.on_button_release)\n        # use timer to make sure we won't create multiple markers at once when\n        # clicked on overlapping lines.\n        self.timer = self.canvas.new_timer(interval=100)\n        self.timer.add_callback(self._create_marker)\n        self.timer.start()\n"
  },
  {
    "path": "bag/design/LICENSE",
    "content": "BSD 3-Clause License\n\nCopyright (c) 2018, Regents of the University of California\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n  list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n  this list of conditions and the following disclaimer in the documentation\n  and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n  contributors may be used to endorse or promote products derived from\n  this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "bag/design/__init__.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This package defines design template classes.\n\"\"\"\n\nfrom .module import Module, ModuleDB, SchInstance, MosModuleBase, ResPhysicalModuleBase, ResMetalModule\n\n__all__ = ['Module', 'ModuleDB', 'SchInstance', 'MosModuleBase', 'ResPhysicalModuleBase', 'ResMetalModule']\n"
  },
  {
    "path": "bag/design/module.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This module defines base design module class and primitive design classes.\n\"\"\"\n\nimport os\nimport abc\nfrom typing import TYPE_CHECKING, List, Dict, Optional, Tuple, Any, Type, Set, Sequence, \\\n    Callable, Union\n\nfrom ..math import float_to_si_string\nfrom ..io import read_yaml\nfrom ..util.cache import DesignMaster, MasterDB\n\nif TYPE_CHECKING:\n    from ..core import BagProject\n    from ..layout.core import TechInfo\n\n\nclass ModuleDB(MasterDB):\n    \"\"\"A database of all modules.\n\n    This class is responsible for keeping track of module libraries and\n    creating new modules.\n\n    Parameters\n    ----------\n    lib_defs : str\n        path to the design library definition file.\n    tech_info : TechInfo\n        the TechInfo instance.\n    sch_exc_libs : List[str]\n        list of libraries that are excluded from import.\n    prj : Optional[BagProject]\n        the BagProject instance.\n    name_prefix : str\n        generated layout name prefix.\n    name_suffix : str\n        generated layout name suffix.\n    lib_path : str\n        path to create generated library in.\n    \"\"\"\n\n    def __init__(self, lib_defs, tech_info, sch_exc_libs, prj=None, name_prefix='',\n                 name_suffix='', lib_path=''):\n        # type: (str, TechInfo, List[str], Optional[BagProject], str, str, str) -> None\n        MasterDB.__init__(self, '', lib_defs=lib_defs, name_prefix=name_prefix,\n                          name_suffix=name_suffix)\n\n        self._prj = prj\n        self._tech_info = tech_info\n        self._exc_libs = set(sch_exc_libs)\n        self.lib_path = lib_path\n\n    def create_master_instance(self, gen_cls, lib_name, params, used_cell_names, **kwargs):\n        # type: (Type[Module], str, Dict[str, Any], Set[str], **Any) -> Module\n        \"\"\"Create a new non-finalized master instance.\n\n        This instance is used to determine if we created this instance before.\n\n     
   Parameters\n        ----------\n        gen_cls : Type[Module]\n            the generator Python class.\n        lib_name : str\n            generated instance library name.\n        params : Dict[str, Any]\n            instance parameters dictionary.\n        used_cell_names : Set[str]\n            a set of all used cell names.\n        **kwargs : Any\n            optional arguments for the generator.\n\n        Returns\n        -------\n        master : Module\n            the non-finalized generated instance.\n        \"\"\"\n        kwargs = kwargs.copy()\n        kwargs['lib_name'] = lib_name\n        kwargs['params'] = params\n        kwargs['used_names'] = used_cell_names\n        # noinspection PyTypeChecker\n        return gen_cls(self, **kwargs)\n\n    def create_masters_in_db(self, lib_name, content_list, debug=False):\n        # type: (str, Sequence[Any], bool) -> None\n        \"\"\"Create the masters in the design database.\n\n        Parameters\n        ----------\n        lib_name : str\n            library to create the designs in.\n        content_list : Sequence[Any]\n            a list of the master contents.  
Must be created in this order.\n        debug : bool\n            True to print debug messages\n        \"\"\"\n        if self._prj is None:\n            raise ValueError('BagProject is not defined.')\n\n        self._prj.instantiate_schematic(lib_name, content_list, lib_path=self.lib_path)\n\n    @property\n    def tech_info(self):\n        # type: () -> TechInfo\n        \"\"\"the :class:`~bag.layout.core.TechInfo` instance.\"\"\"\n        return self._tech_info\n\n    def is_lib_excluded(self, lib_name):\n        # type: (str) -> bool\n        \"\"\"Returns true if the given schematic library does not contain generators.\n\n        Parameters\n        ----------\n        lib_name : str\n            library name\n\n        Returns\n        -------\n        is_excluded : bool\n            True if given library is excluded.\n        \"\"\"\n        return lib_name in self._exc_libs\n\n\nclass SchInstance(object):\n    \"\"\"A class representing a schematic instance.\n\n    Parameters\n    ----------\n    database : ModuleDB\n        the schematic generator database.\n    gen_lib_name : str\n        the schematic generator library name.\n    gen_cell_name : str\n        the schematic generator cell name.\n    inst_name : str\n        name of this instance.\n    static : bool\n        True if the schematic generator is static.\n    connections : Optional[Dict[str, str]]\n        If given, initialize instance terminal connections to this dictionary.\n    master : Optional[Module]\n        If given, set the master of this instance.\n    parameters : Optional[Dict[str, Any]]\n        If given, set the instance parameters to this dictionary.\n    \"\"\"\n\n    def __init__(self,\n                 database,  # type: MasterDB\n                 gen_lib_name,  # type: str\n                 gen_cell_name,  # type: str\n                 inst_name,  # type: str\n                 static=False,  # type: bool\n                 connections=None,  # type: Optional[Dict[str, str]]\n 
                master=None,  # type: Optional[Module]\n                 parameters=None,  # type: Optional[Dict[str, Any]]\n                 ):\n        # type: (...) -> None\n        self._db = database\n        self._master = master\n        self._name = inst_name\n        self._gen_lib_name = gen_lib_name\n        self._gen_cell_name = gen_cell_name\n        self._static = static\n        self._term_mapping = {} if connections is None else connections\n        self.parameters = {} if parameters is None else parameters\n\n    def change_generator(self, gen_lib_name, gen_cell_name, static=False):\n        # type: (str, str, bool) -> None\n        \"\"\"Change the master associated with this instance.\n\n        All instance parameters and terminal mappings will be reset.\n\n        Parameters\n        ----------\n        gen_lib_name : str\n            the new schematic generator library name.\n        gen_cell_name : str\n            the new schematic generator cell name.\n        static : bool\n            True if the schematic generator is static.\n        \"\"\"\n        self._master = None\n        self._gen_lib_name = gen_lib_name\n        self._gen_cell_name = gen_cell_name\n        self._static = static\n        self.parameters.clear()\n        self._term_mapping.clear()\n\n    @property\n    def name(self):\n        # type: () -> str\n        \"\"\"Returns the instance name.\"\"\"\n        return self._name\n\n    @property\n    def connections(self):\n        # type: () -> Dict[str, str]\n        \"\"\"Returns the instance terminals connection dictionary.\"\"\"\n        return self._term_mapping\n\n    @property\n    def is_primitive(self):\n        # type: () -> bool\n        \"\"\"Returns true if this is an instance of a primitive schematic generator.\"\"\"\n        if self._static:\n            return True\n        if self._master is None:\n            raise ValueError('Instance %s has no master.  
'\n                             'Did you forget to call design()?' % self._name)\n        return self._master.is_primitive()\n\n    @property\n    def should_delete(self):\n        # type: () -> bool\n        \"\"\"Returns true if this instance should be deleted.\"\"\"\n        return self._master is not None and self._master.should_delete_instance()\n\n    @property\n    def master(self):\n        # type: () -> Optional[Module]\n        return self._master\n\n    @property\n    def master_cell_name(self):\n        # type: () -> str\n        \"\"\"Returns the schematic master cell name.\"\"\"\n        return self._gen_cell_name if self._master is None else self._master.cell_name\n\n    @property\n    def master_key(self):\n        # type: () -> Any\n        return self._master.key\n\n    def copy(self, inst_name, connections=None):\n        # type: (str, Optional[Dict[str, str]]) -> SchInstance\n        \"\"\"Returns a copy of this SchInstance.\n\n        Parameters\n        ----------\n        inst_name : str\n            the new instance name.\n        connections : Optional[Dict[str, str]]\n            If given, will set the connections of this instance to this dictionary.\n\n        Returns\n        -------\n        sch_inst : SchInstance\n            a copy of this SchInstance, with connections potentially updated.\n        \"\"\"\n        if connections is None:\n            connections = self._term_mapping.copy()\n        return SchInstance(self._db, self._gen_lib_name, self._gen_cell_name, inst_name,\n                           static=self._static, connections=connections, master=self._master,\n                           parameters=self.parameters.copy())\n\n    def get_master_lib_name(self, impl_lib):\n        # type: (str) -> str\n        \"\"\"Returns the schematic master library name.\n\n        Parameters\n        ----------\n        impl_lib : str\n            library where schematic masters will be created.\n\n        Returns\n        -------\n       
 master_lib : str\n            the schematic master library name.\n        \"\"\"\n        return self._gen_lib_name if self.is_primitive else impl_lib\n\n    def design_specs(self, *args, **kwargs):\n        # type: (*Any, **Any) -> None\n        \"\"\"Update the instance master.\"\"\"\n        self._update_master('design_specs', args, kwargs)\n\n    def design(self, *args, **kwargs):\n        # type: (*Any, **Any) -> None\n        \"\"\"Update the instance master.\"\"\"\n        self._update_master('design', args, kwargs)\n\n    def _update_master(self, design_fun, args, kwargs):\n        # type: (str, Tuple[Any, ...], Dict[str, Any]) -> None\n        \"\"\"Create a new master.\"\"\"\n        if args:\n            key = 'args'\n            idx = 1\n            while key in kwargs:\n                key = 'args_%d' % idx\n                idx += 1\n            kwargs[key] = args\n        else:\n            key = None\n        self._master = self._db.new_master(self._gen_lib_name, self._gen_cell_name,\n                                           params=kwargs, design_args=key,\n                                           design_fun=design_fun)  # type: Module\n        if self._master.is_primitive():\n            self.parameters.update(self._master.get_schematic_parameters())\n\n    def implement_design(self, lib_name, top_cell_name='', prefix='', suffix='', **kwargs):\n        # type: (str, str, str, str, **Any) -> None\n        \"\"\"Implement this design module in the given library.\n\n        If the given library already exists, this method will not delete or override\n        any pre-existing cells in that library.\n\n        If you use this method, you do not need to call update_structure(),\n        as this method calls it for you.\n\n        This method only works if BagProject is given.\n\n        Parameters\n        ----------\n        lib_name : str\n            name of the new library to put the generated schematics.\n        top_cell_name : str\n            
the cell name of the top level design.\n        prefix : str\n            prefix to add to cell names.\n        suffix : str\n            suffix to add to cell names.\n        **kwargs : Any\n            additional arguments.\n        \"\"\"\n        if 'erase' in kwargs:\n            print('DEPRECATED WARNING: erase is no longer supported '\n                  'in implement_design() and has no effect')\n\n        debug = kwargs.get('debug', False)\n        rename_dict = kwargs.get('rename_dict', None)\n\n        if not top_cell_name:\n            top_cell_name = None\n\n        if 'lib_path' in kwargs:\n            self._db.lib_path = kwargs['lib_path']\n        self._db.cell_prefix = prefix\n        self._db.cell_suffix = suffix\n        self._db.instantiate_masters([self._master], [top_cell_name], lib_name=lib_name,\n                                     debug=debug, rename_dict=rename_dict)\n\n    def get_layout_params(self, **kwargs):\n        # type: (Any) -> Dict[str, Any]\n        \"\"\"Backwards compatibility function.\"\"\"\n        if hasattr(self._master, 'get_layout_params'):\n            return getattr(self._master, 'get_layout_params')(**kwargs)\n        else:\n            return kwargs\n\n\nclass Module(DesignMaster, metaclass=abc.ABCMeta):\n    \"\"\"The base class of all schematic generators.  
This represents a schematic master.\n\n    This class defines all the methods needed to implement a design in the CAD database.\n\n    Parameters\n    ----------\n    database : ModuleDB\n        the design database object.\n    yaml_fname : str\n        the netlist information file name.\n    **kwargs :\n        additional arguments\n\n    Attributes\n    ----------\n    parameters : dict[str, any]\n        the design parameters dictionary.\n    instances : dict[str, None or :class:`~bag.design.Module` or list[:class:`~bag.design.Module`]]\n        the instance dictionary.\n    \"\"\"\n\n    # noinspection PyUnusedLocal\n    def __init__(self, database, yaml_fname, **kwargs):\n        # type: (ModuleDB, str, **Any) -> None\n\n        lib_name = kwargs['lib_name']\n        params = kwargs['params']\n        used_names = kwargs['used_names']\n        design_fun = kwargs['design_fun']\n        design_args = kwargs['design_args']\n\n        self.tech_info = database.tech_info\n        self.instances = {}  # type: Dict[str, Union[SchInstance, List[SchInstance]]]\n        self.pin_map = {}\n        self.new_pins = []\n        self.parameters = {}\n        self._pin_list = None\n\n        self._yaml_fname = os.path.abspath(yaml_fname)\n        self.sch_info = read_yaml(self._yaml_fname)\n\n        self._orig_lib_name = self.sch_info['lib_name']\n        self._orig_cell_name = self.sch_info['cell_name']\n        self._design_fun = design_fun\n        self._design_args = design_args\n\n        # create initial instances and populate instance map\n        for inst_name, inst_attr in self.sch_info['instances'].items():\n            lib_name = inst_attr['lib_name']\n            cell_name = inst_attr['cell_name']\n            static = database.is_lib_excluded(lib_name)\n            self.instances[inst_name] = SchInstance(database, lib_name, cell_name, inst_name,\n                                                    static=static)\n\n        # fill in pin map\n        for pin in 
self.sch_info['pins']:\n            self.pin_map[pin] = pin\n\n        # initialize schematic master\n        DesignMaster.__init__(self, database, lib_name, params, used_names)\n\n    @property\n    def pin_list(self):\n        # type: () -> List[str]\n        return self._pin_list\n\n    @abc.abstractmethod\n    def design(self, **kwargs):\n        \"\"\"To be overridden by subclasses to design this module.\n\n        To design instances of this module, you can\n        call their :meth:`.design` method or any other ways you coded.\n\n        To modify schematic structure, call:\n\n        :meth:`.rename_pin`\n\n        :meth:`.delete_instance`\n\n        :meth:`.replace_instance_master`\n\n        :meth:`.reconnect_instance_terminal`\n\n        :meth:`.array_instance`\n        \"\"\"\n        pass\n\n    def finalize(self):\n        # type: () -> None\n        \"\"\"Finalize this master instance.\n        \"\"\"\n        # invoke design function\n        fun = getattr(self, self._design_fun)\n        if self._design_args:\n            args = self.params.pop(self._design_args)\n            fun(*args, **self.params)\n        else:\n            fun(**self.params)\n\n        # backwards compatibility\n        if self.key is None:\n            self.params.clear()\n            self.params.update(self.parameters)\n            self.update_master_info()\n\n        self.children = set()\n        for inst_list in self.instances.values():\n\n            if isinstance(inst_list, SchInstance):\n                if not inst_list.is_primitive:\n                    self.children.add(inst_list.master_key)\n            else:\n                for inst in inst_list:\n                    if not inst.is_primitive:\n                        self.children.add(inst.master_key)\n\n        # compute pins\n        self._pin_list = [pin_name for pin_name, _ in self.new_pins]\n        self._pin_list.extend((val for val in self.pin_map.values() if val))\n\n        # call super finalize routine\n 
       super(Module, self).finalize()\n\n    @classmethod\n    def get_params_info(cls):\n        # type: () -> Optional[Dict[str, str]]\n        \"\"\"Returns a dictionary from parameter names to descriptions.\n\n        Returns\n        -------\n        param_info : Optional[Dict[str, str]]\n            dictionary from parameter names to descriptions.\n        \"\"\"\n        return None\n\n    def get_master_basename(self):\n        # type: () -> str\n        \"\"\"Returns the base name to use for this instance.\n\n        Returns\n        -------\n        basename : str\n            the base name for this instance.\n        \"\"\"\n        return self._orig_cell_name\n\n    def get_content(self, lib_name, rename_fun):\n        # type: (str, Callable[[str], str]) -> Optional[Tuple[Any,...]]\n        \"\"\"Returns the content of this master instance.\n\n        Parameters\n        ----------\n        lib_name : str\n            the library to create the design masters in.\n        rename_fun : Callable[[str], str]\n            a function that renames design masters.\n\n        Returns\n        -------\n        content : Optional[Tuple[Any,...]]\n            the master content data structure.\n        \"\"\"\n        if self.is_primitive():\n            return None\n\n        # populate instance transform mapping dictionary\n        inst_map = {}\n        for inst_name, inst_list in self.instances.items():\n            if isinstance(inst_list, SchInstance):\n                inst_list = [inst_list]\n\n            info_list = []\n            for inst in inst_list:\n                if not inst.should_delete:\n                    cur_lib = inst.get_master_lib_name(lib_name)\n                    info_list.append(dict(\n                        name=inst.name,\n                        lib_name=cur_lib,\n                        cell_name= inst.master_cell_name if inst.is_primitive else rename_fun(inst.master_cell_name),\n                        params=inst.parameters,\n   
                     term_mapping=inst.connections,\n                    ))\n            inst_map[inst_name] = info_list\n\n        return (self._orig_lib_name, self._orig_cell_name, rename_fun(self.cell_name),\n                self.pin_map, inst_map, self.new_pins)\n\n    @property\n    def cell_name(self):\n        # type: () -> str\n        \"\"\"The master cell name.\"\"\"\n        if self.is_primitive():\n            return self.get_cell_name_from_parameters()\n        return super(Module, self).cell_name\n\n    @property\n    def orig_cell_name(self):\n        # type: () -> str\n        \"\"\"The original schematic template cell name.\"\"\"\n        return self._orig_cell_name\n\n    def is_primitive(self):\n        # type: () -> bool\n        \"\"\"Returns True if this Module represents a BAG primitive.\n\n        NOTE: This method is only used by BAG and schematic primitives.  This method prevents\n        the module from being copied during design implementation.  Custom subclasses should\n        not override this method.\n\n        Returns\n        -------\n        is_primitive : bool\n            True if this Module represents a BAG primitive.\n        \"\"\"\n        return False\n\n    def should_delete_instance(self):\n        # type: () -> bool\n        \"\"\"Returns True if this instance should be deleted based on its parameters.\n\n        This method is mainly used to delete 0 finger or 0 width transistors.  
However,\n        You can override this method if there exists parameter settings which corresponds\n        to an empty schematic.\n\n        Returns\n        -------\n        delete : bool\n            True if parent should delete this instance.\n        \"\"\"\n        return False\n\n    def get_schematic_parameters(self):\n        # type: () -> Dict[str, str]\n        \"\"\"Returns the schematic parameter dictionary of this instance.\n\n        NOTE: This method is only used by BAG primitives, as they are\n        implemented with parameterized cells in the CAD database.  Custom\n        subclasses should not override this method.\n\n        Returns\n        -------\n        params : Dict[str, str]\n            the schematic parameter dictionary.\n        \"\"\"\n        return {}\n\n    def get_cell_name_from_parameters(self):\n        \"\"\"Returns new cell name based on parameters.\n\n        NOTE: This method is only used by BAG primitives.  This method\n        enables a BAG primitive to change the cell master based on\n        design parameters (e.g. change transistor instance based on the\n        intent parameter).  
Custom subclasses should not override this\n        method.\n\n        Returns\n        -------\n        cell : str\n            the cell name based on parameters.\n        \"\"\"\n        return super(Module, self).cell_name\n\n    def rename_pin(self, old_pin, new_pin):\n        # type: (str, str) -> None\n        \"\"\"Renames an input/output pin of this schematic.\n\n        NOTE: Make sure to call :meth:`.reconnect_instance_terminal` so that instances are\n        connected to the new pin.\n\n        Parameters\n        ----------\n        old_pin : str\n            the old pin name.\n        new_pin : str\n            the new pin name.\n        \"\"\"\n        self.pin_map[old_pin] = new_pin\n\n    def add_pin(self, new_pin, pin_type):\n        # type: (str, str) -> None\n        \"\"\"Adds a new pin to this schematic.\n\n        NOTE: Make sure to call :meth:`.reconnect_instance_terminal` so that instances are\n        connected to the new pin.\n\n        Parameters\n        ----------\n        new_pin : str\n            the new pin name.\n        pin_type : str\n            the new pin type.  
We current support \"input\", \"output\", or \"inputOutput\"\n        \"\"\"\n        self.new_pins.append([new_pin, pin_type])\n\n    def remove_pin(self, remove_pin):\n        # type: (str) -> None\n        \"\"\"Removes a pin from this schematic.\n\n        Parameters\n        ----------\n        remove_pin : str\n            the pin to remove.\n        \"\"\"\n        self.rename_pin(remove_pin, '')\n\n    def delete_instance(self, inst_name):\n        # type: (str) -> None\n        \"\"\"Delete the instance with the given name.\n\n        Parameters\n        ----------\n        inst_name : str\n            the child instance to delete.\n        \"\"\"\n        self.instances[inst_name] = []\n\n    def replace_instance_master(self, inst_name, lib_name, cell_name, static=False, index=None):\n        # type: (str, str, str, bool, Optional[int]) -> None\n        \"\"\"Replace the master of the given instance.\n\n        NOTE: all terminal connections will be reset.  Call reconnect_instance_terminal() to modify\n        terminal connections.\n\n        Parameters\n        ----------\n        inst_name : str\n            the child instance to replace.\n        lib_name : str\n            the new library name.\n        cell_name : str\n            the new cell name.\n        static : bool\n            True if we're replacing instance with a static schematic instead of a design module.\n        index : Optional[int]\n            If index is not None and the child instance has been arrayed, this is the instance\n            array index that we are replacing.\n            If index is None, the entire child instance (whether arrayed or not) will be replaced\n            by a single new instance.\n        \"\"\"\n        if inst_name not in self.instances:\n            raise ValueError('Cannot find instance with name: %s' % inst_name)\n\n        # check if this is arrayed\n        if index is not None and isinstance(self.instances[inst_name], list):\n            
self.instances[inst_name][index].change_generator(lib_name, cell_name, static=static)\n        else:\n            self.instances[inst_name] = SchInstance(self.master_db, lib_name, cell_name, inst_name,\n                                                    static=static)\n\n    def reconnect_instance_terminal(self, inst_name, term_name, net_name, index=None):\n        \"\"\"Reconnect the instance terminal to a new net.\n\n        Parameters\n        ----------\n        inst_name : str\n            the child instance to modify.\n        term_name : Union[str, List[str]]\n            the instance terminal name to reconnect.\n            If a list is given, it is applied to each arrayed instance.\n        net_name : Union[str, List[str]]\n            the net to connect the instance terminal to.\n            If a list is given, it is applied to each arrayed instance.\n        index : Optional[int]\n            If not None and the given instance is arrayed, will only modify terminal\n            connection for the instance at the given index.\n            If None and the given instance is arrayed, all instances in the array\n            will be reconnected.\n        \"\"\"\n        if index is not None:\n            # only modify terminal connection for one instance in the array\n            if isinstance(term_name, str) and isinstance(net_name, str):\n                self.instances[inst_name][index].connections[term_name] = net_name\n            else:\n                raise ValueError('If index is not None, '\n                                 'both term_name and net_name must be string.')\n        else:\n            # modify terminal connection for all instances in the array\n            cur_inst_list = self.instances[inst_name]\n            if isinstance(cur_inst_list, SchInstance):\n                cur_inst_list = [cur_inst_list]\n\n            num_insts = len(cur_inst_list)\n            if not isinstance(term_name, list) and not isinstance(term_name, tuple):\n         
       if not isinstance(term_name, str):\n                    raise ValueError('term_name = %s must be string.' % term_name)\n                term_name = [term_name] * num_insts\n            else:\n                if len(term_name) != num_insts:\n                    raise ValueError('term_name length = %d != %d' % (len(term_name), num_insts))\n\n            if not isinstance(net_name, list) and not isinstance(net_name, tuple):\n                if not isinstance(net_name, str):\n                    raise ValueError('net_name = %s must be string.' % net_name)\n                net_name = [net_name] * num_insts\n            else:\n                if len(net_name) != num_insts:\n                    raise ValueError('net_name length = %d != %d' % (len(net_name), num_insts))\n\n            for inst, tname, nname in zip(cur_inst_list, term_name, net_name):\n                inst.connections[tname] = nname\n\n    def array_instance(self, inst_name, inst_name_list, term_list=None):\n        # type: (str, List[str], Optional[List[Dict[str, str]]]) -> None\n        \"\"\"Replace the given instance by an array of instances.\n\n        This method will replace self.instances[inst_name] by a list of\n        Modules.  The user can then design each of those modules.\n\n        Parameters\n        ----------\n        inst_name : str\n            the instance to array.\n        inst_name_list : List[str]\n            a list of the names for each array item.\n        term_list : Optional[List[Dict[str, str]]]\n            a list of modified terminal connections for each array item.  The keys are\n            instance terminal names, and the values are the net names to connect\n            them to.  
Only terminal connections different than the parent instance\n            should be listed here.\n            If None, assume terminal connections are not changed.\n        \"\"\"\n        num_inst = len(inst_name_list)\n        if not term_list:\n            term_list = [None] * num_inst\n        if num_inst != len(term_list):\n            msg = 'len(inst_name_list) = %d != len(term_list) = %d'\n            raise ValueError(msg % (num_inst, len(term_list)))\n\n        orig_inst = self.instances[inst_name]\n        if not isinstance(orig_inst, SchInstance):\n            raise ValueError('Instance %s is already arrayed.' % inst_name)\n\n        self.instances[inst_name] = [orig_inst.copy(iname, connections=iterm)\n                                     for iname, iterm in zip(inst_name_list, term_list)]\n\n    def design_dc_bias_sources(self,  # type: Module\n                               vbias_dict,  # type: Optional[Dict[str, List[str]]]\n                               ibias_dict,  # type: Optional[Dict[str, List[str]]]\n                               vinst_name,  # type: str\n                               iinst_name,  # type: str\n                               define_vdd=True,  # type: bool\n                               ):\n        # type: (...) -> None\n        \"\"\"Convenience function for generating DC bias sources.\n\n        Given DC voltage/current bias sources information, array the given voltage/current bias\n        sources and configure the voltage/current.\n\n        Each bias dictionary is a dictionary from bias source name to a 3-element list.  The first\n        two elements are the PLUS/MINUS net names, respectively, and the third element is the DC\n        voltage/current value as a string or float. A variable name can be given to define a\n        testbench parameter.\n\n        Parameters\n        ----------\n        vbias_dict : Optional[Dict[str, List[str]]]\n            the voltage bias dictionary.  
None or empty to disable.\n        ibias_dict : Optional[Dict[str, List[str]]]\n            the current bias dictionary.  None or empty to disable.\n        vinst_name : str\n            the DC voltage source instance name.\n        iinst_name : str\n            the DC current source instance name.\n        define_vdd : bool\n            True to include a supply voltage source connected to VDD/VSS, with voltage value 'vdd'.\n        \"\"\"\n        if define_vdd and 'SUP' not in vbias_dict:\n            vbias_dict = vbias_dict.copy()\n            vbias_dict['SUP'] = ['VDD', 'VSS', 'vdd']\n\n        for bias_dict, name_template, param_name, inst_name in \\\n                ((vbias_dict, 'V%s', 'vdc', vinst_name), (ibias_dict, 'I%s', 'idc', iinst_name)):\n            if bias_dict:\n                name_list, term_list, val_list, param_dict_list = [], [], [], []\n                for name in sorted(bias_dict.keys()):\n                    value_tuple = bias_dict[name]\n                    pname, nname, bias_val = value_tuple[:3]\n                    param_dict = value_tuple[3] if len(value_tuple) > 3 \\\n                        else None  # type: Optional[Dict]\n                    term_list.append(dict(PLUS=pname, MINUS=nname))\n                    name_list.append(name_template % name)\n                    param_dict_list.append(param_dict)\n                    if isinstance(bias_val, str):\n                        val_list.append(bias_val)\n                    elif isinstance(bias_val, int) or isinstance(bias_val, float):\n                        val_list.append(float_to_si_string(bias_val))\n                    else:\n                        raise ValueError('value %s of type %s '\n                                         'not supported' % (bias_val, type(bias_val)))\n\n                self.array_instance(inst_name, name_list, term_list=term_list)\n                for inst, val, param_dict in zip(self.instances[inst_name], val_list,\n                                 
                param_dict_list):\n                    inst.parameters[param_name] = val\n                    if param_dict is not None:\n                        for k, v in param_dict.items():\n                            if isinstance(v, str):\n                                pass\n                            elif isinstance(v, int) or isinstance(v, float):\n                                v = float_to_si_string(v)\n                            else:\n                                raise ValueError('value %s of type %s not supported' % (v, type(v)))\n\n                            inst.parameters[k] = v\n            else:\n                self.delete_instance(inst_name)\n\n    def design_dummy_transistors(self, dum_info, inst_name, vdd_name, vss_name, net_map=None):\n        # type: (List[Tuple[Any]], str, str, str, Optional[Dict[str, str]]) -> None\n        \"\"\"Convenience function for generating dummy transistor schematic.\n\n        Given dummy information (computed by AnalogBase) and a BAG transistor instance,\n        this method generates dummy schematics by arraying and modifying the BAG\n        transistor instance.\n\n        Parameters\n        ----------\n        dum_info : List[Tuple[Any]]\n            the dummy information data structure.\n        inst_name : str\n            the BAG transistor instance name.\n        vdd_name : str\n            VDD net name.  Used for PMOS dummies.\n        vss_name : str\n            VSS net name.  
Used for NMOS dummies.\n        net_map : Optional[Dict[str, str]]\n            optional net name transformation mapping.\n        \"\"\"\n        if not dum_info:\n            self.delete_instance(inst_name)\n        else:\n            num_arr = len(dum_info)\n            arr_name_list = ['XDUMMY%d' % idx for idx in range(num_arr)]\n            self.array_instance(inst_name, arr_name_list)\n\n            for idx, ((mos_type, w, lch, th, s_net, d_net), fg) in enumerate(dum_info):\n                if mos_type == 'pch':\n                    cell_name = 'pmos4_standard'\n                    sup_name = vdd_name\n                else:\n                    cell_name = 'nmos4_standard'\n                    sup_name = vss_name\n                if net_map is not None:\n                    s_net = net_map.get(s_net, s_net)\n                    d_net = net_map.get(d_net, d_net)\n                s_name = s_net if s_net else sup_name\n                d_name = d_net if d_net else sup_name\n\n                self.replace_instance_master(inst_name, 'BAG_prim', cell_name, index=idx)\n                self.reconnect_instance_terminal(inst_name, 'G', sup_name, index=idx)\n                self.reconnect_instance_terminal(inst_name, 'B', sup_name, index=idx)\n                self.reconnect_instance_terminal(inst_name, 'D', d_name, index=idx)\n                self.reconnect_instance_terminal(inst_name, 'S', s_name, index=idx)\n                self.instances[inst_name][idx].design(w=w, l=lch, nf=fg, intent=th)\n\n\nclass MosModuleBase(Module):\n    \"\"\"The base design class for the bag primitive transistor.\n\n    Parameters\n    ----------\n    database : ModuleDB\n        the design database object.\n    yaml_file : str\n        the netlist information file name.\n    **kwargs :\n        additional arguments\n    \"\"\"\n\n    def __init__(self, database, yaml_file, **kwargs):\n        Module.__init__(self, database, yaml_file, **kwargs)\n\n    @classmethod\n    def 
get_params_info(cls):\n        # type: () -> Dict[str, str]\n        return dict(\n            w='transistor width, in meters or number of fins.',\n            l='transistor length, in meters.',\n            nf='transistor number of fingers.',\n            intent='transistor threshold flavor.',\n        )\n\n    def design(self, w=1e-6, l=60e-9, nf=1, intent='standard'):\n        pass\n\n    def get_schematic_parameters(self):\n        # type: () -> Dict[str, str]\n        w_res = self.tech_info.tech_params['mos']['width_resolution']\n        l_res = self.tech_info.tech_params['mos']['length_resolution']\n        w = self.params['w']\n        l = self.params['l']\n        nf = self.params['nf']\n        wstr = w if isinstance(w, str) else float_to_si_string(int(round(w / w_res)) * w_res)\n        lstr = l if isinstance(l, str) else float_to_si_string(int(round(l / l_res)) * l_res)\n        nstr = nf if isinstance(nf, str) else '%d' % nf\n\n        return dict(w=wstr, l=lstr, nf=nstr)\n\n    def get_cell_name_from_parameters(self):\n        # type: () -> str\n        mos_type = self.orig_cell_name.split('_')[0]\n        return '%s_%s' % (mos_type, self.params['intent'])\n\n    def is_primitive(self):\n        # type: () -> bool\n        return True\n\n    def should_delete_instance(self):\n        # type: () -> bool\n        return self.params['nf'] == 0 or self.params['w'] == 0 or self.params['l'] == 0\n\n\nclass ResPhysicalModuleBase(Module):\n    \"\"\"The base design class for a real resistor parametrized by width and length.\n\n    Parameters\n    ----------\n    database : ModuleDB\n        the design database object.\n    yaml_file : str\n        the netlist information file name.\n    **kwargs :\n        additional arguments\n    \"\"\"\n\n    def __init__(self, database, yaml_file, **kwargs):\n        Module.__init__(self, database, yaml_file, **kwargs)\n\n    @classmethod\n    def get_params_info(cls):\n        # type: () -> Dict[str, str]\n        return 
dict(\n            w='resistor width, in meters.',\n            l='resistor length, in meters.',\n            intent='resistor flavor.',\n        )\n\n    def design(self, w=1e-6, l=1e-6, intent='standard'):\n        pass\n\n    def get_schematic_parameters(self):\n        # type: () -> Dict[str, str]\n        w = self.params['w']\n        l = self.params['l']\n        wstr = w if isinstance(w, str) else float_to_si_string(w)\n        lstr = l if isinstance(l, str) else float_to_si_string(l)\n\n        return dict(w=wstr, l=lstr)\n\n    def get_cell_name_from_parameters(self):\n        # type: () -> str\n        return 'res_%s' % self.params['intent']\n\n    def is_primitive(self):\n        # type: () -> bool\n        return True\n\n    def should_delete_instance(self):\n        # type: () -> bool\n        return self.params['w'] == 0 or self.params['l'] == 0\n\n\nclass ResMetalModule(Module):\n    \"\"\"The base design class for a metal resistor.\n\n    Parameters\n    ----------\n    database : ModuleDB\n        the design database object.\n    yaml_file : str\n        the netlist information file name.\n    **kwargs :\n        additional arguments\n    \"\"\"\n\n    def __init__(self, database, yaml_file, **kwargs):\n        Module.__init__(self, database, yaml_file, **kwargs)\n\n    @classmethod\n    def get_params_info(cls):\n        # type: () -> Dict[str, str]\n        return dict(\n            w='resistor width, in meters.',\n            l='resistor length, in meters.',\n            layer='the metal layer ID.',\n        )\n\n    def design(self, w, l, layer):\n        # type: (float, float, int) -> None\n        pass\n\n    def get_schematic_parameters(self):\n        # type: () -> Dict[str, str]\n        w = self.params['w']\n        l = self.params['l']\n        layer = self.params['layer']\n        wstr = float_to_si_string(w)\n        lstr = float_to_si_string(l)\n        lay_str = str(layer)\n        return dict(w=wstr, l=lstr, layer=lay_str)\n\n    
def is_primitive(self):\n        # type: () -> bool\n        return True\n\n    def should_delete_instance(self):\n        # type: () -> bool\n        return self.params['w'] == 0 or self.params['l'] == 0\n"
  },
  {
    "path": "bag/interface/LICENSE",
    "content": "BSD 3-Clause License\n\nCopyright (c) 2018, Regents of the University of California\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n  list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n  this list of conditions and the following disclaimer in the documentation\n  and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n  contributors may be used to endorse or promote products derived from\n  this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "bag/interface/__init__.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This package defines classes to interface with CAD database and circuit simulators.\n\"\"\"\n\nfrom .server import SkillServer\nfrom .zmqwrapper import ZMQRouter, ZMQDealer\n\n__all__ = ['SkillServer', 'ZMQRouter', 'ZMQDealer', ]\n"
  },
  {
    "path": "bag/interface/base.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This module defines the base of all interface classes.\n\"\"\"\n\nfrom typing import Dict, Any\n\nfrom ..io.template import new_template_env\n\n\nclass InterfaceBase:\n    \"\"\"The base class of all interfaces.\n\n    Provides various helper methods common to all interfaces.\n    \"\"\"\n    def __init__(self):\n        self._tmp_env = new_template_env('bag.interface', 'templates')\n\n    def render_file_template(self, temp_name, params):\n        # type: (str, Dict[str, Any]) -> str\n        \"\"\"Returns the rendered content from the given template file.\"\"\"\n        template = self._tmp_env.get_template(temp_name)\n        return template.render(**params)\n"
  },
  {
    "path": "bag/interface/database.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This module defines DbAccess, the base class for CAD database manipulation.\n\"\"\"\n\nfrom typing import TYPE_CHECKING, List, Dict, Tuple, Optional, Sequence, Any, Union\n\nimport os\nimport abc\nimport traceback\nimport yaml\n\nfrom ..io.file import make_temp_dir, read_file, write_file\nfrom ..verification import make_checker\nfrom .base import InterfaceBase\n\nif TYPE_CHECKING:\n    from ..verification import Checker\n\n\ndef dict_to_item_list(table):\n    \"\"\"Given a Python dictionary, convert to sorted item list.\n\n    Parameters\n    ----------\n    table : dict[str, any]\n        a Python dictionary where the keys are strings.\n\n    Returns\n    -------\n    assoc_list : list[(str, str)]\n        the sorted item list representation of the given dictionary.\n    \"\"\"\n    return [[key, table[key]] for key in sorted(table.keys())]\n\n\ndef format_inst_map(inst_map):\n    \"\"\"Given instance map from DesignModule, format it for database changes.\n\n    Parameters\n    ----------\n    inst_map : Dict[str, Any]\n        the instance map created by DesignModule.\n\n    Returns\n    -------\n    ans : List[(str, Any)]\n        the database change instance map.\n    \"\"\"\n    ans = []\n    for old_inst_name, rinst_list in inst_map.items():\n        new_rinst_list = [dict(name=rinst['name'],\n                               lib_name=rinst['lib_name'],\n                               cell_name=rinst['cell_name'],\n                               params=dict_to_item_list(rinst['params']),\n                               term_mapping=dict_to_item_list(rinst['term_mapping']),\n                               ) for rinst in rinst_list]\n        ans.append([old_inst_name, new_rinst_list])\n    return ans\n\n\nclass DbAccess(InterfaceBase, abc.ABC):\n    \"\"\"A class that manipulates the CAD database.\n\n    Parameters\n    ----------\n    tmp_dir : str\n        temporary file directory for DbAccess.\n    db_config : 
Dict[str, Any]\n        the database configuration dictionary.\n    \"\"\"\n\n    def __init__(self, tmp_dir, db_config):\n        # type: (str, Dict[str, Any]) -> None\n        InterfaceBase.__init__(self)\n\n        self.tmp_dir = make_temp_dir('dbTmp', parent_dir=tmp_dir)\n        self.db_config = db_config\n        self.exc_libs = set(db_config['schematic']['exclude_libraries'])\n        # noinspection PyBroadException\n        try:\n            check_kwargs = self.db_config['checker'].copy()\n            check_kwargs['tmp_dir'] = self.tmp_dir\n            self.checker = make_checker(**check_kwargs)  # type: Optional[Checker]\n        except Exception:\n            stack_trace = traceback.format_exc()\n            print('*WARNING* error creating Checker:\\n%s' % stack_trace)\n            print('*WARNING* LVS/RCX will be disabled.')\n            self.checker = None  # type: Optional[Checker]\n\n        # set default lib path\n        self._default_lib_path = self.get_default_lib_path(db_config)\n\n    @classmethod\n    def get_default_lib_path(cls, db_config):\n        lib_path_fallback = os.path.abspath('.')\n        default_lib_path = os.path.abspath(db_config.get('default_lib_path', lib_path_fallback))\n        if not os.path.isdir(default_lib_path):\n            default_lib_path = lib_path_fallback\n\n        return default_lib_path\n\n    @property\n    def default_lib_path(self):\n        \"\"\"Returns the default directory to create new libraries in.\n\n        Returns\n        -------\n        lib_path : string\n            directory to create new libraries in.\n        \"\"\"\n        return self._default_lib_path\n\n    @abc.abstractmethod\n    def close(self):\n        \"\"\"Terminate the database server gracefully.\n        \"\"\"\n        pass\n\n    @abc.abstractmethod\n    def parse_schematic_template(self, lib_name, cell_name):\n        \"\"\"Parse the given schematic template.\n\n        Parameters\n        ----------\n        lib_name : str\n   
         name of the library.\n        cell_name : str\n            name of the cell.\n\n        Returns\n        -------\n        template : str\n            the content of the netlist structure file.\n        \"\"\"\n        return \"\"\n\n    @abc.abstractmethod\n    def get_cells_in_library(self, lib_name):\n        \"\"\"Get a list of cells in the given library.\n\n        Returns an empty list if the given library does not exist.\n\n        Parameters\n        ----------\n        lib_name : str\n            the library name.\n\n        Returns\n        -------\n        cell_list : list[str]\n            a list of cells in the library\n        \"\"\"\n        return []\n\n    @abc.abstractmethod\n    def create_library(self, lib_name, lib_path=''):\n        \"\"\"Create a new library if one does not exist yet.\n\n        Parameters\n        ----------\n        lib_name : string\n            the library name.\n        lib_path : string\n            directory to create the library in.  If Empty, use default location.\n        \"\"\"\n        pass\n\n    @abc.abstractmethod\n    def create_implementation(self, lib_name, template_list, change_list, lib_path=''):\n        \"\"\"Create implementation of a design in the CAD database.\n\n        Parameters\n        ----------\n        lib_name : str\n            implementation library name.\n        template_list : list\n            a list of schematic templates to copy to the new library.\n        change_list :\n            a list of changes to be performed on each copied templates.\n        lib_path : str\n            directory to create the library in.  
If Empty, use default location.\n        \"\"\"\n        pass\n\n    @abc.abstractmethod\n    def configure_testbench(self, tb_lib, tb_cell):\n        \"\"\"Configure testbench state for the given testbench.\n\n        This method fills in process-specific information for the given testbench.\n\n        Parameters\n        ----------\n        tb_lib : str\n            testbench library name.\n        tb_cell : str\n            testbench cell name.\n\n        Returns\n        -------\n        cur_env : str\n            the current simulation environment.\n        envs : list[str]\n            a list of available simulation environments.\n        parameters : dict[str, str]\n            a list of testbench parameter values, represented as string.\n        \"\"\"\n        return \"\", [], {}\n\n    @abc.abstractmethod\n    def get_testbench_info(self, tb_lib, tb_cell):\n        \"\"\"Returns information about an existing testbench.\n\n        Parameters\n        ----------\n        tb_lib : str\n            testbench library.\n        tb_cell : str\n            testbench cell.\n\n        Returns\n        -------\n        cur_envs : list[str]\n            the current simulation environments.\n        envs : list[str]\n            a list of available simulation environments.\n        parameters : dict[str, str]\n            a list of testbench parameter values, represented as string.\n        outputs : dict[str, str]\n            a list of testbench output expressions.\n        \"\"\"\n        return [], [], {}, {}\n\n    @abc.abstractmethod\n    def update_testbench(self,  # type: DbAccess\n                         lib,  # type: str\n                         cell,  # type: str\n                         parameters,  # type: Dict[str, str]\n                         sim_envs,  # type: Sequence[str]\n                         config_rules,  # type: Sequence[List[str]]\n                         env_parameters,  # type: Sequence[List[Tuple[str, str]]]\n                         
):\n        # type: (...) -> None\n        \"\"\"Update the given testbench configuration.\n\n        Parameters\n        ----------\n        lib : str\n            testbench library.\n        cell : str\n            testbench cell.\n        parameters : Dict[str, str]\n            testbench parameters.\n        sim_envs : Sequence[str]\n            list of enabled simulation environments.\n        config_rules : Sequence[List[str]]\n            config view mapping rules, list of (lib, cell, view) rules.\n        env_parameters : Sequence[List[Tuple[str, str]]]\n            list of param/value list for each simulation environment.\n        \"\"\"\n        pass\n\n    @abc.abstractmethod\n    def instantiate_layout_pcell(self, lib_name, cell_name, view_name,\n                                 inst_lib, inst_cell, params, pin_mapping):\n        \"\"\"Create a layout cell with a single pcell instance.\n\n        Parameters\n        ----------\n        lib_name : str\n            layout library name.\n        cell_name : str\n            layout cell name.\n        view_name : str\n            layout view name, default is \"layout\".\n        inst_lib : str\n            pcell library name.\n        inst_cell : str\n            pcell cell name.\n        params : dict[str, any]\n            the parameter dictionary.\n        pin_mapping: dict[str, str]\n            the pin mapping dictionary.\n        \"\"\"\n        pass\n\n    @abc.abstractmethod\n    def instantiate_layout(self, lib_name, view_name, via_tech, layout_list):\n        # type: (str, str, str, Sequence[Any]) -> None\n        \"\"\"Create a batch of layouts.\n\n        Parameters\n        ----------\n        lib_name : str\n            layout library name.\n        view_name : str\n            layout view name.\n        via_tech : str\n            via technology library name.\n        layout_list : Sequence[Any]\n            a list of layouts to create\n        \"\"\"\n        pass\n\n    
@abc.abstractmethod\n    def release_write_locks(self, lib_name, cell_view_list):\n        # type: (str, Sequence[Tuple[str, str]]) -> None\n        \"\"\"Release write locks from all the given cells.\n\n        Parameters\n        ----------\n        lib_name : str\n            the library name.\n        cell_view_list : Sequence[Tuple[str, str]]\n            list of cell/view name tuples.\n        \"\"\"\n        pass\n\n    @abc.abstractmethod\n    def create_schematic_from_netlist(self, netlist, lib_name, cell_name,\n                                      sch_view=None, **kwargs):\n        # type: (str, str, str, Optional[str], **Any) -> None\n        \"\"\"Create a schematic from a netlist.\n\n        This is mainly used to create extracted schematic from an extracted netlist.\n\n        Parameters\n        ----------\n        netlist : str\n            the netlist file name.\n        lib_name : str\n            library name.\n        cell_name : str\n            cell_name\n        sch_view : Optional[str]\n            schematic view name.  
The default value is implementation dependent.\n        **kwargs : Any\n            additional implementation-dependent arguments.\n        \"\"\"\n        pass\n\n    @abc.abstractmethod\n    def create_verilog_view(self, verilog_file, lib_name, cell_name, **kwargs):\n        # type: (str, str, str, **Any) -> None\n        \"\"\"Create a verilog view for mixed-signal simulation.\n\n        Parameters\n        ----------\n        verilog_file : str\n            the verilog file name.\n        lib_name : str\n            library name.\n        cell_name : str\n            cell name.\n        **kwargs : Any\n            additional implementation-dependent arguments.\n        \"\"\"\n        pass\n\n    def get_python_template(self, lib_name, cell_name, primitive_table):\n        # type: (str, str, Dict[str, str]) -> str\n        \"\"\"Returns the default Python Module template for the given schematic.\n\n        Parameters\n        ----------\n        lib_name : str\n            the library name.\n        cell_name : str\n            the cell name.\n        primitive_table : Dict[str, str]\n            a dictionary from primitive cell name to module template file name.\n\n        Returns\n        -------\n        template : str\n            the default Python Module template.\n        \"\"\"\n        param_dict = dict(lib_name=lib_name, cell_name=cell_name)\n        if lib_name == 'BAG_prim':\n            if cell_name in primitive_table:\n                # load template from user defined file\n                template = self._tmp_env.from_string(read_file(primitive_table[cell_name]))\n                return template.render(**param_dict)\n            else:\n                if cell_name.startswith('nmos4_') or cell_name.startswith('pmos4_'):\n                    # transistor template\n                    module_name = 'MosModuleBase'\n                elif cell_name == 'res_ideal':\n                    # ideal resistor template\n                    module_name = 
'ResIdealModuleBase'\n                elif cell_name == 'res_metal':\n                    module_name = 'ResMetalModule'\n                elif cell_name == 'cap_ideal':\n                    # ideal capacitor template\n                    module_name = 'CapIdealModuleBase'\n                elif cell_name.startswith('res_'):\n                    # physical resistor template\n                    module_name = 'ResPhysicalModuleBase'\n                else:\n                    raise Exception('Unknown primitive cell: %s' % cell_name)\n\n                param_dict['module_name'] = module_name\n                return self.render_file_template('PrimModule.pyi', param_dict)\n        else:\n            # use default empty template.\n            return self.render_file_template('Module.pyi', param_dict)\n\n    def _process_rcx_output(self, netlist, log_fname, lib_name, cell_name, create_schematic):\n        if create_schematic:\n            if netlist is None:\n                return False, log_fname\n            if netlist:\n                # create schematic only if netlist name is not empty.\n                self.create_schematic_from_netlist(netlist, lib_name, cell_name)\n            return True, log_fname\n        else:\n            return netlist, log_fname\n\n    async def async_run_lvs(self, lib_name: str, cell_name: str, **kwargs: Any) -> Tuple[bool, str]:\n        \"\"\"A coroutine for running LVS.\n\n        Parameters\n        ----------\n        lib_name : str\n            library name.\n        cell_name : str\n            cell_name\n        **kwargs : Any\n            optional keyword arguments.  
See Checker class for details.\n            LVS parameters should be specified as lvs_params.\n\n        Returns\n        -------\n        value : bool\n            True if LVS succeeds\n        log_fname : str\n            name of the LVS log file.\n        \"\"\"\n        if self.checker is None:\n            raise Exception('LVS/RCX is disabled.')\n\n        kwargs['params'] = kwargs.pop('lvs_params', None)\n        return await self.checker.async_run_lvs(lib_name, cell_name, **kwargs)\n\n    async def async_run_rcx(self,  # type: DbAccess\n                            lib_name: str,\n                            cell_name: str,\n                            create_schematic: bool = True,\n                            **kwargs: Any\n                            ) -> Tuple[Union[bool, Optional[str]], str]:\n        \"\"\"Run RCX on the given cell.\n\n        The behavior and the first return value of this method depends on the\n        input arguments.  The second return argument will always be the RCX\n        log file name.\n\n        If create_schematic is True, this method will run RCX, then if it succeeds,\n        create a schematic of the extracted netlist in the database.  It then returns\n        a boolean value which will be True if RCX succeeds.\n\n        If create_schematic is False, this method will run RCX, then return a string\n        which is the extracted netlist filename. If RCX failed, None will be returned\n        instead.\n\n        Parameters\n        ----------\n        lib_name : str\n            library name.\n        cell_name : str\n            cell_name\n        create_schematic : bool\n            True to automatically create extracted schematic in database if RCX\n            is successful and it is supported.\n        **kwargs : Any\n            optional keyword arguments.  
See Checker class for details.\n            RCX parameters should be specified as rcx_params.\n\n        Returns\n        -------\n        value : Union[bool, Optional[str]]\n            The return value, as described.\n        log_fname : str\n            name of the RCX log file.\n        \"\"\"\n        kwargs['params'] = kwargs.pop('rcx_params', None)\n        netlist, log_fname = await self.checker.async_run_rcx(lib_name, cell_name, **kwargs)\n\n        return self._process_rcx_output(netlist, log_fname, lib_name, cell_name, create_schematic)\n\n    async def async_export_layout(self, lib_name: str, cell_name: str,\n                                  out_file: str, *args: Any, **kwargs: Any) -> str:\n        \"\"\"Export layout.\n\n        Parameters\n        ----------\n        lib_name : str\n            library name.\n        cell_name : str\n            cell name.\n        out_file : str\n            output file name.\n        *args : Any\n            optional list arguments.\n        **kwargs : Any\n            optional keyword arguments.  See Checker class for details.\n\n        Returns\n        -------\n        log_fname : str\n            log file name.  
Empty if task cancelled.\n        \"\"\"\n        if self.checker is None:\n            raise Exception('layout export is disabled.')\n\n        return await self.checker.async_export_layout(lib_name, cell_name, out_file,\n                                                      *args, **kwargs)\n\n    def import_design_library(self, lib_name, dsn_db, new_lib_path):\n        \"\"\"Import all design templates in the given library from CAD database.\n\n        Parameters\n        ----------\n        lib_name : str\n            name of the library.\n        dsn_db : ModuleDB\n            the design database object.\n        new_lib_path: str\n            location to import new libraries to.\n        \"\"\"\n        imported_cells = set()\n        for cell_name in self.get_cells_in_library(lib_name):\n            self._import_design(lib_name, cell_name, imported_cells, dsn_db, new_lib_path)\n\n    def import_sch_cellview(self, lib_name, cell_name, dsn_db, new_lib_path):\n        \"\"\"Import the given schematic and symbol template into Python.\n\n       This import process is done recursively.\n\n       Parameters\n       ----------\n       lib_name : str\n           library name.\n       cell_name : str\n           cell name.\n        dsn_db : ModuleDB\n            the design database object.\n        new_lib_path: str\n            location to import new libraries to.\n       \"\"\"\n        imported_cells = set()\n        self._import_design(lib_name, cell_name, imported_cells, dsn_db, new_lib_path)\n\n    def _import_design(self, lib_name, cell_name, imported_cells, dsn_db, new_lib_path):\n        \"\"\"Recursive helper for import_design_library.\n        \"\"\"\n        # check if we already imported this schematic\n        key = '%s__%s' % (lib_name, cell_name)\n        if key in imported_cells:\n            return\n        imported_cells.add(key)\n\n        # create root directory if missing\n        root_path = dsn_db.get_library_path(lib_name)\n        if 
root_path is None:\n            root_path = new_lib_path\n            dsn_db.append_library(lib_name, new_lib_path)\n\n        package_path = os.path.join(root_path, lib_name)\n        python_file = os.path.join(package_path, '%s.py' % cell_name)\n        yaml_file = os.path.join(package_path, 'netlist_info', '%s.yaml' % cell_name)\n        yaml_dir = os.path.dirname(yaml_file)\n        if not os.path.exists(yaml_dir):\n            os.makedirs(yaml_dir)\n            write_file(os.path.join(package_path, '__init__.py'), '\\n',\n                       mkdir=False)\n\n        # update netlist file\n        content = self.parse_schematic_template(lib_name, cell_name)\n        sch_info = yaml.load(content, Loader=yaml.Loader)\n        try:\n            write_file(yaml_file, content)\n        except IOError:\n            print('Warning: cannot write to %s.' % yaml_file)\n\n        # generate new design module file if necessary.\n        if not os.path.exists(python_file):\n            content = self.get_python_template(lib_name, cell_name,\n                                               self.db_config.get('prim_table', {}))\n            write_file(python_file, content + '\\n', mkdir=False)\n\n        # recursively import all children\n        for inst_name, inst_attrs in sch_info['instances'].items():\n            inst_lib_name = inst_attrs['lib_name']\n            if inst_lib_name not in self.exc_libs:\n                inst_cell_name = inst_attrs['cell_name']\n                self._import_design(inst_lib_name, inst_cell_name, imported_cells, dsn_db,\n                                    new_lib_path)\n\n    def instantiate_schematic(self, lib_name, content_list, lib_path=''):\n        \"\"\"Create the given schematics in CAD database.\n\n        Parameters\n        ----------\n        lib_name : str\n            name of the new library to put the concrete schematics.\n        content_list : Sequence[Any]\n            list of schematics to create.\n        lib_path : 
str\n            the path to create the library in.  If empty, use default location.\n        \"\"\"\n        template_list, change_list = [], []\n        for content in content_list:\n            if content is not None:\n                master_lib, master_cell, impl_cell, pin_map, inst_map, new_pins = content\n\n                # add to template list\n                template_list.append([master_lib, master_cell, impl_cell])\n\n                # construct change object\n                change = dict(\n                    name=impl_cell,\n                    pin_map=dict_to_item_list(pin_map),\n                    inst_list=format_inst_map(inst_map),\n                    new_pins=new_pins,\n                )\n                change_list.append(change)\n\n        self.create_implementation(lib_name, template_list, change_list, lib_path=lib_path)\n"
  },
  {
    "path": "bag/interface/ocean.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This module implements bag's interaction with an ocean simulator.\n\"\"\"\n\nfrom typing import TYPE_CHECKING, Dict, Any, Optional\n\nimport os\n\nimport bag.io\nfrom .simulator import SimProcessManager\n\nif TYPE_CHECKING:\n    from .simulator import ProcInfo\n\n\nclass OceanInterface(SimProcessManager):\n    \"\"\"This class handles interaction with Ocean simulators.\n\n    Parameters\n    ----------\n    tmp_dir : str\n        temporary file directory for SimAccess.\n    sim_config : Dict[str, Any]\n        the simulation configuration dictionary.\n    \"\"\"\n\n    def __init__(self, tmp_dir, sim_config):\n        # type: (str, Dict[str, Any]) -> None\n        \"\"\"Initialize a new SkillInterface object.\n        \"\"\"\n        SimProcessManager.__init__(self, tmp_dir, sim_config)\n\n    def format_parameter_value(self, param_config, precision):\n        # type: (Dict[str, Any], int) -> str\n        \"\"\"Format the given parameter value as a string.\n\n        To support both single value parameter and parameter sweeps, each parameter value is\n        represented as a string instead of simple floats.  This method will cast a parameter\n        configuration (which can either be a single value or a sweep) to a\n        simulator-specific string.\n\n        Parameters\n        ----------\n        param_config: Dict[str, Any]\n            a dictionary that describes this parameter value.\n\n            4 formats are supported.  
This is best explained by example.\n\n            single value:\n            dict(type='single', value=1.0)\n\n            sweep a given list of values:\n            dict(type='list', values=[1.0, 2.0, 3.0])\n\n            linear sweep with inclusive start, inclusive stop, and step size:\n            dict(type='linstep', start=1.0, stop=3.0, step=1.0)\n\n            logarithmic sweep with given number of points per decade:\n            dict(type='decade', start=1.0, stop=10.0, num=10)\n\n        precision : int\n            the parameter value precision.\n\n        Returns\n        -------\n        param_str : str\n            a string representation of param_config\n        \"\"\"\n\n        fmt = '%.{}e'.format(precision)\n        swp_type = param_config['type']\n        if swp_type == 'single':\n            return fmt % param_config['value']\n        elif swp_type == 'list':\n            return ' '.join((fmt % val for val in param_config['values']))\n        elif swp_type == 'linstep':\n            syntax = '{From/To}Linear:%s:%s:%s{From/To}' % (fmt, fmt, fmt)\n            return syntax % (param_config['start'], param_config['step'], param_config['stop'])\n        elif swp_type == 'decade':\n            syntax = '{From/To}Decade:%s:%s:%s{From/To}' % (fmt, '%d', fmt)\n            return syntax % (param_config['start'], param_config['num'], param_config['stop'])\n        else:\n            raise Exception('Unsupported param_config: %s' % param_config)\n\n    def _get_ocean_info(self, save_dir, script_fname, log_fname):\n        \"\"\"Private helper function that launches ocean process.\"\"\"\n        # get the simulation command.\n        sim_kwargs = self.sim_config['kwargs']\n        ocn_cmd = sim_kwargs['command']\n        env = sim_kwargs.get('env', None)\n        cwd = sim_kwargs.get('cwd', None)\n        sim_cmd = [ocn_cmd, '-nograph', '-replay', script_fname, '-log', log_fname]\n\n        if cwd is None:\n            # set working directory to BAG_WORK_DIR 
if None\n            cwd = os.environ['BAG_WORK_DIR']\n\n        # create empty log file to make sure it exists.\n        return sim_cmd, log_fname, env, cwd, save_dir\n\n    def setup_sim_process(self, lib, cell, outputs, precision, sim_tag):\n        # type: (str, str, Dict[str, str], int, Optional[str]) -> ProcInfo\n\n        sim_tag = sim_tag or 'BagSim'\n        job_options = self.sim_config['job_options']\n        init_file = self.sim_config['init_file']\n        view = self.sim_config['view']\n        state = self.sim_config['state']\n\n        # format job options as skill list of string\n        job_opt_str = \"'( \"\n        for key, val in job_options.items():\n            job_opt_str += '\"%s\" \"%s\" ' % (key, val)\n        job_opt_str += \" )\"\n\n        # create temporary save directory and log/script names\n        save_dir = bag.io.make_temp_dir(prefix='%s_data' % sim_tag, parent_dir=self.tmp_dir)\n        log_fname = os.path.join(save_dir, 'ocn_output.log')\n        script_fname = os.path.join(save_dir, 'run.ocn')\n\n        # setup ocean simulation script\n        script = self.render_file_template('run_simulation.ocn',\n                                           dict(\n                                               lib=lib,\n                                               cell=cell,\n                                               view=view,\n                                               state=state,\n                                               init_file=init_file,\n                                               save_dir=save_dir,\n                                               precision=precision,\n                                               sim_tag=sim_tag,\n                                               outputs=outputs,\n                                               job_opt_str=job_opt_str,\n                                           ))\n        bag.io.write_file(script_fname, script)\n\n        return self._get_ocean_info(save_dir, 
script_fname, log_fname)\n\n    def setup_load_process(self, lib, cell, hist_name, outputs, precision):\n        # type: (str, str, str, Dict[str, str], int) -> ProcInfo\n\n        init_file = self.sim_config['init_file']\n        view = self.sim_config['view']\n\n        # create temporary save directory and log/script names\n        save_dir = bag.io.make_temp_dir(prefix='%s_data' % hist_name, parent_dir=self.tmp_dir)\n        log_fname = os.path.join(save_dir, 'ocn_output.log')\n        script_fname = os.path.join(save_dir, 'run.ocn')\n\n        # setup ocean load script\n        script = self.render_file_template('load_results.ocn',\n                                           dict(\n                                               lib=lib,\n                                               cell=cell,\n                                               view=view,\n                                               init_file=init_file,\n                                               save_dir=save_dir,\n                                               precision=precision,\n                                               hist_name=hist_name,\n                                               outputs=outputs,\n                                           ))\n        bag.io.write_file(script_fname, script)\n\n        # launch ocean\n        return self._get_ocean_info(save_dir, script_fname, log_fname)\n"
  },
  {
    "path": "bag/interface/server.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This class defines SkillOceanServer, a server that handles skill/ocean requests.\n\nThe SkillOceanServer listens for skill/ocean requests from bag.  Skill commands will\nbe forwarded to Virtuoso for execution, and Ocean simulation requests will be handled\nby starting an Ocean subprocess.  It also provides utility for bag to query simulation\nprogress and allows parallel simulation.\n\nClient-side communication:\n\nthe client will always send a request object, which is a python dictionary.\nThis script processes the request and sends the appropriate commands to\nVirtuoso.\n\nVirtuoso side communication:\n\nTo ensure this process receive all the data from Virtuoso properly, Virtuoso\nwill print a single line of integer indicating the number of bytes to read.\nThen, virtuoso will print out exactly that many bytes of data, followed by\na newline (to flush the standard input).  This script handles that protcol\nand will strip the newline before sending result back to client.\n\"\"\"\n\nimport traceback\nimport numpy as np\n\nfrom .. import io\n\n\ndef _object_to_skill_file_helper(py_obj, file_obj):\n    \"\"\"Recursive helper function for object_to_skill_file\n\n    Parameters\n    ----------\n    py_obj : any\n        the object to convert.\n    file_obj : file\n        the file object to write to.  
Must be created with io\n        package so that encodings are handled correctly.\n    \"\"\"\n    # fix potential raw bytes\n    py_obj = io.fix_string(py_obj)\n    if isinstance(py_obj, str):\n        # string\n        file_obj.write(py_obj)\n    elif isinstance(py_obj, (float, np.floating)):\n        # prepend type flag\n        file_obj.write('#float {:f}'.format(py_obj))\n    elif isinstance(py_obj, bool):\n        bool_val = 1 if py_obj else 0\n        file_obj.write('#bool {:d}'.format(bool_val))\n    elif isinstance(py_obj, (int, np.integer)):\n        # prepend type flag\n        file_obj.write('#int {:d}'.format(py_obj))\n    elif isinstance(py_obj, list) or isinstance(py_obj, tuple):\n        # a list of other objects.\n        file_obj.write('#list\\n')\n        for val in py_obj:\n            _object_to_skill_file_helper(val, file_obj)\n            file_obj.write('\\n')\n        file_obj.write('#end')\n    elif isinstance(py_obj, dict):\n        # disembodied property lists\n        file_obj.write('#prop_list\\n')\n        for key, val in py_obj.items():\n            file_obj.write('{}\\n'.format(key))\n            _object_to_skill_file_helper(val, file_obj)\n            file_obj.write('\\n')\n        file_obj.write('#end')\n    else:\n        raise Exception('Unsupported python data type: %s' % type(py_obj))\n\n\ndef object_to_skill_file(py_obj, file_obj):\n    \"\"\"Write the given python object to a file readable by Skill.\n\n    Write a Python object to file that can be parsed into equivalent\n    skill object by Virtuoso.  Currently only strings, lists, and dictionaries\n    are supported.\n\n    Parameters\n    ----------\n    py_obj : any\n        the object to convert.\n    file_obj : file\n        the file object to write to.  
Must be created with io\n        package so that encodings are handled correctly.\n    \"\"\"\n    _object_to_skill_file_helper(py_obj, file_obj)\n    file_obj.write('\\n')\n\n\nbag_proc_prompt = 'BAG_PROMPT>>> '\n\n\nclass SkillServer(object):\n    \"\"\"A server that handles skill commands.\n\n    This server is started and ran by virtuoso.  It listens for commands from bag\n    from a ZMQ socket, then pass the command to virtuoso.  It then gather the result\n    and send it back to bag.\n\n    Parameters\n    ----------\n    router : :class:`bag.interface.ZMQRouter`\n        the :class:`~bag.interface.ZMQRouter` object used for socket communication.\n    virt_in : file\n        the virtuoso input file.  Must be created with io\n        package so that encodings are handled correctly.\n    virt_out : file\n        the virtuoso output file.  Must be created with io\n        package so that encodings are handled correctly.\n    tmpdir : str or None\n        if given, will save all temporary files to this folder.\n    \"\"\"\n\n    def __init__(self, router, virt_in, virt_out, tmpdir=None):\n        \"\"\"Create a new SkillOceanServer instance.\n        \"\"\"\n        self.handler = router\n        self.virt_in = virt_in\n        self.virt_out = virt_out\n\n        # create a directory for all temporary files\n        self.dtmp = io.make_temp_dir('skillTmp', parent_dir=tmpdir)\n\n    def run(self):\n        \"\"\"Starts this server.\n        \"\"\"\n        while not self.handler.is_closed():\n            # check if socket received message\n            if self.handler.poll_for_read(5):\n                req = self.handler.recv_obj()\n                if isinstance(req, dict) and 'type' in req:\n                    if req['type'] == 'exit':\n                        self.close()\n                    elif req['type'] == 'skill':\n                        expr, out_file = self.process_skill_request(req)\n                        if expr is not None:\n                       
     # send expression to virtuoso\n                            self.send_skill(expr)\n                            msg = self.recv_skill()\n                            self.process_skill_result(msg, out_file)\n                    else:\n                        msg = '*Error* bag server error: bag request:\\n%s' % str(req)\n                        self.handler.send_obj(dict(type='error', data=msg))\n                else:\n                    msg = '*Error* bag server error: bag request:\\n%s' % str(req)\n                    self.handler.send_obj(dict(type='error', data=msg))\n\n    def send_skill(self, expr):\n        \"\"\"Sends expr to virtuoso for evaluation.\n\n        Parameters\n        ----------\n        expr : string\n            the skill expression.\n        \"\"\"\n        self.virt_in.write(expr)\n        self.virt_in.flush()\n\n    def recv_skill(self):\n        \"\"\"Receive response from virtuoso\"\"\"\n        num_bytes = int(self.virt_out.readline())\n        msg = self.virt_out.read(num_bytes)\n        if msg[-1] == '\\n':\n            msg = msg[:-1]\n        return msg\n\n    def close(self):\n        \"\"\"Close this server.\"\"\"\n        self.handler.close()\n\n    def process_skill_request(self, request):\n        \"\"\"Process the given skill request.\n\n        Based on the given request object, returns the skill expression\n        to be evaluated by Virtuoso.  This method creates temporary\n        files for long input arguments and long output.\n\n        Parameters\n        ----------\n        request : dict\n            the request object.\n\n        Returns\n        -------\n        expr : str or None\n            expression to be evaluated by Virtuoso.  
If None, an error occurred and\n            nothing needs to be evaluated\n        out_file : str or None\n            if not None, the result will be written to this file.\n        \"\"\"\n        try:\n            expr = request['expr']\n            input_files = request['input_files'] or {}\n            out_file = request['out_file']\n        except KeyError as e:\n            msg = '*Error* bag server error: %s' % str(e)\n            self.handler.send_obj(dict(type='error', data=msg))\n            return None, None\n\n        fname_dict = {}\n        # write input parameters to files\n        for key, val in input_files.items():\n            with io.open_temp(prefix=key, delete=False, dir=self.dtmp) as file_obj:\n                fname_dict[key] = '\"%s\"' % file_obj.name\n                # noinspection PyBroadException\n                try:\n                    object_to_skill_file(val, file_obj)\n                except Exception:\n                    stack_trace = traceback.format_exc()\n                    msg = '*Error* bag server error: \\n%s' % stack_trace\n                    self.handler.send_obj(dict(type='error', data=msg))\n                    return None, None\n\n        # generate output file\n        if out_file:\n            with io.open_temp(prefix=out_file, delete=False, dir=self.dtmp) as file_obj:\n                fname_dict[out_file] = '\"%s\"' % file_obj.name\n                out_file = file_obj.name\n\n        # fill in parameters to expression\n        expr = expr.format(**fname_dict)\n        return expr, out_file\n\n    def process_skill_result(self, msg, out_file=None):\n        \"\"\"Process the given skill output, then send result to socket.\n\n        Parameters\n        ----------\n        msg : str\n            skill expression evaluation output.\n        out_file : str or None\n            if not None, read result from this file.\n        \"\"\"\n        # read file if needed, and only if there are no errors.\n        if 
msg.startswith('*Error*'):\n            # an error occurred, forward error message directly\n            self.handler.send_obj(dict(type='error', data=msg))\n        elif out_file:\n            # read result from file.\n            try:\n                msg = io.read_file(out_file)\n                data = dict(type='str', data=msg)\n            except IOError:\n                stack_trace = traceback.format_exc()\n                msg = '*Error* error reading file:\\n%s' % stack_trace\n                data = dict(type='error', data=msg)\n            self.handler.send_obj(data)\n        else:\n            # return output from virtuoso directly\n            self.handler.send_obj(dict(type='str', data=msg))\n"
  },
  {
    "path": "bag/interface/simulator.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This module handles high level simulation routines.\n\nThis module defines SimAccess, which provides methods to run simulations\nand retrieve results.\n\"\"\"\n\nfrom typing import Dict, Optional, Sequence, Any, Tuple, Union\n\nimport abc\n\nfrom ..io import make_temp_dir\nfrom ..concurrent.core import SubProcessManager\nfrom .base import InterfaceBase\n\n\nclass SimAccess(InterfaceBase, abc.ABC):\n    \"\"\"A class that interacts with a simulator.\n\n    Parameters\n    ----------\n    tmp_dir : str\n        temporary file directory for SimAccess.\n    sim_config : Dict[str, Any]\n        the simulation configuration dictionary.\n    \"\"\"\n\n    def __init__(self, tmp_dir, sim_config):\n        # type: (str, Dict[str, Any]) -> None\n        InterfaceBase.__init__(self)\n\n        self.sim_config = sim_config\n        self.tmp_dir = make_temp_dir('simTmp', parent_dir=tmp_dir)\n\n    @abc.abstractmethod\n    def format_parameter_value(self, param_config, precision):\n        # type: (Dict[str, Any], int) -> str\n        \"\"\"Format the given parameter value as a string.\n\n        To support both single value parameter and parameter sweeps, each parameter value is represented\n        as a string instead of simple floats.  This method will cast a parameter configuration (which can\n        either be a single value or a sweep) to a simulator-specific string.\n\n        Parameters\n        ----------\n        param_config: Dict[str, Any]\n            a dictionary that describes this parameter value.\n\n            4 formats are supported.  
This is best explained by example.\n\n            single value:\n            dict(type='single', value=1.0)\n\n            sweep a given list of values:\n            dict(type='list', values=[1.0, 2.0, 3.0])\n\n            linear sweep with inclusive start, inclusive stop, and step size:\n            dict(type='linstep', start=1.0, stop=3.0, step=1.0)\n\n            logarithmic sweep with given number of points per decade:\n            dict(type='decade', start=1.0, stop=10.0, num=10)\n\n        precision : int\n            the parameter value precision.\n\n        Returns\n        -------\n        param_str : str\n            a string representation of param_config\n        \"\"\"\n        return \"\"\n\n    @abc.abstractmethod\n    async def async_run_simulation(self, tb_lib, tb_cell, outputs, precision=6, sim_tag=None):\n        # type: (str, str, Dict[str, str], int, Optional[str]) -> str\n        \"\"\"A coroutine for simulation a testbench.\n\n        Parameters\n        ----------\n        tb_lib : str\n            testbench library name.\n        tb_cell : str\n            testbench cell name.\n        outputs : Dict[str, str]\n            the variable-to-expression dictionary.\n        precision : int\n            precision of floating point results.\n        sim_tag : Optional[str]\n            a descriptive tag describing this simulation run.\n\n        Returns\n        -------\n        value : str\n            the save directory path.\n        \"\"\"\n        pass\n\n    @abc.abstractmethod\n    async def async_load_results(self, lib, cell, hist_name, outputs, precision=6):\n        # type: (str, str, str, Dict[str, str], int) -> str\n        \"\"\"A coroutine for loading simulation results.\n\n        Parameters\n        ----------\n        lib : str\n            testbench library name.\n        cell : str\n            testbench cell name.\n        hist_name : str\n            simulation history name.\n        outputs : Dict[str, str]\n            the 
variable-to-expression dictionary.\n        precision : int\n            precision of floating point results.\n\n        Returns\n        -------\n        value : str\n            the save directory path.\n        \"\"\"\n        pass\n\n\nProcInfo = Tuple[Union[str, Sequence[str]], str, Optional[Dict[str, str]], Optional[str], str]\n\n\nclass SimProcessManager(SimAccess, metaclass=abc.ABCMeta):\n    \"\"\"An implementation of :class:`SimAccess` using :class:`SubProcessManager`.\n\n    Parameters\n    ----------\n    tmp_dir : str\n        temporary file directory for SimAccess.\n    sim_config : Dict[str, Any]\n        the simulation configuration dictionary.\n    \"\"\"\n\n    def __init__(self, tmp_dir, sim_config):\n        # type: (str, Dict[str, Any]) -> None\n        SimAccess.__init__(self, tmp_dir, sim_config)\n        cancel_timeout = sim_config.get('cancel_timeout_ms', None)\n        if cancel_timeout is not None:\n            cancel_timeout /= 1e3\n        self._manager = SubProcessManager(max_workers=sim_config.get('max_workers', None),\n                                          cancel_timeout=cancel_timeout)\n\n    @abc.abstractmethod\n    def setup_sim_process(self, lib, cell, outputs, precision, sim_tag):\n        # type: (str, str, Dict[str, str], int, Optional[str]) -> ProcInfo\n        \"\"\"This method performs any setup necessary to configure a simulation process.\n\n        Parameters\n        ----------\n        lib : str\n            testbench library name.\n        cell : str\n            testbench cell name.\n        outputs : Dict[str, str]\n            the variable-to-expression dictionary.\n        precision : int\n            precision of floating point results.\n        sim_tag : Optional[str]\n            a descriptive tag describing this simulation run.\n\n        Returns\n        -------\n        args : Union[str, Sequence[str]]\n            command to run, as string or list of string arguments.\n        log : str\n            log 
file name.\n        env : Optional[Dict[str, str]]\n            environment variable dictionary.  None to inherit from parent.\n        cwd : Optional[str]\n            working directory path.  None to inherit from parent.\n        save_dir : str\n            save directory path.\n        \"\"\"\n        return '', '', None, None, ''\n\n    @abc.abstractmethod\n    def setup_load_process(self, lib, cell, hist_name, outputs, precision):\n        # type: (str, str, str, Dict[str, str], int) -> ProcInfo\n        \"\"\"This method performs any setup necessary to configure a result loading process.\n\n        Parameters\n        ----------\n        lib : str\n            testbench library name.\n        cell : str\n            testbench cell name.\n        hist_name : str\n            simulation history name.\n        outputs : Dict[str, str]\n            the variable-to-expression dictionary.\n        precision : int\n            precision of floating point results.\n\n        Returns\n        -------\n        args : Union[str, Sequence[str]]\n            command to run, as string or list of string arguments.\n        log : str\n            log file name.\n        env : Optional[Dict[str, str]]\n            environment variable dictionary.  None to inherit from parent.\n        cwd : Optional[str]\n            working directory path.  
None to inherit from parent.\n        save_dir : str\n            save directory path.\n        \"\"\"\n        return '', '', None, None, ''\n\n    async def async_run_simulation(self, tb_lib: str, tb_cell: str,\n                                   outputs: Dict[str, str],\n                                   precision: int = 6,\n                                   sim_tag: Optional[str] = None) -> str:\n        args, log, env, cwd, save_dir = self.setup_sim_process(tb_lib, tb_cell, outputs, precision,\n                                                               sim_tag)\n\n        await self._manager.async_new_subprocess(args, log, env=env, cwd=cwd)\n        return save_dir\n\n    async def async_load_results(self, lib: str, cell: str, hist_name: str,\n                                 outputs: Dict[str, str],\n                                 precision: int = 6) -> str:\n        args, log, env, cwd, save_dir = self.setup_load_process(lib, cell, hist_name, outputs,\n                                                                precision)\n\n        await self._manager.async_new_subprocess(args, log, env=env, cwd=cwd)\n        return save_dir\n"
  },
  {
    "path": "bag/interface/skill.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This module implements all CAD database manipulations using skill commands.\n\"\"\"\n\nfrom typing import List, Dict, Optional, Any, Tuple\n\nimport os\nimport shutil\nimport yaml\n\nfrom ..io.common import get_encoding, fix_string\nfrom ..io.file import open_temp\nfrom .database import DbAccess\n\ntry:\n    import cybagoa\nexcept ImportError:\n    cybagoa = None\n\n\ndef _dict_to_pcell_params(table):\n    \"\"\"Convert given parameter dictionary to pcell parameter list format.\n\n    Parameters\n    ----------\n    table : dict[str, any]\n        the parameter dictionary.\n\n    Returns\n    -------\n    param_list : list[any]\n        the Pcell parameter list\n    \"\"\"\n    param_list = []\n    for key, val in table.items():\n        # python 2/3 compatibility: convert raw bytes to string.\n        val = fix_string(val)\n        if isinstance(val, float):\n            param_list.append([key, \"float\", val])\n        elif isinstance(val, str):\n            # unicode string\n            param_list.append([key, \"string\", val])\n        elif isinstance(val, int):\n            param_list.append([key, \"int\", val])\n        elif isinstance(val, bool):\n            param_list.append([key, \"bool\", val])\n        else:\n            raise Exception('Unsupported parameter %s with type: %s' % (key, type(val)))\n\n    return param_list\n\n\ndef to_skill_list_str(pylist):\n    \"\"\"Convert given python list to a skill list string.\n\n    Parameters\n    ----------\n    pylist : list[str]\n        a list of string.\n\n    Returns\n    -------\n    ans : str\n        a string representation of the equivalent skill list.\n\n    \"\"\"\n    content = ' '.join(('\"%s\"' % val for val in pylist))\n    return \"'( %s )\" % content\n\n\ndef _handle_reply(reply):\n    \"\"\"Process the given reply.\"\"\"\n    if isinstance(reply, dict):\n        if reply.get('type') == 'error':\n            if 'data' not in reply:\n              
  raise Exception('Unknown reply format: %s' % reply)\n            raise VirtuosoException(reply['data'])\n        else:\n            try:\n                return reply['data']\n            except Exception:\n                raise Exception('Unknown reply format: %s' % reply)\n    else:\n        raise Exception('Unknown reply format: %s' % reply)\n\n\nclass VirtuosoException(Exception):\n    \"\"\"Exception raised when Virtuoso returns an error.\"\"\"\n\n    def __init__(self, *args, **kwargs):\n        # noinspection PyArgumentList\n        Exception.__init__(self, *args, **kwargs)\n\n\nclass SkillInterface(DbAccess):\n    \"\"\"Skill interface between bag and Virtuoso.\n\n    This class sends all bag's database and simulation operations to\n    an external Virtuoso process, then get the result from it.\n\n    Parameters\n    ----------\n    dealer : :class:`bag.interface.ZMQDealer`\n        the socket used to communicate with :class:`~bag.interface.SkillOceanServer`.\n    tmp_dir : string\n        temporary file directory for DbAccess.\n    db_config : dict[str, any]\n        the database configuration dictionary.\n    \"\"\"\n\n    def __init__(self, dealer, tmp_dir, db_config):\n        \"\"\"Initialize a new SkillInterface object.\n        \"\"\"\n        DbAccess.__init__(self, tmp_dir, db_config)\n        self.handler = dealer\n        self._rcx_jobs = {}\n\n    def close(self):\n        \"\"\"Terminate the database server gracefully.\n        \"\"\"\n        self.handler.send_obj(dict(type='exit'))\n        self.handler.close()\n\n    def _eval_skill(self, expr, input_files=None, out_file=None):\n        # type: (str, Optional[Dict[str, Any]], Optional[str]) -> str\n        \"\"\"Send a request to evaluate the given skill expression.\n\n        Because Virtuoso has a limit on the input/output data (< 4096 bytes),\n        if your input is large, you need to write it to a file and have\n        Virtuoso open the file to parse it.  
Similarly, if you expect a\n        large output, you need to make Virtuoso write the result to the\n        file, then read it yourself.  The parameters input_files and\n        out_file help you achieve this functionality.\n\n        For example, if you need to evaluate \"skill_fun(arg fname)\", where\n        arg is a file containing the list [1 2 3], and fname is the output\n        file name, you will call this function with:\n\n        expr = \"skill_fun({arg} {fname})\"\n        input_files = { \"arg\": [1 2 3] }\n        out_file = \"fname\"\n\n        the bag server will then a temporary file for arg and fname, write\n        the list [1 2 3] into the file for arg, call Virtuoso, then read\n        the output file fname and return the result.\n\n        Parameters\n        ----------\n        expr : string\n            the skill expression to evaluate.\n        input_files : dict[string, any] or None\n            A dictionary of input files content.\n        out_file : string or None\n            the output file name argument in expr.\n\n        Returns\n        -------\n        result : str\n            a string representation of the result.\n\n        Raises\n        ------\n        :class: `.VirtuosoException` :\n            if virtuoso encounters errors while evaluating the expression.\n        \"\"\"\n        request = dict(\n            type='skill',\n            expr=expr,\n            input_files=input_files,\n            out_file=out_file,\n        )\n\n        self.handler.send_obj(request)\n        reply = self.handler.recv_obj()\n        return _handle_reply(reply)\n\n    def parse_schematic_template(self, lib_name, cell_name):\n        \"\"\"Parse the given schematic template.\n\n        Parameters\n        ----------\n        lib_name : str\n            name of the library.\n        cell_name : str\n            name of the cell.\n\n        Returns\n        -------\n        template : str\n            the content of the netlist structure 
file.\n        \"\"\"\n        cmd = 'parse_cad_sch( \"%s\" \"%s\" {netlist_info} )' % (lib_name, cell_name)\n        return self._eval_skill(cmd, out_file='netlist_info')\n\n    def get_cells_in_library(self, lib_name):\n        \"\"\"Get a list of cells in the given library.\n\n        Returns an empty list if the given library does not exist.\n\n        Parameters\n        ----------\n        lib_name : str\n            the library name.\n\n        Returns\n        -------\n        cell_list : list[str]\n            a list of cells in the library\n        \"\"\"\n        cmd = 'get_cells_in_library_file( \"%s\" {cell_file} )' % lib_name\n        return self._eval_skill(cmd, out_file='cell_file').split()\n\n    def create_library(self, lib_name, lib_path=''):\n        \"\"\"Create a new library if one does not exist yet.\n\n        Parameters\n        ----------\n        lib_name : string\n            the library name.\n        lib_path : string\n            directory to create the library in.  If Empty, use default location.\n        \"\"\"\n        lib_path = lib_path or self.default_lib_path\n        tech_lib = self.db_config['schematic']['tech_lib']\n        return self._eval_skill('create_or_erase_library('\n                                '\"{}\" \"{}\" \"{}\" nil)'.format(lib_name, tech_lib, lib_path))\n\n    def create_implementation(self, lib_name, template_list, change_list, lib_path=''):\n        \"\"\"Create implementation of a design in the CAD database.\n\n        Parameters\n        ----------\n        lib_name : str\n            implementation library name.\n        template_list : list\n            a list of schematic templates to copy to the new library.\n        change_list :\n            a list of changes to be performed on each copied templates.\n        lib_path : str\n            directory to create the library in.  
If Empty, use default location.\n        \"\"\"\n        lib_path = lib_path or self.default_lib_path\n        tech_lib = self.db_config['schematic']['tech_lib']\n\n        if cybagoa is not None and self.db_config['schematic'].get('use_cybagoa', False):\n            cds_lib_path = os.environ.get('CDS_LIB_PATH', './cds.lib')\n            sch_name = 'schematic'\n            sym_name = 'symbol'\n            encoding = get_encoding()\n            # release write locks\n            cell_view_list = []\n            for _, _, cell_name in template_list:\n                cell_view_list.append((cell_name, sch_name))\n                cell_view_list.append((cell_name, sym_name))\n            self.release_write_locks(lib_name, cell_view_list)\n\n            # create library in case it doesn't exist\n            self.create_library(lib_name, lib_path)\n\n            # write schematic\n            with cybagoa.PyOASchematicWriter(cds_lib_path, lib_name, encoding) as writer:\n                for temp_info, change_info in zip(template_list, change_list):\n                    sch_cell = cybagoa.PySchCell(temp_info[0], temp_info[1], temp_info[2], encoding)\n                    for old_pin, new_pin in change_info['pin_map']:\n                        sch_cell.rename_pin(old_pin, new_pin)\n                    for inst_name, rinst_list in change_info['inst_list']:\n                        sch_cell.add_inst(inst_name, lib_name, rinst_list)\n                    writer.add_sch_cell(sch_cell)\n                writer.create_schematics(sch_name, sym_name)\n\n            copy = 'nil'\n        else:\n            copy = \"'t\"\n\n        in_files = {'template_list': template_list,\n                    'change_list': change_list}\n        sympin = to_skill_list_str(self.db_config['schematic']['sympin'])\n        ipin = to_skill_list_str(self.db_config['schematic']['ipin'])\n        opin = to_skill_list_str(self.db_config['schematic']['opin'])\n        iopin = 
to_skill_list_str(self.db_config['schematic']['iopin'])\n        simulators = to_skill_list_str(self.db_config['schematic']['simulators'])\n        cmd = ('create_concrete_schematic( \"%s\" \"%s\" \"%s\" {template_list} '\n               '{change_list} %s %s %s %s %s %s)' % (lib_name, tech_lib, lib_path,\n                                                     sympin, ipin, opin, iopin, simulators, copy))\n\n        return self._eval_skill(cmd, input_files=in_files)\n\n    def configure_testbench(self, tb_lib, tb_cell):\n        \"\"\"Update testbench state for the given testbench.\n\n        This method fill in process-specific information for the given testbench.\n\n        Parameters\n        ----------\n        tb_lib : str\n            testbench library name.\n        tb_cell : str\n            testbench cell name.\n\n        Returns\n        -------\n        cur_env : str\n            the current simulation environment.\n        envs : list[str]\n            a list of available simulation environments.\n        parameters : dict[str, str]\n            a list of testbench parameter values, represented as string.\n        \"\"\"\n\n        tb_config = self.db_config['testbench']\n\n        cmd = ('instantiate_testbench(\"{tb_cell}\" \"{targ_lib}\" ' +\n               '\"{config_libs}\" \"{config_views}\" \"{config_stops}\" ' +\n               '\"{default_corner}\" \"{corner_file}\" {def_files} ' +\n               '\"{tech_lib}\" {result_file})')\n        cmd = cmd.format(tb_cell=tb_cell,\n                         targ_lib=tb_lib,\n                         config_libs=tb_config['config_libs'],\n                         config_views=tb_config['config_views'],\n                         config_stops=tb_config['config_stops'],\n                         default_corner=tb_config['default_env'],\n                         corner_file=tb_config['env_file'],\n                         def_files=to_skill_list_str(tb_config['def_files']),\n                         
tech_lib=self.db_config['schematic']['tech_lib'],\n                         result_file='{result_file}')\n        output = yaml.load(self._eval_skill(cmd, out_file='result_file'), Loader=yaml.Loader)\n        return tb_config['default_env'], output['corners'], output['parameters'], output['outputs']\n\n    def get_testbench_info(self, tb_lib, tb_cell):\n        \"\"\"Returns information about an existing testbench.\n\n        Parameters\n        ----------\n        tb_lib : str\n            testbench library.\n        tb_cell : str\n            testbench cell.\n\n        Returns\n        -------\n        cur_envs : list[str]\n            the current simulation environments.\n        envs : list[str]\n            a list of available simulation environments.\n        parameters : dict[str, str]\n            a list of testbench parameter values, represented as string.\n        outputs : dict[str, str]\n            a list of testbench output expressions.\n        \"\"\"\n        cmd = 'get_testbench_info(\"{tb_lib}\" \"{tb_cell}\" {result_file})'\n        cmd = cmd.format(tb_lib=tb_lib,\n                         tb_cell=tb_cell,\n                         result_file='{result_file}')\n        output = yaml.load(self._eval_skill(cmd, out_file='result_file'), Loader=yaml.Loader)\n        return output['enabled_corners'], output['corners'], output['parameters'], output['outputs']\n\n    def update_testbench(self,\n                         lib,  # type: str\n                         cell,  # type: str\n                         parameters,  # type: Dict[str, str]\n                         sim_envs,  # type: List[str]\n                         config_rules,  # type: List[List[str]]\n                         env_parameters  # type: List[List[Tuple[str, str]]]\n                         ):\n        # type: (...) 
-> None\n        \"\"\"Update the given testbench configuration.\n\n        Parameters\n        ----------\n        lib : str\n            testbench library.\n        cell : str\n            testbench cell.\n        parameters : Dict[str, str]\n            testbench parameters.\n        sim_envs : List[str]\n            list of enabled simulation environments.\n        config_rules : List[List[str]]\n            config view mapping rules, list of (lib, cell, view) rules.\n        env_parameters : List[List[Tuple[str, str]]]\n            list of param/value list for each simulation environment.\n        \"\"\"\n\n        cmd = ('modify_testbench(\"%s\" \"%s\" {conf_rules} {run_opts} '\n               '{sim_envs} {params} {env_params})' % (lib, cell))\n        in_files = {'conf_rules': config_rules,\n                    'run_opts': [],\n                    'sim_envs': sim_envs,\n                    'params': list(parameters.items()),\n                    'env_params': list(zip(sim_envs, env_parameters)),\n                    }\n        self._eval_skill(cmd, input_files=in_files)\n\n    def instantiate_layout_pcell(self, lib_name, cell_name, view_name,\n                                 inst_lib, inst_cell, params, pin_mapping):\n        \"\"\"Create a layout cell with a single pcell instance.\n\n        Parameters\n        ----------\n        lib_name : str\n            layout library name.\n        cell_name : str\n            layout cell name.\n        view_name : str\n            layout view name, default is \"layout\".\n        inst_lib : str\n            pcell library name.\n        inst_cell : str\n            pcell cell name.\n        params : dict[str, any]\n            the parameter dictionary.\n        pin_mapping: dict[str, str]\n            the pin mapping dictionary.\n        \"\"\"\n        # create library in case it doesn't exist\n        self.create_library(lib_name)\n\n        # convert parameter dictionary to pcell params list format\n        
param_list = _dict_to_pcell_params(params)\n\n        cmd = ('create_layout_with_pcell( \"%s\" \"%s\" \"%s\" \"%s\" \"%s\"'\n               '{params} {pin_mapping} )' % (lib_name, cell_name,\n                                             view_name, inst_lib, inst_cell))\n        in_files = {'params': param_list, 'pin_mapping': list(pin_mapping.items())}\n        return self._eval_skill(cmd, input_files=in_files)\n\n    def instantiate_layout(self, lib_name, view_name, via_tech, layout_list):\n        \"\"\"Create a batch of layouts.\n\n        Parameters\n        ----------\n        lib_name : str\n            layout library name.\n        view_name : str\n            layout view name.\n        via_tech : str\n            via technology library name.\n        layout_list : list[any]\n            a list of layouts to create\n        \"\"\"\n        # create library in case it doesn't exist\n        self.create_library(lib_name)\n\n        # convert parameter dictionary to pcell params list format\n        new_layout_list = []\n        for info_list in layout_list:\n            new_inst_list = []\n            for inst in info_list[1]:\n                if 'params' in inst:\n                    inst = inst.copy()\n                    inst['params'] = _dict_to_pcell_params(inst['params'])\n                if 'master_key' in inst:\n                    # SKILL inteface cannot handle master_key info, so we remove it\n                    # from InstanceInfo if we find it\n                    inst.pop('master_key')\n                new_inst_list.append(inst)\n\n            new_info_list = info_list[:]\n            new_info_list[1] = new_inst_list\n            new_layout_list.append(new_info_list)\n\n        cmd = 'create_layout( \"%s\" \"%s\" \"%s\" {layout_list} )' % (lib_name, view_name, via_tech)\n        in_files = {'layout_list': new_layout_list}\n        return self._eval_skill(cmd, input_files=in_files)\n\n    def release_write_locks(self, lib_name, cell_view_list):\n  
      \"\"\"Release write locks from all the given cells.\n\n        Parameters\n        ----------\n        lib_name : string\n            the library name.\n        cell_view_list : List[(string, string)]\n            list of cell/view name tuples.\n        \"\"\"\n        cmd = 'release_write_locks( \"%s\" {cell_view_list} )' % lib_name\n        in_files = {'cell_view_list': cell_view_list}\n        return self._eval_skill(cmd, input_files=in_files)\n\n    def create_schematic_from_netlist(self, netlist, lib_name, cell_name,\n                                      sch_view=None, **kwargs):\n        # type: (str, str, str, Optional[str], **Any) -> None\n        \"\"\"Create a schematic from a netlist.\n\n        This is mainly used to create extracted schematic from an extracted netlist.\n\n        Parameters\n        ----------\n        netlist : str\n            the netlist file name.\n        lib_name : str\n            library name.\n        cell_name : str\n            cell_name\n        sch_view : Optional[str]\n            schematic view name.  
The default value is implemendation dependent.\n        **kwargs : Any\n            additional implementation-dependent arguments.\n        \"\"\"\n        calview_config = self.db_config.get('calibreview', None)\n        use_calibreview = self.db_config.get('use_calibreview', True)\n        if calview_config is not None and use_calibreview:\n            # create calibre view from extraction netlist\n            cell_map = calview_config['cell_map']\n            sch_view = sch_view or calview_config['view_name']\n\n            # create calibre view config file\n            tmp_params = dict(\n                netlist_file=netlist,\n                lib_name=lib_name,\n                cell_name=cell_name,\n                calibre_cellmap=cell_map,\n                view_name=sch_view,\n            )\n            content = self.render_file_template('calibreview_setup.txt', tmp_params)\n            with open_temp(prefix='calview', dir=self.tmp_dir, delete=False) as f:\n                fname = f.name\n                f.write(content)\n\n            # delete old calibre view\n            cmd = f'delete_cellview( \"{lib_name}\" \"{cell_name}\" \"{sch_view}\" )'\n            self._eval_skill(cmd)\n            # make extracted schematic\n            calibre_root_version = os.path.basename(os.environ['MGC_HOME']).split('.')[0]\n            calibre_year = int(calibre_root_version[-4:])\n            if calibre_year > 2011:\n                cmd = f'mgc_rve_load_setup_file( \"{fname}\" )'\n            else:\n                cmd0 = f'mgc_eview_globals->outputLibrary = \"{lib_name}\"'\n                self._eval_skill(cmd0)\n                cmd0 = f'mgc_eview_globals->schematicLibrary = \"{lib_name}\"'\n                self._eval_skill(cmd0)\n                cmd0 = f'mgc_eview_globals->cellMapFile = \"{cell_map}\"'\n                self._eval_skill(cmd0)\n                cmd0 = 'mgc_eview_globals->createUnmatchedTerminals = t'\n                self._eval_skill(cmd0)\n                
# cmd0 = 'mgc_eview_globals->preserveDeviceCase = t'\n                # self._eval_skill(cmd0)\n                cmd0 = 'mgc_eview_globals->devicePlacementArrayed = t'\n                self._eval_skill(cmd0)\n                cmd0 = 'mgc_eview_globals->showCalviewDlg = nil'\n                self._eval_skill(cmd0)\n                cmd = f'mgc_rve_create_cellview(\"{netlist}\")'\n            self._eval_skill(cmd)\n        else:\n            # get netlists to copy\n            netlist_dir = os.path.dirname(netlist)\n            netlist_files = self.checker.get_rcx_netlists(lib_name, cell_name)\n            if not netlist_files:\n                # some error checking.  Shouldn't be needed but just in case\n                raise ValueError('RCX did not generate any netlists')\n\n            # copy netlists to a \"netlist\" subfolder in the CAD database\n            cell_dir = self.get_cell_directory(lib_name, cell_name)\n            targ_dir = os.path.join(cell_dir, 'netlist')\n            os.makedirs(targ_dir, exist_ok=True)\n            for fname in netlist_files:\n                shutil.copy(os.path.join(netlist_dir, fname), targ_dir)\n\n            # create symbolic link as aliases\n            symlink = os.path.join(targ_dir, 'netlist')\n            try:\n                os.remove(symlink)\n            except FileNotFoundError:\n                pass\n            os.symlink(netlist_files[0], symlink)\n\n    def get_cell_directory(self, lib_name, cell_name):\n        # type: (str, str) -> str\n        \"\"\"Returns the directory name of the given cell.\n\n        Parameters\n        ----------\n        lib_name : str\n            library name.\n        cell_name : str\n            cell name.\n\n        Returns\n        -------\n        cell_dir : str\n            path to the cell directory.\n        \"\"\"\n        # use yaml.load to remove outermost quotation marks\n        lib_dir = yaml.load(self._eval_skill(f'get_lib_directory( \"{lib_name}\" )'), 
Loader=yaml.Loader)\n        if not lib_dir:\n            raise ValueError('Library %s not found.' % lib_name)\n        return os.path.join(lib_dir, cell_name)\n\n    def create_verilog_view(self, verilog_file, lib_name, cell_name, **kwargs):\n        # type: (str, str, str, **Any) -> None\n        \"\"\"Create a verilog view for mix-signal simulation.\n\n        Parameters\n        ----------\n        verilog_file : str\n            the verilog file name.\n        lib_name : str\n            library name.\n        cell_name : str\n            cell name.\n        **kwargs : Any\n            additional implementation-dependent arguments.\n        \"\"\"\n        # delete old verilog view\n        cmd = 'delete_cellview( \"%s\" \"%s\" \"verilog\" )' % (lib_name, cell_name)\n        self._eval_skill(cmd)\n        cmd = 'schInstallHDL(\"%s\" \"%s\" \"verilog\" \"%s\" t)' % (lib_name, cell_name, verilog_file)\n        self._eval_skill(cmd)\n"
  },
  {
    "path": "bag/interface/templates/LICENSE",
    "content": "BSD 3-Clause License\n\nCopyright (c) 2018, Regents of the University of California\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n  list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n  this list of conditions and the following disclaimer in the documentation\n  and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n  contributors may be used to endorse or promote products derived from\n  this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "bag/interface/templates/Module.pyi",
    "content": "# -*- coding: utf-8 -*-\n\nfrom typing import Dict\n\nimport os\nimport pkg_resources\n\nfrom bag.design.module import Module\n\n\n# noinspection PyPep8Naming\nclass {{ lib_name }}__{{ cell_name }}(Module):\n    \"\"\"Module for library {{ lib_name }} cell {{ cell_name }}.\n\n    Fill in high level description here.\n    \"\"\"\n    yaml_file = pkg_resources.resource_filename(__name__,\n                                                os.path.join('netlist_info',\n                                                             '{{ cell_name }}.yaml'))\n\n\n    def __init__(self, database, parent=None, prj=None, **kwargs):\n        Module.__init__(self, database, self.yaml_file, parent=parent, prj=prj, **kwargs)\n\n    @classmethod\n    def get_params_info(cls):\n        # type: () -> Dict[str, str]\n        \"\"\"Returns a dictionary from parameter names to descriptions.\n\n        Returns\n        -------\n        param_info : Optional[Dict[str, str]]\n            dictionary from parameter names to descriptions.\n        \"\"\"\n        return dict(\n        )\n\n    def design(self):\n        \"\"\"To be overridden by subclasses to design this module.\n\n        This method should fill in values for all parameters in\n        self.parameters.  To design instances of this module, you can\n        call their design() method or any other ways you coded.\n\n        To modify schematic structure, call:\n\n        rename_pin()\n        delete_instance()\n        replace_instance_master()\n        reconnect_instance_terminal()\n        restore_instance()\n        array_instance()\n        \"\"\"\n        pass\n"
  },
  {
    "path": "bag/interface/templates/PrimModule.pyi",
    "content": "# -*- coding: utf-8 -*-\n\nimport os\nimport pkg_resources\n\nfrom bag.design.module import {{ module_name }}\n\n\n# noinspection PyPep8Naming\nclass {{ lib_name }}__{{ cell_name }}({{ module_name }}):\n    \"\"\"design module for {{ lib_name }}__{{ cell_name }}.\n    \"\"\"\n\n    yaml_file = pkg_resources.resource_filename(__name__,\n                                                os.path.join('netlist_info',\n                                                             '{{ cell_name }}.yaml'))\n\n    def __init__(self, database, parent=None, prj=None, **kwargs):\n        {{ module_name }}.__init__(self, database, self.yaml_file, parent=parent, prj=prj, **kwargs)\n"
  },
  {
    "path": "bag/interface/templates/calibreview_setup.txt",
    "content": "calibre_view_netlist_file : {{ netlist_file }}\noutput_library : {{ lib_name }}\nschematic_library : {{ lib_name }}\ncell_name : {{ cell_name }}\ncellmap_file : {{ calibre_cellmap }}\ncalibreview_log_file : ./calview.log\ncalibreview_name : {{ view_name }}\ncalibreview_type : schematic\ncreate_terminals : all\npreserve_device_case : on\nexecute_callbacks : off\nreset_properties : (m=1)\nmagnify_devices_by : 1\nmagnify_parasitics_by : 1\ndevice_placement : arrayed\nparasitic_placement : arrayed\nshow_parasitic_polygons : off\nopen_calibreview : don't_open\ngenerate_spectre_netlist : off\n"
  },
  {
    "path": "bag/interface/templates/load_results.ocn",
    "content": "lib = \"{{ lib }}\"\ncell = \"{{ cell }}\"\nview = \"{{ view }}\"\ninit_file = \"{{ init_file }}\"\nsave_dir = \"{{ save_dir }}\"\nprecision = {{ precision }}\nhist_name = \"{{ hist_name }}\"\n\n; initialize environment variables\nwhen( strlen(init_file) > 0\n    load(init_file)\n)\n\n; save parametric waveform values as a flattened list.\nprocedure( save_param_wave_values(wave fmt line_fmt fhandle)\n    let( (vec wave_cls tmp_val)\n        if( drIsWaveform(wave) then\n            ; 1D waveform, simply print all values\n            vec = drGetWaveformYVec(wave)\n            wave_cls = className(classOf(drGetElem(vec 0)))\n            if( wave_cls == 'adtComplex then\n                ; print complex\n                for( i 0 drVectorLength(vec) - 1\n                    tmp_val = drGetElem(vec i)\n                    if( imag(tmp_val) < 0 then\n                        ; fix for negative imaginary part.\n                        sprintf(line_fmt \"%s%sj\\n\" fmt fmt)\n                    else\n                        sprintf(line_fmt \"%s+%sj\\n\" fmt fmt)\n                    )\n                    fprintf(fhandle line_fmt real(tmp_val) imag(tmp_val))\n                )\n            else\n                ; print real value\n                for( i 0 drVectorLength(vec) - 1\n                    fprintf(fhandle line_fmt drGetElem(vec i))\n                )\n            )\n        else\n            ; parametric waveform, recurse\n            foreach(val sweepValues(wave)\n                save_param_wave_values(famValue(wave val) fmt line_fmt fhandle)\n            )\n        )\n    )\n)\n\n\n; define save functions\n; save a waveform to file.\n; the given waveform will be saved to the file \"<directory>/<var_name>.data\" as a flattened 1D array.\n; the sweep parameter names of this waveform will be saved to the file \"<directory>/<var_name>.sweep\",\n; and the values of each parameter will be saved to the file \"<directory>/<swp_var>.info\".\n; 
data_list_struct is a tconc struct of (waveform_name, waveform_data_file_handle) pairs.\nprocedure( save_waveform(directory var_name wave precision data_list_struct)\n    let( (fmt line_fmt wave_cls entry data_file sweep_file fhandle\n          name_list val_list sweep_df iter_wave)\n        sprintf(fmt \"%%.%de\" precision)\n        sprintf(line_fmt \"%s\\n\" fmt)\n        wave_cls = className(classOf(wave))\n\n        if( not( entry = assoc( var_name cdar(data_list_struct) ) ) then\n            ; first time saving this variable\n            sprintf(data_file \"%s/%s.data\" directory var_name)\n            sprintf(sweep_file \"%s/%s.sweep\" directory var_name)\n            cond(\n            ( or( drIsWaveform(wave) drIsParamWave(wave) )\n                ; save sweep names\n                fhandle = outfile( sweep_file \"w\" )\n                name_list = sweepNames(wave)\n                foreach(swp_name name_list\n                    fprintf(fhandle \"%s\\n\" swp_name)\n                )\n                close(fhandle)\n\n                ; save sweep values\n                iter_wave = wave\n                foreach(swp_name name_list\n                    ; save output most sweep values\n                    val_list = sweepValues(iter_wave)\n                    sprintf(sweep_df \"%s/%s.info\" directory swp_name)\n                    unless( isFile(sweep_df)\n                       fhandle = outfile( sweep_df \"w\" )\n                       foreach(val val_list\n                           fprintf(fhandle line_fmt val)\n                       )\n                       close(fhandle)\n                    )\n                    ; remove outer sweep\n                    when( drIsParamWave(iter_wave)\n                        iter_wave = famValue(iter_wave car(val_list))\n                    )\n                )\n\n                fhandle = outfile( data_file \"w\" )\n            )\n            ( or( wave_cls == 'flonum wave_cls == 'fixnum wave_cls == 'adtComplex )\n   
             ; scalar data, make empty sweep file\n                fhandle = outfile( sweep_file \"w\")\n                close(fhandle)\n                fhandle = outfile( data_file \"w\" )\n            )\n            ( t\n                ; unsupported type\n                error(\"Unsupported data for output %s: %A\\n\" var_name wave)\n            )\n            )\n            tconc( data_list_struct list(var_name fhandle) )\n        else\n            fhandle = cadr(entry)\n        )\n\n        ; append data to file\n        if( or( drIsWaveform(wave) drIsParamWave(wave) ) then\n            save_param_wave_values(wave fmt line_fmt fhandle)\n        else\n            ; print single point value\n            if( wave_cls == 'adtComplex then\n                ; print complex\n                if( imag(wave) < 0 then\n                    ; fix for negative imaginary part.\n                    sprintf(line_fmt \"%s%sj\\n\" fmt fmt)\n                else\n                    sprintf(line_fmt \"%s+%sj\\n\" fmt fmt)\n                )\n                fprintf(fhandle line_fmt real(wave) imag(wave))\n            else\n                fprintf(fhandle line_fmt wave)\n            )\n        )\n        't\n    )\n)\n\nocnSetXLMode()\nocnxlTargetCellView(lib cell view)\n\n; load result database\nrdb = axlReadHistoryResDB(hist_name)\nunless( rdb\n    error(\"Cannot find database associated with name %s\" hist_name)\n)\npoint_list = rdb->points()\n\nsprintf(sweep_fname \"%s/sweep.info\" save_dir)\nsweep_f = outfile( sweep_fname \"w\" )\n\n; write sweep parameters title\nwhen( point_list\n    point = car(point_list)\n    test_list = point->tests()\n    when( test_list\n        corner = car(test_list)->cornerName\n        par_names = setof( name point->params(?corner corner ?sortBy 'name)~>name\n                           and( (name != \"corModelSpec\") (name != \"temperature\") ) )\n\n        fprintf(sweep_f \"corner \")\n        fprintf(sweep_f \"%s\\n\" buildString( par_names \" \" 
))\n    )\n)\n\n; iterate through each design point and save data.\ndata_list_struct = tconc(nil 0)\ntotal_points = length(point_list)\ncur_idx = 1\nforeach(point point_list\n    printf(\"*Info* saving process: %d/%d\\n\" cur_idx total_points)\n    cur_idx = cur_idx + 1\n    foreach(test point->tests()\n        ; write param values to file.\n        corner = test->cornerName\n        params = setof(par point->params(?corner corner ?sortBy 'name)\n                       and( (par->name != \"corModelSpec\") (par->name != \"temperature\") ) )\n        param_vals = mapcar( lambda( (par) par->valueAsString(?digits precision ?notation 'eng) ) params )\n        fprintf(sweep_f \"%s \" corner)\n        fprintf(sweep_f \"%s\\n\" buildString( param_vals \" \" ))\n\n        ; open results\n        openResults(test->resultsDir)\n\n        {% for var, expr in outputs.items() %}\n        tmp = {{ expr }}\n        save_waveform( save_dir \"{{ var }}\" tmp precision data_list_struct )\n        {% endfor %}\n\n    )\n)\n\n; close opened files\nclose(sweep_f)\nforeach( entry cdar(data_list_struct)\n    close(cadr(entry))\n)\n\nocnxlEndXLMode()\n\nexit()\n"
  },
  {
    "path": "bag/interface/templates/run_simulation.ocn",
    "content": "lib = \"{{ lib }}\"\ncell = \"{{ cell }}\"\nview = \"{{ view }}\"\nstate = \"{{ state }}\"\ninit_file = \"{{ init_file }}\"\nsave_dir = \"{{ save_dir }}\"\nprecision = {{ precision }}\nsim_tag = \"{{ sim_tag }}\"\njob_opt_list = {{ job_opt_str }}\n\n; initialize environment variables\nwhen( strlen(init_file) > 0\n    load(init_file)\n)\n\n; save parametric waveform values as a flattened list.\nprocedure( save_param_wave_values(wave fmt line_fmt fhandle)\n    let( (vec wave_cls tmp_val)\n        if( drIsWaveform(wave) then\n            ; 1D waveform, simply print all values\n            vec = drGetWaveformYVec(wave)\n            wave_cls = className(classOf(drGetElem(vec 0)))\n            if( wave_cls == 'adtComplex then\n                ; print complex\n                for( i 0 drVectorLength(vec) - 1\n                    tmp_val = drGetElem(vec i)\n                    if( imag(tmp_val) < 0 then\n                        ; fix for negative imaginary part.\n                        sprintf(line_fmt \"%s%sj\\n\" fmt fmt)\n                    else\n                        sprintf(line_fmt \"%s+%sj\\n\" fmt fmt)\n                    )\n                    fprintf(fhandle line_fmt real(tmp_val) imag(tmp_val))\n                )\n            else\n                ; print real value\n                for( i 0 drVectorLength(vec) - 1\n                    fprintf(fhandle line_fmt drGetElem(vec i))\n                )\n            )\n        else\n            ; parametric waveform, recurse\n            foreach(val sweepValues(wave)\n                save_param_wave_values(famValue(wave val) fmt line_fmt fhandle)\n            )\n        )\n    )\n)\n\n\n; define save functions\n; save a waveform to file.\n; the given waveform will be saved to the file \"<directory>/<var_name>.data\" as a flattened 1D array.\n; the sweep parameter names of this waveform will be saved to the file \"<directory>/<var_name>.sweep\",\n; and the values of each parameter will be saved to 
the file \"<directory>/<swp_var>.info\".\n; data_list_struct is a tconc struct of (waveform_name, waveform_data_file_handle) pairs.\nprocedure( save_waveform(directory var_name wave precision data_list_struct)\n    let( (fmt line_fmt wave_cls entry data_file sweep_file fhandle\n          name_list val_list sweep_df iter_wave)\n        sprintf(fmt \"%%.%de\" precision)\n        sprintf(line_fmt \"%s\\n\" fmt)\n        wave_cls = className(classOf(wave))\n\n        if( not( entry = assoc( var_name cdar(data_list_struct) ) ) then\n            ; first time saving this variable\n            sprintf(data_file \"%s/%s.data\" directory var_name)\n            sprintf(sweep_file \"%s/%s.sweep\" directory var_name)\n            cond(\n            ( or( drIsWaveform(wave) drIsParamWave(wave) )\n                ; save sweep names\n                fhandle = outfile( sweep_file \"w\" )\n                name_list = sweepNames(wave)\n                foreach(swp_name name_list\n                    fprintf(fhandle \"%s\\n\" swp_name)\n                )\n                close(fhandle)\n\n                ; save sweep values\n                iter_wave = wave\n                foreach(swp_name name_list\n                    ; save output most sweep values\n                    val_list = sweepValues(iter_wave)\n                    sprintf(sweep_df \"%s/%s.info\" directory swp_name)\n                    unless( isFile(sweep_df)\n                       fhandle = outfile( sweep_df \"w\" )\n                       foreach(val val_list\n                           fprintf(fhandle line_fmt val)\n                       )\n                       close(fhandle)\n                    )\n                    ; remove outer sweep\n                    when( drIsParamWave(iter_wave)\n                        iter_wave = famValue(iter_wave car(val_list))\n                    )\n                )\n\n                fhandle = outfile( data_file \"w\" )\n            )\n            ( or( wave_cls == 'flonum 
wave_cls == 'fixnum wave_cls == 'adtComplex )\n                ; scalar data, make empty sweep file\n                fhandle = outfile( sweep_file \"w\")\n                close(fhandle)\n                fhandle = outfile( data_file \"w\" )\n            )\n            ( t\n                ; unsupported type\n                error(\"Unsupported data for output %s: %A\\n\" var_name wave)\n            )\n            )\n            tconc( data_list_struct list(var_name fhandle) )\n        else\n            fhandle = cadr(entry)\n        )\n\n        ; append data to file\n        if( or( drIsWaveform(wave) drIsParamWave(wave) ) then\n            save_param_wave_values(wave fmt line_fmt fhandle)\n        else\n            ; print single point value\n            if( wave_cls == 'adtComplex then\n                ; print complex\n                if( imag(wave) < 0 then\n                    ; fix for negative imaginary part.\n                    sprintf(line_fmt \"%s%sj\\n\" fmt fmt)\n                else\n                    sprintf(line_fmt \"%s+%sj\\n\" fmt fmt)\n                )\n                fprintf(fhandle line_fmt real(wave) imag(wave))\n            else\n                fprintf(fhandle line_fmt wave)\n            )\n        )\n        't\n    )\n)\n\nocnSetXLMode()\nocnxlTargetCellView(lib cell view)\nocnxlLoadSetupState(state 'overwrite)\nocnxlHistoryPrefix(sim_tag)\nocnxlJobSetup(job_opt_list)\nprintf(\"*Info* Creating netlist...\\n\")\ncreateNetlist( ?recreateAll t ?display nil )\nprintf(\"*Info* Starting simulation...\\n\")\nocnxlRun(?mode 'sweepAndCorners ?nominalCornerEnabled nil ?allCornersEnabled 't\n         ?allSweepsEnabled 't)\n\n; load result database\nhist_name = ocnxlGetCurrentHistory()\nrdb = axlReadHistoryResDB(hist_name)\npoint_list = rdb->points()\n\nsprintf(sweep_fname \"%s/sweep.info\" save_dir)\nsweep_f = outfile( sweep_fname \"w\" )\n\n; write sweep parameters title\nwhen( point_list\n    point = car(point_list)\n    test_list = 
point->tests()\n    when( test_list\n        corner = car(test_list)->cornerName\n        par_names = setof( name point->params(?corner corner ?sortBy 'name)~>name\n                           and( (name != \"corModelSpec\") (name != \"temperature\") ) )\n\n        fprintf(sweep_f \"corner \")\n        fprintf(sweep_f \"%s\\n\" buildString( par_names \" \" ))\n    )\n)\n\n; iterate through each design point and save data.\ndata_list_struct = tconc(nil 0)\ntotal_points = length(point_list)\ncur_idx = 1\nforeach(point point_list\n    printf(\"*Info* saving process: %d/%d\\n\" cur_idx total_points)\n    cur_idx = cur_idx + 1\n    foreach(test point->tests()\n        ; write param values to file.\n        corner = test->cornerName\n        params = setof(par point->params(?corner corner ?sortBy 'name)\n                       and( (par->name != \"corModelSpec\") (par->name != \"temperature\") ) )\n        param_vals = mapcar( lambda( (par) par->valueAsString(?digits precision ?notation 'eng) ) params )\n        fprintf(sweep_f \"%s \" corner)\n        fprintf(sweep_f \"%s\\n\" buildString( param_vals \" \" ))\n\n        ; open results\n        openResults(test->resultsDir)\n\n        {% for var, expr in outputs.items() %}\n        tmp = {{ expr }}\n        save_waveform( save_dir \"{{ var }}\" tmp precision data_list_struct )\n        {% endfor %}\n\n    )\n)\n\n; close opened files\nclose(sweep_f)\nforeach( entry cdar(data_list_struct)\n    close(cadr(entry))\n)\n\nocnxlEndXLMode()\n\nexit()\n"
  },
  {
    "path": "bag/interface/zmqwrapper.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This module defines various wrapper around ZMQ sockets.\"\"\"\n\nfrom datetime import datetime\nimport zlib\nimport pprint\nfrom pathlib import Path\nimport os\n\nimport yaml\nimport zmq\n\nfrom .. import io\n\n\nclass ZMQDealer(object):\n    \"\"\"A class that interacts with a ZMQ dealer socket.\n\n    a dealer socket is an asynchronous socket that can issue multiple requests\n    without needing to wait for an reply.  This class encapsulates the ZMQ\n    socket details and provide more convenient API to use.\n\n    Parameters\n    ----------\n    port : int\n        the port to connect to.\n    pipeline : int\n        number of messages allowed in a pipeline.  Only affects file\n        transfer performance.\n    host : str\n        the host to connect to.\n    log_file : str or None\n        the log file.  None to disable logging.\n    \"\"\"\n\n    def __init__(self, port, pipeline=100, host='localhost', log_file=None):\n        \"\"\"Create a new ZMQDealer object.\n        \"\"\"\n        context = zmq.Context.instance()\n        # noinspection PyUnresolvedReferences\n        self.socket = context.socket(zmq.DEALER)\n        self.socket.hwm = pipeline\n        self.socket.connect('tcp://%s:%d' % (host, port))\n        self._log_file = log_file\n        self.poller = zmq.Poller()\n        # noinspection PyUnresolvedReferences\n        self.poller.register(self.socket, zmq.POLLIN)\n\n        if self._log_file is not None:\n            self._log_file = Path(self._log_file).resolve()\n            # If log file directory does not exists, create it\n            log_dir: Path = self._log_file.parent\n            log_dir.mkdir(parents=True, exist_ok=True)\n            # time stamp the file\n            now = datetime.now()\n            time_stamp = now.strftime('%Y%m%d_%H%M%S%f')\n            ext = self._log_file.suffix\n            self._log_file = str(log_dir / f'{self._log_file.stem}_{time_stamp}{ext}')\n\n    def 
log_msg(self, msg):\n        \"\"\"Log the given message\"\"\"\n        if self._log_file is not None:\n            io.write_file(self._log_file, '%s\\n' % msg, append=True)\n\n    def log_obj(self, msg, obj):\n        \"\"\"Log the given object\"\"\"\n        if self._log_file is not None:\n            obj_str = pprint.pformat(obj)\n            io.write_file(self._log_file, '%s\\n%s\\n' % (msg, obj_str), append=True)\n\n    def close(self):\n        \"\"\"Close the underlying socket.\"\"\"\n        self.socket.close()\n\n    def send_obj(self, obj):\n        \"\"\"Sends a python object using YAML serialization and zlib compression.\n\n        Parameters\n        ----------\n        obj : any\n            the object to send.\n        \"\"\"\n        p = io.to_bytes(yaml.dump(obj))\n        z = zlib.compress(p)\n        self.log_obj('sending data:', obj)\n        self.socket.send(z)\n\n    def recv_obj(self, timeout=None, enable_cancel=False):\n        \"\"\"Receive a python object, serialized with YAML and compressed with zlib.\n\n        Parameters\n        ----------\n        timeout : int or None\n            the timeout to wait in milliseconds.  If None, wait indefinitely.\n        enable_cancel : bool\n            If True, allows the user to press Ctrl-C to abort.  For this to work,\n            the other end must know how to process the stop request dictionary.\n        Returns\n        -------\n        obj : any\n            the received object.  None if timeout reached.\n        \"\"\"\n        try:\n            events = self.poller.poll(timeout=timeout)\n        except KeyboardInterrupt:\n            if not enable_cancel:\n                # re-raise exception if cancellation is not enabled.\n                raise\n            self.send_obj(dict(type='stop'))\n            print('Stop signal sent, waiting for reply.  
Press Ctrl-C again to force exit.')\n            try:\n                events = self.poller.poll(timeout=timeout)\n            except KeyboardInterrupt:\n                print('Force exiting.')\n                return None\n\n        if events:\n            data = self.socket.recv()\n            z = io.fix_string(zlib.decompress(data))\n            obj = yaml.load(z, Loader=yaml.Loader)\n            self.log_obj('received data:', obj)\n            return obj\n        else:\n            self.log_msg('timeout with %d ms reached.' % timeout)\n            return None\n\n    def recv_msg(self):\n        \"\"\"Receive a string message.\n\n        Returns\n        -------\n        msg : str\n            the received object.\n        \"\"\"\n        data = self.socket.recv()\n        self.log_msg('received message:\\n%s' % data)\n        return data\n\n\nclass ZMQRouter(object):\n    \"\"\"A class that interacts with a ZMQ router socket.\n\n    a router socket is an asynchronous socket that can receive multiple requests\n    without needing to issue an reply.  This class encapsulates the ZMQ socket\n    details and provide more convenient API to use.\n\n    Parameters\n    ----------\n    port : int or None\n        the port to connect to.  If None, then a random port between min_port and max_port\n        will be chosen.\n    min_port : int\n        the minimum random port number (inclusive).\n    max_port : int\n        the maximum random port number (exclusive).\n    pipeline : int\n        number of messages allowed in a pipeline.  Only affects file\n        transfer performance.\n    log_file : str or None\n        the log file.  
None to disable logging.\n    \"\"\"\n\n    def __init__(self, port=None, min_port=5000, max_port=9999, pipeline=100, log_file=None):\n        \"\"\"Create a new ZMQDealer object.\n        \"\"\"\n        context = zmq.Context.instance()\n        # noinspection PyUnresolvedReferences\n        self.socket = context.socket(zmq.ROUTER)\n        self.socket.hwm = pipeline\n        if port is not None:\n            self.socket.bind('tcp://*:%d' % port)\n            self.port = port\n        else:\n            self.port = self.socket.bind_to_random_port('tcp://*', min_port=min_port, max_port=max_port)\n        self.addr = None\n        self._log_file = log_file\n\n        if self._log_file is not None:\n            self._log_file = os.path.abspath(self._log_file)\n            # If log file directory does not exists, create it\n            log_dir = os.path.dirname(self._log_file)\n            if not os.path.exists(log_dir):\n                os.makedirs(log_dir)\n            # clears any existing log\n            if os.path.exists(self._log_file):\n                os.remove(self._log_file)\n\n    def get_port(self):\n        \"\"\"Returns the port number.\"\"\"\n        return self.port\n\n    def is_closed(self):\n        \"\"\"Returns True if this router is closed.\"\"\"\n        return self.socket.closed\n\n    def close(self):\n        \"\"\"Close the underlying socket.\"\"\"\n        self.socket.close()\n\n    def log_msg(self, msg):\n        \"\"\"Log the given message\"\"\"\n        if self._log_file is not None:\n            io.write_file(self._log_file, '%s\\n' % msg, append=True)\n\n    def log_obj(self, msg, obj):\n        \"\"\"Log the given object\"\"\"\n        if self._log_file is not None:\n            obj_str = pprint.pformat(obj)\n            io.write_file(self._log_file, '%s\\n%s\\n' % (msg, obj_str), append=True)\n\n    def send_msg(self, msg, addr=None):\n        \"\"\"Sends a string message\n\n        Parameters\n        ----------\n        msg : 
str\n            the message to send.\n        addr : str or None\n            the address to send the object to.  If None, send to last sender.\n        \"\"\"\n        addr = addr or self.addr\n        if addr is None:\n            warn_msg = '*WARNING* No receiver address specified.  Message not sent:\\n%s' % msg\n            self.log_msg(warn_msg)\n        else:\n            self.log_msg('sending message:\\n%s' % msg)\n            self.socket.send_multipart([addr, msg])\n\n    def send_obj(self, obj, addr=None):\n        \"\"\"Sends a python object using YAML serialization and zlib compression.\n\n        Parameters\n        ----------\n        obj : any\n            the object to send.\n        addr : str or None\n            the address to send the object to.  If None, send to last sender.\n        \"\"\"\n        addr = addr or self.addr\n        if addr is None:\n            warn_msg = '*WARNING* No receiver address specified.  Message not sent:'\n            self.log_obj(warn_msg, obj)\n        else:\n            p = io.to_bytes(yaml.dump(obj))\n            z = zlib.compress(p)\n            self.log_obj('sending data:', obj)\n            self.socket.send_multipart([addr, z])\n\n    def poll_for_read(self, timeout):\n        \"\"\"Poll this socket for given timeout for read event.\n\n        Parameters\n        ----------\n        timeout : int\n            timeout in milliseconds.\n\n        Returns\n        -------\n        status : int\n            nonzero value means that this socket is ready for read.\n        \"\"\"\n        return self.socket.poll(timeout=timeout)\n\n    def recv_obj(self):\n        \"\"\"Receive a python object, serialized with YAML and compressed with zlib.\n\n        Returns\n        -------\n        obj : any\n            the received object.\n        \"\"\"\n        self.addr, data = self.socket.recv_multipart()\n\n        z = io.fix_string(zlib.decompress(data))\n        obj = yaml.load(z, Loader=yaml.Loader)\n        
self.log_obj('received data:', obj)\n        return obj\n\n    def get_last_sender_addr(self):\n        \"\"\"Returns the address of the sender of last received message.\n\n        Returns\n        -------\n        addr : str\n            the last sender address\n        \"\"\"\n        return self.addr\n"
  },
  {
    "path": "bag/io/LICENSE",
    "content": "BSD 3-Clause License\n\nCopyright (c) 2018, Regents of the University of California\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n  list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n  this list of conditions and the following disclaimer in the documentation\n  and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n  contributors may be used to endorse or promote products derived from\n  this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "bag/io/__init__.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This package provides all IO related functionalities for BAG.\n\nMost importantly, this module sorts out all the bytes v.s. unicode differences\nand simplifies writing python2/3 compatible code.\n\"\"\"\n\nfrom .common import fix_string, to_bytes, set_encoding, get_encoding, \\\n    set_error_policy, get_error_policy\nfrom .sim_data import load_sim_results, save_sim_results, load_sim_file\nfrom .file import read_file, read_resource, read_yaml, read_yaml_env, readlines_iter, \\\n    write_file, make_temp_dir, open_temp, open_file, Pickle, Yaml\n\nfrom . import process\n\n__all__ = ['fix_string', 'to_bytes', 'set_encoding', 'get_encoding',\n           'set_error_policy', 'get_error_policy',\n           'load_sim_results', 'save_sim_results', 'load_sim_file',\n           'read_file', 'read_resource', 'read_yaml', 'read_yaml_env', 'readlines_iter',\n           'write_file', 'make_temp_dir', 'open_temp', 'open_file',\n           'Pickle', 'Yaml'\n           ]\n"
  },
  {
    "path": "bag/io/common.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This module contains some commonly used IO functions.\n\nIn particular, this module keeps track of BAG's system-wide encoding/decoding settings.\n\"\"\"\n\n# default BAG file encoding.\nbag_encoding = 'utf-8'\n# default codec error policy\nbag_codec_error = 'replace'\n\n\ndef fix_string(obj):\n    \"\"\"Fix the given potential string object to ensure python 2/3 compatibility.\n\n    If the given object is raw bytes, decode it into a string using\n    current encoding and return it.  Otherwise, just return the given object.\n\n    This method is useful for writing python 2/3 compatible code.\n\n    Parameters\n    ----------\n    obj :\n        any python object.\n\n    Returns\n    -------\n    val :\n        the given object, or a decoded string if the given object is bytes.\n    \"\"\"\n    if isinstance(obj, bytes):\n        obj = obj.decode(encoding=bag_encoding, errors=bag_codec_error)\n    return obj\n\n\ndef to_bytes(my_str):\n    \"\"\"Convert the given string to raw bytes.\n\n    Parameters\n    ----------\n    my_str : string\n        the string to encode to bytes.\n\n    Returns\n    -------\n    val : bytes\n        raw bytes of the string.\n    \"\"\"\n    return bytes(my_str.encode(encoding=bag_encoding, errors=bag_codec_error))\n\n\ndef set_encoding(new_encoding):\n    \"\"\"Sets the BAG input/output encoding.\n\n    Parameters\n    ----------\n    new_encoding : string\n        the new encoding name.\n    \"\"\"\n    global bag_encoding\n    if not isinstance(new_encoding, str):\n        raise Exception('encoding name must be string/unicode.')\n    bag_encoding = new_encoding\n\n\ndef get_encoding():\n    \"\"\"Returns the BAG input/output encoding.\n\n    Returns\n    -------\n    bag_encoding : unicode\n        the encoding name.\n    \"\"\"\n    return bag_encoding\n\n\ndef set_error_policy(new_policy):\n    \"\"\"Sets the error policy on encoding/decoding errors.\n\n    Parameters\n    
----------\n    new_policy : string\n        the new error policy name.  See codecs package documentation\n        for more information.\n    \"\"\"\n    global bag_codec_error\n    bag_codec_error = new_policy\n\n\ndef get_error_policy():\n    \"\"\"Returns the current BAG encoding/decoding error policy.\n\n    Returns\n    -------\n    policy : unicode\n        the current error policy name.\n    \"\"\"\n    return bag_codec_error\n"
  },
  {
    "path": "bag/io/file.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This module handles file related IO.\n\"\"\"\nfrom typing import Dict, Any\n\nimport os\nimport tempfile\nimport time\nimport pkg_resources\nimport codecs\nimport string\n\nimport yaml\nimport pickle\n\nfrom .common import bag_encoding, bag_codec_error\n\n\nclass Pickle:\n    \"\"\"\n    A global class for reading and writing Pickle format.\n    \"\"\"\n    @staticmethod\n    def save(obj, file, **kwargs) -> None:\n        with open(file, 'wb') as f:\n            pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)\n\n    @staticmethod\n    def load(file, **kwargs):\n        with open(file, 'rb') as f:\n            return pickle.load(f)\n\n\nclass Yaml:\n    \"\"\"\n    A global class for reading and writing yaml format\n    For backward compatibility some module functions may overlap with this.\n    \"\"\"\n    @staticmethod\n    def save(obj, file, **kwargs) -> None:\n        with open(file, 'w') as f:\n            yaml.dump(obj, f)\n\n    @staticmethod\n    def load(file, **kwargs):\n        with open(file, 'r') as f:\n            return yaml.load(f, Loader=yaml.Loader)\n\n\ndef open_file(fname, mode):\n    \"\"\"Opens a file with the correct encoding interface.\n\n    Use this method if you need to have a file handle.\n\n    Parameters\n    ----------\n    fname : string\n        the file name.\n    mode : string\n        the mode, either 'r', 'w', or 'a'.\n\n    Returns\n    -------\n    file_obj : file\n        a file objects that reads/writes string with the BAG system encoding.\n    \"\"\"\n    if mode != 'r' and mode != 'w' and mode != 'a':\n        raise ValueError(\"Only supports 'r', 'w', or 'a' mode.\")\n    return open(fname, mode, encoding=bag_encoding, errors=bag_codec_error)\n\n\ndef read_file(fname):\n    \"\"\"Read the given file and return content as string.\n\n    Parameters\n    ----------\n    fname : string\n        the file name.\n\n    Returns\n    -------\n    content : unicode\n        the content 
as a unicode string.\n    \"\"\"\n    with open_file(fname, 'r') as f:\n        content = f.read()\n    return content\n\n\ndef readlines_iter(fname):\n    \"\"\"Iterate over lines in a file.\n\n    Parameters\n    ----------\n    fname : string\n        the file name.\n\n    Yields\n    ------\n    line : unicode\n        a line in the file.\n    \"\"\"\n    with open_file(fname, 'r') as f:\n        for line in f:\n            yield line\n\n\ndef read_yaml_env(fname):\n    # type: (str) -> Dict[str, Any]\n    \"\"\"Parse YAML file with environment variable substitution.\n\n    Parameters\n    ----------\n    fname : str\n        yaml file name.\n\n    Returns\n    -------\n    table : Dict[str, Any]\n        the yaml file as a dictionary.\n    \"\"\"\n    content = read_file(fname)\n    # substitute environment variables\n    content = string.Template(content).substitute(os.environ)\n    return yaml.load(content, Loader=yaml.Loader)\n\n\ndef read_yaml(fname):\n    \"\"\"Read the given file using YAML.\n\n    Parameters\n    ----------\n    fname : string\n        the file name.\n\n    Returns\n    -------\n    content : Any\n        the object returned by YAML.\n    \"\"\"\n    with open_file(fname, 'r') as f:\n        content = yaml.load(f, Loader=yaml.Loader)\n\n    return content\n\n\ndef read_resource(package, fname):\n    \"\"\"Read the given resource file and return content as string.\n\n    Parameters\n    ----------\n    package : string\n        the package name.\n    fname : string\n        the resource file name.\n\n    Returns\n    -------\n    content : unicode\n        the content as a unicode string.\n    \"\"\"\n    raw_content = pkg_resources.resource_string(package, fname)\n    return raw_content.decode(encoding=bag_encoding, errors=bag_codec_error)\n\n\ndef write_file(fname, content, append=False, mkdir=True):\n    \"\"\"Writes the given content to file.\n\n    Parameters\n    ----------\n    fname : string\n        the file name.\n    content : 
unicode\n        the unicode string to write to file.\n    append : bool\n        True to append instead of overwrite.\n    mkdir : bool\n        If True, will create parent directories if they don't exist.\n    \"\"\"\n    if mkdir:\n        fname = os.path.abspath(fname)\n        dname = os.path.dirname(fname)\n        os.makedirs(dname, exist_ok=True)\n\n    mode = 'a' if append else 'w'\n    with open_file(fname, mode) as f:\n        f.write(content)\n\n\ndef make_temp_dir(prefix, parent_dir=None):\n    \"\"\"Create a new temporary directory.\n\n    Parameters\n    ----------\n    prefix : string\n        the directory prefix.\n    parent_dir : string\n        the parent directory.\n    \"\"\"\n    prefix += time.strftime(\"_%Y%m%d_%H%M%S\")\n    parent_dir = parent_dir or tempfile.gettempdir()\n    return tempfile.mkdtemp(prefix=prefix, dir=parent_dir)\n\n\ndef open_temp(**kwargs):\n    \"\"\"Opens a new temporary file for writing with unicode interface.\n\n    Parameters\n    ----------\n    **kwargs\n        the tempfile keyword arguments.  See documentation for\n        :func:`tempfile.NamedTemporaryFile`.\n\n    Returns\n    -------\n    file : file\n        the opened file that accepts unicode input.\n    \"\"\"\n    timestr = time.strftime(\"_%Y%m%d_%H%M%S\")\n    if 'prefix' in kwargs:\n        kwargs['prefix'] += timestr\n    else:\n        kwargs['prefix'] = timestr\n    temp = tempfile.NamedTemporaryFile(**kwargs)\n    return codecs.getwriter(bag_encoding)(temp, errors=bag_codec_error)\n"
  },
  {
    "path": "bag/io/gui.py",
    "content": "# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport subprocess\nimport json\nimport select\n\nimport PyQt5.QtWidgets as QtWidgets\nimport PyQt5.QtCore as QtCore\n\nfrom .file import write_file, open_file\nfrom .common import to_bytes\n\nif os.name != 'posix':\n    raise Exception('bag.io.gui module current only works for POSIX systems.')\n\n\nclass StdinThread(QtCore.QThread):\n    \"\"\"A QT worker thread that reads stdin.\"\"\"\n    update = QtCore.pyqtSignal('QString')\n\n    def __init__(self, parent):\n        QtCore.QThread.__init__(self, parent=parent)\n        self.stop = False\n\n    def run(self):\n        while not self.stop:\n            try:\n                stdin, _, _ = select.select([sys.stdin], [], [], 0.05)\n                if stdin:\n                    cmd = sys.stdin.readline().strip()\n                else:\n                    cmd = None\n            except:\n                cmd = 'exit'\n\n            if cmd is not None:\n                self.stop = (cmd == 'exit')\n                self.update.emit(cmd)\n\n\nclass LogWidget(QtWidgets.QFrame):\n    \"\"\"A Logger window widget.\n\n    Note: due to QPlainTextEdit always adding an extra newline when calling\n    appendPlainText(), we keep track of internal buffer and only print output\n    one line at a time.  
This may cause some message to not display immediately.\n    \"\"\"\n\n    def __init__(self, parent=None):\n        QtWidgets.QFrame.__init__(self, parent=parent)\n\n        self.logger = QtWidgets.QPlainTextEdit(parent=self)\n        self.logger.setReadOnly(True)\n        self.logger.setLineWrapMode(QtWidgets.QPlainTextEdit.NoWrap)\n        self.logger.setMinimumWidth(1100)\n        self.buffer = ''\n\n        self.clear_button = QtWidgets.QPushButton('Clear Log', parent=self)\n        self.clear_button.clicked.connect(self.clear_log)\n        self.save_button = QtWidgets.QPushButton('Save Log As...', parent=self)\n        self.save_button.clicked.connect(self.save_log)\n\n        self.lay = QtWidgets.QVBoxLayout(self)\n        self.lay.addWidget(self.logger)\n        self.lay.addWidget(self.clear_button)\n        self.lay.addWidget(self.save_button)\n\n    def clear_log(self):\n        self.logger.setPlainText('')\n        self.buffer = ''\n\n    def save_log(self):\n        root_dir = os.getcwd()\n        fname, _ = QtWidgets.QFileDialog.getSaveFileName(self, 'Save File', root_dir)\n        if fname:\n            write_file(fname, self.logger.toPlainText() + '\\n')\n\n    def print_file(self, file_obj):\n        # this code converts all types of newlines (such as '\\r\\n') to '\\n',\n        # and make sure any ending newlines are preserved.\n        for line in file_obj:\n            if self.buffer:\n                line = self.buffer + line\n                self.buffer = ''\n            if line.endswith('\\n'):\n                self.logger.appendPlainText(line[:-1])\n            else:\n                self.buffer = line\n\n\nclass LogViewer(QtWidgets.QWidget):\n    \"\"\"A Simple window to see process log in real time..\"\"\"\n\n    def __init__(self):\n        QtWidgets.QWidget.__init__(self)\n\n        # combo box label\n        self.label = QtWidgets.QLabel('Log File: ', parent=self)\n        # populate log selection combo box.\n        self.combo_box = 
QtWidgets.QComboBox(parent=self)\n        self.log_files = []\n        self.reader = None\n\n        self.logger = LogWidget(parent=self)\n\n        # setup GUI\n        self.setWindowTitle('BAG Simulation Log Viewer')\n        self.setAttribute(QtCore.Qt.WA_DeleteOnClose)\n\n        self.layout = QtWidgets.QGridLayout(self)\n        self.layout.addWidget(self.label, 0, 0, alignment=QtCore.Qt.AlignRight)\n        self.layout.addWidget(self.combo_box, 0, 1, alignment=QtCore.Qt.AlignLeft)\n        self.layout.addWidget(self.logger, 1, 0, -1, -1)\n        self.layout.setRowStretch(0, 0.0)\n        self.layout.setRowStretch(1, 1.0)\n        self.layout.setColumnStretch(0, 0.0)\n        self.layout.setColumnStretch(1, 0.0)\n\n        # setup file watcher\n        self.cur_paths = None\n        self.watcher = QtCore.QFileSystemWatcher(parent=self)\n        # setup signals\n        self.watcher.fileChanged.connect(self.update_logfile)\n        self.combo_box.currentIndexChanged.connect(self.change_log)\n\n        # start thread\n        self.thread = StdinThread(self)\n        self.thread.update.connect(self.parse_cmd)\n        self.thread.start()\n\n    def closeEvent(self, evt):\n        if not self.thread.stop:\n            self.thread.stop = True\n            self.thread.wait()\n        QtWidgets.QWidget.closeEvent(self, evt)\n\n    @QtCore.pyqtSlot('QString')\n    def parse_cmd(self, cmd):\n        if cmd == 'exit':\n            self.close()\n        else:\n            try:\n                cmd = json.loads(cmd)\n                if cmd[0] == 'add':\n                    self.add_log(cmd[1], cmd[2])\n                elif cmd[0] == 'remove':\n                    self.remove_log(cmd[1])\n            except:\n                pass\n\n    @QtCore.pyqtSlot('int')\n    def change_log(self, new_idx):\n        # print('log change called, switching to index %d' % new_idx)\n        if self.cur_paths is not None:\n            self.watcher.removePaths(self.cur_paths)\n        
self.logger.clear_log()\n        if self.reader is not None:\n            self.reader.close()\n            self.reader = None\n\n        if new_idx >= 0:\n            fname = os.path.abspath(self.log_files[new_idx])\n            dname = os.path.dirname(fname)\n            self.reader = open_file(fname, 'r')\n            self.logger.print_file(self.reader)\n            self.cur_paths = [dname, fname]\n            self.watcher.addPaths(self.cur_paths)\n\n    @QtCore.pyqtSlot('QString')\n    def update_logfile(self, fname):\n        # print('filechanged called, fname = %s' % fname)\n        if self.reader is not None:\n            self.logger.print_file(self.reader)\n\n    def remove_log(self, log_tag):\n        idx = self.combo_box.findText(log_tag)\n        if idx >= 0:\n            del self.log_files[idx]\n            self.combo_box.removeItem(idx)\n\n    def add_log(self, log_tag, log_file):\n        self.remove_log(log_tag)\n        if os.path.isfile(log_file):\n            self.log_files.append(log_file)\n            self.combo_box.addItem(log_tag)\n\n\ndef app_start():\n    app = QtWidgets.QApplication([])\n\n    window = LogViewer()\n    app.window_reference = window\n    window.show()\n    app.exec_()\n\n\ndef start_viewer():\n    cmd = [sys.executable, '-m', 'bag.io.gui']\n    devnull = open(os.devnull, 'w')\n    proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=devnull,\n                            stderr=subprocess.STDOUT,\n                            preexec_fn=os.setpgrp)\n    return proc\n\n\ndef add_log(proc, tag, fname):\n    if proc is not None:\n        if proc.poll() is not None or proc.stdin.closed:\n            # process finished\n            return False\n        cmd_str = json.dumps(['add', tag, fname]) + '\\n'\n        proc.stdin.write(to_bytes(cmd_str))\n        proc.stdin.flush()\n    return True\n\n\ndef remove_log(proc, tag):\n    if proc is not None:\n        if proc.poll() is not None or proc.stdin.closed:\n            # process 
finished\n            return False\n        cmd_str = json.dumps(['remove', tag]) + '\\n'\n        proc.stdin.write(to_bytes(cmd_str))\n        proc.stdin.flush()\n    return True\n\n\ndef close(proc):\n    if proc is not None and proc.poll() is None:\n        proc.stdin.close()\n\nif __name__ == '__main__':\n    app_start()\n"
  },
  {
    "path": "bag/io/process.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This module provides functions to help you run external processes.\n\"\"\"\n\nimport os\nimport sys\n\nfrom .common import bag_encoding, bag_codec_error\nfrom .file import write_file\n\nimport multiprocessing\n# noinspection PyCompatibility\nimport concurrent.futures\n\nif sys.version_info[0] < 3:\n    # use subprocess32 for timeout feature.\n    if os.name != 'posix':\n        raise Exception('bag.io.process module current only works for POSIX systems.')\n    # noinspection PyUnresolvedReferences,PyPackageRequirements\n    import subprocess32 as subprocess\nelse:\n    import subprocess\n\n\ndef run_proc_with_quit(proc_id, quit_dict, args, logfile=None, append=False, env=None, cwd=None):\n    if logfile is None:\n        logfile = os.devnull\n\n    mode = 'ab' if append else 'wb'\n    with open(logfile, mode) as logf:\n        if proc_id in quit_dict:\n            return None\n        proc = subprocess.Popen(args, stdout=logf, stderr=subprocess.STDOUT,\n                                env=env, cwd=cwd)\n        retcode = None\n        num_kill = 0\n        timeout = 0.05\n        while retcode is None and num_kill <= 2:\n            try:\n                retcode = proc.wait(timeout=timeout)\n            except subprocess.TimeoutExpired:\n                if proc_id in quit_dict:\n                    if num_kill == 0:\n                        proc.terminate()\n                        timeout = quit_dict[proc_id]\n                    elif num_kill == 1:\n                        proc.kill()\n                    num_kill += 1\n\n        return proc.returncode\n\n\ndef run_and_wait(args, timeout=None, logfile=None, append=False,\n                 env=None, cwd=None):\n    \"\"\"Run a command in a subprocess, then wait for it to finish.\n\n    Parameters\n    ----------\n    args : string or list[string]\n        the command to run.  
Should be either a command string or a list\n        of command string and its arguments as strings.  A list is preferred;\n        see Python subprocess documentation.\n    timeout : float or None\n        the amount of time to wait for the command to finish, in seconds.\n        If None, waits indefinitely.\n    logfile : string or None\n        If given, stdout and stderr will be written to this file.\n    append : bool\n        True to append to the logfile.  Defaults to False.\n    env : dict[string, any]\n        If not None, environment variables of the subprocess will be set\n        according to this dictionary instead of inheriting from current\n        process.\n    cwd : string or None\n        The current working directory of the subprocess.\n\n    Returns\n    -------\n    output : string\n        the standard output and standard error from the command.\n\n    Raises\n    ------\n    subprocess.CalledProcessError\n        if any error occurred in the subprocess.\n    \"\"\"\n    output = subprocess.check_output(args, stderr=subprocess.STDOUT,\n                                     timeout=timeout, env=env, cwd=cwd)\n    output = output.decode(encoding=bag_encoding, errors=bag_codec_error)\n\n    if logfile is not None:\n        write_file(logfile, output, append=append)\n\n    return output\n\n\nclass ProcessManager(object):\n    \"\"\"A class that manages subprocesses.\n\n    This class is for starting processes that you do not need to wait on,\n    and allows you to query for their status or terminate/kill them if needed.\n\n    Parameters\n    ----------\n    max_workers : int or None\n        number of maximum allowed subprocesses.  If None, defaults to system\n        CPU count.\n    cancel_timeout : float or None\n        Number of seconds to wait for a process to terminate once SIGTERM or\n        SIGKILL is issued.  
Defaults to 10 seconds.\n    \"\"\"\n    def __init__(self, max_workers=None, cancel_timeout=10.0):\n        if max_workers is None:\n            max_workers = multiprocessing.cpu_count()\n        if cancel_timeout is None:\n            cancel_timeout = 10.0\n        self._exec = concurrent.futures.ThreadPoolExecutor(max_workers=max_workers)\n        self._cancel_timeout = cancel_timeout\n        self._future_dict = {}\n        self._quit_dict = {}\n\n    def close(self, timeout=10.0):\n        \"\"\"Cancel all processes.\n\n        Parameters\n        ----------\n        timeout : float\n            time to wait in seconds for each process to terminate.\n        \"\"\"\n        for proc_id in self._future_dict.keys():\n            self.cancel(proc_id, timeout=timeout)\n        self._exec.shutdown()\n        self._quit_dict.clear()\n        self._future_dict.clear()\n\n    def new_thread(self, fun, basename=None, callback=None):\n        \"\"\"Put a new custom task in queue.\n\n        Execute the given function in a thread asynchronously.  The given function\n        must take two arguments, The first argument is a unique string that represents\n        this task, and the second argument is a dictionary.  The dictionary will\n        map the unique string to a timeout (in second) if this task is being cancelled.\n        The function should periodically check the dictionary and terminate gracefully.\n\n        Before function returns, it should also delete the unique string from dictionary\n        if it exists.\n\n        Parameters\n        ----------\n        fun : callable\n            the function to execute in a thread, as described above.\n        basename : string or None\n            If given, this will be used as the basis for generating the unique\n            process ID.\n        callback : callable\n            If given, this function will automatically be executed when the\n            process finished.  
This function should take a single argument,\n            which is a Future object that returns the return code of the\n            process.\n\n        Returns\n        -------\n        proc_id : string\n            a unique string representing this process.  Can be used later\n            to query process status or cancel process.\n        \"\"\"\n        # find unique process ID\n        proc_id = basename or 'proc'\n        cur_idx = 1\n        while proc_id in self._future_dict:\n            proc_id = '%s_%d' % (proc_id, cur_idx)\n            cur_idx += 1\n\n        future = self._exec.submit(fun, proc_id, self._quit_dict)\n        if callback is not None:\n            future.add_done_callback(callback)\n\n        self._future_dict[proc_id] = future\n        return proc_id\n\n    def new_process(self, args, basename=None, logfile=None, append=False,\n                    env=None, cwd=None, callback=None):\n        \"\"\"Put a new process in queue.\n\n        When the process is done, its return code will be returned.\n\n        Parameters\n        ----------\n        args : string or list[string]\n            the command to run as a string or list of string arguments.  See\n            Python subprocess documentation.  list of string format is preferred.\n        basename : string or None\n            If given, this will be used as the basis for generating the unique\n            process ID.\n        logfile : string or None\n            If given, stdout and stderr will be written to this file.  
Otherwise,\n            they will be redirected to `os.devnull`.\n        append : bool\n            True to append to ``logfile`` instead of overwriting it.\n        env : dict[string, string] or None\n            If given, environment variables of the process will be set according\n            to this dictionary.\n        cwd : string or None\n            current working directory of the process.\n        callback : callable\n            If given, this function will automatically be executed when the\n            process finished.  This function should take a single argument,\n            which is a Future object that returns the return code of the\n            process.\n\n        Returns\n        -------\n        proc_id : string\n            a unique string representing this process.  Can be used later\n            to query process status or cancel process.\n        \"\"\"\n        # find unique process ID\n        proc_id = basename or 'proc'\n        cur_idx = 1\n        while proc_id in self._future_dict:\n            proc_id = '%s_%d' % (proc_id, cur_idx)\n            cur_idx += 1\n\n        future = self._exec.submit(self._start_cmd, args, proc_id,\n                                   logfile=logfile, append=append, env=env, cwd=cwd)\n        if callback is not None:\n            future.add_done_callback(callback)\n\n        self._future_dict[proc_id] = future\n        return proc_id\n\n    @staticmethod\n    def _get_output(future, timeout=None):\n        \"\"\"Get output from future.  
Return None when exception.\"\"\"\n        try:\n            if future.exception(timeout=timeout) is None:\n                return future.result()\n            else:\n                return None\n        except concurrent.futures.CancelledError:\n            return None\n\n    def cancel(self, proc_id, timeout=None):\n        \"\"\"Cancel the given process.\n\n        If the process hasn't started, this method prevents it from starting.\n        Otherwise, we first send a SIGTERM signal to kill the process.  If\n        after ``timeout`` seconds the process is still alive, we will send a\n        SIGKILL signal.  If after another ``timeout`` seconds the process is\n        still alive, an Exception will be raised.\n\n        Parameters\n        ----------\n        proc_id : string\n            the process ID to cancel.\n        timeout : float or None\n            number of seconds to wait for cancellation.  If None, use default\n            timeout.\n\n        Returns\n        -------\n        output :\n            output of the thread if it successfully terminates.\n            Otherwise, return None.\n        \"\"\"\n        if timeout is None:\n            timeout = self._cancel_timeout\n\n        future = self._future_dict.get(proc_id, None)\n        if future is None:\n            return None\n        if future.done():\n            # process already done, return status.\n            del self._future_dict[proc_id]\n            return self._get_output(future)\n        if future.cancel():\n            # we cancelled process before it made into the thread pool.\n            del self._future_dict[proc_id]\n            return None\n        else:\n            # inform thread it should try to quit.\n            self._quit_dict[proc_id] = timeout\n            try:\n                output = self._get_output(future, timeout=4 * timeout)\n                del self._future_dict[proc_id]\n                return output\n            except concurrent.futures.TimeoutError:\n   
             # shouldn't get here, but we did\n                print(\"*WARNING* worker thread refuse to die...\")\n                del self._future_dict[proc_id]\n                return None\n\n    def done(self, proc_id):\n        \"\"\"Returns True if the given process finished or is cancelled successfully.\n\n        Parameters\n        ----------\n        proc_id : string\n            the process ID.\n\n        Returns\n        -------\n        done : bool\n            True if the process is cancelled or completed.\n        \"\"\"\n        return self._future_dict[proc_id].done()\n\n    def wait(self, proc_id, timeout=None, cancel_timeout=None):\n        \"\"\"Wait for the given process to finish, then return its return code.\n\n        If ``timeout`` is None, waits indefinitely.  Otherwise, if after\n        ``timeout`` seconds the process is still running, a\n        :class:`concurrent.futures.TimeoutError` will be raised.\n        However, it is safe to catch this error and call wait again.\n\n        If Ctrl-C is pressed before process finish or before timeout\n        is reached, the process will be cancelled.\n\n        Parameters\n        ----------\n        proc_id : string\n            the process ID.\n        timeout : float or None\n            number of seconds to wait.  If None, waits indefinitely.\n        cancel_timeout : float or None\n            number of seconds to wait for process cancellation.  If None,\n            use default timeout.\n\n        Returns\n        -------\n        output :\n            output of the thread if it successfully terminates.  
Otherwise return None.\n        \"\"\"\n        if cancel_timeout is None:\n            cancel_timeout = self._cancel_timeout\n\n        future = self._future_dict[proc_id]\n        try:\n            output = future.result(timeout=timeout)\n            # remove future from dictionary.\n            del self._future_dict[proc_id]\n            return output\n        except KeyboardInterrupt:\n            # cancel the process\n            print('KeyboardInterrupt received, cancelling %s...' % proc_id)\n            return self.cancel(proc_id, timeout=cancel_timeout)\n\n    def _start_cmd(self, args, proc_id, logfile=None, append=False, env=None, cwd=None):\n        \"\"\"The function that actually starts the subprocess.  Executed by thread.\"\"\"\n\n        retcode = run_proc_with_quit(proc_id, self._quit_dict, args, logfile=logfile,\n                                     append=append, env=env, cwd=cwd)\n        if proc_id in self._quit_dict:\n            del self._quit_dict[proc_id]\n\n        return retcode\n"
  },
  {
    "path": "bag/io/sim_data.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This module handles simulation data related IO.\n\nNote : when reading data files, we use Numpy to handle the encodings,\nso BAG encoding settings will not apply.\n\"\"\"\n\nimport os\nimport glob\n\nimport numpy as np\nimport h5py\n\nfrom .common import bag_encoding, bag_codec_error\n\nillegal_var_name = ['sweep_params']\n\n\nclass SweepArray(np.ndarray):\n    \"\"\"Subclass of numpy array that adds sweep parameters attribute.\n    \"\"\"\n\n    def __new__(cls, data, sweep_params=None):\n        # Input array is an already formed ndarray instance\n        # We first cast to be our class type\n        obj = np.asarray(data).view(cls)\n        # add the new attribute to the created instance\n        obj.sweep_params = sweep_params\n        # Finally, we must return the newly created object:\n        return obj\n\n    def __array_finalize__(self, obj):\n        # see InfoArray.__array_finalize__ for comments\n        if obj is None:\n            return\n        self.sweep_params = getattr(obj, 'sweep_params', None)\n\n    def __reduce__(self):\n        # Get the parent's __reduce__ tuple\n        pickled_state = super(SweepArray, self).__reduce__()\n        # Create our own tuple to pass to __setstate__\n        new_state = pickled_state[2] + (self.sweep_params,)\n        # Return a tuple that replaces the parent's __setstate__ tuple with our own\n        return pickled_state[0], pickled_state[1], new_state\n\n    # noinspection PyMethodOverriding\n    def __setstate__(self, state):\n        self.sweep_params = state[-1]  # Set the info attribute\n        # Call the parent's __setstate__ with the other tuple elements.\n        # noinspection PyArgumentList\n        super(SweepArray, self).__setstate__(state[0:-1])\n\n\ndef _get_sweep_params(fname):\n    \"\"\"Parse the sweep information file and reverse engineer sweep parameters.\n\n    Parameters\n    ----------\n    fname : str\n        the sweep information file 
name.\n\n    Returns\n    -------\n    swp_list : list[str]\n        list of sweep parameter names.  index 0 is the outer-most loop.\n    values_list : list[list[float or str]]\n        list of values list for each sweep parameter.\n    \"\"\"\n    mat = np.genfromtxt(fname, dtype=np.unicode_)\n    header = mat[0, :]\n    data = mat[1:, :]\n\n    # eliminate same data\n    idx_list = []\n    for idx in range(len(header)):\n        bool_vec = data[:, idx] == data[0, idx]  # type: np.ndarray\n        if not np.all(bool_vec):\n            idx_list.append(idx)\n\n    header = header[idx_list]\n    data = data[:, idx_list]\n    # find the first index of last element of each column.\n    last_first_idx = [np.where(data[:, idx] == data[-1, idx])[0][0] for idx in range(len(header))]\n    # sort by first index of last element; the column where the last element\n    # appears the earliest is the inner most loop.\n    order_list = np.argsort(last_first_idx)  # type: np.ndarray\n\n    # get list of values\n    values_list = []\n    skip_len = 1\n    for idx in order_list:\n        end_idx = last_first_idx[idx] + 1\n        values = data[0:end_idx:skip_len, idx]\n        if header[idx] != 'corner':\n            values = values.astype(np.float)\n        skip_len *= len(values)\n        values_list.append(values)\n\n    swp_list = header[order_list][::-1].tolist()\n    values_list.reverse()\n    return swp_list, values_list\n\n\ndef load_sim_results(save_dir):\n    \"\"\"Load exported simulation results from the given directory.\n\n    Parameters\n    ----------\n    save_dir : str\n        the save directory path.\n\n    Returns\n    -------\n    results : dict[str, any]\n        the simulation data dictionary.\n\n        most keys in result is either a sweep parameter or an output signal.\n        the values are the corresponding data as a numpy array.  
In addition,\n        results has a key called 'sweep_params', which contains a dictionary from\n        output signal name to a list of sweep parameters of that output.\n\n    \"\"\"\n    if not save_dir:\n        return None\n\n    results = {}\n    sweep_params = {}\n\n    # load sweep parameter values\n    top_swp_list, values_list = _get_sweep_params(os.path.join(save_dir, 'sweep.info'))\n    top_shape = []\n    for swp, values in zip(top_swp_list, values_list):\n        results[swp] = values\n        top_shape.append(len(values))\n\n    for swp_name in glob.glob(os.path.join(save_dir, '*.sweep')):\n        base_name = os.path.basename(swp_name).split('.')[0]\n        data_name = os.path.join(save_dir, '%s.data' % base_name)\n        try:\n            data_arr = np.loadtxt(data_name)\n        except ValueError:\n            # try loading complex\n            data_arr = np.loadtxt(data_name, dtype=complex)\n\n        # get sweep parameter names\n        with open(swp_name, 'r', encoding='utf-8') as f:\n            swp_list = [str(line.strip()) for line in f]\n\n        # make a copy of master sweep list and sweep shape\n        cur_swp_list = list(top_swp_list)\n        cur_shape = list(top_shape)\n\n        for swp in swp_list:\n            if swp not in results:\n                fname = os.path.join(save_dir, '%s.info' % swp)\n                results[swp] = np.loadtxt(fname)\n\n            # if sweep has more than one element.\n            if results[swp].shape:\n                cur_swp_list.append(swp)\n                cur_shape.append(results[swp].shape[0])\n\n        # sanity check\n        if base_name in results:\n            raise Exception('Error: output named %s already in results' % base_name)\n\n        # reshape data array\n        data_arr = data_arr.reshape(cur_shape)\n        results[base_name] = SweepArray(data_arr, cur_swp_list)\n        # record sweep parameters for this data\n        sweep_params[base_name] = cur_swp_list\n\n    if 
'sweep_params' in results:\n        raise Exception('illegal output name: sweep_params')\n\n    results['sweep_params'] = sweep_params\n\n    return results\n\n\ndef save_sim_results(results, fname, compression='gzip'):\n    \"\"\"Saves the given simulation results dictionary as a HDF5 file.\n\n    Parameters\n    ----------\n    results : dict[string, any]\n        the results dictionary.\n    fname : str\n        the file to save results to.\n    compression : str\n        HDF5 compression method.  Defaults to 'gzip'.\n    \"\"\"\n    # create directory if it didn't exist.\n    fname = os.path.abspath(fname)\n    dir_name = os.path.dirname(fname)\n    if not os.path.exists(dir_name):\n        os.makedirs(dir_name)\n\n    sweep_info = results['sweep_params']\n    with h5py.File(fname, 'w') as f:\n        for name, swp_vars in sweep_info.items():\n            # store data\n            data = np.asarray(results[name])\n            if not data.shape:\n                dset = f.create_dataset(name, data=data)\n            else:\n                dset = f.create_dataset(name, data=data, compression=compression)\n            # h5py workaround: need to explicitly store unicode\n            dset.attrs['sweep_params'] = [swp.encode(encoding=bag_encoding, errors=bag_codec_error)\n                                          for swp in swp_vars]\n\n            # store sweep parameter values\n            for var in swp_vars:\n                if var not in f:\n                    swp_data = results[var]\n                    if np.issubdtype(swp_data.dtype, np.unicode_):\n                        # we need to explicitly encode unicode strings to bytes\n                        swp_data = [v.encode(encoding=bag_encoding, errors=bag_codec_error) for v in swp_data]\n\n                    f.create_dataset(var, data=swp_data, compression=compression)\n\n\ndef load_sim_file(fname):\n    \"\"\"Read simulation results from HDF5 file.\n\n    Parameters\n    ----------\n    fname : str\n        
the file to read.\n\n    Returns\n    -------\n    results : dict[str, any]\n        the result dictionary.\n    \"\"\"\n    if not os.path.isfile(fname):\n        raise ValueError('%s is not a file.' % fname)\n\n    results = {}\n    sweep_params = {}\n    with h5py.File(fname, 'r') as f:\n        for name in f:\n            dset = f[name]\n            dset_data = dset[()]\n            if np.issubdtype(dset.dtype, np.bytes_):\n                # decode byte values to unicode arrays\n                dset_data = np.array([v.decode(encoding=bag_encoding, errors=bag_codec_error) for v in dset_data])\n\n            if 'sweep_params' in dset.attrs:\n                cur_swp = [swp.decode(encoding=bag_encoding, errors=bag_codec_error)\n                           for swp in dset.attrs['sweep_params']]\n                results[name] = SweepArray(dset_data, cur_swp)\n                sweep_params[name] = cur_swp\n            else:\n                results[name] = dset_data\n\n    results['sweep_params'] = sweep_params\n    return results\n"
  },
  {
    "path": "bag/io/template.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This module defines methods to create files from templates.\n\"\"\"\n\nfrom jinja2 import Environment, PackageLoader, select_autoescape\n\n\ndef new_template_env(parent_package, tmp_folder):\n    # type: (str, str) -> Environment\n    return Environment(trim_blocks=True,\n                       lstrip_blocks=True,\n                       keep_trailing_newline=True,\n                       autoescape=select_autoescape(default_for_string=False),\n                       loader=PackageLoader(parent_package, package_path=tmp_folder),\n                       enable_async=False,\n                       )\n"
  },
  {
    "path": "bag/layout/LICENSE",
    "content": "BSD 3-Clause License\n\nCopyright (c) 2018, Regents of the University of California\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n  list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n  this list of conditions and the following disclaimer in the documentation\n  and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n  contributors may be used to endorse or promote products derived from\n  this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "bag/layout/__init__.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This package contains code for templated based layout.\n\"\"\"\n\nfrom .core import BagLayout, TechInfo\nfrom .routing import RoutingGrid\nfrom .template import TemplateDB\nfrom . import util\n\n\n__all__ = ['BagLayout', 'TechInfo',\n           'RoutingGrid',\n           'TemplateDB',\n           ]\n"
  },
  {
    "path": "bag/layout/core.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This module defines the base template class.\n\"\"\"\n\nfrom typing import Dict, List, Iterator, Tuple, Optional, Union, Callable, Any\n\nimport abc\nimport math\nimport numpy as np\nfrom itertools import chain\n\nfrom .. import io\nfrom .util import BBox\nfrom .objects import Rect, Via, ViaInfo, Instance, InstanceInfo, PinInfo\nfrom .objects import Path, Polygon, Blockage, Boundary\nfrom ..util.search import BinaryIterator\n\n# try to import cybagoa module\ntry:\n    import cybagoa\nexcept ImportError:\n    cybagoa = None\n\n\nclass TechInfo(object, metaclass=abc.ABCMeta):\n    \"\"\"A base class that create vias.\n\n    This class provides the API for making vias.  Each process should subclass this class and\n    implement the make_via method.\n\n    Parameters\n    ----------\n    res : float\n        the grid resolution of this technology.\n    layout_unit : float\n        the layout unit, in meters.\n    via_tech : string\n        the via technology library name.  
This is usually the PDK library name.\n    process_params : dict[str, any]\n        process specific parameters.\n\n    Attributes\n    ----------\n    tech_params : dict[str, any]\n        technology specific parameters.\n    \"\"\"\n\n    def __init__(self, res, layout_unit, via_tech, process_params):\n        self._resolution = res\n        self._layout_unit = layout_unit\n        self._via_tech = via_tech\n        self.tech_params = process_params\n\n    @abc.abstractmethod\n    def get_well_layers(self, sub_type):\n        # type: (str) -> List[Tuple[str, str]]\n        \"\"\"Returns a list of well layers associated with the given substrate type.\"\"\"\n        return []\n\n    @abc.abstractmethod\n    def get_implant_layers(self, mos_type, res_type=None):\n        # type: (str, Optional[str]) -> List[Tuple[str, str]]\n        \"\"\"Returns a list of implant layers associated with the given device type.\n\n        Parameters\n        ----------\n        mos_type : str\n            one of 'nch', 'pch', 'ntap', or 'ptap'\n        res_type : Optional[str]\n            If given, the return layers will be for the substrate of the given resistor type.\n\n        Returns\n        -------\n        imp_list : List[Tuple[str, str]]\n            list of implant layers.\n        \"\"\"\n        return []\n\n    @abc.abstractmethod\n    def get_threshold_layers(self, mos_type, threshold, res_type=None):\n        # type: (str, str, Optional[str]) -> List[Tuple[str, str]]\n        \"\"\"Returns a list of threshold layers.\"\"\"\n        return []\n\n    @abc.abstractmethod\n    def get_exclude_layer(self, layer_id):\n        # type: (int) -> Tuple[str, str]\n        \"\"\"Returns the metal exclude layer\"\"\"\n        return '', ''\n\n    @abc.abstractmethod\n    def get_dnw_margin_unit(self, dnw_mode):\n        # type: (str) -> int\n        \"\"\"Returns the required DNW margin given the DNW mode.\n\n        Parameters\n        ----------\n        dnw_mode : str\n           
 the DNW mode string.\n\n        Returns\n        -------\n        dnw_margin : int\n            the DNW margin in resolution units.\n        \"\"\"\n        return 0\n\n    @abc.abstractmethod\n    def get_dnw_layers(self):\n        # type: () -> List[Tuple[str, str]]\n        \"\"\"Returns a list of layers that defines DNW.\n\n        Returns\n        -------\n        lay_list : List[Tuple[str, str]]\n            list of DNW layers.\n        \"\"\"\n        return []\n\n    @abc.abstractmethod\n    def get_res_metal_layers(self, layer_id):\n        # type: (int) -> List[Tuple[str, str]]\n        \"\"\"Returns a list of layers associated with the given metal resistor.\n\n        Parameters\n        ----------\n        layer_id : int\n            the metal layer ID.\n\n        Returns\n        -------\n        res_list : List[Tuple[str, str]]\n            list of resistor layers.\n        \"\"\"\n        return []\n\n    @abc.abstractmethod\n    def get_metal_dummy_layers(self, layer_id):\n        # type: (int) -> List[Tuple[str, str]]\n        \"\"\"Returns a list of layers associated with the given metal dummy layers.\n\n        Parameters\n        ----------\n        layer_id : int\n            the metal layer ID.\n\n        Returns\n        -------\n        res_list : List[Tuple[str, str]]\n            list of metal dummy layers.\n        \"\"\"\n        return []\n\n    @abc.abstractmethod\n    def add_cell_boundary(self, template, box):\n        \"\"\"Adds a cell boundary object to the given template.\n        \n        This is usually the PR boundary.\n        \n        Parameters\n        ----------\n        template : TemplateBase\n            the template to draw the cell boundary in.\n        box : BBox\n            the cell boundary bounding box.\n        \"\"\"\n        pass\n\n    @abc.abstractmethod\n    def draw_device_blockage(self, template):\n        \"\"\"Draw device blockage layers on the given template.\n\n        Parameters\n        
----------\n        template : TemplateBase\n            the template to draw the device block layers on\n        \"\"\"\n        pass\n\n    @abc.abstractmethod\n    def get_via_drc_info(self, vname, vtype, mtype, mw_unit, is_bot):\n        \"\"\"Return data structures used to identify VIA DRC rules.\n\n        Parameters\n        ----------\n        vname : string\n            the via type name.\n        vtype : string\n            the via type, square/hrect/vrect/etc.\n        mtype : string\n            name of the metal layer via is connecting.  Can be either top or bottom.\n        mw_unit : int\n            width of the metal, in resolution units.\n        is_bot : bool\n            True if the given metal is the bottom metal.\n\n        Returns\n        -------\n        sp : Tuple[int, int]\n            horizontal/vertical space between adjacent vias, in resolution units.\n        sp2_list : List[Tuple[int, int]] or None\n            horizontal/vertical space between adjacent vias if the via has 2 or more neighbors.\n            None if no constraint.\n        sp3_list : List[Tuple[int, int]] or None\n            horizontal/vertical space between adjacent vias if the via has 3 or more neighbors.\n            None if no constraint.\n        sp6_list : List[Tuple[int, int]] or None\n            horizontal/vertical space between adjacent vias if the via has 6 or more neighbors.\n            None if no constraint.\n        dim : Tuple[int, int]\n            the via width/height in resolution units.\n        enc : List[Tuple[int, int]]\n            a list of valid horizontal/vertical enclosure of the via on the given metal\n            layer, in resolution units.\n        arr_enc : List[Tuple[int, int]] or None\n            a list of valid horizontal/vertical enclosure of the via on the given metal\n            layer if this is a \"via array\", in layout units.\n            None if no constraint.\n        arr_test : callable or None\n            a function that 
accepts two inputs, the number of via rows and number of via\n            columns, and returns True if those numbers describe a \"via array\".\n            None if no constraint.\n        \"\"\"\n        return (0, 0), [(0, 0)], [(0, 0)], [(0, 0)], (0, 0), [(0, 0)], None, None\n\n    @abc.abstractmethod\n    def get_min_space(self, layer_type, width, unit_mode=False, same_color=False):\n        \"\"\"Returns the minimum spacing needed around a wire on the given layer with the given width.\n\n        Parameters\n        ----------\n        layer_type : str\n            the wiring layer type.\n        width : Union[float, int]\n            the width of the wire, in layout units.\n        unit_mode : bool\n            True if dimension are given/returned in resolution units.\n        same_color : bool\n            True to use same-color spacing.\n\n        Returns\n        -------\n        sp : Union[float, int]\n            the minimum spacing needed.\n        \"\"\"\n        return 0.0\n\n    @abc.abstractmethod\n    def get_min_line_end_space(self, layer_type, width, unit_mode=False):\n        \"\"\"Returns the minimum line-end spacing of a wire with given width.\n\n        Parameters\n        ----------\n        layer_type : str\n            the wiring layer type.\n        width : Union[float, int]\n            the width of the wire, in layout units.\n        unit_mode : bool\n            True if dimension are given/returned in resolution units.\n\n        Returns\n        -------\n        sp : Union[float, int]\n            the minimum line-end space.\n        \"\"\"\n        return 0.0\n\n    @abc.abstractmethod\n    def get_min_length(self, layer_type, width):\n        # type: (str, float) -> float\n        \"\"\"Returns the minimum length of a wire on the given layer with the given width.\n\n        Parameters\n        ----------\n        layer_type : str\n            the wiring layer type.\n        width : float\n            the width of the wire, in layout 
units.\n\n        Returns\n        -------\n        min_length : float\n            the minimum length.\n        \"\"\"\n        return 0.0\n\n    @abc.abstractmethod\n    def get_layer_id(self, layer_name):\n        \"\"\"Return the layer id for the given layer name.\n\n        Parameters\n        ----------\n        layer_name : string\n            the layer name.\n\n        Returns\n        -------\n        layer_id : int\n            the layer ID.\n        \"\"\"\n        return 0\n\n    @abc.abstractmethod\n    def get_layer_name(self, layer_id):\n        \"\"\"Return the layer name(s) for the given routing grid layer ID.\n\n        Parameters\n        ----------\n        layer_id : int\n            the routing grid layer ID.\n\n        Returns\n        -------\n        name : string or Tuple[string]\n            name of the layer.  Returns a tuple of names if this is a double\n            patterning layer.\n        \"\"\"\n        return ''\n\n    @abc.abstractmethod\n    def get_layer_type(self, layer_name):\n        \"\"\"Returns the metal type of the given wiring layer.\n\n        Parameters\n        ----------\n        layer_name : str\n            the wiring layer name.\n\n        Returns\n        -------\n        metal_type : string\n            the metal layer type.\n        \"\"\"\n        return ''\n\n    @abc.abstractmethod\n    def get_via_name(self, bot_layer_id):\n        \"\"\"Returns the via type name of the given via.\n\n        Parameters\n        ----------\n        bot_layer_id : int\n            the via bottom layer ID\n\n        Returns\n        -------\n        name : string\n            the via type name.\n        \"\"\"\n        return ''\n\n    @abc.abstractmethod\n    def get_metal_em_specs(self, layer_name, w, l=-1, vertical=False, **kwargs):\n        \"\"\"Returns a tuple of EM current/resistance specs of the given wire.\n\n        Parameters\n        ----------\n        layer_name : str\n            the metal layer name.\n        
w : float\n            the width of the metal in layout units (dimension perpendicular to current flow).\n        l : float\n            the length of the metal in layout units (dimension parallel to current flow).\n            If negative, disable length enhancement.\n        vertical : bool\n            True to compute vertical current.\n        **kwargs :\n            optional EM specs parameters.\n\n        Returns\n        -------\n        idc : float\n            maximum DC current, in Amperes.\n        iac_rms : float\n            maximum AC RMS current, in Amperes.\n        iac_peak : float\n            maximum AC peak current, in Amperes.\n        \"\"\"\n        return float('inf'), float('inf'), float('inf')\n\n    @abc.abstractmethod\n    def get_via_em_specs(self, via_name,  # type: str\n                         bm_layer,  # type: str\n                         tm_layer,  # type: str\n                         via_type='square',  # type: str\n                         bm_dim=(-1, -1),  # type: Tuple[float, float]\n                         tm_dim=(-1, -1),  # type: Tuple[float, float]\n                         array=False,  # type: bool\n                         **kwargs):\n        # type: (...) -> Tuple[float ,float, float]\n        \"\"\"Returns a tuple of EM current/resistance specs of the given via.\n\n        Parameters\n        ----------\n        via_name : str\n            the via type name.\n        bm_layer : str\n            the bottom layer name.\n        tm_layer : str\n            the top layer name.\n        via_type : str\n            the via type, square/vrect/hrect/etc.\n        bm_dim : Tuple[float, float]\n            bottom layer metal width/length in layout units.  If negative,\n            disable length/width enhancement.\n        tm_dim : Tuple[float, float]\n            top layer metal width/length in layout units.  
If negative,\n            disable length/width enhancement.\n        array : bool\n            True if this via is in a via array.\n        **kwargs :\n            optional EM specs parameters.\n\n        Returns\n        -------\n        idc : float\n            maximum DC current per via, in Amperes.\n        iac_rms : float\n            maximum AC RMS current per via, in Amperes.\n        iac_peak : float\n            maximum AC peak current per via, in Amperes.\n        \"\"\"\n        return float('inf'), float('inf'), float('inf')\n\n    @abc.abstractmethod\n    def get_res_rsquare(self, res_type):\n        \"\"\"Returns R-square for the given resistor type.\n\n        This is used to do some approximate resistor dimension calculation.\n\n        Parameters\n        ----------\n        res_type : string\n            the resistor type.\n\n        Returns\n        -------\n        rsquare : float\n            resistance in Ohms per unit square of the given resistor type.\n        \"\"\"\n        return 0.0\n\n    @abc.abstractmethod\n    def get_res_width_bounds(self, res_type):\n        \"\"\"Returns the maximum and minimum resistor width for the given resistor type.\n\n        Parameters\n        ----------\n        res_type : string\n            the resistor type.\n\n        Returns\n        -------\n        wmin : float\n            minimum resistor width, in layout units.\n        wmax : float\n            maximum resistor width, in layout units.\n        \"\"\"\n        return 0.0, 0.0\n\n    @abc.abstractmethod\n    def get_res_length_bounds(self, res_type):\n        \"\"\"Returns the maximum and minimum resistor length for the given resistor type.\n\n        Parameters\n        ----------\n        res_type : string\n            the resistor type.\n\n        Returns\n        -------\n        lmin : float\n            minimum resistor length, in layout units.\n        lmax : float\n            maximum resistor length, in layout units.\n        \"\"\"\n    
    return 0.0, 0.0\n\n    @abc.abstractmethod\n    def get_res_min_nsquare(self, res_type):\n        \"\"\"Returns the minimum allowable number of squares for the given resistor type.\n\n        Parameters\n        ----------\n        res_type : string\n            the resistor type.\n\n        Returns\n        -------\n        nsq_min : float\n            minimum number of squares needed.\n        \"\"\"\n        return 1.0\n\n    @abc.abstractmethod\n    def get_res_em_specs(self, res_type, w, l=-1, **kwargs):\n        # type: (str, float, float, **Any) -> Tuple[float, float, float]\n        \"\"\"Returns a tuple of EM current/resistance specs of the given resistor.\n\n        Parameters\n        ----------\n        res_type : string\n            the resistor type string.\n        w : float\n            the width of the metal in layout units (dimension perpendicular to current flow).\n        l : float\n            the length of the metal in layout units (dimension parallel to current flow).\n            If negative, disable length enhancement.\n        **kwargs : Any\n            optional EM specs parameters.\n\n        Returns\n        -------\n        idc : float\n            maximum DC current, in Amperes.\n        iac_rms : float\n            maximum AC RMS current, in Amperes.\n        iac_peak : float\n            maximum AC peak current, in Amperes.\n        \"\"\"\n        return float('inf'), float('inf'), float('inf')\n\n    @property\n    def via_tech_name(self):\n        \"\"\"Returns the via technology library name.\"\"\"\n        return self._via_tech\n\n    @property\n    def pin_purpose(self):\n        \"\"\"Returns the layout pin purpose name.\"\"\"\n        return 'pin'\n\n    @property\n    def resolution(self):\n        \"\"\"Returns the grid resolution.\"\"\"\n        return self._resolution\n\n    @property\n    def layout_unit(self):\n        \"\"\"Returns the layout unit length, in meters.\"\"\"\n        return self._layout_unit\n\n    
def merge_well(self, template, inst_list, sub_type, threshold=None, res_type=None,\n                   merge_imp=False):\n        # type: ('TemplateBase', List[Instance], str, Optional[str], Optional[str], bool) -> None\n        \"\"\"Merge the well of the given instances together.\"\"\"\n\n        if threshold is not None:\n            lay_iter = chain(self.get_well_layers(sub_type),\n                             self.get_threshold_layers(sub_type, threshold, res_type=res_type))\n        else:\n            lay_iter = self.get_well_layers(sub_type)\n        if merge_imp:\n            lay_iter = chain(lay_iter, self.get_implant_layers(sub_type, res_type=res_type))\n\n        for lay in lay_iter:\n            tot_box = BBox.get_invalid_bbox()\n            for inst in inst_list:\n                cur_box = inst.master.get_rect_bbox(lay)\n                tot_box = tot_box.merge(inst.translate_master_box(cur_box))\n            if tot_box.is_physical():\n                template.add_rect(lay, tot_box)\n\n    def use_flip_parity(self):\n        # type: () -> bool\n        \"\"\"Returns True if flip_parity dictionary is needed in this technology.\"\"\"\n        return True\n\n    def finalize_template(self, template):\n        \"\"\"Perform any operations necessary on the given layout template before finalizing it.\n\n        By default, nothing is done.\n\n        Parameters\n        ----------\n        template : TemplateBase\n            the template object.\n        \"\"\"\n        pass\n\n    def get_res_info(self, res_type, w, l, **kwargs):\n        \"\"\"Returns a dictionary containing EM information of the given resistor.\n\n        Parameters\n        ----------\n        res_type : string or (string, string)\n            the resistor type.\n        w : float\n            the resistor width in layout units (dimension perpendicular to current flow).\n        l : float\n            the resistor length in layout units (dimension parallel to current flow).\n        
**kwargs :\n            optional parameters for EM rule calculations, such as nominal temperature,\n            AC rms delta-T, etc.\n\n        Returns\n        -------\n        info : dict[string, any]\n            A dictionary of wire information.  Should have the following:\n\n            resistance : float\n                The resistance, in Ohms.\n            idc : float\n                The maximum allowable DC current, in Amperes.\n            iac_rms : float\n                The maximum allowable AC RMS current, in Amperes.\n            iac_peak : float\n                The maximum allowable AC peak current, in Amperes.\n        \"\"\"\n        rsq = self.get_res_rsquare(res_type)\n        res = l / w * rsq\n        idc, irms, ipeak = self.get_res_em_specs(res_type, w, l=l, **kwargs)\n\n        return dict(\n            resistance=res,\n            idc=idc,\n            iac_rms=irms,\n            iac_peak=ipeak,\n        )\n\n    def get_via_types(self, bmtype, tmtype):\n        return [('square', 1), ('vrect', 2), ('hrect', 2)]\n\n    def get_best_via_array(self, vname, bmtype, tmtype, bot_dir, top_dir, w, h, extend):\n        \"\"\"Maximize the number of vias in the given bounding box.\n\n        Parameters\n        ----------\n        vname : str\n            the via type name.\n        bmtype : str\n            the bottom metal type name.\n        tmtype : str\n            the top metal type name.\n        bot_dir : str\n            the bottom wire direction.  Either 'x' or 'y'.\n        top_dir : str\n            the top wire direction.  
    def get_best_via_array(self, vname, bmtype, tmtype, bot_dir, top_dir, w, h, extend):
        """Maximize the number of vias in the given bounding box.

        Parameters
        ----------
        vname : str
            the via type name.
        bmtype : str
            the bottom metal type name.
        tmtype : str
            the top metal type name.
        bot_dir : str
            the bottom wire direction.  Either 'x' or 'y'.
        top_dir : str
            the top wire direction.  Either 'x' or 'y'.
        w : float
            width of the via array bounding box, in layout units.
        h : float
            height of the via array bounding box, in layout units.
        extend : bool
            True if via can extend beyond bounding box.

        Returns
        -------
        best_nxy : Tuple[int, int]
            optimal number of vias per row/column.  The whole return value is
            None instead of a tuple when no legal via array exists.
        best_mdim_list : List[Tuple[int, int]]
            a list of bottom/top layer width/height, in resolution units.
        vtype : str
            the via type to draw, square/hrect/vrect/etc.
        vdim : Tuple[int, int]
            the via width/height, in resolution units.
        via_space : Tuple[int, int]
            the via horizontal/vertical spacing, in resolution units.
        via_arr_dim : Tuple[int, int]
            the via array width/height, in resolution units.
        """
        # This entire optimization routine relies on the bounding box being measured
        # in integer resolution units.
        res = self._resolution
        w = int(round(w / res))
        h = int(round(h / res))

        # Depending on the routing direction of the metal, the provided width/height of the
        # bounding box may correspond to either the x direction or y direction.
        # bb/tb = dimension perpendicular to the bottom/top wire (its width).
        # NOTE(review): be/te are assigned but never read below — presumably kept for
        # symmetry; confirm before removing.
        if bot_dir == 'x':
            bb, be = h, w
        else:
            bb, be = w, h
        if top_dir == 'x':
            tb, te = h, w
        else:
            tb, te = w, h

        # Initialize variables that will hold the optimal via configuration at the
        # end of the algorithm.
        best_num = None
        best_nxy = [-1, -1]
        best_mdim_list = None
        best_type = None
        best_vdim = None
        best_sp = None
        best_adim = None

        # Perform via optimization algorithm for all available via types. Some technologies
        # have both square and rectangular via types, which can be used in different
        # situations.  Each via type has a weight which signifies a preference for choosing
        # one type over another.
        via_type_list = self.get_via_types(bmtype, tmtype)
        for vtype, weight in via_type_list:
            # Extract via DRC information from the loaded tech yaml file. Some DRC info is
            # optional, so catch ValueErrors from missing info and move on.
            try:
                # get space and enclosure rules for top and bottom layer
                bot_drc_info = self.get_via_drc_info(vname, vtype, bmtype, bb, True)
                top_drc_info = self.get_via_drc_info(vname, vtype, tmtype, tb, False)
                sp, sp2_list, sp3_list, sp6_list, dim, encb, arr_encb, arr_testb = bot_drc_info
                _, _, _, _, _, enct, arr_enct, arr_testt = top_drc_info
            except ValueError:
                continue
            # Optional sp2/sp3 rules enable different spacing rules for via arrays with 2 or
            # 3 neighbors; missing lists fall back to the next-less-specific rule.
            if sp2_list is None:
                sp2_list = [sp]
            if sp3_list is None:
                sp3_list = sp2_list
            if sp6_list is None:
                sp6_list = sp3_list

            # Get minimum possible spacing between vias across all spacing rule sets.
            spx_min, spy_min = sp
            for high_sp_list in (sp2_list, sp3_list, sp6_list):
                for high_spx, high_spy in high_sp_list:
                    spx_min = min(spx_min, high_spx)
                    spy_min = min(spy_min, high_spy)

            # Get minimum possible enclosure size for top or bottom layers.
            # NOTE(review): starting from 0 and taking min() means extx/exty never exceed 0
            # for non-negative enclosure values — this makes nx_max/ny_max an optimistic
            # upper bound; the per-configuration enclosure check below does the real
            # filtering.  Confirm this is intentional.
            extx = 0
            exty = 0
            for enc in chain(encb, enct):
                extx = min(extx, enc[0])
                exty = min(exty, enc[1])

            # Allocate area in the bounding box for minimum enclosure, then find the
            # maximum number of vias that can fit in the remaining area with the minimum
            # spacing.  An infinite spacing rule means at most one via per axis.
            if np.isinf(spx_min):
                nx_max = 1 if (w - 2 * extx) // dim[0] else 0
            else:
                nx_max = (w + spx_min - 2 * extx) // (dim[0] + spx_min)
            if np.isinf(spy_min):
                ny_max = 1 if (h - 2 * exty) // dim[1] else 0
            else:
                ny_max = (h + spy_min - 2 * exty) // (dim[1] + spy_min)

            # Theoretically any combination of via array size from (1, 1) to
            # (nx_max, ny_max) may actually work within the given bounding box. Here we
            # enumerate a list of all of these possible via combinations starting from the
            # max via number.
            nxy_list = [(a * b, a, b) for a in range(1, nx_max + 1) for b in range(1, ny_max + 1)]
            nxy_list = sorted(nxy_list, reverse=True)

            # Initialize variables that will hold the best working via array size for this
            # via type.
            opt_nxy = None
            opt_mdim_list = None
            opt_adim = None
            opt_sp = None

            # This looping procedure will iterate over all possible via array configurations
            # and select one that maximizes the number of vias while meeting all rules.
            for num, nx, ny in nxy_list:
                # Determine whether we should be using sp/sp2/sp3/sp6 rules for the current
                # via configuration.
                # NOTE(review): the sp2 branch triggers only for exactly 2x2 arrays — other
                # configurations with 2 neighbors (e.g. 2x3) fall through to sp3; confirm
                # against the technology's DRC manual.
                if (nx == 1 and ny >= 1) or (nx >= 1 and ny == 1):
                    sp_combo = [sp]
                elif nx == 2 and ny == 2:
                    sp_combo = sp2_list
                elif nx >= 6 and ny >= 6:
                    sp_combo = sp6_list
                else:
                    sp_combo = sp3_list

                # DRC rules can typically be satisfied with a number of different spacing
                # rules, so here we iterate over each to find the best one.  Note that since
                # we break out of the loop immediately upon finding a valid via
                # configuration, this code prioritizes spacing rules that are early on in
                # the list.
                for spx, spy in sp_combo:
                    # Compute a bounding box for the via array without the enclosure.
                    w_arr = dim[0] if nx == 1 else nx * (spx + dim[0]) - spx
                    h_arr = dim[1] if ny == 1 else ny * (spy + dim[1]) - spy
                    mdim_list = [None, None]

                    # Loop over all possible enclosure types and check whether this via
                    # configuration satisfies one of them for both the bottom metal and top
                    # metal.
                    for idx, (mdir, tot_enc_list, arr_enc, arr_test) in \
                            enumerate([(bot_dir, encb, arr_encb, arr_testb),
                                       (top_dir, enct, arr_enct, arr_testt)]):
                        # arr_test is a function that takes an array size as input and
                        # returns a boolean. If it is true, the array-specific enclosures
                        # are added to the list of valid enclosures.
                        if arr_test is not None and arr_test(ny, nx):
                            tot_enc_list = tot_enc_list + arr_enc

                        # If the routing direction is y, start by computing x-direction
                        # enclosure; ext_dim corresponds to the y direction.  Vice-versa if
                        # the routing direction is x.
                        if mdir == 'y':
                            enc_idx = 0
                            enc_dim = w_arr
                            ext_dim = h_arr
                            dim_lim = w
                            max_ext_dim = h
                        else:
                            enc_idx = 1
                            enc_dim = h_arr
                            ext_dim = w_arr
                            dim_lim = h
                            max_ext_dim = w

                        # Initialize variable to hold opposite direction enclosure size.
                        min_ext_dim = None

                        # This loop selects the minimum opposite-direction size that
                        # satisfies the enclosure rules.
                        for enc in tot_enc_list:
                            cur_ext_dim = ext_dim + 2 * enc[1 - enc_idx]
                            # Check that the enclosure rule is satisfied.  If extend is
                            # true, this passing enclosure size can exceed the maximum size
                            # set by the user provided bounding box.
                            if (enc[enc_idx] * 2 + enc_dim <= dim_lim) and (extend or cur_ext_dim <= max_ext_dim):
                                # Select the minimum of all enclosures in the non-routing
                                # direction that satisfies the enclosure rules.
                                if min_ext_dim is None or min_ext_dim > cur_ext_dim:
                                    min_ext_dim = cur_ext_dim

                        # If none of the enclosures in the list meet the rules, the current
                        # spacing rules cannot be used to create a valid via, so we continue
                        # on to the next set of spacing rules.
                        if min_ext_dim is None:
                            break
                        # Otherwise record the computed via dimensions that pass all checks.
                        else:
                            # The metal patch is at least as long as the available bounding
                            # box in the wire direction, and exactly dim_lim wide.
                            min_ext_dim = max(min_ext_dim, max_ext_dim)
                            mdim_list[idx] = [min_ext_dim, min_ext_dim]
                            mdim_list[idx][enc_idx] = dim_lim

                    # If we've found a valid via configuration immediately break out of the
                    # loop.
                    if mdim_list[0] is not None and mdim_list[1] is not None:
                        # passed
                        opt_mdim_list = mdim_list
                        opt_nxy = (nx, ny)
                        opt_adim = (w_arr, h_arr)
                        opt_sp = (spx, spy)
                        break

                # If we've found a valid via array size immediately break out of the loop.
                if opt_nxy is not None:
                    break

            # Select the best via out of all the passing via types. Vias are selected by
            # choosing the highest 'best_num'.  This is calculated by multiplying the via
            # array size by the via weight.  Ties between vias are broken by minimizing
            # drawn via area.
            if opt_nxy is not None:
                opt_num = weight * opt_nxy[0] * opt_nxy[1]
                if (best_num is None or opt_num > best_num or
                        (opt_num == best_num and self._via_better(best_mdim_list, opt_mdim_list))):
                    best_num = opt_num
                    best_nxy = opt_nxy
                    best_mdim_list = opt_mdim_list
                    best_type = vtype
                    best_vdim = dim
                    best_sp = opt_sp
                    best_adim = opt_adim

        if best_num is None:
            return None
        return best_nxy, best_mdim_list, best_type, best_vdim, best_sp, best_adim

    def _via_better(self, mdim_list1, mdim_list2):
        """Returns true if the via in mdim_list1 has smaller area compared with via in mdim_list2.

        Returns False as soon as any layer of mdim_list1 is strictly larger.
        """
        # NOTE(review): mdim entries appear to already be in resolution units (see
        # get_best_via_array), yet they are divided by res again here.  The extra 1/res
        # factor scales both candidates identically, so the comparison result is unchanged,
        # but confirm the intended units.
        res = self._resolution
        better = False
        for mdim1, mdim2 in zip(mdim_list1, mdim_list2):
            area1 = int(round(mdim1[0] / res)) * int(round(mdim1[1] / res))
            area2 = int(round(mdim2[0] / res)) * int(round(mdim2[1] / res))
            if area1 < area2:
                better = True
            elif area1 > area2:
                return False
        return better

    # noinspection PyMethodMayBeStatic
    def get_via_id(self, bot_layer, top_layer):
        """Returns the via ID string given bottom and top layer name.

        Defaults to "<top_layer>_<bot_layer>".

        Parameters
        ----------
        bot_layer : string
            the bottom layer name.
        top_layer : string
            the top layer name.

        Returns
        -------
        via_id : string
            the via ID string.
        """
        # NOTE(review): the top layer comes first in the ID; the original docstring claimed
        # the opposite order.  Docstring updated to match the code — verify which order the
        # technology library actually expects.
        return '%s_%s' % (top_layer, bot_layer)
    def get_via_info(self, bbox, bot_layer, top_layer, bot_dir, bot_len=-1, top_len=-1,
                     extend=True, top_dir=None, **kwargs):
        """Create a via on the routing grid given the bounding box.

        Parameters
        ----------
        bbox : ..layout.util.BBox
            the bounding box of the via.
        bot_layer : Union[str, Tuple[str, str]]
            the bottom layer name, or a tuple of layer name and purpose name.
            If purpose name not given, defaults to 'drawing'.
        top_layer : Union[str, Tuple[str, str]]
            the top layer name, or a tuple of layer name and purpose name.
            If purpose name not given, defaults to 'drawing'.
        bot_dir : str
            the bottom layer extension direction.  Either 'x' or 'y'
        bot_len : float
            length of bottom wire connected to this Via, in layout units.
            Used for length enhancement EM calculation.
        top_len : float
            length of top wire connected to this Via, in layout units.
            Used for length enhancement EM calculation.
        extend : bool
            True if via extension can be drawn outside of bounding box.
        top_dir : Optional[str]
            top layer extension direction.  Can force to extend in same direction as bottom.
        **kwargs :
            optional parameters for EM rule calculations, such as nominal temperature,
            AC rms delta-T, etc.

        Returns
        -------
        info : dict[string, any]
            A dictionary of via information, or None if no solution.  Should have the
            following:

            resistance : float
                The total via array resistance, in Ohms.
            idc : float
                The total via array maximum allowable DC current, in Amperes.
            iac_rms : float
                The total via array maximum allowable AC RMS current, in Amperes.
            iac_peak : float
                The total via array maximum allowable AC peak current, in Amperes.
            params : dict[str, any]
                A dictionary of via parameters.
            top_box : ..layout.util.BBox
                the top via layer bounding box, including extensions.
            bot_box : ..layout.util.BBox
                the bottom via layer bounding box, including extensions.
        """
        # remove purpose; accept (layer, purpose) tuples and bare layer names alike
        if isinstance(bot_layer, tuple):
            bot_layer = bot_layer[0]
        if isinstance(top_layer, tuple):
            top_layer = top_layer[0]
        bot_layer = io.fix_string(bot_layer)
        top_layer = io.fix_string(top_layer)

        bot_id = self.get_layer_id(bot_layer)
        bmtype = self.get_layer_type(bot_layer)
        tmtype = self.get_layer_type(top_layer)
        vname = self.get_via_name(bot_id)

        # default top direction: perpendicular to the bottom wire
        if not top_dir:
            top_dir = 'x' if bot_dir == 'y' else 'y'

        via_result = self.get_best_via_array(vname, bmtype, tmtype, bot_dir, top_dir,
                                             bbox.width, bbox.height, extend)
        if via_result is None:
            # no solution found
            return None

        (nx, ny), mdim_list, vtype, vdim, (spx, spy), (warr_norm, harr_norm) = via_result

        res = self.resolution
        xc_norm = bbox.xc_unit
        yc_norm = bbox.yc_unit

        # bottom/top metal patch dimensions, in resolution units
        wbot_norm = mdim_list[0][0]
        hbot_norm = mdim_list[0][1]
        wtop_norm = mdim_list[1][0]
        htop_norm = mdim_list[1][1]

        # OpenAccess Via can't handle even + odd enclosure, so we truncate.
        enc1_x = (wbot_norm - warr_norm) // 2 * res
        enc1_y = (hbot_norm - harr_norm) // 2 * res
        enc2_x = (wtop_norm - warr_norm) // 2 * res
        enc2_y = (htop_norm - harr_norm) // 2 * res

        # compute EM rule dimensions: the wire width perpendicular to each layer's
        # routing direction
        if bot_dir == 'x':
            bw, tw = hbot_norm * res, wtop_norm * res
        else:
            bw, tw = wbot_norm * res, htop_norm * res

        # lower-left corners of the bottom/top metal patches, centered on the via
        bot_xl_norm = xc_norm - wbot_norm // 2
        bot_yb_norm = yc_norm - hbot_norm // 2
        top_xl_norm = xc_norm - wtop_norm // 2
        top_yb_norm = yc_norm - htop_norm // 2

        bot_box = BBox(bot_xl_norm, bot_yb_norm, bot_xl_norm + wbot_norm,
                       bot_yb_norm + hbot_norm, res, unit_mode=True)
        top_box = BBox(top_xl_norm, top_yb_norm, top_xl_norm + wtop_norm,
                       top_yb_norm + htop_norm, res, unit_mode=True)

        idc, irms, ipeak = self.get_via_em_specs(vname, bot_layer, top_layer, via_type=vtype,
                                                 bm_dim=(bw, bot_len), tm_dim=(tw, top_len),
                                                 array=nx > 1 or ny > 1, **kwargs)

        params = {'id': self.get_via_id(bot_layer, top_layer),
                  'loc': (xc_norm * res, yc_norm * res),
                  'orient': 'R0',
                  'num_rows': ny,
                  'num_cols': nx,
                  'sp_rows': spy * res,
                  'sp_cols': spx * res,
                  # increase left/bottom enclosure if off-center.
                  'enc1': [enc1_x, enc1_x, enc1_y, enc1_y],
                  'enc2': [enc2_x, enc2_x, enc2_y, enc2_y],
                  'cut_width': vdim[0] * res,
                  'cut_height': vdim[1] * res,
                  }

        # EM limits scale linearly with the number of cuts.
        # NOTE(review): resistance is hard-coded to 0.0 despite the docstring promising
        # the total via array resistance — presumably subclasses override or callers
        # ignore it; confirm.
        ntot = nx * ny
        return dict(
            resistance=0.0,
            idc=idc * ntot,
            iac_rms=irms * ntot,
            iac_peak=ipeak * ntot,
            params=params,
            top_box=top_box,
            bot_box=bot_box,
        )
else:\n            npar_iter = BinaryIterator(1, None, step=1)\n        while npar_iter.has_next():\n            npar = npar_iter.get_next()\n            res_targ_par = res_targ * npar\n            idc_par = idc / npar\n            iac_rms_par = iac_rms / npar\n            iac_peak_par = iac_peak / npar\n            res_idc, res_irms, res_ipeak = self.get_res_em_specs(res_type, wmax, **kwargs)\n            if (0.0 < res_idc < idc_par or 0.0 < res_irms < iac_rms_par or\n                    0.0 < res_ipeak < iac_peak_par):\n                npar_iter.up()\n            else:\n                # This could potentially work, find width solution\n                w_iter = BinaryIterator(wmin_unit, wmax_unit + 1, step=2)\n                while w_iter.has_next():\n                    wcur_unit = w_iter.get_next()\n                    lcur_unit = int(math.ceil(res_targ_par / rsq * wcur_unit))\n                    if lcur_unit < max(lmin_unit, int(math.ceil(min_nsq * wcur_unit))):\n                        w_iter.down()\n                    else:\n                        tmp = self.get_res_em_specs(res_type, wcur_unit * resolution,\n                                                    l=lcur_unit * resolution, **kwargs)\n                        res_idc, res_irms, res_ipeak = tmp\n                        if (0.0 < res_idc < idc_par or 0.0 < res_irms < iac_rms_par or\n                                0.0 < res_ipeak < iac_peak_par):\n                            w_iter.up()\n                        else:\n                            w_iter.save_info((wcur_unit, lcur_unit))\n                            w_iter.down()\n\n                w_info = w_iter.get_last_save_info()\n                if w_info is None:\n                    # no solution; we need more parallel resistors\n                    npar_iter.up()\n                else:\n                    # solution!\n                    npar_iter.save_info((npar, w_info[0], w_info[1]))\n                    npar_iter.down()\n\n        # 
step 3: fix maximum length violation by having resistor in series.\n        num_par, wopt_unit, lopt_unit = npar_iter.get_last_save_info()\n        wopt = wopt_unit * resolution\n        if lopt_unit > lmax_unit:\n            num_ser = -(-lopt_unit // lmax_unit)\n            lopt = round(lopt_unit / num_ser / resolution) * resolution\n        else:\n            num_ser = 1\n            lopt = lopt_unit * resolution\n\n        # step 4: return answer\n        return num_par, num_ser, wopt * self.layout_unit, lopt * self.layout_unit\n\n\nclass DummyTechInfo(TechInfo):\n    \"\"\"A dummy TechInfo class.\n\n    Parameters\n    ----------\n    tech_params : dict[str, any]\n        technology parameters dictionary.\n    \"\"\"\n\n    def __init__(self, tech_params):\n        TechInfo.__init__(self, 0.001, 1e-6, '', tech_params)\n\n    def get_well_layers(self, sub_type):\n        return []\n\n    def get_implant_layers(self, mos_type, res_type=None):\n        return []\n\n    def get_threshold_layers(self, mos_type, threshold, res_type=None):\n        return []\n\n    def get_dnw_layers(self):\n        # type: () -> List[Tuple[str, str]]\n        return []\n\n    def get_exclude_layer(self, layer_id):\n        # type: (int) -> Tuple[str, str]\n        \"\"\"Returns the metal exclude layer\"\"\"\n        return '', ''\n\n    def get_dnw_margin_unit(self, dnw_mode):\n        # type: (str) -> int\n        return 0\n\n    def get_res_metal_layers(self, layer_id):\n        # type: (int) -> List[Tuple[str, str]]\n        return []\n\n    def get_metal_dummy_layers(self, layer_id):\n        # type: (int) -> List[Tuple[str, str]]\n        return []\n\n    def add_cell_boundary(self, template, box):\n        pass\n\n    def draw_device_blockage(self, template):\n        pass\n\n    def get_via_drc_info(self, vname, vtype, mtype, mw_unit, is_bot):\n        return (0, 0), [(0, 0)], [(0, 0)], [(0, 0)], (0, 0), [(0, 0)], None, None\n\n    def get_min_space(self, layer_type, width, 
class DummyTechInfo(TechInfo):
    """A dummy TechInfo class.

    Implements every abstract hook with a no-op/neutral value, for use when no real
    technology information is available.

    Parameters
    ----------
    tech_params : dict[str, any]
        technology parameters dictionary.
    """

    def __init__(self, tech_params):
        # resolution=0.001, layout_unit=1e-6, empty via tech name
        TechInfo.__init__(self, 0.001, 1e-6, '', tech_params)

    # --- layer queries: no layers defined -------------------------------------

    def get_well_layers(self, sub_type):
        return []

    def get_implant_layers(self, mos_type, res_type=None):
        return []

    def get_threshold_layers(self, mos_type, threshold, res_type=None):
        return []

    def get_dnw_layers(self):
        # type: () -> List[Tuple[str, str]]
        return []

    def get_exclude_layer(self, layer_id):
        # type: (int) -> Tuple[str, str]
        """Returns the metal exclude layer"""
        return '', ''

    def get_dnw_margin_unit(self, dnw_mode):
        # type: (str) -> int
        return 0

    def get_res_metal_layers(self, layer_id):
        # type: (int) -> List[Tuple[str, str]]
        return []

    def get_metal_dummy_layers(self, layer_id):
        # type: (int) -> List[Tuple[str, str]]
        return []

    # --- drawing hooks: do nothing --------------------------------------------

    def add_cell_boundary(self, template, box):
        pass

    def draw_device_blockage(self, template):
        pass

    # --- DRC queries: trivial zero-valued rules -------------------------------

    def get_via_drc_info(self, vname, vtype, mtype, mw_unit, is_bot):
        return (0, 0), [(0, 0)], [(0, 0)], [(0, 0)], (0, 0), [(0, 0)], None, None

    def get_min_space(self, layer_type, width, unit_mode=False, same_color=False):
        return 0

    def get_min_line_end_space(self, layer_type, width, unit_mode=False):
        return 0

    def get_min_length(self, layer_type, width):
        return 0.0

    def get_layer_id(self, layer_name):
        return -1

    def get_layer_name(self, layer_id):
        return ''

    def get_layer_type(self, layer_name):
        return ''

    def get_via_name(self, bot_layer_id):
        return ''

    # --- EM queries: unlimited current, zero resistance -----------------------

    def get_metal_em_specs(self, layer_name, w, l=-1, vertical=False, **kwargs):
        return float('inf'), float('inf'), float('inf')

    def get_via_em_specs(self, via_name, bm_layer, tm_layer, via_type='square',
                         bm_dim=(-1, -1), tm_dim=(-1, -1), array=False, **kwargs):
        return float('inf'), float('inf'), float('inf')

    def get_res_rsquare(self, res_type):
        return 0.0

    def get_res_width_bounds(self, res_type):
        return 0.0, 0.0

    def get_res_length_bounds(self, res_type):
        return 0.0, 0.0

    def get_res_min_nsquare(self, res_type):
        return 1.0

    def get_res_em_specs(self, res_type, w, l=-1, **kwargs):
        return float('inf'), float('inf'), float('inf')
    def __init__(self, grid, use_cybagoa=False):
        # cached technology/grid settings
        self._res = grid.resolution
        self._via_tech = grid.tech_info.via_tech_name
        self._pin_purpose = grid.tech_info.pin_purpose
        self._make_pin_rect = True
        # geometry containers, populated by add_* methods and frozen by finalize()
        self._inst_list = []  # type: List[Instance]
        self._inst_primitives = []  # type: List[InstanceInfo]
        self._rect_list = []  # type: List[Rect]
        self._via_list = []  # type: List[Via]
        self._via_primitives = []  # type: List[ViaInfo]
        self._pin_list = []  # type: List[PinInfo]
        self._path_list = []  # type: List[Path]
        self._polygon_list = []  # type: List[Polygon]
        self._blockage_list = []  # type: List[Blockage]
        self._boundary_list = []  # type: List[Boundary]
        # name bookkeeping so generated instance/pin names never collide
        self._used_inst_names = set()
        self._used_pin_names = set()
        # set by finalize(); None until then
        self._raw_content = None
        self._is_empty = True
        self._finalized = False
        self._use_cybagoa = use_cybagoa

    @property
    def pin_purpose(self):
        """Returns the default pin layer purpose name."""
        return self._pin_purpose

    @property
    def is_empty(self):
        """Returns True if this layout is empty."""
        return self._is_empty

    def inst_iter(self):
        # type: () -> Iterator[Instance]
        """Returns an iterator over the instances added to this layout."""
        return iter(self._inst_list)

    def finalize(self):
        # type: () -> None
        """Prevents any further changes to this layout.

        Collects the content of all valid geometry objects into _raw_content and
        computes the is_empty flag.
        """
        self._finalized = True

        # get rectangles; skip (and warn about) rectangles with degenerate boxes
        rect_list = []
        for obj in self._rect_list:
            if obj.valid:
                if not obj.bbox.is_physical():
                    print('WARNING: rectangle with non-physical bounding box found.', obj.layer)
                else:
                    obj_content = obj.content
                    rect_list.append(obj_content)

        # filter out invalid geometries
        path_list, polygon_list, blockage_list, boundary_list, via_list = [], [], [], [], []
        for targ_list, obj_list in ((path_list, self._path_list),
                                    (polygon_list, self._polygon_list),
                                    (blockage_list, self._blockage_list),
                                    (boundary_list, self._boundary_list),
                                    (via_list, self._via_list)):
            for obj in obj_list:
                if obj.valid:
                    targ_list.append(obj.content)

        # get via primitives
        via_list.extend(self._via_primitives)

        # get instances; _format_inst assigns each a unique name
        inst_list = []  # type: List[InstanceInfo]
        for obj in self._inst_list:
            if obj.valid:
                obj_content = self._format_inst(obj)
                inst_list.append(obj_content)

        # order matters: get_content unpacks this list positionally
        self._raw_content = [inst_list,
                             self._inst_primitives,
                             rect_list,
                             via_list,
                             self._pin_list,
                             path_list,
                             blockage_list,
                             boundary_list,
                             polygon_list,
                             ]

        if (not inst_list and not self._inst_primitives and not rect_list and not blockage_list and
                not boundary_list and not via_list and not self._pin_list and not path_list and
                not polygon_list):
            self._is_empty = True
        else:
            self._is_empty = False

    def get_rect_bbox(self, layer):
        # type: (Union[str, Tuple[str, str]]) -> BBox
        """Returns the overall bounding box of all rectangles on the given layer.

        Note: currently this does not check primitive instances or vias.
        """
        # normalize a bare layer name to (layer, purpose)
        if isinstance(layer, str):
            layer = (layer, 'drawing')

        box = BBox.get_invalid_bbox()
        for rect in self._rect_list:
            if layer == rect.layer:
                box = box.merge(rect.bbox_array.get_overall_bbox())

        # recurse into child instances
        for inst in self._inst_list:
            box = box.merge(inst.get_rect_bbox(layer))

        return box

    def get_masters_set(self):
        """Returns a set of all template master keys used in this layout."""
        return set((inst.master.key for inst in self._inst_list))

    def _get_unused_inst_name(self, inst_name):
        """Returns a new inst name.

        Keeps the given name if it is unused; otherwise generates the first free 'X<n>'.
        """
        if inst_name is None or inst_name in self._used_inst_names:
            cnt = 0
            inst_name = 'X%d' % cnt
            while inst_name in self._used_inst_names:
                cnt += 1
                inst_name = 'X%d' % cnt

        return inst_name

    def _format_inst(self, inst):
        # type: (Instance) -> InstanceInfo
        """Convert the given instance into dictionary representation."""
        content = inst.content
        # assign a unique instance name and record it as used
        inst_name = self._get_unused_inst_name(content.name)
        content.name = inst_name
        self._used_inst_names.add(inst_name)
        return content

    def get_content(self,  # type: BagLayout
                    lib_name,  # type: str
                    cell_name,  # type: str
                    rename_fun,  # type: Callable[[str], str]
                    ):
        # type: (...) -> Union[List[Any], Tuple[str, 'cybagoa.PyOALayout']]
        """returns a list describing geometries in this layout.

        Parameters
        ----------
        lib_name : str
            the layout library name.
        cell_name : str
            the layout top level cell name.
        rename_fun : Callable[[str], str]
            the layout cell renaming function.

        Returns
        -------
        content : Union[List[Any], Tuple[str, 'cybagoa.PyOALayout']]
            a list describing this layout, or PyOALayout if cybagoa package is enabled.
        """
        if not self._finalized:
            raise Exception('Layout is not finalized.')

        cell_name = rename_fun(cell_name)
        # unpacking order must match the list built in finalize()
        (inst_list, inst_prim_list, rect_list, via_list, pin_list,
         path_list, blockage_list, boundary_list, polygon_list) = self._raw_content

        # update library name and apply layout cell renaming on instances
        inst_tot_list = []
        for inst in inst_list:
            inst_temp = inst.copy()
            inst_temp['lib'] = lib_name
            inst_temp['cell'] = rename_fun(inst_temp['cell'])
            inst_tot_list.append(inst_temp)
        inst_tot_list.extend(inst_prim_list)

        if self._use_cybagoa and cybagoa is not None:
            # fast path: build an OpenAccess layout object directly
            encoding = io.get_encoding()
            oa_layout = cybagoa.PyLayout(encoding)

            for obj in inst_tot_list:
                # master_key is internal bookkeeping, not an OA parameter
                obj.pop('master_key', None)
                oa_layout.add_inst(**obj)
            for obj in rect_list:
                oa_layout.add_rect(**obj)
            for obj in via_list:
                oa_layout.add_via(**obj)
            for obj in pin_list:
                oa_layout.add_pin(**obj)
            for obj in path_list:
                oa_layout.add_path(**obj)
            for obj in blockage_list:
                oa_layout.add_blockage(**obj)
            for obj in boundary_list:
                oa_layout.add_boundary(**obj)
            for obj in polygon_list:
                oa_layout.add_polygon(**obj)

            return cell_name, oa_layout
        else:
            ans = [cell_name, inst_tot_list, rect_list, via_list, pin_list, path_list,
                   blockage_list, boundary_list, polygon_list]
            return ans

    def add_instance(self, instance):
        """Adds the given instance to this layout.

        Parameters
        ----------
        instance : ..layout.objects.Instance
            the instance to add.
        """
        if self._finalized:
            raise Exception('Layout is already finalized.')

        # if isinstance(instance.nx, float) or isinstance(instance.ny, float):
        #     raise Exception('float nx/ny')

        self._inst_list.append(instance)
objects in this layout by the given amount.\n\n        Parameters\n        ----------\n        dx : Union[float, int]\n            the X shift.\n        dy : Union[float, int]\n            the Y shift.\n        unit_mode : bool\n            True if shift values are given in resolution units.\n        \"\"\"\n        if self._finalized:\n            raise Exception('Layout is already finalized.')\n\n        for obj in chain(self._inst_list, self._inst_primitives, self._rect_list,\n                         self._via_primitives, self._via_list, self._pin_list,\n                         self._path_list, self._blockage_list, self._boundary_list,\n                         self._polygon_list):\n            obj.move_by(dx=dx, dy=dy, unit_mode=unit_mode)\n\n    def add_instance_primitive(self,  # type: BagLayout\n                               lib_name,  # type: str\n                               cell_name,  # type: str\n                               loc,  # type: Tuple[Union[float, int], Union[float, int]]\n                               view_name='layout',  # type: str\n                               inst_name=None,  # type: Optional[str]\n                               orient=\"R0\",  # type: str\n                               num_rows=1,  # type: int\n                               num_cols=1,  # type: int\n                               sp_rows=0,  # type: Union[float, int]\n                               sp_cols=0,  # type: Union[float, int]\n                               params=None,  # type: Optional[Dict[str, Any]]\n                               unit_mode=False,  # type: bool\n                               **kwargs\n                               ):\n        \"\"\"Adds a new (arrayed) primitive instance to this layout.\n\n        Parameters\n        ----------\n        lib_name : str\n            instance library name.\n        cell_name : str\n            instance cell name.\n        loc : Tuple[Union[float, int], Union[float, int]]\n            instance 
location.\n        view_name : str\n            instance view name.  Defaults to 'layout'.\n        inst_name : Optional[str]\n            instance name.  If None or an instance with this name already exists,\n            a generated unique name is used.\n        orient : str\n            instance orientation.  Defaults to \"R0\"\n        num_rows : int\n            number of rows.  Must be positive integer.\n        num_cols : int\n            number of columns.  Must be positive integer.\n        sp_rows : Union[float, int]\n            row spacing.  Used for arraying given instance.\n        sp_cols : Union[float, int]\n            column spacing.  Used for arraying given instance.\n        params : Optional[Dict[str, Any]]\n            the parameter dictionary.  Used for adding pcell instance.\n        unit_mode : bool\n            True if distances are specified in resolution units.\n        **kwargs :\n            additional arguments.  Usually implementation specific.\n        \"\"\"\n        if self._finalized:\n            raise Exception('Layout is already finalized.')\n\n        res = self._res\n        if not unit_mode:\n            loc = [round(loc[0] / res) * res,\n                   round(loc[1] / res) * res]\n            sp_rows = round(sp_rows / res) * res\n            sp_cols = round(sp_cols / res) * res\n        else:\n            loc = [loc[0] * res, loc[1] * res]\n            sp_rows *= res\n            sp_cols *= res\n\n        # get unique instance name\n        inst_name = self._get_unused_inst_name(inst_name)\n        self._used_inst_names.add(inst_name)\n\n        inst_info = InstanceInfo(self._res, lib=lib_name,\n                                 cell=cell_name,\n                                 view=view_name,\n                                 name=inst_name,\n                                 loc=loc,\n                                 orient=orient,\n                                 num_rows=num_rows,\n                                 
num_cols=num_cols,\n                                 sp_rows=sp_rows,\n                                 sp_cols=sp_cols)\n\n        # if isinstance(num_rows, float) or isinstance(num_cols, float):\n        #     raise Exception('float nx/ny')\n\n        if params is not None:\n            inst_info.params = params\n        inst_info.update(kwargs)\n\n        self._inst_primitives.append(inst_info)\n\n    def add_rect(self, rect):\n        \"\"\"Add a new (arrayed) rectangle.\n\n        Parameters\n        ----------\n        rect : ..layout.objects.Rect\n            the rectangle object to add.\n        \"\"\"\n        if self._finalized:\n            raise Exception('Layout is already finalized.')\n\n        self._rect_list.append(rect)\n\n    def add_path(self, path):\n        # type: (Path) -> None\n        \"\"\"Add a new path.\n\n        Parameters\n        ----------\n        path : Path\n            the path object to add.\n        \"\"\"\n        if self._finalized:\n            raise Exception('Layout is already finalized.')\n\n        self._path_list.append(path)\n\n    def add_polygon(self, polygon):\n        # type: (Polygon) -> None\n        \"\"\"Add a new polygon.\n\n        Parameters\n        ----------\n        polygon : Polygon\n            the polygon object to add.\n        \"\"\"\n        if self._finalized:\n            raise Exception('Layout is already finalized.')\n\n        self._polygon_list.append(polygon)\n\n    def add_blockage(self, blockage):\n        # type: (Blockage) -> None\n        \"\"\"Add a new blockage.\n\n        Parameters\n        ----------\n        blockage : Blockage\n            the blockage object to add.\n        \"\"\"\n        if self._finalized:\n            raise Exception('Layout is already finalized.')\n\n        self._blockage_list.append(blockage)\n\n    def add_boundary(self, boundary):\n        # type: (Boundary) -> None\n        \"\"\"Add a new boundary.\n\n        Parameters\n        ----------\n        
boundary : Boundary\n            the boundary object to add.\n        \"\"\"\n        if self._finalized:\n            raise Exception('Layout is already finalized.')\n\n        self._boundary_list.append(boundary)\n\n    def add_via(self, via):\n        \"\"\"Add a new (arrayed) via.\n\n        Parameters\n        ----------\n        via : ..layout.objects.Via\n            the via object to add.\n        \"\"\"\n        if self._finalized:\n            raise Exception('Layout is already finalized.')\n\n        # if isinstance(via.nx, float) or isinstance(via.ny, float):\n        #     raise Exception('float nx/ny')\n\n        self._via_list.append(via)\n\n    def add_via_primitive(self, via_type, loc, num_rows=1, num_cols=1, sp_rows=0.0, sp_cols=0.0,\n                          enc1=None, enc2=None, orient='R0', cut_width=None, cut_height=None,\n                          arr_nx=1, arr_ny=1, arr_spx=0.0, arr_spy=0.0):\n        \"\"\"Adds a primitive via by specifying all parameters.\n\n        Parameters\n        ----------\n        via_type : str\n            the via type name.\n        loc : Tuple[float, float]\n            the via location as a two-element tuple.\n        num_rows : int\n            number of via cut rows.\n        num_cols : int\n            number of via cut columns.\n        sp_rows : float\n            spacing between via cut rows.\n        sp_cols : float\n            spacing between via cut columns.\n        enc1 : list[float]\n            a list of left, right, top, and bottom enclosure values on bottom layer.\n            Defaults to all 0.\n        enc2 : list[float]\n            a list of left, right, top, and bottom enclosure values on top layer.\n            Defaults. to all 0.\n        orient : str\n            orientation of the via.\n        cut_width : float or None\n            via cut width.  This is used to create rectangle via.\n        cut_height : float or None\n            via cut height.  
This is used to create rectangle via.\n        arr_nx : int\n            number of columns.\n        arr_ny : int\n            number of rows.\n        arr_spx : float\n            column pitch.\n        arr_spy : float\n            row pitch.\n        \"\"\"\n        if self._finalized:\n            raise Exception('Layout is already finalized.')\n\n        if arr_nx > 0 and arr_ny > 0:\n            if enc1 is None:\n                enc1 = [0.0, 0.0, 0.0, 0.0]\n            if enc2 is None:\n                enc2 = [0.0, 0.0, 0.0, 0.0]\n\n            # if isinstance(arr_nx, float) or isinstance(arr_ny, float):\n            #     raise Exception('float nx/ny')\n\n            par = ViaInfo(self._res, id=via_type, loc=loc, orient=orient, num_rows=num_rows,\n                          num_cols=num_cols,\n                          sp_rows=sp_rows, sp_cols=sp_cols, enc1=enc1, enc2=enc2, )\n            if cut_width is not None:\n                par['cut_width'] = cut_width\n            if cut_height is not None:\n                par['cut_height'] = cut_height\n            if arr_nx > 1 or arr_ny > 1:\n                par['arr_nx'] = arr_nx\n                par['arr_ny'] = arr_ny\n                par['arr_spx'] = arr_spx\n                par['arr_spy'] = arr_spy\n\n            self._via_primitives.append(par)\n\n    def add_pin(self, net_name, layer, bbox, pin_name=None, label=None):\n        \"\"\"Add a new pin.\n\n        Parameters\n        ----------\n        net_name : str\n            the net name associated with this pin.\n        layer : string or (string, string)\n            the layer name, or (layer, purpose) pair.\n            if purpose is not specified, defaults to 'pin'.\n        bbox : ..layout.util.BBox\n            the rectangle bounding box\n        pin_name : str or None\n            the pin name.  If None or empty, auto-generate from net name.\n        label : str or None\n            the pin label text.  
If None or empty, will use net name as the text.\n        \"\"\"\n        if self._finalized:\n            raise Exception('Layout is already finalized.')\n\n        if isinstance(layer, bytes):\n            # interpret as unicode\n            layer = layer.decode('utf-8')\n        if isinstance(layer, str):\n            layer = (layer, self._pin_purpose)\n        else:\n            layer = layer[0], layer[1]\n\n        if not label:\n            label = net_name\n\n        pin_name = pin_name or net_name\n        idx = 1\n        while pin_name in self._used_pin_names:\n            pin_name = '%s_%d' % (net_name, idx)\n            idx += 1\n\n        par = PinInfo(self._res, net_name=net_name,\n                      pin_name=pin_name,\n                      label=label,\n                      layer=list(layer),\n                      bbox=[[bbox.left, bbox.bottom], [bbox.right, bbox.top]],\n                      make_rect=self._make_pin_rect)\n\n        self._used_pin_names.add(pin_name)\n        self._pin_list.append(par)\n\n    def add_label(self, label, layer, bbox):\n        \"\"\"Add a new label.\n\n        This is mainly used to add voltage text labels.\n\n        Parameters\n        ----------\n        label : str\n            the label text.\n        layer : Union[str, Tuple[str, str]]\n            the layer name, or (layer, purpose) pair.\n            if purpose is not specified, defaults to 'pin'.\n        bbox : ..layout.util.BBox\n            the rectangle bounding box\n        \"\"\"\n        if self._finalized:\n            raise Exception('Layout is already finalized.')\n\n        if isinstance(layer, bytes):\n            # interpret as unicode\n            layer = layer.decode('utf-8')\n        if isinstance(layer, str):\n            layer = (layer, self._pin_purpose)\n        else:\n            layer = layer[0], layer[1]\n\n        par = PinInfo(self._res, net_name='',\n                      pin_name='',\n                      label=label,\n       
               layer=list(layer),\n                      bbox=[[bbox.left, bbox.bottom], [bbox.right, bbox.top]],\n                      make_rect=False)\n\n        self._pin_list.append(par)\n"
  },
  {
    "path": "bag/layout/digital.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This module defines layout template classes for digital standard cells.\n\"\"\"\n\nfrom typing import Dict, Any, Set, Tuple, List, Optional\n\nimport abc\n\nfrom bag.io import read_yaml\n\nfrom ..util.interval import IntervalSet\nfrom .util import BBox\nfrom .template import TemplateDB, TemplateBase\nfrom .objects import Instance\nfrom .routing import TrackID, WireArray\n\n\nclass StdCellBase(TemplateBase, metaclass=abc.ABCMeta):\n    \"\"\"The base class of all micro templates.\n\n    Parameters\n    ----------\n    temp_db : TemplateDB\n            the template database.\n    lib_name : str\n        the layout library name.\n    params : Dict[str, Any]\n        the parameter values.\n    used_names : Set[str]\n        a set of already used cell names.\n    **kwargs\n        dictionary of optional parameters.  See documentation of\n        :class:`bag.layout.template.TemplateBase` for details.\n    \"\"\"\n\n    def __init__(self, temp_db, lib_name, params, used_names, **kwargs):\n        # type: (TemplateDB, str, Dict[str, Any], Set[str], **Any) -> None\n        self._config = read_yaml(params['config_file'])\n        self._tech_params = self._config['tech_params']\n        self._cells = self._config['cells']\n        self._spaces = self._config['spaces']\n        self._bound_params = self._config['boundaries']\n        TemplateBase.__init__(self, temp_db, lib_name, params, used_names, **kwargs)\n        self._std_size = None  # type: Optional[Tuple[int, int]]\n        self._std_size_bare = None  # type: Optional[Tuple[int, int]]\n        self._draw_boundaries = False  # type: bool\n        self._used_blocks = []  # type: List[IntervalSet]\n\n    @property\n    def min_space_width(self):\n        # type: () -> int\n        \"\"\"Returns the minimum space block width in number of standard cell columns.\"\"\"\n        return self._spaces[-1]['num_col']\n\n    @property\n    def std_col_width(self):\n        # type: 
() -> float\n        \"\"\"Returns the standard cell column width.\"\"\"\n        return self._tech_params['col_pitch']\n\n    @property\n    def std_col_width_unit(self):\n        # type: () -> float\n        \"\"\"Returns the standard cell column width in resolution units.\"\"\"\n        res = self.grid.resolution\n        return int(round(self._tech_params['col_pitch'] / res))\n\n    @property\n    def std_row_height(self):\n        # type: () -> float\n        \"\"\"Returns the standard cell row height.\"\"\"\n        return self._tech_params['height']\n\n    @property\n    def std_row_height_unit(self):\n        # type: () -> float\n        \"\"\"Returns the standard cell row height in resolution units.\"\"\"\n        res = self.grid.resolution\n        return int(round(self._tech_params['height'] / res))\n\n    @property\n    def std_size(self):\n        # type: () -> Optional[Tuple[int, int]]\n        \"\"\"Returns the number of columns/rows that this standard cell occupies.\"\"\"\n        return self._std_size\n\n    @property\n    def std_routing_layers(self):\n        # type: () -> List[int]\n        \"\"\"Returns the routing layers used by this standard cell.\"\"\"\n        return self._tech_params['layers']\n\n    def get_num_columns(self, layer_id, num_tr):\n        # type: (int, int) -> int\n        \"\"\"Returns the number of standard cell columns needed to contain the given amount of tracks.\n\n        Parameters\n        ----------\n        layer_id : int\n            the track layer ID.\n        num_tr : int\n            number of tracks.\n\n        Returns\n        -------\n        num_col : int\n            number of standard cell columns that span the given number of tracks.\n        \"\"\"\n        col_width_unit = int(round(self._tech_params['col_pitch'] / self.grid.resolution))\n        tr_pitch = int(self.grid.get_track_pitch(layer_id, unit_mode=True))  # type: int\n        return -(-(tr_pitch * num_tr) // col_width_unit)  # ceiling 
division\n\n    def set_draw_boundaries(self, draw_boundaries):\n        # type: (bool) -> None\n        \"\"\"Sets whether this standard cell have boundaries drawn around it.\n\n        To draw boundaries around a standard cell, first call this method\n        with draw_boundaries=True, then call set_std_size() method when\n        all blocks have been placed.  Finally, call draw_boundaries()\n        to draw the bounded cells.\n\n        Parameters\n        ----------\n        draw_boundaries : bool\n            True to draw boundaries around this standard cell.\n        \"\"\"\n        self._draw_boundaries = draw_boundaries\n\n    def get_space_blocks(self):\n        # type: () -> List[Dict[str, Any]]\n        \"\"\"Returns the space blocks parameters.  Used internally.\"\"\"\n        return self._spaces\n\n    def get_cell_params(self, cell_name):\n        # type: (str) -> Dict[str, Any]\n        \"\"\"Returns parameters for the given standard cell.  Used internally.\n\n        Parameters\n        ----------\n        cell_name : str\n            the standard cell name.\n        \"\"\"\n        for key, val in self._cells.items():\n            if key == cell_name:\n                return val\n        raise ValueError('Cannot find standard cell with name %s' % cell_name)\n\n    def set_std_size(self, std_size, top_layer=-1):\n        # type: (Tuple[int, int], int) -> None\n        \"\"\"Sets the size of this standard cell.\n\n        This method computes self.size, self.array_box, and self.std_size.\n        If you will draw boundaries around this standard cell,\n        self.set_draw_boundaries(True) should be called first.\n\n        Parameters\n        ----------\n        std_size : Tuple[int, int]\n            the standard cell size as (number of std. columns, number of std. rows) Tuple.\n        top_layer : int\n            the top level routing layer.  
If negative, default to standard cell top routing layer.\n        \"\"\"\n        num_col, num_row = std_size\n        self._std_size_bare = std_size\n        if self._draw_boundaries:\n            dx = self._bound_params['lr_width'] * self.std_col_width\n            dy = self._bound_params['tb_height'] * self.std_row_height\n            self._std_size = (int(std_size[0] + 2 * self._bound_params['lr_width']),\n                              int(std_size[1] + 2 * self._bound_params['tb_height']))\n        else:\n            self._std_size = std_size\n            dx, dy = 0, 0\n        self.array_box = BBox(0.0, 0.0, num_col * self.std_col_width + 2 * dx,\n                              num_row * self.std_row_height + 2 * dy, self.grid.resolution)\n        if top_layer < 0:\n            top_layer = self.std_routing_layers[-1]\n\n        if self.grid.size_defined(top_layer):\n            self.set_size_from_array_box(top_layer)\n        else:\n            self.prim_top_layer = top_layer\n            self.prim_bound_box = self.array_box\n\n    def update_routing_grid(self):\n        # type: () -> None\n        \"\"\"Register standard cell routing layers in the RoutingGrid.\n\n        This method must be called first in draw_layout().\n        \"\"\"\n        layers = self._tech_params['layers']\n        widths = self._tech_params['widths']\n        spaces = self._tech_params['spaces']\n        directions = self._tech_params['directions']\n\n        self.grid = self.grid.copy()\n        for lay_id, w, sp, tdir in zip(layers, widths, spaces, directions):\n            self.grid.add_new_layer(lay_id, sp, w, tdir, override=True)\n        self.grid.update_block_pitch()\n\n    def get_num_tracks(self, layer_id):\n        # type: (int) -> int\n        \"\"\"Get number of tracks in this standard cell.\n\n        Parameters\n        ----------\n        layer_id : int\n            the layer ID.\n\n        Returns\n        -------\n        num_tracks : int\n            number of 
tracks on the given layer in this standard cell.\n        \"\"\"\n        std_size = self.std_size\n        if std_size is None:\n            raise ValueError(\"std_size is unset. Try calling set_std_size()?\")\n        ncol, nrow = std_size\n\n        tdir = self.grid.get_direction(layer_id)\n        pitch = int(self.grid.get_track_pitch(layer_id, unit_mode=True))\n        if tdir == 'x':\n            tot_dim = nrow * int(round(self.std_row_height / self.grid.resolution))\n        else:\n            tot_dim = ncol * int(round(self.std_col_width / self.grid.resolution))\n        return tot_dim // pitch\n\n    def add_std_instance(self, master, inst_name=None, loc=(0, 0), nx=1, ny=1,\n                         spx=0, spy=0, flip_lr=False):\n        # type: (StdCellBase, Optional[str], Tuple[int, int], int, int, int, int, bool) -> Instance\n        \"\"\"Add a new standard cell instance.\n\n        Parameters\n        ----------\n        master : StdCellBase\n            the standard cell template master to add.\n        inst_name : Optional[str]\n            the instance name.\n        loc : Tuple[int, int]\n            lower-left corner of the instance in number of standard cell columns/rows.\n        nx : int\n            horizontal array count.\n        ny : int\n            vertical array count.\n        spx : int\n            horizontal pitch in number of standard cell columns.\n        spy : int\n            vertical pitch in number of standard cell rows.  Must be even.\n        flip_lr : bool\n            True to flip the standard cell over Y axis.\n\n        Returns\n        -------\n        inst : Instance\n            the standard cell instance.\n        \"\"\"\n        if spy % 2 != 0:\n            raise ValueError('row pitch must be even')\n\n        # update self._used_blocks\n        master_std_size = master.std_size\n        if master_std_size is None:\n            raise ValueError(\"master.std_size is unset. 
Try calling master.set_std_size()?\")\n        inst_ncol, inst_nrow = master_std_size\n        cur_nrow = loc[1] + inst_nrow + (ny - 1) * spy\n        while len(self._used_blocks) < cur_nrow:\n            self._used_blocks.append(IntervalSet())\n        for col_off in range(nx):\n            xoff = col_off * spx + loc[0]\n            for row_off in range(ny):\n                yoff = row_off * spy + loc[1]\n                for std_row_idx in range(yoff, yoff + inst_nrow):\n                    success = self._used_blocks[std_row_idx].add((xoff, xoff + inst_ncol))\n                    if not success:\n                        raise ValueError('Cannot add instance at std loc (%d, %d)' % (xoff, yoff))\n\n        col_pitch = self.std_col_width\n        row_pitch = self.std_row_height\n        if loc[1] % 2 == 0:\n            orient = 'R0'\n            dy = loc[1] * row_pitch\n        else:\n            orient = 'MX'\n            dy = (loc[1] + 1) * row_pitch\n\n        dx = loc[0] * col_pitch\n        if flip_lr:\n            dx += inst_ncol * col_pitch\n            if orient == 'R0':\n                orient = 'MY'\n            else:\n                orient = 'R180'\n\n        spx_new = spx * col_pitch\n        spy_new = spy * row_pitch\n        if self._draw_boundaries:\n            dx += self._bound_params['lr_width'] * self.std_col_width\n            dy += self._bound_params['tb_height'] * self.std_row_height\n\n        return self.add_instance(master, inst_name=inst_name, loc=(dx, dy),\n                                 orient=orient, nx=nx, ny=ny, spx=spx_new, spy=spy_new)\n\n    def draw_boundaries(self):\n        # type: () -> None\n        \"\"\"Draw the boundary cells around this standard cell.\"\"\"\n        lib_name = self._bound_params['lib_name']\n        suffix = self._bound_params.get('suffix', '')\n        std_size_bare = self._std_size_bare\n        if std_size_bare is None:\n            raise ValueError(\"std_size_bare is unset. 
Try calling set_std_size()?\")\n        num_col, num_row = std_size_bare\n        num_row_even = (num_row + 1) // 2\n        num_row_odd = num_row - num_row_even\n        wcol, hrow = self.std_col_width, self.std_row_height\n        dx = self._bound_params['lr_width'] * wcol\n        dy = self._bound_params['tb_height'] * hrow\n\n        # add bottom-left\n        self.add_instance_primitive(lib_name, 'boundary_bottomleft' + suffix, (0, 0))\n\n        # add left\n        self.add_instance_primitive(lib_name, 'boundary_left' + suffix, (0, dy), ny=num_row_even,\n                                    spy=hrow * 2)\n        if num_row_odd > 0:\n            self.add_instance_primitive(lib_name, 'boundary_left' + suffix, (0, dy + 2 * hrow),\n                                        orient='MX', ny=num_row_odd, spy=hrow * 2)\n\n        # add top-left\n        if num_row % 2 == 1:\n            yc = dy + num_row * hrow\n            self.add_instance_primitive(lib_name, 'boundary_topleft' + suffix, (0, yc))\n        else:\n            yc = 2 * dy + num_row * hrow\n            self.add_instance_primitive(lib_name, 'boundary_bottomleft' + suffix, (0, yc),\n                                        orient='MX')\n\n        # add bottom\n        self.add_instance_primitive(lib_name, 'boundary_bottom' + suffix, (dx, 0), nx=num_col,\n                                    spx=wcol)\n\n        # add top\n        if num_row % 2 == 1:\n            self.add_instance_primitive(lib_name, 'boundary_top' + suffix, (dx, yc), nx=num_col,\n                                        spx=wcol)\n        else:\n            self.add_instance_primitive(lib_name, 'boundary_bottom' + suffix, (dx, yc), orient='MX',\n                                        nx=num_col, spx=wcol)\n\n        # add bottom right\n        xc = dx + num_col * wcol\n        self.add_instance_primitive(lib_name, 'boundary_bottomright' + suffix, (xc, 0))\n\n        # add right\n        self.add_instance_primitive(lib_name, 'boundary_right' 
+ suffix, (xc, dy), ny=num_row_even,\n                                    spy=hrow * 2)\n        if num_row_odd > 0:\n            self.add_instance_primitive(lib_name, 'boundary_right' + suffix, (xc, dy + 2 * hrow),\n                                        orient='MX', ny=num_row_odd, spy=hrow * 2)\n\n        # add top right\n        if num_row % 2 == 1:\n            self.add_instance_primitive(lib_name, 'boundary_topright' + suffix, (xc, yc))\n        else:\n            self.add_instance_primitive(lib_name, 'boundary_bottomright' + suffix, (xc, yc),\n                                        orient='MX')\n\n    def fill_space(self):\n        # type: () -> None\n        \"\"\"Fill all unused blocks with spaces.\"\"\"\n        std_size_bare = self._std_size_bare\n        if std_size_bare is None:\n            raise ValueError(\"std_size_bare is unset. Try calling set_std_size()?\")\n        tot_intv = (0, std_size_bare[0])\n        for row_idx, intv_set in enumerate(self._used_blocks):\n            for intv in intv_set.get_complement(tot_intv).intervals():\n                loc = (intv[0], row_idx)\n                num_spaces = intv[1] - intv[0]\n                self.add_std_space(loc, num_spaces, update_used_blks=False)\n\n    def add_std_space(self, loc, num_col, update_used_blks=True):\n        # type: (Tuple[int, int], int, bool) -> None\n        \"\"\"Add standard cell spaces at the given location.\n\n        Parameters\n        ----------\n        loc : Tuple[int, int]\n            the lower-left corner of the space block.\n        num_col : int\n            the space block width in number of columns.\n        update_used_blks : bool\n            True to register space blocks.  
This flag is for internal use only.\n        \"\"\"\n        if update_used_blks:\n            # update self._used_blocks\n            while len(self._used_blocks) < loc[1] + 1:\n                self._used_blocks.append(IntervalSet())\n            success = self._used_blocks[loc[1]].add((loc[0], loc[0] + num_col))\n            if not success:\n                raise ValueError('Cannot add space at std loc (%d, %d)' % (loc[0], loc[1]))\n\n        col_pitch = self.std_col_width\n        xcur = loc[0] * col_pitch\n        if loc[1] % 2 == 0:\n            orient = 'R0'\n            ycur = loc[1] * self.std_row_height\n        else:\n            orient = 'MX'\n            ycur = (loc[1] + 1) * self.std_row_height\n\n        if self._draw_boundaries:\n            dx = self._bound_params['lr_width'] * self.std_col_width\n            dy = self._bound_params['tb_height'] * self.std_row_height\n        else:\n            dx = dy = 0\n\n        for blk_params in self.get_space_blocks():\n            lib_name = blk_params['lib_name']\n            cell_name = blk_params['cell_name']\n            blk_col = blk_params['num_col']\n            num_blk, num_col = divmod(num_col, blk_col)\n            blk_width = blk_col * col_pitch\n            if num_blk > 0:\n                self.add_instance_primitive(lib_name, cell_name, (xcur + dx, ycur + dy),\n                                            orient=orient, nx=num_blk, spx=blk_width)\n                xcur += num_blk * blk_width\n\n        if num_col > 0:\n            raise ValueError('has %d columns remaining' % num_col)\n\n\nclass StdCellTemplate(StdCellBase):\n    \"\"\"A template wrapper around a standard cell block.\n\n    Parameters\n    ----------\n    temp_db : TemplateDB\n        the template database.\n    lib_name : str\n        the layout library name.\n    params : Dict[str, Any]\n        the parameter values.\n    used_names : Set[str]\n        a set of already used cell names.\n    **kwargs :\n        dictionary of 
optional parameters.  See documentation of\n        :class:`bag.layout.template.TemplateBase` for details.\n    \"\"\"\n\n    def __init__(self, temp_db, lib_name, params, used_names, **kwargs):\n        # type: (TemplateDB, str, Dict[str, Any], Set[str], **Any) -> None\n        StdCellBase.__init__(self, temp_db, lib_name, params, used_names, **kwargs)\n        self._sch_params = None\n\n    @property\n    def sch_params(self):\n        return self._sch_params\n\n    @classmethod\n    def get_params_info(cls):\n        # type: () -> Dict[str, str]\n        \"\"\"Returns a dictionary containing parameter descriptions.\n\n        Override this method to return a dictionary from parameter names to descriptions.\n\n        Returns\n        -------\n        param_info : Dict[str, str]\n            dictionary from parameter name to description.\n        \"\"\"\n        return dict(\n            cell_name='standard cell cell name.',\n            config_file='standard cell configuration file name.',\n        )\n\n    def get_layout_basename(self):\n        return 'stdcell_%s' % self.params['cell_name']\n\n    def compute_unique_key(self):\n        cell_params = self.get_cell_params(self.params['cell_name'])\n        return 'stdcell_%s_%s' % (cell_params['lib_name'], cell_params['cell_name'])\n\n    def get_sch_master_info(self):\n        # type: () -> Tuple[str, str]\n        \"\"\"Returns the schematic master library/cell name tuple.\"\"\"\n        cell_params = self.get_cell_params(self.params['cell_name'])\n        return cell_params['lib_name'], cell_params['cell_name']\n\n    def draw_layout(self):\n        # type: () -> None\n\n        cell_params = self.get_cell_params(self.params['cell_name'])\n        lib_name = cell_params['lib_name']\n        cell_name = cell_params['cell_name']\n        size = cell_params['size']\n        ports = cell_params['ports']\n\n        # update routing grid\n        self.update_routing_grid()\n        # add instance\n        
self.add_instance_primitive(lib_name, cell_name, (0, 0))\n        # compute size\n        self.set_std_size(size)\n\n        # add pins\n        res = self.grid.resolution\n        for port_name, pin_list in ports.items():\n            for pin in pin_list:\n                port_lay_id = pin['layer']\n                bbox = pin['bbox']\n                layer_dir = self.grid.get_direction(port_lay_id)\n                if layer_dir == 'x':\n                    intv = bbox[1], bbox[3]\n                    lower, upper = bbox[0], bbox[2]\n                else:\n                    intv = bbox[0], bbox[2]\n                    lower, upper = bbox[1], bbox[3]\n                tr_idx, tr_w = self.grid.interval_to_track(port_lay_id, intv)\n                warr = WireArray(TrackID(port_lay_id, tr_idx, width=tr_w), lower, upper,\n                                 res=res, unit_mode=False)\n                self.add_pin(port_name, warr, show=False)\n\n        # set properties\n        self._sch_params = cell_params.get('sch_params', None)\n"
  },
  {
    "path": "bag/layout/objects.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This module defines various layout objects one can add and manipulate in a template.\n\"\"\"\nfrom typing import TYPE_CHECKING, Union, List, Tuple, Optional, Dict, Any, Iterator, Iterable, \\\n    Generator\n\nimport abc\nimport numpy as np\nfrom copy import deepcopy\n\nfrom .util import transform_table, BBox, BBoxArray, transform_point, get_inverse_transform\nfrom .routing.base import Port, WireArray\n\nfrom .. import io\n\nif TYPE_CHECKING:\n    from .template import TemplateBase\n    from .routing.grid import RoutingGrid\n\nldim = Union[float, int]\nloc_type = Tuple[ldim, ldim]\n\n\nclass Figure(object, metaclass=abc.ABCMeta):\n    \"\"\"Base class of all layout objects.\n\n    Parameters\n    ----------\n    resolution : float\n        layout unit resolution.\n    \"\"\"\n\n    def __init__(self, resolution):\n        # type: (float) -> None\n        self._res = resolution\n        self._destroyed = False\n\n    @abc.abstractmethod\n    def transform(self, loc=(0, 0), orient='R0', unit_mode=False, copy=False):\n        # type: (Tuple[ldim, ldim], str, bool, bool) -> Figure\n        \"\"\"Transform this figure.\"\"\"\n        pass\n\n    @abc.abstractmethod\n    def move_by(self, dx=0, dy=0, unit_mode=False):\n        # type: (ldim, ldim, bool) -> None\n        \"\"\"Move this path by the given amount.\n\n        Parameters\n        ----------\n        dx : float\n            the X shift.\n        dy : float\n            the Y shift.\n        unit_mode : bool\n            True if shifts are given in resolution units.\n        \"\"\"\n        pass\n\n    @property\n    def resolution(self):\n        # type: () -> float\n        \"\"\"Retuns the layout unit resolution.\"\"\"\n        return self._res\n\n    @property\n    def destroyed(self):\n        # type: () -> bool\n        \"\"\"Returns True if this instance is destroyed\"\"\"\n        return self._destroyed\n\n    @property\n    def valid(self):\n        # 
type: () -> bool\n        \"\"\"Returns True if this figure is valid.\"\"\"\n        return not self._destroyed\n\n    def check_destroyed(self):\n        # type: () -> None\n        \"\"\"Raises an exception if this object is already destroyed.\"\"\"\n        if self._destroyed:\n            raise Exception('This %s is already destroyed.' % self.__class__.__name__)\n\n    def destroy(self):\n        # type: () -> None\n        \"\"\"Destroy this instance.\"\"\"\n        self._destroyed = True\n\n\n# noinspection PyAbstractClass\nclass Arrayable(Figure, metaclass=abc.ABCMeta):\n    \"\"\"A layout object with arraying support.\n\n    Also handles destroy support.\n\n    Parameters\n    ----------\n    res : float\n        layout unit resolution.\n    nx : int\n        number of columns.\n    ny : int\n        number of rows.\n    spx : Union[float or int]\n        column pitch.\n    spy : Union[float or int]\n        row pitch.\n    unit_mode : bool\n        True if spx/spy are specified in resolution units.\n    \"\"\"\n\n    def __init__(self, res, nx=1, ny=1, spx=0, spy=0, unit_mode=False):\n        # type: (float, int, int, ldim, ldim, bool) -> None\n        Figure.__init__(self, res)\n        self._nx = nx\n        self._ny = ny\n        if unit_mode:\n            self._spx_unit = spx\n            self._spy_unit = spy\n        else:\n            self._spx_unit = int(round(spx / res))\n            self._spy_unit = int(round(spy / res))\n\n    @property\n    def nx(self):\n        # type: () -> int\n        \"\"\"Number of columns.\"\"\"\n        return self._nx\n\n    @nx.setter\n    def nx(self, val):\n        # type: (int) -> None\n        \"\"\"Sets the number of columns.\"\"\"\n        self.check_destroyed()\n        if val <= 0:\n            raise ValueError('Cannot have non-positive number of columns.')\n        self._nx = val\n\n    @property\n    def ny(self):\n        # type: () -> int\n        \"\"\"Number of rows.\"\"\"\n        return self._ny\n\n    
@ny.setter\n    def ny(self, val):\n        # type: (int) -> None\n        \"\"\"Sets the number of rows.\"\"\"\n        self.check_destroyed()\n        if val <= 0:\n            raise ValueError('Cannot have non-positive number of rows.')\n        self._ny = val\n\n    @property\n    def spx(self):\n        # type: () -> float\n        \"\"\"The column pitch.\"\"\"\n        return self._spx_unit * self.resolution\n\n    @spx.setter\n    def spx(self, val):\n        # type: (float) -> None\n        \"\"\"Sets the new column pitch.\"\"\"\n        self.check_destroyed()\n        if val < 0:\n            raise ValueError('Currently does not support negative pitches.')\n        self._spx_unit = int(round(val / self.resolution))\n\n    @property\n    def spx_unit(self):\n        # type: () -> int\n        \"\"\"The column pitch in resolution units.\"\"\"\n        return self._spx_unit\n\n    @spx_unit.setter\n    def spx_unit(self, val):\n        # type: (int) -> None\n        \"\"\"Sets the new column pitch in resolution units.\"\"\"\n        self.check_destroyed()\n        if val < 0:\n            raise ValueError('Currently does not support negative pitches.')\n        self._spx_unit = val\n\n    @property\n    def spy(self):\n        # type: () -> float\n        \"\"\"The row pitch.\"\"\"\n        return self._spy_unit * self.resolution\n\n    @spy.setter\n    def spy(self, val):\n        # type: (float) -> None\n        \"\"\"Sets the new row pitch.\"\"\"\n        self.check_destroyed()\n        if val < 0:\n            raise ValueError('Currently does not support negative pitches.')\n        self._spy_unit = int(round(val / self.resolution))\n\n    @property\n    def spy_unit(self):\n        # type: () -> int\n        \"\"\"The row pitch in resolution units.\"\"\"\n        return self._spy_unit\n\n    @spy_unit.setter\n    def spy_unit(self, val):\n        # type: (int) -> None\n        \"\"\"Sets the new row pitch in resolution units.\"\"\"\n        
self.check_destroyed()\n        if val < 0:\n            raise ValueError('Currently does not support negative pitches.')\n        self._spy_unit = val\n\n    @Figure.valid.getter\n    def valid(self):\n        # type: () -> bool\n        \"\"\"Returns True if this instance is valid, i.e. not destroyed and nx, ny >= 1.\"\"\"\n        return not self.destroyed and self.nx >= 1 and self.ny >= 1\n\n    def get_item_location(self, row=0, col=0, unit_mode=False):\n        # type: (int, int, bool) -> Tuple[ldim, ldim]\n        \"\"\"Returns the location of the given item in the array.\n\n        Parameters\n        ----------\n        row : int\n            the item row index.  0 is the bottom-most row.\n        col : int\n            the item column index.  0 is the left-most column.\n        unit_mode : bool\n            True to return coordinates in resolution units\n\n        Returns\n        -------\n        xo : Union[float, int]\n            the item X coordinate.\n        yo : Union[float, int]\n            the item Y coordinate.\n        \"\"\"\n        if row < 0 or row >= self.ny or col < 0 or col >= self.nx:\n            raise ValueError('Invalid row/col index: row=%d, col=%d' % (row, col))\n\n        xo = col * self._spx_unit\n        yo = row * self._spy_unit\n        if unit_mode:\n            return xo, yo\n        return xo * self.resolution, yo * self.resolution\n\n\nclass InstanceInfo(dict):\n    \"\"\"A dictionary that represents a layout instance.\n    \"\"\"\n\n    param_list = ['lib', 'cell', 'view', 'name', 'loc', 'orient', 'num_rows',\n                  'num_cols', 'sp_rows', 'sp_cols', 'master_key']\n\n    def __init__(self, res, change_orient=True, **kwargs):\n        kv_iter = ((key, kwargs.get(key, None)) for key in self.param_list)\n        dict.__init__(self, kv_iter)\n        self._resolution = res\n        if 'params' in kwargs:\n            self.params = kwargs['params']\n\n        # skill/OA array before rotation, while we're doing the 
opposite.\n        # this is supposed to fix it.\n        if change_orient:\n            orient = self['orient']\n            if orient == 'R180':\n                self['sp_rows'] *= -1\n                self['sp_cols'] *= -1\n            elif orient == 'MX':\n                self['sp_rows'] *= -1\n            elif orient == 'MY':\n                self['sp_cols'] *= -1\n            elif orient == 'R90':\n                self['sp_rows'], self['sp_cols'] = self['sp_cols'], -self['sp_rows']\n                self['num_rows'], self['num_cols'] = self['num_cols'], self['num_rows']\n            elif orient == 'MXR90':\n                self['sp_rows'], self['sp_cols'] = self['sp_cols'], self['sp_rows']\n                self['num_rows'], self['num_cols'] = self['num_cols'], self['num_rows']\n            elif orient == 'MYR90':\n                self['sp_rows'], self['sp_cols'] = -self['sp_cols'], -self['sp_rows']\n                self['num_rows'], self['num_cols'] = self['num_cols'], self['num_rows']\n            elif orient == 'R270':\n                self['sp_rows'], self['sp_cols'] = -self['sp_cols'], self['sp_rows']\n                self['num_rows'], self['num_cols'] = self['num_cols'], self['num_rows']\n            elif orient != 'R0':\n                raise ValueError('Unknown orientation: %s' % orient)\n\n    @property\n    def lib(self):\n        # type: () -> str\n        return self['lib']\n\n    @property\n    def cell(self):\n        # type: () -> str\n        return self['cell']\n\n    @property\n    def view(self):\n        # type: () -> str\n        return self['view']\n\n    @property\n    def name(self):\n        # type: () -> str\n        return self['name']\n\n    @name.setter\n    def name(self, new_name):\n        # type: (str) -> None\n        self['name'] = new_name\n\n    @property\n    def loc(self):\n        # type: () -> Tuple[float, float]\n        loc_list = self['loc']\n        return loc_list[0], loc_list[1]\n\n    @property\n    def 
orient(self):\n        # type: () -> str\n        return self['orient']\n\n    @property\n    def num_rows(self):\n        # type: () -> int\n        return self['num_rows']\n\n    @property\n    def num_cols(self):\n        # type: () -> int\n        return self['num_cols']\n\n    @property\n    def sp_rows(self):\n        # type: () -> float\n        return self['sp_rows']\n\n    @property\n    def sp_cols(self):\n        # type: () -> float\n        return self['sp_cols']\n\n    @property\n    def params(self):\n        # type: () -> Optional[Dict[str, Any]]\n        return self.get('params', None)\n\n    @params.setter\n    def params(self, new_params):\n        # type: (Optional[Dict[str, Any]]) -> None\n        self['params'] = new_params\n\n    @property\n    def master_key(self):\n        return self.get('master_key', None)\n\n    @master_key.setter\n    def master_key(self, value):\n        self['master_key'] = value\n\n    @property\n    def angle_reflect(self):\n        # type: () -> Tuple[int, bool]\n        orient = self['orient']\n        if orient == 'R0':\n            return 0, False\n        elif orient == 'R180':\n            return 180, False\n        elif orient == 'MX':\n            return 0, True\n        elif orient == 'MY':\n            return 180, True\n        elif orient == 'R90':\n            return 90, False\n        elif orient == 'MXR90':\n            return 90, True\n        elif orient == 'MYR90':\n            return 270, True\n        elif orient == 'R270':\n            return 270, False\n        else:\n            raise ValueError('Unknown orientation: %s' % orient)\n\n    def copy(self):\n        \"\"\"Override copy method of dictionary to return an InstanceInfo instead.\"\"\"\n        return InstanceInfo(self._resolution, change_orient=False, **self)\n\n    def move_by(self, dx=0, dy=0):\n        # type: (float, float) -> None\n        \"\"\"Move this instance by the given amount.\n\n        Parameters\n        ----------\n      
  dx : float\n            the X shift.\n        dy : float\n            the Y shift.\n        \"\"\"\n        res = self._resolution\n        loc = self.loc\n        self['loc'] = [round((loc[0] + dx) / res) * res,\n                       round((loc[1] + dy) / res) * res]\n\n\nclass Instance(Arrayable):\n    \"\"\"A layout instance, with optional arraying parameters.\n\n    Parameters\n    ----------\n    parent_grid : RoutingGrid\n        the parent RoutingGrid object.\n    lib_name : str\n        the layout library name.\n    master : TemplateBase\n        the master template of this instance.\n    loc : Tuple[Union[float, int], Union[float, int]]\n        the origin of this instance.\n    orient : str\n        the orientation of this instance.\n    name : Optional[str]\n        name of this instance.\n    nx : int\n        number of columns.\n    ny : int\n        number of rows.\n    spx : Union[float, int]\n        column pitch.\n    spy : Union[float, int]\n        row pitch.\n    unit_mode : bool\n        True if layout dimensions are specified in resolution units.\n    \"\"\"\n\n    def __init__(self,\n                 parent_grid,  # type: RoutingGrid\n                 lib_name,  # type: str\n                 master,  # type: TemplateBase\n                 loc,  # type: Tuple[ldim, ldim]\n                 orient,  # type: str\n                 name=None,  # type: Optional[str]\n                 nx=1,  # type: int\n                 ny=1,  # type: int\n                 spx=0,  # type: ldim\n                 spy=0,  # type: ldim\n                 unit_mode=False,  # type: bool\n                 ):\n        # type: (...) 
-> None\n        res = parent_grid.resolution\n        Arrayable.__init__(self, res, nx=nx, ny=ny, spx=spx, spy=spy, unit_mode=unit_mode)\n        self._parent_grid = parent_grid\n        self._lib_name = lib_name\n        self._inst_name = name\n        self._master = master\n        if unit_mode:\n            self._loc_unit = loc[0], loc[1]\n        else:\n            self._loc_unit = int(round(loc[0] / res)), int(round(loc[1] / res))\n        self._orient = orient\n\n    def new_master_with(self, **kwargs):\n        # type: (**Any) -> None\n        \"\"\"Change the master template of this instance.\n\n        This method will get the old master template layout parameters, update\n        the parameter values with the given dictionary, then create a new master\n        template with those parameters and associate it with this instance.\n\n        Parameters\n        ----------\n        **kwargs\n            a dictionary of new parameter values.\n        \"\"\"\n        self._master = self._master.new_template_with(**kwargs)\n\n    def blockage_iter(self, layer_id, test_box, spx=0, spy=0):\n        # type: (int, BBox, int, int) -> Generator[BBox, None, None]\n        # transform the given BBox to master coordinate\n        if self.destroyed:\n            return\n\n        base_box = self._master.get_track_bbox(layer_id)\n        if not base_box.is_physical():\n            return\n        base_box = self.translate_master_box(base_box)\n        test = test_box.expand(dx=spx, dy=spy, unit_mode=True)\n\n        inst_spx = max(self.spx_unit, 1)\n        inst_spy = max(self.spy_unit, 1)\n        xl = base_box.left_unit\n        yb = base_box.bottom_unit\n        xr = base_box.right_unit\n        yt = base_box.top_unit\n        nx0 = max(0, -(-(test.left_unit - xr) // inst_spx))\n        nx1 = min(self.nx - 1, (test.right_unit - xl) // inst_spx)\n        ny0 = max(0, -(-(test.bottom_unit - yt) // inst_spy))\n        ny1 = min(self.ny - 1, (test.top_unit - yb) // 
inst_spy)\n        orient = self._orient\n        x0, y0 = self._loc_unit\n        if (orient == 'R90' or orient == 'R270' or\n                orient == 'MXR90' or orient == 'MYR90'):\n            spx, spy = spy, spx\n        for row in range(ny0, ny1 + 1):\n            for col in range(nx0, nx1 + 1):\n                dx, dy = self.get_item_location(row=row, col=col, unit_mode=True)\n                loc = dx + x0, dy + y0\n                inv_loc, inv_orient = get_inverse_transform(loc, orient)\n                cur_box = test_box.transform(inv_loc, inv_orient, unit_mode=True)\n                for box in self._master.blockage_iter(layer_id, cur_box, spx=spx, spy=spy):\n                    yield box.transform(loc, orient, unit_mode=True)\n\n    def all_rect_iter(self):\n        # type: () -> Generator[Tuple[BBox, int, int], None, None]\n        if self.destroyed:\n            return\n\n        orient = self._orient\n        x0, y0 = self._loc_unit\n        flip = (orient == 'R90' or orient == 'R270' or orient == 'MXR90' or orient == 'MYR90')\n        for layer_id, box, sdx, sdy in self._master.all_rect_iter():\n            if flip:\n                sdx, sdy = sdy, sdx\n            for row in range(self.ny):\n                for col in range(self.nx):\n                    dx, dy = self.get_item_location(row=row, col=col, unit_mode=True)\n                    loc = dx + x0, dy + y0\n                    yield layer_id, box.transform(loc, orient, unit_mode=True), sdx, sdy\n\n    def intersection_rect_iter(self, layer_id, test_box):\n        # type: (int, BBox) -> Generator[BBox, None, None]\n        if self.destroyed:\n            return\n\n        base_box = self._master.get_track_bbox(layer_id)\n        if not base_box.is_physical():\n            return\n        base_box = self.translate_master_box(base_box)\n\n        inst_spx = max(self.spx_unit, 1)\n        inst_spy = max(self.spy_unit, 1)\n        xl = base_box.left_unit\n        yb = base_box.bottom_unit\n        
xr = base_box.right_unit\n        yt = base_box.top_unit\n        nx0 = max(0, -(-(test_box.left_unit - xr) // inst_spx))\n        nx1 = min(self.nx - 1, (test_box.right_unit - xl) // inst_spx)\n        ny0 = max(0, -(-(test_box.bottom_unit - yt) // inst_spy))\n        ny1 = min(self.ny - 1, (test_box.top_unit - yb) // inst_spy)\n        orient = self._orient\n        x0, y0 = self._loc_unit\n        for row in range(ny0, ny1 + 1):\n            for col in range(nx0, nx1 + 1):\n                dx, dy = self.get_item_location(row=row, col=col, unit_mode=True)\n                loc = dx + x0, dy + y0\n                inv_loc, inv_orient = get_inverse_transform(loc, orient)\n                cur_box = test_box.transform(inv_loc, inv_orient, unit_mode=True)\n                for box in self._master.intersection_rect_iter(layer_id, cur_box):\n                    yield box.transform(loc, orient, unit_mode=True)\n\n    def get_rect_bbox(self, layer):\n        \"\"\"Returns the overall bounding box of all rectangles on the given layer.\n\n        Note: currently this does not check primitive instances or vias.\n        \"\"\"\n        bbox = self._master.get_rect_bbox(layer)\n        if not bbox.is_valid():\n            return bbox\n        box_arr = BBoxArray(self.translate_master_box(bbox), nx=self.nx, ny=self.ny,\n                            spx=self.spx_unit, spy=self.spy_unit, unit_mode=True)\n        return box_arr.get_overall_bbox()\n\n    def track_bbox_iter(self):\n        for layer_id, bbox in self._master.track_bbox_iter():\n            box_arr = BBoxArray(self.translate_master_box(bbox), nx=self.nx, ny=self.ny,\n                                spx=self.spx_unit, spy=self.spy_unit, unit_mode=True)\n            yield layer_id, box_arr.get_overall_bbox()\n\n    @property\n    def master(self):\n        # type: () -> TemplateBase\n        \"\"\"The master template of this instance.\"\"\"\n        return self._master\n\n    @property\n    def location(self):\n        # 
type: () -> Tuple[float, float]\n        \"\"\"The instance location.\"\"\"\n        return self._loc_unit[0] * self.resolution, self._loc_unit[1] * self.resolution\n\n    @location.setter\n    def location(self, new_loc):\n        # type: (Tuple[float, float]) -> None\n        \"\"\"Sets the instance location.\"\"\"\n        self.check_destroyed()\n        self._loc_unit = (int(round(new_loc[0] / self.resolution)),\n                          int(round(new_loc[1] / self.resolution)))\n\n    @property\n    def location_unit(self):\n        # type: () -> Tuple[int, int]\n        \"\"\"The instance location.\"\"\"\n        return self._loc_unit\n\n    @location_unit.setter\n    def location_unit(self, new_loc):\n        # type: (Tuple[int, int]) -> None\n        \"\"\"Sets the instance location.\"\"\"\n        self.check_destroyed()\n        self._loc_unit = (new_loc[0], new_loc[1])\n\n    @property\n    def orientation(self):\n        # type: () -> str\n        \"\"\"The instance orientation\"\"\"\n        return self._orient\n\n    @orientation.setter\n    def orientation(self, val):\n        # type: (str) -> None\n        \"\"\"Sets the instance orientation.\"\"\"\n        self.check_destroyed()\n        if val not in transform_table:\n            raise ValueError('Unsupported orientation: %s' % val)\n        self._orient = val\n\n    @property\n    def content(self):\n        # type: () -> InstanceInfo\n        \"\"\"A dictionary representation of this instance.\"\"\"\n        return InstanceInfo(self.resolution,\n                            lib=self._lib_name,\n                            cell=self.master.cell_name,\n                            view='layout',\n                            name=self._inst_name,\n                            loc=list(self.location),\n                            orient=self.orientation,\n                            num_rows=self.ny,\n                            num_cols=self.nx,\n                            sp_rows=self.spy,\n         
                   sp_cols=self.spx,\n                            master_key=self.master.key\n                            )\n\n    @property\n    def bound_box(self):\n        # type: () -> BBox\n        \"\"\"Returns the overall bounding box of this instance.\"\"\"\n        box_arr = BBoxArray(self._master.bound_box, nx=self.nx, ny=self.ny,\n                            spx=self._spx_unit, spy=self._spy_unit, unit_mode=True)\n        return box_arr.get_overall_bbox().transform(self.location_unit, self.orientation,\n                                                    unit_mode=True)\n\n    @property\n    def array_box(self):\n        # type: () -> BBox\n        \"\"\"Returns the array box of this instance.\"\"\"\n        master_box = getattr(self._master, 'array_box', None)  # type: BBox\n        if master_box is None:\n            raise ValueError('Master template array box is not defined.')\n\n        box_arr = BBoxArray(master_box, nx=self.nx, ny=self.ny,\n                            spx=self._spx_unit, spy=self._spy_unit, unit_mode=True)\n        return box_arr.get_overall_bbox().transform(self.location_unit, self.orientation,\n                                                    unit_mode=True)\n\n    @property\n    def fill_box(self):\n        # type: () -> BBox\n        \"\"\"Returns the array box of this instance.\"\"\"\n        master_box = getattr(self._master, 'fill_box', None)  # type: BBox\n        if master_box is None:\n            raise ValueError('Master template fill box is not defined.')\n\n        box_arr = BBoxArray(master_box, nx=self.nx, ny=self.ny,\n                            spx=self._spx_unit, spy=self._spy_unit, unit_mode=True)\n        return box_arr.get_overall_bbox().transform(self.location_unit, self.orientation,\n                                                    unit_mode=True)\n\n    def get_bound_box_of(self, row=0, col=0):\n        \"\"\"Returns the bounding box of an instance in this mosaic.\"\"\"\n        dx, dy = 
self.get_item_location(row=row, col=col, unit_mode=True)\n        xshift, yshift = self._loc_unit\n        xshift += dx\n        yshift += dy\n        return self._master.bound_box.transform((xshift, yshift), self.orientation, unit_mode=True)\n\n    def move_by(self, dx=0, dy=0, unit_mode=False):\n        # type: (Union[float, int], Union[float, int], bool) -> None\n        \"\"\"Move this instance by the given amount.\n\n        Parameters\n        ----------\n        dx : Union[float, int]\n            the X shift.\n        dy : Union[float, int]\n            the Y shift.\n        unit_mode : bool\n            True if shifts are given in resolution units\n        \"\"\"\n        if not unit_mode:\n            dx = int(round(dx / self.resolution))\n            dy = int(round(dy / self.resolution))\n        self._loc_unit = self._loc_unit[0] + dx, self._loc_unit[1] + dy\n\n    def translate_master_box(self, box):\n        # type: (BBox) -> BBox\n        \"\"\"Transform the bounding box in master template.\n\n        Parameters\n        ----------\n        box : BBox\n            the BBox in master template coordinate.\n\n        Returns\n        -------\n        new_box : BBox\n            the cooresponding BBox in instance coordinate.\n        \"\"\"\n        return box.transform(self.location_unit, self.orientation, unit_mode=True)\n\n    def translate_master_location(self,\n                                  mloc,  # type: Tuple[Union[float, int], Union[float, int]]\n                                  unit_mode=False,  # type: bool\n                                  ):\n        # type: (...) 
-> Tuple[Union[float, int], Union[float, int]]\n        \"\"\"Returns the actual location of the given point in master template.\n\n        Parameters\n        ----------\n        mloc : Tuple[Union[float, int], Union[float, int]]\n            the location in master coordinate.\n        unit_mode : bool\n            True if location is given in resolution units.\n\n        Returns\n        -------\n        xi : Union[float, int]\n            the actual X coordinate.  Integer if unit_mode is True.\n        yi : Union[float, int]\n            the actual Y coordinate.  Integer if unit_mode is True.\n        \"\"\"\n        res = self.resolution\n        if unit_mode:\n            mx, my = mloc[0], mloc[1]\n        else:\n            mx, my = int(round(mloc[0] / res)), int(round(mloc[1] / res))\n        p = transform_point(mx, my, self.location_unit, self.orientation)\n        if unit_mode:\n            return p[0], p[1]\n        return p[0] * res, p[1] * res\n\n    def translate_master_track(self, layer_id, track_idx):\n        # type: (int, Union[float, int]) -> Union[float, int]\n        \"\"\"Returns the actual track index of the given track in master template.\n\n        Parameters\n        ----------\n        layer_id : int\n            the layer ID.\n        track_idx : Union[float, int]\n            the track index.\n\n        Returns\n        -------\n        new_idx : Union[float, int]\n            the new track index.\n        \"\"\"\n        dx, dy = self.location_unit\n        return self._parent_grid.transform_track(layer_id, track_idx, dx=dx, dy=dy,\n                                                 orient=self.orientation, unit_mode=True)\n\n    def get_port(self, name='', row=0, col=0):\n        # type: (Optional[str], int, int) -> Port\n        \"\"\"Returns the port object of the given instance in the array.\n\n        Parameters\n        ----------\n        name : Optional[str]\n            the port terminal name.  
If None or empty, check if this\n            instance has only one port, then return it.\n        row : int\n            the instance row index.  Index 0 is the bottom-most row.\n        col : int\n            the instance column index.  Index 0 is the left-most column.\n\n        Returns\n        -------\n        port : Port\n            the port object.\n        \"\"\"\n        dx, dy = self.get_item_location(row=row, col=col, unit_mode=True)\n        xshift, yshift = self._loc_unit\n        loc = (xshift + dx, yshift + dy)\n        return self._master.get_port(name).transform(self._parent_grid, loc=loc,\n                                                     orient=self.orientation, unit_mode=True)\n\n    def get_pin(self, name='', row=0, col=0, layer=-1):\n        # type: (Optional[str], int, int, int) -> Union[WireArray, BBox]\n        \"\"\"Returns the first pin with the given name.\n\n        This is an efficient method if you know this instance has exactly one pin.\n\n        Parameters\n        ----------\n        name : Optional[str]\n            the port terminal name.  If None or empty, check if this\n            instance has only one port, then return it.\n        row : int\n            the instance row index.  Index 0 is the bottom-most row.\n        col : int\n            the instance column index.  Index 0 is the left-most column.\n        layer : int\n            the pin layer.  
If negative, check to see if the given port has only one layer.\n            If so then use that layer.\n\n        Returns\n        -------\n        pin : Union[WireArray, BBox]\n            the first pin associated with the port of given name.\n        \"\"\"\n        port = self.get_port(name, row, col)\n        return port.get_pins(layer)[0]\n\n    def get_all_port_pins(self, name='', layer=-1):\n        # type: (Optional[str], int) -> List[WireArray]\n        \"\"\"Returns a list of all pins of all ports with the given name in this instance array.\n\n        This method gathers ports from all instances in this array with the given name,\n        then find all pins of those ports on the given layer, then return as list of WireArrays.\n\n        Parameters\n        ----------\n        name : Optional[str]\n            the port terminal name.  If None or empty, check if this\n            instance has only one port, then return it.\n        layer : int\n            the pin layer.  If negative, check to see if the given port has only one layer.\n            If so then use that layer.\n\n        Returns\n        -------\n        pin_list : List[WireArray]\n            the list of pins as WireArrays.\n        \"\"\"\n        results = []\n        for col in range(self.nx):\n            for row in range(self.ny):\n                port = self.get_port(name, row, col)\n                results.extend(port.get_pins(layer))\n        return results\n\n    def port_pins_iter(self, name='', layer=-1):\n        # type: (Optional[str], int) -> Iterator[WireArray]\n        \"\"\"Iterate through all pins of all ports with the given name in this instance array.\n\n        Parameters\n        ----------\n        name : Optional[str]\n            the port terminal name.  If None or empty, check if this\n            instance has only one port, then return it.\n        layer : int\n            the pin layer.  
If negative, check to see if the given port has only one layer.\n            If so then use that layer.\n\n        Yields\n        ------\n        pin : WireArray\n            the pin as WireArray.\n        \"\"\"\n        for col in range(self.nx):\n            for row in range(self.ny):\n                try:\n                    port = self.get_port(name, row, col)\n                except KeyError:\n                    return\n                for warr in port.get_pins(layer):\n                    yield warr\n\n    def port_names_iter(self):\n        # type: () -> Iterable[str]\n        \"\"\"Iterates over port names in this instance.\n\n        Yields\n        ------\n        port_name : str\n            name of a port in this instance.\n        \"\"\"\n        return self._master.port_names_iter()\n\n    def has_port(self, port_name):\n        # type: (str) -> bool\n        \"\"\"Returns True if this instance has the given port.\"\"\"\n        return self._master.has_port(port_name)\n\n    def has_prim_port(self, port_name):\n        # type: (str) -> bool\n        \"\"\"Returns True if this instance has the given primitive port.\"\"\"\n        return self._master.has_prim_port(port_name)\n\n    def transform(self, loc=(0, 0), orient='R0', unit_mode=False, copy=False):\n        # type: (Tuple[ldim, ldim], str, bool, bool) -> Optional[Figure]\n        \"\"\"Transform this figure.\"\"\"\n        if not unit_mode:\n            res = self.resolution\n            loc = int(round(loc[0] / res)), int(round(loc[1] / res))\n\n        if not copy:\n            ans = self\n        else:\n            ans = deepcopy(self)\n        ans._loc_unit = loc\n        ans._orient = orient\n        return ans\n\n\nclass Rect(Arrayable):\n    \"\"\"A layout rectangle, with optional arraying parameters.\n\n    Parameters\n    ----------\n    layer : string or (string, string)\n        the layer name, or a tuple of layer name and purpose name.\n        If pupose name not given, defaults 
to 'drawing'.\n    bbox : ..layout.util.BBox or ..layout.util.BBoxArray\n        the base bounding box.  If this is a BBoxArray, the BBoxArray's\n        arraying parameters are used.\n    nx : int\n        number of columns.\n    ny : int\n        number of rows.\n    spx : float\n        column pitch.\n    spy : float\n        row pitch.\n    unit_mode : bool\n        True if layout dimensions are specified in resolution units.\n    \"\"\"\n\n    def __init__(self, layer, bbox, nx=1, ny=1, spx=0, spy=0, unit_mode=False):\n        # python 2/3 compatibility: convert raw bytes to string.\n        layer = io.fix_string(layer)\n        if isinstance(layer, str):\n            layer = (layer, 'drawing')\n        self._layer = layer[0], layer[1]\n        if isinstance(bbox, BBoxArray):\n            self._bbox = bbox.base\n            Arrayable.__init__(self, self._bbox.resolution, nx=bbox.nx, ny=bbox.ny,\n                               spx=bbox.spx_unit, spy=bbox.spy_unit, unit_mode=True)\n        else:\n            self._bbox = bbox\n            Arrayable.__init__(self, self._bbox.resolution, nx=nx, ny=ny, spx=spx, spy=spy,\n                               unit_mode=unit_mode)\n\n    @property\n    def bbox_array(self):\n        \"\"\"The BBoxArray representing this (Arrayed) rectangle.\n\n        Returns\n        -------\n        barr : :class:`..layout.util.BBoxArray`\n            the BBoxArray representing this (Arrayed) rectangle.\n        \"\"\"\n        return BBoxArray(self._bbox, nx=self.nx, ny=self.ny,\n                         spx=self.spx_unit, spy=self.spy_unit, unit_mode=True)\n\n    @property\n    def layer(self):\n        \"\"\"The rectangle (layer, purpose) pair.\"\"\"\n        return self._layer\n\n    @layer.setter\n    def layer(self, val):\n        \"\"\"Sets the rectangle layer.\"\"\"\n        self.check_destroyed()\n        # python 2/3 compatibility: convert raw bytes to string.\n        val = io.fix_string(val)\n        if isinstance(val, str):\n 
           val = (val, 'drawing')\n        self._layer = val[0], val[1]\n        print(\"WARNING: USING THIS BREAKS POWER FILL ALGORITHM.\")\n\n    @property\n    def bbox(self):\n        \"\"\"The rectangle bounding box.\"\"\"\n        return self._bbox\n\n    @bbox.setter\n    def bbox(self, val):\n        \"\"\"Sets the rectangle bounding box.\"\"\"\n        self.check_destroyed()\n        if not val.is_physical():\n            raise ValueError('Bounding box %s is not physical' % val)\n        print(\"WARNING: USING THIS BREAKS POWER FILL ALGORITHM.\")\n        self._bbox = val\n\n    @property\n    def content(self):\n        \"\"\"A dictionary representation of this rectangle.\"\"\"\n        content = dict(layer=list(self.layer),\n                       bbox=[[self.bbox.left, self.bbox.bottom], [self.bbox.right, self.bbox.top]],\n                       )\n        if self.nx > 1 or self.ny > 1:\n            content['arr_nx'] = self.nx\n            content['arr_ny'] = self.ny\n            content['arr_spx'] = self.spx\n            content['arr_spy'] = self.spy\n\n        return content\n\n    def move_by(self, dx=0, dy=0, unit_mode=False):\n        \"\"\"Move the base rectangle by the given amount.\n\n        Parameters\n        ----------\n        dx : float\n            the X shift.\n        dy : float\n            the Y shift.\n        unit_mode : bool\n        True if layout dimensions are specified in resolution units.\n        \"\"\"\n        print(\"WARNING: USING THIS BREAKS POWER FILL ALGORITHM.\")\n        self._bbox = self._bbox.move_by(dx=dx, dy=dy, unit_mode=unit_mode)\n\n    def extend(self, x=None, y=None):\n        \"\"\"extend the base rectangle horizontally or vertically so it overlaps the given X/Y coordinate.\n\n        Parameters\n        ----------\n        x : float or None\n            if not None, make sure the base rectangle overlaps this X coordinate.\n        y : float or None\n            if not None, make sure the base rectangle 
overlaps this Y coordinate.\n        \"\"\"\n        print(\"WARNING: USING THIS BREAKS POWER FILL ALGORITHM.\")\n        self._bbox = self._bbox.extend(x=x, y=y)\n\n    def transform(self, loc=(0, 0), orient='R0', unit_mode=False, copy=False):\n        # type: (Tuple[ldim, ldim], str, bool, bool) -> Optional[Figure]\n        \"\"\"Transform this figure.\"\"\"\n        new_box = self._bbox.transform(loc=loc, orient=orient, unit_mode=unit_mode)\n        if not copy:\n            print(\"WARNING: USING THIS BREAKS POWER FILL ALGORITHM.\")\n            ans = self\n        else:\n            ans = deepcopy(self)\n\n        ans._bbox = new_box\n        return ans\n\n    def destroy(self):\n        # type: () -> None\n        \"\"\"Destroy this instance.\"\"\"\n        print(\"WARNING: USING THIS BREAKS POWER FILL ALGORITHM.\")\n        Arrayable.destroy(self)\n\n\nclass Path(Figure):\n    \"\"\"A layout path.  Only 45/90 degree turns are allowed.\n\n    Parameters\n    ----------\n    resolution : float\n        the layout grid resolution.\n    layer : string or (string, string)\n        the layer name, or a tuple of layer name and purpose name.\n        If purpose name not given, defaults to 'drawing'.\n    width : float\n        width of this path, in layout units.\n    points : List[Tuple[float, float]]\n        list of path points.\n    end_style : str\n        the path ends style.  Currently support 'truncate', 'extend', and 'round'.\n    join_style : str\n        the ends style at intermediate points of the path.  
Currently support 'extend' and 'round'.\n    unit_mode : bool\n        True if width and points are given as resolution units instead of layout units.\n    \"\"\"\n\n    def __init__(self,\n                 resolution,  # type: float\n                 layer,  # type: Union[str, Tuple[str, str]]\n                 width,  # type: Union[int, float]\n                 points,  # type: List[Tuple[Union[int, float], Union[int, float]]]\n                 end_style='truncate',  # type: str\n                 join_style='extend',  # type: str\n                 unit_mode=False,  # type: bool\n                 ):\n        # type: (...) -> None\n        layer = io.fix_string(layer)\n        Figure.__init__(self, resolution)\n        if isinstance(layer, str):\n            layer = (layer, 'drawing')\n\n        self._layer = layer\n        self._end_style = end_style\n        self._join_style = join_style\n        self._destroyed = False\n        self._width = 0\n        self._points = None\n        if not unit_mode:\n            self._width = int(round(width / resolution))\n            pt_list = self.compress_points(((int(round(x / resolution)), int(round(y / resolution)))\n                                            for x, y in points))\n        else:\n            self._width = width\n            pt_list = self.compress_points(points)\n\n        self._points = np.array(pt_list, dtype=int)\n\n    @classmethod\n    def compress_points(cls, pts_unit):\n        # remove collinear/duplicate points, and make sure all segments are 45 degrees.\n        cur_len = 0\n        pt_list = []\n        for x, y in pts_unit:\n            if cur_len == 0:\n                pt_list.append((x, y))\n                cur_len += 1\n            else:\n                lastx, lasty = pt_list[-1]\n                # make sure we don't have duplicate points\n                if x != lastx or y != lasty:\n                    dx, dy = x - lastx, y - lasty\n                    if dx != 0 and dy != 0 and abs(dx) 
!= abs(dy):\n                        # we don't have 45 degree wires\n                        raise ValueError('Cannot have line segment (%d, %d)->(%d, %d) in path'\n                                         % (lastx, lasty, x, y))\n                    if cur_len >= 2:\n                        # check for collinearity\n                        dx0, dy0 = lastx - pt_list[-2][0], lasty - pt_list[-2][1]\n                        if (dx == 0 and dx0 == 0) or (dx != 0 and dx0 != 0 and\n                                                      dy / dx == dy0 / dx0):\n                            # collinear, remove middle point\n                            del pt_list[-1]\n                            cur_len -= 1\n                    pt_list.append((x, y))\n                    cur_len += 1\n\n        return pt_list\n\n    @property\n    def layer(self):\n        # type: () -> Tuple[str, str]\n        \"\"\"The rectangle (layer, purpose) pair.\"\"\"\n        return self._layer\n\n    @Figure.valid.getter\n    def valid(self):\n        # type: () -> bool\n        \"\"\"Returns True if this instance is valid.\"\"\"\n        return not self.destroyed and len(self._points) >= 2 and self._width > 0\n\n    @property\n    def width(self):\n        return self._width * self._res\n\n    @property\n    def points(self):\n        return [(self._points[idx][0] * self._res, self._points[idx][1] * self._res)\n                for idx in range(self._points.shape[0])]\n\n    @property\n    def points_unit(self):\n        return [(self._points[idx][0], self._points[idx][1])\n                for idx in range(self._points.shape[0])]\n\n    @property\n    def content(self):\n        # type: () -> Dict[str, Any]\n        \"\"\"A dictionary representation of this path.\"\"\"\n        content = dict(layer=list(self.layer),\n                       width=self._width * self._res,\n                       points=self.points,\n                       end_style=self._end_style,\n                       
join_style=self._join_style,\n                       )\n        return content\n\n    def move_by(self, dx=0, dy=0, unit_mode=False):\n        # type: (ldim, ldim, bool) -> None\n        \"\"\"Move this path by the given amount.\n\n        Parameters\n        ----------\n        dx : float\n            the X shift.\n        dy : float\n            the Y shift.\n        unit_mode : bool\n            True if shifts are given in resolution units.\n        \"\"\"\n        if not unit_mode:\n            dx = int(round(dx / self._res))\n            dy = int(round(dy / self._res))\n        self._points += np.array([dx, dy])\n\n    def transform(self, loc=(0, 0), orient='R0', unit_mode=False, copy=False):\n        # type: (Tuple[ldim, ldim], str, bool, bool) -> Figure\n        \"\"\"Transform this figure.\"\"\"\n        res = self.resolution\n        if unit_mode:\n            dx, dy = loc\n        else:\n            dx = int(round(loc[0] / res))\n            dy = int(round(loc[1] / res))\n        dvec = np.array([dx, dy])\n        mat = transform_table[orient]\n        new_points = np.dot(mat, self._points.T).T + dvec\n\n        if not copy:\n            ans = self\n        else:\n            ans = deepcopy(self)\n\n        ans._points = new_points\n        return ans\n\n\nclass PathCollection(Figure):\n    \"\"\"A layout figure that consists of one or more paths.\n\n    This class make it easy to draw bus/trasmission line objects.\n\n    Parameters\n    ----------\n    resolution : float\n        layout unit resolution.\n    paths : List[Path]\n        paths in this collection.\n    \"\"\"\n\n    def __init__(self, resolution, paths, poly_paths = None):\n        Figure.__init__(self, resolution)\n        self._paths = paths\n        self._poly_paths = poly_paths\n\n    def move_by(self, dx=0, dy=0, unit_mode=False):\n        # type: (ldim, ldim, bool) -> None\n        \"\"\"Move this path by the given amount.\n\n        Parameters\n        ----------\n        dx : 
float\n            the X shift.\n        dy : float\n            the Y shift.\n        unit_mode : bool\n            True if shifts are given in resolution units.\n        \"\"\"\n        for path in self._paths:\n            path.move_by(dx=dx, dy=dy, unit_mode=unit_mode)\n\n    def transform(self, loc=(0, 0), orient='R0', unit_mode=False, copy=True):\n        # type: (Tuple[ldim, ldim], str, bool, bool) -> PathCollection\n        \"\"\"Transform this figure.\"\"\"\n        if copy:\n            ans = deepcopy(self)\n        else:\n            ans = self\n\n        for p in ans._paths:\n            p.transform(loc=loc, orient=orient, unit_mode=unit_mode, copy=False)\n        return ans\n\n\nclass TLineBus(PathCollection):\n    \"\"\"A transmission line bus drawn using Path.\n\n    assumes only 45 degree turns are used, and begin and end line segments are straight.\n\n    Parameters\n    ----------\n    resolution : float\n        layout unit resolution.\n    layer : Union[str, Tuple[str, str]]\n        the bus layer.\n    points : List[Tuple[Union[float, int], Union[float, int]]]\n        list of center points of the bus.\n    widths : List[Union[float, int]]\n        list of wire widths.  0 index is left/bottom most wire.\n    spaces : List[Union[float, int]]\n        list of wire spacings.\n    end_style : str\n        the path ends style.  
Currently support 'truncate', 'extend', and 'round'.\n    unit_mode : bool\n        True if width and points are given as resolution units instead of layout units.\n    \"\"\"\n\n    def __init__(self, resolution, layer, points, widths, spaces, end_style='truncate',\n                 unit_mode=False):\n        npoints = len(points)\n        if npoints < 2:\n            raise ValueError('Must have >= 2 points.')\n\n        if not unit_mode:\n            points = ((int(round(px / resolution)), int(round(py / resolution)))\n                      for px, py in points)\n            widths = [int(round(v / resolution / 2.0)) * 2 for v in widths]\n            spaces = [int(round(v / resolution / 2.0)) * 2 for v in spaces]\n\n        points = Path.compress_points(points)\n\n        self._points = np.array(points, dtype=int)\n        self._layer = layer\n        self._widths = widths\n        self._spaces = spaces\n        self._end_style = end_style\n\n        tot_width = sum(self._widths) + sum(self._spaces)\n        delta_list = [(-tot_width + self._widths[0]) // 2]\n        for w0, w1, sp in zip(self._widths, self._widths[1:], self._spaces):\n            delta_list.append(delta_list[-1] + sp + ((w0 + w1) // 2))\n\n        # print(tot_width)\n        # print(self._widths)\n        # print(self._spaces)\n        # print(delta_list)\n\n        paths = self.create_paths(delta_list, resolution)\n        poly_paths = self.create_poly_paths(delta_list, resolution)\n        PathCollection.__init__(self, resolution, paths, poly_paths)\n\n    def paths_iter(self):\n        return iter(self._paths)\n\n    def poly_paths_iter(self):\n        return iter(self._poly_paths)\n\n    def create_paths(self, delta_list, res):\n        npoints = len(self._points)\n        npaths = len(self._widths)\n        path_points = [[] for _ in range(npaths)]\n\n        #print(self._points)\n        # add first point\n        p0 = self._points[0, :]\n        s0 = self._points[1, :] - p0\n        s0 
//= np.amax(np.absolute(s0))\n        s0_norm = np.linalg.norm(s0)\n        d0 = np.array([-s0[1], s0[0]])\n        for path, delta in zip(path_points, delta_list):\n            tmp = p0 + d0 * int(round(delta / s0_norm))\n            path.append((tmp[0], tmp[1]))\n\n        # add intermediate points\n        for last_idx in range(2, npoints):\n            p1 = self._points[last_idx - 1, :]\n            p0 = self._points[last_idx - 2, :]\n            s0 = p1 - p0\n            s1 = self._points[last_idx, :] - p1\n            s0 //= np.amax(np.absolute(s0))\n            s1 //= np.amax(np.absolute(s1))\n            s0_norm = np.linalg.norm(s0)\n            s1_norm = np.linalg.norm(s1)\n            dir0 = np.array([-s0[1], s0[0]])\n            dir1 = np.array([-s1[1], s1[0]])\n            for path, delta in zip(path_points, delta_list):\n                d0 = p0 + dir0 * int(round(delta / s0_norm))\n                d1 = p1 + dir1 * int(round(delta / s1_norm))\n                a = np.array([[-s1[1], s1[0]],\n                              [s0[1], s0[0]]], dtype=int) // (s0[1] * s1[0] - s0[0] * s1[1])\n                sol = np.dot(a, d1 - d0)\n                tmp = sol[0] * s0 + d0\n                path.append((tmp[0], tmp[1]))\n\n        # add last points\n        p1 = self._points[-1, :]\n        s0 = p1 - self._points[-2, :]\n        s0 //= np.amax(np.absolute(s0))\n        s0_norm = np.linalg.norm(s0)\n        d0 = np.array([-s0[1], s0[0]])\n        for path, delta in zip(path_points, delta_list):\n            tmp = p1 + d0 * int(round(delta / s0_norm))\n            path.append((tmp[0], tmp[1]))\n\n        #print(path_points)\n\n        paths = [Path(res, self._layer, w, pp, end_style=self._end_style,\n                      join_style='round', unit_mode=True)\n                 for w, pp in zip(self._widths, path_points)]\n        return paths\n\n\n    def create_poly_paths(self, delta_list, res):\n        npoints = len(self._points)\n        npaths = 
len(self._widths)\n        path_points = [[] for _ in range(npaths)]\n\n        #print(self._points)\n        # add first point\n        p0 = self._points[0, :]\n        s0 = self._points[1, :] - p0\n        s0 //= np.amax(np.absolute(s0))\n        s0_norm = np.linalg.norm(s0)\n        d0 = np.array([-s0[1], s0[0]])\n        for path, delta in zip(path_points, delta_list):\n            tmp = p0 + d0 * int(round(delta / s0_norm))\n            path.append((tmp[0], tmp[1]))\n\n        # add intermediate points\n        for last_idx in range(2, npoints):\n            p1 = self._points[last_idx - 1, :]\n            p0 = self._points[last_idx - 2, :]\n            s0 = p1 - p0\n            s1 = self._points[last_idx, :] - p1\n            s0 //= np.amax(np.absolute(s0))\n            s1 //= np.amax(np.absolute(s1))\n            s0_norm = np.linalg.norm(s0)\n            s1_norm = np.linalg.norm(s1)\n            dir0 = np.array([-s0[1], s0[0]])\n            dir1 = np.array([-s1[1], s1[0]])\n            for path, delta in zip(path_points, delta_list):\n                d0 = p0 + dir0 * int(round(delta / s0_norm))\n                d1 = p1 + dir1 * int(round(delta / s1_norm))\n                a = np.array([[-s1[1], s1[0]],\n                              [s0[1], s0[0]]], dtype=int) // (s0[1] * s1[0] - s0[0] * s1[1])\n                sol = np.dot(a, d1 - d0)\n                tmp = sol[0] * s0 + d0\n                path.append((tmp[0], tmp[1]))\n\n        # add last points\n        p1 = self._points[-1, :]\n        s0 = p1 - self._points[-2, :]\n        s0 //= np.amax(np.absolute(s0))\n        s0_norm = np.linalg.norm(s0)\n        d0 = np.array([-s0[1], s0[0]])\n        for path, delta in zip(path_points, delta_list):\n            tmp = p1 + d0 * int(round(delta / s0_norm))\n            path.append((tmp[0], tmp[1]))\n\n        #print(path_points)\n        \n        path_polygons_points=[]\n        for w, pp in zip(self._widths, path_points):\n\n            pright = []\n            
pleft = []\n            for point_index in range(0,len(pp)-1):\n                p0_x = pp[point_index][0]\n                p0_y = pp[point_index][1]\n                p1_x = pp[point_index+1][0]\n                p1_y = pp[point_index+1][1]\n\n                if p0_x == p1_x:\n                    #Vert\n                    if p1_y > p0_y:\n                        #print('up')\n                        p0_x_right  = p0_x + w //2\n                        p0_y_right  = p0_y\n                        p0_x_left   = p0_x - w //2\n                        p0_y_left   = p0_y                    \n                        p1_x_right  = p1_x + w //2\n                        p1_y_right  = p1_y\n                        p1_x_left   = p1_x - w //2\n                        p1_y_left   = p1_y        \n                    else:\n                        #print('down')\n                        p0_x_right  = p0_x - w //2\n                        p0_y_right  = p0_y\n                        p0_x_left   = p0_x + w //2\n                        p0_y_left   = p0_y                    \n                        p1_x_right  = p1_x - w //2\n                        p1_y_right  = p1_y\n                        p1_x_left   = p1_x + w //2\n                        p1_y_left   = p1_y        \n                                            \n                    \n                elif p0_y == p1_y:\n                    #horz\n                    if p1_x > p0_x:\n                        #print('right')\n                        p0_x_right  = p0_x \n                        p0_y_right  = p0_y - w //2\n                        p0_x_left   = p0_x \n                        p0_y_left   = p0_y + w //2                  \n                        p1_x_right  = p1_x \n                        p1_y_right  = p1_y - w //2\n                        p1_x_left   = p1_x \n                        p1_y_left   = p1_y + w //2 \n                    else:\n                        #print('left')\n                        p0_x_right  = p0_x \n   
                     p0_y_right  = p0_y + w //2\n                        p0_x_left   = p0_x \n                        p0_y_left   = p0_y - w //2                  \n                        p1_x_right  = p1_x \n                        p1_y_right  = p1_y + w //2\n                        p1_x_left   = p1_x \n                        p1_y_left   = p1_y - w //2  \n                         \n                    \n                else:\n                    \n                    if (point_index == 0):\n                        pP_x = None\n                        pP_y = None\n                    else:\n                        pP_x = pp[point_index-1][0]\n                        pP_y = pp[point_index-1][1]\n                        \n                    if (point_index == len(pp)-2):\n                        pN_x = None\n                        pN_y = None\n                    else:\n                        pN_x = pp[point_index+2][0]\n                        pN_y = pp[point_index+2][1]\n                    \n                    if p1_y > p0_y and p1_x > p0_x:\n\n                        #print('up and right')\n                        if pP_y is None:\n                            p0_x_right  = p0_x + 2*int(np.floor((.5/np.sqrt(2))*(w // 2))) \n                            p0_y_right  = p0_y - 2*int(np.floor((.5/np.sqrt(2))*(w // 2)))\n                            p0_x_left   = p0_x - 2*int(np.floor((.5/np.sqrt(2))*(w // 2)))\n                            p0_y_left   = p0_y + 2*int(np.floor((.5/np.sqrt(2))*(w // 2)))  \n                        elif pP_y != p0_y:\n                            #from right\n                            p0_x_right  = p0_x + w //2\n                            p0_y_right  = p0_y - 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))\n                            p0_x_left   = p0_x - w //2\n                            p0_y_left   = p0_y + 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))  \n                        else:\n                            #from up\n  
                          p0_x_right  = p0_x + 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))\n                            p0_y_right  = p0_y - w //2\n                            p0_x_left   = p0_x - 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))\n                            p0_y_left   = p0_y + w //2  \n                               \n                        if pN_y is None:\n                            p1_x_right  = p1_x + 2*int(np.floor((.5/np.sqrt(2))*(w // 2))) \n                            p1_y_right  = p1_y - 2*int(np.floor((.5/np.sqrt(2))*(w // 2)))\n                            p1_x_left   = p1_x - 2*int(np.floor((.5/np.sqrt(2))*(w // 2)))\n                            p1_y_left   = p1_y + 2*int(np.floor((.5/np.sqrt(2))*(w // 2)))  \n                        elif pN_y != p1_y:\n                            #to right\n                            p1_x_right  = p1_x + w //2\n                            p1_y_right  = p1_y - 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))\n                            p1_x_left   = p1_x - w //2\n                            p1_y_left   = p1_y + 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))                                      \n                        else:\n                            #to up\n                            p1_x_right  = p1_x + 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))\n                            p1_y_right  = p1_y - w //2\n                            p1_x_left   = p1_x - 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))\n                            p1_y_left   = p1_y + w //2                               \n                            \n                    elif p1_y < p0_y and p1_x > p0_x:\n\n                        #print('down and right')\n                        if pP_y is None:\n                            p0_x_right  = p0_x - 2*int(np.floor((.5/np.sqrt(2))*(w // 2))) \n                            p0_y_right  = p0_y - 2*int(np.floor((.5/np.sqrt(2))*(w // 2)))\n                            
p0_x_left   = p0_x + 2*int(np.floor((.5/np.sqrt(2))*(w // 2)))\n                            p0_y_left   = p0_y + 2*int(np.floor((.5/np.sqrt(2))*(w // 2)))  \n                        elif pP_y != p0_y:\n                            #from right\n                            p0_x_right  = p0_x - w //2\n                            p0_y_right  = p0_y - 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))\n                            p0_x_left   = p0_x + w //2\n                            p0_y_left   = p0_y + 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))  \n                        else:\n                            #from up\n                            p0_x_right  = p0_x - 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))\n                            p0_y_right  = p0_y - w //2\n                            p0_x_left   = p0_x + 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))\n                            p0_y_left   = p0_y + w //2  \n                               \n                        if pN_y is None:\n                            p1_x_right  = p1_x - 2*int(np.floor((.5/np.sqrt(2))*(w // 2))) \n                            p1_y_right  = p1_y - 2*int(np.floor((.5/np.sqrt(2))*(w // 2)))\n                            p1_x_left   = p1_x + 2*int(np.floor((.5/np.sqrt(2))*(w // 2)))\n                            p1_y_left   = p1_y + 2*int(np.floor((.5/np.sqrt(2))*(w // 2)))  \n                        elif pN_y != p1_y:\n                            #to right\n                            p1_x_right  = p1_x - w //2\n                            p1_y_right  = p1_y - 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))\n                            p1_x_left   = p1_x + w //2\n                            p1_y_left   = p1_y + 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))                                      \n                        else:\n                            #to up\n                            p1_x_right  = p1_x - 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))\n       
                     p1_y_right  = p1_y - w //2\n                            p1_x_left   = p1_x + 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))\n                            p1_y_left   = p1_y + w //2   \n                                            \n                    elif p1_y < p0_y and p1_x < p0_x:\n\n                        #print('down and left')\n                        if pP_y is None:\n                            p0_x_right  = p0_x - 2*int(np.floor((.5/np.sqrt(2))*(w // 2))) \n                            p0_y_right  = p0_y + 2*int(np.floor((.5/np.sqrt(2))*(w // 2)))\n                            p0_x_left   = p0_x + 2*int(np.floor((.5/np.sqrt(2))*(w // 2)))\n                            p0_y_left   = p0_y - 2*int(np.floor((.5/np.sqrt(2))*(w // 2)))  \n                        elif pP_y != p0_y:\n                            #from right\n                            p0_x_right  = p0_x - w //2\n                            p0_y_right  = p0_y + 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))\n                            p0_x_left   = p0_x + w //2\n                            p0_y_left   = p0_y - 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))  \n                        else:\n                            #from up\n                            p0_x_right  = p0_x - 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))\n                            p0_y_right  = p0_y + w //2\n                            p0_x_left   = p0_x + 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))\n                            p0_y_left   = p0_y - w //2  \n                               \n                        if pN_y is None:\n                            p1_x_right  = p1_x - 2*int(np.floor((.5/np.sqrt(2))*(w // 2))) \n                            p1_y_right  = p1_y + 2*int(np.floor((.5/np.sqrt(2))*(w // 2)))\n                            p1_x_left   = p1_x + 2*int(np.floor((.5/np.sqrt(2))*(w // 2)))\n                            p1_y_left   = p1_y - 2*int(np.floor((.5/np.sqrt(2))*(w // 
2)))  \n                        elif pN_y != p1_y:\n                            #to right\n                            p1_x_right  = p1_x - w //2\n                            p1_y_right  = p1_y + 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))\n                            p1_x_left   = p1_x + w //2\n                            p1_y_left   = p1_y - 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))                                      \n                        else:\n                            #to up\n                            p1_x_right  = p1_x - 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))\n                            p1_y_right  = p1_y + w //2\n                            p1_x_left   = p1_x + 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))\n                            p1_y_left   = p1_y - w //2   \n\n                    elif p1_y > p0_y and p1_x < p0_x:\n                    \n                        #print('up and left')\n                        if pP_y is None:\n                            p0_x_right  = p0_x + 2*int(np.floor((.5/np.sqrt(2))*(w // 2))) \n                            p0_y_right  = p0_y + 2*int(np.floor((.5/np.sqrt(2))*(w // 2)))\n                            p0_x_left   = p0_x - 2*int(np.floor((.5/np.sqrt(2))*(w // 2)))\n                            p0_y_left   = p0_y - 2*int(np.floor((.5/np.sqrt(2))*(w // 2)))  \n                        elif pP_y != p0_y:\n                            #from right\n                            p0_x_right  = p0_x + w //2\n                            p0_y_right  = p0_y + 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))\n                            p0_x_left   = p0_x - w //2\n                            p0_y_left   = p0_y - 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))  \n                        else:\n                            #from up\n                            p0_x_right  = p0_x + 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))\n                            p0_y_right  = p0_y + w //2\n       
                     p0_x_left   = p0_x - 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))\n                            p0_y_left   = p0_y - w //2  \n                               \n                        if pN_y is None:\n                            p1_x_right  = p1_x + 2*int(np.floor((.5/np.sqrt(2))*(w // 2))) \n                            p1_y_right  = p1_y + 2*int(np.floor((.5/np.sqrt(2))*(w // 2)))\n                            p1_x_left   = p1_x - 2*int(np.floor((.5/np.sqrt(2))*(w // 2)))\n                            p1_y_left   = p1_y - 2*int(np.floor((.5/np.sqrt(2))*(w // 2)))  \n                        elif pN_y != p1_y:\n                            #to right\n                            p1_x_right  = p1_x + w //2\n                            p1_y_right  = p1_y + 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))\n                            p1_x_left   = p1_x - w //2\n                            p1_y_left   = p1_y - 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))                                      \n                        else:\n                            #to up\n                            p1_x_right  = p1_x + 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))\n                            p1_y_right  = p1_y + w //2\n                            p1_x_left   = p1_x - 2*int(np.floor( .5*(np.tan(np.pi/8)) * (w // 2) ))\n                            p1_y_left   = p1_y - w //2   \n                           \n                    else:\n                        raise RuntimeError\n\n\n                # if (p0_x_right%2==1) or (p0_y_right%2==1) or (p0_x_left%2==1) or (p0_y_left%2==1) or \\\n                #         (p1_x_right%2==1) or (p1_y_right%2==1) or (p1_x_left%2==1) or (p1_y_left%2==1):\n                #     pdb.set_trace()\n\n                pright.append( ( p0_x_right, p0_y_right) )\n                pleft.append( ( p0_x_left, p0_y_left) )\n                pright.append( ( p1_x_right, p1_y_right) )\n                pleft.append(  ( p1_x_left , 
p1_y_left) )\n\n\n            current_path_polygons_points = pright + pleft[::-1]\n\n            current_path_polygons_array = np.array(current_path_polygons_points)\n\n            current_path_diff = np.diff(np.diff(current_path_polygons_array,axis=0),axis=0)\n            current_path_diff_x = current_path_diff[:,0]\n            current_path_diff_x_0_ind = np.where(current_path_diff_x == 0)[0] + 1\n            current_path_polygons_array = np.delete(current_path_polygons_array, current_path_diff_x_0_ind,axis=0)\n\n            current_path_diff = np.diff(np.diff(current_path_polygons_array,axis=0),axis=0)\n            current_path_diff_y = current_path_diff[:,1]\n            current_path_diff_y_0_ind = np.where(current_path_diff_y == 0)[0] + 1\n            current_path_polygons_array = np.delete(current_path_polygons_array, current_path_diff_y_0_ind,axis=0)\n\n\n            current_path_polygons_points = [(np_point[0],np_point[1]) for np_point in current_path_polygons_array.tolist()]\n\n            \n            path_polygons_points.append(current_path_polygons_points)\n            \n        paths = [Polygon(res, self._layer, pp ,unit_mode=True) for pp in path_polygons_points]\n\n        return paths\n\n\nclass Polygon(Figure):\n    \"\"\"A layout polygon object.\n\n    Parameters\n    ----------\n    resolution : float\n        the layout grid resolution.\n    layer : Union[str, Tuple[str, str]]\n        the layer name, or a tuple of layer name and purpose name.\n        If purpose name not given, defaults to 'drawing'.\n    points : List[Tuple[Union[float, int], Union[float, int]]]\n        the points defining the polygon.\n    unit_mode : bool\n        True if the points are given in resolution units.\n    \"\"\"\n\n    def __init__(self,\n                 resolution,  # type: float\n                 layer,  # type: Union[str, Tuple[str, str]]\n                 points,  # type: List[Tuple[Union[float, int], Union[float, int]]]\n                 unit_mode=False,  
# type: bool\n                 ):\n        # type: (...) -> None\n        Figure.__init__(self, resolution)\n        layer = io.fix_string(layer)\n        if isinstance(layer, str):\n            layer = (layer, 'drawing')\n        self._layer = layer\n\n        if not unit_mode:\n            self._points = np.array(points) / resolution\n            self._points = self._points.astype(int)\n        else:\n            self._points = np.array(points, dtype=int)\n\n    @property\n    def layer(self):\n        # type: () -> str\n        \"\"\"The blockage layer.\"\"\"\n        return self._layer\n\n    @property\n    def points(self):\n        return [(self._points[idx][0] * self._res, self._points[idx][1] * self._res)\n                for idx in range(self._points.shape[0])]\n\n    @property\n    def points_unit(self):\n        return [(self._points[idx][0], self._points[idx][1])\n                for idx in range(self._points.shape[0])]\n\n    @property\n    def content(self):\n        # type: () -> Dict[str, Any]\n        \"\"\"A dictionary representation of this blockage.\"\"\"\n        content = dict(layer=self.layer,\n                       points=self.points,\n                       )\n        return content\n\n    def move_by(self, dx=0, dy=0, unit_mode=False):\n        # type: (ldim, ldim, bool) -> None\n        if not unit_mode:\n            dx = int(round(dx / self._res))\n            dy = int(round(dy / self._res))\n        self._points += np.array([dx, dy])\n\n    def transform(self, loc=(0, 0), orient='R0', unit_mode=False, copy=False):\n        # type: (Tuple[ldim, ldim], str, bool, bool) -> Figure\n        \"\"\"Transform this figure.\"\"\"\n        res = self.resolution\n        if unit_mode:\n            dx, dy = loc\n        else:\n            dx = int(round(loc[0] / res))\n            dy = int(round(loc[1] / res))\n        dvec = np.array([dx, dy])\n        mat = transform_table[orient]\n        new_points = np.dot(mat, self._points.T).T + dvec\n\n     
   if not copy:\n            ans = self\n        else:\n            ans = deepcopy(self)\n\n        ans._points = new_points\n        return ans\n\n\nclass Blockage(Polygon):\n    \"\"\"A blockage object.\n\n    Subclass Polygon for code reuse.\n\n    Parameters\n    ----------\n    resolution : float\n        the layout grid resolution.\n    block_type : str\n        the blockage type.  Currently supports 'routing' and 'placement'.\n    block_layer : str\n        the blockage layer.  This value is ignored if blockage type is 'placement'.\n    points : List[Tuple[Union[float, int], Union[float, int]]]\n        the points defining the blockage.\n    unit_mode : bool\n        True if the points are given in resolution units.\n    \"\"\"\n\n    def __init__(self, resolution, block_type, block_layer, points, unit_mode=False):\n        # type: (float, str, str, List[Tuple[Union[float, int], Union[float, int]]], bool) -> None\n        Polygon.__init__(self, resolution, block_layer, points, unit_mode=unit_mode)\n        self._type = block_type\n        self._block_layer = block_layer\n\n    @property\n    def layer(self):\n        \"\"\"The blockage layer.\"\"\"\n        return self._block_layer\n\n    @property\n    def type(self):\n        # type: () -> str\n        \"\"\"The blockage type.\"\"\"\n        return self._type\n\n    @property\n    def content(self):\n        # type: () -> Dict[str, Any]\n        \"\"\"A dictionary representation of this blockage.\"\"\"\n        content = dict(layer=self.layer,\n                       btype=self.type,\n                       points=self.points,\n                       )\n        return content\n\n\nclass Boundary(Polygon):\n    \"\"\"A boundary object.\n\n    Subclass Polygon for code reuse.\n\n    Parameters\n    ----------\n    resolution : float\n        the layout grid resolution.\n    boundary_type : str\n        the boundary type.  
Currently supports 'PR', 'snap', and 'area'.\n    points : List[Tuple[Union[float, int], Union[float, int]]]\n        the points defining the blockage.\n    unit_mode : bool\n        True if the points are given in resolution units.\n    \"\"\"\n\n    def __init__(self, resolution, boundary_type, points, unit_mode=False):\n        # type: (float, str, List[Tuple[Union[float, int], Union[float, int]]], bool) -> None\n        Polygon.__init__(self, resolution, ('', ''), points, unit_mode=unit_mode)\n        self._type = boundary_type\n\n    @property\n    def type(self):\n        # type: () -> str\n        \"\"\"The blockage type.\"\"\"\n        return self._type\n\n    @property\n    def content(self):\n        # type: () -> Dict[str, Any]\n        \"\"\"A dictionary representation of this blockage.\"\"\"\n        content = dict(btype=self.type,\n                       points=self.points,\n                       )\n        return content\n\n\nclass ViaInfo(dict):\n    \"\"\"A dictionary that represents a layout via.\n    \"\"\"\n\n    param_list = ['id', 'loc', 'orient', 'num_rows', 'num_cols', 'sp_rows', 'sp_cols',\n                  'enc1', 'enc2']\n\n    def __init__(self, res, **kwargs):\n        kv_iter = ((key, kwargs[key]) for key in self.param_list)\n        dict.__init__(self, kv_iter)\n        for opt_par in ['cut_width', 'cut_height', 'arr_nx', 'arr_ny', 'arr_spx', 'arr_spy']:\n            if opt_par in kwargs:\n                self[opt_par] = kwargs[opt_par]\n\n        self._resolution = res\n\n    @property\n    def id(self):\n        # type: () -> str\n        return self['id']\n\n    @property\n    def loc(self):\n        # type: () -> Tuple[float, float]\n        loc_list = self['loc']\n        return loc_list[0], loc_list[1]\n\n    @property\n    def orient(self):\n        # type: () -> str\n        return self['orient']\n\n    @property\n    def num_rows(self):\n        # type: () -> int\n        return self['num_rows']\n\n    @property\n    def 
num_cols(self):\n        # type: () -> int\n        return self['num_cols']\n\n    @property\n    def sp_rows(self):\n        # type: () -> float\n        return self['sp_rows']\n\n    @property\n    def sp_cols(self):\n        # type: () -> float\n        return self['sp_cols']\n\n    @property\n    def enc1(self):\n        # type: () -> Tuple[float, float, float, float]\n        enc_list = self['enc1']\n        return enc_list[0], enc_list[1], enc_list[2], enc_list[3]\n\n    @property\n    def enc2(self):\n        # type: () -> Tuple[float, float, float, float]\n        enc_list = self['enc2']\n        return enc_list[0], enc_list[1], enc_list[2], enc_list[3]\n\n    @property\n    def cut_width(self):\n        # type: () -> float\n        return self.get('cut_width', -1)\n\n    @property\n    def cut_height(self):\n        # type: () -> float\n        return self.get('cut_height', -1)\n\n    @property\n    def arr_nx(self):\n        # type: () -> int\n        return self.get('arr_nx', 1)\n\n    @property\n    def arr_ny(self):\n        # type: () -> int\n        return self.get('arr_ny', 1)\n\n    @property\n    def arr_spx(self):\n        # type: () -> float\n        return self.get('arr_spx', 0)\n\n    @property\n    def arr_spy(self):\n        # type: () -> float\n        return self.get('arr_spy', 0)\n\n    def move_by(self, dx=0, dy=0):\n        # type: (float, float) -> None\n        \"\"\"Move this instance by the given amount.\n\n        Parameters\n        ----------\n        dx : float\n            the X shift.\n        dy : float\n            the Y shift.\n        \"\"\"\n        res = self._resolution\n        loc = self.loc\n        self['loc'] = [round((loc[0] + dx) / res) * res,\n                       round((loc[1] + dy) / res) * res]\n\n\nclass Via(Arrayable):\n    \"\"\"A layout via, with optional arraying parameters.\n\n    Parameters\n    ----------\n    tech : ..layout.core.TechInfo\n        the technology class used to calculate via 
information.\n    bbox : ..layout.util.BBox or ..layout.util.BBoxArray\n        the via bounding box, not including extensions.\n        If this is a BBoxArray, the BBoxArray's arraying parameters are used.\n    bot_layer : str or (str, str)\n        the bottom layer name, or a tuple of layer name and purpose name.\n        If purpose name not given, defaults to 'drawing'.\n    top_layer : str or (str, str)\n        the top layer name, or a tuple of layer name and purpose name.\n        If purpose name not given, defaults to 'drawing'.\n    bot_dir : str\n        the bottom layer extension direction.  Either 'x' or 'y'.\n    nx : int\n        arraying parameter.  Number of columns.\n    ny : int\n        arraying parameter.  Mumber of rows.\n    spx : float\n        arraying parameter.  Column pitch.\n    spy : float\n        arraying parameter.  Row pitch.\n    extend : bool\n        True if via extension can be drawn outside of bounding box.\n    top_dir : Optional[str]\n        top layer extension direction.  
Can force to extend in same direction as bottom.\n    unit_mode : bool\n        True if array pitches are given in resolution units.\n    \"\"\"\n\n    def __init__(self, tech, bbox, bot_layer, top_layer, bot_dir,\n                 nx=1, ny=1, spx=0, spy=0, extend=True, top_dir=None, unit_mode=False):\n        if isinstance(bbox, BBoxArray):\n            self._bbox = bbox.base\n            Arrayable.__init__(self, tech.resolution, nx=bbox.nx, ny=bbox.ny,\n                               spx=bbox.spx_unit, spy=bbox.spy_unit, unit_mode=True)\n\n        else:\n            self._bbox = bbox\n            Arrayable.__init__(self, tech.resolution, nx=nx, ny=ny, spx=spx, spy=spy,\n                               unit_mode=unit_mode)\n\n        # python 2/3 compatibility: convert raw bytes to string.\n        bot_layer = io.fix_string(bot_layer)\n        top_layer = io.fix_string(top_layer)\n\n        if isinstance(bot_layer, str):\n            bot_layer = (bot_layer, 'drawing')\n        if isinstance(top_layer, str):\n            top_layer = (top_layer, 'drawing')\n\n        self._tech = tech\n        self._bot_layer = bot_layer[0], bot_layer[1]\n        self._top_layer = top_layer[0], top_layer[1]\n        self._bot_dir = bot_dir\n        self._top_dir = top_dir\n        self._extend = extend\n        self._info = self._tech.get_via_info(self._bbox, bot_layer, top_layer, bot_dir,\n                                             top_dir=top_dir, extend=extend)\n        if self._info is None:\n            raise ValueError('Cannot make via with bounding box %s' % self._bbox)\n\n    def _update(self):\n        \"\"\"Update via parameters.\"\"\"\n        self._info = self._tech.get_via_info(self.bbox, self.bot_layer, self.top_layer,\n                                             self.bottom_direction, top_dir=self.top_direction,\n                                             extend=self.extend)\n\n    @property\n    def top_box(self):\n        # type: () -> BBox\n        \"\"\"the 
top via layer bounding box.\"\"\"\n        return self._info['top_box']\n\n    @property\n    def bottom_box(self):\n        # type: () -> BBox\n        \"\"\"the bottom via layer bounding box.\"\"\"\n        return self._info['bot_box']\n\n    @property\n    def bot_layer(self):\n        \"\"\"The bottom via (layer, purpose) pair.\"\"\"\n        return self._bot_layer\n\n    @property\n    def top_layer(self):\n        \"\"\"The top via layer.\"\"\"\n        return self._top_layer\n\n    @property\n    def bottom_direction(self):\n        \"\"\"the bottom via extension direction.\"\"\"\n        return self._bot_dir\n\n    @bottom_direction.setter\n    def bottom_direction(self, new_bot_dir):\n        \"\"\"Sets the bottom via extension direction.\"\"\"\n        self.check_destroyed()\n        self._bot_dir = new_bot_dir\n        self._update()\n\n    @property\n    def top_direction(self):\n        \"\"\"the bottom via extension direction.\"\"\"\n        if not self._top_dir:\n            return 'x' if self._bot_dir == 'y' else 'y'\n        return self._top_dir\n\n    @top_direction.setter\n    def top_direction(self, new_top_dir):\n        \"\"\"Sets the bottom via extension direction.\"\"\"\n        self.check_destroyed()\n        self._top_dir = new_top_dir\n        self._update()\n\n    @property\n    def extend(self):\n        \"\"\"True if via extension can grow beyond bounding box.\"\"\"\n        return self._extend\n\n    @extend.setter\n    def extend(self, new_val):\n        self._extend = new_val\n\n    @property\n    def bbox(self):\n        \"\"\"The via bounding box not including extensions.\"\"\"\n        return self._bbox\n\n    @property\n    def bbox_array(self):\n        \"\"\"The via bounding box array, not including extensions.\n\n        Returns\n        -------\n        barr : :class:`..layout.util.BBoxArray`\n            the BBoxArray representing this (Arrayed) rectangle.\n        \"\"\"\n        return BBoxArray(self._bbox, nx=self.nx, 
ny=self.ny, spx=self.spx_unit,\n                         spy=self.spy_unit, unit_mode=True)\n\n    @bbox.setter\n    def bbox(self, new_bbox):\n        \"\"\"Sets the via bounding box.  Will redraw the via.\"\"\"\n        self.check_destroyed()\n        if not new_bbox.is_physical():\n            raise ValueError('Bounding box %s is not physical' % new_bbox)\n        self._bbox = new_bbox\n        self._update()\n\n    @property\n    def content(self):\n        \"\"\"A dictionary representation of this via.\"\"\"\n        via_params = self._info['params']\n        content = ViaInfo(self._tech.resolution, **via_params)\n\n        if self.nx > 1 or self.ny > 1:\n            content['arr_nx'] = self.nx\n            content['arr_ny'] = self.ny\n            content['arr_spx'] = self.spx\n            content['arr_spy'] = self.spy\n\n        return content\n\n    def move_by(self, dx=0, dy=0, unit_mode=False):\n        # type: (ldim, ldim, bool) -> None\n        \"\"\"Move this path by the given amount.\n\n        Parameters\n        ----------\n        dx : float\n            the X shift.\n        dy : float\n            the Y shift.\n        unit_mode : bool\n            True if shifts are given in resolution units.\n        \"\"\"\n        self._bbox = self._bbox.move_by(dx=dx, dy=dy, unit_mode=unit_mode)\n        self._info['top_box'] = self._info['top_box'].move_by(dx=dx, dy=dy, unit_mode=unit_mode)\n        self._info['bot_box'] = self._info['bot_box'].move_by(dx=dx, dy=dy, unit_mode=unit_mode)\n        self._info['params']['loc'] = [self._bbox.xc, self._bbox.yc]\n\n    def transform(self, loc=(0, 0), orient='R0', unit_mode=False, copy=False):\n        # type: (Tuple[ldim, ldim], str, bool, bool) -> Figure\n        \"\"\"Transform this figure.\"\"\"\n        new_box = self._bbox.transform(loc=loc, orient=orient, unit_mode=unit_mode)\n        if copy:\n            return Via(self._tech, new_box, self._bot_layer, self._top_layer, self._bot_dir,\n                       
nx=self.nx, ny=self.ny, spx=self.spx_unit, spy=self.spy_unit,\n                       unit_mode=True)\n        else:\n            self._bbox = new_box\n            self._info['top_box'] = self._info['top_box'].transform(loc=loc, orient=orient,\n                                                                    unit_mode=unit_mode)\n            self._info['bot_box'] = self._info['bot_box'].transform(loc=loc, orient=orient,\n                                                                    unit_mode=unit_mode)\n            self._info['params']['loc'] = [self._bbox.xc, self._bbox.yc]\n\n\nclass PinInfo(dict):\n    \"\"\"A dictionary that represents a layout pin.\n    \"\"\"\n\n    param_list = ['net_name', 'pin_name', 'label', 'layer', 'bbox', 'make_rect']\n\n    def __init__(self, res, **kwargs):\n        kv_iter = ((key, kwargs[key]) for key in self.param_list)\n        dict.__init__(self, kv_iter)\n\n        self._resolution = res\n\n    @property\n    def net_name(self):\n        # type: () -> str\n        return self['net_name']\n\n    @property\n    def pin_name(self):\n        # type: () -> str\n        return self['pin_name']\n\n    @property\n    def label(self):\n        # type: () -> str\n        return self['label']\n\n    @property\n    def layer(self):\n        # type: () -> Tuple[str, str]\n        lay_list = self['layer']\n        return lay_list[0], lay_list[1]\n\n    @property\n    def bbox(self):\n        # type: () -> BBox\n        bbox_list = self['bbox']\n        return BBox(bbox_list[0][0], bbox_list[0][1], bbox_list[1][0], bbox_list[1][1],\n                    self._resolution)\n\n    @property\n    def make_rect(self):\n        # type: () -> bool\n        return self['make_rect']\n\n    def move_by(self, dx=0, dy=0):\n        # type: (float, float) -> None\n        \"\"\"Move this instance by the given amount.\n\n        Parameters\n        ----------\n        dx : float\n            the X shift.\n        dy : float\n            the Y 
shift.\n        \"\"\"\n        new_box = self.bbox.move_by(dx=dx, dy=dy)\n        self['bbox'] = [[new_box.left, new_box.bottom], [new_box.right, new_box.top]]\n"
  },
  {
    "path": "bag/layout/routing/LICENSE",
    "content": "BSD 3-Clause License\n\nCopyright (c) 2018, Regents of the University of California\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n  list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n  this list of conditions and the following disclaimer in the documentation\n  and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n  contributors may be used to endorse or promote products derived from\n  this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "bag/layout/routing/__init__.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This package provide routing classes.\n\"\"\"\n\nfrom .base import TrackID, WireArray, Port, TrackManager\nfrom .grid import RoutingGrid\nfrom .fill import UsedTracks\n"
  },
  {
    "path": "bag/layout/routing/base.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This module provides basic routing classes.\n\"\"\"\n\nfrom typing import Tuple, Union, Generator, Dict, List, Sequence\n\nimport numbers\n\nfrom ...util.search import BinaryIterator\nfrom ..util import BBox, BBoxArray\nfrom .grid import RoutingGrid\n\n\nclass TrackID(object):\n    \"\"\"A class that represents locations of track(s) on the routing grid.\n\n    Parameters\n    ----------\n    layer_id : int\n        the layer ID.\n    track_idx : Union[float, int]\n        the smallest middle track index in the array.  Multiples of 0.5\n    width : int\n        width of one track in number of tracks.\n    num : int\n        number of tracks in this array.\n    pitch : Union[float, int]\n        pitch between adjacent tracks, in number of track pitches.\n    \"\"\"\n\n    def __init__(self, layer_id, track_idx, width=1, num=1, pitch=0.0):\n        # type: (int, Union[float, int], int, int, Union[float, int]) -> None\n        if num < 1:\n            raise ValueError('TrackID must have 1 or more tracks.')\n\n        self._layer_id = layer_id\n        self._hidx = int(round(2 * track_idx)) + 1\n        self._w = width\n        self._n = num\n        self._hpitch = 0 if num == 1 else int(pitch * 2)\n\n    def __repr__(self):\n        arg_list = ['layer=%d' % self._layer_id]\n        if self._hidx % 2 == 1:\n            arg_list.append('track=%d' % ((self._hidx - 1) // 2))\n        else:\n            arg_list.append('track=%.1f' % ((self._hidx - 1) / 2))\n        if self._w != 1:\n            arg_list.append('width=%d' % self._w)\n        if self._n != 1:\n            arg_list.append('num=%d' % self._n)\n            if self._hpitch % 2 == 0:\n                arg_list.append('pitch=%d' % (self._hpitch // 2))\n            else:\n                arg_list.append('pitch=%.1f' % (self._hpitch / 2))\n\n        return '%s(%s)' % (self.__class__.__name__, ', '.join(arg_list))\n\n    def __str__(self):\n        return repr(self)\n\n 
   @property\n    def layer_id(self):\n        # type: () -> int\n        return self._layer_id\n\n    @property\n    def width(self):\n        # type: () -> int\n        return self._w\n\n    @property\n    def base_index(self):\n        # type: () -> Union[float, int]\n        if self._hidx % 2 == 1:\n            return (self._hidx - 1) // 2\n        return (self._hidx - 1) / 2\n\n    @property\n    def index_htr(self):\n        # type: () -> int\n        return self._hidx\n\n    @property\n    def num(self):\n        # type: () -> int\n        return self._n\n\n    @property\n    def pitch(self):\n        # type: () -> Union[float, int]\n        if self._hpitch % 2 == 0:\n            return self._hpitch // 2\n        return self._hpitch / 2\n\n    @property\n    def pitch_htr(self):\n        # type: () -> int\n        return self._hpitch\n\n    def get_immutable_key(self):\n        return self.__class__.__name__, self._layer_id, self._hidx, self._w, self._n, self._hpitch\n\n    def get_bounds(self, grid, unit_mode=False):\n        # type: (RoutingGrid, bool) -> Tuple[Union[float, int], Union[float, int]]\n        \"\"\"Calculate the track bounds coordinate.\n\n        Parameters\n        ----------\n        grid : RoutingGrid\n            the RoutingGrid object.\n        unit_mode : bool\n            True to return coordinates in resolution units.\n\n        Returns\n        -------\n        lower : Union[float, int]\n            the lower bound coordinate perpendicular to track direction.\n        upper : Union[float, int]\n            the upper bound coordinate perpendicular to track direction.\n        \"\"\"\n        lower, upper = grid.get_wire_bounds(self.layer_id, self.base_index,\n                                            width=self.width, unit_mode=True)\n        pitch_dim = (self._hpitch * grid.get_track_pitch(self._layer_id, unit_mode=True)) // 2\n        upper += (self.num - 1) * pitch_dim\n        if unit_mode:\n            return lower, upper\n   
     else:\n            res = grid.resolution\n            return lower * res, upper * res\n\n    def __iter__(self):\n        # type: () -> Generator[Union[float, int]]\n        \"\"\"Iterate over all middle track indices in this TrackID.\"\"\"\n        for idx in range(self._n):\n            num = self._hidx + idx * self._hpitch\n            if num % 2 == 1:\n                yield (num - 1) // 2\n            else:\n                yield (num - 1) / 2\n\n    def sub_tracks_iter(self, grid):\n        # type: (RoutingGrid) -> Generator[TrackID]\n        \"\"\"Iterate through sub-TrackIDs where every track in sub-TrackID has the same layer name.\n\n        This method is used to deal with double patterning layer.  If this TrackID is not\n        on a double patterning layer, it simply yields itself.\n\n        Parameters\n        ----------\n        grid : RoutingGrid\n            the RoutingGrid object.\n\n        Yields\n        ------\n        sub_id : TrackID\n            a TrackID where all tracks has the same layer name.\n        \"\"\"\n        layer_id = self._layer_id\n        layer_names = grid.tech_info.get_layer_name(layer_id)\n        if isinstance(layer_names, tuple):\n            den = 2 * len(layer_names)\n            if self._hpitch % den == 0:\n                # layer name will never change\n                yield self\n            else:\n                # TODO: have more robust solution than just yielding tracks one by one?\n                for tr_idx in self:\n                    yield TrackID(layer_id, tr_idx, width=self.width)\n        else:\n            yield self\n\n    def transform(self, grid, loc=(0, 0), orient=\"R0\", unit_mode=False):\n        # type: (RoutingGrid, Tuple[Union[float, int], Union[float, int]], str, bool) -> TrackID\n        \"\"\"returns a transformation of this TrackID.\"\"\"\n        layer_id = self._layer_id\n        is_x = grid.get_direction(layer_id) == 'x'\n        if orient == 'R0':\n            base_hidx = 
self._hidx\n        elif orient == 'MX':\n            if is_x:\n                base_hidx = -self._hidx - (self._n - 1) * self._hpitch\n            else:\n                base_hidx = self._hidx\n        elif orient == 'MY':\n            if is_x:\n                base_hidx = self._hidx\n            else:\n                base_hidx = -self._hidx - (self._n - 1) * self._hpitch\n        elif orient == 'R180':\n            base_hidx = -self._hidx - (self._n - 1) * self._hpitch\n        else:\n            raise ValueError('Unsupported orientation: %s' % orient)\n\n        delta = loc[1] if is_x else loc[0]\n        delta = grid.coord_to_track(layer_id, delta, unit_mode=unit_mode) + 0.5\n        return TrackID(layer_id, (base_hidx - 1) / 2 + delta, width=self._w,\n                       num=self._n, pitch=self.pitch)\n\n\nclass WireArray(object):\n    \"\"\"An array of wires on the routing grid.\n\n    Parameters\n    ----------\n    track_id : :class:`bag.layout.routing.TrackID`\n        TrackArray representing the track locations of this wire array.\n    lower : Union[float, int]\n        the lower coordinate along the track direction.\n    upper : Union[float, int]\n        the upper coordinate along the track direction.\n    res : Optional[float]\n        the resolution unit.\n    unit_mode : bool\n        True if lower/upper are specified in resolution units.\n    \"\"\"\n\n    def __init__(self, track_id, lower, upper, res=None, unit_mode=False):\n        # type: (TrackID, Union[float, int], Union[float, int], Optional[float], bool) -> None\n        if res is None:\n            raise ValueError('Please specify the layout distance resolution.')\n\n        self._track_id = track_id\n        self._res = res\n        if unit_mode:\n            self._lower_unit = int(lower)  # type: int\n            self._upper_unit = int(upper)  # type: int\n        else:\n            self._lower_unit = int(round(lower / res))\n            self._upper_unit = int(round(upper / res))\n\n  
  def __repr__(self):\n        return '%s(%s, %.d, %.d, %.4g)' % (self.__class__.__name__, self._track_id,\n                                           self._lower_unit, self._upper_unit, self._res)\n\n    def __str__(self):\n        return repr(self)\n\n    @property\n    def resolution(self):\n        return self._res\n\n    @property\n    def lower(self):\n        return self._lower_unit * self._res\n\n    @property\n    def upper(self):\n        return self._upper_unit * self._res\n\n    @property\n    def middle(self):\n        return (self._lower_unit + self._upper_unit) // 2 * self._res\n\n    @property\n    def lower_unit(self):\n        return self._lower_unit\n\n    @property\n    def upper_unit(self):\n        return self._upper_unit\n\n    @property\n    def middle_unit(self):\n        return (self._lower_unit + self._upper_unit) // 2\n\n    @property\n    def track_id(self):\n        # type: () -> TrackID\n        \"\"\"Returns the TrackID of this WireArray.\"\"\"\n        return self._track_id\n\n    @property\n    def layer_id(self):\n        # type: () -> int\n        \"\"\"Returns the layer ID of this WireArray.\"\"\"\n        return self.track_id.layer_id\n\n    @property\n    def width(self):\n        return self.track_id.width\n\n    @classmethod\n    def list_to_warr(cls, warr_list):\n        # type: (List[WireArray]) -> WireArray\n        \"\"\"Convert a list of WireArrays to a single WireArray.\n\n        this method assumes all WireArrays have the same layer, width, and lower/upper coordinates.\n        Overlapping WireArrays will be compacted.\n        \"\"\"\n        if len(warr_list) == 1:\n            return warr_list[0]\n\n        tid0 = warr_list[0].track_id\n        layer = tid0.layer_id\n        width = tid0.width\n        res = warr_list[0].resolution\n        lower, upper = warr_list[0].lower_unit, warr_list[0].upper_unit\n        tid_list = sorted(set((int(idx * 2) for warr in warr_list for idx in warr.track_id)))\n        
base_idx2 = tid_list[0]\n        base_idx = base_idx2 // 2 if base_idx2 % 2 == 0 else base_idx2 / 2\n        if len(tid_list) < 2:\n            return WireArray(TrackID(layer, base_idx, width=width), lower, upper,\n                             res=res, unit_mode=True)\n        diff = tid_list[1] - tid_list[0]\n        for idx in range(1, len(tid_list) - 1):\n            if tid_list[idx + 1] - tid_list[idx] != diff:\n                raise ValueError('pitch mismatch.')\n        pitch = diff // 2 if diff % 2 == 0 else diff / 2\n\n        return WireArray(TrackID(layer, base_idx, width=width, num=len(tid_list), pitch=pitch),\n                         lower, upper, res=res, unit_mode=True)\n\n    @classmethod\n    def single_warr_iter(cls, warr):\n        if isinstance(warr, WireArray):\n            yield from warr.warr_iter()\n        else:\n            for w in warr:\n                yield from w.warr_iter()\n\n    def get_immutable_key(self):\n        return (self.__class__.__name__, self._track_id.get_immutable_key(), self._lower_unit,\n                self._upper_unit, self._res)\n\n    def to_warr_list(self):\n        return list(self.warr_iter())\n\n    def warr_iter(self):\n        tid = self._track_id\n        layer = tid.layer_id\n        width = tid.width\n        for tr in tid:\n            yield WireArray(TrackID(layer, tr, width=width), self._lower_unit,\n                            self._upper_unit, res=self._res, unit_mode=True)\n\n    def get_bbox_array(self, grid):\n        # type: ('RoutingGrid') -> BBoxArray\n        \"\"\"Returns the BBoxArray representing this WireArray.\n\n        Parameters\n        ----------\n        grid : RoutingGrid\n            the RoutingGrid of this WireArray.\n\n        Returns\n        -------\n        bbox_arr : BBoxArray\n            the BBoxArray of the wires.\n        \"\"\"\n        track_id = self.track_id\n        tr_w = track_id.width\n        layer_id = track_id.layer_id\n        base_idx = 
track_id.base_index\n        num = track_id.num\n\n        base_box = grid.get_bbox(layer_id, base_idx, self._lower_unit, self._upper_unit,\n                                 width=tr_w, unit_mode=True)\n        tot_pitch = (track_id.pitch_htr * grid.get_track_pitch(layer_id, unit_mode=True)) // 2\n        if grid.get_direction(layer_id) == 'x':\n            return BBoxArray(base_box, ny=num, spy=tot_pitch, unit_mode=True)\n        else:\n            return BBoxArray(base_box, nx=num, spx=tot_pitch, unit_mode=True)\n\n    def wire_iter(self, grid):\n        \"\"\"Iterate over all wires in this WireArray as layer/BBox pair.\n\n        Parameters\n        ----------\n        grid : :class:`bag.layout.routing.RoutingGrid`\n            the RoutingGrid of this WireArray.\n\n        Yields\n        ------\n        layer : string\n            the wire layer name.\n        bbox : :class:`bag.layout.util.BBox`\n            the wire bounding box.\n        \"\"\"\n        tr_w = self.track_id.width\n        layer_id = self.layer_id\n        for tr_idx in self.track_id:\n            layer_name = grid.get_layer_name(layer_id, tr_idx)\n            bbox = grid.get_bbox(layer_id, tr_idx, self._lower_unit, self._upper_unit,\n                                 width=tr_w, unit_mode=True)\n            yield layer_name, bbox\n\n    def wire_arr_iter(self, grid):\n        \"\"\"Iterate over all wires in this WireArray as layer/BBoxArray pair.\n\n        This method group all rectangles in the same layer together.\n\n        Parameters\n        ----------\n        grid : :class:`bag.layout.routing.RoutingGrid`\n            the RoutingGrid of this WireArray.\n\n        Yields\n        ------\n        layer : string\n            the wire layer name.\n        bbox : :class:`bag.layout.util.BBoxArray`\n            the wire bounding boxes.\n        \"\"\"\n        res = self._res\n        tid = self.track_id\n        layer_id = tid.layer_id\n        tr_width = tid.width\n        track_pitch = 
grid.get_track_pitch(layer_id, unit_mode=True)\n        is_x = grid.get_direction(layer_id) == 'x'\n        for track_idx in tid.sub_tracks_iter(grid):\n            base_idx = track_idx.base_index\n            cur_layer = grid.get_layer_name(layer_id, base_idx)\n            cur_num = track_idx.num\n            wire_pitch = (track_idx.pitch_htr * track_pitch) // 2\n            tl, tu = grid.get_wire_bounds(layer_id, base_idx, width=tr_width, unit_mode=True)\n            if is_x:\n                base_box = BBox(self._lower_unit, tl, self._upper_unit, tu, res, unit_mode=True)\n                box_arr = BBoxArray(base_box, ny=cur_num, spy=wire_pitch, unit_mode=True)\n            else:\n                base_box = BBox(tl, self._lower_unit, tu, self._upper_unit, res, unit_mode=True)\n                box_arr = BBoxArray(base_box, nx=cur_num, spx=wire_pitch, unit_mode=True)\n\n            yield cur_layer, box_arr\n\n    def transform(self, grid, loc=(0, 0), orient='R0', unit_mode=False):\n        \"\"\"Return a new transformed WireArray.\n\n        Parameters\n        ----------\n        grid : :class:`bag.layout.routing.RoutingGrid`\n            the RoutingGrid of this WireArray.\n        loc : Tuple[Union[float, int], Union[float, int]]\n            the X/Y coordinate shift.\n        orient : str\n            the new orientation.\n        unit_mode : bool\n            True if location is given in unit mode.\n        \"\"\"\n        res = self._res\n        if not unit_mode:\n            loc = int(round(loc[0] / res)), int(round(loc[1] / res))\n\n        layer_id = self.layer_id\n        is_x = grid.get_direction(layer_id) == 'x'\n        if orient == 'R0':\n            lower, upper = self._lower_unit, self._upper_unit\n        elif orient == 'MX':\n            if is_x:\n                lower, upper = self._lower_unit, self._upper_unit\n            else:\n                lower, upper = -self._upper_unit, -self._lower_unit\n        elif orient == 'MY':\n            if 
is_x:\n                lower, upper = -self._upper_unit, -self._lower_unit\n            else:\n                lower, upper = self._lower_unit, self._upper_unit\n        elif orient == 'R180':\n            lower, upper = -self._upper_unit, -self._lower_unit\n        else:\n            raise ValueError('Unsupported orientation: %s' % orient)\n\n        delta = loc[0] if is_x else loc[1]\n        return WireArray(self.track_id.transform(grid, loc=loc, orient=orient, unit_mode=True),\n                         lower + delta, upper + delta, res=res, unit_mode=True)\n\n\nclass Port(object):\n    \"\"\"A layout port.\n\n    a port is a group of pins that represent the same net.\n    The pins can be on different layers.\n\n    Parameters\n    ----------\n    term_name : str\n        the terminal name of the port.\n    pin_dict : dict[int, list[bag.layout.routing.WireArray]]\n        a dictionary from layer ID to pin geometries on that layer.\n    \"\"\"\n\n    def __init__(self, term_name, pin_dict, label=''):\n        self._term_name = term_name\n        self._pin_dict = pin_dict\n        self._label = label or term_name\n\n    def __iter__(self):\n        \"\"\"Iterate through all pin geometries in this port.\n\n        the iteration order is not guaranteed.\n        \"\"\"\n        for geo_list in self._pin_dict.values():\n            yield from geo_list\n\n    def get_single_layer(self):\n        # type: () -> Union[int, str]\n        \"\"\"Returns the layer of this port if it only has a single layer.\"\"\"\n        if len(self._pin_dict) > 1:\n            raise ValueError('This port has more than one layer.')\n        return next(iter(self._pin_dict))\n\n    def _get_layer(self, layer):\n        \"\"\"Get the layer number.\"\"\"\n        if isinstance(layer, numbers.Integral):\n            return self.get_single_layer() if layer < 0 else layer\n        else:\n            return self.get_single_layer() if not layer else layer\n\n    @property\n    def net_name(self):\n 
       \"\"\"Returns the net name of this port.\"\"\"\n        return self._term_name\n\n    @property\n    def label(self):\n        \"\"\"Returns the label of this port.\"\"\"\n        return self._label\n\n    def get_pins(self, layer=-1):\n        \"\"\"Returns the pin geometries on the given layer.\n\n        Parameters\n        ----------\n        layer : int\n            the layer ID.  If Negative, check if this port is on a single layer,\n            then return the result.\n\n        Returns\n        -------\n        track_bus_list : Union[WireArray, BBox]\n            pins on the given layer representing as WireArrays.\n        \"\"\"\n        layer = self._get_layer(layer)\n        return self._pin_dict.get(layer, [])\n\n    def get_bounding_box(self, grid, layer=-1):\n        \"\"\"Calculate the overall bounding box of this port on the given layer.\n\n        Parameters\n        ----------\n        grid : :class:`~bag.layout.routing.RoutingGrid`\n            the RoutingGrid of this Port.\n        layer : int\n            the layer ID.  
If Negative, check if this port is on a single layer,\n            then return the result.\n\n        Returns\n        -------\n        bbox : BBox\n            the bounding box.\n        \"\"\"\n        layer = self._get_layer(layer)\n        box = BBox.get_invalid_bbox()\n        for geo in self._pin_dict[layer]:\n            if isinstance(geo, BBox):\n                box = box.merge(geo)\n            else:\n                box = box.merge(geo.get_bbox_array(grid).get_overall_bbox())\n        return box\n\n    def transform(self, grid, loc=(0, 0), orient='R0', unit_mode=False):\n        # type: (RoutingGrid, Tuple[Union[float, int], Union[float, int]], str, bool) -> Port\n        \"\"\"Return a new transformed Port.\n\n        Parameters\n        ----------\n        grid : RoutingGrid\n            the RoutingGrid of this Port.\n        loc : Tuple[Union[float, int], Union[float, int]]\n            the X/Y coordinate shift.\n        orient : str\n            the new orientation.\n        unit_mode: bool\n            True if location is in resolution units.\n        \"\"\"\n        if not unit_mode:\n            res = grid.resolution\n            loc = (int(round(loc[0] / res)), int(round(loc[1] / res)))\n\n        new_pin_dict = {}\n        for lay, geo_list in self._pin_dict.items():\n            new_geo_list = []\n            for geo in geo_list:\n                if isinstance(geo, BBox):\n                    new_geo_list.append(geo.transform(loc=loc, orient=orient, unit_mode=True))\n                else:\n                    new_geo_list.append(geo.transform(grid, loc=loc, orient=orient, unit_mode=True))\n            new_pin_dict[lay] = new_geo_list\n\n        return Port(self._term_name, new_pin_dict, label=self._label)\n\n\nclass TrackManager(object):\n    \"\"\"A class that makes it easy to compute track locations.\n\n    This class provides many helper methods for computing track locations and spacing when\n    each track could have variable width.  
All methods in this class accepts a \"track_type\",\n    which is either a string in the track dictionary or an integer representing the track\n    width.\n\n    Parameters\n    ----------\n    grid : RoutingGrid\n        the RoutingGrid object.\n    tr_widths : Dict[str, Dict[int, int]]\n        dictionary from wire types to its width on each layer.\n    tr_spaces : Dict[Union[str, Tuple[str, str]], Dict[int, Union[float, int]]]\n        dictionary from wire types to its spaces on each layer.\n    **kwargs :\n        additional options.\n    \"\"\"\n\n    def __init__(self,\n                 grid,  # type: RoutingGrid\n                 tr_widths,  # type: Dict[str, Dict[int, int]]\n                 tr_spaces,  # type: Dict[Union[str, Tuple[str, str]], Dict[int, Union[float, int]]]\n                 **kwargs\n                 ):\n        # type: (...) -> None\n        half_space = kwargs.get('half_space', False)\n\n        self._grid = grid\n        self._tr_widths = tr_widths\n        self._tr_spaces = tr_spaces\n        self._half_space = half_space\n\n    @property\n    def grid(self):\n        # type: () -> RoutingGrid\n        return self._grid\n\n    @property\n    def half_space(self):\n        # type: () -> bool\n        return self._half_space\n\n    def get_width(self, layer_id, track_type):\n        # type: (int, Union[str, int]) -> int\n        \"\"\"Returns the track width.\n\n        Parameters\n        ----------\n        layer_id : int\n            the track layer ID.\n        track_type : Union[str, int]\n            the track type.\n        \"\"\"\n        if isinstance(track_type, int):\n            return track_type\n        if track_type not in self._tr_widths:\n            return 1\n        return self._tr_widths[track_type].get(layer_id, 1)\n\n    def get_space(self,  # type: TrackManager\n                  layer_id,  # type: int\n                  type_tuple,  # type: Union[str, int, Tuple[Union[str, int], Union[str, int]]]\n                 
 **kwargs):\n        # type: (...) -> Union[int, float]\n        \"\"\"Returns the track spacing.\n\n        Parameters\n        ----------\n        layer_id : int\n            the track layer ID.\n        type_tuple : Union[str, int, Tuple[Union[str, int], Union[str, int]]]\n            If a single track type is given, will return the minimum spacing needed around that\n            track type.  If a tuple of two types are given, will return the specific spacing\n            between those two track types if specified.  Otherwise, returns the maximum of all the\n            valid spacing.\n        **kwargs:\n            optional parameters.\n        \"\"\"\n        half_space = kwargs.get('half_space', self._half_space)\n        sp_override = kwargs.get('sp_override', None)\n\n        if isinstance(type_tuple, tuple):\n            # if two specific wires are given, first check if any specific rules exist\n            ans = self._get_space_from_tuple(layer_id, type_tuple, sp_override)\n            if ans is not None:\n                return ans\n            ans = self._get_space_from_tuple(layer_id, type_tuple, self._tr_spaces)\n            if ans is not None:\n                return ans\n            # no specific rules, so return max of wire spacings.\n            ans = 0\n            for wtype in type_tuple:\n                cur_space = self._get_space_from_type(layer_id, wtype, sp_override)\n                if cur_space is None:\n                    cur_space = self._get_space_from_type(layer_id, wtype, self._tr_spaces)\n                if cur_space is None:\n                    cur_space = 0\n                cur_width = self.get_width(layer_id, wtype)\n                ans = max(ans, cur_space, self._grid.get_num_space_tracks(layer_id, cur_width,\n                                                                          half_space=half_space))\n            return ans\n        else:\n            cur_space = self._get_space_from_type(layer_id, type_tuple, 
sp_override)\n            if cur_space is None:\n                cur_space = self._get_space_from_type(layer_id, type_tuple, self._tr_spaces)\n            if cur_space is None:\n                cur_space = 0\n            cur_width = self.get_width(layer_id, type_tuple)\n            return max(cur_space, self._grid.get_num_space_tracks(layer_id, cur_width,\n                                                                  half_space=half_space))\n\n    @classmethod\n    def _get_space_from_tuple(cls, layer_id, ntup, sp_dict):\n        if sp_dict is not None:\n            if ntup in sp_dict:\n                return sp_dict[ntup].get(layer_id, None)\n            ntup = (ntup[1], ntup[0])\n            if ntup in sp_dict:\n                return sp_dict[ntup].get(layer_id, None)\n        return None\n\n    @classmethod\n    def _get_space_from_type(cls, layer_id, wtype, sp_dict):\n        if sp_dict is None:\n            return None\n        if wtype in sp_dict:\n            test = sp_dict[wtype]\n        else:\n            key = (wtype, '')\n            if key in sp_dict:\n                test = sp_dict[key]\n            else:\n                key = ('', wtype)\n                if key in sp_dict:\n                    test = sp_dict[key]\n                else:\n                    test = None\n\n        if test is None:\n            return None\n        return test.get(layer_id, None)\n\n    def get_next_track(self,  # type: TrackManager\n                       layer_id,  # type: int\n                       cur_idx,  # type: Union[float, int]\n                       cur_type,  # type: Union[str, int]\n                       next_type,  # type: Union[str, int]\n                       up=True,  # type: bool\n                       **kwargs):\n        # type: (...) 
-> Union[float, int]\n        \"\"\"Compute the track location of a wire next to a given one.\n\n        Parameters\n        ----------\n        layer_id : int\n            the layer ID.\n        cur_idx : Union[float, int]\n            the current wire track index.\n        cur_type : Union[str, int]\n            the current wire type.\n        next_type : Union[str, int]\n            the next wire type.\n        up : bool\n            True to return the next track index that is larger than cur_idx.\n        **kwargs :\n            optional parameters.\n\n        Returns\n        -------\n        next_int : Union[float, int]\n            the next track index.\n        \"\"\"\n        cur_width = self.get_width(layer_id, cur_type)\n        next_width = self.get_width(layer_id, next_type)\n        space = self.get_space(layer_id, (cur_type, next_type), **kwargs)\n        if up:\n            par_test = int(round(2 * cur_idx + 2 * space + cur_width + next_width))\n        else:\n            par_test = int(round(2 * cur_idx - 2 * space - cur_width - next_width))\n\n        return par_test // 2 if par_test % 2 == 0 else par_test / 2\n\n    def place_wires(self,  # type: TrackManager\n                    layer_id,  # type: int\n                    type_list,  # type: Sequence[Union[str, int]]\n                    start_idx=0,  # type: Union[float, int]\n                    **kwargs):\n        # type: (...) 
-> Tuple[Union[float, int], List[Union[float, int]]]\n        \"\"\"Place the given wires next to each other.\n\n        Parameters\n        ----------\n        layer_id : int\n            the layer of the tracks.\n        type_list : Sequence[Union[str, int]]\n            list of wire types.\n        start_idx : Union[float, int]\n            the starting track index.\n        **kwargs:\n            optional parameters for get_num_space_tracks() method of RoutingGrid.\n\n        Returns\n        -------\n        num_tracks : Union[float, int]\n            number of tracks used.\n        locations : List[Union[float, int]]\n            the center track index of each wire.\n        \"\"\"\n        if not type_list:\n            return 0, []\n\n        prev_type = type_list[0]\n        w0 = self.get_width(layer_id, prev_type)\n        par_test = int(round(2 * start_idx + w0 - 1))\n        mid_idx = par_test // 2 if par_test % 2 == 0 else par_test / 2\n        ans = [mid_idx]\n        for idx in range(1, len(type_list)):\n            ans.append(self.get_next_track(layer_id, ans[-1], type_list[idx - 1],\n                                           type_list[idx], up=True, **kwargs))\n\n        w1 = self.get_width(layer_id, type_list[-1])\n        par_test = int(round(w0 + w1 + 2 * (ans[-1] - ans[0])))\n        ntr = par_test // 2 if par_test % 2 == 0 else par_test / 2\n\n        return ntr, ans\n\n    @classmethod\n    def _get_align_delta(cls, tot_ntr, num_used, alignment):\n        if alignment == -1 or num_used == tot_ntr:\n            # we already aligned to left\n            return 0\n        elif alignment == 0:\n            # center tracks\n            delta_htr = int((tot_ntr - num_used) * 2) // 2\n            return delta_htr / 2 if delta_htr % 2 == 1 else delta_htr // 2\n        elif alignment == 1:\n            # align to right\n            return tot_ntr - num_used\n        else:\n            raise ValueError('Unknown alignment code: %d' % alignment)\n\n    
def align_wires(self,  # type: TrackManager\n                    layer_id,  # type: int\n                    type_list,  # type: Sequence[Union[str, int]]\n                    tot_ntr,  # type: Union[float, int]\n                    alignment=0,  # type: int\n                    start_idx=0,  # type: Union[float, int]\n                    **kwargs):\n        # type: (...) -> List[Union[float, int]]\n        \"\"\"Place the given wires in the given space with the specified alignment.\n\n        Parameters\n        ----------\n        layer_id : int\n            the layer of the tracks.\n        type_list : Sequence[Union[str, int]]\n            list of wire types.\n        tot_ntr : Union[float, int]\n            total available space in number of tracks.\n        alignment : int\n            If alignment == -1, will \"left adjust\" the wires (left is the lower index direction).\n            If alignment == 0, will center the wires in the middle.\n            If alignment == 1, will \"right adjust\" the wires.\n        start_idx : Union[float, int]\n            the starting track index.\n        **kwargs:\n            optional parameters for place_wires().\n\n        Returns\n        -------\n        locations : List[Union[float, int]]\n            the center track index of each wire.\n        \"\"\"\n        num_used, idx_list = self.place_wires(layer_id, type_list, start_idx=start_idx, **kwargs)\n        if num_used > tot_ntr:\n            raise ValueError('Given tracks occupy more space than given.')\n\n        delta = self._get_align_delta(tot_ntr, num_used, alignment)\n        return [idx + delta for idx in idx_list]\n\n    def spread_wires(self,  # type: TrackManager\n                     layer_id,  # type: int\n                     type_list,  # type: Sequence[Union[str, int]]\n                     tot_ntr,  # type: Union[float, int]\n                     sp_type,  # type: Union[str, int, Tuple[Union[str, int], Union[str, int]]]\n                     
alignment=0,  # type: int\n                     start_idx=0,  # type: Union[float, int]\n                     max_sp=10000,  # type: int\n                     sp_override=None,\n                     ):\n        # type: (...) -> List[Union[float, int]]\n        \"\"\"Spread out the given wires in the given space.\n\n        This method tries to spread out wires by increasing the space around the given\n        wire/combination of wires.\n\n        Parameters\n        ----------\n        layer_id : int\n            the layer of the tracks.\n        type_list : Sequence[Union[str, int]]\n            list of wire types.\n        tot_ntr : Union[float, int]\n            total available space in number of tracks.\n        sp_type : Union[str, Tuple[str, str]]\n            The space to increase.\n        alignment : int\n            If alignment == -1, will \"left adjust\" the wires (left is the lower index direction).\n            If alignment == 0, will center the wires in the middle.\n            If alignment == 1, will \"right adjust\" the wires.\n        start_idx : Union[float, int]\n            the starting track index.\n        max_sp : int\n            maximum space.\n        sp_override :\n            tracking spacing override dictionary.\n\n        Returns\n        -------\n        locations : List[Union[float, int]]\n            the center track index of each wire.\n        \"\"\"\n        if not sp_override:\n            sp_override = {sp_type: {layer_id: 0}}\n        else:\n            sp_override = sp_override.copy()\n            sp_override[sp_type] = {layer_id: 0}\n        cur_sp = int(round(2 * self.get_space(layer_id, sp_type)))\n        bin_iter = BinaryIterator(cur_sp, None)\n        while bin_iter.has_next():\n            new_sp = bin_iter.get_next()\n            if new_sp > 2 * max_sp:\n                break\n            sp_override[sp_type][layer_id] = new_sp / 2 if new_sp % 2 == 1 else new_sp // 2\n            tmp = self.place_wires(layer_id, 
type_list, start_idx=start_idx,\n                                   sp_override=sp_override)\n            if tmp[0] > tot_ntr:\n                bin_iter.down()\n            else:\n                bin_iter.save_info(tmp)\n                bin_iter.up()\n\n        if bin_iter.get_last_save_info() is None:\n            raise ValueError('No solution found.')\n\n        num_used, idx_list = bin_iter.get_last_save_info()\n        delta = self._get_align_delta(tot_ntr, num_used, alignment)\n        return [idx + delta for idx in idx_list]\n"
  },
  {
    "path": "bag/layout/routing/fill.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This module defines classes that provides automatic fill utility on a grid.\n\"\"\"\n\nfrom typing import TYPE_CHECKING, Optional, Union, List, Tuple, Any, Generator\n\nfrom rtree.index import Index, Property\n\nfrom ...layout.util import BBox\nfrom ...util.search import BinaryIterator, minimize_cost_golden\n\nif TYPE_CHECKING:\n    from ...layout.util import BBoxArray\n\n    from .grid import RoutingGrid\n\n\nclass RectIndex(object):\n    \"\"\"A R-tree that stores all tracks on a layer.\"\"\"\n\n    def __init__(self, resolution, basename=None, overwrite=False):\n        # type: (float, Optional[str], bool) -> None\n        self._res = resolution\n        self._cnt = 0\n        if basename is None:\n            self._index = Index(interleaved=True)\n        else:\n            p = Property(overwrite=overwrite)\n            self._index = Index(basename, interleaved=True, properties=p)\n\n    @property\n    def bound_box(self):\n        # type: () -> BBox\n        xl, yb, xr, yt = self._index.bounds\n        return BBox(int(xl), int(yb), int(xr), int(yt), self._res, unit_mode=True)\n\n    def close(self):\n        self._index.close()\n\n    def record_box(self, box, dx, dy):\n        # type: (BBox, int, int) -> None\n        \"\"\"Record the given BBox.\"\"\"\n        sp_box = box.expand(dx=dx, dy=dy, unit_mode=True)\n        bnds = sp_box.get_bounds(unit_mode=True)\n        obj = (box.left_unit, box.bottom_unit, box.right_unit, box.top_unit, dx, dy)\n        self._index.insert(self._cnt, bnds, obj=obj)\n        self._cnt += 1\n\n    def rect_iter(self):\n        # type: () -> Generator[Tuple[BBox, int, int], None, None]\n        for xl, yb, xr, yt, sdx, sdy in self._index.intersection(self._index.bounds, objects='raw'):\n            box_real = BBox(xl, yb, xr, yt, self._res, unit_mode=True)\n            yield box_real, sdx, sdy\n\n    def intersection_iter(self, box, dx=0, dy=0):\n        # type: (BBox, int, int) -> 
Generator[BBox, None, None]\n        \"\"\"Finds all bounding box that intersects the given box.\"\"\"\n        res = self._res\n        test_box = box.expand(dx=dx, dy=dy, unit_mode=True)\n        box_iter = self._index.intersection(test_box.get_bounds(unit_mode=True), objects='raw')\n        for xl, yb, xr, yt, sdx, sdy in box_iter:\n            box_real = BBox(xl, yb, xr, yt, res, unit_mode=True)\n            box_sp = box_real.expand(dx=sdx, dy=sdy, unit_mode=True)\n            if box_sp.overlaps(box) or test_box.overlaps(box_real):\n                yield box_real.expand(dx=max(dx, sdx), dy=max(dy, sdy), unit_mode=True)\n\n    def intersection_rect_iter(self, box):\n        # type: (BBox) -> Generator[BBox, None, None]\n        \"\"\"Finds all bounding box that intersects the given box.\"\"\"\n        res = self._res\n        box_iter = self._index.intersection(box.get_bounds(unit_mode=True), objects='raw')\n        for xl, yb, xr, yt, sdx, sdy in box_iter:\n            yield BBox(xl, yb, xr, yt, res, unit_mode=True)\n\n\nclass UsedTracks(object):\n    \"\"\"A R-tree that stores all tracks in a template.\n    \"\"\"\n\n    def __init__(self, save_file_basename=None, overwrite=False):\n        # type: (Optional[str], bool) -> None\n        self._idx_table = {}\n        self._save_file_basename = save_file_basename\n        self._overwrite = overwrite\n\n    def __iter__(self):\n        return self._idx_table.keys()\n\n    def get_track_bbox(self, layer_id):\n        # type: (int) -> BBox\n        if layer_id not in self._idx_table:\n            return BBox.get_invalid_bbox()\n        return self._idx_table[layer_id].bound_box\n\n    def track_box_iter(self):\n        # type: () -> Generator[Tuple[int, BBox], None, None]\n        for layer_id, rect_idx in self._idx_table.items():\n            yield layer_id, rect_idx.bound_box\n\n    def record_box(self, layer_id, box, dx, dy, res):\n        # type: (int, BBox, int, int, float) -> None\n        if layer_id not in 
self._idx_table:\n            if self._save_file_basename is None:\n                basename = None\n            else:\n                basename = self._save_file_basename + ('_%d' % layer_id)\n            index = self._idx_table[layer_id] = RectIndex(res, basename, self._overwrite)\n        else:\n            index = self._idx_table[layer_id]\n        index.record_box(box, dx, dy)\n\n    def close(self):\n        for index in self._idx_table.values():\n            index.close()\n\n    def record_rect(self, grid, layer_name, box_arr, dx=-1, dy=-1):\n        # type: (RoutingGrid, Union[Tuple[str, str], str], BBoxArray, int, int) -> Optional[int]\n        \"\"\"Record the given bounding box array.  Returns the added layer ID.\"\"\"\n        tech_info = grid.tech_info\n\n        if isinstance(layer_name, tuple):\n            # TODO: find more process-portable fix?\n            if layer_name[1] == 'exclude':\n                return None\n            layer_name = layer_name[0]\n        try:\n            layer_id = tech_info.get_layer_id(layer_name)\n        except ValueError:\n            return None\n\n        if layer_id not in grid:\n            return None\n\n        if layer_id not in self._idx_table:\n            if self._save_file_basename is None:\n                basename = None\n            else:\n                basename = self._save_file_basename + ('_%d' % layer_id)\n            index = self._idx_table[layer_id] = RectIndex(grid.resolution, basename,\n                                                          self._overwrite)\n        else:\n            index = self._idx_table[layer_id]\n\n        layer_type = tech_info.get_layer_type(layer_name)\n        if grid.get_direction(layer_id) == 'x':\n            w = box_arr.base.height_unit\n            dx0 = tech_info.get_min_line_end_space(layer_type, w, unit_mode=True)\n            dy0 = tech_info.get_min_space(layer_type, w, unit_mode=True, same_color=False)\n        else:\n            w = 
box_arr.base.width_unit\n            dy0 = tech_info.get_min_line_end_space(layer_type, w, unit_mode=True)\n            dx0 = tech_info.get_min_space(layer_type, w, unit_mode=True, same_color=False)\n\n        if dx < 0:\n            dx = dx0\n        if dy < 0:\n            dy = dy0\n\n        for box in box_arr:\n            index.record_box(box, dx, dy)\n\n        return layer_id\n\n    def all_rect_iter(self):\n        # type: () -> Generator[Tuple[int, BBox, int, int], None, None]\n        for layer_id, index in self._idx_table.items():\n            for box, dx, dy, in index.rect_iter():\n                yield layer_id, box, dx, dy\n\n    def intersection_rect_iter(self, layer_id, box):\n        # type: (int, BBox) -> Generator[BBox, None, None]\n        \"\"\"Finds all bounding box that intersects the given box.\"\"\"\n        if layer_id in self._idx_table:\n            yield from self._idx_table[layer_id].intersection_rect_iter(box)\n\n    def blockage_iter(self, layer_id, test_box, spx=0, spy=0):\n        # type: (int, BBox, int, int) -> Generator[BBox, None, None]\n        if layer_id in self._idx_table:\n            yield from self._idx_table[layer_id].intersection_iter(test_box, dx=spx, dy=spy)\n\n\ndef fill_symmetric_const_space(area, sp_max, n_min, n_max, offset=0):\n    # type: (int, int, int, int, int) -> List[Tuple[int, int]]\n    \"\"\"Fill the given 1-D area given maximum space spec alone.\n\n    The method draws the minimum number of fill blocks needed to satisfy maximum spacing spec.\n    The given area is filled with the following properties:\n\n    1. all spaces are as close to the given space as possible (differ by at most 1),\n       without exceeding it.\n    2. the filled area is as uniform as possible.\n    3. the filled area is symmetric about the center.\n    4. 
fill is drawn as much as possible given the above constraints.\n\n    fill is drawn such that space blocks abuts both area boundaries.\n\n    Parameters\n    ----------\n    area : int\n        the 1-D area to fill.\n    sp_max : int\n        the maximum space.\n    n_min : int\n        minimum fill length.\n    n_max : int\n        maximum fill length\n    offset : int\n        the fill area starting coordinate.\n\n    Returns\n    -------\n    fill_intv : List[Tuple[int, int]]\n        list of fill intervals.\n    \"\"\"\n    if n_min > n_max:\n        raise ValueError('min fill length = %d > %d = max fill length' % (n_min, n_max))\n\n    # suppose we draw N fill blocks, then the filled area is A - (N + 1) * sp.\n    # therefore, to maximize fill, with A and sp given, we need to minimize N.\n    # since N = (A - sp) / (f + sp), where f is length of the fill, this tells\n    # us we want to try filling with max block.\n    # so we calculate the maximum number of fill blocks we'll use if we use\n    # largest fill block.\n    num_fill = -(-(area - sp_max) // (n_max + sp_max))\n    if num_fill == 0:\n        # we don't need fill; total area is less than sp_max.\n        return []\n\n    # at this point, using (num_fill - 1) max blocks is not enough, but num_fill\n    # max blocks either fits perfectly or exceeds area.\n\n    # calculate the fill block length if we use num_fill fill blocks, and sp_max\n    # between blocks.\n    blk_len = (area - (num_fill + 1) * sp_max) // num_fill\n    if blk_len >= n_min:\n        # we can draw fill using num_fill fill blocks.\n        return fill_symmetric_helper(area, num_fill, sp_max, offset=offset, inc_sp=False,\n                                     invert=False, fill_on_edge=False, cyclic=False)[0]\n\n    # trying to draw num_fill fill blocks with sp_max between them results in fill blocks\n    # that are too small.  
This means we need to reduce the space between fill blocks.\n    sp_max, remainder = divmod(area - num_fill * n_min, num_fill + 1)\n    # we can achieve the new sp_max using fill with length n_min or n_min + 1.\n    if n_max > n_min or remainder == 0:\n        # if everything divides evenly or we can use two different fill lengths,\n        # then we're done.\n        return fill_symmetric_helper(area, num_fill, sp_max, offset=offset, inc_sp=False,\n                                     invert=False, fill_on_edge=False, cyclic=False)[0]\n    # If we're here, then we must use only one fill length\n    # fill by inverting fill/space to try to get only one fill length\n    sol, num_diff_sp = fill_symmetric_helper(area, num_fill + 1, n_max, offset=offset, inc_sp=False,\n                                             invert=True, fill_on_edge=True, cyclic=False)\n    if num_diff_sp == 0:\n        # we manage to fill using only one fill length\n        return sol\n\n    # If we're here, that means num_fill + 1 is even.  So using num_fill + 2 will\n    # guarantee solution.\n    return fill_symmetric_helper(area, num_fill + 2, n_max, offset=offset, inc_sp=False,\n                                 invert=True, fill_on_edge=True, cyclic=False)[0]\n\n\ndef fill_symmetric_min_density_info(area, targ_area, n_min, n_max, sp_min,\n                                    sp_max=None, fill_on_edge=True, cyclic=False):\n    # type: (int, int, int, int, int, Optional[int], bool, bool) -> Tuple[Tuple[Any, ...], bool]\n    \"\"\"Fill the given 1-D area as little as possible.\n\n    Compute fill location such that the given area is filled with the following properties:\n\n    1. the area is as uniform as possible.\n    2. the area is symmetric with respect to the center\n    3. all fill blocks have lengths between n_min and n_max.\n    4. 
all fill blocks are at least sp_min apart.\n\n    Parameters\n    ----------\n    area : int\n        total number of space we need to fill.\n    targ_area : int\n        target minimum fill area.  If not achievable, will do the best that we can.\n    n_min : int\n        minimum length of the fill block.  Must be less than or equal to n_max.\n    n_max : int\n        maximum length of the fill block.\n    sp_min : int\n        minimum space between each fill block.\n    sp_max : Optional[int]\n        if given, make sure space between blocks does not exceed this value.\n        Must be greater than sp_min\n    fill_on_edge : bool\n        If True, we put fill blocks on area boundary.  Otherwise, we put space block on\n        area boundary.\n    cyclic : bool\n        If True, we assume we're filling in a cyclic area (it wraps around).\n\n    Returns\n    -------\n    info : Tuple[Any, ...]\n        the fill information tuple.\n    invert : bool\n        True if space/fill is inverted.\n    \"\"\"\n    # first, fill as much as possible\n    max_result = fill_symmetric_max_density_info(area, targ_area, n_min, n_max, sp_min,\n                                                 sp_max=sp_max, fill_on_edge=fill_on_edge,\n                                                 cyclic=cyclic)\n\n    fill_area, nfill_opt = max_result[0][:2]\n    if fill_area <= targ_area:\n        # we cannot/barely meet area spec; return max result\n        return max_result\n\n    # now, reduce fill by doing binary search on n_max\n    n_max_iter = BinaryIterator(n_min, n_max)\n    while n_max_iter.has_next():\n        n_max_cur = n_max_iter.get_next()\n        try:\n            info, invert = fill_symmetric_max_num_info(area, nfill_opt, n_min, n_max_cur, sp_min,\n                                                       fill_on_edge=fill_on_edge, cyclic=cyclic)\n            fill_area_cur = area - info[0] if invert else info[0]\n            if invert:\n                _, sp_cur = 
_get_min_max_blk_len(info)\n            else:\n                sp_cur = sp_min if info[1][2] == 0 else sp_min + 1\n            if fill_area_cur >= targ_area and (sp_max is None or sp_cur <= sp_max):\n                # both specs passed\n                n_max_iter.save_info((info, invert))\n                n_max_iter.down()\n            else:\n                # reduce n_max too much\n                n_max_iter.up()\n\n        except ValueError:\n            # get here if n_min == n_max and there's no solution.\n            n_max_iter.up()\n\n    last_save = n_max_iter.get_last_save_info()\n    if last_save is None:\n        # no solution, return max result\n        return max_result\n\n    # return new minimum solution\n    info, invert = last_save\n    fill_area = area - info[0] if invert else info[0]\n    return (fill_area, nfill_opt, info[1]), invert\n\n\ndef fill_symmetric_max_density_info(area, targ_area, n_min, n_max, sp_min,\n                                    sp_max=None, fill_on_edge=True, cyclic=False):\n    # type: (int, int, int, int, int, Optional[int], bool, bool) -> Tuple[Tuple[Any, ...], bool]\n    \"\"\"Fill the given 1-D area as much as possible.\n\n    Compute fill location such that the given area is filled with the following properties:\n\n    1. the area is as uniform as possible.\n    2. the area is symmetric with respect to the center\n    3. all fill blocks have lengths between n_min and n_max.\n    4. all fill blocks are at least sp_min apart.\n\n    Parameters\n    ----------\n    area : int\n        total number of space we need to fill.\n    targ_area : int\n        target minimum fill area.  If not achievable, will do the best that we can.\n    n_min : int\n        minimum length of the fill block.  
Must be less than or equal to n_max.\n    n_max : int\n        maximum length of the fill block.\n    sp_min : int\n        minimum space between each fill block.\n    sp_max : Optional[int]\n        if given, make sure space between blocks does not exceed this value.\n        Must be greater than sp_min\n    fill_on_edge : bool\n        If True, we put fill blocks on area boundary.  Otherwise, we put space block on\n        area boundary.\n    cyclic : bool\n        If True, we assume we're filling in a cyclic area (it wraps around).\n\n    Returns\n    -------\n    info : Tuple[Any, ...]\n        the fill information tuple.\n    invert : bool\n        True if space/fill is inverted.\n    \"\"\"\n\n    # min area test\n    nfill_min = 1\n    try:\n        try:\n            fill_symmetric_max_num_info(area, nfill_min, n_min, n_max, sp_min,\n                                        fill_on_edge=fill_on_edge, cyclic=cyclic)\n        except (NoFillAbutEdgeError, NoFillChoiceError):\n            # we need at least 2 fiils\n            nfill_min = 2\n            fill_symmetric_max_num_info(area, nfill_min, n_min, n_max, sp_min,\n                                        fill_on_edge=fill_on_edge, cyclic=cyclic)\n    except InsufficientAreaError:\n        # cannot fill at all\n        info, invert = fill_symmetric_max_num_info(area, 0, n_min, n_max, sp_min,\n                                                   fill_on_edge=fill_on_edge, cyclic=cyclic)\n        return (0, 0, info[1]), invert\n\n    # fill area first monotonically increases with number of fill blocks, then monotonically\n    # decreases (as we start adding more space than fill).  
Therefore, a golden section search\n    # can be done on the number of fill blocks to determine the optimum.\n    def golden_fun(nfill):\n        try:\n            info2, invert2 = fill_symmetric_max_num_info(area, nfill, n_min, n_max, sp_min,\n                                                         fill_on_edge=fill_on_edge, cyclic=cyclic)\n        except ValueError:\n            return 0\n        if invert2:\n            return area - info2[0]\n        else:\n            return info2[0]\n\n    if sp_max is not None:\n        if sp_max <= sp_min:\n            raise ValueError('Cannot have sp_max = %d <= %d = sp_min' % (sp_max, sp_min))\n\n        # find minimum nfill that meets sp_max spec\n\n        def golden_fun2(nfill):\n            try:\n                info2, invert2 = fill_symmetric_max_num_info(area, nfill, n_min, n_max, sp_min,\n                                                             fill_on_edge=fill_on_edge,\n                                                             cyclic=cyclic)\n                if invert2:\n                    _, sp_cur = _get_min_max_blk_len(info2)\n                else:\n                    sp_cur = sp_min if info2[1][2] == 0 else sp_min + 1\n                return -sp_cur\n            except ValueError:\n                return -sp_max - 1\n\n        min_result = minimize_cost_golden(golden_fun2, -sp_max, offset=nfill_min, maxiter=None)\n        if min_result.x is None:\n            # try even steps\n            min_result = minimize_cost_golden(golden_fun2, -sp_max, offset=nfill_min,\n                                              step=2, maxiter=None)\n            nfill_min = min_result.x\n            if nfill_min is None:\n                # should never get here...\n                raise ValueError('No solution for sp_max = %d' % sp_max)\n        else:\n            nfill_min = min_result.x\n\n    min_result = minimize_cost_golden(golden_fun, targ_area, offset=nfill_min, maxiter=None)\n    nfill_opt = min_result.x\n    if 
nfill_opt is None:\n        nfill_opt = min_result.xmax\n    info, invert = fill_symmetric_max_num_info(area, nfill_opt, n_min, n_max, sp_min,\n                                               fill_on_edge=fill_on_edge, cyclic=cyclic)\n    fill_area = area - info[0] if invert else info[0]\n    return (fill_area, nfill_opt, info[1]), invert\n\n\ndef fill_symmetric_max_density(area,  # type: int\n                               targ_area,  # type: int\n                               n_min,  # type: int\n                               n_max,  # type: int\n                               sp_min,  # type: int\n                               offset=0,  # type: int\n                               sp_max=None,  # type: Optional[int]\n                               fill_on_edge=True,  # type: bool\n                               cyclic=False,  # type: bool\n                               ):\n    # type: (...) -> Tuple[List[Tuple[int, int]], int]\n    \"\"\"Fill the given 1-D area as much as possible.\n\n    Compute fill location such that the given area is filled with the following properties:\n\n    1. the area is as uniform as possible.\n    2. the area is symmetric with respect to the center\n    3. all fill blocks have lengths between n_min and n_max.\n    4. all fill blocks are at least sp_min apart.\n\n    Parameters\n    ----------\n    area : int\n        total number of space we need to fill.\n    targ_area : int\n        target minimum fill area.  If not achievable, will do the best that we can.\n    n_min : int\n        minimum length of the fill block.  
Must be less than or equal to n_max.\n    n_max : int\n        maximum length of the fill block.\n    sp_min : int\n        minimum space between each fill block.\n    offset : int\n        the starting coordinate of the total interval.\n    sp_max : Optional[int]\n        if given, make sure space between blocks does not exceed this value.\n        Must be greater than sp_min\n    fill_on_edge : bool\n        If True, we put fill blocks on area boundary.  Otherwise, we put space block on\n        area boundary.\n    cyclic : bool\n        If True, we assume we're filling in a cyclic area (it wraps around).\n\n    Returns\n    -------\n    fill_interval : List[Tuple[int, int]]\n        a list of [start, stop) intervals that needs to be filled.\n    fill_area : int\n        total filled area.  May or may not meet minimum density requirement.\n    \"\"\"\n    max_result = fill_symmetric_max_density_info(area, targ_area, n_min, n_max, sp_min,\n                                                 sp_max=sp_max, fill_on_edge=fill_on_edge,\n                                                 cyclic=cyclic)\n    (fill_area, _, args), invert = max_result\n    return fill_symmetric_interval(*args, offset=offset, invert=invert)[0], fill_area\n\n\nclass InsufficientAreaError(ValueError):\n    pass\n\n\nclass FillTooSmallError(ValueError):\n    pass\n\n\nclass NoFillAbutEdgeError(ValueError):\n    pass\n\n\nclass NoFillChoiceError(ValueError):\n    pass\n\n\nclass EmptyRegionError(ValueError):\n    pass\n\n\ndef fill_symmetric_max_num_info(tot_area, nfill, n_min, n_max, sp_min,\n                                fill_on_edge=True, cyclic=False):\n    # type: (int, int, int, int, int, bool, bool) -> Tuple[Tuple[Any, ...], bool]\n    \"\"\"Fill the given 1-D area as much as possible with given number of fill blocks.\n\n    Compute fill location such that the given area is filled with the following properties:\n\n    1. the area is as uniform as possible.\n    2. 
the area is symmetric with respect to the center\n    3. the area is filled as much as possible with exactly nfill blocks,\n       with lengths between n_min and n_max.\n    4. all fill blocks are at least sp_min apart.\n\n    Parameters\n    ----------\n    tot_area : int\n        total number of space we need to fill.\n    nfill : int\n        number of fill blocks to draw.\n    n_min : int\n        minimum length of the fill block.  Must be less than or equal to n_max.\n    n_max : int\n        maximum length of the fill block.\n    sp_min : int\n        minimum space between each fill block.\n    fill_on_edge : bool\n        If True, we put fill blocks on area boundary.  Otherwise, we put space block on\n        area boundary.\n    cyclic : bool\n        If True, we assume we're filling in a cyclic area (it wraps around).\n\n    Returns\n    -------\n    info : Tuple[Any, ...]\n        the fill information tuple.\n    invert : bool\n        True if space/fill is inverted.\n    \"\"\"\n    # error checking\n    if nfill < 0:\n        raise ValueError('nfill = %d < 0' % nfill)\n    if n_min > n_max:\n        raise ValueError('n_min = %d > %d = n_max' % (n_min, n_max))\n    if n_min <= 0:\n        raise ValueError('n_min = %d <= 0' % n_min)\n\n    if nfill == 0:\n        # no fill at all\n        return _fill_symmetric_info(tot_area, 0, tot_area, inc_sp=False,\n                                    fill_on_edge=False, cyclic=False), False\n\n    # check no solution\n    sp_delta = 0 if cyclic else (-1 if fill_on_edge else 1)\n    nsp = nfill + sp_delta\n    if n_min * nfill + nsp * sp_min > tot_area:\n        raise InsufficientAreaError('Cannot draw %d fill blocks with n_min = %d' % (nfill, n_min))\n\n    # first, try drawing nfill blocks without block length constraint.\n    # may throw exception if no solution\n    info = _fill_symmetric_info(tot_area, nfill, sp_min, inc_sp=True,\n                                fill_on_edge=fill_on_edge, cyclic=cyclic)\n    bmin, 
bmax = _get_min_max_blk_len(info)\n    if bmin < n_min:\n        # could get here if cyclic = True, fill_on_edge = True, n_min is odd\n        # in this case actually no solution\n        raise FillTooSmallError('Cannot draw %d fill blocks with n_min = %d' % (nfill, n_min))\n    if bmax <= n_max:\n        # we satisfy block length constraint, just return\n        return info, False\n\n    # we broke maximum block length constraint, so we flip\n    # space and fill to have better control on fill length\n    if nsp == 0 and n_max != tot_area and n_max - 1 != tot_area:\n        # we get here only if nfill = 1 and fill_on_edge is True.\n        # In this case there's no way to draw only one fill and abut both edges\n        raise NoFillAbutEdgeError('Cannot draw only one fill abutting both edges.')\n    info = _fill_symmetric_info(tot_area, nsp, n_max, inc_sp=False,\n                                fill_on_edge=not fill_on_edge, cyclic=cyclic)\n    num_diff_sp = info[1][2]\n    if num_diff_sp > 0 and n_min == n_max:\n        # no solution with same fill length, but we must have same fill length everywhere.\n        raise NoFillChoiceError('Cannot draw %d fill blocks with '\n                                'n_min = n_max = %d' % (nfill, n_min))\n    return info, True\n\n\ndef _fill_symmetric_info(tot_area, num_blk_tot, sp, inc_sp=True, fill_on_edge=True, cyclic=False):\n    # type: (int, int, int, bool, bool, bool) -> Tuple[int, Tuple[Any, ...]]\n    \"\"\"Calculate symmetric fill information.\n\n    This method computes fill information without generating fill interval list.  This makes\n    it fast to explore various fill settings.  See fill_symmetric_helper() to see a description\n    of the fill algorithm.\n\n    Parameters\n    ----------\n    tot_area : int\n        the fill area length.\n    num_blk_tot : int\n        total number of fill blocks to use.\n    sp : int\n        space between blocks.  
We will try our best to keep this spacing constant.\n    inc_sp : bool\n        If True, then we use sp + 1 if necessary.  Otherwise, we use sp - 1\n        if necessary.\n    fill_on_edge : bool\n        If True, we put fill blocks on area boundary.  Otherwise, we put space block on\n        area boundary.\n    cyclic : bool\n        If True, we assume we're filling in a cyclic area (it wraps around).\n\n    Returns\n    -------\n    fill_area : int\n        total filled area.\n    args : Tuple[Any, ...]\n        input arguments to _fill_symmetric_interval()\n    \"\"\"\n    # error checking\n    if num_blk_tot < 0:\n        raise ValueError('num_blk_tot = %d < 0' % num_blk_tot)\n\n    adj_sp_sgn = 1 if inc_sp else -1\n    if num_blk_tot == 0:\n        # special case, no fill at all\n        if sp == tot_area:\n            return 0, (tot_area, tot_area, 0, tot_area, 0, 0, 0, 0, -1, tot_area, False, False)\n        elif sp == tot_area - adj_sp_sgn:\n            return 0, (tot_area, tot_area, 1, tot_area, 0, 0, 0, 0, -1, tot_area, False, False)\n        else:\n            raise EmptyRegionError('Cannot have empty region = %d with sp = %d' % (tot_area, sp))\n\n    # determine the number of space blocks\n    if cyclic:\n        num_sp_tot = num_blk_tot\n    else:\n        if fill_on_edge:\n            num_sp_tot = num_blk_tot - 1\n        else:\n            num_sp_tot = num_blk_tot + 1\n\n    # compute total fill area\n    fill_area = tot_area - num_sp_tot * sp\n\n    # find minimum fill length\n    blk_len, num_blk1 = divmod(fill_area, num_blk_tot)\n    # find number of fill intervals\n    if cyclic and fill_on_edge:\n        # if cyclic and fill on edge, number of intervals = number of blocks + 1,\n        # because the interval on the edge double counts.\n        num_blk_interval = num_blk_tot + 1\n    else:\n        num_blk_interval = num_blk_tot\n\n    # find space length on edge, if applicable\n    num_diff_sp = 0\n    sp_edge = sp\n    if cyclic and not 
fill_on_edge and sp_edge % 2 == 1:\n        # edge space must be even.  To fix, we convert space to fill\n        num_diff_sp += 1\n        sp_edge += adj_sp_sgn\n        num_blk1 += -adj_sp_sgn\n        fill_area += -adj_sp_sgn\n        if num_blk1 == num_blk_tot:\n            blk_len += 1\n            num_blk1 = 0\n        elif num_blk1 < 0:\n            blk_len -= 1\n            num_blk1 += num_blk_tot\n\n    mid_blk_len = mid_sp_len = -1\n    # now we have num_blk_tot blocks with length blk0.  We have num_blk1 fill units\n    # remaining that we need to distribute to the fill blocks\n    if num_blk_interval % 2 == 0:\n        # we have even number of fill intervals, so we have a space block in the middle\n        mid_sp_len = sp\n        # test condition for cyclic and fill_on_edge is different than other cases\n        test_val = num_blk1 + blk_len if cyclic and fill_on_edge else num_blk1\n        if test_val % 2 == 1:\n            # we cannot distribute remaining fill units evenly, have to convert to space\n            num_diff_sp += 1\n            mid_sp_len += adj_sp_sgn\n            num_blk1 += -adj_sp_sgn\n            fill_area += -adj_sp_sgn\n            if num_blk1 == num_blk_tot:\n                blk_len += 1\n                num_blk1 = 0\n            elif num_blk1 < 0:\n                blk_len -= 1\n                num_blk1 += num_blk_tot\n        if num_blk1 % 2 == 1:\n            # the only way we get here is if cyclic and fill_on_edge is True.\n            # in this case, we need to add one to fill unit to account\n            # for edge fill double counting.\n            num_blk1 += 1\n\n        # get number of half fill intervals\n        m = num_blk_interval // 2\n    else:\n        # we have odd number of fill intervals, so we have a fill block in the middle\n        mid_blk_len = blk_len\n        if cyclic and fill_on_edge:\n            # special handling for this case, because edge fill block must be even\n            if blk_len % 2 == 0 and 
num_blk1 % 2 == 1:\n                # assign one fill unit to middle block\n                mid_blk_len += 1\n                num_blk1 -= 1\n            elif blk_len % 2 == 1:\n                # edge fill block is odd; we need odd number of fill units so we can\n                # correct this.\n                if num_blk1 % 2 == 0:\n                    # we increment middle fill block to get odd number of fill units\n                    mid_blk_len += 1\n                    num_blk1 -= 1\n                    if num_blk1 < 0:\n                        # we get here only if num_blk1 == 0.  This means middle blk\n                        # borrow one unit from edge block.  So we set num_blk1 to\n                        # num_blk_tot - 2 to make sure rest of the blocks are one\n                        # larger than edge block.\n                        blk_len -= 1\n                        num_blk1 = num_blk_tot - 2\n                    else:\n                        # Add one to account for edge fill double counting.\n                        num_blk1 += 1\n                else:\n                    # Add one to account for edge fill double counting.\n                    num_blk1 += 1\n        elif num_blk1 % 2 == 1:\n            # assign one fill unit to middle block\n            mid_blk_len += 1\n            num_blk1 -= 1\n\n        m = (num_blk_interval - 1) // 2\n\n    if blk_len <= 0:\n        raise InsufficientAreaError('Insufficent area; cannot draw fill with length <= 0.')\n\n    # now we need to distribute the fill units evenly.  
We do so using cumulative modding\n    num_large = num_blk1 // 2\n    num_small = m - num_large\n    if cyclic and fill_on_edge:\n        # if cyclic and fill is on the edge, we need to make sure left-most block is even length\n        if blk_len % 2 == 0:\n            blk1, blk0 = blk_len, blk_len + 1\n            k = num_small\n        else:\n            blk0, blk1 = blk_len, blk_len + 1\n            k = num_large\n    else:\n        # make left-most fill interval be the most frequenct fill length\n        if num_large >= num_small:\n            blk0, blk1 = blk_len, blk_len + 1\n            k = num_large\n        else:\n            blk1, blk0 = blk_len, blk_len + 1\n            k = num_small\n\n    return fill_area, (tot_area, sp, num_diff_sp, sp_edge, blk0, blk1, k, m,\n                       mid_blk_len, mid_sp_len, fill_on_edge, cyclic)\n\n\ndef _get_min_max_blk_len(fill_info):\n    \"\"\"Helper method to get minimum/maximum fill lengths used.\"\"\"\n    blk0, blk1, blkm = fill_info[1][4], fill_info[1][5], fill_info[1][8]\n    if blkm < 0:\n        blkm = blk0\n    return min(blk0, blk1, blkm), max(blk0, blk1, blkm)\n\n\ndef fill_symmetric_interval(tot_area, sp, num_diff_sp, sp_edge, blk0, blk1, k, m, mid_blk_len,\n                            mid_sp_len, fill_on_edge, cyclic, offset=0, invert=False):\n    \"\"\"Helper function, construct interval list from output of _fill_symmetric_info().\n\n    num_diff_sp = number of space blocks that has length different than sp\n    sp_edge = if cyclic and not fill on edge, the edge space length.\n    m = number of half fill blocks.\n    blk1 = length of left-most fill block.\n    blk0 = the second possible fill block length.\n    k = number of half fill blocks with length = blk1.\n    mid_blk_len = if > 0, length of middle fill block.  
This is either blk0 or blk1.\n    \"\"\"\n    ans = []\n    if cyclic:\n        if fill_on_edge:\n            marker = offset - blk1 // 2\n        else:\n            marker = offset - sp_edge // 2\n    else:\n        marker = offset\n    cur_sum = 0\n    prev_sum = 1\n    for fill_idx in range(m):\n        # determine current fill length from cumulative modding result\n        if cur_sum <= prev_sum:\n            cur_len = blk1\n        else:\n            cur_len = blk0\n\n        cur_sp = sp_edge if fill_idx == 0 else sp\n        # record fill/space interval\n        if invert:\n            if fill_on_edge:\n                ans.append((marker + cur_len, marker + cur_sp + cur_len))\n            else:\n                ans.append((marker, marker + cur_sp))\n        else:\n            if fill_on_edge:\n                ans.append((marker, marker + cur_len))\n            else:\n                ans.append((marker + cur_sp, marker + cur_sp + cur_len))\n\n        marker += cur_len + cur_sp\n        prev_sum = cur_sum\n        cur_sum = (cur_sum + k) % m\n\n    # add middle fill or space\n    if mid_blk_len >= 0:\n        # fill in middle\n        if invert:\n            if not fill_on_edge:\n                # we have one more space block before reaching middle block\n                cur_sp = sp_edge if m == 0 else sp\n                ans.append((marker, marker + cur_sp))\n            half_len = len(ans)\n        else:\n            # we don't want to replicate middle fill, so get half length now\n            half_len = len(ans)\n            if fill_on_edge:\n                ans.append((marker, marker + mid_blk_len))\n            else:\n                cur_sp = sp_edge if m == 0 else sp\n                ans.append((marker + cur_sp, marker + cur_sp + mid_blk_len))\n    else:\n        # space in middle\n        if invert:\n            if fill_on_edge:\n                # the last space we added is wrong, we need to remove\n                del ans[-1]\n                marker -= 
sp\n            # we don't want to replicate middle space, so get half length now\n            half_len = len(ans)\n            ans.append((marker, marker + mid_sp_len))\n        else:\n            # don't need to do anything if we're recording blocks\n            half_len = len(ans)\n\n    # now add the second half of the list\n    shift = tot_area + offset * 2\n    for idx in range(half_len - 1, -1, -1):\n        start, stop = ans[idx]\n        ans.append((shift - stop, shift - start))\n\n    return ans, num_diff_sp\n\n\ndef fill_symmetric_helper(tot_area, num_blk_tot, sp, offset=0, inc_sp=True, invert=False,\n                          fill_on_edge=True, cyclic=False):\n    # type: (int, int, int, int, bool, bool, bool, bool) -> Tuple[List[Tuple[int, int]], int]\n    \"\"\"Helper method for all fill symmetric methods.\n\n    This method fills an area with given number of fill blocks such that the space between\n    blocks is equal to the given space.  Other fill_symmetric methods basically transpose\n    the constraints into this problem, with the proper options.\n\n    The solution has the following properties:\n\n    1. it is symmetric about the center.\n    2. it is as uniform as possible.\n    3. it uses at most 3 consecutive values of fill lengths.\n    4. it uses at most 2 consecutive values of space lengths.  If inc_sp is True,\n       we use sp and sp + 1.  If inc_sp is False, we use sp - 1 and sp.  In addition,\n       at most two space blocks have length different than sp.\n\n    Here are all the scenarios that affect the number of different fill/space lengths:\n\n    1. All spaces will be equal to sp under the following condition:\n       i. cyclic is False, and num_blk_tot is odd.\n       ii. cyclic is True, fill_on_edge is True, and num_blk_tot is even.\n       iii. 
cyclic is True, fill_on_edge is False, sp is even, and num_blk_tot is odd.\n\n       In particular, this means if you must have the same space between fill blocks, you\n       can change num_blk_tot by 1.\n    2. The only case where at most 2 space blocks have length different than sp is\n       when cyclic is True, fill_on_edge is False, sp is odd, and num_blk_tot is even.\n    3. In all other cases, at most 1 space block have legnth different than sp.\n    4, The only case where at most 3 fill lengths are used is when cyclic is True,\n       fill_on_edge is True, and num_blk_tot is even,\n\n    Parameters\n    ----------\n    tot_area : int\n        the fill area length.\n    num_blk_tot : int\n        total number of fill blocks to use.\n    sp : int\n        space between blocks.  We will try our best to keep this spacing constant.\n    offset : int\n        the starting coordinate of the area interval.\n    inc_sp : bool\n        If True, then we use sp + 1 if necessary.  Otherwise, we use sp - 1\n        if necessary.\n    invert : bool\n        If True, we return space intervals instead of fill intervals.\n    fill_on_edge : bool\n        If True, we put fill blocks on area boundary.  Otherwise, we put space block on\n        area boundary.\n    cyclic : bool\n        If True, we assume we're filling in a cyclic area (it wraps around).\n\n    Returns\n    -------\n    ans : List[(int, int)]\n        list of fill or space intervals.\n    num_diff_sp : int\n        number of space intervals with length different than sp.  This is an integer\n        between 0 and 2.\n    \"\"\"\n    fill_info = _fill_symmetric_info(tot_area, num_blk_tot, sp, inc_sp=inc_sp,\n                                     fill_on_edge=fill_on_edge, cyclic=cyclic)\n\n    _, args = fill_info\n    return fill_symmetric_interval(*args, offset=offset, invert=invert)\n"
  },
  {
    "path": "bag/layout/routing/grid.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This module defines the RoutingGrid class.\n\"\"\"\n\nfrom typing import TYPE_CHECKING, Sequence, Union, Tuple, List, Optional, Dict, Any\n\nimport numpy as np\n\nfrom ..util import BBox\nfrom ...util.search import BinaryIterator\nfrom ...math import lcm\n\nif TYPE_CHECKING:\n    from ...layout.core import TechInfo\n\n\nclass RoutingGrid(object):\n    \"\"\"A class that represents the routing grid.\n\n    This class provides various methods to convert between Cartesian coordinates and\n    routing tracks.  This class assumes the lower-left coordinate is (0, 0)\n\n    the track numbers are at half-track pitch.  That is, even track numbers corresponds\n    to physical tracks, and odd track numbers corresponds to middle between two tracks.\n    This convention is chosen so it is easy to locate a via for 2-track wide wires, for\n    example.\n\n    Assumptions:\n\n    1. the pitch of all layers evenly divides the largest pitch.\n\n    Parameters\n    ----------\n    tech_info : bag.layout.core.TechInfo\n        the TechInfo instance used to create metals and vias.\n    layers : list[int]\n        list of available routing layers.  Must be in increasing order.\n    spaces : list[float]\n        list of track spacings for each layer.\n    widths : list[float]\n        list of minimum track widths for each layer.\n    bot_dir : str\n        the direction of the bottom-most layer.  Either 'x' for horizontal tracks or 'y' for\n        vertical tracks.\n    max_num_tr : int or list[int]\n        maximum track width in number of tracks.  
Can be given as an integer (which applies to\n        all layers), or a list to specify maximum width per layer.\n    \"\"\"\n\n    def __init__(self,  # type: RoutingGrid\n                 tech_info,  # type: TechInfo\n                 layers,  # type: Sequence[int]\n                 spaces,  # type: Sequence[float]\n                 widths,  # type: Sequence[float]\n                 bot_dir,  # type: str\n                 max_num_tr=1000,  # type: Union[int, Sequence[int]]\n                 width_override=None,  # type: Dict[int, Dict[int, float]]\n                 ):\n        # type: (...) -> None\n        # error checking\n        num_layer = len(layers)\n        if len(spaces) != num_layer:\n            raise ValueError('spaces length = %d != %d' % (len(spaces), num_layer))\n        if len(widths) != num_layer:\n            raise ValueError('spaces length = %d != %d' % (len(widths), num_layer))\n        if isinstance(max_num_tr, int):\n            max_num_tr = [max_num_tr] * num_layer\n        elif len(max_num_tr) != num_layer:\n            raise ValueError('max_num_tr length = %d != %d' % (len(max_num_tr), num_layer))\n\n        self._tech_info = tech_info\n        self._resolution = tech_info.resolution\n        self._layout_unit = tech_info.layout_unit\n        self._flip_parity = {}\n        self._ignore_layers = set()\n        self.layers = []\n        self.sp_tracks = {}\n        self.w_tracks = {}\n        self.offset_tracks = {}\n        self.dir_tracks = {}\n        self.max_num_tr_tracks = {}\n        self.block_pitch = {}\n        self.w_override = {}\n        self.private_layers = []\n\n        cur_dir = bot_dir\n        for lay, sp, w, max_num in zip(layers, spaces, widths, max_num_tr):\n            self.add_new_layer(lay, sp, w, cur_dir, max_num_tr=max_num, is_private=False)\n            # alternate track direction\n            cur_dir = 'y' if cur_dir == 'x' else 'x'\n\n        self.update_block_pitch()\n\n        # add width overrides\n        
if width_override is not None:\n            for layer_id, w_info in width_override.items():\n                for width_ntr, tr_w in w_info.items():\n                    self.add_width_override(layer_id, width_ntr, tr_w)\n\n    def __contains__(self, layer):\n        # type: (int) -> bool\n        \"\"\"Returns True if this RoutingGrid contains the given layer. \"\"\"\n        return layer in self.sp_tracks\n\n    @classmethod\n    def get_middle_track(cls, tr1, tr2, round_up=False):\n        # type: (Union[float, int], Union[float, int], bool) -> Union[float, int]\n        test = int(round((tr1 + tr2) * 2))\n        if test % 4 == 0:\n            return test // 4\n        if test % 4 == 1:\n            return (test + 1) / 4 if round_up else (test - 1) // 4\n        if test % 4 == 2:\n            return test / 4\n        return (test + 1) // 4 if round_up else (test - 1) / 4\n\n    def _get_track_offset(self, layer_id):\n        # type: (int) -> int\n        \"\"\"Returns the track offset in resolution units on the given layer.\"\"\"\n        track_pitch = self.get_track_pitch(layer_id, unit_mode=True)\n        return self.offset_tracks.get(layer_id, track_pitch // 2)\n\n    def get_flip_parity(self):\n        # type: () -> Dict[int, Tuple[int, int]]\n        \"\"\"Returns a copy of the flip parity dictionary.\"\"\"\n        return self._flip_parity.copy()\n\n    def get_bot_common_layer(self, inst_grid, inst_top_layer):\n        # type: (RoutingGrid, int) -> int\n        \"\"\"Given an instance's RoutingGrid, return the bottom common layer ID.\n\n        Parameters\n        ----------\n        inst_grid : RoutingGrid\n            the instance's RoutingGrid object.\n        inst_top_layer : int\n            the instance top layer ID.\n\n        Returns\n        -------\n        bot_layer : int\n            the bottom common layer ID.\n        \"\"\"\n        my_bot_layer = self.layers[0]\n        for bot_layer in range(inst_top_layer, my_bot_layer - 1, -1):\n        
    has_bot = (bot_layer in self.layers)\n            inst_has_bot = (bot_layer in inst_grid.layers)\n            if has_bot and inst_has_bot:\n                w_par, sp_par = self.get_track_info(bot_layer, unit_mode=True)\n                w_inst, sp_inst = inst_grid.get_track_info(bot_layer, unit_mode=True)\n                if w_par != w_inst or sp_par != sp_inst or \\\n                        self.get_direction(bot_layer) != inst_grid.get_direction(bot_layer):\n                    return bot_layer + 1\n            elif has_bot != inst_has_bot:\n                return bot_layer + 1\n\n        return my_bot_layer\n\n    def get_flip_parity_at(self,  # type: RoutingGrid\n                           bot_layer,  # type: int\n                           top_layer,  # type: int\n                           loc,  # type: Tuple[Union[int, float], Union[int, float]]\n                           orient,  # type: str\n                           unit_mode=False,  # type: bool\n                           ):\n        # type: (...) 
-> Dict[int, Tuple[int, int]]\n        \"\"\"Compute the flip parity dictionary for an instance placed at the given location.\n\n        Parameters\n        ----------\n        bot_layer : int\n            the bottom layer ID, inclusive.\n        top_layer : int\n            the top layer ID, inclusive.\n        loc : Tuple[Union[int, float], Union[int, float]]\n            the instance origin location.\n        orient : str\n            the instance orientation.\n        unit_mode : bool\n            True if loc is given in resolution units.\n\n        Returns\n        -------\n        flip_parity : Dict[int, Tuple[int, int]]\n            the flip_parity dictionary.\n        \"\"\"\n        if unit_mode:\n            xo, yo = loc\n        else:\n            res = self._resolution\n            xo, yo = int(round(loc[0] / res)), int(round(loc[1] / res))\n\n        if orient == 'R0':\n            xscale, yscale = 1, 1\n        elif orient == 'MX':\n            xscale, yscale = -1, 1\n        elif orient == 'MY':\n            xscale, yscale = 1, -1\n        elif orient == 'R180':\n            xscale, yscale = -1, -1\n        else:\n            raise ValueError('Unknown orientation: %s' % orient)\n\n        flip_par = {}\n        for lay in range(bot_layer, top_layer + 1):\n            if lay in self.layers:\n                tdir = self.dir_tracks[lay]\n\n                # find the track in top level that corresponds to the track at instance origin\n                if tdir == 'y':\n                    coord, scale = xo, yscale\n                else:\n                    coord, scale = yo, xscale\n\n                tr_idx = self.coord_to_track(lay, coord, unit_mode=True)\n                offset_htr = int(round(tr_idx * 2 + 1))\n\n                cur_scale, cur_offset = self._flip_parity.get(lay, (1, 0))\n                new_scale = cur_scale * scale\n                new_offset = (cur_scale * offset_htr + cur_offset) % 4\n                flip_par[lay] = (new_scale, 
new_offset)\n\n        return flip_par\n\n    def set_flip_parity(self, fp):\n        # type: (Dict[int, Tuple[int, int]]) -> None\n        \"\"\"set the flip track parity dictionary.\"\"\"\n        for lay in fp:\n            self._flip_parity[lay] = fp[lay]\n\n    @property\n    def tech_info(self):\n        # type: () -> TechInfo\n        \"\"\"The TechInfo technology object.\"\"\"\n        return self._tech_info\n\n    @property\n    def resolution(self):\n        # type: () -> float\n        \"\"\"Returns the grid resolution.\"\"\"\n        return self._resolution\n\n    @property\n    def layout_unit(self):\n        # type: () -> float\n        \"\"\"Returns the layout unit length, in meters.\"\"\"\n        return self._layout_unit\n\n    @property\n    def top_private_layer(self):\n        # type: () -> int\n        \"\"\"Returns the top private layer ID.\"\"\"\n        return -99 if not self.private_layers else self.private_layers[-1]\n\n    def update_block_pitch(self):\n        # type: () -> None\n        \"\"\"Update block pitch.\"\"\"\n        self.block_pitch.clear()\n        top_private_layer = self.top_private_layer\n\n        # update private block pitches\n        lay_list = [lay for lay in self.layers\n                    if lay <= top_private_layer and lay not in self._ignore_layers]\n        self._update_block_pitch_helper(lay_list)\n\n        # update public block pitches\n        lay_list = [lay for lay in self.layers\n                    if lay > top_private_layer and lay not in self._ignore_layers]\n        self._update_block_pitch_helper(lay_list)\n\n    def _update_block_pitch_helper(self, lay_list):\n        # type: (Sequence[int]) -> None\n        \"\"\"helper method for updating block pitch.\"\"\"\n        pitch_list = []\n        for lay in lay_list:\n            cur_bp = self.get_track_pitch(lay, unit_mode=True)\n            cur_bp2 = cur_bp // 2\n            cur_dir = self.dir_tracks[lay]\n            if pitch_list:\n                
# the pitch of each layer = LCM of all layers below with same direction\n                for play, (bp, bp2) in zip(lay_list, pitch_list):\n                    if self.dir_tracks[play] == cur_dir:\n                        cur_bp = lcm([cur_bp, bp])\n                        cur_bp2 = lcm([cur_bp2, bp2])\n            result = (cur_bp, cur_bp2)\n            pitch_list.append(result)\n            self.block_pitch[lay] = result\n\n    def get_direction(self, layer_id):\n        # type: (int) -> str\n        \"\"\"Returns the track direction of the given layer.\n\n        Parameters\n        ----------\n        layer_id : int\n            the layer ID.\n\n        Returns\n        -------\n        tdir : str\n            'x' for horizontal tracks, 'y' for vertical tracks.\n        \"\"\"\n        return self.dir_tracks[layer_id]\n\n    def get_track_pitch(self, layer_id, unit_mode=False):\n        # type: (int, bool) -> Union[float, int]\n        \"\"\"Returns the routing track pitch on the given layer.\n\n        Parameters\n        ----------\n        layer_id : int\n            the routing layer ID.\n        unit_mode : bool\n            True to return block pitch in resolution units.\n\n        Returns\n        -------\n        track_pitch : Union[float, int]\n            the track pitch in layout units.\n        \"\"\"\n        pitch = self.w_tracks[layer_id] + self.sp_tracks[layer_id]\n        return pitch if unit_mode else pitch * self._resolution\n\n    def get_track_width(self, layer_id, width_ntr, unit_mode=False):\n        # type: (int, int, bool) -> Union[float, int]\n        \"\"\"Calculate track width in layout units from number of tracks.\n\n        Parameters\n        ----------\n        layer_id : int\n            the track layer ID\n        width_ntr : int\n            the track width in number of tracks.\n        unit_mode : bool\n            True to return track width in resolution units.\n\n        Returns\n        -------\n        width : 
Union[float, int]\n            the track width in layout units.\n        \"\"\"\n        w = self.w_tracks[layer_id]\n        sp = self.sp_tracks[layer_id]\n        w_unit = width_ntr * (w + sp) - sp\n        w_unit = self.w_override[layer_id].get(width_ntr, w_unit)\n        if unit_mode:\n            return w_unit\n        return w_unit * self._resolution\n\n    def get_track_width_inverse(self, layer_id, width, mode=-1, unit_mode=False):\n        # type: (int, Union[float, int], int, bool) -> int\n        \"\"\"Given track width in layout/resolution units, compute equivalent number of tracks.\n\n        This is the inverse function of get_track_width().\n\n        Parameters\n        ----------\n        layer_id : int\n            the track layer ID\n        width : Union[float, int]\n            the track width in layout or resolution units.\n        mode : int\n            If negative, the result wire will have width less than or equal to the given width.\n            If positive, the result wire will have width greater than or equal to the given width.\n        unit_mode : bool\n            True if width is specified in resolution units.\n\n        Returns\n        -------\n        width_ntr : int\n            number of tracks needed to achieve the given width.\n        \"\"\"\n        if not unit_mode:\n            width = int(round(width / self.resolution))\n\n        # use binary search to find the minimum track width\n        bin_iter = BinaryIterator(1, None)\n        while bin_iter.has_next():\n            ntr = bin_iter.get_next()\n            w_test = self.get_track_width(layer_id, ntr, unit_mode=True)\n            if w_test == width:\n                return ntr\n            elif w_test < width:\n                if mode < 0:\n                    bin_iter.save()\n                bin_iter.up()\n            else:\n                if mode > 0:\n                    bin_iter.save()\n                bin_iter.down()\n\n        ans = bin_iter.get_last_save()\n  
      if ans is None:\n            return 0\n        return ans\n\n    def get_num_tracks(self, size, layer_id):\n        # type: (Tuple[int, Union[int, float], Union[int, float]], int) -> Union[int, float]\n        \"\"\"Returns the number of tracks on the given layer for a block with the given size.\n\n        Parameters\n        ----------\n        size : Tuple[int, Union[int, float], Union[int, float]]\n            the block size tuple.\n        layer_id : int\n            the layer ID.\n\n        Returns\n        -------\n        num_tracks : Union[int, float]\n            number of tracks on that given layer.\n        \"\"\"\n        tr_dir = self.get_direction(layer_id)\n        blk_w, blk_h = self.get_size_dimension(size, unit_mode=True)\n        tr_half_pitch = self.get_track_pitch(layer_id, unit_mode=True) // 2\n        if tr_dir == 'x':\n            val = blk_h // tr_half_pitch\n        else:\n            val = blk_w // tr_half_pitch\n\n        if val % 2 == 0:\n            return val // 2\n        return val / 2\n\n    def get_min_length(self, layer_id, width_ntr, unit_mode=False):\n        # type: (int, int, bool) -> Union[float, int]\n        \"\"\"Returns the minimum length for the given track.\n\n        Parameters\n        ----------\n        layer_id : int\n            the track layer ID\n        width_ntr : int\n            the track width in number of tracks.\n        unit_mode : bool\n            True to return the minimum length in resolution units.\n\n        Returns\n        -------\n        min_length : Union[float, int]\n            the minimum length.\n        \"\"\"\n        layer_name = self.tech_info.get_layer_name(layer_id)\n        if isinstance(layer_name, tuple):\n            layer_name = layer_name[0]\n        layer_type = self.tech_info.get_layer_type(layer_name)\n\n        width = self.get_track_width(layer_id, width_ntr)\n        min_length = self.tech_info.get_min_length(layer_type, width)\n\n        if unit_mode:\n            
return int(round(min_length / self._resolution))\n        else:\n            return min_length\n\n    def get_space(self, layer_id, width_ntr, same_color=False, unit_mode=False):\n        # type: (int, int, bool, bool) -> Union[int, float]\n        \"\"\"Returns the space needed around a track, in layout/resolution units.\n\n        Parameters\n        ----------\n        layer_id : int\n            the track layer ID\n        width_ntr : int\n            the track width in number of tracks.\n        same_color : bool\n            True to use same-color spacing.\n        unit_mode : bool\n            True to return resolution units.\n\n        Returns\n        -------\n        sp : Union[int, float]\n            minimum space needed around the given track in layout/resolution units.\n        \"\"\"\n        layer_name = self.tech_info.get_layer_name(layer_id)\n        if isinstance(layer_name, tuple):\n            layer_name = layer_name[0]\n        layer_type = self.tech_info.get_layer_type(layer_name)\n\n        width = self.get_track_width(layer_id, width_ntr, unit_mode=True)\n        sp_min_unit = self.tech_info.get_min_space(layer_type, width, unit_mode=True,\n                                                   same_color=same_color)\n        if unit_mode:\n            return sp_min_unit\n        return sp_min_unit * self._resolution\n\n    def get_num_space_tracks(self, layer_id, width_ntr, half_space=False, same_color=False):\n        # type: (int, int, bool, bool) -> Union[int, float]\n        \"\"\"Returns the number of tracks needed for space around a track of the given width.\n\n        In advanced technologies, metal spacing is often a function of the metal width, so for\n        a wide track we may need to reserve empty tracks next to this.  
This method computes the\n        minimum number of empty tracks needed.\n\n        Parameters\n        ----------\n        layer_id : int\n            the track layer ID\n        width_ntr : int\n            the track width in number of tracks.\n        half_space : bool\n            True to allow half-integer spacing.\n        same_color : bool\n            True to use same-color spacing.\n\n        Returns\n        -------\n        num_sp_tracks : Union[int, float]\n            minimum space needed around the given track in number of tracks.\n        \"\"\"\n        width = self.get_track_width(layer_id, width_ntr, unit_mode=True)\n        sp_min_unit = self.get_space(layer_id, width_ntr, same_color=same_color, unit_mode=True)\n        w_unit = self.w_tracks[layer_id]\n        sp_unit = self.sp_tracks[layer_id]\n        # if this width is overridden, we may have extra space\n        width_normal = w_unit * width_ntr + sp_unit * (width_ntr - 1)\n        extra_space = (width_normal - width) // 2\n        half_pitch = (w_unit + sp_unit) // 2\n        num_half_pitch = -(-(sp_min_unit - sp_unit - extra_space) // half_pitch)\n        if num_half_pitch % 2 == 0:\n            return num_half_pitch // 2\n        elif half_space:\n            return num_half_pitch / 2.0\n        else:\n            return (num_half_pitch + 1) // 2\n\n    def get_line_end_space(self, layer_id, width_ntr, unit_mode=False):\n        # type: (int, int, bool) -> Union[float, int]\n        \"\"\"Returns the minimum line end spacing for the given wire.\n\n        Parameters\n        ----------\n        layer_id : int\n            wire layer ID.\n        width_ntr : int\n            wire width, in number of tracks.\n        unit_mode : bool\n            True to return line-end space in resolution units.\n\n        Returns\n        -------\n        space : Union[float, int]\n            the line-end spacing.\n        \"\"\"\n        layer_name = self.tech_info.get_layer_name(layer_id)\n        if 
isinstance(layer_name, tuple):\n            layer_name = layer_name[0]\n        layer_type = self.tech_info.get_layer_type(layer_name)\n        width = self.get_track_width(layer_id, width_ntr, unit_mode=True)\n        ans = self.tech_info.get_min_line_end_space(layer_type, width, unit_mode=True)\n        if not unit_mode:\n            return ans * self._resolution\n        return ans\n\n    def get_line_end_space_tracks(self, wire_layer, space_layer, width_ntr, half_space=False):\n        # type: (int, int, int, bool) -> Union[float, int]\n        \"\"\"Returns the minimum line end spacing in number of space tracks.\n\n        Parameters\n        ----------\n        wire_layer : int\n            line-end wire layer ID.\n        space_layer : int\n            the layer used to measure line-end space.  Must be adjacent to wire_layer, and its\n            direction must be orthogonal to the wire layer.\n        width_ntr : int\n            wire width, in number of tracks.\n        half_space : bool\n            True to allow half-track spacing.\n\n        Returns\n        -------\n        space_ntr : Union[float, int]\n            number of tracks needed to reserve as space.\n        \"\"\"\n        if space_layer == wire_layer - 1:\n            _, conn_ext = self.get_via_extensions(space_layer, 1, width_ntr, unit_mode=True)\n        elif space_layer == wire_layer + 1:\n            conn_ext, _ = self.get_via_extensions(wire_layer, width_ntr, 1, unit_mode=True)\n        else:\n            raise ValueError('space_layer must be adjacent to wire_layer')\n\n        if self.get_direction(space_layer) == self.get_direction(wire_layer):\n            raise ValueError('space_layer must be orthogonal to wire_layer.')\n\n        wire_sp = self.get_line_end_space(wire_layer, width_ntr, unit_mode=True)\n        margin = 2 * conn_ext + wire_sp\n        w, sp = self.get_track_info(space_layer, unit_mode=True)\n        half_pitch = (w + sp) // 2\n        space_ntr = max(-(-(margin - 
sp) // half_pitch), 0)\n        if space_ntr % 2 == 0:\n            return space_ntr // 2\n        elif half_space:\n            return space_ntr / 2\n        else:\n            return (space_ntr + 1) // 2\n\n    def get_max_track_width(self, layer_id, num_tracks, tot_space, half_end_space=False):\n        # type: (int, int, int, bool) -> int\n        \"\"\"Compute maximum track width and space that satisfies DRC rule.\n\n        Given available number of tracks and numbers of tracks needed, returns\n        the maximum possible track width and spacing.\n\n        Parameters\n        ----------\n        layer_id : int\n            the track layer ID.\n        num_tracks : int\n            number of tracks to draw.\n        tot_space : int\n            available number of tracks.\n        half_end_space : bool\n            True if end spaces can be half of minimum spacing.  This is true if\n            these tracks will be repeated, or there are no adjacent tracks.\n\n        Returns\n        -------\n        tr_w : int\n            track width.\n        \"\"\"\n        bin_iter = BinaryIterator(1, None)\n        num_space = num_tracks if half_end_space else num_tracks + 1\n        while bin_iter.has_next():\n            tr_w = bin_iter.get_next()\n            tr_sp = self.get_num_space_tracks(layer_id, tr_w, half_space=False)\n            used_tracks = tr_w * num_tracks + tr_sp * num_space\n            if used_tracks > tot_space:\n                bin_iter.down()\n            else:\n                bin_iter.save()\n                bin_iter.up()\n\n        opt_w = bin_iter.get_last_save()\n        return opt_w\n\n    @staticmethod\n    def get_evenly_spaced_tracks(num_tracks, tot_space, track_width, half_end_space=False):\n        # type: (int, int, int, bool) -> List[Union[float, int]]\n        \"\"\"Evenly space given number of tracks in the available space.\n\n        Currently this method may return half-integer tracks.\n\n        Parameters\n        
----------\n        num_tracks : int\n            number of tracks to draw.\n        tot_space : int\n            available number of tracks.\n        track_width : int\n            track width in number of tracks.\n        half_end_space : bool\n            True if end spaces can be half of minimum spacing.  This is true if\n            these tracks will be repeated, or there are no adjacent tracks.\n\n        Returns\n        -------\n        idx_list : List[float]\n            list of track indices.  0 is the left-most track.\n        \"\"\"\n        if half_end_space:\n            tot_space_htr = 2 * tot_space\n            scale = 2 * tot_space_htr\n            offset = tot_space_htr + num_tracks\n            den = 2 * num_tracks\n        else:\n            tot_space_htr = 2 * tot_space\n            width_htr = 2 * track_width - 2\n            # magic math.  You can work it out\n            scale = 2 * (tot_space_htr + width_htr)\n            offset = 2 * tot_space_htr - width_htr * (num_tracks - 1) + (num_tracks + 1)\n            den = 2 * (num_tracks + 1)\n        hidx_arr = (scale * np.arange(num_tracks, dtype=int) + offset) // den\n        # convert from half indices to actual indices\n        idx_list = ((hidx_arr - 1) / 2.0).tolist()  # type: List[float]\n        return idx_list\n\n    def get_block_size(self, layer_id, unit_mode=False, include_private=False,\n                       half_blk_x=True, half_blk_y=True):\n        # type: (int, bool, bool, bool, bool) -> Tuple[Union[float, int], Union[float, int]]\n        \"\"\"Returns unit block size given the top routing layer.\n\n        Parameters\n        ----------\n        layer_id : int\n            the routing layer ID.\n        unit_mode : bool\n            True to return block dimension in resolution units.\n        include_private : bool\n            True to include private layers in block size calculation.\n        half_blk_x : bool\n            True to allow half-block widths.\n        
half_blk_y : bool\n            True to allow half-block heights.\n\n        Returns\n        -------\n        block_width : Union[float, int]\n            the block width in layout units.\n        block_height : Union[float, int]\n            the block height in layout units.\n        \"\"\"\n        top_private_layer = self.top_private_layer\n        top_dir = self.dir_tracks[layer_id]\n\n        # get bottom layer that has different direction\n        bot_layer = layer_id - 1\n        while bot_layer in self.block_pitch and self.dir_tracks[bot_layer] == top_dir:\n            bot_layer -= 1\n\n        if bot_layer not in self.block_pitch:\n            bot_pitch = (2, 1)\n        else:\n            bot_pitch = self.block_pitch[bot_layer]\n\n        top_pitch = self.block_pitch[layer_id]\n\n        if layer_id > top_private_layer >= bot_layer and not include_private:\n            # if top layer not private but bottom layer is, then bottom is not quantized.\n            bot_pitch = (2, 1)\n\n        if top_dir == 'y':\n            w_pitch, h_pitch = top_pitch, bot_pitch\n        else:\n            w_pitch, h_pitch = bot_pitch, top_pitch\n\n        w_pitch = w_pitch[1] if half_blk_x else w_pitch[0]\n        h_pitch = h_pitch[1] if half_blk_y else h_pitch[0]\n        if unit_mode:\n            return w_pitch, h_pitch\n        else:\n            return w_pitch * self.resolution, h_pitch * self.resolution\n\n    def get_fill_size(self,  # type: RoutingGrid\n                      top_layer,  # type: int\n                      fill_config,  # type: Dict[int, Tuple[int, int, int, int]]\n                      unit_mode=False,  # type: bool\n                      include_private=False,  # type: bool\n                      half_blk_x=True,  # type: bool\n                      half_blk_y=True,  # type: bool\n                      ):\n        # type: (...) 
-> Tuple[Union[float, int], Union[float, int]]\n        \"\"\"Returns unit block size given the top routing layer and power fill configuration.\n\n        Parameters\n        ----------\n        top_layer : int\n            the top layer ID.\n        fill_config : Dict[int, Tuple[int, int, int, int]]\n            the fill configuration dictionary.\n        unit_mode : bool\n            True to return block dimension in resolution units.\n        include_private : bool\n            True to include private layers in block size calculation.\n        half_blk_x : bool\n            True to allow half-block widths.\n        half_blk_y : bool\n            True to allow half-block heights.\n\n        Returns\n        -------\n        block_width : Union[float, int]\n            the block width in layout units.\n        block_height : Union[float, int]\n            the block height in layout units.\n        \"\"\"\n        blk_w, blk_h = self.get_block_size(top_layer, unit_mode=True,\n                                           include_private=include_private,\n                                           half_blk_x=half_blk_x, half_blk_y=half_blk_y)\n\n        w_list = [blk_w]\n        h_list = [blk_h]\n        for lay, (tr_w, tr_sp, _, _) in fill_config.items():\n            if lay <= top_layer:\n                cur_pitch = self.get_track_pitch(lay, unit_mode=True)\n                cur_dim = (tr_w + tr_sp) * cur_pitch * 2\n                if self.get_direction(lay) == 'x':\n                    h_list.append(cur_dim)\n                else:\n                    w_list.append(cur_dim)\n\n        blk_w = lcm(w_list)\n        blk_h = lcm(h_list)\n        if unit_mode:\n            return blk_w, blk_h\n        return blk_w * self._resolution, blk_h * self._resolution\n\n    def size_defined(self, layer_id):\n        # type: (int) -> bool\n        \"\"\"Returns True if size is defined on the given layer.\"\"\"\n        return layer_id >= self.top_private_layer + 2\n\n    def 
get_size_pitch(self, layer_id, unit_mode=False):\n        # type: (int, bool) -> Tuple[Union[float, int], Union[float, int]]\n        \"\"\"Returns the horizontal/vertical pitch that defines template size.\n\n        Parameters\n        ----------\n        layer_id : int\n            the size layer.\n        unit_mode : bool\n            True to return pitches in resolution units.\n\n        Returns\n        -------\n        w_pitch : Union[float, int]\n            the width pitch.\n        h_pitch : Union[float, int]\n            the height pitch.\n        \"\"\"\n        if not self.size_defined(layer_id):\n            raise ValueError('Size tuple is undefined for layer = %d' % layer_id)\n\n        top_dir = self.dir_tracks[layer_id]\n        bot_layer = layer_id - 1\n        while bot_layer in self.dir_tracks and self.dir_tracks[bot_layer] == top_dir:\n            bot_layer -= 1\n\n        h_pitch = self.get_track_pitch(layer_id, unit_mode=unit_mode)\n        w_pitch = self.get_track_pitch(bot_layer, unit_mode=unit_mode)\n        if top_dir == 'y':\n            return h_pitch, w_pitch\n        return w_pitch, h_pitch\n\n    def get_size_tuple(self,  # type: RoutingGrid\n                       layer_id,  # type: int\n                       width,  # type: Union[float, int]\n                       height,  # type: Union[float, int]\n                       round_up=False,  # type: bool\n                       unit_mode=False,  # type: bool\n                       half_blk_x=True,  # type: bool\n                       half_blk_y=True,  # type: bool\n                       ):\n        # type: (...) 
-> Tuple[int, Union[float, int], Union[float, int]]\n        \"\"\"Compute the size tuple corresponding to the given width and height from block pitch.\n\n        Parameters\n        ----------\n        layer_id : int\n            the layer ID.\n        width : Union[float, int]\n            width of the block, in layout units.\n        height : Union[float, int]\n            height of the block, in layout units.\n        round_up : bool\n            True to round up instead of raising an error if the given width and height\n            are not on pitch.\n        unit_mode : bool\n            True if the given layout dimensions are in resolution units.\n        half_blk_x : bool\n            True to allow half-block widths.\n        half_blk_y : bool\n            True to allow half-block heights.\n\n        Returns\n        -------\n        size : Tuple[int, int, int]\n            the size tuple.  the first element is the top layer ID, second element is the width in\n            number of vertical tracks, and third element is the height in number of\n            horizontal tracks.\n        \"\"\"\n        if not unit_mode:\n            res = self._resolution\n            width = int(round(width / res))\n            height = int(round(height / res))\n\n        w_pitch, h_pitch = self.get_size_pitch(layer_id, unit_mode=True)\n\n        wblk, hblk = self.get_block_size(layer_id, unit_mode=True,\n                                         half_blk_x=half_blk_x, half_blk_y=half_blk_y)\n        if width % wblk != 0:\n            if round_up:\n                width = -(-width // wblk) * wblk\n            else:\n                raise ValueError('width = %d not on block pitch (%d)' % (width, wblk))\n        if height % hblk != 0:\n            if round_up:\n                height = -(-height // hblk) * hblk\n            else:\n                raise ValueError('height = %d not on block pitch (%d)' % (height, hblk))\n\n        w_size = width // w_pitch if width % w_pitch == 0 
else width / w_pitch\n        h_size = height // h_pitch if height % h_pitch == 0 else height / h_pitch\n        return layer_id, w_size, h_size\n\n    def get_size_dimension(self,  # type: RoutingGrid\n                           size,  # type: Tuple[int, Union[float, int], Union[float, int]]\n                           unit_mode=False,  # type: bool\n                           ):\n        # type: (...) -> Tuple[Union[float, int], Union[float, int]]\n        \"\"\"Compute width and height from given size.\n\n        Parameters\n        ----------\n        size : Tuple[int, Union[float, int], Union[float, int]]\n            size of a block.\n        unit_mode : bool\n            True to return width/height in resolution units.\n\n        Returns\n        -------\n        width : Union[float, int]\n            the width in layout units.\n        height : Union[float, int]\n            the height in layout units.\n        \"\"\"\n        w_pitch, h_pitch = self.get_size_pitch(size[0], unit_mode=True)\n        w_unit = int(round(size[1] * 2)) * w_pitch // 2\n        h_unit = int(round(size[2] * 2)) * h_pitch // 2\n        if unit_mode:\n            return w_unit, h_unit\n        else:\n            return w_unit * self.resolution, h_unit * self.resolution\n\n    def convert_size(self, size, new_top_layer):\n        # type: (Tuple[int, Union[float, int], Union[float, int]], int) -> Tuple[int, int, int]\n        \"\"\"Convert the given size to a new top layer.\n\n        Parameters\n        ----------\n        size : Tuple[int, Union[float, int], Union[float, int]]\n            size of a block.\n        new_top_layer : int\n            the new top level layer ID.\n\n        Returns\n        -------\n        new_size : Tuple[int, int, int]\n            the new size tuple.\n        \"\"\"\n        wblk, hblk = self.get_size_dimension(size, unit_mode=True)\n        return self.get_size_tuple(new_top_layer, wblk, hblk, unit_mode=True)\n\n    def get_track_info(self, layer_id, 
unit_mode=False):\n        # type: (int, bool) -> Tuple[Union[float, int], Union[float, int]]\n        \"\"\"Returns the routing track width and spacing on the given layer.\n\n        Parameters\n        ----------\n        layer_id : int\n            the routing layer ID.\n        unit_mode : bool\n            True to return track width/spacing in resolution units.\n\n        Returns\n        -------\n        track_width : Union[float, int]\n            the track width in layout/resolution units.\n        track_spacing : Union[float, int]\n            the track spacing in layout/resolution units\n        \"\"\"\n        w, sp = self.w_tracks[layer_id], self.sp_tracks[layer_id]\n        if unit_mode:\n            return w, sp\n        return w * self._resolution, sp * self._resolution\n\n    def get_track_parity(self, layer_id, tr_idx):\n        # type: (int, Union[float, int]) -> int\n        \"\"\"Returns the parity of the given track.\n\n        Parameters\n        ----------\n        layer_id : int\n            the layer ID.\n        tr_idx : Union[float, int]\n            the track index.\n\n        Returns\n        -------\n        parity : int\n            the track parity, either 0 or 1.\n        \"\"\"\n        # multiply then divide by 2 makes sure negative tracks are colored correctly.\n        htr = int(round(tr_idx * 2 + 1))\n        scale, offset = self._flip_parity[layer_id]\n        par_htr = scale * htr + offset\n        if par_htr % 4 < 2:\n            return 0\n        return 1\n\n    def get_layer_name(self, layer_id, tr_idx):\n        # type: (int, Union[float, int]) -> str\n        \"\"\"Returns the layer name of the given track.\n\n        Parameters\n        ----------\n        layer_id : int\n            the layer ID.\n        tr_idx : Union[float, int]\n            the track index.\n\n        Returns\n        -------\n        layer_name : str\n            the layer name.\n        \"\"\"\n        layer_name = 
self.tech_info.get_layer_name(layer_id)\n        if isinstance(layer_name, tuple):\n            # round down half integer track\n            tr_parity = self.get_track_parity(layer_id, tr_idx)\n            return layer_name[tr_parity]\n        else:\n            return layer_name\n\n    def get_wire_bounds(self, layer_id, tr_idx, width=1, unit_mode=False):\n        # type: (int, Union[int, float], int, bool) -> Tuple[Union[float, int], Union[float, int]]\n        \"\"\"Calculate the wire bounds coordinate.\n\n        Parameters\n        ----------\n        layer_id : int\n            the layer ID.\n        tr_idx : Union[int, float]\n            the center track index.\n        width : int\n            width of wire in number of tracks.\n        unit_mode : bool\n            True to return coordinates in resolution units.\n\n        Returns\n        -------\n        lower : Union[float, int]\n            the lower bound coordinate perpendicular to wire direction.\n        upper : Union[float, int]\n            the upper bound coordinate perpendicular to wire direction.\n        \"\"\"\n        width_unit = self.get_track_width(layer_id, width, unit_mode=True)\n        center = self.track_to_coord(layer_id, tr_idx, unit_mode=True)\n        lower, upper = center - width_unit // 2, center + width_unit // 2\n        if unit_mode:\n            return lower, upper\n        else:\n            return lower * self._resolution, upper * self._resolution\n\n    def get_bbox(self, layer_id, tr_idx, lower, upper, width=1, unit_mode=False):\n        # type: (int, Union[int, float], Union[int, float], Union[int, float], int, bool) -> BBox\n        \"\"\"Compute bounding box for the given wire.\n\n        Parameters\n        ----------\n        layer_id : int\n            the layer ID.\n        tr_idx : Union[int, float]\n            the center track index.\n        lower : Union[int, float]\n            the lower coordinate along track direction.\n        upper : Union[int, 
float]\n            the upper coordinate along track direction.\n        width : int\n            width of wire in number of tracks.\n        unit_mode : bool\n            True if lower and upper are specified in resolution units.\n\n        Returns\n        -------\n        bbox : bag.layout.util.BBox\n            the bounding box.\n        \"\"\"\n        if not unit_mode:\n            lower = int(round(lower / self._resolution))\n            upper = int(round(upper / self._resolution))\n\n        cl, cu = self.get_wire_bounds(layer_id, tr_idx, width=width, unit_mode=True)\n        if self.get_direction(layer_id) == 'x':\n            bbox = BBox(lower, cl, upper, cu, self._resolution, unit_mode=True)\n        else:\n            bbox = BBox(cl, lower, cu, upper, self._resolution, unit_mode=True)\n\n        return bbox\n\n    def get_min_track_width(self, layer_id, idc=0, iac_rms=0, iac_peak=0, l=-1,\n                            bot_w=-1, top_w=-1, unit_mode=False, **kwargs):\n        # type: (int, float, float, float, float, float, float, bool, **Any) -> int\n        \"\"\"Returns the minimum track width required for the given EM specs.\n\n        Parameters\n        ----------\n        layer_id : int\n            the layer ID.\n        idc : float\n            the DC current spec.\n        iac_rms : float\n            the AC RMS current spec.\n        iac_peak : float\n            the AC peak current spec.\n        l : float\n            the length of the wire in layout units.  Use negative length\n            to disable length enhancement factor.\n        bot_w : float\n            the bottom layer track width in layout units.  If given, will make sure\n            that the via between the two tracks meet EM specs too.\n        top_w : float\n            the top layer track width in layout units.  
If given, will make sure\n            that the via between the two tracks meet EM specs too.\n        unit_mode : bool\n            True if l/bot_w/top_w are given in resolution units.\n        **kwargs : Any\n            override default EM spec parameters.\n\n        Returns\n        -------\n        track_width : int\n            the minimum track width in number of tracks.\n        \"\"\"\n        res = self._resolution\n        if not unit_mode:\n            if l > 0:\n                l = int(round(l / res))\n            if bot_w > 0:\n                bot_w = int(round(bot_w / res))\n            if top_w > 0:\n                top_w = int(round(top_w / res))\n\n        # if double patterning layer, just use any name.\n        layer_name = self.tech_info.get_layer_name(layer_id)\n        if isinstance(layer_name, tuple):\n            layer_name = layer_name[0]\n        if bot_w > 0:\n            bot_layer_name = self.tech_info.get_layer_name(layer_id - 1)\n            if isinstance(bot_layer_name, tuple):\n                bot_layer_name = bot_layer_name[0]\n        else:\n            bot_layer_name = None\n        if top_w > 0:\n            top_layer_name = self.tech_info.get_layer_name(layer_id + 1)\n            if isinstance(top_layer_name, tuple):\n                top_layer_name = top_layer_name[0]\n        else:\n            top_layer_name = None\n\n        # use binary search to find the minimum track width\n        bin_iter = BinaryIterator(1, None)\n        tr_dir = self.dir_tracks[layer_id]\n        alt_dir = 'x' if tr_dir == 'y' else 'y'\n        bot_dir = self.dir_tracks.get(layer_id - 1, alt_dir)\n        top_dir = self.dir_tracks.get(layer_id + 1, alt_dir)\n        while bin_iter.has_next():\n            ntr = bin_iter.get_next()\n            width = self.get_track_width(layer_id, ntr, unit_mode=True)\n            idc_max, irms_max, ipeak_max = self.tech_info.get_metal_em_specs(layer_name,\n                                                             
                width * res,\n                                                                             l=l * res, **kwargs)\n            if idc > idc_max or iac_rms > irms_max or iac_peak > ipeak_max:\n                # check metal satisfies EM spec\n                bin_iter.up()\n                continue\n            if bot_w > 0 and bot_dir != tr_dir:\n                if tr_dir == 'x':\n                    bbox = BBox(0, 0, bot_w, width, res, unit_mode=True)\n                else:\n                    bbox = BBox(0, 0, width, bot_w, res, unit_mode=True)\n                vinfo = self.tech_info.get_via_info(bbox, bot_layer_name, layer_name,\n                                                    bot_dir, **kwargs)\n                if (vinfo is None or idc > vinfo['idc'] or iac_rms > vinfo['iac_rms'] or\n                        iac_peak > vinfo['iac_peak']):\n                    bin_iter.up()\n                    continue\n            if top_w > 0 and top_dir != tr_dir:\n                if tr_dir == 'x':\n                    bbox = BBox(0, 0, top_w, width, res, unit_mode=True)\n                else:\n                    bbox = BBox(0, 0, width, top_w, res, unit_mode=True)\n                vinfo = self.tech_info.get_via_info(bbox, layer_name, top_layer_name,\n                                                    tr_dir, **kwargs)\n                if (vinfo is None or idc > vinfo['idc'] or iac_rms > vinfo['iac_rms'] or\n                        iac_peak > vinfo['iac_peak']):\n                    bin_iter.up()\n                    continue\n\n            # we got here, so all EM specs passed\n            bin_iter.save()\n            bin_iter.down()\n\n        return bin_iter.get_last_save()\n\n    def get_min_track_width_for_via(self,\n                                    bot_layer: int,\n                                    next_ntr: int = 1,\n                                    **kwargs: Any,\n                                    ) -> int:\n        \"\"\"Returns the 
minimum track width required to fit a via to the next layer.\n\n        Parameters\n        ----------\n        bot_layer : int\n            the layer ID.\n        next_ntr : int\n            the width of the track on the next layer, in track widths.\n        **kwargs : Any\n            Override the default EM specs and pass additional arguments that are accepted by get_min_track_width\n\n        Returns\n        -------\n        track_width : int\n            the minimum track width in number of tracks\n        \"\"\"\n        next_layer_min_width_unit = self.get_track_width(layer_id=bot_layer + 1, width_ntr=next_ntr, unit_mode=True)\n        return self.get_min_track_width(layer_id=bot_layer, top_w=next_layer_min_width_unit, unit_mode=True, **kwargs)\n\n    def get_track_index_range(self,  # type: RoutingGrid\n                              layer_id,  # type: int\n                              lower,  # type: Union[float, int]\n                              upper,  # type: Union[float, int]\n                              num_space=0,  # type: Union[float, int]\n                              edge_margin=0,  # type: Union[float, int]\n                              half_track=False,  # type: bool\n                              unit_mode=False  # type: bool\n                              ):\n        # type: (...) 
-> Tuple[Optional[Union[float, int]], Optional[Union[float, int]]]\n        \"\"\" Returns the first and last track index strictly in the given range.\n\n        Parameters\n        ----------\n        layer_id : int\n            the layer ID.\n        lower : Union[float, int]\n            the lower coordinate.\n        upper : Union[float, int]\n            the upper coordinate.\n        num_space : Union[float, int]\n            number of space tracks to the tracks right outside of the given range.\n        edge_margin : Union[float, int]\n            minimum space from outer tracks to given range.\n        half_track : bool\n            True to allow half-integer tracks.\n        unit_mode : bool\n            True if lower/upper/edge_margin are given in resolution units.\n\n        Returns\n        -------\n        start_track : Optional[Union[float, int]]\n            the first track index.  None if no solution.\n        end_track : Optional[Union[float, int]]\n            the last track index.  
None if no solution.\n        \"\"\"\n        if not unit_mode:\n            lower = int(round(lower / self._resolution))\n            upper = int(round(upper / self._resolution))\n            edge_margin = int(round(edge_margin / self._resolution))\n\n        tr_w = self.get_track_width(layer_id, 1, unit_mode=True)\n        tr_ph = self.get_track_pitch(layer_id, unit_mode=True) // 2\n        tr_wh = tr_w // 2\n\n        # get start track half index\n        lower_bnd = self.coord_to_nearest_track(layer_id, lower, half_track=True,\n                                                mode=-1, unit_mode=True)\n        start_track = self.coord_to_nearest_track(layer_id, lower + edge_margin, half_track=True,\n                                                  mode=2, unit_mode=True)\n        hstart_track = int(round(2 * max(start_track, lower_bnd + num_space) + 1))\n        # check strictly in range\n        if hstart_track * tr_ph - tr_wh < lower + edge_margin:\n            hstart_track += 1\n        # check if half track is allowed\n        if not half_track and hstart_track % 2 == 0:\n            hstart_track += 1\n\n        # get end track half index\n        upper_bnd = self.coord_to_nearest_track(layer_id, upper, half_track=True,\n                                                mode=1, unit_mode=True)\n        end_track = self.coord_to_nearest_track(layer_id, upper - edge_margin, half_track=True,\n                                                mode=-2, unit_mode=True)\n        hend_track = int(round(2 * min(end_track, upper_bnd - num_space) + 1))\n        # check strictly in range\n        if hend_track * tr_ph + tr_wh > upper - edge_margin:\n            hend_track -= 1\n        # check if half track is allowed\n        if not half_track and hend_track % 2 == 0:\n            hend_track -= 1\n\n        if hend_track < hstart_track:\n            # no solution\n            return None, None\n        # convert to track\n        if hstart_track % 2 == 1:\n            
start_track = (hstart_track - 1) // 2\n        else:\n            start_track = (hstart_track - 1) / 2\n        if hend_track % 2 == 1:\n            end_track = (hend_track - 1) // 2\n        else:\n            end_track = (hend_track - 1) / 2\n        return start_track, end_track\n\n    def get_overlap_tracks(self,  # type: RoutingGrid\n                           layer_id,  # type: int\n                           lower,  # type: Union[float, int]\n                           upper,  # type: Union[float, int]\n                           half_track=False,  # type: bool\n                           unit_mode=False  # type: bool\n                           ):\n        # type: (...) -> Tuple[Optional[Union[float, int]], Optional[Union[float, int]]]\n        \"\"\" Returns the first and last track index that overlaps with the given range.\n\n        Parameters\n        ----------\n        layer_id : int\n            the layer ID.\n        lower : Union[float, int]\n            the lower coordinate.\n        upper : Union[float, int]\n            the upper coordinate.\n        half_track : bool\n            True to allow half-integer tracks.\n        unit_mode : bool\n            True if lower/upper are given in resolution units.\n\n        Returns\n        -------\n        start_track : Optional[Union[float, int]]\n            the first track index.  None if no solution.\n        end_track : Optional[Union[float, int]]\n            the last track index.  
None if no solution.\n        \"\"\"\n        if not unit_mode:\n            lower = int(round(lower / self._resolution))\n            upper = int(round(upper / self._resolution))\n\n        wtr = self.w_tracks[layer_id]\n        lower_tr = self.find_next_track(layer_id, lower - wtr, half_track=half_track,\n                                        mode=1, unit_mode=True)\n        upper_tr = self.find_next_track(layer_id, upper + wtr, half_track=half_track,\n                                        mode=-1, unit_mode=True)\n\n        return lower_tr, upper_tr\n\n    def get_via_extensions_dim(self,  # type: RoutingGrid\n                               bot_layer_id,  # type: int\n                               bot_dim,  # type: Union[float, int]\n                               top_dim,  # type: Union[float, int]\n                               unit_mode=False,  # type: bool\n                               ):\n        # type: (...) -> Tuple[Union[float, int], Union[float, int]]\n        \"\"\"Returns the via extension.\n\n        Parameters\n        ----------\n        bot_layer_id : int\n            the via bottom layer ID.\n        bot_dim : Union[float, int]\n            the bottom track width in layout/resolution units.\n        top_dim : Union[float, int]\n            the top track width in layout/resolution units.\n        unit_mode : bool\n            True if given widths are in resolution units.\n\n        Returns\n        -------\n        bot_ext : Union[float, int]\n            via extension on the bottom layer.\n        top_ext : Union[float, int]\n            via extension on the top layer.\n        \"\"\"\n        res = self._resolution\n        if not unit_mode:\n            bot_dim = int(round(bot_dim / res))\n            top_dim = int(round(top_dim / res))\n\n        bot_lay_name = self.get_layer_name(bot_layer_id, 0)\n        top_lay_name = self.get_layer_name(bot_layer_id + 1, 0)\n        bot_dir = self.get_direction(bot_layer_id)\n        top_dir = 
self.get_direction(bot_layer_id + 1)\n        if top_dir == bot_dir:\n            raise ValueError('This method only works if top and bottom layers are orthogonal.')\n\n        if bot_dir == 'x':\n            vbox = BBox(0, 0, top_dim, bot_dim, res, unit_mode=True)\n            vinfo = self._tech_info.get_via_info(vbox, bot_lay_name, top_lay_name, bot_dir)\n            if vinfo is None:\n                raise ValueError('Cannot create via')\n            bot_ext = (vinfo['bot_box'].width_unit - top_dim) // 2\n            top_ext = (vinfo['top_box'].height_unit - bot_dim) // 2\n        else:\n            vbox = BBox(0, 0, bot_dim, top_dim, res, unit_mode=True)\n            vinfo = self._tech_info.get_via_info(vbox, bot_lay_name, top_lay_name, bot_dir)\n            if vinfo is None:\n                raise ValueError('Cannot create via')\n            bot_ext = (vinfo['bot_box'].height_unit - top_dim) // 2\n            top_ext = (vinfo['top_box'].width_unit - bot_dim) // 2\n\n        if unit_mode:\n            return bot_ext, top_ext\n        else:\n            return bot_ext * res, top_ext * res\n\n    def get_via_extensions(self, bot_layer_id, bot_width, top_width, unit_mode=False):\n        # type: (int, int, int, bool) -> Tuple[Union[float, int], Union[float, int]]\n        \"\"\"Returns the via extension.\n\n        Parameters\n        ----------\n        bot_layer_id : int\n            the via bottom layer ID.\n        bot_width : int\n            the bottom track width in number of tracks.\n        top_width : int\n            the top track width in number of tracks.\n        unit_mode : bool\n            True to return extensions in resolution units.\n\n        Returns\n        -------\n        bot_ext : Union[float, int]\n            via extension on the bottom layer.\n        top_ext : Union[float, int]\n            via extension on the top layer.\n        \"\"\"\n        bot_dim = self.get_track_width(bot_layer_id, bot_width, unit_mode=unit_mode)\n        
top_dim = self.get_track_width(bot_layer_id + 1, top_width, unit_mode=unit_mode)\n        return self.get_via_extensions_dim(bot_layer_id, bot_dim, top_dim, unit_mode=unit_mode)\n\n    def coord_to_track(self, layer_id, coord, unit_mode=False):\n        # type: (int, Union[float, int], bool) -> Union[float, int]\n        \"\"\"Convert given coordinate to track number.\n\n        Parameters\n        ----------\n        layer_id : int\n            the layer number.\n        coord : Union[float, int]\n            the coordinate perpendicular to the track direction.\n        unit_mode : bool\n            True if coordinate is given in resolution units.\n\n        Returns\n        -------\n        track : float or int\n            the track number\n        \"\"\"\n        if not unit_mode:\n            coord = int(round(coord / self._resolution))\n\n        pitch = self.get_track_pitch(layer_id, unit_mode=True)\n        q, r = divmod(coord - self._get_track_offset(layer_id), pitch)\n\n        if r == 0:\n            return q\n        elif r == (pitch // 2):\n            return q + 0.5\n        else:\n            raise ValueError('coordinate %.4g is not on track.' % coord)\n\n    def find_next_track(self, layer_id, coord, tr_width=1, half_track=False,\n                        mode=1, unit_mode=False):\n        # type: (int, Union[float, int], int, bool, int, bool) -> Union[float, int]\n        \"\"\"Find the track such that its edges are on the same side w.r.t. 
the given coordinate.\n\n        Parameters\n        ----------\n        layer_id : int\n            the layer number.\n        coord : float\n            the coordinate perpendicular to the track direction.\n        tr_width : int\n            the track width, in number of tracks.\n        half_track : bool\n            True to allow half integer track center numbers.\n        mode : int\n            1 to find track with both edge coordinates larger than or equal to the given one,\n            -1 to find track with both edge coordinates less than or equal to the given one.\n        unit_mode : bool\n            True if coordinate is given in resolution units.\n\n        Returns\n        -------\n        tr_idx : int or float\n            the center track index.\n        \"\"\"\n        if not unit_mode:\n            coord = int(round(coord / self._resolution))\n\n        tr_w = self.get_track_width(layer_id, tr_width, unit_mode=True)\n        if mode > 0:\n            return self.coord_to_nearest_track(layer_id, coord + tr_w // 2, half_track=half_track,\n                                               mode=mode, unit_mode=True)\n        else:\n            return self.coord_to_nearest_track(layer_id, coord - tr_w // 2, half_track=half_track,\n                                               mode=mode, unit_mode=True)\n\n    def coord_to_nearest_track(self, layer_id, coord, half_track=False, mode=0,\n                               unit_mode=False):\n        # type: (int, Union[float, int], bool, int, bool) -> Union[float, int]\n        \"\"\"Returns the track number closest to the given coordinate.\n\n        Parameters\n        ----------\n        layer_id : int\n            the layer number.\n        coord : Union[float, int]\n            the coordinate perpendicular to the track direction.\n        half_track : bool\n            if True, allow half integer track numbers.\n        mode : int\n            the \"rounding\" mode.\n\n            If mode == 0, return the 
nearest track (default).\n\n            If mode == -1, return the nearest track with coordinate less\n            than or equal to coord.\n\n            If mode == -2, return the nearest track with coordinate less\n            than coord.\n\n            If mode == 1, return the nearest track with coordinate greater\n            than or equal to coord.\n\n            If mode == 2, return the nearest track with coordinate greater\n            than coord.\n        unit_mode : bool\n            True if the given coordinate is in resolution units.\n\n        Returns\n        -------\n        track : Union[float, int]\n            the track number\n        \"\"\"\n        if not unit_mode:\n            coord = int(round(coord / self._resolution))\n\n        pitch = self.get_track_pitch(layer_id, unit_mode=True)\n        if half_track:\n            pitch //= 2\n\n        q, r = divmod(coord - self._get_track_offset(layer_id), pitch)\n\n        if r == 0:\n            # exactly on track\n            if mode == -2:\n                # move to lower track\n                q -= 1\n            elif mode == 2:\n                # move to upper track\n                q += 1\n        else:\n            # not on track\n            if mode > 0 or (mode == 0 and r >= pitch / 2):\n                # round up\n                q += 1\n\n        if not half_track:\n            return q\n        elif q % 2 == 0:\n            return q // 2\n        else:\n            return q / 2\n\n    def coord_to_nearest_fill_track(self, layer_id, coord, fill_config, mode=0,\n                                    unit_mode=False):\n        # type: (int, Union[float, int], Dict[int, Any], int, bool) -> Union[float, int]\n\n        if not unit_mode:\n            coord = int(round(coord / self._resolution))\n\n        tr_w, tr_sp, _, _ = fill_config[layer_id]\n\n        num_htr = int(round(2 * (tr_w + tr_sp)))\n        fill_pitch = num_htr * self.get_track_pitch(layer_id, unit_mode=True) // 2\n        
fill_pitch2 = fill_pitch // 2\n        fill_q, fill_r = divmod(coord - fill_pitch2, fill_pitch)\n\n        if fill_r == 0:\n            # exactly on track\n            if mode == -2:\n                # move to lower track\n                fill_q -= 1\n            elif mode == 2:\n                # move to upper track\n                fill_q += 1\n        else:\n            # not on track\n            if mode > 0 or (mode == 0 and fill_r >= fill_pitch2):\n                # round up\n                fill_q += 1\n\n        return self.coord_to_track(layer_id, fill_q * fill_pitch + fill_pitch2, unit_mode=True)\n\n    def transform_track(self,  # type: RoutingGrid\n                        layer_id,  # type: int\n                        track_idx,  # type: Union[float, int]\n                        dx=0,  # type: Union[float, int]\n                        dy=0,  # type: Union[float, int]\n                        orient='R0',  # type: str\n                        unit_mode=False,  # type: bool\n                        ):\n        # type: (...) 
-> Union[float, int]\n        \"\"\"Transform the given track index.\n\n        Parameters\n        ----------\n        layer_id : int\n            the layer ID.\n        track_idx : Union[float, int]\n            the track index.\n        dx : Union[float, int]\n            X shift.\n        dy : Union[float, int]\n            Y shift.\n        orient : str\n            orientation.\n        unit_mode : bool\n            True if dx/dy are given in resolution units.\n\n        Returns\n        -------\n        new_track_idx : Union[float, int]\n            the transformed track index.\n        \"\"\"\n        if not unit_mode:\n            dx = int(round(dx / self._resolution))\n            dy = int(round(dy / self._resolution))\n\n        is_x = self.get_direction(layer_id) == 'x'\n        if is_x:\n            hidx_shift = int(2 * self.coord_to_track(layer_id, dy, unit_mode=True)) + 1\n        else:\n            hidx_shift = int(2 * self.coord_to_track(layer_id, dx, unit_mode=True)) + 1\n\n        if orient == 'R0':\n            hidx_scale = 1\n        elif orient == 'R180':\n            hidx_scale = -1\n        elif orient == 'MX':\n            hidx_scale = -1 if is_x else 1\n        elif orient == 'MY':\n            hidx_scale = 1 if is_x else -1\n        else:\n            raise ValueError('Unsupported orientation: %s' % orient)\n\n        old_hidx = int(track_idx * 2 + 1)\n        new_hidx = old_hidx * hidx_scale + hidx_shift\n        if new_hidx % 2 == 1:\n            return (new_hidx - 1) // 2\n        else:\n            return (new_hidx - 1) / 2\n\n    def track_to_coord(self, layer_id, track_idx, unit_mode=False):\n        # type: (int, Union[float, int], bool) -> Union[float, int]\n        \"\"\"Convert given track number to coordinate.\n\n        Parameters\n        ----------\n        layer_id : int\n            the layer number.\n        track_idx : Union[float, int]\n            the track number.\n        unit_mode : bool\n            True to return 
coordinate in resolution units.\n\n        Returns\n        -------\n        coord : Union[float, int]\n            the coordinate perpendicular to track direction.\n        \"\"\"\n        pitch = self.get_track_pitch(layer_id, unit_mode=True)\n        coord_unit = int(pitch * track_idx + self._get_track_offset(layer_id))\n        if unit_mode:\n            return coord_unit\n        return coord_unit * self._resolution\n\n    def interval_to_track(self,  # type: RoutingGrid\n                          layer_id,  # type: int\n                          intv,  # type: Tuple[Union[float, int], Union[float, int]]\n                          unit_mode=False,  # type: bool\n                          ):\n        # type: (...) -> Tuple[Union[float, int], int]\n        \"\"\"Convert given coordinates to track number and width.\n\n        Parameters\n        ----------\n        layer_id : int\n            the layer number.\n        intv : Tuple[Union[float, int], Union[float, int]]\n            lower and upper coordinates perpendicular to the track direction.\n        unit_mode : bool\n            True if dimensions are given in resolution units.\n\n        Returns\n        -------\n        track : Union[float, int]\n            the track number\n        width : int\n            the track width, in number of tracks.\n        \"\"\"\n        res = self._resolution\n        start, stop = intv\n        if not unit_mode:\n            start = int(round(start / res))\n            stop = int(round(stop / res))\n\n        track = self.coord_to_track(layer_id, (start + stop) // 2, unit_mode=True)\n        width = stop - start\n\n        # binary search to take width override into account\n        bin_iter = BinaryIterator(1, None)\n        while bin_iter.has_next():\n            cur_ntr = bin_iter.get_next()\n            cur_w = self.get_track_width(layer_id, cur_ntr, unit_mode=True)\n            if cur_w == width:\n                return track, cur_ntr\n            elif cur_w > 
width:\n                bin_iter.down()\n            else:\n                bin_iter.up()\n\n        # never found solution; width is not quantized.\n        raise ValueError('Interval {} on layer {} width not quantized'.format(intv, layer_id))\n\n    def copy(self):\n        # type: () -> RoutingGrid\n        \"\"\"Returns a deep copy of this RoutingGrid.\"\"\"\n        cls = self.__class__\n        result = cls.__new__(cls)\n        attrs = result.__dict__\n        attrs['_tech_info'] = self._tech_info\n        attrs['_resolution'] = self._resolution\n        attrs['_layout_unit'] = self._layout_unit\n        attrs['_flip_parity'] = self._flip_parity.copy()\n        attrs['_ignore_layers'] = self._ignore_layers.copy()\n        attrs['layers'] = list(self.layers)\n        attrs['sp_tracks'] = self.sp_tracks.copy()\n        attrs['dir_tracks'] = self.dir_tracks.copy()\n        attrs['offset_tracks'] = {}\n        attrs['w_tracks'] = self.w_tracks.copy()\n        attrs['max_num_tr_tracks'] = self.max_num_tr_tracks.copy()\n        attrs['block_pitch'] = self.block_pitch.copy()\n        attrs['w_override'] = self.w_override.copy()\n        attrs['private_layers'] = list(self.private_layers)\n        for lay in self.layers:\n            attrs['w_override'][lay] = self.w_override[lay].copy()\n\n        return result\n\n    def ignore_layers_under(self, layer_id):\n        # type: (int) -> None\n        \"\"\"Ignore all layers under the given layer (inclusive) when calculating block pitches.\n\n        Parameters\n        ----------\n        layer_id : int\n            ignore this layer and below.\n        \"\"\"\n        for lay in self.layers:\n            if lay > layer_id:\n                break\n            self._ignore_layers.add(lay)\n\n    def add_new_layer(self, layer_id, tr_space, tr_width, direction,\n                      max_num_tr=100, override=False, unit_mode=False, is_private=True):\n        # type: (int, float, float, str, int, bool, bool, bool) -> 
None\n        \"\"\"Add a new private layer to this RoutingGrid.\n\n        This method is used to add customized routing grid per template on lower level layers.\n        The new layers doesn't necessarily need to follow alternating track direction, however,\n        if you do this you cannot connect to adjacent level metals.\n\n        Note: do not use this method to add/modify top level layers, as it does not calculate\n        block pitch.\n\n        Parameters\n        ----------\n        layer_id : int\n            the new layer ID.\n        tr_space : float\n            the track spacing, in layout units.\n        tr_width : float\n            the track width, in layout units.\n        direction : str\n            track direction.  'x' for horizontal, 'y' for vertical.\n        max_num_tr : int\n            maximum track width in number of tracks.\n        override : bool\n            True to override existing layers if they already exist.\n        unit_mode : bool\n            True if given lengths are in resolution units\n        is_private : bool\n            True if this is a private layer.\n        \"\"\"\n        self._ignore_layers.discard(layer_id)\n\n        if not unit_mode:\n            sp_unit = 2 * int(round(tr_space / (2 * self.resolution)))\n            w_unit = 2 * int(round(tr_width / (2 * self.resolution)))\n        else:\n            sp_unit = -(-tr_space // 2) * 2\n            w_unit = -(-tr_width // 2) * 2\n        if layer_id in self.sp_tracks:\n            # double check to see if we actually need to modify layer\n            w_cur = self.w_tracks[layer_id]\n            sp_cur = self.sp_tracks[layer_id]\n            dir_cur = self.dir_tracks[layer_id]\n\n            if w_cur == w_unit and sp_cur == sp_unit and dir_cur == direction:\n                # everything is the same, just return\n                return\n\n            if not override:\n                raise ValueError('Layer %d already on routing grid.' 
% layer_id)\n        else:\n            self.layers.append(layer_id)\n            self.layers.sort()\n\n        if is_private and layer_id not in self.private_layers:\n            self.private_layers.append(layer_id)\n            self.private_layers.sort()\n\n        self.sp_tracks[layer_id] = sp_unit\n        self.w_tracks[layer_id] = w_unit\n        self.dir_tracks[layer_id] = direction\n        self.w_override[layer_id] = {}\n        self.max_num_tr_tracks[layer_id] = max_num_tr\n        if layer_id not in self._flip_parity:\n            self._flip_parity[layer_id] = (1, 0)\n\n    def set_track_offset(self, layer_id, offset, unit_mode=False):\n        # type: (int, Union[float, int], bool) -> None\n        \"\"\"Set track offset for this RoutingGrid.\n\n        Parameters\n        ----------\n        layer_id : int\n            the routing layer ID.\n        offset : Union[float, int]\n            the track offset.\n        unit_mode : bool\n            True if the track offset is specified in resolution units.\n        \"\"\"\n        if not unit_mode:\n            offset = int(round(offset / self.resolution))\n\n        self.offset_tracks[layer_id] = offset\n\n    def add_width_override(self, layer_id, width_ntr, tr_width, unit_mode=False):\n        # type: (int, int, Union[int, float], bool) -> None\n        \"\"\"Add width override.\n\n        NOTE: call this method only directly after you construct the RoutingGrid.  
Do not\n        use this to modify an existing grid.\n\n        Parameters\n        ----------\n        layer_id : int\n            the new layer ID.\n        width_ntr : int\n            the width in number of tracks.\n        tr_width : Union[int, float]\n            the actual width in layout units.\n        unit_mode : bool\n            True if tr_width is in resolution units.\n        \"\"\"\n        if width_ntr == 1:\n            raise ValueError('Cannot override width_ntr=1.')\n\n        if not unit_mode:\n            tr_width = int(round(tr_width / self.resolution))\n\n        if layer_id not in self.w_override:\n            self.w_override[layer_id] = {width_ntr: tr_width}\n        else:\n            self.w_override[layer_id][width_ntr] = tr_width\n"
  },
  {
    "path": "bag/layout/tech.py",
    "content": "# -*- coding: utf-8 -*-\n\nfrom typing import List, Tuple, Union, Optional, Callable, TYPE_CHECKING\n\nimport abc\n\nfrom .core import TechInfo\n\nif TYPE_CHECKING:\n    from ..layout.util import BBox\n    from ..layout.template import TemplateBase\n\n\nclass TechInfoConfig(TechInfo, metaclass=abc.ABCMeta):\n    \"\"\"An implementation of TechInfo that implements most methods with a technology file.\"\"\"\n    def __init__(self, config, tech_params, mos_entry_name='mos'):\n        TechInfo.__init__(self, config['resolution'], config['layout_unit'],\n                          config['tech_lib'], tech_params)\n\n        self.config = config\n        self._mos_entry_name = mos_entry_name\n        self.idc_temp = tech_params['layout']['em']['dc_temp']\n        self.irms_dt = tech_params['layout']['em']['rms_dt']\n\n    @abc.abstractmethod\n    def get_metal_em_specs(self, layer_name, w, l=-1, vertical=False, **kwargs):\n        return float('inf'), float('inf'), float('inf')\n\n    @abc.abstractmethod\n    def get_via_em_specs(self, via_name, bm_layer, tm_layer, via_type='square',\n                         bm_dim=(-1, -1), tm_dim=(-1, -1), array=False, **kwargs):\n        return float('inf'), float('inf'), float('inf')\n\n    @abc.abstractmethod\n    def get_res_em_specs(self, res_type, w, l=-1, **kwargs):\n        return float('inf'), float('inf'), float('inf')\n\n    @abc.abstractmethod\n    def add_cell_boundary(self, template, box):\n        # type: (TemplateBase, BBox) -> None\n        pass\n\n    @abc.abstractmethod\n    def draw_device_blockage(self, template):\n        # type: (TemplateBase) -> None\n        pass\n\n    @abc.abstractmethod\n    def get_via_arr_enc(self, vname, vtype, mtype, mw_unit, is_bot):\n        # type: (...) 
-> Tuple[Optional[List[Tuple[int, int]]], Optional[Callable[[int, int], bool]]]\n        return None, None\n\n    @property\n    def pin_purpose(self):\n        return self.config.get('pin_purpose', 'pin')\n\n    def get_via_types(self, bmtype, tmtype):\n        default = [('square', 1), ('vrect', 2), ('hrect', 2)]\n        if 'via_type_order' in self.config:\n            table = self.config['via_type_order']\n            return table.get((bmtype, tmtype), default)\n        return default\n\n    def get_well_layers(self, sub_type):\n        # type: (str) -> List[Tuple[str, str]]\n        return self.config['well_layers'][sub_type]\n\n    def get_implant_layers(self, mos_type, res_type=None):\n        # type: (str, Optional[str]) -> List[Tuple[str, str]]\n        if res_type is None:\n            table = self.config[self._mos_entry_name]\n        else:\n            table = self.config['resistor']\n\n        return list(table['imp_layers'][mos_type].keys())\n\n    def get_threshold_layers(self, mos_type, threshold, res_type=None):\n        # type: (str, str, Optional[str]) -> List[Tuple[str, str]]\n        if res_type is None:\n            table = self.config[self._mos_entry_name]\n        else:\n            table = self.config['resistor']\n\n        return list(table['thres_layers'][mos_type][threshold].keys())\n\n    def get_exclude_layer(self, layer_id):\n        # type: (int) -> Tuple[str, str]\n        \"\"\"Returns the metal exclude layer\"\"\"\n        return self.config['metal_exclude_table'][layer_id]\n\n    def get_dnw_margin_unit(self, dnw_mode):\n        # type: (str) -> int\n        return self.config['dnw_margins'][dnw_mode]\n\n    def get_dnw_layers(self):\n        # type: () -> List[Tuple[str, str]]\n        return self.config[self._mos_entry_name]['dnw_layers']\n\n    def get_res_metal_layers(self, layer_id):\n        # type: (int) -> List[Tuple[str, str]]\n        return self.config['res_metal_layer_table'][layer_id]\n\n    def 
get_metal_dummy_layers(self, layer_id):\n        # type: (int) -> List[Tuple[str, str]]\n        return self.config['metal_dummy_table'][layer_id]\n\n    def use_flip_parity(self):\n        # type: () -> bool\n        return self.config['use_flip_parity']\n\n    def get_layer_name(self, layer_id):\n        # type: (int) -> str\n        name_dict = self.config['layer_name']\n        return name_dict[layer_id]\n\n    def get_layer_id(self, layer_name):\n        # type: (str) -> int\n        for key, val in self.config['layer_name'].items():\n            if val == layer_name:\n                return key\n        raise ValueError('Unknown layer: %s' % layer_name)\n\n    def get_layer_type(self, layer_name):\n        # type: (str) -> str\n        type_dict = self.config['layer_type']\n        return type_dict[layer_name]\n\n    def get_idc_scale_factor(self, temp, mtype, is_res=False):\n        # type: (float, str, bool) -> float\n        if is_res:\n            mtype = 'res'\n        idc_em_scale = self.config['idc_em_scale']\n        if mtype in idc_em_scale:\n            idc_params = idc_em_scale[mtype]\n        else:\n            idc_params = idc_em_scale['default']\n\n        temp_list = idc_params['temp']\n        scale_list = idc_params['scale']\n\n        for temp_test, scale in zip(temp_list, scale_list):\n            if temp <= temp_test:\n                return scale\n        return scale_list[-1]\n\n    def get_via_name(self, bot_layer_id):\n        # type: (int) -> str\n        return self.config['via_name'][bot_layer_id]\n\n    def get_via_id(self, bot_layer, top_layer):\n        # type: (str, str) -> str\n        return self.config['via_id'][(bot_layer, top_layer)]\n\n    def get_via_drc_info(self, vname, vtype, mtype, mw_unit, is_bot):\n        via_config = self.config['via']\n        if vname not in via_config:\n            raise ValueError('Unsupported vname %s' % vname)\n\n        via_config = via_config[vname]\n        if vtype.startswith('vrect') 
and vtype not in via_config:\n            # trying vertical rectangle via, but it does not exist,\n            # so try rotating horizontal rectangle instead\n            rotate = True\n            vtype2 = 'hrect' + vtype[5:]\n        else:\n            rotate = False\n            vtype2 = vtype\n        if vtype2 not in via_config:\n            raise ValueError('Unsupported vtype %s' % vtype2)\n\n        via_config = via_config[vtype2]\n\n        dim = via_config['dim']\n        sp = via_config['sp']\n        sp2_list = via_config.get('sp2', None)\n        sp3_list = via_config.get('sp3', None)\n        sp6_list = via_config.get('sp6', None)\n\n        if not is_bot or via_config['bot_enc'] is None:\n            enc_data = via_config['top_enc']\n        else:\n            enc_data = via_config['bot_enc']\n\n        enc_w_list = enc_data['w_list']\n        enc_list = enc_data['enc_list']\n\n        enc_cur = []\n        for mw_max, enc in zip(enc_w_list, enc_list):\n            if mw_unit <= mw_max:\n                enc_cur = enc\n                break\n\n        arr_enc, arr_test_tmp = self.get_via_arr_enc(vname, vtype, mtype, mw_unit, is_bot)\n        arr_test = arr_test_tmp\n\n        if rotate:\n            sp = sp[1], sp[0]\n            dim = dim[1], dim[0]\n            enc_cur = [(yv, xv) for xv, yv in enc_cur]\n            if sp2_list is not None:\n                sp2_list = [(spy, spx) for spx, spy in sp2_list]\n            if sp3_list is not None:\n                sp3_list = [(spy, spx) for spx, spy in sp3_list]\n            if sp6_list is not None:\n                sp6_list = [(spy, spx) for spx, spy in sp6_list]\n            if arr_enc is not None:\n                arr_enc = [(yv, xv) for xv, yv in arr_enc]\n            if arr_test_tmp is not None:\n                def arr_test(nrow, ncol):\n                    return arr_test_tmp(ncol, nrow)\n\n        return sp, sp2_list, sp3_list, sp6_list, dim, enc_cur, arr_enc, arr_test\n\n    def 
_space_helper(self, config_name, layer_type, width):\n        sp_min_config = self.config[config_name]\n        if layer_type not in sp_min_config:\n            raise ValueError('Unsupported layer type: %s' % layer_type)\n\n        sp_min_config = sp_min_config[layer_type]\n        w_list = sp_min_config['w_list']\n        sp_list = sp_min_config['sp_list']\n\n        for w, sp in zip(w_list, sp_list):\n            if width <= w:\n                return sp\n        return None\n\n    def get_min_space_unit(self, layer_type, w_unit, same_color=False):\n        # type: (str, int, bool) -> int\n        if not same_color or 'sp_sc_min' not in self.config:\n            config_name = 'sp_min'\n        else:\n            config_name = 'sp_sc_min'\n\n        return self._space_helper(config_name, layer_type, w_unit)\n\n    def get_min_line_end_space_unit(self, layer_type, w_unit):\n        return self._space_helper('sp_le_min', layer_type, w_unit)\n\n    def get_min_space(self, layer_type, width, unit_mode=False, same_color=False):\n        # type: (str, float, bool, bool) -> Union[float, int]\n        res = self.config['resolution']\n        if not unit_mode:\n            width = int(round(width / res))\n\n        ans = self.get_min_space_unit(layer_type, width, same_color=same_color)\n\n        if unit_mode:\n            return ans\n        return ans * res\n\n    def get_min_line_end_space(self, layer_type, width, unit_mode=False):\n        # type: (str, float, bool) -> Union[float, int]\n        res = self.config['resolution']\n        if not unit_mode:\n            width = int(round(width / res))\n\n        ans = self.get_min_line_end_space_unit(layer_type, width)\n\n        if unit_mode:\n            return ans\n        return ans * res\n\n    def layer_id_to_type(self, layer_id):\n        name_dict = self.config['layer_name']\n        type_dict = self.config['layer_type']\n        return type_dict[name_dict[layer_id]]\n\n    def get_min_length_unit(self, layer_type, 
w_unit):\n        len_min_config = self.config['len_min']\n        if layer_type not in len_min_config:\n            raise ValueError('Unsupported layer type: %s' % layer_type)\n\n        w_list = len_min_config[layer_type]['w_list']\n        w_al_list = len_min_config[layer_type]['w_al_list']\n        md_list = len_min_config[layer_type]['md_list']\n        md_al_list = len_min_config[layer_type]['md_al_list']\n\n        # get minimum length from width spec\n        l_unit = 0\n        for w, (area, len_min) in zip(w_list, w_al_list):\n            if w_unit <= w:\n                l_unit = max(len_min, -(-area // w_unit))\n                break\n\n        # check maximum dimension spec\n        for max_dim, (area, len_min) in zip(reversed(md_list), reversed(md_al_list)):\n            if max(w_unit, l_unit) > max_dim:\n                return l_unit\n            l_unit = max(l_unit, len_min, -(-area // w_unit))\n\n        return -(-l_unit // 2) * 2\n\n    def get_min_length(self, layer_type, width):\n        res = self.resolution\n        w_unit = int(round(width / res))\n        return res * self.get_min_length_unit(layer_type, w_unit)\n\n    def get_res_rsquare(self, res_type):\n        return self.config['resistor']['info'][res_type]['rsq']\n\n    def get_res_width_bounds(self, res_type):\n        return self.config['resistor']['info'][res_type]['w_bounds']\n\n    def get_res_length_bounds(self, res_type):\n        return self.config['resistor']['info'][res_type]['l_bounds']\n\n    def get_res_min_nsquare(self, res_type):\n        return self.config['resistor']['info'][res_type]['min_nsq']\n"
  },
  {
    "path": "bag/layout/template.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This module defines layout template classes.\n\"\"\"\n\nfrom typing import TYPE_CHECKING, Union, Dict, Any, List, Set, TypeVar, Type, \\\n    Optional, Tuple, Iterable, Sequence, Callable, Generator, cast\n\nimport os\nimport abc\nimport copy\nimport time\nimport bisect\nimport pickle\nfrom itertools import islice, product, chain\nimport math\n\nimport yaml\nimport shapely.ops as shops\nimport shapely.geometry as shgeo\n\nfrom ..util.cache import DesignMaster, MasterDB\nfrom ..util.interval import IntervalSet\nfrom .core import BagLayout\nfrom .util import BBox, BBoxArray, tuple2_to_int, tuple2_to_float_int\nfrom ..io import get_encoding, open_file\nfrom .routing import Port, TrackID, WireArray\nfrom .routing.fill import UsedTracks, fill_symmetric_max_num_info, fill_symmetric_interval, \\\n    NoFillChoiceError\nfrom .objects import Instance, Rect, Via, Path, Polygon\n\nif TYPE_CHECKING:\n    from bag.core import BagProject\n    from .objects import Polygon, Blockage, Boundary\n    from .objects import InstanceInfo, ViaInfo, PinInfo\n    from .routing import RoutingGrid\n\n# try to import optional modules\ntry:\n    import cybagoa\nexcept ImportError:\n    cybagoa = None\ntry:\n    # noinspection PyPackageRequirements\n    import gdspy\nexcept ImportError:\n    gdspy = None\n\nTemplateType = TypeVar('TemplateType', bound='TemplateBase')\n\n\nclass TemplateDB(MasterDB):\n    \"\"\"A database of all templates.\n\n    This class is responsible for keeping track of template libraries and\n    creating new templates.\n\n    Parameters\n    ----------\n    lib_defs : str\n        path to the template library definition file.\n    routing_grid : RoutingGrid\n        the default RoutingGrid object.\n    lib_name : str\n        the cadence library to put all generated templates in.\n    prj : Optional[BagProject]\n        the BagProject instance.\n    name_prefix : str\n        generated layout name prefix.\n    name_suffix : 
str\n        generated layout name suffix.\n    use_cybagoa : bool\n        True to use cybagoa module to accelerate layout.\n    gds_lay_file : str\n        The GDS layer/purpose mapping file.\n    flatten : bool\n        True to compute flattened layout.\n    **kwargs :\n        additional arguments.\n    \"\"\"\n\n    def __init__(self,  # type: TemplateDB\n                 lib_defs,  # type: str\n                 routing_grid,  # type: RoutingGrid\n                 lib_name,  # type: str\n                 prj=None,  # type: Optional[BagProject]\n                 name_prefix='',  # type: str\n                 name_suffix='',  # type: str\n                 use_cybagoa=False,  # type: bool\n                 gds_lay_file='',  # type: str\n                 flatten=False,  # type: bool\n                 **kwargs):\n        # type: (...) -> None\n        MasterDB.__init__(self, lib_name, lib_defs=lib_defs,\n                          name_prefix=name_prefix, name_suffix=name_suffix)\n\n        pure_oa = kwargs.get('pure_oa', False)\n        cache_dir = kwargs.get('cache_dir', '')\n\n        if gds_lay_file:\n            if gdspy is None:\n                raise ValueError('gdspy module not found; cannot export GDS.')\n            # GDS export takes precedence over other options\n            use_cybagoa = pure_oa = False\n        if pure_oa:\n            if cybagoa is None:\n                raise ValueError('Cannot use pure OA mode when cybagoa is not found.')\n            use_cybagoa = True\n\n        self._prj = prj\n        self._grid = routing_grid\n        self._use_cybagoa = use_cybagoa and cybagoa is not None\n        self._gds_lay_file = gds_lay_file\n        self._flatten = flatten\n        self._pure_oa = pure_oa\n\n        if cache_dir and os.path.isdir(cache_dir):\n            print('loading template cache...')\n            start = time.time()\n            cache_dir = os.path.realpath(cache_dir)\n            with open(os.path.join(cache_dir, 
'db_mapping.pickle'), 'rb') as f:\n                info = pickle.load(f)\n            for key, fname in info.items():\n                params = dict(cache_fname=fname)\n                master = CachedTemplate(self, lib_name, params, self.used_cell_names,\n                                        use_cybagoa=self._use_cybagoa)\n                master.finalize()\n                self.register_master(key, master)\n                self.register_master(master.key, master)\n            end = time.time()\n            print('cache loading took %.5g seconds.' % (end - start))\n\n    def create_master_instance(self, gen_cls, lib_name, params, used_cell_names, **kwargs):\n        # type: (Type[TemplateType], str, Dict[str, Any], Set[str], **Any) -> TemplateType\n        \"\"\"Create a new non-finalized master instance.\n\n        This instance is used to determine if we created this instance before.\n\n        Parameters\n        ----------\n        gen_cls : Type[TemplateType]\n            the generator Python class.\n        lib_name : str\n            generated instance library name.\n        params : Dict[str, Any]\n            instance parameters dictionary.\n        used_cell_names : Set[str]\n            a set of all used cell names.\n        **kwargs: Any\n            optional arguments for the generator.\n\n        Returns\n        -------\n        master : TemplateType\n            the non-finalized generated instance.\n        \"\"\"\n        # noinspection PyCallingNonCallable\n        return gen_cls(self, lib_name, params, used_cell_names, **kwargs)\n\n    def create_masters_in_db(self, lib_name, content_list, debug=False):\n        # type: (str, Sequence[Any], bool) -> None\n        \"\"\"Create the masters in the design database.\n\n        Parameters\n        ----------\n        lib_name : str\n            library to create the designs in.\n        content_list : Sequence[Any]\n            a list of the master contents.  
Must be created in this order.\n        debug : bool\n            True to print debug messages\n        \"\"\"\n        if self._prj is None:\n            raise ValueError('BagProject is not defined.')\n\n        if self._gds_lay_file:\n            self._create_gds(lib_name, content_list, debug=debug)\n        elif self._use_cybagoa:\n            # remove write locks from old layouts\n            cell_view_list = [(item[0], 'layout') for item in content_list]\n            if self._pure_oa:\n                pass\n            else:\n                # create library if it does not exist\n                self._prj.create_library(self._lib_name)\n                self._prj.release_write_locks(self._lib_name, cell_view_list)\n\n            if debug:\n                print('Instantiating layout')\n            # create OALayouts\n            start = time.time()\n            if 'CDSLIBPATH' in os.environ:\n                cds_lib_path = os.path.abspath(os.path.join(os.environ['CDSLIBPATH'], 'cds.lib'))\n            else:\n                cds_lib_path = os.path.abspath('./cds.lib')\n            with cybagoa.PyOALayoutLibrary(cds_lib_path, self._lib_name, self._prj.default_lib_path,\n                                           self._prj.tech_info.via_tech_name,\n                                           get_encoding()) as lib:\n                lib.add_layer('prBoundary', 235)\n                lib.add_purpose('label', 237)\n                lib.add_purpose('drawing1', 241)\n                lib.add_purpose('drawing2', 242)\n                lib.add_purpose('drawing3', 243)\n                lib.add_purpose('drawing4', 244)\n                lib.add_purpose('drawing5', 245)\n                lib.add_purpose('drawing6', 246)\n                lib.add_purpose('drawing7', 247)\n                lib.add_purpose('drawing8', 248)\n                lib.add_purpose('drawing9', 249)\n                lib.add_purpose('boundary', 250)\n                lib.add_purpose('pin', 251)\n\n                
for cell_name, oa_layout in content_list:\n                    lib.create_layout(cell_name, 'layout', oa_layout)\n            end = time.time()\n            if debug:\n                print('layout instantiation took %.4g seconds' % (end - start))\n        else:\n            # create library if it does not exist\n            self._prj.create_library(self._lib_name)\n\n            if debug:\n                print('Instantiating layout')\n            via_tech_name = self._grid.tech_info.via_tech_name\n            start = time.time()\n            self._prj.instantiate_layout(self._lib_name, 'layout', via_tech_name, content_list)\n            end = time.time()\n            if debug:\n                print('layout instantiation took %.4g seconds' % (end - start))\n\n    @property\n    def grid(self):\n        # type: () -> RoutingGrid\n        \"\"\"Returns the default routing grid instance.\"\"\"\n        return self._grid\n\n    def new_template(self, lib_name='', temp_name='', params=None, temp_cls=None, debug=False,\n                     **kwargs):\n        # type: (str, str, Dict[str, Any], Type[TemplateType], bool, **Any) -> TemplateType\n        \"\"\"Create a new template.\n\n        Parameters\n        ----------\n        lib_name : str\n            template library name.\n        temp_name : str\n            template name\n        params : Dict[str, Any]\n            the parameter dictionary.\n        temp_cls : Type[TemplateType]\n            the template class to instantiate.\n        debug : bool\n            True to print debug messages.\n        **kwargs : Any\n            optional template parameters.\n\n        Returns\n        -------\n        template : TemplateType\n            the new template instance.\n        \"\"\"\n        kwargs['use_cybagoa'] = self._use_cybagoa\n        master = self.new_master(lib_name=lib_name, cell_name=temp_name, params=params,\n                                 gen_cls=temp_cls, debug=debug, **kwargs)\n\n        return 
master\n\n    def instantiate_layout(self, prj, template, top_cell_name=None, debug=False, rename_dict=None):\n        # type: (BagProject, TemplateBase, Optional[str], bool, Optional[Dict[str, str]]) -> None\n        \"\"\"Instantiate the layout of the given :class:`~bag.layout.template.TemplateBase`.\n\n        Parameters\n        ----------\n        prj : BagProject\n            the :class:`~bag.BagProject` instance used to create layout.\n        template : TemplateBase\n            the :class:`~bag.layout.template.TemplateBase` to instantiate.\n        top_cell_name : Optional[str]\n            name of the top level cell.  If None, a default name is used.\n        debug : bool\n            True to print debugging messages\n        rename_dict : Optional[Dict[str, str]]\n            optional master cell renaming dictionary.\n        \"\"\"\n        self.batch_layout(prj, [template], [top_cell_name], debug=debug, rename_dict=rename_dict)\n\n    def batch_layout(self,\n                     prj,  # type: BagProject\n                     template_list,  # type: Sequence[TemplateBase]\n                     name_list=None,  # type: Optional[Sequence[Optional[str]]]\n                     lib_name='',  # type: str\n                     debug=False,  # type: bool\n                     rename_dict=None,  # type: Optional[Dict[str, str]]\n                     ):\n        # type: (...) -> None\n        \"\"\"Instantiate all given templates.\n\n        Parameters\n        ----------\n        prj : BagProject\n            the :class:`~bag.BagProject` instance used to create layout.\n        template_list : Sequence[TemplateBase]\n            list of templates to instantiate.\n        name_list : Optional[Sequence[Optional[str]]]\n            list of template layout names.  If not given, default names will be used.\n        lib_name : str\n            Library to create the masters in.  
If empty or None, use default library.\n        debug : bool\n            True to print debugging messages\n        rename_dict : Optional[Dict[str, str]]\n            optional master cell renaming dictionary.\n        \"\"\"\n        self._prj = prj\n        self.instantiate_masters(template_list, name_list=name_list, lib_name=lib_name,\n                                 debug=debug, rename_dict=rename_dict)\n\n    def save_to_cache(self, temp_list, dir_name, debug=False):\n        os.makedirs(dir_name, exist_ok=True)\n\n        info = {}\n        cnt = 0\n        for master in temp_list:\n            fname = os.path.join(dir_name, str(cnt))\n            key = master.key\n            if key not in info:\n                master.write_to_disk(fname, self.lib_name, master.cell_name, debug=debug)\n                info[key] = fname\n            cnt += 1\n\n        with open(os.path.join(dir_name, 'db_mapping.pickle'), 'wb') as f:\n            pickle.dump(info, f, protocol=-1)\n\n    def _create_gds(self, lib_name, content_list, debug=False):\n        # type: (str, Sequence[Any], bool) -> None\n        \"\"\"Create a GDS file containing the given layouts\n\n        Parameters\n        ----------\n        lib_name : str\n            library to create the designs in.\n        content_list : Sequence[Any]\n            a list of the master contents.  
Must be created in this order.\n        debug : bool\n            True to print debug messages\n        \"\"\"\n        tech_info = self.grid.tech_info\n        lay_unit = tech_info.layout_unit\n        res = tech_info.resolution\n\n        with open(self._gds_lay_file, 'r') as f:\n            lay_info = yaml.load(f)\n            lay_map = lay_info['layer_map']\n            via_info = lay_info['via_info']\n\n        out_fname = '%s.gds' % lib_name\n        gds_lib = gdspy.GdsLibrary(name=lib_name, unit=lay_unit, precision=res * lay_unit)\n        cell_dict = gds_lib.cell_dict\n        if debug:\n            print('Instantiating layout')\n\n        start = time.time()\n        for content in content_list:\n            (cell_name, inst_tot_list, rect_list, via_list, pin_list,\n             path_list, blockage_list, boundary_list, polygon_list) = content\n            gds_cell = gdspy.Cell(cell_name, exclude_from_current=True)\n            gds_lib.add(gds_cell)\n\n            # add instances\n            for inst_info in inst_tot_list:  # type: InstanceInfo\n                if inst_info.params is not None:\n                    raise ValueError('Cannot instantiate PCells in GDS.')\n                num_rows = inst_info.num_rows\n                num_cols = inst_info.num_cols\n                angle, reflect = inst_info.angle_reflect\n                if num_rows > 1 or num_cols > 1:\n                    cur_inst = gdspy.CellArray(cell_dict[inst_info.cell], num_cols, num_rows,\n                                               (inst_info.sp_cols, inst_info.sp_rows),\n                                               origin=inst_info.loc, rotation=angle,\n                                               x_reflection=reflect)\n                else:\n                    cur_inst = gdspy.CellReference(cell_dict[inst_info.cell], origin=inst_info.loc,\n                                                   rotation=angle, x_reflection=reflect)\n                gds_cell.add(cur_inst)\n\n       
     # add rectangles\n            for rect in rect_list:\n                nx, ny = rect.get('arr_nx', 1), rect.get('arr_ny', 1)\n                (x0, y0), (x1, y1) = rect['bbox']\n                lay_id, purp_id = lay_map[tuple(rect['layer'])]\n\n                if nx > 1 or ny > 1:\n                    spx, spy = rect['arr_spx'], rect['arr_spy']\n                    for xidx in range(nx):\n                        dx = xidx * spx\n                        for yidx in range(ny):\n                            dy = yidx * spy\n                            cur_rect = gdspy.Rectangle((x0 + dx, y0 + dy), (x1 + dx, y1 + dy),\n                                                       layer=lay_id, datatype=purp_id)\n                            gds_cell.add(cur_rect)\n                else:\n                    cur_rect = gdspy.Rectangle((x0, y0), (x1, y1), layer=lay_id, datatype=purp_id)\n                    gds_cell.add(cur_rect)\n\n            # add vias\n            for via in via_list:  # type: ViaInfo\n                via_lay_info = via_info[via.id]\n\n                nx, ny = via.arr_nx, via.arr_ny\n                x0, y0 = via.loc\n                if nx > 1 or ny > 1:\n                    spx, spy = via.arr_spx, via.arr_spy\n                    for xidx in range(nx):\n                        xc = x0 + xidx * spx\n                        for yidx in range(ny):\n                            yc = y0 + yidx * spy\n                            self._add_gds_via(gds_cell, via, lay_map, via_lay_info, xc, yc)\n                else:\n                    self._add_gds_via(gds_cell, via, lay_map, via_lay_info, x0, y0)\n\n            # add pins\n            for pin in pin_list:  # type: PinInfo\n                lay_id, purp_id = lay_map[pin.layer]\n                bbox = pin.bbox\n                label = pin.label\n                if pin.make_rect:\n                    cur_rect = gdspy.Rectangle((bbox.left, bbox.bottom), (bbox.right, bbox.top),\n                                         
       layer=lay_id, datatype=purp_id)
                    gds_cell.add(cur_rect)
                # orient the pin label along the longer dimension of the pin box
                angle = 90 if bbox.height_unit > bbox.width_unit else 0
                cur_lbl = gdspy.Label(label, (bbox.xc, bbox.yc), rotation=angle,
                                      layer=lay_id, texttype=purp_id)
                gds_cell.add(cur_lbl)

            # paths, blockages, and boundaries are not exported to GDS (no-ops)
            for path in path_list:
                pass

            for blockage in blockage_list:
                pass

            for boundary in boundary_list:
                pass

            for polygon in polygon_list:
                lay_id, purp_id = lay_map[polygon['layer']]
                cur_poly = gdspy.Polygon(polygon['points'], layer=lay_id, datatype=purp_id,
                                         verbose=False)
                # fracture to satisfy GDS vertex-count limits
                gds_cell.add(cur_poly.fracture(precision=res))

        gds_lib.write_gds(out_fname)
        end = time.time()
        if debug:
            print('layout instantiation took %.4g seconds' % (end - start))

    def _add_gds_via(self, gds_cell, via, lay_map, via_lay_info, x0, y0):
        """Draw a single via (cut array plus metal enclosures) into a gdspy cell.

        (x0, y0) is the center of the via array.  Draws the bottom and top metal
        enclosure rectangles and the individual cut rectangles.  Cut dimensions
        fall back to the layermap defaults when the ViaInfo values are negative.
        NOTE(review): x0/y0 appear to be in layout units (they are compared
        against self.grid.resolution below) — confirm against callers.
        """
        blay, bpurp = lay_map[via_lay_info['bot_layer']]
        tlay, tpurp = lay_map[via_lay_info['top_layer']]
        vlay, vpurp = lay_map[via_lay_info['via_layer']]
        cw, ch = via.cut_width, via.cut_height
        # negative cut dimensions mean "use the technology default from the layermap"
        if cw < 0:
            cw = via_lay_info['cut_width']
        if ch < 0:
            ch = via_lay_info['cut_height']

        num_cols, num_rows = via.num_cols, via.num_rows
        sp_cols, sp_rows = via.sp_cols, via.sp_rows
        # total extent of the cut array, including inter-cut spacing
        w_arr = num_cols * cw + (num_cols - 1) * sp_cols
        h_arr = num_rows * ch + (num_rows - 1) * sp_rows
        
        # shift from array center to the lower-left corner of the cut array
        x0 -= w_arr / 2
        y0 -= h_arr / 2
        # If the via array is odd dimension, prevent off-grid points
        if int(round(w_arr / self.grid.resolution)) % 2 == 1:
            x0 -= 0.5 * self.grid.resolution
        if int(round(h_arr / self.grid.resolution)) % 2 == 1:
            y0 -= 0.5 * self.grid.resolution

        # enclosure extensions: (left, right, top, bottom) for bottom/top metal
        bl, br, bt, bb = via.enc1
        tl, tr, tt, tb = via.enc2
        bot_p0, bot_p1 = (x0 - bl, y0 - bb), (x0 + w_arr + br, y0 + h_arr + bt)
        top_p0, top_p1 = (x0 - tl, y0 - tb), (x0 + w_arr + tr, y0 + h_arr + tt)

        cur_rect = gdspy.Rectangle(bot_p0, bot_p1, layer=blay, datatype=bpurp)
        gds_cell.add(cur_rect)
        cur_rect = gdspy.Rectangle(top_p0, top_p1, layer=tlay, datatype=tpurp)
        gds_cell.add(cur_rect)

        # draw each individual cut rectangle
        for xidx in range(num_cols):
            dx = xidx * (cw + sp_cols)
            for yidx in range(num_rows):
                dy = yidx * (ch + sp_rows)
                cur_rect = gdspy.Rectangle((x0 + dx, y0 + dy), (x0 + cw + dx, y0 + ch + dy),
                                           layer=vlay, datatype=vpurp)
                gds_cell.add(cur_rect)


class TemplateBase(DesignMaster, metaclass=abc.ABCMeta):
    """The base template class.

    Parameters
    ----------
    temp_db : TemplateDB
            the template database.
    lib_name : str
        the layout library name.
    params : Dict[str, Any]
        the parameter values.
    used_names : Set[str]
        a set of already used cell names.
    **kwargs
        dictionary of the following optional parameters:

        grid : RoutingGrid
            the routing grid to use for this template.
        use_cybagoa : bool
            True to use cybagoa module to accelerate layout.

    Attributes
    ----------
    pins : dict
        the pins dictionary.
    children : List[str]
        a list of template cells this template uses.
    params : Dict[str, Any]
        the parameter values of this template.
    """

    def __init__(self, temp_db, lib_name, params, used_names, **kwargs):
        # type: (TemplateDB, str, Dict[str, Any], Set[str], **Any) -> None

        use_cybagoa = kwargs.get('use_cybagoa', False)

        # initialize template attributes
 self._parent_grid = kwargs.get('grid', temp_db.grid)
        # work on a private copy: set_flip_parity() below mutates self._grid,
        # and the parent's grid must stay untouched.
        self._grid = self._parent_grid.copy()
        self._layout = BagLayout(self._grid, use_cybagoa=use_cybagoa)
        self._size = None  # type: Optional[Tuple[int, int, int]]
        self._ports = {}  # type: Dict[str, Port]
        self._port_params = {}  # type: Dict[str, dict]
        self._prim_ports = {}  # type: Dict[str, Port]
        self._prim_port_params = {}  # type: Dict[str, dict]
        self._array_box = None  # type: Optional[BBox]
        self._fill_box = None  # type: Optional[BBox]
        self.prim_top_layer = None  # type: Optional[int]
        self.prim_bound_box = None  # type: Optional[BBox]
        self._used_tracks = UsedTracks()
        self._track_boxes = {}  # type: Dict[int, BBox]
        self._merge_used_tracks = False

        # add hidden parameters
        if 'hidden_params' in kwargs:
            hidden_params = kwargs['hidden_params'].copy()
        else:
            hidden_params = {}
        # every template implicitly accepts a flip_parity hidden parameter
        hidden_params['flip_parity'] = None

        DesignMaster.__init__(self, temp_db, lib_name, params, used_names,
                              hidden_params=hidden_params)
        # update RoutingGrid
        fp_dict = self.params['flip_parity']
        if fp_dict is not None:
            self._grid.set_flip_parity(fp_dict)

    @abc.abstractmethod
    def draw_layout(self):
        # type: () -> None
        """Draw the layout of this template.

        Override this method to create the layout.

        WARNING: you should never call this method yourself.
        """
        pass

    def populate_params(self, table, params_info, default_params, **kwargs):
        # type: (Dict[str, Any], Dict[str, str], Dict[str, Any], **Any) -> None
        """Fill params dictionary with values from table and default_params"""
        DesignMaster.populate_params(self, table, params_info, default_params, **kwargs)

        # add hidden parameters
        hidden_params = kwargs.get('hidden_params', {})
        for name, value in hidden_params.items():
            self.params[name] = table.get(name, value)

        # always add flip_parity parameter
        if 'flip_parity' not in self.params:
            self.params['flip_parity'] = table.get('flip_parity', None)
        # update RoutingGrid
        fp_dict = self.params['flip_parity']
        if fp_dict is not None:
            self._grid.set_flip_parity(fp_dict)

    def get_master_basename(self):
        # type: () -> str
        """Returns the base name to use for this instance.

        Returns
        -------
        basename : str
            the base name for this instance.
        """
        return self.get_layout_basename()

    def get_layout_basename(self):
        # type: () -> str
        """Returns the base name for this template.

        Returns
        -------
        base_name : str
            the base name of this template.
        """
        return self.__class__.__name__

    def get_content(self, lib_name, rename_fun):
        # type: (str, Callable[[str], str]) -> Union[List[Any], Tuple[str, 'cybagoa.PyOALayout']]
        """Returns the content of this master instance.

        Parameters
        ----------
        lib_name : str
            the library to create the design masters in.
        rename_fun : Callable[[str], str]
            a function that renames design masters.

        Returns
        -------
        content : Union[List[Any], Tuple[str, 'cybagoa.PyOALayout']]
            a list describing this layout, or PyOALayout if cybagoa is enabled.
        """
        if not self.finalized:
            raise ValueError('This template is not finalized yet')
        return self._layout.get_content(lib_name, self.cell_name, rename_fun)

    def finalize(self):
        # type: () -> None
        """Finalize this master instance.
\"\"\"\n        # create layout\n        self.draw_layout()\n\n        # finalize this template\n        self.grid.tech_info.finalize_template(self)\n\n        # update track parities of all instances\n        if self.grid.tech_info.use_flip_parity():\n            self._update_flip_parity()\n\n        # construct port objects\n        for net_name, port_params in self._port_params.items():\n            pin_dict = port_params['pins']\n            label = port_params['label']\n            if port_params['show']:\n                label = port_params['label']\n                for wire_arr_list in pin_dict.values():\n                    for wire_arr in wire_arr_list:  # type: WireArray\n                        for layer_name, bbox in wire_arr.wire_iter(self.grid):\n                            self._layout.add_pin(net_name, layer_name, bbox, label=label)\n            self._ports[net_name] = Port(net_name, pin_dict, label=label)\n\n        # construct primitive port objects\n        for net_name, port_params in self._prim_port_params.items():\n            pin_dict = port_params['pins']\n            label = port_params['label']\n            if port_params['show']:\n                label = port_params['label']\n                for layer, box_list in pin_dict.items():\n                    for box in box_list:\n                        self._layout.add_pin(net_name, layer, box, label=label)\n            self._ports[net_name] = Port(net_name, pin_dict, label=label)\n\n        # finalize layout\n        self._layout.finalize()\n        # get set of children keys\n        self.children = self._layout.get_masters_set()\n\n        for layer_id, bbox in self._used_tracks.track_box_iter():\n            self._track_boxes[layer_id] = bbox\n        if not self._merge_used_tracks:\n            for inst in self._layout.inst_iter():\n                for layer_id, bbox in inst.track_bbox_iter():\n                    if layer_id not in self._track_boxes:\n                        
self._track_boxes[layer_id] = bbox\n                    else:\n                        self._track_boxes[layer_id] = bbox.merge(self._track_boxes[layer_id])\n\n        # call super finalize routine\n        DesignMaster.finalize(self)\n\n    @classmethod\n    def get_cache_properties(cls):\n        # type: () -> List[str]\n        \"\"\"Returns a list of properties to cache.\"\"\"\n        return []\n\n    @property\n    def template_db(self):\n        # type: () -> TemplateDB\n        \"\"\"Returns the template database object\"\"\"\n        # noinspection PyTypeChecker\n        return self.master_db\n\n    @property\n    def is_empty(self):\n        # type: () -> bool\n        \"\"\"Returns True if this template is empty.\"\"\"\n        return self._layout.is_empty\n\n    @property\n    def grid(self):\n        # type: () -> RoutingGrid\n        \"\"\"Returns the RoutingGrid object\"\"\"\n        return self._grid\n\n    @grid.setter\n    def grid(self, new_grid):\n        # type: (RoutingGrid) -> None\n        \"\"\"Change the RoutingGrid of this template.\"\"\"\n        if not self._finalized:\n            self._grid = new_grid\n        else:\n            raise RuntimeError('Template already finalized.')\n\n    @property\n    def array_box(self):\n        # type: () -> Optional[BBox]\n        \"\"\"Returns the array/abutment bounding box of this template.\"\"\"\n        return self._array_box\n\n    @array_box.setter\n    def array_box(self, new_array_box):\n        # type: (BBox) -> None\n        \"\"\"Sets the array/abutment bound box of this template.\"\"\"\n        if not self._finalized:\n            self._array_box = new_array_box\n        else:\n            raise RuntimeError('Template already finalized.')\n\n    @property\n    def fill_box(self):\n        # type: () -> Optional[BBox]\n        \"\"\"Returns the dummy fill bounding box of this template.\"\"\"\n        return self._fill_box\n\n    @fill_box.setter\n    def fill_box(self, new_box):\n        
 # type: (BBox) -> None
        """Sets the dummy fill bounding box of this template."""
        if not self._finalized:
            self._fill_box = new_box
        else:
            raise RuntimeError('Template already finalized.')

    @property
    def top_layer(self):
        # type: () -> int
        """Returns the top layer used in this template."""
        if self.size is None:
            if self.prim_top_layer is None:
                raise Exception('Both size and prim_top_layer are unset.')
            return self.prim_top_layer
        return self.size[0]

    @property
    def size(self):
        # type: () -> Optional[Tuple[int, int, int]]
        """The size of this template, in (layer, num_x_block,  num_y_block) format."""
        return self._size

    @property
    def bound_box(self):
        # type: () -> Optional[BBox]
        """Returns the BBox with the size of this template.  None if size not set yet."""
        mysize = self.size
        if mysize is None:
            if self.prim_bound_box is None:
                raise ValueError('Both size and prim_bound_box are unset.')
            return self.prim_bound_box

        wblk, hblk = self.grid.get_size_dimension(mysize, unit_mode=True)
        return BBox(0, 0, wblk, hblk, self.grid.resolution, unit_mode=True)

    @size.setter
    def size(self, new_size):
        # type: (Tuple[int, int, int]) -> None
        """Sets the size of this template."""
        if not self._finalized:
            self._size = new_size
        else:
            raise RuntimeError('Template already finalized.')

    @property
    def used_tracks(self):
        # type: () -> UsedTracks
        """Returns the UsedTracks object recording used routing tracks."""
        return self._used_tracks

    def _update_flip_parity(self):
        # type: () -> None
        """Update all instances in this template to have the correct track parity.
        """
        for inst in self._layout.inst_iter():
            top_layer = inst.master.top_layer
            bot_layer = self.grid.get_bot_common_layer(inst.master.grid, top_layer)
            loc = inst.location_unit
            # compute the flip-parity dictionary at the instance location/orientation,
            # then re-create the instance master with it.
            fp_dict = self.grid.get_flip_parity_at(bot_layer, top_layer, loc,
                                                   inst.orientation, unit_mode=True)
            inst.new_master_with(flip_parity=fp_dict)

    def instance_iter(self):
        """Iterates over all instances in this layout."""
        return self._layout.inst_iter()

    def blockage_iter(self, layer_id, test_box, spx=0, spy=0):
        # type: (int, BBox, int, int) -> Generator[BBox, None, None]
        """Returns all block intersecting the given rectangle."""
        yield from self._used_tracks.blockage_iter(layer_id, test_box, spx=spx, spy=spy)
        if not self._merge_used_tracks:
            for inst in self._layout.inst_iter():
                yield from inst.blockage_iter(layer_id, test_box, spx=spx, spy=spy)

    def all_rect_iter(self):
        # type: () -> Generator[Tuple[int, BBox, int, int], None, None]
        """Returns all rectangle objects in this template."""
        yield from self._used_tracks.all_rect_iter()
        if not self._merge_used_tracks:
            for inst in self._layout.inst_iter():
                yield from inst.all_rect_iter()

    def intersection_rect_iter(self, layer_id, box):
        # type: (int, BBox) -> Generator[BBox, None, None]
        """Iterates over rectangles on the given layer that intersect the given box."""
        yield from self._used_tracks.intersection_rect_iter(layer_id, box)
        if not self._merge_used_tracks:
            for inst in self._layout.inst_iter():
                yield from inst.intersection_rect_iter(layer_id, box)

    def open_interval_iter(self,  # type: TemplateBase
                           track_id,  # type: TrackID
                           lower,  # type: int
                           upper,  # type: int
                           sp=0,  # type: int
                           sp_le=0,  # type: int
                           min_len=0,  # type: 
int
                           ):
        # type: (...) -> Generator[Tuple[int, int], None, None]
        """Iterate over open (unblocked) intervals on the given track.

        Yields (lower, upper) sub-intervals of [lower, upper] on track_id that do
        not intersect any blockage (expanded by the given spacings) and are at
        least min_len long.  sp/sp_le are raised to the grid's minimum space and
        line-end space for the track width.
        """
        res = self.grid.resolution
        layer_id = track_id.layer_id
        width = track_id.width
        intv_dir = self.grid.get_direction(layer_id)
        warr = WireArray(track_id, lower, upper, res=res, unit_mode=True)
        test_box = warr.get_bbox_array(self.grid).base
        sp = max(sp, int(self.grid.get_space(layer_id, width, unit_mode=True)))
        sp_le = max(sp_le, int(self.grid.get_line_end_space(layer_id, width, unit_mode=True)))
        # line-end spacing applies along the track direction, normal spacing across it
        if intv_dir == 'x':
            spx, spy = sp_le, sp
        else:
            spx, spy = sp, sp_le

        # collect blocked intervals, then yield the complement
        intv_set = IntervalSet()
        for box in self.blockage_iter(layer_id, test_box, spx=spx, spy=spy):
            bl, bu = tuple2_to_int(box.get_interval(intv_dir, unit_mode=True))
            intv_set.add((max(bl, lower), min(bu, upper)), merge=True, abut=True)

        for intv in intv_set.complement_iter((lower, upper)):
            if intv[1] - intv[0] >= min_len:
                yield intv

    def is_track_available(self,  # type: TemplateBase
                           layer_id,  # type: int
                           tr_idx,  # type: Union[float, int]
                           lower,  # type: Union[float, int]
                           upper,  # type: Union[float, int]
                           width=1,  # type: int
                           sp=0,  # type: Union[float, int]
                           sp_le=0,  # type: Union[float, int]
                           unit_mode=False,  # type: bool
                           ):
        """Returns True if the given track is available."""
        res = self.grid.resolution
        # normalize all dimensions to resolution units
        if not unit_mode:
            lower = int(round(lower / res))
            upper = int(round(upper / res))
            sp = int(round(sp / res))
            sp_le = int(round(sp_le / res))
        else:
            lower = int(lower)
            upper = int(upper)
            sp = int(sp)
            sp_le = int(sp_le)

        intv_dir = self.grid.get_direction(layer_id)
        track_id = TrackID(layer_id, tr_idx, width=width)
        warr = WireArray(track_id, lower, upper, res=res, unit_mode=True)
        test_box = warr.get_bbox_array(self.grid).base
        sp = max(sp, int(self.grid.get_space(layer_id, width, unit_mode=True)))
        sp_le = max(sp_le, int(self.grid.get_line_end_space(layer_id, width, unit_mode=True)))
        if intv_dir == 'x':
            spx, spy = sp_le, sp
        else:
            spx, spy = sp, sp_le

        # the track is available iff no blockage intersects the test box
        try:
            next(self.blockage_iter(layer_id, test_box, spx=spx, spy=spy))
        except StopIteration:
            return True
        return False

    def get_rect_bbox(self, layer):
        # type: (Union[str, Tuple[str, str]]) -> BBox
        """Returns the overall bounding box of all rectangles on the given layer.

        Note: currently this does not check primitive instances or vias.

        Parameters
        ----------
        layer : Union[str, Tuple[str, str]]
            the layer name.

        Returns
        -------
        box : BBox
            the overall bounding box of the given layer.
        """
        return self._layout.get_rect_bbox(layer)

    def get_track_bbox(self, layer_id):
        """Returns the bounding box of all tracks on the given layer."""
        if not self.finalized:
            raise ValueError('This method only works after being finalized.')
        if layer_id in self._track_boxes:
            return self._track_boxes[layer_id]
        return BBox.get_invalid_bbox()

    def track_bbox_iter(self):
        """Iterates over (layer_id, bounding box) pairs for all layers with tracks."""
        if not self.finalized:
            raise ValueError('This method only works after being finalized.')
        return self._track_boxes.items()

    
def new_template_with(self, **kwargs):
        # type: (Any) -> TemplateBase
        """Create a new template with the given parameters.

        This method will update the parameter values with the given dictionary,
        then create a new template with those parameters and return it.

        Parameters
        ----------
        **kwargs
            a dictionary of new parameter values.
        """
        # get new parameter dictionary.
        new_params = copy.deepcopy(self.params)
        # note: keys not already present in self.params are silently ignored
        for key, val in kwargs.items():
            if key in new_params:
                new_params[key] = val

        return self.template_db.new_template(params=new_params, temp_cls=self.__class__,
                                             grid=self._parent_grid)

    def set_size_from_bound_box(self, top_layer_id, bbox, round_up=False,
                                half_blk_x=True, half_blk_y=True):
        # type: (int, BBox, bool, bool, bool) -> None
        """Compute the size from overall bounding box.

        Parameters
        ----------
        top_layer_id : int
            the top level routing layer ID that array box is calculated with.
        bbox : BBox
            the overall bounding box
        round_up: bool
            True to round up bounding box if not quantized properly
        half_blk_x : bool
            True to allow half-block widths.
        half_blk_y : bool
            True to allow half-block heights.
        """
        grid = self.grid

        if bbox.left_unit != 0 or bbox.bottom_unit != 0:
            raise ValueError('lower-left corner of overall bounding box must be (0, 0).')

        self.size = grid.get_size_tuple(top_layer_id, bbox.width_unit, bbox.height_unit,
                                        round_up=round_up, unit_mode=True, half_blk_x=half_blk_x,
                                        half_blk_y=half_blk_y)

    def set_size_from_array_box(self, top_layer_id):
        # type: (int) -> None
        """Automatically compute the size from array_box.

        Assumes the array box is exactly in the center of the template.

        Parameters
        ----------
        top_layer_id : int
            the top level routing layer ID that array box is calculated with.
        """
        grid = self.grid

        array_box = self.array_box
        if array_box is None:
            raise ValueError("array_box is not set")

        dx = array_box.left_unit
        dy = array_box.bottom_unit
        if dx < 0 or dy < 0:
            raise ValueError('lower-left corner of array box must be in first quadrant.')

        # array box is centered, so total size = array box size + margins on both sides
        self.size = grid.get_size_tuple(top_layer_id, 2 * dx + array_box.width_unit,
                                        2 * dy + array_box.height_unit, unit_mode=True)

    def write_summary_file(self, fname, lib_name, cell_name):
        # type: (str, str, str) -> None
        """Create a summary file for this template layout."""
        # get all pin information
        pin_dict = {}
        for port_name in self.port_names_iter():
            pin_cnt = 0
            port = self.get_port(port_name)
            for pin_warr in port:
                for layer_name, bbox in pin_warr.wire_iter(self.grid):
                    # first pin keeps the port name; later ones get a _N suffix
                    if pin_cnt == 0:
                        pin_name = port_name
                    else:
                        pin_name = '%s_%d' % (port_name, pin_cnt)
                    pin_cnt += 1
                    pin_dict[pin_name] = dict(
                        layer=[layer_name, self._layout.pin_purpose],
                        netname=port_name,
                        xy0=[bbox.left, bbox.bottom],
                        xy1=[bbox.right, bbox.top],
                    )

        # get size information
        bnd_box = self.bound_box
        if bnd_box is None:
            raise ValueError("bound_box is not set")
info = {
            lib_name: {
                cell_name: dict(
                    pins=pin_dict,
                    xy0=[0.0, 0.0],
                    xy1=[bnd_box.width, bnd_box.height],
                ),
            },
        }

        with open_file(fname, 'w') as f:
            yaml.dump(info, f)

    def write_to_disk(self, fname, lib_name, cell_name, debug=False):
        # type: (str, str, str, bool) -> None
        """Create a cache file for this template."""
        if not self.finalized:
            raise ValueError('Cannot write non-final template to disk.')

        if debug:
            print('Writing %s to disk...' % self.__class__.__name__)

        start = time.time()
        # snapshot the subclass-declared cacheable properties
        prop_dict = {key: getattr(self, key) for key in self.get_cache_properties()}

        # persist all used-track rectangles to the track cache file
        res = self.grid.resolution
        save_tracks = UsedTracks(fname, overwrite=True)
        for layer_id, box, dx, dy in self.all_rect_iter():
            save_tracks.record_box(layer_id, box, dx, dy, res)
        save_tracks.close()

        template_info = dict(
            lib_name=lib_name,
            cell_name=cell_name,
            size=self._size,
            port_params=self._port_params,
            prim_top_layer=self.prim_top_layer,
            prim_bound_box=self.prim_bound_box,
            array_box=self.array_box,
            properties=prop_dict,
        )

        # protocol=-1 selects the highest pickle protocol available
        with open(fname + '_info.pickle', 'wb') as f:
            pickle.dump(template_info, f, protocol=-1)

        stop = time.time()
        if debug:
            print('Writing to disk took %.4g seconds.' % (stop - start))

    def merge_inst_tracks(self):
        # type: () -> None
        """Flatten all rectangles from instances into the UsedTracks data structure."""
        if not self._merge_used_tracks:
            self._merge_used_tracks = True
            res = self.grid.resolution
            for inst in self._layout.inst_iter():
                for layer_id, box, dx, dy in inst.all_rect_iter():
                    self._used_tracks.record_box(layer_id, box, dx, dy, res)

    def get_pin_name(self, name):
        # type: (str) -> str
        """Get the actual name of the given pin from the renaming dictionary.

        Given a pin name, If this Template has a parameter called 'rename_dict',
        return the actual pin name from the renaming dictionary.

        Parameters
        ----------
        name : str
            the pin name.

        Returns
        -------
        actual_name : str
            the renamed pin name.
        """
        rename_dict = self.params.get('rename_dict', {})
        return rename_dict.get(name, name)

    def get_port(self, name=''):
        # type: (str) -> Port
        """Returns the port object with the given name.

        Parameters
        ----------
        name : str
            the port terminal name.  If None or empty, check if this template has only one port,
            then return it.

        Returns
        -------
        port : Port
            the port object.
        """
        if not name:
            if len(self._ports) != 1:
                raise ValueError('Template has %d ports != 1.' 
% len(self._ports))\n            name = next(iter(self._ports))\n        return self._ports[name]\n\n    def has_port(self, port_name):\n        # type: (str) -> bool\n        \"\"\"Returns True if this template has the given port.\"\"\"\n        return port_name in self._ports\n\n    def port_names_iter(self):\n        # type: () -> Iterable[str]\n        \"\"\"Iterates over port names in this template.\n\n        Yields\n        ------\n        port_name : string\n            name of a port in this template.\n        \"\"\"\n        return self._ports.keys()\n\n    def get_prim_port(self, name=''):\n        # type: (str) -> Port\n        \"\"\"Returns the primitive port object with the given name.\n\n        Parameters\n        ----------\n        name : str\n            the port terminal name.  If None or empty, check if this template has only one port,\n            then return it.\n\n        Returns\n        -------\n        port : Port\n            the primitive port object.\n        \"\"\"\n        if not name:\n            if len(self._prim_ports) != 1:\n                raise ValueError('Template has %d ports != 1.' 
% len(self._prim_ports))\n            name = next(iter(self._ports))\n        return self._prim_ports[name]\n\n    def has_prim_port(self, port_name):\n        # type: (str) -> bool\n        \"\"\"Returns True if this template has the given primitive port.\"\"\"\n        return port_name in self._prim_ports\n\n    def prim_port_names_iter(self):\n        # type: () -> Iterable[str]\n        \"\"\"Iterates over primitive port names in this template.\n\n        Yields\n        ------\n        port_name : str\n            name of a primitive port in this template.\n        \"\"\"\n        return self._prim_ports.keys()\n\n    def new_template(self, params=None, temp_cls=None, debug=False, **kwargs):\n        # type: (Dict[str, Any], Type[TemplateType], bool, **Any) -> TemplateType\n        \"\"\"Create a new template.\n\n        Parameters\n        ----------\n        params : Dict[str, Any]\n            the parameter dictionary.\n        temp_cls : Type[TemplateType]\n            the template class to instantiate.\n        debug : bool\n            True to print debug messages.\n        **kwargs : Any\n            optional template parameters.\n\n        Returns\n        -------\n        template : TemplateType\n            the new template instance.\n        \"\"\"\n        kwargs['grid'] = self.grid\n        return self.template_db.new_template(params=params, temp_cls=temp_cls, debug=debug,\n                                             **kwargs)\n\n    def move_all_by(self, dx=0.0, dy=0.0, unit_mode=False):\n        # type: (Union[float, int], Union[float, int], bool) -> None\n        \"\"\"Move all layout objects Except pins in this layout by the given amount.\n\n        primitive pins will be moved, but pins on routing grid will not.\n\n        Parameters\n        ----------\n        dx : Union[float, int]\n            the X shift.\n        dy : Union[float, int]\n            the Y shift.\n        unit_mode : bool\n            true if given shift values are in 
resolution units.
        """
        print("WARNING: USING THIS BREAKS POWER FILL ALGORITHM.")
        self._layout.move_all_by(dx=dx, dy=dy, unit_mode=unit_mode)

    def add_instance(self,  # type: TemplateBase
                     master,  # type: TemplateBase
                     inst_name=None,  # type: Optional[str]
                     loc=(0, 0),  # type: Tuple[Union[float, int], Union[float, int]]
                     orient="R0",  # type: str
                     nx=1,  # type: int
                     ny=1,  # type: int
                     spx=0,  # type: Union[float, int]
                     spy=0,  # type: Union[float, int]
                     unit_mode=False,  # type: bool
                     ):
        # type: (...) -> Instance
        """Adds a new (arrayed) instance to layout.

        Parameters
        ----------
        master : TemplateBase
            the master template object.
        inst_name : Optional[str]
            instance name.  If None or an instance with this name already exists,
            a generated unique name is used.
        loc : Tuple[Union[float, int], Union[float, int]]
            instance location.
        orient : str
            instance orientation.  Defaults to "R0"
        nx : int
            number of columns.  Must be positive integer.
        ny : int
            number of rows.  Must be positive integer.
        spx : Union[float, int]
            column pitch.  Used for arraying given instance.
        spy : Union[float, int]
            row pitch.  Used for arraying given instance.
        unit_mode : bool
            True if dimensions are given in resolution units.

        Returns
        -------
        inst : Instance
            the added instance.
        """
        res = self.grid.resolution
        if not unit_mode:
            # convert location and pitches from layout units to integer resolution units.
            loc = int(round(loc[0] / res)), int(round(loc[1] / res))
            spx = int(round(spx / res))
            spy = int(round(spy / res))

        # all dimensions are in resolution units from here on (unit_mode=True).
        inst = Instance(self.grid, self._lib_name, master, loc=loc, orient=orient,
                        name=inst_name, nx=nx, ny=ny, spx=spx, spy=spy, unit_mode=True)

        self._layout.add_instance(inst)
        return inst

    def add_instance_primitive(self,  # type: TemplateBase
                               lib_name,  # type: str
                               cell_name,  # type: str
                               loc,  # type: Tuple[Union[float, int], Union[float, int]]
                               view_name='layout',  # type: str
                               inst_name=None,  # type: Optional[str]
                               orient="R0",  # type: str
                               nx=1,  # type: int
                               ny=1,  # type: int
                               spx=0,  # type: Union[float, int]
                               spy=0,  # type: Union[float, int]
                               params=None,  # type: Optional[Dict[str, Any]]
                               unit_mode=False,  # type: bool
                               **kwargs
                               ):
        # type: (...) -> None
        """Adds a new (arrayed) primitive instance to layout.

        Parameters
        ----------
        lib_name : str
            instance library name.
        cell_name : str
            instance cell name.
        loc : Tuple[Union[float, int], Union[float, int]]
            instance location.
        view_name : str
            instance view name.  Defaults to 'layout'.
        inst_name : Optional[str]
            instance name.  If None or an instance with this name already exists,
            a generated unique name is used.
        orient : str
            instance orientation.  Defaults to "R0"
        nx : int
            number of columns.  Must be positive integer.
        ny : int
            number of rows.  Must be positive integer.
        spx : Union[float, int]
            column pitch.  Used for arraying given instance.
        spy : Union[float, int]
            row pitch.  Used for arraying given instance.
        params : Optional[Dict[str, Any]]
            the parameter dictionary.  Used for adding pcell instance.
        unit_mode : bool
            True if distances are specified in resolution units.
        **kwargs
            additional arguments.  
Usually implementation specific.\n        \"\"\"\n        self._layout.add_instance_primitive(lib_name, cell_name, loc,\n                                            view_name=view_name, inst_name=inst_name,\n                                            orient=orient, num_rows=ny, num_cols=nx,\n                                            sp_rows=spy, sp_cols=spx,\n                                            params=params, unit_mode=unit_mode, **kwargs)\n\n    def add_rect(self,  # type: TemplateBase\n                 layer,  # type: Union[str, Tuple[str, str]]\n                 bbox,  # type: Union[BBox, BBoxArray]\n                 nx=1,  # type: int\n                 ny=1,  # type: int\n                 spx=0,  # type: Union[float, int]\n                 spy=0,  # type: Union[float, int]\n                 unit_mode=False,  # type: bool\n                 ):\n        # type: (...) -> Rect\n        \"\"\"Add a new (arrayed) rectangle.\n\n        Parameters\n        ----------\n        layer: Union[str, Tuple[str, str]]\n            the layer name, or the (layer, purpose) pair.\n        bbox : Union[BBox, BBoxArray]\n            the rectangle bounding box.  
If BBoxArray is given, its arraying parameters will\n            be used instead.\n        nx : int\n            number of columns.\n        ny : int\n            number of rows.\n        spx : Union[float, int]\n            column pitch.\n        spy : Union[float, int]\n            row pitch.\n        unit_mode : bool\n            True if spx and spy are given in resolution units.\n\n        Returns\n        -------\n        rect : Rect\n            the added rectangle.\n        \"\"\"\n        rect = Rect(layer, bbox, nx=nx, ny=ny, spx=spx, spy=spy, unit_mode=unit_mode)\n        self._layout.add_rect(rect)\n        self._used_tracks.record_rect(self.grid, layer, rect.bbox_array)\n        return rect\n\n    def add_res_metal(self, layer_id, bbox, **kwargs):\n        # type: (int, Union[BBox, BBoxArray], **Any) -> List[Rect]\n        \"\"\"Add a new metal resistor.\n\n        Parameters\n        ----------\n        layer_id : int\n            the metal layer ID.\n        bbox : Union[BBox, BBoxArray]\n            the resistor bounding box.  
If BBoxArray is given, its arraying parameters will
            be used instead.
        **kwargs : Any
            optional arguments to add_rect()

        Returns
        -------
        rect_list : List[Rect]
            list of rectangles defining the metal resistor.
        """
        rect_list = []
        rect_layers = self.grid.tech_info.get_res_metal_layers(layer_id)
        # one rectangle per layer in the technology's metal-resistor layer stack.
        for lay in rect_layers:
            rect_list.append(self.add_rect(lay, bbox, **kwargs))
        return rect_list

    def add_path(self, path):
        # type: (Path) -> Path
        """Add a new path.

        The path geometry is forwarded to the layout backend; in addition, the
        area covered by each segment is recorded as used routing tracks.

        Parameters
        ----------
        path : Path
            the path to add.

        Returns
        -------
        path : Path
            the added path object.
        """
        self._layout.add_path(path)
        lay_id = self.grid.tech_info.get_layer_id(path.layer[0])
        res = self.grid.resolution

        # record it as used tracks
        points_list = path.points_unit
        # walk each pair of consecutive points; every segment is covered with
        # one or more bounding boxes recorded in self._used_tracks.
        for pidx, [x0, y0] in enumerate(points_list[:-1]):
            [x1, y1] = points_list[pidx + 1]
            y_low, y_high = min(y0, y1), max(y0, y1)
            x_low, x_high = min(x0, x1), max(x0, x1)
            if x_low == x0:
                y_xlow, y_xhigh = y0, y1
            else:
                y_xlow, y_xhigh = y1, y0
            # NOTE(review): y_xhigh is assigned but never read below.

            width_unit = int(path.width / self.grid.resolution)
            # w2: half-width along an axis; wr2: half-width along a 45 degree diagonal.
            # NOTE(review): math.ceil() is a no-op on both lines because '//' has
            # already floored the quotient; 'width_unit / 2' and
            # 'width_unit / math.sqrt(2)' were probably intended -- confirm before
            # changing, as this alters the recorded used-track area.
            w2 = math.ceil(width_unit // 2)
            wr2 = math.ceil(width_unit // math.sqrt(2))

            if x0 == x1:
                # 1. 90 degree cases
                bbox = BBox(x0 - w2, y_low - w2, x0 + w2, y_high + w2, res, unit_mode=True)
                self._used_tracks.record_box(lay_id, bbox, dx=0, dy=0, res=res)
                # rect = Rect(path.layer, bbox)
                # self._layout.add_rect(rect)
            elif y0 == y1:
                # 2. 0 degree cases
                bbox = BBox(x_low - w2, y0 - w2, x_high + w2, y0 + w2, res, unit_mode=True)
                self._used_tracks.record_box(lay_id, bbox, dx=0, dy=0, res=res)
                # rect = Rect(path.layer, bbox)
                # self._layout.add_rect(rect)
            elif y_xlow == y_low:
                # 3. 45 degree case
                # cover the diagonal with a staircase of wr2 x wr2 boxes going up-right.
                x_start, x_stop = x_low - wr2, x_high + wr2
                y_start, y_stop = y_low - wr2, y_high + wr2
                while True:
                    bbox = BBox(x_start, y_start, x_start + wr2, y_start + wr2, res, unit_mode=True)
                    self._used_tracks.record_box(lay_id, bbox, dx=0, dy=0, res=res)
                    # rect = Rect(path.layer, bbox)
                    # self._layout.add_rect(rect)
                    if x_start + wr2 >= x_stop:
                        break
                    bbox = BBox(x_start, y_start + wr2, x_start + wr2, y_start + 2 * wr2, res, unit_mode=True)
                    self._used_tracks.record_box(lay_id, bbox, dx=0, dy=0, res=res)
                    # rect = Rect(path.layer, bbox)
                    # self._layout.add_rect(rect)
                    bbox = BBox(x_start + wr2, y_start, x_start + 2 * wr2, y_start + wr2, res, unit_mode=True)
                    self._used_tracks.record_box(lay_id, bbox, dx=0, dy=0, res=res)
                    # rect = Rect(path.layer, bbox)
                    # self._layout.add_rect(rect)
                    x_start += wr2
                    y_start += wr2
            else:
                # 4. 135 degree case
                # cover the diagonal with a staircase of wr2 x wr2 boxes going down-right.
                x_start, x_stop = x_low - wr2, x_high + wr2
                y_start, y_stop = y_high + wr2, y_low - wr2
                while True:
                    bbox = BBox(x_start, y_start - wr2, x_start + wr2, y_start, res, unit_mode=True)
                    self._used_tracks.record_box(lay_id, bbox, dx=0, dy=0, res=res)
                    # rect = Rect(path.layer, bbox)
                    # self._layout.add_rect(rect)
                    if x_start + wr2 >= x_stop:
                        break
                    bbox = BBox(x_start, y_start - 2 * wr2, x_start + wr2, y_start - wr2, res, unit_mode=True)
                    self._used_tracks.record_box(lay_id, bbox, dx=0, dy=0, res=res)
                    # rect = Rect(path.layer, bbox)
                    # self._layout.add_rect(rect)
                    bbox = BBox(x_start + wr2, y_start - wr2, x_start + 2 * wr2, y_start, res, unit_mode=True)
                    self._used_tracks.record_box(lay_id, bbox, dx=0, dy=0, res=res)
                    # rect = Rect(path.layer, bbox)
                    # self._layout.add_rect(rect)
                    x_start += wr2
                    y_start -= wr2

        return path

    def add_polygon(self, polygon):
        # type: (Polygon) -> Polygon
        """Add a new polygon.

        Parameters
        ----------
        polygon : Polygon
            the polygon to add.

        Returns
        -------
        polygon : Polygon
            the added polygon object.
        """
        self._layout.add_polygon(polygon)
        return polygon

    def add_blockage(self, blockage):
        # type: (Blockage) -> Blockage
        """Add a new blockage.

        Parameters
        ----------
        blockage : Blockage
            the blockage to add.

        Returns
        -------
        blockage : Blockage
            the added blockage object.
        """
        
self._layout.add_blockage(blockage)\n        return blockage\n\n    def add_cell_boundary(self, box):\n        # type: (BBox) -> None\n        \"\"\"Adds a cell boundary object to the this template.\n\n        This is usually the PR boundary.\n\n        Parameters\n        ----------\n        box : BBox\n            the cell boundary bounding box.\n        \"\"\"\n        self._grid.tech_info.add_cell_boundary(self, box)\n\n    def add_boundary(self, boundary):\n        # type: (Boundary) -> Boundary\n        \"\"\"Add a new boundary.\n\n        Parameters\n        ----------\n        boundary : Boundary\n            the boundary to add.\n\n        Returns\n        -------\n        boundary : Boundary\n            the added boundary object.\n        \"\"\"\n        self._layout.add_boundary(boundary)\n        return boundary\n\n    def reexport(self, port, net_name='', label='', show=True):\n        # type: (Port, str, str, bool) -> None\n        \"\"\"Re-export the given port object.\n\n        Add all geometries in the given port as pins with optional new name\n        and label.\n\n        Parameters\n        ----------\n        port : Port\n            the Port object to re-export.\n        net_name : str\n            the new net name.  If not given, use the port's current net name.\n        label : str\n            the label.  
If not given, use net_name.\n        show : bool\n            True to draw the pin in layout.\n        \"\"\"\n        net_name = net_name or port.net_name\n        if not label:\n            if net_name != port.net_name:\n                label = net_name\n            else:\n                label = port.label\n\n        if net_name not in self._port_params:\n            self._port_params[net_name] = dict(label=label, pins={}, show=show)\n\n        port_params = self._port_params[net_name]\n        # check labels is consistent.\n        if port_params['label'] != label:\n            msg = 'Current port label = %s != specified label = %s'\n            raise ValueError(msg % (port_params['label'], label))\n        if port_params['show'] != show:\n            raise ValueError('Conflicting show port specification.')\n\n        # export all port geometries\n        port_pins = port_params['pins']\n        for wire_arr in port:\n            layer_id = wire_arr.layer_id\n            if layer_id not in port_pins:\n                port_pins[layer_id] = [wire_arr]\n            else:\n                port_pins[layer_id].append(wire_arr)\n\n    def add_pin_primitive(self, net_name, layer, bbox, label='', show=True):\n        # type: (str, str, BBox, str, bool) -> None\n        \"\"\"Add a primitive pin to the layout.\n\n        Parameters\n        ----------\n        net_name : str\n            the net name associated with the pin.\n        layer : str\n            the pin layer name.\n        bbox : BBox\n            the pin bounding box.\n        label : str\n            the label of this pin.  If None or empty, defaults to be the net_name.\n            this argument is used if you need the label to be different than net name\n            for LVS purposes.  
For example, unconnected pins usually need a colon after
            the name to indicate that LVS should short those pins together.
        show : bool
            True to draw the pin in layout.
        """
        label = label or net_name
        # get (or create) the primitive-port entry for this net.
        if net_name in self._prim_port_params:
            port_params = self._prim_port_params[net_name]
        else:
            port_params = self._prim_port_params[net_name] = dict(label=label, pins={}, show=show)

        # check labels is consistent.
        if port_params['label'] != label:
            msg = 'Current port label = %s != specified label = %s'
            raise ValueError(msg % (port_params['label'], label))
        if port_params['show'] != show:
            raise ValueError('Conflicting show port specification.')

        port_pins = port_params['pins']

        # group pin geometry by layer name.
        if layer in port_pins:
            port_pins[layer].append(bbox)
        else:
            port_pins[layer] = [bbox]

    def add_label(self, label, layer, bbox):
        # type: (str, Union[str, Tuple[str, str]], BBox) -> None
        """Adds a label to the layout.

        This is mainly used to add voltage text labels.

        Parameters
        ----------
        label : str
            the label text.
        layer : Union[str, Tuple[str, str]]
            the pin layer name.
        bbox : BBox
            the pin bounding box.
        """
        self._layout.add_label(label, layer, bbox)

    def add_pin(self, net_name, wire_arr_list, label='', show=True, edge_mode=0):
        # type: (str, Union[WireArray, List[WireArray]], str, bool, int) -> None
        """Add new pin to the layout.

        If one or more pins with the same net name already exists,
        they'll be grouped under the same port.

        Parameters
        ----------
        net_name : str
            the net name associated with the pin.
        wire_arr_list : Union[WireArray, List[WireArray]]
            WireArrays representing the pin geometry.
        label : str
            the label of this pin.  If None or empty, defaults to be the net_name.
            this argument is used if you need the label to be different than net name
            for LVS purposes.  For example, unconnected pins usually need a colon after
            the name to indicate that LVS should short those pins together.
        show : bool
            if True, draw the pin in layout.
        edge_mode : int
            If <0, draw the pin on the lower end of the WireArray.  If >0, draw the pin
            on the upper end.  If 0, draw the pin on the entire WireArray.
        """
        # normalize to a list of WireArrays.
        if isinstance(wire_arr_list, WireArray):
            wire_arr_list = [wire_arr_list]
        else:
            pass

        label = label or net_name

        if net_name not in self._port_params:
            self._port_params[net_name] = dict(label=label, pins={}, show=show)

        port_params = self._port_params[net_name]

        # check labels is consistent.
        if port_params['label'] != label:
            msg = 'Current port label = %s != specified label = %s'
            raise ValueError(msg % (port_params['label'], label))
        if port_params['show'] != show:
            raise ValueError('Conflicting show port specification.')

        for warr in wire_arr_list:
            # add pin array to port_pins
            layer_id = warr.track_id.layer_id
            if edge_mode != 0:
                # keep only a stub of the wire: at most 6x the wire width, anchored
                # at the lower (edge_mode < 0) or upper (edge_mode > 0) end.
                cur_w = self.grid.get_track_width(layer_id, warr.track_id.width, unit_mode=True)
                wl = warr.lower_unit
                wu = warr.upper_unit
                pin_len = min(cur_w * 6, wu - wl)
                if edge_mode < 0:
                    wu = wl + pin_len
                else:
                    wl = wu - pin_len
                warr = WireArray(warr.track_id, wl, wu, res=self.grid.resolution, unit_mode=True)

            
port_pins = port_params['pins']\n            if layer_id not in port_pins:\n                port_pins[layer_id] = [warr]\n            else:\n                port_pins[layer_id].append(warr)\n\n    def add_via(self,  # type: TemplateBase\n                bbox,  # type: BBox\n                bot_layer,  # type: Union[str, Tuple[str, str]]\n                top_layer,  # type: Union[str, Tuple[str, str]]\n                bot_dir,  # type: str\n                nx=1,  # type: int\n                ny=1,  # type: int\n                spx=0.0,  # type: Union[float, int]\n                spy=0.0,  # type: Union[float, int]\n                extend=True,  # type: bool\n                top_dir=None,  # type: Optional[str]\n                unit_mode=False,  # type: bool\n                ):\n        # type: (...) -> Via\n        \"\"\"Adds a (arrayed) via object to the layout.\n\n        Parameters\n        ----------\n        bbox : BBox\n            the via bounding box, not including extensions.\n        bot_layer : Union[str, Tuple[str, str]]\n            the bottom layer name, or a tuple of layer name and purpose name.\n            If purpose name not given, defaults to 'drawing'.\n        top_layer : Union[str, Tuple[str, str]]\n            the top layer name, or a tuple of layer name and purpose name.\n            If purpose name not given, defaults to 'drawing'.\n        bot_dir : str\n            the bottom layer extension direction.  Either 'x' or 'y'.\n        nx : int\n            number of columns.\n        ny : int\n            number of rows.\n        spx : Union[float, int]\n            column pitch.\n        spy : Union[float, int]\n            row pitch.\n        extend : bool\n            True if via extension can be drawn outside of the box.\n        top_dir : Optional[str]\n            top layer extension direction.  
Can force to extend in same direction as bottom.\n        unit_mode : bool\n            True if spx/spy are specified in resolution units.\n        Returns\n        -------\n        via : Via\n            the created via object.\n        \"\"\"\n        via = Via(self.grid.tech_info, bbox, bot_layer, top_layer, bot_dir,\n                  nx=nx, ny=ny, spx=spx, spy=spy, extend=extend, top_dir=top_dir,\n                  unit_mode=unit_mode)\n        self._layout.add_via(via)\n\n        return via\n\n    def add_via_primitive(self, via_type,  # type: str\n                          loc,  # type: Tuple[float, float]\n                          num_rows=1,  # type: int\n                          num_cols=1,  # type: int\n                          sp_rows=0.0,  # type: float\n                          sp_cols=0.0,  # type: float\n                          enc1=None,  # type: Optional[List[float]]\n                          enc2=None,  # type: Optional[List[float]]\n                          orient='R0',  # type: str\n                          cut_width=None,  # type: Optional[float]\n                          cut_height=None,  # type: Optional[float]\n                          nx=1,  # type: int\n                          ny=1,  # type: int\n                          spx=0.0,  # type: float\n                          spy=0.0,  # type: float\n                          unit_mode=False,  # type: bool\n                          ):\n        # type: (...) 
-> None\n        \"\"\"Adds a via by specifying all parameters.\n\n        Parameters\n        ----------\n        via_type : str\n            the via type name.\n        loc : Tuple[float, float]\n            the via location as a two-element tuple.\n        num_rows : int\n            number of via cut rows.\n        num_cols : int\n            number of via cut columns.\n        sp_rows : float\n            spacing between via cut rows.\n        sp_cols : float\n            spacing between via cut columns.\n        enc1 : Optional[List[float]]\n            a list of left, right, top, and bottom enclosure values on bottom layer.\n            Defaults to all 0.\n        enc2 : Optional[List[float]]\n            a list of left, right, top, and bottom enclosure values on top layer.\n            Defaults to all 0.\n        orient : str\n            orientation of the via.\n        cut_width : Optional[float]\n            via cut width.  This is used to create rectangle via.\n        cut_height : Optional[float]\n            via cut height.  
This is used to create rectangle via.\n        nx : int\n            number of columns.\n        ny : int\n            number of rows.\n        spx : float\n            column pitch.\n        spy : float\n            row pitch.\n        unit_mode : bool\n            True if all given dimensions are in resolution units.\n        \"\"\"\n        if unit_mode:\n            res = self.grid.resolution\n            loc = (loc[0] * res, loc[1] * res)\n            sp_rows *= res\n            sp_cols *= res\n            if enc1 is not None:\n                enc1 = [v * res for v in enc1]\n            if enc2 is not None:\n                enc2 = [v * res for v in enc2]\n            if cut_width is not None:\n                cut_width *= res\n            if cut_height is not None:\n                cut_height *= res\n            spx *= res\n            spy *= res\n\n        self._layout.add_via_primitive(via_type, loc, num_rows=num_rows, num_cols=num_cols,\n                                       sp_rows=sp_rows, sp_cols=sp_cols,\n                                       enc1=enc1, enc2=enc2, orient=orient,\n                                       cut_width=cut_width, cut_height=cut_height,\n                                       arr_nx=nx, arr_ny=ny, arr_spx=spx, arr_spy=spy)\n\n    def add_via_on_grid(self, bot_layer_id, bot_track, top_track, bot_width=1, top_width=1):\n        # type: (int, Union[float, int], Union[float, int], int, int) -> Via\n        \"\"\"Add a via on the routing grid.\n\n        Parameters\n        ----------\n        bot_layer_id : int\n            the bottom layer ID.\n        bot_track : Union[float, int]\n            the bottom track index.\n        top_track : Union[float, int]\n            the top track index.\n        bot_width : int\n            the bottom track width.\n        top_width : int\n            the top track width.\n        \"\"\"\n        grid = self.grid\n        res = grid.resolution\n        bl, bu = tuple2_to_int(\n            
grid.get_wire_bounds(bot_layer_id, bot_track, width=bot_width, unit_mode=True))\n        tl, tu = tuple2_to_int(\n            grid.get_wire_bounds(bot_layer_id + 1, top_track, width=top_width, unit_mode=True))\n        bot_dir = grid.get_direction(bot_layer_id)\n        if bot_dir == 'x':\n            bbox = BBox(tl, bl, tu, bu, res, unit_mode=True)\n        else:\n            bbox = BBox(bl, tl, bu, tu, res, unit_mode=True)\n        bname = grid.get_layer_name(bot_layer_id, bot_track)\n        tname = grid.get_layer_name(bot_layer_id + 1, top_track)\n\n        return self.add_via(bbox, bname, tname, bot_dir)\n\n    def extend_wires(self,  # type: TemplateBase\n                     warr_list,  # type: Union[WireArray, List[Optional[WireArray]]]\n                     lower=None,  # type: Optional[Union[float, int]]\n                     upper=None,  # type: Optional[Union[float, int]]\n                     unit_mode=False,  # type: bool\n                     min_len_mode=None,  # type: Optional[int]\n                     ):\n        # type: (...) 
-> List[Optional[WireArray]]\n        \"\"\"Extend the given wires to the given coordinates.\n\n        Parameters\n        ----------\n        warr_list : Union[WireArray, List[Optional[WireArray]]]\n            the wires to extend.\n        lower : Optional[Union[float, int]]\n            the wire lower coordinate.\n        upper : Optional[Union[float, int]]\n            the wire upper coordinate.\n        unit_mode: bool\n            True if lower/upper/fill_margin is given in resolution units.\n        min_len_mode : Optional[int]\n            If not None, will extend track so it satisfy minimum length requirement.\n            Use -1 to extend lower bound, 1 to extend upper bound, 0 to extend both equally.\n\n        Returns\n        -------\n        warr_list : List[Optional[WireArray]]\n            list of added wire arrays.\n            If any elements in warr_list were None, they will be None in the return.\n        \"\"\"\n        if isinstance(warr_list, WireArray):\n            warr_list = [warr_list]\n        else:\n            pass\n\n        res = self.grid.resolution\n        if not unit_mode:\n            if lower is not None:\n                lower = int(round(lower / res))\n            if upper is not None:\n                upper = int(round(upper / res))\n\n        new_warr_list = []  # type: List[Optional[WireArray]]\n        for warr in warr_list:\n            if warr is None:\n                new_warr_list.append(None)\n            else:\n                wlower = warr.lower_unit\n                wupper = warr.upper_unit\n                if lower is None:\n                    cur_lower = wlower\n                else:\n                    cur_lower = min(lower, wlower)\n                if upper is None:\n                    cur_upper = wupper\n                else:\n                    cur_upper = max(upper, wupper)\n                if min_len_mode is not None:\n                    # extend track to meet minimum length\n                    
min_len = self.grid.get_min_length(warr.layer_id, warr.track_id.width,\n                                                       unit_mode=True)\n                    # make sure minimum length is even so that middle coordinate exists\n                    min_len = -(-min_len // 2) * 2\n                    tr_len = cur_upper - cur_lower\n                    if min_len > tr_len:\n                        ext = min_len - tr_len\n                        if min_len_mode < 0:\n                            cur_lower -= ext\n                        elif min_len_mode > 0:\n                            cur_upper += ext\n                        else:\n                            cur_lower -= ext // 2\n                            cur_upper = cur_lower + min_len\n\n                new_warr = WireArray(warr.track_id, cur_lower, cur_upper, res=res, unit_mode=True)\n                for layer_name, bbox_arr in new_warr.wire_arr_iter(self.grid):\n                    self.add_rect(layer_name, bbox_arr)\n\n                new_warr_list.append(new_warr)\n\n        return new_warr_list\n\n    def add_wires(self,  # type: TemplateBase\n                  layer_id,  # type: int\n                  track_idx,  # type: Union[float, int]\n                  lower,  # type: Union[float, int]\n                  upper,  # type: Union[float, int]\n                  width=1,  # type: int\n                  num=1,  # type: int\n                  pitch=0,  # type: Union[float, int]\n                  unit_mode=False  # type: bool\n                  ):\n        # type: (...) 
-> WireArray\n        \"\"\"Add the given wire(s) to this layout.\n\n        Parameters\n        ----------\n        layer_id : int\n            the wire layer ID.\n        track_idx : Union[float, int]\n            the smallest wire track index.\n        lower : Union[float, int]\n            the wire lower coordinate.\n        upper : Union[float, int]\n            the wire upper coordinate.\n        width : int\n            the wire width in number of tracks.\n        num : int\n            number of wires.\n        pitch : Union[float, int]\n            the wire pitch.\n        unit_mode: bool\n            True if lower/upper is given in resolution units.\n\n        Returns\n        -------\n        warr : WireArray\n            the added WireArray object.\n        \"\"\"\n        res = self.grid.resolution\n        if not unit_mode:\n            lower = int(round(lower / res))\n            upper = int(round(upper / res))\n\n        tid = TrackID(layer_id, track_idx, width=width, num=num, pitch=pitch)\n        warr = WireArray(tid, lower, upper, res=res, unit_mode=True)\n\n        for layer_name, bbox_arr in warr.wire_arr_iter(self.grid):\n            self.add_rect(layer_name, bbox_arr)\n\n        return warr\n\n    def add_res_metal_warr(self,  # type: TemplateBase\n                           layer_id,  # type: int\n                           track_idx,  # type: Union[float, int]\n                           lower,  # type: Union[float, int]\n                           upper,  # type: Union[float, int]\n                           **kwargs):\n        # type: (...) 
-> WireArray\n        \"\"\"Add metal resistor as WireArray to this layout.\n\n        Parameters\n        ----------\n        layer_id : int\n            the wire layer ID.\n        track_idx : Union[float, int]\n            the smallest wire track index.\n        lower : Union[float, int]\n            the wire lower coordinate.\n        upper : Union[float, int]\n            the wire upper coordinate.\n        **kwargs :\n            optional arguments to add_wires()\n\n        Returns\n        -------\n        warr : WireArray\n            the added WireArray object.\n        \"\"\"\n        warr = self.add_wires(layer_id, track_idx, lower, upper, **kwargs)\n\n        for _, bbox_arr in warr.wire_arr_iter(self.grid):\n            self.add_res_metal(layer_id, bbox_arr)\n\n        return warr\n\n    def add_mom_cap(self,  # type: TemplateBase\n                    cap_box,  # type: BBox\n                    bot_layer,  # type: int\n                    num_layer,  # type: int\n                    port_widths=1,  # type: Union[int, List[int], Dict[int, int]]\n                    port_parity=None,\n                    # type: Optional[Union[Tuple[int, int], Dict[int, Tuple[int, int]]]]\n                    array=False,  # type: bool\n                    **kwargs\n                    ):\n        # type: (...) 
-> Any\n        \"\"\"Draw mom cap in the defined bounding box.\"\"\"\n\n        return_rect = kwargs.get('return_cap_wires', False)\n        cap_type = kwargs.get('cap_type', 'standard')\n\n        if num_layer <= 1:\n            raise ValueError('Must have at least 2 layers for MOM cap.')\n\n        res = self.grid.resolution\n        tech_info = self.grid.tech_info\n\n        mom_cap_dict = tech_info.tech_params['layout']['mom_cap'][cap_type]\n        cap_margins = mom_cap_dict['margins']\n        cap_info = mom_cap_dict['width_space']\n        num_ports_on_edge = mom_cap_dict.get('num_ports_on_edge', {})\n        port_widths_default = mom_cap_dict.get('port_widths_default', {})\n        port_sp_min = mom_cap_dict.get('port_sp_min', {})\n\n        top_layer = bot_layer + num_layer - 1\n\n        if isinstance(port_widths, int):\n            port_widths = {lay: port_widths for lay in range(bot_layer, top_layer + 1)}\n        elif isinstance(port_widths, list) or isinstance(port_widths, tuple):\n            if len(port_widths) != num_layer:\n                raise ValueError('port_widths length != %d' % num_layer)\n            port_widths = dict(zip(range(bot_layer, top_layer + 1), port_widths))\n        else:\n            port_widths = {lay: port_widths.get(lay, port_widths_default.get(lay, 1))\n                           for lay in range(bot_layer, top_layer + 1)}\n\n        if port_parity is None:\n            port_parity = {lay: (0, 1) for lay in range(bot_layer, top_layer + 1)}\n        elif isinstance(port_parity, tuple) or isinstance(port_parity, list):\n            if len(port_parity) != 2:\n                raise ValueError('port parity should be a tuple/list of 2 elements.')\n            port_parity = {lay: port_parity for lay in range(bot_layer, top_layer + 1)}\n        else:\n            port_parity = {lay: port_parity.get(lay, (0, 1)) for lay in\n                           range(bot_layer, top_layer + 1)}\n\n        via_ext_dict = {lay: 0 for lay in 
range(bot_layer, top_layer + 1)}  # type: Dict[int, int]\n        # get via extensions on each layer\n        for vbot_layer in range(bot_layer, top_layer):\n            vtop_layer = vbot_layer + 1\n            bport_w = int(\n                self.grid.get_track_width(vbot_layer, port_widths[vbot_layer], unit_mode=True))\n            tport_w = int(\n                self.grid.get_track_width(vtop_layer, port_widths[vtop_layer], unit_mode=True))\n            bcap_w = int(round(cap_info[vbot_layer][0] / res))\n            tcap_w = int(round(cap_info[vtop_layer][0] / res))\n\n            # port-to-port via\n            vbext1, vtext1 = tuple2_to_int(\n                self.grid.get_via_extensions_dim(vbot_layer, bport_w, tport_w,\n                                                 unit_mode=True))\n            # cap-to-port via\n            vbext2 = int(self.grid.get_via_extensions_dim(vbot_layer, bcap_w, tport_w,\n                                                          unit_mode=True)[0])\n            # port-to-cap via\n            vtext2 = int(self.grid.get_via_extensions_dim(vbot_layer, bport_w, tcap_w,\n                                                          unit_mode=True)[1])\n\n            # record extension due to via\n            via_ext_dict[vbot_layer] = max(via_ext_dict[vbot_layer], vbext1, vbext2)\n            via_ext_dict[vtop_layer] = max(via_ext_dict[vtop_layer], vtext1, vtext2)\n\n        # find port locations and cap boundaries.\n        port_tracks = {}\n        cap_bounds = {}\n        cap_exts = {}\n        for cur_layer in range(bot_layer, top_layer + 1):\n            # mark bounding box as used.\n            self.mark_bbox_used(cur_layer, cap_box)\n\n            cur_num_ports = num_ports_on_edge.get(cur_layer, 1)\n            cur_port_width = port_widths[cur_layer]\n            cur_port_space = self.grid.get_num_space_tracks(cur_layer, cur_port_width,\n                                                            half_space=True)\n            if 
self.grid.get_direction(cur_layer) == 'x':\n                cur_lower, cur_upper = cap_box.bottom_unit, cap_box.top_unit\n            else:\n                cur_lower, cur_upper = cap_box.left_unit, cap_box.right_unit\n            # make sure adjacent layer via extension will not extend outside of cap bounding box.\n            adj_via_ext = 0\n            if cur_layer != bot_layer:\n                adj_via_ext = via_ext_dict[cur_layer - 1]\n            if cur_layer != top_layer:\n                adj_via_ext = max(adj_via_ext, via_ext_dict[cur_layer + 1])\n            # find track indices\n            if array:\n                tr_lower = self.grid.coord_to_track(cur_layer, cur_lower, unit_mode=True)\n                tr_upper = self.grid.coord_to_track(cur_layer, cur_upper, unit_mode=True)\n            else:\n                tr_lower = self.grid.find_next_track(cur_layer, cur_lower + adj_via_ext,\n                                                     tr_width=cur_port_width,\n                                                     half_track=True, mode=1, unit_mode=True)\n                tr_upper = self.grid.find_next_track(cur_layer, cur_upper - adj_via_ext,\n                                                     tr_width=cur_port_width,\n                                                     half_track=True, mode=-1, unit_mode=True)\n\n            port_delta = cur_port_width + max(port_sp_min.get(cur_layer, 0), cur_port_space)\n            if tr_lower + 2 * (cur_num_ports - 1) * port_delta >= tr_upper:\n                raise ValueError('Cannot draw MOM cap; area too small.')\n\n            ll0, lu0 = tuple2_to_int(\n                self.grid.get_wire_bounds(cur_layer, tr_lower, width=cur_port_width,\n                                          unit_mode=True))\n            tmp = self.grid.get_wire_bounds(cur_layer,\n                                            tr_lower + (cur_num_ports - 1) * port_delta,\n                                            width=cur_port_width,\n   
                                         unit_mode=True)\n            ll1, lu1 = tuple2_to_int(tmp)\n            tmp = self.grid.get_wire_bounds(cur_layer,\n                                            tr_upper - (cur_num_ports - 1) * port_delta,\n                                            width=cur_port_width,\n                                            unit_mode=True)\n            ul0, uu0 = tuple2_to_int(tmp)\n            ul1, uu1 = tuple2_to_int(self.grid.get_wire_bounds(cur_layer, tr_upper,\n                                                               width=cur_port_width,\n                                                               unit_mode=True))\n\n            # compute space from MOM cap wires to port wires\n            port_w = lu0 - ll0\n            lay_name = tech_info.get_layer_name(cur_layer)\n            if isinstance(lay_name, tuple) or isinstance(lay_name, list):\n                lay_name = lay_name[0]\n            lay_type = tech_info.get_layer_type(lay_name)\n            cur_margin = int(round(cap_margins[cur_layer] / res))\n            cur_margin = max(cur_margin, tech_info.get_min_space(lay_type, port_w, unit_mode=True))\n\n            lower_tracks = [tr_lower + idx * port_delta for idx in range(cur_num_ports)]\n            upper_tracks = [tr_upper - idx * port_delta for idx in range(cur_num_ports - 1, -1, -1)]\n            port_tracks[cur_layer] = (lower_tracks, upper_tracks)\n            cap_bounds[cur_layer] = (lu1 + cur_margin, ul0 - cur_margin)\n            cap_exts[cur_layer] = (ll0, uu1)\n\n        port_dict = {}\n        cap_wire_dict = {}\n        # draw ports/wires\n        for cur_layer in range(bot_layer, top_layer + 1):\n            cur_port_width = port_widths[cur_layer]\n            # find port/cap wires lower/upper coordinates\n            lower, upper = None, None\n            if cur_layer != top_layer:\n                lower, upper = cap_exts[cur_layer + 1]\n            if cur_layer != bot_layer:\n                tmpl, 
tmpu = cap_exts[cur_layer - 1]\n                lower = tmpl if lower is None else min(lower, tmpl)\n                upper = tmpu if upper is None else max(upper, tmpu)\n            assert lower is not None and upper is not None, \\\n                ('cur_layer is iterating and should never be equal '\n                 'to both bot_layer and top_layer at the same time')\n\n            via_ext = via_ext_dict[cur_layer]\n            lower -= via_ext\n            upper += via_ext\n\n            # draw lower and upper ports\n            lower_tracks, upper_tracks = port_tracks[cur_layer]\n            lower_warrs = [self.add_wires(cur_layer, tr_idx, lower, upper, width=cur_port_width,\n                                          unit_mode=True)\n                           for tr_idx in lower_tracks]\n            upper_warrs = [self.add_wires(cur_layer, tr_idx, lower, upper, width=cur_port_width,\n                                          unit_mode=True)\n                           for tr_idx in upper_tracks]\n\n            # assign port wires to positive/negative terminals\n            lpar, upar = port_parity[cur_layer]\n            if lpar == upar:\n                raise ValueError('Port parity must be different.')\n            elif lpar == 0:\n                plist = upper_warrs\n                nlist = lower_warrs\n            else:\n                plist = lower_warrs\n                nlist = upper_warrs\n\n            port_dict[cur_layer] = plist, nlist\n            if cur_layer != bot_layer:\n                # connect ports to layer below\n                for clist, blist in zip((plist, nlist), port_dict[cur_layer - 1]):\n                    if len(clist) == len(blist):\n                        iter_list = zip(clist, blist)\n                    else:\n                        iter_list = product(clist, blist)\n\n                    for cur_warr, bot_warr in iter_list:\n                        cur_tid = cur_warr.track_id.base_index\n                        cur_w = 
cur_warr.track_id.width\n                        bot_tid = bot_warr.track_id.base_index\n                        bot_w = bot_warr.track_id.width\n                        self.add_via_on_grid(cur_layer - 1, bot_tid, cur_tid, bot_width=bot_w,\n                                             top_width=cur_w)\n\n            # draw cap wires\n            cap_lower, cap_upper = cap_bounds[cur_layer]\n            cap_tot_space = cap_upper - cap_lower\n            cap_w, cap_sp = cap_info[cur_layer]\n            cap_w = int(round(cap_w / res))\n            cap_sp = int(round(cap_sp / res))\n            cap_pitch = cap_w + cap_sp\n            num_cap_wires = cap_tot_space // cap_pitch\n            cap_lower += (cap_tot_space - (num_cap_wires * cap_pitch - cap_sp)) // 2\n\n            is_horizontal = (self.grid.get_direction(cur_layer) == 'x')\n\n            if is_horizontal:\n                wbox = BBox(lower, cap_lower, upper, cap_lower + cap_w, res, unit_mode=True)\n            else:\n                wbox = BBox(cap_lower, lower, cap_lower + cap_w, upper, res, unit_mode=True)\n\n            lay_name_list = tech_info.get_layer_name(cur_layer)\n            if isinstance(lay_name_list, str):\n                lay_name_list = [lay_name_list]\n\n            # save cap wire information\n            cur_rect_box = wbox\n            cap_wire_dict[cur_layer] = (lpar, lay_name_list, cur_rect_box, num_cap_wires, cap_pitch)\n\n        # draw cap wires and connect to port\n        rect_list = []\n        for cur_layer in range(bot_layer, top_layer + 1):\n            cur_rect_list = []\n            lpar, lay_name_list, cap_base_box, num_cap_wires, cap_pitch = cap_wire_dict[cur_layer]\n            if cur_layer == bot_layer:\n                prev_plist = prev_nlist = None\n            else:\n                prev_plist, prev_nlist = port_dict[cur_layer - 1]\n            if cur_layer == top_layer:\n                next_plist = next_nlist = None\n            else:\n                next_plist, 
next_nlist = port_dict[cur_layer + 1]\n\n            cur_dir = self.grid.get_direction(cur_layer)\n            is_horizontal = (cur_dir == 'x')\n            next_dir = 'y' if is_horizontal else 'x'\n            num_lay_names = len(lay_name_list)\n            p_lists = (prev_plist, next_plist)\n            n_lists = (prev_nlist, next_nlist)\n            for idx in range(num_cap_wires):\n                # figure out the port wire to connect this cap wire to\n                if idx % 2 == 0 and lpar == 0 or idx % 2 == 1 and lpar == 1:\n                    ports_list = p_lists\n                else:\n                    ports_list = n_lists\n\n                # draw the cap wire\n                cap_lay_name = lay_name_list[idx % num_lay_names]\n                if is_horizontal:\n                    cap_box = cap_base_box.move_by(dy=cap_pitch * idx, unit_mode=True)\n                else:\n                    cap_box = cap_base_box.move_by(dx=cap_pitch * idx, unit_mode=True)\n                rect = self.add_rect(cap_lay_name, cap_box)\n                cur_rect_list.append(rect)\n\n                # connect cap wire to port\n                for pidx, port in enumerate(ports_list):\n                    if port is not None:\n                        port_warr = port[(idx // 2) % len(port)]\n                        port_lay_name = self.grid.get_layer_name(port_warr.layer_id,\n                                                                 port_warr.track_id.base_index)\n                        vbox = cap_box.intersect(port_warr.get_bbox_array(self.grid).base)\n                        if pidx == 1:\n                            self.add_via(vbox, cap_lay_name, port_lay_name, cur_dir)\n                        else:\n                            self.add_via(vbox, port_lay_name, cap_lay_name, next_dir)\n\n            rect_list.append(cur_rect_list)\n\n        if return_rect:\n            return port_dict, rect_list\n        else:\n            return port_dict\n\n    def 
reserve_tracks(self,  # type: TemplateBase\n                       layer_id,  # type: int\n                       track_idx,  # type: Union[float, int]\n                       width=1,  # type: int\n                       num=1,  # type: int\n                       pitch=0,  # type: Union[float, int]\n                       ):\n        # type: (...) -> None\n        \"\"\"Reserve the given routing tracks so that power fill will not fill these tracks.\n\n        Note: the size of this template should be set before calling this method.\n\n        Parameters\n        ----------\n        layer_id : int\n            the wire layer ID.\n        track_idx : Union[float, int]\n            the smallest wire track index.\n        width : int\n            the wire width in number of tracks.\n        num : int\n            number of wires.\n        pitch : Union[float, int]\n            the wire pitch.\n        \"\"\"\n\n        bnd_box = self.bound_box\n        if bnd_box is None:\n            raise ValueError(\"bound_box is not set\")\n\n        tid = TrackID(layer_id, track_idx, width=width, num=num, pitch=pitch)\n        if self.grid.get_direction(layer_id) == 'x':\n            upper = bnd_box.width_unit\n        else:\n            upper = bnd_box.height_unit\n        warr = WireArray(tid, 0, upper, res=self.grid.resolution, unit_mode=True)\n\n        lay_name = self.grid.get_layer_name(layer_id, track_idx)\n        self._used_tracks.record_rect(self.grid, lay_name, warr.get_bbox_array(self.grid))\n\n    def connect_wires(self,  # type: TemplateBase\n                      wire_arr_list,  # type: Union[WireArray, List[WireArray]]\n                      lower=None,  # type: Optional[Union[int, float]]\n                      upper=None,  # type: Optional[Union[int, float]]\n                      debug=False,  # type: bool\n                      unit_mode=False,  # type: bool\n                      ):\n        # type: (...) 
-> List[WireArray]\n        \"\"\"Connect all given WireArrays together.\n\n        all WireArrays must be on the same layer.\n\n        Parameters\n        ----------\n        wire_arr_list : Union[WireArr, List[WireArr]]\n            WireArrays to connect together.\n        lower : Optional[Union[int, float]]\n            if given, extend connection wires to this lower coordinate.\n        upper : Optional[Union[int, float]]\n            if given, extend connection wires to this upper coordinate.\n        debug : bool\n            True to print debug messages.\n        unit_mode: bool\n            True if lower/upper/fill_margin is given in resolution units.\n\n        Returns\n        -------\n        conn_list : List[WireArray]\n            list of connection wires created.\n        \"\"\"\n        grid = self.grid\n        res = grid.resolution\n\n        if not unit_mode:\n            if lower is not None:\n                lower = int(round(lower / res))\n            if upper is not None:\n                upper = int(round(upper / res))\n        else:\n            if lower is not None:\n                lower = int(lower)\n            if upper is not None:\n                upper = int(upper)\n\n        if isinstance(wire_arr_list, WireArray):\n            wire_arr_list = [wire_arr_list]\n        else:\n            pass\n\n        if not wire_arr_list:\n            # do nothing\n            return []\n\n        # record all wire ranges\n        a = wire_arr_list[0]\n        layer_id = a.layer_id\n        direction = grid.get_direction(layer_id)\n        is_horiz = direction == 'x'\n        perp_dir = 'y' if direction == 'x' else 'x'\n        htr_pitch = int(grid.get_track_pitch(layer_id, unit_mode=True)) // 2\n        intv_set = IntervalSet()\n        for wire_arr in wire_arr_list:\n            if wire_arr.layer_id != layer_id:\n                raise ValueError('WireArray layer ID != %d' % layer_id)\n\n            cur_range = wire_arr.lower_unit, 
wire_arr.upper_unit\n            box_arr = wire_arr.get_bbox_array(grid)\n            for box in box_arr:\n                intv = tuple2_to_int(box.get_interval(perp_dir, unit_mode=True))\n                intv_rang_item = intv_set.get_first_overlap_item(intv)\n                if intv_rang_item is None:\n                    range_set = IntervalSet()\n                    range_set.add(cur_range)\n                    intv_set.add(intv, val=range_set)\n                elif intv_rang_item[0] == intv:\n                    intv_rang_item[1].add(cur_range, merge=True, abut=True)\n                else:\n                    raise ValueError('wire interval {} overlap existing wires.'.format(intv))\n\n        # draw wires, group into arrays\n        new_warr_list = []\n        base_start = None  # type: Optional[int]\n        base_end = None  # type: Optional[int]\n        base_intv = None  # type: Optional[Tuple[int, int]]\n        base_width = None  # type: Optional[int]\n        count = 0\n        hpitch = 0\n        last_lower = 0\n        for intv, range_set in intv_set.items():\n            cur_start = range_set.get_start()  # type: int\n            cur_end = range_set.get_end()  # type: int\n            add = len(range_set) > 1\n            if lower is not None and lower < cur_start:\n                cur_start = lower\n                add = True\n            if upper is not None and upper > cur_end:\n                cur_end = upper\n                add = True\n\n            cur_lower, cur_upper = intv\n            if add:\n                tr_id = grid.coord_to_track(layer_id, (cur_lower + cur_upper) // 2, unit_mode=True)\n                layer_name = grid.get_layer_name(layer_id, tr_id)\n                if is_horiz:\n                    box = BBox(cur_start, cur_lower, cur_end, cur_upper, res, unit_mode=True)\n                else:\n                    box = BBox(cur_lower, cur_start, cur_upper, cur_end, res, unit_mode=True)\n                self.add_rect(layer_name, 
box)\n\n            if debug:\n                print('wires intv: %s, range: (%d, %d)' % (intv, cur_start, cur_end))\n            cur_width = cur_upper - cur_lower\n            if count == 0:\n                base_intv = intv\n                base_start = cur_start\n                base_end = cur_end\n                base_width = cur_upper - cur_lower\n                count += 1\n                hpitch = 0\n            else:\n                assert base_intv is not None, \"count == 0 should have set base_intv\"\n                assert base_width is not None, \"count == 0 should have set base_width\"\n                assert base_start is not None, \"count == 0 should have set base_start\"\n                assert base_end is not None, \"count == 0 should have set base_end\"\n                if cur_start == base_start and cur_end == base_end and base_width == cur_width:\n                    # length and width matches\n                    cur_hpitch = (cur_lower - last_lower) // htr_pitch\n                    if count == 1:\n                        # second wire, set half pitch\n                        hpitch = cur_hpitch\n                        count += 1\n                    elif hpitch == cur_hpitch:\n                        # pitch matches\n                        count += 1\n                    else:\n                        # pitch does not match, add current wires and start anew\n                        tr_idx, tr_width = tuple2_to_float_int(\n                            grid.interval_to_track(layer_id, base_intv,\n                                                   unit_mode=True))\n                        track_id = TrackID(layer_id, tr_idx, width=tr_width,\n                                           num=count, pitch=hpitch / 2)\n                        warr = WireArray(track_id, base_start, base_end, res=res, unit_mode=True)\n                        new_warr_list.append(warr)\n                        base_intv = intv\n                        count = 1\n       
                 hpitch = 0\n                else:\n                    # length/width does not match, add cumulated wires and start anew\n                    tr_idx, tr_width = tuple2_to_float_int(\n                        grid.interval_to_track(layer_id, base_intv, unit_mode=True))\n                    track_id = TrackID(layer_id, tr_idx, width=tr_width,\n                                       num=count, pitch=hpitch / 2)\n                    warr = WireArray(track_id, base_start, base_end, res=res, unit_mode=True)\n                    new_warr_list.append(warr)\n                    base_start = cur_start\n                    base_end = cur_end\n                    base_intv = intv\n                    base_width = cur_width\n                    count = 1\n                    hpitch = 0\n\n            # update last lower coordinate\n            last_lower = cur_lower\n\n        assert base_intv is not None, \"count == 0 should have set base_intv\"\n        assert base_start is not None, \"count == 0 should have set base_start\"\n        assert base_end is not None, \"count == 0 should have set base_end\"\n\n        # add last wires\n        tr_idx, tr_width = tuple2_to_float_int(\n            grid.interval_to_track(layer_id, base_intv, unit_mode=True))\n        track_id = TrackID(layer_id, tr_idx, tr_width, num=count, pitch=hpitch / 2)\n        warr = WireArray(track_id, base_start, base_end, res=res, unit_mode=True)\n        new_warr_list.append(warr)\n        return new_warr_list\n\n    def _draw_via_on_track(self, wlayer, box_arr, track_id, tl_unit=None,\n                           tu_unit=None):\n        # type: (str, BBoxArray, TrackID, Optional[float], Optional[float]) -> Tuple[float, float]\n        \"\"\"Helper method.  
Draw vias on the intersection of the BBoxArray and TrackID.\"\"\"\n        grid = self.grid\n        res = grid.resolution\n\n        tr_layer_id = track_id.layer_id\n        tr_width = track_id.width\n        tr_dir = grid.get_direction(tr_layer_id)\n        tr_pitch = grid.get_track_pitch(tr_layer_id)\n\n        w_layer_id = grid.tech_info.get_layer_id(wlayer)\n        w_dir = 'x' if tr_dir == 'y' else 'y'\n        wbase = box_arr.base\n        for sub_track_id in track_id.sub_tracks_iter(grid):\n            base_idx = sub_track_id.base_index\n            if w_layer_id > tr_layer_id:\n                bot_layer = grid.get_layer_name(tr_layer_id, base_idx)\n                top_layer = wlayer\n                bot_dir = tr_dir\n            else:\n                bot_layer = wlayer\n                top_layer = grid.get_layer_name(tr_layer_id, base_idx)\n                bot_dir = w_dir\n            # compute via bounding box\n            tl, tu = tuple2_to_int(\n                grid.get_wire_bounds(tr_layer_id, base_idx, width=tr_width, unit_mode=True))\n            if tr_dir == 'x':\n                via_box = BBox(wbase.left_unit, tl, wbase.right_unit, tu, res, unit_mode=True)\n                nx, ny = box_arr.nx, sub_track_id.num\n                spx, spy = box_arr.spx, sub_track_id.pitch * tr_pitch\n                via = self.add_via(via_box, bot_layer, top_layer, bot_dir,\n                                   nx=nx, ny=ny, spx=spx, spy=spy)\n                vtbox = via.bottom_box if w_layer_id > tr_layer_id else via.top_box\n                if tl_unit is None:\n                    tl_unit = vtbox.left_unit\n                else:\n                    tl_unit = min(tl_unit, vtbox.left_unit)\n                if tu_unit is None:\n                    tu_unit = vtbox.right_unit + (nx - 1) * box_arr.spx_unit\n                else:\n                    tu_unit = max(tu_unit, vtbox.right_unit + (nx - 1) * box_arr.spx_unit)\n            else:\n                via_box = 
BBox(tl, wbase.bottom_unit, tu, wbase.top_unit, res, unit_mode=True)\n                nx, ny = sub_track_id.num, box_arr.ny\n                spx, spy = sub_track_id.pitch * tr_pitch, box_arr.spy\n                via = self.add_via(via_box, bot_layer, top_layer, bot_dir,\n                                   nx=nx, ny=ny, spx=spx, spy=spy)\n                vtbox = via.bottom_box if w_layer_id > tr_layer_id else via.top_box\n                if tl_unit is None:\n                    tl_unit = vtbox.bottom_unit\n                else:\n                    tl_unit = min(tl_unit, vtbox.bottom_unit)\n                if tu_unit is None:\n                    tu_unit = vtbox.top_unit + (ny - 1) * box_arr.spy_unit\n                else:\n                    tu_unit = max(tu_unit, vtbox.top_unit + (ny - 1) * box_arr.spy_unit)\n        assert tl_unit is not None and tu_unit is not None, \\\n            \"for loop should have assigned tl_unit and tu_unit\"\n\n        return tl_unit, tu_unit\n\n    def connect_bbox_to_tracks(self,  # type: TemplateBase\n                               layer_name,  # type: str\n                               box_arr,  # type: Union[BBox, BBoxArray]\n                               track_id,  # type: TrackID\n                               track_lower=None,  # type: Optional[Union[int, float]]\n                               track_upper=None,  # type: Optional[Union[int, float]]\n                               unit_mode=False,  # type: bool\n                               min_len_mode=None,  # type: Optional[int]\n                               wire_lower=None,  # type: Optional[Union[float, int]]\n                               wire_upper=None,  # type: Optional[Union[float, int]]\n                               ):\n        # type: (...) 
-> WireArray\n        \"\"\"Connect the given primitive wire to given tracks.\n\n        Parameters\n        ----------\n        layer_name : str\n            the primitive wire layer name.\n        box_arr : Union[BBox, BBoxArray]\n            bounding box of the wire(s) to connect to tracks.\n        track_id : TrackID\n            TrackID that specifies the track(s) to connect the given wires to.\n        track_lower : Optional[Union[int, float]]\n            if given, extend track(s) to this lower coordinate.\n        track_upper : Optional[Union[int, float]]\n            if given, extend track(s) to this upper coordinate.\n        unit_mode: bool\n            True if track_lower/track_upper/fill_margin is given in resolution units.\n        min_len_mode : Optional[int]\n            If not None, will extend track so it satisfy minimum length requirement.\n            Use -1 to extend lower bound, 1 to extend upper bound, 0 to extend both equally.\n        wire_lower : Optional[Union[float, int]]\n            if given, extend wire(s) to this lower coordinate.\n        wire_upper : Optional[Union[float, int]]\n            if given, extend wire(s) to this upper coordinate.\n\n        Returns\n        -------\n        wire_arr : WireArray\n            WireArray representing the tracks created.\n        \"\"\"\n        if isinstance(box_arr, BBox):\n            box_arr = BBoxArray(box_arr)\n        else:\n            pass\n\n        grid = self.grid\n        res = grid.resolution\n        if not unit_mode:\n            if track_lower is not None:\n                track_lower = int(round(track_lower / res))\n            if track_upper is not None:\n                track_upper = int(round(track_upper / res))\n            if wire_lower is not None:\n                wire_lower = int(round(wire_lower / res))\n            if wire_upper is not None:\n                wire_upper = int(round(wire_upper / res))\n\n        # extend bounding boxes to tracks\n        tl, tu = 
track_id.get_bounds(grid, unit_mode=True)\n        if wire_lower is not None:\n            tl = min(wire_lower, tl)\n        if wire_upper is not None:\n            tu = max(wire_upper, tu)\n\n        tr_layer = track_id.layer_id\n        tr_dir = grid.get_direction(tr_layer)\n        base = box_arr.base\n        if tr_dir == 'x':\n            self.add_rect(layer_name,\n                          base.extend(y=tl, unit_mode=True).extend(y=tu, unit_mode=True),\n                          nx=box_arr.nx, ny=box_arr.ny, spx=box_arr.spx, spy=box_arr.spy)\n        else:\n            self.add_rect(layer_name,\n                          base.extend(x=tl, unit_mode=True).extend(x=tu, unit_mode=True),\n                          nx=box_arr.nx, ny=box_arr.ny, spx=box_arr.spx, spy=box_arr.spy)\n\n        # draw vias\n        tl_unit, tu_unit = self._draw_via_on_track(layer_name, box_arr, track_id,\n                                                   tl_unit=track_lower, tu_unit=track_upper)\n\n        # draw tracks\n        if min_len_mode is not None:\n            # extend track to meet minimum length\n            min_len = grid.get_min_length(tr_layer, track_id.width, unit_mode=True)\n            # make sure minimum length is even so that middle coordinate exists\n            min_len = -(-min_len // 2) * 2\n            tr_len = tu_unit - tl_unit\n            if min_len > tr_len:\n                ext = min_len - tr_len\n                if min_len_mode < 0:\n                    tl_unit -= ext\n                elif min_len_mode > 0:\n                    tu_unit += ext\n                else:\n                    tl_unit -= ext // 2\n                    tu_unit = tl_unit + min_len\n        result = WireArray(track_id, tl_unit, tu_unit, res=res, unit_mode=True)\n        for layer_name, bbox_arr in result.wire_arr_iter(grid):\n            self.add_rect(layer_name, bbox_arr)\n\n        return result\n\n    def connect_bbox_to_differential_tracks(self,  # type: TemplateBase\n             
                               layer_name,  # type: str\n                                            pbox,  # type: Union[BBox, BBoxArray]\n                                            nbox,  # type: Union[BBox, BBoxArray]\n                                            tr_layer_id,  # type: int\n                                            ptr_idx,  # type: Union[int, float]\n                                            ntr_idx,  # type: Union[int, float]\n                                            width=1,  # type: int\n                                            track_lower=None,  # type: Optional[Union[float, int]]\n                                            track_upper=None,  # type: Optional[Union[float, int]]\n                                            unit_mode=False,  # type: bool\n                                            ):\n        # type: (...) -> Tuple[Optional[WireArray], Optional[WireArray]]\n        \"\"\"Connect the given differential primitive wires to two tracks symmetrically.\n\n        This method makes sure the connections are symmetric and have identical parasitics.\n\n        Parameters\n        ----------\n        layer_name : str\n            the primitive wire layer name.\n        pbox : Union[BBox, BBoxArray]\n            positive signal wires to connect.\n        nbox : Union[BBox, BBoxArray]\n            negative signal wires to connect.\n        tr_layer_id : int\n            track layer ID.\n        ptr_idx : Union[int, float]\n            positive track index.\n        ntr_idx : Union[int, float]\n            negative track index.\n        width : int\n            track width in number of tracks.\n        track_lower : Optional[Union[float, int]]\n            if given, extend track(s) to this lower coordinate.\n        track_upper : Optional[Union[float, int]]\n            if given, extend track(s) to this upper coordinate.\n        unit_mode: bool\n            True if track_lower/track_upper/fill_margin is given in resolution 
units.\n\n        Returns\n        -------\n        p_track : Optional[WireArray]\n            the positive track.\n        n_track : Optional[WireArray]\n            the negative track.\n        \"\"\"\n        track_list = self.connect_bbox_to_matching_tracks(layer_name, [pbox, nbox], tr_layer_id,\n                                                          [ptr_idx, ntr_idx], width=width,\n                                                          track_lower=track_lower,\n                                                          track_upper=track_upper,\n                                                          unit_mode=unit_mode)\n        return track_list[0], track_list[1]\n\n    def connect_bbox_to_matching_tracks(self,  # type: TemplateBase\n                                        layer_name,  # type: str\n                                        box_arr_list,  # type: List[Union[BBox, BBoxArray]]\n                                        tr_layer_id,  # type: int\n                                        tr_idx_list,  # type: List[Union[int, float]]\n                                        width=1,  # type: int\n                                        track_lower=None,  # type: Optional[Union[int, float]]\n                                        track_upper=None,  # type: Optional[Union[int, float]]\n                                        unit_mode=False  # type: bool\n                                        ):\n        # type: (...) 
-> List[Optional[WireArray]]\n        \"\"\"Connect the given primitive wire to given tracks.\n\n        Parameters\n        ----------\n        layer_name : str\n            the primitive wire layer name.\n        box_arr_list : List[Union[BBox, BBoxArray]]\n            bounding box of the wire(s) to connect to tracks.\n        tr_layer_id : int\n            track layer ID.\n        tr_idx_list : List[Union[int, float]]\n            list of track indices.\n        width : int\n            track width in number of tracks.\n        track_lower : Optional[Union[int, float]]\n            if given, extend track(s) to this lower coordinate.\n        track_upper : Optional[Union[int, float]]\n            if given, extend track(s) to this upper coordinate.\n        unit_mode: bool\n            True if track_lower/track_upper/fill_margin is given in resolution units.\n\n        Returns\n        -------\n        wire_arr : WireArray\n            WireArray representing the tracks created.\n        \"\"\"\n        grid = self.grid\n        res = grid.resolution\n        if not unit_mode:\n            if track_lower is not None:\n                track_lower = int(round(track_lower / res))\n            if track_upper is not None:\n                track_upper = int(round(track_upper / res))\n\n        num_tracks = len(tr_idx_list)\n        if num_tracks != len(box_arr_list):\n            raise ValueError('wire list length and track index list length mismatch.')\n        if num_tracks == 0:\n            raise ValueError('No tracks given')\n        w_layer_id = grid.tech_info.get_layer_id(layer_name)\n        if abs(w_layer_id - tr_layer_id) != 1:\n            raise ValueError('Given primitive wires not adjacent to given track layer.')\n        bot_layer_id = min(w_layer_id, tr_layer_id)\n\n        # compute wire_lower/upper without via extension\n        w_lower, w_upper = tuple2_to_int(\n            grid.get_wire_bounds(tr_layer_id, tr_idx_list[0], width=width,\n                 
                unit_mode=True))\n        for tr_idx in islice(tr_idx_list, 1, None):\n            cur_low, cur_up = tuple2_to_int(grid.get_wire_bounds(tr_layer_id, tr_idx, width=width,\n                                                                 unit_mode=True))\n            w_lower = min(w_lower, cur_low)\n            w_upper = max(w_upper, cur_up)\n\n        # separate wire arrays into bottom/top tracks, compute wire/track lower/upper coordinates\n        tr_width = grid.get_track_width(tr_layer_id, width, unit_mode=True)\n        tr_dir = grid.get_direction(tr_layer_id)\n        tr_horizontal = tr_dir == 'x'\n        bbox_bounds = (None, None)  # type: Tuple[Optional[int], Optional[int]]\n        for idx, box_arr in enumerate(box_arr_list):\n            # convert to WireArray list\n            if isinstance(box_arr, BBox):\n                box_arr = BBoxArray(box_arr)\n            else:\n                pass\n\n            base = box_arr.base\n            if w_layer_id < tr_layer_id:\n                bot_dim = base.width_unit if tr_horizontal else base.height_unit\n                top_dim = tr_width\n                w_ext, tr_ext = tuple2_to_int(\n                    grid.get_via_extensions_dim(bot_layer_id, bot_dim, top_dim,\n                                                unit_mode=True))\n            else:\n                bot_dim = tr_width\n                top_dim = base.width_unit if tr_horizontal else base.height_unit\n                tr_ext, w_ext = tuple2_to_int(\n                    grid.get_via_extensions_dim(bot_layer_id, bot_dim, top_dim,\n                                                unit_mode=True))\n\n            if bbox_bounds[0] is None:\n                bbox_bounds = (w_lower - w_ext, w_upper + w_ext)\n            else:\n                bbox_bounds = (\n                    min(bbox_bounds[0], w_lower - w_ext), max(bbox_bounds[1], w_upper + w_ext))\n\n            # compute track lower/upper including via extension\n            tr_bounds 
= tuple2_to_int(\n                box_arr.get_overall_bbox().get_interval(tr_dir, unit_mode=True))\n            if track_lower is None:\n                track_lower = tr_bounds[0] - tr_ext\n            else:\n                track_lower = min(track_lower, tr_bounds[0] - tr_ext)\n            if track_upper is None:\n                track_upper = tr_bounds[1] + tr_ext\n            else:\n                track_upper = max(track_upper, tr_bounds[1] + tr_ext)\n        assert track_lower is not None and track_upper is not None, \\\n            \"track_lower/track_upper should be set above\"\n\n        # draw tracks\n        track_list = []  # type: List[Optional[WireArray]]\n        for box_arr, tr_idx in zip(box_arr_list, tr_idx_list):\n            track_list.append(self.add_wires(tr_layer_id, tr_idx, track_lower, track_upper,\n                                             width=width, unit_mode=True))\n\n            tr_id = TrackID(tr_layer_id, tr_idx, width=width)\n            self.connect_bbox_to_tracks(layer_name, box_arr, tr_id, wire_lower=bbox_bounds[0],\n                                        wire_upper=bbox_bounds[1], unit_mode=True)\n\n        return track_list\n\n    def connect_to_tracks(self,  # type: TemplateBase\n                          wire_arr_list,  # type: Union[WireArray, List[WireArray]]\n                          track_id,  # type: TrackID\n                          wire_lower=None,  # type: Optional[Union[float, int]]\n                          wire_upper=None,  # type: Optional[Union[float, int]]\n                          track_lower=None,  # type: Optional[Union[float, int]]\n                          track_upper=None,  # type: Optional[Union[float, int]]\n                          unit_mode=False,  # type: bool\n                          min_len_mode=None,  # type: Optional[int]\n                          return_wires=False,  # type: bool\n                          debug=False,  # type: bool\n                          ):\n        # type: 
(...) -> Union[Optional[WireArray], Tuple[Optional[WireArray], List[WireArray]]]\n        \"\"\"Connect all given WireArrays to the given track(s).\n\n        All given wires should be on adjacent layers of the track.\n\n        Parameters\n        ----------\n        wire_arr_list : Union[WireArray, List[WireArray]]\n            list of WireArrays to connect to track.\n        track_id : TrackID\n            TrackID that specifies the track(s) to connect the given wires to.\n        wire_lower : Optional[Union[float, int]]\n            if given, extend wire(s) to this lower coordinate.\n        wire_upper : Optional[Union[float, int]]\n            if given, extend wire(s) to this upper coordinate.\n        track_lower : Optional[Union[float, int]]\n            if given, extend track(s) to this lower coordinate.\n        track_upper : Optional[Union[float, int]]\n            if given, extend track(s) to this upper coordinate.\n        unit_mode : bool\n            True if track_lower/track_upper is given in resolution units.\n        min_len_mode : Optional[int]\n            If not None, will extend track so it satisfy minimum length requirement.\n            Use -1 to extend lower bound, 1 to extend upper bound, 0 to extend both equally.\n        return_wires : bool\n            True to return the extended wires.\n        debug : bool\n            True to print debug messages.\n\n        Returns\n        -------\n        wire_arr : Union[Optional[WireArray], Tuple[Optional[WireArray], List[WireArray]]]\n            WireArray representing the tracks/wires created.\n            If return_wires is True, returns a Tuple[Optional[WireArray], List[WireArray]].\n            If there was nothing to do, the first argument will be None.\n            Otherwise, returns a WireArray.\n        \"\"\"\n        if isinstance(wire_arr_list, WireArray):\n            # convert to list.\n            wire_arr_list = [wire_arr_list]\n        else:\n            pass\n\n        if not 
wire_arr_list:\n            # do nothing\n            if return_wires:\n                return None, []\n            return None\n\n        grid = self.grid\n        res = grid.resolution\n\n        if track_upper is not None:\n            if not unit_mode:\n                track_upper = int(round(track_upper / res))\n            else:\n                track_upper = int(track_upper)\n        if track_lower is not None:\n            if not unit_mode:\n                track_lower = int(round(track_lower / res))\n            else:\n                track_lower = int(track_lower)\n\n        # find min/max track Y coordinates\n        tr_layer_id = track_id.layer_id\n        wl, wu = tuple2_to_int(track_id.get_bounds(grid, unit_mode=True))\n        if wire_lower is not None:\n            if not unit_mode:\n                wire_lower = int(round(wire_lower / res))\n            else:\n                wire_lower = int(wire_lower)\n            wl = min(wire_lower, wl)\n\n        if wire_upper is not None:\n            if not unit_mode:\n                wire_upper = int(round(wire_upper / res))\n            else:\n                wire_upper = int(wire_upper)\n            wu = max(wire_upper, wu)\n\n        # get top wire and bottom wire list\n        top_list = []\n        bot_list = []\n        for wire_arr in wire_arr_list:\n            cur_layer_id = wire_arr.layer_id\n            if cur_layer_id == tr_layer_id + 1:\n                top_list.append(wire_arr)\n            elif cur_layer_id == tr_layer_id - 1:\n                bot_list.append(wire_arr)\n            else:\n                raise ValueError(\n                    'WireArray layer %d cannot connect to layer %d' % (cur_layer_id, tr_layer_id))\n\n        # connect wires together\n        top_wire_list = self.connect_wires(top_list, lower=wl, upper=wu, unit_mode=True,\n                                           debug=debug)\n        bot_wire_list = self.connect_wires(bot_list, lower=wl, upper=wu, unit_mode=True,\n   
                                        debug=debug)\n\n        # draw vias\n        for w_layer_id, wire_list in ((tr_layer_id + 1, top_wire_list),\n                                      (tr_layer_id - 1, bot_wire_list)):\n            for wire_arr in wire_list:\n                for wlayer, box_arr in wire_arr.wire_arr_iter(grid):\n                    track_lower, track_upper = self._draw_via_on_track(wlayer, box_arr, track_id,\n                                                                       tl_unit=track_lower,\n                                                                       tu_unit=track_upper)\n        assert_msg = \"track_lower/track_upper should have been set just above\"\n        assert track_lower is not None and track_upper is not None, assert_msg\n\n        if min_len_mode is not None:\n            # extend track to meet minimum length\n            min_len = int(grid.get_min_length(tr_layer_id, track_id.width, unit_mode=True))\n            # make sure minimum length is even so that middle coordinate exists\n            min_len = -(-min_len // 2) * 2\n            tr_len = track_upper - track_lower\n            if min_len > tr_len:\n                ext = min_len - tr_len\n                if min_len_mode < 0:\n                    track_lower -= ext\n                elif min_len_mode > 0:\n                    track_upper += ext\n                else:\n                    track_lower -= ext // 2\n                    track_upper = track_lower + min_len\n\n        # draw tracks\n        result = WireArray(track_id, track_lower, track_upper, res=res, unit_mode=True)\n        for layer_name, bbox_arr in result.wire_arr_iter(grid):\n            self.add_rect(layer_name, bbox_arr)\n\n        if return_wires:\n            top_wire_list.extend(bot_wire_list)\n            return result, top_wire_list\n        else:\n            return result\n\n    def connect_to_track_wires(self,  # type: TemplateBase\n                               wire_arr_list,  # 
type: Union[WireArray, List[WireArray]]\n                               track_wires,  # type: Union[WireArray, List[WireArray]]\n                               min_len_mode=None,  # type: Optional[int]\n                               debug=False,  # type: bool\n                               ):\n        # type: (...) -> Union[WireArray, List[WireArray]]\n        \"\"\"Connect all given WireArrays to the given WireArrays on adjacent layer.\n\n        Parameters\n        ----------\n        wire_arr_list : Union[WireArray, List[WireArray]]\n            list of WireArrays to connect to track.\n        track_wires : Union[WireArray, List[WireArray]]\n            list of tracks as WireArrays.\n        min_len_mode : Optional[int]\n            If not None, will extend track so it satisfy minimum length requirement.\n            Use -1 to extend lower bound, 1 to extend upper bound, 0 to extend both equally.\n        debug : bool\n            True to print debug messages.\n\n        Returns\n        -------\n        wire_arr : Union[WireArray, List[WireArray]]\n            WireArray representing the tracks created.  
None if nothing to do.\n        \"\"\"\n        res = self.grid.resolution\n\n        ans = []  # type: List[WireArray]\n        if isinstance(track_wires, WireArray):\n            ans_is_list = False\n            track_wires = [track_wires]\n        else:\n            ans_is_list = True\n\n        for warr in track_wires:\n            track_lower = int(round(warr.lower / res))\n            track_upper = int(round(warr.upper / res))\n            tr = self.connect_to_tracks(wire_arr_list, warr.track_id,\n                                        track_lower=track_lower, track_upper=track_upper,\n                                        unit_mode=True, min_len_mode=min_len_mode, debug=debug,\n                                        return_wires=False)\n            assert tr is not None, \"connect_to_tracks did nothing\"\n            assert isinstance(tr, WireArray), \"return_wires=False should return a WireArray\"\n            ans.append(tr)\n\n        if not ans_is_list:\n            return ans[0]\n        return ans\n\n    def connect_with_via_stack(self,  # type: TemplateBase\n                               wire_array,  # type: Union[WireArray, List[WireArray]]\n                               track_id,  # type: TrackID\n                               tr_w_list=None,  # type: Optional[List[int]]\n                               tr_mode_list=None,  # type: Optional[Union[int, List[int]]]\n                               min_len_mode_list=None,  # type: Optional[Union[int, List[int]]]\n                               debug=False,  # type: bool\n                               ):\n        # type: (...) -> List[WireArray]\n        \"\"\"Connect a single wire to the given track by using a via stack.\n\n        This is a convenience function that draws via connections through several layers\n        at once.  
With optional parameters to control the track widths on each\n        intermediate layers.\n\n        Parameters\n        ----------\n        wire_array : Union[WireArray, List[WireArray]]\n            the starting WireArray.\n        track_id : TrackID\n            the TrackID to connect to.\n        tr_w_list : Optional[List[int]]\n            the track widths to use on each layer.  If not specified, will compute automatically.\n        tr_mode_list : Optional[Union[int, List[int]]]\n            If tracks on intermediate layers do not line up nicely,\n            the track mode flags determine whether to pick upper or lower tracks\n        min_len_mode_list : Optional[Union[int, List[int]]]\n            minimum length mode flags on each layer.\n        debug : bool\n            True to print debug messages.\n\n        Returns\n        -------\n        warr_list : List[WireArray]\n            List of created WireArrays.\n        \"\"\"\n        if not isinstance(wire_array, WireArray):\n            # error checking\n            if len(wire_array) != 1:\n                raise ValueError('connect_with_via_stack() only works on WireArray '\n                                 'and TrackID with a single wire.')\n            # convert to WireArray.\n            wire_array = wire_array[0]\n\n        # error checking\n        warr_tid = wire_array.track_id\n        warr_layer = warr_tid.layer_id\n        tr_layer = track_id.layer_id\n        tr_index = track_id.base_index\n        if warr_tid.num != 1 or track_id.num != 1:\n            raise ValueError('connect_with_via_stack() only works on WireArray '\n                             'and TrackID with a single wire.')\n        if tr_layer == warr_layer:\n            raise ValueError('Cannot connect wire to track on the same layer.')\n\n        num_connections = abs(tr_layer - warr_layer)\n\n        # set default values\n        if tr_w_list is None:\n            tr_w_list = [-1] * num_connections\n        elif len(tr_w_list) 
== num_connections - 1:\n            # user might be inclined to not list the last track width, as it is included in\n            # TrackID.  Allow for this exception\n            tr_w_list = tr_w_list + [-1]\n        elif len(tr_w_list) != num_connections:\n            raise ValueError('tr_w_list must have exactly %d elements.' % num_connections)\n        else:\n            # create a copy of the given list, as this list may be modified later.\n            tr_w_list = list(tr_w_list)\n\n        if tr_mode_list is None:\n            tr_mode_list = [0] * num_connections\n        elif isinstance(tr_mode_list, int):\n            tr_mode_list = [tr_mode_list] * num_connections\n        elif len(tr_mode_list) != num_connections:\n            raise ValueError('tr_mode_list must have exactly %d elements.' % num_connections)\n\n        if min_len_mode_list is None:\n            min_len_mode_list_resolved = [None] * num_connections  # type: List[Optional[int]]\n        elif isinstance(min_len_mode_list, int):\n            min_len_mode_list_resolved = [min_len_mode_list] * num_connections\n        elif len(min_len_mode_list) != num_connections:\n            raise ValueError('min_len_mode_list must have exactly %d elements.' 
% num_connections)\n        else:\n            min_len_mode_list_resolved = min_len_mode_list\n\n        # determine via location\n        grid = self.grid\n        w_dir = grid.get_direction(warr_layer)\n        t_dir = grid.get_direction(tr_layer)\n        w_coord = grid.track_to_coord(warr_layer, warr_tid.base_index, unit_mode=True)\n        t_coord = grid.track_to_coord(tr_layer, tr_index, unit_mode=True)\n        if w_dir != t_dir:\n            x0, y0 = (w_coord, t_coord) if w_dir == 'y' else (t_coord, w_coord)\n        else:\n            w_mid = int(round(wire_array.middle / grid.resolution))\n            x0, y0 = (w_coord, w_mid) if w_dir == 'y' else (w_mid, w_coord)\n\n        # determine track width on each layer\n        tr_w_list[num_connections - 1] = track_id.width\n        if tr_layer > warr_layer:\n            layer_dir = 1\n            tr_w_prev = grid.get_track_width(tr_layer, tr_w_list[num_connections - 1],\n                                             unit_mode=True)\n            tr_w_idx_iter = range(num_connections - 2, -1, -1)\n        else:\n            layer_dir = -1\n            tr_w_prev = grid.get_track_width(warr_layer, warr_tid.width, unit_mode=True)\n            tr_w_idx_iter = range(0, num_connections - 1)\n        for idx in tr_w_idx_iter:\n            cur_layer = warr_layer + layer_dir * (idx + 1)\n            if tr_w_list[idx] < 0:\n                tr_w_list[idx] = max(1, grid.get_track_width_inverse(cur_layer, tr_w_prev,\n                                                                     unit_mode=True))\n            tr_w_prev = grid.get_track_width(cur_layer, tr_w_list[idx], unit_mode=True)\n\n        # draw via stacks\n        results = []  # type: List[WireArray]\n        targ_layer = warr_layer\n        for tr_w, tr_mode, min_len_mode in zip(tr_w_list, tr_mode_list, min_len_mode_list_resolved):\n            targ_layer += layer_dir\n\n            # determine track index to connect to\n            if targ_layer == tr_layer:\n  
              targ_index = tr_index\n            else:\n                targ_dir = grid.get_direction(targ_layer)\n                coord = x0 if targ_dir == 'y' else y0\n                targ_index = grid.coord_to_nearest_track(targ_layer, coord, half_track=True,\n                                                         mode=tr_mode, unit_mode=True)\n\n            targ_tid = TrackID(targ_layer, targ_index, width=tr_w)\n            warr = self.connect_to_tracks(wire_array, targ_tid, min_len_mode=min_len_mode,\n                                          unit_mode=True, debug=debug, return_wires=False)\n            assert warr is not None, \"connect_to_tracks did nothing\"\n            assert isinstance(warr, WireArray), \"return_wires=False should return a WireArray\"\n            results.append(warr)\n            wire_array = warr\n\n        return results\n\n    def strap_wires(self,  # type: TemplateBase\n                    warr,  # type: WireArray\n                    targ_layer,  # type: int\n                    tr_w_list=None,  # type: Optional[List[int]]\n                    min_len_mode_list=None,  # type: Optional[List[int]]\n                    ):\n        # type: (...) -> WireArray\n        \"\"\"Strap the given WireArrays to the target routing layer.\n\n        This method is used to connects wires on adjacent layers that has the same direction.\n        The track locations must be valid on all routing layers for this method to work.\n\n        Parameters\n        ----------\n        warr : WireArray\n            the WireArrays to strap.\n        targ_layer : int\n            the final routing layer ID.\n        tr_w_list : Optional[List[int]]\n            the track widths to use on each layer.  
If not specified, will determine automatically.\n        min_len_mode_list : Optional[List[int]]\n            minimum length mode flags on each layer.\n\n        Returns\n        -------\n        wire_arr : WireArray\n            WireArray representing the tracks created.  None if nothing to do.\n        \"\"\"\n        warr_layer = warr.layer_id\n\n        if targ_layer == warr_layer:\n            # no need to do anything\n            return warr\n\n        num_connections = abs(targ_layer - warr_layer)  # type: int\n\n        # set default values\n        if tr_w_list is None:\n            tr_w_list = [-1] * num_connections\n        elif len(tr_w_list) != num_connections:\n            raise ValueError('tr_w_list must have exactly %d elements.' % num_connections)\n        else:\n            # create a copy of the given list, as this list may be modified later.\n            tr_w_list = list(tr_w_list)\n\n        if min_len_mode_list is None:\n            min_len_mode_list_resolved = ([None] * num_connections)  # type: List[Optional[int]]\n        else:\n            # List[int] is a List[Optional[int]]\n            min_len_mode_list_resolved = cast(List[Optional[int]], min_len_mode_list)\n\n        if len(min_len_mode_list_resolved) != num_connections:\n            raise ValueError('min_len_mode_list must have exactly %d elements.' % num_connections)\n\n        layer_dir = 1 if targ_layer > warr_layer else -1\n        for tr_w, mlen_mode in zip(tr_w_list, min_len_mode_list_resolved):\n            warr = self._strap_wires_helper(warr, warr.layer_id + layer_dir, tr_w, mlen_mode)\n\n        return warr\n\n    def _strap_wires_helper(self,  # type: TemplateBase\n                            warr,  # type: WireArray\n                            targ_layer,  # type: int\n                            tr_w,  # type: int\n                            mlen_mode,  # type: Optional[int]\n                            ):\n        # type: (...) 
-> WireArray\n        \"\"\"Helper method for strap_wires().  Connect one layer at a time.\"\"\"\n        wire_tid = warr.track_id\n        wire_layer = wire_tid.layer_id\n\n        res = self.grid.resolution\n        lower = int(round(warr.lower / res))\n        upper = int(round(warr.upper / res))\n\n        # error checking\n        wdir = self.grid.get_direction(wire_layer)\n        if wdir != self.grid.get_direction(targ_layer):\n            raise ValueError('Cannot strap wires with different directions.')\n\n        # convert base track index\n        base_coord = int(self.grid.track_to_coord(wire_layer, wire_tid.base_index, unit_mode=True))\n        base_tid = int(self.grid.coord_to_track(targ_layer, base_coord, unit_mode=True))\n        # convert pitch\n        wire_pitch = int(self.grid.get_track_pitch(wire_layer, unit_mode=True))\n        targ_pitch = int(self.grid.get_track_pitch(targ_layer, unit_mode=True))\n        targ_pitch_half = targ_pitch // 2\n        pitch_unit = int(round(wire_pitch * wire_tid.pitch))\n        if pitch_unit % targ_pitch_half != 0:\n            raise ValueError('Cannot strap wires on layers with mismatched pitch ')\n        num_pitch_2 = pitch_unit // targ_pitch_half\n        if num_pitch_2 % 2 == 0:\n            num_pitch = num_pitch_2 // 2  # type: Union[float, int]\n        else:\n            num_pitch = num_pitch_2 / 2  # type: Union[float, int]\n        # convert width\n        if tr_w < 0:\n            width_unit = int(self.grid.get_track_width(wire_layer, wire_tid.width, unit_mode=True))\n            tr_w = max(1, self.grid.get_track_width_inverse(targ_layer, width_unit, mode=-1,\n                                                            unit_mode=True))\n\n        # draw vias.  
Update WireArray lower/upper\n        new_lower = lower  # type: int\n        new_upper = upper  # type: int\n        w_lower = lower  # type: int\n        w_upper = upper  # type: int\n        for tid in wire_tid:\n            coord = int(self.grid.track_to_coord(wire_layer, tid, unit_mode=True))\n            tid2 = int(self.grid.coord_to_track(targ_layer, coord, unit_mode=True))\n            w_name = self.grid.get_layer_name(wire_layer, tid)\n            t_name = self.grid.get_layer_name(targ_layer, tid2)\n\n            w_yb, w_yt = tuple2_to_int(\n                self.grid.get_wire_bounds(wire_layer, tid, wire_tid.width, unit_mode=True))\n            t_yb, t_yt = tuple2_to_int(\n                self.grid.get_wire_bounds(targ_layer, tid2, tr_w, unit_mode=True))\n            vbox = BBox(lower, max(w_yb, t_yb), upper, min(w_yt, t_yt), res, unit_mode=True)\n            if wdir == 'y':\n                vbox = vbox.flip_xy()\n            if wire_layer < targ_layer:\n                via = self.add_via(vbox, w_name, t_name, wdir, extend=True, top_dir=wdir)\n                tbox, wbox = via.top_box, via.bottom_box\n            else:\n                via = self.add_via(vbox, t_name, w_name, wdir, extend=True, top_dir=wdir)\n                tbox, wbox = via.bottom_box, via.top_box\n\n            if wdir == 'y':\n                new_lower = min(new_lower, tbox.bottom_unit)\n                new_upper = max(new_upper, tbox.top_unit)\n                w_lower = min(w_lower, wbox.bottom_unit)\n                w_upper = max(w_upper, wbox.top_unit)\n            else:\n                new_lower = min(new_lower, tbox.left_unit)\n                new_upper = max(new_upper, tbox.right_unit)\n                w_lower = min(w_lower, wbox.left_unit)\n                w_upper = max(w_upper, wbox.top_unit)\n\n        # handle minimum length DRC rule\n        min_len = int(self.grid.get_min_length(targ_layer, tr_w, unit_mode=True))\n        ext = min_len - (new_upper - new_lower)\n        if 
mlen_mode is not None and ext > 0:\n            if mlen_mode < 0:\n                new_lower -= ext\n            elif mlen_mode > 0:\n                new_upper += ext\n            else:\n                new_lower -= ext // 2\n                new_upper += (ext - ext // 2)\n\n        # add wires\n        self.add_wires(wire_layer, wire_tid.base_index, w_lower, w_upper, width=wire_tid.width,\n                       num=wire_tid.num, pitch=wire_tid.pitch, unit_mode=True)\n        return self.add_wires(targ_layer, base_tid, new_lower, new_upper, width=tr_w,\n                              num=wire_tid.num, pitch=num_pitch, unit_mode=True)\n\n    def connect_differential_tracks(self,  # type: TemplateBase\n                                    pwarr_list,  # type: Union[WireArray, List[WireArray]]\n                                    nwarr_list,  # type: Union[WireArray, List[WireArray]]\n                                    tr_layer_id,  # type: int\n                                    ptr_idx,  # type: Union[int, float]\n                                    ntr_idx,  # type: Union[int, float]\n                                    width=1,  # type: int\n                                    track_lower=None,  # type: Optional[Union[float, int]]\n                                    track_upper=None,  # type: Optional[Union[float, int]]\n                                    unit_mode=False,  # type: bool\n                                    debug=False  # type: bool\n                                    ):\n        # type: (...) 
-> Tuple[Optional[WireArray], Optional[WireArray]]
        """Connect the given differential wires to two tracks symmetrically.

        This method makes sure the connections are symmetric and have identical parasitics.

        Parameters
        ----------
        pwarr_list : Union[WireArray, List[WireArray]]
            positive signal wires to connect.
        nwarr_list : Union[WireArray, List[WireArray]]
            negative signal wires to connect.
        tr_layer_id : int
            track layer ID.
        ptr_idx : Union[int, float]
            positive track index.
        ntr_idx : Union[int, float]
            negative track index.
        width : int
            track width in number of tracks.
        track_lower : Optional[Union[float, int]]
            if given, extend track(s) to this lower coordinate.
        track_upper : Optional[Union[float, int]]
            if given, extend track(s) to this upper coordinate.
        unit_mode: bool
            True if track_lower/track_upper is given in resolution units.
        debug : bool
            True to print debug messages.

        Returns
        -------
        p_track : Optional[WireArray]
            the positive track.
        n_track : Optional[WireArray]
            the negative track.
        """
        # delegate to connect_matching_tracks() with a two-element list so both
        # tracks get identical extensions/parasitics.
        track_list = self.connect_matching_tracks([pwarr_list, nwarr_list], tr_layer_id,
                                                  [ptr_idx, ntr_idx], width=width,
                                                  track_lower=track_lower,
                                                  track_upper=track_upper,
                                                  unit_mode=unit_mode,
                                                  debug=debug)
        return track_list[0], track_list[1]

    def connect_differential_wires(self,  # type: TemplateBase
                                   pin_warrs,  # type: Union[WireArray, List[WireArray]]
                                   nin_warrs,  # type: Union[WireArray, List[WireArray]]
                                   pout_warr,  # type: WireArray
                                   nout_warr,  # type: WireArray
                                   track_lower=None,  # type: Optional[Union[float, int]]
                                   track_upper=None,  # type: Optional[Union[float, int]]
                                   unit_mode=False,  # type: bool
                                   debug=False  # type: bool
                                   ):
        # type: (...) -> Tuple[Optional[WireArray], Optional[WireArray]]
        """Connect the given differential wires to the tracks of the given output wires.

        The track layer, track width, and positive/negative track indices are taken
        from ``pout_warr``/``nout_warr``; the resulting tracks are extended to cover
        at least the extent of ``pout_warr`` (and ``track_lower``/``track_upper``
        when given).

        Parameters
        ----------
        pin_warrs : Union[WireArray, List[WireArray]]
            positive signal wires to connect.
        nin_warrs : Union[WireArray, List[WireArray]]
            negative signal wires to connect.
        pout_warr : WireArray
            wire defining the positive track to connect to.
        nout_warr : WireArray
            wire defining the negative track to connect to.
        track_lower : Optional[Union[float, int]]
            if given, extend track(s) to this lower coordinate.
        track_upper : Optional[Union[float, int]]
            if given, extend track(s) to this upper coordinate.
        unit_mode: bool
            True if track_lower/track_upper is given in resolution units.
        debug : bool
            True to print debug messages.

        Returns
        -------
        p_track : Optional[WireArray]
            the positive track.
        n_track : Optional[WireArray]
            the negative track.
        """
        if not unit_mode:
            res = self.grid.resolution
            if track_lower is not None:
                track_lower = int(round(track_lower / res))
            if track_upper is not None:
                track_upper = int(round(track_upper / res))

        p_tid = pout_warr.track_id
        lay_id = p_tid.layer_id
        pidx = p_tid.base_index
        nidx = nout_warr.track_id.base_index
        width = p_tid.width

        # NOTE(review): the track extent below is derived from pout_warr only;
        # this assumes nout_warr spans the same range -- confirm with callers.
        if track_lower is None:
            tr_lower = pout_warr.lower_unit
        else:
            tr_lower = min(track_lower, pout_warr.lower_unit)
        if track_upper is None:
            tr_upper = pout_warr.upper_unit
        else:
            tr_upper = max(track_upper, pout_warr.upper_unit)

        return self.connect_differential_tracks(pin_warrs, nin_warrs, lay_id, pidx, nidx,
                                                width=width, track_lower=tr_lower,
                                                track_upper=tr_upper, unit_mode=True, debug=debug)

    def connect_matching_tracks(self,  # type: TemplateBase
                                warr_list_list,  # type: List[Union[WireArray, List[WireArray]]]
                                tr_layer_id,  # type: int
                                tr_idx_list,  # type: List[Union[int, float]]
               
                 width=1,  # type: int
                                track_lower=None,  # type: Optional[Union[float, int]]
                                track_upper=None,  # type: Optional[Union[float, int]]
                                unit_mode=False,  # type: bool
                                debug=False  # type: bool
                                ):
        # type: (...) -> List[Optional[WireArray]]
        """Connect wires to tracks with optimal matching.

        This method connects the wires to tracks in a way that minimizes the parasitic mismatches.

        Parameters
        ----------
        warr_list_list : List[Union[WireArray, List[WireArray]]]
            list of signal wires to connect.
        tr_layer_id : int
            track layer ID.
        tr_idx_list : List[Union[int, float]]
            list of track indices.
        width : int
            track width in number of tracks.
        track_lower : Optional[Union[float, int]]
            if given, extend track(s) to this lower coordinate.
        track_upper : Optional[Union[float, int]]
            if given, extend track(s) to this upper coordinate.
        unit_mode: bool
            True if track_lower/track_upper is given in resolution units.
        debug : bool
            True to print debug messages.

        Returns
        -------
        track_list : List[WireArray]
            list of created tracks.
        """
        grid = self.grid
        res = grid.resolution

        if not unit_mode:
            if track_lower is not None:
                track_lower = int(round(track_lower / res))
            if track_upper is not None:
                track_upper = int(round(track_upper / res))

        # simple error checking
        num_tracks = len(tr_idx_list)  # type: int
        if num_tracks != len(warr_list_list):
            raise ValueError('wire list length and track index list length mismatch.')
        if num_tracks == 0:
            raise ValueError('No tracks given')

        # compute wire_lower/upper without via extension
        # (union of the perpendicular bounds of all requested tracks, so every
        # connected wire is stretched by the same amount)
        w_lower, w_upper = tuple2_to_int(
            grid.get_wire_bounds(tr_layer_id, tr_idx_list[0], width=width,
                                 unit_mode=True))
        for tr_idx in islice(tr_idx_list, 1, None):
            cur_low, cur_up = tuple2_to_int(
                grid.get_wire_bounds(tr_layer_id, tr_idx, width=width, unit_mode=True))
            w_lower = min(w_lower, cur_low)
            w_upper = max(w_upper, cur_up)

        # separate wire arrays into bottom/top tracks, compute wire/track lower/upper coordinates
        bot_warrs = [[] for _ in range(num_tracks)]  # type: List[List[WireArray]]
        top_warrs = [[] for _ in range(num_tracks)]  # type: List[List[WireArray]]
        bot_bounds = [None, None]  # type: List[Optional[Union[float, int]]]
        top_bounds = [None, None]  # type: List[Optional[Union[float, int]]]
        for idx, warr_list in enumerate(warr_list_list):
            # convert to WireArray list
            if isinstance(warr_list, WireArray):
                warr_list = [warr_list]
            else:
                # already a list of WireArrays; nothing to do
                pass

            if not warr_list:
                raise ValueError('No wires found for track index %d' % idx)

            for warr in warr_list:
                warr_tid = warr.track_id
                cur_layer_id = warr_tid.layer_id
                cur_width = warr_tid.width
                if cur_layer_id == tr_layer_id + 1:
                    # wire is one layer above the track layer;
                    # get_via_extensions returns (track_ext, wire_ext) here
                    tr_w_ext = grid.get_via_extensions(tr_layer_id, width, cur_width,
                                                       unit_mode=True)
                    top_warrs[idx].append(warr)
                    cur_bounds = top_bounds
                    tr_ext, w_ext = tuple2_to_int(tr_w_ext)
                elif cur_layer_id == tr_layer_id - 1:
                    # wire is one layer below the track layer;
                    # get_via_extensions returns (wire_ext, track_ext) here
                    tr_w_ext = grid.get_via_extensions(cur_layer_id, cur_width, width,
                                                       unit_mode=True)
                    bot_warrs[idx].append(warr)
                    cur_bounds = bot_bounds
                    w_ext, tr_ext = tuple2_to_int(tr_w_ext)
                else:
                    raise ValueError('Cannot connect wire on layer %d '
                                     'to track on layer %d' % (cur_layer_id, tr_layer_id))

                # compute wire lower/upper including via extension
                if cur_bounds[0] is None:
                    cur_bounds[0] = w_lower - w_ext
                    cur_bounds[1] = w_upper + w_ext
                else:
                    cur_bounds[0] = min(cur_bounds[0], w_lower - w_ext)
                    cur_bounds[1] = max(cur_bounds[1], w_upper + w_ext)

                # compute track lower/upper including via extension
                warr_bounds = warr_tid.get_bounds(grid, unit_mode=True)
                if track_lower is None:
                    track_lower = warr_bounds[0] - tr_ext
                else:
                    track_lower = min(track_lower, warr_bounds[0] - tr_ext)
                if track_upper is None:
                    track_upper = warr_bounds[1] + tr_ext
                else:
                    track_upper = max(track_upper, warr_bounds[1] + tr_ext)

        assert track_lower is not None and track_upper is not None, \
            "track_lower/track_upper should have been set above"

        # draw tracks
        track_list = []  # type: List[Optional[WireArray]]
        for bwarr_list, twarr_list, tr_idx in zip(bot_warrs, top_warrs, tr_idx_list):
            # all tracks share the same lower/upper coordinates for matching
            track_list.append(self.add_wires(tr_layer_id, tr_idx, track_lower, track_upper,
                                             width=width, unit_mode=True))

            tr_id = TrackID(tr_layer_id, tr_idx, width=width)
            self.connect_to_tracks(bwarr_list, tr_id, wire_lower=bot_bounds[0],
                                   wire_upper=bot_bounds[1], unit_mode=True,
                                   min_len_mode=None, debug=debug)
            self.connect_to_tracks(twarr_list, tr_id, wire_lower=top_bounds[0],
                                   wire_upper=top_bounds[1], unit_mode=True,
                                   min_len_mode=None, debug=debug)

        return track_list

    def draw_vias_on_intersections(self, bot_warr_list, top_warr_list):
        # type: (Union[WireArray, List[WireArray]], Union[WireArray, List[WireArray]]) -> List[bool]
        """Draw vias on all intersections of the two given wire groups.

        Parameters
        ----------
        bot_warr_list : Union[WireArray, List[WireArray]]
            the bottom wires.
        top_warr_list : Union[WireArray, List[WireArray]]
            the top wires.

        Returns
        -------
        bwarr_conn_made_list : List[bool]
            one flag per bottom wire; True if at least one via was drawn for it.
        """
        if isinstance(bot_warr_list, WireArray):
            bot_warr_list = [bot_warr_list]
        else:
            pass
        if isinstance(top_warr_list, WireArray):
            top_warr_list = [top_warr_list]
        else:
            pass

        grid = self.grid
        res = grid.resolution

        bwarr_conn_made_list = [False for _ in bot_warr_list]

        for bwarr_indx, bwarr in enumerate(bot_warr_list):
            bot_tl = bwarr.lower_unit
            bot_tu = bwarr.upper_unit
            bot_track_idx = bwarr.track_id
            bot_layer_id = bot_track_idx.layer_id
            # only vias between adjacent layers are drawn
            top_layer_id = bot_layer_id + 1
            bot_width = bot_track_idx.width
            bot_dir = self.grid.get_direction(bot_layer_id)
            bot_horizontal = (bot_dir == 'x')
            for bot_index in bot_track_idx:
                bot_lay_name = self.grid.get_layer_name(bot_layer_id, bot_index)
                btl, btu = tuple2_to_int(
                    grid.get_wire_bounds(bot_layer_id, bot_index, width=bot_width,
         
                                unit_mode=True))
                for twarr in top_warr_list:
                    top_tl = twarr.lower_unit
                    top_tu = twarr.upper_unit
                    top_track_idx = twarr.track_id
                    top_width = top_track_idx.width
                    # top wire's longitudinal extent must cover the bottom track's width interval
                    if top_tu >= btu and top_tl <= btl:
                        # top wire cuts bottom wire, possible intersection
                        for top_index in top_track_idx:
                            ttl, ttu = tuple2_to_int(grid.get_wire_bounds(top_layer_id, top_index,
                                                                          width=top_width,
                                                                          unit_mode=True))
                            if bot_tu >= ttu and bot_tl <= ttl:
                                # bottom wire cuts top wire, we have intersection.  Make bbox
                                if bot_horizontal:
                                    box = BBox(ttl, btl, ttu, btu, res, unit_mode=True)
                                else:
                                    box = BBox(btl, ttl, btu, ttu, res, unit_mode=True)
                                top_lay_name = self.grid.get_layer_name(top_layer_id, top_index)
                                self.add_via(box, bot_lay_name, top_lay_name, bot_dir)

                                bwarr_conn_made_list[bwarr_indx] = True

        return bwarr_conn_made_list

    def mark_bbox_used(self, layer_id, bbox):
        # type: (int, BBox) -> None
        """Marks the given bounding-box region as used in this Template."""
        # NOTE(review): uses the layer name of track 0; assumes all tracks of
        # this layer share one layer name -- confirm for multi-color processes.
        layer_name = self.grid.get_layer_name(layer_id, 0)
        self._used_tracks.record_rect(self.grid, layer_name, BBoxArray(bbox, unit_mode=True),
                                      dx=0, dy=0)

    def get_available_tracks(self,  # type: TemplateBase
                             layer_id,  # type: int
                             tr_idx_list,  # type: List[int]
                             lower,  # type: Union[float, int]
                             upper,  # type: Union[float, int]
                             width=1,  # type: int
                             margin=0,  # type: Union[float, int]
                             unit_mode=False,  # type: bool
                             ):
        # type: (...) -> List[int]
        """Return the subset of the given track indices that are unused.

        A track index qualifies if is_track_available() reports it free in the
        [lower, upper] range, with ``margin`` used as both the line-space and
        line-end-space requirement.

        Parameters
        ----------
        layer_id : int
            track layer ID.
        tr_idx_list : List[int]
            candidate track indices to check.
        lower : Union[float, int]
            lower coordinate of the range to check.
        upper : Union[float, int]
            upper coordinate of the range to check.
        width : int
            track width in number of tracks.
        margin : Union[float, int]
            required spacing (both side and line-end) to existing geometry.
        unit_mode : bool
            True if lower/upper/margin are given in resolution units.

        Returns
        -------
        avail_list : List[int]
            the available track indices, in input order.
        """
        if not unit_mode:
            res = self.grid.resolution
            lower = int(round(lower / res))
            upper = int(round(upper / res))
            margin = int(round(margin / res))

        return [tr_idx for tr_idx in tr_idx_list
                if self.is_track_available(layer_id, tr_idx, lower, upper, width=width,
                                           sp=margin, sp_le=margin, unit_mode=True)]

    def do_power_fill(self,  # type: TemplateBase
                      layer_id,  # type: int
                      space,  # type: Union[float, int]
                      space_le,  # type: Union[float, int]
                      vdd_warrs=None,  # type: Optional[Union[WireArray, List[WireArray]]]
                      vss_warrs=None,  # type: Optional[Union[WireArray, List[WireArray]]]
                      bound_box=None,  # type: Optional[BBox]
                      fill_width=1,  # type: int
                      fill_space=0,  # type: int
                      x_margin=0,  # type: Union[float, int]
                      y_margin=0,  # type: Union[float, int]
                      tr_offset=0,  # type: Union[float, int]
                      min_len=0,  # type: Union[float, int]
                      flip=False,  # type: bool
                      unit_mode=False,  # type: bool
                      sup_type='both',  # type: str
                      vss_only=False,  # type: bool
                      vdd_only=False,  # type: bool
                      ):
    
    # type: (...) -> Tuple[List[WireArray], List[WireArray]]
        """Draw power fill on the given layer and return the (vdd, vss) wire lists.

        Fill tracks are placed every ``fill_width + fill_space`` tracks starting
        from ``tr_offset``.  With ``sup_type == 'both'`` the tracks alternate
        between VSS and VDD (order controlled by ``flip``); ``'vdd'``/``'vss'``
        (or the legacy ``vdd_only``/``vss_only`` flags) put all fill on one supply.
        Wires are drawn only over open intervals (see open_interval_iter) at least
        ``min_len`` long, and are via-connected to ``vdd_warrs``/``vss_warrs``
        when those are given.
        """
        res = self.grid.resolution
        if not unit_mode:
            space = int(round(space / res))
            space_le = int(round(space_le / res))
            x_margin = int(round(x_margin / res))
            y_margin = int(round(y_margin / res))
            tr_offset = int(round(tr_offset / res))
            min_len = int(round(min_len / res))
        else:
            space = int(space)
            space_le = int(space_le)
            x_margin = int(x_margin)
            y_margin = int(y_margin)
            tr_offset = int(tr_offset)
            min_len = int(min_len)

        # fill wires must satisfy the technology minimum length
        min_len = max(min_len, int(self.grid.get_min_length(layer_id, fill_width, unit_mode=True)))
        if bound_box is None:
            if self.bound_box is None:
                raise ValueError("bound_box is not set")
            bound_box = self.bound_box

        bound_box = bound_box.expand(dx=-x_margin, dy=-y_margin, unit_mode=True)

        # starting fill position and pitch, in half-track units
        tr_off = self.grid.coord_to_track(layer_id, tr_offset, unit_mode=True)
        htr0 = int(tr_off * 2) + 1 + fill_width + fill_space
        htr_pitch = 2 * (fill_width + fill_space)
        is_horizontal = (self.grid.get_direction(layer_id) == 'x')
        if is_horizontal:
            cl, cu = bound_box.bottom_unit, bound_box.top_unit
            lower, upper = bound_box.left_unit, bound_box.right_unit
        else:
            cl, cu = bound_box.left_unit, bound_box.right_unit
            lower, upper = bound_box.bottom_unit, bound_box.top_unit

        # first/last tracks that fit inside the (shrunken) bounding box
        tr_bot = int(self.grid.find_next_track(layer_id, cl, tr_width=fill_width, half_track=True,
                                               mode=1, unit_mode=True))
        tr_top = int(self.grid.find_next_track(layer_id, cu, tr_width=fill_width, half_track=True,
                                               mode=-1, unit_mode=True))
        n0 = - (-(int(tr_bot * 2) + 1 - htr0) // htr_pitch)
        n1 = (int(tr_top * 2) + 1 - htr0) // htr_pitch
        top_vdd = []  # type: List[WireArray]
        top_vss = []  # type: List[WireArray]
        for ncur in range(n0, n1 + 1):
            tr_idx = (htr0 + ncur * htr_pitch - 1) / 2
            tid = TrackID(layer_id, tr_idx, width=fill_width)
            # Two options for legacy support
            if vss_only or vdd_only:
                if vss_only and vdd_only:
                    raise ValueError("only one of 'vss_only' and 'vdd_only' could be True.")
                cur_list = top_vss if vss_only else top_vdd
            else:
                if sup_type.lower() == 'vss':
                    cur_list = top_vss
                elif sup_type.lower() == 'vdd':
                    cur_list = top_vdd
                elif sup_type.lower() == 'both':
                    # alternate supplies on even/odd fill tracks; flip swaps the order
                    cur_list = top_vss if (ncur % 2 == 0) != flip else top_vdd
                else:
                    raise ValueError('sup_type has to be "VDD" or "VSS" or "both"(default)')
            for tl, tu in self.open_interval_iter(tid, lower, upper, sp=space, sp_le=space_le,
                                                  min_len=min_len):
                cur_list.append(WireArray(tid, tl, tu, res=res, unit_mode=True))

        for warr in chain(top_vdd, top_vss):
            for lay, box_arr in warr.wire_arr_iter(self.grid):
                self.add_rect(lay, box_arr)

        if vdd_warrs:
            self.draw_vias_on_intersections(vdd_warrs, top_vdd)
        if vss_warrs:
            self.draw_vias_on_intersections(vss_warrs, top_vss)
        return top_vdd, top_vss

    def do_max_space_fill2(self,  # type: TemplateBase
                           layer_id,  # type: int
                           bound_box=None,  # type: Optional[BBox]
                           ):
        # type: (...) 
-> None
        """Draw density fill on the given layer."""
        grid = self.grid
        tech_info = grid.tech_info

        # per-layer fill rules from the technology parameters
        fill_config = tech_info.tech_params['layout']['dummy_fill'][layer_id]
        density = fill_config['density']
        sp_max = fill_config['sp_max']
        sp_le_max = fill_config['sp_le_max']
        ip_margin = fill_config['margin']
        ip_margin_le = fill_config['margin_le']
        sp_max2 = sp_max // 2
        sp_le_max2 = sp_le_max // 2
        margin = sp_max2 // 2
        margin_le = sp_le_max2 // 2

        if bound_box is None:
            if self.bound_box is None:
                raise ValueError("bound_box is not set")
            bound_box = self.bound_box

        # get tracks information
        # "long" is the wire direction of this layer, "tran" is perpendicular to it
        long_dir = grid.get_direction(layer_id)
        if long_dir == 'y':
            tran_dir = 'x'
            spx = sp_max2
            spy = sp_le_max2
        else:
            tran_dir = 'y'
            spx = sp_le_max2
            spy = sp_max2
        dim_tran0, dim_tran1 = tuple2_to_int(bound_box.get_interval(tran_dir, unit_mode=True))
        dim_long0, dim_long1 = tuple2_to_int(bound_box.get_interval(long_dir, unit_mode=True))
        dim_tranl = min(dim_tran1, dim_tran0 + sp_max2)
        dim_tranu = max(dim_tran0, dim_tran1 - sp_max2)
        dim_longl = min(dim_long1, dim_long0 + sp_le_max2)
        dim_longu = max(dim_long0, dim_long1 - sp_le_max2)
        dim_tran = dim_tran1 - dim_tran0
        dim_long = dim_long1 - dim_long0

        # self.add_rect(tech_info.get_exclude_layer(layer_id), bound_box)
        # box too small to require fill
        if dim_tran <= ip_margin or dim_long <= ip_margin_le:
            return

        min_len = int(grid.get_min_length(layer_id, 1, unit_mode=True))
        htr0 = int(self.grid.coord_to_nearest_track(layer_id, dim_tranl, half_track=True,
                                                    mode=-1, unit_mode=True))
        htr1 = int(self.grid.coord_to_nearest_track(layer_id, dim_tranu, half_track=True,
                                                    mode=1, unit_mode=True))
        htr0 = int(round(htr0 * 2 + 1))
        htr1 = int(round(htr1 * 2 + 1))
        num_htr_tot = htr1 - htr0 + 1

        # calculate track pitch based on density/max space
        tr_w, tr_sp = tuple2_to_int(grid.get_track_info(layer_id, unit_mode=True))
        sp_le = int(grid.get_line_end_space(layer_id, 1, unit_mode=True))
        tr_pitch2 = int(grid.get_track_pitch(layer_id, unit_mode=True)) // 2
        num_tracks = int(round(-(-(dim_tran * density) // tr_w)))
        num_tracks = min(max(num_tracks, -(-num_htr_tot // ((sp_max - tr_sp) // tr_pitch2 + 2))),
                         num_htr_tot // 2)

        fill_info = None
        invert = False
        # NOTE(review): this loop has no break after a successful call, so once a
        # solution is found it is recomputed on every remaining iteration; a break
        # on success looks intended -- confirm before changing.
        for _ in range(100):
            try:
                fill_info, invert = fill_symmetric_max_num_info(num_htr_tot, num_tracks, 1, 1, 1,
                                                                fill_on_edge=True, cyclic=False)
            except NoFillChoiceError:
                num_tracks -= 1
        if fill_info is None:
            raise ValueError('no fill solution.')

        intv_list = fill_symmetric_interval(*fill_info[1], offset=htr0, invert=invert)[0]

        # create interval sets
        intv_tran0 = IntervalSet()
        intv_tran1 = IntervalSet()
        htr_list = [intv[0] for intv in intv_list]
        num_htr = len(htr_list)
        set_long0 = set(htr_list)
        set_long1 = set_long0.copy()
        intv_list = [IntervalSet() for _ in range(num_htr)]

        # handle blockages
        for blk_box in self.blockage_iter(layer_id, bound_box, spx=spx, spy=spy):
            b_tran0, b_tran1 = tuple2_to_int(blk_box.get_interval(tran_dir, unit_mode=True))
            b_long0, b_long1 = tuple2_to_int(blk_box.get_interval(long_dir, unit_mode=True))
            b_long0_lim = max(b_long0, dim_longl)
            b_long1_lim = min(b_long1, dim_longu)
            blk_intv = (b_long0_lim, b_long1_lim)
            if b_long0_lim < b_long1_lim:
                # handle lower/upper transverse edges
                if b_tran0 <= dim_tran0 and dim_tranl <= b_tran1:
                    intv_tran0.add(blk_intv, merge=True, abut=True)
                if b_tran0 <= dim_tranu and dim_tran1 <= b_tran1:
                    intv_tran1.add(blk_intv, merge=True, abut=True)
            # half-track index range of fill tracks covered by this blockage
            cur_htr0 = int(self.grid.find_next_track(layer_id, b_tran0, half_track=True, mode=1,
                                                     unit_mode=True))
            cur_htr1 = int(self.grid.find_next_track(layer_id, b_tran1, half_track=True, mode=-1,
                                                     unit_mode=True))
            cur_htr0 = max(htr0, int(round(cur_htr0 * 2 + 1)))
            cur_htr1 = min(htr1, int(round(cur_htr1 * 2 + 1)))
            htr_idx0 = bisect.bisect_left(htr_list, cur_htr0)
            if htr_idx0 < num_htr and htr_list[htr_idx0] <= cur_htr1:
                # NOTE(review): bisect_right returns one past the last element <= cur_htr1,
                # so this may include one extra track index (clamped to num_htr - 1) -- verify.
                htr_idx1 = min(num_htr - 1, bisect.bisect_right(htr_list, cur_htr1, lo=htr_idx0))
                for htr_idx in range(htr_idx0, htr_idx1 + 1):
                    htr = htr_list[htr_idx]
                    # handle lower/upper longitudinal edges
                    if b_long0 <= dim_long0 and dim_longl <= b_long1:
                        set_long0.discard(htr)
                    if b_long0 <= dim_longu and dim_long1 <= b_long1:
                        set_long1.discard(htr)
                    if b_long0_lim < b_long1_lim:
                        intv_list[htr_idx].add(blk_intv, merge=True, abut=True)

        # add fill in edges on transverse sides
        trl = int(self.grid.coord_to_nearest_track(layer_id, dim_tran0 + margin, half_track=True,
                                                   mode=-1, unit_mode=True))
        trr = int(self.grid.coord_to_nearest_track(layer_id, dim_tran1 - margin, half_track=True,
                                                   mode=1, unit_mode=True))
        if trr < trl + 1:
            # handle cases where the given bounding box is small
            dim_mid = (dim_tran0 + dim_tran1) // 2
            trl = int(self.grid.coord_to_nearest_track(layer_id, dim_mid, half_track=True,
                                                       mode=0, unit_mode=True))
            tran_edge_iter = ((intv_tran0, trl),)
        else:
            tran_edge_iter = ((intv_tran0, trl), (intv_tran1, trr))

        intv_long = (dim_longl, dim_longu)
        for intv_set, tidx in tran_edge_iter:
            for long0, long1 in intv_set.complement_iter(intv_long):
                # grow under-sized intervals symmetrically to min_len
                if long1 - long0 < min_len:
                    long0 = (long0 + long1 - min_len) // 2
                    long1 = long0 + min_len
                self.add_wires(layer_id, tidx, long0, long1, unit_mode=True)

        # add fill in edges on longitude sides
        if dim_long0 + 2 * (margin_le + min_len) + sp_le > dim_long1:
            # handle cases where the giving bounding box is small
            long_lower = min(dim_long0 + margin_le, (dim_long0 + dim_long1 - min_len) // 2)
            long_upper = max(dim_long1 - margin_le, long_lower + min_len)
            long_edge_iter = ((set_long0, long_lower, long_upper),)
        else:
            long_lower = dim_long0 + margin_le - min_len // 2
            long_upper = dim_long1 - margin_le + min_len // 2
            long_edge_iter = ((set_long0, long_lower, long_lower + min_len),
                              (set_long1, long_upper - min_len, long_upper))

        for set_long_edge, lower, upper in long_edge_iter:
            # mark the drawn edge wire (plus line-end spacing) as occupied
            intv_mark = (max(dim_longl, lower - sp_le_max2), min(dim_longu, upper + sp_le_max2))
            for htr in set_long_edge:
                htr_idx = bisect.bisect_left(htr_list, htr)
                intv_list[htr_idx].add(intv_mark, merge=True, abut=True)
                self.add_wires(layer_id, (htr - 1) / 2, lower, upper, unit_mode=True)

        # add rest of fill
        for htr, intv_set in zip(htr_list, intv_list):
            tidx = (htr - 1) / 2
            for long0, long1 in intv_set.complement_iter(intv_long):
                if long1 - long0 < min_len:
                    long0 = (long0 + long1 - min_len) // 2
                    long1 = long0 + min_len
                self.add_wires(layer_id, tidx, long0, long1, unit_mode=True)

    def do_max_space_fill(self,  # type: TemplateBase
                          layer_id,  # type: int
                          bound_box=None,  # type: Optional[BBox]
                          fill_pitch=1,  # type: Union[float, int]
                          ):
        # type: (...) -> None
        """Draw density fill on the given layer."""

        grid = self.grid
        tech_info = grid.tech_info

        fill_config = tech_info.tech_params['layout']['dummy_fill'][layer_id]
        sp_max = fill_config['sp_max']
        sp_le_max = fill_config['sp_le_max']
        ip_margin = fill_config['margin']
        ip_margin_le = fill_config['margin_le']
        sp_max2 = sp_max // 2
        sp_le_max2 = sp_le_max // 2
        margin = sp_max2 // 2
        margin_le = sp_le_max2 // 2

        min_len = grid.get_min_length(layer_id, 1, unit_mode=True)
        long_dir = grid.get_direction(layer_id)
        is_horiz = (long_dir == 'x')

        if bound_box is None:
            if self.bound_box is None:
                # NOTE(review): message mentions 'bound_box_resolved' but the missing
                # value is self.bound_box; consider rewording.
                raise ValueError("bound_box_resolved is not set")
            bound_box_resolved = self.bound_box  # type: BBox
        else:
            bound_box_resolved = bound_box

        xl = bound_box_resolved.left_unit
        xr = bound_box_resolved.right_unit
        yb = bound_box_resolved.bottom_unit
        yt = bound_box_resolved.top_unit
        # tran_box/long_box are the lower transverse/longitudinal edge strips
        if is_horiz:
            tran_box = shgeo.box(xl + margin_le, yb, xr - margin_le, yb + sp_max2)
            long_box = shgeo.box(xl, yb + margin_le, xl + sp_le_max2, yt - margin_le)
            dim_tran0 = yb
            dim_tran1 = yt
            dim_long0 = xl
            dim_long1 = xr
        else:
            tran_box = shgeo.box(xl, yb + margin_le, xl + sp_max2, yt - margin_le)
            long_box = shgeo.box(xl + margin_le, yb, xr - margin_le, yb + sp_le_max2)
            dim_tran0 = xl
            dim_tran1 = xr
            dim_long0 = yb
            dim_long1 = yt

        dim_tran = dim_tran1 - dim_tran0
        dim_long = dim_long1 - dim_long0
        self.add_rect(tech_info.get_exclude_layer(layer_id), bound_box_resolved)
        if dim_tran <= ip_margin or dim_long <= ip_margin_le:
            return

        # union of existing geometry, dilated by half the max space; anything
        # outside this region is farther than sp_max/2 from existing shapes
        box_list = [shgeo.box(*box.get_bounds(unit_mode=True))
                    for box in self.intersection_rect_iter(layer_id, bound_box_resolved)]
        tot_geo = shops.cascaded_union(box_list)  # type: shgeo.Polygon
        tot_geo = tot_geo.buffer(sp_max2, cap_style=2, join_style=2)

        # fill transverse edges
        new_polys = []  # type: List[shgeo.Polygon]
        if sp_max2 * 2 >= dim_tran:
            tr = grid.coord_to_nearest_track(layer_id, (dim_tran0 + dim_tran1) // 2,
                                             half_track=True, unit_mode=True)
            do_upper = False
        else:
            tr = grid.coord_to_nearest_track(layer_id, dim_tran0 + margin, half_track=True,
                                             mode=-1, unit_mode=True)
            do_upper = True
        self._fill_tran_edge_helper(layer_id, grid, tot_geo, tran_box, tr, is_horiz,
                                    min_len, sp_max2, new_polys)

        if do_upper:
            tr = grid.coord_to_nearest_track(layer_id, dim_tran1 - margin, half_track=True,
                                             mode=1, unit_mode=True)
            if is_horiz:
                tran_box = shgeo.box(xl + margin_le, yt - sp_max2, xr - margin_le, yt)
            else:
                tran_box = shgeo.box(xr - sp_max2, yb + margin_le, xr, yt - margin_le)
            self._fill_tran_edge_helper(layer_id, grid, tot_geo, tran_box, tr, is_horiz,
                                        min_len, sp_max2, new_polys)

        new_polys.append(tot_geo)
        tot_geo = shops.cascaded_union(new_polys)

        # fill longitudinal edges
        new_polys.clear()
        if sp_le_max2 * 2 >= dim_long:
            coord_mid = (dim_long1 + dim_long0) // 2
            do_upper = False
        else:
            coord_mid = dim_long0 + margin_le
            do_upper = True
        self._fill_long_edge_helper(layer_id, grid, tot_geo, long_box, coord_mid, is_horiz,
                                    min_len, sp_max2, new_polys, mode=-1 if do_upper else 0)
        if do_upper:
            coord_mid = dim_long1 - margin_le
            if is_horiz:
                long_box = shgeo.box(xr - sp_le_max2, yb + margin_le, xr, yt - margin_le)
            else:
                long_box = shgeo.box(xl + margin_le, yt - sp_le_max2, xr - margin_le, yt)
            self._fill_long_edge_helper(layer_id, grid, tot_geo, long_box, coord_mid, is_horiz,
                                        min_len, sp_max2, new_polys, mode=1)

        new_polys.append(tot_geo)
        tot_geo = shops.cascaded_union(new_polys)

        # fill interior
        min_len2 = -(-min_len // 2)
        tot_box = shgeo.box(*bound_box_resolved.get_bounds(unit_mode=True))
        # remaining empty region = bounding box minus covered/filled geometry
        geo = tot_box.difference(tot_geo)
        for poly in self._get_flat_poly_iter(geo):
            if not poly.is_empty:
                self._fill_poly_bounds(poly, layer_id, is_horiz, min_len2, fill_pitch)

    def _fill_poly_bounds(self, poly, layer_id, is_horiz, min_len2, fill_pitch):
        """Fill the given empty polygon with wires on every ``fill_pitch``-th track.

        Builds a "comb" of track-aligned boxes covering the polygon's bounding box,
        intersects it with the polygon, and adds one wire per resulting piece,
        stretched to at least ``2 * min_len2`` around its center.
        """
        grid = self.grid
        bounds = poly.bounds
        xl = int(round(bounds[0]))
        yb = int(round(bounds[1]))
        xr = int(round(bounds[2]))
        yt = int(round(bounds[3]))
        tr_p2 = grid.get_track_pitch(layer_id, unit_mode=True) // 2
        fill_htr = int(round(2 * fill_pitch))
        if is_horiz:
            tr0 = grid.coord_to_nearest_track(layer_id, yb, half_track=True,
                                              mode=-1, unit_mode=True)
            tr1 = grid.coord_to_nearest_track(layer_id, yt, half_track=True,
                                              mode=1, unit_mode=True)
            wl, wu = tuple2_to_int(grid.get_wire_bounds(layer_id, tr0, width=1, unit_mode=True))
            comb = shgeo.MultiPolygon([shgeo.box(xl, wl + tr_p2 * idx, xr, wu + tr_p2 * idx)
                                       for idx in range(0, int(round(2 * (tr1 - tr0))) + 2,
                                                        fill_htr)])
        else:
            tr0 = grid.coord_to_nearest_track(layer_id, xl, half_track=True,
                                              mode=-1, unit_mode=True)
            tr1 = grid.coord_to_nearest_track(layer_id, xr, half_track=True,
                                              mode=1, unit_mode=True)
            wl, wu = tuple2_to_int(grid.get_wire_bounds(layer_id, tr0, width=1, unit_mode=True))
            comb = shgeo.MultiPolygon([shgeo.box(wl + tr_p2 * idx, yb, wu + tr_p2 * idx, yt)
                                       for idx in range(0, int(round(2 * (tr1 - tr0))) + 2,
                                                        fill_htr)])

        htr0 = int(round(tr0 * 2)) + 1
        pitch = fill_htr * tr_p2
        for p in self._get_flat_poly_iter(poly.intersection(comb)):
            p_bnds = p.bounds
            if p_bnds:
                # recover the half-track index of this piece from its transverse coordinate
                if is_horiz:
                    htr = htr0 + (int(round(p_bnds[1])) - wl) // pitch * fill_htr
                    pl = int(round(p_bnds[0]))
                    pu = int(round(p_bnds[2]))
                else:
                    htr = htr0 + (int(round(p_bnds[0])) - wl) // pitch * fill_htr
                    pl = int(round(p_bnds[1]))
                    pu = int(round(p_bnds[3]))
                pc = (pl + pu) // 2

                self.add_wires(layer_id, (htr - 1) / 2, min(pl, pc - min_len2),
                               max(pu, pc + min_len2), unit_mode=True)

    @classmethod
    def _get_flat_poly_iter(cls, poly):
        """Yield the members of a shapely multi-part geometry, or the geometry itself."""
        if (isinstance(poly, shgeo.MultiPolygon) or
                isinstance(poly, shgeo.MultiLineString) or
                isinstance(poly, shgeo.GeometryCollection)):
            yield from poly
        else:
            yield poly

    def _fill_long_edge_helper(self, layer_id, grid, tot_geo, long_box, coord_mid, is_horiz,
                               min_len, sp_max2, new_polys, mode=0):
        # mode < 0: wire starts at coord_mid; mode == 0: centered; mode > 0: ends at coord_mid
        if mode < 0:
            clower = coord_mid
        elif mode == 0:
            clower = coord_mid - min_len // 2
        else:
            clower = coord_mid - min_len
        cupper = clower + min_len
        geo = long_box.difference(tot_geo)
        if isinstance(geo, shgeo.Polygon):
            geo = [geo]
        for poly in geo:
            poly_bnds = poly.bounds
            if poly_bnds:
                if is_horiz:
                    lower = poly_bnds[1]
                    upper = poly_bnds[3]
                else:
                    lower = poly_bnds[0]
                    upper = poly_bnds[2]
                htr0 = grid.coord_to_nearest_track(layer_id, lower, half_track=True, mode=-1,
                                                   unit_mode=True)
                htr1 = grid.coord_to_nearest_track(layer_id, upper, half_track=True, mode=1,
                                                   unit_mode=True)
                htr0 = int(round(htr0 * 2 + 1))
                htr1 = int(round(htr1 * 2 + 1))
                for htr in range(htr0, htr1 + 
1, 2):\n                    warr = self.add_wires(layer_id, (htr - 1) / 2, clower, cupper, unit_mode=True)\n                    wbox = shgeo.box(*warr.get_bbox_array(grid).base.get_bounds(unit_mode=True))\n                    new_polys.append(wbox.buffer(sp_max2, cap_style=2, join_style=2))\n\n    def _fill_tran_edge_helper(self, layer_id, grid, tot_geo, tran_box, tr, is_horiz, min_len,\n                               sp_max2, new_polys):\n        geo = tran_box.difference(tot_geo)\n        if isinstance(geo, shgeo.Polygon):\n            geo = [geo]\n        for poly in geo:\n            poly_bnds = poly.bounds\n            if poly_bnds:\n                if is_horiz:\n                    lower = int(round(poly_bnds[0]))\n                    upper = int(round(poly_bnds[2]))\n                else:\n                    lower = int(round(poly_bnds[1]))\n                    upper = int(round(poly_bnds[3]))\n                lower = min(lower, (lower + upper - min_len) // 2)\n                upper = max(upper, lower + min_len)\n                warr = self.add_wires(layer_id, tr, lower, upper, unit_mode=True)\n                wbox = shgeo.box(*warr.get_bbox_array(grid).base.get_bounds(unit_mode=True))\n                new_polys.append(wbox.buffer(sp_max2, cap_style=2, join_style=2))\n\n\nclass CachedTemplate(TemplateBase):\n    \"\"\"A template that's cached in file.\"\"\"\n\n    def __init__(self, temp_db, lib_name, params, used_names, **kwargs):\n        # type: (TemplateDB, str, Dict[str, Any], Set[str], **Any) -> None\n        TemplateBase.__init__(self, temp_db, lib_name, params, used_names, **kwargs)\n\n    @classmethod\n    def get_params_info(cls):\n        # type: () -> Dict[str, str]\n        return dict(\n            cache_fname='the cache file name.',\n        )\n\n    def draw_layout(self):\n        # type: () -> None\n        fname = self.params['cache_fname']\n\n        with open(fname + '_info.pickle', 'rb') as f:\n            info = pickle.load(f)\n        
self._size = info['size']\n        self._port_params = info['port_params']\n        self.prim_top_layer = info['prim_top_layer']\n        self.prim_bound_box = info['prim_bound_box']\n        self.array_box = info['array_box']\n\n        self._merge_used_tracks = True\n        self._used_tracks = UsedTracks(fname, overwrite=False)\n\n        prop_dict = info['properties']\n        for key, val in prop_dict.items():\n            setattr(self, key, val)\n\n        lib_name = info['lib_name']\n        cell_name = info['cell_name']\n        self.add_instance_primitive(lib_name, cell_name, (0, 0), inst_name='X0', unit_mode=True)\n\n\nclass BlackBoxTemplate(TemplateBase):\n    \"\"\"A black box template.\"\"\"\n\n    def __init__(self, temp_db, lib_name, params, used_names, **kwargs):\n        # type: (TemplateDB, str, Dict[str, Any], Set[str], **Any) -> None\n        TemplateBase.__init__(self, temp_db, lib_name, params, used_names, **kwargs)\n        self._sch_params = {}  # type: Dict[str, Any]\n\n    @property\n    def sch_params(self):\n        # type: () -> Dict[str, Any]\n        return self._sch_params\n\n    @classmethod\n    def get_params_info(cls):\n        # type: () -> Dict[str, str]\n        return dict(\n            lib_name='The library name.',\n            cell_name='The layout cell name.',\n            top_layer='The top level layer.',\n            size='The width/height of the cell, in resolution units.',\n            ports='The port information dictionary.',\n            show_pins='True to show pins.',\n        )\n\n    def get_layout_basename(self):\n        return self.params['cell_name']\n\n    def draw_layout(self):\n        # type: () -> None\n        lib_name = self.params['lib_name']\n        cell_name = self.params['cell_name']\n        top_layer = self.params['top_layer']\n        size = self.params['size']\n        ports = self.params['ports']\n        show_pins = self.params['show_pins']\n\n        res = self.grid.resolution\n        
tech_info = self.grid.tech_info\n        for term_name, pin_dict in ports.items():\n            for lay_name, bbox_list in pin_dict.items():\n                lay_id = tech_info.get_layer_id(lay_name)\n                for xl, yb, xr, yt in bbox_list:\n                    box = BBox(xl, yb, xr, yt, res, unit_mode=True)\n                    self._register_pin(lay_id, lay_name, term_name, box, show_pins)\n\n        self.add_instance_primitive(lib_name, cell_name, (0, 0), unit_mode=True)\n\n        self.prim_top_layer = top_layer\n        self.prim_bound_box = BBox(0, 0, size[0], size[1], self.grid.resolution, unit_mode=True)\n\n        for layer in range(1, top_layer + 1):\n            self.mark_bbox_used(layer, self.prim_bound_box)\n\n        self._sch_params = dict(\n            lib_name=lib_name,\n            cell_name=cell_name,\n        )\n\n    def _register_pin(self, lay_id, lay_name, term_name, box, show_pins):\n        if lay_id is None:\n            self.add_pin_primitive(term_name, lay_name, box, show=show_pins)\n        else:\n            if self.grid.get_direction(lay_id) == 'x':\n                dim = box.height_unit\n                coord = box.yc_unit\n                lower = box.left_unit\n                upper = box.right_unit\n            else:\n                dim = box.width_unit\n                coord = box.xc_unit\n                lower = box.bottom_unit\n                upper = box.top_unit\n            try:\n                tr_idx = self.grid.coord_to_track(lay_id, coord, unit_mode=True)\n            except ValueError:\n                self.add_pin_primitive(term_name, lay_name, box, show=show_pins)\n                return\n\n            width_ntr = self.grid.get_track_width_inverse(lay_id, dim, unit_mode=True)\n            if self.grid.get_track_width(lay_id, width_ntr, unit_mode=True) == dim:\n                track_id = TrackID(lay_id, tr_idx, width=width_ntr)\n                warr = WireArray(track_id, lower, upper, res=self.grid.resolution, 
unit_mode=True)\n                self.add_pin(term_name, warr, show=show_pins)\n            else:\n                self.add_pin_primitive(term_name, lay_name, box, show=show_pins)\n"
  },
  {
    "path": "bag/layout/util.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This module contains utility classes used for layout\n\"\"\"\n\nfrom typing import Iterator, Union, Tuple, List, Any\n\nimport pprint\n\nimport numpy as np\n\n__all__ = ['BBox', 'BBoxArray', 'Pin', 'transform_table', 'transform_point',\n           'get_inverse_transform', 'tuple2_to_int', 'tuple2_to_float_int']\n\ntransform_table = {'R0': np.array([[1, 0], [0, 1]], dtype=int),\n                   'MX': np.array([[1, 0], [0, -1]], dtype=int),\n                   'MY': np.array([[-1, 0], [0, 1]], dtype=int),\n                   'R180': np.array([[-1, 0], [0, -1]], dtype=int),\n                   'R90': np.array([[0, -1], [1, 0]], dtype=int),\n                   'MXR90': np.array([[0, 1], [1, 0]], dtype=int),\n                   'MYR90': np.array([[0, -1], [-1, 0]], dtype=int),\n                   'R270': np.array([[0, 1], [-1, 0]], dtype=int),\n                   }\n\n\ndef tuple2_to_int(input_tuple: Tuple[Any, Any]) -> Tuple[int, int]:\n    \"\"\"\n    Cast a tuple of 2 elements to a tuple of 2 ints.\n    :param input_tuple: Tuple of two elements\n    :return: Tuple of two ints\n    \"\"\"\n    return int(input_tuple[0]), int(input_tuple[1])\n\n\ndef tuple2_to_float_int(input_tuple: Tuple[Any, Any]) -> Tuple[float, int]:\n    \"\"\"\n    Cast a tuple of 2 elements to a tuple of 2 ints.\n    :param input_tuple: Tuple of two elements\n    :return: Tuple of two ints\n    \"\"\"\n    return float(input_tuple[0]), int(input_tuple[1])\n\n\ndef transform_point(x, y, loc, orient):\n    \"\"\"Transform the (x, y) point using the given location and orientation.\"\"\"\n    shift = np.asarray(loc)\n    if orient not in transform_table:\n        raise ValueError('Unsupported orientation: %s' % orient)\n\n    mat = transform_table[orient]\n    ans = np.dot(mat, np.array([x, y])) + shift\n    return ans.item(0), ans.item(1)\n\n\ndef get_inverse_transform(loc, orient):\n    \"\"\"Returns the inverse transform\"\"\"\n    if orient == 
'R90':\n        orient_inv = 'R270'\n    elif orient == 'R270':\n        orient_inv = 'R90'\n    else:\n        orient_inv = orient\n\n    inv_mat = transform_table[orient_inv]\n    new_shift = np.dot(inv_mat, -np.asarray(loc))\n    return (new_shift.item(0), new_shift.item(1)), orient_inv\n\n\ndef transform_loc_orient(loc, orient, trans_loc, trans_orient):\n    \"\"\"Transforms loc orient with trans_loc and trans_orient\"\"\"\n    mat1 = transform_table[orient]\n    mat2 = transform_table[trans_orient]\n    new_mat = np.dot(mat2, mat1)\n    new_loc = np.array(trans_loc) + np.dot(mat2, np.array(loc))\n\n    for key, val in transform_table.items():\n        if np.allclose(new_mat, val):\n            return (new_loc.item(0), new_loc.item(1)), key\n\n\nclass PortSpec(object):\n    \"\"\"Specification of a port.\n\n    Parameters\n    ----------\n    ntr : int\n        number of tracks the port should occupy\n    idc : float\n        DC current the port should support, in Amperes.\n    \"\"\"\n\n    def __init__(self, ntr, idc):\n        self._ntr = ntr\n        self._idc = idc\n\n    @property\n    def ntr(self):\n        \"\"\"minimum number of tracks the port should occupy\"\"\"\n        return self._ntr\n\n    @property\n    def idc(self):\n        \"\"\"minimum DC current the port should support, in Amperes\"\"\"\n        return self._idc\n\n    def __str__(self):\n        return repr(self)\n\n    def __repr__(self):\n        fmt_str = '%s(%d, %.4g)'\n        return fmt_str % (self.__class__.__name__, self._ntr, self._idc)\n\n\nclass BBox(object):\n    \"\"\"An immutable bounding box.\n\n    Parameters\n    ----------\n    left : float or int\n        left coordinate.\n    bottom : float or int\n        bottom coordinate.\n    right : float or int\n        right coordinate.\n    top : float or int\n        top coordinate.\n    resolution : float\n        the coordinate resolution\n    unit_mode : bool\n        True if the given coordinates are in layout units 
already.\n\n    \"\"\"\n\n    def __init__(self, left, bottom, right, top, resolution, unit_mode=False):\n        if not unit_mode:\n            self._left_unit = int(round(left / resolution))\n            self._bot_unit = int(round(bottom / resolution))\n            self._right_unit = int(round(right / resolution))\n            self._top_unit = int(round(top / resolution))\n        else:\n            self._left_unit = int(round(left))\n            self._bot_unit = int(round(bottom))\n            self._right_unit = int(round(right))\n            self._top_unit = int(round(top))\n            # self._left_unit = left\n            # self._bot_unit = bottom\n            # self._right_unit = right\n            # self._top_unit = top\n        self._res = resolution\n\n    @classmethod\n    def get_invalid_bbox(cls):\n        # type: () -> BBox\n        \"\"\"Returns a default invalid bounding box.\n\n        Returns\n        -------\n        box : bag.layout.util.BBox\n            an invalid bounding box.\n        \"\"\"\n        return cls(0, 0, -1, -1, 0.1, unit_mode=True)\n\n    @property\n    def left(self):\n        \"\"\"left coordinate.\"\"\"\n        return self._left_unit * self._res\n\n    @property\n    def left_unit(self):\n        \"\"\"left coordinate.\"\"\"\n        return self._left_unit\n\n    @property\n    def right(self):\n        \"\"\"right coordinate.\"\"\"\n        return self._right_unit * self._res\n\n    @property\n    def right_unit(self):\n        \"\"\"right coordinate.\"\"\"\n        return self._right_unit\n\n    @property\n    def bottom(self):\n        \"\"\"bottom coordinate.\"\"\"\n        return self._bot_unit * self._res\n\n    @property\n    def bottom_unit(self):\n        \"\"\"bottom coordinate.\"\"\"\n        return self._bot_unit\n\n    @property\n    def top(self):\n        \"\"\"top coordinate.\"\"\"\n        return self._top_unit * self._res\n\n    @property\n    def top_unit(self):\n        \"\"\"top coordinate.\"\"\"\n      
  return self._top_unit\n\n    @property\n    def resolution(self):\n        \"\"\"coordinate resolution.\"\"\"\n        return self._res\n\n    @property\n    def width(self):\n        \"\"\"width of this bounding box.\"\"\"\n        return self.width_unit * self._res\n\n    @property\n    def width_unit(self):\n        \"\"\"width of this bounding box in resolution units.\"\"\"\n        return self._right_unit - self._left_unit\n\n    @property\n    def height(self):\n        \"\"\"height of this bounding box.\"\"\"\n        return self.height_unit * self._res\n\n    @property\n    def height_unit(self):\n        \"\"\"height of this bounding box in resolution units.\"\"\"\n        return self._top_unit - self._bot_unit\n\n    @property\n    def xc(self):\n        \"\"\"The center X coordinate, rounded to nearest grid point.\"\"\"\n        return ((self._left_unit + self._right_unit) // 2) * self._res\n\n    @property\n    def xc_unit(self):\n        \"\"\"The center X coordinate in resolution units.\"\"\"\n        return (self._left_unit + self._right_unit) // 2\n\n    @property\n    def yc(self):\n        \"\"\"The center Y coordinate, rounded to nearest grid point.\"\"\"\n        return ((self._bot_unit + self._top_unit) // 2) * self._res\n\n    @property\n    def yc_unit(self):\n        \"\"\"The center Y coordinate in resolution units.\"\"\"\n        return (self._bot_unit + self._top_unit) // 2\n\n    def get_points(self, unit_mode=False):\n        # type: (bool) -> List[Tuple[Union[float, int], Union[float, int]]]\n        \"\"\"Returns this bounding box as a list of points.\n\n        Parameters\n        ----------\n        unit_mode : bool\n            True to return points in resolution units.\n\n        Returns\n        -------\n        points : List[Tuple[Union[float, int], Union[float, int]]]\n            this bounding box as a list of points.\n        \"\"\"\n        if unit_mode:\n            return [(self._left_unit, self._bot_unit),\n             
       (self._left_unit, self._top_unit),\n                    (self._right_unit, self._top_unit),\n                    (self._right_unit, self._bot_unit)]\n        else:\n            return [(self.left, self.bottom),\n                    (self.left, self.top),\n                    (self.right, self.top),\n                    (self.right, self.bottom)]\n\n    def as_bbox_array(self):\n        \"\"\"Cast this BBox as a BBoxArray.\"\"\"\n        return BBoxArray(self)\n\n    def as_bbox_collection(self):\n        \"\"\"Cast this BBox as a BBoxCollection.\"\"\"\n        return BBoxCollection([BBoxArray(self)])\n\n    def merge(self, bbox):\n        # type: (BBox) -> BBox\n        \"\"\"Returns a new bounding box that's the union of this bounding box and the given one.\n\n        Parameters\n        ----------\n        bbox : bag.layout.util.BBox\n            the bounding box to merge with.\n\n        Returns\n        -------\n        total : bag.layout.util.BBox\n            the merged bounding box.\n        \"\"\"\n        if not self.is_valid():\n            return bbox\n        elif not bbox.is_valid():\n            return self\n\n        return BBox(min(self._left_unit, bbox._left_unit),\n                    min(self._bot_unit, bbox._bot_unit),\n                    max(self._right_unit, bbox._right_unit),\n                    max(self._top_unit, bbox._top_unit),\n                    self._res, unit_mode=True)\n\n    def intersect(self, bbox):\n        # type: (BBox) -> BBox\n        \"\"\"Returns a new bounding box that's the intersection of this bounding box and the given one.\n\n        Parameters\n        ----------\n        bbox : bag.layout.util.BBox\n            the bounding box to intersect with.\n\n        Returns\n        -------\n        intersect : bag.layout.util.BBox\n            the intersection bounding box.\n        \"\"\"\n        return BBox(max(self._left_unit, bbox._left_unit),\n                    max(self._bot_unit, bbox._bot_unit),\n         
           min(self._right_unit, bbox._right_unit),\n                    min(self._top_unit, bbox._top_unit),\n                    self._res, unit_mode=True)\n\n    def overlaps(self, bbox):\n        # type: (BBox) -> bool\n        \"\"\"Returns True if this BBox overlaps the given BBox.\"\"\"\n\n        return ((max(self._left_unit, bbox._left_unit) <\n                 min(self._right_unit, bbox._right_unit)) and\n                (max(self._bot_unit, bbox._bot_unit) <\n                 min(self._top_unit, bbox._top_unit)))\n\n    def extend(self, x=None, y=None, unit_mode=False):\n        # type: (Union[float, int], Union[float, int], bool) -> BBox\n        \"\"\"Returns an extended BBox that covers the given point.\n\n        Parameters\n        ----------\n        x : float or None\n            if given, the X coordinate to extend to.\n        y : float or None\n            if given, the Y coordinate to extend to\n        unit_mode : bool\n            True if x and y are given in resolution units.\n\n        Returns\n        -------\n        ext_box : BBox\n            the extended bounding box.\n        \"\"\"\n        if x is None:\n            x = self._left_unit\n        elif not unit_mode:\n            x = int(round(x / self._res))\n        if y is None:\n            y = self._bot_unit\n        elif not unit_mode:\n            y = int(round(y / self._res))\n\n        return BBox(min(self._left_unit, x),\n                    min(self._bot_unit, y),\n                    max(self._right_unit, x),\n                    max(self._top_unit, y), self._res, unit_mode=True)\n\n    def expand(self, dx=0, dy=0, unit_mode=False):\n        # type: (Union[float, int], Union[float, int], bool) -> BBox\n        \"\"\"Returns a BBox expanded by the given amount.\n\n        Parameters\n        ----------\n        dx : Union[float, int]\n            if given, expand left and right edge by this amount.\n        dy : Union[float, int]\n            if given, expand top and bottom 
edge by this amount.\n        unit_mode : bool\n            True if x and y are given in resolution units.\n\n        Returns\n        -------\n        ext_box : BBox\n            the extended bounding box.\n        \"\"\"\n        if not unit_mode:\n            dx = int(round(dx / self._res))\n            dy = int(round(dy / self._res))\n\n        return BBox(self._left_unit - dx, self._bot_unit - dy, self._right_unit + dx,\n                    self._top_unit + dy, self._res, unit_mode=True)\n\n    def transform(self, loc=(0, 0), orient='R0', unit_mode=False):\n        # type: (Tuple[Union[float, int], Union[float, int]], str, bool) -> BBox\n        \"\"\"Returns a new BBox under the given transformation.\n\n        rotates first before shift.\n\n        Parameters\n        ----------\n        loc : Tuple[Union[float, int], Union[float, int]]\n            location of the anchor.\n        orient : str\n            the orientation of the bounding box.\n        unit_mode : bool\n            True if location is given in resolution units\n\n        Returns\n        -------\n        box : BBox\n            the new bounding box.\n        \"\"\"\n        if not self.is_valid():\n            return BBox.get_invalid_bbox()\n\n        if not unit_mode:\n            loc = int(round(loc[0] / self._res)), int(round(loc[1] / self._res))\n\n        p1 = transform_point(self._left_unit, self._bot_unit, loc, orient)\n        p2 = transform_point(self._right_unit, self._top_unit, loc, orient)\n        return BBox(min(p1[0], p2[0]), min(p1[1], p2[1]),\n                    max(p1[0], p2[0]), max(p1[1], p2[1]),\n                    self._res, unit_mode=True)\n\n    def move_by(self, dx=0, dy=0, unit_mode=False):\n        # type: (Union[float, int], Union[float, int], bool) -> BBox\n        \"\"\"Returns a new BBox shifted by the given amount.\n\n        Parameters\n        ----------\n        dx : float\n            shift in X direction.\n        dy : float\n            shift in Y 
direction.\n        unit_mode : bool\n            True if shifts are given in resolution units\n\n        Returns\n        -------\n        box : bag.layout.util.BBox\n            the new bounding box.\n        \"\"\"\n        if not unit_mode:\n            dx = int(round(dx / self._res))\n            dy = int(round(dy / self._res))\n        return BBox(self._left_unit + dx, self._bot_unit + dy,\n                    self._right_unit + dx, self._top_unit + dy,\n                    self._res, unit_mode=True)\n\n    def flip_xy(self):\n        # type: () -> BBox\n        \"\"\"Returns a new BBox with X and Y coordinate swapped.\"\"\"\n        return BBox(self._bot_unit, self._left_unit, self._top_unit, self._right_unit,\n                    self._res, unit_mode=True)\n\n    def with_interval(self, direction, lower, upper, unit_mode=False):\n        if not unit_mode:\n            lower = int(round(lower / self._res))\n            upper = int(round(upper / self._res))\n        if direction == 'x':\n            return BBox(lower, self._bot_unit, upper, self._top_unit, self._res, unit_mode=True)\n        else:\n            return BBox(self._left_unit, lower, self._right_unit, upper, self._res, unit_mode=True)\n\n    def get_interval(self, direction, unit_mode=False):\n        # type: (str, bool) -> Tuple[Union[float, int], Union[float, int]]\n        \"\"\"Returns the interval of this bounding box along the given direction.\n\n        Parameters\n        ----------\n        direction : str\n            direction along which to campute the bounding box interval.  
Either 'x' or 'y'.\n        unit_mode : bool\n            True to return dimensions in resolution units.\n\n        Returns\n        -------\n        lower : float\n            the lower coordinate along the given direction.\n        upper : float\n            the upper coordinate along the given direction.\n        \"\"\"\n        if direction == 'x':\n            ans = self._left_unit, self._right_unit\n        else:\n            ans = self._bot_unit, self._top_unit\n\n        if unit_mode:\n            return ans\n        return ans[0] * self._res, ans[1] * self._res\n\n    def get_bounds(self, unit_mode=False):\n        # type: (bool) -> Tuple[Union[float, int], ...]\n        \"\"\"Returns the bounds of this bounding box.\n\n        Parameters\n        ----------\n        unit_mode : bool\n            True to return bounds in resolution units.\n\n        Returns\n        -------\n        bounds : Tuple[Union[float, int], ...]\n            a tuple of (left, bottom, right, top) coordinates.\n        \"\"\"\n        if unit_mode:\n            return self._left_unit, self._bot_unit, self._right_unit, self._top_unit\n        else:\n            return self.left, self.bottom, self.right, self.top\n\n    def is_physical(self):\n        \"\"\"Returns True if this bounding box has positive area.\n\n        Returns\n        -------\n        is_physical : bool\n            True if this bounding box has positive area.\n        \"\"\"\n        return self._right_unit - self._left_unit > 0 and self._top_unit - self._bot_unit > 0\n\n    def is_valid(self):\n        \"\"\"Returns True if this bounding box is valid, i.e. 
nonnegative area.\n\n        Returns\n        -------\n        is_valid : bool\n            True if this bounding box has nonnegative area.\n        \"\"\"\n        return self._right_unit >= self._left_unit and self._top_unit >= self._bot_unit\n\n    def get_immutable_key(self):\n        \"\"\"Returns an immutable key object that can be used to uniquely identify this BBox.\"\"\"\n        return (self.__class__.__name__, self._left_unit, self._bot_unit,\n                self._right_unit, self._top_unit, self._res)\n\n    def __str__(self):\n        return repr(self)\n\n    def __repr__(self):\n        precision = max(1, -1 * int(np.floor(np.log10(self._res))))\n        fmt_str = '%s(%.{0}f, %.{0}f, %.{0}f, %.{0}f)'.format(precision)\n        return fmt_str % (self.__class__.__name__, self.left, self.bottom, self.right, self.top)\n\n    def __hash__(self):\n        return hash(self.get_immutable_key())\n\n    def __eq__(self, other):\n        return self.get_immutable_key() == other.get_immutable_key()\n\n\nclass BBoxArray(object):\n    \"\"\"An array of bounding boxes.\n\n    Useful for representing bus of wires.\n\n    Parameters\n    ----------\n    bbox : BBox\n        the lower-left bounding box.\n    nx : int\n        number of columns.\n    ny : int\n        number of rows.\n    spx : Union[float, int]\n        column pitch.\n    spy : Union[float, int]\n        row pitch.\n    unit_mode : bool\n        True if layout dimensions are specified in resolution units.\n    \"\"\"\n\n    def __init__(self, bbox, nx=1, ny=1, spx=0, spy=0, unit_mode=False):\n        # type: (BBox, int, int, Union[float, int], Union[float, int], bool) -> None\n        if not isinstance(bbox, BBox):\n            raise ValueError('%s is not a BBox object' % bbox)\n        if nx <= 0 or ny <= 0:\n            raise ValueError('Cannot have 0 bounding boxes.')\n        if spx < 0 or spy < 0:\n            raise ValueError('Currently does not support negative pitches.')\n\n        self._bbox 
= bbox\n        self._nx = nx\n        self._ny = ny\n        if unit_mode:\n            self._spx_unit = int(spx)  # type: int\n            self._spy_unit = int(spy)  # type: int\n        else:\n            self._spx_unit = int(round(spx / bbox.resolution))\n            self._spy_unit = int(round(spy / bbox.resolution))\n\n    def __iter__(self):\n        # type: () -> Iterator[BBox]\n        \"\"\"Iterates over all bounding boxes in this BBoxArray.\n\n        traverses from left to right, then from bottom to top.\n        \"\"\"\n        for idx in range(self._nx * self._ny):\n            yield self.get_bbox(idx)\n\n    @property\n    def base(self):\n        # type: () -> BBox\n        \"\"\"the lower-left bounding box\"\"\"\n        return self._bbox\n\n    @property\n    def nx(self):\n        # type: () -> int\n        \"\"\"number of columns\"\"\"\n        return self._nx\n\n    @property\n    def ny(self):\n        # type: () -> int\n        \"\"\"number of columns\"\"\"\n        return self._ny\n\n    @property\n    def spx(self):\n        # type: () -> float\n        \"\"\"column pitch\"\"\"\n        return self._spx_unit * self._bbox.resolution\n\n    @property\n    def spx_unit(self):\n        # type: () -> int\n        \"\"\"column pitch in resolution units.\"\"\"\n        return self._spx_unit\n\n    @property\n    def spy(self):\n        # type: () -> float\n        \"\"\"row pitch\"\"\"\n        return self._spy_unit * self._bbox.resolution\n\n    @property\n    def spy_unit(self):\n        # type: () -> int\n        \"\"\"row pitch in resolution units.\"\"\"\n        return self._spy_unit\n\n    @property\n    def left(self):\n        # type: () -> float\n        \"\"\"left-most edge coordinate.\"\"\"\n        return self._bbox.left\n\n    @property\n    def left_unit(self):\n        # type: () -> int\n        \"\"\"left-most edge coordinate.\"\"\"\n        return self._bbox.left_unit\n\n    @property\n    def right(self):\n        # type: () -> 
float\n        \"\"\"right-most edge coordinate.\"\"\"\n        return self.right_unit * self._bbox.resolution\n\n    @property\n    def right_unit(self):\n        # type: () -> int\n        \"\"\"right-most edge coordinate.\"\"\"\n        return self._bbox.right_unit + self._spx_unit * (self._nx - 1)\n\n    @property\n    def bottom(self):\n        # type: () -> float\n        \"\"\"bottom-most edge coordinate.\"\"\"\n        return self._bbox.bottom\n\n    @property\n    def bottom_unit(self):\n        # type: () -> int\n        \"\"\"bottom-most edge coordinate.\"\"\"\n        return self._bbox.bottom_unit\n\n    @property\n    def top(self):\n        # type: () -> float\n        \"\"\"top-most edge coordinate.\"\"\"\n        return self.top_unit * self._bbox.resolution\n\n    @property\n    def top_unit(self):\n        # type: () -> int\n        \"\"\"top-most edge coordinate.\"\"\"\n        return self._bbox.top_unit + self._spy_unit * (self._ny - 1)\n\n    @property\n    def xc(self):\n        return self.xc_unit * self._bbox.resolution\n\n    @property\n    def xc_unit(self):\n        # type: () -> int\n        return (self.left_unit + self.right_unit) // 2\n\n    @property\n    def yc(self):\n        return self.yc_unit * self._bbox.resolution\n\n    @property\n    def yc_unit(self):\n        # type: () -> int\n        return (self.bottom_unit + self.top_unit) // 2\n\n    def as_bbox_collection(self):\n        # type: () -> 'BBoxCollection'\n        \"\"\"Cast this BBoxArray as a BBoxCollection.\"\"\"\n        return BBoxCollection([self])\n\n    def get_bbox(self, idx):\n        # type: (int) -> BBox\n        \"\"\"Returns the bounding box with the given index.\n\n        index increases from left to right, then from bottom to top.  
lower-left box is index 0.\n\n        Returns\n        -------\n        bbox : bag.layout.util.BBox\n            the bounding box with the given index.\n        \"\"\"\n        row_idx, col_idx = divmod(idx, self._nx)\n        return self._bbox.transform(loc=(col_idx * self._spx_unit,\n                                         row_idx * self._spy_unit), unit_mode=True)\n\n    def get_overall_bbox(self):\n        \"\"\"Returns the overall bounding box of this BBoxArray.\n\n        Returns\n        -------\n        overall_bbox : bag.layout.util.BBox\n            the overall bounding box of this BBoxArray.\n        \"\"\"\n        return BBox(self.left_unit, self.bottom_unit, self.right_unit, self.top_unit,\n                    self._bbox.resolution, unit_mode=True)\n\n    def move_by(self, dx=0, dy=0, unit_mode=False):\n        # type: (Union[float, int], Union[float, int], bool) -> BBoxArray\n        \"\"\"Returns a new BBox shifted by the given amount.\n\n        Parameters\n        ----------\n        dx : float\n            shift in X direction.\n        dy : float\n            shift in Y direction.\n        unit_mode : bool\n            True if shifts are given in resolution units\n\n        Returns\n        -------\n        box_arr : BBoxArray\n            the new BBoxArray.\n        \"\"\"\n        return self.transform((dx, dy), unit_mode=unit_mode)\n\n    def transform(self, loc=(0, 0), orient='R0', unit_mode=False):\n        # type: (Tuple[Union[float, int], Union[float, int]], str, bool) -> BBoxArray\n        \"\"\"Returns a new BBoxArray under the given transformation.\n\n        rotates first before shift.\n\n        Parameters\n        ----------\n        loc : Tuple[Union[float, int], Union[float, int]]\n            location of the anchor.\n        orient : str\n            the orientation of the bounding box.\n        unit_mode : bool\n            True if location is given in resolution units\n\n        Returns\n        -------\n        box_arr : 
BBoxArray\n            the new BBoxArray.\n        \"\"\"\n        if unit_mode:\n            dx, dy = loc[0], loc[1]\n        else:\n            res = self._bbox.resolution\n            dx = int(round(loc[0] / res))\n            dy = int(round(loc[1] / res))\n\n        if orient == 'R0':\n            left = self.left_unit + dx\n            bottom = self.bottom_unit + dy\n        elif orient == 'MX':\n            left = self.left_unit + dx\n            bottom = -self.top_unit + dy\n        elif orient == 'MY':\n            left = -self.right_unit + dx\n            bottom = self.bottom_unit + dy\n        elif orient == 'R180':\n            left = -self.right_unit + dx\n            bottom = -self.top_unit + dy\n        else:\n            raise ValueError('Invalid orientation: ' + orient)\n\n        # no 90 degree-ish rotation; width and height will not interchange\n        new_base = BBox(left, bottom, left + self._bbox.width_unit,\n                        bottom + self._bbox.height_unit, self._bbox.resolution,\n                        unit_mode=True)\n        return BBoxArray(new_base, nx=self._nx, ny=self._ny,\n                         spx=self._spx_unit, spy=self._spy_unit, unit_mode=True)\n\n    def arrayed_copies(self, nx=1, ny=1, spx=0, spy=0, unit_mode=False):\n        # type: (int, int, Union[float, int], Union[float, int], bool) -> 'BBoxCollection'\n        \"\"\"Returns a BBoxCollection containing arrayed copies of this BBoxArray\n\n        Parameters\n        ----------\n        nx : int\n            number of copies in horizontal direction.\n        ny : int\n            number of copies in vertical direction.\n        spx : Union[float, int]\n            pitch in horizontal direction.\n        spy : Union[float, int]\n            pitch in vertical direction.\n        unit_mode : bool\n            True if location is given in resolution units\n\n        Returns\n        -------\n        bcol : :class:`bag.layout.util.BBoxCollection`\n            a 
BBoxCollection of the arrayed copies.\n        \"\"\"\n        if not unit_mode:\n            res = self._bbox.resolution\n            spx = int(round(spy / res))\n            spy = int(round(spy / res))\n\n        x_info = self._array_helper(nx, spx, self.nx, self._spx_unit)\n        y_info = self._array_helper(ny, spy, self.ny, self._spy_unit)\n\n        base = self.base\n        barr_list = [BBoxArray(base.move_by(dx, dy, unit_mode=True), nx=new_nx, ny=new_ny,\n                               spx=new_spx, spy=new_spy)\n                     for new_nx, new_spx, dx in zip(*x_info)\n                     for new_ny, new_spy, dy in zip(*y_info)]\n        return BBoxCollection(barr_list)\n\n    @staticmethod\n    def _array_helper(n1, sp1, n2, sp2):\n        if n1 == 1:\n            return [n2], [sp2], [0]\n        elif n2 == 1:\n            return [n1], [sp1], [0]\n        elif sp1 == sp2 * n2:\n            return [n1 * n2], [sp2], [0]\n        elif sp2 == sp1 * n1:\n            return [n1 * n2], [sp1], [0]\n        else:\n            # no way to express as single array\n            if n1 < n2 or (n1 == n2 and sp2 < sp1):\n                return [n2] * n1, [sp2] * n1, list(range(0, sp1 * n1, sp1))\n            else:\n                return [n1] * n2, [sp1] * n2, list(range(0, sp2 * n2, sp2))\n\n    def __str__(self):\n        return repr(self)\n\n    def __repr__(self):\n        precision = max(1, -1 * int(np.floor(np.log10(self._bbox.resolution))))\n        fmt_str = '%s(%s, %d, %d, %.{0}f, %.{0}f)'.format(precision)\n        return fmt_str % (self.__class__.__name__, self._bbox, self._nx,\n                          self._ny, self.spx, self.spy)\n\n\nclass BBoxCollection(object):\n    \"\"\"A collection of bounding boxes.\n\n    To support efficient computation, this class stores bounding boxes as a list of\n    BBoxArray objects.\n\n    Parameters\n    ----------\n    box_arr_list : list[bag.layout.util.BBoxArray]\n        list of BBoxArrays in this collections.\n   
 \"\"\"\n\n    def __init__(self, box_arr_list):\n        self._box_arr_list = box_arr_list\n\n    def __iter__(self):\n        \"\"\"Iterates over all BBoxArray in this collection.\"\"\"\n        return self._box_arr_list.__iter__()\n\n    def __reversed__(self):\n        return self._box_arr_list.__reversed__()\n\n    def __len__(self):\n        return len(self._box_arr_list)\n\n    def as_bbox_array(self):\n        \"\"\"Attempt to cast this BBoxCollection into a BBoxArray.\n\n        Returns\n        -------\n        bbox_arr : bag.layout.util.BBoxArray\n            the BBoxArray object that's equivalent to this BBoxCollection.\n\n        Raises\n        ------\n        Exception :\n            if this BBoxCollection cannot be cast into a BBoxArray.\n        \"\"\"\n        if len(self._box_arr_list) != 1:\n            raise Exception('Unable to cast this BBoxCollection into a BBoxArray.')\n\n        return self._box_arr_list[0]\n\n    def as_bbox(self):\n        \"\"\"Attempt to cast this BBoxCollection into a BBox.\n\n        Returns\n        -------\n        bbox : bag.layout.util.BBox\n            the BBox object that's equivalent to this BBoxCollection.\n\n        Raises\n        ------\n        Exception :\n            if this BBoxCollection cannot be cast into a BBox.\n        \"\"\"\n        if len(self._box_arr_list) != 1:\n            raise Exception('Unable to cast this BBoxCollection into a BBoxArray.')\n        box_arr = self._box_arr_list[0]\n        if box_arr.nx != 1 or box_arr.ny != 1:\n            raise Exception('Unable to cast this BBoxCollection into a BBoxArray.')\n        return box_arr.base\n\n    def get_bounding_box(self):\n        \"\"\"Returns the bounding box that encloses all boxes in this collection.\n\n        Returns\n        -------\n        bbox : bag.layout.util.BBox\n            the bounding box of this BBoxCollection.\n        \"\"\"\n        box = BBox.get_invalid_bbox()\n        for box_arr in self._box_arr_list:\n        
    all_box = BBox(box_arr.left, box_arr.bottom, box_arr.right, box_arr.top,\n                           box_arr.base.resolution)\n            box = box.merge(all_box)\n\n        return box\n\n    def transform(self, loc=(0, 0), orient='R0'):\n        \"\"\"Returns a new BBoxCollection under the given transformation.\n\n        rotates first before shift.\n\n        Parameters\n        ----------\n        loc : (float, float)\n            location of the anchor.\n        orient : str\n            the orientation of the bounding box.\n\n        Returns\n        -------\n        box_collection : bag.layout.util.BBoxCollection\n            the new BBoxCollection.\n        \"\"\"\n        new_list = [box_arr.transform(loc=loc, orient=orient) for box_arr in self._box_arr_list]\n        return BBoxCollection(new_list)\n\n    def __str__(self):\n        return repr(self)\n\n    def __repr__(self):\n        return pprint.pformat(self._box_arr_list)\n\n\nclass Pin(object):\n    \"\"\"A layout pin.\n\n    Multiple pins can share the same terminal name.\n\n    Parameters\n    ----------\n    pin_name : str\n        the pin label.\n    term_name : str\n        the terminal name.\n    layer : str\n        the pin layer name.\n    bbox : bag.layout.util.BBox\n        the pin bounding box.\n    \"\"\"\n\n    def __init__(self, pin_name, term_name, layer, bbox):\n        if not bbox.is_physical():\n            raise Exception('Non-physical pin bounding box: %s' % bbox)\n\n        self._pin_name = pin_name\n        self._term_name = term_name\n        self._layer = layer\n        self._bbox = bbox\n\n    @property\n    def pin_name(self):\n        \"\"\"the pin label.\"\"\"\n        return self._pin_name\n\n    @property\n    def term_name(self):\n        \"\"\"the terminal name.\"\"\"\n        return self._term_name\n\n    @property\n    def layer(self):\n        \"\"\"the pin layer name\"\"\"\n        return self._layer\n\n    @property\n    def bbox(self):\n        \"\"\"the pin 
bounding box.\"\"\"\n        return self._bbox\n\n    def __str__(self):\n        return repr(self)\n\n    def __repr__(self):\n        return '%s(%s, %s, %s, %s)' % (self.__class__.__name__, self._pin_name,\n                                       self._term_name, self._layer, self._bbox)\n"
  },
  {
    "path": "bag/math/LICENSE",
    "content": "BSD 3-Clause License\n\nCopyright (c) 2018, Regents of the University of California\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n  list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n  this list of conditions and the following disclaimer in the documentation\n  and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n  contributors may be used to endorse or promote products derived from\n  this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "bag/math/__init__.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This package defines design template classes.\n\"\"\"\n\nfrom typing import Iterable\n\nimport numpy as np\nfrom . import interpolate\n\n__all__ = ['lcm', 'gcd', 'interpolate', 'float_to_si_string', 'si_string_to_float']\n\n\nsi_mag = [-18, -15, -12, -9, -6, -3, 0, 3, 6, 9, 12]\nsi_pre = ['a', 'f', 'p', 'n', 'u', 'm', '', 'k', 'M', 'G', 'T']\n\n\ndef float_to_si_string(num, precision=6):\n    \"\"\"Converts the given floating point number to a string using SI prefix.\n\n    Parameters\n    ----------\n    num : float\n        the number to convert.\n    precision : int\n        number of significant digits, defaults to 6.\n\n    Returns\n    -------\n    ans : str\n        the string representation of the given number using SI suffix.\n    \"\"\"\n    if abs(num) < 1e-21:\n        return '0'\n    exp = np.log10(abs(num))\n\n    pre_idx = len(si_mag) - 1\n    for idx in range(len(si_mag)):\n        if exp < si_mag[idx]:\n            pre_idx = idx - 1\n            break\n\n    fmt = '%%.%dg%%s' % precision\n    res = 10.0 ** (si_mag[pre_idx])\n    return fmt % (num / res, si_pre[pre_idx])\n\n\ndef si_string_to_float(si_str):\n    \"\"\"Converts the given string with SI prefix to float.\n\n    Parameters\n    ----------\n    si_str : str\n        the string to convert\n\n    Returns\n    -------\n    ans : float\n        the floating point value of the given string.\n    \"\"\"\n    if si_str[-1] in si_pre:\n        idx = si_pre.index(si_str[-1])\n        return float(si_str[:-1]) * 10**si_mag[idx]\n    else:\n        return float(si_str)\n\n\ndef gcd(a, b):\n    # type: (int, int) -> int\n    \"\"\"Compute greatest common divisor of two positive integers.\n\n    Parameters\n    ----------\n    a : int\n        the first number.\n    b : int\n        the second number.\n\n    Returns\n    -------\n    ans : int\n        the greatest common divisor of the two given integers.\n    \"\"\"\n    while b:\n        a, b = b, a 
% b\n    return a\n\n\ndef lcm(arr, init=1):\n    # type: (Iterable[int], int) -> int\n    \"\"\"Compute least common multiple of all numbers in the given list.\n\n    Parameters\n    ----------\n    arr : Iterable[int]\n        a list of integers.\n    init : int\n        the initial LCM.  Defaults to 1.\n\n    Returns\n    -------\n    ans : int\n        the least common multiple of all the given numbers.\n    \"\"\"\n    cur_lcm = init\n    for val in arr:\n        cur_lcm = cur_lcm * val // gcd(cur_lcm, val)\n    return cur_lcm\n"
  },
  {
    "path": "bag/math/dfun.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This module defines the differentiable function class.\"\"\"\n\nfrom typing import Union, List, Optional, Tuple\n\nimport abc\n\nimport numpy as np\n\n\nclass DiffFunction(abc.ABC):\n    \"\"\"An abstract class representing a differentiable scalar function.\n\n    Supports Numpy broadcasting.  Defaults to using finite difference for derivative calculation.\n\n    Parameters\n    ----------\n    input_ranges : List[Tuple[Optional[float], Optional[float]]]\n        input ranges.\n    delta_list : Optional[List[float]]\n        a list of finite difference step size for each input.  If None,\n        finite difference will be disabled.\n    \"\"\"\n\n    def __init__(self, input_ranges, delta_list=None):\n        # type: (List[Tuple[Optional[float], Optional[float]]], Optional[List[float]]) -> None\n        # error checking\n        self._ndim = len(input_ranges)\n        if delta_list is not None and len(delta_list) != self._ndim:\n            raise ValueError('finite difference list length inconsistent.')\n\n        self._input_ranges = input_ranges\n        self.delta_list = delta_list  # type: Optional[List[float]]\n\n    @property\n    def input_ranges(self):\n        # type: () -> List[Tuple[Optional[float], Optional[float]]]\n        return self._input_ranges\n\n    @property\n    def ndim(self):\n        # type: () -> int\n        \"\"\"Number of input dimensions.\"\"\"\n        return self._ndim\n\n    @abc.abstractmethod\n    def __call__(self, xi):\n        \"\"\"Interpolate at the given coordinates.\n\n        Numpy broadcasting rules apply.\n\n        Parameters\n        ----------\n        xi : array_like\n            The coordinates to evaluate, with shape (..., ndim)\n\n        Returns\n        -------\n        val : np.multiarray.ndarray\n            The interpolated values at the given coordinates.\n        \"\"\"\n        raise NotImplementedError('Not implemented')\n\n    def get_input_range(self, 
idx):\n        # type: (int) -> Tuple[Optional[float], Optional[float]]\n        \"\"\"Returns the input range of the given dimension.\"\"\"\n        return self._input_ranges[idx]\n\n    def deriv(self, xi, j):\n        \"\"\"Calculate the derivative at the given coordinates with respect to input j.\n\n        Numpy broadcasting rules apply.\n\n        Parameters\n        ----------\n        xi : array_like\n            The coordinates to evaluate, with shape (..., ndim)\n        j : int\n            input index.\n\n        Returns\n        -------\n        val : np.multiarray.ndarray\n            The derivatives at the given coordinates.\n        \"\"\"\n        return self._fd(xi, j, self.delta_list[j])\n\n    def jacobian(self, xi):\n        \"\"\"Calculate the Jacobian at the given coordinates.\n\n        Numpy broadcasting rules apply.\n\n        If finite difference step sizes are not specified,\n        will call deriv() in a for loop to compute the Jacobian.\n\n        Parameters\n        ----------\n        xi : array_like\n            The coordinates to evaluate, with shape (..., ndim)\n\n        Returns\n        -------\n        val : np.multiarray.ndarray\n            The Jacobian matrices at the given coordinates.\n        \"\"\"\n        if self.delta_list:\n            return self._fd_jacobian(xi, self.delta_list)\n        else:\n            xi = np.asarray(xi, dtype=float)\n            ans = np.empty(xi.shape)\n            for n in range(self.ndim):\n                ans[..., n] = self.deriv(xi, n)\n            return ans\n\n    def _fd(self, xi, idx, delta):\n        \"\"\"Calculate the derivative along the given index using central finite difference.\n\n        Parameters\n        ----------\n        xi : array_like\n            The coordinates to evaluate, with shape (..., ndim)\n        idx : int\n            The index to calculate the derivative on.\n        delta : float\n            The finite difference step size.\n\n        Returns\n        
-------\n        val : np.multiarray.ndarray\n            The derivatives at the given coordinates.\n        \"\"\"\n        if idx < 0 or idx >= self.ndim:\n            raise ValueError('Invalid derivative index: %d' % idx)\n\n        xi = np.asarray(xi, dtype=float)\n        if xi.shape[-1] != self.ndim:\n            raise ValueError(\"The requested sample points xi have dimension %d, \"\n                             \"but this interpolator has dimension %d\" % (xi.shape[-1], self.ndim))\n\n        # use broadcasting to evaluate two points at once\n        xtest = np.broadcast_to(xi, (2,) + xi.shape).copy()\n        xtest[0, ..., idx] += delta / 2.0\n        xtest[1, ..., idx] -= delta / 2.0\n        val = self(xtest)\n        ans = (val[0] - val[1]) / delta  # type: np.ndarray\n\n        if ans.size == 1 and not np.isscalar(ans):\n            return ans[0]\n        return ans\n\n    def _fd_jacobian(self, xi, delta_list):\n        \"\"\"Calculate the Jacobian matrix using central finite difference.\n\n        Parameters\n        ----------\n        xi : array_like\n            The coordinates to evaluate, with shape (..., ndim)\n        delta_list : List[float]\n            list of finite difference step sizes for each input.\n\n        Returns\n        -------\n        val : np.multiarray.ndarray\n            The Jacobian matrices at the given coordinates.\n        \"\"\"\n        xi = np.asarray(xi, dtype=float)\n        if xi.shape[-1] != self.ndim:\n            raise ValueError(\"The requested sample points xi have dimension %d, \"\n                             \"but this interpolator has dimension %d\" % (xi.shape[-1], self.ndim))\n\n        # use broadcasting to evaluate all points at once\n        xtest = np.broadcast_to(xi, (2 * self.ndim,) + xi.shape).copy()\n        for idx, delta in enumerate(delta_list):\n            xtest[2 * idx, ..., idx] += delta / 2.0\n            xtest[2 * idx + 1, ..., idx] -= delta / 2.0\n\n        val = self(xtest)\n        
ans = np.empty(xi.shape)\n        for idx, delta in enumerate(delta_list):\n            ans[..., idx] = (val[2 * idx, ...] - val[2 * idx + 1, ...]) / delta\n        return ans\n\n    def transform_input(self, amat, bmat):\n        # type: (np.multiarray.ndarray, np.multiarray.ndarray) -> DiffFunction\n        \"\"\"Returns f(Ax + B), where f is this function and A, B are matrices.\n\n        Parameters\n        ----------\n        amat : np.multiarray.ndarray\n            the input transform matrix.\n        bmat : np.multiarray.ndarray\n            the input shift matrix.\n\n        Returns\n        -------\n        dfun : DiffFunction\n            a scalar differential function.\n        \"\"\"\n        return InLinTransformFunction(self, amat, bmat)\n\n    def __add__(self, other):\n        # type: (Union[DiffFunction, float, int, np.multiarray.ndarray]) -> DiffFunction\n        if isinstance(other, DiffFunction):\n            return SumDiffFunction(self, other, f2_sgn=1.0)\n        elif isinstance(other, float) or isinstance(other, int):\n            return ScaleAddFunction(self, other, 1.0)\n        elif isinstance(other, np.ndarray):\n            return ScaleAddFunction(self, np.asscalar(other), 1.0)\n        else:\n            raise NotImplementedError('Unknown type %s' % type(other))\n\n    def __radd__(self, other):\n        # type: (Union[DiffFunction, float, int, np.multiarray.ndarray]) -> DiffFunction\n        return self.__add__(other)\n\n    def __sub__(self, other):\n        # type: (Union[DiffFunction, float, int, np.multiarray.ndarray]) -> DiffFunction\n        if isinstance(other, DiffFunction):\n            return SumDiffFunction(self, other, f2_sgn=-1.0)\n        elif isinstance(other, float) or isinstance(other, int):\n            return ScaleAddFunction(self, -other, 1.0)\n        elif isinstance(other, np.ndarray):\n            return ScaleAddFunction(self, -np.asscalar(other), 1.0)\n        else:\n            raise 
NotImplementedError('Unknown type %s' % type(other))\n\n    def __rsub__(self, other):\n        # type: (Union[DiffFunction, float, int, np.multiarray.ndarray]) -> DiffFunction\n        if isinstance(other, DiffFunction):\n            return SumDiffFunction(other, self, f2_sgn=-1.0)\n        elif isinstance(other, float) or isinstance(other, int):\n            return ScaleAddFunction(self, other, -1.0)\n        elif isinstance(other, np.ndarray):\n            return ScaleAddFunction(self, np.asscalar(other), -1.0)\n        else:\n            raise NotImplementedError('Unknown type %s' % type(other))\n\n    def __mul__(self, other):\n        # type: (Union[DiffFunction, float, int, np.multiarray.ndarray]) -> DiffFunction\n        if isinstance(other, DiffFunction):\n            return ProdFunction(self, other)\n        elif isinstance(other, float) or isinstance(other, int):\n            return ScaleAddFunction(self, 0.0, other)\n        elif isinstance(other, np.ndarray):\n            return ScaleAddFunction(self, 0.0, np.asscalar(other))\n        else:\n            raise NotImplementedError('Unknown type %s' % type(other))\n\n    def __rmul__(self, other):\n        # type: (Union[DiffFunction, float, int, np.multiarray.ndarray]) -> DiffFunction\n        return self.__mul__(other)\n\n    def __pow__(self, other):\n        # type: (Union[float, int, np.multiarray.ndarray]) -> DiffFunction\n        if isinstance(other, float) or isinstance(other, int):\n            return PwrFunction(self, other, scale=1.0)\n        elif isinstance(other, np.ndarray):\n            return PwrFunction(self, np.asscalar(other), scale=1.0)\n        else:\n            raise NotImplementedError('Unknown type %s' % type(other))\n\n    def __div__(self, other):\n        # type: (Union[DiffFunction, float, int, np.multiarray.ndarray]) -> DiffFunction\n        if isinstance(other, DiffFunction):\n            return DivFunction(self, other)\n        elif isinstance(other, float) or 
isinstance(other, int):\n            return ScaleAddFunction(self, 0.0, 1.0 / other)\n        elif isinstance(other, np.ndarray):\n            return ScaleAddFunction(self, 0.0, 1.0 / np.asscalar(other))\n        else:\n            raise NotImplementedError('Unknown type %s' % type(other))\n\n    def __truediv__(self, other):\n        # type: (Union[DiffFunction, float, int, np.multiarray.ndarray]) -> DiffFunction\n        return self.__div__(other)\n\n    def __rdiv__(self, other):\n        # type: (Union[DiffFunction, float, int, np.multiarray.ndarray]) -> DiffFunction\n        if isinstance(other, DiffFunction):\n            return DivFunction(other, self)\n        elif isinstance(other, float) or isinstance(other, int):\n            return PwrFunction(self, -1.0, scale=other)\n        elif isinstance(other, np.ndarray):\n            return PwrFunction(self, -1.0, scale=np.asscalar(other))\n        else:\n            raise NotImplementedError('Unknown type %s' % type(other))\n\n    def __rtruediv__(self, other):\n        # type: (Union[DiffFunction, float, int, np.multiarray.ndarray]) -> DiffFunction\n        return self.__rdiv__(other)\n\n    def __neg__(self):\n        # type: () -> DiffFunction\n        return ScaleAddFunction(self, 0.0, -1.0)\n\n\nclass InLinTransformFunction(DiffFunction):\n    \"\"\"A DiffFunction where the input undergoes a linear transformation first.\n\n    This function computes f(Ax + B), where A and B are matrices.\n\n    Parameters\n    ----------\n    f1 : DiffFunction\n        the parent function.\n    amat : np.multiarray.ndarray\n        the input transform matrix.\n    bmat : np.multiarray.ndarray\n        the input shift matrix.\n    \"\"\"\n    def __init__(self, f1, amat, bmat):\n        # type: (DiffFunction, np.multiarray.ndarray, np.multiarray.ndarray) -> None\n        if amat.shape[0] != f1.ndim or bmat.shape[0] != f1.ndim:\n            raise ValueError('amat/bmat number of rows must be %d' % f1.ndim)\n        if 
len(bmat.shape) != 1:\n            raise ValueError('bmat must be 1 dimension.')\n\n        # domain of f(Ax+B) cannot be represented by input ranges.\n        super(InLinTransformFunction, self).__init__([(None, None)] * amat.shape[1], delta_list=None)\n        self._f1 = f1\n        self._amat = amat\n        self._bmat = bmat.reshape(-1, 1)\n\n    def _get_arg(self, xi):\n        xi = np.asarray(xi)\n        xi_shape = xi.shape\n        my_ndim = self.ndim\n        if xi_shape[-1] != my_ndim:\n            raise ValueError('Last dimension must have size %d' % my_ndim)\n\n        xi = xi.reshape(-1, my_ndim)\n        return (self._amat.dot(xi.T) + self._bmat).T, xi_shape\n\n    def __call__(self, xi):\n        farg, xi_shape = self._get_arg(xi)\n        result = self._f1(farg)\n        if np.isscalar(result):\n            return result\n        return result.reshape(xi_shape[:-1])\n\n    def deriv(self, xi, j):\n        jmat = self.jacobian(xi)\n        return jmat[..., 0, j]\n\n    def jacobian(self, xi):\n        farg, xi_shape = self._get_arg(xi)\n        jmat = self._f1.jacobian(farg).dot(self._amat)\n        shape_trunc = xi_shape[:-1]  # type: Tuple[int, ...]\n        return jmat.reshape(shape_trunc + (1, self.ndim))\n\n\nclass ScaleAddFunction(DiffFunction):\n    \"\"\"A DiffFunction multiply by a scalar then added to a scalar.\n\n    Parameters\n    ----------\n    f1 : DiffFunction\n        the first function.\n    adder : float\n        constant to add.\n    scaler : float\n        constant to multiply.\n    \"\"\"\n    def __init__(self, f1, adder, scaler):\n        # type: (DiffFunction, float, float) -> None\n        DiffFunction.__init__(self, f1.input_ranges, delta_list=None)\n        self._f1 = f1\n        self._adder = adder\n        self._scaler = scaler\n\n    def __call__(self, xi):\n        return self._f1(xi) * self._scaler + self._adder\n\n    def deriv(self, xi, j):\n        return self._f1.deriv(xi, j) * self._scaler\n\n    def 
jacobian(self, xi):\n        return self._f1.jacobian(xi) * self._scaler\n\n\ndef _intersection(*args):\n    input_ranges = []\n    for bound_list in zip(*args):\n        lmax, umin = None, None\n        for l, u in bound_list:\n            if l is None:\n                lmax, umin = None, None\n                break\n            else:\n                if lmax is None:\n                    lmax, umin = l, u\n                else:\n                    lmax = max(l, lmax)\n                    umin = min(u, umin)\n\n        input_ranges.append((lmax, umin))\n\n    return input_ranges\n\n\nclass SumDiffFunction(DiffFunction):\n    \"\"\"Sum or Difference of two DiffFunctions\n\n    Parameters\n    ----------\n    f1 : DiffFunction\n        the first function.\n    f2 : DiffFunction\n        the second function.\n    f2_sgn : float\n        1 if adding, -1 if subtracting.\n    \"\"\"\n    def __init__(self, f1, f2, f2_sgn=1.0):\n        # type: (DiffFunction, DiffFunction, float) -> None\n        if f1.ndim != f2.ndim:\n            raise ValueError('functions dimension mismatch.')\n\n        DiffFunction.__init__(self, _intersection(f1.input_ranges, f2.input_ranges), delta_list=None)\n        self._f1 = f1\n        self._f2 = f2\n        self._f2_sgn = f2_sgn\n\n    def __call__(self, xi):\n        return self._f1(xi) + self._f2_sgn * self._f2(xi)\n\n    def deriv(self, xi, j):\n        return self._f1.deriv(xi, j) + self._f2_sgn * self._f2.deriv(xi, j)\n\n    def jacobian(self, xi):\n        return self._f1.jacobian(xi) + self._f2_sgn * self._f2.jacobian(xi)\n\n\nclass ProdFunction(DiffFunction):\n    \"\"\"product of two DiffFunctions\n\n    Parameters\n    ----------\n    f1 : DiffFunction\n        the first function.\n    f2 : DiffFunction\n        the second function.\n    \"\"\"\n    def __init__(self, f1, f2):\n        # type: (DiffFunction, DiffFunction) -> None\n        if f1.ndim != f2.ndim:\n            raise ValueError('functions dimension mismatch.')\n\n    
    DiffFunction.__init__(self, _intersection(f1.input_ranges, f2.input_ranges), delta_list=None)\n        self._f1 = f1\n        self._f2 = f2\n\n    def __call__(self, xi):\n        return self._f1(xi) * self._f2(xi)\n\n    def deriv(self, xi, j):\n        return self._f1.deriv(xi, j) * self._f2(xi) + self._f1(xi) * self._f2.deriv(xi, j)\n\n    def jacobian(self, xi):\n        f1_val = self._f1(xi)[..., np.newaxis]\n        f2_val = self._f2(xi)[..., np.newaxis]\n        f1_jac = self._f1.jacobian(xi)\n        f2_jac = self._f2.jacobian(xi)\n        return f1_jac * f2_val + f1_val * f2_jac\n\n\nclass DivFunction(DiffFunction):\n    \"\"\"division of two DiffFunctions\n\n    Parameters\n    ----------\n    f1 : DiffFunction\n        the first function.\n    f2 : DiffFunction\n        the second function.\n    \"\"\"\n    def __init__(self, f1, f2):\n        # type: (DiffFunction, DiffFunction) -> None\n        if f1.ndim != f2.ndim:\n            raise ValueError('functions dimension mismatch.')\n\n        DiffFunction.__init__(self, _intersection(f1.input_ranges, f2.input_ranges), delta_list=None)\n        self._f1 = f1\n        self._f2 = f2\n\n    def __call__(self, xi):\n        return self._f1(xi) / self._f2(xi)\n\n    def deriv(self, xi, j):\n        f2_val = self._f2(xi)\n        return self._f1.deriv(xi, j) / f2_val - (self._f1(xi) * self._f2.deriv(xi, j) / (f2_val**2))\n\n    def jacobian(self, xi):\n        f1_val = self._f1(xi)[..., np.newaxis]\n        f2_val = self._f2(xi)[..., np.newaxis]\n        f1_jac = self._f1.jacobian(xi)\n        f2_jac = self._f2.jacobian(xi)\n\n        return f1_jac / f2_val - (f1_val * f2_jac) / (f2_val**2)\n\n\nclass PwrFunction(DiffFunction):\n    \"\"\"a DiffFunction raised to a power.\n\n    Parameters\n    ----------\n    f : DiffFunction\n        the DiffFunction.\n    pwr : float\n        the power.\n    scale : float\n        scaling factor.  
Used to implement a / x.\n    \"\"\"\n    def __init__(self, f, pwr, scale=1.0):\n        # type: (DiffFunction, float, float) -> None\n        DiffFunction.__init__(self, f.input_ranges, delta_list=None)\n        self._f = f\n        self._pwr = pwr\n        self._scale = scale\n\n    def __call__(self, xi):\n        return (self._f(xi) ** self._pwr) * self._scale\n\n    def deriv(self, xi, j):\n        return (self._f(xi) ** (self._pwr - 1) * self._pwr * self._f.deriv(xi, j)) * self._scale\n\n    def jacobian(self, xi):\n        f_val = self._f(xi)[..., np.newaxis]\n        f_jac = self._f.jacobian(xi)\n        return (f_jac * (f_val ** (self._pwr - 1) * self._pwr)) * self._scale\n\n\nclass VectorDiffFunction(object):\n    \"\"\"A differentiable vector function.\n\n    Parameters\n    ----------\n    fun_list : List[DiffFunction]\n        list of interpolator functions, one for each element of the output vector.\n    \"\"\"\n\n    def __init__(self, fun_list):\n        # type: (List[DiffFunction]) -> None\n        # error checking\n        if not fun_list:\n            raise ValueError('No interpolators are given.')\n\n        self._input_ranges = _intersection(*(f.input_ranges for f in fun_list))\n\n        self._in_dim = fun_list[0].ndim\n        for fun in fun_list:\n            if fun.ndim != self._in_dim:\n                raise ValueError('Interpolators input dimension mismatch.')\n\n        self._fun_list = fun_list\n        self._out_dim = len(fun_list)\n\n    @property\n    def in_dim(self):\n        # type: () -> int\n        \"\"\"Input dimension number.\"\"\"\n        return self._in_dim\n\n    @property\n    def out_dim(self):\n        # type: () -> int\n        \"\"\"Output dimension number.\"\"\"\n        return self._out_dim\n\n    def get_input_range(self, idx):\n        # type: (int) -> Tuple[Optional[float], Optional[float]]\n        \"\"\"Returns the input range of the given dimension.\"\"\"\n        return self._input_ranges[idx]\n\n    def 
__call__(self, xi):\n        \"\"\"Returns the output vector at the given coordinates.\n\n        Parameters\n        ----------\n        xi : array-like\n            The coordinates to evaluate, with shape (..., ndim)\n\n        Returns\n        -------\n        val : numpy.array\n            The interpolated values at the given coordinates.\n        \"\"\"\n        xi = np.asarray(xi, dtype=float)\n        shape_trunc = xi.shape[:-1]  # type: Tuple[int, ...]\n        ans = np.empty(shape_trunc + (self._out_dim, ))\n        for idx in range(self._out_dim):\n            ans[..., idx] = self._fun_list[idx](xi)\n        return ans\n\n    def jacobian(self, xi):\n        \"\"\"Calculate the Jacobian matrices of this function at the given coordinates.\n\n        Parameters\n        ----------\n        xi : array-like\n            The coordinates to evaluate, with shape (..., ndim)\n\n        Returns\n        -------\n        val : numpy.array\n            The jacobian matrix at the given coordinates.\n        \"\"\"\n        xi = np.asarray(xi, dtype=float)\n        shape_trunc = xi.shape[:-1]  # type: Tuple[int, ...]\n        ans = np.empty(shape_trunc + (self._out_dim, self._in_dim))\n        for m in range(self._out_dim):\n            ans[..., m, :] = self._fun_list[m].jacobian(xi)\n        return ans\n\n    def deriv(self, xi, i, j):\n        \"\"\"Compute the derivative of output i with respect to input j\n\n        Parameters\n        ----------\n        xi : array-like\n            The coordinates to evaluate, with shape (..., ndim)\n        i : int\n            output index.\n        j : int\n            input index.\n\n        Returns\n        -------\n        val : numpy.array\n            The derivatives at the given coordinates.\n        \"\"\"\n        return self._fun_list[i].deriv(xi, j)\n"
  },
  {
    "path": "bag/math/interpolate.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This module defines various interpolation classes.\n\"\"\"\n\nfrom typing import List, Tuple, Union, Sequence, Optional\n\nimport numpy as np\nimport scipy.interpolate as interp\nimport scipy.ndimage.interpolation as imag_interp\n\nfrom ..math.dfun import DiffFunction\n\n__author__ = 'erichang'\n__all__ = ['interpolate_grid', 'LinearInterpolator']\n\n\ndef _scales_to_points(scale_list, values, delta=1e-4):\n    # type: (List[Tuple[float, float]], np.multiarray.ndarray, float) -> Tuple[List[np.multiarray.ndarray], List[float]]\n    \"\"\"convert scale_list to list of point values and finite difference deltas.\"\"\"\n\n    ndim = len(values.shape)\n    # error checking\n    if ndim == 1:\n        raise ValueError('This class only works for dimension >= 2.')\n    elif ndim != len(scale_list):\n        raise ValueError('input and output dimension mismatch.')\n\n    points = []\n    delta_list = []\n    for idx in range(ndim):\n        num_pts = values.shape[idx]  # type: int\n        if num_pts < 2:\n            raise ValueError('Every dimension must have at least 2 points.')\n        offset, scale = scale_list[idx]\n        points.append(np.linspace(offset, (num_pts - 1) * scale + offset, num_pts))\n        delta_list.append(scale * delta)\n\n    return points, delta_list\n\n\ndef interpolate_grid(scale_list, values, method='spline',\n                     extrapolate=False, delta=1e-4, num_extrapolate=3):\n    # type: (List[Tuple[float, float]], np.multiarray.ndarray, str, bool, float, int) -> DiffFunction\n    \"\"\"Interpolates multidimensional data on a regular grid.\n\n    returns an Interpolator for the given dataset.\n\n    Parameters\n    ----------\n    scale_list : List[Tuple[float, float]]\n        a list of (offset, spacing).\n    values : np.multiarray.ndarray\n        The output data in N dimensions.  
The length in each dimension must\n        be at least 2.\n    method : str\n        The interpolation method.  Either 'linear', or 'spline'.\n        Defaults to 'spline'.\n    extrapolate : bool\n        True to extrapolate data output of given bounds.  Defaults to False.\n    delta : float\n        the finite difference step size.  Finite difference is only used for\n        linear interpolation and spline interpolation on 3D data or greater.\n        Defaults to 1e-4 of the grid spacing.\n    num_extrapolate: int\n        If spline interpolation is selected on 3D data or greater, we linearly\n        extrapolate the given data by this many points to fix behavior near\n        input boundaries.\n\n    Returns\n    -------\n    fun : DiffFunction\n        the interpolator function.\n    \"\"\"\n    ndim = len(values.shape)\n    if method == 'linear':\n        points, delta_list = _scales_to_points(scale_list, values, delta)\n        return LinearInterpolator(points, values, delta_list, extrapolate=extrapolate)\n    elif ndim == 1:\n        return Interpolator1D(scale_list, values, method=method, extrapolate=extrapolate)\n    elif method == 'spline':\n        if ndim == 2:\n            return Spline2D(scale_list, values, extrapolate=extrapolate)\n        else:\n            return MapCoordinateSpline(scale_list, values, delta=delta, extrapolate=extrapolate,\n                                       num_extrapolate=num_extrapolate)\n    else:\n        raise ValueError('Unsupported interpolation method: %s' % method)\n\n\nclass LinearInterpolator(DiffFunction):\n    \"\"\"A linear interpolator on a regular grid for arbitrary dimensions.\n\n    This class is backed by scipy.interpolate.RegularGridInterpolator.\n    Derivatives are calculated using finite difference.\n\n    Parameters\n    ----------\n    points : Sequence[np.multiarray.ndarray]\n        list of points of each dimension.\n    values : np.multiarray.ndarray\n        The output data in N dimensions.\n    
delta_list : List[float]\n        list of finite difference step size for each axis.\n    extrapolate : bool\n        True to extrapolate data output of given bounds.  Defaults to False.\n    \"\"\"\n\n    def __init__(self, points, values, delta_list, extrapolate=False):\n        # type: (Sequence[np.multiarray.ndarray], np.multiarray.ndarray, List[float], bool) -> None\n        input_range = [(pvec[0], pvec[-1]) for pvec in points]\n        DiffFunction.__init__(self, input_range, delta_list=delta_list)\n        self._points = points\n        self._extrapolate = extrapolate\n        self.fun = interp.RegularGridInterpolator(points, values, method='linear',\n                                                  bounds_error=not extrapolate,\n                                                  fill_value=None)\n\n    def get_input_points(self, idx):\n        # type: (int) -> np.multiarray.ndarray\n        \"\"\"Returns the input points for the given dimension.\"\"\"\n        return self._points[idx]\n\n    def __call__(self, xi):\n        \"\"\"Interpolate at the given coordinate.\n\n        Parameters\n        ----------\n        xi : numpy.array\n            The coordinates to evaluate, with shape (..., ndim)\n\n        Returns\n        -------\n        val : numpy.array\n            The interpolated values at the given coordinates.\n        \"\"\"\n        ans = self.fun(xi)\n        if ans.size == 1:\n            return ans[0]\n        return ans\n\n    def integrate(self, xstart, xstop, axis=-1, logx=False, logy=False, raw=False):\n        # type: (float, float, int, bool, bool, bool) -> Union[LinearInterpolator, np.ndarray]\n        \"\"\"Integrate away the given axis.\n\n        if logx/logy is True, that means this LinearInterpolator is actually used\n        to do linear interpolation on the logarithm of the actual data.  
This method\n        will returns the integral of the actual data.\n\n        Parameters\n        ----------\n        xstart : float\n            the X start value.\n        xstop : float\n            the X stop value.\n        axis : int\n            the axis of integration.\n            If unspecified, this will be the last axis.\n        logx : bool\n            True if the values on the given axis are actually the logarithm of\n            the real values.\n        logy : bool\n            True if the Y values are actually the logarithm of the real values.\n        raw : bool\n            True to return the raw data points instead of a LinearInterpolator object.\n\n        Returns\n        -------\n        result : Union[LinearInterpolator, np.ndarray]\n            float if this interpolator has only 1 dimension, otherwise a new\n            LinearInterpolator is returned.\n        \"\"\"\n        if self.delta_list is None:\n            raise ValueError(\"Finite differences must be enabled\")\n\n        if logx != logy:\n            raise ValueError('Currently only works for linear or log-log relationship.')\n\n        ndim = self.ndim\n        if axis < 0:\n            axis = ndim - 1\n        if axis < 0 or axis >= ndim:\n            raise IndexError('index out of range.')\n\n        if len(self._points) < ndim:\n            raise ValueError(\"len(self._points) != ndim\")\n\n        def calculate_integ_x() -> np.ndarray:\n            # find data points between xstart and xstop\n            vec = self._points[axis]\n            start_idx, stop_idx = np.searchsorted(vec, [xstart, xstop])\n\n            cur_len = stop_idx - start_idx\n            if vec[start_idx] > xstart:\n                cur_len += 1\n                istart = 1\n            else:\n                istart = 0\n            if vec[stop_idx - 1] < xstop:\n                cur_len += 1\n                istop = cur_len - 1\n            else:\n                istop = cur_len\n\n            integ_x = 
np.empty(cur_len)\n            integ_x[istart:istop] = vec[start_idx:stop_idx]\n            if istart != 0:\n                integ_x[0] = xstart\n\n            if istop != cur_len:\n                integ_x[cur_len - 1] = xstop\n\n            return integ_x\n\n        # get all input sample points we need to integrate.\n        plist = []\n        integ_x = calculate_integ_x()  # type: np.ndarray\n        new_points = []\n        new_deltas = []\n        for axis_idx, vec in enumerate(self._points):\n            if axis == axis_idx:\n                plist.append(integ_x)\n            else:\n                plist.append(vec)\n                new_points.append(vec)\n                new_deltas.append(self.delta_list[axis_idx])\n\n        fun_arg = np.stack(np.meshgrid(*plist, indexing='ij'), axis=-1)\n        values = self.fun(fun_arg)\n\n        if logx:\n            if axis != ndim - 1:\n                # transpose values so that broadcasting/slicing is easier\n                new_order = [idx for idx in range(ndim) if idx != axis]\n                new_order.append(axis)\n                values = np.transpose(values, axes=new_order)\n\n            # integrate given that log-log plot is piece-wise linear\n            ly1 = values[..., :-1]\n            ly2 = values[..., 1:]\n            lx1 = np.broadcast_to(integ_x[:-1], ly1.shape)\n            lx2 = np.broadcast_to(integ_x[1:], ly1.shape)\n            m = (ly2 - ly1) / (lx2 - lx1)\n\n            x1 = np.exp(lx1)\n            y1 = np.exp(ly1)\n            scale = y1 / np.power(x1, m)\n\n            log_idx = np.abs(m + 1) < 1e-6\n            log_idxb = np.invert(log_idx)\n            area = np.empty(m.shape)\n            area[log_idx] = scale[log_idx] * (lx2[log_idx] - lx1[log_idx])\n\n            mp1 = m[log_idxb] + 1\n            x2 = np.exp(lx2[log_idxb])\n            area[log_idxb] = scale[log_idxb] / mp1 * (np.power(x2, mp1) - np.power(x1[log_idxb], mp1))\n            new_values = np.sum(area, axis=-1)  # type: 
np.multiarray.ndarray\n        else:\n            # just use trapezoid integration\n            new_values = np.trapz(values, x=integ_x, axis=axis)  # type: np.multiarray.ndarray\n\n        if not raw and new_points:\n            return LinearInterpolator(new_points, new_values, new_deltas, extrapolate=self._extrapolate)\n        else:\n            return new_values\n\n\nclass Interpolator1D(DiffFunction):\n    \"\"\"An interpolator on a regular grid for 1 dimensional data.\n\n    This class is backed by scipy.interpolate.InterpolatedUnivariateSpline.\n\n    Parameters\n    ----------\n    scale_list : list[(float, float)]\n        a list of (offset, spacing) for each input dimension.\n    values : numpy.array\n        The output data.  Must be 1 dimension.\n    method : str\n        extrapolation method.  Either 'linear' or 'spline'.  Defaults to spline.\n    extrapolate : bool\n        True to extrapolate data output of given bounds.  Defaults to False.\n    \"\"\"\n\n    def __init__(self, scale_list, values, method='spline', extrapolate=False):\n        # error checking\n        if len(values.shape) != 1:\n            raise ValueError('This class only works for 1D data.')\n        elif len(scale_list) != 1:\n            raise ValueError('input and output dimension mismatch.')\n\n        if method == 'linear':\n            k = 1\n        elif method == 'spline':\n            k = 3\n        else:\n            raise ValueError('Unsuppoorted interpolation method: %s' % method)\n\n        offset, scale = scale_list[0]\n        num_pts = values.shape[0]\n        points = np.linspace(offset, (num_pts - 1) * scale + offset, num_pts)  # type: np.multiarray.ndarray\n\n        DiffFunction.__init__(self, [(points[0], points[-1])], delta_list=None)\n\n        ext = 0 if extrapolate else 2\n        self.fun = interp.InterpolatedUnivariateSpline(points, values, k=k, ext=ext)\n\n    def __call__(self, xi):\n        \"\"\"Interpolate at the given coordinate.\n\n        
Parameters\n        ----------\n        xi : numpy.array\n            The coordinates to evaluate, with shape (..., ndim)\n\n        Returns\n        -------\n        val : numpy.array\n            The interpolated values at the given coordinates.\n        \"\"\"\n        ans = self.fun(xi)\n        if ans.size == 1:\n            return ans[0]\n        return ans\n\n    def deriv(self, xi, idx):\n        \"\"\"Calculate the derivative of the spline along the given index.\n\n        Parameters\n        ----------\n        xi : numpy.array\n            The coordinates to evaluate, with shape (..., ndim)\n        idx : int\n            The index to calculate the derivative on.\n\n        Returns\n        -------\n        val : numpy.array\n            The derivatives at the given coordinates.\n        \"\"\"\n        if idx != 0:\n            raise ValueError('Invalid derivative index: %d' % idx)\n\n        ans = self.fun(xi, 1)\n        if ans.size == 1:\n            return ans[0]\n        return ans\n\n\nclass Spline2D(DiffFunction):\n    \"\"\"A spline interpolator on a regular grid for 2D data.\n\n    This class is backed by scipy.interpolate.RectBivariateSpline.\n\n    Parameters\n    ----------\n    scale_list : list[(float, float)]\n        a list of (offset, spacing) for each input dimension.\n    values : numpy.array\n        The output data.  Must be 2D.\n    extrapolate : bool\n        True to extrapolate data output of given bounds.  
Defaults to False.\n    \"\"\"\n\n    def __init__(self, scale_list, values, extrapolate=False):\n        # error checking\n        if len(values.shape) != 2:\n            raise ValueError('This class only works for 2D data.')\n        elif len(scale_list) != 2:\n            raise ValueError('input and output dimension mismatch.')\n\n        nx, ny = values.shape\n        offset, scale = scale_list[0]\n        x = np.linspace(offset, (nx - 1) * scale + offset, nx)  # type: np.multiarray.ndarray\n        offset, scale = scale_list[1]\n        y = np.linspace(offset, (ny - 1) * scale + offset, ny)  # type: np.multiarray.ndarray\n\n        self._min = x[0], y[0]\n        self._max = x[-1], y[-1]\n\n        DiffFunction.__init__(self, [(x[0], x[-1]), (y[0], y[-1])], delta_list=None)\n\n        self.fun = interp.RectBivariateSpline(x, y, values)\n        self._extrapolate = extrapolate\n\n    def _get_xy(self, xi):\n        \"\"\"Get X and Y array from given coordinates.\"\"\"\n        xi = np.asarray(xi, dtype=float)\n        if xi.shape[-1] != 2:\n            raise ValueError(\"The requested sample points xi have dimension %d, \"\n                             \"but this interpolator has dimension 2\" % (xi.shape[-1]))\n\n        # check input within bounds.\n        x = xi[..., 0]  # type: np.multiarray.ndarray\n        y = xi[..., 1]  # type: np.multiarray.ndarray\n        if not self._extrapolate and not np.all((self._min[0] <= x) & (x <= self._max[0]) &\n                                                (self._min[1] <= y) & (y <= self._max[1])):\n            raise ValueError('some inputs are out of bounds.')\n\n        return x, y\n\n    def __call__(self, xi):\n        \"\"\"Interpolate at the given coordinates.\n\n        Parameters\n        ----------\n        xi : numpy.array\n            The coordinates to evaluate, with shape (..., ndim)\n\n        Returns\n        -------\n        val : numpy.array\n            The interpolated values at the given 
coordinates.\n        \"\"\"\n        x, y = self._get_xy(xi)\n        return self.fun(x, y, grid=False)\n\n    def deriv(self, xi, idx):\n        \"\"\"Calculate the derivative of the spline along the given index.\n\n        Parameters\n        ----------\n        xi : numpy.array\n            The coordinates to evaluate, with shape (..., ndim)\n        idx : int\n            The index to calculate the derivative on.\n\n        Returns\n        -------\n        val : numpy.array\n            The derivatives at the given coordinates.\n        \"\"\"\n        if idx < 0 or idx > 1:\n            raise ValueError('Invalid derivative index: %d' % idx)\n\n        x, y = self._get_xy(xi)\n        if idx == 0:\n            return self.fun(x, y, dx=1, grid=False)\n        else:\n            return self.fun(x, y, dy=1, grid=False)\n\n\nclass MapCoordinateSpline(DiffFunction):\n    \"\"\"A spline interpolator on a regular grid for multidimensional data.\n\n    The spline interpolation is done using map_coordinate method in the\n    scipy.ndimage.interpolation package.  The derivative is done using\n    finite difference.\n\n    if extrapolate is True, we use linear interpolation for values outside of\n    bounds.\n\n    Note: By default, map_coordinate uses the nearest value for all points\n    outside the boundary.  This will cause undesired interpolation\n    behavior near boundary points.  To solve this, we linearly\n    extrapolates the given data for a fixed number of points.\n\n    Parameters\n    ----------\n    scale_list : list[(float, float)]\n        a list of (offset, spacing) for each input dimension.\n    values : numpy.array\n        The output data.\n    extrapolate : bool\n        True to linearly extrapolate outside of bounds.\n    num_extrapolate : int\n        number of points to extrapolate in each dimension in each direction.\n    delta : float\n        the finite difference step size.  
Defaults to 1e-4 (relative to a spacing of 1).\n    \"\"\"\n\n    def __init__(self, scale_list, values, extrapolate=False, num_extrapolate=3,\n                 delta=1e-4):\n        shape = values.shape\n        ndim = len(shape)\n\n        # error checking\n        if ndim < 3:\n            raise ValueError('Data must have 3 or more dimensions.')\n        elif ndim != len(scale_list):\n            raise ValueError('input and output dimension mismatch.')\n\n        self._scale_list = scale_list\n        self._max = [n - 1 + num_extrapolate for n in shape]\n        self._extrapolate = extrapolate\n        self._ext = num_extrapolate\n\n        # linearly extrapolate given values\n        ext_points = [np.arange(num_extrapolate, n + num_extrapolate) for n in shape]\n        points, delta_list = _scales_to_points(scale_list, values, delta)\n        input_ranges = [(pvec[0], pvec[-1]) for pvec in points]\n        self._extfun = LinearInterpolator(ext_points, values, [delta] * ndim, extrapolate=True)\n\n        xi_ext = np.stack(np.meshgrid(*(np.arange(0, n + 2 * num_extrapolate) for n in shape),\n                                      indexing='ij', copy=False), axis=-1)\n\n        values_ext = self._extfun(xi_ext)\n        self._filt_values = imag_interp.spline_filter(values_ext)\n\n        DiffFunction.__init__(self, input_ranges, delta_list=delta_list)\n\n    def _normalize_inputs(self, xi):\n        \"\"\"Normalize the inputs.\"\"\"\n        xi = np.asarray(xi, dtype=float)\n        if xi.shape[-1] != self.ndim:\n            raise ValueError(\"The requested sample points xi have dimension %d, \"\n                             \"but this interpolator has dimension %d\" % (xi.shape[-1], self.ndim))\n\n        xi = np.atleast_2d(xi.copy())\n        for idx, (offset, scale) in enumerate(self._scale_list):\n            xi[..., idx] -= offset\n            xi[..., idx] /= scale\n\n        # take extension input account.\n        xi += self._ext\n\n        return xi\n\n    
def __call__(self, xi):\n        \"\"\"Interpolate at the given coordinate.\n\n        Parameters\n        ----------\n        xi : numpy.array\n            The coordinates to evaluate, with shape (..., ndim)\n\n        Returns\n        -------\n        val : numpy.array\n            The interpolated values at the given coordinates.\n        \"\"\"\n        ext = self._ext\n        ndim = self.ndim\n        xi = self._normalize_inputs(xi)\n        ans_shape = xi.shape[:-1]\n        xi = xi.reshape(-1, ndim)\n\n        ext_idx_vec = False\n        for idx in range(self.ndim):\n            ext_idx_vec = ext_idx_vec | (xi[:, idx] < ext) | (xi[:, idx] > self._max[idx])\n\n        int_idx_vec = ~ext_idx_vec\n        xi_ext = xi[ext_idx_vec, :]\n        xi_int = xi[int_idx_vec, :]\n        ans = np.empty(xi.shape[0])\n        ans[int_idx_vec] = imag_interp.map_coordinates(self._filt_values, xi_int.T, mode='nearest', prefilter=False)\n        if xi_ext.size > 0:\n            if not self._extrapolate:\n                raise ValueError('some inputs are out of bounds.')\n            ans[ext_idx_vec] = self._extfun(xi_ext)\n\n        if ans.size == 1:\n            return ans[0]\n        return ans.reshape(ans_shape)\n"
  },
  {
    "path": "bag/mdao/LICENSE",
    "content": "BSD 3-Clause License\n\nCopyright (c) 2018, Regents of the University of California\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n  list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n  this list of conditions and the following disclaimer in the documentation\n  and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n  contributors may be used to endorse or promote products derived from\n  this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "bag/mdao/__init__.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This package contains various openmdao related modules.\n\"\"\"\n"
  },
  {
    "path": "bag/mdao/components.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This module defines various OpenMDAO component classes.\n\"\"\"\n\nimport numpy as np\nimport openmdao.api as omdao\n\n\nclass VecFunComponent(omdao.Component):\n    \"\"\"A component based on a list of functions.\n\n    A component that evaluates multiple functions on the given inputs, then\n    returns the result as an 1D array.  Each of the inputs may be a scalar or\n    a vector with the same size as the output.  If a vector input is given,\n    each function will use a different element of the vector.\n\n    Parameters\n    ----------\n    output_name : str\n        output name.\n    fun_list : list[bag.math.dfun.DiffFunction]\n        list of interpolator functions, one for each dimension.\n    params : list[str]\n        list of parameter names.  Parameter names may repeat, in which case the\n        same parameter will be used for multiple arguments of the function.\n    vector_params : set[str]\n        set of parameters that are vector instead of scalar.  
If a parameter\n        is a vector, it will be the same size as the output, and each function\n        only takes in the corresponding element of the parameter.\n    \"\"\"\n\n    def __init__(self, output_name, fun_list, params,\n                 vector_params=None):\n        omdao.Component.__init__(self)\n\n        vector_params = vector_params or set()\n\n        self._output = output_name\n        self._out_dim = len(fun_list)\n        self._in_dim = len(params)\n        self._params = params\n        self._unique_params = {}\n        self._fun_list = fun_list\n\n        for par in params:\n            adj = par in vector_params\n            shape = self._out_dim if adj else 1\n\n            if par not in self._unique_params:\n                # linear check, but small list so should be fine.\n                self.add_param(par, val=np.zeros(shape))\n                self._unique_params[par] = len(self._unique_params), adj\n\n        # construct chain rule jacobian matrix\n        self._chain_jacobian = np.zeros((self._in_dim, len(self._unique_params)))\n        for idx, par in enumerate(params):\n            self._chain_jacobian[idx, self._unique_params[par][0]] = 1\n\n        self.add_output(output_name, val=np.zeros(self._out_dim))\n\n    def __call__(self, **kwargs):\n        \"\"\"Evaluate on the given inputs.\n\n        Parameters\n        ----------\n        kwargs : dict[str, np.array or float]\n            the inputs as a dictionary.\n\n        Returns\n        -------\n        out : np.array\n            the output array.\n        \"\"\"\n        tmp = {}\n        self.solve_nonlinear(kwargs, tmp)\n        return tmp[self._output]\n\n    def _get_inputs(self, params):\n        \"\"\"Given parameter values, construct inputs for functions.\n\n        Parameters\n        ----------\n        params : VecWrapper, optional\n            VecWrapper containing parameters. 
(p)\n\n        Returns\n        -------\n        ans : list[list[float]]\n            input lists.\n        \"\"\"\n        ans = np.empty((self._out_dim, self._in_dim))\n        for idx, name in enumerate(self._params):\n            ans[:, idx] = params[name]\n        return ans\n\n    def solve_nonlinear(self, params, unknowns, resids=None):\n        \"\"\"Compute the output parameter.\n\n        Parameters\n        ----------\n        params : VecWrapper, optional\n            VecWrapper containing parameters. (p)\n\n        unknowns : VecWrapper, optional\n            VecWrapper containing outputs and states. (u)\n\n        resids : VecWrapper, optional\n            VecWrapper containing residuals. (r)\n        \"\"\"\n        xi_mat = self._get_inputs(params)\n\n        tmp = np.empty(self._out_dim)\n        for idx in range(self._out_dim):\n            tmp[idx] = self._fun_list[idx](xi_mat[idx, :])\n\n        unknowns[self._output] = tmp\n\n    def linearize(self, params, unknowns=None, resids=None):\n        \"\"\"Compute the Jacobian of the parameter.\n\n        Parameters\n        ----------\n        params : VecWrapper, optional\n            VecWrapper containing parameters. (p)\n\n        unknowns : VecWrapper, optional\n            VecWrapper containing outputs and states. (u)\n\n        resids : VecWrapper, optional\n            VecWrapper containing residuals. (r)\n        \"\"\"\n        # print('rank {} computing jac for {}'.format(self.comm.rank, self._outputs))\n\n        xi_mat = self._get_inputs(params)\n\n        jf = np.empty((self._out_dim, self._in_dim))\n        for k, fun in enumerate(self._fun_list):\n            jf[k, :] = fun.jacobian(xi_mat[k, :])\n\n        jmat = np.dot(jf, self._chain_jacobian)\n        jdict = {}\n        for par, (pidx, adj) in self._unique_params.items():\n            tmp = jmat[:, pidx]\n            if adj:\n                tmp = np.diag(tmp)\n            jdict[self._output, par] = tmp\n\n        return jdict\n"
  },
  {
    "path": "bag/mdao/core.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This module defines core BAG openmdao classes.\"\"\"\n\nimport numpy as np\nimport networkx as nx\nimport openmdao.api as omdao\n\nimport bag.util.parse\n\nfrom .components import VecFunComponent\n\n\nclass GroupBuilder(object):\n    \"\"\"A class that builds new OpenMDAO groups.\n\n    This class provides a simple interface to define new variables as function of\n    other variables, and it tracks the variable dependencies using a directed\n    acyclic graph.\n\n    \"\"\"\n\n    def __init__(self):\n        self._g = nx.DiGraph()\n        self._input_vars = set()\n\n    def _add_node(self, name, ndim, **kwargs):\n        \"\"\"Helper method to add a node and keep track of input variables.\"\"\"\n        self._g.add_node(name, ndim=ndim, **kwargs)\n        self._input_vars.add(name)\n\n    def _add_edge(self, parent, child):\n        \"\"\"Helper method to add an edge and update input variables.\"\"\"\n        self._g.add_edge(parent, child)\n        try:\n            self._input_vars.remove(child)\n        except KeyError:\n            pass\n\n    def get_inputs(self):\n        \"\"\"Returns a set of current input variable names.\n\n        Returns\n        -------\n        input_vars : set[str]\n            a set of input variable names.\n        \"\"\"\n        return self._input_vars.copy()\n\n    def get_variables(self):\n        \"\"\"Returns a list of variables.\n\n        Returns\n        -------\n        var_list : list[str]\n            a list of variables.\n        \"\"\"\n        return list(self._g.nodes_iter())\n\n    def get_variable_info(self, name):\n        \"\"\"Returns the range and dimension of the given variable.\n\n        Parameters\n        ----------\n        name : str\n            variable name.\n\n        Returns\n        -------\n        min : float\n            minimum value.\n        max : float\n            maximum value.\n        ndim : int\n            variable dimension.\n        
\"\"\"\n        nattr = self._g.node[name]\n        return nattr.copy()\n\n    def add_fun(self, var_name, fun_list, params, param_ranges, vector_params=None):\n        \"\"\"Add a new variable defined by the given list of functions.\n\n        Parameters\n        ----------\n        var_name : str\n            variable name.\n        fun_list : list[bag.math.interpolate.Interpolator]\n            list of functions, one for each dimension.\n        params : list[str]\n            list of parameter names.  Parameter names may repeat, in which case the\n            same parameter will be used for multiple arguments of the function.\n        param_ranges : dict[str, (float, float)]\n            a dictionary of parameter valid range.\n        vector_params : set[str]\n            set of parameters that are vector instead of scalar.  If a parameter\n            is a vector, it will be the same size as the output, and each function\n            only takes in the corresponding element of the parameter.\n        \"\"\"\n        vector_params = vector_params or set()\n        ndim = len(fun_list)\n\n        # error checking\n        for par in params:\n            if par not in param_ranges:\n                raise ValueError('Valid range of %s not specified.' % par)\n\n        # add inputs\n        for par, (par_min, par_max) in param_ranges.items():\n            par_dim = ndim if par in vector_params else 1\n            if par not in self._g:\n                # add input to graph if it's not in there.\n                self._add_node(par, par_dim)\n\n            nattrs = self._g.node[par]\n            if nattrs['ndim'] != par_dim:\n                # error checking.\n                raise ValueError('Variable %s has dimension mismatch.' 
% par)\n            # update input range\n            nattrs['min'] = max(par_min, nattrs.get('min', par_min))\n            nattrs['max'] = min(par_max, nattrs.get('max', par_max))\n\n        # add current variable\n        if var_name not in self._g:\n            self._add_node(var_name, ndim)\n\n        nattrs = self._g.node[var_name]\n        # error checking.\n        if nattrs['ndim'] != ndim:\n            raise ValueError('Variable %s has dimension mismatch.' % var_name)\n        if self._g.in_degree(var_name) > 0:\n            raise Exception('Variable %s already has other dependencies.' % var_name)\n\n        nattrs['fun_list'] = fun_list\n        nattrs['params'] = params\n        nattrs['vec_params'] = vector_params\n        for parent in param_ranges.keys():\n            self._add_edge(parent, var_name)\n\n    def add_var(self, variable, vmin, vmax, ndim=1):\n        \"\"\"Adds a new independent variable.\n\n        Parameters\n        ----------\n        variable : str\n            the variable to add\n        vmin : float\n            the minimum allowable value.\n        vmax : float\n            the maximum allowable value.\n        ndim : int\n            the dimension of the variable.  Defaults to 1.\n        \"\"\"\n        if variable in self._g:\n            raise Exception('Variable %s already exists.' 
% variable)\n        self._add_node(variable, ndim, min=vmin, max=vmax)\n\n    def set_input_limit(self, var, equals=None, lower=None, upper=None):\n        \"\"\"Sets the limit on the given input variable.\n\n        Parameters\n        ----------\n        var : str\n            name of the variable.\n        equals : float or None\n            if given, the equality value.\n        lower : float or None\n            if given, the minimum.\n        upper : float or None\n            if given, the maximum.\n        \"\"\"\n        if var in self._g:\n            if self._g.in_degree(var) > 0:\n                raise Exception('Variable %s is not an input variable' % var)\n            nattr = self._g.node[var]\n            if equals is not None:\n                nattr['equals'] = equals\n                lower = upper = equals\n            print(var, lower, upper)\n            if lower is not None:\n                nattr['min'] = max(nattr.get('min', lower), lower)\n            if upper is not None:\n                nattr['max'] = min(nattr.get('max', upper), upper)\n            print(var, nattr['min'], nattr['max'])\n\n    def add_expr(self, eqn, ndim):\n        \"\"\"Adds a new variable with the given expression.\n\n        Parameters\n        ----------\n        eqn : str\n            An equation of the form \"<var> = <expr>\", where var\n            is the output variable name, and expr is the expression.\n            All variables in expr must be already added.\n        ndim : int\n            the dimension of the output variable.\n        \"\"\"\n        variable, expr = eqn.split('=', 1)\n        variable = variable.strip()\n        expr = expr.strip()\n\n        if variable not in self._g:\n            self._add_node(variable, ndim)\n        nattrs = self._g.node[variable]\n        if nattrs['ndim'] != ndim:\n            raise Exception('Dimension mismatch for %s' % variable)\n        if self._g.in_degree(variable) > 0:\n            raise Exception('%s already 
depends on other variables' % variable)\n\n        invars = bag.util.parse.get_variables(expr)\n        for parent in invars:\n            if parent not in self._g:\n                raise Exception('Variable %s is not defined.' % parent)\n            self._add_edge(parent, variable)\n\n        nattrs['expr'] = expr\n\n    def build(self, debug=False):\n        \"\"\"Returns a OpenMDAO Group from the variable graph.\n\n        Parameters\n        ----------\n        debug : bool\n            True to print debug messages.\n\n        Returns\n        -------\n        grp : omdao.Group\n            the OpenMDAO group that computes all variables.\n        input_bounds : dict[str, any]\n            a dictionary from input variable name to (min, max, ndim) tuple.\n        \"\"\"\n        input_bounds = {}\n        ndim_dict = {}\n\n        if not nx.is_directed_acyclic_graph(self._g):\n            raise Exception('Dependency loop detected')\n\n        grp = omdao.Group()\n        prom = ['*']\n        for var in nx.topological_sort(self._g):\n            nattrs = self._g.node[var]\n            ndim = nattrs['ndim']\n            ndim_dict[var] = ndim\n            if self._g.in_degree(var) == 0:\n                if debug:\n                    # input variable\n                    print('Input variable: %s' % var)\n                # range checking\n                vmin, vmax = nattrs['min'], nattrs['max']\n                veq = nattrs.get('equals', None)\n                if vmin > vmax:\n                    raise Exception('Variable %s input range not valid.' 
% var)\n                input_bounds[var] = veq, vmin, vmax, ndim\n            else:\n                init_vals = {par: np.zeros(ndim_dict[par]) for par in self._g.predecessors_iter(var)}\n                comp_name = 'comp__%s' % var\n                if 'expr' in nattrs:\n                    eqn = '{}={}'.format(var, nattrs['expr'])\n                    init_vals[var] = np.zeros(ndim)\n                    # noinspection PyTypeChecker\n                    grp.add(comp_name, omdao.ExecComp(eqn, **init_vals), promotes=prom)\n                elif 'fun_list' in nattrs:\n                    params = nattrs['params']\n                    fun_list = nattrs['fun_list']\n                    vec_params = nattrs['vec_params']\n                    comp = VecFunComponent(var, fun_list, params, vector_params=vec_params)\n                    # noinspection PyTypeChecker\n                    grp.add(comp_name, comp, promotes=prom)\n                else:\n                    raise Exception('Unknown attributes: {}'.format(nattrs))\n\n        return grp, input_bounds\n"
  },
  {
    "path": "bag/simulation/LICENSE",
    "content": "BSD 3-Clause License\n\nCopyright (c) 2018, Regents of the University of California\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n  list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n  this list of conditions and the following disclaimer in the documentation\n  and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n  contributors may be used to endorse or promote products derived from\n  this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "bag/simulation/__init__.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This package defines various utility classes for running simulations and data post-processing.\n\"\"\""
  },
  {
    "path": "bag/simulation/core.py",
    "content": "# -*- coding: utf-8 -*-\n\nfrom typing import TYPE_CHECKING, Optional, Dict, Any, Tuple, List, Iterable, Sequence\n\nimport abc\nimport importlib\nimport itertools\nimport os\n\nimport yaml\n\nfrom bag import float_to_si_string\nfrom bag.io import read_yaml, open_file, load_sim_results, save_sim_results, load_sim_file\nfrom bag.layout import RoutingGrid, TemplateDB\nfrom bag.concurrent.core import batch_async_task\nfrom bag import BagProject\n\nif TYPE_CHECKING:\n    import numpy as np\n    from bag.core import Testbench\n\n\nclass TestbenchManager(object, metaclass=abc.ABCMeta):\n    \"\"\"A class that creates and setups up a testbench for simulation, then save the result.\n\n    This class is used by MeasurementManager to run simulations.\n\n    Parameters\n    ----------\n    data_fname : str\n        Simulation data file name.\n    tb_name : str\n        testbench name.\n    impl_lib : str\n        implementation library name.\n    specs : Dict[str, Any]\n        testbench specs.\n    sim_view_list : Sequence[Tuple[str, str]]\n        simulation view list\n    env_list : Sequence[str]\n        simulation environments list.\n    \"\"\"\n    def __init__(self,\n                 data_fname,  # type: str\n                 tb_name,  # type: str\n                 impl_lib,  # type: str\n                 specs,  # type: Dict[str, Any]\n                 sim_view_list,  # type: Sequence[Tuple[str, str]]\n                 env_list,  # type: Sequence[str]\n                 ):\n        # type: (...) 
-> None\n        self.data_fname = os.path.abspath(data_fname)\n        self.tb_name = tb_name\n        self.impl_lib = impl_lib\n        self.specs = specs\n        self.sim_view_list = sim_view_list\n        self.env_list = env_list\n\n    @abc.abstractmethod\n    def setup_testbench(self, tb):\n        # type: (Testbench) -> None\n        \"\"\"Configure the simulation state of the given testbench.\n\n        No need to call update_testbench(), set_simulation_environments(), and\n        set_simulation_view().  These are called for you.\n\n        Parameters\n        ----------\n        tb : Testbench\n            the simulation Testbench instance.\n        \"\"\"\n        pass\n\n    async def setup_and_simulate(self, prj: BagProject,\n                                 sch_params: Dict[str, Any]) -> Dict[str, Any]:\n        if sch_params is None:\n            print('loading testbench %s' % self.tb_name)\n            tb = prj.load_testbench(self.impl_lib, self.tb_name)\n        else:\n            print('Creating testbench %s' % self.tb_name)\n            tb = self._create_tb_schematic(prj, sch_params)\n\n        print('Configuring testbench %s' % self.tb_name)\n        tb.set_simulation_environments(self.env_list)\n        self.setup_testbench(tb)\n        for cell_name, view_name in self.sim_view_list:\n            tb.set_simulation_view(self.impl_lib, cell_name, view_name)\n        tb.update_testbench()\n\n        # run simulation and save/return raw result\n        print('Simulating %s' % self.tb_name)\n        save_dir = await tb.async_run_simulation()\n        print('Finished simulating %s' % self.tb_name)\n        results = load_sim_results(save_dir)\n        save_sim_results(results, self.data_fname)\n        return results\n\n    @classmethod\n    def record_array(cls, output_dict, data_dict, arr, arr_name, sweep_params):\n        # type: (Dict[str, Any], Dict[str, Any], np.ndarray, str, List[str]) -> None\n        \"\"\"Add the given numpy array into 
BAG's data structure dictionary.\n\n        This method adds the given numpy array to output_dict, and make sure\n        sweep parameter information are treated properly.\n\n        Parameters\n        ----------\n        output_dict : Dict[str, Any]\n            the output dictionary.\n        data_dict : Dict[str, Any]\n            the raw simulation data dictionary.\n        arr : np.ndarray\n            the numpy array to record.\n        arr_name : str\n            name of the given numpy array.\n        sweep_params : List[str]\n            a list of sweep parameters for thhe given array.\n        \"\"\"\n        if 'sweep_params' in output_dict:\n            swp_info = output_dict['sweep_params']\n        else:\n            swp_info = {}\n            output_dict['sweep_params'] = swp_info\n\n        # record sweep parameters information\n        for var in sweep_params:\n            if var not in output_dict:\n                output_dict[var] = data_dict[var]\n        swp_info[arr_name] = sweep_params\n        output_dict[arr_name] = arr\n\n    def _create_tb_schematic(self, prj, sch_params):\n        # type: (BagProject, Dict[str, Any]) -> Testbench\n        \"\"\"Helper method to create a testbench schematic.\n\n        Parmaeters\n        ----------\n        prj : BagProject\n            the BagProject instance.\n        sch_params : Dict[str, Any]\n            the testbench schematic parameters dictionary.\n\n        Returns\n        -------\n        tb : Testbench\n            the simulation Testbench instance.\n        \"\"\"\n        tb_lib = self.specs['tb_lib']\n        tb_cell = self.specs['tb_cell']\n        tb_sch = prj.create_design_module(tb_lib, tb_cell)\n        tb_sch.design(**sch_params)\n        tb_sch.implement_design(self.impl_lib, top_cell_name=self.tb_name)\n\n        return prj.configure_testbench(self.impl_lib, self.tb_name)\n\n\nclass MeasurementManager(object, metaclass=abc.ABCMeta):\n    \"\"\"A class that handles circuit 
performance measurement.\n\n    This class handles all the steps needed to measure a specific performance\n    metric of the device-under-test.  This may involve creating and simulating\n    multiple different testbenches, where configuration of successive testbenches\n    depends on previous simulation results. This class reduces the potentially\n    complex measurement tasks into a few simple abstract methods that designers\n    simply have to implement.\n\n    Parameters\n    ----------\n    data_dir : str\n        Simulation data directory.\n    meas_name : str\n        measurement setup name.\n    impl_lib : str\n        implementation library name.\n    specs : Dict[str, Any]\n        the measurement specification dictionary.\n    wrapper_lookup : Dict[str, str]\n        the DUT wrapper cell name lookup table.\n    sim_view_list : Sequence[Tuple[str, str]]\n        simulation view list\n    env_list : Sequence[str]\n        simulation environments list.\n    \"\"\"\n    def __init__(self,  # type: MeasurementManager\n                 data_dir,  # type: str\n                 meas_name,  # type: str\n                 impl_lib,  # type: str\n                 specs,  # type: Dict[str, Any]\n                 wrapper_lookup,  # type: Dict[str, str]\n                 sim_view_list,  # type: Sequence[Tuple[str, str]]\n                 env_list,  # type: Sequence[str]\n                 ):\n        # type: (...) 
-> None\n        self.data_dir = os.path.abspath(data_dir)\n        self.impl_lib = impl_lib\n        self.meas_name = meas_name\n        self.specs = specs\n        self.wrapper_lookup = wrapper_lookup\n        self.sim_view_list = sim_view_list\n        self.env_list = env_list\n\n        os.makedirs(self.data_dir, exist_ok=True)\n\n    @abc.abstractmethod\n    def get_initial_state(self):\n        # type: () -> str\n        \"\"\"Returns the initial FSM state.\"\"\"\n        return ''\n\n    # noinspection PyUnusedLocal\n    def get_testbench_info(self,  # type: MeasurementManager\n                           state,  # type: str\n                           prev_output,  # type: Optional[Dict[str, Any]]\n                           ):\n        # type: (...) -> Tuple[str, str, Dict[str, Any], Optional[Dict[str, Any]]]\n        \"\"\"Get information about the next testbench.\n\n        Override this method to perform more complex operations.\n\n        Parameters\n        ----------\n        state : str\n            the current FSM state.\n        prev_output : Optional[Dict[str, Any]]\n            the previous post-processing output.\n\n        Returns\n        -------\n        tb_name : str\n            cell name of the next testbench.  Should incorporate self.meas_name to avoid\n            collision with testbench for other designs.\n        tb_type : str\n            the next testbench type.\n        tb_specs : str\n            the testbench specification dictionary.\n        tb_params : Optional[Dict[str, Any]]\n            the next testbench schematic parameters.  
If we are reusing an existing\n            testbench, this should be None.\n        \"\"\"\n        tb_type = state\n        tb_name = self.get_testbench_name(tb_type)\n        tb_specs = self.get_testbench_specs(tb_type).copy()\n        tb_params = self.get_default_tb_sch_params(tb_type)\n\n        return tb_name, tb_type, tb_specs, tb_params\n\n    @abc.abstractmethod\n    def process_output(self, state, data, tb_manager):\n        # type: (str, Dict[str, Any], TestbenchManager) -> Tuple[bool, str, Dict[str, Any]]\n        \"\"\"Process simulation output data.\n\n        Parameters\n        ----------\n        state : str\n            the current FSM state\n        data : Dict[str, Any]\n            simulation data dictionary.\n        tb_manager : TestbenchManager\n            the testbench manager object.\n\n        Returns\n        -------\n        done : bool\n            True if this measurement is finished.\n        next_state : str\n            the next FSM state.\n        output : Dict[str, Any]\n            a dictionary containing post-processed data.\n        \"\"\"\n        return False, '', {}\n\n    def get_testbench_name(self, tb_type):\n        # type: (str) -> str\n        \"\"\"Returns a default testbench name given testbench type.\"\"\"\n        return '%s_TB_%s' % (self.meas_name, tb_type)\n\n    async def async_measure_performance(self,\n                                        prj: BagProject,\n                                        load_from_file: bool = False) -> Dict[str, Any]:\n        \"\"\"A coroutine that performs measurement.\n\n        The measurement is done like a FSM.  
On each iteration, depending on the current\n        state, it creates a new testbench (or reuse an existing one) and simulate it.\n        It then post-process the simulation data to determine the next FSM state, or\n        if the measurement is done.\n\n        Parameters\n        ----------\n        prj : BagProject\n            the BagProject instance.\n        load_from_file : bool\n            If True, then load existing simulation data instead of running actual simulation.\n\n        Returns\n        -------\n        output : Dict[str, Any]\n            the last dictionary returned by process_output().\n        \"\"\"\n        cur_state = self.get_initial_state()\n        prev_output = None\n        done = False\n\n        while not done:\n            # create and setup testbench\n            tb_name, tb_type, tb_specs, tb_sch_params = self.get_testbench_info(cur_state,\n                                                                                prev_output)\n\n            tb_package = tb_specs['tb_package']\n            tb_cls_name = tb_specs['tb_class']\n            tb_module = importlib.import_module(tb_package)\n            tb_cls = getattr(tb_module, tb_cls_name)\n            raw_data_fname = os.path.join(self.data_dir, '%s.hdf5' % cur_state)\n\n            tb_manager = tb_cls(raw_data_fname, tb_name, self.impl_lib, tb_specs,\n                                self.sim_view_list, self.env_list)\n\n            if load_from_file:\n                print('Measurement %s in state %s, '\n                      'load sim data from file.' 
% (self.meas_name, cur_state))\n                if os.path.isfile(raw_data_fname):\n                    cur_results = load_sim_file(raw_data_fname)\n                else:\n                    print('Cannot find data file, simulating...')\n                    cur_results = await tb_manager.setup_and_simulate(prj, tb_sch_params)\n            else:\n                cur_results = await tb_manager.setup_and_simulate(prj, tb_sch_params)\n\n            # process and save simulation data\n            print('Measurement %s in state %s, '\n                  'processing data from %s' % (self.meas_name, cur_state, tb_name))\n            done, next_state, prev_output = self.process_output(cur_state, cur_results, tb_manager)\n            with open_file(os.path.join(self.data_dir, '%s.yaml' % cur_state), 'w') as f:\n                yaml.dump(prev_output, f)\n\n            cur_state = next_state\n\n        return prev_output\n\n    def get_state_output(self, state):\n        # type: (str) -> Dict[str, Any]\n        \"\"\"Get the post-processed output of the given state.\"\"\"\n        file_name = os.path.join(self.data_dir, '%s.yaml' % state)\n        return read_yaml(file_name)\n\n    def get_testbench_specs(self, tb_type):\n        # type: (str) -> Dict[str, Any]\n        \"\"\"Helper method to get testbench specifications.\"\"\"\n        return self.specs['testbenches'][tb_type]\n\n    def get_default_tb_sch_params(self, tb_type):\n        # type: (str) -> Dict[str, Any]\n        \"\"\"Helper method to return a default testbench schematic parameters dictionary.\n\n        This method loads default values from specification file, the fill in dut_lib\n        and dut_cell for you.\n\n        Parameters\n        ----------\n        tb_type : str\n            the testbench type.\n\n        Returns\n        -------\n        sch_params : Dict[str, Any]\n            the default schematic parameters dictionary.\n        \"\"\"\n        tb_specs = self.get_testbench_specs(tb_type)\n     
   wrapper_type = tb_specs['wrapper_type']\n\n        if 'sch_params' in tb_specs:\n            tb_params = tb_specs['sch_params'].copy()\n        else:\n            tb_params = {}\n\n        tb_params['dut_lib'] = self.impl_lib\n        tb_params['dut_cell'] = self.wrapper_lookup[wrapper_type]\n        return tb_params\n\n\nclass DesignManager(object):\n    \"\"\"A class that manages instantiating design instances and running simulations.\n\n    This class provides various methods to allow you to sweep design parameters\n    and generate multiple instances at once.  It also provides methods for running\n    simulations and helps you interface with TestbenchManager instances.\n\n    Parameters\n    ----------\n    prj : Optional[BagProject]\n        The BagProject instance.\n    spec_file : str\n        the specification file name or the data directory.\n    \"\"\"\n\n    def __init__(self, prj, spec_file):\n        # type: (Optional[BagProject], str) -> None\n        self.prj = prj\n        self._specs = None\n\n        if os.path.isfile(spec_file):\n            self._specs = read_yaml(spec_file)\n            self._root_dir = os.path.abspath(self._specs['root_dir'])\n        elif os.path.isdir(spec_file):\n            self._root_dir = os.path.abspath(spec_file)\n            self._specs = read_yaml(os.path.join(self._root_dir, 'specs.yaml'))\n        else:\n            raise ValueError('%s is neither data directory or specification file.' 
% spec_file)\n\n        self._swp_var_list = tuple(sorted(self._specs['sweep_params'].keys()))\n\n    @classmethod\n    def load_state(cls, prj, root_dir):\n        # type: (BagProject, str) -> DesignManager\n        \"\"\"Create the DesignManager instance corresponding to data in the given directory.\"\"\"\n        return cls(prj, root_dir)\n\n    @classmethod\n    def get_measurement_name(cls, dsn_name, meas_type):\n        # type: (str, str) -> str\n        \"\"\"Returns the measurement name.\n\n        Parameters\n        ----------\n        dsn_name : str\n            design cell name.\n        meas_type : str\n            measurement type.\n\n        Returns\n        -------\n        meas_name : str\n            measurement name\n        \"\"\"\n        return '%s_MEAS_%s' % (dsn_name, meas_type)\n\n    @classmethod\n    def get_wrapper_name(cls, dut_name, wrapper_name):\n        # type: (str, str) -> str\n        \"\"\"Returns the wrapper cell name corresponding to the given DUT.\"\"\"\n        return '%s_WRAPPER_%s' % (dut_name, wrapper_name)\n\n    @property\n    def specs(self):\n        # type: () -> Dict[str, Any]\n        \"\"\"Return the specification dictionary.\"\"\"\n        return self._specs\n\n    @property\n    def swp_var_list(self):\n        # type: () -> Tuple[str, ...]\n        return self._swp_var_list\n\n    async def extract_design(self, lib_name: str, dsn_name: str,\n                             rcx_params: Optional[Dict[str, Any]]) -> None:\n        \"\"\"A coroutine that runs LVS/RCX on a given design.\n\n        Parameters\n        ----------\n        lib_name : str\n            library name.\n        dsn_name : str\n            design cell name.\n        rcx_params : Optional[Dict[str, Any]]\n            extraction parameters dictionary.\n        \"\"\"\n        print('Running LVS on %s' % dsn_name)\n        lvs_passed, lvs_log = await self.prj.async_run_lvs(lib_name, dsn_name)\n        if not lvs_passed:\n            raise 
ValueError('LVS failed for %s.  Log file: %s' % (dsn_name, lvs_log))\n\n        print('LVS passed on %s' % dsn_name)\n        print('Running RCX on %s' % dsn_name)\n        rcx_passed, rcx_log = await self.prj.async_run_rcx(lib_name, dsn_name,\n                                                           rcx_params=rcx_params)\n        if not rcx_passed:\n            raise ValueError('RCX failed for %s.  Log file: %s' % (dsn_name, rcx_log))\n        print('RCX passed on %s' % dsn_name)\n\n    async def verify_design(self, lib_name: str, dsn_name: str,\n                            load_from_file: bool = False) -> None:\n        \"\"\"Run all measurements on the given design.\n\n        Parameters\n        ----------\n        lib_name : str\n            library name.\n        dsn_name : str\n            design cell name.\n        load_from_file : bool\n            If True, then load existing simulation data instead of running actual simulation.\n        \"\"\"\n        meas_list = self.specs['measurements']\n        summary_fname = self.specs['summary_fname']\n        view_name = self.specs['view_name']\n        env_list = self.specs['env_list']\n        wrapper_list = self.specs['dut_wrappers']\n\n        wrapper_lookup = {'': dsn_name}\n        for wrapper_config in wrapper_list:\n            wrapper_type = wrapper_config['name']\n            wrapper_lookup[wrapper_type] = self.get_wrapper_name(dsn_name, wrapper_type)\n\n        result_summary = {}\n        dsn_data_dir = os.path.join(self._root_dir, dsn_name)\n        for meas_specs in meas_list:\n            meas_type = meas_specs['meas_type']\n            meas_package = meas_specs['meas_package']\n            meas_cls_name = meas_specs['meas_class']\n            out_fname = meas_specs['out_fname']\n            meas_name = self.get_measurement_name(dsn_name, meas_type)\n            data_dir = self.get_measurement_directory(dsn_name, meas_type)\n\n            meas_module = importlib.import_module(meas_package)\n     
       meas_cls = getattr(meas_module, meas_cls_name)\n\n            meas_manager = meas_cls(data_dir, meas_name, lib_name, meas_specs,\n                                    wrapper_lookup, [(dsn_name, view_name)], env_list)\n            print('Performing measurement %s on %s' % (meas_name, dsn_name))\n            meas_res = await meas_manager.async_measure_performance(self.prj,\n                                                                    load_from_file=load_from_file)\n            print('Measurement %s finished on %s' % (meas_name, dsn_name))\n\n            with open_file(os.path.join(data_dir, out_fname), 'w') as f:\n                yaml.dump(meas_res, f)\n            result_summary[meas_type] = meas_res\n\n        with open_file(os.path.join(dsn_data_dir, summary_fname), 'w') as f:\n            yaml.dump(result_summary, f)\n\n    async def main_task(self, lib_name: str, dsn_name: str,\n                        rcx_params: Optional[Dict[str, Any]],\n                        extract: bool = True,\n                        measure: bool = True,\n                        load_from_file: bool = False) -> None:\n        \"\"\"The main coroutine.\"\"\"\n        if extract:\n            await self.extract_design(lib_name, dsn_name, rcx_params)\n        if measure:\n            await self.verify_design(lib_name, dsn_name, load_from_file=load_from_file)\n\n    def characterize_designs(self, generate=True, measure=True, load_from_file=False):\n        # type: (bool, bool, bool) -> None\n        \"\"\"Sweep all designs and characterize them.\n\n        Parameters\n        ----------\n        generate : bool\n            If True, create schematic/layout and run LVS/RCX.\n        measure : bool\n            If True, run all measurements.\n        load_from_file : bool\n            If True, measurements will load existing simulation data\n            instead of running simulations.\n        \"\"\"\n        if generate:\n            extract = self.specs['view_name'] != 
'schematic'\n            self.create_designs(extract)\n        else:\n            extract = False\n\n        rcx_params = self.specs.get('rcx_params', None)\n        impl_lib = self.specs['impl_lib']\n        dsn_name_list = [self.get_design_name(combo_list)\n                         for combo_list in self.get_combinations_iter()]\n\n        coro_list = [self.main_task(impl_lib, dsn_name, rcx_params, extract=extract,\n                                    measure=measure, load_from_file=load_from_file)\n                     for dsn_name in dsn_name_list]\n\n        results = batch_async_task(coro_list)\n        if results is not None:\n            for val in results:\n                if isinstance(val, Exception):\n                    raise val\n\n    def get_result(self, dsn_name):\n        # type: (str) -> Dict[str, Any]\n        \"\"\"Returns the measurement result summary dictionary.\n\n        Parameters\n        ----------\n        dsn_name : str\n            the design name.\n\n        Returns\n        -------\n        result : Dict[str, Any]\n            the result dictionary.\n        \"\"\"\n        fname = os.path.join(self._root_dir, dsn_name, self.specs['summary_fname'])\n        summary = read_yaml(fname)\n\n        return summary\n\n    def test_layout(self, gen_sch=True):\n        # type: (bool) -> None\n        \"\"\"Create a test schematic and layout for debugging purposes\"\"\"\n\n        sweep_params = self.specs['sweep_params']\n        dsn_name = self.specs['dsn_basename'] + '_TEST'\n\n        val_list = tuple((sweep_params[key][0] for key in self.swp_var_list))\n        lay_params = self.get_layout_params(val_list)\n\n        temp_db = self.make_tdb()\n        print('create test layout')\n        sch_params_list = self.create_dut_layouts([lay_params], [dsn_name], temp_db)\n\n        if gen_sch:\n            print('create test schematic')\n            self.create_dut_schematics(sch_params_list, [dsn_name], gen_wrappers=False)\n        
print('done')\n\n    def create_designs(self, create_layout):\n        # type: (bool) -> None\n        \"\"\"Create DUT schematics/layouts.\n        \"\"\"\n        if self.prj is None:\n            raise ValueError('BagProject instance is not given.')\n\n        temp_db = self.make_tdb()\n\n        # make layouts\n        dsn_name_list, lay_params_list, combo_list_list = [], [], []\n        for combo_list in self.get_combinations_iter():\n            dsn_name = self.get_design_name(combo_list)\n            lay_params = self.get_layout_params(combo_list)\n            dsn_name_list.append(dsn_name)\n            lay_params_list.append(lay_params)\n            combo_list_list.append(combo_list)\n\n        if create_layout:\n            print('creating all layouts.')\n            sch_params_list = self.create_dut_layouts(lay_params_list, dsn_name_list, temp_db)\n        else:\n            print('schematic simulation, skipping layouts.')\n            sch_params_list = [self.get_schematic_params(combo_list)\n                               for combo_list in self.get_combinations_iter()]\n\n        print('creating all schematics.')\n        self.create_dut_schematics(sch_params_list, dsn_name_list, gen_wrappers=True)\n\n        print('design generation done.')\n\n    def get_swp_var_values(self, var):\n        # type: (str) -> List[Any]\n        \"\"\"Returns a list of valid sweep variable values.\n\n        Parameter\n        ---------\n        var : str\n            the sweep variable name.\n\n        Returns\n        -------\n        val_list : List[Any]\n            the sweep values of the given variable.\n        \"\"\"\n        return self.specs['sweep_params'][var]\n\n    def get_combinations_iter(self):\n        # type: () -> Iterable[Tuple[Any, ...]]\n        \"\"\"Returns an iterator of schematic parameter combinations we sweep over.\n\n        Returns\n        -------\n        combo_iter : Iterable[Tuple[Any, ...]]\n            an iterator of tuples of schematic 
parameters values that we sweep over.\n        \"\"\"\n\n        swp_par_dict = self.specs['sweep_params']\n        return itertools.product(*(swp_par_dict[var] for var in self.swp_var_list))\n\n    def get_dsn_name_iter(self):\n        # type: () -> Iterable[str]\n        \"\"\"Returns an iterator over design names.\n\n        Returns\n        -------\n        dsn_name_iter : Iterable[str]\n            an iterator of design names.\n        \"\"\"\n        return (self.get_design_name(combo_list) for combo_list in self.get_combinations_iter())\n\n    def get_measurement_directory(self, dsn_name, meas_type):\n        meas_name = self.get_measurement_name(dsn_name, meas_type)\n        return os.path.join(self._root_dir, dsn_name, meas_name)\n\n    def make_tdb(self):\n        # type: () -> TemplateDB\n        \"\"\"Create and return a new TemplateDB object.\n\n        Returns\n        -------\n        tdb : TemplateDB\n            the TemplateDB object.\n        \"\"\"\n        if self.prj is None:\n            raise ValueError('BagProject instance is not given.')\n\n        target_lib = self.specs['impl_lib']\n        grid_specs = self.specs['routing_grid']\n        layers = grid_specs['layers']\n        spaces = grid_specs['spaces']\n        widths = grid_specs['widths']\n        bot_dir = grid_specs['bot_dir']\n        width_override = grid_specs.get('width_override', None)\n\n        routing_grid = RoutingGrid(self.prj.tech_info, layers, spaces, widths, bot_dir, width_override=width_override)\n        tdb = TemplateDB('', routing_grid, target_lib, use_cybagoa=True)\n        return tdb\n\n    def get_layout_params(self, val_list):\n        # type: (Tuple[Any, ...]) -> Dict[str, Any]\n        \"\"\"Returns the layout dictionary from the given sweep parameter values.\"\"\"\n        lay_params = self.specs['layout_params'].copy()\n        for var, val in zip(self.swp_var_list, val_list):\n            lay_params[var] = val\n\n        return lay_params\n\n    def 
get_schematic_params(self, val_list):\n        # type: (Tuple[Any, ...]) -> Dict[str, Any]\n        \"\"\"Returns the schematic parameter dictionary from the given sweep parameter values.\"\"\"\n        lay_params = self.specs['schematic_params'].copy()\n        for var, val in zip(self.swp_var_list, val_list):\n            lay_params[var] = val\n\n        return lay_params\n\n    def create_dut_schematics(self, sch_params_list, cell_name_list, gen_wrappers=True):\n        # type: (Sequence[Dict[str, Any]], Sequence[str], bool) -> None\n        dut_lib = self.specs['dut_lib']\n        dut_cell = self.specs['dut_cell']\n        impl_lib = self.specs['impl_lib']\n        wrapper_list = self.specs['dut_wrappers']\n\n        inst_list, name_list = [], []\n        for sch_params, cur_name in zip(sch_params_list, cell_name_list):\n            dsn = self.prj.create_design_module(dut_lib, dut_cell)\n            dsn.design(**sch_params)\n            inst_list.append(dsn)\n            name_list.append(cur_name)\n            if gen_wrappers:\n                for wrapper_config in wrapper_list:\n                    wrapper_name = wrapper_config['name']\n                    wrapper_lib = wrapper_config['lib']\n                    wrapper_cell = wrapper_config['cell']\n                    wrapper_params = wrapper_config['params'].copy()\n                    wrapper_params['dut_lib'] = impl_lib\n                    wrapper_params['dut_cell'] = cur_name\n                    dsn = self.prj.create_design_module(wrapper_lib, wrapper_cell)\n                    dsn.design(**wrapper_params)\n                    inst_list.append(dsn)\n                    name_list.append(self.get_wrapper_name(cur_name, wrapper_name))\n\n        self.prj.batch_schematic(impl_lib, inst_list, name_list=name_list)\n\n    def create_dut_layouts(self, lay_params_list, cell_name_list, temp_db):\n        # type: (Sequence[Dict[str, Any]], Sequence[str], TemplateDB) -> Sequence[Dict[str, Any]]\n        \"\"\"Create multiple 
layouts\"\"\"\n        if self.prj is None:\n            raise ValueError('BagProject instance is not given.')\n\n        cls_package = self.specs['layout_package']\n        cls_name = self.specs['layout_class']\n\n        lay_module = importlib.import_module(cls_package)\n        temp_cls = getattr(lay_module, cls_name)\n\n        temp_list, sch_params_list = [], []\n        for lay_params in lay_params_list:\n            template = temp_db.new_template(params=lay_params, temp_cls=temp_cls, debug=False)\n            temp_list.append(template)\n            sch_params_list.append(template.sch_params)\n        temp_db.batch_layout(self.prj, temp_list, cell_name_list)\n        return sch_params_list\n\n    def get_design_name(self, combo_list):\n        # type: (Sequence[Any]) -> str\n        \"\"\"Generate cell names based on sweep parameter values.\"\"\"\n\n        name_base = self.specs['dsn_basename']\n        suffix = ''\n        for var, val in zip(self.swp_var_list, combo_list):\n            if isinstance(val, str):\n                suffix += '_%s_%s' % (var, val)\n            elif isinstance(val, int):\n                suffix += '_%s_%d' % (var, val)\n            elif isinstance(val, float):\n                suffix += '_%s_%s' % (var, float_to_si_string(val))\n            else:\n                raise ValueError('Unsupported parameter type: %s' % (type(val)))\n\n        return name_base + suffix\n"
  },
  {
    "path": "bag/simulation/core_v2.py",
    "content": "from __future__ import annotations\nfrom typing import (\n    TYPE_CHECKING, Optional, Dict, Any, Type, cast, List\n)\n\nimport abc\nfrom pathlib import Path\nimport numpy as np\n\nfrom ..io.sim_data import load_sim_results, save_sim_results, load_sim_file\nfrom ..concurrent.core import batch_async_task\nfrom ..core import _import_class_from_str\nfrom ..util.immutable import to_immutable\n\nif TYPE_CHECKING:\n    from ..core import BagProject\n    from ..core import Testbench\n    from bag.util.immutable import ImmutableType\n\n\nclass TestbenchManager(abc.ABC):\n    \"\"\"A class that creates and setups up a testbench for simulation, then save the result.\n\n    This class is used by MeasurementManager to run simulations.\n\n    Parameters\n    ----------\n    work_dir : Path\n        working directory path.\n    \"\"\"\n\n    def __init__(self, work_dir: Path) -> None:\n        self._work_dir = work_dir.resolve()\n        self._work_dir.mkdir(parents=True, exist_ok=True)\n        self._specs = None\n\n    @property\n    def work_dir(self) -> Path:\n        return self._work_dir\n\n    @property\n    def specs(self):\n        return self._specs\n\n    @property\n    def sim_vars(self):\n        return self.specs.get('sim_vars', {})\n\n    # noinspection PyMethodMayBeStatic\n    def pre_setup(self, tb_params: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]:\n        \"\"\"Override to perform any operations prior to calling the setup() function.\n\n        Parameters\n        ----------\n        tb_params :\n            the test bench schematic parameters.  None means the previous test bench will be reused.\n            This dictionary should not be modified.\n\n        Returns\n        -------\n        new_params :\n            the schematic parameters to use.  
Could be a modified copy of the original.\n        \"\"\"\n        return tb_params\n\n    def setup(self, bprj, impl_lib, impl_cell, sim_view_list, env_list,\n              tb_dict, wrapper_dict=None, gen_tb=True, gen_wrapper=True,\n              run_sim=True) -> Optional[Testbench]:\n        tb_dict = self.pre_setup(tb_dict)\n        self._specs = tb_dict\n\n        if wrapper_dict is None:\n            wrapper_dict = tb_dict.pop('wrapper', None)\n        has_wrapper = wrapper_dict is not None\n        wrapper_lib = wrapper_cell = wrapped_cell = wrapper_params = None\n        if has_wrapper:\n            wrapper_lib = wrapper_dict['wrapper_lib']\n            wrapper_cell = wrapper_dict['wrapper_cell']\n            wrapper_params = wrapper_dict.get('params', {})\n            wrapper_suffix = wrapper_dict.get('wrapper_suffix', '')\n            if not wrapper_suffix:\n                wrapper_suffix = f'{wrapper_cell}'\n            wrapped_cell = f'{impl_cell}_{wrapper_suffix}'\n\n        tb_lib = tb_dict['tb_lib']\n        tb_cell = tb_dict['tb_cell']\n        tb_params = tb_dict.get('tb_params', {})\n        tb_suffix = tb_dict.get('tb_suffix', '')\n        if not tb_suffix:\n            tb_suffix = f'{tb_cell}'\n        tb_name = f'{impl_cell}_{tb_suffix}'\n\n        if has_wrapper and gen_wrapper:\n            print(f'Generating wrapper {impl_lib}_{wrapped_cell}')\n            master = bprj.create_design_module(lib_name=wrapper_lib, cell_name=wrapper_cell)\n            bprj.replace_dut_in_wrapper(wrapper_params, impl_lib, impl_cell)\n            master.design(**wrapper_params)\n            master.implement_design(impl_lib, wrapped_cell)\n            print('wrapper generated.')\n\n        if gen_tb:\n            print(f'Generating testbench {impl_cell}_{tb_name}')\n            tb_master = bprj.create_design_module(tb_lib, tb_cell)\n            dut_cell = wrapped_cell if has_wrapper else impl_cell\n            tb_master.design(dut_lib=impl_lib, dut_cell=dut_cell, 
**tb_params)\n            tb_master.implement_design(impl_lib, tb_name)\n            print('testbench generated.')\n            tb = bprj.configure_testbench(impl_lib, tb_name)\n        else:\n            if run_sim:\n                print(f'loading testbench {impl_lib}_{tb_name}')\n                tb = bprj.load_testbench(impl_lib, tb_name)\n            else:\n                return None\n\n        print(f'Configuring testbench {tb_name}')\n\n        sim_swp_params = tb_dict.get('sim_swp_params', {})\n        sim_vars = tb_dict.get('sim_vars', {})\n        sim_outputs = tb_dict.get('sim_outputs', {})\n\n        tb.set_simulation_environments(env_list)\n\n        for cell_name, view_name in sim_view_list:\n            tb.set_simulation_view(impl_lib, cell_name, view_name)\n\n        for key, val in sim_vars.items():\n            tb.set_parameter(key, val)\n\n        for key, val in sim_swp_params.items():\n            tb.set_sweep_parameter(key, **val)\n\n        for key, val in sim_outputs.items():\n            tb.add_output(key, val)\n\n        tb.update_testbench()\n        print(f'Testbench configured.')\n        return tb\n\n    async def setup_and_simulate(self, bprj, impl_lib, impl_cell, sim_view_list, env_list, tb_dict,\n                                 wrapper_dict, gen_tb, gen_wrapper, run_sim):\n        tb: Testbench = self.setup(bprj, impl_lib=impl_lib, impl_cell=impl_cell,\n                                   sim_view_list=sim_view_list, env_list=env_list,\n                                   tb_dict=tb_dict, wrapper_dict=wrapper_dict, gen_tb=gen_tb,\n                                   gen_wrapper=gen_wrapper, run_sim=run_sim)\n        if run_sim:\n            print(f'Simulating {tb.cell}')\n            save_dir = await tb.async_run_simulation()\n            print(f'Finished simulating {tb.cell}')\n            results = load_sim_results(save_dir)\n            results_dir = str(self.work_dir / impl_cell / f'{tb.cell}_data.hdf5')\n            
save_sim_results(results, results_dir)\n            return results\n\n    def simulate(self, bprj, impl_lib, impl_cell, sim_view_list, env_list, tb_dict,\n                 wrapper_dict=None, gen_tb=True, gen_wrapper=True, run_sim=True):\n        coro = self.setup_and_simulate(bprj, impl_lib=impl_lib, impl_cell=impl_cell,\n                                       sim_view_list=sim_view_list, env_list=env_list,\n                                       tb_dict=tb_dict, wrapper_dict=wrapper_dict, gen_tb=gen_tb,\n                                       gen_wrapper=gen_wrapper, run_sim=run_sim)\n        results = batch_async_task([coro])[0]\n        if isinstance(results, Exception):\n            raise results\n        return results\n\n    def load_results(self, impl_cell, tb_dict):\n        self._specs = tb_dict\n        tb_cell = tb_dict['tb_cell']\n        tb_suffix = tb_dict.get('tb_suffix', '')\n        if not tb_suffix:\n            tb_suffix = f'{tb_cell}'\n        tb_name = f'{impl_cell}_{tb_suffix}'\n        tb_fname = self.work_dir / impl_cell / f'{tb_name}_data.hdf5'\n        if tb_fname.exists():\n            return load_sim_file(str(tb_fname))\n        raise ValueError(f'simulation results does not exist in {str(tb_fname)}')\n\n\nclass MeasurementManager(abc.ABC):\n\n    def __init__(self, work_dir: Path, mm_specs: Dict[str, Any]) -> None:\n        self._work_dir = work_dir\n        self._specs = mm_specs\n\n        self.tb_managers: Dict[str, TestbenchManager] = {}\n        self.tb_params: Dict[str, Dict[str, Any]] = {}\n        self._wrapper_lookup: Dict[ImmutableType, Dict[str, Any]] = {}\n\n        # fill up tb_managers and tb_params\n        self._prepare_tb_specs()\n\n        self.gen_wrapper: bool = True\n        self.gen_tb: bool = True\n        self.run_sims: bool = True\n\n    @property\n    def specs(self):\n        return self._specs\n\n    @property\n    def work_dir(self):\n        return self._work_dir\n\n    def _prepare_tb_specs(self) -> 
None:\n        # creates testbench manager objects and fills up the mappings\n        testbenches = self.specs['testbenches']\n        for tb_name, tb_dict in testbenches.items():\n            tbm_cls = _import_class_from_str(tb_dict['tbm_cls'])\n            tbm_cls = cast(Type[TestbenchManager], tbm_cls)\n            self.tb_params[tb_name] = tb_dict\n            self.tb_managers[tb_name] = tbm_cls(self._work_dir)\n\n    def _prepare_tbm_dict(self, impl_cell, tbm_dict, extract):\n        # adds sim_view_list and env_list to tbm_dict if they don't exist, so that after this\n        # function there must be sim_view_list and sim_envs entries in tbm_dict\n        if 'sim_view_list' not in tbm_dict:\n            try:\n                view_name = self.specs['view_name']\n                tbm_dict['sim_view_list'] = [(impl_cell, view_name)]\n            except KeyError:\n                default_sim_view_list = self.specs.get('sim_view_list', [])\n                if not default_sim_view_list:\n                    view_name = 'netlist' if extract else 'schematic'\n                    default_sim_view_list.append((impl_cell, view_name))\n                tbm_dict['sim_view_list'] = default_sim_view_list\n        if 'sim_envs' not in tbm_dict:\n            try:\n                default_env_list = self.specs['sim_envs']\n                tbm_dict['sim_envs'] = default_env_list\n            except KeyError:\n                raise ValueError('Did you forget to specify simulation environment?')\n\n    def _wrapper_exists(self, wrapper: ImmutableType) -> bool:\n        # checks if the wrapper (around impl_lib, impl_cell) has been created to avoid recreation\n        return wrapper in self._wrapper_lookup\n\n    def run_tb(self, bprj, impl_lib, impl_cell, tb_name, tbm_dict=None, extract=True,\n               load_results=False):\n        # if tb_dict is None the default tb_dict is used\n        if tbm_dict is None:\n            tbm_dict = self.tb_params[tb_name]\n        tb_obj: 
TestbenchManager = self.tb_managers[tb_name]\n\n        if load_results:\n            return tb_obj.load_results(impl_cell, tbm_dict)\n\n        wrapper = tbm_dict['wrapper']\n        wrapper_key = to_immutable(wrapper)\n        gen_wrapper = not self._wrapper_exists(wrapper_key)\n        gen_wrapper = self.gen_wrapper and gen_wrapper\n\n        # inherit default sim_envs and sim_view_list from self.specs\n        self._prepare_tbm_dict(impl_cell, tbm_dict, extract)\n\n        sim_view_list = tbm_dict['sim_view_list']\n        sim_envs = tbm_dict['sim_envs']\n\n        results = tb_obj.simulate(bprj, impl_lib, impl_cell, sim_view_list=sim_view_list,\n                                  env_list=sim_envs, tb_dict=tbm_dict, gen_tb=self.gen_tb,\n                                  gen_wrapper=gen_wrapper, run_sim=self.run_sims)\n\n        if not gen_wrapper:\n            self._wrapper_lookup[wrapper_key] = wrapper\n\n        return results\n\n    @abc.abstractmethod\n    def run_flow(self, bprj: BagProject, impl_lib: str, impl_cell: str,\n                 load_results: bool = False, extract: bool = True) -> Any:\n        \"\"\"\n        Defines the FSM in code rather than passing state indicators through a dictionary\n        use self.run_tb to orchestrate test benches and modify their parameters if necessary\n\n        Don't call this method directly, call measure instead\n\n        Parameters\n        ----------\n        bprj: BagProject\n            BagProject object\n        impl_lib:\n            DUT implementation library\n        impl_cell\n            DUT implementation cell\n        load_results:\n            True to load results, this is used when debugging post processing functions\n        extract:\n            True to use post-layout extracted view for simulations\n\n        Returns\n        -------\n            Any post processed result, even returning nothing is also an option\n        \"\"\"\n\n        raise NotImplementedError\n\n    def measure(self, 
bprj: BagProject, impl_lib: str, impl_cell: str, load_results: bool = False,\n                gen_wrapper: bool = True, gen_tb: bool = True, run_sims: bool = True,\n                extract: bool = True) -> Any:\n        self.gen_wrapper = gen_wrapper\n        self.gen_tb = gen_tb\n        self.run_sims = run_sims\n\n        return self.run_flow(bprj, impl_lib, impl_cell, load_results, extract)\n"
  },
  {
    "path": "bag/tech/LICENSE",
    "content": "BSD 3-Clause License\n\nCopyright (c) 2018, Regents of the University of California\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n  list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n  this list of conditions and the following disclaimer in the documentation\n  and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n  contributors may be used to endorse or promote products derived from\n  this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "bag/tech/__init__.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This package contains various technology related utilities, such as transistor characterization.\n\"\"\""
  },
  {
    "path": "bag/tech/core.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This module contains commonly used technology related classes and functions.\n\"\"\"\n\nimport os\nimport abc\nimport itertools\nfrom typing import List, Union, Tuple, Dict, Any, Optional, Set\n\nimport numpy as np\nimport h5py\nimport openmdao.api as omdao\n\nfrom bag.core import BagProject\nfrom ..math.interpolate import interpolate_grid\nfrom bag.math.dfun import VectorDiffFunction, DiffFunction\nfrom ..mdao.core import GroupBuilder\nfrom ..io import fix_string, to_bytes\nfrom ..simulation.core import SimulationManager\n\n\ndef _equal(a, b, rtol, atol):\n    \"\"\"Returns True if a == b.  a and b are both strings, floats or numpy arrays.\"\"\"\n    # python 2/3 compatibility: convert raw bytes to string\n    a = fix_string(a)\n    b = fix_string(b)\n\n    if isinstance(a, str):\n        return a == b\n    return np.allclose(a, b, rtol=rtol, atol=atol)\n\n\ndef _equal_list(a, b, rtol, atol):\n    \"\"\"Returns True if a == b.  a and b are list of strings/floats/numpy arrays.\"\"\"\n    if len(a) != len(b):\n        return False\n    for a_item, b_item in zip(a, b):\n        if not _equal(a_item, b_item, rtol, atol):\n            return False\n    return True\n\n\ndef _index_in_list(item_list, item, rtol, atol):\n    \"\"\"Returns index of item in item_list, with tolerance checking for floats.\"\"\"\n    for idx, test in enumerate(item_list):\n        if _equal(test, item, rtol, atol):\n            return idx\n    return -1\n\n\ndef _in_list(item_list, item, rtol, atol):\n    \"\"\"Returns True if item is in item_list, with tolerance checking for floats.\"\"\"\n    return _index_in_list(item_list, item, rtol, atol) >= 0\n\n\nclass CircuitCharacterization(SimulationManager, metaclass=abc.ABCMeta):\n    \"\"\"A class that handles characterization of a circuit.\n\n    This class sweeps schematic parameters and run a testbench with a single analysis.\n    It will then save the simulation data in a format CharDB 
understands.\n\n    For now, this class will overwrite existing data, so please backup if you need to.\n\n    Parameters\n    ----------\n    prj : BagProject\n        the BagProject instance.\n    spec_file : str\n        the SimulationManager specification file.\n    tb_type : str\n        the testbench type name.  The parameter dictionary corresponding to this\n        testbench should have the following entries (in addition to those required\n        by Simulation Manager:\n\n        outputs :\n            list of testbench output names to save.\n        constants :\n            constant values used to identify this simulation run.\n        sweep_params:\n            a dictionary from testbench parameters to (start, stop, num_points)\n            sweep tuple.\n\n    compression : str\n        HDF5 compression method.\n    \"\"\"\n\n    def __init__(self, prj, spec_file, tb_type, compression='gzip'):\n        super(CircuitCharacterization, self).__init__(prj, spec_file)\n        self._compression = compression\n        self._outputs = self.specs[tb_type]['outputs']\n        self._constants = self.specs[tb_type]['constants']\n        self._sweep_params = self.specs[tb_type]['sweep_params']\n\n    def record_results(self, data, tb_type, val_list):\n        # type: (Dict[str, Any], str, Tuple[Any, ...]) -> None\n        \"\"\"Record simulation results to file.\n\n        Override implementation in SimulationManager in order to save data\n        in a format that CharDB understands.\n        \"\"\"\n        env_list = self.specs['sim_envs']\n\n        tb_specs = self.specs[tb_type]\n        results_dir = tb_specs['results_dir']\n\n        os.makedirs(results_dir, exist_ok=True)\n        fname = os.path.join(results_dir, 'data.hdf5')\n\n        with h5py.File(fname, 'w') as f:\n            for key, val in self._constants.items():\n                f.attrs[key] = val\n            for key, val in self._sweep_params.items():\n                f.attrs[key] = val\n\n        
    for env in env_list:\n                env_result, sweep_list = self._get_env_result(data, env)\n\n                grp = f.create_group('%d' % len(f))\n                for key, val in zip(self.swp_var_list, val_list):\n                    grp.attrs[key] = val\n                # h5py workaround: explicitly store strings as encoded unicode data\n                grp.attrs['env'] = to_bytes(env)\n                grp.attrs['sweep_params'] = [to_bytes(swp) for swp in sweep_list]\n\n                for name, val in env_result.items():\n                    grp.create_dataset(name, data=val, compression=self._compression)\n\n    def get_sim_results(self, tb_type, val_list):\n        # type: (str, Tuple[Any, ...]) -> Dict[str, Any]\n        # TODO: implement this.\n        raise NotImplementedError('not implemented yet.')\n\n    def _get_env_result(self, sim_results, env):\n        \"\"\"Extract results from a given simulation environment from the given data.\n\n        all output sweep parameter order and data shape must be the same.\n\n        Parameters\n        ----------\n        sim_results : dict[string, any]\n            the simulation results dictionary\n        env : str\n            the target simulation environment\n\n        Returns\n        -------\n        results : dict[str, any]\n            the results from a given simulation environment.\n        sweep_list : list[str]\n            a list of sweep parameter order.\n        \"\"\"\n        if 'corner' not in sim_results:\n            # no corner sweep anyways\n            results = {output: sim_results[output] for output in self._outputs}\n            sweep_list = sim_results['sweep_params'][self._outputs[0]]\n            return results, sweep_list\n\n        corner_list = sim_results['corner'].tolist()\n        results = {}\n        # we know all sweep order and shape is the same.\n        test_name = self._outputs[0]\n        sweep_list = list(sim_results['sweep_params'][test_name])\n        shape = 
sim_results[test_name].shape\n        # make numpy array slice index list\n        index_list = [slice(0, l) for l in shape]\n        if 'corner' in sweep_list:\n            idx = sweep_list.index('corner')\n            index_list[idx] = corner_list.index(env)\n            del sweep_list[idx]\n\n        # store outputs in results\n        for output in self._outputs:\n            results[output] = sim_results[output][index_list]\n\n        return results, sweep_list\n\n\nclass CharDB(abc.ABC):\n    \"\"\"The abstract base class of a database of characterization data.\n\n    This class provides useful query/optimization methods and ways to store/retrieve\n    data.\n\n    Parameters\n    ----------\n    root_dir : str\n        path to the root characterization data directory.  Supports environment variables.\n    constants : Dict[str, Any]\n        constants dictionary.\n    discrete_params : List[str]\n        a list of parameters that should take on discrete values.\n    init_params : Dict[str, Any]\n        a dictionary of initial parameter values.  All parameters should be specified,\n        and None should be used if the parameter value is not set.\n    env_list : List[str]\n        list of simulation environments to consider.\n    update : bool\n        By default, CharDB saves and load post-processed data directly.  If update is True,\n        CharDB will update the post-process data from raw simulation data. Defaults to\n        False.\n    rtol : float\n        relative tolerance used to compare constants/sweep parameters/sweep attributes.\n    atol : float\n        relative tolerance used to compare constants/sweep parameters/sweep attributes.\n    compression : str\n        HDF5 compression method.  Used only during post-processing.\n    method : str\n        interpolation method.\n    opt_package : str\n        default Python optimization package.  Supports 'scipy' or 'pyoptsparse'.  
Defaults\n        to 'scipy'.\n    opt_method : str\n        default optimization method.  Valid values depends on the optimization package.\n        Defaults to 'SLSQP'.\n    opt_settings : Optional[Dict[str, Any]]\n        optimizer specific settings.\n    \"\"\"\n\n    def __init__(self,  # type: CharDB\n                 root_dir,  # type: str\n                 constants,  # type: Dict[str, Any]\n                 discrete_params,  # type: List[str]\n                 init_params,  # type: Dict[str, Any]\n                 env_list,  # type: List[str]\n                 update=False,  # type: bool\n                 rtol=1e-5,  # type: float\n                 atol=1e-18,  # type: float\n                 compression='gzip',  # type: str\n                 method='spline',  # type: str\n                 opt_package='scipy',  # type: str\n                 opt_method='SLSQP',  # type: str\n                 opt_settings=None,  # type: Optional[Dict[str, Any]]\n                 **kwargs\n                 ):\n        # type: (...) -> None\n\n        root_dir = os.path.abspath(os.path.expandvars(root_dir))\n\n        if not os.path.isdir(root_dir):\n            # error checking\n            raise ValueError('Directory %s not found.' 
% root_dir)\n        if 'env' in discrete_params:\n            discrete_params.remove('env')\n\n        if opt_settings is None:\n            opt_settings = {}\n        else:\n            pass\n\n        if opt_method == 'IPOPT' and not opt_settings:\n            # set default IPOPT settings\n            opt_settings['option_file_name'] = ''\n\n        self._discrete_params = discrete_params\n        self._params = init_params.copy()\n        self._env_list = env_list\n        self._config = dict(opt_package=opt_package,\n                            opt_method=opt_method,\n                            opt_settings=opt_settings,\n                            rtol=rtol,\n                            atol=atol,\n                            method=method,\n                            )\n\n        cache_fname = self.get_cache_file(root_dir, constants)\n        if not os.path.isfile(cache_fname) or update:\n            sim_fname = self.get_sim_file(root_dir, constants)\n            results = self._load_sim_data(sim_fname, constants, discrete_params)\n            sim_data, total_params, total_values, self._constants = results\n            self._data = self.post_process_data(sim_data, total_params, total_values, self._constants)\n\n            # save to cache\n            with h5py.File(cache_fname, 'w') as f:\n                for key, val in self._constants.items():\n                    f.attrs[key] = val\n                sp_grp = f.create_group('sweep_params')\n                # h5py workaround: explicitly store strings as encoded unicode data\n                sp_grp.attrs['sweep_order'] = [to_bytes(swp) for swp in total_params]\n                for par, val_list in zip(total_params, total_values):\n                    if val_list.dtype.kind == 'U':\n                        # unicode array, convert to raw bytes array\n                        val_list = val_list.astype('S')\n                    sp_grp.create_dataset(par, data=val_list, compression=compression)\n              
  data_grp = f.create_group('data')\n                for name, data_arr in self._data.items():\n                    data_grp.create_dataset(name, data=data_arr, compression=compression)\n        else:\n            # load from cache\n            with h5py.File(cache_fname, 'r') as f:\n                self._constants = dict(iter(f.attrs.items()))\n                sp_grp = f['sweep_params']\n                total_params = [fix_string(swp) for swp in sp_grp.attrs['sweep_order']]\n                total_values = [self._convert_hdf5_array(sp_grp[par][()]) for par in total_params]\n                data_grp = f['data']\n                self._data = {name: data_grp[name][()] for name in data_grp}\n\n        # change axes location so discrete parameters are at the start of sweep_params\n        env_disc_params = ['env'] + discrete_params\n        for idx, dpar in enumerate(env_disc_params):\n            if total_params[idx] != dpar:\n                # swap\n                didx = total_params.index(dpar)\n                ptmp = total_params[idx]\n                vtmp = total_values[idx]\n                total_params[idx] = total_params[didx]\n                total_values[idx] = total_values[didx]\n                total_params[didx] = ptmp\n                total_values[didx] = vtmp\n                for key, val in self._data.items():\n                    self._data[key] = np.swapaxes(val, idx, didx)\n\n        sidx = len(self._discrete_params) + 1\n        self._cont_params = total_params[sidx:]\n        self._cont_values = total_values[sidx:]\n        self._discrete_values = total_values[1:sidx]\n        self._env_values = total_values[0]\n\n        # get lazy function table.\n        shape = [total_values[idx].size for idx in range(len(env_disc_params))]\n\n        fun_name_iter = itertools.chain(iter(self._data.keys()), self.derived_parameters())\n        # noinspection PyTypeChecker\n        self._fun = {name: np.full(shape, None, dtype=object) for name in 
fun_name_iter}\n\n    @staticmethod\n    def _convert_hdf5_array(arr):\n        # type: (np.ndarray) -> np.ndarray\n        \"\"\"Check if raw bytes array, if so convert to unicode array.\"\"\"\n        if arr.dtype.kind == 'S':\n            return arr.astype('U')\n        return arr\n\n    def _load_sim_data(self,  # type: CharDB\n                       fname,  # type: str\n                       constants,  # type: Dict[str, Any]\n                       discrete_params  # type: List[str]\n                       ):\n        # type: (...) -> Tuple[Dict[str, np.ndarray], List[str], List[np.ndarray], Dict[str, Any]]\n        \"\"\"Returns the simulation data.\n\n        Parameters\n        ----------\n        fname : str\n            the simulation filename.\n        constants : Dict[str, Any]\n            the constants dictionary.\n        discrete_params : List[str]\n            a list of parameters that should take on discrete values.\n\n        Returns\n        -------\n        data_dict : Dict[str, np.ndarray]\n            a dictionary from output name to data as numpy array.\n        master_attrs : List[str]\n            list of attribute name for each dimension of numpy array.\n        master_values : List[np.ndarray]\n            list of attribute values for each dimension.\n        file_constants : Dict[str, Any]\n            the constants dictionary in file.\n        \"\"\"\n        if not os.path.exists(fname):\n            raise ValueError('Simulation file %s not found.' 
% fname)\n\n        rtol, atol = self.get_config('rtol'), self.get_config('atol')  # type: float\n\n        master_attrs = None\n        master_values = None\n        master_dict = None\n        file_constants = None\n        with h5py.File(fname, 'r') as f:\n            # check constants is consistent\n            for key, val in constants.items():\n                if not _equal(val, f.attrs[key], rtol, atol):\n                    raise ValueError('sim file attr %s = %s != %s' % (key, f.attrs[key], val))\n\n            # simple error checking.\n            if len(f) == 0:\n                raise ValueError('simulation file has no data.')\n\n            # check that attributes sweep forms regular grid.\n            attr_table = {}\n            for gname in f:\n                grp = f[gname]\n                for key, val in grp.attrs.items():\n                    # convert raw bytes to unicode\n                    # python 2/3 compatibility: convert raw bytes to string\n                    val = fix_string(val)\n\n                    if key != 'sweep_params':\n                        if key not in attr_table:\n                            attr_table[key] = []\n                        val_list = attr_table[key]\n                        if not _in_list(val_list, val, rtol, atol):\n                            val_list.append(val)\n\n            expected_len = 1\n            for val in attr_table.values():\n                expected_len *= len(val)\n\n            if expected_len != len(f):\n                raise ValueError('Attributes of f does not form complete sweep. '\n                                 'Expect length = %d, but actually = %d.' 
% (expected_len, len(f)))\n\n            # check all discrete parameters in attribute table.\n            for disc_par in discrete_params:\n                if disc_par not in attr_table:\n                    raise ValueError('Discrete attribute %s not found' % disc_par)\n\n            # get attribute order\n            attr_order = sorted(attr_table.keys())\n            # check all non-discrete attribute value list lies on regular grid\n            attr_values = [np.array(sorted(attr_table[attr])) for attr in attr_order]\n            for attr, aval_list in zip(attr_order, attr_values):\n                if attr not in discrete_params and attr != 'env':\n                    test_vec = np.linspace(aval_list[0], aval_list[-1], len(aval_list), endpoint=True)\n                    if not np.allclose(test_vec, aval_list, rtol=rtol, atol=atol):\n                        raise ValueError('Attribute %s values do not lie on regular grid' % attr)\n\n            # consolidate all data into one giant numpy array.\n            # first compute numpy array shape\n            test_grp = f['0']\n            sweep_params = [fix_string(tmpvar) for tmpvar in test_grp.attrs['sweep_params']]\n\n            # get constants dictionary\n            file_constants = {}\n            for key, val in f.attrs.items():\n                if key not in sweep_params:\n                    file_constants[key] = val\n\n            master_attrs = attr_order + sweep_params\n            swp_values = [np.linspace(f.attrs[var][0], f.attrs[var][1], f.attrs[var][2],\n                                      endpoint=True) for var in sweep_params]  # type: List[np.array]\n            master_values = attr_values + swp_values\n            master_shape = [len(val_list) for val_list in master_values]\n            master_index = [slice(0, n) for n in master_shape]\n            master_dict = {}\n            for gname in f:\n                grp = f[gname]\n                # get index of the current group in the giant 
array.\n                # Note: using linear search to compute index now, but attr_val_list should be small.\n                for aidx, (attr, aval_list) in enumerate(zip(attr_order, attr_values)):\n                    master_index[aidx] = _index_in_list(aval_list, grp.attrs[attr], rtol, atol)\n\n                for output in grp:\n                    dset = grp[output]\n                    if output not in master_dict:\n                        master_dict[output] = np.empty(master_shape, dtype=dset.dtype)\n                    master_dict[output][master_index] = dset\n\n        return master_dict, master_attrs, master_values, file_constants\n\n    def __getitem__(self, param):\n        # type: (str) -> Any\n        \"\"\"Returns the given parameter value.\n\n        Parameters\n        ----------\n        param : str\n            parameter name.\n\n        Returns\n        -------\n        val : Any\n            parameter value.\n        \"\"\"\n        return self._params[param]\n\n    def __setitem__(self, key, value):\n        # type: (str, Any) -> None\n        \"\"\"Sets the given parameter value.\n\n        Parameters\n        ----------\n        key : str\n            parameter name.\n        value : Any\n            parameter value.  None to unset.\n        \"\"\"\n        rtol, atol = self.get_config('rtol'), self.get_config('atol')\n\n        if key in self._discrete_params:\n            if value is not None:\n                idx = self._discrete_params.index(key)\n                if not _in_list(self._discrete_values[idx], value, rtol, atol):\n                    raise ValueError('Cannot set discrete variable %s value to %s' % (key, value))\n        elif key in self._cont_params:\n            if value is not None:\n                idx = self._cont_params.index(key)\n                val_list = self._cont_values[idx]\n                if value < val_list[0] or value > val_list[-1]:\n                    raise ValueError('Variable %s value %s out of bounds.' 
% (key, value))\n        else:\n            raise ValueError('Unknown variable %s.' % key)\n\n        self._params[key] = value\n\n    def get_config(self, name):\n        # type: (str) -> Any\n        \"\"\"Returns the configuration value.\n\n        Parameters\n        ----------\n        name : str\n            configuration name.\n\n        Returns\n        -------\n        val : Any\n            configuration value.\n        \"\"\"\n        return self._config[name]\n\n    def set_config(self, name, value):\n        # type: (str, Any) -> None\n        \"\"\"Sets the configuration value.\n\n        Parameters\n        ----------\n        name : str\n            configuration name.\n        value : Any\n            configuration value.\n        \"\"\"\n        if name not in self._config:\n            raise ValueError('Unknown configuration %s' % name)\n        self._config[name] = value\n\n    @property\n    def env_list(self):\n        # type: () -> List[str]\n        \"\"\"The list of simulation environments to consider.\"\"\"\n        return self._env_list\n\n    @env_list.setter\n    def env_list(self, new_env_list):\n        # type: (List[str]) -> None\n        \"\"\"Sets the list of simulation environments to consider.\"\"\"\n        self._env_list = new_env_list\n\n    @classmethod\n    def get_sim_file(cls, root_dir, constants):\n        # type: (str, Dict[str, Any]) -> str\n        \"\"\"Returns the simulation data file name.\n\n        Parameters\n        ----------\n        root_dir : str\n            absolute path to the root characterization data directory.\n        constants : Dict[str, Any]\n            constants dictionary.\n\n        Returns\n        -------\n        fname : str\n            the simulation data file name.\n        \"\"\"\n        raise NotImplementedError('Not implemented')\n\n    @classmethod\n    def get_cache_file(cls, root_dir, constants):\n        # type: (str, Dict[str, Any]) -> str\n        \"\"\"Returns the 
post-processed characterization data file name.\n\n        Parameters\n        ----------\n        root_dir : str\n            absolute path to the root characterization data directory.\n        constants : Dict[str, Any]\n            constants dictionary.\n\n        Returns\n        -------\n        fname : str\n            the post-processed characterization data file name.\n        \"\"\"\n        raise NotImplementedError('Not implemented')\n\n    @classmethod\n    def post_process_data(cls, sim_data, sweep_params, sweep_values, constants):\n        # type: (Dict[str, np.ndarray], List[str], List[np.ndarray], Dict[str, Any]) -> Dict[str, np.ndarray]\n        \"\"\"Postprocess simulation data.\n\n        Parameters\n        ----------\n        sim_data : Dict[str, np.ndarray]\n            the simulation data as a dictionary from output name to numpy array.\n        sweep_params : List[str]\n            list of parameter name for each dimension of numpy array.\n        sweep_values : List[np.ndarray]\n            list of parameter values for each dimension.\n        constants : Dict[str, Any]\n            the constants dictionary.\n\n        Returns\n        -------\n        data : Dict[str, np.ndarray]\n            a dictionary of post-processed data.\n        \"\"\"\n        raise NotImplementedError('Not implemented')\n\n    @classmethod\n    def derived_parameters(cls):\n        # type: () -> List[str]\n        \"\"\"Returns a list of derived parameters.\"\"\"\n        return []\n\n    @classmethod\n    def compute_derived_parameters(cls, fdict):\n        # type: (Dict[str, DiffFunction]) -> Dict[str, DiffFunction]\n        \"\"\"Compute derived parameter functions.\n\n        Parameters\n        ----------\n        fdict : Dict[str, DiffFunction]\n            a dictionary from core parameter name to the corresponding function.\n\n        Returns\n        -------\n        deriv_dict : Dict[str, DiffFunction]\n            a dictionary from derived parameter 
name to the corresponding function.\n        \"\"\"\n        return {}\n\n    def _get_function_index(self, **kwargs):\n        # type: (Any) -> List[int]\n        \"\"\"Returns the function index corresponding to given discrete parameter values.\n\n        simulation environment index will be set to 0\n\n        Parameters\n        ----------\n        **kwargs :\n            discrete parameter values.\n\n        Returns\n        -------\n        fidx_list : List[int]\n            the function index.\n        \"\"\"\n        rtol, atol = self.get_config('rtol'), self.get_config('atol')\n\n        fidx_list = [0]\n        for par, val_list in zip(self._discrete_params, self._discrete_values):\n            val = kwargs.get(par, self[par])\n            if val is None:\n                raise ValueError('Parameter %s value not specified' % par)\n\n            val_idx = _index_in_list(val_list, val, rtol, atol)\n            if val_idx < 0:\n                raise ValueError('Discrete parameter %s have illegal value %s' % (par, val))\n            fidx_list.append(val_idx)\n\n        return fidx_list\n\n    def _get_function_helper(self, name, fidx_list):\n        # type: (str, Union[List[int], Tuple[int]]) -> DiffFunction\n        \"\"\"Helper method for get_function()\n\n        Parameters\n        ----------\n        name : str\n            name of the function.\n        fidx_list : Union[List[int], Tuple[int]]\n            function index.\n\n        Returns\n        -------\n        fun : DiffFunction\n            the interpolator function.\n        \"\"\"\n        # get function table index\n        fidx_list = tuple(fidx_list)\n        ftable = self._fun[name]\n        if ftable[fidx_list] is None:\n            if name in self._data:\n                # core parameter\n                char_data = self._data[name]\n\n                # get scale list and data index\n                scale_list = []\n                didx = list(fidx_list)  # type: List[Union[int, slice]]\n 
               for vec in self._cont_values:\n                    scale_list.append((vec[0], vec[1] - vec[0]))\n                    didx.append(slice(0, vec.size))\n\n                # make interpolator.\n                cur_data = char_data[didx]\n                method = self.get_config('method')\n                ftable[fidx_list] = interpolate_grid(scale_list, cur_data, method=method, extrapolate=True)\n            else:\n                # derived parameter\n                core_fdict = {fn: self._get_function_helper(fn, fidx_list) for fn in self._data}\n                deriv_fdict = self.compute_derived_parameters(core_fdict)\n                for fn, deriv_fun in deriv_fdict.items():\n                    self._fun[fn][fidx_list] = deriv_fun\n\n        return ftable[fidx_list]\n\n    def get_function(self, name, env='', **kwargs):\n        # type: (str, str, **Any) -> Union[VectorDiffFunction, DiffFunction]\n        \"\"\"Returns a function for the given output.\n\n        Parameters\n        ----------\n        name : str\n            name of the function.\n        env : str\n            if not empty, we will return function for just the given simulation environment.\n        **kwargs : Any\n            dictionary of discrete parameter values.\n\n        Returns\n        -------\n        output : Union[VectorDiffFunction, DiffFunction]\n            the output vector function.\n        \"\"\"\n        fidx_list = self._get_function_index(**kwargs)\n        if not env:\n            fun_list = []\n            for env in self.env_list:\n                occur_list = np.where(self._env_values == env)[0]\n                if occur_list.size == 0:\n                    raise ValueError('environment %s not found.')\n                env_idx = occur_list[0]\n                fidx_list[0] = env_idx\n                fun_list.append(self._get_function_helper(name, fidx_list))\n            return VectorDiffFunction(fun_list)\n        else:\n            occur_list = 
np.where(self._env_values == env)[0]\n            if occur_list.size == 0:\n                raise ValueError('environment %s not found.' % env)\n            env_idx = occur_list[0]\n            fidx_list[0] = env_idx\n            return self._get_function_helper(name, fidx_list)\n\n    def get_fun_sweep_params(self):\n        # type: () -> Tuple[List[str], List[Tuple[float, float]]]\n        \"\"\"Returns interpolation function sweep parameter names and values.\n\n        Returns\n        -------\n        sweep_params : List[str]\n            list of parameter names.\n        sweep_range : List[Tuple[float, float]]\n            list of parameter range\n        \"\"\"\n        return self._cont_params, [(vec[0], vec[-1]) for vec in self._cont_values]\n\n    def _get_fun_arg(self, **kwargs):\n        # type: (Any) -> np.ndarray\n        \"\"\"Make numpy array of interpolation function arguments.\"\"\"\n        val_list = []\n        for par in self._cont_params:\n            val = kwargs.get(par, self[par])\n            if val is None:\n                raise ValueError('Parameter %s value not specified.' 
% par)\n            val_list.append(val)\n\n        return np.array(val_list)\n\n    def query(self, **kwargs):\n        # type: (Any) -> Dict[str, np.ndarray]\n        \"\"\"Query the database for the values associated with the given parameters.\n\n        All parameters must be specified.\n\n        Parameters\n        ----------\n        **kwargs :\n            parameter values.\n\n        Returns\n        -------\n        results : Dict[str, np.ndarray]\n            the characterization results.\n        \"\"\"\n        results = {}\n        arg = self._get_fun_arg(**kwargs)\n        for name in self._data:\n            fun = self.get_function(name, **kwargs)\n            results[name] = fun(arg)\n\n        for var in itertools.chain(self._discrete_params, self._cont_params):\n            results[var] = kwargs.get(var, self[var])\n\n        results.update(self.compute_derived_parameters(results))\n\n        return results\n\n    def minimize(self,  # type: CharDB\n                 objective,  # type: str\n                 define=None,  # type: List[Tuple[str, int]]\n                 cons=None,  # type: Dict[str, Dict[str, float]]\n                 vector_params=None,  # type: Set[str]\n                 debug=False,  # type: bool\n                 **kwargs\n                 ):\n        # type: (...) -> Dict[str, Union[np.ndarray, float]]\n        \"\"\"Find operating point that minimizes the given objective.\n\n        Parameters\n        ----------\n        objective : str\n            the objective to minimize.  Must be a scalar.\n        define : List[Tuple[str, int]]\n            list of expressions to define new variables.  Each\n            element of the list is a tuple of string and integer.  
The string\n            contains a python assignment that computes the variable from\n            existing ones, and the integer indicates the variable shape.\n\n            Note that define can also be used to enforce relationships between\n            existing variables.  Using transistor as an example, defining\n            'vgs = vds' will force the vgs of vds of the transistor to be\n            equal.\n        cons : Dict[str, Dict[str, float]]\n            a dictionary from variable name to constraints of that variable.\n            see OpenMDAO documentations for details on constraints.\n        vector_params : Set[str]\n            set of input variables that are vector instead of scalar.  An input\n            variable is a vector if it can change across simulation environments.\n        debug : bool\n            True to enable debugging messages.  Defaults to False.\n        **kwargs :\n            known parameter values.\n\n        Returns\n        -------\n        results : Dict[str, Union[np.ndarray, float]]\n            the results dictionary.\n        \"\"\"\n        cons = cons or {}\n        fidx_list = self._get_function_index(**kwargs)\n        builder = GroupBuilder()\n\n        params_ranges = dict(zip(self._cont_params,\n                                 ((vec[0], vec[-1]) for vec in self._cont_values)))\n        # add functions\n        fun_name_iter = itertools.chain(iter(self._data.keys()), self.derived_parameters())\n        for name in fun_name_iter:\n            fun_list = []\n            for idx, env in enumerate(self.env_list):\n                fidx_list[0] = idx\n                fun_list.append(self._get_function_helper(name, fidx_list))\n\n            builder.add_fun(name, fun_list, self._cont_params, params_ranges,\n                            vector_params=vector_params)\n\n        # add expressions\n        for expr, ndim in define:\n            builder.add_expr(expr, ndim)\n\n        # update input bounds from constraints\n       
 input_set = builder.get_inputs()\n        var_list = builder.get_variables()\n\n        for name in input_set:\n            if name in cons:\n                setup = cons[name]\n                if 'equals' in setup:\n                    eq_val = setup['equals']\n                    builder.set_input_limit(name, equals=eq_val)\n                else:\n                    vmin = vmax = None\n                    if 'lower' in setup:\n                        vmin = setup['lower']\n                    if 'upper' in setup:\n                        vmax = setup['upper']\n                    builder.set_input_limit(name, lower=vmin, upper=vmax)\n\n        # build the group and make the problem\n        grp, input_bounds = builder.build()\n\n        top = omdao.Problem()\n        top.root = grp\n\n        opt_package = self.get_config('opt_package')  # type: str\n        opt_settings = self.get_config('opt_settings')\n\n        if opt_package == 'scipy':\n            driver = top.driver = omdao.ScipyOptimizer()\n            print_opt_name = 'disp'\n        elif opt_package == 'pyoptsparse':\n            driver = top.driver = omdao.pyOptSparseDriver()\n            print_opt_name = 'print_results'\n        else:\n            raise ValueError('Unknown optimization package: %s' % opt_package)\n\n        driver.options['optimizer'] = self.get_config('opt_method')\n        driver.options[print_opt_name] = debug\n        driver.opt_settings.update(opt_settings)\n\n        # add constraints\n        constants = {}\n        for name, setup in cons.items():\n            if name not in input_bounds:\n                # add constraint\n                driver.add_constraint(name, **setup)\n\n        # add inputs\n        for name in input_set:\n            eq_val, lower, upper, ndim = input_bounds[name]\n            val = kwargs.get(name, self[name])  # type: float\n            if val is None:\n                val = eq_val\n            comp_name = 'comp__%s' % name\n            if val is 
not None:\n                val = np.atleast_1d(np.ones(ndim) * val)\n                constants[name] = val\n                top.root.add(comp_name, omdao.IndepVarComp(name, val=val), promotes=[name])\n            else:\n                avg = (lower + upper) / 2.0\n                span = upper - lower\n                val = np.atleast_1d(np.ones(ndim) * avg)\n                top.root.add(comp_name, omdao.IndepVarComp(name, val=val), promotes=[name])\n                driver.add_desvar(name, lower=lower, upper=upper, adder=-avg, scaler=1.0 / span)\n                # driver.add_desvar(name, lower=lower, upper=upper)\n\n        # add objective and setup\n        driver.add_objective(objective)\n        top.setup(check=debug)\n\n        # somehow html file is not viewable.\n        if debug:\n            omdao.view_model(top, outfile='CharDB_debug.html')\n\n        # set constants\n        for name, val in constants.items():\n            top[name] = val\n\n        top.run()\n\n        results = {var: kwargs.get(var, self[var]) for var in self._discrete_params}\n        for var in var_list:\n            results[var] = top[var]\n\n        return results\n"
  },
  {
    "path": "bag/tech/mos.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This module contains transistor characterization and optimization related classes.\n\"\"\"\n\nimport os\n\n# import pyoptsparse\nimport numpy as np\n\nfrom .core import CharDB\n\n\nclass MosCharDB(CharDB):\n    \"\"\"The mosfet characterization database.\n\n    This class holds transistor characterization data and provides useful query methods.\n\n    Parameters\n    ----------\n    root_dir : str\n        path to the root characterization data directory.\n    mos_type : str\n        the transistor type.  Either 'pch' or 'nch'.\n    discrete_params : list[str]\n        a list of parameters that should take on discrete values instead of being interpolated.\n        Usually intent, length, or transistor width (for finfets).\n    env_list : list[str]\n        list of simulation environments to consider.\n    update : bool\n        True to update post-processed data from raw simulation data.\n    intent : str or None\n        the threshold flavor name.\n    l : float or None\n        the channel length, in meters.\n    w : int or float or None\n        the transistor width, in meters or number of fins.\n    vgs : float or None\n        the Vgs voltage.\n    vds : float or None\n        the Vds voltage.\n    vbs : float or None\n        the Vbs voltage.\n    **kwargs :\n        additional characterization database parameters.  
See documentation for CharDB.\n    \"\"\"\n\n    _raw_data_names = ['ids', 'y11', 'y12', 'y13', 'y21', 'y22', 'y23', 'y31', 'y32', 'y33']\n\n    def __init__(self, root_dir, mos_type, discrete_params, env_list,\n                 intent=None, l=None, w=None, vgs=None, vds=None, vbs=None,\n                 **kwargs):\n        constants = dict(mos_type=mos_type)\n        init_params = dict(intent=intent, l=l, w=w, vgs=vgs, vds=vds, vbs=vbs)\n        CharDB.__init__(self, root_dir, constants, discrete_params, init_params, env_list, **kwargs)\n\n    @classmethod\n    def get_sim_file(cls, root_dir, constants):\n        \"\"\"Returns the simulation data file name.\n\n        Parameters\n        ----------\n        root_dir : str\n            absolute path to the root characterization data directory.\n        constants : dict[string, any]\n            constants dictionary.\n\n        Returns\n        -------\n        fname : str\n            the simulation data file name.\n        \"\"\"\n        return os.path.join(root_dir, '%s.hdf5' % constants['mos_type'])\n\n    @classmethod\n    def get_cache_file(cls, root_dir, constants):\n        \"\"\"Returns the post-processed characterization data file name.\n\n        Parameters\n        ----------\n        root_dir : str\n            absolute path to the root characterization data directory.\n        constants : dict[string, any]\n            constants dictionary.\n\n        Returns\n        -------\n        fname : str\n            the post-processed characterization data file name.\n        \"\"\"\n        return os.path.join(root_dir, '%s__%s.hdf5' % (constants['mos_type'], cls.__name__))\n\n    @classmethod\n    def post_process_data(cls, sim_data, sweep_params, sweep_values, constants):\n        \"\"\"Postprocess simulation data.\n\n        Parameters\n        ----------\n        sim_data : dict[string, np.array]\n            the simulation data as a dictionary from output name to numpy array.\n        sweep_params : 
list[str]\n            list of parameter name for each dimension of numpy array.\n        sweep_values : list[numpy.array]\n            list of parameter values for each dimension.\n        constants : dict[string, any]\n            the constants dictionary.\n\n        Returns\n        -------\n        data : dict[str, np.array]\n            a dictionary of post-processed data.\n        \"\"\"\n        # compute small signal parameters\n        w = 2 * np.pi * constants['char_freq']\n        fg = constants['fg']\n\n        ids = sim_data['ids']\n        gm = (sim_data['y21'].real - sim_data['y31'].real) / 2.0\n        gds = (sim_data['y22'].real - sim_data['y32'].real) / 2.0\n        gb = (sim_data['y33'].real - sim_data['y23'].real) / 2.0 - gm - gds\n\n        cgd = -0.5 / w * (sim_data['y12'].imag + sim_data['y21'].imag)\n        cgs = -0.5 / w * (sim_data['y13'].imag + sim_data['y31'].imag)\n        cds = -0.5 / w * (sim_data['y23'].imag + sim_data['y32'].imag)\n        cgb = sim_data['y11'].imag / w - cgd - cgs\n        cdb = sim_data['y22'].imag / w - cds - cgd\n        csb = sim_data['y33'].imag / w - cgs - cds\n\n        ss_data = dict(\n            ids=ids / fg,\n            gm=gm / fg,\n            gds=gds / fg,\n            gb=gb / fg,\n            cgd=cgd / fg,\n            cgs=cgs / fg,\n            cds=cds / fg,\n            cgb=cgb / fg,\n            cdb=cdb / fg,\n            csb=csb / fg,\n        )\n\n        return ss_data\n\n    @classmethod\n    def derived_parameters(cls):\n        \"\"\"Returns a list of derived parameters.\"\"\"\n        return ['cgg', 'cdd', 'css', 'cbb', 'vstar', 'gain', 'ft']\n\n    @classmethod\n    def compute_derived_parameters(cls, fdict):\n        \"\"\"Compute derived parameter functions.\n\n        Parameters\n        ----------\n        fdict : dict[string, bag.math.dfun.DiffFunction]\n            a dictionary from core parameter name to the corresponding function.\n\n        Returns\n        -------\n        
deriv_dict : dict[str, bag.math.dfun.DiffFunction]\n            a dictionary from derived parameter name to the corresponding function.\n        \"\"\"\n        cgg = fdict['cgd'] + fdict['cgs'] + fdict['cgb']\n        return dict(\n            cgg=cgg,\n            cdd=fdict['cgd'] + fdict['cds'] + fdict['cdb'],\n            css=fdict['cgs'] + fdict['cds'] + fdict['csb'],\n            cbb=fdict['cgb'] + fdict['cdb'] + fdict['csb'],\n            vstar=2.0 * (fdict['ids'] / fdict['gm']),\n            gain=fdict['gm'] / fdict['gds'],\n            ft=fdict['gm'] / (2.0 * np.pi * cgg),\n        )\n\n\nclass MosCharGDDB(CharDB):\n    \"\"\"The mosfet characterization database.\n\n    This class holds transistor characterization data and provides useful query methods.\n\n    Parameters\n    ----------\n    root_dir : str\n        path to the root characterization data directory.\n    mos_type : str\n        the transistor type.  Either 'pch' or 'nch'.\n    discrete_params : list[str]\n        a list of parameters that should take on discrete values instead of being interpolated.\n        Usually intent, length, or transistor width (for finfets).\n    env_list : list[str]\n        list of simulation environments to consider.\n    update : bool\n        True to update post-processed data from raw simulation data.\n    intent : str or None\n        the threshold flavor name.\n    l : float or None\n        the channel length, in meters.\n    w : int or float or None\n        the transistor width, in meters or number of fins.\n    vgs : float or None\n        the Vgs voltage.\n    vds : float or None\n        the Vds voltage.\n    vbs : float or None\n        the Vbs voltage.\n    **kwargs :\n        additional characterization database parameters.  
See documentation for CharDB.\n    \"\"\"\n\n    _raw_data_names = ['ids', 'y11', 'y12', 'y21', 'y22']\n\n    def __init__(self, root_dir, mos_type, discrete_params, env_list,\n                 intent=None, l=None, w=None, vgs=None, vds=None,\n                 **kwargs):\n        constants = dict(mos_type=mos_type)\n        init_params = dict(intent=intent, l=l, w=w, vgs=vgs, vds=vds)\n        CharDB.__init__(self, root_dir, constants, discrete_params, init_params, env_list, **kwargs)\n\n    @classmethod\n    def get_sim_file(cls, root_dir, constants):\n        \"\"\"Returns the simulation data file name.\n\n        Parameters\n        ----------\n        root_dir : str\n            absolute path to the root characterization data directory.\n        constants : dict[string, any]\n            constants dictionary.\n\n        Returns\n        -------\n        fname : str\n            the simulation data file name.\n        \"\"\"\n        return os.path.join(root_dir, '%s.hdf5' % constants['mos_type'])\n\n    @classmethod\n    def get_cache_file(cls, root_dir, constants):\n        \"\"\"Returns the post-processed characterization data file name.\n\n        Parameters\n        ----------\n        root_dir : str\n            absolute path to the root characterization data directory.\n        constants : dict[string, any]\n            constants dictionary.\n\n        Returns\n        -------\n        fname : str\n            the post-processed characterization data file name.\n        \"\"\"\n        return os.path.join(root_dir, '%s__%s.hdf5' % (constants['mos_type'], cls.__name__))\n\n    @classmethod\n    def post_process_data(cls, sim_data, sweep_params, sweep_values, constants):\n        \"\"\"Postprocess simulation data.\n\n        Parameters\n        ----------\n        sim_data : dict[string, np.array]\n            the simulation data as a dictionary from output name to numpy array.\n        sweep_params : list[str]\n            list of parameter name for each 
dimension of numpy array.\n        sweep_values : list[numpy.array]\n            list of parameter values for each dimension.\n        constants : dict[string, any]\n            the constants dictionary.\n\n        Returns\n        -------\n        data : dict[str, np.array]\n            a dictionary of post-processed data.\n        \"\"\"\n        # compute small signal parameters\n        w = 2 * np.pi * constants['char_freq']\n        fg = constants['fg']\n\n        ids = sim_data['ids']\n        gm = sim_data['y21'].real\n        gds = sim_data['y22'].real\n\n        cgd = -0.5 / w * (sim_data['y12'].imag + sim_data['y21'].imag)\n        cgs = sim_data['y11'].imag / w - cgd\n        cds = sim_data['y22'].imag / w - cgd\n\n        ss_data = dict(\n            ids=ids / fg,\n            gm=gm / fg,\n            gds=gds / fg,\n            cgd=cgd / fg,\n            cgs=cgs / fg,\n            cds=cds / fg,\n        )\n\n        return ss_data\n\n    @classmethod\n    def derived_parameters(cls):\n        \"\"\"Returns a list of derived parameters.\"\"\"\n        return ['cgg', 'cdd', 'vstar', 'gain', 'ft']\n\n    @classmethod\n    def compute_derived_parameters(cls, fdict):\n        \"\"\"Compute derived parameter functions.\n\n        Parameters\n        ----------\n        fdict : dict[string, bag.math.dfun.DiffFunction]\n            a dictionary from core parameter name to the corresponding function.\n\n        Returns\n        -------\n        deriv_dict : dict[str, bag.math.dfun.DiffFunction]\n            a dictionary from derived parameter name to the corresponding function.\n        \"\"\"\n        cgg = fdict['cgd'] + fdict['cgs']\n        return dict(\n            cgg=cgg,\n            cdd=fdict['cgd'] + fdict['cds'],\n            vstar=2.0 * (fdict['ids'] / fdict['gm']),\n            gain=fdict['gm'] / fdict['gds'],\n            ft=fdict['gm'] / (2.0 * np.pi * cgg),\n        )\n"
  },
  {
    "path": "bag/util/LICENSE",
    "content": "BSD 3-Clause License\n\nCopyright (c) 2018, Regents of the University of California\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n  list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n  this list of conditions and the following disclaimer in the documentation\n  and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n  contributors may be used to endorse or promote products derived from\n  this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "bag/util/__init__.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This package defines various utilities classes.\n\"\"\""
  },
  {
    "path": "bag/util/cache.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This module defines classes used to cache existing design masters\n\"\"\"\n\nfrom typing import Sequence, Dict, Set, Any, Optional, TypeVar, Type, Callable, Iterable\n\nimport sys\nimport os\nimport time\nimport numbers\nimport importlib\nimport abc\nfrom collections import OrderedDict\n\nfrom ..io import readlines_iter, write_file, fix_string\nfrom .search import BinaryIterator\n\n\ndef _get_unique_name(basename, *args):\n    # type: (str, *Iterable[str]) -> str\n    \"\"\"Returns a unique name that's not used yet.\n\n    This method appends an index to the given basename.  Binary\n    search is used to achieve logarithmic run time.\n\n    Parameters\n    ----------\n    basename : str\n        the base name.\n    *args :\n        a list of containers of used names.\n\n    Returns\n    -------\n    new_name : str\n        the unique name.\n    \"\"\"\n    new_name = basename\n    exist = False\n    for used_names in args:\n        if new_name in used_names:\n            # the original name just works\n            exist = True\n            break\n\n    if not exist:\n        return new_name\n\n    bin_iter = BinaryIterator(1, None)\n    while bin_iter.has_next():\n        cur_name = '%s_%d' % (basename, bin_iter.get_next())\n\n        exist = False\n        for used_names in args:\n            if cur_name in used_names:\n                # the original name just works\n                exist = True\n                break\n\n        if exist:\n            bin_iter.up()\n        else:\n            bin_iter.save()\n            bin_iter.down()\n\n    last_save = bin_iter.get_last_save()\n    assert last_save is not None, \"No save marker defined\"\n    return '%s_%d' % (basename, last_save)\n\n\nclass ClassImporter(object):\n    \"\"\"A class that dynamically imports Python class from a definition file.\n\n    This class is used to import design modules to enable code reuse and design collaboration.\n\n    Parameters\n    
----------\n    lib_defs : str\n        path to the design library definition file.\n    \"\"\"\n    def __init__(self, lib_defs):\n        \"\"\"Create a new design database instance.\n        \"\"\"\n        lib_defs = os.path.abspath(lib_defs)\n        if not os.path.exists(lib_defs):\n            raise Exception(\"design library definition file %s not found\" % lib_defs)\n\n        self.lib_defs = lib_defs\n        self.libraries = {}\n        for line in readlines_iter(lib_defs):\n            line = line.strip()\n            # ignore comments and empty lines\n            if line and not line.startswith('#'):\n                lib_name, lib_path = line.split()\n                lib_path = os.path.abspath(os.path.expandvars(lib_path))\n                check_path = os.path.join(lib_path, lib_name)\n                if not os.path.exists(check_path):\n                    raise Exception('Library %s not found.' % check_path)\n                # make sure every library is on python path, so we can import it.\n                if lib_path not in sys.path:\n                    sys.path.append(lib_path)\n                self.libraries[lib_name] = lib_path\n\n    def append_library(self, lib_name, lib_path):\n        \"\"\"Adds a new library to the library definition file.\n\n        Parameters\n        ----------\n        lib_name : str\n            name of the library.\n        lib_path : str\n            path to this library.\n        \"\"\"\n        if lib_name not in self.libraries:\n            lib_path = os.path.abspath(lib_path)\n            self.libraries[lib_name] = lib_path\n            write_file(self.lib_defs, '%s %s\\n' % (lib_name, lib_path), append=True)\n\n    def get_library_path(self, lib_name):\n        \"\"\"Returns the location of the given library.\n\n        Parameters\n        ----------\n        lib_name : str\n            the library name.\n\n        Returns\n        -------\n        lib_path : str or None\n            the location of the library, 
or None if library not defined.\n        \"\"\"\n        return self.libraries.get(lib_name, None)\n\n    def get_class(self, lib_name, cell_name):\n        \"\"\"Returns the Python class with the given library and cell name.\n\n        Parameters\n        ----------\n        lib_name : str\n            design module library name.\n        cell_name : str\n            design module cell name\n\n        Returns\n        -------\n        cls : class\n            the corresponding Python class.\n        \"\"\"\n\n        if lib_name not in self.libraries:\n            raise Exception(\"Library %s not listed in definition \"\n                            \"file %s\" % (lib_name, self.lib_defs))\n\n        module_name = '%s.%s' % (lib_name, cell_name)\n        module_cls = '%s__%s' % (lib_name, cell_name)\n\n        lib_package = importlib.import_module(lib_name)\n        cell_package = importlib.import_module(module_name, package=lib_package)\n        return getattr(cell_package, module_cls)\n\n\nclass DesignMaster(abc.ABC):\n    \"\"\"A design master instance.\n\n    This class represents a design master in the design database.\n\n    Parameters\n    ----------\n    master_db : MasterDB\n        the master database.\n    lib_name : str\n        the generated instance library name.\n    params : Dict[str, Any]\n        the parameters dictionary.\n    used_names : Set[str]\n        a set of already used cell names.\n    **kwargs :\n        optional parameters.\n    \"\"\"\n    def __init__(self, master_db, lib_name, params, used_names, **kwargs):\n        # type: (MasterDB, str, Dict[str, Any], Set[str], **Any) -> None\n        self._master_db = master_db\n        self._lib_name = lib_name\n        self._used_names = used_names\n\n        # set parameters\n        params_info = self.get_params_info()\n        default_params = self.get_default_param_values()\n        self._cell_name = \"\"  # type: str\n        self.params = {}  # type: Dict[str, Any]\n        if 
params_info is None:\n            # compatibility with old schematics generators\n            self.params.update(params)\n            self._prelim_key = self.to_immutable_id((self._get_qualified_name(), params))\n            self._key = None\n        else:\n            self.populate_params(params, params_info, default_params, **kwargs)\n            # get unique cell name\n            self._prelim_key = self.compute_unique_key()\n            self.update_master_info()\n\n        self.children = None\n        self._finalized = False\n\n    def update_master_info(self):\n        self._cell_name = _get_unique_name(self.get_master_basename(), self._used_names)\n        self._key = self.compute_unique_key()\n\n    def populate_params(self, table, params_info, default_params, **kwargs):\n        # type: (Dict[str, Any], Dict[str, str], Dict[str, Any], **Any) -> None\n        \"\"\"Fill params dictionary with values from table and default_params\"\"\"\n        for key, desc in params_info.items():\n            if key not in table:\n                if key not in default_params:\n                    raise ValueError('Parameter %s not specified.  
Description:\\n%s' % (key, desc))\n                else:\n                    self.params[key] = default_params[key]\n            else:\n                self.params[key] = table[key]\n\n        # add hidden parameters\n        hidden_params = kwargs.get('hidden_params', {})\n        for name, value in hidden_params.items():\n            self.params[name] = table.get(name, value)\n\n    @classmethod\n    def to_immutable_id(cls, val):\n        # type: (Any) -> Any\n        \"\"\"Convert the given object to an immutable type for use as keys in dictionary.\n        \"\"\"\n        # python 2/3 compatibility: convert raw bytes to string\n        val = fix_string(val)\n\n        if val is None or isinstance(val, numbers.Number) or isinstance(val, str):\n            return val\n        elif isinstance(val, list) or isinstance(val, tuple):\n            return tuple((cls.to_immutable_id(item) for item in val))\n        elif isinstance(val, dict):\n            return tuple(((k, cls.to_immutable_id(val[k])) for k in sorted(val.keys())))\n        elif isinstance(val, set):\n            return tuple((k for k in sorted(val)))\n        elif hasattr(val, 'get_immutable_key') and callable(val.get_immutable_key):\n            return val.get_immutable_key()\n        else:\n            raise Exception('Unrecognized value %s with type %s' % (str(val), type(val)))\n\n    @classmethod\n    @abc.abstractmethod\n    def get_params_info(cls):\n        # type: () -> Optional[Dict[str, str]]\n        \"\"\"Returns a dictionary from parameter names to descriptions.\n\n        Returns\n        -------\n        param_info : Optional[Dict[str, str]]\n            dictionary from parameter names to descriptions.\n        \"\"\"\n        return None\n\n    @classmethod\n    def get_default_param_values(cls):\n        # type: () -> Dict[str, Any]\n        \"\"\"Returns a dictionary containing default parameter values.\n\n        Override this method to define default parameter values.  
As good practice,\n        you should avoid defining default values for technology-dependent parameters\n        (such as channel length, transistor width, etc.), but only define default\n        values for technology-independent parameters (such as number of tracks).\n\n        Returns\n        -------\n        default_params : Dict[str, Any]\n            dictionary of default parameter values.\n        \"\"\"\n        return {}\n\n    @abc.abstractmethod\n    def get_master_basename(self):\n        # type: () -> str\n        \"\"\"Returns the base name to use for this instance.\n\n        Returns\n        -------\n        basename : str\n            the base name for this instance.\n        \"\"\"\n        return ''\n\n    @abc.abstractmethod\n    def get_content(self, lib_name, rename_fun):\n        # type: (str, Callable[[str], str]) -> Any\n        \"\"\"Returns the content of this master instance.\n\n        Parameters\n        ----------\n        lib_name : str\n            the library to create the design masters in.\n        rename_fun : Callable[[str], str]\n            a function that renames design masters.\n\n        Returns\n        -------\n        content : Any\n            the master content data structure.\n        \"\"\"\n        return None\n\n    @property\n    def master_db(self):\n        # type: () -> MasterDB\n        \"\"\"Returns the database used to create design masters.\"\"\"\n        return self._master_db\n\n    @property\n    def lib_name(self):\n        # type: () -> str\n        \"\"\"The master library name\"\"\"\n        return self._lib_name\n\n    @property\n    def cell_name(self):\n        # type: () -> str\n        \"\"\"The master cell name\"\"\"\n        return self._cell_name\n\n    @property\n    def key(self):\n        # type: () -> Optional[Any]\n        \"\"\"A unique key representing this master.\"\"\"\n        return self._key\n\n    @property\n    def finalized(self):\n        # type: () -> bool\n        
\"\"\"Returns True if this DesignMaster is finalized.\"\"\"\n        return self._finalized\n\n    @property\n    def prelim_key(self):\n        # type: () -> Any\n        \"\"\"Returns a preliminary unique key.  For compatibility with old schematic generators.\"\"\"\n        return self._prelim_key\n\n    def _get_qualified_name(self):\n        # type: () -> str\n        \"\"\"Returns the qualified name of this class.\"\"\"\n        my_module = self.__class__.__module__\n        if my_module is None or my_module == str.__class__.__module__:\n            return self.__class__.__name__\n        else:\n            return my_module + '.' + self.__class__.__name__\n\n    def finalize(self):\n        # type: () -> None\n        \"\"\"Finalize this master instance.\n        \"\"\"\n        self._finalized = True\n\n    def compute_unique_key(self):\n        # type: () -> Any\n        \"\"\"Returns a unique hashable object (usually tuple or string) that represents this instance.\n\n        Returns\n        -------\n        unique_id : Any\n            a hashable unique ID representing the given parameters.\n        \"\"\"\n        return self.to_immutable_id((self._get_qualified_name(), self.params))\n\n\nMasterType = TypeVar('MasterType', bound=DesignMaster)\n\n\nclass MasterDB(abc.ABC):\n    \"\"\"A database of existing design masters.\n\n    This class keeps track of existing design masters and maintain design dependency hierarchy.\n\n    Parameters\n    ----------\n    lib_name : str\n        the cadence library to put all generated templates in.\n    lib_defs : str\n        generator library definition file path.  
If empty, then assume user supplies\n        Python class directly.\n    name_prefix : str\n        generated master name prefix.\n    name_suffix : str\n        generated master name suffix.\n    \"\"\"\n\n    def __init__(self, lib_name, lib_defs='', name_prefix='', name_suffix=''):\n        # type: (str, str, str, str) -> None\n\n        self._lib_name = lib_name\n        self._name_prefix = name_prefix\n        self._name_suffix = name_suffix\n\n        self._used_cell_names = set()  # type: Set[str]\n        self._importer = ClassImporter(lib_defs) if os.path.isfile(lib_defs) else None\n        self._key_lookup = {}  # type: Dict[Any, Any]\n        self._master_lookup = {}  # type: Dict[Any, DesignMaster]\n        self._rename_dict = {}  # type: Dict[str, str]\n\n    def clear(self):\n        \"\"\"Clear all existing schematic masters.\"\"\"\n        self._key_lookup.clear()\n        self._master_lookup.clear()\n        self._rename_dict.clear()\n\n    @abc.abstractmethod\n    def create_master_instance(self, gen_cls, lib_name, params, used_cell_names, **kwargs):\n        # type: (Type[MasterType], str, Dict[str, Any], Set[str], **Any) -> MasterType\n        \"\"\"Create a new non-finalized master instance.\n\n        This instance is used to determine if we created this instance before.\n\n        Parameters\n        ----------\n        gen_cls : Type[MasterType]\n            the generator Python class.\n        lib_name : str\n            generated instance library name.\n        params : Dict[str, Any]\n            instance parameters dictionary.\n        used_cell_names : Set[str]\n            a set of all used cell names.\n        **kwargs : Any\n            optional arguments for the generator.\n\n        Returns\n        -------\n        master : MasterType\n            the non-finalized generated instance.\n        \"\"\"\n        raise NotImplementedError('not implemented')\n\n    @abc.abstractmethod\n    def create_masters_in_db(self, lib_name, 
content_list, debug=False):\n        # type: (str, Sequence[Any], bool) -> None\n        \"\"\"Create the masters in the design database.\n\n        Parameters\n        ----------\n        lib_name : str\n            library to create the designs in.\n        content_list : Sequence[Any]\n            a list of the master contents.  Must be created in this order.\n        debug : bool\n            True to print debug messages\n        \"\"\"\n        pass\n\n    @property\n    def lib_name(self):\n        # type: () -> str\n        \"\"\"Returns the master library name.\"\"\"\n        return self._lib_name\n\n    @property\n    def cell_prefix(self):\n        # type: () -> str\n        \"\"\"Returns the cell name prefix.\"\"\"\n        return self._name_prefix\n\n    @cell_prefix.setter\n    def cell_prefix(self, new_val):\n        # type: (str) -> None\n        \"\"\"Change the cell name prefix.\"\"\"\n        self._name_prefix = new_val\n\n    @property\n    def cell_suffix(self):\n        # type: () -> str\n        \"\"\"Returns the cell name suffix.\"\"\"\n        return self._name_suffix\n\n    @cell_suffix.setter\n    def cell_suffix(self, new_val):\n        # type: (str) -> None\n        \"\"\"Change the cell name suffix.\"\"\"\n        self._name_suffix = new_val\n\n    @property\n    def used_cell_names(self):\n        # type: () -> Set[str]\n        return self._used_cell_names\n\n    def format_cell_name(self, cell_name):\n        # type: (str) -> str\n        \"\"\"Returns the formatted cell name.\n\n        Parameters\n        ----------\n        cell_name : str\n            the original cell name.\n\n        Returns\n        -------\n        final_name : str\n            the new cell name.\n        \"\"\"\n        cell_name = self._rename_dict.get(cell_name, cell_name)\n        return '%s%s%s' % (self._name_prefix, cell_name, self._name_suffix)\n\n    def append_library(self, lib_name, lib_path):\n        # type: (str, str) -> None\n        \"\"\"Adds 
a new library to the library definition file.\n\n        Parameters\n        ----------\n        lib_name : str\n            name of the library.\n        lib_path : str\n            path to this library.\n        \"\"\"\n        if self._importer is None:\n            raise ValueError('Cannot add generator library; library definition file not specified.')\n\n        self._importer.append_library(lib_name, lib_path)\n\n    def get_library_path(self, lib_name):\n        # type: (str) -> Optional[str]\n        \"\"\"Returns the location of the given library.\n\n        Parameters\n        ----------\n        lib_name : str\n            the library name.\n\n        Returns\n        -------\n        lib_path : Optional[str]\n            the location of the library, or None if library not defined.\n        \"\"\"\n        if self._importer is None:\n            raise ValueError('Cannot get generator library path; '\n                             'library definition file not specified.')\n\n        return self._importer.get_library_path(lib_name)\n\n    def get_generator_class(self, lib_name, cell_name):\n        # type: (str, str) -> Any\n        \"\"\"Returns the corresponding generator Python class.\n\n        Parameters\n        ----------\n        lib_name : str\n            template library name.\n        cell_name : str\n            generator cell name\n\n        Returns\n        -------\n        temp_cls : Any\n            the corresponding Python class.\n        \"\"\"\n        if self._importer is None:\n            raise ValueError('Cannot get generator class; library definition file not specified.')\n\n        return self._importer.get_class(lib_name, cell_name)\n\n    def new_master(self,  # type: MasterDB\n                   lib_name='',  # type: str\n                   cell_name='',  # type: str\n                   params=None,  # type: Optional[Dict[str, Any]]\n                   gen_cls=None,  # type: Optional[Type[MasterType]]\n                   
debug=False,  # type: bool\n                   **kwargs):\n        # type: (...) -> MasterType\n        \"\"\"Create a generator instance.\n\n        Parameters\n        ----------\n        lib_name : str\n            generator library name.\n        cell_name : str\n            generator name\n        params : Optional[Dict[str, Any]]\n            the parameter dictionary.\n        gen_cls : Optional[Type[MasterType]]\n            the generator class to instantiate.  Overrides lib_name and cell_name.\n        debug : bool\n            True to print debug messages.\n        **kwargs :\n            optional arguments for generator.\n\n        Returns\n        -------\n        master : MasterType\n            the generator instance.\n        \"\"\"\n        if params is None:\n            params = {}\n\n        if gen_cls is None:\n            gen_cls = self.get_generator_class(lib_name, cell_name)\n\n        master = self.create_master_instance(gen_cls, self._lib_name, params,\n                                             self._used_cell_names, **kwargs)\n        key = master.key\n\n        if key is None:\n            prelim_key = master.prelim_key\n            if prelim_key in self._key_lookup:\n                key = self._key_lookup[prelim_key]\n                master = self._master_lookup[key]\n                if debug:\n                    print('master cached')\n            else:\n                if debug:\n                    print('finalizing master')\n                # In case master.finalize has a generates a child with the same cell name, add master name now\n                self._used_cell_names.add(master.cell_name)\n                start = time.time()\n                master.finalize()\n                end = time.time()\n\n                key = master.key\n                self._key_lookup[prelim_key] = key\n                if key in self._master_lookup:\n                    master = self._master_lookup[key]\n                    
self._used_cell_names.add(master.cell_name)\n                else:\n                    self.register_master(key, master)\n\n                if debug:\n                    print('finalizing master took %.4g seconds' % (end - start))\n        else:\n            if key in self._master_lookup:\n                master = self._master_lookup[key]\n                if debug:\n                    print('master cached')\n            else:\n                if debug:\n                    print('finalizing master')\n                # In case master.finalize has a generates a child with the same cell name, add master name now\n                self._used_cell_names.add(master.cell_name)\n                start = time.time()\n                master.finalize()\n                end = time.time()\n                self.register_master(key, master)\n                if debug:\n                    print('finalizing master took %.4g seconds' % (end - start))\n\n        return master\n\n    def register_master(self, key, master):\n        self._master_lookup[key] = master\n        self._used_cell_names.add(master.cell_name)\n\n    def instantiate_masters(self,\n                            master_list,  # type: Sequence[DesignMaster]\n                            name_list=None,  # type: Optional[Sequence[Optional[str]]]\n                            lib_name='',  # type: str\n                            debug=False,  # type: bool\n                            rename_dict=None,  # type: Optional[Dict[str, str]]\n                            ):\n        # type: (...) -> None\n        \"\"\"create all given masters in the database.\n\n        Parameters\n        ----------\n        master_list : Sequence[DesignMaster]\n            list of masters to instantiate.\n        name_list : Optional[Sequence[Optional[str]]]\n            list of master cell names.  If not given, default names will be used.\n        lib_name : str\n            Library to create the masters in.  
If empty or None, use default library.\n        debug : bool\n            True to print debugging messages\n        rename_dict : Optional[Dict[str, str]]\n            optional master cell renaming dictionary.\n        \"\"\"\n        if name_list is None:\n            name_list = [None] * len(master_list)  # type: Sequence[Optional[str]]\n        else:\n            if len(name_list) != len(master_list):\n                raise ValueError(\"Master list and name list length mismatch.\")\n\n        # configure renaming dictionary.  Verify that renaming dictionary is one-to-one.\n        rename = self._rename_dict\n        rename.clear()\n        reverse_rename = {}  # type: Dict[str, str]\n        if rename_dict:\n            for key, val in rename_dict.items():\n                if key != val:\n                    if val in reverse_rename:\n                        raise ValueError('Both %s and %s are renamed '\n                                         'to %s' % (key, reverse_rename[val], val))\n                    rename[key] = val\n                    reverse_rename[val] = key\n\n        for master, name in zip(master_list, name_list):\n            if name is not None and name != master.cell_name:\n                cur_name = master.cell_name\n                if name in reverse_rename:\n                    raise ValueError('Both %s and %s are renamed '\n                                     'to %s' % (cur_name, reverse_rename[name], name))\n                rename[cur_name] = name\n                reverse_rename[name] = cur_name\n\n                if name in self._used_cell_names:\n                    # name is an already used name, so we need to rename it to something else\n                    name2 = _get_unique_name(name, self._used_cell_names, reverse_rename)\n                    rename[name] = name2\n                    reverse_rename[name2] = name\n\n        if debug:\n            print('Retrieving master contents')\n\n        # use ordered dict so that children 
are created before parents.\n        info_dict = OrderedDict()  # type: Dict[str, DesignMaster]\n        start = time.time()\n        for master, top_name in zip(master_list, name_list):\n            self._instantiate_master_helper(info_dict, master)\n        end = time.time()\n\n        if not lib_name:\n            lib_name = self.lib_name\n        if not lib_name:\n            raise ValueError('master library name is not specified.')\n\n        content_list = [master.get_content(lib_name, self.format_cell_name)\n                        for master in info_dict.values()]\n\n        if debug:\n            print('master content retrieval took %.4g seconds' % (end - start))\n\n        self.create_masters_in_db(lib_name, content_list, debug=debug)\n\n    def _instantiate_master_helper(self, info_dict, master):\n        # type: (Dict[str, DesignMaster], DesignMaster) -> None\n        \"\"\"Helper method for batch_layout().\n\n        Parameters\n        ----------\n        info_dict : Dict[str, DesignMaster]\n            dictionary from existing master cell name to master objects.\n        master : DesignMaster\n            the master object to create.\n        \"\"\"\n        # get template master for all children\n        for master_key in master.children:\n            child_temp = self._master_lookup[master_key]\n            if child_temp.cell_name not in info_dict:\n                self._instantiate_master_helper(info_dict, child_temp)\n\n        # get template master for this cell.\n        info_dict[master.cell_name] = self._master_lookup[master.key]\n"
  },
  {
    "path": "bag/util/immutable.py",
    "content": "\"\"\"This module defines various immutable and hashable data types.\n\"\"\"\n\nfrom __future__ import annotations\n\nfrom typing import TypeVar, Any, Generic, Dict, Iterable, Tuple, Union, Optional, overload\n\nimport sys\nimport bisect\nfrom collections import Hashable, Mapping, Sequence\n\nT = TypeVar('T')\nU = TypeVar('U')\nImmutableType = Union[None, Hashable, Tuple[Hashable, ...]]\n\n\ndef combine_hash(a: int, b: int) -> int:\n    \"\"\"Combine the two given hash values.\n\n    Parameter\n    ---------\n    a : int\n        the first hash value.\n    b : int\n        the second hash value.\n\n    Returns\n    -------\n    hash : int\n        the combined hash value.\n    \"\"\"\n    # algorithm taken from boost::hash_combine\n    return sys.maxsize & (a ^ (b + 0x9e3779b9 + (a << 6) + (a >> 2)))\n\n\nclass ImmutableList(Hashable, Sequence, Generic[T]):\n    \"\"\"An immutable homogeneous list.\"\"\"\n\n    def __init__(self, values: Optional[Sequence[T]] = None) -> None:\n        if values is None:\n            self._content = []\n            self._hash = 0\n        elif isinstance(values, ImmutableList):\n            self._content = values._content\n            self._hash = values._hash\n        else:\n            self._content = values\n            self._hash = 0\n            for v in values:\n                self._hash = combine_hash(self._hash, hash(v))\n\n    @classmethod\n    def sequence_equal(cls, a: Sequence[T], b: Sequence[T]) -> bool:\n        if len(a) != len(b):\n            return False\n        for av, bv in zip(a, b):\n            if av != bv:\n                return False\n        return True\n\n    def __repr__(self) -> str:\n        return repr(self._content)\n\n    def __eq__(self, other: Any) -> bool:\n        return (isinstance(other, ImmutableList) and self._hash == other._hash and\n                self.sequence_equal(self._content, other._content))\n\n    def __hash__(self) -> int:\n        return self._hash\n\n    def 
__bool__(self) -> bool:\n        return len(self) > 0\n\n    def __len__(self) -> int:\n        return len(self._content)\n\n    def __iter__(self) -> Iterable[T]:\n        return iter(self._content)\n\n    @overload\n    def __getitem__(self, idx: int) -> T: ...\n    @overload\n    def __getitem__(self, idx: slice) -> ImmutableList[T]: ...\n\n    def __getitem__(self, idx) -> T:\n        if isinstance(idx, int):\n            return self._content[idx]\n        return ImmutableList(self._content[idx])\n\n    def __contains__(self, val: Any) -> bool:\n        return val in self._content\n\n\nclass ImmutableSortedDict(Hashable, Mapping, Generic[T, U]):\n    \"\"\"An immutable dictionary with sorted keys.\"\"\"\n\n    def __init__(self,\n                 table: Optional[Mapping[T, Any]] = None) -> None:\n        if table is not None:\n            if isinstance(table, ImmutableSortedDict):\n                self._keys = table._keys\n                self._vals = table._vals\n                self._hash = table._hash\n            else:\n                self._keys = ImmutableList(sorted(table.keys()))\n                self._vals = ImmutableList([to_immutable(table[k]) for k in self._keys])\n                self._hash = combine_hash(hash(self._keys), hash(self._vals))\n        else:\n            self._keys = ImmutableList([])\n            self._vals = ImmutableList([])\n            self._hash = combine_hash(hash(self._keys), hash(self._vals))\n\n    def __repr__(self) -> str:\n        return repr(list(zip(self._keys, self._vals)))\n\n    def __eq__(self, other: Any) -> bool:\n        return (isinstance(other, ImmutableSortedDict) and\n                self._hash == other._hash and\n                self._keys == other._keys and\n                self._vals == other._vals)\n\n    def __hash__(self) -> int:\n        return self._hash\n\n    def __bool__(self) -> bool:\n        return len(self) > 0\n\n    def __len__(self) -> int:\n        return len(self._keys)\n\n    def 
__iter__(self) -> Iterable[T]:\n        return iter(self._keys)\n\n    def __contains__(self, item: Any) -> bool:\n        idx = bisect.bisect_left(self._keys, item)\n        return idx != len(self._keys) and self._keys[idx] == item\n\n    def __getitem__(self, item: T) -> U:\n        idx = bisect.bisect_left(self._keys, item)\n        if idx == len(self._keys) or self._keys[idx] != item:\n            raise KeyError('Key not found: {}'.format(item))\n        return self._vals[idx]\n\n    def get(self, item: T, default: Optional[U] = None) -> Optional[U]:\n        idx = bisect.bisect_left(self._keys, item)\n        if idx == len(self._keys) or self._keys[idx] != item:\n            return default\n        return self._vals[idx]\n\n    def keys(self) -> Iterable[T]:\n        return iter(self._keys)\n\n    def values(self) -> Iterable[U]:\n        return iter(self._vals)\n\n    def items(self) -> Iterable[Tuple[T, U]]:\n        return zip(self._keys, self._vals)\n\n    def copy(self, append: Optional[Dict[T, Any]] = None) -> ImmutableSortedDict[T, U]:\n        if append is None:\n            return self.__class__(self)\n        else:\n            tmp = self.to_dict()\n            tmp.update(append)\n            return self.__class__(tmp)\n\n    def to_dict(self) -> Dict[T, U]:\n        return dict(zip(self._keys, self._vals))\n\n\nParam = ImmutableSortedDict[str, Any]\n\n\ndef to_immutable(obj: Any) -> ImmutableType:\n    \"\"\"Convert the given Python object into an immutable type.\"\"\"\n    if obj is None:\n        return obj\n    if isinstance(obj, Hashable):\n        # gets around cases of tuple of un-hashable types.\n        try:\n            hash(obj)\n            return obj\n        except TypeError:\n            pass\n    if isinstance(obj, tuple):\n        return tuple((to_immutable(v) for v in obj))\n    if isinstance(obj, list):\n        return ImmutableList([to_immutable(v) for v in obj])\n    if isinstance(obj, set):\n        return 
ImmutableList([to_immutable(v) for v in sorted(obj)])\n    if isinstance(obj, dict):\n        return ImmutableSortedDict(obj)\n\n    raise ValueError('Cannot convert the following object to immutable type: {}'.format(obj))\n"
  },
  {
    "path": "bag/util/interval.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This module provides data structure that keeps track of intervals.\n\"\"\"\n\nfrom typing import List, Optional, Tuple, Any, Iterable, Generator\n\nimport bisect\n\n\nclass IntervalSet(object):\n    \"\"\"A data structure that keeps track of disjoint 1D integer intervals.\n\n    Each interval has a value associated with it.  If not specified, the value defaults to None.\n\n    Parameters\n    ----------\n    intv_list : Optional[Iterable[Tuple[int, int]]]\n        the sorted initial interval list.\n    val_list : Optional[Iterable[Any]]\n        the initial values list.\n    \"\"\"\n\n    def __init__(self, intv_list=None, val_list=None):\n        # type: (Optional[Iterable[Tuple[int, int]]], Optional[Iterable[Any]]) -> None\n        self._start_list = []  # type: List[int]\n        self._end_list = []  # type: List[int]\n        if intv_list is None:\n            self._val_list = []  # type: List[Any]\n        else:\n            for v0, v1 in intv_list:\n                self._start_list.append(v0)\n                self._end_list.append(v1)\n            if val_list is None:\n                self._val_list = [None] * len(self._start_list)\n            else:\n                self._val_list = list(val_list)\n\n    def __contains__(self, key):\n        # type: (Tuple[int, int]) -> bool\n        \"\"\"Returns True if this IntervalSet contains the given interval.\n\n        Parameters\n        ----------\n        key : Tuple[int, int]\n            the interval to test.\n\n        Returns\n        -------\n        contains : bool\n            True if this IntervalSet contains the given interval.\n        \"\"\"\n        idx = self._get_first_overlap_idx(key)\n        return idx >= 0 and key[0] == self._start_list[idx] and key[1] == self._end_list[idx]\n\n    def __getitem__(self, intv):\n        # type: (Tuple[int, int]) -> Any\n        \"\"\"Returns the value associated with the given interval.\n\n        Raises KeyError 
if the given interval is not in this IntervalSet.\n\n        Parameters\n        ----------\n        intv : Tuple[int, int]\n            the interval to query.\n\n        Returns\n        -------\n        val : Any\n            the value associated with the given interval.\n        \"\"\"\n        idx = self._get_first_overlap_idx(intv)\n        if idx < 0 or intv[0] != self._start_list[idx] or intv[1] != self._end_list[idx]:\n            raise KeyError('Invalid interval: %s' % repr(intv))\n        return self._val_list[idx]\n\n    def __setitem__(self, intv, value):\n        # type: (Tuple[int, int], Any) -> None\n        \"\"\"Update the value associated with the given interval.\n\n        Raises KeyError if the given interval is not in this IntervalSet.\n\n        Parameters\n        ----------\n        intv : Tuple[int, int]\n            the interval to update.\n        value : Any\n            the new value.\n        \"\"\"\n        idx = self._get_first_overlap_idx(intv)\n        if idx < 0:\n            self.add(intv, value)\n        elif intv[0] != self._start_list[idx] or intv[1] != self._end_list[idx]:\n            raise KeyError('Invalid interval: %s' % repr(intv))\n        else:\n            self._val_list[idx] = value\n\n    def __iter__(self):\n        # type: () -> Iterable[Tuple[int, int]]\n        \"\"\"Iterates over intervals in this IntervalSet in increasing order.\n\n        Yields\n        ------\n        intv : Tuple[int, int]\n            the next interval.\n        \"\"\"\n        return zip(self._start_list, self._end_list)\n\n    def __len__(self):\n        # type: () -> int\n        \"\"\"Returns the number of intervals in this IntervalSet.\n\n        Returns\n        -------\n        length : int\n            number of intervals in this set.\n        \"\"\"\n        return len(self._start_list)\n\n    def get_start(self):\n        # type: () -> int\n        \"\"\"Returns the start of the first interval.\n\n        Returns\n        
-------\n        start : int\n            the start of the first interval.\n        \"\"\"\n        return self._start_list[0]\n\n    def get_end(self):\n        # type: () -> int\n        \"\"\"Returns the end of the last interval.\n\n        Returns\n        -------\n        end : int\n            the end of the last interval.\n        \"\"\"\n        return self._end_list[-1]\n\n    def get_interval(self, idx):\n        # type: (int) -> Tuple[int, int]\n        if idx < 0:\n            idx += len(self._start_list)\n        if idx < 0:\n            raise IndexError('Invalid index: %d' % idx)\n        if idx >= len(self._start_list):\n            raise IndexError('Invalid index: %d' % idx)\n\n        return self._start_list[idx], self._end_list[idx]\n\n    def copy(self):\n        # type: () -> IntervalSet\n        \"\"\"Create a copy of this interval set.\n\n        Returns\n        -------\n        intv_set : IntervalSet\n            a copy of this IntervalSet.\n        \"\"\"\n        return IntervalSet(intv_list=list(zip(self._start_list, self._end_list)),\n                           val_list=self._val_list)\n\n    def _get_first_overlap_idx(self, intv, abut=False):\n        # type: (Tuple[int, int], bool) -> int\n        \"\"\"Returns the index of the first interval that overlaps with the given interval.\n\n        Parameters\n        ----------\n        intv : Tuple[int, int]\n            the given interval.\n        abut : bool\n            True to return abutted interval too.\n\n        Returns\n        -------\n        idx : int\n            the index of the overlapping interval.  
If no overlapping intervals are\n            found, -(idx + 1) is returned, where idx is the index to insert the interval.\n        \"\"\"\n        start, end = intv\n        if not self._start_list:\n            return -1\n        # find the smallest start index greater than start\n        idx = bisect.bisect_right(self._start_list, start)\n        if idx == 0:\n            # all interval's starting point is greater than start\n            test = self._start_list[0]\n            return 0 if test < end or (abut and test == end) else -1\n\n        # interval where start index is less than or equal to start\n        test_idx = idx - 1\n        test = self._end_list[test_idx]\n        if start < test or (abut and start == test):\n            # start is covered by the interval; overlaps.\n            return test_idx\n        elif idx < len(self._start_list) and \\\n                (self._start_list[idx] < end or (abut and self._start_list[idx] == end)):\n            # _start_list[idx] covered by interval.\n            return idx\n        else:\n            # no overlap interval found\n            return -(idx + 1)\n\n    def _get_last_overlap_idx(self, intv, abut=False):\n        # type: (Tuple[int, int], bool) -> int\n        \"\"\"Returns the index of the last interval that overlaps with the given interval.\n\n        Parameters\n        ----------\n        intv : Tuple[int, int]\n            the given interval.\n        abut : bool\n            True to return abutted interval too.\n\n        Returns\n        -------\n        idx : int\n            the index of the overlapping interval.  
If no overlapping intervals are\n            found, -(idx + 1) is returned, where idx is the index to insert the interval.\n        \"\"\"\n        start, end = intv\n        if not self._start_list:\n            return -1\n        # find the smallest start index greater than end\n        idx = bisect.bisect_right(self._start_list, end)\n        if idx == 0:\n            # all interval's starting point is greater than end\n            return -1\n\n        # interval where start index is less than or equal to end\n        test_idx = idx - 1\n        test = self._end_list[test_idx]\n        if test > start or (abut and test == start):\n            return test_idx\n        return -(idx + 1)\n\n    def has_overlap(self, intv):\n        # type: (Tuple[int, int]) -> bool\n        \"\"\"Returns True if the given interval overlaps at least one interval in this set.\n\n        Parameters\n        ----------\n        intv : Tuple[int, int]\n            the given interval.\n\n        Returns\n        -------\n        has_overlap : bool\n            True if there is at least one interval in this set that overlaps with the given one.\n        \"\"\"\n        return self._get_first_overlap_idx(intv) >= 0\n\n    def has_single_cover(self, intv):\n        # type: (Tuple[int, int]) -> bool\n        \"\"\"Returns True if the given interval is completed covered by a single interval.\"\"\"\n        idx = self._get_first_overlap_idx(intv)\n        if idx < 0:\n            return False\n        return self._start_list[idx] <= intv[0] and self._end_list[idx] >= intv[1]\n\n    def remove(self, intv):\n        # type: (Tuple[int, int]) -> bool\n        \"\"\"Removes the given interval from this IntervalSet.\n\n        Parameters\n        ----------\n        intv : Tuple[int, int]\n            the interval to remove.\n\n        Returns\n        -------\n        success : bool\n            True if the given interval is found and removed.  
False otherwise.\n        \"\"\"\n        idx = self._get_first_overlap_idx(intv)\n        if idx < 0:\n            return False\n        if intv[0] == self._start_list[idx] and intv[1] == self._end_list[idx]:\n            del self._start_list[idx]\n            del self._end_list[idx]\n            del self._val_list[idx]\n            return True\n        return False\n\n    def get_intersection(self, other):\n        # type: (IntervalSet) -> IntervalSet\n        \"\"\"Returns the intersection of two IntervalSets.\n\n        the new IntervalSet will have all values set to None.\n\n        Parameters\n        ----------\n        other : IntervalSet\n            the other IntervalSet.\n\n        Returns\n        -------\n        intersection : IntervalSet\n            a new IntervalSet containing all intervals present in both sets.\n        \"\"\"\n        idx1 = idx2 = 0\n        len1 = len(self._start_list)\n        len2 = len(other._start_list)\n        intvs = []\n        while idx1 < len1 and idx2 < len2:\n            intv1 = self._start_list[idx1], self._end_list[idx1]\n            intv2 = other._start_list[idx2], other._end_list[idx2]\n            test = max(intv1[0], intv2[0]), min(intv1[1], intv2[1])\n            if test[1] > test[0]:\n                intvs.append(test)\n            if intv1[1] < intv2[1]:\n                idx1 += 1\n            elif intv2[1] < intv1[1]:\n                idx2 += 1\n            else:\n                idx1 += 1\n                idx2 += 1\n\n        return IntervalSet(intv_list=intvs)\n\n    def get_complement(self, total_intv):\n        # type: (Tuple[int, int]) -> IntervalSet\n        \"\"\"Returns a new IntervalSet that's the complement of this one.\n\n        The new IntervalSet will have all values set to None.\n\n        Parameters\n        ----------\n        total_intv : Tuple[int, int]\n            the universal interval.  
All intervals in this IntervalSet must be a subinterval\n            of the universal interval.\n\n        Returns\n        -------\n        complement : IntervalSet\n            the complement of this IntervalSet.\n        \"\"\"\n        return IntervalSet(intv_list=self.complement_iter(total_intv))\n\n    def complement_iter(self, total_intv):\n        # type: (Tuple[int, int]) -> Generator[Tuple[int, int], None, None]\n        \"\"\"Iterate over all intervals that are the complement of this one.\"\"\"
----------\n        intv : Tuple[int, int]\n            the interval to add.\n        val : Any\n            the value associated with the given interval.\n        merge : bool\n            If true, the given interval will be merged with any existing intervals\n            that overlaps with it.  The merged interval will have the given value.\n        abut : bool\n            True to count merge abutting intervals.\n\n        Returns\n        -------\n        success : bool\n            True if the given interval is added.\n        \"\"\"\n        abut = abut and merge\n        bidx = self._get_first_overlap_idx(intv, abut=abut)\n        if bidx >= 0:\n            if not merge:\n                return False\n            eidx = self._get_last_overlap_idx(intv, abut=abut)\n            new_start = min(self._start_list[bidx], intv[0])\n            new_end = max(self._end_list[eidx], intv[1])\n            del self._start_list[bidx:eidx + 1]\n            del self._end_list[bidx:eidx + 1]\n            del self._val_list[bidx:eidx + 1]\n            self._start_list.insert(bidx, new_start)\n            self._end_list.insert(bidx, new_end)\n            self._val_list.insert(bidx, val)\n            return True\n        else:\n            # insert interval\n            idx = -bidx - 1\n            self._start_list.insert(idx, intv[0])\n            self._end_list.insert(idx, intv[1])\n            self._val_list.insert(idx, val)\n            return True\n\n    def subtract(self, intv):\n        # type: (Tuple[int, int]) -> List[Tuple[int, int]]\n        \"\"\"Subtract the given interval from this IntervalSet.\n\n        Parameters\n        ----------\n        intv : Tuple[int, int]\n            the interval to subtract.\n\n        Returns\n        -------\n        remaining_intvs : List[Tuple[int, int]]\n            intervals created from subtraction.\n        \"\"\"\n        bidx = self._get_first_overlap_idx(intv)\n        insert_intv = []\n        if bidx >= 0:\n            
eidx = self._get_last_overlap_idx(intv)\n            insert_val = []\n            if self._start_list[bidx] < intv[0]:\n                insert_intv.append((self._start_list[bidx], intv[0]))\n                insert_val.append(self._val_list[bidx])\n            if intv[1] < self._end_list[eidx]:\n                insert_intv.append((intv[1], self._end_list[eidx]))\n                insert_val.append(self._val_list[eidx])\n            del self._start_list[bidx:eidx + 1]\n            del self._end_list[bidx:eidx + 1]\n            del self._val_list[bidx:eidx + 1]\n            insert_idx = bidx\n            for (new_start, new_end), val in zip(insert_intv, insert_val):\n                self._start_list.insert(insert_idx, new_start)\n                self._end_list.insert(insert_idx, new_end)\n                self._val_list.insert(insert_idx, val)\n                insert_idx += 1\n\n        return insert_intv\n\n    def items(self):\n        # type: () -> Iterable[Tuple[Tuple[int, int], Any]]\n        \"\"\"Iterates over intervals and values in this IntervalSet\n\n        The intervals are returned in increasing order.\n\n        Yields\n        ------\n        intv : Tuple[Tuple[int, int]\n            the interval.\n        val : Any\n            the value associated with the interval.\n        \"\"\"\n        return zip(self.__iter__(), self._val_list)\n\n    def intervals(self):\n        # type: () -> Iterable[Tuple[int, int]]\n        \"\"\"Iterates over intervals in this IntervalSet\n\n        The intervals are returned in increasing order.\n\n        Yields\n        ------\n        intv : Tuple[int, int]\n            the interval.\n        \"\"\"\n        return self.__iter__()\n\n    def values(self):\n        # type: () -> Iterable[Any]\n        \"\"\"Iterates over values in this IntervalSet\n\n        The values correspond to intervals in increasing order.\n\n        Yields\n        ------\n        val : Any\n            the value.\n        \"\"\"\n        return 
self._val_list.__iter__()\n\n    def overlap_items(self, intv):\n        # type: (Tuple[int, int]) -> Generator[Tuple[Tuple[int, int], Any], None, None]\n        \"\"\"Iterates over intervals and values overlapping the given interval.\n\n        Parameters\n        ----------\n        intv : Tuple[int, int]\n            the interval.\n\n        Yields\n        -------\n        ovl_intv : Tuple[int, int]\n            the overlapping interval.\n        val : Any\n            value associated with ovl_intv.\n        \"\"\"\n        sidx = self._get_first_overlap_idx(intv)\n        if sidx >= 0:\n            eidx = self._get_last_overlap_idx(intv) + 1\n            for idx in range(sidx, eidx):\n                yield (self._start_list[idx], self._end_list[idx]), self._val_list[idx]\n\n    def overlap_intervals(self, intv):\n        # type: (Tuple[int, int]) -> Generator[Tuple[int, int], None, None]\n        \"\"\"Iterates over intervals overlapping the given interval.\n\n        Parameters\n        ----------\n        intv : Tuple[int, int]\n            the interval.\n\n        Yields\n        -------\n        ovl_intv : Tuple[int, int]\n            the overlapping interval.\n        \"\"\"\n        sidx = self._get_first_overlap_idx(intv)\n        if sidx >= 0:\n            eidx = self._get_last_overlap_idx(intv) + 1\n            for idx in range(sidx, eidx):\n                yield self._start_list[idx], self._end_list[idx]\n\n    def overlap_values(self, intv):\n        # type: (Tuple[int, int]) -> Generator[Any, None, None]\n        \"\"\"Iterates over values of intervals overlapping the given interval.\n\n        Parameters\n        ----------\n        intv : Tuple[int, int]\n            the interval.\n\n        Yields\n        -------\n        ovl_intv : Tuple[int, int]\n            the overlapping interval.\n        \"\"\"\n        sidx = self._get_first_overlap_idx(intv)\n        if sidx >= 0:\n            eidx = self._get_last_overlap_idx(intv) + 1\n            
for idx in range(sidx, eidx):\n                yield self._val_list[idx]\n\n    def get_first_overlap_item(self, intv):\n        # type: (Tuple[int, int]) -> Optional[Tuple[Tuple[int, int], Any]]\n        \"\"\"Returns the first item with interval that overlaps the given one.\"\"\"\n        idx = self._get_first_overlap_idx(intv)\n        if idx < 0:\n            return None\n        return (self._start_list[idx], self._end_list[idx]), self._val_list[idx]\n\n    def transform(self, scale=1, shift=0):\n        # type: (int, int) -> IntervalSet\n        \"\"\"Return a new IntervalSet under the given transformation.\n\n        Parameters\n        ----------\n        scale : int\n            multiple all interval values by this scale.  Either 1 or -1.\n        shift : int\n            add this amount to all intervals.\n\n        Returns\n        -------\n        intv_set : IntervalSet\n            the transformed IntervalSet.\n        \"\"\"\n        if scale < 0:\n            new_start = [-v + shift for v in reversed(self._end_list)]\n            new_end = [-v + shift for v in reversed(self._start_list)]\n            new_val = list(reversed(self._val_list))\n        else:\n            new_start = [v + shift for v in self._start_list]\n            new_end = [v + shift for v in self._end_list]\n            new_val = list(self._val_list)\n\n        result = self.__class__.__new__(self.__class__)\n        result._start_list = new_start\n        result._end_list = new_end\n        result._val_list = new_val\n\n        return result\n"
  },
  {
    "path": "bag/util/parse.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This module defines parsing utility methods.\n\"\"\"\n\nimport ast\n\n\nclass ExprVarScanner(ast.NodeVisitor):\n    \"\"\"\n    This node visitor collects all variable names found in the\n    AST, and excludes names of functions.  Variables having\n    dotted names are not supported.\n    \"\"\"\n    def __init__(self):\n        self.varnames = set()\n\n    # noinspection PyPep8Naming\n    def visit_Name(self, node):\n        self.varnames.add(node.id)\n\n    # noinspection PyPep8Naming\n    def visit_Call(self, node):\n        if not isinstance(node.func, ast.Name):\n            self.visit(node.func)\n        for arg in node.args:\n            self.visit(arg)\n\n    # noinspection PyPep8Naming\n    def visit_Attribute(self, node):\n        # ignore attributes\n        pass\n\n\ndef get_variables(expr):\n    \"\"\"Parses the given Python expression and return a list of all variables.\n\n    Parameters\n    ----------\n    expr : str\n        An expression string that we want to parse for variable names.\n\n    Returns\n    -------\n    var_list : list[str]\n        Names of variables from the given expression.\n    \"\"\"\n    root = ast.parse(expr, mode='exec')\n    scanner = ExprVarScanner()\n    scanner.visit(root)\n    return list(scanner.varnames)\n"
  },
  {
    "path": "bag/util/search.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This module provides search related utilities.\n\"\"\"\n\nfrom typing import Optional, Callable, Any\n\nfrom collections import namedtuple\n\nMinCostResult = namedtuple('MinCostResult', ['x', 'xmax', 'vmax', 'nfev'])\n\n\nclass BinaryIterator(object):\n    \"\"\"A class that performs binary search over integers.\n\n    This class supports both bounded or unbounded binary search, and\n    you can also specify a step size.\n\n    Parameters\n    ----------\n    low : int\n        the lower bound (inclusive).\n    high : Optional[int]\n        the upper bound (exclusive).  None for unbounded binary search.\n    step : int\n        the step size.  All return values will be low + N * step\n    \"\"\"\n\n    def __init__(self, low, high, step=1):\n        # type: (int, Optional[int], int) -> None\n\n        if not isinstance(low, int) or not isinstance(step, int):\n            raise ValueError('low and step must be integers.')\n\n        self._offset = low\n        self._step = step\n        self._high = None  # type: Optional[int]\n        self._low = 0  # type: int\n        self._current = 0  # type: int\n        self._save_marker = None  # type: Optional[int]\n\n        if high is not None:\n            if not isinstance(high, int):\n                raise ValueError('high must be None or integer.')\n\n            nmax = (high - low) // step\n            if low + step * nmax < high:\n                nmax += 1\n            self._high = nmax\n            self._current = (self._low + self._high) // 2\n        else:\n            self._high = None\n            self._current = self._low\n\n        self._save_marker = None\n        self._save_info = None\n\n    def set_current(self, val):\n        # type: (int) -> None\n        \"\"\"Set the value of the current marker.\"\"\"\n        if (val - self._offset) % self._step != 0:\n            raise ValueError('value %d is not multiple of step size.' 
% val)\n        self._current = (val - self._offset) // self._step\n\n    def has_next(self):\n        # type: () -> bool\n        \"\"\"returns True if this iterator is not finished yet.\"\"\"\n        return self._high is None or self._low < self._high\n\n    def get_next(self):\n        # type: () -> int\n        \"\"\"Returns the next value to look at.\"\"\"\n        return self._current * self._step + self._offset\n\n    def up(self):\n        # type: () -> None\n        \"\"\"Increment this iterator.\"\"\"\n        self._low = self._current + 1\n\n        if self._high is not None:\n            self._current = (self._low + self._high) // 2\n        else:\n            if self._current > 0:\n                self._current *= 2\n            else:\n                self._current = 1\n\n    def down(self):\n        # type: () -> None\n        \"\"\"Decrement this iterator.\"\"\"\n        self._high = self._current\n        self._current = (self._low + self._high) // 2\n\n    def save(self):\n        # type: () -> None\n        \"\"\"Save the current index.\"\"\"\n        self._save_marker = self._current\n\n    def save_info(self, info):\n        # type: (Any) -> None\n        \"\"\"Save current information.\"\"\"\n        self.save()\n        self._save_info = info\n\n    def get_last_save(self):\n        # type: () -> Optional[int]\n        \"\"\"Returns the last saved index.\"\"\"\n        if self._save_marker is None:\n            return None\n        return self._save_marker * self._step + self._offset\n\n    def get_last_save_info(self):\n        # type: () -> Any\n        \"\"\"Return last save information.\"\"\"\n        return self._save_info\n\n\nclass FloatBinaryIterator(object):\n    \"\"\"A class that performs binary search over floating point numbers.\n\n    This class supports both bounded or unbounded binary search, and terminates\n    when we can guarantee the given error tolerance.\n\n    Parameters\n    ----------\n    low : float\n        the 
lower bound.\n    high : Optional[float]\n        the upper bound.  None for unbounded binary search.\n    tol : float\n        we will guarantee that the final solution will be within this\n        tolerance.\n    search_step : float\n        for unbounded binary search, this is the initial step size when\n        searching for upper bound.\n    \"\"\"\n\n    def __init__(self, low, high, tol=1.0, search_step=1.0):\n        # type: (float, Optional[float], float, float) -> None\n        self._offset = low\n        self._tol = tol\n        self._high = None  # type: Optional[float]\n        self._low = 0.0  # type: float\n        self._search_step = search_step\n        self._save_marker = None  # type: Optional[float]\n\n        if high is not None:\n            self._high = high - low\n            self._current = self._high / 2\n        else:\n            self._high = None\n            self._current = 0\n\n        self._save_marker = None\n        self._save_info = None\n\n    def has_next(self):\n        # type: () -> bool\n        \"\"\"returns True if this iterator is not finished yet.\"\"\"\n        return self._high is None or self._low + 2 * self._tol < self._high\n\n    def get_next(self):\n        # type: () -> float\n        \"\"\"Returns the next value to look at.\"\"\"\n        return self._current + self._offset\n\n    def up(self):\n        # type: () -> None\n        \"\"\"Increment this iterator.\"\"\"\n        self._low = self._current\n\n        if self._high is not None:\n            self._current = (self._low + self._high) / 2\n        else:\n            if self._current != 0:\n                self._current *= 2\n            else:\n                self._current = self._search_step\n\n    def down(self):\n        # type: () -> None\n        \"\"\"Decrement this iterator.\"\"\"\n        self._high = self._current\n        self._current = (self._low + self._high) / 2\n\n    def save(self):\n        # type: () -> None\n        \"\"\"Save the 
current index\"\"\"\n        self._save_marker = self._current\n\n    def save_info(self, info):\n        # type: (Any) -> None\n        \"\"\"Save current information.\"\"\"\n        self.save()\n        self._save_info = info\n\n    def get_last_save(self):\n        # type: () -> Optional[float]\n        \"\"\"Returns the last saved index.\"\"\"\n        if self._save_marker is None:\n            return None\n        return self._save_marker + self._offset\n\n    def get_last_save_info(self):\n        # type: () -> Any\n        \"\"\"Return last save information.\"\"\"\n        return self._save_info\n\n\ndef minimize_cost_binary(f, vmin, start=0, stop=None, step=1, save=None, nfev=0):\n    # type: (Callable[[int], float], float, int, Optional[int], int, Optional[int], int) -> MinCostResult\n    \"\"\"Minimize cost given minimum output constraint using binary search.\n\n    Given discrete function f, find the minimum integer x such that f(x) >= vmin using binary search.\n\n    This algorithm only works if f is monotonically increasing, or if f monontonically increases\n    then monontonically decreases, but stop is given and f(stop) >= vmin.\n\n    Parameters\n    ----------\n    f : Callable[[int], float]\n        a function that takes a single integer and output a scalar value.  Must monotonically\n        increase then monotonically decrease.\n    vmin : float\n        the minimum output value.\n    start : int\n        the input lower bound.\n    stop : Optional[int]\n        the input upper bound.  Use None for unbounded binary search.\n    step : int\n        the input step.  
function will only be evaulated at the points start + step * N\n    save : Optional[int]\n        If not none, this value will be returned if no solution is found.\n    nfev : int\n        number of function calls already made.\n\n    Returns\n    -------\n    result : MinCostResult\n        the MinCostResult named tuple, with attributes:\n\n        x : Optional[int]\n            the minimum integer such that f(x) >= vmin.  If no such x exists, this will be None.\n        nfev : int\n            total number of function calls made.\n\n    \"\"\"\n    bin_iter = BinaryIterator(start, stop, step=step)\n    while bin_iter.has_next():\n        x_cur = bin_iter.get_next()\n        v_cur = f(x_cur)\n        nfev += 1\n\n        if v_cur >= vmin:\n            save = x_cur\n            bin_iter.down()\n        else:\n            bin_iter.up()\n    return MinCostResult(x=save, xmax=None, vmax=None, nfev=nfev)\n\n\ndef minimize_cost_golden(f, vmin, offset=0, step=1, maxiter=1000):\n    # type: (Callable[[int], float], float, int, int, Optional[int]) -> MinCostResult\n    \"\"\"Minimize cost given minimum output constraint using golden section/binary search.\n\n    Given discrete function f that monotonically increases then monotonically decreases,\n    find the minimum integer x such that f(x) >= vmin.\n\n    This method uses Fibonacci search to find the upper bound of x.  If the upper bound\n    is found, a binary search is performed in the interval to find the solution.  If\n    vmin is close to the maximum of f, a golden section search is performed to attempt\n    to find x.\n\n    Parameters\n    ----------\n    f : Callable[[int], float]\n        a function that takes a single integer and output a scalar value.  Must monotonically\n        increase then monotonically decrease.\n    vmin : float\n        the minimum output value.\n    offset : int\n        the input lower bound.  We will for x in the range [offset, infinity).\n    step : int\n        the input step.  
function will only be evaulated at the points offset + step * N\n    maxiter : Optional[int]\n        maximum number of iterations to perform.  If None, will run indefinitely.\n\n    Returns\n    -------\n    result : MinCostResult\n        the MinCostResult named tuple, with attributes:\n\n        x : Optional[int]\n            the minimum integer such that f(x) >= vmin.  If no such x exists, this will be None.\n        xmax : Optional[int]\n            the value at which f achieves its maximum.  This is set only if x is None\n        vmax : Optional[float]\n            the maximum value of f.  This is set only if x is None.\n        nfev : int\n            total number of function calls made.\n    \"\"\"\n    fib2 = fib1 = fib0 = 0\n    cur_idx = 0\n    nfev = 0\n    xmax = vmax = v_prev = None\n    while maxiter is None or nfev < maxiter:\n        v_cur = f(step * fib0 + offset)\n        nfev += 1\n\n        if v_cur >= vmin:\n            # found upper bound, use binary search to find answer\n            stop = step * fib0 + offset\n            return minimize_cost_binary(f, vmin, start=step * (fib1 + 1) + offset,\n                                        stop=stop, save=stop, step=step, nfev=nfev)\n        else:\n            if vmax is not None and v_cur <= vmax:\n                if cur_idx <= 3:\n                    # special case: 0 <= xmax < 3, and we already checked all possibilities, so\n                    # we know vmax < vmin.  
There is no solution and just return.\n                    return MinCostResult(x=None, xmax=step * xmax + offset, vmax=vmax, nfev=nfev)\n                else:\n                    # we found the bracket that encloses maximum, perform golden section search\n                    a, x, b = fib2, fib1, fib0\n                    fx = v_prev\n                    while x > a + 1 or b > x + 1:\n                        u = a + b - x\n                        fu = f(step * u + offset)\n                        nfev += 1\n\n                        if fu >= fx:\n                            if u > x:\n                                a, x = x, u\n                                fx = fu\n                            else:\n                                x, b = u, x\n                                fx = fu\n\n                            if fx >= vmin:\n                                # found upper bound, use binary search to find answer\n                                stop = step * x + offset\n                                return minimize_cost_binary(f, vmin, start=step * (a + 1) + offset,\n                                                            stop=stop, save=stop, step=step, nfev=nfev)\n                        else:\n                            if u > x:\n                                b = u\n                            else:\n                                a = u\n\n                    # golden section search terminated, we found the maximum and it is less than vmin\n                    return MinCostResult(x=None, xmax=step * x + offset, vmax=fx, nfev=nfev)\n            else:\n                # still not close to maximum, continue searching\n                vmax = v_prev = v_cur\n                xmax = fib0\n                cur_idx += 1\n                if cur_idx <= 3:\n                    fib2, fib1, fib0 = fib1, fib0, cur_idx\n                else:\n                    fib2, fib1, fib0 = fib1, fib0, fib1 + fib0\n\n    raise ValueError('Maximum number of iteration 
achieved')\n\n\ndef minimize_cost_binary_float(f, vmin, start, stop, tol=1e-8, save=None, nfev=0):\n    # type: (Callable[[float], float], float, float, float, float, float, int) -> MinCostResult\n    \"\"\"Minimize cost given minimum output constraint using binary search.\n\n    Given discrete function f and an interval, find minimum input x such that f(x) >= vmin using binary search.\n\n    This algorithm only works if f is monotonically increasing, or if f monontonically increases\n    then monontonically decreases, and f(stop) >= vmin.\n\n    Parameters\n    ----------\n    f : Callable[[int], float]\n        a function that takes a single integer and output a scalar value.  Must monotonically\n        increase then monotonically decrease.\n    vmin : float\n        the minimum output value.\n    start : float\n        the input lower bound.\n    stop : float\n        the input upper bound.\n    tol : float\n        output tolerance.\n    save : Optional[float]\n        If not none, this value will be returned if no solution is found.\n    nfev : int\n        number of function calls already made.\n\n    Returns\n    -------\n    result : MinCostResult\n        the MinCostResult named tuple, with attributes:\n\n        x : Optional[float]\n            the minimum x such that f(x) >= vmin.  
If no such x exists, this will be None.\n        nfev : int\n            total number of function calls made.\n\n    \"\"\"\n    bin_iter = FloatBinaryIterator(start, stop, tol=tol)\n    while bin_iter.has_next():\n        x_cur = bin_iter.get_next()\n        v_cur = f(x_cur)\n        nfev += 1\n\n        if v_cur >= vmin:\n            save = x_cur\n            bin_iter.down()\n        else:\n            bin_iter.up()\n    return MinCostResult(x=save, xmax=None, vmax=None, nfev=nfev)\n\n\ndef minimize_cost_golden_float(f, vmin, start, stop, tol=1e-8, maxiter=1000):\n    # type: (Callable[[float], float], float, float, float, float, int) -> MinCostResult\n    \"\"\"Minimize cost given minimum output constraint using golden section/binary search.\n\n    Given discrete function f that monotonically increases then monotonically decreases,\n    find the minimum integer x such that f(x) >= vmin.\n\n    This method uses Fibonacci search to find the upper bound of x.  If the upper bound\n    is found, a binary search is performed in the interval to find the solution.  If\n    vmin is close to the maximum of f, a golden section search is performed to attempt\n    to find x.\n\n    Parameters\n    ----------\n    f : Callable[[int], float]\n        a function that takes a single integer and output a scalar value.  Must monotonically\n        increase then monotonically decrease.\n    vmin : float\n        the minimum output value.\n    start : float\n        the input lower bound.\n    stop : float\n        the input upper bound.\n    tol : float\n        the solution tolerance.\n    maxiter : int\n        maximum number of iterations to perform.\n\n    Returns\n    -------\n    result : MinCostResult\n        the MinCostResult named tuple, with attributes:\n\n        x : Optional[int]\n            the minimum integer such that f(x) >= vmin.  If no such x exists, this will be None.\n        xmax : Optional[int]\n            the value at which f achieves its maximum.  
This is set only if x is None\n        vmax : Optional[float]\n            the maximum value of f.  This is set only if x is None.\n        nfev : int\n            total number of function calls made.\n    \"\"\"\n\n    fa = f(start)\n    if fa >= vmin:\n        # solution found at start\n        return MinCostResult(x=start, xmax=None, vmax=None, nfev=1)\n\n    fb = f(stop)  # type: Optional[float]\n    if fb is None:\n        raise TypeError(\"f(stop) returned None instead of float\")\n    if fb >= vmin:\n        # found upper bound, use binary search to find answer\n        return minimize_cost_binary_float(f, vmin, start, stop, tol=tol, save=stop, nfev=2)\n\n    # solution is somewhere in middle\n    gr = (5**0.5 + 1) / 2\n    delta = (stop - start) / gr\n    c = stop - delta\n    d = start + delta\n\n    fc = f(c)  # type: Optional[float]\n    if fc is None:\n        raise TypeError(\"f(c) returned None instead of float\")\n    if fc >= vmin:\n        # found upper bound, use binary search to find answer\n        return minimize_cost_binary_float(f, vmin, start, c, tol=tol, save=stop, nfev=3)\n\n    fd = f(d)  # type: Optional[float]\n    if fd is None:\n        raise TypeError(\"f(d) returned None instead of float\")\n    if fd >= vmin:\n        # found upper bound, use binary search to find answer\n        return minimize_cost_binary_float(f, vmin, start, c, tol=tol, save=stop, nfev=4)\n\n    if fc > fd:\n        a, b, d = start, d, c\n        c = b - (b - a) / gr\n        fb, fc, fd = fd, None, fc\n    else:\n        a, b, c = c, stop, d\n        d = a + (b - a) / gr\n        fa, fc, fd = fc, fd, None\n\n    nfev = 4\n    while abs(b - a) > tol and nfev < maxiter:\n        if fc is None:\n            fc = f(c)\n        else:\n            fd = f(d)\n        assert fc is not None, 'Either fc or fd was None and the above should have set it'\n        assert fd is not None, 'Either fc or fd was None and the above should have set it'\n        nfev += 1\n        
if fc > fd:\n            if fc >= vmin:\n                return minimize_cost_binary_float(f, vmin, a, c, tol=tol, save=stop, nfev=nfev)\n            b, d = d, c\n            c = b - (b - a) / gr\n            fb, fc, fd = fd, None, fc\n        else:\n            if fd >= vmin:\n                return minimize_cost_binary_float(f, vmin, a, d, tol=tol, save=stop, nfev=nfev)\n            a, c = c, d\n            d = a + (b - a) / gr\n            fa, fc, fd = fc, fd, None\n\n    test = (a + b) / 2\n    vmax = f(test)\n    nfev += 1\n    if vmax >= vmin:\n        return MinCostResult(x=test, xmax=test, vmax=vmax, nfev=nfev)\n    else:\n        return MinCostResult(x=None, xmax=test, vmax=vmax, nfev=nfev)\n"
  },
  {
    "path": "bag/verification/LICENSE",
    "content": "BSD 3-Clause License\n\nCopyright (c) 2018, Regents of the University of California\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n  list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n  this list of conditions and the following disclaimer in the documentation\n  and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n  contributors may be used to endorse or promote products derived from\n  this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "bag/verification/__init__.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This package contains LVS/RCX related verification methods.\n\"\"\"\n\nfrom typing import Any\n\nimport importlib\n\nfrom .base import Checker\n\n__all__ = ['make_checker', 'Checker']\n\n\ndef make_checker(checker_cls, tmp_dir, **kwargs):\n    # type: (str, str, **Any) -> Checker\n    \"\"\"Returns a checker object.\n\n    Parameters\n    -----------\n    checker_cls : str\n        the Checker class absolute path name.\n    tmp_dir : str\n        directory to save temporary files in.\n    **kwargs : Any\n        keyword arguments needed to create a Checker object.\n    \"\"\"\n    sections = checker_cls.split('.')\n\n    module_str = '.'.join(sections[:-1])\n    class_str = sections[-1]\n    module = importlib.import_module(module_str)\n    return getattr(module, class_str)(tmp_dir, **kwargs)\n"
  },
  {
    "path": "bag/verification/base.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This module defines Checker, an abstract base class that handles LVS/RCX.\"\"\"\n\nfrom typing import TYPE_CHECKING, List, Dict, Any, Tuple, Sequence, Optional\n\nimport abc\n\nfrom ..io.template import new_template_env\nfrom ..concurrent.core import SubProcessManager\n\nif TYPE_CHECKING:\n    from ..concurrent.core import FlowInfo, ProcInfo\n\n\nclass Checker(abc.ABC):\n    \"\"\"A class that handles LVS/RCX.\n\n    Parameters\n    ----------\n    tmp_dir : str\n        temporary directory to save files in.\n    \"\"\"\n    def __init__(self, tmp_dir):\n        # type: (str) -> None\n        self.tmp_dir = tmp_dir\n        self._tmp_env = new_template_env('bag.verification', 'templates')\n\n    @abc.abstractmethod\n    def get_rcx_netlists(self, lib_name, cell_name):\n        # type: (str, str) -> List[str]\n        \"\"\"Returns a list of generated extraction netlist file names.\n\n        Parameters\n        ----------\n        lib_name : str\n            library name.\n        cell_name : str\n            cell_name\n\n        Returns\n        -------\n        netlists : List[str]\n            a list of generated extraction netlist file names.  The first index is the main netlist.\n        \"\"\"\n        return []\n\n    @abc.abstractmethod\n    async def async_run_lvs(self, lib_name, cell_name, sch_view='schematic',\n                            lay_view='layout', params=None, **kwargs):\n        # type: (str, str, str, str, Optional[Dict[str, Any]], Any) -> Tuple[bool, str]\n        \"\"\"A coroutine for running LVS.\n\n        Parameters\n        ----------\n        lib_name : str\n            library name.\n        cell_name : str\n            cell name.\n        sch_view : str\n            schematic view name.  Optional.\n        lay_view : str\n            layout view name.  
Optional.\n        params : Optional[Dict[str, Any]]\n            optional LVS parameter values.\n        kwargs : Any\n            optional keyword arguments.\n            gds_layout_path : str\n                Path to the gds of the layout. If passed, do not export layout, instead copy gds\n\n\n        Returns\n        -------\n        success : bool\n            True if LVS succeeds.\n        log_fname : str\n            LVS log file name.\n        \"\"\"\n        return False, ''\n\n    @abc.abstractmethod\n    async def async_run_rcx(self, lib_name, cell_name, sch_view='schematic',\n                            lay_view='layout', params=None, **kwargs):\n        # type: (str, str, str, str, Optional[Dict[str, Any]], Any) -> Tuple[Optional[str], str]\n        \"\"\"A coroutine for running RCX.\n\n        Parameters\n        ----------\n        lib_name : str\n            library name.\n        cell_name : str\n            cell name.\n        sch_view : str\n            schematic view name.  Optional.\n        lay_view : str\n            layout view name.  Optional.\n        params : Optional[Dict[str, Any]]\n            optional RCX parameter values.\n        kwargs : Any\n            optional keyword arguments.\n            gds_layout_path : str\n                Path to the gds of the layout. If passed, do not export layout, instead copy gds\n\n        Returns\n        -------\n        netlist : Optional[str]\n            The RCX netlist file name.  
None if RCX failed, empty if no extracted\n            netlist is generated\n        log_fname : str\n            RCX log file name.\n        \"\"\"\n        return '', ''\n\n    @abc.abstractmethod\n    async def async_export_layout(self, lib_name, cell_name, out_file,\n                                  view_name='layout', params=None):\n        # type: (str, str, str, str, Optional[Dict[str, Any]]) -> str\n        \"\"\"A coroutine for exporting layout.\n\n        Parameters\n        ----------\n        lib_name : str\n            library name.\n        cell_name : str\n            cell name.\n        view_name : str\n            layout view name.\n        out_file : str\n            output file name.\n        params : Optional[Dict[str, Any]]\n            optional export parameter values.\n\n        Returns\n        -------\n        log_fname : str\n            log file name.\n        \"\"\"\n        return ''\n\n    @abc.abstractmethod\n    async def async_export_schematic(self, lib_name, cell_name, out_file,\n                                     view_name='schematic', params=None):\n        # type: (str, str, str, str, Optional[Dict[str, Any]]) -> str\n        \"\"\"A coroutine for exporting schematic.\n\n        Parameters\n        ----------\n        lib_name : str\n            library name.\n        cell_name : str\n            cell name.\n        view_name : str\n            schematic view name.\n        out_file : str\n            output file name.\n        params : Optional[Dict[str, Any]]\n            optional export parameter values.\n\n        Returns\n        -------\n        log_fname : str\n            log file name.\n        \"\"\"\n        return ''\n\n    def render_file_template(self, temp_name, params):\n        # type: (str, Dict[str, Any]) -> str\n        \"\"\"Returns the rendered content from the given template file.\"\"\"\n        template = self._tmp_env.get_template(temp_name)\n        return template.render(**params)\n\n    def 
render_string_template(self, content, params):\n        # type: (str, Dict[str, Any]) -> str\n        \"\"\"Returns the rendered content from the given template string.\"\"\"\n        template = self._tmp_env.from_string(content)\n        return template.render(**params)\n\n\nclass SubProcessChecker(Checker, abc.ABC):\n    \"\"\"An implementation of :class:`Checker` using :class:`SubProcessManager`.\n\n    Parameters\n    ----------\n    tmp_dir : str\n        temporary file directory.\n    max_workers : int\n        maximum number of parallel processes.\n    cancel_timeout : float\n        timeout for cancelling a subprocess.\n    \"\"\"\n\n    def __init__(self, tmp_dir, max_workers, cancel_timeout):\n        # type: (str, int, float) -> None\n        Checker.__init__(self, tmp_dir)\n        self._manager = SubProcessManager(max_workers=max_workers, cancel_timeout=cancel_timeout)\n\n    @abc.abstractmethod\n    def setup_lvs_flow(self, lib_name, cell_name, sch_view='schematic',\n                       lay_view='layout', params=None, **kwargs):\n        # type: (str, str, str, str, Optional[Dict[str, Any]], Any) -> Sequence[FlowInfo]\n        \"\"\"This method performs any setup necessary to configure a LVS subprocess flow.\n\n        Parameters\n        ----------\n        lib_name : str\n            library name.\n        cell_name : str\n            cell name.\n        sch_view : str\n            schematic view name.\n        lay_view : str\n            layout view name.\n        params : Optional[Dict[str, Any]]\n            optional LVS parameter values.\n        kwargs : Any\n            optional keyword arguments.\n            gds_layout_path : str\n                Path to the gds of the layout. If passed, do not export layout, instead copy gds\n\n\n        Returns\n        -------\n        flow_info : Sequence[FlowInfo]\n            the LVS flow information list.  
Each element is a tuple of:\n\n            args : Union[str, Sequence[str]]\n                command to run, as string or list of string arguments.\n            log : str\n                log file name.\n            env : Optional[Dict[str, str]]\n                environment variable dictionary.  None to inherit from parent.\n            cwd : Optional[str]\n                working directory path.  None to inherit from parent.\n            vfun : Sequence[Callable[[Optional[int], str], Any]]\n                a function to validate if it is ok to execute the next process.  The output of the\n                last function is returned.  The first argument is the return code, the\n                second argument is the log file name.\n        \"\"\"\n        return []\n\n    @abc.abstractmethod\n    def setup_rcx_flow(self, lib_name, cell_name, sch_view='schematic',\n                       lay_view='layout', params=None, **kwargs):\n        # type: (str, str, str, str, Optional[Dict[str, Any]], Any) -> Sequence[FlowInfo]\n        \"\"\"This method performs any setup necessary to configure a RCX subprocess flow.\n\n        Parameters\n        ----------\n        lib_name : str\n            library name.\n        cell_name : str\n            cell name.\n        sch_view : str\n            schematic view name.\n        lay_view : str\n            layout view name.\n        params : Optional[Dict[str, Any]]\n            optional RCX parameter values.\n        kwargs : Any\n            optional keyword arguments.\n            gds_layout_path : str\n                Path to the gds of the layout. If passed, do not export layout, instead copy gds\n\n        Returns\n        -------\n        flow_info : Sequence[FlowInfo]\n            the RCX flow information list.  
Each element is a tuple of:\n\n            args : Union[str, Sequence[str]]\n                command to run, as string or list of string arguments.\n            log : str\n                log file name.\n            env : Optional[Dict[str, str]]\n                environment variable dictionary.  None to inherit from parent.\n            cwd : Optional[str]\n                working directory path.  None to inherit from parent.\n            vfun : Sequence[Callable[[Optional[int], str], Any]]\n                a function to validate if it is ok to execute the next process.  The output of the\n                last function is returned.  The first argument is the return code, the\n                second argument is the log file name.\n        \"\"\"\n        return []\n\n    @abc.abstractmethod\n    def setup_export_layout(self, lib_name, cell_name, out_file, view_name='layout', params=None):\n        # type: (str, str, str, str, Optional[Dict[str, Any]]) -> ProcInfo\n        \"\"\"This method performs any setup necessary to export layout.\n\n        Parameters\n        ----------\n        lib_name : str\n            library name.\n        cell_name : str\n            cell name.\n        out_file : str\n            output file name.\n        view_name : str\n            layout view name.\n        params : Optional[Dict[str, Any]]\n            optional export parameter values.\n\n        Returns\n        -------\n        args : Union[str, Sequence[str]]\n            command to run, as string or list of string arguments.\n        log : str\n            log file name.\n        env : Optional[Dict[str, str]]\n            environment variable dictionary.  None to inherit from parent.\n        cwd : Optional[str]\n            working directory path.  
None to inherit from parent.\n        \"\"\"\n        return '', '', None, None\n\n    @abc.abstractmethod\n    def setup_export_schematic(self, lib_name, cell_name, out_file,\n                               view_name='schematic', params=None):\n        # type: (str, str, str, str, Optional[Dict[str, Any]]) -> ProcInfo\n        \"\"\"This method performs any setup necessary to export schematic.\n\n        Parameters\n        ----------\n        lib_name : str\n            library name.\n        cell_name : str\n            cell name.\n        out_file : str\n            output file name.\n        view_name : str\n            layout view name.\n        params : Optional[Dict[str, Any]]\n            optional export parameter values.\n\n        Returns\n        -------\n        args : Union[str, Sequence[str]]\n            command to run, as string or list of string arguments.\n        log : str\n            log file name.\n        env : Optional[Dict[str, str]]\n            environment variable dictionary.  None to inherit from parent.\n        cwd : Optional[str]\n            working directory path.  
None to inherit from parent.\n        \"\"\"\n        return '', '', None, None\n\n    async def async_run_lvs(self, lib_name: str, cell_name: str,\n                            sch_view: str = 'schematic',\n                            lay_view: str = 'layout',\n                            params: Optional[Dict[str, Any]] = None,\n                            **kwargs: Any,\n                            ) -> Tuple[bool, str]:\n\n        flow_info = self.setup_lvs_flow(lib_name, cell_name, sch_view, lay_view, params, **kwargs)\n        return await self._manager.async_new_subprocess_flow(flow_info)\n\n    async def async_run_rcx(self, lib_name: str, cell_name: str,\n                            sch_view: str = 'schematic',\n                            lay_view: str = 'layout',\n                            params: Optional[Dict[str, Any]] = None,\n                            **kwargs: Any,\n                            ) -> Tuple[str, str]:\n        flow_info = self.setup_rcx_flow(lib_name, cell_name, sch_view, lay_view, params, **kwargs)\n        return await self._manager.async_new_subprocess_flow(flow_info)\n\n    async def async_export_layout(self, lib_name: str, cell_name: str,\n                                  out_file: str, view_name: str = 'layout',\n                                  params: Optional[Dict[str, Any]] = None) -> str:\n        proc_info = self.setup_export_layout(lib_name, cell_name, out_file, view_name, params)\n        await self._manager.async_new_subprocess(*proc_info)\n        return proc_info[1]\n\n    async def async_export_schematic(self, lib_name: str, cell_name: str,\n                                     out_file: str, view_name: str = 'layout',\n                                     params: Optional[Dict[str, Any]] = None) -> str:\n        proc_info = self.setup_export_schematic(lib_name, cell_name, out_file, view_name, params)\n        await self._manager.async_new_subprocess(*proc_info)\n        return proc_info[1]\n"
  },
  {
    "path": "bag/verification/calibre.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This module implements LVS/RCX using Calibre and stream out from Virtuoso.\n\"\"\"\n\nfrom typing import TYPE_CHECKING, Optional, List, Tuple, Dict, Any, Sequence\n\nimport os\nimport subprocess\nimport shutil\n\nfrom .virtuoso import VirtuosoChecker\nfrom ..io import read_file, open_temp, readlines_iter\n\nif TYPE_CHECKING:\n    from .base import FlowInfo\n\n\n# noinspection PyUnusedLocal\ndef _all_pass(retcode, log_file):\n    return True\n\n\n# noinspection PyUnusedLocal\ndef lvs_passed(retcode, log_file):\n    # type: (int, str) -> Tuple[bool, str]\n    \"\"\"Check if LVS passed\n\n    Parameters\n    ----------\n    retcode : int\n        return code of the LVS process.\n    log_file : str\n        log file name.\n\n    Returns\n    -------\n    success : bool\n        True if LVS passed.\n    log_file : str\n        the log file name.\n    \"\"\"\n    if not os.path.isfile(log_file):\n        return False, ''\n\n    test_str = 'LVS completed. 
CORRECT.'\n    LogCheck = subprocess.Popen(['grep', '-i', test_str, log_file], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n    stdout, stderr = LogCheck.communicate()\n\n    return stdout.decode() != '', log_file\n\n\n# noinspection PyUnusedLocal\ndef query_passed(retcode, log_file):\n    # type: (int, str) -> Tuple[bool, str]\n    \"\"\"Check if query passed\n\n    Parameters\n    ----------\n    retcode : int\n        return code of the query process.\n    log_file : str\n        log file name.\n\n    Returns\n    -------\n    success : bool\n        True if query passed.\n    log_file : str\n        the log file name.\n    \"\"\"\n    if not os.path.isfile(log_file):\n        return False, ''\n\n    test_str = 'OK: Terminating.'\n    LogCheck = subprocess.Popen(['grep', '-i', test_str, log_file], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n    stdout, stderr = LogCheck.communicate()\n\n    return stdout.decode() != '', log_file\n\n\nclass Calibre(VirtuosoChecker):\n    \"\"\"A subclass of VirtuosoChecker that uses Calibre for verification.\n\n    Parameters\n    ----------\n    tmp_dir : string\n        temporary directory to save files in.\n    lvs_run_dir : str\n        the LVS run directory.\n    lvs_runset : str\n        the LVS runset filename.\n    rcx_run_dir : str\n        the RCX run directory.\n    rcx_runset : str\n        the RCX runset filename.\n    source_added_file : str\n        the Calibre source.added file location.  Environment variable is supported.\n        Default value is '$DK/Calibre/lvs/source.added'.\n    rcx_mode : str\n        the RC extraction mode.  Either 'pex' or 'xact' or 'starrc'.  
Defaults to 'pex'.\n    xact_rules : str\n        the XACT rules file name.\n    \"\"\"\n\n    def __init__(self, tmp_dir, lvs_run_dir, lvs_runset, rcx_run_dir, rcx_runset,\n                 source_added_file='$DK/Calibre/lvs/source.added', rcx_mode='pex',\n                 xact_rules='', **kwargs):\n\n        max_workers = kwargs.get('max_workers', None)\n        cancel_timeout = kwargs.get('cancel_timeout_ms', None)\n        rcx_params = kwargs.get('rcx_params', {})\n        lvs_params = kwargs.get('lvs_params', {})\n        rcx_link_files = kwargs.get('rcx_link_files', None)\n\n        if cancel_timeout is not None:\n            cancel_timeout /= 1e3\n\n        VirtuosoChecker.__init__(self, tmp_dir, max_workers, cancel_timeout, source_added_file)\n\n        self.default_rcx_params = rcx_params\n        self.default_lvs_params = lvs_params\n        self.lvs_run_dir = os.path.abspath(rcx_run_dir if (rcx_mode == 'starrc' or rcx_mode == 'qrc') else lvs_run_dir)\n        self.lvs_runset = lvs_runset\n        self.rcx_run_dir = os.path.abspath(rcx_run_dir)\n        self.rcx_runset = rcx_runset\n        self.rcx_link_files = rcx_link_files\n        self.xact_rules = xact_rules\n        self.rcx_mode = rcx_mode\n\n    def get_rcx_netlists(self, lib_name, cell_name):\n        # type: (str, str) -> List[str]\n        \"\"\"Returns a list of generated extraction netlist file names.\n\n        Parameters\n        ----------\n        lib_name : str\n            library name.\n        cell_name : str\n            cell_name\n\n        Returns\n        -------\n        netlists : List[str]\n            a list of generated extraction netlist file names.  
The first index is the main netlist.\n        \"\"\"\n        # PVS generate schematic cellviews directly.\n        if self.rcx_mode == 'starrc' or self.rcx_mode == 'qrc':\n            return ['%s.spf' % cell_name]\n        else:\n            return ['%s.pex.netlist' % cell_name,\n                    # '%s.pex.netlist.pex' % cell_name,\n                    # '%s.pex.netlist.%s.pxi' % (cell_name, cell_name),\n                    ]\n\n    def setup_lvs_flow(self, lib_name, cell_name, sch_view='schematic', lay_view='layout',\n                       params=None, **kwargs):\n        # type: (str, str, str, str, Optional[Dict[str, Any]], Any) -> Sequence[FlowInfo]\n\n        run_dir = os.path.join(self.lvs_run_dir, lib_name, cell_name)\n        os.makedirs(run_dir, exist_ok=True)\n        lay_file, sch_file = self._get_lay_sch_files(run_dir)\n\n        # add schematic/layout export to flow\n        flow_list = []\n\n        # Check if gds layout is provided\n        gds_layout_path = kwargs.pop('gds_layout_path', None)\n\n        # If not provided the gds layout, need to export layout\n        if not gds_layout_path:\n            cmd, log, env, cwd = self.setup_export_layout(lib_name, cell_name, lay_file, lay_view, None)\n            flow_list.append((cmd, log, env, cwd, _all_pass))\n        # If provided gds layout, do not export layout, just copy gds\n        else:\n            if not os.path.exists(gds_layout_path):\n                raise ValueError(f'gds_layout_path does not exist: {gds_layout_path}')\n            with open_temp(prefix='copy', dir=run_dir, delete=True) as f:\n                copy_log_file = f.name\n            copy_cmd = ['cp', gds_layout_path, os.path.abspath(lay_file)]\n            flow_list.append((copy_cmd, copy_log_file, None, None, _all_pass))\n\n        cmd, log, env, cwd = self.setup_export_schematic(lib_name, cell_name, sch_file, sch_view, None)\n        flow_list.append((cmd, log, env, cwd, _all_pass))\n\n        lvs_params_actual = 
self.default_lvs_params.copy()\n        if params is not None:\n            lvs_params_actual.update(params)\n\n        with open_temp(prefix='lvsLog', dir=run_dir, delete=False) as logf:\n            log_file = logf.name\n\n        # generate new runset\n        runset_content = self.modify_lvs_runset(run_dir, lib_name, cell_name, lay_view, lay_file,\n                                                sch_file, lvs_params_actual)\n\n        # save runset\n        with open_temp(dir=run_dir, delete=False) as runset_file:\n            runset_fname = runset_file.name\n            runset_file.write(runset_content)\n\n        cmd = ['calibre', '-gui', '-lvs', '-runset', runset_fname, '-batch']\n\n        flow_list.append((cmd, log_file, None, run_dir, lvs_passed))\n\n        return flow_list\n\n    def setup_rcx_flow(self, lib_name, cell_name, sch_view='schematic', lay_view='layout',\n                       params=None, **kwargs):\n        # type: (str, str, str, str, Optional[Dict[str, Any]], Any) -> Sequence[FlowInfo]\n\n        # update default RCX parameters.\n        rcx_params_actual = self.default_rcx_params.copy()\n        if params is not None:\n            rcx_params_actual.update(params)\n\n        run_dir = os.path.join(self.rcx_run_dir, lib_name, cell_name)\n        os.makedirs(run_dir, exist_ok=True)\n\n        # make symlinks\n        query_input = None\n        if self.rcx_link_files:\n            for source_file in self.rcx_link_files:\n                base_name = os.path.basename(source_file)\n                targ_file = os.path.join(run_dir, base_name)\n                if 'query' in base_name:\n                    query_input = targ_file\n                if not os.path.exists(targ_file):\n                    os.symlink(source_file, targ_file)\n\n        lay_file, sch_file = self._get_lay_sch_files(run_dir)\n        with open_temp(prefix='rcxLog', dir=run_dir, delete=False) as logf:\n            log_file = logf.name\n        flow_list = []\n\n        # 
Check if gds layout is provided\n        gds_layout_path = kwargs.pop('gds_layout_path', None)\n\n        # If not provided the gds layout, need to export layout\n        if not gds_layout_path:\n            cmd, log, env, cwd = self.setup_export_layout(lib_name, cell_name, lay_file, lay_view, None)\n            flow_list.append((cmd, log, env, cwd, _all_pass))\n        # If provided gds layout, do not export layout, just copy gds\n        else:\n            if not os.path.exists(gds_layout_path):\n                raise ValueError(f'gds_layout_path does not exist: {gds_layout_path}')\n            with open_temp(prefix='copy', dir=run_dir, delete=True) as f:\n                copy_log_file = f.name\n            copy_cmd = ['cp', gds_layout_path, os.path.abspath(lay_file)]\n            flow_list.append((copy_cmd, copy_log_file, None, None, _all_pass))\n\n        cmd, log, env, cwd = self.setup_export_schematic(lib_name, cell_name, sch_file, sch_view,\n                                                         None)\n        flow_list.append((cmd, log, env, cwd, _all_pass))\n\n        if self.rcx_mode == 'starrc' or self.rcx_mode == 'qrc':\n            # check if LVS was run prior to run_rcx\n            sp_file = os.path.join(run_dir, cell_name + '.sp')\n            if not os.path.isfile(sp_file):\n                raise Exception('Did you forget to do run_lvs first?')\n\n            # now query the LVS file using query.input\n            with open_temp(prefix='queryLog', dir=run_dir, delete=False) as queryf:\n                query_file = queryf.name\n\n            if query_input is None:\n                query_input = os.path.join(run_dir, 'query.input')\n            cmd = ['calibre', '-query_input', query_input,\n                   '-query', os.path.join(run_dir, 'svdb'), cell_name]\n            flow_list.append((cmd, query_file, None, run_dir,\n                              lambda rc, lf: query_passed(rc, lf)[0]))\n\n            if self.rcx_mode == 'starrc':\n         
       # generate new cmd for StarXtract\n                cmd_content, result = self.modify_starrc_cmd(run_dir, lib_name, cell_name,\n                                                             rcx_params_actual, query_input, sch_file)\n\n                # save cmd for StarXtract\n                with open_temp(dir=run_dir, delete=False) as cmd_file:\n                    cmd_fname = cmd_file.name\n                    cmd_file.write(cmd_content)\n\n                cmd = ['StarXtract', cmd_fname]\n            else:\n                # generate new cmd for QRC\n                cmd_content, result = self.modify_qrc_cmd(run_dir, cell_name, rcx_params_actual, sch_file)\n                # save cmd for QRC\n                with open_temp(dir=run_dir, delete=False) as cmd_file:\n                    cmd_fname = cmd_file.name\n                    cmd_file.write(cmd_content)\n\n                cmd = ['qrc', '-64', '-cmd', cmd_fname]\n        elif self.rcx_mode == 'pex':\n            # generate new runset\n            runset_content, result = self.modify_pex_runset(run_dir, lib_name, cell_name, lay_view,\n                                                            lay_file, sch_file, rcx_params_actual)\n\n            # save runset\n            with open_temp(dir=run_dir, delete=False) as runset_file:\n                runset_fname = runset_file.name\n                runset_file.write(runset_content)\n\n            # remove old svdb directory\n            svdb_dir = os.path.join(run_dir, 'svdb')\n            if os.path.exists(svdb_dir) and os.path.isdir(svdb_dir):\n                shutil.rmtree(svdb_dir)\n\n            cmd = ['calibre', '-gui', '-pex', '-runset', runset_fname, '-batch']\n        else:\n            # generate new runset\n            runset_content, result = self.modify_xact_rules(run_dir, cell_name, lay_file, sch_file,\n                                                            rcx_params_actual)\n\n            # save runset\n            with 
open_temp(dir=run_dir, delete=False) as runset_file:\n                runset_fname = runset_file.name\n                runset_file.write(runset_content)\n\n            with open_temp(prefix='lvsLog', dir=run_dir, delete=False) as lvsf:\n                lvs_file = lvsf.name\n\n            num_cores = rcx_params_actual.get('num_cores', 2)\n            cmd = ['calibre', '-lvs', '-hier', '-turbo', '%d' % num_cores, '-nowait', runset_fname]\n            flow_list.append(\n                (cmd, lvs_file, None, run_dir, lambda rc, lf: lvs_passed(rc, lf)[0]))\n\n            extract_mode = rcx_params_actual.get('extract_mode', 'rcc')\n            cmd = ['calibre', '-xact', '-3d', '-%s' % extract_mode, '-turbo', '%d' % num_cores,\n                   runset_fname]\n\n        # noinspection PyUnusedLocal\n        def rcx_passed(retcode, log_fname):\n            if not os.path.isfile(result):\n                return None, log_fname\n\n            if self.rcx_mode in ['qrc', 'pex']:\n                if self.rcx_mode == 'qrc':\n                    test_str = ' terminated normally  *****'\n                else:\n                    test_str = ' Errors  =  0'\n                LogCheck = subprocess.Popen(['grep', '-i', test_str, log_fname], stdout=subprocess.PIPE,\n                                            stderr=subprocess.STDOUT)\n                stdout, stderr = LogCheck.communicate()\n\n                if stdout.decode() == '':\n                    return None, log_fname\n\n            return result, log_fname\n\n        flow_list.append((cmd, log_file, None, run_dir, rcx_passed))\n        return flow_list\n\n    @classmethod\n    def _get_lay_sch_files(cls, run_dir):\n        lay_file = os.path.join(run_dir, 'layout.gds')\n        sch_file = os.path.join(run_dir, 'schematic.net')\n        return lay_file, sch_file\n\n    def modify_lvs_runset(self, run_dir, lib_name, cell_name, lay_view, gds_file, netlist,\n                          lvs_params):\n        # type: (str, str, 
str, str, str, str, Dict[str, Any]) -> str\n        \"\"\"Modify the given LVS runset file.\n\n        Parameters\n        ----------\n        run_dir : str\n            the run directory.\n        lib_name : str\n            the library name.\n        cell_name : str\n            the cell name.\n        lay_view : str\n            the layout view.\n        gds_file : str\n            the layout gds file name.\n        netlist : str\n            the schematic netlist file.\n        lvs_params : Dict[str, Any]\n            override LVS parameters.\n\n        Returns\n        -------\n        content : str\n            the new runset content.\n        \"\"\"\n        # convert runset content to dictionary\n        lvs_options = {}\n        for line in readlines_iter(self.lvs_runset):\n            key, val = line.split(':', 1)\n            key = key.strip('*')\n            lvs_options[key] = val.strip()\n\n        # override parameters\n        lvs_options['lvsRunDir'] = run_dir\n        lvs_options['lvsLayoutPaths'] = gds_file\n        lvs_options['lvsLayoutPrimary'] = cell_name\n        lvs_options['lvsLayoutLibrary'] = lib_name\n        lvs_options['lvsLayoutView'] = lay_view\n        lvs_options['lvsSourcePath'] = netlist\n        lvs_options['lvsSourcePrimary'] = cell_name\n        lvs_options['lvsSourceLibrary'] = lib_name\n        lvs_options['lvsSpiceFile'] = os.path.join(run_dir, '%s.sp' % cell_name)\n        lvs_options['lvsERCDatabase'] = '%s.erc.results' % cell_name\n        lvs_options['lvsERCSummaryFile'] = '%s.erc.summary' % cell_name\n        lvs_options['lvsReportFile'] = '%s.lvs.report' % cell_name\n        lvs_options['lvsMaskDBFile'] = '%s.maskdb' % cell_name\n        lvs_options['cmnFDILayoutLibrary'] = lib_name\n        lvs_options['cmnFDILayoutView'] = lay_view\n        lvs_options['cmnFDIDEFLayoutPath'] = '%s.def' % cell_name\n\n        lvs_options.update(lvs_params)\n\n        return ''.join(('*%s: %s\\n' % (key, val) for key, val in 
lvs_options.items()))\n\n    def modify_pex_runset(self, run_dir, lib_name, cell_name, lay_view, gds_file, netlist,\n                          rcx_params):\n        # type: (str, str ,str, str, str, str, Dict[str, Any]) -> Tuple[str, str]\n        \"\"\"Modify the given RCX runset file.\n\n        Parameters\n        ----------\n        run_dir : str\n            the run directory.\n        lib_name : str\n            the library name.\n        cell_name : str\n            the cell name.\n        lay_view : str\n            the layout view.\n        gds_file : str\n            the layout gds file name.\n        netlist : str\n            the schematic netlist file.\n        rcx_params : Dict[str, Any]\n            override RCX parameters.\n\n        Returns\n        -------\n        content : str\n            the new runset content.\n        output_name : str\n            the extracted netlist file.\n        \"\"\"\n        # convert runset content to dictionary\n        rcx_options = {}\n        for line in readlines_iter(self.rcx_runset):\n            key, val = line.split(':', 1)\n            key = key.strip('*')\n            rcx_options[key] = val.strip()\n\n        output_name = '%s.pex.netlist' % cell_name\n\n        # override parameters\n        rcx_options['pexRunDir'] = run_dir\n        rcx_options['pexLayoutPaths'] = gds_file\n        rcx_options['pexLayoutPrimary'] = cell_name\n        rcx_options['pexLayoutLibrary'] = lib_name\n        rcx_options['pexLayoutView'] = lay_view\n        rcx_options['pexSourcePath'] = netlist\n        rcx_options['pexSourcePrimary'] = cell_name\n        rcx_options['pexSourceLibrary'] = lib_name\n        rcx_options['pexReportFile'] = '%s.lvs.report' % cell_name\n        rcx_options['pexPexNetlistFile'] = output_name\n        rcx_options['pexPexReportFile'] = '%s.pex.report' % cell_name\n        rcx_options['pexMaskDBFile'] = '%s.maskdb' % cell_name\n        rcx_options['cmnFDILayoutLibrary'] = lib_name\n        
rcx_options['cmnFDILayoutView'] = lay_view\n        rcx_options['cmnFDIDEFLayoutPath'] = '%s.def' % cell_name\n\n        rcx_options['pexPexNetlistType'] = rcx_params.pop('netlist_type', 'RCC')\n        rcx_options['pexPexGroundNameValue'] = rcx_params.pop('ground_name_value', 'VSS')\n\n        rcx_options.update(rcx_params)\n\n        content = ''.join(('*%s: %s\\n' % (key, val) for key, val in rcx_options.items()))\n        return content, os.path.join(run_dir, output_name)\n\n    def modify_xact_rules(self, run_dir, cell_name, gds_file, netlist, xact_params):\n        # type: (str, str, str, str, Dict[str, Any]) -> Tuple[str, str]\n        \"\"\"Modify the given XACT runset file.\n\n        Parameters\n        ----------\n        run_dir : str\n            the run directory.\n        cell_name : str\n            the cell name.\n        gds_file : str\n            the layout gds file name.\n        netlist : str\n            the schematic netlist file.\n        xact_params : Dict[str, Any]\n            additional XACT parameters.\n\n        Returns\n        -------\n        content : str\n            the new runset content.\n        output_name : str\n            the extracted netlist file.\n        \"\"\"\n        substrate_name = xact_params.get('substrate_name', 'VSS')\n        power_names = xact_params.get('power_names', 'VDD')\n        ground_names = xact_params.get('ground_names', 'VSS')\n\n        output_name = '%s.pex.netlist' % cell_name\n        content = self.render_string_template(read_file(self.xact_rules),\n                                              dict(\n                                                  cell_name=cell_name,\n                                                  gds_file=gds_file,\n                                                  netlist=netlist,\n                                                  substrate_name=substrate_name,\n                                                  power_names=power_names,\n                              
                    ground_names=ground_names,\n                                                  output_name=output_name,\n                                              ))\n\n        return content, os.path.join(run_dir, output_name)\n\n    def modify_starrc_cmd(self, run_dir, lib_name, cell_name, starrc_params, query_input, sch_file):\n        # type: (str, str, str, Dict[str, Any], str, str) -> Tuple[str, str]\n        \"\"\"Modify the cmd file.\n\n        Parameters\n        ----------\n        run_dir : str\n            the run directory.\n        lib_name : str\n            the library name.\n        cell_name : str\n            the cell name.\n        starrc_params : Dict[str, Any]\n            override StarRC parameters.\n        query_input : str\n            the path to query.input file\n        sch_file : str\n            the schematic netlist\n\n        Returns\n        -------\n        starrc_cmd : str\n            the new StarXtract cmd file.\n        output_name : str\n            the extracted netlist file.\n        \"\"\"\n        output_name = '%s.spf' % cell_name\n        if 'CDSLIBPATH' in os.environ:\n            cds_lib_path = os.path.abspath(os.path.join(os.environ['CDSLIBPATH'], 'cds.lib'))\n        else:\n            cds_lib_path = os.path.abspath('./cds.lib')\n        content = self.render_string_template(read_file(self.rcx_runset),\n                                              dict(\n                                                  cell_name=cell_name,\n                                                  query_input=query_input,\n                                                  extract_type=starrc_params['extract'].get('type', 'RCc'),\n                                                  netlist_format=starrc_params.get('netlist_format',\n                                                                                   'SPF'),\n                                                  sch_file=sch_file,\n                                            
      cds_lib=cds_lib_path,\n                                                  lib_name=lib_name,\n                                                  run_dir=run_dir,\n                                                  skew=starrc_params.get('skew', 'tt'),\n                                              ))\n\n        return content, os.path.join(run_dir, output_name)\n\n    def modify_qrc_cmd(self, run_dir, cell_name, qrc_params, sch_file):\n        # type: (str, str, Dict[str, Any], str) -> Tuple[str, str]\n        \"\"\"Modify the cmd file.\n\n        Parameters\n        ----------\n        run_dir : str\n            the run directory.\n        cell_name : str\n            the cell name.\n        qrc_params : Dict[str, Any]\n            override QRC parameters.\n        sch_file : str\n            the schematic netlist\n\n        Returns\n        -------\n        qrc_cmd : str\n            the new QRC cmd file.\n        output_name : str\n            the extracted netlist file.\n        \"\"\"\n        output_name = '%s.spf' % cell_name\n        if 'CDSLIBPATH' in os.environ:\n            cds_lib_path = os.path.abspath(os.path.join(os.environ['CDSLIBPATH'], 'cds.lib'))\n        else:\n            cds_lib_path = os.path.abspath('./cds.lib')\n        content = self.render_string_template(read_file(self.rcx_runset),\n                                              dict(\n                                                  cell_name=cell_name,\n                                                  netlist_format=qrc_params.get('netlist_format',\n                                                                                'spf'),\n                                                  extract_type=qrc_params['extract'].get('type', 'rc_coupled'),\n                                                  sch_file=sch_file,\n                                                  cds_lib=cds_lib_path,\n                                                  skew=qrc_params.get('skew', 'tt'),\n          
                                        temp=qrc_params.get('temp', '25'),\n                                              ))\n\n        return content, os.path.join(run_dir, output_name)\n"
  },
  {
    "path": "bag/verification/icv.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This module implements LVS/RCX using ICV and stream out from Virtuoso.\n\"\"\"\n\nfrom typing import TYPE_CHECKING, Optional, List, Tuple, Dict, Any, Sequence\n\nimport os\nimport subprocess\n\nfrom .virtuoso import VirtuosoChecker\nfrom ..io import read_file, open_temp\n\nif TYPE_CHECKING:\n    from .base import FlowInfo\n\n\n# noinspection PyUnusedLocal\ndef _all_pass(retcode, log_file):\n    return True\n\n\n# noinspection PyUnusedLocal\ndef lvs_passed(retcode, log_file):\n    # type: (int, str) -> Tuple[bool, str]\n    \"\"\"Check if LVS passed\n\n    Parameters\n    ----------\n    retcode : int\n        return code of the LVS process.\n    log_file : str\n        log file name.\n\n    Returns\n    -------\n    success : bool\n        True if LVS passed.\n    log_file : str\n        the log file name.\n    \"\"\"\n    dirname = os.path.dirname(log_file)\n    cell_name = os.path.basename(dirname)\n    lvs_error_file = os.path.join(dirname, cell_name + '.LVS_ERRORS')\n\n    # append error file at the end of log file\n    with open(log_file, 'a') as logf:\n        with open(lvs_error_file, 'r') as errf:\n            for line in errf:\n                logf.write(line)\n\n    if not os.path.isfile(log_file):\n        return False, ''\n\n    test_str = 'Final comparison result:PASS'\n    LogCheck = subprocess.Popen(['grep', '-i', test_str, log_file], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n    stdout, stderr = LogCheck.communicate()\n\n    return stdout.decode() != '', log_file\n\n\nclass ICV(VirtuosoChecker):\n    \"\"\"A subclass of VirtuosoChecker that uses ICV for verification.\n\n    Parameters\n    ----------\n    tmp_dir : string\n        temporary directory to save files in.\n    lvs_run_dir : str\n        the LVS run directory.\n    lvs_runset : str\n        the LVS runset filename.\n    rcx_run_dir : str\n        the RCX run directory.\n    rcx_runset : str\n        the RCX runset filename.\n    
source_added_file : str\n        the source.added file location.  Environment variable is supported.\n        Default value is '$DK/Calibre/lvs/source.added'.\n    rcx_mode : str\n        the RC extraction mode.  Defaults to 'starrc'.\n    \"\"\"\n\n    def __init__(self, tmp_dir, lvs_run_dir, lvs_runset, rcx_run_dir, rcx_runset,\n                 source_added_file='$DK/Calibre/lvs/source.added', rcx_mode='pex',\n                 **kwargs):\n\n        max_workers = kwargs.get('max_workers', None)\n        cancel_timeout = kwargs.get('cancel_timeout_ms', None)\n        rcx_params = kwargs.get('rcx_params', {})\n        lvs_params = kwargs.get('lvs_params', {})\n        rcx_link_files = kwargs.get('rcx_link_files', None)\n        lvs_link_files = kwargs.get('lvs_link_files', None)\n\n        if cancel_timeout is not None:\n            cancel_timeout /= 1e3\n\n        VirtuosoChecker.__init__(self, tmp_dir, max_workers, cancel_timeout, source_added_file)\n\n        self.default_rcx_params = rcx_params\n        self.default_lvs_params = lvs_params\n        self.lvs_run_dir = os.path.abspath(lvs_run_dir)\n        self.lvs_runset = lvs_runset\n        self.lvs_link_files = lvs_link_files\n        self.rcx_run_dir = os.path.abspath(rcx_run_dir)\n        self.rcx_runset = rcx_runset\n        self.rcx_link_files = rcx_link_files\n        self.rcx_mode = rcx_mode\n        self.netlist_format = 'netlist'\n\n    def get_rcx_netlists(self, lib_name, cell_name):\n        # type: (str, str) -> List[str]\n        \"\"\"Returns a list of generated extraction netlist file names.\n\n        Parameters\n        ----------\n        lib_name : str\n            library name.\n        cell_name : str\n            cell_name\n\n        Returns\n        -------\n        netlists : List[str]\n            a list of generated extraction netlist file names.  
The first index is the main netlist.\n        \"\"\"\n        # PVS generate schematic cellviews directly.\n        if self.rcx_mode == 'starrc' and self.netlist_format == 'netlist':\n            return ['%s.spf' % cell_name]\n        else:\n            return []\n\n    def setup_lvs_flow(self, lib_name, cell_name, sch_view='schematic', lay_view='layout',\n                       params=None, **kwargs):\n        # type: (str, str, str, str, Optional[Dict[str, Any]], Any) -> Sequence[FlowInfo]\n\n        run_dir = os.path.join(self.lvs_run_dir, lib_name, cell_name)\n        os.makedirs(run_dir, exist_ok=True)\n\n        lay_file, sch_file = self._get_lay_sch_files(run_dir)\n\n        # add schematic/layout export to flow\n        flow_list = []\n\n        # Check if gds layout is provided\n        gds_layout_path = kwargs.pop('gds_layout_path', None)\n\n        # If not provided the gds layout, need to export layout\n        if not gds_layout_path:\n            cmd, log, env, cwd = self.setup_export_layout(lib_name, cell_name, lay_file, lay_view, None)\n            flow_list.append((cmd, log, env, cwd, _all_pass))\n        # If provided gds layout, do not export layout, just copy gds\n        else:\n            if not os.path.exists(gds_layout_path):\n                raise ValueError(f'gds_layout_path does not exist: {gds_layout_path}')\n            with open_temp(prefix='copy', dir=run_dir, delete=True) as f:\n                copy_log_file = f.name\n            copy_cmd = ['cp', gds_layout_path, os.path.abspath(lay_file)]\n            flow_list.append((copy_cmd, copy_log_file, None, None, _all_pass))\n\n        cmd, log, env, cwd = self.setup_export_schematic(lib_name, cell_name, sch_file, sch_view, None)\n        flow_list.append((cmd, log, env, cwd, _all_pass))\n\n        lvs_params_actual = self.default_lvs_params.copy()\n        if params is not None:\n            lvs_params_actual.update(params)\n\n        with open_temp(prefix='lvsLog', dir=run_dir, 
delete=False) as logf:\n            log_file = logf.name\n\n        # cmd_options\n        cmd_options = lvs_params_actual['cmd_options']\n\n        cmd = ['icv'] + cmd_options + ['-i', lay_file, '-s', sch_file, '-sf', 'SPICE', '-f', 'GDSII', '-c', cell_name,\n                                       '-vue', '-I']\n        for f in self.lvs_link_files:\n            cmd.append(f)\n\n        flow_list.append((cmd, log_file, None, run_dir, lvs_passed))\n        return flow_list\n\n    def setup_rcx_flow(self, lib_name, cell_name, sch_view='schematic', lay_view='layout',\n                       params=None, **kwargs):\n        # type: (str, str, str, str, Optional[Dict[str, Any]], Any) -> Sequence[FlowInfo]\n\n        # update default RCX parameters.\n        rcx_params_actual = self.default_rcx_params.copy()\n        if params is not None:\n            rcx_params_actual.update(params)\n\n        run_dir = os.path.join(self.rcx_run_dir, lib_name, cell_name)\n        os.makedirs(run_dir, exist_ok=True)\n\n        lay_file, sch_file = self._get_lay_sch_files(run_dir)\n        with open_temp(prefix='rcxLog', dir=run_dir, delete=False) as logf:\n            log_file = logf.name\n        flow_list = []\n        # Check if gds layout is provided\n        gds_layout_path = kwargs.pop('gds_layout_path', None)\n\n        # If not provided the gds layout, need to export layout\n        if not gds_layout_path:\n            cmd, log, env, cwd = self.setup_export_layout(lib_name, cell_name, lay_file, lay_view, None)\n            flow_list.append((cmd, log, env, cwd, _all_pass))\n        # If provided gds layout, do not export layout, just copy gds\n        else:\n            if not os.path.exists(gds_layout_path):\n                raise ValueError(f'gds_layout_path does not exist: {gds_layout_path}')\n            with open_temp(prefix='copy', dir=run_dir, delete=True) as f:\n                copy_log_file = f.name\n            copy_cmd = ['cp', gds_layout_path, 
os.path.abspath(lay_file)]\n            flow_list.append((copy_cmd, copy_log_file, None, None, _all_pass))\n\n        cmd, log, env, cwd = self.setup_export_schematic(lib_name, cell_name, sch_file, sch_view, None)\n        flow_list.append((cmd, log, env, cwd, _all_pass))\n\n        if self.rcx_mode == 'starrc':\n            # first: run Extraction LVS\n            cmd_options = rcx_params_actual['cmd_options']\n\n            cmd = ['icv'] + cmd_options + ['-i', lay_file, '-s', sch_file, '-sf', 'SPICE', '-f', 'GDSII',\n                                           '-c', cell_name, '-I']\n            for f in self.lvs_link_files:\n                cmd.append(f)\n\n            # hack the environment variables to make sure $PWD is the same as current working directory\n            env_copy = os.environ.copy()\n            env_copy['PWD'] = run_dir\n            flow_list.append((cmd, log_file, env_copy, run_dir, lvs_passed))\n\n            # second: setup StarXtract\n            # make symlinks\n            if self.rcx_link_files:\n                for source_file in self.rcx_link_files:\n                    targ_file = os.path.join(run_dir, os.path.basename(source_file))\n                    if not os.path.exists(targ_file):\n                        os.symlink(source_file, targ_file)\n\n            # generate new cmd for StarXtract\n            cmd_content, result = self.modify_starrc_cmd(run_dir, lib_name, cell_name,\n                                                         rcx_params_actual, sch_file)\n\n            # save cmd for StarXtract\n            with open_temp(dir=run_dir, delete=False) as cmd_file:\n                cmd_fname = cmd_file.name\n                cmd_file.write(cmd_content)\n\n            cmd = ['StarXtract', '-clean', cmd_fname]\n        else:\n            pass\n\n        # noinspection PyUnusedLocal\n        def rcx_passed(retcode, log_fname):\n            dirname = os.path.dirname(log_fname)\n            cell_name = os.path.basename(dirname)\n     
       results_file = os.path.join(dirname, cell_name + '.RESULTS')\n\n            # append error file at the end of log file\n            with open(log_fname, 'a') as logf:\n                with open(results_file, 'r') as errf:\n                    for line in errf:\n                        logf.write(line)\n\n            if not os.path.isfile(log_fname):\n                return None, ''\n\n            test_str = 'DRC and Extraction Results: CLEAN'\n            LogCheck = subprocess.Popen(['grep', '-i', test_str, log_fname], stdout=subprocess.PIPE,\n                                        stderr=subprocess.STDOUT)\n            stdout, stderr = LogCheck.communicate()\n\n            if stdout.decode() != '':\n                if self.netlist_format == 'netlist':\n                    return results_file, log_fname\n                else:\n                    return [], log_fname\n            else:\n                return None, log_fname\n\n        flow_list.append((cmd, log_file, None, run_dir, rcx_passed))\n        return flow_list\n\n    @classmethod\n    def _get_lay_sch_files(cls, run_dir):\n        lay_file = os.path.join(run_dir, 'layout.gds')\n        sch_file = os.path.join(run_dir, 'schematic.net')\n        return lay_file, sch_file\n\n    def modify_starrc_cmd(self, run_dir, lib_name, cell_name, starrc_params, sch_file):\n        # type: (str, str, str, Dict[str, Any], str) -> Tuple[str, str]\n        \"\"\"Modify the cmd file.\n\n        Parameters\n        ----------\n        run_dir : str\n            the run directory.\n        lib_name : str\n            the library name.\n        cell_name : str\n            the cell name.\n        starrc_params : Dict[str, Any]\n            override StarRC parameters.\n        sch_file : str\n            the schematic netlist\n\n        Returns\n        -------\n        starrc_cmd : str\n            the new StarXtract cmd file.\n        output_name : str\n            the extracted netlist file.\n        \"\"\"\n        
output_name = '%s.spf' % cell_name\n        if 'CDSLIBPATH' in os.environ:\n            cds_lib_path = os.path.abspath(os.path.join(os.environ['CDSLIBPATH'], 'cds.lib'))\n        else:\n            cds_lib_path = os.path.abspath('./cds.lib')\n        content = self.render_string_template(read_file(self.rcx_runset),\n                                              dict(\n                                                  cell_name=cell_name,\n                                                  extract_type=starrc_params['extract'].get('type'),\n                                                  netlist_format=starrc_params.get('netlist_format',\n                                                                                   'SPF'),\n                                                  sch_file=sch_file,\n                                                  cds_lib=cds_lib_path,\n                                                  lib_name=lib_name,\n                                                  run_dir=run_dir,\n                                              ))\n        self.netlist_format = starrc_params.get('netlist_format', 'netlist')\n        return content, os.path.join(run_dir, output_name)\n"
  },
  {
    "path": "bag/verification/pvs.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This module implements LVS/RCX using PVS/QRC and stream out from Virtuoso.\n\"\"\"\n\nfrom typing import TYPE_CHECKING, Optional, List, Dict, Any, Sequence, Tuple\n\nimport os\nimport subprocess\nimport time\n\nfrom ..io import read_yaml, open_temp, readlines_iter, fix_string\nfrom .virtuoso import VirtuosoChecker\n\nif TYPE_CHECKING:\n    from .base import FlowInfo\n\n\n# noinspection PyUnusedLocal\ndef _all_pass(retcode, log_file):\n    return True\n\n\n# noinspection PyUnusedLocal\ndef lvs_passed(retcode, log_file):\n    # type: (int, str) -> Tuple[bool, str]\n    \"\"\"Check if LVS passed\n\n    Parameters\n    ----------\n    retcode : int\n        return code of the LVS process.\n    log_file : str\n        log file name.\n\n    Returns\n    -------\n    success : bool\n        True if LVS passed.\n    log_file : str\n        the log file name.\n    \"\"\"\n    if not os.path.isfile(log_file):\n        return False, ''\n\n    test_str = '# Run Result             : MATCH'\n    LogCheck = subprocess.Popen(['grep', '-i', test_str, log_file], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n    stdout, stderr = LogCheck.communicate()\n\n    return stdout.decode() != '', log_file\n\n\n# noinspection PyUnusedLocal\ndef rcx_passed(retcode, log_file):\n    \"\"\"Check if RCX passed.\n\n    Parameters\n    ----------\n    retcode : int\n        return code of the RCX process.\n    log_file : str\n        log file name.\n\n    Returns\n    -------\n    netlist : str\n        netlist file name.\n    log_file : str\n        the log file name.\n    \"\"\"\n    if not os.path.isfile(log_file):\n        return None, ''\n\n    test_str = ' terminated normally  *****'\n    LogCheck = subprocess.Popen(['grep', '-i', test_str, log_file], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n    stdout, stderr = LogCheck.communicate()\n\n    if stdout.decode() != '':\n        return '', log_file\n    else:\n        return None, 
''\n\n\nclass PVS(VirtuosoChecker):\n    \"\"\"A subclass of VirtuosoChecker that uses PVS/QRC for verification.\n\n    Parameters\n    ----------\n    tmp_dir : string\n        temporary directory to save files in.\n    lvs_run_dir : string\n        the LVS run directory.\n    lvs_runset : string\n        the LVS runset filename.\n    lvs_rule_file : string\n        the LVS rule filename.\n    rcx_runset : string\n        the RCX runset filename.\n    source_added_file : string\n        the source.added file location.  Environment variable is supported.\n        Default value is '$DK/Calibre/lvs/source.added'.\n    \"\"\"\n\n    def __init__(self, tmp_dir, lvs_run_dir, lvs_runset, lvs_rule_file, rcx_runset,\n                 source_added_file='$DK/Calibre/lvs/source.added', **kwargs):\n\n        max_workers = kwargs.get('max_workers', None)\n        cancel_timeout = kwargs.get('cancel_timeout_ms', None)\n        if cancel_timeout is not None:\n            cancel_timeout /= 1e3\n\n        VirtuosoChecker.__init__(self, tmp_dir, max_workers, cancel_timeout, source_added_file)\n\n        self.default_rcx_params = kwargs.get('rcx_params', {})\n        self.default_lvs_params = kwargs.get('lvs_params', {})\n        self.lvs_run_dir = os.path.abspath(lvs_run_dir)\n        self.lvs_runset = lvs_runset\n        self.lvs_rule_file = lvs_rule_file\n        self.rcx_runset = rcx_runset\n\n    def get_rcx_netlists(self, lib_name, cell_name):\n        # type: (str, str) -> List[str]\n        # PVS generate schematic cellviews directly.\n        return []\n\n    def setup_lvs_flow(self, lib_name, cell_name, sch_view='schematic', lay_view='layout',\n                       params=None, **kwargs):\n        # type: (str, str, str, str, Optional[Dict[str, Any]], Any) -> Sequence[FlowInfo]\n\n        run_dir = os.path.join(self.lvs_run_dir, lib_name, cell_name)\n        os.makedirs(run_dir, exist_ok=True)\n\n        lay_file = os.path.join(run_dir, 'layout.gds')\n        sch_file = 
os.path.join(run_dir, 'schematic.net')\n\n        # add schematic/layout export to flow\n        flow_list = []\n\n        # Check if gds layout is provided\n        gds_layout_path = kwargs.pop('gds_layout_path', None)\n\n        # If not provided the gds layout, need to export layout\n        if not gds_layout_path:\n            cmd, log, env, cwd = self.setup_export_layout(lib_name, cell_name, lay_file, lay_view, None)\n            flow_list.append((cmd, log, env, cwd, _all_pass))\n        # If provided gds layout, do not export layout, just copy gds\n        else:\n            if not os.path.exists(gds_layout_path):\n                raise ValueError(f'gds_layout_path does not exist: {gds_layout_path}')\n            with open_temp(prefix='copy', dir=run_dir, delete=True) as f:\n                copy_log_file = f.name\n            copy_cmd = ['cp', gds_layout_path, os.path.abspath(lay_file)]\n            flow_list.append((copy_cmd, copy_log_file, None, None, _all_pass))\n\n        cmd, log, env, cwd = self.setup_export_schematic(lib_name, cell_name, sch_file, sch_view,\n                                                         None)\n        flow_list.append((cmd, log, env, cwd, _all_pass))\n\n        lvs_params_actual = self.default_lvs_params.copy()\n        if params is not None:\n            lvs_params_actual.update(params)\n\n        with open_temp(prefix='lvsLog', dir=run_dir, delete=False) as logf:\n            log_file = logf.name\n\n        # generate new runset\n        runset_content = self.modify_lvs_runset(run_dir, cell_name, lvs_params_actual)\n\n        # save runset\n        with open_temp(dir=run_dir, delete=False) as runset_file:\n            runset_fname = runset_file.name\n            runset_file.write(runset_content)\n\n        num_cores = 4\n        cmd = ['pvs', '-perc', '-lvs', '-qrc_data', '-control', runset_fname, '-dp', str(num_cores),\n               '-gds', lay_file, '-layout_top_cell', cell_name,\n               '-source_cdl', 
sch_file, '-source_top_cell', cell_name,\n               self.lvs_rule_file,\n               ]\n\n        flow_list.append((cmd, log_file, None, run_dir, lvs_passed))\n\n        return flow_list\n\n    def setup_rcx_flow(self, lib_name, cell_name, sch_view='schematic', lay_view='layout',\n                       params=None, **kwargs):\n        # type: (str, str, str, str, Optional[Dict[str, Any]], Any) -> Sequence[FlowInfo]\n\n        # update default RCX parameters.\n        rcx_params_actual = self.default_rcx_params.copy()\n        if params is not None:\n            rcx_params_actual.update(params)\n\n        run_dir = os.path.join(self.lvs_run_dir, lib_name, cell_name)\n        os.makedirs(run_dir, exist_ok=True)\n\n        with open_temp(prefix='rcxLog', dir=run_dir, delete=False) as logf:\n            log_file = logf.name\n\n        # generate new runset\n        runset_content = self.modify_rcx_runset(run_dir, lib_name, cell_name, lay_view,\n                                                rcx_params_actual)\n\n        # save runset\n        with open_temp(dir=run_dir, delete=False) as runset_file:\n            runset_fname = runset_file.name\n            runset_file.write(runset_content)\n\n        cmd = ['qrc', '-cmd', runset_fname]\n\n        # NOTE: qrc needs to be run in the current working directory (virtuoso directory),\n        # because it needs to access cds.lib\n        return [(cmd, log_file, None, os.environ['BAG_WORK_DIR'], rcx_passed)]\n\n    def modify_lvs_runset(self, run_dir, cell_name, lvs_params):\n        # type: (str, str, Dict[str, Any]) -> str\n        \"\"\"Modify the given LVS runset file.\n\n        Parameters\n        ----------\n        run_dir : str\n            the run directory.\n        cell_name : str\n            the cell name.\n        lvs_params : Dict[str, Any]\n            override LVS parameters.\n\n        Returns\n        -------\n        content : str\n            the new runset content.\n        \"\"\"\n        # 
convert runset content to dictionary\n        lvs_options = {}\n        for line in readlines_iter(self.lvs_runset):\n            key, val = line.split(' ', 1)\n            # remove semicolons\n            val = val.strip().rstrip(';')\n            if key in lvs_options:\n                lvs_options[key].append(val)\n            else:\n                lvs_options[key] = [val]\n\n        # get results_db file name\n        results_db = os.path.join(run_dir, '%s.erc_errors.ascii' % cell_name)\n        # override parameters\n        lvs_options['lvs_report_file'] = ['\"%s.rep\"' % cell_name]\n        lvs_options['report_summary'] = ['-erc \"%s.sum\" -replace' % cell_name]\n        lvs_options['results_db'] = ['-erc \"%s\" -ascii' % results_db]\n        lvs_options['mask_svdb_dir'] = ['\"%s\"' % os.path.join(run_dir, 'svdb')]\n\n        lvs_options.update(lvs_params)\n        content_list = []\n        for key, val_list in lvs_options.items():\n            for v in val_list:\n                content_list.append('%s %s;\\n' % (key, v))\n\n        return ''.join(content_list)\n\n    def modify_rcx_runset(self, run_dir, lib_name, cell_name, lay_view, rcx_params):\n        # type: (str, str, str, str, Dict[str, Any]) -> str\n        \"\"\"Modify the given QRC options.\n\n        Parameters\n        ----------\n        run_dir : str\n            the run directory.\n        lib_name : str\n            the library name.\n        cell_name : str\n            the cell name.\n        lay_view : str\n            the layout view.\n        rcx_params : Dict[str, Any]\n            override RCX parameters.\n\n        Returns\n        -------\n        content : str\n            the new runset content.\n        \"\"\"\n        data_dir = os.path.join(run_dir, 'svdb')\n        # wait 10 seconds to see if not finding directory is just a network drive problem\n        query_timeout = 10.0\n        tstart = time.time()\n        elapsed = 0.0\n        while not os.path.isdir(data_dir) and 
elapsed < query_timeout:\n            time.sleep(0.1)\n            elapsed = time.time() - tstart\n        if not os.path.isdir(data_dir):\n            raise ValueError('cannot find directory %s.  Did you run PVS first?' % data_dir)\n\n        # load default rcx options\n        rcx_options = read_yaml(self.rcx_runset)\n\n        # setup inputs/outputs\n        rcx_options['input_db']['design_cell_name'] = '{} {} {}'.format(cell_name, lay_view,\n                                                                        lib_name)\n        rcx_options['input_db']['run_name'] = cell_name\n        rcx_options['input_db']['directory_name'] = data_dir\n        rcx_options['output_db']['cdl_out_map_directory'] = run_dir\n        rcx_options['output_setup']['directory_name'] = data_dir\n        rcx_options['output_setup']['temporary_directory_name'] = cell_name\n\n        # override parameters\n        for key, val in rcx_options.items():\n            if key in rcx_params:\n                val.update(rcx_params[key])\n\n        # convert dictionary to QRC command file format.\n        content_list = []\n        for key, options in rcx_options.items():\n            content_list.append('%s \\\\' % key)\n            for k, v in options.items():\n                v = fix_string(v)\n                if isinstance(v, str):\n                    # add quotes around string\n                    v = '\"{}\"'.format(v)\n                content_list.append('    -%s %s \\\\' % (k, v))\n\n            # remove line continuation backslash from last option\n            content_list[-1] = content_list[-1][:-2]\n\n        return '\\n'.join(content_list)\n"
  },
  {
    "path": "bag/verification/templates/LICENSE",
    "content": "BSD 3-Clause License\n\nCopyright (c) 2018, Regents of the University of California\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n  list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n  this list of conditions and the following disclaimer in the documentation\n  and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n  contributors may be used to endorse or promote products derived from\n  this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "bag/verification/templates/layout_export_config.txt",
    "content": "case                               \"preserve\"\ncellListFile                       \"\"\ncellMap                            \"\"\ncellNamePrefix                     \"\"\ncellNameSuffix                     \"\"\nconvertDot                         \"node\"\nconvertPin                         \"geometry\"\n#doNotPreservePcellPins\n#flattenPcells\n#flattenVias\nfontMap                            \"\"\n#ignoreLines\n#ignorePcellEvalFail\nlabelCase                          \"preserve\"\nlabelDepth                         \"1\"\nlabelMap                           \"\"\nlayerMap                           \"\"\nlibrary                            \"{{lib_name}}\"\nlogFile                            \"strmOut.log\"\nmaxVertices                        \"200\"\n#mergePathSegsToPath\n#noConvertHalfWidthPath\nnoInfo                             \"\"\n#noObjectProp\n#noOutputTextDisplays\n#noOutputUnplacedInst\nnoWarn                             \"\"\nobjectMap                          \"\"\noutputDir                          \"{{run_dir}}\"\n#pathToPolygon\npinAttNum                          \"0\"\npropMap                            \"\"\n#propValueOnly\n#rectToBox\nrefLibList                         \"\"\n#replaceBusBitChar\n#reportPrecisionLoss\n#respectGDSIINameLimit\nrunDir                             \"{{run_dir}}\"\n#snapToGrid\nstrmFile                           \"{{output_name}}\"\nstrmVersion                        \"5\"\nsummaryFile                        \"\"\ntechLib                            \"\"\ntopCell                            \"{{cell_name}}\"\nuserSkillFile                      \"\"\nviaMap                             \"\"\nview                               \"{{view_name}}\"\nwarnToErr                          \"\"\n"
  },
  {
    "path": "bag/verification/templates/si_env.txt",
    "content": "simStopList = '(\"auCdl\")\nsimViewList = '(\"auCdl\" \"schematic\")\nglobalGndSig = \"\"\nglobalPowerSig = \"\"\nshrinkFACTOR = 0\ncheckScale = \"meter\"\ndiodeCheck = \"none\"\ncapacitorCheck = \"none\"\nresistorCheck = \"none\"\nresistorModel = \"\"\nshortRES = 2000\nsimNetlistHier = 't\npinMAP = 'nil\ndisplayPININFO = 't\ncheckLDD = 'nil\nconnects = \"\"\nsetEQUIV = \"\"\nsimRunDir = \"{{run_dir}}\"\nhnlNetlistFileName = \"{{output_name}}\"\nsimSimulator = \"auCdl\"\nsimViewName = \"{{view_name}}\"\nsimCellName = \"{{cell_name}}\"\nsimLibName = \"{{lib_name}}\"\nincFILE = \"{{source_added_file}}\"\ncdlSimViewList = '(\"auCdl\" \"schematic\")\ncdlSimStopList = '(\"auCdl\")\nauCdlDefNetlistProc = \"ansCdlHnlPrintInst\"\n"
  },
  {
    "path": "bag/verification/virtuoso.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This module handles exporting schematic/layout from Virtuoso.\n\"\"\"\n\nfrom typing import TYPE_CHECKING, Optional, Dict, Any\n\nimport os\nfrom abc import ABC\n\nfrom ..io import write_file, open_temp\nfrom .base import SubProcessChecker\n\nif TYPE_CHECKING:\n    from .base import ProcInfo\n\n\nclass VirtuosoChecker(SubProcessChecker, ABC):\n    \"\"\"the base Checker class for Virtuoso.\n\n    This class implement layout/schematic export procedures.\n\n    Parameters\n    ----------\n    tmp_dir : str\n        temporary file directory.\n    max_workers : int\n        maximum number of parallel processes.\n    cancel_timeout : float\n        timeout for cancelling a subprocess.\n    source_added_file : str\n        file to include for schematic export.\n    \"\"\"\n\n    def __init__(self, tmp_dir, max_workers, cancel_timeout, source_added_file):\n        # type: (str, int, float, str) -> None\n        SubProcessChecker.__init__(self, tmp_dir, max_workers, cancel_timeout)\n        self._source_added_file = source_added_file\n\n    def setup_export_layout(self, lib_name, cell_name, out_file, view_name='layout', params=None):\n        # type: (str, str, str, str, Optional[Dict[str, Any]]) -> ProcInfo\n        out_file = os.path.abspath(out_file)\n\n        run_dir = os.path.dirname(out_file)\n        out_name = os.path.basename(out_file)\n        log_file = os.path.join(run_dir, 'layout_export.log')\n\n        os.makedirs(run_dir, exist_ok=True)\n\n        # fill in stream out configuration file.\n        content = self.render_file_template('layout_export_config.txt',\n                                            dict(\n                                                lib_name=lib_name,\n                                                cell_name=cell_name,\n                                                view_name=view_name,\n                                                output_name=out_name,\n                         
                       run_dir=run_dir,\n                                            ))\n\n        with open_temp(prefix='stream_template', dir=run_dir, delete=False) as config_file:\n            config_fname = config_file.name\n            config_file.write(content)\n\n        # run strmOut\n        cmd = ['strmout', '-templateFile', config_fname]\n\n        return cmd, log_file, None, os.environ['BAG_WORK_DIR']\n\n    def setup_export_schematic(self, lib_name, cell_name, out_file, view_name='schematic',\n                               params=None):\n        # type: (str, str, str, str, Optional[Dict[str, Any]]) -> ProcInfo\n        out_file = os.path.abspath(out_file)\n\n        run_dir = os.path.dirname(out_file)\n        out_name = os.path.basename(out_file)\n        log_file = os.path.join(run_dir, 'schematic_export.log')\n\n        # fill in stream out configuration file.\n        content = self.render_file_template('si_env.txt',\n                                            dict(\n                                                lib_name=lib_name,\n                                                cell_name=cell_name,\n                                                view_name=view_name,\n                                                output_name=out_name,\n                                                source_added_file=self._source_added_file,\n                                                run_dir=run_dir,\n                                            ))\n\n        # create configuration file.\n        config_fname = os.path.join(run_dir, 'si.env')\n        write_file(config_fname, content)\n\n        # run command\n        cmd = ['si', run_dir, '-batch', '-command', 'netlist']\n\n        return cmd, log_file, None, os.environ['BAG_WORK_DIR']\n"
  },
  {
    "path": "bag/virtuoso.py",
    "content": "# -*- coding: utf-8 -*-\n\n\"\"\"This module provides functions needed to get Virtuoso to work with BAG.\n\"\"\"\n\nimport os\nimport sys\nimport atexit\nimport signal\nimport argparse\n\nimport bag.interface\nimport bag.io\n\n\ndef run_skill_server(args):\n    \"\"\"Run the BAG/Virtuoso server.\"\"\"\n    error_msg = ''\n    server = None\n    port_file = None\n    port_number = None\n\n    try:\n        # process command line arguments\n        min_port = args.min_port\n        max_port = args.max_port\n        # remove directory from port file name\n        port_file = os.path.basename(args.port_file)\n        log_file = args.log_file\n\n        # create log file directory, and remove old log.\n        if log_file is not None:\n            log_file = os.path.abspath(log_file)\n            log_dir = os.path.dirname(log_file)\n            if not os.path.exists(log_dir):\n                os.makedirs(log_dir)\n            elif os.path.exists(log_file):\n                os.remove(log_file)\n\n        # determine port file name\n        if 'BAG_WORK_DIR' not in os.environ:\n            raise Exception('Environment variable BAG_WORK_DIR not defined')\n        work_dir = os.environ['BAG_WORK_DIR']\n        if not os.path.isdir(work_dir):\n            raise Exception('$BAG_WORK_DIR = %s is not a directory' % work_dir)\n\n        port_file = os.path.join(work_dir,  port_file)\n\n        # determine temp directory\n        tmp_dir = None\n        if 'BAG_TEMP_DIR' in os.environ:\n            tmp_dir = os.environ['BAG_TEMP_DIR']\n            if not os.path.isdir(tmp_dir):\n                if os.path.exists(tmp_dir):\n                    raise Exception('$BAG_TEMP_DIR = %s is not a directory' % tmp_dir)\n                else:\n                    os.makedirs(tmp_dir)\n\n        # attempt to open port and start server\n        router = bag.interface.ZMQRouter(min_port=min_port, max_port=max_port, log_file=log_file)\n        server = 
bag.interface.SkillServer(router, sys.stdout, sys.stdin, tmpdir=tmp_dir)\n        port_number = router.get_port()\n    except Exception as ex:\n        error_msg = 'bag server process error:\\n%s\\n' % str(ex)\n\n    if not error_msg:\n        bag.io.write_file(port_file, '%r\\n' % port_number)\n\n        # TODO: somehow this is a bug??!! figure it out.\n        # make sure port_file is removed at exit\n        # def exit_handler():\n        #     if os.path.exists(port_file):\n        #         os.remove(port_file)\n\n        # atexit.register(exit_handler)\n        # signal.signal(signal.SIGTERM, exit_handler)\n\n        try:\n            sys.stdout.write('BAG skill server has started.  Yay!\\n')\n            sys.stdout.flush()\n            server.run()\n        except Exception as ex:\n            error_msg = 'bag server process error:\\n%s\\n' % str(ex)\n\n    if error_msg:\n        sys.stderr.write(error_msg)\n        sys.stderr.flush()\n\n\ndef parse_command_line_arguments():\n    \"\"\"Parse command line arguments, then run the corresponding function.\"\"\"\n\n    desc = 'A Python program that performs tasks for virtuoso.'\n    parser = argparse.ArgumentParser(description=desc)\n    desc = 'Valid commands.  
Supply -h/--help flag after the command name to learn more about the command.'\n    sub_parsers = parser.add_subparsers(title='Commands', description=desc, help='command name.')\n\n    desc = 'Run BAG skill server.'\n    par2 = sub_parsers.add_parser('run_skill_server', description=desc, help=desc)\n\n    par2.add_argument('min_port', type=int, help='minimum socket port number.')\n    par2.add_argument('max_port', type=int, help='maximum socket port number.')\n    par2.add_argument('port_file', type=str, help='file to write the port number to.')\n    par2.add_argument('log_file', type=str, nargs='?', default=None,\n                      help='log file name.')\n    par2.set_defaults(func=run_skill_server)\n\n    args = parser.parse_args()\n    args.func(args)\n\n\nif __name__ == '__main__':\n    parse_command_line_arguments()\n"
  },
  {
    "path": "docs/.gitignore",
    "content": "build\n"
  },
  {
    "path": "docs/LICENSE",
    "content": "BSD 3-Clause License\n\nCopyright (c) 2018, Regents of the University of California\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n  list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n  this list of conditions and the following disclaimer in the documentation\n  and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n  contributors may be used to endorse or promote products derived from\n  this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "docs/Makefile",
    "content": "# Makefile for Sphinx documentation\n#\n\n# You can set these variables from the command line.\nSPHINXOPTS    =\nSPHINXBUILD   = sphinx-build\nPAPER         =\nBUILDDIR      = build\n\n# User-friendly check for sphinx-build\nifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)\n\t$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don\\'t have Sphinx installed, grab it from http://sphinx-doc.org/)\nendif\n\n# Internal variables.\nPAPEROPT_a4     = -D latex_paper_size=a4\nPAPEROPT_letter = -D latex_paper_size=letter\nALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source\n# the i18n builder cannot share the environment and doctrees with the others\nI18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source\n\n.PHONY: help\nhelp:\n\t@echo \"Please use \\`make <target>' where <target> is one of\"\n\t@echo \"  html       to make standalone HTML files\"\n\t@echo \"  dirhtml    to make HTML files named index.html in directories\"\n\t@echo \"  singlehtml to make a single large HTML file\"\n\t@echo \"  pickle     to make pickle files\"\n\t@echo \"  json       to make JSON files\"\n\t@echo \"  htmlhelp   to make HTML files and a HTML help project\"\n\t@echo \"  qthelp     to make HTML files and a qthelp project\"\n\t@echo \"  applehelp  to make an Apple Help Book\"\n\t@echo \"  devhelp    to make HTML files and a Devhelp project\"\n\t@echo \"  epub       to make an epub\"\n\t@echo \"  epub3      to make an epub3\"\n\t@echo \"  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter\"\n\t@echo \"  latexpdf   to make LaTeX files and run them through pdflatex\"\n\t@echo \"  latexpdfja to make LaTeX files and run them through platex/dvipdfmx\"\n\t@echo \"  text       to make text 
files\"\n\t@echo \"  man        to make manual pages\"\n\t@echo \"  texinfo    to make Texinfo files\"\n\t@echo \"  info       to make Texinfo files and run them through makeinfo\"\n\t@echo \"  gettext    to make PO message catalogs\"\n\t@echo \"  changes    to make an overview of all changed/added/deprecated items\"\n\t@echo \"  xml        to make Docutils-native XML files\"\n\t@echo \"  pseudoxml  to make pseudoxml-XML files for display purposes\"\n\t@echo \"  linkcheck  to check all external links for integrity\"\n\t@echo \"  doctest    to run all doctests embedded in the documentation (if enabled)\"\n\t@echo \"  coverage   to run coverage check of the documentation (if enabled)\"\n\t@echo \"  dummy      to check syntax errors of document sources\"\n\n.PHONY: clean\nclean:\n\trm -rf $(BUILDDIR)/*\n\n.PHONY: html\nhtml:\n\t$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html\n\t@echo\n\t@echo \"Build finished. The HTML pages are in $(BUILDDIR)/html.\"\n\n.PHONY: dirhtml\ndirhtml:\n\t$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml\n\t@echo\n\t@echo \"Build finished. The HTML pages are in $(BUILDDIR)/dirhtml.\"\n\n.PHONY: singlehtml\nsinglehtml:\n\t$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml\n\t@echo\n\t@echo \"Build finished. 
The HTML page is in $(BUILDDIR)/singlehtml.\"\n\n.PHONY: pickle\npickle:\n\t$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle\n\t@echo\n\t@echo \"Build finished; now you can process the pickle files.\"\n\n.PHONY: json\njson:\n\t$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json\n\t@echo\n\t@echo \"Build finished; now you can process the JSON files.\"\n\n.PHONY: htmlhelp\nhtmlhelp:\n\t$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp\n\t@echo\n\t@echo \"Build finished; now you can run HTML Help Workshop with the\" \\\n\t      \".hhp project file in $(BUILDDIR)/htmlhelp.\"\n\n.PHONY: qthelp\nqthelp:\n\t$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp\n\t@echo\n\t@echo \"Build finished; now you can run \"qcollectiongenerator\" with the\" \\\n\t      \".qhcp project file in $(BUILDDIR)/qthelp, like this:\"\n\t@echo \"# qcollectiongenerator $(BUILDDIR)/qthelp/BAG.qhcp\"\n\t@echo \"To view the help file:\"\n\t@echo \"# assistant -collectionFile $(BUILDDIR)/qthelp/BAG.qhc\"\n\n.PHONY: applehelp\napplehelp:\n\t$(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp\n\t@echo\n\t@echo \"Build finished. The help book is in $(BUILDDIR)/applehelp.\"\n\t@echo \"N.B. You won't be able to view it unless you put it in\" \\\n\t      \"~/Library/Documentation/Help or install it in your application\" \\\n\t      \"bundle.\"\n\n.PHONY: devhelp\ndevhelp:\n\t$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp\n\t@echo\n\t@echo \"Build finished.\"\n\t@echo \"To view the help file:\"\n\t@echo \"# mkdir -p $$HOME/.local/share/devhelp/BAG\"\n\t@echo \"# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/BAG\"\n\t@echo \"# devhelp\"\n\n.PHONY: epub\nepub:\n\t$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub\n\t@echo\n\t@echo \"Build finished. The epub file is in $(BUILDDIR)/epub.\"\n\n.PHONY: epub3\nepub3:\n\t$(SPHINXBUILD) -b epub3 $(ALLSPHINXOPTS) $(BUILDDIR)/epub3\n\t@echo\n\t@echo \"Build finished. 
The epub3 file is in $(BUILDDIR)/epub3.\"\n\n.PHONY: latex\nlatex:\n\t$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex\n\t@echo\n\t@echo \"Build finished; the LaTeX files are in $(BUILDDIR)/latex.\"\n\t@echo \"Run \\`make' in that directory to run these through (pdf)latex\" \\\n\t      \"(use \\`make latexpdf' here to do that automatically).\"\n\n.PHONY: latexpdf\nlatexpdf:\n\t$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex\n\t@echo \"Running LaTeX files through pdflatex...\"\n\t$(MAKE) -C $(BUILDDIR)/latex all-pdf\n\t@echo \"pdflatex finished; the PDF files are in $(BUILDDIR)/latex.\"\n\n.PHONY: latexpdfja\nlatexpdfja:\n\t$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex\n\t@echo \"Running LaTeX files through platex and dvipdfmx...\"\n\t$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja\n\t@echo \"pdflatex finished; the PDF files are in $(BUILDDIR)/latex.\"\n\n.PHONY: text\ntext:\n\t$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text\n\t@echo\n\t@echo \"Build finished. The text files are in $(BUILDDIR)/text.\"\n\n.PHONY: man\nman:\n\t$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man\n\t@echo\n\t@echo \"Build finished. The manual pages are in $(BUILDDIR)/man.\"\n\n.PHONY: texinfo\ntexinfo:\n\t$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo\n\t@echo\n\t@echo \"Build finished. The Texinfo files are in $(BUILDDIR)/texinfo.\"\n\t@echo \"Run \\`make' in that directory to run these through makeinfo\" \\\n\t      \"(use \\`make info' here to do that automatically).\"\n\n.PHONY: info\ninfo:\n\t$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo\n\t@echo \"Running Texinfo files through makeinfo...\"\n\tmake -C $(BUILDDIR)/texinfo info\n\t@echo \"makeinfo finished; the Info files are in $(BUILDDIR)/texinfo.\"\n\n.PHONY: gettext\ngettext:\n\t$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale\n\t@echo\n\t@echo \"Build finished. 
The message catalogs are in $(BUILDDIR)/locale.\"\n\n.PHONY: changes\nchanges:\n\t$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes\n\t@echo\n\t@echo \"The overview file is in $(BUILDDIR)/changes.\"\n\n.PHONY: linkcheck\nlinkcheck:\n\t$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck\n\t@echo\n\t@echo \"Link check complete; look for any errors in the above output \" \\\n\t      \"or in $(BUILDDIR)/linkcheck/output.txt.\"\n\n.PHONY: doctest\ndoctest:\n\t$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest\n\t@echo \"Testing of doctests in the sources finished, look at the \" \\\n\t      \"results in $(BUILDDIR)/doctest/output.txt.\"\n\n.PHONY: coverage\ncoverage:\n\t$(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage\n\t@echo \"Testing of coverage in the sources finished, look at the \" \\\n\t      \"results in $(BUILDDIR)/coverage/python.txt.\"\n\n.PHONY: xml\nxml:\n\t$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml\n\t@echo\n\t@echo \"Build finished. The XML files are in $(BUILDDIR)/xml.\"\n\n.PHONY: pseudoxml\npseudoxml:\n\t$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml\n\t@echo\n\t@echo \"Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml.\"\n\n.PHONY: dummy\ndummy:\n\t$(SPHINXBUILD) -b dummy $(ALLSPHINXOPTS) $(BUILDDIR)/dummy\n\t@echo\n\t@echo \"Build finished. Dummy builder generates no files.\"\n"
  },
  {
    "path": "docs/README",
    "content": "To build/update documentation:\n\n1. make sure BAG Python's bin folder is in your path.\n\n2. run: \n\n   ./refresh_api.sh\n   \n   to generate API documentations.\n\n3. run:\n\n   make html\n\n   to build the documentation webpage.\n"
  },
  {
    "path": "docs/refresh_api.sh",
    "content": "#!/usr/bin/env tcsh\n\nsphinx-apidoc --force --output-dir=source/api ../bag\n"
  },
  {
    "path": "docs/source/LICENSE",
    "content": "BSD 3-Clause License\n\nCopyright (c) 2018, Regents of the University of California\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n  list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n  this list of conditions and the following disclaimer in the documentation\n  and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n  contributors may be used to endorse or promote products derived from\n  this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "docs/source/api/LICENSE",
    "content": "BSD 3-Clause License\n\nCopyright (c) 2018, Regents of the University of California\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n  list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n  this list of conditions and the following disclaimer in the documentation\n  and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n  contributors may be used to endorse or promote products derived from\n  this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "docs/source/api/bag.data.rst",
    "content": "bag.data package\n================\n\nSubmodules\n----------\n\nbag.data.core module\n--------------------\n\n.. automodule:: bag.data.core\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nbag.data.dc module\n------------------\n\n.. automodule:: bag.data.dc\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nbag.data.digital module\n-----------------------\n\n.. automodule:: bag.data.digital\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nbag.data.lti module\n-------------------\n\n.. automodule:: bag.data.lti\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nbag.data.ltv module\n-------------------\n\n.. automodule:: bag.data.ltv\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nbag.data.mos module\n-------------------\n\n.. automodule:: bag.data.mos\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nbag.data.plot module\n--------------------\n\n.. automodule:: bag.data.plot\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\n\nModule contents\n---------------\n\n.. automodule:: bag.data\n    :members:\n    :undoc-members:\n    :show-inheritance:\n"
  },
  {
    "path": "docs/source/api/bag.design.rst",
    "content": "bag.design package\n==================\n\nSubmodules\n----------\n\nbag.design.database module\n--------------------------\n\n.. automodule:: bag.design.database\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nbag.design.module module\n------------------------\n\n.. automodule:: bag.design.module\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\n\nModule contents\n---------------\n\n.. automodule:: bag.design\n    :members:\n    :undoc-members:\n    :show-inheritance:\n"
  },
  {
    "path": "docs/source/api/bag.interface.rst",
    "content": "bag.interface package\n=====================\n\nSubmodules\n----------\n\nbag.interface.database module\n-----------------------------\n\n.. automodule:: bag.interface.database\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nbag.interface.ocean module\n--------------------------\n\n.. automodule:: bag.interface.ocean\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nbag.interface.server module\n---------------------------\n\n.. automodule:: bag.interface.server\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nbag.interface.simulator module\n------------------------------\n\n.. automodule:: bag.interface.simulator\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nbag.interface.skill module\n--------------------------\n\n.. automodule:: bag.interface.skill\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nbag.interface.zmqwrapper module\n-------------------------------\n\n.. automodule:: bag.interface.zmqwrapper\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\n\nModule contents\n---------------\n\n.. automodule:: bag.interface\n    :members:\n    :undoc-members:\n    :show-inheritance:\n"
  },
  {
    "path": "docs/source/api/bag.io.rst",
    "content": "bag.io package\n==============\n\nSubmodules\n----------\n\nbag.io.common module\n--------------------\n\n.. automodule:: bag.io.common\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nbag.io.file module\n------------------\n\n.. automodule:: bag.io.file\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nbag.io.gui module\n-----------------\n\n.. automodule:: bag.io.gui\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nbag.io.process module\n---------------------\n\n.. automodule:: bag.io.process\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nbag.io.sim_data module\n----------------------\n\n.. automodule:: bag.io.sim_data\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\n\nModule contents\n---------------\n\n.. automodule:: bag.io\n    :members:\n    :undoc-members:\n    :show-inheritance:\n"
  },
  {
    "path": "docs/source/api/bag.layout.routing.rst",
    "content": "bag.layout.routing package\n==========================\n\nSubmodules\n----------\n\nbag.layout.routing.base module\n------------------------------\n\n.. automodule:: bag.layout.routing.base\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nbag.layout.routing.fill module\n------------------------------\n\n.. automodule:: bag.layout.routing.fill\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nbag.layout.routing.grid module\n------------------------------\n\n.. automodule:: bag.layout.routing.grid\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\n\nModule contents\n---------------\n\n.. automodule:: bag.layout.routing\n    :members:\n    :undoc-members:\n    :show-inheritance:\n"
  },
  {
    "path": "docs/source/api/bag.layout.rst",
    "content": "bag.layout package\n==================\n\nSubpackages\n-----------\n\n.. toctree::\n\n    bag.layout.routing\n\nSubmodules\n----------\n\nbag.layout.connection module\n----------------------------\n\n.. automodule:: bag.layout.connection\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nbag.layout.core module\n----------------------\n\n.. automodule:: bag.layout.core\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nbag.layout.digital module\n-------------------------\n\n.. automodule:: bag.layout.digital\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nbag.layout.objects module\n-------------------------\n\n.. automodule:: bag.layout.objects\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nbag.layout.template module\n--------------------------\n\n.. automodule:: bag.layout.template\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nbag.layout.util module\n----------------------\n\n.. automodule:: bag.layout.util\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\n\nModule contents\n---------------\n\n.. automodule:: bag.layout\n    :members:\n    :undoc-members:\n    :show-inheritance:\n"
  },
  {
    "path": "docs/source/api/bag.math.rst",
    "content": "bag.math package\n================\n\nSubmodules\n----------\n\nbag.math.dfun module\n--------------------\n\n.. automodule:: bag.math.dfun\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nbag.math.interpolate module\n---------------------------\n\n.. automodule:: bag.math.interpolate\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\n\nModule contents\n---------------\n\n.. automodule:: bag.math\n    :members:\n    :undoc-members:\n    :show-inheritance:\n"
  },
  {
    "path": "docs/source/api/bag.mdao.rst",
    "content": "bag.mdao package\n================\n\nSubmodules\n----------\n\nbag.mdao.components module\n--------------------------\n\n.. automodule:: bag.mdao.components\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nbag.mdao.core module\n--------------------\n\n.. automodule:: bag.mdao.core\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\n\nModule contents\n---------------\n\n.. automodule:: bag.mdao\n    :members:\n    :undoc-members:\n    :show-inheritance:\n"
  },
  {
    "path": "docs/source/api/bag.rst",
    "content": "bag package\n===========\n\nSubpackages\n-----------\n\n.. toctree::\n\n    bag.data\n    bag.design\n    bag.interface\n    bag.io\n    bag.layout\n    bag.math\n    bag.mdao\n    bag.tech\n    bag.util\n    bag.verification\n\nSubmodules\n----------\n\nbag.core module\n---------------\n\n.. automodule:: bag.core\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nbag.virtuoso module\n-------------------\n\n.. automodule:: bag.virtuoso\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\n\nModule contents\n---------------\n\n.. automodule:: bag\n    :members:\n    :undoc-members:\n    :show-inheritance:\n"
  },
  {
    "path": "docs/source/api/bag.tech.rst",
    "content": "bag.tech package\n================\n\nSubmodules\n----------\n\nbag.tech.core module\n--------------------\n\n.. automodule:: bag.tech.core\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nbag.tech.mos module\n-------------------\n\n.. automodule:: bag.tech.mos\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\n\nModule contents\n---------------\n\n.. automodule:: bag.tech\n    :members:\n    :undoc-members:\n    :show-inheritance:\n"
  },
  {
    "path": "docs/source/api/bag.util.rst",
    "content": "bag.util package\n================\n\nSubmodules\n----------\n\nbag.util.interval module\n------------------------\n\n.. automodule:: bag.util.interval\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nbag.util.libimport module\n-------------------------\n\n.. automodule:: bag.util.libimport\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nbag.util.parse module\n---------------------\n\n.. automodule:: bag.util.parse\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nbag.util.search module\n----------------------\n\n.. automodule:: bag.util.search\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\n\nModule contents\n---------------\n\n.. automodule:: bag.util\n    :members:\n    :undoc-members:\n    :show-inheritance:\n"
  },
  {
    "path": "docs/source/api/bag.verification.rst",
    "content": "bag.verification package\n========================\n\nSubmodules\n----------\n\nbag.verification.base module\n----------------------------\n\n.. automodule:: bag.verification.base\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nbag.verification.calibre module\n-------------------------------\n\n.. automodule:: bag.verification.calibre\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nbag.verification.pvs module\n---------------------------\n\n.. automodule:: bag.verification.pvs\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\nbag.verification.virtuoso_export module\n---------------------------------------\n\n.. automodule:: bag.verification.virtuoso_export\n    :members:\n    :undoc-members:\n    :show-inheritance:\n\n\nModule contents\n---------------\n\n.. automodule:: bag.verification\n    :members:\n    :undoc-members:\n    :show-inheritance:\n"
  },
  {
    "path": "docs/source/api/modules.rst",
    "content": "bag\n===\n\n.. toctree::\n   :maxdepth: 4\n\n   bag\n"
  },
  {
    "path": "docs/source/conf.py",
    "content": "# -*- coding: utf-8 -*-\n#\n# BAG documentation build configuration file, created by\n# sphinx-quickstart on Fri May 27 15:45:44 2016.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys\nimport os\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath('../..'))\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n    'sphinx.ext.autosummary',\n    'sphinx.ext.autodoc',\n    'sphinx.ext.imgmath',\n    'sphinx.ext.doctest',\n    'sphinx.ext.intersphinx',\n    'sphinx.ext.todo',\n    'sphinx.ext.coverage',\n    'sphinx.ext.viewcode',\n    'sphinx.ext.githubpages',\n    # napoleon has better support for class instance attribute than numpydoc.\n    'sphinx.ext.napoleon',\n    # 'numpydoc',\n]\n\n# make numpydoc work with autosummary\nnumpydoc_show_class_members = False\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the 
project.\nproject = u'BAG'\ncopyright = u'2016, Eric Chang'\nauthor = u'Eric Chang'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = u'2.0'\n# The full version, including alpha/beta/rc tags.\nrelease = u'2.0'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = []\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\ndefault_role = 'any'\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. 
They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n#keep_warnings = False\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages.  See the documentation for\n# a list of builtin themes.\nhtml_theme = 'sphinx_rtd_theme'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further.  For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n#html_theme_path = []\n\n# The name for this set of Sphinx documents.\n# \"<project> v<release> documentation\" by default.\n#html_title = u'BAG v2.0'\n\n# A shorter title for the navigation bar.  Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (relative to this directory) to use as a favicon of\n# the docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = []\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. 
These files are copied\n# directly to the root of the documentation.\n#html_extra_path = []\n\n# If not None, a 'Last updated on:' timestamp is inserted at every page\n# bottom, using the given strftime format.\n# The empty string is equivalent to '%b %d, %Y'.\n#html_last_updated_fmt = None\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it.  The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. 
\".xhtml\").\n#html_file_suffix = None\n\n# Language to be used for generating the HTML full-text search index.\n# Sphinx supports the following languages:\n#   'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'\n#   'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'\n#html_search_language = 'en'\n\n# A dictionary with options for the search language support, empty by default.\n# 'ja' uses this config value.\n# 'zh' user can custom change `jieba` dictionary path.\n#html_search_options = {'type': 'default'}\n\n# The name of a javascript file (relative to the configuration directory) that\n# implements a search results scorer. If empty, the default will be used.\n#html_search_scorer = 'scorer.js'\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'BAGdoc'\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n# The paper size ('letterpaper' or 'a4paper').\n#'papersize': 'letterpaper',\n\n# The font size ('10pt', '11pt' or '12pt').\n#'pointsize': '10pt',\n\n# Additional stuff for the LaTeX preamble.\n#'preamble': '',\n\n# Latex figure (float) alignment\n#'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. 
List of tuples\n# (source start file, target name, title,\n#  author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n    (master_doc, 'BAG.tex', u'BAG Documentation',\n     u'Eric Chang', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n    (master_doc, 'bag', u'BAG Documentation',\n     [author], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n#  dir menu entry, description, category)\ntexinfo_documents = [\n    (master_doc, 'BAG', u'BAG Documentation',\n     author, 'BAG', 'One line description of project.',\n     'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#texinfo_no_detailmenu = False\n\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\n    'python': ('https://docs.python.org/3.5', None),\n    'python3': ('https://docs.python.org/3.5', None),\n    'python2': ('https://docs.python.org/2.7', None),\n    'numpy': ('http://docs.scipy.org/doc/numpy/', None),\n    }\n"
  },
  {
    "path": "docs/source/developer/LICENSE",
    "content": "BSD 3-Clause License\n\nCopyright (c) 2018, Regents of the University of California\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n  list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n  this list of conditions and the following disclaimer in the documentation\n  and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n  contributors may be used to endorse or promote products derived from\n  this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "docs/source/developer/developer.rst",
    "content": "Developer Guide\n===============\n\nNothing here yet...\n"
  },
  {
    "path": "docs/source/index.rst",
    "content": ".. BAG documentation master file, created by\n   sphinx-quickstart on Fri May 27 15:45:44 2016.\n   You can adapt this file completely to your liking, but it should at least\n   contain the root `toctree` directive.\n\nWelcome to BAG's documentation!\n===============================\n\nContents:\n\n.. toctree::\n   :maxdepth: 2\n\n   tutorial/tutorial\n   overview/overview\n   setup/setup\n   developer/developer\n   api/modules\n\nIndices and tables\n==================\n\n* :ref:`genindex`\n* :ref:`modindex`\n* :ref:`search`\n\n"
  },
  {
    "path": "docs/source/overview/LICENSE",
    "content": "BSD 3-Clause License\n\nCopyright (c) 2018, Regents of the University of California\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n  list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n  this list of conditions and the following disclaimer in the documentation\n  and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n  contributors may be used to endorse or promote products derived from\n  this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "docs/source/overview/design.rst",
    "content": "Design Module\n=============\n\nA design module is a Python class that generates new schematics.  It computes all parameters needed to generate a\nschematic from user defined specifications.  For example, a design module for an inverter needs to compute the width,\nlength, and threshold flavor of the NMOS and PMOS to generate a new inverter schematic.  The designer of this module can\nlet the user specify these parameters directly, or alternatively compute them from higher level specifications, such as\nfanout, input capacitance, and leakage specs.\n\nTo create a default design module for a schematic generator, create a :class:`~bag.BagProject` instance and call\n:meth:`~bag.BagProject.import_design_library` to import all schematic generators in a library from your CAD\nprogram into Python.  The designer should then implement the three methods, :meth:`~bag.design.Module.design`,\n:meth:`~bag.design.Module.get_layout_params`, and :meth:`~bag.design.Module.get_layout_pin_mapping` (The latter two are\noptional if you do not use BAG to generate layout).  Once you finish the design module definition, you can create new\ndesign module instances by calling :meth:`~bag.BagProject.create_design_module`.\n\n\nThe following sections describe how each of these methods should be implemented.\n\ndesign()\n--------\n\nThis method computes all parameters needed to generate a schematic from user defined specifications.  The input\narguments should also be specified in this method.\n\nA design module can have multiple design methods, as long as they have different names.  
For example, you can implement\nthe ``design()`` method to compute parameters from high level specifications, and define a new method named\n``design_override()`` that allows the user to assign parameter values directly for debugging purposes.\n\nTo enable hierarchical design, each design module has a dictionary, :attr:`~bag.design.Module.instances`, that\nmaps children instance names to corresponding design modules, so you can simply call their\n:meth:`~bag.design.Module.design` methods to set their parameters.  See :doc:`/tutorial/tutorial` for a simple example.\n\nIf you need to modify the schematic structure (such as adding more inverter buffers), you should call the corresponding\nmethods before calling :meth:`~bag.design.Module.design` methods of child instances, as those design modules could be\nchanged.  The rest of this section explains how you modify the schematic.\n\nPin Renaming\n^^^^^^^^^^^^\n\nMost of the time, you should not rename the pins of a schematic.  The only time you should rename the pin is when you have\na variable bus pin where the number of bits in the bus can change with the design.  In this case, call\n:meth:`~bag.design.Module.rename_pin` to change the number of bits in the bus.  To connect/remove instances from\nthe added/deleted bus pins, see :ref:`instance_connection_modification`.\n\nDelete Instances\n^^^^^^^^^^^^^^^^\n\nDelete a child instance by calling :meth:`~bag.design.Module.delete_instance`.  After\nthis call, the corresponding value in :attr:`~bag.design.Module.instances` dictionary will become ``None``.\n\n.. note::\n    You don't have to delete 0-width or 0-finger transistors; BAG already handles that for you.\n\nReplace Instance Master\n^^^^^^^^^^^^^^^^^^^^^^^\n\nIf you have two different designs of a child instance, and you want to swap between the two designs, you can call\n:meth:`~bag.design.Module.replace_instance_master` to change the instance master of a child.\n\n.. 
note::\n    You can replace instance masters only if the two instance masters have exactly the same symbol, including pin names.\n\n.. _instance_connection_modification:\n\nInstance Connection Modification\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nCall :meth:`~bag.design.Module.reconnect_instance_terminal` to change a child instance's connection.\n\nArraying Child Instances\n^^^^^^^^^^^^^^^^^^^^^^^^\n\nCall :meth:`~bag.design.Module.array_instance` to array a child instance.  After this call,\n:attr:`~bag.design.Module.instances` will map the child instance name to a list of design modules, one for each instance\nin the array.  You can then iterate through this list and design each of the instances.  They do not need to have the\nsame parameter values.\n\nRestoring to Default\n^^^^^^^^^^^^^^^^^^^^\n\nIf you are using the design module in a design iteration loop, or you're using BAG interactively through the Python\nconsole, and you want to restore a deleted/replaced/arrayed child instance to the default state, you can call\n:meth:`~bag.design.Module.restore_instance`.\n\n\nget_layout_params()\n-------------------\n\nThis method should return a dictionary from layout parameter names to their values.  This dictionary is used to create\na layout cell that will pass LVS against the generated schematic.\n\nget_layout_pin_mapping()\n------------------------\n\nThis method should return a dictionary from layout pin names to schematic pin names.  This method exists because a\nlayout cell may not have the same pin names as the schematic.  If a layout pin should be left un-exported, its\ncorresponding value in the dictionary must be ``None``.\n\nThis dictionary only needs to list the layout pins that need to be renamed.  If no renaming is necessary, an empty\ndictionary can be returned.\n"
  },
  {
    "path": "docs/source/overview/overview.rst",
    "content": "Overview\n========\n\n.. figure:: ./figures/bag_flow.png\n    :align: center\n    :figclass: align-center\n\n    BAG design flow diagram\n\nBAG is a Python-based circuit design platform that aims to automate analog circuit design, but at the same time give the\nuser full visibility and control over every step in the design flow.\n\nThe analog circuit design flow is generally as follows:\n\n#. Create a schematic generator of the circuit.\n#. Create a testbench generator to measure specifications and verify functionality.\n#. Create a layout generator if post-extraction verification is needed.\n#. Generate a schematic  with given specifications.\n#. Generate a testbench that instantiates the generated schematic.\n#. Simulate the testbenches and post-process data to verify that the circuit meets specifications.\n#. Create the layout of your schematic and verify it's LVS/DRC clean.\n#. Repeat step 3 on post-extraction schematic.\n\nBAG 2.0 is designed so that any or all steps of the design flow can be performed in a Python script or console, thus\nenabling rapid design iteration and architecture exploration.\n\nTo achieve its goal, BAG is divided into 4 components:  schematic generators, layout generators, design modules, and\ntestbench generators. These components are independent from one another, so the designer can pick and choose which steps\nin the design flow to automate.  For example, the designer can simply use BAG to generate new schematics, and use his\nown CAD program for simulation and verification.  Alternatively, The designer can provide an existing schematic to BAG\nand simply use it to automate the verification process.\n\nBAG interacts with an external CAD program or simulator to complete all the design and simulation tasks.  BAG comes with\nVirtuoso and Ocean simulator support, but can be extended to other CAD programs or simulators.  
The rest of this\ndocument assumes you are using Virtuoso and running simulations in Ocean.\n\nNext we will describe each component of BAG in detail.\n\n.. toctree::\n    :maxdepth: 2\n\n    schematic\n    design\n    testbench"
  },
  {
    "path": "docs/source/overview/schematic.rst",
    "content": "Schematic Generator\n===================\n\nA schematic generator is a schematic in your CAD program that tells BAG all the information needed to create a design.\nBAG creates design modules from schematic generators, and BAG will copy and modify schematic generators to implement\nnew designs.\n\n.. figure:: ./figures/gm_schematic.png\n    :align: center\n    :figclass: align-center\n\n    An example schematic generator of a differential gm cell.\n\nA schematic generator needs to follow some rules to work with BAG:\n\n#. Instances in a schematic generator must be other schematic generators, or a cell in the ``BAG_prim`` library.\n#. BAG can array any instance in a schematic generator.  That is, in the design implementation phase, BAG can\n   copy/paste this instance any number of times, and modify the connections or parameters of any copy.  This is useful\n   in creating array structures, such as an inverter chain with variable number of stages, or a DAC with variable\n   number of bits.\n\n   However, if you need to array an instance, its ports must be connected to wire stubs, with net labels on each of the\n   wire stubs.  Also, there must be absolutely nothing to the right of the instance, since BAG will array the instance\n   by copying-and-pasting to the right.  An example of an inverter buffer chain schematic generator is shown below.\n\n    .. figure:: ./figures/inv_chain_schematic.png\n        :align: center\n        :figclass: align-center\n\n        An example schematic generator of an inverter buffer chain.  Ports connected by wire stubs, nothing on the right.\n\n#. BAG can replace the instance master of any instance.  The primary use of this is to allow the designer to change\n   transistor threshold values, but this could be used for other schematic generators if implemented.  Whenever you\n   switch the instance master of an instance, the symbol of the new instance must exactly match the old instance,\n   including the port names.\n#. 
Although not required, it is good practice to fill in default parameter values for all instances from the\n   ``BAG_prim`` library.  This makes it so that you can simulate a schematic generator in a normal testbench, and make\n   debugging easier.\n\n"
  },
  {
    "path": "docs/source/overview/testbench.rst",
    "content": "Testbench Generator\n===================\n\nA testbench generator is just a normal testbench with schematic and adexl view.  BAG will simply copy the schematic and\nadexl view, and replace the device under test with the new generated schematic.  There are only 3 restrictions to the\ntestbench:\n\n#. All device-under-test's (DUTs) in the testbench must have an instance name starting with ``XDUT``.  This is to inform BAG\n   which child instances should be replaced.\n#. The testbench must be configured to simulate with ADE-XL.  This is to make parametric/corner sweeps and monte carlo\n   easier.\n#. You should not define any process corners in the ADE-XL state, as BAG will load them for you.  This makes it\n   possible to use the same testbench generator across different technologies.\n\nTo verify a new design, call :meth:`~bag.BagProject.create_testbench` and specify the testbench generator library/cell,\nDUT library/cell, and the library to create the new testbench in.  BAG will create a :class:`~bag.core.Testbench` object\nto represent this testbench.  You can then call its methods to set the parameters, process corners, or enable parametric\nsweeps.  When you're done, call :meth:`~bag.core.Testbench.update_testbench` to commit the changes to Virtuoso.  If you\ndo not wish to run simulation in BAG, you can then open this testbench in Virtuoso and simulate it there.\n\nIf you want to start simulation from BAG and load simulation data, you need to call\n:meth:`~bag.core.Testbench.add_output` method to specify which outputs to record and send back to Python.  Output\nexpression is a Virtuoso calculator expression.  Then, call :meth:`~bag.core.Testbench.run_simulation` to start a\nsimulation run.  During the simulation, you can press ``Ctrl-C`` anytime to abort simulation.  
When the simulation\nfinishes, the result directory will be saved to the attribute :attr:`~bag.core.Testbench.save_dir`, and you can call\n:func:`bag.data.load_sim_results` to load the result in Python. See :doc:`/tutorial/tutorial` for an example.\n\nSince BAG uses the ADE-XL interface to run simulation, all simulation runs will be recorded in ADE-XL's history tab, so\nyou can plot them in Virtuoso later for debugging purposes.  By default, all simulation runs from BAG have the ``BagSim``\nhistory tag, but you can also specify your own tag name when you call :meth:`~bag.core.Testbench.run_simulation`.  Read\nADE-XL documentation if you want to know more about ADE-XL's history feature.\n"
  },
  {
    "path": "docs/source/setup/LICENSE",
    "content": "BSD 3-Clause License\n\nCopyright (c) 2018, Regents of the University of California\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n  list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n  this list of conditions and the following disclaimer in the documentation\n  and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n  contributors may be used to endorse or promote products derived from\n  this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "docs/source/setup/bag_config/LICENSE",
    "content": "BSD 3-Clause License\n\nCopyright (c) 2018, Regents of the University of California\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n  list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n  this list of conditions and the following disclaimer in the documentation\n  and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n  contributors may be used to endorse or promote products derived from\n  this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "docs/source/setup/bag_config/bag_config.rst",
    "content": "BAG Configuration File\n======================\n\nBAG configuration file is written in YAML format.  This document describes each setting.\nBAG configuration file may use environment variable to specify values of any entries.\n\n.. toctree::\n    :maxdepth: 4\n\n    socket/socket\n    database/database\n    simulation/simulation\n    misc\n"
  },
  {
    "path": "docs/source/setup/bag_config/database/database.rst",
    "content": "database\n========\n\nThis entry defines all settings related to Virtuoso.\n\n\ndata.class\n----------\n\nThe Python class that handles database interaction.  This entry is mainly to support non-Virtuoso CAD programs.  If you\nuse Virtuoso, the value must be ``bag.interface.skill.SkillInterface``.\n\ndatabase.schematic\n------------------\n\nThis entry contains all settings needed to read/generate schematics.\n\n.. _sch_tech_lib:\n\ndatabase.schematic.tech_lib\n^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nTechnology library.  When BAG create new libraries, they will be attached to this technology library.  Usually this is\nthe PDK library provided by the foundry.\n\n.. _sch_sympin:\n\ndatabase.schematic.sympin\n^^^^^^^^^^^^^^^^^^^^^^^^^\n\nInstance master of symbol pins.  This is a list of library/cell/view names.  Most of the time this should be\n``[\"basic\", \"sympin\", \"symbolNN\"]``.\n\n.. _sch_ipin:\n\ndatabase.schematic.ipin\n^^^^^^^^^^^^^^^^^^^^^^^\n\nInstance master of input pins in schematic.  This is a list of library/cell/view names.  Most of the time this should be\n``[\"basic\", \"ipin\", \"symbol\"]``.\n\n.. _sch_opin:\n\ndatabase.schematic.opin\n^^^^^^^^^^^^^^^^^^^^^^^\n\nInstance master of output pins in schematic.  This is a list of library/cell/view names. Most of the time this should be\n``[\"basic\", \"opin\", \"symbol\"]``.\n\n.. _sch_iopin:\n\ndatabase.schematic.iopin\n^^^^^^^^^^^^^^^^^^^^^^^^\n\nInstance master of inout pins in schematic.  This is a list of library/cell/view names. Most of the time this should be\n``[\"basic\", \"iopin\", \"symbolr\"]``.\n\n.. _sch_simulators:\n\ndatabase.schematic.simulators\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nA list of simulators where the ``termOrder`` CDF field should be defined.\n\nWhen Virtuoso convert schematics to netlists, it uses the ``termOrder`` CDF field to decide how to order the pin names\nin the netlist.  
This entry makes BAG update the ``termOrder`` field correctly whenever pins are changed.\n\nMost of the time, this should be ``[\"auLvs\", \"auCdl\", \"spectre\", \"hspiceD\"]``.\n\n.. _sch_exclude:\n\ndatabase.schematic.exclude_libraries\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nA list of libraries to exclude when importing schematic generators to BAG.  Most of the time, this should be\n``[\"analogLib\", \"basic\", {PDK}]``, where ``{PDK}`` is the PDK library.\n\ndatabase.testbench\n------------------\n\nThis entry contains all settings needed to create new testbenches.\n\n.. _tb_config_libs:\n\ndatabase.testbench.config_libs\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nA string of config view global libries, separated by spaces.  Used to generate config view.\n\ndatabase.testbench.config_views\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nA string of config view global cellviews, separated by spaces.  Used to generate config view.  Most of the time this\nshould be ``\"spectre calibre schematic veriloga\"``.\n\ndatabase.testbench.config_stops\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nA string of config view global stop cellviews, separated by spaces.  Used to generate config view.  Most of the time this\nshould be ``\"spectre veriloga\"``.\n\n.. _sim_env_file:\n\ndatabase.testbench.env_file\n^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nThe simulation environment file name.  A simulation environment is a combination of process corner and temperature.\nFor example, if you simulate your circuit at TT corner with a temperature of 50 degrees Celsius, you may say the\nsimulation environment is TT_50.  A simulation environment file contains all simulation environments you want to define\nwhen BAG creates a new testbench.  This file can be generated by exporting corner setup from an ADE-XL view.\n\ndatabase.testbench.def_files\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nA list of ADE/spectre definition files to include.  Sometimes, a process technology uses definition files\nin addition to model files.  
If so, you can specify definition files to include here as a list of strings.\nUse an empty list (``[]``) if no definition file is needed.\n\ndatabase.testbench.default_env\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nThe default simulation environment name.  See :ref:`sim_env_file`.\n\ndatabase.checker\n----------------\n\nThis entry contains all settings needed to run LVS/RCX from BAG.\n\ndatabase.checker.checker_cls\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nThe Python class that handles LVS/RCX.  If you use Calibre with Virtuoso for LVS/RCX, the value must be\n``bag.verification.calibre.Calibre``.\n\n.. _lvs_rundir:\n\ndatabase.checker.lvs_run_dir\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nLVS run directory.\n\n.. _rcx_rundir:\n\ndatabase.checker.rcx_run_dir\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nRCX run directory\n\n.. _lvs_runset:\n\ndatabase.checker.lvs_runset\n^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nLVS runset.\n\n.. _rcx_runset:\n\ndatabase.checker.rcx_runset\n^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nRCX runset.\n\ndatabase.checker.source_added_file\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nLocation of the source.added file for Calibre LVS.  If this entry is not defined, BAG\ndefaults to ``$DK/Calibre/lvs/source.added``.\n\ndatabase.checker.rcx_mode\n^^^^^^^^^^^^^^^^^^^^^^^^^\n\nWhether to use Calibre PEX or Calibre XACT3D flow to perform parasitic extraction.  The\nvalue should be either ``pex`` or ``xact``.  If this entry is not defined, BAG defaults to\n``pex``.\n\ndatabase.checker.xact_rules\n^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nLocation of the Calibre XACT3D rules file.  This entry must be defined if using Calibre XACT3D flow.\n\n\ndatabase.calibreview\n--------------------\n\nThis entry contains all settings needed to generate calibre view after RCX.\n\n.. _calibre_cellmap:\n\ndatabase.calibreview.cell_map\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nThe calibre view cellmap file.\n\ndatabase.calibreview.view_name\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nview name for calibre view.  Usually ``calibre``.\n"
  },
  {
    "path": "docs/source/setup/bag_config/misc.rst",
    "content": "class\n=====\n\nThe subclass of :ref:\n\n\n.. _bag_lib_defs:\n\nlib_defs\n========\n\nLocation of the BAG design module libraries definition file.\n\nThe BAG libraries definition file is similar to the ``cds.lib`` file for Virtuoso, where it defines every design module\nlibrary and its location.  This file makes it easy to share design module libraries made by different designers.\n\nEach line in the file contains two entries, separated by spaces.  The first entry is the name of the design module\nlibrary, and the second entry is the location of the design module library.  Environment variables may be used in this\nfile.\n\n.. _bag_new_lib_path:\n\nnew_lib_path\n============\n\nDirectory to put new generated design module libraries.\n\nWhen you import a new schematic generator library, BAG will create a corresponding Python design module library and\ndefine this library in the library definition file (see :ref:`bag_lib_defs`).  This field tells BAG where new design\nmodule libraries should be created.\n"
  },
  {
    "path": "docs/source/setup/bag_config/simulation/simulation.rst",
    "content": "simulation\n==========\n\nThis entry defines all settings related to Ocean.\n\nsimulation.class\n----------------\n\nThe Python class that handles simulator interaction.  This entry is mainly to support non-Ocean simulators.  If you\nuse Ocean, the value must be ``bag.interface.ocean.OceanInterface``.\n\nsimulation.prompt\n-----------------\n\nThe ocean prompt string.\n\n.. _sim_init_file:\n\nsimulation.init_file\n--------------------\n\nThis file will be loaded when Ocean first started up.  This allows you to configure the Ocean simulator.  If you do not want to load an initialization file, set this field to an empty string (``\"\"``).\n\nsimulation.view\n---------------\n\nTestbench view name.  Usually ``adexl``.\n\nsimulation.state\n----------------\n\nADE-XL setup state name.  When you run simulations from BAG, the simulation configuration will be saved to this setup\nstate.\n\nsimulation.update_timeout_ms\n----------------------------\n\nIf simulation takes a lone time, BAG will print out a message at this time interval (in milliseconds) so you can know\nif BAG is still running.\n\nsimulation.kwargs\n-----------------\n\npexpect keyword arguments dictionary used to start the simulation.  When BAG server receive a simulation request, it\nwill run Ocean in a subprocess using Python pexpect module.  This entry allows you to control how pexpect starts the\nOcean subprocess.  Refer to pexpect documentation for more information.\n\njob_options\n-----------\n\nA dictionary of job options for ADE-XL.  This entry controls whether ADE-XL runs simulations remotely or locally, and how many jobs it launches for a simulation run.  Refer to ADE-XL documentation for available options.\n"
  },
  {
    "path": "docs/source/setup/bag_config/socket/socket.rst",
    "content": "socket\n======\n\nThis entry defines socket settings for BAG to communicate with Virtuoso.\n\nsocket.host\n-----------\n\nThe host of the BAG server socket, i.e. the machine running the Virtuoso program.  usually ``localhost``.\n\nsocket.port_file\n----------------\n\nFile containing socket port number for BAG server.  When Virtuoso starts the BAG server process, it finds a open port and bind the\nserver to this port.  It then creates a file with name in ``$BAG_WORK_DIR`` directory, and write the port number to this\nfile.\n\nsocket.sim_port_file\n--------------------\n\nFile containing socket port number for simulation server.  When the simulation server starts, it finds a open port and bind the\nserver to this port.  It then creates a file with name in ``$BAG_WORK_DIR`` directory, and write the port number to this\nfile.\n\n\nsocket.log_file\n---------------\n\nSocket communication debugging log file.  All messages sent or received by BAG will be recorded in this log.\n\nsocket.pipeline\n---------------\n\nnumber of messages allowed in the ZMQ pipeline.  Usually you don't have to change this.\n"
  },
  {
    "path": "docs/source/setup/config_summary.rst",
    "content": "Configuration Files Summary\n===========================\n\nAlthough BAG has many configuration settings, most of them do not need to be changed.  This file summarizes which\nsettings you should modify under various use cases.\n\nStarting New Project\n--------------------\n\nFor every new project, it is a good practice to keep a set of global configuration files to make sure everyone working\non the project is simulating the same corners, running LVS and extraction with the same settings, and so on.  In this\ncase, you should change the following fields to point to the global configuration files:\n\n* :ref:`sim_env_file`\n* :ref:`lvs_runset`\n* :ref:`rcx_runset`\n* :ref:`calibre_cellmap`\n\nCustomizing Virtuoso Setups\n---------------------------\n\nIf you changed your Virtuoso setup (configuration files, working directory, etc.), double check the following fields to\nsee if they need to be modified:\n\n* :ref:`lvs_rundir`\n* :ref:`rcx_rundir`\n* :ref:`sim_init_file`\n\nPython Design Module Customization\n----------------------------------\n\nThe following fields control how BAG 2.0 finds design modules, and also where it puts new imported modules:\n\n* :ref:`bag_lib_defs`\n* :ref:`bag_new_lib_path`\n\n.. _change_pdk:\n\nChanging Process Technology\n---------------------------\n\nIf you want to change the process technology, double check the following fields:\n\n* :ref:`sch_tech_lib`\n* :ref:`sch_exclude`\n* :ref:`tb_config_libs`\n* :ref:`tech_config_path`\n\nThe following fields probably won't change, but if something doesn't work it's worth to double check:\n\n* :ref:`sch_sympin`\n* :ref:`sch_ipin`\n* :ref:`sch_opin`\n* :ref:`sch_iopin`\n* :ref:`sch_simulators`\n\n"
  },
  {
    "path": "docs/source/setup/install_python.rst",
    "content": "Installing Python for BAG\n==========================\n\nThis section describes how to install Python for running BAG.\n\nInstallation Requirements\n-------------------------\n\nBAG is compatible with Python 3.5+ (Python 2.7+ is theoretically supported but untested), so you will need to have\nPython 3.5+ installed.  For Linux/Unix systems, it is recommended to install a separate Python distribution from\nthe system Python.\n\nBAG requires multiple Python packages, some of which requires compiling C++/C/Fortran extensions.  Therefore, it is\nstrongly recommended to download `Anaconda Python <https://www.continuum.io/downloads>`_, which provides a Python\ndistribution with most of the packages preinstalled.  Otherwise, please refer to documentation for each required\npackage for how to install/build from source.\n\nRequired Packages\n-----------------\nIn addition to the default packages that come with Anaconda (numpy, scipy, etc.), you'll need the following additional\npackages:\n\n- `subprocess32 <https://pypi.python.org/pypi/subprocess32>`_ (Python 2 only)\n\n  This package is a backport of Python 3.2's subprocess module to Python 2.  It is installable from ``pip``.\n\n- `sqlitedict <https://pypi.python.org/pypi/sqlitedict>`_\n\n  This is a dependency of OpenMDAO.  It is installable from ``pip``.\n\n- `OpenMDAO <https://pypi.python.org/pypi/openmdao>`_\n\n  This is a flexible optimization framework in Python developed by NASA.  It is installable from ``pip``.\n\n- `mpich2 <https://anaconda.org/anaconda/mpich2>`_ (optional)\n\n  This is the Message Passing Interface (MPI) library.  OpenMDAO and Pyoptsparse can optionally use this library\n  for parallel computing.  You can install this package with:\n\n  .. code-block:: bash\n\n      > conda install mpich2\n\n- `mpi4py <https://anaconda.org/anaconda/mpi4py>`_ (optional)\n\n  This is the Python wrapper of ``mpich2``.  You can install this package with:\n\n  .. 
code-block:: bash\n\n      > conda install mpi4py\n\n- `ipopt <https://anaconda.org/pkerichang/ipopt>`__ (optional)\n\n  `Ipopt <https://projects.coin-or.org/Ipopt>`__ is a free software package for large-scale nonlinear optimization.\n  This can be used to replace the default optimization solver that comes with scipy.  You can install this package with:\n\n  .. code-block:: bash\n\n      > conda install --channel pkerichang ipopt\n\n- `pyoptsparse <https://anaconda.org/pkerichang/pyoptsparse>`_ (optional)\n\n  ``pyoptsparse`` is a Python package that contains a collection of optimization solvers, including a Python wrapper\n  around ``Ipopt``.  You can install this package with:\n\n  .. code-block:: bash\n\n      > conda install --channel pkerichang pyoptsparse\n"
  },
  {
    "path": "docs/source/setup/new_pdk.rst",
    "content": "Setting up New PDK\n==================\n\nThis section describes how to get BAG 2.0 to work with a new PDK.\n\n#. Create a new technology configuration file for this PDK.  See :doc:`tech_config/tech_config` for a description of\n   the technology configuration file format.\n\n#. Create a new BAG configuration file for this PDK.  You can simply copy an existing configuration, then change the\n   fields listed in :ref:`change_pdk`.\n\n#. Create a new ``BAG_prim`` library for this PDK.  The easiest way to do this is to copy an existing ``BAG_prim``\n   library, then change the underlying instances to be instances from the new PDK.  You should use the **pPar** command\n   in Virtuoso to pass CDF parameters from ``BAG_prim`` instances to PDK instances.\n\n#. Change your cds.lib to refer to the new ``BAG_prim`` library.\n\n#. To avoid everyone having their own python design modules for BAG primitive, you should generated a global design module\n   library for BAG primitives, then ask every user to include this global library in their ``bag_libs.def`` file.  To\n   do so, setup a BAG workspace and execute the following commands:\n\n    .. code-block:: python\n\n        import bag\n        prj = bag.BagProject()\n        prj.import_design_library('BAG_prim')\n\n   now copy the generate design library to a global location.\n"
  },
  {
    "path": "docs/source/setup/pyoptsparse.rst",
    "content": "Building Pyoptsparse\n====================\n\nTo be written.\n"
  },
  {
    "path": "docs/source/setup/setup.rst",
    "content": "BAG Setup Procedure\n===================\n\nThis document describes how to install Python for BAG and the various configuration settings.  Since a lot of the\nconfiguration depends on the external CAD program and simulator, this document assumes you are using Virtuoso and\nOcean (with ADEXL) for schematic design and simulation, respectively.\n\n.. toctree::\n    :maxdepth: 2\n\n    install_python\n    pyoptsparse\n    config_summary\n    bag_config/bag_config\n    tech_config/tech_config\n    new_pdk\n"
  },
  {
    "path": "docs/source/setup/tech_config/LICENSE",
    "content": "BSD 3-Clause License\n\nCopyright (c) 2018, Regents of the University of California\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n  list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n  this list of conditions and the following disclaimer in the documentation\n  and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n  contributors may be used to endorse or promote products derived from\n  this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "docs/source/setup/tech_config/layout/layout.rst",
    "content": "layout\n======\n\nThis entry defines all layout specific settings.\n\n\nlayout.em_temp\n--------------\n\nThe temperature used to calculate electro-migration specs.  The temperature should\nbe specified in degrees Celsius.\n"
  },
  {
    "path": "docs/source/setup/tech_config/misc.rst",
    "content": ".. _tech_config_path:\n\nclass\n=====\n\nThe subclass of :class:`bag.layout.core.TechInfo` for this process technology.\nIf this entry is not defined, a default dummy :class:`~bag.layout.core.TechInfo`\ninstance will be created for schematic-only design flow.\n"
  },
  {
    "path": "docs/source/setup/tech_config/mos/mos.rst",
    "content": "mos\n===\n\nThis entry defines all MOS transistor settings.\n\n\nmos.width_resolution\n--------------------\n\nThe transistor width minimum resolution, in meters or number of fins in finfet technology.\n\nmos.length_resolution\n---------------------\n\nThe transistor length minimum resolution, in meters.\n\nmos.mos_char_root\n-----------------\n\nThe default transistor characterization data directory.\n"
  },
  {
    "path": "docs/source/setup/tech_config/tech_config.rst",
    "content": "Technology Configuration File\n=============================\n\nTechnology configuration file is written in YAML format.  This document describes each setting.\nTechnology configuration file may use environment variable to specify values of any entries.\n\n.. toctree::\n    :maxdepth: 4\n\n    misc\n    mos/mos\n    layout/layout\n"
  },
  {
    "path": "docs/source/tutorial/LICENSE",
    "content": "BSD 3-Clause License\n\nCopyright (c) 2018, Regents of the University of California\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n  list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n  this list of conditions and the following disclaimer in the documentation\n  and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n  contributors may be used to endorse or promote products derived from\n  this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "docs/source/tutorial/figures/LICENSE",
    "content": "BSD 3-Clause License\n\nCopyright (c) 2018, Regents of the University of California\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n  list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n  this list of conditions and the following disclaimer in the documentation\n  and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n  contributors may be used to endorse or promote products derived from\n  this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "docs/source/tutorial/tutorial.rst",
    "content": "Tutorial\n========\n\nThis section contains several simple tutorials for you to get an idea of the BAG workflow.\n\nIn these tutorials, we will be using :program:`git` extensively.  git allows you to copy a working setup,\nand it also allows you to check out and use other people's designs while they can work on adding future\nimprovements.  To learn git, you can read the documentation here_, or alternatively you can just\ngoogle git commands to learn more about it while working through the tutorial.\n\n.. _here: https://git-scm.com/doc\n\n.. toctree::\n    :maxdepth: 2\n\n    schematic\n    collaboration"
  },
  {
    "path": "run_scripts/LICENSE",
    "content": "BSD 3-Clause License\n\nCopyright (c) 2018, Regents of the University of California\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n  list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n  this list of conditions and the following disclaimer in the documentation\n  and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n  contributors may be used to endorse or promote products derived from\n  this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "run_scripts/clean_cds_lib.py",
    "content": "\"\"\"\nThis script removes the '#Removed by ddDeleteObj' items in cds.lib.\nIt is also capable of taking a list of patterns as input and delete the generated libraries that\nmatch that pattern in glob style.\n\"\"\"\n\nfrom argparse import Namespace\nimport argparse\nimport re\nimport os\nimport shutil\n\nfrom pathlib import Path\nfrom bag.io.file import read_yaml_env, readlines_iter, write_file\n\n\ndef arg_parse() -> Namespace:\n\n    parser = argparse.ArgumentParser()\n    parser.add_argument('-rm', '--rm_patterns', nargs='+', dest='patterns', default=[],\n                        help='A list of patterns to be removed from cadence library, the pattern '\n                             'should be the name of the library in glob style')\n\n    args = parser.parse_args()\n    return args\n\n\ndef run_main(args: Namespace):\n    bag_workspace_dir = Path(os.environ['BAG_WORK_DIR'])\n    if 'BAG_CONFIG_PATH' not in os.environ:\n        raise Exception('BAG_CONFIG_PATH not defined.')\n    bag_config_path = os.environ['BAG_CONFIG_PATH']\n    bag_config = read_yaml_env(bag_config_path)\n\n    gen_libs_path = Path(bag_config['database']['default_lib_path'])\n    if not gen_libs_path.exists():\n        print(f'path {str(gen_libs_path)} does not exist')\n\n    cds_lib_path = bag_workspace_dir / 'cds.lib'\n    if not cds_lib_path.exists():\n        print(f'path {str(cds_lib_path)} does not exist')\n        return\n\n    cds_patterns = ['#Removed by ddDeleteObj'] + [f'DEFINE {p}' for p in args.patterns]\n\n    # clean cds.lib\n    cds_lib_lines_iter = readlines_iter(str(cds_lib_path))\n\n    new_cds_lib_content = []\n    for line in cds_lib_lines_iter:\n        found = False\n        for p in cds_patterns:\n            if re.match(p, line):\n                found = True\n                break\n        if not found:\n            new_cds_lib_content.append(line)\n\n    write_file(str(cds_lib_path), ''.join(new_cds_lib_content))\n\n    # clean gen_libs library 
names that match the pattern in args\n    for p in args.patterns:\n        for dir in gen_libs_path.glob(f'{p}*'):\n            if dir.is_dir():\n                shutil.rmtree(dir)\n            else:\n                print(f'path {str(dir)} is not a directory')\n\n\nif __name__ == '__main__':\n    args = arg_parse()\n    run_main(args)\n"
  },
  {
    "path": "run_scripts/compile_verilog.il",
    "content": "\nprocedure( compile_netlist_views(fname \"t\")\n    let( (p line info_list lib cell view obj cv)\n        unless( p = infile(fname)\n            error(\"Cannot open file %s\" fname)\n        )\n        while( gets(line p)\n            info_list = parseString(line)\n            lib = car(info_list)\n            cell = cadr(info_list)\n            view = caddr(info_list)\n            obj = ddGetObj(lib cell view \"verilog.sv\" nil \"a\")\n            cv = dbOpenCellViewByType(lib cell view \"netlist\" \"ac\")\n            dbSetConnCurrent(cv)\n            dbSave(cv)\n            dbClose(cv)\n        )\n        close(p)\n    )\n)\n\ncompile_netlist_views(\"verilog_cell_list.txt\")\n"
  },
  {
    "path": "run_scripts/gen_cell.py",
    "content": "import argparse\nfrom argparse import Namespace\nfrom pathlib import Path\n\nfrom bag.io.file import Pickle, Yaml\nfrom bag.core import BagProject\n\nio_cls_dict = {\n    'pickle': Pickle,\n    'yaml': Yaml,\n}\n\n\ndef parse_args() -> Namespace:\n    parser = argparse.ArgumentParser()\n    parser.add_argument('specs_fname', help='specs yaml file')\n    parser.add_argument('--no-sch', dest='gen_sch', action='store_false',\n                        default=True, help='skip schematic generation')\n    parser.add_argument('--no-lay', dest='gen_lay', action='store_false',\n                        default=True, help='skip layout generation')\n    parser.add_argument('-v', '--lvs', action='store_true', default=False,\n                        help='run lvs')\n    parser.add_argument('-x', '--rcx', action='store_true', default=False,\n                        help='run rcx')\n    parser.add_argument('--use-cache', dest='use_cache', action='store_true',\n                        default=False,\n                        help='uses the cache in cache_dir')\n    parser.add_argument('--save-cache', dest='save_cache', action='store_true',\n                        default=False,\n                        help='updates design database stored in cache_dir')\n    parser.add_argument('--pre', dest='prefix', default='',\n                        help='prefix used to generate all the cells')\n    parser.add_argument('--suf', dest='suffix', default='',\n                        help='suffix used to generate all the cells')\n    parser.add_argument('--format', default='yaml',\n                        help='format of spec file (yaml, json, pickle)')\n    parser.add_argument('-dump', '--dump', default='',\n                        help='If given will dump output of script into that '\n                             'file according to the format specified')\n    args = parser.parse_args()\n    return args\n\n\ndef run_main(prj: BagProject, args: Namespace):\n    specs_fname = 
Path(args.specs_fname)\n    io_cls = io_cls_dict[args.format]\n    specs = io_cls.load(str(specs_fname))\n\n    results = prj.generate_cell(specs=specs,\n                                gen_lay=args.gen_lay,\n                                gen_sch=args.gen_sch,\n                                run_lvs=args.lvs,\n                                run_rcx=args.rcx,\n                                use_cybagoa=True,\n                                use_cache=args.use_cache,\n                                save_cache=args.save_cache,\n                                prefix=args.prefix,\n                                suffix=args.suffix)\n\n    if results is not None and args.dump:\n        out_tmp_file = Path(args.dump)\n        io_cls.save(results, out_tmp_file)\n\n\nif __name__ == '__main__':\n\n    args = parse_args()\n    local_dict = locals()\n    bprj = local_dict.get('bprj', BagProject())\n    run_main(bprj, args)\n\n"
  },
  {
    "path": "run_scripts/generate_verilog.py",
    "content": "\nimport os\n\nimport yaml\nfrom jinja2 import Environment, FileSystemLoader\n\n\ndef run_main():\n    verilog_dir = 'verilog_models'\n    cell_map_fname = 'verilog_cell_map.yaml'\n    skill_read_fname = 'verilog_cell_list.txt'\n    lib_name = 'AAAMODEL_QDR_HYBRID3'\n    lib_loc = 'gen_libs'\n    view_name = 'systemVerilog'\n    model_fname = 'verilog.sv'\n\n    with open(cell_map_fname, 'r') as f:\n        cell_map = yaml.load(f)\n\n    jinja_env = Environment(loader=FileSystemLoader(verilog_dir))\n\n    with open(skill_read_fname, 'w') as g:\n        for cell_name, fname in cell_map.items():\n            root_dir = os.path.join(lib_loc, lib_name, cell_name, view_name)\n            os.makedirs(root_dir, exist_ok=True)\n\n            content = jinja_env.get_template(fname).render(cell_name=cell_name)\n\n            with open(os.path.join(root_dir, model_fname), 'w') as f:\n                f.write(content)\n\n            g.write('%s %s %s\\n' % (lib_name, cell_name, view_name))\n\n\nif __name__ == '__main__':\n    run_main()\n"
  },
  {
    "path": "run_scripts/meas_cell.py",
    "content": "import argparse\nfrom argparse import Namespace\nfrom pathlib import Path\nimport pdb\n\nfrom bag.io.file import Pickle, Yaml\nfrom bag.core import BagProject\n\nio_cls_dict = {\n    'pickle': Pickle,\n    'yaml': Yaml,\n}\n\n\ndef parse_args() -> Namespace:\n    parser = argparse.ArgumentParser()\n    parser.add_argument('specs_fname', help='specs yaml file')\n    parser.add_argument('--no-cell', dest='gen_cell', action='store_false',\n                        default=True, help='skip cell generation')\n    parser.add_argument('--no-wrapper', dest='gen_wrapper', action='store_false',\n                        default=True,  help='skip wrapper generation')\n    parser.add_argument('--no-tb', dest='gen_tb', action='store_false',\n                        default=True,  help='skip tb generation')\n    parser.add_argument('--load', dest='load_results', action='store_true',\n                        default=False,  help='skip simulation, just load the results')\n    parser.add_argument('-x', '--extract', dest='extract', action='store_true',\n                        default=False, help='do extracted simulation')\n    parser.add_argument('--no-sim', dest='run_sim', action='store_false',\n                        default=True, help='run simulation, --load has a priority over this')\n    parser.add_argument('--format', default='yaml',\n                        help='format of spec file (yaml, json, pickle)')\n    parser.add_argument('-dump', '--dump', default='', help='output will be dumped to this path, '\n                                                            'according to the format specified')\n    parser.add_argument('--pause', default=False, action='store_true',\n                        help='True to pause using pdb.set_trace() after simulation is done')\n    args = parser.parse_args()\n    return args\n\n\ndef run_main(prj: BagProject, args: Namespace):\n    specs_fname = Path(args.specs_fname)\n    io_cls = io_cls_dict[args.format]\n    specs = 
io_cls.load(str(specs_fname))\n\n    results = prj.measure_cell(specs=specs,\n                               gen_cell=args.gen_cell,\n                               gen_wrapper=args.gen_wrapper,\n                               gen_tb=args.gen_tb,\n                               load_results=args.load_results,\n                               extract=args.extract,\n                               run_sims=args.run_sim)\n    if args.pause:\n        pdb.set_trace()\n\n    if results is not None and args.dump:\n        out_tmp_file = Path(args.dump)\n        io_cls.save(results, out_tmp_file)\n\n\nif __name__ == '__main__':\n\n    args = parse_args()\n    local_dict = locals()\n    bprj = local_dict.get('bprj', BagProject())\n    run_main(bprj, args)"
  },
  {
    "path": "run_scripts/run_bag.sh",
    "content": "#!/usr/bin/env bash\n\nsource .bashrc_pypath\n\nexec ${BAG_PYTHON} $@\n"
  },
  {
    "path": "run_scripts/setup_submodules.py",
    "content": "#!/usr/bin/env bash\n\n# crazy black magic from:\n# https://unix.stackexchange.com/questions/20880/how-can-i-use-environment-variables-in-my-shebang\n# this block of code is valid in both bash and python.\n# this means if this script is run under bash, it'll\n# call this script again using BAG_PYTHON.  If\n# this script is run under Python, this block of code\n# effectively does nothing.\nif \"true\" : '''\\'\nthen\nif [[ $BAG_PYTHON ]]; then\nexec ${BAG_PYTHON} \"$0\" \"$@\"\nelse\necho \"BAG_PYTHON environment variable is not set\"\nfi\nexit 127\nfi\n'''\nimport os\nimport subprocess\n\nimport yaml\n\n\ndef write_to_file(fname, lines):\n    with open(fname, 'w') as f:\n        f.writelines((l + '\\n' for l in lines))\n    add_git_file(fname)\n\n\ndef setup_python_path(module_list):\n    lines = ['# -*- coding: utf-8 -*-',\n             'import os',\n             'import sys',\n             '',\n             \"sys.path.append(os.environ['BAG_FRAMEWORK'])\",\n             \"sys.path.append(os.environ['BAG_TECH_CONFIG_DIR'])\",\n             ]\n    template = \"sys.path.append(os.path.join(os.environ['BAG_WORK_DIR'], '%s'))\"\n    lines.append(template % 'BAG2_TEMPLATES_EC')\n    for mod_name, _ in module_list:\n        lines.append(template % mod_name)\n\n    write_to_file('bag_startup.py', lines)\n\n\ndef get_sch_libraries(mod_name, mod_info):\n    bag_modules = mod_info.get('lib_path', 'BagModules')\n    root_dir = os.path.realpath(os.path.join(mod_name, bag_modules))\n    if not os.path.isdir(root_dir):\n        return []\n    return [name for name in os.listdir(root_dir) if os.path.isdir(os.path.join(root_dir, name))]\n\n\ndef setup_libs_def(module_list):\n    lines = ['BAG_prim $BAG_TECH_CONFIG_DIR/DesignModules']\n    template = '%s $BAG_WORK_DIR/%s/%s'\n    for mod_name, mod_info in module_list:\n        bag_modules = mod_info.get('lib_path', 'BagModules')\n        for lib_name in get_sch_libraries(mod_name, mod_info):\n            
lines.append(template % (lib_name, mod_name, bag_modules))\n\n    write_to_file('bag_libs.def', lines)\n\n\ndef setup_cds_lib(module_list):\n    lines = ['DEFINE BAG_prim $BAG_TECH_CONFIG_DIR/BAG_prim']\n    template = 'DEFINE %s $BAG_WORK_DIR/%s/%s'\n    for mod_name, mod_info in module_list:\n        for lib_name in get_sch_libraries(mod_name, mod_info):\n            lines.append(template % (lib_name, mod_name, lib_name))\n\n    write_to_file('cds.lib.bag', lines)\n\n\ndef run_command(cmd):\n    timeout = 5\n    proc = subprocess.Popen(cmd)\n    try:\n        proc.communicate()\n    except KeyboardInterrupt:\n        print('Ctrl-C detected, terminating')\n        if proc.returncode is None:\n            proc.terminate()\n            print('terminating process...')\n            try:\n                proc.wait(timeout=timeout)\n                print('process terminated')\n            except subprocess.TimeoutExpired:\n                proc.kill()\n                print('process did not terminate, try killing...')\n                try:\n                    proc.wait(timeout=timeout)\n                    print('process killed')\n                except subprocess.TimeoutExpired:\n                    print('cannot kill process...')\n\n    if proc.returncode is None:\n        raise ValueError('Ctrl-C detected, but cannot kill process')\n    elif proc.returncode < 0:\n        raise ValueError('process terminated with return code = %d' % proc.returncode)\n    elif proc.returncode > 0:\n        raise ValueError('command %s failed' % ' '.join(cmd))\n\n\ndef add_git_submodule(module_name, url):\n    if os.path.exists(module_name):\n        # skip if already exists\n        return\n\n    run_command(['git', 'submodule', 'add', url])\n\n\ndef add_git_file(fname):\n    run_command(['git', 'add', '-f', fname])\n\n\ndef link_submodule(repo_path, module_name):\n    if os.path.exists(module_name):\n        # skip if already exists\n        return\n\n    src = os.path.join(repo_path, 
module_name)\n    if not os.path.isdir(src):\n        raise ValueError('Cannot find submodule %s in %s' % (module_name, repo_path))\n    os.symlink(src, module_name)\n    add_git_file(module_name)\n\n\ndef setup_git_submodules(module_list):\n    add_git_submodule('BAG2_TEMPLATES_EC', 'git@github.com:ucb-art/BAG2_TEMPLATES_EC')\n\n    for module_name, module_info in module_list:\n        add_git_submodule(module_name, module_info['url'])\n\n\ndef setup_submodule_links(module_list, repo_path):\n    link_submodule(repo_path, 'BAG2_TEMPLATES_EC')\n    for module_name, _ in module_list:\n        link_submodule(repo_path, module_name)\n\n\ndef run_main():\n    with open('bag_submodules.yaml', 'r') as f:\n        modules_info = yaml.load(f)\n\n    module_list = [(key, modules_info[key]) for key in sorted(modules_info.keys())]\n\n    # error checking\n    bag_dir = 'BAG_framework'\n    if not os.path.isdir(bag_dir):\n        raise ValueError('Cannot find directory %s' % bag_dir)\n\n    # get real absolute path of parent directory of BAG_framework\n    repo_path = os.path.dirname(os.path.realpath(bag_dir))\n    cur_path = os.path.realpath('.')\n    if cur_path == repo_path:\n        # BAG_framework is an actual directory in this repo; add dependencies as git submodules\n        setup_git_submodules(module_list)\n    else:\n        setup_submodule_links(module_list, repo_path)\n\n    setup_python_path(module_list)\n    setup_libs_def(module_list)\n    setup_cds_lib(module_list)\n\n\nif __name__ == '__main__':\n    run_main()\n"
  },
  {
    "path": "run_scripts/sim_cell.py",
    "content": "import argparse\nfrom argparse import Namespace\nfrom pathlib import Path\nimport pdb\n\nfrom bag.io.file import Pickle, Yaml\nfrom bag.core import BagProject\n\nio_cls_dict = {\n    'pickle': Pickle,\n    'yaml': Yaml,\n}\n\n\ndef parse_args() -> Namespace:\n    parser = argparse.ArgumentParser()\n    parser.add_argument('specs_fname', help='specs yaml file')\n    parser.add_argument('--no-cell', dest='gen_cell', action='store_false',\n                        default=True, help='skip cell generation')\n    parser.add_argument('--no-wrapper', dest='gen_wrapper', action='store_false',\n                        default=True,  help='skip wrapper generation')\n    parser.add_argument('--no-tb', dest='gen_tb', action='store_false',\n                        default=True,  help='skip tb generation')\n    parser.add_argument('--load', dest='load_results', action='store_true',\n                        default=False,  help='skip simulation, just load the results')\n    parser.add_argument('-x', '--extract', dest='extract', action='store_true',\n                        default=False, help='do extracted simulation')\n    parser.add_argument('--no-sim', dest='run_sim', action='store_false',\n                        default=True, help='run simulation, --load has a priority over this')\n    parser.add_argument('--format', default='yaml',\n                        help='format of spec file (yaml, json, pickle)')\n    parser.add_argument('-dump', '--dump', default='', help='output will be dumped to this path, '\n                                                            'according to the format specified')\n    parser.add_argument('--pause', default=False, action='store_true',\n                        help='True to pause using pdb.set_trace() after simulation is done')\n    args = parser.parse_args()\n    return args\n\n\ndef run_main(prj: BagProject, args: Namespace):\n    specs_fname = Path(args.specs_fname)\n    io_cls = io_cls_dict[args.format]\n    specs = 
io_cls.load(str(specs_fname))\n\n    results = prj.simulate_cell(specs=specs,\n                                gen_cell=args.gen_cell,\n                                gen_wrapper=args.gen_wrapper,\n                                gen_tb=args.gen_tb,\n                                load_results=args.load_results,\n                                extract=args.extract,\n                                run_sim=args.run_sim)\n\n    if args.pause:\n        pdb.set_trace()\n\n    if results is not None and args.dump:\n        out_tmp_file = Path(args.dump)\n        io_cls.save(results, out_tmp_file)\n\n\nif __name__ == '__main__':\n\n    args = parse_args()\n    local_dict = locals()\n    bprj = local_dict.get('bprj', BagProject())\n    run_main(bprj, args)"
  },
  {
    "path": "run_scripts/start_bag.il",
    "content": "/*  Note:\n\nDue to licensing reasons, this skill script is missing the function \nCCSinvokeCdfCallbacks() from Cadence solution 11018344, which executes \nCDF parameters callback from skill.\n\nIf you do not need to instantiate a pcell instance, this method\nis not needed.\n\nEric Chang, Mar 2, 2017.\n\n*/\n\n\n;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;\n;;  Virtuoso Database operations functions  ;;\n;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;\n; reads a skill data structure from file\nprocedure( parse_data_from_file( fname \"t\" )\n    let( (p ans)\n        unless( p = infile( fname )\n            error(\"Cannot open file %s\" fname)\n        )\n        ans = parse_data_from_file_helper(p)\n        close( p )\n        ans\n    )\n)\n\n; recursive helper for parse_data_from_file\nprocedure( parse_data_from_file_helper( p )\n    let( (line item ans finish key)\n        gets( line p )\n        ; remove newline\n        line = substring(line 1 strlen(line) - 1)\n        ; printf(\"read line: %s\\n\" line)\n        cond(\n            (line == \"#list\"\n            ; parse a list\n            ans = tconc(nil 0)\n            while( nequal(item = parse_data_from_file_helper(p) \"#end\")\n                tconc(ans item)\n            )\n            ; printf(\"returning list \")\n            ; print(cdar(ans))\n            ; printf(\"\\n\")\n            cdar(ans)\n            )\n            (line == \"#prop_list\"\n            ; parse a disembodied property list\n            ans = ncons(nil)\n            finish = nil\n            while( !finish\n                key = parse_data_from_file_helper(p)\n                if( key == \"#end\" then\n                    finish = 't\n                else\n                    item = parse_data_from_file_helper(p)\n                    putprop(ans item key)\n                )\n            )\n            ans\n            )\n            ; parse a float\n            (strncmp( line \"#float\" 6 ) == 0\n                
cdfParseFloatString(cadr(parseString(line)))\n            )\n            ; parse an int\n            (strncmp( line \"#int\" 4 ) == 0\n                atoi(cadr(parseString(line)))\n            )\n            ; parse a boolean\n            (strncmp( line \"#bool\" 5 ) == 0\n                if( atoi(cadr(parseString(line))) == 1 then\n                    't\n                else\n                    nil\n                )\n            )\n            ; parse a string token or #end\n            ('t\n                ; printf(\"returning str %s\\n\" line)\n                line\n            )\n        )\n    )\n)\n\n; return a list of cells in the given library.\nprocedure( get_cells_in_library( lib_name \"t\" )\n    let( ( lib_obj ans )\n        if( lib_obj = ddGetObj(lib_name nil nil nil nil \"r\") then\n            ans = ddGetObjChildren(lib_obj)~>name\n            ddReleaseObj(lib_obj)\n        else\n            ; library does not exist, return empty list\n            ans = '()\n        )\n        ans\n    )\n)\n\n; return a list of cells in the given library.\nprocedure( get_cells_in_library_file( lib_name fname \"tt\" )\n    let( ( p )\n        p = outfile( fname \"w\" )\n        foreach( cell get_cells_in_library(lib_name)\n            fprintf(p \"%s\\n\" cell)\n        )\n        close(p)\n    )\n)\n\n; Returns the directory corresponding to the given library.\nprocedure( get_lib_directory(lib_name \"t\")\n    let( ( lib_obj ans )\n        if( lib_obj = ddGetObj(lib_name nil nil nil nil \"r\") then\n            ans = lib_obj~>readPath\n            ddReleaseObj(lib_obj)\n        else\n            ; library does not exist, return empty list\n            ans = \"\"\n        )\n        ans\n    )\n)\n\n; Parse the netlist of the given cellview.\n; Works on schematic and veriloga.\nprocedure( parse_cad_sch(lib_name cell_name file_name \"ttt\")\n    let( (cv cell_type p indent direction term_names tb_list tb_match\n          inst_lib_name inst_cell_name inst_cnt)\n     
   indent = \"\"\n        cell_type = \"schematic\"\n        unless( cv = dbOpenCellViewByType( lib_name cell_name \"schematic\" nil \"r\" )\n            cell_type = \"veriloga\"\n            unless( cv = dbOpenCellViewByType( lib_name cell_name \"veriloga\" nil \"r\" )\n                error( \"Cannot find schematic or veriloga view of cell %s__%s\" lib_name cell_name )\n            )\n        )\n        p = outfile( file_name \"w\" )\n\n        ; print cellview information\n        printf( \"*INFO* Writing cell %s__%s (%s) netlist to %s\\n\" lib_name cell_name cell_type file_name )\n        fprintf( p \"%slib_name: %s\\n\" indent lib_name )\n        fprintf( p \"%scell_name: %s\\n\" indent cell_name )\n\n        ; print pins\n        fprintf( p \"%spins: [ \" indent )\n        if( cell_type == \"veriloga\" then\n           term_names = reverse(cv~>terminals~>name)\n        else\n           term_names = cv~>terminals~>name\n        )\n        ; add quotes around pin names to escape array pins\n        term_names = mapcar( lambda( (x) sprintf(nil \"\\\"%s\\\"\" x) ) term_names )\n        fprintf( p \"%s ]\\n\" buildString(term_names \", \"))\n\n        ; print instances\n        if( not(cv~>instances) then\n            fprintf( p \"%sinstances: {}\\n\" indent )\n        else\n            inst_cnt = 0\n            fprintf( p \"%sinstances:\\n\" indent )\n            foreach( inst cv~>instances\n                inst_cnt++\n                ; print entry for instance\n                indent = \"  \"\n                fprintf( p \"%s%s:\\n\" indent inst~>name )\n                ; print instance master information.\n                indent = \"    \"\n                fprintf( p \"%slib_name: %s\\n\" indent inst~>libName )\n                fprintf( p \"%scell_name: %s\\n\" indent inst~>cellName )\n                ; print instance terminal information\n                if( !(inst~>instTerms) then\n                    fprintf( p \"%sinstpins: {}\\n\" indent )\n                
else\n                    fprintf( p \"%sinstpins:\\n\" indent )\n                    foreach( inst_term inst~>instTerms\n                        unless( direction = inst_term~>direction\n                            direction = \"\"\n                        )\n                        indent = \"      \"\n                        fprintf( p \"%s%s:\\n\" indent inst_term~>name )\n                        indent = \"        \"\n                        fprintf( p \"%sdirection: %s\\n\" indent direction )\n                        fprintf( p \"%snet_name: \\\"%s\\\"\\n\" indent inst_term~>net~>name )\n                        fprintf( p \"%snum_bits: %d\\n\" indent inst_term~>numBits )\n                    )\n                )\n            )\n            when(inst_cnt == 0\n                fprintf( p \"  {}\\n\" )\n            )\n        )\n\n        ; close resources\n        close(p)\n        dbClose(cv)\n    )\n)\n\n; Delete a cellview if it exists.  Currently used to delete old calibre file.\nprocedure( delete_cellview(lib_name cell_name view_name \"ttt\")\n    let( (obj)\n        obj = ddGetObj(lib_name cell_name view_name)\n        if( obj then\n            ddDeleteObj(obj)\n        else\n            't\n        )\n    )\n)\n\n; Parse the structure of the given cellview.\n; Works on layout.\nprocedure( parse_cad_layout(lib_name cell_name file_name \"ttt\")\n    let( (cv cell_type p indent rect_cnt label_cnt inst_cnt)\n\n        indent = \"\"\n        cell_type = \"layout\"\n        unless( cv = dbOpenCellViewByType( lib_name cell_name cell_type nil \"r\" )\n            error( \"Cannot find layout view of cell %s__%s\" lib_name cell_name )\n        )\n        p = outfile( file_name \"w\" )\n\n        ; print cellview information\n        printf( \"*INFO* Writing cell %s__%s (%s) netlist to %s\\n\" lib_name cell_name cell_type file_name )\n        fprintf( p \"%slib_name: %s\\n\" indent lib_name )\n        fprintf( p \"%scell_name: %s\\n\" indent cell_name )\n\n        
; print rects\n        if( not(cv~>shapes) then\n            fprintf( p \"%srects: {}\\n\" indent )\n        else\n            rect_cnt = 0\n            fprintf( p \"%srects:\\n\" indent )\n            foreach( shape cv~>shapes\n                if( (shape~>objType == \"rect\") then\n                    rect_cnt++ \n                    ; print entry for rect\n                    indent = \"  \"\n                    fprintf( p \"%s%d:\\n\" indent rect_cnt )\n                    ; print rect master information.\n                    indent = \"    \"\n                    fprintf( p \"%slayer: %s %s\\n\" indent nthelem(1 shape~>lpp) nthelem(2 shape~>lpp))\n                    fprintf( p \"%sbBox: [[%f, %f], [%f, %f]]\\n\" indent \n                                 nthelem(1 nthelem(1 shape~>bBox)) nthelem(2 nthelem(1 shape~>bBox)) \n                                 nthelem(1 nthelem(2 shape~>bBox)) nthelem(2 nthelem(2 shape~>bBox)) \n                    );fprintf\n                )\n            );if\n            if((rect_cnt == 0) then\n                fprintf( p \"  {}\\n\" )\n           );if\n        )\n\n        ; print labels\n        indent = \"\"\n        if( not(cv~>shapes) then\n            fprintf( p \"%slabels: {}\\n\" indent )\n        else\n            label_cnt = 0\n            fprintf( p \"%slabels:\\n\" indent )\n            foreach( shape cv~>shapes\n                if( (shape~>objType == \"label\") then\n                    label_cnt++ \n                    ; print entry for label\n                    indent = \"  \"\n                    fprintf( p \"%s%d:\\n\" indent label_cnt )\n                    ; print label master information.\n                    indent = \"    \"\n                    fprintf( p \"%slabel: %s\\n\" indent shape~>theLabel )\n                    fprintf( p \"%slayer: %s %s\\n\" indent nthelem(1 shape~>lpp) nthelem(2 shape~>lpp))\n                    fprintf( p \"%sxy: [%f, %f]\\n\" indent nthelem(1 shape~>xy) nthelem(2 shape~>xy))\n 
               )\n                if( (shape~>objType == \"textDisplay\") then ;some labels are instantiated as text displays\n                    label_cnt++\n                    ; print entry for label\n                    indent = \"  \"\n                    fprintf( p \"%s%d:\\n\" indent label_cnt )\n                    ; print label master information.\n                    indent = \"    \"\n                    fprintf( p \"%slabel: %s\\n\" indent shape~>owner~>name )\n                    fprintf( p \"%slayer: %s %s\\n\" indent nthelem(1 shape~>lpp) nthelem(2 shape~>lpp))\n                    fprintf( p \"%sxy: [%f, %f]\\n\" indent nthelem(1 shape~>xy) nthelem(2 shape~>xy))\n                )\n            );if\n            if((label_cnt == 0) then\n                fprintf( p \"  {}\\n\" )\n           );if\n        )\n        \n        ; print instances\n        indent = \"\"\n        if( not(cv~>instances) then\n            fprintf( p \"%sinstances: {}\\n\" indent )\n        else\n            inst_cnt = 0\n            fprintf( p \"%sinstances:\\n\" indent )\n            foreach( inst cv~>instances\n                inst_cnt++\n                ; print entry for instance\n                indent = \"  \"\n                fprintf( p \"%s%s:\\n\" indent inst~>name )\n                ; print instance master information.\n                indent = \"    \"\n                fprintf( p \"%slib_name: %s\\n\" indent inst~>libName )\n                fprintf( p \"%scell_name: %s\\n\" indent inst~>cellName )\n                fprintf( p \"%sxy: [%f, %f]\\n\" indent nthelem(1 inst~>xy) nthelem(2 inst~>xy))\n                if( (inst~>objType == \"mosaic\") then\n                    fprintf( p \"%scols: %d\\n\" indent inst~>columns)\n                    fprintf( p \"%srows: %d\\n\" indent inst~>rows)\n                    fprintf( p \"%ssp_cols: %f\\n\" indent inst~>uX)\n                    fprintf( p \"%ssp_rows: %f\\n\" indent inst~>uY)\n                    fprintf( p 
\"%srotation: %s\\n\" indent car(inst~>tileArray))\n                    else\n                    fprintf( p \"%srotation: %s\\n\" indent inst~>orient)\n                );if\n            )\n            when(inst_cnt == 0\n                fprintf( p \"  {}\\n\" )\n            )\n        )\n        \n        ; close resources\n        close(p)\n        dbClose(cv)\n    )\n)\n\n; get a list of cells containing in the specficied library\nprocedure( get_cell_list(lib_name file_name \"tt\")\n    let( (lib cellname p)\n        lib=ddGetObj(lib_name)\n        p = outfile( file_name \"w\" )\n        fprintf( p \"%s: [\" lib_name)\n        foreach( cellname lib~>cells~>name\n            fprintf( p \"%s, \" cellname)\n        );foreach\n        fprintf( p \"] \\n\" )\n        ; close resources\n        close(p)\n    );let\n)\n\n; if library with lib_name does not exists, create a new\n; library with that name.  Otherwise, if erase is true,\n; remove all cells in that library.  Returns the library\n; database object.\nprocedure( create_or_erase_library(lib_name tech_lib lib_path erase \"tttg\")\n    let( (lib_obj)\n        if( lib_obj = ddGetObj(lib_name nil nil nil nil \"r\") then\n            when( erase\n                ; delete all cells in the library\n                foreach( cell lib_obj~>cells\n                    unless( ddDeleteObj(cell)\n                        error(\"cannot delete cell %s in library %s\\n\" cell~>name lib_name)\n                    )\n                )\n            )\n            ddReleaseObj(lib_obj)\n            't\n        else\n            ; create library if not exist\n            when( and(lib_path (lib_path != \".\"))\n                lib_path = strcat(lib_path \"/\" lib_name)\n            )\n            lib_obj = ddCreateLib(lib_name lib_path)\n            ; attach technology file\n            techBindTechFile(lib_obj tech_lib)\n            ; close library\n            ddReleaseObj(lib_obj)\n            't\n        )\n    )\n)\n\n; copy 
all template cells to the given library.\n; template list is a list of three-element lists with the format\n; '(\"master_lib_name\" \"master_cell_name\" \"target_cell_name\")\n; any existing cellviews will be overwritten.\nprocedure( copy_templates_to_library(lib_name template_list \"tl\")\n    let( (current remaining src_gdm targ_gdm table master_lib master_cell target_cell key cnt\n          empty_spec targ_lib_obj test_cv)\n\n        current = template_list\n        remaining = '()\n        empty_spec = gdmCreateSpecList()\n        targ_lib_obj = ddGetObj(lib_name nil nil nil nil \"r\")\n\n        ; ccpCopy cannot copy the same cell to multiple different cells.\n        ; because of this, we need to copy a set of unique cells at a time,\n        ; hence the while loop.\n        while( current\n            ; Create GDMSpecList used to copy all cells\n            src_gdm = gdmCreateSpecList()\n            targ_gdm = gdmCreateSpecList()\n            ; table to keep track of seen cells.\n            table = makeTable(\"mytable\" 0)\n            ; Populate GDMSpecList\n            foreach( template_info current\n                master_lib = car(template_info)\n                master_cell = cadr(template_info)\n                target_cell = caddr(template_info)\n\n                ; check if we copied this cell on this iteration yet\n                key = list(master_lib master_cell)\n                if( table[key] == 1 then\n                    ; wait for the next iteration\n                    remaining = cons(template_info remaining)\n                else\n                    ; purge target cellview if exist\n                    when( targ_lib_obj\n                        test_cv = dbFindOpenCellView(targ_lib_obj target_cell \"schematic\")\n                        when( test_cv\n                            dbPurge(test_cv)\n                        )\n                        test_cv = dbFindOpenCellView(targ_lib_obj target_cell \"symbol\")\n                        
when( test_cv\n                            dbPurge(test_cv)\n                        )\n                        ; hard remove adexl state if it exists\n                        test_cv = ddGetObj(lib_name target_cell \"adexl\")\n                        when( test_cv\n                            ddDeleteObj(test_cv)\n                        )\n                    )\n                    gdmAddSpecToSpecList(gdmCreateSpec(master_lib master_cell nil nil \"CDBA\") src_gdm)\n                    gdmAddSpecToSpecList(gdmCreateSpec(lib_name target_cell nil nil \"CDBA\") targ_gdm)\n                    table[key] = 1\n                )\n            )\n            ; Perform copy\n            ccpCopy(src_gdm targ_gdm 't 'CCP_EXPAND_COMANAGED nil nil \"\" \"\" 'CCP_UPDATE_FROM_LIBLIST empty_spec)\n\n            ; set current and remaining\n            current = remaining\n            remaining = '()\n\n            ; debug printing\n            ; printstruct(table)\n        )\n    )\n    't\n)\n\n; returns a unique terminal name in the given cellview.\n; name_base is the suffix of the returned terminal name.\nprocedure( get_unique_term_name( cvid name_base \"gt\")\n    let( (cnt new_term_name)\n        cnt = 1\n        sprintf( new_term_name \"temp%d_%s\" cnt name_base )\n        while( dbFindTermByName(cvid new_term_name)\n            cnt = cnt + 1\n            sprintf( new_term_name \"temp%d_%s\" cnt name_base )\n        )\n        new_term_name\n    )\n)\n\n; helper method to open pin master\nprocedure( open_pin_master(cvid pin_cv_info)\n    let( (pin_master mpin_lib mpin_cell mpin_view)\n        mpin_lib = car(pin_cv_info)\n        mpin_cell = cadr(pin_cv_info)\n        mpin_view = caddr(pin_cv_info)\n        unless( pin_master = dbOpenCellViewByType( mpin_lib mpin_cell mpin_view nil \"r\" )\n            dbClose(cvid)\n            error( \"Cannot find pin master cellview: %s__%s (%s)\" mpin_lib mpin_cell mpin_view)\n        )\n        pin_master\n    )\n)\n\n; update pins of a 
schematic\n; cvid is the opened cellview id of the schematic.  It must be in append mode.\n; pin_map is a list of two-element lists of old pin names and new pin names, respectively.\n; ipin, opin, and iopin are lists of three strings for input/output/inout pins, respectively.\n; first element is the pin master library, second element is the pin mater cell, and third element\n; is the pin master cellview.\nprocedure( update_schematic_pin(cvid pin_map new_pins ipin opin iopin \"glllll\")\n    let( (snap_dist cur_term_name new_term_name term pin pin_orient pin_location pin_direction\n          temp_new_term_name pin_master ipin_master opin_master iopin_master\n          pin_xy_info npin_xl npin_yl npin_xr npin_yr npin_name npin_type)\n\n        snap_dist = schGetEnv(\"schSnapSpacing\")\n\n        ; open pin masters\n        ipin_master = open_pin_master(cvid ipin)\n        opin_master = open_pin_master(cvid opin)\n        iopin_master = open_pin_master(cvid iopin)\n        pin_master = nil\n\n        ; get new pin locations before any pin addition/substraction.\n        pin_xy_info = get_new_pin_locations(cvid snap_dist)\n\n        ; rename or remove pins\n        foreach( p pin_map\n            cur_term_name = car(p)\n            new_term_name = cadr(p)\n            ; printf(\"%s %s\\n\" cur_term_name new_term_name)\n            when(cur_term_name != new_term_name\n                unless( term = dbFindTermByName(cvid cur_term_name)\n                    dbClose(cvid)\n                    dbClose(ipin_master)\n                    dbClose(opin_master)\n                    dbClose(iopin_master)\n                    error( \"Terminal %s not found.\" cur_term_name )\n                )\n                when( term~>pinCount != 1\n                    dbClose(cvid)\n                    dbClose(ipin_master)\n                    dbClose(opin_master)\n                    dbClose(iopin_master)\n                    error( \"Terminal %s does not have exactly one pin.\" 
cur_term_name)\n                )\n                pin = car(term~>pins)\n\n                if( strlen(new_term_name) != 0 then\n                    ; rename pin\n                    pin_orient = pin~>fig~>orient\n                    pin_location = pin~>fig~>xy\n                    pin_direction = term~>direction\n\n                    ; create new pin figure\n                    cond( ( pin_direction == \"input\" pin_master = ipin_master)\n                          ( pin_direction == \"output\" pin_master = opin_master)\n                          ( 't pin_master = iopin_master)\n                    )\n\n                    ; delete pin\n                    unless( dbDeleteObject(pin~>fig)\n                        dbClose(cvid)\n                        dbClose(ipin_master)\n                        dbClose(opin_master)\n                        dbClose(iopin_master)\n                        error( \"Cannot delete pin for terminal %s\" cur_term_name )\n                    )\n\n                    ; create a temporary terminal with a unique name so we can change the number of bits without getting an error\n                    temp_new_term_name = get_unique_term_name(cvid new_term_name)\n                    schCreatePin(cvid pin_master temp_new_term_name pin_direction nil pin_location \"R0\" )\n\n                    ; now rename the new terminal\n                    new_term = dbFindTermByName(cvid temp_new_term_name )\n                    new_term~>name = new_term_name\n                else\n                    ; remove pin\n                    dbDeleteObject(pin~>fig)\n                )\n            )\n        )\n\n        ; add new pins\n        when( new_pins\n            ; get location for new pins\n            npin_xl = xCoord(car(pin_xy_info))\n            npin_yl = yCoord(car(pin_xy_info)) - 2 * snap_dist\n            npin_xr = xCoord(cadr(pin_xy_info))\n            npin_yr = yCoord(cadr(pin_xy_info)) - 2 * snap_dist\n            foreach( npin_info new_pins\n   
             npin_name = car(npin_info)\n                npin_type = cadr(npin_info)\n\n                ; verify that this pin does not exist yet\n                when(dbFindTermByName(cvid npin_name)\n                    dbClose(cvid)\n                    dbClose(ipin_master)\n                    dbClose(opin_master)\n                    dbClose(iopin_master)\n                    error( \"Terminal %s already exists\" npin_name)\n                )\n\n                ; get pin location based on pin type\n                cond( ( npin_type == \"input\" pin_master = ipin_master pin_location = npin_xl:npin_yl npin_yl = npin_yl - 2 * snap_dist)\n                      ( npin_type == \"output\" pin_master = opin_master pin_location = npin_xr:npin_yr npin_yr = npin_yr - 2 * snap_dist)\n                      ( 't pin_master = iopin_master pin_location = npin_xl:npin_yl npin_yl = npin_yl - 2 * snap_dist)\n                )\n                ; create pin\n                schCreatePin(cvid pin_master npin_name npin_type nil pin_location \"R0\")\n            )\n        )\n\n        dbClose(ipin_master)\n        dbClose(opin_master)\n        dbClose(iopin_master)\n    )\n)\n\n; find X and Y coordinates to insert new symbol pins\nprocedure( get_new_pin_locations(cvid snap_dist)\n    let( (pin bbox pin_x pin_y xl xr yl yr)\n        ; find the left-most/right-most pin X coordinates, and find the lowst\n        ; Y coordinate of the left-most/right-most pins\n        xl = nil\n        xr = nil\n        yl = nil\n        yr = nil\n        foreach( term cvid->terminals\n            when( term~>pinCount != 1\n                dbClose(cvid)\n                error( \"Terminal %s does not have exactly one pin\" term~>name)\n            )\n            pin = car(term~>pins)\n            bbox = pin~>fig~>bBox\n            pin_x = round2((xCoord(car(bbox)) + xCoord(cadr(bbox))) / 2.0 / snap_dist)\n            pin_y = round2((yCoord(car(bbox)) + yCoord(cadr(bbox))) / 2.0 / snap_dist)\n            
if( xl == nil then\n                xl = pin_x\n                xr = pin_x\n                yl = pin_y\n                yr = pin_y\n            else\n                cond( (pin_x < xl xl = pin_x yl = pin_y)\n                      (pin_x == xl yl = min(yl pin_y)))\n                cond( (pin_x > xr xr = pin_x yr = pin_y)\n                      (pin_x == xr yr = min(yr pin_y)))\n            )\n        )\n        when(xl == nil\n            ; default values if schematic has no terminals\n            ; this usually means you have a testbench schematic\n            xl = 0\n            yl = 0\n            xr = 10\n            yr = 0\n        )\n        list((xl * snap_dist):(yl * snap_dist) (xr * snap_dist):(yr * snap_dist))\n    )\n)\n\n; update pins of a symbol\n; pin_map is a list of two-element lists, first element is old pin name, second element is new pin name.\n; sympin is a 3-element list of strings. first element is the pin master library,\n; second element is the pin mater cell, and third element is the pin master cellview.\n; simulators is a list of simulator names for which termOrder should be updated.\n; Usually simulators = '(\"auLvs\" \"auCdl\" \"spectre\" \"hspiceD\")\nprocedure( update_symbol_pin(lib_name cell_name pin_map new_pins sympin simulators \"ttllll\")\n    let( (snap_dist cvid pin_master cur_term_name new_term_name term pin bbox pin_x pin_y pin_location pin_direction\n          label_location label_rel_location temp_new_term_name new_term new_port_order cell_obj bc\n          mpin_lib mpin_cell mpin_view pin_xy_info npin_xl npin_yl npin_xr npin_yr npin_name npin_type\n          modified_pins)\n\n        snap_dist = schGetEnv(\"schSnapSpacing\")\n        modified_pins = nil\n        mpin_lib = car(sympin)\n        mpin_cell = cadr(sympin)\n        mpin_view = caddr(sympin)\n        unless( pin_master = dbOpenCellViewByType(mpin_lib mpin_cell mpin_view nil \"r\")\n            error(\"Cannot open symbol pin cellview %s__%s (%s).\" mpin_lib 
mpin_cell mpin_view)\n        )\n        unless( cvid = dbOpenCellViewByType(lib_name cell_name \"symbol\" nil \"a\")\n            dbClose(pin_master)\n            error(\"Cannot open cellview %s__%s (symbol).\" lib_name cell_name)\n        )\n\n        ; get new pin locations before any pin addition/substraction.\n        pin_xy_info = get_new_pin_locations(cvid snap_dist)\n\n        ; modify existing pins\n        new_port_order = tconc(nil \"\")\n        foreach( p pin_map\n            cur_term_name = car(p)\n            new_term_name = cadr(p)\n            new_port_order = tconc(new_port_order new_term_name)\n            when( cur_term_name != new_term_name\n                modified_pins = 't\n                ; printf(\"%s %s\\n\" cur_term_name new_term_name)\n                unless( term = dbFindTermByName(cvid cur_term_name)\n                    dbClose(pin_master)\n                    dbReopen(cvid, \"r\")\n                    dbClose(cvid)\n                    error( \"Terminal %s not found.\" cur_term_name )\n                )\n                when( term~>pinCount != 1\n                    dbClose(pin_master)\n                    dbReopen(cvid, \"r\")\n                    dbClose(cvid)\n                    error( \"Terminal %s does not have exactly one pin.\" cur_term_name)\n                )\n                pin = car(term~>pins)\n\n                if( strlen(new_term_name) != 0 then\n                    ; rename pin\n                    bbox = pin~>fig~>bBox\n                    pin_x = (xCoord(car(bbox)) + xCoord(cadr(bbox))) / 2.0\n                    pin_y = (yCoord(car(bbox)) + yCoord(cadr(bbox))) / 2.0\n                    pin_location = round2(pin_x / snap_dist) * snap_dist:round2(pin_y / snap_dist) * snap_dist\n                    pin_direction = term~>direction\n\n                    ; change label\n                    prog( (label_orientation label_font label_font_size label_type label_text)\n                        foreach( label 
pin~>fig~>children\n                            when( label~>objType == \"label\"\n                                label_location = label~>xy\n                                label_orientation = label~>orient\n                                label_rel_location = label~>justify\n                                label_font = label~>font\n                                label_font_size = label~>height\n                                label_type = label~>labelType\n                                label_text = label~>theLabel\n                                when( label_text == cur_term_name\n                                    schCreateSymbolLabel(cvid label_location \"pin label\" new_term_name label_rel_location\n                                                         label_orientation label_font label_font_size label_type)\n                                    return('t)\n                                )\n                            )\n                        )\n                        return(nil)\n                    )\n\n                    dbDeleteObject(pin~>fig)\n                    dbDeleteObject(pin)\n\n                    ;create a temporary terminal with a unique name so we can change the number of bits without getting an error\n                    temp_new_term_name = get_unique_term_name(cvid new_term_name)\n                    schCreateSymbolPin(cvid pin_master temp_new_term_name pin_direction pin_location \"R0\" )\n\n                    new_term = dbFindTermByName(cvid temp_new_term_name )\n                    dbDeleteObject(term)\n                    new_term~>name = new_term_name\n                else\n                    ; remove pin\n                    dbDeleteObject(pin~>fig)\n                    dbDeleteObject(pin)\n                    dbDeleteObject(term)\n                )\n            )\n        )\n\n        ; add new pins\n        when( new_pins\n            modified_pins = 't\n            ; get location for new pins\n            npin_xl = 
xCoord(car(pin_xy_info))\n            npin_yl = yCoord(car(pin_xy_info)) - 2 * snap_dist\n            npin_xr = xCoord(cadr(pin_xy_info))\n            npin_yr = yCoord(cadr(pin_xy_info)) - 2 * snap_dist\n            foreach( npin_info new_pins\n                npin_name = car(npin_info)\n                npin_type = cadr(npin_info)\n\n                ; verify that this pin does not exist yet\n                when(dbFindTermByName(cvid npin_name)\n                    dbClose(pin_master)\n                    dbReopen(cvid, \"r\")\n                    dbClose(cvid)\n                    error( \"Terminal %s already exists\" npin_name)\n                )\n\n                ; update pin order\n                new_port_order = tconc(new_port_order npin_name)\n\n                ; get pin location based on pin type\n                if( equal(npin_type \"output\") then\n                    label_location = npin_xr:npin_yr\n                    label_rel_location = \"lowerLeft\"\n                    npin_yr = npin_yr - 2 * snap_dist\n                else\n                    label_location = npin_xl:npin_yl\n                    label_rel_location = \"lowerRight\"\n                    npin_yl = npin_yl - 2 * snap_dist\n                )\n\n                ; create label and pin\n                schCreateSymbolLabel(cvid label_location \"pin label\" npin_name label_rel_location\n                                     \"R0\" \"stick\" snap_dist \"normalLabel\")\n                schCreateSymbolPin(cvid pin_master npin_name npin_type label_location \"R0\")\n            )\n        )\n\n        dbClose(pin_master)\n\n        when( modified_pins\n            ; update pin order\n            new_port_order = cdar(new_port_order)\n            schEditPinOrder(cvid new_port_order 't)\n            dbSave(cvid)\n\n            ; update termOrder for each simulators\n            cell_obj = ddGetObj(lib_name cell_name nil nil nil \"r\")\n            unless( bc = cdfGetBaseCellCDF(cell_obj)\n       
         ddReleaseObj(cell_obj)\n                dbReopen(cvid, \"r\")\n                dbClose(cvid)\n                error(\"Cannot find CDF parameters for %s__%s.  Delete generated cell and try again\" lib_name cell_name)\n            )\n            foreach( simu simulators\n                get(bc->simInfo simu)->termOrder = new_port_order\n            )\n            unless( cdfSaveCDF(bc)\n                ddReleaseObj(cell_obj)\n                dbReopen(cvid, \"r\")\n                dbClose(cvid)\n                error(\"Cannot save termOrder CDF for %s__%s.\" lib_name cell_name)\n            )\n            ddReleaseObj(cell_obj)\n        )\n        ; opening schematic will open all symbols inside that schematic.\n        ; as the result, dbClose may not close this symbol view.  To get rid\n        ; of edit lock, we use dbReopen so even if dbClose fails the edit lock\n        ; will be gone.\n        dbReopen(cvid, \"r\")\n        dbClose(cvid)\n    )\n)\n\n; record an association list from pin name to pin location in units of snap distances.\n; the pin name is sorted alphabetically so we can use the equal function to test\n; for equality.\nprocedure( get_instance_pin_info(inst \"g\")\n    let( (snap_dist term_name pin_fig xval yval inst_term_xy ans)\n        ans = nil\n        snap_dist = schGetEnv(\"schSnapSpacing\")\n        foreach( term inst->master->terminals\n            term_name = term~>name\n            ; get terminal coordinate in symbol\n            pin_fig = car(term~>pins)~>fig\n            bbox = pin_fig~>bBox\n            xval = (xCoord(car(bbox)) + xCoord(cadr(bbox))) / 2.0\n            yval = (yCoord(car(bbox)) + yCoord(cadr(bbox))) / 2.0\n            ; quantize to schematic snap spacing to avoid floating point rounding error.\n            inst_term_xy = round2(xval / snap_dist):round2(yval / snap_dist)\n            ans = cons(list(term_name inst_term_xy) ans)\n        )\n        sortcar(ans nil)\n    )\n)\n\n; get all the wire objects 
connected to terminals of the given instance.\n; we assume each terminal has exactly one pin with 1 wire connected, with a\n; single label on the wire.  The wire doesn't connect to anything else.\n; returns an association list from terminal name to a list of net name and wire figure object.\nprocedure( get_instance_terminal_wires(sch inst \"gg\")\n    let( (snap_dist term_name pin_fig xval yval inst_term_xy net_name ans net_map)\n        ans = nil\n        net_map = nil\n        snap_dist = schGetEnv(\"schSnapSpacing\")\n        foreach( inst_term inst~>instTerms\n            term_name = inst_term~>name\n            ; printf(\"terminal name: %s\\n\" term_name)\n            when( inst_term~>term~>pinCount != 1\n                dbClose(sch)\n                error(\"Terminal %s must have exactly one pin.\" term_name)\n            )\n            unless( pin_fig = car(inst_term~>term~>pins)~>fig\n                dbClose(sch)\n                error(\"Cannot find pin figure for terminal %s\" term_name)\n            )\n            ; get instance terminal coordinate in schematic\n            bbox = dbTransformBBox(pin_fig~>bBox inst~>transform)\n            ; printf(\"terminal pin fig bbox: %A\\n\" bbox)\n            xval = xCoord(car(bbox)) + (xCoord(cadr(bbox)) - xCoord(car(bbox))) / 2.0\n            yval = yCoord(car(bbox)) + (yCoord(cadr(bbox)) - yCoord(car(bbox))) / 2.0\n            ; quantize to schematic snap spacing to avoid floating point rounding error.\n            inst_term_xy = round2(xval / snap_dist) * snap_dist:round2(yval / snap_dist) * snap_dist\n            net_name = inst_term~>net~>name\n            net_map = cons(list(term_name net_name) net_map)\n            ; printf(\"terminal pin x/y: %A\\n\" inst_term_xy)\n            foreach( fig inst_term~>net~>figs\n                points = fig~>points\n                ; printf(\"figure points: %A\\n\" points)\n                when( member(inst_term_xy points)\n                    when( length(points) != 2\n     
                   error(\"pin for terminal %s must be connected to a single wire with label\" term_name)\n                    )\n                    ; printf(\"adding figure for terminal %s\\n\" term_name)\n                    ans = cons(list(term_name fig) ans)\n                )\n            )\n        )\n        list(ans net_map)\n    )\n)\n\n; Modify the instance terminal connections of the given instance.\n; we assume each terminal to modify has at most 1 wire connected,\n; if it exists, the wire connects to nothing else, and it has a label.\n; In this way, this function just have to change the label text.\n;\n; if wire_list is not empty, then that means each terminal has exactly one\n; wire connected.  This function will update the label on the wires according\n; to term_mapping.\n;\n; if wire_list is empty, then that means no wires are connected to terminals.\n; this function will attach labels directly to each terminal.  The labels are\n; determined first from term_mapping, then from net_map\n;\n; sch is the schematic database object.  
Must be opened in append/write mode.\n; inst is the instance object to modify.\n; term_mapping is a list of key-value pairs, where keys are old net names,\n; and values are new net names.\nprocedure( modify_instance_terminal(sch inst wire_list net_map term_mapping \"gglll\")\n    let( (snap_dist key_val old_name new_name fig points mid_point new_wire inst_term inst_pin\n          bbox xval yval term_map_final db_term)\n        ; get schematic snap distance spacing.\n        snap_dist = schGetEnv(\"schSnapSpacing\")\n        if( wire_list then\n            foreach( wire_info wire_list\n                old_name = car(wire_info)\n                when(key_val = assoc(old_name term_mapping)\n                    new_name = cadr(key_val)\n                    fig = cadr(wire_info)\n                    points = fig~>points\n                    mid_point = foreach(mapcar (c1 c2) car(points) cadr(points) (c1 + c2) / 2.0)\n                    ; delete old wire, then add wire back with new label.\n                    schDelete(fig)\n                    new_wire = car(schCreateWire(sch \"draw\" \"full\" points snap_dist snap_dist 0))\n                    schCreateWireLabel(sch new_wire mid_point new_name \"lowerCenter\" \"R0\" \"stick\" 0.0625 nil)\n                )\n            )\n            't\n        else\n            ; combine net_map and term_mapping\n            term_map_final = copy(term_mapping)\n            foreach( net_info net_map\n                old_name = car(net_info)\n                unless( assoc(old_name term_map_final)\n                    ; add net mapping only if it's not in term_mapping\n                    term_map_final = cons(net_info term_map_final)\n                )\n            )\n            foreach( net_info term_map_final\n                old_name = car(net_info)\n                new_name = cadr(net_info)\n\n                when(db_term = dbFindTermByName(inst->master old_name)\n                    ; only create terminal that's present in the 
current master\n                    inst_term = dbCreateInstTerm(nil inst db_term)\n                    inst_pin = car(inst_term~>term~>pins)~>fig\n                    bbox = dbTransformBBox(inst_pin~>bBox inst~>transform)\n                    xval = (xCoord(car(bbox)) + xCoord(cadr(bbox))) / 2.0\n                    yval = (yCoord(car(bbox)) + yCoord(cadr(bbox))) / 2.0\n                    xval = round2(xval / snap_dist) * snap_dist\n                    yval = round2(yval / snap_dist) * snap_dist\n\n                    new_wire = car(schCreateWire(sch \"draw\" \"full\" list(xval-snap_dist:yval-snap_dist xval:yval)\n                                   snap_dist snap_dist 0))\n                    schCreateWireLabel(sch new_wire xval:yval new_name \"lowerCenter\" \"R0\" \"stick\" 0.0625 nil)\n                )\n            )\n            't\n        )\n    )\n)\n\n; Perform check-and-save on the given schematic database object, then close it.\nprocedure( check_and_save_schematic(sch \"g\")\n    let( (errs)\n        schSetEnv( \"checkHierSave\" 't)\n        schSetEnv( \"saveAction\" \"Save\")\n        errs = schCheckHier(sch \"schematic symbol\" \"\")\n        foreach( ex errs\n            warn( \"%s__%s (%s) has %d errors.\" car(ex)~>lib~>name car(ex)~>cellName car(ex)~>viewName cadr(ex))\n        )\n        ; make sure all edit locks are gone by reopening in read mode\n        dbReopen(sch, \"r\")\n        dbClose(sch)\n    )\n)\n\n\n; modify a schematic cell.  Used to convert copied template cells into concrete instantiation.\n;\n; inst_list is an association list of (inst_name, rinst_list) pairs.  Where:\n;\n; inst_name : name of the instance in the template cell.\n; rinst_list : a list of rinsts, which are instances to replace the original instance by.\n;              If this list is empty, the original instance should be deleted.  
If the list\n;              has more than one element, we should array the original instance.\n;\n; Each rinst is a disembodied property lists, with the properties:\n;\n; rinst->name : the name of this rinst.\n; rinst->lib_name : the instance master library.\n; rinst->cell_name : the instance master cell.\n; rinst->params : an association list of the CDF params of this rinst.  The values are always string.\n; rinst->term_mapping : an association list of the modified terminal connections of this rinst.\n;                       if no connections are changed, this list should be empty.\n;\n; (You can read more about disembodied property lists and association list in the skill\n; language user guide).\n;\n; For each instance, this function does the following:\n; 1. Find the instance with the given name.\n; 2. If rinst_list is nil, delete this instance.\n; 3. If rinst_list has exactly one element:\n;    i.   rename the instance name to rinst's name.\n;    ii.  change the instance master of the instance.\n;    iii. change the CDF parameters (this should only happen with BAG primitives).\n;    iv.  change the port connections of this instance.\n; 4. 
If rinst_list has more than one element, for each additional element,\n;    copy the original instance and perform step 3 on that instance.\n;\n; This procedure allows one to delete or array any instances in the schematic template.\nprocedure( modify_schematic_content(sch_cv inst_list \"gl\")\n    let( (inst_obj inst_name rinst_list rinst_len cur_inst wire_list net_map par_val xl xr transform\n          snap_dist errmsg pin_info tmp_result)\n        snap_dist = schGetEnv(\"schSnapSpacing\")\n        foreach( inst inst_list\n            inst_name = car(inst)\n            unless( inst_obj = dbFindAnyInstByName(sch_cv inst_name)\n                dbClose(sch_cv)\n                error( \"Cannot find instance %s\" inst_name )\n            )\n            rinst_list = cadr(inst)\n            rinst_len = length(rinst_list)\n            last_inst = nil\n            if( rinst_len == 0 then\n                ; no instances to replace by, delete.\n                wire_list = car(get_instance_terminal_wires(sch_cv inst_obj))\n                ; delete wires connected to instance\n                foreach( wire_info wire_list\n                    schDelete(cadr(wire_info))\n                )\n                ; delete instance\n                dbDeleteObject(inst_obj)\n            else\n                cur_inst = nil\n                pin_info = nil\n                foreach( rinst rinst_list\n                    if( !cur_inst then\n                        cur_inst = inst_obj\n                        ; printf(\"inst %s lib = %s, cell = %s\\n\" inst_name inst_obj->master->libName inst_obj->master->cellName)\n                        tmp_result = get_instance_terminal_wires(sch_cv cur_inst)\n                        net_map = cadr(tmp_result)\n                        wire_list = car(tmp_result)\n                        pin_info = get_instance_pin_info(cur_inst)\n                        ; printf(\"%s wire_list: %A\\n\" inst_name wire_list)\n                        ; figure out bounding box 
for potential future array\n                        ; printf(\"instance %s bbox: %A\\n\" cur_inst~>name cur_inst~>bBox)\n                        xl = xCoord(car(cur_inst~>bBox))\n                        xr = xCoord(cadr(cur_inst~>bBox))\n                        foreach( wire_info wire_list\n                            ; printf(\"instance %s wire: %A %A\\n\" cur_inst~>name xCoord(car(cadr(wire_info)~>bBox)) xCoord(cadr(cadr(wire_info)~>bBox)))\n                            xl = min(xl xCoord(car(cadr(wire_info)~>bBox)))\n                            xr = max(xr xCoord(cadr(cadr(wire_info)~>bBox)))\n                        )\n                        transform = list(round2((xr - xl + snap_dist) / snap_dist) * snap_dist:0 \"R0\" 1.0)\n                        ; printf(\"instance %s transform: %A\\n\" cur_inst~>name transform)\n                    else\n                        ; more than 1 rinst, copy cur_inst, do not copy wires\n                        wire_list = nil\n                        ; copy instance\n                        cur_inst = dbCopyFig(cur_inst nil transform)\n                    )\n                    ; change instance name and master\n                    when(cur_inst->name != rinst->name\n                        cur_inst->name = rinst->name\n                    )\n                    schReplaceProperty(list(cur_inst) \"master\" sprintf(nil \"%s %s %s\" rinst->lib_name\n                                                                       rinst->cell_name cur_inst->viewName))\n                    ; set parameters\n                    foreach( cdf_par cdfGetInstCDF(cur_inst)~>parameters\n                        par_val = cadr(assoc(cdf_par->name rinst->params))\n                        ; change CDF parameter value only if specified in given parameters\n                        when( par_val != nil\n                            cdf_par->value = par_val\n                        )\n                    )\n                    when( wire_list\n               
         ; if wire_list is not empty, check that the pins match.  If so, keep wires around,\n                        ; otherwise, delete wires\n                        unless( equal(pin_info get_instance_pin_info(cur_inst))\n                            ; delete wires connected to instance\n                            foreach( wire_info wire_list\n                                schDelete(cadr(wire_info))\n                            )\n                            wire_list = nil\n                        )\n                    )\n                    ; modify connections, keeping old wires around\n                    ; printf(\"instance %s wire_list: %A net_map: %A term_map: %A\\n\" cur_inst~>name wire_list net_map rinst->term_mapping)\n                    modify_instance_terminal(sch_cv cur_inst wire_list net_map rinst->term_mapping)\n                )\n            )\n        )\n    )\n)\n\n; given a copied template cell, modify it to a concrete schematic.\nprocedure( convert_template_cells(lib_name cell_name pin_map new_pins inst_list sympin ipin opin iopin simulators)\n    let( (sym_cv sch)\n        ; update symbol view first.\n        if( sym_cv = dbOpenCellViewByType(lib_name cell_name \"symbol\" nil \"r\") then\n            printf(\"*INFO* Updating %s__%s symbol pins.\\n\" lib_name cell_name)\n            update_symbol_pin(lib_name cell_name pin_map new_pins sympin simulators)\n        else\n            warn(\"Did not find symbol for %s__%s.  Skipping.  
Is it testbench?\" lib_name cell_name)\n        )\n\n        ; attempt to open schematic in append mode\n        unless( sch = dbOpenCellViewByType(lib_name cell_name \"schematic\" nil \"a\")\n            error(\"Cannot open %s__%s (schematic) in append mode.\" lib_name cell_name)\n        )\n        ; update schematic content\n        printf(\"*INFO* Updating %s__%s instances and connections.\\n\" lib_name cell_name)\n        modify_schematic_content(sch inst_list)\n        ; update schematic pins\n        printf(\"*INFO* Updating %s__%s schematic pins.\\n\" lib_name cell_name)\n        update_schematic_pin(sch pin_map new_pins ipin opin iopin)\n        check_and_save_schematic(sch)\n    )\n)\n\n; create concrete schematics\nprocedure( create_concrete_schematic( lib_name tech_lib lib_path temp_file change_file\n                                      sympin ipin opin iopin simulators copy \"tttttlllllg\" )\n    let( (template_list change_list cell_name pin_map inst_list)\n        printf(\"*INFO* Reading template and change list from file\\n\")\n        template_list = parse_data_from_file( temp_file )\n        change_list = parse_data_from_file( change_file )\n        when( copy\n            printf(\"*INFO* Creating library: %s\\n\" lib_name)\n            create_or_erase_library( lib_name tech_lib lib_path nil )\n            printf(\"*INFO* Copying templates to library: %s\\n\" lib_name)\n            copy_templates_to_library( lib_name template_list )\n        )\n        foreach( change change_list\n            cell_name = change->name\n            pin_map = change->pin_map\n            new_pins = change->new_pins\n            inst_list = change->inst_list\n            printf(\"*INFO* Updating cell %s__%s\\n\" lib_name cell_name)\n            convert_template_cells( lib_name cell_name pin_map new_pins inst_list\n                                    sympin ipin opin iopin simulators )\n        )\n        't\n    )\n)\n\n; create a new layout view then instantiate a 
single pcell instance.\n; this method also copy all the labels in the pcell top level.  In this way LVS/PEX will\n; work correctly.\n; params is a list of (variable_name type_string value) lists.\n; pin_mapping is a list of (old_pin new_pin) lists.\nprocedure( create_layout_with_pcell(lib_name cell_name view_name inst_lib inst_cell params_f pin_mapping_f \"ttttttt\")\n    let( (lay_cv inst_master inst inst_shapes label_location label_orientation label_lpp\n          label_just label_font label_height label_type label_text params pin_mapping)\n        unless( lay_cv = dbOpenCellViewByType(lib_name cell_name view_name \"maskLayout\" \"w\")\n            error(\"Cannot open cellview %s__%s (%s).\" lib_name cell_name view_name)\n        )\n        unless( inst_master = dbOpenCellViewByType(inst_lib inst_cell \"layout\" \"maskLayout\" \"r\")\n            dbClose(lay_cv)\n            error(\"Cannot open cellview %s__%s (layout).\" inst_lib inst_cell)\n        )\n\n        params = parse_data_from_file(params_f)\n        pin_mapping = parse_data_from_file(pin_mapping_f)\n\n        inst = dbCreateParamInst(lay_cv inst_master \"XTOP\" '(0 0) \"R0\" 1 params)\n        inst_shapes = inst~>master~>shapes\n\n        foreach(shape inst_shapes\n            when( shape->objType == \"label\"\n                label_location = shape~>xy\n                label_orientation = shape~>orient\n                label_lpp = shape~>lpp\n                label_just = shape~>justify\n                label_font = shape~>font\n                label_height = shape~>height\n                label_type = shape~>labelType\n                label_text = shape~>theLabel\n                when( cadr(assoc(label_text pin_mapping))\n                    label_text = cadr(assoc(label_text pin_mapping))\n                )\n                dbCreateLabel(lay_cv label_lpp label_location label_text label_just label_orientation label_font label_height )\n            )\n        )\n        dbClose(inst_master)\n        
dbSave(lay_cv)\n        dbClose(lay_cv)\n    )\n)\n\n; helper for creating a path segment\nprocedure( create_path_seg_helper(cv lay p0 p1 width start_s end_s)\n    let( (diag_ext info_list bext eext)\n        if( and(car(p0) != car(p1) cadr(p0) != cadr(p1)) then\n            diag_ext = width / 2\n            width = width * sqrt(2)\n        else\n            diag_ext = width * sqrt(2) / 2\n        )\n\n        bext = 0\n        eext = 0 \n        when( start_s == \"round\"\n            bext = width / 2\n            start_s = \"custom\"\n        )\n        when( end_s == \"round\"\n            eext = width / 2\n            end_s = \"custom\"\n        )\n        info_list = list(bext eext list(diag_ext diag_ext width/2 diag_ext diag_ext width/2))\n        dbCreatePathSeg(cv lay p0 p1 width start_s end_s info_list)\n    )\n)\n\n\n; helper for creating a path\nprocedure( create_path_helper( cv path )\n    let( (lay width points estyle jstyle p0 p1 plen idx start_s end_s)\n        lay = path->layer\n        width = path->width\n        points = path->points\n        estyle = path->end_style\n        jstyle = path->join_style\n        p0 = nil\n        plen = length(points)\n        idx = 0\n        foreach( cur_point points\n            p1 = cur_point\n            when( idx > 0\n                if( idx == 1 then\n                    start_s = estyle\n                else\n                    start_s = jstyle\n                )\n                if( idx == plen - 1 then\n                    end_s = estyle\n                else\n                    end_s = jstyle\n                )\n                create_path_seg_helper(cv lay p0 p1 width start_s end_s)\n            )\n            p0 = p1\n            idx = idx + 1\n        )\n    )\n)\n\n\n; helper for creating a single layout view\nprocedure( create_layout_helper( cv tech_file inst_list rect_list via_list pin_list path_list\n                                 blockage_list boundary_list polygon_list \"ggllllllll\" )\n    
let( (inst_cv obj via_def via_enc1 via_enc2 enc1 enc2 off1 off2 via_params make_pin_rect\n          pin_bb pin_w pin_h pin_xc pin_yc pin_orient label_h param_order orig_shape arr_dx arr_dy)\n\n        ; create instances\n        foreach( inst inst_list\n            if( inst_cv = dbOpenCellViewByType( inst->lib inst->cell inst->view nil \"r\" ) then\n\n                if( and( inst->num_rows==1 inst->num_cols==1) then\n                    if( inst->params != nil then\n                        ; create pcell instance\n                        obj = dbCreateParamInst(cv inst_cv inst->name inst->loc inst->orient 1 inst->params)\n                        when( obj\n                            if( inst->param_order != nil then\n                                param_order=inst->param_order\n                            else\n                                param_order= mapcar( lambda( (x) car(x) ) inst->params)\n                            )\n                            CCSinvokeInstCdfCallbacks(obj ?order param_order)\n                        )\n\n                    else\n                        obj = dbCreateInst(cv inst_cv inst->name inst->loc inst->orient)\n                    )\n                else\n                    if( inst->params != nil then\n                        ; create pcell mosaic\n                        obj = dbCreateParamSimpleMosaic(cv inst_cv inst->name inst->loc inst->orient\n                                                        inst->num_rows inst->num_cols inst->sp_rows inst->sp_cols\n                                                        inst->params)\n                        when( obj\n                            if( inst->param_order != nil then\n                                param_order=inst->param_order\n                            else\n                                param_order= mapcar( lambda( (x) car(x) ) inst->params)\n                            )\n                            CCSinvokeInstCdfCallbacks(obj ?order param_order)\n      
                  )\n                    else\n                        obj = dbCreateSimpleMosaic(cv inst_cv inst->name inst->loc inst->orient\n                                                   inst->num_rows inst->num_cols inst->sp_rows inst->sp_cols)\n                    )\n                )\n                unless( obj\n                    warn(\"Error creating instance %s of %s__%s (%s).  Skipping.\" inst->name inst->lib inst->cell inst->view)\n                )\n\n            else\n                warn(\"Cannot find instance %s__%s (%s).  Skipping.\" inst->lib inst->cell inst->view)\n            )\n        )\n\n        ; create rectangles\n        foreach( rect rect_list\n            orig_shape = dbCreateRect(cv rect->layer rect->bbox)\n            if( not(orig_shape) then\n                warn(\"Error creating rectangle of layer %A.  Skipping.\" rect->layer)\n            else\n                when( rect->arr_nx != nil\n                    for(icol 2 rect->arr_nx\n                        arr_dx = rect->arr_spx * (icol - 1)\n                        for(irow 1 rect->arr_ny\n                            arr_dy = rect->arr_spy * (irow - 1)\n                            dbCopyFig(orig_shape nil list(arr_dx:arr_dy \"R0\" 1))\n                        )\n                    )\n                    for(irow 2 rect->arr_ny\n                        arr_dy = rect->arr_spy * (irow - 1)\n                        dbCopyFig(orig_shape nil list(0:arr_dy \"R0\" 1))\n                    )\n                )\n            )\n        )\n\n        ; create paths\n        foreach( path path_list\n            create_path_helper(cv path)\n        )\n\n        ; create polygons\n        foreach( poly polygon_list\n            dbCreatePolygon(cv poly->layer poly->points)\n        )\n\n        ; create blockages\n        foreach( block blockage_list\n            if( block->btype == \"placement\" then\n                dbCreateAreaBlockage(cv block->points)\n            else\n                
dbCreateLayerBlockage(cv block->layer block->btype block->points)\n            )\n        )\n\n        ; create boundaries\n        foreach( bound boundary_list\n            cond( (bound->btype == \"PR\"\n                   dbCreatePRBoundary(cv bound->points))\n                  (bound->btype == \"snap\"\n                   dbCreateSnapBoundary(cv bound->points))\n                  (bound->btype == \"area\"\n                   dbCreateAreaBoundary(cv bound->points))\n                  ('t\n                   warn(\"Unknown boundary type %s.  Skipping.\" bound->btype))\n            )\n        )\n\n        ; create vias\n        foreach( via via_list\n            if( via_def = techFindViaDefByName(tech_file via->id) then\n                ; compute via parameter list\n                via_enc1 = via->enc1\n                via_enc2 = via->enc2\n                enc1 = list( (car(via_enc1) + cadr(via_enc1)) / 2.0\n                             (caddr(via_enc1) + cadr(cddr(via_enc1))) / 2.0 )\n                enc2 = list( (car(via_enc2) + cadr(via_enc2)) / 2.0\n                             (caddr(via_enc2) + cadr(cddr(via_enc2))) / 2.0 )\n                off1 = list( (cadr(via_enc1) - car(via_enc1)) / 2.0\n                             (caddr(via_enc1) - cadr(cddr(via_enc1))) / 2.0 )\n                off2 = list( (cadr(via_enc2) - car(via_enc2)) / 2.0\n                             (caddr(via_enc2) - cadr(cddr(via_enc2))) / 2.0 )\n\n                via_params = list( list(\"cutRows\" via->num_rows)\n                                   list(\"cutColumns\" via->num_cols)\n                                   list(\"cutSpacing\" list(via->sp_cols via->sp_rows))\n                                   list(\"layer1Enc\" enc1)\n                                   list(\"layer2Enc\" enc2)\n                                   list(\"layer1Offset\" off1)\n                                   list(\"layer2Offset\" off2) )\n\n                ; if via width and height given, add to via_params\n   
             when( via->cut_width != nil\n                    via_params = cons( list(\"cutWidth\" via->cut_width) via_params)\n                )\n                when( via->cut_height != nil\n                    via_params = cons( list(\"cutHeight\" via->cut_height) via_params)\n                )\n\n                ; create actual via\n                orig_shape = dbCreateVia(cv via_def via->loc via->orient via_params)\n                if( not(orig_shape) then\n                    warn(\"Error creating via %s.  Skipping.\" via->id)\n                else\n                    when( via->arr_nx != nil\n                        for(icol 2 via->arr_nx\n                            arr_dx = via->arr_spx * (icol - 1)\n                            for(irow 1 via->arr_ny\n                                arr_dy = via->arr_spy * (irow - 1)\n                                dbCopyFig(orig_shape nil list(arr_dx:arr_dy \"R0\" 1))\n                            )\n                        )\n                        for(irow 2 via->arr_ny\n                            arr_dy = via->arr_spy * (irow - 1)\n                            dbCopyFig(orig_shape nil list(0:arr_dy \"R0\" 1))\n                        )\n                    )\n                )\n            else\n                warn(\"Via %s not found.  
Skipping.\" via->id)\n            )\n        )\n\n        ; create pins\n        foreach( pin pin_list\n            pin_bb = pin->bbox\n            pin_w = caadr(pin_bb) - caar(pin_bb)\n            pin_h = cadr(cadr(pin_bb)) - cadr(car(pin_bb))\n            pin_xc = (caar(pin_bb) + caadr(pin_bb)) / 2.0\n            pin_yc = (cadr(car(pin_bb)) + cadr(cadr(pin_bb))) / 2.0\n\n            if( pin_w >= pin_h then\n                pin_orient = \"R0\"\n                label_h = pin_h\n            else\n                pin_orient = \"R90\"\n                label_h = pin_w\n            )\n\n            ; get make_pin_rect, true if both net_name and pin_name are non-empty\n            make_pin_rect = pin->net_name != \"\" && pin->pin_name != \"\"\n            when( pin->make_rect != nil\n                make_pin_rect = pin->make_rect\n            )\n            ; printf(\"make_pin_rect: %A\\n\" make_pin_rect)\n            ; create pin object only if make_pin_rect is True.\n            when( make_pin_rect != 0 && make_pin_rect != nil\n                ; printf(\"making pin.\\n\")\n                dbCreatePin( dbMakeNet(cv pin->net_name) dbCreateRect(cv pin->layer pin_bb) pin->pin_name )\n            )\n            ; printf(\"%A %A %A %A\\n\" pin->label pin->layer pin_xc pin_yc)\n            dbCreateLabel( cv pin->layer list(pin_xc pin_yc) pin->label \"centerCenter\" pin_orient \"roman\" label_h )\n        )\n    )\n)\n\n; create a new layout view with the given geometries\n; inst_f, rect_f, via_f, and pin_f are files containing list of disembodied property lists.\nprocedure( create_layout( lib_name view_name via_tech layout_f \"ttt\" )\n    let( (tech_file layout_info cell_name inst_list rect_list via_list pin_list\n          path_list blockage_list boundary_list polygon_list cv)\n\n        unless( tech_file = techGetTechFile(ddGetObj(via_tech))\n            error(\"Via technology file %s not found.\" via_tech)\n        )\n\n        layout_info = 
parse_data_from_file(layout_f)\n        foreach( info layout_info\n            cell_name = nthelem(1 info)\n            inst_list = nthelem(2 info)\n            rect_list = nthelem(3 info)\n            via_list = nthelem(4 info)\n            pin_list = nthelem(5 info)\n            path_list = nthelem(6 info)\n            blockage_list = nthelem(7 info)\n            boundary_list = nthelem(8 info)\n            polygon_list = nthelem(9 info)\n\n            unless( cv = dbOpenCellViewByType( lib_name cell_name view_name \"maskLayout\" \"w\" )\n                error(\"Cannot create new layout cell %s__%s (%s).\" lib_name cell_name view_name)\n            )\n\n            printf(\"Creating %s__%s (%s)\\n\" lib_name cell_name view_name)\n            create_layout_helper(cv tech_file inst_list rect_list via_list pin_list path_list\n                                 blockage_list boundary_list polygon_list)\n            dbSave(cv)\n            dbClose(cv)\n        )\n\n        t\n    )\n)\n\n; release write locks from all the given cellviews\nprocedure( release_write_locks( lib_name cell_view_list_f \"tt\" )\n    let( (cell_view_list lib_obj cv)\n        cell_view_list = parse_data_from_file(cell_view_list_f)\n        when( lib_obj = ddGetObj(lib_name nil nil nil nil \"r\")\n            foreach( info cell_view_list\n                when( cv = dbFindOpenCellView( lib_obj car(info) cadr(info) )\n                    dbReopen(cv, \"r\")\n                    dbClose(cv)\n                )\n            )\n            ddReleaseObj(lib_obj)\n        )\n        t\n    )\n)\n\n\n\n;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;\n;;  Simulation/Testbench related functions  ;;\n;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;\n\n\n; set an entry in an association list\n; returns the modified association list.\nprocedure( set_assoc_list(mylist mykey myval)\n    let( (tmp)\n        when( tmp = assoc(mykey mylist)\n            ; print(\"replacing\")\n            rplacd(tmp list(myval))\n     
   )\n    )\n    mylist\n)\n\n; Copy the schematic of a testbench, and replace the DUT instance.\n;\n; This procedure copies the schematic of a testbench to a new library and cell, then finds all\n; instances with the name prefix \"XDUT\", then change their instance master to dut_lib and dut_cell.\n;\nprocedure( copy_testbench(master_lib master_cell targ_lib\n                          dut_lib dut_cell tech_lib new_lib_path \"ttttttt\")\n    let( (tlib_obj sch replace_count inst_prefix new_master)\n        inst_prefix = \"XDUT\"\n\n        printf(\"Copying testbench %s__%s to %s__%s\\n\" master_lib master_cell targ_lib master_cell)\n\n        ; create target library if does not exist\n        unless( tlib_obj = ddGetObj(targ_lib nil nil nil nil \"r\")\n            when( and(new_lib_path (new_lib_path != \".\"))\n                new_lib_path = strcat(new_lib_path \"/\" lib_name)\n            )\n            tlib_obj = ddCreateLib(targ_lib new_lib_path)\n            ; attach technology file\n            techBindTechFile(tlib_obj tech_lib)\n        )\n\n        ; copy testbench to new library\n        src_gdm = gdmCreateSpecList()\n        gdmAddSpecToSpecList(gdmCreateSpec(master_lib master_cell nil nil \"CDBA\") src_gdm)\n        targ_gdm = gdmCreateSpecList()\n        gdmAddSpecToSpecList(gdmCreateSpec(targ_lib master_cell nil nil \"CDBA\") targ_gdm)\n        ccpCopy(src_gdm targ_gdm 't 'CCP_EXPAND_COMANAGED)\n\n        ; open copied schematic\n        unless( sch = dbOpenCellViewByType(tlib_obj master_cell \"schematic\" nil \"a\")\n            ddReleaseObj(tlib_obj)\n            error(\"Cannot open testbench schematic %s__%s\" targ_lib master_cell)\n        )\n\n        ; replace instances\n        replace_count = 0\n        sprintf(new_master \"%s %s symbol\" dut_lib dut_cell)\n        foreach( inst sch~>instances\n           when( strncmp( inst~>name inst_prefix strlen(inst_prefix) ) == 0\n               replace_count = replace_count + 1\n               
schReplaceProperty(list(inst) \"master\" new_master)\n           )\n        )\n\n        ; save and close resources\n        check_and_save_schematic(sch)\n        ddReleaseObj(tlib_obj)\n\n        ; error if nothing is replaced\n        when( replace_count == 0\n            error(\"Cannot find any instances in %s__%s with name prefix %s\" targ_lib master_cell inst_prefix)\n        )\n        't\n    )\n)\n\n; opens an adexl session.  Returns a list of session name and setup database handle.\nprocedure( open_adexl_session(tb_lib tb_cell tb_view session_name mode \"ttttt\")\n    let( (session sdb)\n        unless( session = axlCreateSession(session_name)\n            error(\"Cannot create temporary adexl session: %s\" session_name)\n        )\n        unless( sdb = axlSetMainSetupDBLCV(session tb_lib tb_cell tb_view)\n            axlCloseSession(session)\n            error(\"Cannot load adexl database from %s__%s (%s)\" tb_lib tb_cell tb_view)\n        )\n        list(session sdb)\n    )\n)\n\n; Enables only the given corners in the simulation setup database.\nprocedure( enable_adexl_corners( sdb corner_list env_param_list \"gll\")\n    let( (env_name par_val_list corner)\n        foreach(cur_name cadr(axlGetCorners(sdb))\n            axlSetEnabled( axlGetCorner(sdb cur_name) member(cur_name corner_list) )\n        )\n        foreach(env_par_obj env_param_list\n            env_name = car(env_par_obj)\n            par_val_list = cadr(env_par_obj)\n            corner = axlGetCorner(sdb env_name)\n            foreach(par_val par_val_list\n                axlPutVar(corner car(par_val) cadr(par_val))\n            )\n        )\n    )\n)\n\n; Set testbench parameters\n; val_list is an association list from variable names to variable values as string, which\n; could be a constant value or a parametric sweep string\nprocedure( set_adexl_parameters(sdb par_val_list \"gl\")\n    foreach( var_spec par_val_list\n        axlPutVar(sdb car(var_spec) cadr(var_spec))\n    )\n)\n\n; 
Create a new config view for a testbench.\n;\n; lib_name : testbench library name.\n; cell_name : testbench cell name.\n; view_name : name of the config view (a testbench can have multiple config views)\n; libs : a string of global libraries, separated by spaces.\n; views : a string of cellviews to use, separated by spaces.\n; stops : a string of cellviews to stop at, separated by spaces.\nprocedure( create_config_view(lib_name cell_name view_name libs views stops \"tttttt\")\n    let( (conf conf_bag)\n        printf(\"Creating config view %s__%s (%s)\\n\" lib_name cell_name view_name)\n\n        unless( conf = hdbOpen(lib_name cell_name view_name \"w\")\n            error(\"Cannot open config view %s__%s (%s).\" lib_name cell_name view_name)\n        )\n        hdbSetTopCellViewName(conf lib_name cell_name \"schematic\")\n        hdbSetDefaultLibListString(conf libs)\n        hdbSetDefaultViewListString(conf views)\n        hdbSetDefaultStopListString(conf stops)\n        hdbSaveAs(conf lib_name cell_name view_name)\n\n        ; close configuration\n        conf_bag = hdbCreateConfigBag()\n        hdbAddConfigToBag(conf_bag conf)\n        hdbCloseConfigsInBag(conf_bag)\n    )\n)\n\n; edit the config view of a testbench.  Use to control whether we're simulating with\n; schematic or post-extraction.\n;\n; lib_name : testbench library name.\n; cell_name : testbench cell name.\n; view_name : name of the config view (a testbench can have multiple config views)\n; conf_list : a list of (<lib>, <cell>, <view>) configurations.  
Where each entry\n;             means that view <view> should be used for the cell <cell> in library <lib>.\nprocedure( edit_config_view(lib_name cell_name view_name conf_list \"tttl\")\n    let( (conf lib cell view conf_bag netlist_list)\n        unless( conf = hdbOpen(lib_name cell_name view_name \"a\")\n            error(\"Cannot open config view %s__%s (%s).\" lib_name cell_name view_name)\n        )\n        netlist_list = '()\n        foreach( cell_config conf_list\n            lib = car(cell_config)\n            cell = cadr(cell_config)\n            view = caddr(cell_config)\n            if( view == \"netlist\" then\n                ; set to use extracted netlist\n                netlist_list = cons(list(lib cell) netlist_list)\n            else\n                ; set to use extracted cellview\n                hdbSetObjBindRule(conf list(list(lib cell nil nil))\n                                  list('hdbcBindingRule list(nil nil view)))\n            )\n        )\n        hdbSaveAs(conf lib_name cell_name view_name)\n\n        ; close configuration\n        conf_bag = hdbCreateConfigBag()\n        hdbAddConfigToBag(conf_bag conf)\n        hdbCloseConfigsInBag(conf_bag)\n\n        ; update netlist source files\n        edit_config_source_files(lib_name cell_name view_name netlist_list)\n    )\n)\n\n; HACKERMAN FUNCTION:\n; so as usual, cadence is so terrible they don't have skill API to set source files.\n; instead, spice/spectre source files are defined in a secret ASCII prop.cfg file.\n; this hacky method will create the right prop.cfg file for you.\nprocedure( edit_config_source_files(lib_name cell_name view_name netlist_list \"tttl\")\n    let( (p lib_dir cell_lib_dir)\n        lib_dir = get_lib_directory(lib_name)\n        p = outfile( sprintf(nil \"%s/%s/%s/%s\" lib_dir cell_name view_name \"prop.cfg\") \"w\" )\n        ; common header\n        fprintf( p \"file-format-id 1.1;\\ndefault\\n{\\n}\\n\" )\n        foreach( lib_cell netlist_list\n            
lib = car(lib_cell)\n            cell = cadr(lib_cell)\n            cell_lib_dir = get_lib_directory(lib)\n            fprintf( p \"cell %s.%s\\n{\\n\" lib cell )\n            fprintf( p \"    non-inherited string prop sourcefile = \\\"%s/%s/netlist/netlist\\\";\\n}\\n\"\n                     cell_lib_dir cell )\n        )\n        close(p)\n    )\n)\n\n; Write testbench information to file.\nprocedure( write_testbench_info_to_file(sdb result_file output_list en_corner_list)\n    let( (p output_count)\n\n        ; write testbench information to result_file\n        p = outfile(result_file \"w\")\n\n        fprintf(p \"corners:\\n\")\n        foreach( corn cadr(axlGetCorners(sdb))\n            fprintf(p \"  - %s\\n\" corn)\n        )\n        fprintf(p \"enabled_corners:\\n\")\n        foreach( corn en_corner_list\n            fprintf(p \"  - %s\\n\" corn)\n        )\n        fprintf(p \"parameters:\\n\")\n        if( var_list = cadr(axlGetVars(sdb)) then\n            foreach( var_name var_list\n                fprintf(p \"  %s: \\\"%s\\\"\\n\" var_name axlGetVarValue(axlGetVar(sdb var_name)))\n            )\n        else\n            fprintf(p \"  {}\\n\")\n        )\n        fprintf(p \"outputs:\\n\")\n        output_count = 0\n        foreach( out_obj output_list\n            if( rexMatchp( \"\\\"\" out_obj->name) then\n                warn(\"Output expression name (%s) have quotes, skipping\" out_obj->name)\n            else\n                fprintf(p \"  \\\"%s\\\": !!str %A\\n\" out_obj->name out_obj->expression)\n                output_count = output_count + 1\n            )\n        )\n        when( output_count == 0\n            fprintf(p \"  {}\\n\")\n        )\n        close(p)\n    )\n)\n\n; Instantiates a testbench.\n;\n; Copy a testbench template to the desired location, replace instances, make config view,\n; and also setup corner settings in adexl.\n; this method will also record list of corners, global variables, and output expressions\n; to 
result_file\nprocedure( instantiate_testbench(tb_cell targ_lib\n                                 config_libs config_views config_stops\n                                 default_corner corner_file def_files\n                                 tech_lib result_file\n                                 \"tttttttltt\")\n    let( (session_name session_sdb session sdb test_names test_name test tool_args corner_list\n          ade_symbol ade_session output_list tmp_state_name state_obj success)\n\n        tmp_state_name = \"orig_state\"\n\n        ; check if temporary ADE session state already exists, if so, delete it\n        state_obj = ddGetObj(targ_lib tb_cell tmp_state_name)\n        when( state_obj\n            success = ddDeleteObj(state_obj)\n            unless( success\n                error(\"Cannot delete orig_state cellview.\")\n            )\n        )\n\n        ; create config view\n        create_config_view(targ_lib tb_cell \"config\" config_libs config_views config_stops)\n\n        ; session_name = \"modify_adexl\"\n        session_name = sprintf(nil \"modify_adexl_%d\" bag_modify_adexl_counter)\n        bag_modify_adexl_counter = bag_modify_adexl_counter + 1\n\n        session_sdb = open_adexl_session(targ_lib tb_cell \"adexl\" session_name \"a\")\n        session = car(session_sdb)\n        sdb = cadr(session_sdb)\n\n        ; check that only one test is defined\n        test_names = cadr(axlGetTests(sdb))\n        when(length(test_names) != 1\n            axlCommitSetupDB(sdb)\n            axlCloseSetupDB(sdb)\n            axlCloseSession(session)\n            error(\"ADEXL testbench must have exactly 1 test defined.\")\n        )\n\n        ; save current test setup state\n        axlSaveSetupState(session \"adexl_default\" \"All\")\n\n        ; change all tests to use config view, and set all test's definition files\n        ; also get a list of defined output expressions\n        ; step 1: get ADE session\n        test_name = car(test_names)\n        
ade_symbol = axlGetToolSession(session_name test_name)\n        ade_session = asiGetSession(ade_symbol)\n        ; step 2: save original ADE session\n        asiSaveState(ade_session ?name tmp_state_name ?option 'cellview ?lib targ_lib ?cell tb_cell)\n        ; step 3: change test library\n        test = axlGetTest(sdb test_name)\n        tool_args = axlGetTestToolArgs(test)\n        set_assoc_list(tool_args \"view\" \"config\")\n        set_assoc_list(tool_args \"lib\" targ_lib)\n        set_assoc_list(tool_args \"cell\" tb_cell)\n        axlSetTestToolArgs(test tool_args)\n        ; step 4: reopen ADE session, then load original ADE state\n        ade_symbol = axlGetToolSession(session_name test_name)\n        ade_session = asiGetSession(ade_symbol)\n        asiLoadState(ade_session ?name tmp_state_name ?option 'cellview)\n        asiSetEnvOptionVal(ade_session 'definitionFiles def_files)\n        output_list = setof(ele asiGetOutputList(ade_session) ele->name)\n        ; step 5: delete temporary ADE session state\n        state_obj = ddGetObj(targ_lib tb_cell tmp_state_name)\n        ddDeleteObj(state_obj)\n\n        axlMainAppSaveSetup(session_name)\n\n        ; load corner\n        unless(axlLoadCorners(sdb corner_file)\n            axlCommitSetupDB(sdb)\n            axlCloseSetupDB(sdb)\n            axlCloseSession(session)\n            error(\"Error loading corner file %s to %s__%s (%s)\" corner_file lib_name cell_name view_name)\n        )\n\n        ; set default corner\n        corner_list = list(default_corner)\n        enable_adexl_corners(sdb corner_list nil)\n\n        ; write testbench information to file\n        write_testbench_info_to_file(sdb result_file output_list corner_list)\n\n        ; save and close\n        axlSaveSetupState(session \"adexl_default\" \"All\")\n        axlSaveSetupState(session \"ocean_default\" \"All\")\n        axlMainAppSaveSetup(session_name)\n        axlCommitSetupDB(sdb)\n        axlCloseSetupDB(sdb)\n        
axlCloseSession(session)\n    )\n)\n\n; Returns parameter and corner information of a testbench.\nprocedure( get_testbench_info(tb_lib tb_cell result_file \"ttt\")\n    let( (session_name session_sdb session sdb test_names test_name ade_symbol asi_sess\n        output_list corner_list en_list success)\n        session_name = \"read_adexl\"\n        session_sdb = open_adexl_session(tb_lib tb_cell \"adexl\" session_name \"r\")\n        session = car(session_sdb)\n        sdb = cadr(session_sdb)\n\n        ; check that only one test is defined\n        test_names = cadr(axlGetTests(sdb))\n        when(length(test_names) != 1\n            axlCommitSetupDB(sdb)\n            axlCloseSetupDB(sdb)\n            axlCloseSession(session)\n            error(\"ADEXL testbench must have exactly 1 test defined.\")\n        )\n\n        ; get output list\n        test_name = car(test_names)\n        ade_symbol = axlGetToolSession(session_name test_name)\n        asi_sess = sevEnvironment(ade_symbol)\n        output_list = setof(ele asiGetOutputList(asi_sess) ele->name)\n\n        ; get enabled corners\n        corner_list = cadr(axlGetCorners(sdb))\n        en_list = setof(corner corner_list axlGetEnabled(axlGetCorner(sdb corner)))\n\n        ; write testbench information to file\n        write_testbench_info_to_file(sdb result_file output_list en_list)\n\n        ; close\n        axlCommitSetupDB(sdb)\n        axlCloseSetupDB(sdb)\n        axlCloseSession(session)\n    )\n)\n\n; Configure run options.  Used to setup monte carlo parameters.\n; run_params is an association list of run options and their values.  
The key \"mode\"\n; corresponds to the run mode.\nprocedure( set_run_options(session sdb run_params \"ggl\")\n    let( (run_mode opt_list run_opt)\n        when( run_mode = cadr(assoc(\"mode\" run_params))\n                  ; no options for single run/sweep mode.\n            cond( (run_mode == \"Single Run, Sweeps and Corners\"\n                   opt_list = nil)\n                  (run_mode == \"Monte Carlo Sampling\"\n                   opt_list = '(\"mcnumpoints\" \"mcmethod\") )\n                  ('t\n                      axlCloseSession(session)\n                      error(\"Unsupported run mode: %s\" run_mode) )\n            )\n            foreach( opt_name opt_list\n                when( opt_val = cadr(assoc(opt_name run_params))\n                    run_opt = axlPutRunOption(sdb run_mode opt_name)\n                    axlSetRunOptionValue(run_opt opt_val)\n                )\n            )\n            axlSetCurrentRunMode(sdb run_mode)\n        )\n    )\n)\n\n; modify the given testbench.\n; tb_lib and tb_cell describes the library and cell of the testbench to simulate.\n; conf_file contains the config view settings.\n; opt_file contains the association list of run mode options.\n; corner_file contains a list of corners to simulate.\n; param_file contains the association list of parameter values.\nprocedure( modify_testbench(tb_lib tb_cell conf_file opt_file corner_file param_file env_params_file \"ttttttt\")\n    let( (tmp_list session sdb conf_list run_params corner_list param_values env_param_values session_name)\n        sprintf(session_name \"bag_sim_adexl_%s\" getCurrentTime())\n\n        ; read inputs from file.\n        conf_list = parse_data_from_file(conf_file)\n        run_params = parse_data_from_file(opt_file)\n        corner_list = parse_data_from_file(corner_file)\n        param_values = parse_data_from_file(param_file)\n        env_param_values = parse_data_from_file(env_params_file)\n\n        ; modify config view\n        when( 
conf_list\n            edit_config_view(tb_lib tb_cell \"config\" conf_list)\n        )\n\n        tmp_list = open_adexl_session(tb_lib tb_cell \"adexl\" session_name \"a\")\n        session = car(tmp_list)\n        sdb = cadr(tmp_list)\n\n        ; change corners, parameters, and run options\n        enable_adexl_corners( sdb corner_list env_param_values)\n        set_adexl_parameters( sdb param_values )\n        set_run_options( session sdb run_params )\n\n        ; save and close\n        axlSaveSetupState(session \"adexl_default\" \"All\")\n        axlSaveSetupState(session \"ocean_default\" \"All\")\n        axlMainAppSaveSetup(session_name)\n        axlCommitSetupDB(sdb)\n        axlCloseSetupDB(sdb)\n        axlCloseSession(session)\n    )\n)\n\n;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;\n;;  BAG server related functions            ;;\n;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;\n\nprocedure( stdoutHandler(ipcId data)\n    let( (result result_str)\n        if( bag_server_started > 0 then\n            printf(\"*INFO* Evaluate expression from BAG process: %s\\n\" data)\n            if( result = errsetstring(data 't) then\n                sprintf(result_str \"%A\\n\" car(result))\n            else\n                sprintf(result_str \"%s\\n\" car(nthelem(5 errset.errset)))\n            )\n            printf(\"*INFO* Sending result to BAG process: %s\" result_str)\n            ipcWriteProcess(ipcId sprintf(nil \"%d\\n\" strlen(result_str)))\n            ipcWriteProcess(ipcId result_str)\n            't\n        else\n            if( data == \"BAG skill server has started.  Yay!\\n\" then\n                bag_server_started = 1\n                printf(\"*INFO* BAG skill server started.\\n\")\n            else\n                printf(\"*INFO* Waiting for BAG skill server.  
Message: %s\\n\" data)\n            )\n        )\n    )\n)\n\nprocedure( stderrHandler(ipcId data)\n    warn(\"BAG server process error: %s\\n\" data)\n    warn(\"Shutting down BAG server.\")\n    ipcKillProcess(ipcId)\n    't\n)\n\nprocedure( exitHandler(ipcId exitId)\n    printf(\"*INFO* BAG server process exited with status: %d\\n\" exitId)\n    't\n)\n\nprocedure( start_bag_server()\n    bag_server_started = 0\n    printf(\"*INFO* Starting BAG server process.\\n\")\n    ipcBeginProcess(\"bash virt_server.sh\" \"\" 'stdoutHandler 'stderrHandler 'exitHandler \"\")\n)\n\nbag_server_started = 0\nbag_modify_adexl_counter = 0\nbag_proc = start_bag_server()\n"
  },
  {
    "path": "run_scripts/start_bag.sh",
    "content": "#!/usr/bin/env bash\n\nexport PYTHONPATH=\"\"\n\n# disable QT session manager warnings\nunset SESSION_MANAGER\n\nexec ${BAG_PYTHON} -m IPython $@\n"
  },
  {
    "path": "run_scripts/start_bag_ICADV12d3.il",
    "content": "/*  Note:\n\nDue to licensing reasons, this skill script is missing the function \nCCSinvokeCdfCallbacks() from Cadence solution 11018344, which executes \nCDF parameters callback from skill.\n\nIf you do not need to instantiate a pcell instance, this method\nis not needed.\n\nEric Chang, Mar 2, 2017.\n\n*/\n\n\n;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;\n;;  Virtuoso Database operations functions  ;;\n;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;\n; reads a skill data structure from file\nprocedure( parse_data_from_file( fname \"t\" )\n    let( (p ans)\n        unless( p = infile( fname )\n            error(\"Cannot open file %s\" fname)\n        )\n        ans = parse_data_from_file_helper(p)\n        close( p )\n        ans\n    )\n)\n\n; recursive helper for parse_data_from_file\nprocedure( parse_data_from_file_helper( p )\n    let( (line item ans finish key)\n        gets( line p )\n        ; remove newline\n        line = substring(line 1 strlen(line) - 1)\n        ; printf(\"read line: %s\\n\" line)\n        cond(\n            (line == \"#list\"\n            ; parse a list\n            ans = tconc(nil 0)\n            while( nequal(item = parse_data_from_file_helper(p) \"#end\")\n                tconc(ans item)\n            )\n            ; printf(\"returning list \")\n            ; print(cdar(ans))\n            ; printf(\"\\n\")\n            cdar(ans)\n            )\n            (line == \"#prop_list\"\n            ; parse a disembodied property list\n            ans = ncons(nil)\n            finish = nil\n            while( !finish\n                key = parse_data_from_file_helper(p)\n                if( key == \"#end\" then\n                    finish = 't\n                else\n                    item = parse_data_from_file_helper(p)\n                    putprop(ans item key)\n                )\n            )\n            ans\n            )\n            ; parse a float\n            (strncmp( line \"#float\" 6 ) == 0\n                
cdfParseFloatString(cadr(parseString(line)))\n            )\n            ; parse an int\n            (strncmp( line \"#int\" 4 ) == 0\n                atoi(cadr(parseString(line)))\n            )\n            ; parse a boolean\n            (strncmp( line \"#bool\" 5 ) == 0\n                if( atoi(cadr(parseString(line))) == 1 then\n                    't\n                else\n                    nil\n                )\n            )\n            ; parse a string token or #end\n            ('t\n                ; printf(\"returning str %s\\n\" line)\n                line\n            )\n        )\n    )\n)\n\n; return a list of cells in the given library.\nprocedure( get_cells_in_library( lib_name \"t\" )\n    let( ( lib_obj ans )\n        if( lib_obj = ddGetObj(lib_name nil nil nil nil \"r\") then\n            ans = ddGetObjChildren(lib_obj)~>name\n            ddReleaseObj(lib_obj)\n        else\n            ; library does not exist, return empty list\n            ans = '()\n        )\n        ans\n    )\n)\n\n; return a list of cells in the given library.\nprocedure( get_cells_in_library_file( lib_name fname \"tt\" )\n    let( ( p )\n        p = outfile( fname \"w\" )\n        foreach( cell get_cells_in_library(lib_name)\n            fprintf(p \"%s\\n\" cell)\n        )\n        close(p)\n    )\n)\n\n; Returns the directory corresponding to the given library.\nprocedure( get_lib_directory(lib_name \"t\")\n    let( ( lib_obj ans )\n        if( lib_obj = ddGetObj(lib_name nil nil nil nil \"r\") then\n            ans = lib_obj~>readPath\n            ddReleaseObj(lib_obj)\n        else\n            ; library does not exist, return empty list\n            ans = \"\"\n        )\n        ans\n    )\n)\n\n; Parse the netlist of the given cellview.\n; Works on schematic and veriloga.\nprocedure( parse_cad_sch(lib_name cell_name file_name \"ttt\")\n    let( (cv cell_type p indent direction term_names tb_list tb_match\n          inst_lib_name inst_cell_name inst_cnt)\n     
   indent = \"\"\n        cell_type = \"schematic\"\n        unless( cv = dbOpenCellViewByType( lib_name cell_name \"schematic\" nil \"r\" )\n            cell_type = \"veriloga\"\n            unless( cv = dbOpenCellViewByType( lib_name cell_name \"veriloga\" nil \"r\" )\n                error( \"Cannot find schematic or veriloga view of cell %s__%s\" lib_name cell_name )\n            )\n        )\n        p = outfile( file_name \"w\" )\n\n        ; print cellview information\n        printf( \"*INFO* Writing cell %s__%s (%s) netlist to %s\\n\" lib_name cell_name cell_type file_name )\n        fprintf( p \"%slib_name: %s\\n\" indent lib_name )\n        fprintf( p \"%scell_name: %s\\n\" indent cell_name )\n\n        ; print pins\n        fprintf( p \"%spins: [ \" indent )\n        if( cell_type == \"veriloga\" then\n           term_names = reverse(cv~>terminals~>name)\n        else\n           term_names = cv~>terminals~>name\n        )\n        ; add quotes around pin names to escape array pins\n        term_names = mapcar( lambda( (x) sprintf(nil \"\\\"%s\\\"\" x) ) term_names )\n        fprintf( p \"%s ]\\n\" buildString(term_names \", \"))\n\n        ; print instances\n        if( not(cv~>instances) then\n            fprintf( p \"%sinstances: {}\\n\" indent )\n        else\n            inst_cnt = 0\n            fprintf( p \"%sinstances:\\n\" indent )\n            foreach( inst cv~>instances\n                inst_cnt++\n                ; print entry for instance\n                indent = \"  \"\n                fprintf( p \"%s%s:\\n\" indent inst~>name )\n                ; print instance master information.\n                indent = \"    \"\n                fprintf( p \"%slib_name: %s\\n\" indent inst~>libName )\n                fprintf( p \"%scell_name: %s\\n\" indent inst~>cellName )\n                ; print instance terminal information\n                if( !(inst~>instTerms) then\n                    fprintf( p \"%sinstpins: {}\\n\" indent )\n                
else\n                    fprintf( p \"%sinstpins:\\n\" indent )\n                    foreach( inst_term inst~>instTerms\n                        unless( direction = inst_term~>direction\n                            direction = \"\"\n                        )\n                        indent = \"      \"\n                        fprintf( p \"%s%s:\\n\" indent inst_term~>name )\n                        indent = \"        \"\n                        fprintf( p \"%sdirection: %s\\n\" indent direction )\n                        fprintf( p \"%snet_name: \\\"%s\\\"\\n\" indent inst_term~>net~>name )\n                        fprintf( p \"%snum_bits: %d\\n\" indent inst_term~>numBits )\n                    )\n                )\n            )\n            when(inst_cnt == 0\n                fprintf( p \"  {}\\n\" )\n            )\n        )\n\n        ; close resources\n        close(p)\n        dbClose(cv)\n    )\n)\n\n; Delete a cellview if it exists.  Currently used to delete old calibre file.\nprocedure( delete_cellview(lib_name cell_name view_name \"ttt\")\n    let( (obj)\n        obj = ddGetObj(lib_name cell_name view_name)\n        if( obj then\n            ddDeleteObj(obj)\n        else\n            't\n        )\n    )\n)\n\n; Parse the structure of the given cellview.\n; Works on layout.\nprocedure( parse_cad_layout(lib_name cell_name file_name \"ttt\")\n    let( (cv cell_type p indent rect_cnt label_cnt inst_cnt)\n\n        indent = \"\"\n        cell_type = \"layout\"\n        unless( cv = dbOpenCellViewByType( lib_name cell_name cell_type nil \"r\" )\n            error( \"Cannot find layout view of cell %s__%s\" lib_name cell_name )\n        )\n        p = outfile( file_name \"w\" )\n\n        ; print cellview information\n        printf( \"*INFO* Writing cell %s__%s (%s) netlist to %s\\n\" lib_name cell_name cell_type file_name )\n        fprintf( p \"%slib_name: %s\\n\" indent lib_name )\n        fprintf( p \"%scell_name: %s\\n\" indent cell_name )\n\n        
; print rects\n        if( not(cv~>shapes) then\n            fprintf( p \"%srects: {}\\n\" indent )\n        else\n            rect_cnt = 0\n            fprintf( p \"%srects:\\n\" indent )\n            foreach( shape cv~>shapes\n                if( (shape~>objType == \"rect\") then\n                    rect_cnt++ \n                    ; print entry for rect\n                    indent = \"  \"\n                    fprintf( p \"%s%d:\\n\" indent rect_cnt )\n                    ; print rect master information.\n                    indent = \"    \"\n                    fprintf( p \"%slayer: %s %s\\n\" indent nthelem(1 shape~>lpp) nthelem(2 shape~>lpp))\n                    fprintf( p \"%sbBox: [[%f, %f], [%f, %f]]\\n\" indent \n                                 nthelem(1 nthelem(1 shape~>bBox)) nthelem(2 nthelem(1 shape~>bBox)) \n                                 nthelem(1 nthelem(2 shape~>bBox)) nthelem(2 nthelem(2 shape~>bBox)) \n                    );fprintf\n                )\n            );if\n            if((rect_cnt == 0) then\n                fprintf( p \"  {}\\n\" )\n           );if\n        )\n\n        ; print labels\n        indent = \"\"\n        if( not(cv~>shapes) then\n            fprintf( p \"%slabels: {}\\n\" indent )\n        else\n            label_cnt = 0\n            fprintf( p \"%slabels:\\n\" indent )\n            foreach( shape cv~>shapes\n                if( (shape~>objType == \"label\") then\n                    label_cnt++ \n                    ; print entry for label\n                    indent = \"  \"\n                    fprintf( p \"%s%d:\\n\" indent label_cnt )\n                    ; print label master information.\n                    indent = \"    \"\n                    fprintf( p \"%slabel: %s\\n\" indent shape~>theLabel )\n                    fprintf( p \"%slayer: %s %s\\n\" indent nthelem(1 shape~>lpp) nthelem(2 shape~>lpp))\n                    fprintf( p \"%sxy: [%f, %f]\\n\" indent nthelem(1 shape~>xy) nthelem(2 shape~>xy))\n 
               )\n                if( (shape~>objType == \"textDisplay\") then ;some labels are instantiated as text displays\n                    label_cnt++\n                    ; print entry for label\n                    indent = \"  \"\n                    fprintf( p \"%s%d:\\n\" indent label_cnt )\n                    ; print label master information.\n                    indent = \"    \"\n                    fprintf( p \"%slabel: %s\\n\" indent shape~>owner~>name )\n                    fprintf( p \"%slayer: %s %s\\n\" indent nthelem(1 shape~>lpp) nthelem(2 shape~>lpp))\n                    fprintf( p \"%sxy: [%f, %f]\\n\" indent nthelem(1 shape~>xy) nthelem(2 shape~>xy))\n                )\n            );if\n            if((label_cnt == 0) then\n                fprintf( p \"  {}\\n\" )\n           );if\n        )\n        \n        ; print instances\n        indent = \"\"\n        if( not(cv~>instances) then\n            fprintf( p \"%sinstances: {}\\n\" indent )\n        else\n            inst_cnt = 0\n            fprintf( p \"%sinstances:\\n\" indent )\n            foreach( inst cv~>instances\n                inst_cnt++\n                ; print entry for instance\n                indent = \"  \"\n                fprintf( p \"%s%s:\\n\" indent inst~>name )\n                ; print instance master information.\n                indent = \"    \"\n                fprintf( p \"%slib_name: %s\\n\" indent inst~>libName )\n                fprintf( p \"%scell_name: %s\\n\" indent inst~>cellName )\n                fprintf( p \"%sxy: [%f, %f]\\n\" indent nthelem(1 inst~>xy) nthelem(2 inst~>xy))\n                if( (inst~>objType == \"mosaic\") then\n                    fprintf( p \"%scols: %d\\n\" indent inst~>columns)\n                    fprintf( p \"%srows: %d\\n\" indent inst~>rows)\n                    fprintf( p \"%ssp_cols: %f\\n\" indent inst~>uX)\n                    fprintf( p \"%ssp_rows: %f\\n\" indent inst~>uY)\n                    fprintf( p 
\"%srotation: %s\\n\" indent car(inst~>tileArray))\n                    else\n                    fprintf( p \"%srotation: %s\\n\" indent inst~>orient)\n                );if\n            )\n            when(inst_cnt == 0\n                fprintf( p \"  {}\\n\" )\n            )\n        )\n        \n        ; close resources\n        close(p)\n        dbClose(cv)\n    )\n)\n\n; get a list of cells containing in the specficied library\nprocedure( get_cell_list(lib_name file_name \"tt\")\n    let( (lib cellname p)\n        lib=ddGetObj(lib_name)\n        p = outfile( file_name \"w\" )\n        fprintf( p \"%s: [\" lib_name)\n        foreach( cellname lib~>cells~>name\n            fprintf( p \"%s, \" cellname)\n        );foreach\n        fprintf( p \"] \\n\" )\n        ; close resources\n        close(p)\n    );let\n)\n\n; if library with lib_name does not exists, create a new\n; library with that name.  Otherwise, if erase is true,\n; remove all cells in that library.  Returns the library\n; database object.\nprocedure( create_or_erase_library(lib_name tech_lib lib_path erase \"tttg\")\n    let( (lib_obj)\n        if( lib_obj = ddGetObj(lib_name nil nil nil nil \"r\") then\n            when( erase\n                ; delete all cells in the library\n                foreach( cell lib_obj~>cells\n                    unless( ddDeleteObj(cell)\n                        error(\"cannot delete cell %s in library %s\\n\" cell~>name lib_name)\n                    )\n                )\n            )\n            ddReleaseObj(lib_obj)\n            't\n        else\n            ; create library if not exist\n            when( and(lib_path (lib_path != \".\"))\n                lib_path = strcat(lib_path \"/\" lib_name)\n            )\n            lib_obj = ddCreateLib(lib_name lib_path)\n            ; attach technology file\n            techBindTechFile(lib_obj tech_lib)\n            ; close library\n            ddReleaseObj(lib_obj)\n            't\n        )\n    )\n)\n\n; copy 
all template cells to the given library.\n; template list is a list of three-element lists with the format\n; '(\"master_lib_name\" \"master_cell_name\" \"target_cell_name\")\n; any existing cellviews will be overwritten.\nprocedure( copy_templates_to_library(lib_name template_list \"tl\")\n    let( (current remaining src_gdm targ_gdm table master_lib master_cell target_cell key cnt\n          empty_spec targ_lib_obj test_cv)\n\n        current = template_list\n        remaining = '()\n        empty_spec = gdmCreateSpecList()\n        targ_lib_obj = ddGetObj(lib_name nil nil nil nil \"r\")\n\n        ; ccpCopy cannot copy the same cell to multiple different cells.\n        ; because of this, we need to copy a set of unique cells at a time,\n        ; hence the while loop.\n        while( current\n            ; Create GDMSpecList used to copy all cells\n            src_gdm = gdmCreateSpecList()\n            targ_gdm = gdmCreateSpecList()\n            ; table to keep track of seen cells.\n            table = makeTable(\"mytable\" 0)\n            ; Populate GDMSpecList\n            foreach( template_info current\n                master_lib = car(template_info)\n                master_cell = cadr(template_info)\n                target_cell = caddr(template_info)\n\n                ; check if we copied this cell on this iteration yet\n                key = list(master_lib master_cell)\n                if( table[key] == 1 then\n                    ; wait for the next iteration\n                    remaining = cons(template_info remaining)\n                else\n                    ; purge target cellview if exist\n                    when( targ_lib_obj\n                        test_cv = dbFindOpenCellView(targ_lib_obj target_cell \"schematic\")\n                        when( test_cv\n                            dbPurge(test_cv)\n                        )\n                        test_cv = dbFindOpenCellView(targ_lib_obj target_cell \"symbol\")\n                        
when( test_cv\n                            dbPurge(test_cv)\n                        )\n                        ; hard remove adexl state if it exists\n                        test_cv = ddGetObj(lib_name target_cell \"adexl\")\n                        when( test_cv\n                            ddDeleteObj(test_cv)\n                        )\n                    )\n                    gdmAddSpecToSpecList(gdmCreateSpec(master_lib master_cell nil nil \"CDBA\") src_gdm)\n                    gdmAddSpecToSpecList(gdmCreateSpec(lib_name target_cell nil nil \"CDBA\") targ_gdm)\n                    table[key] = 1\n                )\n            )\n            ; Perform copy\n            ccpCopy(src_gdm targ_gdm 't 'CCP_EXPAND_COMANAGED nil nil \"\" \"\" 'CCP_UPDATE_FROM_LIBLIST empty_spec)\n\n            ; set current and remaining\n            current = remaining\n            remaining = '()\n\n            ; debug printing\n            ; printstruct(table)\n        )\n    )\n    't\n)\n\n; returns a unique terminal name in the given cellview.\n; name_base is the suffix of the returned terminal name.\nprocedure( get_unique_term_name( cvid name_base \"gt\")\n    let( (cnt new_term_name)\n        cnt = 1\n        sprintf( new_term_name \"temp%d_%s\" cnt name_base )\n        while( dbFindTermByName(cvid new_term_name)\n            cnt = cnt + 1\n            sprintf( new_term_name \"temp%d_%s\" cnt name_base )\n        )\n        new_term_name\n    )\n)\n\n; helper method to open pin master\nprocedure( open_pin_master(cvid pin_cv_info)\n    let( (pin_master mpin_lib mpin_cell mpin_view)\n        mpin_lib = car(pin_cv_info)\n        mpin_cell = cadr(pin_cv_info)\n        mpin_view = caddr(pin_cv_info)\n        unless( pin_master = dbOpenCellViewByType( mpin_lib mpin_cell mpin_view nil \"r\" )\n            dbClose(cvid)\n            error( \"Cannot find pin master cellview: %s__%s (%s)\" mpin_lib mpin_cell mpin_view)\n        )\n        pin_master\n    )\n)\n\n; update pins of a 
schematic\n; cvid is the opened cellview id of the schematic.  It must be in append mode.\n; pin_map is a list of two-element lists of old pin names and new pin names, respectively.\n; ipin, opin, and iopin are lists of three strings for input/output/inout pins, respectively.\n; first element is the pin master library, second element is the pin mater cell, and third element\n; is the pin master cellview.\nprocedure( update_schematic_pin(cvid pin_map new_pins ipin opin iopin \"glllll\")\n    let( (snap_dist cur_term_name new_term_name term pin pin_orient pin_location pin_direction\n          temp_new_term_name pin_master ipin_master opin_master iopin_master\n          pin_xy_info npin_xl npin_yl npin_xr npin_yr npin_name npin_type)\n\n        snap_dist = schGetEnv(\"schSnapSpacing\")\n\n        ; open pin masters\n        ipin_master = open_pin_master(cvid ipin)\n        opin_master = open_pin_master(cvid opin)\n        iopin_master = open_pin_master(cvid iopin)\n        pin_master = nil\n\n        ; get new pin locations before any pin addition/substraction.\n        pin_xy_info = get_new_pin_locations(cvid snap_dist)\n\n        ; rename or remove pins\n        foreach( p pin_map\n            cur_term_name = car(p)\n            new_term_name = cadr(p)\n            ; printf(\"%s %s\\n\" cur_term_name new_term_name)\n            when(cur_term_name != new_term_name\n                unless( term = dbFindTermByName(cvid cur_term_name)\n                    dbClose(cvid)\n                    dbClose(ipin_master)\n                    dbClose(opin_master)\n                    dbClose(iopin_master)\n                    error( \"Terminal %s not found.\" cur_term_name )\n                )\n                when( term~>pinCount != 1\n                    dbClose(cvid)\n                    dbClose(ipin_master)\n                    dbClose(opin_master)\n                    dbClose(iopin_master)\n                    error( \"Terminal %s does not have exactly one pin.\" 
cur_term_name)\n                )\n                pin = car(term~>pins)\n\n                if( strlen(new_term_name) != 0 then\n                    ; rename pin\n                    pin_orient = pin~>fig~>orient\n                    pin_location = pin~>fig~>xy\n                    pin_direction = term~>direction\n\n                    ; create new pin figure\n                    cond( ( pin_direction == \"input\" pin_master = ipin_master)\n                          ( pin_direction == \"output\" pin_master = opin_master)\n                          ( 't pin_master = iopin_master)\n                    )\n\n                    ; delete pin\n                    unless( dbDeleteObject(pin~>fig)\n                        dbClose(cvid)\n                        dbClose(ipin_master)\n                        dbClose(opin_master)\n                        dbClose(iopin_master)\n                        error( \"Cannot delete pin for terminal %s\" cur_term_name )\n                    )\n\n                    ; create a temporary terminal with a unique name so we can change the number of bits without getting an error\n                    temp_new_term_name = get_unique_term_name(cvid new_term_name)\n                    schCreatePin(cvid pin_master temp_new_term_name pin_direction nil pin_location \"R0\" )\n\n                    ; now rename the new terminal\n                    new_term = dbFindTermByName(cvid temp_new_term_name )\n                    new_term~>name = new_term_name\n                else\n                    ; remove pin\n                    dbDeleteObject(pin~>fig)\n                )\n            )\n        )\n\n        ; add new pins\n        when( new_pins\n            ; get location for new pins\n            npin_xl = xCoord(car(pin_xy_info)) - 2 * snap_dist\n            npin_yl = yCoord(car(pin_xy_info)) - 2 * snap_dist\n            npin_xr = xCoord(cadr(pin_xy_info)) \n            npin_yr = yCoord(cadr(pin_xy_info)) - 2 * snap_dist\n            foreach( 
npin_info new_pins\n                npin_name = car(npin_info)\n                npin_type = cadr(npin_info)\n\n                ; verify that this pin does not exist yet\n                when(dbFindTermByName(cvid npin_name)\n                    dbClose(cvid)\n                    dbClose(ipin_master)\n                    dbClose(opin_master)\n                    dbClose(iopin_master)\n                    error( \"Terminal %s already exists\" npin_name)\n                )\n\n                ; get pin location based on pin type\n                cond( ( npin_type == \"input\" pin_master = ipin_master pin_location = npin_xl:npin_yl npin_yl = npin_yl - 2 * snap_dist)\n                      ( npin_type == \"output\" pin_master = opin_master pin_location = npin_xr:npin_yr npin_yr = npin_yr - 2 * snap_dist)\n                      ( 't pin_master = iopin_master pin_location = npin_xl:npin_yl npin_yl = npin_yl - 2 * snap_dist)\n                )\n                ; create pin\n                schCreatePin(cvid pin_master npin_name npin_type nil pin_location \"R0\")\n            )\n        )\n\n        dbClose(ipin_master)\n        dbClose(opin_master)\n        dbClose(iopin_master)\n    )\n)\n\n; find X and Y coordinates to insert new symbol pins\nprocedure( get_new_pin_locations(cvid snap_dist)\n    let( (pin bbox pin_x pin_y xl xr yl yr)\n        ; find the left-most/right-most pin X coordinates, and find the lowst\n        ; Y coordinate of the left-most/right-most pins\n        xl = nil\n        xr = nil\n        yl = nil\n        yr = nil\n        foreach( term cvid->terminals\n            when( term~>pinCount != 1\n                dbClose(cvid)\n                error( \"Terminal %s does not have exactly one pin\" term~>name)\n            )\n            pin = car(term~>pins)\n            bbox = pin~>fig~>bBox\n            pin_x = round2((xCoord(car(bbox)) + xCoord(cadr(bbox))) / 2.0 / snap_dist)\n            pin_y = round2((yCoord(car(bbox)) + yCoord(cadr(bbox))) / 2.0 / 
snap_dist)\n            if( xl == nil then\n                xl = pin_x\n                xr = pin_x\n                yl = pin_y\n                yr = pin_y\n            else\n                cond( (pin_x < xl xl = pin_x yl = pin_y)\n                      (pin_x == xl yl = min(yl pin_y)))\n                cond( (pin_x > xr xr = pin_x yr = pin_y)\n                      (pin_x == xr yr = min(yr pin_y)))\n            )\n        )\n        when(xl == nil\n            ; default values if schematic has no terminals\n            ; this usually means you have a testbench schematic\n            xl = 0\n            yl = 0\n            xr = 10\n            yr = 0\n        )\n        list((xl * snap_dist):(yl * snap_dist) (xr * snap_dist):(yr * snap_dist))\n    )\n)\n\n; update pins of a symbol\n; pin_map is a list of two-element lists, first element is old pin name, second element is new pin name.\n; sympin is a 3-element list of strings. first element is the pin master library,\n; second element is the pin mater cell, and third element is the pin master cellview.\n; simulators is a list of simulator names for which termOrder should be updated.\n; Usually simulators = '(\"auLvs\" \"auCdl\" \"spectre\" \"hspiceD\")\nprocedure( update_symbol_pin(lib_name cell_name pin_map new_pins sympin simulators \"ttllll\")\n    let( (snap_dist cvid pin_master cur_term_name new_term_name term pin bbox pin_x pin_y pin_location pin_direction\n          label_location label_rel_location temp_new_term_name new_term new_port_order cell_obj bc\n          mpin_lib mpin_cell mpin_view pin_xy_info npin_xl npin_yl npin_xr npin_yr npin_name npin_type\n          modified_pins)\n\n        snap_dist = schGetEnv(\"schSnapSpacing\")\n        modified_pins = nil\n        mpin_lib = car(sympin)\n        mpin_cell = cadr(sympin)\n        mpin_view = caddr(sympin)\n        unless( pin_master = dbOpenCellViewByType(mpin_lib mpin_cell mpin_view nil \"r\")\n            error(\"Cannot open symbol pin cellview %s__%s 
(%s).\" mpin_lib mpin_cell mpin_view)\n        )\n        unless( cvid = dbOpenCellViewByType(lib_name cell_name \"symbol\" nil \"a\")\n            dbClose(pin_master)\n            error(\"Cannot open cellview %s__%s (symbol).\" lib_name cell_name)\n        )\n\n        ; get new pin locations before any pin addition/substraction.\n        pin_xy_info = get_new_pin_locations(cvid snap_dist)\n\n        ; modify existing pins\n        new_port_order = tconc(nil \"\")\n        foreach( p pin_map\n            cur_term_name = car(p)\n            new_term_name = cadr(p)\n            new_port_order = tconc(new_port_order new_term_name)\n            when( cur_term_name != new_term_name\n                modified_pins = 't\n                ; printf(\"%s %s\\n\" cur_term_name new_term_name)\n                unless( term = dbFindTermByName(cvid cur_term_name)\n                    dbClose(pin_master)\n                    dbReopen(cvid, \"r\")\n                    dbClose(cvid)\n                    error( \"Terminal %s not found.\" cur_term_name )\n                )\n                when( term~>pinCount != 1\n                    dbClose(pin_master)\n                    dbReopen(cvid, \"r\")\n                    dbClose(cvid)\n                    error( \"Terminal %s does not have exactly one pin.\" cur_term_name)\n                )\n                pin = car(term~>pins)\n\n                if( strlen(new_term_name) != 0 then\n                    ; rename pin\n                    bbox = pin~>fig~>bBox\n                    pin_x = (xCoord(car(bbox)) + xCoord(cadr(bbox))) / 2.0\n                    pin_y = (yCoord(car(bbox)) + yCoord(cadr(bbox))) / 2.0\n                    pin_location = round2(pin_x / snap_dist) * snap_dist:round2(pin_y / snap_dist) * snap_dist\n                    pin_direction = term~>direction\n\n                    ; change label\n                    prog( (label_orientation label_font label_font_size label_type label_text)\n                        foreach( 
label pin~>fig~>children\n                            when( label~>objType == \"label\"\n                                label_location = label~>xy\n                                label_orientation = label~>orient\n                                label_rel_location = label~>justify\n                                label_font = label~>font\n                                label_font_size = label~>height\n                                label_type = label~>labelType\n                                label_text = label~>theLabel\n                                when( label_text == cur_term_name\n                                    schCreateSymbolLabel(cvid label_location \"pin label\" new_term_name label_rel_location\n                                                         label_orientation label_font label_font_size label_type)\n                                    return('t)\n                                )\n                            )\n                        )\n                        return(nil)\n                    )\n\n                    dbDeleteObject(pin~>fig)\n                    dbDeleteObject(pin)\n\n                    ;create a temporary terminal with a unique name so we can change the number of bits without getting an error\n                    temp_new_term_name = get_unique_term_name(cvid new_term_name)\n                    schCreateSymbolPin(cvid pin_master temp_new_term_name pin_direction pin_location \"R0\" )\n\n                    new_term = dbFindTermByName(cvid temp_new_term_name )\n                    dbDeleteObject(term)\n                    new_term~>name = new_term_name\n                else\n                    ; remove pin\n                    dbDeleteObject(pin~>fig)\n                    dbDeleteObject(pin)\n                    dbDeleteObject(term)\n                )\n            )\n        )\n\n        ; add new pins\n        when( new_pins\n            modified_pins = 't\n            ; get location for new pins\n            npin_xl 
= xCoord(car(pin_xy_info))\n            npin_yl = yCoord(car(pin_xy_info)) - 2 * snap_dist\n            npin_xr = xCoord(cadr(pin_xy_info))\n            npin_yr = yCoord(cadr(pin_xy_info)) - 2 * snap_dist\n            foreach( npin_info new_pins\n                npin_name = car(npin_info)\n                npin_type = cadr(npin_info)\n\n                ; verify that this pin does not exist yet\n                when(dbFindTermByName(cvid npin_name)\n                    dbClose(pin_master)\n                    dbReopen(cvid, \"r\")\n                    dbClose(cvid)\n                    error( \"Terminal %s already exists\" npin_name)\n                )\n\n                ; update pin order\n                new_port_order = tconc(new_port_order npin_name)\n\n                ; get pin location based on pin type\n                if( equal(npin_type \"output\") then\n                    label_location = npin_xr:npin_yr\n                    label_rel_location = \"lowerLeft\"\n                    npin_yr = npin_yr - 2 * snap_dist\n                else\n                    label_location = npin_xl:npin_yl\n                    label_rel_location = \"lowerRight\"\n                    npin_yl = npin_yl - 2 * snap_dist\n                )\n\n                ; create label and pin\n                schCreateSymbolLabel(cvid label_location \"pin label\" npin_name label_rel_location\n                                     \"R0\" \"stick\" snap_dist \"normalLabel\")\n                schCreateSymbolPin(cvid pin_master npin_name npin_type label_location \"R0\")\n            )\n        )\n\n        dbClose(pin_master)\n\n        when( modified_pins\n            ; update pin order\n            new_port_order = cdar(new_port_order)\n            schEditPinOrder(cvid new_port_order 't)\n            dbSave(cvid)\n\n            ; update termOrder for each simulators\n            cell_obj = ddGetObj(lib_name cell_name nil nil nil \"r\")\n            unless( bc = cdfGetBaseCellCDF(cell_obj)\n     
           ddReleaseObj(cell_obj)\n                dbReopen(cvid, \"r\")\n                dbClose(cvid)\n                error(\"Cannot find CDF parameters for %s__%s.  Delete generated cell and try again\" lib_name cell_name)\n            )\n            foreach( simu simulators\n                get(bc->simInfo simu)->termOrder = new_port_order\n            )\n            unless( cdfSaveCDF(bc)\n                ddReleaseObj(cell_obj)\n                dbReopen(cvid, \"r\")\n                dbClose(cvid)\n                error(\"Cannot save termOrder CDF for %s__%s.\" lib_name cell_name)\n            )\n            ddReleaseObj(cell_obj)\n        )\n        ; opening schematic will open all symbols inside that schematic.\n        ; as the result, dbClose may not close this symbol view.  To get rid\n        ; of edit lock, we use dbReopen so even if dbClose fails the edit lock\n        ; will be gone.\n        dbReopen(cvid, \"r\")\n        dbClose(cvid)\n    )\n)\n\n; record an association list from pin name to pin location in units of snap distances.\n; the pin name is sorted alphabetically so we can use the equal function to test\n; for equality.\nprocedure( get_instance_pin_info(inst \"g\")\n    let( (snap_dist term_name pin_fig xval yval inst_term_xy ans)\n        ans = nil\n        snap_dist = schGetEnv(\"schSnapSpacing\")\n        foreach( term inst->master->terminals\n            term_name = term~>name\n            ; get terminal coordinate in symbol\n            pin_fig = car(term~>pins)~>fig\n            bbox = pin_fig~>bBox\n            xval = (xCoord(car(bbox)) + xCoord(cadr(bbox))) / 2.0\n            yval = (yCoord(car(bbox)) + yCoord(cadr(bbox))) / 2.0\n            ; quantize to schematic snap spacing to avoid floating point rounding error.\n            inst_term_xy = round2(xval / snap_dist):round2(yval / snap_dist)\n            ans = cons(list(term_name inst_term_xy) ans)\n        )\n        sortcar(ans nil)\n    )\n)\n\n; get all the wire objects 
connected to terminals of the given instance.\n; we assume each terminal has exactly one pin with 1 wire connected, with a\n; single label on the wire.  The wire doesn't connect to anything else.\n; returns an association list from terminal name to a list of net name and wire figure object.\nprocedure( get_instance_terminal_wires(sch inst \"gg\")\n    let( (snap_dist term_name pin_fig xval yval inst_term_xy net_name ans net_map)\n        ans = nil\n        net_map = nil\n        snap_dist = schGetEnv(\"schSnapSpacing\")\n        foreach( inst_term inst~>instTerms\n            term_name = inst_term~>name\n            ; printf(\"terminal name: %s\\n\" term_name)\n            when( inst_term~>term~>pinCount != 1\n                dbClose(sch)\n                error(\"Terminal %s must have exactly one pin.\" term_name)\n            )\n            unless( pin_fig = car(inst_term~>term~>pins)~>fig\n                dbClose(sch)\n                error(\"Cannot find pin figure for terminal %s\" term_name)\n            )\n            ; get instance terminal coordinate in schematic\n            bbox = dbTransformBBox(pin_fig~>bBox inst~>transform)\n            ; printf(\"terminal pin fig bbox: %A\\n\" bbox)\n            xval = xCoord(car(bbox)) + (xCoord(cadr(bbox)) - xCoord(car(bbox))) / 2.0\n            yval = yCoord(car(bbox)) + (yCoord(cadr(bbox)) - yCoord(car(bbox))) / 2.0\n            ; quantize to schematic snap spacing to avoid floating point rounding error.\n            inst_term_xy = round2(xval / snap_dist) * snap_dist:round2(yval / snap_dist) * snap_dist\n            net_name = inst_term~>net~>name\n            net_map = cons(list(term_name net_name) net_map)\n            ; printf(\"terminal pin x/y: %A\\n\" inst_term_xy)\n            foreach( fig inst_term~>net~>figs\n                points = fig~>points\n                ; printf(\"figure points: %A\\n\" points)\n                when( member(inst_term_xy points)\n                    when( length(points) != 2\n     
                   error(\"pin for terminal %s must be connected to a single wire with label\" term_name)\n                    )\n                    ; printf(\"adding figure for terminal %s\\n\" term_name)\n                    ans = cons(list(term_name fig) ans)\n                )\n            )\n        )\n        list(ans net_map)\n    )\n)\n\n; Modify the instance terminal connections of the given instance.\n; we assume each terminal to modify has at most 1 wire connected,\n; if it exists, the wire connects to nothing else, and it has a label.\n; In this way, this function just have to change the label text.\n;\n; if wire_list is not empty, then that means each terminal has exactly one\n; wire connected.  This function will update the label on the wires according\n; to term_mapping.\n;\n; if wire_list is empty, then that means no wires are connected to terminals.\n; this function will attach labels directly to each terminal.  The labels are\n; determined first from term_mapping, then from net_map\n;\n; sch is the schematic database object.  
Must be opened in append/write mode.\n; inst is the instance object to modify.\n; term_mapping is a list of key-value pairs, where keys are old net names,\n; and values are new net names.\nprocedure( modify_instance_terminal(sch inst wire_list net_map term_mapping \"gglll\")\n    let( (snap_dist key_val old_name new_name fig points mid_point new_wire inst_term inst_pin\n          bbox xval yval term_map_final db_term)\n        ; get schematic snap distance spacing.\n        snap_dist = schGetEnv(\"schSnapSpacing\")\n        if( wire_list then\n            foreach( wire_info wire_list\n                old_name = car(wire_info)\n                when(key_val = assoc(old_name term_mapping)\n                    new_name = cadr(key_val)\n                    fig = cadr(wire_info)\n                    points = fig~>points\n                    mid_point = foreach(mapcar (c1 c2) car(points) cadr(points) (c1 + c2) / 2.0)\n                    ; delete old wire, then add wire back with new label.\n                    schDelete(fig)\n                    new_wire = car(schCreateWire(sch \"draw\" \"full\" points snap_dist snap_dist 0))\n                    schCreateWireLabel(sch new_wire mid_point new_name \"lowerCenter\" \"R0\" \"stick\" 0.0625 nil)\n                )\n            )\n            't\n        else\n            ; combine net_map and term_mapping\n            term_map_final = copy(term_mapping)\n            foreach( net_info net_map\n                old_name = car(net_info)\n                unless( assoc(old_name term_map_final)\n                    ; add net mapping only if it's not in term_mapping\n                    term_map_final = cons(net_info term_map_final)\n                )\n            )\n            foreach( net_info term_map_final\n                old_name = car(net_info)\n                new_name = cadr(net_info)\n\n                when(db_term = dbFindTermByName(inst->master old_name)\n                    ; only create terminal that's present in the 
current master\n                    inst_term = dbCreateInstTerm(nil inst db_term)\n                    inst_pin = car(inst_term~>term~>pins)~>fig\n                    bbox = dbTransformBBox(inst_pin~>bBox inst~>transform)\n                    xval = (xCoord(car(bbox)) + xCoord(cadr(bbox))) / 2.0\n                    yval = (yCoord(car(bbox)) + yCoord(cadr(bbox))) / 2.0\n                    xval = round2(xval / snap_dist) * snap_dist\n                    yval = round2(yval / snap_dist) * snap_dist\n\n                    new_wire = car(schCreateWire(sch \"draw\" \"full\" list(xval-snap_dist:yval-snap_dist xval:yval)\n                                   snap_dist snap_dist 0))\n                    schCreateWireLabel(sch new_wire xval:yval new_name \"lowerCenter\" \"R0\" \"stick\" 0.0625 nil)\n                )\n            )\n            't\n        )\n    )\n)\n\n; Perform check-and-save on the given schematic database object, then close it.\nprocedure( check_and_save_schematic(sch \"g\")\n    let( (errs)\n        schSetEnv( \"checkHierSave\" 't)\n        schSetEnv( \"saveAction\" \"Save\")\n        errs = schCheckHier(sch \"schematic symbol\" \"\")\n        foreach( ex errs\n            warn( \"%s__%s (%s) has %d errors.\" car(ex)~>lib~>name car(ex)~>cellName car(ex)~>viewName cadr(ex))\n        )\n        ; make sure all edit locks are gone by reopening in read mode\n        dbReopen(sch, \"r\")\n        dbClose(sch)\n    )\n)\n\n\n; modify a schematic cell.  Used to convert copied template cells into concrete instantiation.\n;\n; inst_list is an association list of (inst_name, rinst_list) pairs.  Where:\n;\n; inst_name : name of the instance in the template cell.\n; rinst_list : a list of rinsts, which are instances to replace the original instance by.\n;              If this list is empty, the original instance should be deleted.  
If the list\n;              has more than one element, we should array the original instance.\n;\n; Each rinst is a disembodied property lists, with the properties:\n;\n; rinst->name : the name of this rinst.\n; rinst->lib_name : the instance master library.\n; rinst->cell_name : the instance master cell.\n; rinst->params : an association list of the CDF params of this rinst.  The values are always string.\n; rinst->term_mapping : an association list of the modified terminal connections of this rinst.\n;                       if no connections are changed, this list should be empty.\n;\n; (You can read more about disembodied property lists and association list in the skill\n; language user guide).\n;\n; For each instance, this function does the following:\n; 1. Find the instance with the given name.\n; 2. If rinst_list is nil, delete this instance.\n; 3. If rinst_list has exactly one element:\n;    i.   rename the instance name to rinst's name.\n;    ii.  change the instance master of the instance.\n;    iii. change the CDF parameters (this should only happen with BAG primitives).\n;    iv.  change the port connections of this instance.\n; 4. 
If rinst_list has more than one element, for each additional element,\n;    copy the original instance and perform step 3 on that instance.\n;\n; This procedure allows one to delete or array any instances in the schematic template.\nprocedure( modify_schematic_content(sch_cv inst_list \"gl\")\n    let( (inst_obj inst_name rinst_list rinst_len cur_inst wire_list net_map par_val xl xr transform\n          snap_dist errmsg pin_info tmp_result)\n        snap_dist = schGetEnv(\"schSnapSpacing\")\n        foreach( inst inst_list\n            inst_name = car(inst)\n            unless( inst_obj = dbFindAnyInstByName(sch_cv inst_name)\n                dbClose(sch_cv)\n                error( \"Cannot find instance %s\" inst_name )\n            )\n            rinst_list = cadr(inst)\n            rinst_len = length(rinst_list)\n            last_inst = nil\n            if( rinst_len == 0 then\n                ; no instances to replace by, delete.\n                wire_list = car(get_instance_terminal_wires(sch_cv inst_obj))\n                ; delete wires connected to instance\n                foreach( wire_info wire_list\n                    schDelete(cadr(wire_info))\n                )\n                ; delete instance\n                dbDeleteObject(inst_obj)\n            else\n                cur_inst = nil\n                pin_info = nil\n                foreach( rinst rinst_list\n                    if( !cur_inst then\n                        cur_inst = inst_obj\n                        ; printf(\"inst %s lib = %s, cell = %s\\n\" inst_name inst_obj->master->libName inst_obj->master->cellName)\n                        tmp_result = get_instance_terminal_wires(sch_cv cur_inst)\n                        net_map = cadr(tmp_result)\n                        wire_list = car(tmp_result)\n                        pin_info = get_instance_pin_info(cur_inst)\n                        ; printf(\"%s wire_list: %A\\n\" inst_name wire_list)\n                        ; figure out bounding box 
for potential future array\n                        ; printf(\"instance %s bbox: %A\\n\" cur_inst~>name cur_inst~>bBox)\n                        xl = xCoord(car(cur_inst~>bBox))\n                        xr = xCoord(cadr(cur_inst~>bBox))\n                        foreach( wire_info wire_list\n                            ; printf(\"instance %s wire: %A %A\\n\" cur_inst~>name xCoord(car(cadr(wire_info)~>bBox)) xCoord(cadr(cadr(wire_info)~>bBox)))\n                            xl = min(xl xCoord(car(cadr(wire_info)~>bBox)))\n                            xr = max(xr xCoord(cadr(cadr(wire_info)~>bBox)))\n                        )\n                        transform = list(round2((xr - xl + snap_dist) / snap_dist) * snap_dist:0 \"R0\" 1.0)\n                        ; printf(\"instance %s transform: %A\\n\" cur_inst~>name transform)\n                    else\n                        ; more than 1 rinst, copy cur_inst, do not copy wires\n                        wire_list = nil\n                        ; copy instance\n                        cur_inst = dbCopyFig(cur_inst nil transform)\n                    )\n                    ; change instance name and master\n                    when(cur_inst->name != rinst->name\n                        cur_inst->name = rinst->name\n                    )\n                    schReplaceProperty(list(cur_inst) \"master\" sprintf(nil \"%s %s %s\" rinst->lib_name\n                                                                       rinst->cell_name cur_inst->viewName))\n                    ; set parameters\n                    foreach( cdf_par cdfGetInstCDF(cur_inst)~>parameters\n                        par_val = cadr(assoc(cdf_par->name rinst->params))\n                        ; change CDF parameter value only if specified in given parameters\n                        when( par_val != nil\n                            cdf_par->value = par_val\n                        )\n                    )\n                    when( wire_list\n               
         ; if wire_list is not empty, check that the pins match.  If so, keep wires around,\n                        ; otherwise, delete wires\n                        unless( equal(pin_info get_instance_pin_info(cur_inst))\n                            ; delete wires connected to instance\n                            foreach( wire_info wire_list\n                                schDelete(cadr(wire_info))\n                            )\n                            wire_list = nil\n                        )\n                    )\n                    ; modify connections, keeping old wires around\n                    ; printf(\"instance %s wire_list: %A net_map: %A term_map: %A\\n\" cur_inst~>name wire_list net_map rinst->term_mapping)\n                    modify_instance_terminal(sch_cv cur_inst wire_list net_map rinst->term_mapping)\n                )\n            )\n        )\n    )\n)\n\n; given a copied template cell, modify it to a concrete schematic.\nprocedure( convert_template_cells(lib_name cell_name pin_map new_pins inst_list sympin ipin opin iopin simulators)\n    let( (sym_cv sch)\n        ; update symbol view first.\n        if( sym_cv = dbOpenCellViewByType(lib_name cell_name \"symbol\" nil \"r\") then\n            printf(\"*INFO* Updating %s__%s symbol pins.\\n\" lib_name cell_name)\n            update_symbol_pin(lib_name cell_name pin_map new_pins sympin simulators)\n        else\n            warn(\"Did not find symbol for %s__%s.  Skipping.  
Is it testbench?\" lib_name cell_name)\n        )\n\n        ; attempt to open schematic in append mode\n        unless( sch = dbOpenCellViewByType(lib_name cell_name \"schematic\" nil \"a\")\n            error(\"Cannot open %s__%s (schematic) in append mode.\" lib_name cell_name)\n        )\n        ; update schematic content\n        printf(\"*INFO* Updating %s__%s instances and connections.\\n\" lib_name cell_name)\n        modify_schematic_content(sch inst_list)\n        ; update schematic pins\n        printf(\"*INFO* Updating %s__%s schematic pins.\\n\" lib_name cell_name)\n        update_schematic_pin(sch pin_map new_pins ipin opin iopin)\n        check_and_save_schematic(sch)\n    )\n)\n\n; create concrete schematics\nprocedure( create_concrete_schematic( lib_name tech_lib lib_path temp_file change_file\n                                      sympin ipin opin iopin simulators copy \"tttttlllllg\" )\n    let( (template_list change_list cell_name pin_map inst_list)\n        printf(\"*INFO* Reading template and change list from file\\n\")\n        template_list = parse_data_from_file( temp_file )\n        change_list = parse_data_from_file( change_file )\n        when( copy\n            printf(\"*INFO* Creating library: %s\\n\" lib_name)\n            create_or_erase_library( lib_name tech_lib lib_path nil )\n            printf(\"*INFO* Copying templates to library: %s\\n\" lib_name)\n            copy_templates_to_library( lib_name template_list )\n        )\n        foreach( change change_list\n            cell_name = change->name\n            pin_map = change->pin_map\n            new_pins = change->new_pins\n            inst_list = change->inst_list\n            printf(\"*INFO* Updating cell %s__%s\\n\" lib_name cell_name)\n            convert_template_cells( lib_name cell_name pin_map new_pins inst_list\n                                    sympin ipin opin iopin simulators )\n        )\n        't\n    )\n)\n\n; create a new layout view then instantiate a 
single pcell instance.\n; this method also copy all the labels in the pcell top level.  In this way LVS/PEX will\n; work correctly.\n; params is a list of (variable_name type_string value) lists.\n; pin_mapping is a list of (old_pin new_pin) lists.\nprocedure( create_layout_with_pcell(lib_name cell_name view_name inst_lib inst_cell params_f pin_mapping_f \"ttttttt\")\n    let( (lay_cv inst_master inst inst_shapes label_location label_orientation label_lpp\n          label_just label_font label_height label_type label_text params pin_mapping)\n        unless( lay_cv = dbOpenCellViewByType(lib_name cell_name view_name \"maskLayout\" \"w\")\n            error(\"Cannot open cellview %s__%s (%s).\" lib_name cell_name view_name)\n        )\n        unless( inst_master = dbOpenCellViewByType(inst_lib inst_cell \"layout\" \"maskLayout\" \"r\")\n            dbClose(lay_cv)\n            error(\"Cannot open cellview %s__%s (layout).\" inst_lib inst_cell)\n        )\n\n        params = parse_data_from_file(params_f)\n        pin_mapping = parse_data_from_file(pin_mapping_f)\n\n        inst = dbCreateParamInst(lay_cv inst_master \"XTOP\" '(0 0) \"R0\" 1 params)\n        inst_shapes = inst~>master~>shapes\n\n        foreach(shape inst_shapes\n            when( shape->objType == \"label\"\n                label_location = shape~>xy\n                label_orientation = shape~>orient\n                label_lpp = shape~>lpp\n                label_just = shape~>justify\n                label_font = shape~>font\n                label_height = shape~>height\n                label_type = shape~>labelType\n                label_text = shape~>theLabel\n                when( cadr(assoc(label_text pin_mapping))\n                    label_text = cadr(assoc(label_text pin_mapping))\n                )\n                dbCreateLabel(lay_cv label_lpp label_location label_text label_just label_orientation label_font label_height )\n            )\n        )\n\n        dbClose(inst_master)\n        
dbSave(lay_cv)\n        dbClose(lay_cv)\n    )\n)\n\n; helper for creating a path segment\nprocedure( create_path_seg_helper(cv lay p0 p1 width start_s end_s)\n    let( (diag_ext info_list bext eext)\n        if( and(car(p0) != car(p1) cadr(p0) != cadr(p1)) then\n            diag_ext = width / 2\n            width = width * sqrt(2)\n        else\n            diag_ext = width * sqrt(2) / 2\n        )\n\n        bext = width/2\n        eext = width/2\n        if( start_s == \"round\" then\n            start_s = \"custom\"\n        else\n            when( start_s == \"truncate\"\n                bext = 0\n            )\n        )\n        if( end_s == \"round\" then\n            end_s = \"custom\"\n        else\n            when( end_s == \"truncate\"\n                eext = 0\n            )\n        )\n        info_list = list(bext eext list(diag_ext diag_ext width/2 diag_ext diag_ext width/2))\n        dbCreatePathSeg(cv lay p0 p1 width start_s end_s info_list)\n    )\n)\n\n\n; helper for creating a path\nprocedure( create_path_helper( cv path )\n    let( (lay width points estyle jstyle p0 p1 plen idx start_s end_s)\n        lay = path->layer\n        width = path->width\n        points = path->points\n        estyle = path->end_style\n        jstyle = path->join_style\n        p0 = nil\n        plen = length(points)\n        idx = 0\n        foreach( cur_point points\n            p1 = cur_point\n            when( idx > 0\n                if( idx == 1 then\n                    start_s = estyle\n                else\n                    start_s = jstyle\n                )\n                if( idx == plen - 1 then\n                    end_s = estyle\n                else\n                    end_s = jstyle\n                )\n                create_path_seg_helper(cv lay p0 p1 width start_s end_s)\n            )\n            p0 = p1\n            idx = idx + 1\n        )\n    )\n)\n\n\n; helper for creating a single layout view\nprocedure( create_layout_helper( cv 
tech_file inst_list rect_list via_list pin_list path_list\n                                 blockage_list boundary_list polygon_list \"ggllllllll\" )\n    let( (inst_cv obj via_def via_enc1 via_enc2 enc1 enc2 off1 off2 via_params make_pin_rect\n          pin_bb pin_w pin_h pin_xc pin_yc pin_orient label_h param_order orig_shape arr_dx arr_dy)\n\n        ; create instances\n        foreach( inst inst_list\n            if( inst_cv = dbOpenCellViewByType( inst->lib inst->cell inst->view nil \"r\" ) then\n\n                if( and( inst->num_rows==1 inst->num_cols==1) then\n                    if( inst->params != nil then\n                        ; create pcell instance\n                        obj = dbCreateParamInst(cv inst_cv inst->name inst->loc inst->orient 1 inst->params)\n                        ; execute parameter callbacks\n                        when( obj\n                            if( inst->param_order != nil then\n                                param_order = inst->param_order\n                            else\n                                param_order = mapcar( lambda( (x) car(x) ) inst->params )\n                            )\n                            CCSinvokeCdfCallbacks(obj ?order param_order)\n                        )\n                    else\n                        obj = dbCreateInst(cv inst_cv inst->name inst->loc inst->orient)\n                    )\n                else\n                    if( inst->params != nil then\n                        ; create pcell mosaic\n                        obj = dbCreateParamSimpleMosaic(cv inst_cv inst->name inst->loc inst->orient\n                                                        inst->num_rows inst->num_cols inst->sp_rows inst->sp_cols\n                                                        inst->params)\n                        ; execute parameter callbacks\n                        when( obj\n                            if( inst->param_order != nil then\n                                
param_order = inst->param_order\n                            else\n                                param_order = mapcar( lambda( (x) car(x) ) inst->params )\n                            )\n                            CCSinvokeCdfCallbacks(obj ?order param_order)\n                        )\n                    else\n                        obj = dbCreateSimpleMosaic(cv inst_cv inst->name inst->loc inst->orient\n                                                   inst->num_rows inst->num_cols inst->sp_rows inst->sp_cols)\n                    )\n                )\n                unless( obj\n                    warn(\"Error creating instance %s of %s__%s (%s).  Skipping.\" inst->name inst->lib inst->cell inst->view)\n                )\n\n            else\n                warn(\"Cannot find instance %s__%s (%s).  Skipping.\" inst->lib inst->cell inst->view)\n            )\n        )\n\n        ; create rectangles\n        foreach( rect rect_list\n            orig_shape = dbCreateRect(cv rect->layer rect->bbox)\n            if( not(orig_shape) then\n                warn(\"Error creating rectangle of layer %A.  
Skipping.\" rect->layer)\n            else\n                when( rect->arr_nx != nil\n                    for(icol 2 rect->arr_nx\n                        arr_dx = rect->arr_spx * (icol - 1)\n                        for(irow 1 rect->arr_ny\n                            arr_dy = rect->arr_spy * (irow - 1)\n                            dbCopyFig(orig_shape nil list(arr_dx:arr_dy \"R0\" 1))\n                        )\n                    )\n                    for(irow 2 rect->arr_ny\n                        arr_dy = rect->arr_spy * (irow - 1)\n                        dbCopyFig(orig_shape nil list(0:arr_dy \"R0\" 1))\n                    )\n                )\n            )\n        )\n\n        ; create paths\n        foreach( path path_list\n            create_path_helper(cv path)\n        )\n\n        ; create polygons\n        foreach( poly polygon_list\n            dbCreatePolygon(cv poly->layer poly->points)\n        )\n\n        ; create blockages\n        foreach( block blockage_list\n            if( block->btype == \"placement\" then\n                dbCreateAreaBlockage(cv block->points)\n            else\n                dbCreateLayerBlockage(cv block->layer block->btype block->points)\n            )\n        )\n\n        ; create boundaries\n        foreach( bound boundary_list\n            cond( (bound->btype == \"PR\"\n                   dbCreatePRBoundary(cv bound->points))\n                  (bound->btype == \"snap\"\n                   dbCreateSnapBoundary(cv bound->points))\n                  (bound->btype == \"area\"\n                   dbCreateAreaBoundary(cv bound->points))\n                  ('t\n                   warn(\"Unknown boundary type %s.  
Skipping.\" bound->btype))\n            )\n        )\n\n        ; create vias\n        foreach( via via_list\n            if( via_def = techFindViaDefByName(tech_file via->id) then\n                ; compute via parameter list\n                via_enc1 = via->enc1\n                via_enc2 = via->enc2\n                enc1 = list( (car(via_enc1) + cadr(via_enc1)) / 2.0\n                             (caddr(via_enc1) + cadr(cddr(via_enc1))) / 2.0 )\n                enc2 = list( (car(via_enc2) + cadr(via_enc2)) / 2.0\n                             (caddr(via_enc2) + cadr(cddr(via_enc2))) / 2.0 )\n                off1 = list( (cadr(via_enc1) - car(via_enc1)) / 2.0\n                             (caddr(via_enc1) - cadr(cddr(via_enc1))) / 2.0 )\n                off2 = list( (cadr(via_enc2) - car(via_enc2)) / 2.0\n                             (caddr(via_enc2) - cadr(cddr(via_enc2))) / 2.0 )\n\n                via_params = list( list(\"cutRows\" via->num_rows)\n                                   list(\"cutColumns\" via->num_cols)\n                                   list(\"cutSpacing\" list(via->sp_cols via->sp_rows))\n                                   list(\"layer1Enc\" enc1)\n                                   list(\"layer2Enc\" enc2)\n                                   list(\"layer1Offset\" off1)\n                                   list(\"layer2Offset\" off2) )\n\n                ; if via width and height given, add to via_params\n                when( via->cut_width != nil\n                    via_params = cons( list(\"cutWidth\" via->cut_width) via_params)\n                )\n                when( via->cut_height != nil\n                    via_params = cons( list(\"cutHeight\" via->cut_height) via_params)\n                )\n\n                ; create actual via\n                orig_shape = dbCreateVia(cv via_def via->loc via->orient via_params)\n                if( not(orig_shape) then\n                    warn(\"Error creating via %s.  
Skipping.\" via->id)\n                else\n                    when( via->arr_nx != nil\n                        for(icol 2 via->arr_nx\n                            arr_dx = via->arr_spx * (icol - 1)\n                            for(irow 1 via->arr_ny\n                                arr_dy = via->arr_spy * (irow - 1)\n                                dbCopyFig(orig_shape nil list(arr_dx:arr_dy \"R0\" 1))\n                            )\n                        )\n                        for(irow 2 via->arr_ny\n                            arr_dy = via->arr_spy * (irow - 1)\n                            dbCopyFig(orig_shape nil list(0:arr_dy \"R0\" 1))\n                        )\n                    )\n                )\n            else\n                warn(\"Via %s not found.  Skipping.\" via->id)\n            )\n        )\n\n        ; create pins\n        foreach( pin pin_list\n            pin_bb = pin->bbox\n            pin_w = caadr(pin_bb) - caar(pin_bb)\n            pin_h = cadr(cadr(pin_bb)) - cadr(car(pin_bb))\n            pin_xc = (caar(pin_bb) + caadr(pin_bb)) / 2.0\n            pin_yc = (cadr(car(pin_bb)) + cadr(cadr(pin_bb))) / 2.0\n\n            if( pin_w >= pin_h then\n                pin_orient = \"R0\"\n                label_h = pin_h\n            else\n                pin_orient = \"R90\"\n                label_h = pin_w\n            )\n\n            ; get make_pin_rect, true if both net_name and pin_name are non-empty\n            make_pin_rect = pin->net_name != \"\" && pin->pin_name != \"\"\n            when( pin->make_rect != nil\n                make_pin_rect = pin->make_rect\n            )\n            ; printf(\"make_pin_rect: %A\\n\" make_pin_rect)\n            ; create pin object only if make_pin_rect is True.\n            when( make_pin_rect != 0 && make_pin_rect != nil\n                ; printf(\"making pin.\\n\")\n                dbCreatePin( dbMakeNet(cv pin->net_name) dbCreateRect(cv pin->layer pin_bb) pin->pin_name )\n            )\n  
          ; printf(\"%A %A %A %A\\n\" pin->label pin->layer pin_xc pin_yc)\n            dbCreateLabel( cv pin->layer list(pin_xc pin_yc) pin->label \"centerCenter\" pin_orient \"roman\" label_h )\n        )\n    )\n)\n\n; create a new layout view with the given geometries\n; inst_f, rect_f, via_f, and pin_f are files containing list of disembodied property lists.\nprocedure( create_layout( lib_name view_name via_tech layout_f \"ttt\" )\n    let( (tech_file layout_info cell_name inst_list rect_list via_list pin_list\n          path_list blockage_list boundary_list polygon_list cv)\n\n        unless( tech_file = techGetTechFile(ddGetObj(via_tech))\n            error(\"Via technology file %s not found.\" via_tech)\n        )\n\n        layout_info = parse_data_from_file(layout_f)\n        foreach( info layout_info\n            cell_name = nthelem(1 info)\n            inst_list = nthelem(2 info)\n            rect_list = nthelem(3 info)\n            via_list = nthelem(4 info)\n            pin_list = nthelem(5 info)\n            path_list = nthelem(6 info)\n            blockage_list = nthelem(7 info)\n            boundary_list = nthelem(8 info)\n            polygon_list = nthelem(9 info)\n\n            unless( cv = dbOpenCellViewByType( lib_name cell_name view_name \"maskLayout\" \"w\" )\n                error(\"Cannot create new layout cell %s__%s (%s).\" lib_name cell_name view_name)\n            )\n\n            printf(\"Creating %s__%s (%s)\\n\" lib_name cell_name view_name)\n            create_layout_helper(cv tech_file inst_list rect_list via_list pin_list path_list\n                                 blockage_list boundary_list polygon_list)\n\n            dbSave(cv)\n            dbClose(cv)\n        )\n\n        t\n    )\n)\n\n; release write locks from all the given cellviews\nprocedure( release_write_locks( lib_name cell_view_list_f \"tt\" )\n    let( (cell_view_list lib_obj cv)\n        cell_view_list = parse_data_from_file(cell_view_list_f)\n        when( 
lib_obj = ddGetObj(lib_name nil nil nil nil \"r\")\n            foreach( info cell_view_list\n                when( cv = dbFindOpenCellView( lib_obj car(info) cadr(info) )\n                    dbReopen(cv, \"r\")\n                    dbClose(cv)\n                )\n            )\n            ddReleaseObj(lib_obj)\n        )\n        t\n    )\n)\n\n\n;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;\n;;  Simulation/Testbench related functions  ;;\n;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;\n\n\n; set an entry in an association list\n; returns the modified association list.\nprocedure( set_assoc_list(mylist mykey myval)\n    let( (tmp)\n        when( tmp = assoc(mykey mylist)\n            ; print(\"replacing\")\n            rplacd(tmp list(myval))\n        )\n    )\n    mylist\n)\n\n; Copy the schematic of a testbench, and replace the DUT instance.\n;\n; This procedure copies the schematic of a testbench to a new library and cell, then finds all\n; instances with the name prefix \"XDUT\", then change their instance master to dut_lib and dut_cell.\n;\nprocedure( copy_testbench(master_lib master_cell targ_lib\n                          dut_lib dut_cell tech_lib new_lib_path \"ttttttt\")\n    let( (tlib_obj sch replace_count inst_prefix new_master)\n        inst_prefix = \"XDUT\"\n\n        printf(\"Copying testbench %s__%s to %s__%s\\n\" master_lib master_cell targ_lib master_cell)\n\n        ; create target library if does not exist\n        unless( tlib_obj = ddGetObj(targ_lib nil nil nil nil \"r\")\n            when( and(new_lib_path (new_lib_path != \".\"))\n                new_lib_path = strcat(new_lib_path \"/\" lib_name)\n            )\n            tlib_obj = ddCreateLib(targ_lib new_lib_path)\n            ; attach technology file\n            techBindTechFile(tlib_obj tech_lib)\n        )\n\n        ; copy testbench to new library\n        src_gdm = gdmCreateSpecList()\n        gdmAddSpecToSpecList(gdmCreateSpec(master_lib master_cell nil nil \"CDBA\") 
src_gdm)\n        targ_gdm = gdmCreateSpecList()\n        gdmAddSpecToSpecList(gdmCreateSpec(targ_lib master_cell nil nil \"CDBA\") targ_gdm)\n        ccpCopy(src_gdm targ_gdm 't 'CCP_EXPAND_COMANAGED)\n\n        ; open copied schematic\n        unless( sch = dbOpenCellViewByType(tlib_obj master_cell \"schematic\" nil \"a\")\n            ddReleaseObj(tlib_obj)\n            error(\"Cannot open testbench schematic %s__%s\" targ_lib master_cell)\n        )\n\n        ; replace instances\n        replace_count = 0\n        sprintf(new_master \"%s %s symbol\" dut_lib dut_cell)\n        foreach( inst sch~>instances\n           when( strncmp( inst~>name inst_prefix strlen(inst_prefix) ) == 0\n               replace_count = replace_count + 1\n               schReplaceProperty(list(inst) \"master\" new_master)\n           )\n        )\n\n        ; save and close resources\n        check_and_save_schematic(sch)\n        ddReleaseObj(tlib_obj)\n\n        ; error if nothing is replaced\n        when( replace_count == 0\n            error(\"Cannot find any instances in %s__%s with name prefix %s\" targ_lib master_cell inst_prefix)\n        )\n        't\n    )\n)\n\n; opens an adexl session.  
Returns a list of session name and setup database handle.\nprocedure( open_adexl_session(tb_lib tb_cell tb_view session_name mode \"ttttt\")\n    let( (session sdb)\n        unless( session = axlCreateSession(session_name)\n            error(\"Cannot create temporary adexl session: %s\" session_name)\n        )\n        unless( sdb = axlSetMainSetupDBLCV(session tb_lib tb_cell tb_view ?mode mode)\n            axlCloseSession(session)\n            error(\"Cannot load adexl database from %s__%s (%s)\" tb_lib tb_cell tb_view)\n        )\n        list(session sdb)\n    )\n)\n\n; Enables only the given corners in the simulation setup database.\nprocedure( enable_adexl_corners( sdb corner_list env_param_list \"gll\")\n    let( (env_name par_val_list corner)\n        foreach(cur_name cadr(axlGetCorners(sdb))\n            axlSetEnabled( axlGetCorner(sdb cur_name) member(cur_name corner_list) )\n        )\n        foreach(env_par_obj env_param_list\n            env_name = car(env_par_obj)\n            par_val_list = cadr(env_par_obj)\n            corner = axlGetCorner(sdb env_name)\n            foreach(par_val par_val_list\n                axlPutVar(corner car(par_val) cadr(par_val))\n            )\n        )\n    )\n)\n\n; Set testbench parameters\n; val_list is an association list from variable names to variable values as string, which\n; could be a constant value or a parametric sweep string\nprocedure( set_adexl_parameters(sdb par_val_list \"gl\")\n    foreach( var_spec par_val_list\n        axlPutVar(sdb car(var_spec) cadr(var_spec))\n    )\n)\n\n; Create a new config view for a testbench.\n;\n; lib_name : testbench library name.\n; cell_name : testbench cell name.\n; view_name : name of the config view (a testbench can have multiple config views)\n; libs : a string of global libraries, separated by spaces.\n; views : a string of cellviews to use, separated by spaces.\n; stops : a string of cellviews to stop at, separated by spaces.\nprocedure( 
create_config_view(lib_name cell_name view_name libs views stops \"tttttt\")\n    let( (conf conf_bag)\n        printf(\"Creating config view %s__%s (%s)\\n\" lib_name cell_name view_name)\n\n        unless( conf = hdbOpen(lib_name cell_name view_name \"w\")\n            error(\"Cannot open config view %s__%s (%s).\" lib_name cell_name view_name)\n        )\n        hdbSetTopCellViewName(conf lib_name cell_name \"schematic\")\n        hdbSetDefaultLibListString(conf libs)\n        hdbSetDefaultViewListString(conf views)\n        hdbSetDefaultStopListString(conf stops)\n        hdbSaveAs(conf lib_name cell_name view_name)\n\n        ; close configuration\n        conf_bag = hdbCreateConfigBag()\n        hdbAddConfigToBag(conf_bag conf)\n        hdbCloseConfigsInBag(conf_bag)\n    )\n)\n\n; edit the config view of a testbench.  Use to control whether we're simulating with\n; schematic or post-extraction.\n;\n; lib_name : testbench library name.\n; cell_name : testbench cell name.\n; view_name : name of the config view (a testbench can have multiple config views)\n; conf_list : a list of (<lib>, <cell>, <view>) configurations.  
Where each entry\n;             means that view <view> should be used for the cell <cell> in library <lib>.\nprocedure( edit_config_view(lib_name cell_name view_name conf_list \"tttl\")\n    let( (conf lib cell view conf_bag netlist_list)\n        unless( conf = hdbOpen(lib_name cell_name view_name \"a\")\n            error(\"Cannot open config view %s__%s (%s).\" lib_name cell_name view_name)\n        )\n        netlist_list = '()\n        foreach( cell_config conf_list\n            lib = car(cell_config)\n            cell = cadr(cell_config)\n            view = caddr(cell_config)\n            if( view == \"netlist\" then\n                ; set to use extracted netlist\n                netlist_list = cons(list(lib cell) netlist_list)\n            else\n                ; set to use extracted cellview\n                hdbSetObjBindRule(conf list(list(lib cell nil nil))\n                                  list('hdbcBindingRule list(nil nil view)))\n            )\n        )\n        hdbSaveAs(conf lib_name cell_name view_name)\n\n        ; close configuration\n        conf_bag = hdbCreateConfigBag()\n        hdbAddConfigToBag(conf_bag conf)\n        hdbCloseConfigsInBag(conf_bag)\n\n        ; update netlist source files\n        edit_config_source_files(lib_name cell_name view_name netlist_list)\n    )\n)\n\n; HACKERMAN FUNCTION:\n; so as usual, cadence is so terrible they don't have skill API to set source files.\n; instead, spice/spectre source files are defined in a secret ASCII prop.cfg file.\n; this hacky method will create the right prop.cfg file for you.\nprocedure( edit_config_source_files(lib_name cell_name view_name netlist_list \"tttl\")\n    let( (p lib_dir cell_lib_dir)\n        lib_dir = get_lib_directory(lib_name)\n        p = outfile( sprintf(nil \"%s/%s/%s/%s\" lib_dir cell_name view_name \"prop.cfg\") \"w\" )\n        ; common header\n        fprintf( p \"file-format-id 1.1;\\ndefault\\n{\\n}\\n\" )\n        foreach( lib_cell netlist_list\n            
lib = car(lib_cell)\n            cell = cadr(lib_cell)\n            cell_lib_dir = get_lib_directory(lib)\n            fprintf( p \"cell %s.%s\\n{\\n\" lib cell )\n            fprintf( p \"    non-inherited string prop sourcefile = \\\"%s/%s/netlist/netlist\\\";\\n}\\n\"\n                     cell_lib_dir cell )\n        )\n        close(p)\n    )\n)\n\n; Write testbench information to file.\nprocedure( write_testbench_info_to_file(sdb result_file output_list en_corner_list)\n    let( (p output_count)\n\n        ; write testbench information to result_file\n        p = outfile(result_file \"w\")\n\n        fprintf(p \"corners:\\n\")\n        foreach( corn cadr(axlGetCorners(sdb))\n            fprintf(p \"  - %s\\n\" corn)\n        )\n        fprintf(p \"enabled_corners:\\n\")\n        foreach( corn en_corner_list\n            fprintf(p \"  - %s\\n\" corn)\n        )\n        fprintf(p \"parameters:\\n\")\n        if( var_list = cadr(axlGetVars(sdb)) then\n            foreach( var_name var_list\n                fprintf(p \"  %s: \\\"%s\\\"\\n\" var_name axlGetVarValue(axlGetVar(sdb var_name)))\n            )\n        else\n            fprintf(p \"  {}\\n\")\n        )\n        fprintf(p \"outputs:\\n\")\n        output_count = 0\n        foreach( out_obj output_list\n            if( rexMatchp( \"\\\"\" out_obj->name) then\n                warn(\"Output expression name (%s) have quotes, skipping\" out_obj->name)\n            else\n                fprintf(p \"  \\\"%s\\\": !!str %A\\n\" out_obj->name out_obj->expression)\n                output_count = output_count + 1\n            )\n        )\n        when( output_count == 0\n            fprintf(p \"  {}\\n\")\n        )\n        close(p)\n    )\n)\n\n; Instantiates a testbench.\n;\n; Copy a testbench template to the desired location, replace instances, make config view,\n; and also setup corner settings in adexl.\n; this method will also record list of corners, global variables, and output expressions\n; to 
result_file\nprocedure( instantiate_testbench(tb_cell targ_lib\n                                 config_libs config_views config_stops\n                                 default_corner corner_file def_files\n                                 tech_lib result_file\n                                 \"tttttttltt\")\n    let( (session_name session_sdb session sdb test_names test_name test tool_args corner_list\n          ade_symbol ade_session output_list tmp_state_name state_obj success)\n\n        tmp_state_name = \"orig_state\"\n\n        ; check if temporary ADE session state already exists, if so, delete it\n        state_obj = ddGetObj(targ_lib tb_cell tmp_state_name)\n        when( state_obj\n            success = ddDeleteObj(state_obj)\n            unless( success\n                error(\"Cannot delete orig_state cellview.\")\n            )\n        )\n\n        ; create config view\n        create_config_view(targ_lib tb_cell \"config\" config_libs config_views config_stops)\n\n        ; session_name = \"modify_adexl\"\n        session_name = sprintf(nil \"modify_adexl_%d\" bag_modify_adexl_counter)\n        bag_modify_adexl_counter = bag_modify_adexl_counter + 1\n\n        session_sdb = open_adexl_session(targ_lib tb_cell \"adexl\" session_name \"a\")\n        session = car(session_sdb)\n        sdb = cadr(session_sdb)\n\n        ; check that only one test is defined\n        test_names = cadr(axlGetTests(sdb))\n        when(length(test_names) != 1\n            axlCommitSetupDB(sdb)\n            axlCloseSetupDB(sdb)\n            axlCloseSession(session)\n            error(\"ADEXL testbench must have exactly 1 test defined.\")\n        )\n\n        ; save current test setup state\n        axlSaveSetupState(session \"adexl_default\" \"All\")\n\n        ; change all tests to use config view, and set all test's definition files\n        ; also get a list of defined output expressions\n        ; step 1: get ADE session\n        test_name = car(test_names)\n        
ade_symbol = axlGetToolSession(session_name test_name)\n        ade_session = asiGetSession(ade_symbol)\n        ; step 2: save original ADE session\n        asiSaveState(ade_session ?name tmp_state_name ?option 'cellview ?lib targ_lib ?cell tb_cell)\n        ; step 3: change test library\n        test = axlGetTest(sdb test_name)\n        tool_args = axlGetTestToolArgs(test)\n        set_assoc_list(tool_args \"view\" \"config\")\n        set_assoc_list(tool_args \"lib\" targ_lib)\n        set_assoc_list(tool_args \"cell\" tb_cell)\n        axlSetTestToolArgs(test tool_args)\n        ; step 4: reopen ADE session, then load original ADE state\n        ade_symbol = axlGetToolSession(session_name test_name)\n        ade_session = asiGetSession(ade_symbol)\n        asiLoadState(ade_session ?name tmp_state_name ?option 'cellview)\n        asiSetEnvOptionVal(ade_session 'definitionFiles def_files)\n        output_list = setof(ele asiGetOutputList(ade_session) ele->name)\n        ; step 5: delete temporary ADE session state\n        state_obj = ddGetObj(targ_lib tb_cell tmp_state_name)\n        ddDeleteObj(state_obj)\n\n        axlMainAppSaveSetup(session_name)\n\n        ; load corner\n        unless(axlLoadCorners(sdb corner_file)\n            axlCommitSetupDB(sdb)\n            axlCloseSetupDB(sdb)\n            axlCloseSession(session)\n            error(\"Error loading corner file %s to %s__%s (%s)\" corner_file lib_name cell_name view_name)\n        )\n\n        ; set default corner\n        corner_list = list(default_corner)\n        enable_adexl_corners(sdb corner_list nil)\n\n        ; write testbench information to file\n        write_testbench_info_to_file(sdb result_file output_list corner_list)\n\n        ; save and close\n        axlSaveSetupState(session \"adexl_default\" \"All\")\n        axlSaveSetupState(session \"ocean_default\" \"All\")\n        axlMainAppSaveSetup(session_name)\n        axlCommitSetupDB(sdb)\n        axlCloseSetupDB(sdb)\n        
axlCloseSession(session)\n    )\n)\n\n; Returns parameter and corner information of a testbench.\nprocedure( get_testbench_info(tb_lib tb_cell result_file \"ttt\")\n    let( (session_name session_sdb session sdb test_names test_name ade_symbol asi_sess\n        output_list corner_list en_list success)\n        session_name = \"read_adexl\"\n        session_sdb = open_adexl_session(tb_lib tb_cell \"adexl\" session_name \"r\")\n        session = car(session_sdb)\n        sdb = cadr(session_sdb)\n\n        ; check that only one test is defined\n        test_names = cadr(axlGetTests(sdb))\n        when(length(test_names) != 1\n            axlCommitSetupDB(sdb)\n            axlCloseSetupDB(sdb)\n            axlCloseSession(session)\n            error(\"ADEXL testbench must have exactly 1 test defined.\")\n        )\n\n        ; get output list\n        test_name = car(test_names)\n        ade_symbol = axlGetToolSession(session_name test_name)\n        asi_sess = sevEnvironment(ade_symbol)\n        output_list = setof(ele asiGetOutputList(asi_sess) ele->name)\n\n        ; get enabled corners\n        corner_list = cadr(axlGetCorners(sdb))\n        en_list = setof(corner corner_list axlGetEnabled(axlGetCorner(sdb corner)))\n\n        ; write testbench information to file\n        write_testbench_info_to_file(sdb result_file output_list en_list)\n\n        ; close\n        axlCommitSetupDB(sdb)\n        axlCloseSetupDB(sdb)\n        axlCloseSession(session)\n    )\n)\n\n; Configure run options.  Used to setup monte carlo parameters.\n; run_params is an association list of run options and their values.  
The key \"mode\"\n; corresponds to the run mode.\nprocedure( set_run_options(session sdb run_params \"ggl\")\n    let( (run_mode opt_list run_opt)\n        when( run_mode = cadr(assoc(\"mode\" run_params))\n                  ; no options for single run/sweep mode.\n            cond( (run_mode == \"Single Run, Sweeps and Corners\"\n                   opt_list = nil)\n                  (run_mode == \"Monte Carlo Sampling\"\n                   opt_list = '(\"mcnumpoints\" \"mcmethod\") )\n                  ('t\n                      axlCloseSession(session)\n                      error(\"Unsupported run mode: %s\" run_mode) )\n            )\n            foreach( opt_name opt_list\n                when( opt_val = cadr(assoc(opt_name run_params))\n                    run_opt = axlPutRunOption(sdb run_mode opt_name)\n                    axlSetRunOptionValue(run_opt opt_val)\n                )\n            )\n            axlSetCurrentRunMode(sdb run_mode)\n        )\n    )\n)\n\n; modify the given testbench.\n; tb_lib and tb_cell describes the library and cell of the testbench to simulate.\n; conf_file contains the config view settings.\n; opt_file contains the association list of run mode options.\n; corner_file contains a list of corners to simulate.\n; param_file contains the association list of parameter values.\nprocedure( modify_testbench(tb_lib tb_cell conf_file opt_file corner_file param_file env_params_file \"ttttttt\")\n    let( (tmp_list session sdb conf_list run_params corner_list param_values env_param_values session_name)\n        sprintf(session_name \"bag_sim_adexl_%s\" getCurrentTime())\n\n        ; read inputs from file.\n        conf_list = parse_data_from_file(conf_file)\n        run_params = parse_data_from_file(opt_file)\n        corner_list = parse_data_from_file(corner_file)\n        param_values = parse_data_from_file(param_file)\n        env_param_values = parse_data_from_file(env_params_file)\n\n        ; modify config view\n        when( 
conf_list\n            edit_config_view(tb_lib tb_cell \"config\" conf_list)\n        )\n\n        tmp_list = open_adexl_session(tb_lib tb_cell \"adexl\" session_name \"a\")\n        session = car(tmp_list)\n        sdb = cadr(tmp_list)\n\n        ; change corners, parameters, and run options\n        enable_adexl_corners( sdb corner_list env_param_values)\n        set_adexl_parameters( sdb param_values )\n        set_run_options( session sdb run_params )\n\n        ; save and close\n        axlSaveSetupState(session \"adexl_default\" \"All\")\n        axlSaveSetupState(session \"ocean_default\" \"All\")\n        axlMainAppSaveSetup(session_name)\n        axlCommitSetupDB(sdb)\n        axlCloseSetupDB(sdb)\n        axlCloseSession(session)\n    )\n)\n\n;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;\n;;  BAG server related functions            ;;\n;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;\n\nprocedure( stdoutHandler(ipcId data)\n    let( (result result_str)\n        if( bag_server_started > 0 then\n            printf(\"*INFO* Evaluate expression from BAG process: %s\\n\" data)\n            if( result = errsetstring(data 't) then\n                sprintf(result_str \"%A\\n\" car(result))\n            else\n                sprintf(result_str \"%s\\n\" car(nthelem(5 errset.errset)))\n            )\n            printf(\"*INFO* Sending result to BAG process: %s\" result_str)\n            ipcWriteProcess(ipcId sprintf(nil \"%d\\n\" strlen(result_str)))\n            ipcWriteProcess(ipcId result_str)\n            't\n        else\n            if( data == \"BAG skill server has started.  Yay!\\n\" then\n                bag_server_started = 1\n                printf(\"*INFO* BAG skill server started.\\n\")\n            else\n                printf(\"*INFO* Waiting for BAG skill server.  
Message: %s\\n\" data)\n            )\n        )\n    )\n)\n\nprocedure( stderrHandler(ipcId data)\n    warn(\"BAG server process error: %s\\n\" data)\n    warn(\"Shutting down BAG server.\")\n    ipcKillProcess(ipcId)\n    't\n)\n\nprocedure( exitHandler(ipcId exitId)\n    printf(\"*INFO* BAG server process exited with status: %d\\n\" exitId)\n    't\n)\n\nprocedure( start_bag_server()\n    bag_server_started = 0\n    printf(\"*INFO* Starting BAG server process.\\n\")\n    ipcBeginProcess(\"bash virt_server.sh\" \"\" 'stdoutHandler 'stderrHandler 'exitHandler \"\")\n)\n\nbag_server_started = 0\nbag_modify_adexl_counter = 0\nbag_proc = start_bag_server()\n"
  },
  {
    "path": "run_scripts/virt_server.sh",
    "content": "#!/usr/bin/env bash\n\nexport PYTHONPATH=\"${BAG_FRAMEWORK}\"\n\nexport cmd=\"-m bag.virtuoso run_skill_server\"\nexport min_port=5000\nexport max_port=9999\nexport port_file=\"BAG_server_port.txt\"\nexport log=\"skill_server.log\"\n\nexport cmd=\"${BAG_PYTHON} ${cmd} ${min_port} ${max_port} ${port_file} ${log}\"\nexec $cmd\n"
  },
  {
    "path": "setup.py",
    "content": "# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\n\n\nsetup(\n    name='bag',\n    version='2.0',\n    description='Berkeley Analog Generator',\n    classifiers=[\n        'Development Status :: 3 - Alpha',\n        'License :: OSI Approved :: BSD License',\n        'Operating System :: POSIX :: Linux',\n        'Programming Language :: Python :: 3.5',\n        'Programming Language :: Python :: 3.6',\n        'Programming Language :: Python :: 3.7',\n    ],\n    author='Eric Chang',\n    author_email='pkerichang@berkeley.edu',\n    packages=find_packages(),\n    python_requires='>=3.5',\n    install_requires=[\n        'setuptools>=18.5',\n        'PyYAML>=3.11',\n        'Jinja2>=2.9',\n        'numpy>=1.10',\n        'networkx>=1.11',\n        'pexpect>=4.0',\n        'pyzmq>=15.2.0',\n        'scipy>=0.17',\n        'matplotlib>=1.5',\n        'rtree',\n        'h5py',\n        'Shapely',\n    ],\n    extras_require={\n        'mdao': ['openmdao']\n    },\n    tests_require=[\n        'openmdao',\n        'pytest',\n    ],\n    package_data={\n        'bag.interface': ['templates/*'],\n        'bag.verification': ['templates/*'],\n    },\n)\n"
  },
  {
    "path": "tests/LICENSE",
    "content": "BSD 3-Clause License\n\nCopyright (c) 2018, Regents of the University of California\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n  list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n  this list of conditions and the following disclaimer in the documentation\n  and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n  contributors may be used to endorse or promote products derived from\n  this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "tests/__init__.py",
    "content": ""
  },
  {
    "path": "tests/layout/LICENSE",
    "content": "BSD 3-Clause License\n\nCopyright (c) 2018, Regents of the University of California\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n  list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n  this list of conditions and the following disclaimer in the documentation\n  and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n  contributors may be used to endorse or promote products derived from\n  this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "tests/layout/__init__.py",
    "content": ""
  },
  {
    "path": "tests/layout/routing/LICENSE",
    "content": "BSD 3-Clause License\n\nCopyright (c) 2018, Regents of the University of California\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n  list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n  this list of conditions and the following disclaimer in the documentation\n  and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n  contributors may be used to endorse or promote products derived from\n  this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "tests/layout/routing/__init__.py",
    "content": ""
  },
  {
    "path": "tests/layout/routing/test_fill.py",
    "content": "from itertools import product\n\nimport pytest\n\nfrom bag.layout.routing.fill import fill_symmetric_helper\n\n\ndef check_disjoint_union(outer_list, inner_list, start, stop):\n    # test outer list has 1 more element than inner list\n    assert len(outer_list) == len(inner_list) + 1\n\n    sintv, eintv = outer_list[0], outer_list[-1]\n    if inner_list:\n        # test outer list covers more range than inner list\n        assert sintv[0] <= inner_list[0][0] and eintv[1] >= inner_list[-1][1]\n    # test outer list touches both boundaries\n    assert sintv[0] == start and eintv[1] == stop\n\n    # test intervals are disjoint and union is equal to given interval\n    for idx in range(len(outer_list)):\n        intv1 = outer_list[idx]\n        # test interval is non-negative\n        assert intv1[0] <= intv1[1]\n        if idx < len(inner_list):\n            intv2 = inner_list[idx]\n            # test interval is non-negative\n            assert intv2[0] <= intv2[1]\n            # test interval abuts\n            assert intv1[1] == intv2[0]\n            assert intv2[1] == outer_list[idx + 1][0]\n\n\ndef check_symmetric(intv_list, start, stop):\n    # test given interval list is symmetric\n    flip_list = [(stop + start - b, stop + start - a) for a, b in reversed(intv_list)]\n    for i1, i2 in zip(intv_list, flip_list):\n        assert i1[0] == i2[0] and i1[1] == i2[1]\n\n\ndef check_props(fill_list, space_list, num_diff_sp1, num_diff_sp2, n, tot_intv, inc_sp, sp,\n                eq_sp_parity, num_diff_sp_max, num_fill, fill_first, start, stop, n_flen_max, sp_edge_tweak=False):\n    # check num_diff_sp is the same\n    assert num_diff_sp1 == num_diff_sp2\n    if n % 2 == eq_sp_parity and not sp_edge_tweak:\n        # check all spaces are the same\n        assert num_diff_sp1 == 0\n    else:\n        # check num_diff_sp is less than or equal to 1\n        assert num_diff_sp1 <= num_diff_sp_max\n    # test we get correct number of fill\n    assert 
len(fill_list) == num_fill\n    # test fill and space are disjoint and union is correct\n    if fill_first:\n        check_disjoint_union(fill_list, space_list, start, stop)\n    else:\n        check_disjoint_union(space_list, fill_list, start, stop)\n    # check symmetry\n    check_symmetric(fill_list, tot_intv[0], tot_intv[1])\n    check_symmetric(space_list, tot_intv[0], tot_intv[1])\n    # check fill has at most n_flen_max distinct lengths, spanning at most n_flen_max - 1\n    len_list = sorted(set((b - a) for a, b in fill_list))\n    assert len(len_list) <= n_flen_max\n    assert (len_list[-1] - len_list[0]) <= n_flen_max - 1\n\n    if space_list:\n        # check space has only two lengths, and they differ by 1\n        len_list = sorted(set((b - a) for a, b in space_list))\n        assert len(len_list) <= (2 if num_diff_sp1 > 0 else 1)\n        assert (len_list[-1] - len_list[0]) <= 1\n        # check that space has the right values\n        if len(len_list) == 1:\n            # if only one space, check that it is sp + inc only if num_diff_sp > 0\n            if num_diff_sp1 > 0:\n                sp_correct = sp + 1 if inc_sp else sp - 1\n            else:\n                sp_correct = sp\n            assert len_list[0] == sp_correct\n        else:\n            # check it has space sp and sp + inc_sp\n            if inc_sp:\n                assert len_list[0] == sp\n            else:\n                assert len_list[-1] == sp\n\n\ndef test_fill_symmetric_non_cyclic():\n    # test fill symmetric for non-cyclic\n    sp_list = [3, 4, 5]\n    inc_sp_list = [True, False]\n    offset_list = [0, 4, 7]\n    foe_list = [True, False]\n    area_max = 50\n    for sp, inc_sp, offset, foe in product(sp_list, inc_sp_list, offset_list, foe_list):\n        for area in range(sp + 1, area_max + 1):\n            tot_intv = offset, offset + area\n            for nfill in range(1, area - sp + 1):\n                nsp = nfill - 1 if foe else nfill + 1\n                # compute minimum possible footprint\n      
          if nfill % 2 == 1 or inc_sp:\n                    # minimum possible footprint\n                    min_footprint = nfill * 1 + nsp * sp\n                else:\n                    # if we have even fill and we can decrease space, then we can decrease middle space by 1\n                    min_footprint = nfill * 1 + nsp * sp - 1\n                if min_footprint > area:\n                    # test exception when drawing with no solution\n                    # we have no solution when minimum possible footprint > area\n                    with pytest.raises(ValueError):\n                        fill_symmetric_helper(area, nfill, sp, offset=offset, inc_sp=inc_sp,\n                                              invert=False, fill_on_edge=foe, cyclic=False)\n                    with pytest.raises(ValueError):\n                        fill_symmetric_helper(area, nfill, sp, offset=offset, inc_sp=inc_sp,\n                                              invert=True, fill_on_edge=foe, cyclic=False)\n                else:\n                    # get fill and space list\n                    fill_list, num_diff_sp1 = fill_symmetric_helper(area, nfill, sp, offset=offset, inc_sp=inc_sp,\n                                                                    invert=False, fill_on_edge=foe, cyclic=False)\n                    space_list, num_diff_sp2 = fill_symmetric_helper(area, nfill, sp, offset=offset, inc_sp=inc_sp,\n                                                                     invert=True, fill_on_edge=foe, cyclic=False)\n\n                    check_props(fill_list, space_list, num_diff_sp1, num_diff_sp2, nfill, tot_intv, inc_sp, sp,\n                                1, 1, nfill, foe, tot_intv[0], tot_intv[1], 2)\n\n\ndef test_fill_symmetric_cyclic_edge_fill():\n    # test fill symmetric for cyclic, fill on edge\n    sp_list = [3, 4, 5]\n    inc_sp_list = [True, False]\n    offset_list = [0, 4, 7]\n    area_max = 50\n    for sp, inc_sp, offset in product(sp_list, 
inc_sp_list, offset_list):\n        for area in range(sp + 1, area_max + 1):\n            tot_intv = offset, offset + area\n            for nfill in range(1, area - sp + 1):\n                nsp = nfill\n                if nfill % 2 == 0 or inc_sp:\n                    # minimum possible footprint.  Edge fill block must be even (hence the + 1)\n                    min_footprint = nfill * 1 + 1 + nsp * sp\n                else:\n                    # if we have odd fill and we can decrease space, then we can decrease middle space by 1\n                    min_footprint = nfill * 1 + 1 + nsp * sp - 1\n                if min_footprint > area:\n                    # test exception when drawing with no solution\n                    # we have no solution when minimum possible footprint > area\n                    with pytest.raises(ValueError):\n                        fill_symmetric_helper(area, nfill, sp, offset=offset, inc_sp=inc_sp,\n                                              invert=False, fill_on_edge=True, cyclic=True)\n                    with pytest.raises(ValueError):\n                        fill_symmetric_helper(area, nfill, sp, offset=offset, inc_sp=inc_sp,\n                                              invert=True, fill_on_edge=True, cyclic=True)\n                else:\n                    # get fill and space list\n                    fill_list, num_diff_sp1 = fill_symmetric_helper(area, nfill, sp, offset=offset, inc_sp=inc_sp,\n                                                                    invert=False, fill_on_edge=True, cyclic=True)\n                    space_list, num_diff_sp2 = fill_symmetric_helper(area, nfill, sp, offset=offset, inc_sp=inc_sp,\n                                                                     invert=True, fill_on_edge=True, cyclic=True)\n                    # test boundary fills centers on edge\n                    sintv, eintv = fill_list[0], fill_list[-1]\n                    assert (sintv[1] + sintv[0]) % 2 == 0 and 
(eintv[1] + eintv[0]) % 2 == 0\n                    assert (sintv[1] + sintv[0]) // 2 == tot_intv[0] and (eintv[1] + eintv[0]) // 2 == tot_intv[1]\n                    # test other properties\n                    check_props(fill_list, space_list, num_diff_sp1, num_diff_sp2, nfill, tot_intv, inc_sp, sp,\n                                0, 1, nfill + 1, True, sintv[0], eintv[1], 3)\n\n\ndef test_fill_symmetric_cyclic_edge_space():\n    # test fill symmetric for cyclic, space on edge\n    sp_list = [3, 4, 5]\n    inc_sp_list = [True, False]\n    offset_list = [0, 4, 7]\n    area_max = 50\n    for sp, inc_sp, offset in product(sp_list, inc_sp_list, offset_list):\n        for area in range(sp + 1, area_max + 1):\n            tot_intv = offset, offset + area\n            for nfill in range(1, area - sp + 1):\n                nsp = nfill\n                adj_sp = 1 if inc_sp else -1\n                sp_edge_tweak = sp % 2 == 1\n                if sp_edge_tweak:\n                    # minimum possible footprint.  
Edge space block must be even (hence the + adj_sp)\n                    min_footprint = nfill * 1 + nsp * sp + adj_sp\n                else:\n                    min_footprint = nfill * 1 + nsp * sp\n                if nfill % 2 == 0 and not inc_sp:\n                    # if we have middle space block, we can subtract one more from middle.\n                    min_footprint -= 1\n                if min_footprint > area:\n                    # test exception when drawing with no solution\n                    # we have no solution when minimum possible footprint > area\n                    with pytest.raises(ValueError):\n                        fill_symmetric_helper(area, nfill, sp, offset=offset, inc_sp=inc_sp,\n                                              invert=False, fill_on_edge=False, cyclic=True)\n                        print(area, nfill, sp, inc_sp)\n                    with pytest.raises(ValueError):\n                        fill_symmetric_helper(area, nfill, sp, offset=offset, inc_sp=inc_sp,\n                                              invert=True, fill_on_edge=False, cyclic=True)\n                else:\n                    # get fill and space list\n                    fill_list, num_diff_sp1 = fill_symmetric_helper(area, nfill, sp, offset=offset, inc_sp=inc_sp,\n                                                                    invert=False, fill_on_edge=False, cyclic=True)\n                    space_list, num_diff_sp2 = fill_symmetric_helper(area, nfill, sp, offset=offset, inc_sp=inc_sp,\n                                                                     invert=True, fill_on_edge=False, cyclic=True)\n\n                    # test boundary space centers on edge\n                    sintv, eintv = space_list[0], space_list[-1]\n                    assert (sintv[1] + sintv[0]) % 2 == 0 and (eintv[1] + eintv[0]) % 2 == 0\n                    assert (sintv[1] + sintv[0]) // 2 == tot_intv[0] and (eintv[1] + eintv[0]) // 2 == tot_intv[1]\n                
    # test other properties\n                    check_props(fill_list, space_list, num_diff_sp1, num_diff_sp2, nfill, tot_intv, inc_sp, sp,\n                                1, 2, nfill, False, sintv[0], eintv[1], 2, sp_edge_tweak)\n"
  }
]